// printKeyAnalysis reads a list of metric keys (one per line) from file,
// hashes each key onto the ring, and reports how evenly the keys land
// across the nodes.
func printKeyAnalysis(hr *hashing.HashRing, file string) {
	keys := make(map[string]int)
	total := 0
	data, err := os.ReadFile(file)
	if err != nil {
		log.Fatalf("Error: %s", err)
	}
	for _, l := range strings.Split(string(data), "\n") {
		l = strings.TrimSpace(l)
		if l == "" {
			// Skip blank lines (including the trailing newline) so they
			// are not hashed and counted as keys.
			continue
		}
		n := hr.GetNode(l)
		server := fmt.Sprintf("%s:%s", n.Server, n.Instance)
		keys[server]++
		total++
	}

	// Sort the server names for stable, readable output.
	sortedKeys := make([]string, 0, len(keys))
	for k := range keys {
		sortedKeys = append(sortedKeys, k)
	}
	sort.Strings(sortedKeys)

	fmt.Printf("Keys per node:\n")
	average := float64(total) / float64(hr.Len())
	variance := float64(0) // accumulates the sum of squared deviations
	for _, k := range sortedKeys {
		fmt.Printf("%s\t%d\n", k, keys[k])
		variance += math.Pow(float64(keys[k])-average, 2)
	}
	fmt.Printf("\nTotal Metric Keys: %d\n", total)
	fmt.Printf("Ideal keys per node: %.2f\n", average)
	fmt.Printf("Deviation: %.4f\n", math.Sqrt(variance/float64(len(keys))))
}
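// For illustration (the numbers here are hypothetical, not from the source):
// if a 3-node ring receives total = 300 keys split 90/100/110,
// printKeyAnalysis reports an ideal of 300/3 = 100.00 and a deviation of
// sqrt(((90-100)^2 + (100-100)^2 + (110-100)^2) / 3) = sqrt(200/3) ≈ 8.1650.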
// restoreTarWorker consumes metrics from workIn and uploads each one to the
// server its name hashes to. It exits when workIn is closed.
func restoreTarWorker(workIn chan *MetricData, ring *hashing.HashRing, servers []string, wg *sync.WaitGroup) {
	defer wg.Done()
	for work := range workIn {
		server := ring.GetNode(work.Name).Server
		if SingleHost && server != servers[0] {
			// In single-host mode only the first server is restored to;
			// metrics that hash elsewhere are skipped.
			log.Printf("In single mode, skipping metric %s for server %s", work.Name, server)
			continue
		}
		log.Printf("Uploading %s => %s", work.Name, server)
		if err := PostMetric(server, work); err != nil {
			// Record the failure but keep draining the queue.
			workerErrors = true
		}
	}
}
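// A minimal sketch, not taken from the source, of how restoreTarWorker is
// typically driven as a worker pool. The function name restoreMetrics, the
// channel buffer size, and the worker count of 10 are illustrative
// assumptions; only restoreTarWorker, MetricData, hashing.HashRing, and
// workerErrors come from the code above.
func restoreMetrics(metrics []*MetricData, ring *hashing.HashRing, servers []string) {
	workIn := make(chan *MetricData, 25)
	wg := new(sync.WaitGroup)

	// Spawn a fixed pool of workers; each exits when workIn is closed.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go restoreTarWorker(workIn, ring, servers, wg)
	}

	// Feed the queue, then close it so the workers' range loops terminate.
	for _, m := range metrics {
		workIn <- m
	}
	close(workIn)
	wg.Wait()

	if workerErrors {
		log.Printf("Errors occurred while uploading metrics")
	}
}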
// printAnalysis reports how the ring's hash buckets are distributed across
// the nodes: per-node counts, the min-max spread, and the standard
// deviation from a perfectly even split.
func printAnalysis(hr *hashing.HashRing) {
	hash := hr.BucketsPerNode() // node name => bucket count
	keys := make([]string, 0, len(hash))
	min := 0xFFFF // the ring has 0xFFFF buckets, so no node can exceed this
	max := 0
	v := float64(0) // accumulates the sum of squared deviations
	average := float64(0xFFFF) / float64(hr.Len())

	// Sort node names for stable, readable output.
	for k := range hash {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("Node %s:\t%d\n", k, hash[k])
		if hash[k] > max {
			max = hash[k]
		}
		if hash[k] < min {
			min = hash[k]
		}
		v += math.Pow(float64(hash[k])-average, 2)
	}
	v /= float64(hr.Len())

	fmt.Printf("\nIdeal bucket count per server: %.2f\n", average)
	fmt.Printf("Spread: %d - %d = %d\n", max, min, max-min)
	fmt.Printf("Deviation: %.4f\n", math.Sqrt(v))
}
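// Both printKeyAnalysis and printAnalysis report the population standard
// deviation of the per-node counts c_1..c_N about the mean mu:
//
//	sigma = sqrt((1/N) * sum_i (c_i - mu)^2)
//
// Note the denominators differ: printAnalysis uses N = hr.Len(), while
// printKeyAnalysis uses N = len(keys), i.e. only the servers that received
// at least one key.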