func TestShardedkv(t *testing.T) {
	var shards []Shard
	nElements := 1000
	nShards := 10

	for i := 0; i < nShards; i++ {
		label := "test_shard" + strconv.Itoa(i)
		shards = append(shards, Shard{Name: label, Backend: st.New()})
	}

	chooser := ch.New()
	kv := New(chooser, shards)

	for i := 0; i < nElements; i++ {
		kv.Set("test"+strconv.Itoa(i), []byte("value"+strconv.Itoa(i)))
	}

	for i := 0; i < nElements; i++ {
		k := "test" + strconv.Itoa(i)
		v, ok, err := kv.Get(k)
		if !ok {
			t.Errorf("failed to get key %s: %v", k, err)
		}
		if string(v) != "value"+strconv.Itoa(i) {
			t.Errorf("failed to get a valid value: %s != \"value%d\"", v, i)
		}
	}

	// add a second set of shards and begin migrating onto them
	var migrationBuckets []string
	for i := nShards; i < nShards*2; i++ {
		label := "test_shard" + strconv.Itoa(i)
		migrationBuckets = append(migrationBuckets, label)
		backend := st.New()
		shards = append(shards, Shard{Name: label, Backend: backend})
		kv.AddShard(label, backend)
	}

	migration := ch.New()
	migration.SetBuckets(migrationBuckets)
	kv.BeginMigration(migration)

	// make sure requesting still works
	for i := 0; i < nElements; i++ {
		k := "test" + strconv.Itoa(i)
		v, ok, err := kv.Get(k)
		if !ok {
			t.Errorf("failed to get key %s: %v", k, err)
		}
		if string(v) != "value"+strconv.Itoa(i) {
			t.Errorf("failed to get a valid value: %s != \"value%d\"", v, i)
		}
	}

	// make sure setting still works
	for i := 0; i < nElements; i++ {
		kv.Set("test"+strconv.Itoa(i), []byte("value"+strconv.Itoa(i)))
	}

	for i := 0; i < nElements; i++ {
		k := "test" + strconv.Itoa(i)
		v, ok, err := kv.Get(k)
		if !ok {
			t.Errorf("failed to get key %s: %v", k, err)
		}
		if string(v) != "value"+strconv.Itoa(i) {
			t.Errorf("failed to get a valid value: %s != \"value%d\"", v, i)
		}
	}

	// and that deleting removes from both old and new shards during a migration
	for i := 0; i < nElements; i++ {
		kv.Delete("test" + strconv.Itoa(i))
	}

	for i := 0; i < nElements; i++ {
		k := "test" + strconv.Itoa(i)
		_, ok, _ := kv.Get(k)
		if ok {
			t.Errorf("got a key that shouldn't have been there")
		}
	}

	// set the keys again
	for i := 0; i < nElements; i++ {
		kv.Set("test"+strconv.Itoa(i), []byte("value"+strconv.Itoa(i)))
	}

	// end the migration
	kv.EndMigration()

	// delete the old shards
	for i := 0; i < nShards; i++ {
		label := "test_shard" + strconv.Itoa(i)
		kv.DeleteShard(label)
	}

	// and make sure we can still get to the keys
	for i := 0; i < nElements; i++ {
		k := "test" + strconv.Itoa(i)
		v, ok, err := kv.Get(k)
		if !ok {
			t.Errorf("failed to get key %s: %v", k, err)
		}
		if string(v) != "value"+strconv.Itoa(i) {
			t.Errorf("failed to get a valid value: %s != \"value%d\"", v, i)
		}
	}
}
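// Shard, New, st (the storage backend package), and ch (the chooser
// package) come from elsewhere in the repository. For reference, a
// minimal sketch of the storage interface the test exercises, inferred
// from the calls above; the actual interface in the repository may
// carry extra methods (e.g. connection management):
type Storage interface {
	Get(key string) ([]byte, bool, error)
	Set(key string, value []byte) error
	Delete(key string) (bool, error)
}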
func main() {
	chooserType := flag.String("chooser", "jump", "shardedkv chooser")
	expectedShard := flag.String("shard", "", "expected shard name")
	numShards := flag.Int("n", 16, "number of shards")
	fname := flag.String("f", "", "input file name")

	flag.Parse()

	var buckets []string
	for i := 1; i <= *numShards; i++ {
		buckets = append(buckets, fmt.Sprintf("shard%d", i))
	}

	var chooser shardedkv.Chooser

	switch *chooserType {
	case "jump":
		chooser = jump.New(func(b []byte) uint64 { return siphash.Hash(0, 0, b) })
	case "ketama":
		chooser = ketama.New()
	case "chash":
		chooser = chash.New()
	default:
		log.Fatalf("unknown chooser: %s; known choosers: jump, ketama, chash", *chooserType)
	}

	chooser.SetBuckets(buckets)

	results := make(map[string]int)

	var f io.Reader
	f, err := os.Open(*fname)
	if err != nil {
		log.Fatalf("error loading input file %s: %v", *fname, err)
	}

	if strings.HasSuffix(*fname, ".gz") {
		f, err = gzip.NewReader(f)
		if err != nil {
			log.Fatalf("error reading gzip stream from %s: %v", *fname, err)
		}
	}

	scan := bufio.NewScanner(f)
	for scan.Scan() {
		shard := chooser.Choose(scan.Text())
		if *expectedShard != "" && shard != *expectedShard {
			fmt.Printf("wrong shard %s %s\n", shard, scan.Text())
		}
		results[shard]++
	}

	stats := onlinestats.NewRunning()

	fmt.Println("chooser", *chooserType)
	for _, b := range buckets {
		v := results[b]
		fmt.Printf("%-10s %d\n", b, v)
		stats.Push(float64(v))
	}

	if *expectedShard == "" {
		fmt.Println("mean", stats.Mean())
		fmt.Println("stdev", stats.Stddev(), stats.Stddev()/stats.Mean())
	}
}
func main() {
	chooser := flag.String("chooser", "jump", "chooser to use")
	numBuckets := flag.Int("b", 8, "buckets")
	replicas := flag.Int("r", 1, "replicas")
	cpuprofile := flag.String("cpuprofile", "", "cpu profile")

	flag.Parse()

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	var buckets []string
	for i := 0; i < *numBuckets; i++ {
		buckets = append(buckets, fmt.Sprintf("192.168.%d.%d", i, 10+2*i))
	}

	results := make(map[string]int)

	scan := bufio.NewScanner(os.Stdin)

	hasher := func(b []byte) uint64 {
		h := fnv.New64a()
		h.Write(b)
		return h.Sum64()
	}

	var j replichooser

	switch *chooser {
	case "ketama":
		j = ketama.New()
	case "chash":
		j = chash.New()
	case "jump":
		j = jump.New(hasher)
	default:
		log.Fatalf("unknown chooser: %s", *chooser)
	}

	j.SetBuckets(buckets)

	var totalMetrics int

	t0 := time.Now()
	for scan.Scan() {
		key := scan.Text()
		totalMetrics++
		r := j.ChooseReplicas(key, *replicas)
		for _, rr := range r {
			results[rr]++
		}
	}
	t1 := time.Since(t0)

	stats := onlinestats.NewRunning()

	fmt.Printf("total metrics processed: %d, time spent ~%ds (%d/s)\n",
		totalMetrics, int(t1.Seconds()), int(float64(totalMetrics)/t1.Seconds()))
	fmt.Printf("replication count: %d, server count: %d\n", *replicas, *numBuckets)
	fmt.Println("server distribution:")

	min := int(^uint(0) / 2)
	max := 0
	for _, b := range buckets {
		v := results[b]
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
		fmt.Printf("- server %15s: %6d (%.2f%%)\n", b, v, 100*float64(v)/float64(totalMetrics))
		stats.Push(float64(v))
	}

	fmt.Printf("band: %d - %d (diff %d, %.1f%%), mean: %.2f, stddev: %.2f\n",
		min, max, max-min, 100*float64(max-min)/float64(totalMetrics), stats.Mean(), stats.Stddev())
}
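// replichooser is not defined in this file. A minimal sketch of the
// interface the switch above relies on, inferred from the methods
// actually called (SetBuckets and ChooseReplicas); the definition in
// the repository may differ:
type replichooser interface {
	SetBuckets(buckets []string) error
	ChooseReplicas(key string, n int) []string
}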
func main() {
	numBuckets := flag.Int("b", 8, "buckets")
	chooserType := flag.String("chooser", "jump", "shardedkv chooser")
	verbose := flag.Bool("v", false, "verbose")

	flag.Parse()

	var buckets []string
	for i := 0; i < *numBuckets; i++ {
		// buckets = append(buckets, fmt.Sprintf("%016x", siphash.Hash(0x0ddc0ffeebadf00d, 0xcafebabedeadbeef, []byte(fmt.Sprintf("192.168.0.%d", 10+i)))))
		// buckets = append(buckets, fmt.Sprintf("%016x", siphash.Hash(0, 0, []byte(fmt.Sprintf("192.168.0.%d", 10+i)))))
		buckets = append(buckets, fmt.Sprintf("192.168.0.%d", 10+i))
	}

	var chooser shardedkv.Chooser

	switch *chooserType {
	case "jump":
		chooser = jump.New(func(b []byte) uint64 { return siphash.Hash(0, 0, b) })
	case "ketama":
		chooser = ketama.New()
	case "mpc":
		// multi-probe consistent hashing: seeded hash, two seeds, 21 probes
		chooser = mpc.New(
			func(b []byte, seed uint64) uint64 { return siphash.Hash(seed, 0, b) },
			[2]uint64{0x0ddc0ffeebadf00d, 0xcafebabedeadbeef},
			21,
		)
	case "chash":
		// 5120*8 + 5120*(8+16)*2 bytes
		chooser = chash.New()
	default:
		log.Fatalf("unknown chooser: %s; known choosers: jump, ketama, chash, mpc", *chooserType)
	}

	chooser.SetBuckets(buckets)

	results := make(map[string]int)

	scan := bufio.NewScanner(os.Stdin)
	for scan.Scan() {
		key := scan.Text()
		shard := chooser.Choose(key)
		if *verbose {
			fmt.Printf("%s %s\n", shard, key)
		}
		results[shard]++
	}

	stats := onlinestats.NewRunning()

	min := int(^uint(0) / 2)
	max := 0

	fmt.Println("chooser", *chooserType)
	for _, b := range buckets {
		v := results[b]
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
		fmt.Printf("%-10s %d\n", b, v)
		stats.Push(float64(v))
	}

	fmt.Printf("min=%d max=%d\n", min, max)
	fmt.Printf("mean %.2f\n", stats.Mean())
	fmt.Printf("stdev %.2f %.2f%%\n", stats.Stddev(), 100*stats.Stddev()/stats.Mean())
	fmt.Printf("peak/mean %.2f\n", float64(max)/stats.Mean())
}
func BenchmarkChash2048(b *testing.B) { benchmarkChooser(b, 2048, chash.New()) }
func BenchmarkChash8192(b *testing.B) { benchmarkChooser(b, 8192, chash.New()) }
func BenchmarkChash128(b *testing.B) { benchmarkChooser(b, 128, chash.New()) }
func BenchmarkChash512(b *testing.B) { benchmarkChooser(b, 512, chash.New()) }
func main() {
	workers := flag.Int("w", 4, "workers")
	chooserType := flag.String("chooser", "jump", "shardedkv chooser")
	verbose := flag.Bool("v", false, "verbose")

	flag.Parse()

	filech := make(chan string)
	donech := make(chan map[string]int)

	var buckets []string
	for i := 1; i <= 32; i++ {
		buckets = append(buckets, fmt.Sprintf("shard%d", i))
	}

	var chooser shardedkv.Chooser

	switch *chooserType {
	case "jump":
		chooser = jump.New(func(b []byte) uint64 { return siphash.Hash(0, 0, b) })
	case "ketama":
		chooser = ketama.New()
	case "chash":
		// 5120*8 + 5120*(8+16)*2 bytes
		chooser = chash.New()
	default:
		log.Fatalf("unknown chooser: %s; known choosers: jump, ketama, chash", *chooserType)
	}

	chooser.SetBuckets(buckets)

	for i := 0; i < *workers; i++ {
		go worker(chooser, filech, donech, *verbose)
	}

	files, _ := filepath.Glob("data/*")
	for _, f := range files {
		filech <- f
	}
	close(filech)

	// merge the per-worker shard counts
	results := make(map[string]int)
	for i := 0; i < *workers; i++ {
		shards := <-donech
		for k, v := range shards {
			results[k] += v
		}
	}

	stats := onlinestats.NewRunning()

	fmt.Println("chooser", *chooserType)
	for _, b := range buckets {
		v := results[b]
		fmt.Printf("%-10s %d\n", b, v)
		stats.Push(float64(v))
	}

	fmt.Println("mean", stats.Mean())
	fmt.Println("stdev", stats.Stddev(), stats.Stddev()/stats.Mean())
}
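// worker is launched above but not defined in this file. A minimal
// sketch, assuming each worker scans the files it receives on filech
// line by line, tallies the chosen shard for every line, and reports
// its counts on donech once filech is closed; the real helper in the
// repository may differ (e.g. in error handling):
func worker(chooser shardedkv.Chooser, filech chan string, donech chan map[string]int, verbose bool) {
	results := make(map[string]int)
	for fname := range filech {
		f, err := os.Open(fname)
		if err != nil {
			log.Printf("skipping %s: %v", fname, err)
			continue
		}
		scan := bufio.NewScanner(f)
		for scan.Scan() {
			key := scan.Text()
			shard := chooser.Choose(key)
			if verbose {
				fmt.Printf("%s %s\n", shard, key)
			}
			results[shard]++
		}
		f.Close()
	}
	donech <- results
}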
func BenchmarkChash32(b *testing.B) { benchmarkChooser(b, 32, chash.New()) }
func TestDistributionChash8192(t *testing.T) { testDistribution(t, 8192, chash.New()) }
func TestDistributionChash2048(t *testing.T) { testDistribution(t, 2048, chash.New()) }
func TestDistributionChash512(t *testing.T) { testDistribution(t, 512, chash.New()) }
func TestDistributionChash128(t *testing.T) { testDistribution(t, 128, chash.New()) }
func TestDistributionChash32(t *testing.T) { testDistribution(t, 32, chash.New()) }
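// benchmarkChooser and testDistribution are shared helpers that are not
// shown in this file. A minimal sketch of the benchmark helper, assuming
// a Chooser-style interface (SetBuckets/Choose) as used elsewhere above;
// the names and key scheme are illustrative and the real helpers in the
// repository may differ:
func benchmarkChooser(b *testing.B, numBuckets int, ch shardedkv.Chooser) {
	var buckets []string
	for i := 0; i < numBuckets; i++ {
		buckets = append(buckets, fmt.Sprintf("shard%d", i))
	}
	ch.SetBuckets(buckets)

	// precompute keys so the timed loop measures only Choose
	keys := make([]string, 1024)
	for i := range keys {
		keys[i] = "key" + strconv.Itoa(i)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ch.Choose(keys[i%len(keys)])
	}
}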