// runMVCCBatchPut benchmarks batched MVCC puts: values of valueSize bytes
// are written in batches of batchSize until b.N puts have been committed.
func runMVCCBatchPut(valueSize, batchSize int, b *testing.B) {
	rng, _ := util.NewPseudoRand()
	value := proto.Value{Bytes: util.RandBytes(rng, valueSize)}
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	rocksdb := NewInMem(proto.Attributes{Attrs: []string{"ssd"}}, testCacheSize)
	defer rocksdb.Stop()

	b.SetBytes(int64(valueSize))
	b.ResetTimer()

	for i := 0; i < b.N; i += batchSize {
		end := i + batchSize
		if end > b.N {
			end = b.N
		}

		batch := rocksdb.NewBatch()
		for j := i; j < end; j++ {
			key := proto.Key(encoding.EncodeUvarint(keyBuf[0:4], uint64(j)))
			ts := makeTS(time.Now().UnixNano(), 0)
			if err := MVCCPut(batch, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		if err := batch.Commit(); err != nil {
			b.Fatal(err)
		}
	}

	b.StopTimer()
}
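// The wrappers below are a hypothetical sketch (not part of the original
// file) showing how runMVCCBatchPut would typically be exposed as concrete
// benchmarks; the chosen value and batch sizes are illustrative only.
func BenchmarkMVCCBatch1Put10(b *testing.B) {
	runMVCCBatchPut(10, 1, b)
}

func BenchmarkMVCCBatch100Put10(b *testing.B) {
	runMVCCBatchPut(10, 100, b)
}

func BenchmarkMVCCBatch10000Put10(b *testing.B) {
	runMVCCBatchPut(10, 10000, b)
}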
// TestApproximateSize verifies that the engine's approximate-size estimate
// stays within tolerance for progressively smaller key ranges.
func TestApproximateSize(t *testing.T) {
	runWithAllEngines(func(engine Engine, t *testing.T) {
		var (
			count    = 10000
			keys     = make([]proto.EncodedKey, count)
			values   = make([][]byte, count) // Random values to prevent compression
			rand, _  = util.NewPseudoRand()
			valueLen = 10
		)
		for i := 0; i < count; i++ {
			keys[i] = []byte(fmt.Sprintf("key%8d", i))
			values[i] = util.RandBytes(rand, valueLen)
		}

		insertKeysAndValues(keys, values, engine, t)
		if err := engine.Flush(); err != nil {
			t.Fatalf("error flushing engine: %s", err)
		}

		sizePerRecord := len([]byte(keys[0])) + valueLen
		verifyApproximateSize(keys, engine, sizePerRecord, 0.15, t)
		verifyApproximateSize(keys[:count/2], engine, sizePerRecord, 0.15, t)
		verifyApproximateSize(keys[:count/4], engine, sizePerRecord, 0.15, t)
	}, t)
}
// newState creates the internal state for a MultiRaft instance, wiring up
// the group and node maps, the response channel, and the storage write task.
func newState(m *MultiRaft) *state {
	rng, _ := util.NewPseudoRand()
	return &state{
		MultiRaft:   m,
		rand:        rng,
		groups:      make(map[GroupID]*group),
		dirtyGroups: make(map[GroupID]*group),
		nodes:       make(map[NodeID]*node),
		responses:   make(chan *rpc.Call, 100),
		writeTask:   newWriteTask(m.Storage),
	}
}
func BenchmarkDecodeUvarint(b *testing.B) {
	rng, _ := util.NewPseudoRand()
	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeUvarint(nil, uint64(rng.Int63()))
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = DecodeUvarint(vals[i%len(vals)])
	}
}
func BenchmarkDecodeBytes(b *testing.B) {
	rng, _ := util.NewPseudoRand()
	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeBytes(nil, util.RandBytes(rng, 100))
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = DecodeBytes(vals[i%len(vals)])
	}
}
func BenchmarkDecodeNumericFloat(b *testing.B) {
	rng, _ := util.NewPseudoRand()
	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeNumericFloat(nil, rng.Float64())
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = DecodeNumericFloat(vals[i%len(vals)])
	}
}
// TestPut starts up an N node cluster and runs N workers that write
// to independent keys.
func TestPut(t *testing.T) {
	l := localcluster.Create(*numNodes, stopper)
	l.Start()
	defer l.Stop()

	db := makeDBClient(t, l, 0)
	setDefaultRangeMaxBytes(t, db, *rangeMaxBytes)
	checkRangeReplication(t, l, 20*time.Second)

	errs := make(chan error, *numNodes)
	start := time.Now()
	deadline := start.Add(*duration)
	var count int64
	for i := 0; i < *numNodes; i++ {
		go func() {
			r, _ := util.NewPseudoRand()
			value := util.RandBytes(r, 8192)
			for time.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if err := db.Put(fmt.Sprintf("%08d", k), v); err != nil {
					errs <- err
					return
				}
			}
			errs <- nil
		}()
	}

	for i := 0; i < *numNodes; {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is
			// still running.
			log.Infof("%d", atomic.LoadInt64(&count))
		}
	}

	elapsed := time.Since(start)
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
func BenchmarkEncodeBytes(b *testing.B) {
	rng, _ := util.NewPseudoRand()
	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = util.RandBytes(rng, 100)
	}
	buf := make([]byte, 0, 1000)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = EncodeBytes(buf, vals[i%len(vals)])
	}
}
func BenchmarkEncodeUvarint(b *testing.B) {
	rng, _ := util.NewPseudoRand()
	vals := make([]uint64, 10000)
	for i := range vals {
		vals[i] = uint64(rng.Int63())
	}
	buf := make([]byte, 0, 16)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = EncodeUvarint(buf, vals[i%len(vals)])
	}
}
func BenchmarkEncodeNumericFloat(b *testing.B) {
	rng, _ := util.NewPseudoRand()
	vals := make([]float64, 10000)
	for i := range vals {
		vals[i] = rng.Float64()
	}
	buf := make([]byte, 0, 16)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = EncodeNumericFloat(buf, vals[i%len(vals)])
	}
}
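// A minimal round-trip sketch (not in the original file) of the pairing that
// the encode/decode benchmarks above exercise. It assumes DecodeUvarint
// returns the remaining buffer followed by the decoded value, matching the
// two return values discarded in BenchmarkDecodeUvarint.
func TestUvarintRoundTripSketch(t *testing.T) {
	for _, v := range []uint64{0, 1, 1 << 20, 1<<63 - 1} {
		buf := EncodeUvarint(nil, v)
		if _, decoded := DecodeUvarint(buf); decoded != v {
			t.Errorf("round trip failed: got %d, want %d", decoded, v)
		}
	}
}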
// setupMVCCScanData writes up to numVersions values at each of numKeys
// keys. The number of versions written for each key is chosen
// randomly according to a uniform distribution. Each successive
// version is written starting at 5ns and then in 5ns increments. This
// allows scans at various times, starting at t=5ns, and continuing to
// t=5ns*(numVersions+1). A version for each key will be read on every
// such scan, but the dynamics of the scan will change depending on
// the historical timestamp. Earlier timestamps mean scans which must
// skip more historical versions; later timestamps mean scans which
// skip fewer.
//
// The creation of the rocksdb database is time consuming, especially
// for larger numbers of versions. The database is persisted between
// runs and stored in the current directory as
// "mvcc_scan_<versions>_<keys>".
func setupMVCCScanData(numVersions, numKeys int, b *testing.B) *RocksDB {
	loc := fmt.Sprintf("mvcc_scan_%d_%d", numVersions, numKeys)

	exists := true
	if _, err := os.Stat(loc); os.IsNotExist(err) {
		exists = false
	}

	const cacheSize = 8 << 30 // 8 GB
	rocksdb := NewRocksDB(proto.Attributes{Attrs: []string{"ssd"}}, loc, cacheSize)
	if err := rocksdb.Start(); err != nil {
		b.Fatalf("could not create new rocksdb db instance at %s: %v", loc, err)
	}
	if exists {
		return rocksdb
	}

	log.Infof("creating mvcc data: %s", loc)
	rng, _ := util.NewPseudoRand()
	keys := make([]proto.Key, numKeys)
	nvs := make([]int, numKeys)
	for t := 1; t <= numVersions; t++ {
		walltime := int64(5 * t)
		ts := makeTS(walltime, 0)
		batch := rocksdb.NewBatch()
		for i := 0; i < numKeys; i++ {
			if t == 1 {
				keys[i] = proto.Key(encoding.EncodeUvarint([]byte("key-"), uint64(i)))
				nvs[i] = int(rng.Int31n(int32(numVersions)) + 1)
			}
			// Only write values if this iteration is less than the random
			// number of versions chosen for this key.
			if t <= nvs[i] {
				value := proto.Value{Bytes: util.RandBytes(rng, 1024)}
				if err := MVCCPut(batch, nil, keys[i], ts, value, nil); err != nil {
					b.Fatal(err)
				}
			}
		}
		if err := batch.Commit(); err != nil {
			b.Fatal(err)
		}
	}
	rocksdb.CompactRange(nil, nil)

	return rocksdb
}
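// Hypothetical consumer of setupMVCCScanData (a sketch, not the original
// file's benchmark): scan numRows rows starting at a random key, at a
// uniformly random historical timestamp. The MVCCScan signature is assumed
// here as MVCCScan(engine, key, endKey, max, timestamp, txn) and may differ
// by revision; the key count of 100000 is illustrative.
func runMVCCScanSketch(numRows, numVersions int, b *testing.B) {
	const numKeys = 100000
	rocksdb := setupMVCCScanData(numVersions, numKeys, b)
	defer rocksdb.Stop()

	rng, _ := util.NewPseudoRand()
	b.SetBytes(int64(numRows * 1024))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Pick a random starting key and a timestamp within the written
		// range; every key has at least one version at walltime 5ns, so a
		// full scan result is always expected.
		keyIdx := rng.Int31n(int32(numKeys - numRows))
		startKey := proto.Key(encoding.EncodeUvarint([]byte("key-"), uint64(keyIdx)))
		endKey := proto.Key(encoding.EncodeUvarint([]byte("key-"), uint64(keyIdx)+uint64(numRows)))
		ts := makeTS(int64(rng.Int31n(int32(numVersions))+1)*5, 0)
		kvs, err := MVCCScan(rocksdb, startKey, endKey, int64(numRows), ts, nil)
		if err != nil {
			b.Fatal(err)
		}
		if len(kvs) != numRows {
			b.Fatalf("failed to scan: %d != %d", len(kvs), numRows)
		}
	}
	b.StopTimer()
}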
// TestRandomSplits splits the keyspace TotalSplits times. At the same time,
// a biogo LLRB tree is also maintained and at the end of the test, the range
// tree and the biogo tree are compared to ensure they are equal.
func TestRandomSplits(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()
	db := store.DB()

	rng, seed := util.NewPseudoRand()
	t.Logf("using pseudo random number generator with seed %d", seed)

	tree := &llrb.Tree{}
	tree.Insert(Key(proto.KeyMin))

	// Test an unsplit tree.
	if err := compareBiogoTree(db, tree); err != nil {
		t.Fatalf("unsplit trees are not equal: %v", err)
	}

	for i := 0; i < TotalSplits; i++ {
		keyInt := rng.Int31()
		keyString := strconv.Itoa(int(keyInt))
		keyProto := proto.Key(keyString)
		key := Key(keyProto)
		// Make sure we avoid collisions.
		for tree.Get(key) != nil {
			keyInt = rng.Int31()
			keyString = strconv.Itoa(int(keyInt))
			keyProto = proto.Key(keyString)
			key = Key(keyProto)
		}
		// t.Logf("Inserting %d:%d", i, keyInt)
		tree.Insert(key)

		// Split the range.
		if err := splitRange(db, keyProto); err != nil {
			t.Fatal(err)
		}
	}

	// Compare the trees.
	if err := compareBiogoTree(db, tree); err != nil {
		t.Fatal(err)
	}
}
// setupClientBenchData writes up to numVersions values at each of numKeys
// keys, persisting the store in the current directory as
// "client_bench_<versions>_<keys>" so it can be reused between runs. It
// returns a running test server and a client connected over RPC or HTTP,
// with or without SSL.
func setupClientBenchData(useRPC, useSSL bool, numVersions, numKeys int, b *testing.B) (
	*server.TestServer, *client.DB) {
	const cacheSize = 8 << 30 // 8 GB
	loc := fmt.Sprintf("client_bench_%d_%d", numVersions, numKeys)

	exists := true
	if _, err := os.Stat(loc); os.IsNotExist(err) {
		exists = false
	}

	s := &server.TestServer{}
	s.Ctx = server.NewTestContext()
	s.Ctx.ExperimentalRPCServer = true
	s.SkipBootstrap = exists
	if !useSSL {
		s.Ctx.Insecure = true
	}
	s.Engines = []engine.Engine{engine.NewRocksDB(proto.Attributes{Attrs: []string{"ssd"}}, loc, cacheSize)}
	if err := s.Start(); err != nil {
		b.Fatal(err)
	}

	var scheme string
	if useRPC {
		scheme = "rpcs"
	} else {
		scheme = "https"
	}

	db, err := client.Open(scheme + "://root@" + s.ServingAddr() + "?certs=" + s.Ctx.Certs)
	if err != nil {
		b.Fatal(err)
	}

	if exists {
		return s, db
	}

	rng, _ := util.NewPseudoRand()
	keys := make([]proto.Key, numKeys)
	nvs := make([]int, numKeys)
	for t := 1; t <= numVersions; t++ {
		batch := &client.Batch{}
		for i := 0; i < numKeys; i++ {
			if t == 1 {
				keys[i] = proto.Key(encoding.EncodeUvarint([]byte("key-"), uint64(i)))
				nvs[i] = int(rng.Int31n(int32(numVersions)) + 1)
			}
			// Only write values if this iteration is less than the random
			// number of versions chosen for this key.
			if t <= nvs[i] {
				batch.Put(keys[i], util.RandBytes(rng, valueSize))
			}
			if (i+1)%1000 == 0 {
				if err := db.Run(batch); err != nil {
					b.Fatal(err)
				}
				batch = &client.Batch{}
			}
		}
		if len(batch.Results) != 0 {
			if err := db.Run(batch); err != nil {
				b.Fatal(err)
			}
		}
	}

	if r, ok := s.Engines[0].(*engine.RocksDB); ok {
		r.CompactRange(nil, nil)
	}

	return s, db
}
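// Hypothetical read-path counterpart to setupClientBenchData (a sketch, not
// the original file's benchmark). It reuses batch.Get and db.Run in the same
// style as the setup above; the batch size of 1000 is illustrative.
func runClientBatchGet(numVersions, numKeys int, b *testing.B) {
	s, db := setupClientBenchData(false, false, numVersions, numKeys, b)
	defer s.Stop()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Read back 1000 of the pre-written keys per iteration, cycling
		// through the keyspace.
		batch := &client.Batch{}
		for j := 0; j < 1000; j++ {
			key := proto.Key(encoding.EncodeUvarint([]byte("key-"), uint64((i*1000+j)%numKeys)))
			batch.Get(key)
		}
		if err := db.Run(batch); err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}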
"reflect" "sync" "sync/atomic" "testing" "time" "github.com/cockroachdb/cockroach/proto" "github.com/cockroachdb/cockroach/util" "github.com/cockroachdb/cockroach/util/leaktest" "github.com/cockroachdb/cockroach/util/log" "github.com/coreos/etcd/raft" "github.com/coreos/etcd/raft/raftpb" "golang.org/x/net/context" ) var testRand, _ = util.NewPseudoRand() func makeCommandID() string { return string(util.RandBytes(testRand, commandIDLen)) } type testCluster struct { t *testing.T nodes []*state tickers []*manualTicker events []*eventDemux storages []*BlockableStorage transport Transport } func newTestCluster(transport Transport, size int, stopper *util.Stopper, t *testing.T) *testCluster {