func TestApproximateSize(t *testing.T) { runWithAllEngines(func(engine Engine, t *testing.T) { var ( count = 10000 keys = make([]proto.EncodedKey, count) values = make([][]byte, count) // Random values to prevent compression rand = util.NewPseudoRand() valueLen = 10 ) for i := 0; i < count; i++ { keys[i] = []byte(fmt.Sprintf("key%8d", i)) values[i] = util.RandBytes(rand, valueLen) } insertKeysAndValues(keys, values, engine, t) if err := engine.Flush(); err != nil { t.Fatalf("Error flushing InMem: %s", err) } sizePerRecord := (len([]byte(keys[0])) + valueLen) verifyApproximateSize(keys, engine, sizePerRecord, 0.15, t) verifyApproximateSize(keys[:count/2], engine, sizePerRecord, 0.15, t) verifyApproximateSize(keys[:count/4], engine, sizePerRecord, 0.15, t) }, t) }
func runMVCCBatchPut(valueSize, batchSize int, b *testing.B) { rng := util.NewPseudoRand() value := proto.Value{Bytes: util.RandBytes(rng, valueSize)} keyBuf := append(make([]byte, 0, 64), []byte("key-")...) rocksdb := NewInMem(proto.Attributes{Attrs: []string{"ssd"}}, testCacheSize) defer rocksdb.Stop() b.SetBytes(int64(valueSize)) b.ResetTimer() for i := 0; i < b.N; i += batchSize { end := i + batchSize if end > b.N { end = b.N } batch := rocksdb.NewBatch() for j := i; j < end; j++ { key := proto.Key(encoding.EncodeUvarint(keyBuf[0:4], uint64(j))) ts := makeTS(time.Now().UnixNano(), 0) if err := MVCCPut(batch, nil, key, ts, value, nil); err != nil { b.Fatalf("failed put: %s", err) } } if err := batch.Commit(); err != nil { b.Fatal(err) } } b.StopTimer() }
// fillRange writes keys with the given prefix and associated values // until bytes bytes have been written. func fillRange(store *storage.Store, raftID proto.RaftID, prefix proto.Key, bytes int64, t *testing.T) { src := rand.New(rand.NewSource(0)) for { var ms engine.MVCCStats if err := engine.MVCCGetRangeStats(store.Engine(), raftID, &ms); err != nil { t.Fatal(err) } keyBytes, valBytes := ms.KeyBytes, ms.ValBytes if keyBytes+valBytes >= bytes { return } key := append(append([]byte(nil), prefix...), util.RandBytes(src, 100)...) val := util.RandBytes(src, int(src.Int31n(1<<8))) pArgs, pReply := putArgs(key, val, raftID, store.StoreID()) pArgs.Timestamp = store.Clock().Now() if err := store.ExecuteCmd(context.Background(), proto.Call{Args: pArgs, Reply: pReply}); err != nil { t.Fatal(err) } } }
func BenchmarkDecodeBytes(b *testing.B) { rng := util.NewPseudoRand() vals := make([][]byte, 10000) for i := range vals { vals[i] = EncodeBytes(nil, util.RandBytes(rng, 100)) } b.ResetTimer() for i := 0; i < b.N; i++ { _, _ = DecodeBytes(vals[i%len(vals)]) } }
// startTestWriter creates a writer which initiates a sequence of // transactions, each which writes up to 10 times to random keys with // random values. If not nil, txnChannel is written to every time a // new transaction starts. func startTestWriter(db *client.DB, i int64, valBytes int32, wg *sync.WaitGroup, retries *int32, txnChannel chan struct{}, done <-chan struct{}, t *testing.T) { src := rand.New(rand.NewSource(i)) for j := 0; ; j++ { select { case <-done: if wg != nil { wg.Done() } return default: first := true err := db.Txn(func(txn *client.Txn) error { if first && txnChannel != nil { txnChannel <- struct{}{} } else if !first && retries != nil { atomic.AddInt32(retries, 1) } first = false for j := 0; j <= int(src.Int31n(10)); j++ { key := util.RandBytes(src, 10) val := util.RandBytes(src, int(src.Int31n(valBytes))) if err := txn.Put(key, val); err != nil { log.Infof("experienced an error in routine %d: %s", i, err) return err } } return nil }) if err != nil { t.Error(err) } else { time.Sleep(1 * time.Millisecond) } } } }
// TestPut starts up an N node cluster and runs N workers that write // to independent keys. func TestPut(t *testing.T) { l := localcluster.Create(*numNodes, stopper) l.Start() defer l.Stop() db := makeDBClient(t, l, 0) setDefaultRangeMaxBytes(t, db, *rangeMaxBytes) checkRangeReplication(t, l, 20*time.Second) errs := make(chan error, *numNodes) start := time.Now() deadline := start.Add(*duration) var count int64 for i := 0; i < *numNodes; i++ { go func() { r, _ := util.NewPseudoRand() value := util.RandBytes(r, 8192) for time.Now().Before(deadline) { k := atomic.AddInt64(&count, 1) v := value[:r.Intn(len(value))] if err := db.Put(fmt.Sprintf("%08d", k), v); err != nil { errs <- err return } } errs <- nil }() } for i := 0; i < *numNodes; { select { case <-stopper: t.Fatalf("interrupted") case err := <-errs: if err != nil { t.Fatal(err) } i++ case <-time.After(1 * time.Second): // Periodically print out progress so that we know the test is still // running. log.Infof("%d", atomic.LoadInt64(&count)) } } elapsed := time.Since(start) log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds()) }
func BenchmarkEncodeBytes(b *testing.B) { rng := util.NewPseudoRand() vals := make([][]byte, 10000) for i := range vals { vals[i] = util.RandBytes(rng, 100) } buf := make([]byte, 0, 1000) b.ResetTimer() for i := 0; i < b.N; i++ { _ = EncodeBytes(buf, vals[i%len(vals)]) } }
// setupMVCCData writes up to numVersions values at each of numKeys // keys. The number of versions written for each key is chosen // randomly according to a uniform distribution. Each successive // version is written starting at 5ns and then in 5ns increments. This // allows scans at various times, starting at t=5ns, and continuing to // t=5ns*(numVersions+1). A version for each key will be read on every // such scan, but the dynamics of the scan will change depending on // the historical timestamp. Earlier timestamps mean scans which must // skip more historical versions; later timestamps mean scans which // skip fewer. // // The creation of the rocksdb database is time consuming, especially // for larger numbers of versions. The database is persisted between // runs and stored in the current directory as // "mvcc_scan_<versions>_<keys>". func setupMVCCScanData(numVersions, numKeys int, b *testing.B) *RocksDB { loc := fmt.Sprintf("mvcc_scan_%d_%d", numVersions, numKeys) exists := true if _, err := os.Stat(loc); os.IsNotExist(err) { exists = false } log.Infof("creating mvcc data: %s", loc) const cacheSize = 8 << 30 // 8 GB rocksdb := NewRocksDB(proto.Attributes{Attrs: []string{"ssd"}}, loc, cacheSize) if err := rocksdb.Start(); err != nil { b.Fatalf("could not create new rocksdb db instance at %s: %v", loc, err) } if exists { return rocksdb } rng := util.NewPseudoRand() keys := make([]proto.Key, numKeys) nvs := make([]int, numKeys) for t := 1; t <= numVersions; t++ { walltime := int64(5 * t) ts := makeTS(walltime, 0) batch := rocksdb.NewBatch() for i := 0; i < numKeys; i++ { if t == 1 { keys[i] = proto.Key(encoding.EncodeUvarint([]byte("key-"), uint64(i))) nvs[i] = int(rand.Int31n(int32(numVersions)) + 1) } // Only write values if this iteration is less than the random // number of versions chosen for this key. 
if t <= nvs[i] { value := proto.Value{Bytes: util.RandBytes(rng, 1024)} if err := MVCCPut(batch, nil, keys[i], ts, value, nil); err != nil { b.Fatal(err) } } } if err := batch.Commit(); err != nil { b.Fatal(err) } } rocksdb.CompactRange(nil, nil) return rocksdb }
func setupClientBenchData(useRPC, useSSL bool, numVersions, numKeys int, b *testing.B) ( *server.TestServer, *client.DB) { const cacheSize = 8 << 30 // 8 GB loc := fmt.Sprintf("client_bench_%d_%d", numVersions, numKeys) exists := true if _, err := os.Stat(loc); os.IsNotExist(err) { exists = false } s := &server.TestServer{} s.Ctx = server.NewTestContext() s.Ctx.ExperimentalRPCServer = true s.SkipBootstrap = exists if !useSSL { s.Ctx.Insecure = true } s.Engines = []engine.Engine{engine.NewRocksDB(proto.Attributes{Attrs: []string{"ssd"}}, loc, cacheSize)} if err := s.Start(); err != nil { b.Fatal(err) } var scheme string if useRPC { scheme = "rpcs" } else { scheme = "https" } db, err := client.Open(scheme + "://root@" + s.ServingAddr() + "?certs=" + s.Ctx.Certs) if err != nil { b.Fatal(err) } if exists { return s, db } rng, _ := util.NewPseudoRand() keys := make([]proto.Key, numKeys) nvs := make([]int, numKeys) for t := 1; t <= numVersions; t++ { batch := &client.Batch{} for i := 0; i < numKeys; i++ { if t == 1 { keys[i] = proto.Key(encoding.EncodeUvarint([]byte("key-"), uint64(i))) nvs[i] = int(rand.Int31n(int32(numVersions)) + 1) } // Only write values if this iteration is less than the random // number of versions chosen for this key. if t <= nvs[i] { batch.Put(proto.Key(keys[i]), util.RandBytes(rng, valueSize)) } if (i+1)%1000 == 0 { if err := db.Run(batch); err != nil { b.Fatal(err) } batch = &client.Batch{} } } if len(batch.Results) != 0 { if err := db.Run(batch); err != nil { b.Fatal(err) } } } if r, ok := s.Engines[0].(*engine.RocksDB); ok { r.CompactRange(nil, nil) } return s, db }
// makeCommandID returns a fresh random command ID of commandIDLen bytes.
func makeCommandID() string {
	id := util.RandBytes(testRand, commandIDLen)
	return string(id)
}
// TestStoreRangeSplitStats starts by splitting the system keys from user-space
// keys and verifying that the user space side of the split (which is empty),
// has all zeros for stats. It then writes random data to the user space side,
// splits it halfway and verifies the two splits have stats exactly equaling
// the pre-split.
func TestStoreRangeSplitStats(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	// Split the range at the first user key.
	args, reply := adminSplitArgs(proto.KeyMin, proto.Key("\x01"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: args, Reply: reply}); err != nil {
		t.Fatal(err)
	}
	// Verify empty range has empty stats.
	rng := store.LookupRange(proto.Key("\x01"), nil)
	// NOTE that this value is expected to change over time, depending on what
	// we store in the sys-local keyspace. Update it accordingly for this test.
	if err := verifyRangeStats(store.Engine(), rng.Desc().RaftID, engine.MVCCStats{}); err != nil {
		t.Fatal(err)
	}

	// Write random data. A fixed seed keeps the data (and hence the
	// stats) deterministic across runs.
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := util.RandBytes(src, int(src.Int31n(1<<7)))
		val := util.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs, pReply := putArgs(key, val, rng.Desc().RaftID, store.StoreID())
		pArgs.Timestamp = store.Clock().Now()
		if err := store.ExecuteCmd(context.Background(), proto.Call{Args: pArgs, Reply: pReply}); err != nil {
			t.Fatal(err)
		}
	}
	// Get the range stats now that we have data.
	var ms engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rng.Desc().RaftID, &ms); err != nil {
		t.Fatal(err)
	}

	// Split the range at approximate halfway point ("Z" in string "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz").
	args, reply = adminSplitArgs(proto.Key("\x01"), proto.Key("Z"), rng.Desc().RaftID, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: args, Reply: reply}); err != nil {
		t.Fatal(err)
	}

	var msLeft, msRight engine.MVCCStats
	// NOTE(review): assumes rng's RaftID now identifies the left-hand
	// post-split range — confirm against the split implementation.
	if err := engine.MVCCGetRangeStats(store.Engine(), rng.Desc().RaftID, &msLeft); err != nil {
		t.Fatal(err)
	}
	// Look up the right-hand range via the split key.
	rngRight := store.LookupRange(proto.Key("Z"), nil)
	if err := engine.MVCCGetRangeStats(store.Engine(), rngRight.Desc().RaftID, &msRight); err != nil {
		t.Fatal(err)
	}

	// The stats should be exactly equal when added. Sys* fields are
	// deliberately omitted from the sum (and zeroed in ms below) since
	// they are excluded from the comparison.
	expMS := engine.MVCCStats{
		LiveBytes:   msLeft.LiveBytes + msRight.LiveBytes,
		KeyBytes:    msLeft.KeyBytes + msRight.KeyBytes,
		ValBytes:    msLeft.ValBytes + msRight.ValBytes,
		IntentBytes: msLeft.IntentBytes + msRight.IntentBytes,
		LiveCount:   msLeft.LiveCount + msRight.LiveCount,
		KeyCount:    msLeft.KeyCount + msRight.KeyCount,
		ValCount:    msLeft.ValCount + msRight.ValCount,
		IntentCount: msLeft.IntentCount + msRight.IntentCount,
	}
	ms.SysBytes, ms.SysCount = 0, 0
	if !reflect.DeepEqual(expMS, ms) {
		t.Errorf("expected left and right ranges to equal original: %+v + %+v != %+v", msLeft, msRight, ms)
	}
}