// fillRange writes keys with the given prefix and associated values
// until bytes bytes have been written or the given range has split.
func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms engine.MVCCStats
		if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		key = keys.MakeNonColumnKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		_, err := client.SendWrappedWith(store, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs)
		// When the split occurs in the background, our writes may start failing.
		// We know we can stop writing when this happens.
		if _, ok := err.(*roachpb.RangeKeyMismatchError); ok {
			return
		} else if err != nil {
			t.Fatal(err)
		}
	}
}
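// Usage sketch (illustrative, not from the original source): a split test
// would typically point fillRange at a freshly created store and a range
// whose zone config caps its size, then wait for the split to be observed.
// The createTestStore helper is taken from the surrounding tests; rangeID,
// prefix, and maxBytes stand in for whatever the calling test defines:
//
//	store, stopper := createTestStore(t)
//	defer stopper.Stop()
//	prefix := roachpb.Key("test-prefix")
//	// Write a bit more than the configured maximum so the split queue fires.
//	fillRange(store, rangeID, prefix, maxBytes+1, t)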
func fillTestRange(t testing.TB, rep *Replica, size int) {
	src := rand.New(rand.NewSource(0))
	for i := 0; i < size/(keySize+valSize); i++ {
		key := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))
		val := randutil.RandBytes(src, valSize)
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
}
func writeRandomDataToRange(t testing.TB, store *storage.Store, rangeID roachpb.RangeID, keyPrefix []byte) {
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := append([]byte(nil), keyPrefix...)
		key = append(key, randutil.RandBytes(src, int(src.Int31n(1<<7)))...)
		key = keys.MakeNonColumnKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
}
func runMVCCConditionalPut(emk engineMaker, valueSize int, createFirst bool, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	eng, stopper := emk(b, fmt.Sprintf("cput_%d", valueSize))
	defer stopper.Stop()

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
	if createFirst {
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(context.Background(), eng, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(timeutil.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(context.Background(), eng, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
func runMVCCConditionalPut(valueSize int, createFirst bool, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	stopper := stop.NewStopper()
	defer stopper.Stop()
	rocksdb := NewInMem(roachpb.Attributes{}, testCacheSize, stopper)

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
	if createFirst {
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(rocksdb, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(timeutil.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(rocksdb, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
func BenchmarkMVCCPutDelete_RocksDB(b *testing.B) {
	const cacheSize = 1 << 30 // 1 GB

	rocksdb, stopper := setupMVCCInMemRocksDB(b, "put_delete")
	defer stopper.Stop()

	r := rand.New(rand.NewSource(int64(timeutil.Now().UnixNano())))
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(r, 10))
	zeroTS := roachpb.ZeroTimestamp
	var blockNum int64

	for i := 0; i < b.N; i++ {
		blockID := r.Int63()
		blockNum++
		key := encoding.EncodeVarintAscending(nil, blockID)
		key = encoding.EncodeVarintAscending(key, blockNum)

		if err := MVCCPut(context.Background(), rocksdb, nil, key, zeroTS, value, nil /* txn */); err != nil {
			b.Fatal(err)
		}
		if err := MVCCDelete(context.Background(), rocksdb, nil, key, zeroTS, nil /* txn */); err != nil {
			b.Fatal(err)
		}
	}
}
func TestApproximateSize(t *testing.T) {
	defer leaktest.AfterTest(t)
	runWithAllEngines(func(engine Engine, t *testing.T) {
		var (
			count  = 10000
			keys   = make([]proto.EncodedKey, count)
			values = make([][]byte, count)
			// Random values to prevent compression
			rand, _  = randutil.NewPseudoRand()
			valueLen = 10
		)
		for i := 0; i < count; i++ {
			keys[i] = []byte(fmt.Sprintf("key%8d", i))
			values[i] = randutil.RandBytes(rand, valueLen)
		}

		insertKeysAndValues(keys, values, engine, t)
		if err := engine.Flush(); err != nil {
			t.Fatalf("Error flushing InMem: %s", err)
		}

		sizePerRecord := (len([]byte(keys[0])) + valueLen)
		verifyApproximateSize(keys, engine, sizePerRecord, 0.15, t)
		verifyApproximateSize(keys[:count/2], engine, sizePerRecord, 0.15, t)
		verifyApproximateSize(keys[:count/4], engine, sizePerRecord, 0.15, t)
	}, t)
}
func BenchmarkMVCCPutDelete(b *testing.B) {
	const cacheSize = 1 << 30 // 1 GB

	stopper := stop.NewStopper()
	rocksdb := NewInMem(roachpb.Attributes{}, cacheSize, stopper)
	defer stopper.Stop()

	r := rand.New(rand.NewSource(int64(timeutil.Now().UnixNano())))
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(r, 10))
	zeroTS := roachpb.ZeroTimestamp
	var blockNum int64

	for i := 0; i < b.N; i++ {
		blockID := r.Int63()
		blockNum++
		key := encoding.EncodeVarintAscending(nil, blockID)
		key = encoding.EncodeVarintAscending(key, blockNum)

		if err := MVCCPut(rocksdb, nil, key, zeroTS, value, nil /* txn */); err != nil {
			b.Fatal(err)
		}
		if err := MVCCDelete(rocksdb, nil, key, zeroTS, nil /* txn */); err != nil {
			b.Fatal(err)
		}
	}
}
func TestRandBytes(t *testing.T) {
	rand, _ := randutil.NewPseudoRand()
	for i := 0; i < 100; i++ {
		x := randutil.RandBytes(rand, i)
		if len(x) != i {
			t.Errorf("got array with unexpected length: %d (expected %d)", len(x), i)
		}
	}
}
// fillRange writes keys with the given prefix and associated values
// until bytes bytes have been written.
func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms engine.MVCCStats
		if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val, rangeID, store.StoreID())
		if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
			t.Fatal(err)
		}
	}
}
// startTestWriter creates a writer which initiates a sequence of
// transactions, each of which writes up to 10 times to random keys with
// random values. If not nil, txnChannel is written to non-blockingly
// every time a new transaction starts.
func startTestWriter(db *client.DB, i int64, valBytes int32, wg *sync.WaitGroup, retries *int32, txnChannel chan struct{}, done <-chan struct{}, t *testing.T) {
	src := rand.New(rand.NewSource(i))
	defer func() {
		if wg != nil {
			wg.Done()
		}
	}()

	for j := 0; ; j++ {
		select {
		case <-done:
			return
		default:
			first := true
			err := db.Txn(func(txn *client.Txn) error {
				if first && txnChannel != nil {
					select {
					case txnChannel <- struct{}{}:
					default:
					}
				} else if !first && retries != nil {
					atomic.AddInt32(retries, 1)
				}
				first = false
				for j := 0; j <= int(src.Int31n(10)); j++ {
					key := randutil.RandBytes(src, 10)
					val := randutil.RandBytes(src, int(src.Int31n(valBytes)))
					if err := txn.Put(key, val); err != nil {
						log.Infof("experienced an error in routine %d: %s", i, err)
						return err
					}
				}
				return nil
			})
			if err != nil {
				t.Error(err)
			} else {
				time.Sleep(1 * time.Millisecond)
			}
		}
	}
}
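// Usage sketch (illustrative, not from the original source): startTestWriter
// is designed to run as a group of goroutines, each with its own seed, which a
// test shuts down via the done channel once it has observed whatever it was
// waiting for. The concurrency count and value size below are arbitrary
// example values:
//
//	var wg sync.WaitGroup
//	var retries int32
//	done := make(chan struct{})
//	txnChannel := make(chan struct{}, 1000)
//	for i := 0; i < 10; i++ {
//		wg.Add(1)
//		go startTestWriter(db, int64(i), 1<<8, &wg, &retries, txnChannel, done, t)
//	}
//	// ... exercise the cluster, e.g. force splits while the writers run ...
//	close(done)
//	wg.Wait()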
func fillTestRange(t testing.TB, rep *Replica, size int64) {
	src := rand.New(rand.NewSource(0))
	for i := int64(0); i < size/int64(keySize+valSize); i++ {
		key := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))
		val := randutil.RandBytes(src, valSize)
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}

	rep.mu.Lock()
	after := rep.mu.state.Stats.Total()
	rep.mu.Unlock()
	if after < size {
		t.Fatalf("range not full after filling: wrote %d, but range at %d", size, after)
	}
}
// fillRange writes keys with the given prefix and associated values
// until bytes bytes have been written.
func fillRange(store *storage.Store, rangeID proto.RangeID, prefix proto.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms engine.MVCCStats
		if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val, rangeID, store.StoreID())
		pArgs.Timestamp = store.Clock().Now()
		if _, err := store.ExecuteCmd(context.Background(), &pArgs); err != nil {
			t.Fatal(err)
		}
	}
}
// setupMVCCScanData writes up to numVersions values at each of numKeys
// keys. The number of versions written for each key is chosen
// randomly according to a uniform distribution. Each successive
// version is written starting at 5ns and then in 5ns increments. This
// allows scans at various times, starting at t=5ns, and continuing to
// t=5ns*(numVersions+1). A version for each key will be read on every
// such scan, but the dynamics of the scan will change depending on
// the historical timestamp. Earlier timestamps mean scans which must
// skip more historical versions; later timestamps mean scans which
// skip fewer.
//
// The creation of the rocksdb database is time consuming, especially
// for larger numbers of versions. The database is persisted between
// runs and stored in the current directory as
// "mvcc_scan_<versions>_<keys>_<valueBytes>".
func setupMVCCScanData(numVersions, numKeys, valueBytes int, b *testing.B) (*RocksDB, *stop.Stopper) {
	loc := fmt.Sprintf("mvcc_scan_%d_%d_%d", numVersions, numKeys, valueBytes)

	exists := true
	if _, err := os.Stat(loc); os.IsNotExist(err) {
		exists = false
	}

	const cacheSize = 8 << 30 // 8 GB

	stopper := stop.NewStopper()
	rocksdb := NewRocksDB(roachpb.Attributes{}, loc, cacheSize, stopper)
	if err := rocksdb.Open(); err != nil {
		b.Fatalf("could not create new rocksdb db instance at %s: %v", loc, err)
	}
	if exists {
		return rocksdb, stopper
	}

	log.Infof("creating mvcc data: %s", loc)

	rng, _ := randutil.NewPseudoRand()
	keys := make([]roachpb.Key, numKeys)
	nvs := make([]int, numKeys)
	for t := 1; t <= numVersions; t++ {
		walltime := int64(5 * t)
		ts := makeTS(walltime, 0)
		batch := rocksdb.NewBatch()
		for i := 0; i < numKeys; i++ {
			if t == 1 {
				keys[i] = roachpb.Key(encoding.EncodeUvarint([]byte("key-"), uint64(i)))
				nvs[i] = rand.Intn(numVersions) + 1
			}
			// Only write values if this iteration is less than the random
			// number of versions chosen for this key.
			if t <= nvs[i] {
				value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueBytes))
				value.InitChecksum(keys[i])
				if err := MVCCPut(batch, nil, keys[i], ts, value, nil); err != nil {
					b.Fatal(err)
				}
			}
		}
		if err := batch.Commit(); err != nil {
			b.Fatal(err)
		}
		batch.Close()
	}

	rocksdb.CompactRange(nil, nil)

	return rocksdb, stopper
}
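// Usage sketch (illustrative, not from the original source): a scan benchmark
// built on setupMVCCScanData would create (or reuse) the persisted database
// outside the timed region and only time the reads; the concrete read call is
// elided below because its signature differs across versions of this package,
// and numRows is a hypothetical per-iteration row count:
//
//	rocksdb, stopper := setupMVCCScanData(numVersions, numKeys, valueBytes, b)
//	defer stopper.Stop()
//	b.SetBytes(int64(numRows * valueBytes))
//	b.ResetTimer()
//	for i := 0; i < b.N; i++ {
//		// scan numRows keys at a timestamp in [5ns, 5ns*numVersions] ...
//	}
//	b.StopTimer()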
// TestPut starts up an N node cluster and runs N workers that write
// to independent keys.
func TestPut(t *testing.T) {
	l := localcluster.Create(*numNodes, stopper)
	l.Start()
	defer l.Stop()

	db, dbStopper := makeDBClient(t, l, 0)
	defer dbStopper.Stop()

	if err := configutil.SetDefaultRangeMaxBytes(db, *rangeMaxBytes); err != nil {
		t.Fatal(err)
	}
	checkRangeReplication(t, l, 20*time.Second)

	errs := make(chan error, *numNodes)
	start := time.Now()
	deadline := start.Add(*duration)
	var count int64
	for i := 0; i < *numNodes; i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)

			for time.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if err := db.Put(fmt.Sprintf("%08d", k), v); err != nil {
					errs <- err
					return
				}
			}
			errs <- nil
		}()
	}

	for i := 0; i < *numNodes; {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			log.Infof("%d", atomic.LoadInt64(&count))
		}
	}

	elapsed := time.Since(start)
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
func BenchmarkPeekLengthBytesDescending(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeBytesDescending(nil, randutil.RandBytes(rng, 100))
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = PeekLength(vals[i%len(vals)])
	}
}
// TestPut starts up an N node cluster and runs N workers that write
// to independent keys.
func TestPut(t *testing.T) {
	c := StartCluster(t)
	defer c.AssertAndStop(t)

	db, dbStopper := makeClient(t, c.ConnString(0))
	defer dbStopper.Stop()

	errs := make(chan error, c.NumNodes())
	start := time.Now()
	deadline := start.Add(*flagDuration)
	var count int64
	for i := 0; i < c.NumNodes(); i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)

			for time.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if pErr := db.Put(fmt.Sprintf("%08d", k), v); pErr != nil {
					errs <- pErr.GoError()
					return
				}
			}
			errs <- nil
		}()
	}

	for i := 0; i < c.NumNodes(); {
		baseCount := atomic.LoadInt64(&count)
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			count := atomic.LoadInt64(&count)
			log.Infof("%d (%d/s)", count, count-baseCount)
			c.Assert(t)
		}
	}

	elapsed := time.Since(start)
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
func BenchmarkReplicaSnapshot(b *testing.B) {
	defer tracing.Disable()()
	defer config.TestingDisableTableSplits()()
	store, stopper, _ := createTestStore(b)
	// We want to manually control the size of the raft log.
	store.DisableRaftLogQueue(true)
	defer stopper.Stop()

	const rangeID = 1
	const keySize = 1 << 7   // 128 B
	const valSize = 1 << 10  // 1 KiB
	const snapSize = 1 << 25 // 32 MiB

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		b.Fatal(err)
	}

	src := rand.New(rand.NewSource(0))
	for i := 0; i < snapSize/(keySize+valSize); i++ {
		key := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))
		val := randutil.RandBytes(src, valSize)
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			b.Fatal(pErr)
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := rep.GetSnapshot(); err != nil {
			b.Fatal(err)
		}
	}
}
func BenchmarkDecodeStringDescending(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeStringDescending(nil, string(randutil.RandBytes(rng, 100)))
	}

	buf := make([]byte, 0, 1000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _, _ = DecodeStringDescending(vals[i%len(vals)], buf)
	}
}
func BenchmarkEncodeString(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([]string, 10000)
	for i := range vals {
		vals[i] = string(randutil.RandBytes(rng, 100))
	}

	buf := make([]byte, 0, 1000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = EncodeStringAscending(buf, vals[i%len(vals)])
	}
}
func BenchmarkDecodeKeyBytes(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeBytes(nil, randutil.RandBytes(rng, 100))
	}

	result := []byte(nil)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = DecodeKey(vals[i%len(vals)], "%s", &result)
	}
}
func writeRandomDataToRange(
	t testing.TB, store *storage.Store, rangeID roachpb.RangeID, keyPrefix []byte,
) (midpoint []byte) {
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := append([]byte(nil), keyPrefix...)
		key = append(key, randutil.RandBytes(src, int(src.Int31n(1<<7)))...)
		key = keys.MakeRowSentinelKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
	// Return approximate midway point ("Z" in string
	// "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz").
	midKey := append([]byte(nil), keyPrefix...)
	midKey = append(midKey, []byte("Z")...)
	return keys.MakeRowSentinelKey(midKey)
}
func BenchmarkEncodeKeyBytes(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([]interface{}, 10000)
	for i := range vals {
		vals[i] = randutil.RandBytes(rng, 100)
	}

	buf := make([]byte, 0, 1000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = EncodeKey(buf, "%s", vals[i%len(vals)])
	}
}
func testPutInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	db, dbStopper := c.NewClient(t, 0)
	defer dbStopper.Stop()

	errs := make(chan error, c.NumNodes())
	start := timeutil.Now()
	deadline := start.Add(cfg.Duration)
	var count int64
	for i := 0; i < c.NumNodes(); i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)

			for timeutil.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if err := db.Put(fmt.Sprintf("%08d", k), v); err != nil {
					errs <- err
					return
				}
			}
			errs <- nil
		}()
	}

	for i := 0; i < c.NumNodes(); {
		baseCount := atomic.LoadInt64(&count)
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			loadedCount := atomic.LoadInt64(&count)
			log.Infof(context.Background(), "%d (%d/s)", loadedCount, loadedCount-baseCount)
			c.Assert(t)
			cluster.Consistent(t, c)
		}
	}

	elapsed := timeutil.Since(start)
	log.Infof(context.Background(), "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
func runMVCCPut(valueSize int, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := proto.Value{Bytes: randutil.RandBytes(rng, valueSize)}
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	rocksdb := NewInMem(proto.Attributes{Attrs: []string{"ssd"}}, testCacheSize)
	defer rocksdb.Close()

	b.SetBytes(int64(valueSize))
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := proto.Key(encoding.EncodeUvarint(keyBuf[0:4], uint64(i)))
		ts := makeTS(time.Now().UnixNano(), 0)
		if err := MVCCPut(rocksdb, nil, key, ts, value, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
func runMVCCBatchPut(valueSize, batchSize int, b *testing.B) {
	defer tracing.Disable()()
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	stopper := stop.NewStopper()
	defer stopper.Stop()
	rocksdb := NewInMem(roachpb.Attributes{}, testCacheSize, stopper)

	b.SetBytes(int64(valueSize))
	b.ResetTimer()

	for i := 0; i < b.N; i += batchSize {
		end := i + batchSize
		if end > b.N {
			end = b.N
		}

		batch := rocksdb.NewBatch()

		for j := i; j < end; j++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(j)))
			ts := makeTS(time.Now().UnixNano(), 0)
			if err := MVCCPut(batch, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}

		if err := batch.Commit(); err != nil {
			b.Fatal(err)
		}

		batch.Close()
	}

	b.StopTimer()
}
func runMVCCBatchPut(emk engineMaker, valueSize, batchSize int, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	eng, stopper := emk(b, fmt.Sprintf("batch_put_%d_%d", valueSize, batchSize))
	defer stopper.Stop()

	b.SetBytes(int64(valueSize))
	b.ResetTimer()

	for i := 0; i < b.N; i += batchSize {
		end := i + batchSize
		if end > b.N {
			end = b.N
		}

		batch := eng.NewBatch()

		for j := i; j < end; j++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(j)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(context.Background(), batch, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}

		if err := batch.Commit(); err != nil {
			b.Fatal(err)
		}

		batch.Close()
	}

	b.StopTimer()
}
// TestStoreRangeSplitStats starts by splitting the system keys from user-space
// keys and verifying that the user space side of the split (which is empty)
// has all zeros for stats. It then writes random data to the user space side,
// splits it halfway and verifies that the two halves' stats sum exactly to the
// pre-split stats.
func TestStoreRangeSplitStats(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	// Split the range after the last table data key.
	keyPrefix := keys.MakeTablePrefix(keys.MaxReservedDescID + 1)
	keyPrefix = keys.MakeNonColumnKey(keyPrefix)
	args := adminSplitArgs(roachpb.KeyMin, keyPrefix)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}
	// Verify empty range has empty stats.
	rng := store.LookupReplica(keyPrefix, nil)
	// NOTE that this value is expected to change over time, depending on what
	// we store in the sys-local keyspace. Update it accordingly for this test.
	if err := verifyRangeStats(store.Engine(), rng.Desc().RangeID, engine.MVCCStats{}); err != nil {
		t.Fatal(err)
	}

	// Write random data.
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := append([]byte(nil), keyPrefix...)
		key = append(key, randutil.RandBytes(src, int(src.Int31n(1<<7)))...)
		key = keys.MakeNonColumnKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rng.Desc().RangeID,
		}, &pArgs); err != nil {
			t.Fatal(err)
		}
	}
	// Get the range stats now that we have data.
	var ms engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rng.Desc().RangeID, &ms); err != nil {
		t.Fatal(err)
	}

	// Split the range at approximate halfway point ("Z" in string
	// "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz").
	midKey := append([]byte(nil), keyPrefix...)
	midKey = append(midKey, []byte("Z")...)
	midKey = keys.MakeNonColumnKey(midKey)
	args = adminSplitArgs(keyPrefix, midKey)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rng.Desc().RangeID,
	}, &args); err != nil {
		t.Fatal(err)
	}

	var msLeft, msRight engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rng.Desc().RangeID, &msLeft); err != nil {
		t.Fatal(err)
	}
	rngRight := store.LookupReplica(midKey, nil)
	if err := engine.MVCCGetRangeStats(store.Engine(), rngRight.Desc().RangeID, &msRight); err != nil {
		t.Fatal(err)
	}

	// The stats should be exactly equal when added.
	expMS := engine.MVCCStats{
		LiveBytes:   msLeft.LiveBytes + msRight.LiveBytes,
		KeyBytes:    msLeft.KeyBytes + msRight.KeyBytes,
		ValBytes:    msLeft.ValBytes + msRight.ValBytes,
		IntentBytes: msLeft.IntentBytes + msRight.IntentBytes,
		LiveCount:   msLeft.LiveCount + msRight.LiveCount,
		KeyCount:    msLeft.KeyCount + msRight.KeyCount,
		ValCount:    msLeft.ValCount + msRight.ValCount,
		IntentCount: msLeft.IntentCount + msRight.IntentCount,
	}
	ms.SysBytes, ms.SysCount = 0, 0
	if !reflect.DeepEqual(expMS, ms) {
		t.Errorf("expected left and right ranges to equal original: %+v + %+v != %+v", msLeft, msRight, ms)
	}
}
func makeCommandID() string {
	return string(randutil.RandBytes(testRand, commandIDLen))
}
// TestChaos starts up a cluster and, for each node, a worker writing to
// independent keys, while nodes are being killed and restarted continuously.
// The test measures not write performance, but cluster recovery.
func TestChaos(t *testing.T) {
	t.Skip("TODO(tschottdorf): currently unstable")

	l := localcluster.Create(*numNodes, stopper)
	l.Start()
	defer l.AssertAndStop(t)

	checkRangeReplication(t, l, 20*time.Second)

	errs := make(chan error, *numNodes)
	start := time.Now()
	deadline := start.Add(*duration)
	var count int64
	counts := make([]int64, *numNodes)
	clients := make([]struct {
		sync.RWMutex
		db      *client.DB
		stopper *stop.Stopper
	}, *numNodes)

	initClient := func(i int) {
		db, dbStopper := makeDBClient(t, l, i)
		if clients[i].stopper != nil {
			clients[i].stopper.Stop()
		}
		clients[i].db, clients[i].stopper = db, dbStopper
	}

	for i := 0; i < *numNodes; i++ {
		initClient(i)
		go func(i int) {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)

			for time.Now().Before(deadline) {
				clients[i].RLock()
				k := atomic.AddInt64(&count, 1)
				atomic.AddInt64(&counts[i], 1)
				v := value[:r.Intn(len(value))]
				if err := clients[i].db.Put(fmt.Sprintf("%08d", k), v); err != nil {
					// These originate from DistSender when, for example, the
					// leader is down. With more realistic retry options, we
					// should probably not see them.
					if _, ok := err.(*roachpb.SendError); ok {
						log.Warning(err)
					} else {
						errs <- err
						clients[i].RUnlock()
						return
					}
				}
				clients[i].RUnlock()
			}
			errs <- nil
		}(i)
	}

	teardown := make(chan struct{})
	defer func() {
		<-teardown
		for i := range clients {
			clients[i].stopper.Stop()
			clients[i].stopper = nil
		}
	}()

	// Chaos monkey.
	go func() {
		defer close(teardown)
		rnd, seed := randutil.NewPseudoRand()
		log.Warningf("monkey starts (seed %d)", seed)
		for round := 1; time.Now().Before(deadline); round++ {
			select {
			case <-stopper:
				return
			default:
			}
			nodes := rnd.Perm(*numNodes)[:rnd.Intn(*numNodes)+1]
			log.Infof("round %d: restarting nodes %v", round, nodes)
			for _, i := range nodes {
				clients[i].Lock()
			}
			for _, i := range nodes {
				log.Infof("restarting %v", i)
				l.Nodes[i].Kill()
				l.Nodes[i].Restart(5)
				initClient(i)
				clients[i].Unlock()
			}
			for cur := atomic.LoadInt64(&count); time.Now().Before(deadline) &&
				atomic.LoadInt64(&count) == cur; time.Sleep(time.Second) {
				l.Assert(t)
				log.Warningf("monkey sleeping while cluster recovers...")
			}
		}
	}()

	for i := 0; i < *numNodes; {
		select {
		case <-teardown:
		case <-stopper:
			t.Fatal("interrupted")
		case err := <-errs:
			if err != nil {
				t.Error(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			cur := make([]string, *numNodes)
			for i := range cur {
				cur[i] = fmt.Sprintf("%d", atomic.LoadInt64(&counts[i]))
			}
			log.Infof("%d (%s)", atomic.LoadInt64(&count), strings.Join(cur, ", "))
		}
	}

	elapsed := time.Since(start)
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}