Example 1
// runMVCCScan first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCScans in increments of numRows
// keys over all of the data in the rocksdb instance, restarting at
// the beginning of the keyspace, as many times as necessary.
func runMVCCScan(numRows, numVersions, valueSize int, b *testing.B) {
	defer tracing.Disable()()
	// Use the same number of keys for all of the mvcc scan
	// benchmarks. Using a different number of keys per test gives
	// preferential treatment to tests with fewer keys. Note that the
	// datasets all fit in cache and the cache is pre-warmed.
	const numKeys = 100000

	rocksdb, stopper := setupMVCCData(numVersions, numKeys, valueSize, b)
	defer stopper.Stop()

	b.SetBytes(int64(numRows * valueSize))
	b.ResetTimer()

	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
	for i := 0; i < b.N; i++ {
		// Choose a random key to start scan.
		keyIdx := rand.Int31n(int32(numKeys - numRows))
		startKey := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(keyIdx)))
		walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
		ts := makeTS(walltime, 0)
		kvs, _, err := MVCCScan(rocksdb, startKey, keyMax, int64(numRows), ts, true, nil)
		if err != nil {
			b.Fatalf("failed scan: %s", err)
		}
		if len(kvs) != numRows {
			b.Fatalf("failed to scan: %d != %d", len(kvs), numRows)
		}
	}

	b.StopTimer()
}
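A note on the defer tracing.Disable()() line that recurs in these examples: tracing.Disable switches tracing off and returns a function that restores the previous setting, so the double call disables tracing for the duration of the benchmark and re-enables it on return (Examples 5, 12, 13, and 21 capture the returned function explicitly as enableTracing). Below is a minimal, self-contained sketch of the same restore-closure idiom; the disableTracing helper and tracingEnabled flag are illustrative stand-ins, not the CockroachDB API.

package main

import "fmt"

var tracingEnabled = true

// disableTracing turns tracing off and returns a closure that restores
// the previous setting, mirroring the tracing.Disable pattern used above.
func disableTracing() func() {
	prev := tracingEnabled
	tracingEnabled = false
	return func() { tracingEnabled = prev }
}

func runBenchmark() {
	// The double call: disableTracing() runs immediately and disables tracing;
	// the closure it returns runs at function exit and restores the old value.
	defer disableTracing()()
	fmt.Println("tracing enabled during benchmark?", tracingEnabled) // false
}

func main() {
	runBenchmark()
	fmt.Println("tracing enabled after benchmark?", tracingEnabled) // true
}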
Example 2
// NB(davidt): until `SetupMultinodeTestCluster` actually returns a cluster
// with replication configured, this is only testing adding nodes to a cluster
// and then their ability to serve SQL by talking to a remote, single-node KV.
func TestMultinodeCockroach(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer tracing.Disable()()

	conns, cleanup := SetupMultinodeTestCluster(t, 3, "Testing")
	defer cleanup()

	if _, err := conns[0].Exec(`CREATE TABLE testing (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	if _, err := conns[0].Exec(`INSERT INTO testing VALUES (5, 1), (4, 2), (1, 2)`); err != nil {
		t.Fatal(err)
	}

	if r, err := conns[1].Query(`SELECT * FROM testing WHERE k = 5`); err != nil {
		t.Fatal(err)
	} else if !r.Next() {
		t.Fatal("no rows")
	}

	if r, err := conns[2].Exec(`DELETE FROM testing`); err != nil {
		t.Fatal(err)
	} else if rows, err := r.RowsAffected(); err != nil {
		t.Fatal(err)
	} else if expected, actual := int64(3), rows; expected != actual {
		t.Fatalf("wrong row count deleted: expected %d actual %d", expected, actual)
	}
}
Example 3
// runMVCCGet first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCGets.
func runMVCCGet(numVersions, valueSize int, b *testing.B) {
	defer tracing.Disable()()
	const overhead = 48          // Per key/value overhead (empirically determined)
	const targetSize = 512 << 20 // 512 MB
	// Adjust the number of keys so that each test has approximately the same
	// amount of data.
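	// For example, with valueSize = 8 and numVersions = 10 this works out to
	// 512<<20 / ((48+8) * (1 + 9/2)) = 536870912 / 280 ≈ 1.9 million keys
	// (note the integer division: (numVersions-1)/2 == 4).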
	numKeys := targetSize / ((overhead + valueSize) * (1 + (numVersions-1)/2))

	rocksdb, stopper := setupMVCCData(numVersions, numKeys, valueSize, b)
	defer stopper.Stop()

	b.SetBytes(int64(valueSize))
	b.ResetTimer()

	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
	for i := 0; i < b.N; i++ {
		// Choose a random key to retrieve.
		keyIdx := rand.Int31n(int32(numKeys))
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(keyIdx)))
		walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
		ts := makeTS(walltime, 0)
		if v, _, err := MVCCGet(rocksdb, key, ts, true, nil); err != nil {
			b.Fatalf("failed get: %s", err)
		} else if v == nil {
			b.Fatalf("failed get (key not found): %d@%d", keyIdx, walltime)
		} else if valueBytes, err := v.GetBytes(); err != nil {
			b.Fatal(err)
		} else if len(valueBytes) != valueSize {
			b.Fatalf("unexpected value size: %d", len(valueBytes))
		}
	}

	b.StopTimer()
}
Example 4
func runMVCCConditionalPut(valueSize int, createFirst bool, b *testing.B) {
	defer tracing.Disable()()
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	stopper := stop.NewStopper()
	defer stopper.Stop()
	rocksdb := NewInMem(roachpb.Attributes{}, testCacheSize, stopper)

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
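	// When createFirst is set, pre-write every key and expect that value in the
	// conditional puts below; otherwise expected stays nil, i.e. put-if-absent.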
	if createFirst {
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(time.Now().UnixNano(), 0)
			if err := MVCCPut(rocksdb, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(time.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(rocksdb, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
Example 5
func newKVNative(b *testing.B) kvInterface {
	enableTracing := tracing.Disable()
	s, _, _ := serverutils.StartServer(b, base.TestServerArgs{})

	// TestServer.DB() returns the TxnCoordSender wrapped client. But that isn't
	// a fair comparison with SQL as we want these client requests to be sent
	// over the network.
	sender, err := client.NewSender(
		rpc.NewContext(&base.Context{
			User:       security.NodeUser,
			SSLCA:      filepath.Join(security.EmbeddedCertsDir, security.EmbeddedCACert),
			SSLCert:    filepath.Join(security.EmbeddedCertsDir, "node.crt"),
			SSLCertKey: filepath.Join(security.EmbeddedCertsDir, "node.key"),
		}, nil, s.Stopper()),
		s.ServingAddr())
	if err != nil {
		b.Fatal(err)
	}

	return &kvNative{
		db: client.NewDB(sender),
		doneFn: func() {
			s.Stopper().Stop()
			enableTracing()
		},
	}
}
Example 6
func BenchmarkReplicaSnapshot(b *testing.B) {
	defer tracing.Disable()()
	sCtx := TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, _, stopper := createTestStoreWithContext(b, &sCtx)
	defer stopper.Stop()
	// We want to manually control the size of the raft log.
	store.SetRaftLogQueueActive(false)

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		b.Fatal(err)
	}

	snapSize := rep.GetMaxBytes()
	fillTestRange(b, rep, snapSize)
	b.SetBytes(snapSize)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := rep.GetSnapshot(context.Background()); err != nil {
			b.Fatal(err)
		}
	}
}
Example 7
// runMVCCComputeStats benchmarks computing MVCC stats on a 64MB range of data.
func runMVCCComputeStats(valueBytes int, b *testing.B) {
	defer tracing.Disable()()
	const rangeBytes = 64 * 1024 * 1024
	const overhead = 48 // Per key/value overhead (empirically determined)
	numKeys := rangeBytes / (overhead + valueBytes)
	rocksdb, stopper := setupMVCCData(1, numKeys, valueBytes, b)
	defer stopper.Stop()

	b.SetBytes(rangeBytes)
	b.ResetTimer()

	var stats MVCCStats
	var err error
	for i := 0; i < b.N; i++ {
		iter := rocksdb.NewIterator(false)
		stats, err = iter.ComputeStats(mvccKey(roachpb.KeyMin), mvccKey(roachpb.KeyMax), 0)
		iter.Close()
		if err != nil {
			b.Fatal(err)
		}
	}

	b.StopTimer()
	log.Infof("live_bytes: %d", stats.LiveBytes)
}
Example 8
func benchmarkMultinodeCockroach(b *testing.B, f func(b *testing.B, db *gosql.DB)) {
	defer tracing.Disable()()
	testCluster, conns, stopper := SetupMultinodeTestCluster(b, 3, "bench")
	if err := testCluster.WaitForFullReplication(); err != nil {
		b.Fatal(err)
	}
	defer stopper.Stop()
	f(b, conns[0])
}
Example 9
func BenchmarkPgbenchExec_Cockroach(b *testing.B) {
	defer tracing.Disable()()
	s, _, _ := serverutils.StartServer(b, base.TestServerArgs{Insecure: true})
	defer s.Stopper().Stop()

	pgUrl, cleanupFn := sqlutils.PGUrl(b, s.ServingAddr(), security.RootUser, "benchmarkCockroach")
	pgUrl.RawQuery = "sslmode=disable"
	defer cleanupFn()

	execPgbench(b, pgUrl)
}
Example 10
func BenchmarkPgbenchExec_Cockroach(b *testing.B) {
	defer tracing.Disable()()
	s := server.StartInsecureTestServer(b)
	defer s.Stop()

	pgUrl, cleanupFn := sqlutils.PGUrl(b, s, security.RootUser, "benchmarkCockroach")
	pgUrl.RawQuery = "sslmode=disable"
	defer cleanupFn()

	execPgbench(b, pgUrl)
}
Example 11
func benchmarkCockroach(b *testing.B, f func(b *testing.B, db *gosql.DB)) {
	defer tracing.Disable()()
	s, db, _ := serverutils.StartServer(
		b, base.TestServerArgs{UseDatabase: "bench"})
	defer s.Stopper().Stop()

	if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bench`); err != nil {
		b.Fatal(err)
	}

	f(b, db)
}
Example 12
func newKVNative(b *testing.B) kvInterface {
	enableTracing := tracing.Disable()
	s := server.StartTestServer(b)

	return &kvNative{
		db: s.DB(),
		doneFn: func() {
			s.Stop()
			enableTracing()
		},
	}
}
Example 13
func newKVSQL(b *testing.B) kvInterface {
	enableTracing := tracing.Disable()
	s, db, _ := serverutils.StartServer(
		b, base.TestServerArgs{UseDatabase: "bench"})

	if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bench`); err != nil {
		b.Fatal(err)
	}

	kv := &kvSQL{}
	kv.db = db
	kv.doneFn = func() {
		s.Stopper().Stop()
		enableTracing()
	}
	return kv
}
Example 14
func benchmarkMultinodeCockroach(b *testing.B, f func(b *testing.B, db *gosql.DB)) {
	defer tracing.Disable()()
	tc := testcluster.StartTestCluster(b, 3,
		testcluster.ClusterArgs{
			ReplicationMode: testcluster.ReplicationFull,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "bench",
			},
		})
	if _, err := tc.Conns[0].Exec(`CREATE DATABASE bench`); err != nil {
		b.Fatal(err)
	}
	if err := tc.WaitForFullReplication(); err != nil {
		b.Fatal(err)
	}
	defer tc.Stopper().Stop()

	f(b, tc.Conns[0])
}
Example 15
// benchmarkSingleRoundtripWithLatency runs a number of transactions writing to
// the same key back to back in a single round-trip. Latency is simulated
// by pausing before each RPC sent.
func benchmarkSingleRoundtripWithLatency(b *testing.B, latency time.Duration) {
	defer tracing.Disable()()
	s := &LocalTestCluster{}
	s.Latency = latency
	s.Start(b)
	defer s.Stop()
	defer b.StopTimer()
	key := roachpb.Key("key")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if tErr := s.DB.Txn(func(txn *client.Txn) *roachpb.Error {
			b := txn.NewBatch()
			b.Put(key, fmt.Sprintf("value-%d", i))
			return txn.CommitInBatch(b)
		}); tErr != nil {
			b.Fatal(tErr)
		}
	}
}
Example 16
func benchmarkCockroach(b *testing.B, f func(b *testing.B, db *sql.DB)) {
	defer tracing.Disable()()
	s := server.StartTestServer(b)
	defer s.Stop()

	pgUrl, cleanupFn := sqlutils.PGUrl(b, s, security.RootUser, "benchmarkCockroach")
	defer cleanupFn()

	db, err := sql.Open("postgres", pgUrl.String())
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bench`); err != nil {
		b.Fatal(err)
	}

	f(b, db)
}
Example 17
func BenchmarkStoreRangeSplit(b *testing.B) {
	defer tracing.Disable()()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(b, sCtx)
	defer stopper.Stop()

	// Perform initial split of ranges.
	sArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(rg1(store), nil, &sArgs); err != nil {
		b.Fatal(err)
	}

	// Write some values left and right of the split key.
	aDesc := store.LookupReplica([]byte("a"), nil).Desc()
	bDesc := store.LookupReplica([]byte("c"), nil).Desc()
	writeRandomDataToRange(b, store, aDesc.RangeID, []byte("aaa"))
	writeRandomDataToRange(b, store, bDesc.RangeID, []byte("ccc"))

	// Merge the b range back into the a range.
	mArgs := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(rg1(store), nil, &mArgs); err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Split the range.
		b.StartTimer()
		if _, err := client.SendWrapped(rg1(store), nil, &sArgs); err != nil {
			b.Fatal(err)
		}

		// Merge the ranges back together with the timer stopped so the next
		// iteration can split again; only the split itself is measured.
		b.StopTimer()
		if _, err := client.SendWrapped(rg1(store), nil, &mArgs); err != nil {
			b.Fatal(err)
		}
	}
}
Example 18
// runMVCCMerge merges value into numKeys separate keys.
func runMVCCMerge(value *roachpb.Value, numKeys int, b *testing.B) {
	defer tracing.Disable()()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	rocksdb := NewInMem(roachpb.Attributes{}, testCacheSize, stopper)

	// Precompute keys so we don't waste time formatting them at each iteration.
	keys := make([]roachpb.Key, numKeys)
	for i := 0; i < numKeys; i++ {
		keys[i] = roachpb.Key(fmt.Sprintf("key-%d", i))
	}

	b.ResetTimer()

	ts := roachpb.Timestamp{}
	// Use parallelism if specified when test is run.
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			ms := MVCCStats{}
			ts.Logical++
			err := MVCCMerge(rocksdb, &ms, keys[rand.Intn(numKeys)], ts, *value)
			if err != nil {
				b.Fatal(err)
			}
		}
	})

	// Read values out to force merge.
	for _, key := range keys {
		val, _, err := MVCCGet(rocksdb, key, roachpb.ZeroTimestamp, true, nil)
		if err != nil {
			b.Fatal(err)
		} else if val == nil {
			continue
		}
	}

	b.StopTimer()
}
Example 19
func runMVCCBatchPut(valueSize, batchSize int, b *testing.B) {
	defer tracing.Disable()()
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	stopper := stop.NewStopper()
	defer stopper.Stop()
	rocksdb := NewInMem(roachpb.Attributes{}, testCacheSize, stopper)

	b.SetBytes(int64(valueSize))
	b.ResetTimer()

	for i := 0; i < b.N; i += batchSize {
		end := i + batchSize
		if end > b.N {
			end = b.N
		}

		batch := rocksdb.NewBatch()

		for j := i; j < end; j++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(j)))
			ts := makeTS(time.Now().UnixNano(), 0)
			if err := MVCCPut(batch, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}

		if err := batch.Commit(); err != nil {
			b.Fatal(err)
		}

		batch.Close()
	}

	b.StopTimer()
}
Example 20
func runMVCCDeleteRange(valueBytes int, b *testing.B) {
	defer tracing.Disable()()
	// 512 KB ranges so the benchmark doesn't take forever
	const rangeBytes = 512 * 1024
	const overhead = 48 // Per key/value overhead (empirically determined)
	numKeys := rangeBytes / (overhead + valueBytes)
	rocksdb, stopper := setupMVCCData(1, numKeys, valueBytes, b)
	stopper.Stop()

	b.SetBytes(rangeBytes)
	b.StopTimer()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
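		// DeleteRange destroys the data, so each iteration works on a fresh copy
		// of the pre-built directory; the copy and re-open happen off the timer.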
		locDirty := rocksdb.dir + "_dirty"
		if err := os.RemoveAll(locDirty); err != nil {
			b.Fatal(err)
		}
		if err := shutil.CopyTree(rocksdb.dir, locDirty, nil); err != nil {
			b.Fatal(err)
		}
		stopper := stop.NewStopper()
		dupRocksdb := NewRocksDB(roachpb.Attributes{}, locDirty, rocksdb.cacheSize,
			rocksdb.memtableBudget, stopper)
		if err := dupRocksdb.Open(); err != nil {
			b.Fatal(err)
		}

		b.StartTimer()
		_, err := MVCCDeleteRange(dupRocksdb, &MVCCStats{}, roachpb.KeyMin, roachpb.KeyMax, 0, roachpb.MaxTimestamp, nil)
		if err != nil {
			b.Fatal(err)
		}
		b.StopTimer()

		stopper.Stop()
	}
}
Example 21
func newKVSQL(b *testing.B) kvInterface {
	enableTracing := tracing.Disable()
	s := server.StartTestServer(b)
	pgURL, cleanupURL := sqlutils.PGUrl(b, s, security.RootUser, "benchmarkCockroach")
	pgURL.Path = "bench"
	db, err := sql.Open("postgres", pgURL.String())
	if err != nil {
		b.Fatal(err)
	}
	if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bench`); err != nil {
		b.Fatal(err)
	}

	kv := &kvSQL{}
	kv.db = db
	kv.doneFn = func() {
		db.Close()
		cleanupURL()
		s.Stop()
		enableTracing()
	}
	return kv
}
Example 22
func BenchmarkReplicaSnapshot(b *testing.B) {
	defer tracing.Disable()()
	defer config.TestingDisableTableSplits()()
	store, stopper, _ := createTestStore(b)
	// We want to manually control the size of the raft log.
	store.DisableRaftLogQueue(true)
	defer stopper.Stop()

	const rangeID = 1
	const keySize = 1 << 7   // 128 B
	const valSize = 1 << 10  // 1 KiB
	const snapSize = 1 << 25 // 32 MiB

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		b.Fatal(err)
	}

	src := rand.New(rand.NewSource(0))
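	// Fill the range with roughly snapSize of data:
	// 1<<25 / (128 + 1024) ≈ 29,000 key/value pairs.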
	for i := 0; i < snapSize/(keySize+valSize); i++ {
		key := keys.MakeRowSentinelKey(randutil.RandBytes(src, keySize))
		val := randutil.RandBytes(src, valSize)
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			b.Fatal(pErr)
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := rep.GetSnapshot(); err != nil {
			b.Fatal(err)
		}
	}
}