func BenchmarkReplicaSnapshot(b *testing.B) {
	defer tracing.Disable()()
	storeCfg := TestStoreConfig(nil)
	storeCfg.TestingKnobs.DisableSplitQueue = true
	stopper := stop.NewStopper()
	defer stopper.Stop()
	store := createTestStoreWithConfig(b, stopper, &storeCfg)
	// We want to manually control the size of the raft log.
	store.SetRaftLogQueueActive(false)

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		b.Fatal(err)
	}

	snapSize := rep.GetMaxBytes()
	if err := fillTestRange(rep, snapSize); err != nil {
		b.Fatal(err)
	}

	b.SetBytes(snapSize)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := rep.GetSnapshot(context.Background(), "bench"); err != nil {
			b.Fatal(err)
		}
		rep.CloseOutSnap()
	}
}
func newKVNative(b *testing.B) kvInterface {
	enableTracing := tracing.Disable()
	s, _, _ := serverutils.StartServer(b, base.TestServerArgs{})

	// TestServer.DB() returns the TxnCoordSender wrapped client. But that isn't
	// a fair comparison with SQL as we want these client requests to be sent
	// over the network.
	sender, err := client.NewSender(
		rpc.NewContext(log.AmbientContext{}, &base.Config{
			User:       security.NodeUser,
			SSLCA:      filepath.Join(security.EmbeddedCertsDir, security.EmbeddedCACert),
			SSLCert:    filepath.Join(security.EmbeddedCertsDir, "node.crt"),
			SSLCertKey: filepath.Join(security.EmbeddedCertsDir, "node.key"),
		}, nil, s.Stopper()),
		s.ServingAddr())
	if err != nil {
		b.Fatal(err)
	}

	return &kvNative{
		db: client.NewDB(sender),
		doneFn: func() {
			s.Stopper().Stop()
			enableTracing()
		},
	}
}
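// kvInterface and kvNative are defined elsewhere in the package. As a rough
// sketch of the shape the constructor above assumes, they might look like the
// following; the real interface also carries workload methods (insert, scan,
// and so on) that are elided here, so treat this as an illustration rather
// than the package's actual definition.
type kvInterface interface {
	// done releases the server and tracing resources acquired by the
	// constructor.
	done()
}

type kvNative struct {
	db     *client.DB // network-backed client, bypassing TestServer.DB()
	doneFn func()
}

func (kv *kvNative) done() { kv.doneFn() }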
func BenchmarkClusterRestore(b *testing.B) {
	defer tracing.Disable()()

	// TODO(dan): count=10000 has some issues replicating. Investigate.
	for _, numAccounts := range []int{10, 100, 1000} {
		b.Run(strconv.Itoa(numAccounts), func(b *testing.B) {
			ctx, dir, tc, kvDB, _, cleanupFn := backupRestoreTestSetup(b, numAccounts)
			defer cleanupFn()

			// TODO(dan): Once mjibson's sql -> kv function is committed, use it
			// here on the output of bankDataInsert to generate the backup data
			// instead of this call.
			desc, err := sql.Backup(ctx, *kvDB, dir, tc.Server(0).Clock().Now())
			if err != nil {
				b.Fatal(err)
			}
			b.SetBytes(desc.DataSize)

			rebalanceLeases(b, tc)

			b.ResetTimer()
			table := parser.TableName{DatabaseName: "bench", TableName: "bank"}
			for i := 0; i < b.N; i++ {
				if _, err := sql.Restore(ctx, *kvDB, dir, table); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
func benchmarkCockroach(b *testing.B, f func(b *testing.B, db *gosql.DB)) {
	defer tracing.Disable()()
	s, db, _ := serverutils.StartServer(
		b, base.TestServerArgs{UseDatabase: "bench"})
	defer s.Stopper().Stop()

	if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bench`); err != nil {
		b.Fatal(err)
	}

	f(b, db)
}
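// A hypothetical example of how the harness above is used: benchmarkCockroach
// owns server startup and teardown, and the closure contains only the work
// under measurement. BenchmarkSelect1_Cockroach is an illustrative name, not
// necessarily a benchmark that exists in the package.
func BenchmarkSelect1_Cockroach(b *testing.B) {
	benchmarkCockroach(b, func(b *testing.B, db *gosql.DB) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			if _, err := db.Exec(`SELECT 1`); err != nil {
				b.Fatal(err)
			}
		}
	})
}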
func BenchmarkPgbenchExec_Cockroach(b *testing.B) {
	defer tracing.Disable()()
	s, _, _ := serverutils.StartServer(b, base.TestServerArgs{Insecure: true})
	defer s.Stopper().Stop()

	pgUrl, cleanupFn := sqlutils.PGUrl(
		b, s.ServingAddr(), "benchmarkCockroach", url.User(security.RootUser))
	pgUrl.RawQuery = "sslmode=disable"
	defer cleanupFn()

	execPgbench(b, pgUrl)
}
func benchmarkMultinodeCockroach(b *testing.B, f func(b *testing.B, db *gosql.DB)) {
	defer tracing.Disable()()
	tc := testcluster.StartTestCluster(b, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationAuto,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "bench",
			},
		})
	// Register the cleanup before anything that can call b.Fatal, so the
	// cluster is stopped even if database creation fails.
	defer tc.Stopper().Stop()
	if _, err := tc.Conns[0].Exec(`CREATE DATABASE bench`); err != nil {
		b.Fatal(err)
	}

	f(b, tc.Conns[0])
}
func newKVSQL(b *testing.B) kvInterface {
	enableTracing := tracing.Disable()
	s, db, _ := serverutils.StartServer(
		b, base.TestServerArgs{UseDatabase: "bench"})

	if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bench`); err != nil {
		b.Fatal(err)
	}

	return &kvSQL{
		db: db,
		doneFn: func() {
			s.Stopper().Stop()
			enableTracing()
		},
	}
}
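// kvSQL is the SQL-backed counterpart of kvNative. A sketch of the struct the
// constructor above fills in, mirroring the kvNative sketch earlier; the
// field set is an assumption for illustration, not the package's verbatim
// definition.
type kvSQL struct {
	db     *gosql.DB // SQL connection to the same test server
	doneFn func()
}

func (kv *kvSQL) done() { kv.doneFn() }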
func BenchmarkClusterBackup(b *testing.B) {
	defer tracing.Disable()()

	for _, numAccounts := range []int{10, 100, 1000, 10000} {
		b.Run(strconv.Itoa(numAccounts), func(b *testing.B) {
			ctx, dir, tc, kvDB, _, cleanupFn := backupRestoreTestSetup(b, numAccounts)
			defer cleanupFn()
			rebalanceLeases(b, tc)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				desc, err := sql.Backup(ctx, *kvDB, dir, tc.Server(0).Clock().Now())
				if err != nil {
					b.Fatal(err)
				}
				b.SetBytes(desc.DataSize)
			}
		})
	}
}
func newKVNative(b *testing.B) kvInterface {
	enableTracing := tracing.Disable()
	s, _, _ := serverutils.StartServer(b, base.TestServerArgs{})

	// TestServer.KVClient() returns the TxnCoordSender wrapped client. But that
	// isn't a fair comparison with SQL as we want these client requests to be
	// sent over the network.
	rpcContext := s.RPCContext()

	conn, err := rpcContext.GRPCDial(s.ServingAddr())
	if err != nil {
		b.Fatal(err)
	}

	return &kvNative{
		db: client.NewDB(client.NewSender(conn)),
		doneFn: func() {
			s.Stopper().Stop()
			enableTracing()
		},
	}
}
func BenchmarkStoreRangeMerge(b *testing.B) {
	defer tracing.Disable()()
	storeCfg := storage.TestStoreConfig(nil)
	storeCfg.TestingKnobs.DisableSplitQueue = true
	stopper := stop.NewStopper()
	defer stopper.Stop()
	store := createTestStoreWithConfig(b, stopper, storeCfg)

	// Perform initial split of ranges.
	sArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(context.Background(), rg1(store), sArgs); err != nil {
		b.Fatal(err)
	}

	// Write some values left and right of the proposed split key.
	aDesc := store.LookupReplica([]byte("a"), nil).Desc()
	bDesc := store.LookupReplica([]byte("c"), nil).Desc()
	writeRandomDataToRange(b, store, aDesc.RangeID, []byte("aaa"))
	writeRandomDataToRange(b, store, bDesc.RangeID, []byte("ccc"))

	// Create args to merge the b range back into the a range.
	mArgs := adminMergeArgs(roachpb.KeyMin)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Merge the ranges: this is the operation being measured.
		b.StartTimer()
		if _, err := client.SendWrapped(context.Background(), rg1(store), mArgs); err != nil {
			b.Fatal(err)
		}

		// Split the range again so the next iteration has something to merge;
		// the timer is stopped so the split is not measured.
		b.StopTimer()
		if _, err := client.SendWrapped(context.Background(), rg1(store), sArgs); err != nil {
			b.Fatal(err)
		}
	}
}