func newReplica(lagUpdateInterval, degrationInterval, degrationDuration time.Duration) *replica {
	t := &testing.T{}
	ts := zktestserver.New(t, []string{"cell1"})
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
	db := fakesqldb.Register()
	fakeTablet := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, db,
		testlib.TabletKeyspaceShard(t, "ks", "-80"))
	fakeTablet.StartActionLoop(t, wr)

	target := querypb.Target{
		Keyspace: "ks",
		Shard: "-80",
		TabletType: topodatapb.TabletType_REPLICA,
	}
	qs := fakes.NewStreamHealthQueryService(target)
	grpcqueryservice.Register(fakeTablet.RPCServer, qs)

	throttler, err := throttler.NewThrottler("replica", "TPS", 1, *rate, throttler.ReplicationLagModuleDisabled)
	if err != nil {
		log.Fatal(err)
	}

	var nextDegration time.Time
	if degrationInterval != time.Duration(0) {
		nextDegration = time.Now().Add(degrationInterval)
	}
	r := &replica{
		fakeTablet: fakeTablet,
		qs: qs,
		throttler: throttler,
		replicationStream: make(chan time.Time, 1*1024*1024),
		lagUpdateInterval: lagUpdateInterval,
		degrationInterval: degrationInterval,
		degrationDuration: degrationDuration,
		nextDegration: nextDegration,
		stopChan: make(chan struct{}),
	}
	r.wg.Add(1)
	go r.processReplicationStream()
	return r
}

// TestVerticalSplitClone will run VerticalSplitClone in the combined
// online and offline mode. The online phase will copy 100 rows from the source
// to the destination and the offline phase won't copy any rows as the source
// has not changed in the meantime.
func TestVerticalSplitClone(t *testing.T) {
	db := fakesqldb.Register()
	ts := zktestserver.New(t, []string{"cell1", "cell2"})
	ctx := context.Background()
	wi := NewInstance(ctx, ts, "cell1", time.Second)

	sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	sourceRdonly := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))

	// Create the destination keyspace with the appropriate ServedFromMap
	ki := &topodatapb.Keyspace{
		ServedFroms: []*topodatapb.Keyspace_ServedFrom{
			{
				TabletType: topodatapb.TabletType_MASTER,
				Keyspace: "source_ks",
			},
			{
				TabletType: topodatapb.TabletType_REPLICA,
				Keyspace: "source_ks",
			},
			{
				TabletType: topodatapb.TabletType_RDONLY,
				Keyspace: "source_ks",
			},
		},
	}
	wi.wr.TopoServer().CreateKeyspace(ctx, "destination_ks", ki)

	destMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 10,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	destRdonly := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))

	for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly, destMaster, destRdonly} {
		ft.StartActionLoop(t, wi.wr)
		defer ft.StopActionLoop(t)
	}

	// add the topo and schema data we'll need
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "source_ks", nil); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "destination_ks", nil); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}

	// Set up source rdonly which will be used as input for the diff during the clone.
	sourceRdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
		DatabaseSchema: "",
		TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
			{
				Name: "moving1",
				Columns: []string{"id", "msg"},
				PrimaryKeyColumns: []string{"id"},
				Type: tmutils.TableBaseTable,
				// Set the row count to avoid that --min_rows_per_chunk reduces the
				// number of chunks.
				RowCount: 100,
			},
			{
				Name: "view1",
				Type: tmutils.TableView,
			},
		},
	}
	sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = sourceRdonlyFactory(
		t, "vt_source_ks", "moving1", verticalSplitCloneTestMin, verticalSplitCloneTestMax)
	sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
		GTIDSet: replication.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678},
	}
	sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
		"STOP SLAVE",
		"START SLAVE",
	}
	sourceRdonlyShqs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
	sourceRdonlyShqs.AddDefaultHealthResponse()
	sourceRdonlyQs := newTestQueryService(t, sourceRdonly.Target(), sourceRdonlyShqs, 0, 1, topoproto.TabletAliasString(sourceRdonly.Tablet.Alias), true /* omitKeyspaceID */)
	sourceRdonlyQs.addGeneratedRows(verticalSplitCloneTestMin, verticalSplitCloneTestMax)
	grpcqueryservice.Register(sourceRdonly.RPCServer, sourceRdonlyQs)

	// Set up destination rdonly which will be used as input for the diff during the clone.
	destRdonlyShqs := fakes.NewStreamHealthQueryService(destRdonly.Target())
	destRdonlyShqs.AddDefaultHealthResponse()
	destRdonlyQs := newTestQueryService(t, destRdonly.Target(), destRdonlyShqs, 0, 1, topoproto.TabletAliasString(destRdonly.Tablet.Alias), true /* omitKeyspaceID */)
	// This tablet is empty and does not return any rows.
	grpcqueryservice.Register(destRdonly.RPCServer, destRdonlyQs)

	// We read 100 source rows. source_reader_count is set to 10, so
	// we'll have 100/10=10 rows per table chunk.
	// write_query_max_rows is set to 4, so at most 4 source rows are packed
	// into one insert. So we'll process 4 + 4 + 2 rows to get to 10.
	// That means 3 insert statements per chunk. So 3 * 10
	// = 30 insert statements on the destination.
	destMasterFakeDb := createVerticalSplitCloneDestinationFakeDb(t, "destMaster", 30)
	defer destMasterFakeDb.verifyAllExecutedOrFail()
	destMaster.FakeMysqlDaemon.DbAppConnectionFactory = destMasterFakeDb.getFactory()

	// Fake stream health responses because vtworker needs them to find the master.
	qs := fakes.NewStreamHealthQueryService(destMaster.Target())
	qs.AddDefaultHealthResponse()
	grpcqueryservice.Register(destMaster.RPCServer, qs)

	// Only wait 1 ms between retries, so that the test passes faster.
	*executeFetchRetryTime = (1 * time.Millisecond)

	// When the online clone inserted the last rows, modify the destination test
	// query service such that it will return them as well.
	destMasterFakeDb.getEntry(29).AfterFunc = func() {
		destRdonlyQs.addGeneratedRows(verticalSplitCloneTestMin, verticalSplitCloneTestMax)
	}

	// Run the vtworker command.
	args := []string{
		"VerticalSplitClone",
		// --max_tps is only specified to enable the throttler and ensure that the
		// code is executed. But the intent here is not to throttle the test, hence
		// the rate limit is set very high.
		"-max_tps", "9999",
		"-tables", "moving.*,view1",
		"-source_reader_count", "10",
		// Each chunk pipeline will process 10 rows. To spread them out across 3
		// write queries, set the max row count per query to 4. (10 = 4+4+2)
		"-write_query_max_rows", "4",
		"-min_rows_per_chunk", "10",
		"-destination_writer_count", "10",
		// This test uses only one healthy RDONLY tablet.
		"-min_healthy_rdonly_tablets", "1",
		"destination_ks/0",
	}
	if err := runCommand(t, wi, wi.wr, args); err != nil {
		t.Fatal(err)
	}

	if inserts := statsOnlineInsertsCounters.Counts()["moving1"]; inserts != 100 {
		t.Errorf("wrong number of rows inserted: got = %v, want = %v", inserts, 100)
	}
	if updates := statsOnlineUpdatesCounters.Counts()["moving1"]; updates != 0 {
		t.Errorf("wrong number of rows updated: got = %v, want = %v", updates, 0)
	}
	if deletes := statsOnlineDeletesCounters.Counts()["moving1"]; deletes != 0 {
		t.Errorf("wrong number of rows deleted: got = %v, want = %v", deletes, 0)
	}
	if inserts := statsOfflineInsertsCounters.Counts()["moving1"]; inserts != 0 {
		t.Errorf("no stats for the offline clone phase should have been modified. got inserts = %v", inserts)
	}
	if updates := statsOfflineUpdatesCounters.Counts()["moving1"]; updates != 0 {
		t.Errorf("no stats for the offline clone phase should have been modified. got updates = %v", updates)
	}
	if deletes := statsOfflineDeletesCounters.Counts()["moving1"]; deletes != 0 {
		t.Errorf("no stats for the offline clone phase should have been modified. got deletes = %v", deletes)
	}

	wantRetryCount := int64(1)
	if got := statsRetryCount.Get(); got != wantRetryCount {
		t.Errorf("Wrong statsRetryCounter: got %v, wanted %v", got, wantRetryCount)
	}
	wantRetryReadOnlyCount := int64(1)
	if got := statsRetryCounters.Counts()[retryCategoryReadOnly]; got != wantRetryReadOnlyCount {
		t.Errorf("Wrong statsRetryCounters: got %v, wanted %v", got, wantRetryReadOnlyCount)
	}
}

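// Editor's note: the comments above reason through the expected insert count
// (100 rows, 10 chunks, at most 4 rows per write query => 3 queries per chunk
// => 30 total). The helper below is purely illustrative and is not part of the
// original test file; it only spells out that arithmetic.
func expectedVerticalCloneInsertCount(totalRows, sourceReaderCount, writeQueryMaxRows int) int {
	rowsPerChunk := totalRows / sourceReaderCount // 100/10 = 10
	// Ceiling division: 10 rows at up to 4 rows per query -> 3 queries per chunk.
	insertsPerChunk := (rowsPerChunk + writeQueryMaxRows - 1) / writeQueryMaxRows
	return insertsPerChunk * sourceReaderCount // 3*10 = 30
}
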
func TestVerticalSplitDiff(t *testing.T) {
	db := fakesqldb.Register()
	ts := zktestserver.New(t, []string{"cell1", "cell2"})
	ctx := context.Background()
	wi := NewInstance(ts, "cell1", time.Second)

	sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	sourceRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	sourceRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 2,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))

	// Create the destination keyspace with the appropriate ServedFromMap
	ki := &topodatapb.Keyspace{
		ServedFroms: []*topodatapb.Keyspace_ServedFrom{
			{
				TabletType: topodatapb.TabletType_MASTER,
				Keyspace: "source_ks",
			},
			{
				TabletType: topodatapb.TabletType_REPLICA,
				Keyspace: "source_ks",
			},
			{
				TabletType: topodatapb.TabletType_RDONLY,
				Keyspace: "source_ks",
			},
		},
	}
	wi.wr.TopoServer().CreateKeyspace(ctx, "destination_ks", ki)

	destMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 10,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	destRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	destRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 12,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))

	for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, destMaster, destRdonly1, destRdonly2} {
		ft.StartActionLoop(t, wi.wr)
		defer ft.StopActionLoop(t)
	}

	wi.wr.SetSourceShards(ctx, "destination_ks", "0", []*topodatapb.TabletAlias{sourceRdonly1.Tablet.Alias}, []string{"moving.*", "view1"})

	// add the topo and schema data we'll need
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "source_ks", nil); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "destination_ks", nil); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}

	for _, rdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2, destRdonly1, destRdonly2} {
		// both source and destination have the table definition for 'moving1'.
		// source also has "staying1" while destination has "extra1".
		// (Both additional tables should be ignored by the diff.)
		extraTable := "staying1"
		if rdonly == destRdonly1 || rdonly == destRdonly2 {
			extraTable = "extra1"
		}
		rdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
				{
					Name: "moving1",
					Columns: []string{"id", "msg"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
				},
				{
					Name: extraTable,
					Columns: []string{"id", "msg"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
				},
				{
					Name: "view1",
					Type: tmutils.TableView,
				},
			},
		}
		qs := fakes.NewStreamHealthQueryService(rdonly.Target())
		qs.AddDefaultHealthResponse()
		grpcqueryservice.Register(rdonly.RPCServer, &verticalDiffTabletServer{
			t: t,
			StreamHealthQueryService: qs,
		})
	}

	// Run the vtworker command.
	args := []string{"VerticalSplitDiff", "destination_ks/0"}
	// We need to use FakeTabletManagerClient because we don't
	// have a good way to fake the binlog player yet, which is
	// necessary for synchronizing replication.
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, newFakeTMCTopo(ts))
	if err := runCommand(t, wi, wr, args); err != nil {
		t.Fatal(err)
	}
}

func (tc *splitCloneTestCase) setUpWithConcurrency(v3 bool, concurrency, writeQueryMaxRows, rowsCount int) {
	*useV3ReshardingMode = v3
	db := fakesqldb.Register()
	tc.ts = zktestserver.New(tc.t, []string{"cell1", "cell2"})
	ctx := context.Background()
	tc.wi = NewInstance(tc.ts, "cell1", time.Second)

	if v3 {
		if err := tc.ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}); err != nil {
			tc.t.Fatalf("CreateKeyspace v3 failed: %v", err)
		}

		vs := &vschemapb.Keyspace{
			Sharded: true,
			Vindexes: map[string]*vschemapb.Vindex{
				"table1_index": {
					Type: "numeric",
				},
			},
			Tables: map[string]*vschemapb.Table{
				"table1": {
					ColumnVindexes: []*vschemapb.ColumnVindex{
						{
							Column: "keyspace_id",
							Name: "table1_index",
						},
					},
				},
			},
		}
		if err := tc.ts.SaveVSchema(ctx, "ks", vs); err != nil {
			tc.t.Fatalf("SaveVSchema v3 failed: %v", err)
		}
	} else {
		if err := tc.ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{
			ShardingColumnName: "keyspace_id",
			ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
		}); err != nil {
			tc.t.Fatalf("CreateKeyspace v2 failed: %v", err)
		}
	}

	sourceMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 0,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-80"))
	sourceRdonly1 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 1,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-80"))
	sourceRdonly2 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 2,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-80"))

	leftMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 10,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-40"))
	// leftReplica is used by the reparent test.
	leftReplica := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 11,
		topodatapb.TabletType_REPLICA, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-40"))
	tc.leftReplica = leftReplica
	leftRdonly1 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 12,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-40"))
	leftRdonly2 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 13,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-40"))

	rightMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 20,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80"))
	rightRdonly1 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 22,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80"))
	rightRdonly2 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 23,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80"))

	tc.tablets = []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2,
		leftMaster, tc.leftReplica, leftRdonly1, leftRdonly2, rightMaster, rightRdonly1, rightRdonly2}

	for _, ft := range tc.tablets {
		ft.StartActionLoop(tc.t, tc.wi.wr)
	}

	// add the topo and schema data we'll need
	if err := tc.ts.CreateShard(ctx, "ks", "80-"); err != nil {
		tc.t.Fatalf("CreateShard(\"80-\") failed: %v", err)
	}
	if err := tc.wi.wr.SetKeyspaceShardingInfo(ctx, "ks", "keyspace_id", topodatapb.KeyspaceIdType_UINT64, false); err != nil {
		tc.t.Fatalf("SetKeyspaceShardingInfo failed: %v", err)
	}
	if err := tc.wi.wr.RebuildKeyspaceGraph(ctx, "ks", nil); err != nil {
		tc.t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}

	for _, sourceRdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2} {
		sourceRdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
				{
					Name: "table1",
					// "id" is the last column in the list on purpose to test for
					// regressions. The reconciliation code will SELECT with the primary
					// key columns first. The same ordering must be used throughout the
					// process e.g. by RowAggregator or the v2Resolver.
					Columns: []string{"msg", "keyspace_id", "id"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
					// Set the row count to avoid that --min_rows_per_chunk reduces the
					// number of chunks.
					RowCount: uint64(rowsCount),
				},
			},
		}
		sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = sourceRdonlyFactory(
			tc.t, "vt_ks", "table1", splitCloneTestMin, splitCloneTestMax)
		sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
			GTIDSet: replication.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678},
		}
		sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
			"STOP SLAVE",
			"START SLAVE",
		}
		shqs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
		shqs.AddDefaultHealthResponse()
		qs := newTestQueryService(tc.t, sourceRdonly.Target(), shqs, 0, 1, topoproto.TabletAliasString(sourceRdonly.Tablet.Alias), false /* omitKeyspaceID */)
		qs.addGeneratedRows(100, 100+rowsCount)
		grpcqueryservice.Register(sourceRdonly.RPCServer, qs)
		tc.sourceRdonlyQs = append(tc.sourceRdonlyQs, qs)
	}

	// Set up destination rdonlys which will be used as input for the diff during the clone.
	for i, destRdonly := range []*testlib.FakeTablet{leftRdonly1, rightRdonly1, leftRdonly2, rightRdonly2} {
		shqs := fakes.NewStreamHealthQueryService(destRdonly.Target())
		shqs.AddDefaultHealthResponse()
		qs := newTestQueryService(tc.t, destRdonly.Target(), shqs, i%2, 2, topoproto.TabletAliasString(destRdonly.Tablet.Alias), false /* omitKeyspaceID */)
		grpcqueryservice.Register(destRdonly.RPCServer, qs)
		if i%2 == 0 {
			tc.leftRdonlyQs = append(tc.leftRdonlyQs, qs)
		} else {
			tc.rightRdonlyQs = append(tc.rightRdonlyQs, qs)
		}
	}

	tc.leftMasterFakeDb = NewFakePoolConnectionQuery(tc.t, "leftMaster")
	tc.leftReplicaFakeDb = NewFakePoolConnectionQuery(tc.t, "leftReplica")
	tc.rightMasterFakeDb = NewFakePoolConnectionQuery(tc.t, "rightMaster")

	// In the default test case there will be 30 inserts per destination shard
	// because 10 writer threads will insert 5 rows on each destination shard.
	// (100 rowsCount / 10 writers / 2 shards = 5 rows.)
	// Due to --write_query_max_rows=2 there will be 3 inserts for 5 rows.
	rowsPerDestinationShard := rowsCount / 2
	rowsPerThread := rowsPerDestinationShard / concurrency
	insertsPerThread := math.Ceil(float64(rowsPerThread) / float64(writeQueryMaxRows))
	insertsTotal := int(insertsPerThread) * concurrency
	for i := 1; i <= insertsTotal; i++ {
		tc.leftMasterFakeDb.addExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil)
		// leftReplica is unused by default.
		tc.rightMasterFakeDb.addExpectedQuery("INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*", nil)
	}
	expectBlpCheckpointCreationQueries(tc.leftMasterFakeDb)
	expectBlpCheckpointCreationQueries(tc.rightMasterFakeDb)

	leftMaster.FakeMysqlDaemon.DbAppConnectionFactory = tc.leftMasterFakeDb.getFactory()
	leftReplica.FakeMysqlDaemon.DbAppConnectionFactory = tc.leftReplicaFakeDb.getFactory()
	rightMaster.FakeMysqlDaemon.DbAppConnectionFactory = tc.rightMasterFakeDb.getFactory()

	// Fake stream health responses because vtworker needs them to find the master.
	tc.leftMasterQs = fakes.NewStreamHealthQueryService(leftMaster.Target())
	tc.leftMasterQs.AddDefaultHealthResponse()
	tc.leftReplicaQs = fakes.NewStreamHealthQueryService(leftReplica.Target())
	tc.leftReplicaQs.AddDefaultHealthResponse()
	tc.rightMasterQs = fakes.NewStreamHealthQueryService(rightMaster.Target())
	tc.rightMasterQs.AddDefaultHealthResponse()
	grpcqueryservice.Register(leftMaster.RPCServer, tc.leftMasterQs)
	grpcqueryservice.Register(leftReplica.RPCServer, tc.leftReplicaQs)
	grpcqueryservice.Register(rightMaster.RPCServer, tc.rightMasterQs)

	tc.defaultWorkerArgs = []string{
		"SplitClone",
		"-online=false",
		// --max_tps is only specified to enable the throttler and ensure that the
		// code is executed. But the intent here is not to throttle the test, hence
		// the rate limit is set very high.
		"-max_tps", "9999",
		"-write_query_max_rows", strconv.Itoa(writeQueryMaxRows),
		"-chunk_count", strconv.Itoa(concurrency),
		"-min_rows_per_chunk", strconv.Itoa(rowsPerThread),
		"-source_reader_count", strconv.Itoa(concurrency),
		"-destination_writer_count", strconv.Itoa(concurrency),
		"ks/-80"}
}

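// Editor's note: a plausible convenience wrapper for the default case the
// comments above describe (concurrency 10, write_query_max_rows 2, 100 rows).
// This is only a sketch of how callers might invoke setUpWithConcurrency; the
// actual test file may define its setUp helper differently.
func (tc *splitCloneTestCase) setUp(v3 bool) {
	tc.setUpWithConcurrency(v3, 10, 2, 100)
}
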
func testWaitForDrain(t *testing.T, desc, cells string, drain drainDirective, expectedErrors []string) {
	const keyspace = "ks"
	const shard = "-80"

	db := fakesqldb.Register()
	ts := zktestserver.New(t, []string{"cell1", "cell2"})
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
	flag.Set("vtctl_healthcheck_timeout", "0.25s")
	vp := NewVtctlPipe(t, ts)
	defer vp.Close()

	// Create keyspace.
	if err := ts.CreateKeyspace(context.Background(), keyspace, &topodatapb.Keyspace{
		ShardingColumnName: "keyspace_id",
		ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
	}); err != nil {
		t.Fatalf("CreateKeyspace failed: %v", err)
	}

	t1 := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, db,
		TabletKeyspaceShard(t, keyspace, shard))
	t2 := NewFakeTablet(t, wr, "cell2", 1, topodatapb.TabletType_REPLICA, db,
		TabletKeyspaceShard(t, keyspace, shard))
	for _, ft := range []*FakeTablet{t1, t2} {
		ft.StartActionLoop(t, wr)
		defer ft.StopActionLoop(t)
	}

	target := querypb.Target{
		Keyspace: keyspace,
		Shard: shard,
		TabletType: topodatapb.TabletType_REPLICA,
	}
	fqs1 := fakes.NewStreamHealthQueryService(target)
	fqs2 := fakes.NewStreamHealthQueryService(target)
	grpcqueryservice.Register(t1.RPCServer, fqs1)
	grpcqueryservice.Register(t2.RPCServer, fqs2)

	// Run vtctl WaitForDrain and react depending on its output.
	timeout := "0.5s"
	if len(expectedErrors) == 0 {
		// Tests with a positive outcome should have a more generous timeout to
		// avoid flakiness.
		timeout = "30s"
	}
	stream, err := vp.RunAndStreamOutput(
		[]string{"WaitForDrain", "-cells", cells, "-retry_delay", "100ms", "-timeout", timeout,
			keyspace + "/" + shard, topodatapb.TabletType_REPLICA.String()})
	if err != nil {
		t.Fatalf("VtctlPipe.RunAndStreamOutput() failed: %v", err)
	}

	// QPS = 1.0. Tablets are not drained yet.
	fqs1.AddHealthResponseWithQPS(1.0)
	fqs2.AddHealthResponseWithQPS(1.0)

	var le *logutilpb.Event
	for {
		le, err = stream.Recv()
		if err != nil {
			break
		}
		line := logutil.EventString(le)
		t.Logf(line)
		if strings.Contains(line, "for all healthy tablets to be drained") {
			t.Log("Successfully waited for WaitForDrain to be blocked because tablets have a QPS rate > 0.0")
			break
		} else {
			t.Log("waiting for WaitForDrain to see a QPS rate > 0.0")
		}
	}

	if drain&DrainCell1 != 0 {
		fqs1.AddHealthResponseWithQPS(0.0)
	} else {
		fqs1.AddHealthResponseWithQPS(2.0)
	}
	if drain&DrainCell2 != 0 {
		fqs2.AddHealthResponseWithQPS(0.0)
	} else {
		fqs2.AddHealthResponseWithQPS(2.0)
	}

	// If a cell was drained, its rate should drop to 0.0 now.
	// If not all selected cells were drained, this will end after "-timeout".
	for {
		le, err = stream.Recv()
		if err == nil {
			vp.t.Logf(logutil.EventString(le))
		} else {
			break
		}
	}

	if len(expectedErrors) == 0 {
		if err != io.EOF {
			t.Fatalf("TestWaitForDrain: %v: no error expected but got: %v", desc, err)
		}
		// else: Success.
	} else {
		if err == nil || err == io.EOF {
			t.Fatalf("TestWaitForDrain: %v: error expected but got none", desc)
		}
		for _, errString := range expectedErrors {
			if !strings.Contains(err.Error(), errString) {
				t.Fatalf("TestWaitForDrain: %v: error does not include expected string. got: %v want: %v", desc, err, errString)
			}
		}
		// Success.
	}
}

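// Editor's note: a hedged sketch of how this helper might be invoked. The test
// name and the scenario below are illustrative assumptions, not copied from the
// real test file; failure cases would pass expected error substrings via the
// last parameter instead of nil.
func TestWaitForDrainSketch(t *testing.T) {
	// Both cells are selected and both report QPS 0.0, so WaitForDrain
	// should finish without an error.
	testWaitForDrain(t, "drain both cells", "cell1,cell2", DrainCell1|DrainCell2, nil)
}
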
func testSplitDiff(t *testing.T, v3 bool) {
	*useV3ReshardingMode = v3
	db := fakesqldb.Register()
	ts := zktestserver.New(t, []string{"cell1", "cell2"})
	ctx := context.Background()
	wi := NewInstance(ts, "cell1", time.Second)

	if v3 {
		if err := ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}); err != nil {
			t.Fatalf("CreateKeyspace v3 failed: %v", err)
		}

		vs := &vschemapb.Keyspace{
			Sharded: true,
			Vindexes: map[string]*vschemapb.Vindex{
				"table1_index": {
					Type: "numeric",
				},
			},
			Tables: map[string]*vschemapb.Table{
				"table1": {
					ColumnVindexes: []*vschemapb.ColumnVindex{
						{
							Column: "keyspace_id",
							Name: "table1_index",
						},
					},
				},
			},
		}
		if err := ts.SaveVSchema(ctx, "ks", vs); err != nil {
			t.Fatalf("SaveVSchema v3 failed: %v", err)
		}
	} else {
		if err := ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{
			ShardingColumnName: "keyspace_id",
			ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
		}); err != nil {
			t.Fatalf("CreateKeyspace failed: %v", err)
		}
	}

	sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	sourceRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))
	sourceRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 2,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-80"))

	leftMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 10,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	leftRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))
	leftRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 12,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "ks", "-40"))

	for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, leftMaster, leftRdonly1, leftRdonly2} {
		ft.StartActionLoop(t, wi.wr)
		defer ft.StopActionLoop(t)
	}

	// add the topo and schema data we'll need
	if err := ts.CreateShard(ctx, "ks", "80-"); err != nil {
		t.Fatalf("CreateShard(\"80-\") failed: %v", err)
	}
	wi.wr.SetSourceShards(ctx, "ks", "-40", []*topodatapb.TabletAlias{sourceRdonly1.Tablet.Alias}, nil)
	if err := wi.wr.SetKeyspaceShardingInfo(ctx, "ks", "keyspace_id", topodatapb.KeyspaceIdType_UINT64, false); err != nil {
		t.Fatalf("SetKeyspaceShardingInfo failed: %v", err)
	}
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "ks", nil); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}

	excludedTable := "excludedTable1"

	for _, rdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2, leftRdonly1, leftRdonly2} {
		// The destination only has half the data.
		// For v2, we do filtering at the SQL level.
		// For v3, we do it in the client.
		// So in any case, we need real data.
		rdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
				{
					Name: "table1",
					Columns: []string{"id", "msg", "keyspace_id"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
				},
				{
					Name: excludedTable,
					Columns: []string{"id", "msg", "keyspace_id"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
				},
			},
		}
	}

	for _, sourceRdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2} {
		qs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
		qs.AddDefaultHealthResponse()
		grpcqueryservice.Register(sourceRdonly.RPCServer, &sourceTabletServer{
			t: t,
			StreamHealthQueryService: qs,
			excludedTable: excludedTable,
			v3: v3,
		})
	}

	for _, destRdonly := range []*testlib.FakeTablet{leftRdonly1, leftRdonly2} {
		qs := fakes.NewStreamHealthQueryService(destRdonly.Target())
		qs.AddDefaultHealthResponse()
		grpcqueryservice.Register(destRdonly.RPCServer, &destinationTabletServer{
			t: t,
			StreamHealthQueryService: qs,
			excludedTable: excludedTable,
		})
	}

	// Run the vtworker command.
	args := []string{
		"SplitDiff",
		"-exclude_tables", excludedTable,
		"ks/-40",
	}
	// We need to use FakeTabletManagerClient because we don't
	// have a good way to fake the binlog player yet, which is
	// necessary for synchronizing replication.
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, newFakeTMCTopo(ts))
	if err := runCommand(t, wi, wr, args); err != nil {
		t.Fatal(err)
	}
}

// TestRealtimeStatsWithQueryService uses fakeTablets and the fakeQueryService to
// copy the environment needed for the HealthCheck object.
func TestRealtimeStatsWithQueryService(t *testing.T) {
	// Set up testing keyspace with 2 tablets within 2 cells.
	keyspace := "ks"
	shard := "-80"
	tabletType := topodatapb.TabletType_REPLICA.String()
	ctx := context.Background()
	db := fakesqldb.Register()
	ts := zktestserver.New(t, []string{"cell1", "cell2"})
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())

	if err := ts.CreateKeyspace(context.Background(), keyspace, &topodatapb.Keyspace{
		ShardingColumnName: "keyspace_id",
		ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
	}); err != nil {
		t.Fatalf("CreateKeyspace failed: %v", err)
	}

	t1 := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, db,
		testlib.TabletKeyspaceShard(t, keyspace, shard))
	t2 := testlib.NewFakeTablet(t, wr, "cell2", 1, topodatapb.TabletType_REPLICA, db,
		testlib.TabletKeyspaceShard(t, keyspace, shard))
	for _, ft := range []*testlib.FakeTablet{t1, t2} {
		ft.StartActionLoop(t, wr)
		defer ft.StopActionLoop(t)
	}

	target := querypb.Target{
		Keyspace: keyspace,
		Shard: shard,
		TabletType: topodatapb.TabletType_REPLICA,
	}
	fqs1 := fakes.NewStreamHealthQueryService(target)
	fqs2 := fakes.NewStreamHealthQueryService(target)
	grpcqueryservice.Register(t1.RPCServer, fqs1)
	grpcqueryservice.Register(t2.RPCServer, fqs2)
	fqs1.AddDefaultHealthResponse()

	realtimeStats, err := newRealtimeStats(ts)
	if err != nil {
		t.Fatalf("newRealtimeStats error: %v", err)
	}

	if err := discovery.WaitForTablets(ctx, realtimeStats.healthCheck, "cell1", keyspace, shard,
		[]topodatapb.TabletType{topodatapb.TabletType_REPLICA}); err != nil {
		t.Fatalf("waitForTablets failed: %v", err)
	}

	// Test 1: tablet1's stats should be updated with the one received by the HealthCheck object.
	result := realtimeStats.tabletStatuses("cell1", keyspace, shard, tabletType)
	got := result["0"].Stats
	want := &querypb.RealtimeStats{
		SecondsBehindMaster: 1,
	}
	if !proto.Equal(got, want) {
		t.Errorf("got: %v, want: %v", got, want)
	}

	// Test 2: tablet1's stats should be updated with the new one received by the HealthCheck object.
	fqs1.AddHealthResponseWithQPS(2.0)
	want2 := &querypb.RealtimeStats{
		SecondsBehindMaster: 1,
		Qps: 2.0,
	}
	if err := checkStats(realtimeStats, "0", "cell1", keyspace, shard, tabletType, want2); err != nil {
		t.Errorf("%v", err)
	}

	// Test 3: tablet2's stats should be updated with the one received by the HealthCheck object,
	// leaving tablet1's stats unchanged.
	fqs2.AddHealthResponseWithQPS(3.0)
	want3 := &querypb.RealtimeStats{
		SecondsBehindMaster: 1,
		Qps: 3.0,
	}
	if err := checkStats(realtimeStats, "1", "cell2", keyspace, shard, tabletType, want3); err != nil {
		t.Errorf("%v", err)
	}
	if err := checkStats(realtimeStats, "0", "cell1", keyspace, shard, tabletType, want2); err != nil {
		t.Errorf("%v", err)
	}
}

func (tc *legacySplitCloneTestCase) setUp(v3 bool) {
	*useV3ReshardingMode = v3
	db := fakesqldb.Register()
	tc.ts = zktestserver.New(tc.t, []string{"cell1", "cell2"})
	ctx := context.Background()
	tc.wi = NewInstance(ctx, tc.ts, "cell1", time.Second)

	if v3 {
		if err := tc.ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}); err != nil {
			tc.t.Fatalf("CreateKeyspace v3 failed: %v", err)
		}

		vs := &vschemapb.Keyspace{
			Sharded: true,
			Vindexes: map[string]*vschemapb.Vindex{
				"table1_index": {
					Type: "numeric",
				},
			},
			Tables: map[string]*vschemapb.Table{
				"table1": {
					ColumnVindexes: []*vschemapb.ColumnVindex{
						{
							Column: "keyspace_id",
							Name: "table1_index",
						},
					},
				},
			},
		}
		if err := tc.ts.SaveVSchema(ctx, "ks", vs); err != nil {
			tc.t.Fatalf("SaveVSchema v3 failed: %v", err)
		}
	} else {
		if err := tc.ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{
			ShardingColumnName: "keyspace_id",
			ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
		}); err != nil {
			tc.t.Fatalf("CreateKeyspace v2 failed: %v", err)
		}
	}

	sourceMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 0,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-80"))
	sourceRdonly1 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 1,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-80"))
	sourceRdonly2 := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 2,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-80"))

	leftMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 10,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-40"))
	leftRdonly := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 11,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-40"))
	// leftReplica is used by the reparent test.
	leftReplica := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 12,
		topodatapb.TabletType_REPLICA, db, testlib.TabletKeyspaceShard(tc.t, "ks", "-40"))
	tc.leftReplica = leftReplica

	rightMaster := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 20,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80"))
	rightRdonly := testlib.NewFakeTablet(tc.t, tc.wi.wr, "cell1", 21,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(tc.t, "ks", "40-80"))

	tc.tablets = []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2,
		leftMaster, leftRdonly, tc.leftReplica, rightMaster, rightRdonly}

	for _, ft := range tc.tablets {
		ft.StartActionLoop(tc.t, tc.wi.wr)
	}

	// add the topo and schema data we'll need
	if err := tc.ts.CreateShard(ctx, "ks", "80-"); err != nil {
		tc.t.Fatalf("CreateShard(\"80-\") failed: %v", err)
	}
	if err := tc.wi.wr.SetKeyspaceShardingInfo(ctx, "ks", "keyspace_id", topodatapb.KeyspaceIdType_UINT64, false); err != nil {
		tc.t.Fatalf("SetKeyspaceShardingInfo failed: %v", err)
	}
	if err := tc.wi.wr.RebuildKeyspaceGraph(ctx, "ks", nil); err != nil {
		tc.t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}

	for _, sourceRdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2} {
		sourceRdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
				{
					Name: "table1",
					Columns: []string{"id", "msg", "keyspace_id"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
					// This informs how many rows we can pack into a single insert.
					DataLength: 2048,
				},
			},
		}
		sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = sourceRdonlyFactory(
			tc.t, "vt_ks.table1", legacySplitCloneTestMin, legacySplitCloneTestMax)
		sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
			GTIDSet: replication.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678},
		}
		sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
			"STOP SLAVE",
			"START SLAVE",
		}
		qs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
		qs.AddDefaultHealthResponse()
		grpcqueryservice.RegisterForTest(sourceRdonly.RPCServer, &legacyTestQueryService{
			t: tc.t,
			StreamHealthQueryService: qs,
		})
	}

	// We read 100 source rows. sourceReaderCount is set to 10, so
	// we'll have 100/10=10 rows per table chunk.
	// destinationPackCount is set to 4, so we take 4 source rows
	// at once. So we'll process 4 + 4 + 2 rows to get to 10.
	// That means 3 insert statements on each target (each
	// containing half of the rows, i.e. 2 + 2 + 1 rows). So 3 * 10
	// = 30 insert statements on each destination.
	tc.leftMasterFakeDb = NewFakePoolConnectionQuery(tc.t, "leftMaster")
	tc.leftReplicaFakeDb = NewFakePoolConnectionQuery(tc.t, "leftReplica")
	tc.rightMasterFakeDb = NewFakePoolConnectionQuery(tc.t, "rightMaster")

	for i := 1; i <= 30; i++ {
		tc.leftMasterFakeDb.addExpectedQuery("INSERT INTO `vt_ks`.table1(id, msg, keyspace_id) VALUES (*", nil)
		// leftReplica is unused by default.
		tc.rightMasterFakeDb.addExpectedQuery("INSERT INTO `vt_ks`.table1(id, msg, keyspace_id) VALUES (*", nil)
	}
	expectBlpCheckpointCreationQueries(tc.leftMasterFakeDb)
	expectBlpCheckpointCreationQueries(tc.rightMasterFakeDb)

	leftMaster.FakeMysqlDaemon.DbAppConnectionFactory = tc.leftMasterFakeDb.getFactory()
	leftReplica.FakeMysqlDaemon.DbAppConnectionFactory = tc.leftReplicaFakeDb.getFactory()
	rightMaster.FakeMysqlDaemon.DbAppConnectionFactory = tc.rightMasterFakeDb.getFactory()

	// Fake stream health responses because vtworker needs them to find the master.
	tc.leftMasterQs = fakes.NewStreamHealthQueryService(leftMaster.Target())
	tc.leftMasterQs.AddDefaultHealthResponse()
	tc.leftReplicaQs = fakes.NewStreamHealthQueryService(leftReplica.Target())
	tc.leftReplicaQs.AddDefaultHealthResponse()
	tc.rightMasterQs = fakes.NewStreamHealthQueryService(rightMaster.Target())
	tc.rightMasterQs.AddDefaultHealthResponse()
	grpcqueryservice.RegisterForTest(leftMaster.RPCServer, tc.leftMasterQs)
	grpcqueryservice.RegisterForTest(leftReplica.RPCServer, tc.leftReplicaQs)
	grpcqueryservice.RegisterForTest(rightMaster.RPCServer, tc.rightMasterQs)

	tc.defaultWorkerArgs = []string{
		"LegacySplitClone",
		"-source_reader_count", "10",
		"-destination_pack_count", "4",
		"-min_table_size_for_split", "1",
		"-destination_writer_count", "10",
		"ks/-80"}
}

func TestVerticalSplitClone(t *testing.T) {
	db := fakesqldb.Register()
	ts := zktestserver.New(t, []string{"cell1", "cell2"})
	ctx := context.Background()
	wi := NewInstance(ctx, ts, "cell1", time.Second)

	sourceMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 0,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	sourceRdonly1 := testlib.NewFakeTablet(t, wi.wr, "cell1", 1,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))
	sourceRdonly2 := testlib.NewFakeTablet(t, wi.wr, "cell1", 2,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "source_ks", "0"))

	// Create the destination keyspace with the appropriate ServedFromMap
	ki := &topodatapb.Keyspace{
		ServedFroms: []*topodatapb.Keyspace_ServedFrom{
			{
				TabletType: topodatapb.TabletType_MASTER,
				Keyspace: "source_ks",
			},
			{
				TabletType: topodatapb.TabletType_REPLICA,
				Keyspace: "source_ks",
			},
			{
				TabletType: topodatapb.TabletType_RDONLY,
				Keyspace: "source_ks",
			},
		},
	}
	wi.wr.TopoServer().CreateKeyspace(ctx, "destination_ks", ki)

	destMaster := testlib.NewFakeTablet(t, wi.wr, "cell1", 10,
		topodatapb.TabletType_MASTER, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))
	destRdonly := testlib.NewFakeTablet(t, wi.wr, "cell1", 11,
		topodatapb.TabletType_RDONLY, db, testlib.TabletKeyspaceShard(t, "destination_ks", "0"))

	for _, ft := range []*testlib.FakeTablet{sourceMaster, sourceRdonly1, sourceRdonly2, destMaster, destRdonly} {
		ft.StartActionLoop(t, wi.wr)
		defer ft.StopActionLoop(t)
	}

	// add the topo and schema data we'll need
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "source_ks", nil); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}
	if err := wi.wr.RebuildKeyspaceGraph(ctx, "destination_ks", nil); err != nil {
		t.Fatalf("RebuildKeyspaceGraph failed: %v", err)
	}

	for _, sourceRdonly := range []*testlib.FakeTablet{sourceRdonly1, sourceRdonly2} {
		sourceRdonly.FakeMysqlDaemon.Schema = &tabletmanagerdatapb.SchemaDefinition{
			DatabaseSchema: "",
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
				{
					Name: "moving1",
					Columns: []string{"id", "msg"},
					PrimaryKeyColumns: []string{"id"},
					Type: tmutils.TableBaseTable,
					// Set the table size to a value higher than --min_table_size_for_split.
					DataLength: 2048,
				},
				{
					Name: "view1",
					Type: tmutils.TableView,
				},
			},
		}
		sourceRdonly.FakeMysqlDaemon.DbAppConnectionFactory = sourceRdonlyFactory(
			t, "vt_source_ks.moving1", verticalSplitCloneTestMin, verticalSplitCloneTestMax)
		sourceRdonly.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
			GTIDSet: replication.MariadbGTID{Domain: 12, Server: 34, Sequence: 5678},
		}
		sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
			"STOP SLAVE",
			"START SLAVE",
		}
		qs := fakes.NewStreamHealthQueryService(sourceRdonly.Target())
		qs.AddDefaultHealthResponse()
		grpcqueryservice.Register(sourceRdonly.RPCServer, &verticalTabletServer{
			t: t,
			StreamHealthQueryService: qs,
		})
	}

	// We read 100 source rows. sourceReaderCount is set to 10, so
	// we'll have 100/10=10 rows per table chunk.
	// destinationPackCount is set to 4, so we take 4 source rows
	// at once. So we'll process 4 + 4 + 2 rows to get to 10.
	// That means 3 insert statements on the target. So 3 * 10
	// = 30 insert statements on the destination.
	destMasterFakeDb := createVerticalSplitCloneDestinationFakeDb(t, "destMaster", 30)
	defer destMasterFakeDb.verifyAllExecutedOrFail()
	destMaster.FakeMysqlDaemon.DbAppConnectionFactory = destMasterFakeDb.getFactory()

	// Fake stream health responses because vtworker needs them to find the master.
	qs := fakes.NewStreamHealthQueryService(destMaster.Target())
	qs.AddDefaultHealthResponse()
	grpcqueryservice.Register(destMaster.RPCServer, qs)

	// Only wait 1 ms between retries, so that the test passes faster.
	*executeFetchRetryTime = (1 * time.Millisecond)

	// Run the vtworker command.
	args := []string{
		"VerticalSplitClone",
		// --max_tps is only specified to enable the throttler and ensure that the
		// code is executed. But the intent here is not to throttle the test, hence
		// the rate limit is set very high.
		"-max_tps", "9999",
		"-tables", "moving.*,view1",
		"-source_reader_count", "10",
		"-destination_pack_count", "4",
		"-min_table_size_for_split", "1",
		"-destination_writer_count", "10",
		"destination_ks/0",
	}
	if err := runCommand(t, wi, wi.wr, args); err != nil {
		t.Fatal(err)
	}

	wantRetryCount := int64(1)
	if got := statsRetryCount.Get(); got != wantRetryCount {
		t.Errorf("Wrong statsRetryCounter: got %v, wanted %v", got, wantRetryCount)
	}
	wantRetryReadOnlyCount := int64(1)
	if got := statsRetryCounters.Counts()[retryCategoryReadOnly]; got != wantRetryReadOnlyCount {
		t.Errorf("Wrong statsRetryCounters: got %v, wanted %v", got, wantRetryReadOnlyCount)
	}
}