func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { input := []replication.BinlogEvent{ rotateEvent{}, formatEvent{}, queryEvent{query: replication.Query{ Database: "vt_test_keyspace", SQL: "insert into vt_a(eid, id) values (1, 1) /* _stream vt_a (eid id ) (1 1 ); */"}}, xidEvent{}, } events := make(chan replication.BinlogEvent) want := []binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET TIMESTAMP=1407805592")}, {Category: binlogdatapb.BinlogTransaction_Statement_BL_DML, Sql: []byte("insert into vt_a(eid, id) values (1, 1) /* _stream vt_a (eid id ) (1 1 ); */")}, }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, Position: replication.EncodePosition(replication.Position{ GTIDSet: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, }, }), }, }, { Statements: nil, EventToken: &querypb.EventToken{ Timestamp: 1407805592, Position: replication.EncodePosition(replication.Position{ GTIDSet: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, }, }), }, }, } var got []binlogdatapb.BinlogTransaction sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { got = append(got, *trans) return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { t.Errorf("unexpected error: %v", err) } if !reflect.DeepEqual(got, want) { t.Errorf("binlogConnStreamer.parseEvents(): got %v, want %v", got, want) } }
// updateBlpCheckpoint returns a statement to update a value in the // _vt.blp_checkpoint table. func updateBlpCheckpoint(uid uint32, pos replication.Position, timeUpdated int64, txTimestamp int64) string { if txTimestamp != 0 { return fmt.Sprintf( "UPDATE _vt.blp_checkpoint "+ "SET pos='%v', time_updated=%v, transaction_timestamp=%v "+ "WHERE source_shard_uid=%v", replication.EncodePosition(pos), timeUpdated, txTimestamp, uid) } return fmt.Sprintf( "UPDATE _vt.blp_checkpoint "+ "SET pos='%v', time_updated=%v "+ "WHERE source_shard_uid=%v", replication.EncodePosition(pos), timeUpdated, uid) }
// PromoteSlave makes the current tablet the master func (agent *ActionAgent) PromoteSlave(ctx context.Context) (string, error) { if err := agent.lock(ctx); err != nil { return "", err } defer agent.unlock() pos, err := agent.MysqlDaemon.PromoteSlave(agent.hookExtraEnv()) if err != nil { return "", err } // If using semi-sync, we need to enable it before going read-write. if *enableSemiSync { if err := agent.enableSemiSync(true); err != nil { return "", err } } // Set the server read-write if err := agent.MysqlDaemon.SetReadOnly(false); err != nil { return "", err } if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER); err != nil { return "", err } if err := agent.refreshTablet(ctx, "PromoteSlave"); err != nil { return "", err } return replication.EncodePosition(pos), nil }
// MasterPosition returns the master position // Should be called under RPCWrap. func (agent *ActionAgent) MasterPosition(ctx context.Context) (string, error) { pos, err := agent.MysqlDaemon.MasterPosition() if err != nil { return "", err } return replication.EncodePosition(pos), nil }
// PromoteSlaveWhenCaughtUp waits for this slave to be caught up on // replication up to the provided point, and then makes the slave the // shard master. func (agent *ActionAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, position string) (string, error) { pos, err := replication.DecodePosition(position) if err != nil { return "", err } if err := agent.MysqlDaemon.WaitMasterPos(ctx, pos); err != nil { return "", err } pos, err = agent.MysqlDaemon.PromoteSlave(agent.hookExtraEnv()) if err != nil { return "", err } // If using semi-sync, we need to enable it before going read-write. if *enableSemiSync { if err := agent.enableSemiSync(true); err != nil { return "", err } } if err := agent.MysqlDaemon.SetReadOnly(false); err != nil { return "", err } if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER); err != nil { return "", err } return replication.EncodePosition(pos), nil }
// InitMaster breaks slaves replication, get the current MySQL replication // position, insert a row in the reparent_journal table, and returns // the replication position func (agent *ActionAgent) InitMaster(ctx context.Context) (string, error) { // we need to insert something in the binlogs, so we can get the // current position. Let's just use the mysqlctl.CreateReparentJournal commands. cmds := mysqlctl.CreateReparentJournal() if err := agent.MysqlDaemon.ExecuteSuperQueryList(cmds); err != nil { return "", err } // get the current replication position pos, err := agent.MysqlDaemon.MasterPosition() if err != nil { return "", err } // Set the server read-write, from now on we can accept real // client writes. Note that if semi-sync replication is enabled, // we'll still need some slaves to be able to commit // transactions. if err := agent.MysqlDaemon.SetReadOnly(false); err != nil { return "", err } // Change our type to master if not already if _, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *topodatapb.Tablet) error { tablet.Type = topodatapb.TabletType_MASTER tablet.HealthMap = nil return nil }); err != nil { return "", err } agent.initReplication = true return replication.EncodePosition(pos), nil }
// PromoteSlaveWhenCaughtUp waits for this slave to be caught up on // replication up to the provided point, and then makes the slave the // shard master. func (agent *ActionAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, position string) (string, error) { pos, err := replication.DecodePosition(position) if err != nil { return "", err } // TODO(alainjobart) change the flavor API to take the context directly // For now, extract the timeout from the context, or wait forever var waitTimeout time.Duration if deadline, ok := ctx.Deadline(); ok { waitTimeout = deadline.Sub(time.Now()) if waitTimeout <= 0 { waitTimeout = time.Millisecond } } if err := agent.MysqlDaemon.WaitMasterPos(pos, waitTimeout); err != nil { return "", err } pos, err = agent.MysqlDaemon.PromoteSlave(agent.hookExtraEnv()) if err != nil { return "", err } if err := agent.MysqlDaemon.SetReadOnly(false); err != nil { return "", err } if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER, topotools.ClearHealthMap); err != nil { return "", err } return replication.EncodePosition(pos), nil }
// DemoteMaster marks the server read-only, wait until it is done with // its current transactions, and returns its master position. // Should be called under RPCWrapLockAction. func (agent *ActionAgent) DemoteMaster(ctx context.Context) (string, error) { // Set the server read-only. Note all active connections are not // affected. if err := agent.MysqlDaemon.SetReadOnly(true); err != nil { return "", err } // Now disallow queries, to make sure nobody is writing to the // database. tablet := agent.Tablet() // We don't care if the QueryService state actually changed because we'll // let vtgate keep serving read traffic from this master (see comment below). if _ /* state changed */, err := agent.disallowQueries(tablet.Type, "DemoteMaster marks server rdonly"); err != nil { return "", fmt.Errorf("disallowQueries failed: %v", err) } // If using semi-sync, we need to disable master-side. if *enableSemiSync { if err := agent.enableSemiSync(false); err != nil { return "", err } } pos, err := agent.MysqlDaemon.DemoteMaster() if err != nil { return "", err } return replication.EncodePosition(pos), nil // There is no serving graph update - the master tablet will // be replaced. Even though writes may fail, reads will // succeed. It will be less noisy to simply leave the entry // until we'll promote the master. }
// RunBlpUntil runs the binlog player server until the position is reached, // and returns the current mysql master replication position. func (agent *ActionAgent) RunBlpUntil(ctx context.Context, bpl []*tabletmanagerdatapb.BlpPosition, waitTime time.Duration) (string, error) { if agent.BinlogPlayerMap == nil { return "", fmt.Errorf("No BinlogPlayerMap configured") } if err := agent.BinlogPlayerMap.RunUntil(ctx, bpl, waitTime); err != nil { return "", err } pos, err := agent.MysqlDaemon.MasterPosition() if err != nil { return "", err } return replication.EncodePosition(pos), nil }
// InitMaster enables writes and returns the replication position. func (agent *ActionAgent) InitMaster(ctx context.Context) (string, error) { if err := agent.lock(ctx); err != nil { return "", err } defer agent.unlock() // Initializing as master implies undoing any previous "do not replicate". agent.setSlaveStopped(false) // we need to insert something in the binlogs, so we can get the // current position. Let's just use the mysqlctl.CreateReparentJournal commands. cmds := mysqlctl.CreateReparentJournal() if err := agent.MysqlDaemon.ExecuteSuperQueryList(ctx, cmds); err != nil { return "", err } // get the current replication position pos, err := agent.MysqlDaemon.MasterPosition() if err != nil { return "", err } // If using semi-sync, we need to enable it before going read-write. if *enableSemiSync { if err := agent.enableSemiSync(true); err != nil { return "", err } } // Set the server read-write, from now on we can accept real // client writes. Note that if semi-sync replication is enabled, // we'll still need some slaves to be able to commit transactions. if err := agent.MysqlDaemon.SetReadOnly(false); err != nil { return "", err } // Change our type to master if not already if _, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *topodatapb.Tablet) error { tablet.Type = topodatapb.TabletType_MASTER return nil }); err != nil { return "", err } // and refresh our state agent.initReplication = true if err := agent.refreshTablet(ctx, "InitMaster"); err != nil { return "", err } return replication.EncodePosition(pos), nil }
// StopSlaveMinimum will stop the slave after it reaches at least the // provided position. Works both when Vitess manages // replication or not (using hook if not). func (agent *ActionAgent) StopSlaveMinimum(ctx context.Context, position string, waitTime time.Duration) (string, error) { pos, err := replication.DecodePosition(position) if err != nil { return "", err } if err := agent.MysqlDaemon.WaitMasterPos(pos, waitTime); err != nil { return "", err } if err := mysqlctl.StopSlave(agent.MysqlDaemon, agent.hookExtraEnv()); err != nil { return "", err } pos, err = agent.MysqlDaemon.MasterPosition() if err != nil { return "", err } return replication.EncodePosition(pos), nil }
// PromoteSlave makes the current tablet the master func (agent *ActionAgent) PromoteSlave(ctx context.Context) (string, error) { pos, err := agent.MysqlDaemon.PromoteSlave(agent.hookExtraEnv()) if err != nil { return "", err } // Set the server read-write if err := agent.MysqlDaemon.SetReadOnly(false); err != nil { return "", err } if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER, topotools.ClearHealthMap); err != nil { return "", err } return replication.EncodePosition(pos), nil }
func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { input := []replication.BinlogEvent{ mariadbRotateEvent, mariadbFormatEvent, mariadbBeginGTIDEvent, mariadbInsertEvent, mariadbXidEvent, } events := make(chan replication.BinlogEvent) want := []binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Charset: charset, Sql: []byte("SET TIMESTAMP=1409892744")}, {Category: binlogdatapb.BinlogTransaction_Statement_BL_DML, Charset: charset, Sql: []byte("insert into vt_insert_test(msg) values ('test 0') /* _stream vt_insert_test (id ) (null ); */")}, }, EventToken: &querypb.EventToken{ Timestamp: 1409892744, Position: replication.EncodePosition(replication.Position{ GTIDSet: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 10, }, }), }, }, } var got []binlogdatapb.BinlogTransaction sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { got = append(got, *trans) return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { t.Errorf("unexpected error: %v", err) } if !reflect.DeepEqual(got, want) { t.Errorf("binlogConnStreamer.parseEvents(): got %v, want %v", got, want) } }
// PromoteSlave makes the current tablet the master func (agent *ActionAgent) PromoteSlave(ctx context.Context) (string, error) { tablet, err := agent.TopoServer.GetTablet(ctx, agent.TabletAlias) if err != nil { return "", err } pos, err := agent.MysqlDaemon.PromoteSlave(agent.hookExtraEnv()) if err != nil { return "", err } // Set the server read-write if err := agent.MysqlDaemon.SetReadOnly(false); err != nil { return "", err } return replication.EncodePosition(pos), agent.updateReplicationGraphForPromotedSlave(ctx, tablet) }
func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { input := []replication.BinlogEvent{ mariadbRotateEvent, mariadbFormatEvent, mariadbStandaloneGTIDEvent, mariadbCreateEvent, } events := make(chan replication.BinlogEvent) want := []binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Charset: &binlogdatapb.Charset{Client: 8, Conn: 8, Server: 33}, Sql: []byte("SET TIMESTAMP=1409892744")}, {Category: binlogdatapb.BinlogTransaction_Statement_BL_DDL, Charset: &binlogdatapb.Charset{Client: 8, Conn: 8, Server: 33}, Sql: []byte("create table if not exists vt_insert_test (\nid bigint auto_increment,\nmsg varchar(64),\nprimary key (id)\n) Engine=InnoDB")}, }, EventToken: &querypb.EventToken{ Timestamp: 1409892744, Position: replication.EncodePosition(replication.Position{ GTIDSet: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 9, }, }), }, }, } var got []binlogdatapb.BinlogTransaction sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { got = append(got, *trans) return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { t.Errorf("unexpected error: %v", err) } if !reflect.DeepEqual(got, want) { t.Errorf("binlogConnStreamer.parseEvents(): got %v, want %v", got, want) } }
// StopSlaveMinimum will stop the slave after it reaches at least the // provided position. Works both when Vitess manages // replication or not (using hook if not). func (agent *ActionAgent) StopSlaveMinimum(ctx context.Context, position string, waitTime time.Duration) (string, error) { pos, err := replication.DecodePosition(position) if err != nil { return "", err } waitCtx, cancel := context.WithTimeout(ctx, waitTime) defer cancel() if err := agent.MysqlDaemon.WaitMasterPos(waitCtx, pos); err != nil { return "", err } if err := agent.StopSlave(ctx); err != nil { return "", err } pos, err = agent.MysqlDaemon.MasterPosition() if err != nil { return "", err } return replication.EncodePosition(pos), nil }
// DemoteMaster marks the server read-only, wait until it is done with // its current transactions, and returns its master position. // Should be called under RPCWrapLockAction. func (agent *ActionAgent) DemoteMaster(ctx context.Context) (string, error) { // Set the server read-only. Note all active connections are not // affected. if err := agent.MysqlDaemon.SetReadOnly(true); err != nil { return "", err } // Now stop the query service, to make sure nobody is writing to the // database. This will in effect close the connection pools to the // database. tablet := agent.Tablet() agent.disallowQueries(tablet.Tablet.Type, "DemoteMaster marks server rdonly") pos, err := agent.MysqlDaemon.DemoteMaster() if err != nil { return "", err } return replication.EncodePosition(pos), nil // There is no serving graph update - the master tablet will // be replaced. Even though writes may fail, reads will // succeed. It will be less noisy to simply leave the entry // until well promote the master. }
// TestMigrateServedTypes exercises the MigrateServedTypes vtctl workflow end
// to end on fake tablets: source shard "0" is split into "-80" and "80-",
// then the rdonly, replica, and master served types are migrated in that
// order. After each step the test verifies the served-type counts and the
// SourceShards entries on every shard. It also checks that migration fails
// up front when no SourceShards entry exists on the destination shards.
func TestMigrateServedTypes(t *testing.T) {
	// TODO(b/26388813): Remove the next two lines once vtctl WaitForDrain is integrated in the vtctl MigrateServed* commands.
	flag.Set("wait_for_drain_sleep_rdonly", "0s")
	flag.Set("wait_for_drain_sleep_replica", "0s")

	db := fakesqldb.Register()
	ts := zktestserver.New(t, []string{"cell1", "cell2"})
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
	vp := NewVtctlPipe(t, ts)
	defer vp.Close()

	// create keyspace
	if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{
		ShardingColumnName: "keyspace_id",
		ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
	}); err != nil {
		t.Fatalf("CreateKeyspace failed: %v", err)
	}

	// create the source shard
	sourceMaster := NewFakeTablet(t, wr, "cell1", 10, topodatapb.TabletType_MASTER, db, TabletKeyspaceShard(t, "ks", "0"))
	sourceReplica := NewFakeTablet(t, wr, "cell1", 11, topodatapb.TabletType_REPLICA, db, TabletKeyspaceShard(t, "ks", "0"))
	sourceRdonly := NewFakeTablet(t, wr, "cell1", 12, topodatapb.TabletType_RDONLY, db, TabletKeyspaceShard(t, "ks", "0"))

	// create the first destination shard
	dest1Master := NewFakeTablet(t, wr, "cell1", 20, topodatapb.TabletType_MASTER, db, TabletKeyspaceShard(t, "ks", "-80"))
	dest1Replica := NewFakeTablet(t, wr, "cell1", 21, topodatapb.TabletType_REPLICA, db, TabletKeyspaceShard(t, "ks", "-80"))
	dest1Rdonly := NewFakeTablet(t, wr, "cell1", 22, topodatapb.TabletType_RDONLY, db, TabletKeyspaceShard(t, "ks", "-80"))

	// create the second destination shard
	dest2Master := NewFakeTablet(t, wr, "cell1", 30, topodatapb.TabletType_MASTER, db, TabletKeyspaceShard(t, "ks", "80-"))
	dest2Replica := NewFakeTablet(t, wr, "cell1", 31, topodatapb.TabletType_REPLICA, db, TabletKeyspaceShard(t, "ks", "80-"))
	dest2Rdonly := NewFakeTablet(t, wr, "cell1", 32, topodatapb.TabletType_RDONLY, db, TabletKeyspaceShard(t, "ks", "80-"))

	// double check the shards have the right served types
	checkShardServedTypes(t, ts, "0", 3)
	checkShardServedTypes(t, ts, "-80", 0)
	checkShardServedTypes(t, ts, "80-", 0)

	// sourceRdonly will see the refresh
	sourceRdonly.StartActionLoop(t, wr)
	defer sourceRdonly.StopActionLoop(t)

	// sourceReplica will see the refresh
	sourceReplica.StartActionLoop(t, wr)
	defer sourceReplica.StopActionLoop(t)

	// sourceMaster will see the refresh, and has to respond to it
	// also will be asked about its replication position.
	sourceMaster.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
		GTIDSet: replication.MariadbGTID{
			Domain:   5,
			Server:   456,
			Sequence: 892,
		},
	}
	sourceMaster.StartActionLoop(t, wr)
	defer sourceMaster.StopActionLoop(t)

	// dest1Rdonly will see the refresh
	dest1Rdonly.StartActionLoop(t, wr)
	defer dest1Rdonly.StopActionLoop(t)

	// dest1Replica will see the refresh
	dest1Replica.StartActionLoop(t, wr)
	defer dest1Replica.StopActionLoop(t)

	// dest1Master will see the refresh, and has to respond to it.
	// It will also need to respond to WaitBlpPosition, saying it's already caught up.
	dest1Master.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{
		"SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=0": {
			Rows: [][]sqltypes.Value{
				{
					sqltypes.MakeString([]byte(replication.EncodePosition(sourceMaster.FakeMysqlDaemon.CurrentMasterPosition))),
					sqltypes.MakeString([]byte("")),
				},
			},
		},
	}
	dest1Master.StartActionLoop(t, wr)
	defer dest1Master.StopActionLoop(t)

	// dest2Rdonly will see the refresh
	dest2Rdonly.StartActionLoop(t, wr)
	defer dest2Rdonly.StopActionLoop(t)

	// dest2Replica will see the refresh
	dest2Replica.StartActionLoop(t, wr)
	defer dest2Replica.StopActionLoop(t)

	// dest2Master will see the refresh, and has to respond to it.
	// It will also need to respond to WaitBlpPosition, saying it's already caught up.
	dest2Master.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{
		"SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=0": {
			Rows: [][]sqltypes.Value{
				{
					sqltypes.MakeString([]byte(replication.EncodePosition(sourceMaster.FakeMysqlDaemon.CurrentMasterPosition))),
					sqltypes.MakeString([]byte("")),
				},
			},
		},
	}
	dest2Master.StartActionLoop(t, wr)
	defer dest2Master.StopActionLoop(t)

	// migrate will error if the overlapping shards have no "SourceShard" entry
	// and we cannot decide which shard is the source or the destination.
	if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "rdonly"}); err == nil || !strings.Contains(err.Error(), "' have a 'SourceShards' entry. Did you successfully run vtworker SplitClone before? Or did you already migrate the MASTER type?") {
		t.Fatalf("MigrateServedType(rdonly) should fail if no 'SourceShards' entry is present: %v", err)
	}

	// simulate the clone, by fixing the dest shard record
	checkShardSourceShards(t, ts, "-80", 0)
	checkShardSourceShards(t, ts, "80-", 0)
	if err := vp.Run([]string{"SourceShardAdd", "--key_range=-", "ks/-80", "0", "ks/0"}); err != nil {
		t.Fatalf("SourceShardAdd failed: %v", err)
	}
	if err := vp.Run([]string{"SourceShardAdd", "--key_range=-", "ks/80-", "0", "ks/0"}); err != nil {
		t.Fatalf("SourceShardAdd failed: %v", err)
	}
	checkShardSourceShards(t, ts, "-80", 1)
	checkShardSourceShards(t, ts, "80-", 1)

	// migrate rdonly over
	if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "rdonly"}); err != nil {
		t.Fatalf("MigrateServedType(rdonly) failed: %v", err)
	}

	// rdonly is now served by the destinations; SourceShards entries remain.
	checkShardServedTypes(t, ts, "0", 2)
	checkShardServedTypes(t, ts, "-80", 1)
	checkShardServedTypes(t, ts, "80-", 1)
	checkShardSourceShards(t, ts, "-80", 1)
	checkShardSourceShards(t, ts, "80-", 1)

	// migrate replica over
	if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "replica"}); err != nil {
		t.Fatalf("MigrateServedType(replica) failed: %v", err)
	}

	checkShardServedTypes(t, ts, "0", 1)
	checkShardServedTypes(t, ts, "-80", 2)
	checkShardServedTypes(t, ts, "80-", 2)
	checkShardSourceShards(t, ts, "-80", 1)
	checkShardSourceShards(t, ts, "80-", 1)

	// migrate master over
	if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "master"}); err != nil {
		t.Fatalf("MigrateServedType(master) failed: %v", err)
	}

	// After the master migration, the source serves nothing and the
	// SourceShards entries are removed from the destinations.
	checkShardServedTypes(t, ts, "0", 0)
	checkShardServedTypes(t, ts, "-80", 3)
	checkShardServedTypes(t, ts, "80-", 3)
	checkShardSourceShards(t, ts, "-80", 0)
	checkShardSourceShards(t, ts, "80-", 0)
}
// TestMigrateServedTypes exercises the MigrateServedTypes vtctl workflow end
// to end on fake tablets: source shard "0" is split into "-80" and "80-",
// then the rdonly, replica, and master served types are migrated in that
// order, verifying the served-type counts on every shard after each step.
func TestMigrateServedTypes(t *testing.T) {
	db := fakesqldb.Register()
	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
	vp := NewVtctlPipe(t, ts)
	defer vp.Close()

	// create keyspace
	if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{
		ShardingColumnName: "keyspace_id",
		ShardingColumnType: topodatapb.KeyspaceIdType_UINT64,
	}); err != nil {
		t.Fatalf("CreateKeyspace failed: %v", err)
	}

	// create the source shard
	sourceMaster := NewFakeTablet(t, wr, "cell1", 10, topodatapb.TabletType_MASTER, db, TabletKeyspaceShard(t, "ks", "0"))
	sourceReplica := NewFakeTablet(t, wr, "cell1", 11, topodatapb.TabletType_REPLICA, db, TabletKeyspaceShard(t, "ks", "0"))
	sourceRdonly := NewFakeTablet(t, wr, "cell1", 12, topodatapb.TabletType_RDONLY, db, TabletKeyspaceShard(t, "ks", "0"))

	// create the first destination shard
	dest1Master := NewFakeTablet(t, wr, "cell1", 20, topodatapb.TabletType_MASTER, db, TabletKeyspaceShard(t, "ks", "-80"))
	dest1Replica := NewFakeTablet(t, wr, "cell1", 21, topodatapb.TabletType_REPLICA, db, TabletKeyspaceShard(t, "ks", "-80"))
	dest1Rdonly := NewFakeTablet(t, wr, "cell1", 22, topodatapb.TabletType_RDONLY, db, TabletKeyspaceShard(t, "ks", "-80"))

	// create the second destination shard
	dest2Master := NewFakeTablet(t, wr, "cell1", 30, topodatapb.TabletType_MASTER, db, TabletKeyspaceShard(t, "ks", "80-"))
	dest2Replica := NewFakeTablet(t, wr, "cell1", 31, topodatapb.TabletType_REPLICA, db, TabletKeyspaceShard(t, "ks", "80-"))
	dest2Rdonly := NewFakeTablet(t, wr, "cell1", 32, topodatapb.TabletType_RDONLY, db, TabletKeyspaceShard(t, "ks", "80-"))

	// double check the shards have the right served types
	checkShardServedTypes(t, ts, "0", 3)
	checkShardServedTypes(t, ts, "-80", 0)
	checkShardServedTypes(t, ts, "80-", 0)

	// sourceRdonly will see the refresh
	sourceRdonly.StartActionLoop(t, wr)
	defer sourceRdonly.StopActionLoop(t)

	// sourceReplica will see the refresh
	sourceReplica.StartActionLoop(t, wr)
	defer sourceReplica.StopActionLoop(t)

	// sourceMaster will see the refresh, and has to respond to it
	// also will be asked about its replication position.
	sourceMaster.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
		GTIDSet: replication.MariadbGTID{
			Domain:   5,
			Server:   456,
			Sequence: 892,
		},
	}
	sourceMaster.StartActionLoop(t, wr)
	defer sourceMaster.StopActionLoop(t)

	// dest1Rdonly will see the refresh
	dest1Rdonly.StartActionLoop(t, wr)
	defer dest1Rdonly.StopActionLoop(t)

	// dest1Replica will see the refresh
	dest1Replica.StartActionLoop(t, wr)
	defer dest1Replica.StopActionLoop(t)

	// dest1Master will see the refresh, and has to respond to it.
	// It will also need to respond to WaitBlpPosition, saying it's already caught up.
	dest1Master.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{
		"SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=0": {
			Rows: [][]sqltypes.Value{
				{
					sqltypes.MakeString([]byte(replication.EncodePosition(sourceMaster.FakeMysqlDaemon.CurrentMasterPosition))),
					sqltypes.MakeString([]byte("")),
				},
			},
		},
	}
	dest1Master.StartActionLoop(t, wr)
	defer dest1Master.StopActionLoop(t)

	// dest2Rdonly will see the refresh
	dest2Rdonly.StartActionLoop(t, wr)
	defer dest2Rdonly.StopActionLoop(t)

	// dest2Replica will see the refresh
	dest2Replica.StartActionLoop(t, wr)
	defer dest2Replica.StopActionLoop(t)

	// dest2Master will see the refresh, and has to respond to it.
	// It will also need to respond to WaitBlpPosition, saying it's already caught up.
	dest2Master.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{
		"SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=0": {
			Rows: [][]sqltypes.Value{
				{
					sqltypes.MakeString([]byte(replication.EncodePosition(sourceMaster.FakeMysqlDaemon.CurrentMasterPosition))),
					sqltypes.MakeString([]byte("")),
				},
			},
		},
	}
	dest2Master.StartActionLoop(t, wr)
	defer dest2Master.StopActionLoop(t)

	// simulate the clone, by fixing the dest shard record
	if err := vp.Run([]string{"SourceShardAdd", "--key_range=-", "ks/-80", "0", "ks/0"}); err != nil {
		t.Fatalf("SourceShardAdd failed: %v", err)
	}
	if err := vp.Run([]string{"SourceShardAdd", "--key_range=-", "ks/80-", "0", "ks/0"}); err != nil {
		t.Fatalf("SourceShardAdd failed: %v", err)
	}

	// migrate rdonly over
	if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "rdonly"}); err != nil {
		t.Fatalf("MigrateServedType(rdonly) failed: %v", err)
	}

	checkShardServedTypes(t, ts, "0", 2)
	checkShardServedTypes(t, ts, "-80", 1)
	checkShardServedTypes(t, ts, "80-", 1)

	// migrate replica over
	if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "replica"}); err != nil {
		t.Fatalf("MigrateServedType(replica) failed: %v", err)
	}

	checkShardServedTypes(t, ts, "0", 1)
	checkShardServedTypes(t, ts, "-80", 2)
	checkShardServedTypes(t, ts, "80-", 2)

	// migrate master over
	if err := vp.Run([]string{"MigrateServedTypes", "ks/0", "master"}); err != nil {
		t.Fatalf("MigrateServedType(master) failed: %v", err)
	}

	// After the master migration, the source serves nothing and both
	// destinations serve all three types.
	checkShardServedTypes(t, ts, "0", 0)
	checkShardServedTypes(t, ts, "-80", 3)
	checkShardServedTypes(t, ts, "80-", 3)
}
// ApplyBinlogEvents makes an RPC request to BinlogServer
// and processes the events. It will return nil if the provided context
// was canceled, or if we reached the stopping point.
// It will return io.EOF if the server stops sending us updates.
// It may return any other error it encounters.
func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error {
	// Instantiate the throttler based on the configuration stored in the db.
	maxTPS, maxReplicationLag, err := blp.readThrottlerSettings()
	if err != nil {
		log.Error(err)
		return err
	}
	t, err := throttler.NewThrottler(
		fmt.Sprintf("BinlogPlayer/%d", blp.uid), "transactions", 1 /* threadCount */, maxTPS, maxReplicationLag)
	if err != nil {
		err := fmt.Errorf("failed to instantiate throttler: %v", err)
		log.Error(err)
		return err
	}
	defer t.Close()

	// Log the mode of operation and when the player stops.
	if len(blp.tables) > 0 {
		log.Infof("BinlogPlayer client %v for tables %v starting @ '%v', server: %v",
			blp.uid,
			blp.tables,
			blp.position,
			blp.tablet,
		)
	} else {
		log.Infof("BinlogPlayer client %v for keyrange '%v-%v' starting @ '%v', server: %v",
			blp.uid,
			hex.EncodeToString(blp.keyRange.Start),
			hex.EncodeToString(blp.keyRange.End),
			blp.position,
			blp.tablet,
		)
	}
	if !blp.stopPosition.IsZero() {
		// We need to stop at some point. Sanity check the point.
		switch {
		case blp.position.Equal(blp.stopPosition):
			log.Infof("Not starting BinlogPlayer, we're already at the desired position %v", blp.stopPosition)
			return nil
		case blp.position.AtLeast(blp.stopPosition):
			return fmt.Errorf("starting point %v greater than stopping point %v", blp.position, blp.stopPosition)
		default:
			log.Infof("Will stop player when reaching %v", blp.stopPosition)
		}
	}

	// Look up the client implementation selected by flag and dial the server.
	clientFactory, ok := clientFactories[*binlogPlayerProtocol]
	if !ok {
		return fmt.Errorf("no binlog player client factory named %v", *binlogPlayerProtocol)
	}
	blplClient := clientFactory()
	err = blplClient.Dial(blp.tablet, *BinlogPlayerConnTimeout)
	if err != nil {
		err := fmt.Errorf("error dialing binlog server: %v", err)
		log.Error(err)
		return err
	}
	defer blplClient.Close()

	// Get the current charset of our connection, so we can ask the stream server
	// to check that they match. The streamer will also only send per-statement
	// charset data if that statement's charset is different from what we specify.
	if dbClient, ok := blp.dbClient.(*DBClient); ok {
		blp.defaultCharset, err = dbClient.dbConn.GetCharset()
		if err != nil {
			return fmt.Errorf("can't get charset to request binlog stream: %v", err)
		}
		log.Infof("original charset: %v", blp.defaultCharset)
		blp.currentCharset = blp.defaultCharset
		// Restore original charset when we're done.
		defer func() {
			// If the connection has been closed, there's no need to restore
			// this connection-specific setting.
			if dbClient.dbConn == nil {
				return
			}
			log.Infof("restoring original charset %v", blp.defaultCharset)
			if csErr := dbClient.dbConn.SetCharset(blp.defaultCharset); csErr != nil {
				log.Errorf("can't restore original charset %v: %v", blp.defaultCharset, csErr)
			}
		}()
	}

	// Open the stream in the mode matching the configuration (tables vs keyrange).
	var stream BinlogTransactionStream
	if len(blp.tables) > 0 {
		stream, err = blplClient.StreamTables(ctx, replication.EncodePosition(blp.position), blp.tables, blp.defaultCharset)
	} else {
		stream, err = blplClient.StreamKeyRange(ctx, replication.EncodePosition(blp.position), blp.keyRange, blp.defaultCharset)
	}
	if err != nil {
		err := fmt.Errorf("error sending streaming query to binlog server: %v", err)
		log.Error(err)
		return err
	}

	// Main loop: throttle, receive one transaction, apply it (with retries).
	for {
		// Block if we are throttled.
		for {
			backoff := t.Throttle(0 /* threadID */)
			if backoff == throttler.NotThrottled {
				break
			}
			// We don't bother checking for context cancellation here because the
			// sleep will block only up to 1 second. (Usually, backoff is 1s / rate
			// e.g. a rate of 1000 TPS results into a backoff of 1 ms.)
			time.Sleep(backoff)
		}

		// get the response
		response, err := stream.Recv()
		if err != nil {
			switch err {
			case context.Canceled:
				return nil
			default:
				// if the context is canceled, we
				// return nil (some RPC
				// implementations will remap the
				// context error to their own errors)
				select {
				case <-ctx.Done():
					if ctx.Err() == context.Canceled {
						return nil
					}
				default:
				}
				return fmt.Errorf("Error received from Stream %v", err)
			}
		}

		// process the transaction
		for {
			ok, err = blp.processTransaction(response)
			if err != nil {
				return fmt.Errorf("Error in processing binlog event %v", err)
			}
			if ok {
				if !blp.stopPosition.IsZero() {
					if blp.position.AtLeast(blp.stopPosition) {
						log.Infof("Reached stopping position, done playing logs")
						return nil
					}
				}
				break
			}
			// processTransaction reported a transient failure; retry the same
			// transaction after a short pause.
			log.Infof("Retrying txn")
			time.Sleep(1 * time.Second)
		}
	}
}
// ApplyBinlogEvents makes an RPC request to BinlogServer
// and processes the events. It will return nil if the provided context
// was canceled, or if we reached the stopping point.
// It will return io.EOF if the server stops sending us updates.
// It may return any other error it encounters.
func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error {
	// Log the starting point. The player runs in one of two filtering modes:
	// an explicit table list, or a keyrange.
	if len(blp.tables) > 0 {
		log.Infof("BinlogPlayer client %v for tables %v starting @ '%v', server: %v",
			blp.uid,
			blp.tables,
			blp.position,
			blp.endPoint,
		)
	} else {
		log.Infof("BinlogPlayer client %v for keyrange '%v-%v' starting @ '%v', server: %v",
			blp.uid,
			hex.EncodeToString(blp.keyRange.Start),
			hex.EncodeToString(blp.keyRange.End),
			blp.position,
			blp.endPoint,
		)
	}
	if !blp.stopPosition.IsZero() {
		// We need to stop at some point. Sanity check the point.
		switch {
		case blp.position.Equal(blp.stopPosition):
			log.Infof("Not starting BinlogPlayer, we're already at the desired position %v", blp.stopPosition)
			return nil
		case blp.position.AtLeast(blp.stopPosition):
			return fmt.Errorf("starting point %v greater than stopping point %v", blp.position, blp.stopPosition)
		default:
			log.Infof("Will stop player when reaching %v", blp.stopPosition)
		}
	}
	// Look up the RPC client implementation selected by the
	// -binlog_player_protocol flag and dial the source server.
	clientFactory, ok := clientFactories[*binlogPlayerProtocol]
	if !ok {
		return fmt.Errorf("no binlog player client factory named %v", *binlogPlayerProtocol)
	}
	blplClient := clientFactory()
	err := blplClient.Dial(blp.endPoint, *BinlogPlayerConnTimeout)
	if err != nil {
		log.Errorf("Error dialing binlog server: %v", err)
		return fmt.Errorf("error dialing binlog server: %v", err)
	}
	defer blplClient.Close()
	// Get the current charset of our connection, so we can ask the stream server
	// to check that they match. The streamer will also only send per-statement
	// charset data if that statement's charset is different from what we specify.
	if dbClient, ok := blp.dbClient.(*DBClient); ok {
		blp.defaultCharset, err = dbClient.dbConn.GetCharset()
		if err != nil {
			return fmt.Errorf("can't get charset to request binlog stream: %v", err)
		}
		log.Infof("original charset: %v", blp.defaultCharset)
		blp.currentCharset = blp.defaultCharset
		// Restore original charset when we're done.
		// NOTE(review): dbConn is not checked for nil before this deferred
		// SetCharset call runs; confirm the connection cannot be closed
		// before the function returns.
		defer func() {
			log.Infof("restoring original charset %v", blp.defaultCharset)
			if csErr := dbClient.dbConn.SetCharset(blp.defaultCharset); csErr != nil {
				log.Errorf("can't restore original charset %v: %v", blp.defaultCharset, csErr)
			}
		}()
	}
	// Start the server-side stream that matches our filtering mode.
	// Transactions arrive on responseChan; errFunc reports the stream's
	// terminal error after the channel is closed.
	var responseChan chan *pb.BinlogTransaction
	var errFunc ErrFunc
	if len(blp.tables) > 0 {
		responseChan, errFunc, err = blplClient.StreamTables(ctx, replication.EncodePosition(blp.position), blp.tables, blp.defaultCharset)
	} else {
		responseChan, errFunc, err = blplClient.StreamKeyRange(ctx, replication.EncodePosition(blp.position), blp.keyspaceIDType, blp.keyRange, blp.defaultCharset)
	}
	if err != nil {
		log.Errorf("Error sending streaming query to binlog server: %v", err)
		return fmt.Errorf("error sending streaming query to binlog server: %v", err)
	}
	// Main loop: apply each incoming transaction. processTransaction
	// returning ok=false requests a retry; we retry the same transaction
	// after a 1-second pause until it succeeds or errors.
	for response := range responseChan {
		for {
			ok, err = blp.processTransaction(response)
			if err != nil {
				return fmt.Errorf("Error in processing binlog event %v", err)
			}
			if ok {
				// Transaction applied; stop if we've reached the
				// requested stopping position.
				if !blp.stopPosition.IsZero() {
					if blp.position.AtLeast(blp.stopPosition) {
						log.Infof("Reached stopping position, done playing logs")
						return nil
					}
				}
				break
			}
			log.Infof("Retrying txn")
			time.Sleep(1 * time.Second)
		}
	}
	// The response channel was closed: classify the stream's final error.
	switch err := errFunc(); err {
	case nil:
		// Server stopped sending with no error: report EOF to the caller.
		return io.EOF
	case context.Canceled:
		return nil
	default:
		// if the context is canceled, we return nil (some RPC
		// implementations will remap the context error to their own
		// errors)
		select {
		case <-ctx.Done():
			if ctx.Err() == context.Canceled {
				return nil
			}
		default:
		}
		return fmt.Errorf("Error received from ServeBinlog %v", err)
	}
}
// PopulateReparentJournal returns the SQL command to use to populate // the _vt.reparent_journal table, as well as the time_created_ns // value used. func PopulateReparentJournal(timeCreatedNS int64, actionName, masterAlias string, pos replication.Position) string { return fmt.Sprintf("INSERT INTO _vt.reparent_journal "+ "(time_created_ns, action_name, master_alias, replication_position) "+ "VALUES (%v, '%v', '%v', '%v')", timeCreatedNS, actionName, masterAlias, replication.EncodePosition(pos)) }
// TestMigrateServedFrom exercises the MigrateServedFrom vtctl workflow:
// a "dest" keyspace initially served from "source" for all three tablet
// types has each type migrated over in turn (rdonly, replica, master),
// verifying after each step that the keyspace's ServedFrom entries shrink
// and that the source shard accumulates the matching blacklisted tables.
func TestMigrateServedFrom(t *testing.T) {
	ctx := context.Background()
	db := fakesqldb.Register()
	ts := zktopo.NewTestServer(t, []string{"cell1", "cell2"})
	wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
	vp := NewVtctlPipe(t, ts)
	defer vp.Close()

	// create the source keyspace tablets
	sourceMaster := NewFakeTablet(t, wr, "cell1", 10, topodatapb.TabletType_MASTER, db, TabletKeyspaceShard(t, "source", "0"))
	sourceReplica := NewFakeTablet(t, wr, "cell1", 11, topodatapb.TabletType_REPLICA, db, TabletKeyspaceShard(t, "source", "0"))
	sourceRdonly := NewFakeTablet(t, wr, "cell1", 12, topodatapb.TabletType_RDONLY, db, TabletKeyspaceShard(t, "source", "0"))

	// create the destination keyspace, served from source;
	// double check it has all entries in the ServedFroms map
	if err := vp.Run([]string{"CreateKeyspace", "-served_from", "master:source,replica:source,rdonly:source", "dest"}); err != nil {
		t.Fatalf("CreateKeyspace(dest) failed: %v", err)
	}
	ki, err := ts.GetKeyspace(ctx, "dest")
	if err != nil {
		t.Fatalf("GetKeyspace failed: %v", err)
	}
	if len(ki.ServedFroms) != 3 {
		t.Fatalf("bad initial dest ServedFroms: %+v", ki.ServedFroms)
	}

	// create the destination keyspace tablets
	destMaster := NewFakeTablet(t, wr, "cell1", 20, topodatapb.TabletType_MASTER, db, TabletKeyspaceShard(t, "dest", "0"))
	destReplica := NewFakeTablet(t, wr, "cell1", 21, topodatapb.TabletType_REPLICA, db, TabletKeyspaceShard(t, "dest", "0"))
	destRdonly := NewFakeTablet(t, wr, "cell1", 22, topodatapb.TabletType_RDONLY, db, TabletKeyspaceShard(t, "dest", "0"))

	// sourceRdonly will see the refresh
	sourceRdonly.StartActionLoop(t, wr)
	defer sourceRdonly.StopActionLoop(t)

	// sourceReplica will see the refresh
	sourceReplica.StartActionLoop(t, wr)
	defer sourceReplica.StopActionLoop(t)

	// sourceMaster will see the refresh, and has to respond to it
	// also will be asked about its replication position.
	sourceMaster.FakeMysqlDaemon.CurrentMasterPosition = replication.Position{
		GTIDSet: replication.MariadbGTID{
			Domain:   5,
			Server:   456,
			Sequence: 892,
		},
	}
	sourceMaster.StartActionLoop(t, wr)
	defer sourceMaster.StopActionLoop(t)

	// destRdonly will see the refresh
	destRdonly.StartActionLoop(t, wr)
	defer destRdonly.StopActionLoop(t)

	// destReplica will see the refresh
	destReplica.StartActionLoop(t, wr)
	defer destReplica.StopActionLoop(t)

	// destMaster will see the refresh, and has to respond to it.
	// It will also need to respond to WaitBlpPosition, saying it's already
	// caught up: the canned blp_checkpoint row reports the exact position
	// the source master is at.
	destMaster.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{
		"SELECT pos, flags FROM _vt.blp_checkpoint WHERE source_shard_uid=0": &sqltypes.Result{
			Rows: [][]sqltypes.Value{
				[]sqltypes.Value{
					sqltypes.MakeString([]byte(replication.EncodePosition(sourceMaster.FakeMysqlDaemon.CurrentMasterPosition))),
					sqltypes.MakeString([]byte("")),
				},
			},
		},
	}
	destMaster.StartActionLoop(t, wr)
	defer destMaster.StopActionLoop(t)

	// simulate the clone, by fixing the dest shard record
	if err := vp.Run([]string{"SourceShardAdd", "--tables", "gone1,gone2", "dest/0", "0", "source/0"}); err != nil {
		t.Fatalf("SourceShardAdd failed: %v", err)
	}

	// migrate rdonly over
	if err := vp.Run([]string{"MigrateServedFrom", "dest/0", "rdonly"}); err != nil {
		t.Fatalf("MigrateServedFrom(rdonly) failed: %v", err)
	}

	// check it's gone from keyspace
	ki, err = ts.GetKeyspace(ctx, "dest")
	if err != nil {
		t.Fatalf("GetKeyspace failed: %v", err)
	}
	if len(ki.ServedFroms) != 2 || ki.GetServedFrom(topodatapb.TabletType_RDONLY) != nil {
		t.Fatalf("bad initial dest ServedFroms: %v", ki.ServedFroms)
	}

	// check the source shard has the right blacklisted tables
	si, err := ts.GetShard(ctx, "source", "0")
	if err != nil {
		t.Fatalf("GetShard failed: %v", err)
	}
	if len(si.TabletControls) != 1 || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{
		&topodatapb.Shard_TabletControl{
			TabletType:        topodatapb.TabletType_RDONLY,
			BlacklistedTables: []string{"gone1", "gone2"},
		},
	}) {
		t.Fatalf("rdonly type doesn't have right blacklisted tables")
	}

	// migrate replica over
	if err := vp.Run([]string{"MigrateServedFrom", "dest/0", "replica"}); err != nil {
		t.Fatalf("MigrateServedFrom(replica) failed: %v", err)
	}

	// check it's gone from keyspace
	ki, err = ts.GetKeyspace(ctx, "dest")
	if err != nil {
		t.Fatalf("GetKeyspace failed: %v", err)
	}
	if len(ki.ServedFroms) != 1 || ki.GetServedFrom(topodatapb.TabletType_REPLICA) != nil {
		t.Fatalf("bad initial dest ServedFrom: %+v", ki.ServedFroms)
	}

	// check the source shard now blacklists the tables for both
	// migrated types (rdonly and replica)
	si, err = ts.GetShard(ctx, "source", "0")
	if err != nil {
		t.Fatalf("GetShard failed: %v", err)
	}
	if len(si.TabletControls) != 2 || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{
		&topodatapb.Shard_TabletControl{
			TabletType:        topodatapb.TabletType_RDONLY,
			BlacklistedTables: []string{"gone1", "gone2"},
		},
		&topodatapb.Shard_TabletControl{
			TabletType:        topodatapb.TabletType_REPLICA,
			BlacklistedTables: []string{"gone1", "gone2"},
		},
	}) {
		t.Fatalf("replica type doesn't have right blacklisted tables")
	}

	// migrate master over
	if err := vp.Run([]string{"MigrateServedFrom", "dest/0", "master"}); err != nil {
		t.Fatalf("MigrateServedFrom(master) failed: %v", err)
	}

	// make sure ServedFromMap is empty
	ki, err = ts.GetKeyspace(ctx, "dest")
	if err != nil {
		t.Fatalf("GetKeyspace failed: %v", err)
	}
	if len(ki.ServedFroms) > 0 {
		t.Fatalf("dest keyspace still is ServedFrom: %+v", ki.ServedFroms)
	}

	// check the source shard now blacklists the tables for all three types
	si, err = ts.GetShard(ctx, "source", "0")
	if err != nil {
		t.Fatalf("GetShard failed: %v", err)
	}
	if len(si.TabletControls) != 3 || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{
		&topodatapb.Shard_TabletControl{
			TabletType:        topodatapb.TabletType_RDONLY,
			BlacklistedTables: []string{"gone1", "gone2"},
		},
		&topodatapb.Shard_TabletControl{
			TabletType:        topodatapb.TabletType_REPLICA,
			BlacklistedTables: []string{"gone1", "gone2"},
		},
		&topodatapb.Shard_TabletControl{
			TabletType:        topodatapb.TabletType_MASTER,
			BlacklistedTables: []string{"gone1", "gone2"},
		},
	}) {
		t.Fatalf("master type doesn't have right blacklisted tables")
	}
}
// parseEvents processes the raw binlog dump stream from the server, one event
// at a time, and groups them into transactions. It is called from within the
// service function launched by Stream().
//
// If the sendTransaction func returns io.EOF, parseEvents returns ErrClientEOF.
// If the events channel is closed, parseEvents returns ErrServerEOF.
// If the context is done, returns ctx.Err().
func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication.BinlogEvent) (replication.Position, error) {
	var statements []*binlogdatapb.BinlogTransaction_Statement
	var format replication.BinlogFormat
	var gtid replication.GTID
	var pos = bls.startPos
	var autocommit = true
	var err error

	// A begin can be triggered either by a BEGIN query, or by a GTID_EVENT.
	begin := func() {
		if statements != nil {
			// If this happened, it would be a legitimate error.
			log.Errorf("BEGIN in binlog stream while still in another transaction; dropping %d statements: %v", len(statements), statements)
			binlogStreamerErrors.Add("ParseEvents", 1)
		}
		statements = make([]*binlogdatapb.BinlogTransaction_Statement, 0, 10)
		autocommit = false
	}

	// A commit can be triggered either by a COMMIT query, or by an XID_EVENT.
	// Statements that aren't wrapped in BEGIN/COMMIT are committed immediately.
	// Transactions whose timestamp is older than bls.timestamp are not sent to
	// the client, but the statement buffer and autocommit flag are still reset
	// either way so the next transaction starts clean.
	commit := func(timestamp uint32) error {
		if int64(timestamp) >= bls.timestamp {
			trans := &binlogdatapb.BinlogTransaction{
				Statements: statements,
				EventToken: &querypb.EventToken{
					Timestamp: int64(timestamp),
					Position:  replication.EncodePosition(pos),
				},
			}
			if err = bls.sendTransaction(trans); err != nil {
				if err == io.EOF {
					return ErrClientEOF
				}
				return fmt.Errorf("send reply error: %v", err)
			}
		}
		statements = nil
		autocommit = true
		return nil
	}

	// Parse events.
	for {
		var ev replication.BinlogEvent
		var ok bool

		select {
		case ev, ok = <-events:
			if !ok {
				// events channel has been closed, which means the connection died.
				log.Infof("reached end of binlog event stream")
				return pos, ErrServerEOF
			}
		case <-ctx.Done():
			log.Infof("stopping early due to binlog Streamer service shutdown or client disconnect")
			return pos, ctx.Err()
		}

		// Validate the buffer before reading fields from it.
		if !ev.IsValid() {
			return pos, fmt.Errorf("can't parse binlog event, invalid data: %#v", ev)
		}

		// We need to keep checking for FORMAT_DESCRIPTION_EVENT even after we've
		// seen one, because another one might come along (e.g. on log rotate due to
		// binlog settings change) that changes the format.
		if ev.IsFormatDescription() {
			format, err = ev.Format()
			if err != nil {
				return pos, fmt.Errorf("can't parse FORMAT_DESCRIPTION_EVENT: %v, event data: %#v", err, ev)
			}
			continue
		}

		// We can't parse anything until we get a FORMAT_DESCRIPTION_EVENT that
		// tells us the size of the event header.
		if format.IsZero() {
			// The only thing that should come before the FORMAT_DESCRIPTION_EVENT
			// is a fake ROTATE_EVENT, which the master sends to tell us the name
			// of the current log file.
			if ev.IsRotate() {
				continue
			}
			return pos, fmt.Errorf("got a real event before FORMAT_DESCRIPTION_EVENT: %#v", ev)
		}

		// Strip the checksum, if any. We don't actually verify the checksum, so discard it.
		ev, _, err = ev.StripChecksum(format)
		if err != nil {
			return pos, fmt.Errorf("can't strip checksum from binlog event: %v, event data: %#v", err, ev)
		}

		// Update the GTID if the event has one. The actual event type could be
		// something special like GTID_EVENT (MariaDB, MySQL 5.6), or it could be
		// an arbitrary event with a GTID in the header (Google MySQL).
		if ev.HasGTID(format) {
			gtid, err = ev.GTID(format)
			if err != nil {
				return pos, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev)
			}
			pos = replication.AppendGTID(pos, gtid)
		}

		// Dispatch on event type. Events not matched here (and cross-db
		// queries below) are silently skipped.
		switch {
		case ev.IsGTID(): // GTID_EVENT
			if ev.IsBeginGTID(format) {
				begin()
			}
		case ev.IsXID(): // XID_EVENT (equivalent to COMMIT)
			if err = commit(ev.Timestamp()); err != nil {
				return pos, err
			}
		case ev.IsIntVar(): // INTVAR_EVENT
			// Re-emit the variable assignment as a SET statement so the
			// client replays it before the statement that depends on it.
			name, value, err := ev.IntVar(format)
			if err != nil {
				return pos, fmt.Errorf("can't parse INTVAR_EVENT: %v, event data: %#v", err, ev)
			}
			statements = append(statements, &binlogdatapb.BinlogTransaction_Statement{
				Category: binlogdatapb.BinlogTransaction_Statement_BL_SET,
				Sql:      []byte(fmt.Sprintf("SET %s=%d", name, value)),
			})
		case ev.IsRand(): // RAND_EVENT
			// Same idea as INTVAR: replay the RNG seeds as a SET statement.
			seed1, seed2, err := ev.Rand(format)
			if err != nil {
				return pos, fmt.Errorf("can't parse RAND_EVENT: %v, event data: %#v", err, ev)
			}
			statements = append(statements, &binlogdatapb.BinlogTransaction_Statement{
				Category: binlogdatapb.BinlogTransaction_Statement_BL_SET,
				Sql:      []byte(fmt.Sprintf("SET @@RAND_SEED1=%d, @@RAND_SEED2=%d", seed1, seed2)),
			})
		case ev.IsQuery(): // QUERY_EVENT
			// Extract the query string and group into transactions.
			q, err := ev.Query(format)
			if err != nil {
				return pos, fmt.Errorf("can't get query from binlog event: %v, event data: %#v", err, ev)
			}
			switch cat := getStatementCategory(q.SQL); cat {
			case binlogdatapb.BinlogTransaction_Statement_BL_BEGIN:
				begin()
			case binlogdatapb.BinlogTransaction_Statement_BL_ROLLBACK:
				// Rollbacks are possible under some circumstances. Since the stream
				// client keeps track of its replication position by updating the set
				// of GTIDs it's seen, we must commit an empty transaction so the client
				// can update its position.
				statements = nil
				fallthrough
			case binlogdatapb.BinlogTransaction_Statement_BL_COMMIT:
				if err = commit(ev.Timestamp()); err != nil {
					return pos, err
				}
			default: // BL_DDL, BL_DML, BL_SET, BL_UNRECOGNIZED
				if q.Database != "" && q.Database != bls.dbname {
					// Skip cross-db statements.
					continue
				}
				// Prefix every statement with a SET TIMESTAMP so the client
				// replays it with the original statement's clock value.
				setTimestamp := &binlogdatapb.BinlogTransaction_Statement{
					Category: binlogdatapb.BinlogTransaction_Statement_BL_SET,
					Sql:      []byte(fmt.Sprintf("SET TIMESTAMP=%d", ev.Timestamp())),
				}
				statement := &binlogdatapb.BinlogTransaction_Statement{
					Category: cat,
					Sql:      []byte(q.SQL),
				}
				// If the statement has a charset and it's different than our client's
				// default charset, send it along with the statement.
				// If our client hasn't told us its charset, always send it.
				if bls.clientCharset == nil || (q.Charset != nil && *q.Charset != *bls.clientCharset) {
					setTimestamp.Charset = q.Charset
					statement.Charset = q.Charset
				}
				statements = append(statements, setTimestamp, statement)
				if autocommit {
					// Not inside an explicit BEGIN: commit immediately.
					if err = commit(ev.Timestamp()); err != nil {
						return pos, err
					}
				}
			}
		case ev.IsPreviousGTIDs(): // PREVIOUS_GTIDS_EVENT
			// MySQL 5.6 only: The Binlogs contain an
			// event that gives us all the previously
			// applied commits. It is *not* an
			// authoritative value, unless we started from
			// the beginning of a binlog file.
			if !bls.usePreviousGTIDs {
				continue
			}
			newPos, err := ev.PreviousGTIDs(format)
			if err != nil {
				return pos, err
			}
			pos = newPos
			if err = commit(ev.Timestamp()); err != nil {
				return pos, err
			}
		}
	}
}