func (wr *Wrangler) setKeyspaceShardingInfo(keyspace, shardingColumnName string, shardingColumnType key.KeyspaceIdType, splitShardCount int32, force bool) error {
	ki, err := wr.ts.GetKeyspace(keyspace)
	if err != nil {
		return err
	}

	if ki.ShardingColumnName != "" && ki.ShardingColumnName != shardingColumnName {
		if force {
			wr.Logger().Warningf("Forcing keyspace ShardingColumnName change from %v to %v", ki.ShardingColumnName, shardingColumnName)
		} else {
			return fmt.Errorf("Cannot change ShardingColumnName from %v to %v (use -force to override)", ki.ShardingColumnName, shardingColumnName)
		}
	}

	if ki.ShardingColumnType != key.KIT_UNSET && ki.ShardingColumnType != shardingColumnType {
		if force {
			wr.Logger().Warningf("Forcing keyspace ShardingColumnType change from %v to %v", ki.ShardingColumnType, shardingColumnType)
		} else {
			return fmt.Errorf("Cannot change ShardingColumnType from %v to %v (use -force to override)", ki.ShardingColumnType, shardingColumnType)
		}
	}

	ki.ShardingColumnName = shardingColumnName
	ki.ShardingColumnType = shardingColumnType
	ki.SplitShardCount = splitShardCount
	return topo.UpdateKeyspace(wr.ts, ki)
}
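// A minimal usage sketch of the force semantics above; the keyspace and
// column names here are illustrative, and exampleSetShardingInfo itself is
// a hypothetical caller, not part of the wrangler API.
func exampleSetShardingInfo(wr *Wrangler) error {
	// First call: nothing is set yet, so force is not needed.
	if err := wr.setKeyspaceShardingInfo("user_keyspace", "user_id", key.KIT_UINT64, 4, false); err != nil {
		return err
	}
	// Renaming an already-set sharding column without force must fail.
	if err := wr.setKeyspaceShardingInfo("user_keyspace", "other_id", key.KIT_UINT64, 4, false); err == nil {
		return fmt.Errorf("expected an error when renaming the sharding column without -force")
	}
	// With force=true the rename is applied and a warning is logged.
	return wr.setKeyspaceShardingInfo("user_keyspace", "other_id", key.KIT_UINT64, 4, true)
}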
// replicaMigrateServedFrom handles the slave (replica, rdonly) migration.
func (wr *Wrangler) replicaMigrateServedFrom(ki *topo.KeyspaceInfo, sourceShard *topo.ShardInfo, destinationShard *topo.ShardInfo, servedType topo.TabletType, cells []string, reverse bool, tables []string, ev *events.MigrateServedFrom) error {
	// Save the destination keyspace (its ServedFrom has been changed)
	event.DispatchUpdate(ev, "updating keyspace")
	if err := topo.UpdateKeyspace(wr.ts, ki); err != nil {
		return err
	}

	// Save the source shard (its blacklisted tables field has changed)
	event.DispatchUpdate(ev, "updating source shard")
	if err := sourceShard.UpdateSourceBlacklistedTables(servedType, cells, reverse, tables); err != nil {
		return fmt.Errorf("UpdateSourceBlacklistedTables(%v/%v) failed: %v", sourceShard.Keyspace(), sourceShard.ShardName(), err)
	}
	if err := topo.UpdateShard(context.TODO(), wr.ts, sourceShard); err != nil {
		return fmt.Errorf("UpdateShard(%v/%v) failed: %v", sourceShard.Keyspace(), sourceShard.ShardName(), err)
	}

	// Now refresh the source servers so they reload their
	// blacklisted table list
	event.DispatchUpdate(ev, "refreshing source tablets' state so they update their blacklisted tables")
	return wr.RefreshTablesByShard(sourceShard, servedType, cells)
}
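// An illustrative call into the replica-side migration; it assumes the
// caller has already loaded the KeyspaceInfo, both ShardInfos, and built
// the MigrateServedFrom event elsewhere. The table names are placeholders.
func exampleReplicaMigrate(wr *Wrangler, ki *topo.KeyspaceInfo, src, dst *topo.ShardInfo, ev *events.MigrateServedFrom) error {
	// Migrate rdonly traffic for two tables in all cells (cells == nil),
	// in the forward direction (reverse == false).
	return wr.replicaMigrateServedFrom(ki, src, dst, topo.TYPE_RDONLY, nil, false, []string{"users", "orders"}, ev)
}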
func (wr *Wrangler) setKeyspaceServedFrom(keyspace string, servedType topo.TabletType, cells []string, sourceKeyspace string, remove bool) error {
	ki, err := wr.ts.GetKeyspace(keyspace)
	if err != nil {
		return err
	}
	if err := ki.UpdateServedFromMap(servedType, cells, sourceKeyspace, remove, nil); err != nil {
		return err
	}
	return topo.UpdateKeyspace(wr.ts, ki)
}
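// A minimal sketch, assuming the keyspace "dest_ks" currently redirects
// replica traffic to "source_ks"; both names are hypothetical. Passing
// remove=true drops that redirect for cells c1 and c2 only.
func exampleRemoveServedFrom(wr *Wrangler) error {
	return wr.setKeyspaceServedFrom("dest_ks", topo.TYPE_REPLICA, []string{"c1", "c2"}, "source_ks", true)
}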
// masterMigrateServedFrom handles the master migration. The ordering is
// a bit different than for rdonly / replica to guarantee a smooth transition.
//
// The order is as follows:
// - Add BlacklistedTables on the source shard map for master
// - Refresh the source master, so it stops writing on the tables
// - Get the source master position, wait until destination master reaches it
// - Clear SourceShards on the destination Shard
// - Refresh the destination master, so it stops its filtered
//   replication and starts accepting writes
func (wr *Wrangler) masterMigrateServedFrom(ki *topo.KeyspaceInfo, sourceShard *topo.ShardInfo, destinationShard *topo.ShardInfo, tables []string, ev *events.MigrateServedFrom) error {
	// Read the data we need
	sourceMasterTabletInfo, err := wr.ts.GetTablet(sourceShard.MasterAlias)
	if err != nil {
		return err
	}
	destinationMasterTabletInfo, err := wr.ts.GetTablet(destinationShard.MasterAlias)
	if err != nil {
		return err
	}

	// Update the source shard (more blacklisted tables)
	event.DispatchUpdate(ev, "updating source shard")
	if err := sourceShard.UpdateSourceBlacklistedTables(topo.TYPE_MASTER, nil, false, tables); err != nil {
		return fmt.Errorf("UpdateSourceBlacklistedTables(%v/%v) failed: %v", sourceShard.Keyspace(), sourceShard.ShardName(), err)
	}
	if err := topo.UpdateShard(context.TODO(), wr.ts, sourceShard); err != nil {
		return fmt.Errorf("UpdateShard(%v/%v) failed: %v", sourceShard.Keyspace(), sourceShard.ShardName(), err)
	}

	// Now refresh the blacklisted table list on the source master
	event.DispatchUpdate(ev, "refreshing source master so it updates its blacklisted tables")
	if err := wr.tmc.RefreshState(wr.ctx, sourceMasterTabletInfo); err != nil {
		return err
	}

	// Get the source master position
	event.DispatchUpdate(ev, "getting master position")
	masterPosition, err := wr.tmc.MasterPosition(wr.ctx, sourceMasterTabletInfo)
	if err != nil {
		return err
	}

	// Wait for the destination master to reach that position
	event.DispatchUpdate(ev, "waiting for destination master to catch up to source master")
	if err := wr.tmc.WaitBlpPosition(context.TODO(), destinationMasterTabletInfo, blproto.BlpPosition{
		Uid:      0,
		Position: masterPosition,
	}, wr.ActionTimeout()); err != nil {
		return err
	}

	// Update the destination keyspace (its ServedFrom has changed)
	event.DispatchUpdate(ev, "updating keyspace")
	if err = topo.UpdateKeyspace(wr.ts, ki); err != nil {
		return err
	}

	// Update the destination shard (no more source shard)
	event.DispatchUpdate(ev, "updating destination shard")
	destinationShard.SourceShards = nil
	if err := topo.UpdateShard(context.TODO(), wr.ts, destinationShard); err != nil {
		return err
	}

	// Tell the destination shard master it can now be read-write.
	// Invoking a remote action will also make the tablet stop filtered
	// replication.
	event.DispatchUpdate(ev, "setting destination shard masters read-write")
	return wr.refreshMasters([]*topo.ShardInfo{destinationShard})
}
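// An illustrative driver for the master migration above; as with the
// replica case, it assumes ki, the ShardInfos, and the event were prepared
// by the caller, and the table list is a placeholder.
func exampleMasterMigrate(wr *Wrangler, ki *topo.KeyspaceInfo, src, dst *topo.ShardInfo, ev *events.MigrateServedFrom) error {
	// The master migration is all-cells and forward-only: the function
	// blocks until the destination master has caught up before cutting over.
	return wr.masterMigrateServedFrom(ki, src, dst, []string{"users", "orders"}, ev)
}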
func CheckKeyspace(t *testing.T, ts topo.Server) {
	keyspaces, err := ts.GetKeyspaces()
	if err != nil {
		t.Errorf("GetKeyspaces(empty): %v", err)
	}
	if len(keyspaces) != 0 {
		t.Errorf("len(GetKeyspaces()) != 0: %v", keyspaces)
	}

	if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil {
		t.Errorf("CreateKeyspace: %v", err)
	}
	if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != topo.ErrNodeExists {
		t.Errorf("CreateKeyspace(again) is not ErrNodeExists: %v", err)
	}

	keyspaces, err = ts.GetKeyspaces()
	if err != nil {
		t.Errorf("GetKeyspaces: %v", err)
	}
	if len(keyspaces) != 1 || keyspaces[0] != "test_keyspace" {
		t.Errorf("GetKeyspaces: want %v, got %v", []string{"test_keyspace"}, keyspaces)
	}

	k := &topo.Keyspace{
		ShardingColumnName: "user_id",
		ShardingColumnType: key.KIT_UINT64,
		ServedFromMap: map[topo.TabletType]*topo.KeyspaceServedFrom{
			topo.TYPE_REPLICA: &topo.KeyspaceServedFrom{
				Cells:    []string{"c1", "c2"},
				Keyspace: "test_keyspace3",
			},
			topo.TYPE_MASTER: &topo.KeyspaceServedFrom{
				Cells:    nil,
				Keyspace: "test_keyspace3",
			},
		},
		SplitShardCount: 64,
	}
	if err := ts.CreateKeyspace("test_keyspace2", k); err != nil {
		t.Errorf("CreateKeyspace: %v", err)
	}
	keyspaces, err = ts.GetKeyspaces()
	if err != nil {
		t.Errorf("GetKeyspaces: %v", err)
	}
	if len(keyspaces) != 2 || keyspaces[0] != "test_keyspace" || keyspaces[1] != "test_keyspace2" {
		t.Errorf("GetKeyspaces: want %v, got %v", []string{"test_keyspace", "test_keyspace2"}, keyspaces)
	}

	ki, err := ts.GetKeyspace("test_keyspace2")
	if err != nil {
		t.Fatalf("GetKeyspace: %v", err)
	}
	if !reflect.DeepEqual(ki.Keyspace, k) {
		t.Fatalf("returned keyspace doesn't match: got %v expected %v", ki.Keyspace, k)
	}

	ki.ShardingColumnName = "other_id"
	ki.ShardingColumnType = key.KIT_BYTES
	delete(ki.ServedFromMap, topo.TYPE_MASTER)
	ki.ServedFromMap[topo.TYPE_REPLICA].Keyspace = "test_keyspace4"
	if err := topo.UpdateKeyspace(ts, ki); err != nil {
		t.Fatalf("UpdateKeyspace: %v", err)
	}

	ki, err = ts.GetKeyspace("test_keyspace2")
	if err != nil {
		t.Fatalf("GetKeyspace: %v", err)
	}
	if ki.ShardingColumnName != "other_id" ||
		ki.ShardingColumnType != key.KIT_BYTES ||
		ki.ServedFromMap[topo.TYPE_REPLICA].Keyspace != "test_keyspace4" {
		t.Errorf("GetKeyspace: unexpected keyspace, got %v", *ki)
	}
}
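// A hedged sketch of how a topo.Server backend might reuse this checker in
// its own test suite; newTestServer is a hypothetical constructor for the
// implementation under test.
func TestKeyspace(t *testing.T) {
	ts := newTestServer(t) // hypothetical: builds an empty server for this backend
	defer ts.Close()       // assumes the backend wants an explicit shutdown
	CheckKeyspace(t, ts)
}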