// NewQueryResultReaderForTablet creates a new QueryResultReader for
// the provided tablet / sql query
func NewQueryResultReaderForTablet(ts topo.Server, tabletAlias topo.TabletAlias, sql string) (*QueryResultReader, error) {
    tablet, err := ts.GetTablet(tabletAlias)
    if err != nil {
        return nil, err
    }

    endPoint, err := tablet.EndPoint()
    if err != nil {
        return nil, err
    }

    conn, err := tabletconn.GetDialer()(context.TODO(), *endPoint, tablet.Keyspace, tablet.Shard, 30*time.Second)
    if err != nil {
        return nil, err
    }

    sr, clientErrFn, err := conn.StreamExecute(context.TODO(), sql, make(map[string]interface{}), 0)
    if err != nil {
        return nil, err
    }

    // read the columns, or grab the error
    cols, ok := <-sr
    if !ok {
        return nil, fmt.Errorf("Cannot read Fields for query '%v': %v", sql, clientErrFn())
    }

    return &QueryResultReader{
        Output:      sr,
        Fields:      cols.Fields,
        conn:        conn,
        clientErrFn: clientErrFn,
    }, nil
}
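// drainQueryResultReader is a hypothetical sketch (not part of the original
// code) showing how a caller might consume the reader above. It assumes the
// caller lives in the same package, since clientErrFn is unexported, and only
// relies on the behavior shown: Output is the stream channel, and clientErrFn
// reports any streaming error once that channel is closed.
func drainQueryResultReader(qrr *QueryResultReader) (int, error) {
    count := 0
    for range qrr.Output {
        // each value received is one streamed result batch; a real caller
        // would process its rows here
        count++
    }
    // after the channel closes, check for a streaming error
    if err := qrr.clientErrFn(); err != nil {
        return count, err
    }
    return count, nil
}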
// GetAllTabletsAcrossCells returns all tablets from known cells.
// If it returns topo.ErrPartialResult, then the list is valid, but partial.
func GetAllTabletsAcrossCells(ctx context.Context, ts topo.Server) ([]*topo.TabletInfo, error) {
    cells, err := ts.GetKnownCells()
    if err != nil {
        return nil, err
    }

    results := make([][]*topo.TabletInfo, len(cells))
    errors := make([]error, len(cells))
    wg := sync.WaitGroup{}
    wg.Add(len(cells))
    for i, cell := range cells {
        go func(i int, cell string) {
            results[i], errors[i] = GetAllTablets(ctx, ts, cell)
            wg.Done()
        }(i, cell)
    }
    wg.Wait()

    err = nil
    allTablets := make([]*topo.TabletInfo, 0)
    for i := range cells {
        if errors[i] == nil {
            allTablets = append(allTablets, results[i]...)
        } else {
            err = topo.ErrPartialResult
        }
    }
    return allTablets, err
}
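// listTablets is a hypothetical caller (not part of the original code) that
// tolerates a partial result: per the contract documented above, the returned
// slice is still usable when the error is topo.ErrPartialResult.
func listTablets(ctx context.Context, ts topo.Server) ([]*topo.TabletInfo, error) {
    tablets, err := GetAllTabletsAcrossCells(ctx, ts)
    switch err {
    case nil:
        return tablets, nil
    case topo.ErrPartialResult:
        log.Warningf("some cells could not be read, returning a partial tablet list (%v tablets)", len(tablets))
        return tablets, nil
    default:
        return nil, err
    }
}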
// GetAllTablets returns a sorted list of tablets.
func GetAllTablets(ctx context.Context, ts topo.Server, cell string) ([]*topo.TabletInfo, error) {
    aliases, err := ts.GetTabletsByCell(cell)
    if err != nil {
        return nil, err
    }
    sort.Sort(topo.TabletAliasList(aliases))

    tabletMap, err := topo.GetTabletMap(ctx, ts, aliases)
    if err != nil {
        // we got another error than topo.ErrNoNode
        return nil, err
    }
    tablets := make([]*topo.TabletInfo, 0, len(aliases))
    for _, tabletAlias := range aliases {
        tabletInfo, ok := tabletMap[tabletAlias]
        if !ok {
            // tablet disappeared on us (GetTabletMap ignores
            // topo.ErrNoNode), just echo a warning
            log.Warningf("failed to load tablet %v", tabletAlias)
        } else {
            tablets = append(tablets, tabletInfo)
        }
    }

    return tablets, nil
}
// UnlockShard unlocks a previously locked shard.
func (n *ActionNode) UnlockShard(ctx context.Context, ts topo.Server, keyspace, shard string, lockPath string, actionError error) error {
    span := trace.NewSpanFromContext(ctx)
    span.StartClient("TopoServer.UnlockShardForAction")
    span.Annotate("action", n.Action)
    span.Annotate("keyspace", keyspace)
    span.Annotate("shard", shard)
    defer span.Finish()

    // first update the actionNode
    if actionError != nil {
        log.Infof("Unlocking shard %v/%v for action %v with error %v", keyspace, shard, n.Action, actionError)
        n.Error = actionError.Error()
        n.State = ACTION_STATE_FAILED
    } else {
        log.Infof("Unlocking shard %v/%v for successful action %v", keyspace, shard, n.Action)
        n.Error = ""
        n.State = ACTION_STATE_DONE
    }
    err := ts.UnlockShardForAction(keyspace, shard, lockPath, n.ToJson())
    if actionError != nil {
        if err != nil {
            // this will be masked
            log.Warningf("UnlockShardForAction failed: %v", err)
        }
        return actionError
    }
    return err
}
// VtctlClientTestSuite runs the test suite on the given topo server and client
func VtctlClientTestSuite(t *testing.T, ts topo.Server, client vtctlclient.VtctlClient) {
    // Create a fake tablet
    tablet := &topo.Tablet{
        Alias:    topo.TabletAlias{Cell: "cell1", Uid: 1},
        Hostname: "localhost",
        IPAddr:   "10.11.12.13",
        Portmap: map[string]int{
            "vt":    3333,
            "mysql": 3334,
        },
        Tags:     map[string]string{"tag": "value"},
        Keyspace: "test_keyspace",
        Type:     topo.TYPE_MASTER,
        State:    topo.STATE_READ_WRITE,
    }
    if err := ts.CreateTablet(tablet); err != nil {
        t.Errorf("CreateTablet: %v", err)
    }

    // run a command that's gonna return something on the log channel
    logs, errFunc := client.ExecuteVtctlCommand([]string{"ListAllTablets", "cell1"}, 30*time.Second, 10*time.Second)
    if err := errFunc(); err != nil {
        t.Fatalf("Cannot execute remote command: %v", err)
    }
    count := 0
    for e := range logs {
        expected := "cell1-0000000001 test_keyspace <null> master localhost:3333 localhost:3334 [tag: \"value\"]\n"
        if e.String() != expected {
            t.Errorf("Got unexpected log line '%v' expected '%v'", e.String(), expected)
        }
        count++
    }
    if count != 1 {
        t.Errorf("Didn't get expected log line only, got %v lines", count)
    }
    if err := errFunc(); err != nil {
        t.Fatalf("Remote error: %v", err)
    }

    // run a command that's gonna fail
    logs, errFunc = client.ExecuteVtctlCommand([]string{"ListAllTablets", "cell2"}, 30*time.Second, 10*time.Second)
    if err := errFunc(); err != nil {
        t.Fatalf("Cannot execute remote command: %v", err)
    }
    if e, ok := <-logs; ok {
        t.Errorf("Got unexpected line for logs: %v", e.String())
    }
    expected := "node doesn't exist"
    if err := errFunc(); err == nil || !strings.Contains(err.Error(), expected) {
        t.Fatalf("Unexpected remote error, got: '%v' was expecting to find '%v'", err, expected)
    }
}
func getLocalCell(t *testing.T, ts topo.Server) string {
    cells, err := ts.GetKnownCells()
    if err != nil {
        t.Fatalf("GetKnownCells: %v", err)
    }
    if len(cells) < 1 {
        t.Fatalf("provided topo.Server doesn't have enough cells (need at least 1): %v", cells)
    }
    return cells[0]
}
// LockKeyspace will lock the keyspace in the topology server.
// UnlockKeyspace should be called if this returns no error.
func (n *ActionNode) LockKeyspace(ctx context.Context, ts topo.Server, keyspace string, lockTimeout time.Duration, interrupted chan struct{}) (lockPath string, err error) {
    log.Infof("Locking keyspace %v for action %v", keyspace, n.Action)

    span := trace.NewSpanFromContext(ctx)
    span.StartClient("TopoServer.LockKeyspaceForAction")
    span.Annotate("action", n.Action)
    span.Annotate("keyspace", keyspace)
    defer span.Finish()

    return ts.LockKeyspaceForAction(keyspace, n.ToJson(), lockTimeout, interrupted)
}
// CopyShardReplications will create the ShardReplication objects in
// the destination topo
func CopyShardReplications(fromTS, toTS topo.Server) {
    keyspaces, err := fromTS.GetKeyspaces()
    if err != nil {
        log.Fatalf("fromTS.GetKeyspaces: %v", err)
    }

    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for _, keyspace := range keyspaces {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()
            shards, err := fromTS.GetShardNames(keyspace)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetShardNames(%v): %v", keyspace, err))
                return
            }

            for _, shard := range shards {
                wg.Add(1)
                go func(keyspace, shard string) {
                    defer wg.Done()

                    // read the source shard to get the cells
                    si, err := fromTS.GetShard(keyspace, shard)
                    if err != nil {
                        rec.RecordError(fmt.Errorf("GetShard(%v, %v): %v", keyspace, shard, err))
                        return
                    }

                    for _, cell := range si.Cells {
                        sri, err := fromTS.GetShardReplication(cell, keyspace, shard)
                        if err != nil {
                            rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v): %v", cell, keyspace, shard, err))
                            continue
                        }

                        if err := toTS.UpdateShardReplicationFields(cell, keyspace, shard, func(oldSR *topo.ShardReplication) error {
                            *oldSR = *sri.ShardReplication
                            return nil
                        }); err != nil {
                            rec.RecordError(fmt.Errorf("UpdateShardReplicationFields(%v, %v, %v): %v", cell, keyspace, shard, err))
                        }
                    }
                }(keyspace, shard)
            }
        }(keyspace)
    }
    wg.Wait()
    if rec.HasErrors() {
        log.Fatalf("copyShardReplications failed: %v", rec.Error())
    }
}
// CopyTablets will create the tablets in the destination topo
func CopyTablets(fromTS, toTS topo.Server) {
    cells, err := fromTS.GetKnownCells()
    if err != nil {
        log.Fatalf("fromTS.GetKnownCells: %v", err)
    }

    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for _, cell := range cells {
        wg.Add(1)
        go func(cell string) {
            defer wg.Done()
            tabletAliases, err := fromTS.GetTabletsByCell(cell)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetTabletsByCell(%v): %v", cell, err))
            } else {
                for _, tabletAlias := range tabletAliases {
                    wg.Add(1)
                    go func(tabletAlias topo.TabletAlias) {
                        defer wg.Done()

                        // read the source tablet
                        ti, err := fromTS.GetTablet(tabletAlias)
                        if err != nil {
                            rec.RecordError(fmt.Errorf("GetTablet(%v): %v", tabletAlias, err))
                            return
                        }

                        // try to create the destination
                        err = toTS.CreateTablet(ti.Tablet)
                        if err == topo.ErrNodeExists {
                            // update the destination tablet
                            log.Warningf("tablet %v already exists, updating it", tabletAlias)
                            err = toTS.UpdateTabletFields(ti.Alias, func(t *topo.Tablet) error {
                                *t = *ti.Tablet
                                return nil
                            })
                        }
                        if err != nil {
                            rec.RecordError(fmt.Errorf("CreateTablet(%v): %v", tabletAlias, err))
                            return
                        }
                    }(tabletAlias)
                }
            }
        }(cell)
    }
    wg.Wait()
    if rec.HasErrors() {
        log.Fatalf("copyTablets failed: %v", rec.Error())
    }
}
// LockSrvShard will lock the serving shard in the topology server.
// UnlockSrvShard should be called if this returns no error.
func (n *ActionNode) LockSrvShard(ctx context.Context, ts topo.Server, cell, keyspace, shard string, lockTimeout time.Duration, interrupted chan struct{}) (lockPath string, err error) {
    log.Infof("Locking serving shard %v/%v/%v for action %v", cell, keyspace, shard, n.Action)

    span := trace.NewSpanFromContext(ctx)
    span.StartClient("TopoServer.LockSrvShardForAction")
    span.Annotate("action", n.Action)
    span.Annotate("keyspace", keyspace)
    span.Annotate("shard", shard)
    span.Annotate("cell", cell)
    defer span.Finish()

    return ts.LockSrvShardForAction(cell, keyspace, shard, n.ToJson(), lockTimeout, interrupted)
}
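// withSrvShardLock is a hypothetical wrapper (not part of the original code)
// showing the intended lock/unlock pairing: take the SrvShard lock, run the
// work, then always pass the work's error to UnlockSrvShard so the actionNode
// records success or failure, the same pattern RebuildShard uses below.
func withSrvShardLock(ctx context.Context, ts topo.Server, n *ActionNode, cell, keyspace, shard string, lockTimeout time.Duration, interrupted chan struct{}, work func() error) error {
    lockPath, err := n.LockSrvShard(ctx, ts, cell, keyspace, shard, lockTimeout, interrupted)
    if err != nil {
        return err
    }
    workErr := work()
    // hand the work error to the unlock call so the actionNode state reflects it
    return n.UnlockSrvShard(ctx, ts, cell, keyspace, shard, lockPath, workErr)
}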
// Scrap will update the tablet type to 'Scrap', and remove it from
// the serving graph.
//
// 'force' means we are not on the tablet being scrapped, so it is
// probably dead. So if 'force' is true, we will also remove pending
// remote actions. And if 'force' is false, we also run an optional
// hook.
func Scrap(ts topo.Server, tabletAlias topo.TabletAlias, force bool) error {
    tablet, err := ts.GetTablet(tabletAlias)
    if err != nil {
        return err
    }

    // If the tablet is already scrapped, skip updating replication data.
    // It won't be there anyway.
    wasAssigned := tablet.IsAssigned()
    tablet.Type = topo.TYPE_SCRAP
    tablet.Parent = topo.TabletAlias{}
    // Update the tablet first, since that is canonical.
    err = topo.UpdateTablet(context.TODO(), ts, tablet)
    if err != nil {
        return err
    }

    if wasAssigned {
        err = topo.DeleteTabletReplicationData(ts, tablet.Tablet)
        if err != nil {
            if err == topo.ErrNoNode {
                log.V(6).Infof("no ShardReplication object for cell %v", tablet.Alias.Cell)
                err = nil
            }
            if err != nil {
                log.Warningf("remove replication data for %v failed: %v", tablet.Alias, err)
            }
        }
    }

    // run a hook for final cleanup, only in non-force mode.
    // (force mode executes on the vtctl side, not on the vttablet side)
    if !force {
        hk := hook.NewSimpleHook("postflight_scrap")
        ConfigureTabletHook(hk, tablet.Alias)
        if hookErr := hk.ExecuteOptional(); hookErr != nil {
            // we don't want to return an error, the server
            // is probably already in bad shape.
            log.Warningf("Scrap: postflight_scrap failed: %v", hookErr)
        }
    }

    return nil
}
// CopyKeyspaces will create the keyspaces in the destination topo
func CopyKeyspaces(fromTS, toTS topo.Server) {
    keyspaces, err := fromTS.GetKeyspaces()
    if err != nil {
        log.Fatalf("GetKeyspaces: %v", err)
    }

    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for _, keyspace := range keyspaces {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()

            k, err := fromTS.GetKeyspace(keyspace)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetKeyspace(%v): %v", keyspace, err))
                return
            }

            if err := toTS.CreateKeyspace(keyspace, k.Keyspace); err != nil {
                if err == topo.ErrNodeExists {
                    log.Warningf("keyspace %v already exists", keyspace)
                } else {
                    rec.RecordError(fmt.Errorf("CreateKeyspace(%v): %v", keyspace, err))
                }
            }
        }(keyspace)
    }
    wg.Wait()
    if rec.HasErrors() {
        log.Fatalf("copyKeyspaces failed: %v", rec.Error())
    }
}
func CheckSrvShardLock(t *testing.T, ts topo.Server) {
    // make sure we can create the lock even if no directory exists
    interrupted := make(chan struct{}, 1)
    lockPath, err := ts.LockSrvShardForAction("test", "test_keyspace", "10-20", "fake-content", 5*time.Second, interrupted)
    if err != nil {
        t.Fatalf("LockSrvShardForAction: %v", err)
    }

    if err := ts.UnlockSrvShardForAction("test", "test_keyspace", "10-20", lockPath, "fake-results"); err != nil {
        t.Errorf("UnlockSrvShardForAction(): %v", err)
    }

    // now take the lock again after the root exists
    lockPath, err = ts.LockSrvShardForAction("test", "test_keyspace", "10-20", "fake-content", 5*time.Second, interrupted)
    if err != nil {
        t.Fatalf("LockSrvShardForAction: %v", err)
    }

    // test we can't take the lock again
    if _, err := ts.LockSrvShardForAction("test", "test_keyspace", "10-20", "unused-fake-content", time.Second/2, interrupted); err != topo.ErrTimeout {
        t.Errorf("LockSrvShardForAction(again): %v", err)
    }

    // test we can interrupt taking the lock
    go func() {
        time.Sleep(time.Second / 2)
        close(interrupted)
    }()
    if _, err := ts.LockSrvShardForAction("test", "test_keyspace", "10-20", "unused-fake-content", 5*time.Second, interrupted); err != topo.ErrInterrupted {
        t.Errorf("LockSrvShardForAction(interrupted): %v", err)
    }

    // unlock now
    if err := ts.UnlockSrvShardForAction("test", "test_keyspace", "10-20", lockPath, "fake-results"); err != nil {
        t.Errorf("UnlockSrvShardForAction(): %v", err)
    }

    // test we can't unlock again
    if err := ts.UnlockSrvShardForAction("test", "test_keyspace", "10-20", lockPath, "fake-results"); err == nil {
        t.Error("UnlockSrvShardForAction(again) worked")
    }
}
func CheckShardLock(t *testing.T, ts topo.Server) {
    if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil {
        t.Fatalf("CreateKeyspace: %v", err)
    }
    if err := topo.CreateShard(ts, "test_keyspace", "10-20"); err != nil {
        t.Fatalf("CreateShard: %v", err)
    }

    interrupted := make(chan struct{}, 1)
    lockPath, err := ts.LockShardForAction("test_keyspace", "10-20", "fake-content", 5*time.Second, interrupted)
    if err != nil {
        t.Fatalf("LockShardForAction: %v", err)
    }

    // test we can't take the lock again
    if _, err := ts.LockShardForAction("test_keyspace", "10-20", "unused-fake-content", time.Second/2, interrupted); err != topo.ErrTimeout {
        t.Errorf("LockShardForAction(again): %v", err)
    }

    // test we can interrupt taking the lock
    go func() {
        time.Sleep(time.Second / 2)
        close(interrupted)
    }()
    if _, err := ts.LockShardForAction("test_keyspace", "10-20", "unused-fake-content", 5*time.Second, interrupted); err != topo.ErrInterrupted {
        t.Errorf("LockShardForAction(interrupted): %v", err)
    }

    if err := ts.UnlockShardForAction("test_keyspace", "10-20", lockPath, "fake-results"); err != nil {
        t.Errorf("UnlockShardForAction(): %v", err)
    }

    // test we can't unlock again
    if err := ts.UnlockShardForAction("test_keyspace", "10-20", lockPath, "fake-results"); err == nil {
        t.Error("UnlockShardForAction(again) worked")
    }

    // test we can't lock a non-existing shard
    interrupted = make(chan struct{}, 1)
    if _, err := ts.LockShardForAction("test_keyspace", "20-30", "fake-content", 5*time.Second, interrupted); err == nil {
        t.Fatalf("LockShardForAction(test_keyspace/20-30) worked for non-existing shard")
    }
}
func CheckShardReplication(t *testing.T, ts topo.Server) {
    cell := getLocalCell(t, ts)
    if _, err := ts.GetShardReplication(cell, "test_keyspace", "-10"); err != topo.ErrNoNode {
        t.Errorf("GetShardReplication(not there): %v", err)
    }

    sr := &topo.ShardReplication{
        ReplicationLinks: []topo.ReplicationLink{
            topo.ReplicationLink{
                TabletAlias: topo.TabletAlias{
                    Cell: "c1",
                    Uid:  1,
                },
                Parent: topo.TabletAlias{
                    Cell: "c2",
                    Uid:  2,
                },
            },
        },
    }
    if err := ts.UpdateShardReplicationFields(cell, "test_keyspace", "-10", func(oldSr *topo.ShardReplication) error {
        *oldSr = *sr
        return nil
    }); err != nil {
        t.Fatalf("UpdateShardReplicationFields() failed: %v", err)
    }

    if sri, err := ts.GetShardReplication(cell, "test_keyspace", "-10"); err != nil {
        t.Errorf("GetShardReplication(new guy) failed: %v", err)
    } else {
        if len(sri.ReplicationLinks) != 1 ||
            sri.ReplicationLinks[0].TabletAlias.Cell != "c1" ||
            sri.ReplicationLinks[0].TabletAlias.Uid != 1 ||
            sri.ReplicationLinks[0].Parent.Cell != "c2" ||
            sri.ReplicationLinks[0].Parent.Uid != 2 {
            t.Errorf("GetShardReplication(new guy) returned wrong value: %v", *sri)
        }
    }

    if err := ts.UpdateShardReplicationFields(cell, "test_keyspace", "-10", func(sr *topo.ShardReplication) error {
        sr.ReplicationLinks = append(sr.ReplicationLinks, topo.ReplicationLink{
            TabletAlias: topo.TabletAlias{
                Cell: "c3",
                Uid:  3,
            },
            Parent: topo.TabletAlias{
                Cell: "c4",
                Uid:  4,
            },
        })
        return nil
    }); err != nil {
        t.Errorf("UpdateShardReplicationFields() failed: %v", err)
    }

    if sri, err := ts.GetShardReplication(cell, "test_keyspace", "-10"); err != nil {
        t.Errorf("GetShardReplication(after append) failed: %v", err)
    } else {
        if len(sri.ReplicationLinks) != 2 ||
            sri.ReplicationLinks[0].TabletAlias.Cell != "c1" ||
            sri.ReplicationLinks[0].TabletAlias.Uid != 1 ||
            sri.ReplicationLinks[0].Parent.Cell != "c2" ||
            sri.ReplicationLinks[0].Parent.Uid != 2 ||
            sri.ReplicationLinks[1].TabletAlias.Cell != "c3" ||
            sri.ReplicationLinks[1].TabletAlias.Uid != 3 ||
            sri.ReplicationLinks[1].Parent.Cell != "c4" ||
            sri.ReplicationLinks[1].Parent.Uid != 4 {
            t.Errorf("GetShardReplication(after append) returned wrong value: %v", *sri)
        }
    }

    if err := ts.DeleteShardReplication(cell, "test_keyspace", "-10"); err != nil {
        t.Errorf("DeleteShardReplication(existing) failed: %v", err)
    }
    if err := ts.DeleteShardReplication(cell, "test_keyspace", "-10"); err != topo.ErrNoNode {
        t.Errorf("DeleteShardReplication(again) returned: %v", err)
    }
}
// DbServingGraph returns the ServingGraph for the given cell.
func DbServingGraph(ts topo.Server, cell string) (servingGraph *ServingGraph) {
    servingGraph = &ServingGraph{
        Cell:      cell,
        Keyspaces: make(map[string]*KeyspaceNodes),
    }
    rec := concurrency.AllErrorRecorder{}

    keyspaces, err := ts.GetSrvKeyspaceNames(cell)
    if err != nil {
        servingGraph.Errors = append(servingGraph.Errors, fmt.Sprintf("GetSrvKeyspaceNames failed: %v", err))
        return
    }
    wg := sync.WaitGroup{}
    servingTypes := []topo.TabletType{topo.TYPE_MASTER, topo.TYPE_REPLICA, topo.TYPE_RDONLY}
    for _, keyspace := range keyspaces {
        kn := newKeyspaceNodes()
        servingGraph.Keyspaces[keyspace] = kn
        wg.Add(1)
        go func(keyspace string, kn *KeyspaceNodes) {
            defer wg.Done()

            ks, err := ts.GetSrvKeyspace(cell, keyspace)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetSrvKeyspace(%v, %v) failed: %v", cell, keyspace, err))
                return
            }
            kn.ServedFrom = ks.ServedFrom

            displayedShards := make(map[string]bool)
            for _, partitionTabletType := range servingTypes {
                kp, ok := ks.Partitions[partitionTabletType]
                if !ok {
                    continue
                }
                for _, srvShard := range kp.Shards {
                    shard := srvShard.ShardName()
                    if displayedShards[shard] {
                        continue
                    }
                    displayedShards[shard] = true

                    sn := &ShardNodes{
                        Name:        shard,
                        TabletNodes: make(TabletNodesByType),
                        ServedTypes: srvShard.ServedTypes,
                    }
                    kn.ShardNodes = append(kn.ShardNodes, sn)
                    wg.Add(1)
                    go func(shard string, sn *ShardNodes) {
                        defer wg.Done()
                        tabletTypes, err := ts.GetSrvTabletTypesPerShard(cell, keyspace, shard)
                        if err != nil {
                            rec.RecordError(fmt.Errorf("GetSrvTabletTypesPerShard(%v, %v, %v) failed: %v", cell, keyspace, shard, err))
                            return
                        }
                        for _, tabletType := range tabletTypes {
                            endPoints, err := ts.GetEndPoints(cell, keyspace, shard, tabletType)
                            if err != nil {
                                rec.RecordError(fmt.Errorf("GetEndPoints(%v, %v, %v, %v) failed: %v", cell, keyspace, shard, tabletType, err))
                                continue
                            }
                            for _, endPoint := range endPoints.Entries {
                                sn.TabletNodes[tabletType] = append(sn.TabletNodes[tabletType], newTabletNodeFromEndPoint(endPoint, cell))
                            }
                        }
                    }(shard, sn)
                }
            }
        }(keyspace, kn)
    }
    wg.Wait()
    servingGraph.Errors = rec.ErrorStrings()
    return
}
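// dumpServingGraph is a hypothetical caller (not part of the original code):
// DbServingGraph never fails hard, it accumulates problems in
// ServingGraph.Errors, so callers should inspect that slice explicitly.
func dumpServingGraph(ts topo.Server, cell string) {
    sg := DbServingGraph(ts, cell)
    for _, e := range sg.Errors {
        log.Warningf("serving graph for cell %v has a problem: %v", cell, e)
    }
    for keyspace := range sg.Keyspaces {
        log.Infof("cell %v serves keyspace %v", cell, keyspace)
    }
}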
func CheckShard(t *testing.T, ts topo.Server) {
    if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil {
        t.Fatalf("CreateKeyspace: %v", err)
    }

    if err := topo.CreateShard(ts, "test_keyspace", "b0-c0"); err != nil {
        t.Fatalf("CreateShard: %v", err)
    }
    if err := topo.CreateShard(ts, "test_keyspace", "b0-c0"); err != topo.ErrNodeExists {
        t.Errorf("CreateShard called second time, got: %v", err)
    }

    if _, err := ts.GetShard("test_keyspace", "666"); err != topo.ErrNoNode {
        t.Errorf("GetShard(666): %v", err)
    }

    shardInfo, err := ts.GetShard("test_keyspace", "b0-c0")
    if err != nil {
        t.Errorf("GetShard: %v", err)
    }
    if want := newKeyRange("b0-c0"); shardInfo.KeyRange != want {
        t.Errorf("shardInfo.KeyRange: want %v, got %v", want, shardInfo.KeyRange)
    }

    master := topo.TabletAlias{Cell: "ny", Uid: 1}
    shardInfo.MasterAlias = master
    shardInfo.KeyRange = newKeyRange("b0-c0")
    shardInfo.ServedTypesMap = map[topo.TabletType]*topo.ShardServedType{
        topo.TYPE_MASTER:  &topo.ShardServedType{},
        topo.TYPE_REPLICA: &topo.ShardServedType{Cells: []string{"c1"}},
        topo.TYPE_RDONLY:  &topo.ShardServedType{},
    }
    shardInfo.SourceShards = []topo.SourceShard{
        topo.SourceShard{
            Uid:      1,
            Keyspace: "source_ks",
            Shard:    "b8-c0",
            KeyRange: newKeyRange("b8-c0"),
            Tables:   []string{"table1", "table2"},
        },
    }
    shardInfo.TabletControlMap = map[topo.TabletType]*topo.TabletControl{
        topo.TYPE_MASTER: &topo.TabletControl{
            Cells:             []string{"c1", "c2"},
            BlacklistedTables: []string{"black1", "black2"},
        },
        topo.TYPE_REPLICA: &topo.TabletControl{
            DisableQueryService: true,
        },
    }
    if err := topo.UpdateShard(context.TODO(), ts, shardInfo); err != nil {
        t.Errorf("UpdateShard: %v", err)
    }

    updatedShardInfo, err := ts.GetShard("test_keyspace", "b0-c0")
    if err != nil {
        t.Fatalf("GetShard: %v", err)
    }

    if eq, err := shardEqual(shardInfo.Shard, updatedShardInfo.Shard); err != nil {
        t.Errorf("cannot compare shards: %v", err)
    } else if !eq {
        t.Errorf("put and got shards are not identical:\n%#v\n%#v", shardInfo.Shard, updatedShardInfo.Shard)
    }

    // test GetShardNames
    shards, err := ts.GetShardNames("test_keyspace")
    if err != nil {
        t.Errorf("GetShardNames: %v", err)
    }
    if len(shards) != 1 || shards[0] != "b0-c0" {
        t.Errorf(`GetShardNames: want [ "b0-c0" ], got %v`, shards)
    }

    if _, err := ts.GetShardNames("test_keyspace666"); err != topo.ErrNoNode {
        t.Errorf("GetShardNames(666): %v", err)
    }
}
// RebuildShard updates the shard serving graph data with the new master,
// replicas, etc.
//
// It re-reads from the topology server to make sure it uses the side
// effects of all actions.
//
// This function locks individual SrvShard paths, so it doesn't need a lock
// on the shard.
func RebuildShard(ctx context.Context, log logutil.Logger, ts topo.Server, keyspace, shard string, cells []string, timeout time.Duration, interrupted chan struct{}) (*topo.ShardInfo, error) {
    log.Infof("RebuildShard %v/%v", keyspace, shard)

    span := trace.NewSpanFromContext(ctx)
    span.StartLocal("topotools.RebuildShard")
    defer span.Finish()
    ctx = trace.NewContext(ctx, span)

    // read the existing shard info. It has to exist.
    shardInfo, err := ts.GetShard(keyspace, shard)
    if err != nil {
        return nil, err
    }

    // rebuild all cells in parallel
    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for _, cell := range shardInfo.Cells {
        // skip this cell if we shouldn't rebuild it
        if !topo.InCellList(cell, cells) {
            continue
        }

        // start with the master if it's in the current cell
        tabletsAsMap := make(map[topo.TabletAlias]bool)
        if shardInfo.MasterAlias.Cell == cell {
            tabletsAsMap[shardInfo.MasterAlias] = true
        }

        wg.Add(1)
        go func(cell string) {
            defer wg.Done()

            // Lock the SrvShard so we don't race with other rebuilds of the same
            // shard in the same cell (e.g. from our peer tablets).
            actionNode := actionnode.RebuildSrvShard()
            lockPath, err := actionNode.LockSrvShard(ctx, ts, cell, keyspace, shard, timeout, interrupted)
            if err != nil {
                rec.RecordError(err)
                return
            }

            // read the ShardReplication object to find tablets
            sri, err := ts.GetShardReplication(cell, keyspace, shard)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err))
                return
            }

            // add all relevant tablets to the map
            for _, rl := range sri.ReplicationLinks {
                tabletsAsMap[rl.TabletAlias] = true
                if rl.Parent.Cell == cell {
                    tabletsAsMap[rl.Parent] = true
                }
            }

            // convert the map to a list
            aliases := make([]topo.TabletAlias, 0, len(tabletsAsMap))
            for a := range tabletsAsMap {
                aliases = append(aliases, a)
            }

            // read all the Tablet records
            tablets, err := topo.GetTabletMap(ctx, ts, aliases)
            switch err {
            case nil:
                // keep going, we're good
            case topo.ErrPartialResult:
                log.Warningf("Got ErrPartialResult from topo.GetTabletMap in cell %v, some tablets may not be added properly to serving graph", cell)
            default:
                rec.RecordError(fmt.Errorf("GetTabletMap in cell %v failed: %v", cell, err))
                return
            }

            // write the data we need to
            rebuildErr := rebuildCellSrvShard(ctx, log, ts, shardInfo, cell, tablets)

            // and unlock
            if err := actionNode.UnlockSrvShard(ctx, ts, cell, keyspace, shard, lockPath, rebuildErr); err != nil {
                rec.RecordError(err)
            }
        }(cell)
    }
    wg.Wait()

    return shardInfo, rec.Error()
}
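// rebuildAllCells is a hypothetical helper (not part of the original code)
// that rebuilds the serving graph for a shard in every cell it covers. It
// assumes that passing nil for cells means "no filter", as the
// topo.InCellList check in RebuildShard suggests, and wires up a 30s lock
// timeout with an interruption channel that is never closed.
func rebuildAllCells(ctx context.Context, logger logutil.Logger, ts topo.Server, keyspace, shard string) error {
    interrupted := make(chan struct{})
    _, err := RebuildShard(ctx, logger, ts, keyspace, shard, nil, 30*time.Second, interrupted)
    return err
}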
// rebuildCellSrvShard computes and writes the serving graph data to a
// single cell
func rebuildCellSrvShard(ctx context.Context, log logutil.Logger, ts topo.Server, shardInfo *topo.ShardInfo, cell string, tablets map[topo.TabletAlias]*topo.TabletInfo) error {
    log.Infof("rebuildCellSrvShard %v/%v in cell %v", shardInfo.Keyspace(), shardInfo.ShardName(), cell)

    // Get all existing db types so they can be removed if nothing
    // has been edited.
    existingTabletTypes, err := ts.GetSrvTabletTypesPerShard(cell, shardInfo.Keyspace(), shardInfo.ShardName())
    if err != nil {
        if err != topo.ErrNoNode {
            return err
        }
    }

    // Update db type addresses in the serving graph
    //
    // locationAddrsMap is a map:
    //   key: tabletType
    //   value: EndPoints (list of server records)
    locationAddrsMap := make(map[topo.TabletType]*topo.EndPoints)
    for _, tablet := range tablets {
        if !tablet.IsInReplicationGraph() {
            // only valid case is a scrapped master in the
            // catastrophic reparent case
            if tablet.Parent.Uid != topo.NO_TABLET {
                log.Warningf("Tablet %v should not be in the replication graph, please investigate (it is being ignored in the rebuild)", tablet.Alias)
            }
            continue
        }

        // Check IsInServingGraph, we don't want to add tablets that
        // are not serving
        if !tablet.IsInServingGraph() {
            continue
        }

        // Check the Keyspace and Shard for the tablet are right
        if tablet.Keyspace != shardInfo.Keyspace() || tablet.Shard != shardInfo.ShardName() {
            return fmt.Errorf("CRITICAL: tablet %v is in replication graph for shard %v/%v but belongs to shard %v:%v", tablet.Alias, shardInfo.Keyspace(), shardInfo.ShardName(), tablet.Keyspace, tablet.Shard)
        }

        // Add the tablet to the list
        addrs, ok := locationAddrsMap[tablet.Type]
        if !ok {
            addrs = topo.NewEndPoints()
            locationAddrsMap[tablet.Type] = addrs
        }
        entry, err := tablet.Tablet.EndPoint()
        if err != nil {
            log.Warningf("EndPointForTablet failed for tablet %v: %v", tablet.Alias, err)
            continue
        }
        addrs.Entries = append(addrs.Entries, *entry)
    }

    // we're gonna parallelize a lot here:
    // - writing all the tabletTypes records
    // - removing the unused records
    // - writing SrvShard
    rec := concurrency.AllErrorRecorder{}
    wg := sync.WaitGroup{}

    // write all the EndPoints nodes everywhere we want them
    for tabletType, addrs := range locationAddrsMap {
        wg.Add(1)
        go func(tabletType topo.TabletType, addrs *topo.EndPoints) {
            log.Infof("saving serving graph for cell %v shard %v/%v tabletType %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType)
            span := trace.NewSpanFromContext(ctx)
            span.StartClient("TopoServer.UpdateEndPoints")
            span.Annotate("tablet_type", string(tabletType))
            if err := ts.UpdateEndPoints(cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType, addrs); err != nil {
                rec.RecordError(fmt.Errorf("writing endpoints for cell %v shard %v/%v tabletType %v failed: %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType, err))
            }
            span.Finish()
            wg.Done()
        }(tabletType, addrs)
    }

    // Delete any pre-existing paths that were not updated by this process.
    // That's existingTabletTypes minus locationAddrsMap.
    for _, tabletType := range existingTabletTypes {
        if _, ok := locationAddrsMap[tabletType]; !ok {
            wg.Add(1)
            go func(tabletType topo.TabletType) {
                log.Infof("removing stale db type from serving graph: %v", tabletType)
                span := trace.NewSpanFromContext(ctx)
                span.StartClient("TopoServer.DeleteEndPoints")
                span.Annotate("tablet_type", string(tabletType))
                if err := ts.DeleteEndPoints(cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType); err != nil {
                    log.Warningf("unable to remove stale db type %v from serving graph: %v", tabletType, err)
                }
                span.Finish()
                wg.Done()
            }(tabletType)
        }
    }

    // Update the SrvShard object
    wg.Add(1)
    go func() {
        log.Infof("updating shard serving graph in cell %v for %v/%v", cell, shardInfo.Keyspace(), shardInfo.ShardName())
        srvShard := &topo.SrvShard{
            Name:        shardInfo.ShardName(),
            KeyRange:    shardInfo.KeyRange,
            ServedTypes: shardInfo.GetServedTypesPerCell(cell),
            MasterCell:  shardInfo.MasterAlias.Cell,
            TabletTypes: make([]topo.TabletType, 0, len(locationAddrsMap)),
        }
        for tabletType := range locationAddrsMap {
            srvShard.TabletTypes = append(srvShard.TabletTypes, tabletType)
        }

        span := trace.NewSpanFromContext(ctx)
        span.StartClient("TopoServer.UpdateSrvShard")
        span.Annotate("keyspace", shardInfo.Keyspace())
        span.Annotate("shard", shardInfo.ShardName())
        span.Annotate("cell", cell)
        if err := ts.UpdateSrvShard(cell, shardInfo.Keyspace(), shardInfo.ShardName(), srvShard); err != nil {
            rec.RecordError(fmt.Errorf("writing serving data in cell %v for %v/%v failed: %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), err))
        }
        span.Finish()
        wg.Done()
    }()

    wg.Wait()
    return rec.Error()
}
func CheckKeyspace(t *testing.T, ts topo.Server) {
    keyspaces, err := ts.GetKeyspaces()
    if err != nil {
        t.Errorf("GetKeyspaces(empty): %v", err)
    }
    if len(keyspaces) != 0 {
        t.Errorf("len(GetKeyspaces()) != 0: %v", keyspaces)
    }

    if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != nil {
        t.Errorf("CreateKeyspace: %v", err)
    }
    if err := ts.CreateKeyspace("test_keyspace", &topo.Keyspace{}); err != topo.ErrNodeExists {
        t.Errorf("CreateKeyspace(again) is not ErrNodeExists: %v", err)
    }

    keyspaces, err = ts.GetKeyspaces()
    if err != nil {
        t.Errorf("GetKeyspaces: %v", err)
    }
    if len(keyspaces) != 1 || keyspaces[0] != "test_keyspace" {
        t.Errorf("GetKeyspaces: want %v, got %v", []string{"test_keyspace"}, keyspaces)
    }

    k := &topo.Keyspace{
        ShardingColumnName: "user_id",
        ShardingColumnType: key.KIT_UINT64,
        ServedFromMap: map[topo.TabletType]*topo.KeyspaceServedFrom{
            topo.TYPE_REPLICA: &topo.KeyspaceServedFrom{
                Cells:    []string{"c1", "c2"},
                Keyspace: "test_keyspace3",
            },
            topo.TYPE_MASTER: &topo.KeyspaceServedFrom{
                Cells:    nil,
                Keyspace: "test_keyspace3",
            },
        },
        SplitShardCount: 64,
    }
    if err := ts.CreateKeyspace("test_keyspace2", k); err != nil {
        t.Errorf("CreateKeyspace: %v", err)
    }
    keyspaces, err = ts.GetKeyspaces()
    if err != nil {
        t.Errorf("GetKeyspaces: %v", err)
    }
    if len(keyspaces) != 2 ||
        keyspaces[0] != "test_keyspace" ||
        keyspaces[1] != "test_keyspace2" {
        t.Errorf("GetKeyspaces: want %v, got %v", []string{"test_keyspace", "test_keyspace2"}, keyspaces)
    }

    ki, err := ts.GetKeyspace("test_keyspace2")
    if err != nil {
        t.Fatalf("GetKeyspace: %v", err)
    }
    if !reflect.DeepEqual(ki.Keyspace, k) {
        t.Fatalf("returned keyspace doesn't match: got %v expected %v", ki.Keyspace, k)
    }

    ki.ShardingColumnName = "other_id"
    ki.ShardingColumnType = key.KIT_BYTES
    delete(ki.ServedFromMap, topo.TYPE_MASTER)
    ki.ServedFromMap[topo.TYPE_REPLICA].Keyspace = "test_keyspace4"
    err = topo.UpdateKeyspace(ts, ki)
    if err != nil {
        t.Fatalf("UpdateKeyspace: %v", err)
    }
    ki, err = ts.GetKeyspace("test_keyspace2")
    if err != nil {
        t.Fatalf("GetKeyspace: %v", err)
    }
    if ki.ShardingColumnName != "other_id" ||
        ki.ShardingColumnType != key.KIT_BYTES ||
        ki.ServedFromMap[topo.TYPE_REPLICA].Keyspace != "test_keyspace4" {
        t.Errorf("GetKeyspace: unexpected keyspace, got %v", *ki)
    }
}
// CopyShards will create the shards in the destination topo
func CopyShards(fromTS, toTS topo.Server, deleteKeyspaceShards bool) {
    keyspaces, err := fromTS.GetKeyspaces()
    if err != nil {
        log.Fatalf("fromTS.GetKeyspaces: %v", err)
    }

    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for _, keyspace := range keyspaces {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()
            shards, err := fromTS.GetShardNames(keyspace)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetShardNames(%v): %v", keyspace, err))
                return
            }

            if deleteKeyspaceShards {
                if err := toTS.DeleteKeyspaceShards(keyspace); err != nil {
                    rec.RecordError(fmt.Errorf("DeleteKeyspaceShards(%v): %v", keyspace, err))
                    return
                }
            }

            for _, shard := range shards {
                wg.Add(1)
                go func(keyspace, shard string) {
                    defer wg.Done()
                    if err := topo.CreateShard(toTS, keyspace, shard); err != nil {
                        if err == topo.ErrNodeExists {
                            log.Warningf("shard %v/%v already exists", keyspace, shard)
                        } else {
                            rec.RecordError(fmt.Errorf("CreateShard(%v, %v): %v", keyspace, shard, err))
                            return
                        }
                    }

                    si, err := fromTS.GetShard(keyspace, shard)
                    if err != nil {
                        rec.RecordError(fmt.Errorf("GetShard(%v, %v): %v", keyspace, shard, err))
                        return
                    }

                    toSi, err := toTS.GetShard(keyspace, shard)
                    if err != nil {
                        rec.RecordError(fmt.Errorf("toTS.GetShard(%v, %v): %v", keyspace, shard, err))
                        return
                    }

                    if _, err := toTS.UpdateShard(si, toSi.Version()); err != nil {
                        rec.RecordError(fmt.Errorf("UpdateShard(%v, %v): %v", keyspace, shard, err))
                    }
                }(keyspace, shard)
            }
        }(keyspace)
    }
    wg.Wait()
    if rec.HasErrors() {
        log.Fatalf("copyShards failed: %v", rec.Error())
    }
}
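// copyTopology is a hypothetical driver (not part of the original code)
// showing one sensible ordering for the Copy* helpers above: keyspaces
// before shards, then tablets and replication data. Each helper calls
// log.Fatalf on failure, so there is no error to propagate here.
func copyTopology(fromTS, toTS topo.Server) {
    CopyKeyspaces(fromTS, toTS)
    CopyShards(fromTS, toTS, false /* deleteKeyspaceShards */)
    CopyTablets(fromTS, toTS)
    CopyShardReplications(fromTS, toTS)
}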
func CheckServingGraph(t *testing.T, ts topo.Server) {
    cell := getLocalCell(t, ts)

    // test individual cell/keyspace/shard/type entries
    if _, err := ts.GetSrvTabletTypesPerShard(cell, "test_keyspace", "-10"); err != topo.ErrNoNode {
        t.Errorf("GetSrvTabletTypesPerShard(invalid): %v", err)
    }
    if _, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != topo.ErrNoNode {
        t.Errorf("GetEndPoints(invalid): %v", err)
    }

    endPoints := topo.EndPoints{
        Entries: []topo.EndPoint{
            topo.EndPoint{
                Uid:          1,
                Host:         "host1",
                NamedPortMap: map[string]int{"vt": 1234, "mysql": 1235, "vts": 1236},
            },
        },
    }

    if err := ts.UpdateEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &endPoints); err != nil {
        t.Errorf("UpdateEndPoints(master): %v", err)
    }
    if types, err := ts.GetSrvTabletTypesPerShard(cell, "test_keyspace", "-10"); err != nil || len(types) != 1 || types[0] != topo.TYPE_MASTER {
        t.Errorf("GetSrvTabletTypesPerShard(1): %v %v", err, types)
    }

    addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER)
    if err != nil {
        t.Errorf("GetEndPoints: %v", err)
    }
    if len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 {
        t.Errorf("GetEndPoints(1): %v", addrs)
    }
    if pm := addrs.Entries[0].NamedPortMap; pm["vt"] != 1234 || pm["mysql"] != 1235 || pm["vts"] != 1236 {
        t.Errorf("GetEndPoints(1).NamedPortMap: want %v, got %v", endPoints.Entries[0].NamedPortMap, pm)
    }

    if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_REPLICA, &topo.EndPoint{Uid: 2, Host: "host2"}); err != nil {
        t.Errorf("UpdateTabletEndpoint(invalid): %v", err)
    }
    if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &topo.EndPoint{Uid: 1, Host: "host2"}); err != nil {
        t.Errorf("UpdateTabletEndpoint(master): %v", err)
    }
    if addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 {
        t.Errorf("GetEndPoints(2): %v %v", err, addrs)
    }
    if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &topo.EndPoint{Uid: 3, Host: "host3"}); err != nil {
        t.Errorf("UpdateTabletEndpoint(master): %v", err)
    }
    if addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 2 {
        t.Errorf("GetEndPoints(3): %v %v", err, addrs)
    }

    if err := ts.DeleteEndPoints(cell, "test_keyspace", "-10", topo.TYPE_REPLICA); err != topo.ErrNoNode {
        t.Errorf("DeleteEndPoints(unknown): %v", err)
    }
    if err := ts.DeleteEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil {
        t.Errorf("DeleteEndPoints(master): %v", err)
    }

    // test cell/keyspace/shard entries (SrvShard)
    srvShard := topo.SrvShard{
        ServedTypes: []topo.TabletType{topo.TYPE_MASTER},
        TabletTypes: []topo.TabletType{topo.TYPE_REPLICA, topo.TYPE_RDONLY},
    }
    if err := ts.UpdateSrvShard(cell, "test_keyspace", "-10", &srvShard); err != nil {
        t.Errorf("UpdateSrvShard(1): %v", err)
    }
    if _, err := ts.GetSrvShard(cell, "test_keyspace", "666"); err != topo.ErrNoNode {
        t.Errorf("GetSrvShard(invalid): %v", err)
    }
    if s, err := ts.GetSrvShard(cell, "test_keyspace", "-10"); err != nil ||
        len(s.ServedTypes) != 1 ||
        s.ServedTypes[0] != topo.TYPE_MASTER ||
        len(s.TabletTypes) != 2 ||
        s.TabletTypes[0] != topo.TYPE_REPLICA ||
        s.TabletTypes[1] != topo.TYPE_RDONLY {
        t.Errorf("GetSrvShard(valid): %v", err)
    }

    // test cell/keyspace entries (SrvKeyspace)
    srvKeyspace := topo.SrvKeyspace{
        Partitions: map[topo.TabletType]*topo.KeyspacePartition{
            topo.TYPE_MASTER: &topo.KeyspacePartition{
                Shards: []topo.SrvShard{
                    topo.SrvShard{
                        ServedTypes: []topo.TabletType{topo.TYPE_MASTER},
                    },
                },
            },
        },
        TabletTypes:        []topo.TabletType{topo.TYPE_MASTER},
        ShardingColumnName: "video_id",
        ShardingColumnType: key.KIT_UINT64,
        ServedFrom: map[topo.TabletType]string{
            topo.TYPE_REPLICA: "other_keyspace",
        },
    }
    if err := ts.UpdateSrvKeyspace(cell, "test_keyspace", &srvKeyspace); err != nil {
        t.Errorf("UpdateSrvKeyspace(1): %v", err)
    }
    if _, err := ts.GetSrvKeyspace(cell, "test_keyspace666"); err != topo.ErrNoNode {
        t.Errorf("GetSrvKeyspace(invalid): %v", err)
    }
    if k, err := ts.GetSrvKeyspace(cell, "test_keyspace"); err != nil ||
        len(k.TabletTypes) != 1 ||
        k.TabletTypes[0] != topo.TYPE_MASTER ||
        len(k.Partitions) != 1 ||
        len(k.Partitions[topo.TYPE_MASTER].Shards) != 1 ||
        len(k.Partitions[topo.TYPE_MASTER].Shards[0].ServedTypes) != 1 ||
        k.Partitions[topo.TYPE_MASTER].Shards[0].ServedTypes[0] != topo.TYPE_MASTER ||
        k.ShardingColumnName != "video_id" ||
        k.ShardingColumnType != key.KIT_UINT64 ||
        k.ServedFrom[topo.TYPE_REPLICA] != "other_keyspace" {
        t.Errorf("GetSrvKeyspace(valid): %v %v", err, k)
    }
    if k, err := ts.GetSrvKeyspaceNames(cell); err != nil || len(k) != 1 || k[0] != "test_keyspace" {
        t.Errorf("GetSrvKeyspaceNames(): %v", err)
    }

    // check that updating a SrvKeyspace out of the blue works
    if err := ts.UpdateSrvKeyspace(cell, "unknown_keyspace_so_far", &srvKeyspace); err != nil {
        t.Errorf("UpdateSrvKeyspace(2): %v", err)
    }
    if k, err := ts.GetSrvKeyspace(cell, "unknown_keyspace_so_far"); err != nil ||
        len(k.TabletTypes) != 1 ||
        k.TabletTypes[0] != topo.TYPE_MASTER ||
        len(k.Partitions) != 1 ||
        len(k.Partitions[topo.TYPE_MASTER].Shards) != 1 ||
        len(k.Partitions[topo.TYPE_MASTER].Shards[0].ServedTypes) != 1 ||
        k.Partitions[topo.TYPE_MASTER].Shards[0].ServedTypes[0] != topo.TYPE_MASTER ||
        k.ShardingColumnName != "video_id" ||
        k.ShardingColumnType != key.KIT_UINT64 ||
        k.ServedFrom[topo.TYPE_REPLICA] != "other_keyspace" {
        t.Errorf("GetSrvKeyspace(out of the blue): %v %v", err, *k)
    }
}
func CheckTablet(ctx context.Context, t *testing.T, ts topo.Server) {
    cell := getLocalCell(t, ts)
    tablet := &topo.Tablet{
        Alias:    topo.TabletAlias{Cell: cell, Uid: 1},
        Hostname: "localhost",
        IPAddr:   "10.11.12.13",
        Portmap: map[string]int{
            "vt":    3333,
            "mysql": 3334,
        },
        Tags:     map[string]string{"tag": "value"},
        Keyspace: "test_keyspace",
        Type:     topo.TYPE_MASTER,
        State:    topo.STATE_READ_WRITE,
        KeyRange: newKeyRange("-10"),
    }
    if err := ts.CreateTablet(tablet); err != nil {
        t.Errorf("CreateTablet: %v", err)
    }
    if err := ts.CreateTablet(tablet); err != topo.ErrNodeExists {
        t.Errorf("CreateTablet(again): %v", err)
    }

    if _, err := ts.GetTablet(topo.TabletAlias{Cell: cell, Uid: 666}); err != topo.ErrNoNode {
        t.Errorf("GetTablet(666): %v", err)
    }

    ti, err := ts.GetTablet(tablet.Alias)
    if err != nil {
        t.Errorf("GetTablet %v: %v", tablet.Alias, err)
    }
    if eq, err := tabletEqual(ti.Tablet, tablet); err != nil {
        t.Errorf("cannot compare tablets: %v", err)
    } else if !eq {
        t.Errorf("put and got tablets are not identical:\n%#v\n%#v", tablet, ti.Tablet)
    }

    if _, err := ts.GetTabletsByCell("666"); err != topo.ErrNoNode {
        t.Errorf("GetTabletsByCell(666): %v", err)
    }

    inCell, err := ts.GetTabletsByCell(cell)
    if err != nil {
        t.Errorf("GetTabletsByCell: %v", err)
    }
    if len(inCell) != 1 || inCell[0] != tablet.Alias {
        t.Errorf("GetTabletsByCell: want [%v], got %v", tablet.Alias, inCell)
    }

    ti.State = topo.STATE_READ_ONLY
    if err := topo.UpdateTablet(ctx, ts, ti); err != nil {
        t.Errorf("UpdateTablet: %v", err)
    }

    ti, err = ts.GetTablet(tablet.Alias)
    if err != nil {
        t.Errorf("GetTablet %v: %v", tablet.Alias, err)
    }
    if want := topo.STATE_READ_ONLY; ti.State != want {
        t.Errorf("ti.State: want %v, got %v", want, ti.State)
    }

    if err := ts.UpdateTabletFields(tablet.Alias, func(t *topo.Tablet) error {
        t.State = topo.STATE_READ_WRITE
        return nil
    }); err != nil {
        t.Errorf("UpdateTabletFields: %v", err)
    }
    ti, err = ts.GetTablet(tablet.Alias)
    if err != nil {
        t.Errorf("GetTablet %v: %v", tablet.Alias, err)
    }
    if want := topo.STATE_READ_WRITE; ti.State != want {
        t.Errorf("ti.State: want %v, got %v", want, ti.State)
    }

    if err := ts.DeleteTablet(tablet.Alias); err != nil {
        t.Errorf("DeleteTablet: %v", err)
    }
    if err := ts.DeleteTablet(tablet.Alias); err != topo.ErrNoNode {
        t.Errorf("DeleteTablet(again): %v", err)
    }

    if _, err := ts.GetTablet(tablet.Alias); err != topo.ErrNoNode {
        t.Errorf("GetTablet: expected error, tablet was deleted: %v", err)
    }
}
// ChangeType changes the type of the tablet and possibly also updates
// the health information for it. Make this external, since these
// transitions need to be forced from time to time.
//
// - if health is nil, we don't touch the Tablet's Health record.
// - if health is an empty map, we clear the Tablet's Health record.
// - if health has values, we overwrite the Tablet's Health record.
func ChangeType(ts topo.Server, tabletAlias topo.TabletAlias, newType topo.TabletType, health map[string]string, runHooks bool) error {
    tablet, err := ts.GetTablet(tabletAlias)
    if err != nil {
        return err
    }

    if !topo.IsTrivialTypeChange(tablet.Type, newType) || !topo.IsValidTypeChange(tablet.Type, newType) {
        return fmt.Errorf("cannot change tablet type %v -> %v %v", tablet.Type, newType, tabletAlias)
    }

    if runHooks {
        // Only run the preflight_serving_type hook when
        // transitioning from non-serving to serving.
        if !topo.IsInServingGraph(tablet.Type) && topo.IsInServingGraph(newType) {
            if err := hook.NewSimpleHook("preflight_serving_type").ExecuteOptional(); err != nil {
                return err
            }
        }
    }

    tablet.Type = newType
    if newType == topo.TYPE_IDLE {
        if tablet.Parent.IsZero() {
            si, err := ts.GetShard(tablet.Keyspace, tablet.Shard)
            if err != nil {
                return err
            }
            rec := concurrency.AllErrorRecorder{}
            wg := sync.WaitGroup{}
            for _, cell := range si.Cells {
                wg.Add(1)
                go func(cell string) {
                    defer wg.Done()
                    sri, err := ts.GetShardReplication(cell, tablet.Keyspace, tablet.Shard)
                    if err != nil {
                        log.Warningf("Cannot check cell %v for extra replication paths, assuming it's good", cell)
                        return
                    }
                    for _, rl := range sri.ReplicationLinks {
                        if rl.Parent == tabletAlias {
                            rec.RecordError(fmt.Errorf("Still have a ReplicationLink in cell %v", cell))
                        }
                    }
                }(cell)
            }
            wg.Wait()
            if rec.HasErrors() {
                return rec.Error()
            }
        }
        tablet.Parent = topo.TabletAlias{}
        tablet.Keyspace = ""
        tablet.Shard = ""
        tablet.KeyRange = key.KeyRange{}
        tablet.Health = health
    }
    if health != nil {
        if len(health) == 0 {
            tablet.Health = nil
        } else {
            tablet.Health = health
        }
    }
    return topo.UpdateTablet(context.TODO(), ts, tablet)
}
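// demoteToSpare is a hypothetical caller sketch (not part of the original
// code): change a tablet to the spare type, clear its health record using the
// empty-map convention documented above, and skip the hooks. It assumes the
// topo.TYPE_SPARE constant, which is not shown in this section.
func demoteToSpare(ts topo.Server, alias topo.TabletAlias) error {
    return ChangeType(ts, alias, topo.TYPE_SPARE, map[string]string{}, false /* runHooks */)
}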