// UpdateSrvShard creates the SrvShard object based on the global ShardInfo,
// and writes it to the given cell.
func UpdateSrvShard(ctx context.Context, ts topo.Server, cell string, si *topo.ShardInfo) error {
	srvShard := &topo.SrvShard{
		Name:       si.ShardName(),
		KeyRange:   si.KeyRange,
		MasterCell: si.MasterAlias.Cell,
	}
	return ts.UpdateSrvShard(ctx, cell, si.Keyspace(), si.ShardName(), srvShard)
}
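// The following is a minimal usage sketch, not part of the original code: it
// fans UpdateSrvShard out to several cells with the WaitGroup +
// AllErrorRecorder pattern used elsewhere in these excerpts. The helper name
// updateSrvShardInCells and the explicit cells parameter are assumptions made
// for illustration.
func updateSrvShardInCells(ctx context.Context, ts topo.Server, cells []string, si *topo.ShardInfo) error {
	rec := concurrency.AllErrorRecorder{}
	wg := sync.WaitGroup{}
	for _, cell := range cells {
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			// Each cell gets its own SrvShard derived from the global ShardInfo.
			if err := UpdateSrvShard(ctx, ts, cell, si); err != nil {
				rec.RecordError(fmt.Errorf("UpdateSrvShard(%v) failed: %v", cell, err))
			}
		}(cell)
	}
	wg.Wait()
	return rec.Error()
}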
// CheckServingGraph makes sure the serving graph functions work properly.
func CheckServingGraph(t *testing.T, ts topo.Server) {
	cell := getLocalCell(t, ts)

	// test individual cell/keyspace/shard/type entries
	if _, err := ts.GetSrvTabletTypesPerShard(cell, "test_keyspace", "-10"); err != topo.ErrNoNode {
		t.Errorf("GetSrvTabletTypesPerShard(invalid): %v", err)
	}
	if _, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != topo.ErrNoNode {
		t.Errorf("GetEndPoints(invalid): %v", err)
	}

	endPoints := topo.EndPoints{
		Entries: []topo.EndPoint{
			topo.EndPoint{
				Uid:          1,
				Host:         "host1",
				NamedPortMap: map[string]int{"_vt": 1234, "_mysql": 1235, "_vts": 1236},
			},
		},
	}

	if err := ts.UpdateEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &endPoints); err != nil {
		t.Errorf("UpdateEndPoints(master): %v", err)
	}
	if types, err := ts.GetSrvTabletTypesPerShard(cell, "test_keyspace", "-10"); err != nil || len(types) != 1 || types[0] != topo.TYPE_MASTER {
		t.Errorf("GetSrvTabletTypesPerShard(1): %v %v", err, types)
	}

	addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER)
	if err != nil {
		t.Errorf("GetEndPoints: %v", err)
	}
	if len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 {
		t.Errorf("GetEndPoints(1): %v", addrs)
	}
	if pm := addrs.Entries[0].NamedPortMap; pm["_vt"] != 1234 || pm["_mysql"] != 1235 || pm["_vts"] != 1236 {
		t.Errorf("GetEndPoints(1).NamedPortMap: want %v, got %v", endPoints.Entries[0].NamedPortMap, pm)
	}

	if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_REPLICA, &topo.EndPoint{Uid: 2, Host: "host2"}); err != nil {
		t.Errorf("UpdateTabletEndpoint(invalid): %v", err)
	}
	if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &topo.EndPoint{Uid: 1, Host: "host2"}); err != nil {
		t.Errorf("UpdateTabletEndpoint(master): %v", err)
	}
	if addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 {
		t.Errorf("GetEndPoints(2): %v %v", err, addrs)
	}
	if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &topo.EndPoint{Uid: 3, Host: "host3"}); err != nil {
		t.Errorf("UpdateTabletEndpoint(master): %v", err)
	}
	if addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 2 {
		t.Errorf("GetEndPoints(3): %v %v", err, addrs)
	}

	if err := ts.DeleteSrvTabletType(cell, "test_keyspace", "-10", topo.TYPE_REPLICA); err != topo.ErrNoNode {
		t.Errorf("DeleteSrvTabletType(unknown): %v", err)
	}
	if err := ts.DeleteSrvTabletType(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil {
		t.Errorf("DeleteSrvTabletType(master): %v", err)
	}

	// test cell/keyspace/shard entries (SrvShard)
	srvShard := topo.SrvShard{
		ServedTypes: []topo.TabletType{topo.TYPE_MASTER},
		TabletTypes: []topo.TabletType{topo.TYPE_REPLICA, topo.TYPE_RDONLY},
	}
	if err := ts.UpdateSrvShard(cell, "test_keyspace", "-10", &srvShard); err != nil {
		t.Errorf("UpdateSrvShard(1): %v", err)
	}
	if _, err := ts.GetSrvShard(cell, "test_keyspace", "666"); err != topo.ErrNoNode {
		t.Errorf("GetSrvShard(invalid): %v", err)
	}
	if s, err := ts.GetSrvShard(cell, "test_keyspace", "-10"); err != nil ||
		len(s.ServedTypes) != 1 || s.ServedTypes[0] != topo.TYPE_MASTER ||
		len(s.TabletTypes) != 2 || s.TabletTypes[0] != topo.TYPE_REPLICA || s.TabletTypes[1] != topo.TYPE_RDONLY {
		t.Errorf("GetSrvShard(valid): %v", err)
	}

	// test cell/keyspace entries (SrvKeyspace)
	srvKeyspace := topo.SrvKeyspace{
		Partitions: map[topo.TabletType]*topo.KeyspacePartition{
			topo.TYPE_MASTER: &topo.KeyspacePartition{
				Shards: []topo.SrvShard{
					topo.SrvShard{
						ServedTypes: []topo.TabletType{topo.TYPE_MASTER},
					},
				},
			},
		},
		TabletTypes: []topo.TabletType{topo.TYPE_MASTER},
	}
	if err := ts.UpdateSrvKeyspace(cell, "test_keyspace", &srvKeyspace); err != nil {
		t.Errorf("UpdateSrvKeyspace(1): %v", err)
	}
	if _, err := ts.GetSrvKeyspace(cell, "test_keyspace666"); err != topo.ErrNoNode {
		t.Errorf("GetSrvKeyspace(invalid): %v", err)
	}
	if s, err := ts.GetSrvKeyspace(cell, "test_keyspace"); err != nil ||
		len(s.TabletTypes) != 1 || s.TabletTypes[0] != topo.TYPE_MASTER ||
		len(s.Partitions) != 1 ||
		len(s.Partitions[topo.TYPE_MASTER].Shards) != 1 ||
		len(s.Partitions[topo.TYPE_MASTER].Shards[0].ServedTypes) != 1 ||
		s.Partitions[topo.TYPE_MASTER].Shards[0].ServedTypes[0] != topo.TYPE_MASTER {
		t.Errorf("GetSrvKeyspace(valid): %v", err)
	}
	if k, err := ts.GetSrvKeyspaceNames(cell); err != nil || len(k) != 1 || k[0] != "test_keyspace" {
		t.Errorf("GetSrvKeyspaceNames(): %v", err)
	}
}
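// A hedged sketch of how a topo.Server backend's test package might drive the
// check above. The newServerForTest fixture is hypothetical and stands in for
// whatever constructor the backend under test provides.
func TestServingGraph(t *testing.T) {
	// newServerForTest is a hypothetical helper returning a topo.Server
	// backed by the implementation under test.
	ts := newServerForTest(t)
	CheckServingGraph(t, ts)
}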
// rebuildCellSrvShard computes and writes the serving graph data to a
// single cell.
func rebuildCellSrvShard(log logutil.Logger, ts topo.Server, shardInfo *topo.ShardInfo, cell string, tablets map[topo.TabletAlias]*topo.TabletInfo) error {
	log.Infof("rebuildCellSrvShard %v/%v in cell %v", shardInfo.Keyspace(), shardInfo.ShardName(), cell)

	// Get all existing db types so they can be removed if nothing
	// has been edited.
	existingTabletTypes, err := ts.GetSrvTabletTypesPerShard(cell, shardInfo.Keyspace(), shardInfo.ShardName())
	if err != nil {
		if err != topo.ErrNoNode {
			return err
		}
	}

	// Update db type addresses in the serving graph.
	//
	// locationAddrsMap is a map:
	//   key:   tabletType
	//   value: EndPoints (list of server records)
	locationAddrsMap := make(map[topo.TabletType]*topo.EndPoints)
	for _, tablet := range tablets {
		if !tablet.IsInReplicationGraph() {
			// only valid case is a scrapped master in the
			// catastrophic reparent case
			if tablet.Parent.Uid != topo.NO_TABLET {
				log.Warningf("Tablet %v should not be in the replication graph, please investigate (it is being ignored in the rebuild)", tablet.Alias)
			}
			continue
		}

		// Check IsInServingGraph; we don't want to add tablets that
		// are not serving.
		if !tablet.IsInServingGraph() {
			continue
		}

		// Check the Keyspace and Shard for the tablet are right.
		if tablet.Keyspace != shardInfo.Keyspace() || tablet.Shard != shardInfo.ShardName() {
			return fmt.Errorf("CRITICAL: tablet %v is in replication graph for shard %v/%v but belongs to shard %v:%v", tablet.Alias, shardInfo.Keyspace(), shardInfo.ShardName(), tablet.Keyspace, tablet.Shard)
		}

		// Add the tablet to the list.
		addrs, ok := locationAddrsMap[tablet.Type]
		if !ok {
			addrs = topo.NewEndPoints()
			locationAddrsMap[tablet.Type] = addrs
		}
		entry, err := tablet.Tablet.EndPoint()
		if err != nil {
			log.Warningf("EndPointForTablet failed for tablet %v: %v", tablet.Alias, err)
			continue
		}
		addrs.Entries = append(addrs.Entries, *entry)
	}

	// We're going to parallelize a lot here:
	// - writing all the tabletTypes records
	// - removing the unused records
	// - writing SrvShard
	rec := concurrency.AllErrorRecorder{}
	wg := sync.WaitGroup{}

	// Write all the EndPoints nodes everywhere we want them.
	for tabletType, addrs := range locationAddrsMap {
		wg.Add(1)
		go func(tabletType topo.TabletType, addrs *topo.EndPoints) {
			log.Infof("saving serving graph for cell %v shard %v/%v tabletType %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType)
			if err := ts.UpdateEndPoints(cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType, addrs); err != nil {
				rec.RecordError(fmt.Errorf("writing endpoints for cell %v shard %v/%v tabletType %v failed: %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType, err))
			}
			wg.Done()
		}(tabletType, addrs)
	}

	// Delete any pre-existing paths that were not updated by this process.
	// That's existingTabletTypes - locationAddrsMap.
	for _, tabletType := range existingTabletTypes {
		if _, ok := locationAddrsMap[tabletType]; !ok {
			wg.Add(1)
			go func(tabletType topo.TabletType) {
				log.Infof("removing stale db type from serving graph: %v", tabletType)
				if err := ts.DeleteEndPoints(cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType); err != nil {
					log.Warningf("unable to remove stale db type %v from serving graph: %v", tabletType, err)
				}
				wg.Done()
			}(tabletType)
		}
	}

	// Update the SrvShard object.
	wg.Add(1)
	go func() {
		log.Infof("updating shard serving graph in cell %v for %v/%v", cell, shardInfo.Keyspace(), shardInfo.ShardName())
		srvShard := &topo.SrvShard{
			Name:        shardInfo.ShardName(),
			KeyRange:    shardInfo.KeyRange,
			ServedTypes: shardInfo.ServedTypes,
			MasterCell:  shardInfo.MasterAlias.Cell,
			TabletTypes: make([]topo.TabletType, 0, len(locationAddrsMap)),
		}
		for tabletType := range locationAddrsMap {
			srvShard.TabletTypes = append(srvShard.TabletTypes, tabletType)
		}

		if err := ts.UpdateSrvShard(cell, shardInfo.Keyspace(), shardInfo.ShardName(), srvShard); err != nil {
			rec.RecordError(fmt.Errorf("writing serving data in cell %v for %v/%v failed: %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), err))
		}
		wg.Done()
	}()

	wg.Wait()
	return rec.Error()
}
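// Since rebuildCellSrvShard works on a single cell, a shard-wide rebuild can
// fan it out per cell. The sketch below is a minimal illustration, not the
// original caller: the rebuildAllCellsSrvShard name and the tabletsByCell
// shape (tablets already grouped per cell by the caller) are assumptions.
func rebuildAllCellsSrvShard(log logutil.Logger, ts topo.Server, shardInfo *topo.ShardInfo, cells []string, tabletsByCell map[string]map[topo.TabletAlias]*topo.TabletInfo) error {
	rec := concurrency.AllErrorRecorder{}
	wg := sync.WaitGroup{}
	for _, cell := range cells {
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			// Rebuild this cell from the tablets the caller collected for it.
			if err := rebuildCellSrvShard(log, ts, shardInfo, cell, tabletsByCell[cell]); err != nil {
				rec.RecordError(fmt.Errorf("rebuildCellSrvShard(%v) failed: %v", cell, err))
			}
		}(cell)
	}
	wg.Wait()
	return rec.Error()
}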
// CheckServingGraph makes sure the serving graph functions work properly.
func CheckServingGraph(ctx context.Context, t *testing.T, ts topo.Server) {
	cell := getLocalCell(ctx, t, ts)

	// test individual cell/keyspace/shard/type entries
	if _, err := ts.GetSrvTabletTypesPerShard(ctx, cell, "test_keyspace", "-10"); err != topo.ErrNoNode {
		t.Errorf("GetSrvTabletTypesPerShard(invalid): %v", err)
	}
	if _, _, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != topo.ErrNoNode {
		t.Errorf("GetEndPoints(invalid): %v", err)
	}

	endPoints := &pb.EndPoints{
		Entries: []*pb.EndPoint{
			&pb.EndPoint{
				Uid:  1,
				Host: "host1",
				PortMap: map[string]int32{
					"vt":    1234,
					"mysql": 1235,
					"grpc":  1236,
				},
			},
		},
	}

	if err := ts.CreateEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints); err != nil {
		t.Fatalf("CreateEndPoints(master): %v", err)
	}
	// Try to create again.
	if err := ts.CreateEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints); err != topo.ErrNodeExists {
		t.Fatalf("CreateEndPoints(master): err = %v, want topo.ErrNodeExists", err)
	}

	// Get the version.
	_, version, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER)
	if err != nil {
		t.Fatalf("GetEndPoints(master): %v", err)
	}

	// Make a change.
	tmp := endPoints.Entries[0].Uid
	endPoints.Entries[0].Uid = tmp + 1
	if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints, -1); err != nil {
		t.Fatalf("UpdateEndPoints(master): %v", err)
	}
	endPoints.Entries[0].Uid = tmp

	// Try to delete with the wrong version.
	if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER, version); err != topo.ErrBadVersion {
		t.Fatalf("DeleteEndPoints: err = %v, want topo.ErrBadVersion", err)
	}

	// Delete with the correct version.
	_, version, err = ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER)
	if err != nil {
		t.Fatalf("GetEndPoints(master): %v", err)
	}
	if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER, version); err != nil {
		t.Fatalf("DeleteEndPoints: %v", err)
	}

	// Recreate it with an unconditional update.
	if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints, -1); err != nil {
		t.Fatalf("UpdateEndPoints(master): %v", err)
	}
	if types, err := ts.GetSrvTabletTypesPerShard(ctx, cell, "test_keyspace", "-10"); err != nil || len(types) != 1 || types[0] != topo.TYPE_MASTER {
		t.Errorf("GetSrvTabletTypesPerShard(1): %v %v", err, types)
	}

	// Delete it unconditionally.
	if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER, -1); err != nil {
		t.Fatalf("DeleteEndPoints: %v", err)
	}

	// Delete the SrvShard.
	if err := ts.DeleteSrvShard(ctx, cell, "test_keyspace", "-10"); err != nil {
		t.Fatalf("DeleteSrvShard: %v", err)
	}
	if _, err := ts.GetSrvShard(ctx, cell, "test_keyspace", "-10"); err != topo.ErrNoNode {
		t.Errorf("GetSrvShard(deleted) got %v, want ErrNoNode", err)
	}

	// Re-add endpoints.
	if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints, -1); err != nil {
		t.Fatalf("UpdateEndPoints(master): %v", err)
	}

	addrs, version, err := ts.GetEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER)
	if err != nil {
		t.Errorf("GetEndPoints: %v", err)
	}
	if len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 {
		t.Errorf("GetEndPoints(1): %v", addrs)
	}
	if pm := addrs.Entries[0].PortMap; pm["vt"] != 1234 || pm["mysql"] != 1235 || pm["grpc"] != 1236 {
		t.Errorf("GetEndPoints(1).PortMap: want %v, got %v", endPoints.Entries[0].PortMap, pm)
	}

	// Update with the wrong version.
	if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints, version+1); err != topo.ErrBadVersion {
		t.Fatalf("UpdateEndPoints(master): err = %v, want topo.ErrBadVersion", err)
	}
	// Update with the right version.
	if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints, version); err != nil {
		t.Fatalf("UpdateEndPoints(master): %v", err)
	}
	// Update existing EndPoints unconditionally.
	if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints, -1); err != nil {
		t.Fatalf("UpdateEndPoints(master): %v", err)
	}

	if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_REPLICA, -1); err != topo.ErrNoNode {
		t.Errorf("DeleteEndPoints(unknown): %v", err)
	}
	if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER, -1); err != nil {
		t.Errorf("DeleteEndPoints(master): %v", err)
	}

	// test cell/keyspace/shard entries (SrvShard)
	srvShard := &pb.SrvShard{
		Name:       "-10",
		KeyRange:   newKeyRange3("-10"),
		MasterCell: "test",
	}
	if err := ts.UpdateSrvShard(ctx, cell, "test_keyspace", "-10", srvShard); err != nil {
		t.Fatalf("UpdateSrvShard(1): %v", err)
	}
	if _, err := ts.GetSrvShard(ctx, cell, "test_keyspace", "666"); err != topo.ErrNoNode {
		t.Errorf("GetSrvShard(invalid): %v", err)
	}
	if s, err := ts.GetSrvShard(ctx, cell, "test_keyspace", "-10"); err != nil ||
		s.Name != "-10" ||
		!key.KeyRangeEqual(s.KeyRange, newKeyRange3("-10")) ||
		s.MasterCell != "test" {
		t.Errorf("GetSrvShard(valid): %v", err)
	}

	// test cell/keyspace entries (SrvKeyspace)
	srvKeyspace := topo.SrvKeyspace{
		Partitions: map[topo.TabletType]*topo.KeyspacePartition{
			topo.TYPE_MASTER: &topo.KeyspacePartition{
				ShardReferences: []topo.ShardReference{
					topo.ShardReference{
						Name:     "-80",
						KeyRange: newKeyRange("-80"),
					},
				},
			},
		},
		ShardingColumnName: "video_id",
		ShardingColumnType: key.KIT_UINT64,
		ServedFrom: map[topo.TabletType]string{
			topo.TYPE_REPLICA: "other_keyspace",
		},
	}
	if err := ts.UpdateSrvKeyspace(ctx, cell, "test_keyspace", &srvKeyspace); err != nil {
		t.Errorf("UpdateSrvKeyspace(1): %v", err)
	}
	if _, err := ts.GetSrvKeyspace(ctx, cell, "test_keyspace666"); err != topo.ErrNoNode {
		t.Errorf("GetSrvKeyspace(invalid): %v", err)
	}
	if k, err := ts.GetSrvKeyspace(ctx, cell, "test_keyspace"); err != nil ||
		len(k.Partitions) != 1 ||
		len(k.Partitions[topo.TYPE_MASTER].ShardReferences) != 1 ||
		k.Partitions[topo.TYPE_MASTER].ShardReferences[0].Name != "-80" ||
		k.Partitions[topo.TYPE_MASTER].ShardReferences[0].KeyRange != newKeyRange("-80") ||
		k.ShardingColumnName != "video_id" ||
		k.ShardingColumnType != key.KIT_UINT64 ||
		k.ServedFrom[topo.TYPE_REPLICA] != "other_keyspace" {
		t.Errorf("GetSrvKeyspace(valid): %v %v", err, k)
	}
	if k, err := ts.GetSrvKeyspaceNames(ctx, cell); err != nil || len(k) != 1 || k[0] != "test_keyspace" {
		t.Errorf("GetSrvKeyspaceNames(): %v", err)
	}

	// Check that updating a SrvKeyspace out of the blue works.
	if err := ts.UpdateSrvKeyspace(ctx, cell, "unknown_keyspace_so_far", &srvKeyspace); err != nil {
		t.Fatalf("UpdateSrvKeyspace(2): %v", err)
	}
	if k, err := ts.GetSrvKeyspace(ctx, cell, "unknown_keyspace_so_far"); err != nil ||
		len(k.Partitions) != 1 ||
		len(k.Partitions[topo.TYPE_MASTER].ShardReferences) != 1 ||
		k.Partitions[topo.TYPE_MASTER].ShardReferences[0].Name != "-80" ||
		k.Partitions[topo.TYPE_MASTER].ShardReferences[0].KeyRange != newKeyRange("-80") ||
		k.ShardingColumnName != "video_id" ||
		k.ShardingColumnType != key.KIT_UINT64 ||
		k.ServedFrom[topo.TYPE_REPLICA] != "other_keyspace" {
		t.Errorf("GetSrvKeyspace(out of the blue): %v %v", err, *k)
	}

	// Delete the SrvKeyspace.
	if err := ts.DeleteSrvKeyspace(ctx, cell, "unknown_keyspace_so_far"); err != nil {
		t.Fatalf("DeleteSrvKeyspace: %v", err)
	}
	if _, err := ts.GetSrvKeyspace(ctx, cell, "unknown_keyspace_so_far"); err != topo.ErrNoNode {
		t.Errorf("GetSrvKeyspace(deleted) got %v, want ErrNoNode", err)
	}
}
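// A hedged sketch of the read-modify-write pattern the versioned calls above
// exercise: fetch the current EndPoints together with their version, mutate,
// and write back with that version so a concurrent writer surfaces as
// topo.ErrBadVersion instead of a silent overwrite. The helper name, the
// retry bound, and the mutate callback are assumptions for illustration.
func updateEndPointsGuarded(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topo.TabletType, mutate func(*pb.EndPoints)) error {
	for attempt := 0; attempt < 3; attempt++ {
		// Read the current value and its version.
		addrs, version, err := ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType)
		if err != nil {
			return err
		}
		mutate(addrs)
		// Conditional write: only succeeds if the version is unchanged.
		err = topo.UpdateEndPoints(ctx, ts, cell, keyspace, shard, tabletType, addrs, version)
		if err != topo.ErrBadVersion {
			return err // nil on success, or a non-retryable error
		}
		// Someone else wrote in between; re-read and retry.
	}
	return topo.ErrBadVersion
}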
// rebuildShardSrvGraph writes the serving graph data to the cells.
func rebuildShardSrvGraph(ts topo.Server, shardInfo *topo.ShardInfo, tablets []*topo.TabletInfo, cells []string) error {
	log.Infof("rebuildShardSrvGraph %v/%v", shardInfo.Keyspace(), shardInfo.ShardName())

	// Get all existing db types so they can be removed if nothing
	// has been edited. This applies to all cells, which can't
	// be determined until you walk through all the tablets.
	//
	// existingDbTypeLocations is a map:
	//   key:   {cell,keyspace,shard,tabletType}
	//   value: true
	existingDbTypeLocations := make(map[cellKeyspaceShardType]bool)

	// Update db type addresses in the serving graph.
	//
	// locationAddrsMap is a map:
	//   key:   {cell,keyspace,shard,tabletType}
	//   value: EndPoints (list of server records)
	locationAddrsMap := make(map[cellKeyspaceShardType]*topo.EndPoints)

	// we keep track of the existingDbTypeLocations we've already looked at
	knownShardLocations := make(map[cellKeyspaceShard]bool)

	for _, tablet := range tablets {
		// only look at tablets in the cells we want to rebuild
		if !topo.InCellList(tablet.Tablet.Alias.Cell, cells) {
			continue
		}

		// this is {cell,keyspace,shard}
		// we'll get the children to find the existing types
		shardLocation := cellKeyspaceShard{tablet.Tablet.Alias.Cell, tablet.Tablet.Keyspace, tablet.Shard}

		// only need to do this once per cell
		if !knownShardLocations[shardLocation] {
			log.Infof("Getting tablet types on cell %v for %v/%v", tablet.Tablet.Alias.Cell, tablet.Tablet.Keyspace, tablet.Shard)
			tabletTypes, err := ts.GetSrvTabletTypesPerShard(tablet.Tablet.Alias.Cell, tablet.Tablet.Keyspace, tablet.Shard)
			if err != nil {
				if err != topo.ErrNoNode {
					return err
				}
			} else {
				for _, tabletType := range tabletTypes {
					existingDbTypeLocations[cellKeyspaceShardType{tablet.Tablet.Alias.Cell, tablet.Tablet.Keyspace, tablet.Shard, tabletType}] = true
				}
			}
			knownShardLocations[shardLocation] = true
		}

		// Check IsInServingGraph after we have populated
		// existingDbTypeLocations so we properly prune data
		// if the definition of serving type changes.
		if !tablet.IsInServingGraph() {
			continue
		}

		location := cellKeyspaceShardType{tablet.Tablet.Alias.Cell, tablet.Keyspace, tablet.Shard, tablet.Type}
		addrs, ok := locationAddrsMap[location]
		if !ok {
			addrs = topo.NewEndPoints()
			locationAddrsMap[location] = addrs
		}

		entry, err := tablet.Tablet.EndPoint()
		if err != nil {
			log.Warningf("EndPointForTablet failed for tablet %v: %v", tablet.Alias, err)
			continue
		}
		addrs.Entries = append(addrs.Entries, *entry)
	}

	// We're going to parallelize a lot here.
	rec := concurrency.AllErrorRecorder{}
	wg := sync.WaitGroup{}

	// Write all the {cell,keyspace,shard,type} nodes everywhere
	// we want them.
	for location, addrs := range locationAddrsMap {
		wg.Add(1)
		go func(location cellKeyspaceShardType, addrs *topo.EndPoints) {
			log.Infof("saving serving graph for cell %v shard %v/%v tabletType %v", location.cell, location.keyspace, location.shard, location.tabletType)
			if err := ts.UpdateEndPoints(location.cell, location.keyspace, location.shard, location.tabletType, addrs); err != nil {
				rec.RecordError(fmt.Errorf("writing endpoints for cell %v shard %v/%v tabletType %v failed: %v", location.cell, location.keyspace, location.shard, location.tabletType, err))
			}
			wg.Done()
		}(location, addrs)
	}

	// Delete any pre-existing paths that were not updated by this process.
	// That's existingDbTypeLocations - locationAddrsMap.
	for dbTypeLocation := range existingDbTypeLocations {
		if _, ok := locationAddrsMap[dbTypeLocation]; !ok {
			cell := dbTypeLocation.cell
			if !topo.InCellList(cell, cells) {
				continue
			}

			wg.Add(1)
			go func(dbTypeLocation cellKeyspaceShardType) {
				log.Infof("removing stale db type from serving graph: %v", dbTypeLocation)
				if err := ts.DeleteEndPoints(dbTypeLocation.cell, dbTypeLocation.keyspace, dbTypeLocation.shard, dbTypeLocation.tabletType); err != nil {
					log.Warningf("unable to remove stale db type %v from serving graph: %v", dbTypeLocation, err)
				}
				wg.Done()
			}(dbTypeLocation)
		}
	}

	// Wait until we're done with the background work before doing the rest.
	// FIXME(alainjobart) this wouldn't be necessary if UpdateSrvShard
	// below was creating the zookeeper nodes recursively.
	wg.Wait()
	if err := rec.Error(); err != nil {
		return err
	}

	// Update per-shard information per cell-specific serving path.
	//
	// srvShardByPath is a map:
	//   key:   {cell,keyspace,shard}
	//   value: SrvShard
	// This will create all the SrvShard objects.
	srvShardByPath := make(map[cellKeyspaceShard]*topo.SrvShard)
	for location := range locationAddrsMap {
		// location will be {cell,keyspace,shard,type}
		srvShardPath := cellKeyspaceShard{location.cell, location.keyspace, location.shard}
		srvShard, ok := srvShardByPath[srvShardPath]
		if !ok {
			srvShard = &topo.SrvShard{
				KeyRange:    shardInfo.KeyRange,
				ServedTypes: shardInfo.ServedTypes,
				TabletTypes: make([]topo.TabletType, 0, 2),
			}
			srvShardByPath[srvShardPath] = srvShard
		}
		foundType := false
		for _, t := range srvShard.TabletTypes {
			if t == location.tabletType {
				foundType = true
			}
		}
		if !foundType {
			srvShard.TabletTypes = append(srvShard.TabletTypes, location.tabletType)
		}
	}

	// Save the shard entries.
	for cks, srvShard := range srvShardByPath {
		wg.Add(1)
		go func(cks cellKeyspaceShard, srvShard *topo.SrvShard) {
			log.Infof("updating shard serving graph in cell %v for %v/%v", cks.cell, cks.keyspace, cks.shard)
			if err := ts.UpdateSrvShard(cks.cell, cks.keyspace, cks.shard, srvShard); err != nil {
				rec.RecordError(fmt.Errorf("writing serving data in cell %v for %v/%v failed: %v", cks.cell, cks.keyspace, cks.shard, err))
			}
			wg.Done()
		}(cks, srvShard)
	}
	wg.Wait()
	return rec.Error()
}
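// For reference, a sketch of the composite map keys the function above relies
// on. The field names match how they are accessed in the code (.cell,
// .keyspace, .shard, .tabletType), but the exact declarations are an
// assumption since they are not part of this excerpt.

// cellKeyspaceShard identifies a shard's serving-graph data in one cell.
type cellKeyspaceShard struct {
	cell     string
	keyspace string
	shard    string
}

// cellKeyspaceShardType additionally pins the tablet type; it indexes the
// EndPoints written for each {cell, keyspace, shard, type} path.
type cellKeyspaceShardType struct {
	cell       string
	keyspace   string
	shard      string
	tabletType topo.TabletType
}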