// FindOverlappingShards will return an array of OverlappingShards
// for the provided keyspace.
// We do not support more than two overlapping shards (for instance,
// having 40-80, 40-60 and 40-50 in the same keyspace is not supported and
// will return an error).
// If shards don't perfectly overlap, they are not returned.
func FindOverlappingShards(ctx context.Context, ts topo.Server, keyspace string) ([]*OverlappingShards, error) {
	shardMap, err := ts.FindAllShardsInKeyspace(ctx, keyspace)
	if err != nil {
		return nil, err
	}

	return findOverlappingShards(shardMap)
}
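// The sketch below is illustrative only, not part of this file: it shows how
// a resharding tool might consume the result. The Left/Right field names on
// OverlappingShards and the log variable are assumptions here.
//
//	overlapping, err := FindOverlappingShards(ctx, ts, "test_keyspace")
//	if err != nil {
//		return err
//	}
//	for _, os := range overlapping {
//		// Each OverlappingShards pairs the two sets of shards whose key
//		// ranges cover the same span, e.g. {40-80} vs {40-60, 60-80}.
//		log.Infof("overlap: %v <-> %v", os.Left, os.Right)
//	}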
// rebuildKeyspace should only be used with an action lock on the keyspace
// - otherwise the consistency of the serving graph data can't be
// guaranteed.
//
// Take data from the global keyspace and rebuild the local serving
// copies in each cell.
func rebuildKeyspace(ctx context.Context, log logutil.Logger, ts topo.Server, keyspace string, cells []string, rebuildSrvShards bool) error {
	log.Infof("rebuildKeyspace %v", keyspace)

	ki, err := ts.GetKeyspace(ctx, keyspace)
	if err != nil {
		return err
	}

	var shardCache map[string]*topo.ShardInfo
	if rebuildSrvShards {
		shards, err := ts.GetShardNames(ctx, keyspace)
		if err != nil {
			return err
		}

		// Rebuild all shards in parallel, save the shards.
		// mu protects shardCache against concurrent writes from the
		// goroutines below.
		shardCache = make(map[string]*topo.ShardInfo)
		wg := sync.WaitGroup{}
		mu := sync.Mutex{}
		rec := concurrency.FirstErrorRecorder{}
		for _, shard := range shards {
			wg.Add(1)
			go func(shard string) {
				if shardInfo, err := RebuildShard(ctx, log, ts, keyspace, shard, cells); err != nil {
					rec.RecordError(fmt.Errorf("RebuildShard failed: %v/%v %v", keyspace, shard, err))
				} else {
					mu.Lock()
					shardCache[shard] = shardInfo
					mu.Unlock()
				}
				wg.Done()
			}(shard)
		}
		wg.Wait()
		if rec.HasErrors() {
			return rec.Error()
		}
	} else {
		shardCache, err = ts.FindAllShardsInKeyspace(ctx, keyspace)
		if err != nil {
			return err
		}
	}

	// Build the list of cells to work on: we get the union
	// of all the Cells of all the Shards, limited to the provided cells.
	//
	// srvKeyspaceMap is a map:
	//   key: cell
	//   value: topo.SrvKeyspace object being built
	srvKeyspaceMap := make(map[string]*topodatapb.SrvKeyspace)
	findCellsForRebuild(ki, shardCache, cells, srvKeyspaceMap)

	// Then we add the cells from the keyspaces we might be 'ServedFrom'.
	for _, ksf := range ki.ServedFroms {
		servedFromShards, err := ts.FindAllShardsInKeyspace(ctx, ksf.Keyspace)
		if err != nil {
			return err
		}
		findCellsForRebuild(ki, servedFromShards, cells, srvKeyspaceMap)
	}

	// For each entry in the srvKeyspaceMap map, we do the following:
	// - read the SrvShard structures for each shard / cell
	// - if not present, build an empty one from global Shard
	// - compute the union of the db types (replica, master, ...)
	// - sort the shards in the list by range
	// - check the ranges are compatible (no hole, covers everything)
	for cell, srvKeyspace := range srvKeyspaceMap {
		for _, si := range shardCache {
			servedTypes := si.GetServedTypesPerCell(cell)

			// For each type this shard is supposed to serve,
			// add it to srvKeyspace.Partitions.
			for _, tabletType := range servedTypes {
				partition := topoproto.SrvKeyspaceGetPartition(srvKeyspace, tabletType)
				if partition == nil {
					partition = &topodatapb.SrvKeyspace_KeyspacePartition{
						ServedType: tabletType,
					}
					srvKeyspace.Partitions = append(srvKeyspace.Partitions, partition)
				}
				partition.ShardReferences = append(partition.ShardReferences, &topodatapb.ShardReference{
					Name:     si.ShardName(),
					KeyRange: si.KeyRange,
				})
			}
		}

		if err := orderAndCheckPartitions(cell, srvKeyspace); err != nil {
			return err
		}
	}

	// And then finally save the keyspace objects.
	for cell, srvKeyspace := range srvKeyspaceMap {
		log.Infof("updating keyspace serving graph in cell %v for %v", cell, keyspace)
		if err := ts.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace); err != nil {
			return fmt.Errorf("writing serving data failed: %v", err)
		}
	}
	return nil
}
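// A minimal sketch of how a caller would honor the locking contract on
// rebuildKeyspace; the actionnode-based lock API shown here is an assumption
// about the surrounding package, not code from this file.
//
//	func RebuildKeyspace(ctx context.Context, log logutil.Logger, ts topo.Server, keyspace string, cells []string, rebuildSrvShards bool) error {
//		// Acquire the keyspace action lock first, so concurrent rebuilds
//		// cannot interleave their serving-graph writes.
//		node := actionnode.RebuildKeyspace()
//		lockPath, err := node.LockKeyspace(ctx, ts, keyspace)
//		if err != nil {
//			return err
//		}
//		err = rebuildKeyspace(ctx, log, ts, keyspace, cells, rebuildSrvShards)
//		// Always release the lock, propagating the rebuild error.
//		return node.UnlockKeyspace(ctx, ts, keyspace, lockPath, err)
//	}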