// StreamKeyRange starts a server-side stream of binlog transactions for the
// given key range, beginning at the given replication position.
//
// It returns a channel of transactions and an ErrFunc. The channel is closed
// when the stream terminates (server finished, stream error, or context
// cancellation). The ErrFunc must only be called after the channel is closed;
// it then reports the final stream error, if any. (finalError is written by
// the goroutine strictly before `close(result)`, so draining the channel to
// closure establishes the necessary happens-before for reading it.)
func (client *client) StreamKeyRange(ctx context.Context, position string, keyspaceIdType key.KeyspaceIdType, keyRange *pb.KeyRange, charset *mproto.Charset) (chan *proto.BinlogTransaction, binlogplayer.ErrFunc, error) {
	req := &proto.KeyRangeRequest{
		Position:       position,
		KeyspaceIdType: keyspaceIdType,
		KeyRange:       key.ProtoToKeyRange(keyRange),
		Charset:        charset,
	}
	result := make(chan *proto.BinlogTransaction, 10)
	responseChan := make(chan *proto.BinlogTransaction, 10)
	resp := client.Client.StreamGo("UpdateStream.StreamKeyRange", req, responseChan)
	var finalError error
	go func() {
		// Closing result signals consumers that the stream is done and that
		// finalError may now be read through the returned ErrFunc.
		defer close(result)
		for {
			select {
			case <-ctx.Done():
				// Caller gave up: surface the context error.
				finalError = ctx.Err()
				return
			case r, ok := <-responseChan:
				if !ok {
					// no more results from the server
					finalError = resp.Error
					return
				}
				result <- r
			}
		}
	}()
	return result, func() error { return finalError }, nil
}
// ProtoToSrvKeyspace turns a proto to a SrvKeyspace.
// It deep-converts the partition map, shard references, and the
// ServedFrom map (the latter only allocated when non-empty, so a
// missing ServedFrom stays nil).
func ProtoToSrvKeyspace(s *pb.SrvKeyspace) *SrvKeyspace {
	result := &SrvKeyspace{
		Partitions:         make(map[TabletType]*KeyspacePartition),
		ShardingColumnName: s.ShardingColumnName,
		ShardingColumnType: key.ProtoToKeyspaceIdType(s.ShardingColumnType),
		SplitShardCount:    s.SplitShardCount,
	}
	for _, p := range s.Partitions {
		// One KeyspacePartition per served tablet type.
		tt := ProtoToTabletType(p.ServedType)
		partition := &KeyspacePartition{}
		for _, sr := range p.ShardReferences {
			partition.ShardReferences = append(partition.ShardReferences, ShardReference{
				Name:     sr.Name,
				KeyRange: key.ProtoToKeyRange(sr.KeyRange),
			})
		}
		result.Partitions[tt] = partition
	}
	if len(s.ServedFrom) > 0 {
		result.ServedFrom = make(map[TabletType]string)
		for _, sf := range s.ServedFrom {
			tt := ProtoToTabletType(sf.TabletType)
			result.ServedFrom[tt] = sf.Keyspace
		}
	}
	return result
}
func newKeyRange(value string) key.KeyRange { _, result, err := topo.ValidateShardName(value) if err != nil { panic(err) } return key.ProtoToKeyRange(result) }
// Complete validates and normalizes the tablet. If the shard name // contains a '-' it is going to try to infer the keyrange from it. func (tablet *Tablet) Complete() error { shard, kr, err := ValidateShardName(tablet.Shard) if err != nil { return err } tablet.Shard = shard tablet.KeyRange = key.ProtoToKeyRange(kr) return nil }
// NewRowSplitter returns a new row splitter for the given shard distribution. func NewRowSplitter(shardInfos []*topo.ShardInfo, typ key.KeyspaceIdType, valueIndex int) *RowSplitter { result := &RowSplitter{ Type: typ, ValueIndex: valueIndex, KeyRanges: make([]key.KeyRange, len(shardInfos)), } for i, si := range shardInfos { result.KeyRanges[i] = key.ProtoToKeyRange(si.KeyRange) } return result }
func TestVTGateSplitQuery(t *testing.T) { keyspace := "TestVTGateSplitQuery" keyranges, _ := key.ParseShardingSpec(DefaultShardSpec) s := createSandbox(keyspace) for _, kr := range keyranges { s.MapTestConn(key.KeyRangeString(kr), &sandboxConn{}) } sql := "select col1, col2 from table" splitCount := 24 result := new(proto.SplitQueryResult) err := rpcVTGate.SplitQuery(context.Background(), keyspace, sql, nil, "", splitCount, result) if err != nil { t.Errorf("want nil, got %v", err) } _, err = getAllShards(DefaultShardSpec) // Total number of splits should be number of shards * splitsPerShard if splitCount != len(result.Splits) { t.Errorf("wrong number of splits, want \n%+v, got \n%+v", splitCount, len(result.Splits)) } actualSqlsByKeyRange := map[kproto.KeyRange][]string{} for _, split := range result.Splits { if split.Size != sandboxSQRowCount { t.Errorf("wrong split size, want \n%+v, got \n%+v", sandboxSQRowCount, split.Size) } if split.Query.Keyspace != keyspace { t.Errorf("wrong split size, want \n%+v, got \n%+v", keyspace, split.Query.Keyspace) } if len(split.Query.KeyRanges) != 1 { t.Errorf("wrong number of keyranges, want \n%+v, got \n%+v", 1, len(split.Query.KeyRanges)) } if split.Query.TabletType != topo.TYPE_RDONLY { t.Errorf("wrong tablet type, want \n%+v, got \n%+v", topo.TYPE_RDONLY, split.Query.TabletType) } kr := split.Query.KeyRanges[0] actualSqlsByKeyRange[kr] = append(actualSqlsByKeyRange[kr], split.Query.Sql) } expectedSqlsByKeyRange := map[kproto.KeyRange][]string{} for _, kr := range keyranges { expectedSqlsByKeyRange[kproto.ProtoToKeyRange(kr)] = []string{ "select col1, col2 from table /*split 0 */", "select col1, col2 from table /*split 1 */", "select col1, col2 from table /*split 2 */", } } if !reflect.DeepEqual(actualSqlsByKeyRange, expectedSqlsByKeyRange) { t.Errorf("splits contain the wrong sqls and/or keyranges, got: %v, want: %v", actualSqlsByKeyRange, expectedSqlsByKeyRange) } }
// StreamKeyRange is part of the pbs.UpdateStreamServer interface func (server *UpdateStream) StreamKeyRange(req *pb.StreamKeyRangeRequest, stream pbs.UpdateStream_StreamKeyRangeServer) (err error) { defer server.updateStream.HandlePanic(&err) return server.updateStream.StreamKeyRange(&proto.KeyRangeRequest{ Position: myproto.ProtoToReplicationPosition(req.Position), KeyspaceIdType: key.ProtoToKeyspaceIdType(req.KeyspaceIdType), KeyRange: key.ProtoToKeyRange(req.KeyRange), Charset: mproto.ProtoToCharset(req.Charset), }, func(reply *proto.BinlogTransaction) error { return stream.Send(&pb.StreamKeyRangeResponse{ BinlogTransaction: proto.BinlogTransactionToProto(reply), }) }) }
// This maps a list of keyranges to shard names. func resolveKeyRangeToShards(allShards []topo.ShardReference, kr *pb.KeyRange) ([]string, error) { shards := make([]string, 0, 1) if !key.KeyRangeIsPartial(kr) { for j := 0; j < len(allShards); j++ { shards = append(shards, allShards[j].Name) } return shards, nil } for j := 0; j < len(allShards); j++ { shard := allShards[j] if key.KeyRangesIntersect(key.ProtoToKeyRange(kr), shard.KeyRange) { shards = append(shards, shard.Name) } } return shards, nil }
// SplitQueryKeyRange scatters a SplitQuery request to all shards. For a set of
// splits received from a shard, it constructs KeyRange queries by
// appending that shard's keyrange to the splits. Aggregates all splits across
// all shards in no specific order and returns.
func (stc *ScatterConn) SplitQueryKeyRange(ctx context.Context, sql string, bindVariables map[string]interface{}, splitColumn string, splitCount int, keyRangeByShard map[string]*pb.KeyRange, keyspace string) ([]proto.SplitQueryPart, error) {
	// Splits are always executed against rdonly tablets.
	tabletType := pb.TabletType_RDONLY
	// actionFunc runs once per shard (driven by multiGo below) and pushes
	// that shard's splits onto the results channel.
	actionFunc := func(shard string, transactionID int64, results chan<- interface{}) error {
		// Get all splits from this shard
		queries, err := stc.gateway.SplitQuery(ctx, keyspace, shard, tabletType, sql, bindVariables, splitColumn, splitCount)
		if err != nil {
			return err
		}
		// Append the keyrange for this shard to all the splits received
		keyranges := []kproto.KeyRange{kproto.ProtoToKeyRange(keyRangeByShard[shard])}
		splits := []proto.SplitQueryPart{}
		for _, query := range queries {
			krq := &proto.KeyRangeQuery{
				Sql:           query.Query.Sql,
				BindVariables: query.Query.BindVariables,
				Keyspace:      keyspace,
				KeyRanges:     keyranges,
				TabletType:    topo.TYPE_RDONLY,
			}
			split := proto.SplitQueryPart{
				Query: krq,
				Size:  query.RowCount,
			}
			splits = append(splits, split)
		}
		// Push all the splits from this shard to results channel
		results <- splits
		return nil
	}
	// The target shard set is the key set of keyRangeByShard.
	shards := []string{}
	for shard := range keyRangeByShard {
		shards = append(shards, shard)
	}
	allSplits, allErrors := stc.multiGo(ctx, "SplitQuery", keyspace, shards, tabletType, NewSafeSession(&proto.Session{}), false, actionFunc)
	// Drain the per-shard results; order across shards is unspecified.
	splits := []proto.SplitQueryPart{}
	for s := range allSplits {
		splits = append(splits, s.([]proto.SplitQueryPart)...)
	}
	if allErrors.HasErrors() {
		err := allErrors.AggrError(stc.aggregateErrors)
		return nil, err
	}
	return splits, nil
}
// ProtoToTablet turns a proto to a Tablet func ProtoToTablet(t *pb.Tablet) *Tablet { result := &Tablet{ Alias: ProtoToTabletAlias(t.Alias), Hostname: t.Hostname, IPAddr: t.Ip, Portmap: make(map[string]int), Keyspace: t.Keyspace, Shard: t.Shard, KeyRange: key.ProtoToKeyRange(t.KeyRange), Type: ProtoToTabletType(t.Type), DbNameOverride: t.DbNameOverride, Tags: t.Tags, Health: t.HealthMap, } for k, v := range t.PortMap { result.Portmap[k] = int(v) } return result }
// InitTablet creates or updates a tablet. If no parent is specified
// in the tablet, and the tablet has a slave type, we will find the
// appropriate parent. If createShardAndKeyspace is true and the
// parent keyspace or shard don't exist, they will be created. If
// update is true, and a tablet with the same ID exists, update it.
// If Force is true, and a tablet with the same ID already exists, it
// will be scrapped and deleted, and then recreated.
func (wr *Wrangler) InitTablet(ctx context.Context, tablet *topo.Tablet, force, createShardAndKeyspace, update bool) error {
	if err := topo.TabletComplete(tablet); err != nil {
		return err
	}
	if topo.IsInReplicationGraph(tablet.Type) {
		// get the shard, possibly creating it
		var err error
		var si *topo.ShardInfo
		if createShardAndKeyspace {
			// create the parent keyspace and shard if needed
			si, err = topotools.GetOrCreateShard(ctx, wr.ts, tablet.Keyspace, tablet.Shard)
		} else {
			si, err = wr.ts.GetShard(ctx, tablet.Keyspace, tablet.Shard)
			if err == topo.ErrNoNode {
				return fmt.Errorf("missing parent shard, use -parent option to create it, or CreateKeyspace / CreateShard")
			}
		}

		// get the shard, checks a couple things
		if err != nil {
			return fmt.Errorf("cannot get (or create) shard %v/%v: %v", tablet.Keyspace, tablet.Shard, err)
		}
		// The tablet's key range must match its shard's exactly.
		if key.ProtoToKeyRange(si.KeyRange) != tablet.KeyRange {
			return fmt.Errorf("shard %v/%v has a different KeyRange: %v != %v", tablet.Keyspace, tablet.Shard, si.KeyRange, tablet.KeyRange)
		}
		// Refuse to silently replace an existing, different master
		// unless force was passed.
		if tablet.Type == topo.TYPE_MASTER && !topo.TabletAliasIsZero(si.MasterAlias) && topo.ProtoToTabletAlias(si.MasterAlias) != tablet.Alias && !force {
			return fmt.Errorf("creating this tablet would override old master %v in shard %v/%v", si.MasterAlias, tablet.Keyspace, tablet.Shard)
		}

		// update the shard record if needed
		if err := wr.updateShardCellsAndMaster(ctx, si, topo.TabletAliasToProto(tablet.Alias), topo.TabletTypeToProto(tablet.Type), force); err != nil {
			return err
		}
	}

	err := topo.CreateTablet(ctx, wr.ts, tablet)
	if err != nil && err == topo.ErrNodeExists {
		// A tablet with this alias already exists.
		// Try to update nicely, but if it fails fall back to force behavior.
		if update || force {
			oldTablet, err := wr.ts.GetTablet(ctx, tablet.Alias)
			if err != nil {
				wr.Logger().Warningf("failed reading tablet %v: %v", tablet.Alias, err)
			} else {
				// Only update in place if keyspace/shard are unchanged;
				// otherwise fall through to the Scrap path below.
				if oldTablet.Keyspace == tablet.Keyspace && oldTablet.Shard == tablet.Shard {
					*(oldTablet.Tablet) = *tablet
					if err := topo.UpdateTablet(ctx, wr.ts, oldTablet); err != nil {
						wr.Logger().Warningf("failed updating tablet %v: %v", tablet.Alias, err)
						// now fall through the Scrap case
					} else {
						if !topo.IsInReplicationGraph(tablet.Type) {
							return nil
						}
						if err := topo.UpdateTabletReplicationData(ctx, wr.ts, tablet); err != nil {
							wr.Logger().Warningf("failed updating tablet replication data for %v: %v", tablet.Alias, err)
							// now fall through the Scrap case
						} else {
							return nil
						}
					}
				}
			}
		}
		if force {
			// Last resort: scrap and delete the old tablet record,
			// then recreate from scratch.
			if err = wr.Scrap(ctx, tablet.Alias, force, false); err != nil {
				wr.Logger().Errorf("failed scrapping tablet %v: %v", tablet.Alias, err)
				return err
			}
			if err := wr.ts.DeleteTablet(ctx, tablet.Alias); err != nil {
				// we ignore this
				wr.Logger().Errorf("failed deleting tablet %v: %v", tablet.Alias, err)
			}
			return topo.CreateTablet(ctx, wr.ts, tablet)
		}
	}
	return err
}
// This function should only be used with an action lock on the keyspace // - otherwise the consistency of the serving graph data can't be // guaranteed. // // Take data from the global keyspace and rebuild the local serving // copies in each cell. func (wr *Wrangler) rebuildKeyspace(ctx context.Context, keyspace string, cells []string, rebuildSrvShards bool) error { wr.logger.Infof("rebuildKeyspace %v", keyspace) ki, err := wr.ts.GetKeyspace(ctx, keyspace) if err != nil { return err } var shardCache map[string]*topo.ShardInfo if rebuildSrvShards { shards, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { return nil } // Rebuild all shards in parallel, save the shards shardCache = make(map[string]*topo.ShardInfo) wg := sync.WaitGroup{} mu := sync.Mutex{} rec := concurrency.FirstErrorRecorder{} for _, shard := range shards { wg.Add(1) go func(shard string) { if shardInfo, err := wr.RebuildShardGraph(ctx, keyspace, shard, cells); err != nil { rec.RecordError(fmt.Errorf("RebuildShardGraph failed: %v/%v %v", keyspace, shard, err)) } else { mu.Lock() shardCache[shard] = shardInfo mu.Unlock() } wg.Done() }(shard) } wg.Wait() if rec.HasErrors() { return rec.Error() } } else { shardCache, err = wr.ts.FindAllShardsInKeyspace(ctx, keyspace) if err != nil { return err } } // Build the list of cells to work on: we get the union // of all the Cells of all the Shards, limited to the provided cells. // // srvKeyspaceMap is a map: // key: cell // value: topo.SrvKeyspace object being built srvKeyspaceMap := make(map[string]*topo.SrvKeyspace) wr.findCellsForRebuild(ki, shardCache, cells, srvKeyspaceMap) // Then we add the cells from the keyspaces we might be 'ServedFrom'. 
for _, ksf := range ki.ServedFroms { servedFromShards, err := wr.ts.FindAllShardsInKeyspace(ctx, ksf.Keyspace) if err != nil { return err } wr.findCellsForRebuild(ki, servedFromShards, cells, srvKeyspaceMap) } // for each entry in the srvKeyspaceMap map, we do the following: // - read the SrvShard structures for each shard / cell // - if not present, build an empty one from global Shard // - compute the union of the db types (replica, master, ...) // - sort the shards in the list by range // - check the ranges are compatible (no hole, covers everything) for cell, srvKeyspace := range srvKeyspaceMap { srvKeyspace.Partitions = make(map[topo.TabletType]*topo.KeyspacePartition) for _, si := range shardCache { servedTypes := si.GetServedTypesPerCell(cell) // for each type this shard is supposed to serve, // add it to srvKeyspace.Partitions for _, tabletType := range servedTypes { if _, ok := srvKeyspace.Partitions[tabletType]; !ok { srvKeyspace.Partitions[tabletType] = &topo.KeyspacePartition{ ShardReferences: make([]topo.ShardReference, 0), } } srvKeyspace.Partitions[tabletType].ShardReferences = append(srvKeyspace.Partitions[tabletType].ShardReferences, topo.ShardReference{ Name: si.ShardName(), KeyRange: key.ProtoToKeyRange(si.KeyRange), }) } } if err := wr.orderAndCheckPartitions(cell, srvKeyspace); err != nil { return err } } // and then finally save the keyspace objects for cell, srvKeyspace := range srvKeyspaceMap { wr.logger.Infof("updating keyspace serving graph in cell %v for %v", cell, keyspace) if err := wr.ts.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace); err != nil { return fmt.Errorf("writing serving data failed: %v", err) } } return nil }