// NewQueryResultReaderForTablet creates a new QueryResultReader for
// the provided tablet / sql query.
func NewQueryResultReaderForTablet(ts topo.Server, tabletAlias topo.TabletAlias, sql string) (*QueryResultReader, error) {
	tablet, err := ts.GetTablet(tabletAlias)
	if err != nil {
		return nil, err
	}

	endPoint, err := tablet.EndPoint()
	if err != nil {
		return nil, err
	}

	conn, err := tabletconn.GetDialer()(context.TODO(), *endPoint, tablet.Keyspace, tablet.Shard, 30*time.Second)
	if err != nil {
		return nil, err
	}

	sr, clientErrFn, err := conn.StreamExecute(context.TODO(), sql, make(map[string]interface{}), 0)
	if err != nil {
		return nil, err
	}

	// read the columns, or grab the error
	cols, ok := <-sr
	if !ok {
		return nil, fmt.Errorf("Cannot read Fields for query '%v': %v", sql, clientErrFn())
	}

	return &QueryResultReader{
		Output:      sr,
		Fields:      cols.Fields,
		conn:        conn,
		clientErrFn: clientErrFn,
	}, nil
}
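// exampleDrainQueryResultReader is a hypothetical usage sketch (not part of
// the original source): it drains all rows from a QueryResultReader from
// within this package. It assumes Output yields result batches exposing a
// Rows field, as cols.Fields above suggests, and that ts / tabletAlias / the
// query are already valid.
func exampleDrainQueryResultReader(ts topo.Server, tabletAlias topo.TabletAlias) error {
	qrr, err := NewQueryResultReaderForTablet(ts, tabletAlias, "SELECT pk, msg FROM test_table")
	if err != nil {
		return err
	}
	rowCount := 0
	for qr := range qrr.Output {
		rowCount += len(qr.Rows)
	}
	// once Output is closed, surface any deferred streaming error
	if err := qrr.clientErrFn(); err != nil {
		return fmt.Errorf("stream ended with error: %v", err)
	}
	fmt.Printf("read %v rows\n", rowCount)
	return nil
}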
// findMasterTargets looks up the master for the destination shard,
// and sets the destinations appropriately. It should be used when
// vtworker only wants to write to masters.
func (vscw *VerticalSplitCloneWorker) findMasterTargets() error {
	var err error

	// find all the targets in the destination keyspace / shard
	vscw.reloadAliases, err = topo.FindAllTabletAliasesInShard(context.TODO(), vscw.wr.TopoServer(), vscw.destinationKeyspace, vscw.destinationShard)
	if err != nil {
		return fmt.Errorf("cannot find all reload target tablets in %v/%v: %v", vscw.destinationKeyspace, vscw.destinationShard, err)
	}
	vscw.wr.Logger().Infof("Found %v reload target aliases", len(vscw.reloadAliases))

	// get the TabletInfo for all targets
	vscw.reloadTablets, err = topo.GetTabletMap(context.TODO(), vscw.wr.TopoServer(), vscw.reloadAliases)
	if err != nil {
		return fmt.Errorf("cannot read all reload target tablets in %v/%v: %v", vscw.destinationKeyspace, vscw.destinationShard, err)
	}

	// find and validate the master
	for tabletAlias, ti := range vscw.reloadTablets {
		if ti.Type == topo.TYPE_MASTER {
			if vscw.destinationMasterAlias.IsZero() {
				vscw.destinationMasterAlias = tabletAlias
				vscw.destinationAliases = []topo.TabletAlias{tabletAlias}
				vscw.destinationTablets = map[topo.TabletAlias]*topo.TabletInfo{tabletAlias: ti}
			} else {
				return fmt.Errorf("multiple masters in destination shard: %v and %v at least", vscw.destinationMasterAlias, tabletAlias)
			}
		}
	}
	if vscw.destinationMasterAlias.IsZero() {
		return fmt.Errorf("no master in destination shard")
	}
	vscw.wr.Logger().Infof("Found target master alias %v in shard %v/%v", vscw.destinationMasterAlias, vscw.destinationKeyspace, vscw.destinationShard)

	return nil
}
func (wr *Wrangler) finishReparent(si *topo.ShardInfo, masterElect *topo.TabletInfo, majorityRestart, leaveMasterReadOnly bool) error {
	// If the majority of slaves restarted, move ahead.
	if majorityRestart {
		if leaveMasterReadOnly {
			wr.logger.Warningf("leaving master-elect read-only, change with: vtctl SetReadWrite %v", masterElect.Alias)
		} else {
			wr.logger.Infof("marking master-elect read-write %v", masterElect.Alias)
			if err := wr.tmc.SetReadWrite(wr.ctx, masterElect); err != nil {
				wr.logger.Warningf("marking master-elect read-write failed, leaving master-elect read-only, change with: vtctl SetReadWrite %v", masterElect.Alias)
			}
		}
	} else {
		wr.logger.Warningf("minority reparent, manual fixes are needed, leaving master-elect read-only, change with: vtctl SetReadWrite %v", masterElect.Alias)
	}

	// save the new master in the shard info
	si.MasterAlias = masterElect.Alias
	if err := topo.UpdateShard(context.TODO(), wr.ts, si); err != nil {
		wr.logger.Errorf("Failed to save new master into shard: %v", err)
		return err
	}

	// We rebuild all the cells, as we may have taken tablets in and
	// out of the graph.
	wr.logger.Infof("rebuilding shard serving graph data")
	_, err := topotools.RebuildShard(context.TODO(), wr.logger, wr.ts, masterElect.Keyspace, masterElect.Shard, nil, wr.lockTimeout, interrupted)
	return err
}
// synchronizeReplication phase:
// 1 - ask the subset slave to stop replication
// 2 - sleep for 5 seconds
// 3 - ask the superset slave to stop replication
// Note this is not 100% correct, but good enough for now
func (worker *SQLDiffWorker) synchronizeReplication() error {
	worker.setState(SQLDiffSynchronizeReplication)

	// stop replication on subset slave
	worker.wr.Logger().Infof("Stopping replication on subset slave %v", worker.subset.alias)
	subsetTablet, err := worker.wr.TopoServer().GetTablet(worker.subset.alias)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	if err := worker.wr.TabletManagerClient().StopSlave(ctx, subsetTablet); err != nil {
		return fmt.Errorf("Cannot stop slave %v: %v", worker.subset.alias, err)
	}
	cancel()
	if worker.CheckInterrupted() {
		return topo.ErrInterrupted
	}

	// change the cleaner actions from ChangeSlaveType(rdonly)
	// to StartSlave() + ChangeSlaveType(spare)
	wrangler.RecordStartSlaveAction(worker.cleaner, subsetTablet, 30*time.Second)
	action, err := wrangler.FindChangeSlaveTypeActionByTarget(worker.cleaner, worker.subset.alias)
	if err != nil {
		return fmt.Errorf("cannot find ChangeSlaveType action for %v: %v", worker.subset.alias, err)
	}
	action.TabletType = topo.TYPE_SPARE

	// sleep for a few seconds
	time.Sleep(5 * time.Second)
	if worker.CheckInterrupted() {
		return topo.ErrInterrupted
	}

	// stop replication on superset slave
	worker.wr.Logger().Infof("Stopping replication on superset slave %v", worker.superset.alias)
	supersetTablet, err := worker.wr.TopoServer().GetTablet(worker.superset.alias)
	if err != nil {
		return err
	}
	ctx, cancel = context.WithTimeout(context.TODO(), 30*time.Second)
	if err := worker.wr.TabletManagerClient().StopSlave(ctx, supersetTablet); err != nil {
		return fmt.Errorf("Cannot stop slave %v: %v", worker.superset.alias, err)
	}
	cancel()

	// change the cleaner actions from ChangeSlaveType(rdonly)
	// to StartSlave() + ChangeSlaveType(spare)
	wrangler.RecordStartSlaveAction(worker.cleaner, supersetTablet, 30*time.Second)
	action, err = wrangler.FindChangeSlaveTypeActionByTarget(worker.cleaner, worker.superset.alias)
	if err != nil {
		return fmt.Errorf("cannot find ChangeSlaveType action for %v: %v", worker.superset.alias, err)
	}
	action.TabletType = topo.TYPE_SPARE

	return nil
}
// terminateHealthChecks is called when we enter lame duck mode.
// We will clean up our state, and shut down the query service.
// We only do something if we are in targetTabletType state, and then
// we just go to spare.
func (agent *ActionAgent) terminateHealthChecks(targetTabletType topo.TabletType) {
	agent.actionMutex.Lock()
	defer agent.actionMutex.Unlock()
	log.Info("agent.terminateHealthChecks is starting")

	// read the current tablet record
	tablet := agent.Tablet()
	if tablet.Type != targetTabletType {
		log.Infof("Tablet in state %v, not changing it", tablet.Type)
		return
	}

	// Change the Type to spare, update the health. Note we pass in a map
	// that's not nil, meaning we will clear it.
	if err := topotools.ChangeType(agent.TopoServer, tablet.Alias, topo.TYPE_SPARE, make(map[string]string), true /*runHooks*/); err != nil {
		log.Infof("Error updating tablet record: %v", err)
		return
	}

	// Rebuild the serving graph in our cell, only if we're dealing with
	// a serving type
	if err := agent.rebuildShardIfNeeded(tablet, targetTabletType); err != nil {
		log.Warningf("rebuildShardIfNeeded failed (will still run post action callbacks, serving graph might be out of date): %v", err)
	}

	// We've already rebuilt the shard, which is the only reason we registered
	// ourselves as OnTermSync (synchronous). The rest can be done asynchronously.
	go func() {
		// Run the post action callbacks (let them shutdown the query service)
		if err := agent.refreshTablet(context.TODO(), "terminatehealthcheck"); err != nil {
			log.Warningf("refreshTablet failed: %v", err)
		}
	}()
}
// Snapshot takes a tablet snapshot.
//
// forceMasterSnapshot: Normally a master is not a viable tablet to snapshot.
// However, there are degenerate cases where you need to override this, for
// instance the initial clone of a new master.
//
// serverMode: if specified, the server will stop its mysqld, and be
// ready to serve the data files directly. Slaves can just download
// these and use them directly. Call SnapshotSourceEnd to return into
// serving mode. If not specified, the server will create an archive
// of the files, store them locally, and restart.
//
// If error is nil, returns the SnapshotReply from the remote host,
// and the original type the server was before the snapshot.
func (wr *Wrangler) Snapshot(tabletAlias topo.TabletAlias, forceMasterSnapshot bool, snapshotConcurrency int, serverMode bool) (*actionnode.SnapshotReply, topo.TabletType, error) {
	// read the tablet to be able to RPC to it, and also to get its
	// original type
	ti, err := wr.ts.GetTablet(tabletAlias)
	if err != nil {
		return nil, "", err
	}
	originalType := ti.Tablet.Type

	// execute the remote action, log the results, save the error
	args := &actionnode.SnapshotArgs{
		Concurrency:         snapshotConcurrency,
		ServerMode:          serverMode,
		ForceMasterSnapshot: forceMasterSnapshot,
	}
	logStream, errFunc, err := wr.tmc.Snapshot(context.TODO(), ti, args, wr.ActionTimeout())
	if err != nil {
		return nil, "", err
	}
	for e := range logStream {
		wr.Logger().Infof("Snapshot(%v): %v", tabletAlias, e)
	}
	reply, err := errFunc()
	return reply, originalType, err
}
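// exampleSnapshot is a hypothetical usage sketch (not from the original
// source): take a server-mode snapshot so slaves can fetch the data files
// directly, per the serverMode doc comment above. The concurrency value is
// made up for illustration.
func exampleSnapshot(wr *Wrangler, tabletAlias topo.TabletAlias) error {
	reply, originalType, err := wr.Snapshot(tabletAlias, false /*forceMasterSnapshot*/, 4 /*snapshotConcurrency*/, true /*serverMode*/)
	if err != nil {
		return err
	}
	// in server mode, remember to call SnapshotSourceEnd afterwards to
	// return the tablet to serving mode
	wr.Logger().Infof("Snapshot(%v) done, reply %v (tablet was %v)", tabletAlias, reply, originalType)
	return nil
}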
// runSqlCommands will send the sql commands to the remote tablet.
func runSqlCommands(wr *wrangler.Wrangler, ti *topo.TabletInfo, commands []string, abort chan struct{}, disableBinLogs bool) error {
	for _, command := range commands {
		command, err := fillStringTemplate(command, map[string]string{"DatabaseName": ti.DbName()})
		if err != nil {
			return fmt.Errorf("fillStringTemplate failed: %v", err)
		}
		ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
		_, err = wr.TabletManagerClient().ExecuteFetch(ctx, ti, command, 0, false, disableBinLogs)
		if err != nil {
			return err
		}
		cancel()

		// check on abort
		select {
		case <-abort:
			return nil
		default:
		}
	}

	return nil
}
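// exampleRunSqlCommands is a hypothetical usage sketch (not from the
// original source): run a templated command against a tablet, closing the
// abort channel when an assumed interrupt channel fires. The command text
// and interrupt wiring are made up for illustration.
func exampleRunSqlCommands(wr *wrangler.Wrangler, ti *topo.TabletInfo, interrupted chan struct{}) error {
	abort := make(chan struct{})
	go func() {
		<-interrupted
		close(abort)
	}()
	commands := []string{
		// {{.DatabaseName}} is expanded by fillStringTemplate above
		"CREATE DATABASE IF NOT EXISTS {{.DatabaseName}}",
	}
	return runSqlCommands(wr, ti, commands, abort, true /*disableBinLogs*/)
}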
// RefreshTablesByShard calls RefreshState on all the tablets of a
// given type in a shard. It would work for the master, but the
// discovery wouldn't be very efficient.
func (wr *Wrangler) RefreshTablesByShard(si *topo.ShardInfo, tabletType topo.TabletType, cells []string) error {
	wr.Logger().Infof("RefreshTablesByShard called on shard %v/%v", si.Keyspace(), si.ShardName())

	tabletMap, err := topo.GetTabletMapForShardByCell(context.TODO(), wr.ts, si.Keyspace(), si.ShardName(), cells)
	switch err {
	case nil:
		// keep going
	case topo.ErrPartialResult:
		wr.Logger().Warningf("RefreshTablesByShard: got partial result for shard %v/%v, may not refresh all tablets everywhere", si.Keyspace(), si.ShardName())
	default:
		return err
	}

	// ignore errors in this phase
	wg := sync.WaitGroup{}
	for _, ti := range tabletMap {
		if ti.Type != tabletType {
			continue
		}

		wg.Add(1)
		go func(ti *topo.TabletInfo) {
			wr.Logger().Infof("Calling RefreshState on tablet %v", ti.Alias)
			if err := wr.tmc.RefreshState(wr.ctx, ti); err != nil {
				wr.Logger().Warningf("RefreshTablesByShard: failed to refresh %v: %v", ti.Alias, err)
			}
			wg.Done()
		}(ti)
	}
	wg.Wait()

	return nil
}
// findTargets phase:
// - find one rdonly in the source shard
// - mark it as 'checker' pointing back to us
// - get the aliases of all the targets
func (vscw *VerticalSplitCloneWorker) findTargets() error {
	vscw.setState(stateVSCFindTargets)

	// find an appropriate endpoint in the source shard
	var err error
	vscw.sourceAlias, err = findChecker(vscw.wr, vscw.cleaner, vscw.cell, vscw.sourceKeyspace, "0")
	if err != nil {
		return fmt.Errorf("cannot find checker for %v/%v/0: %v", vscw.cell, vscw.sourceKeyspace, err)
	}
	vscw.wr.Logger().Infof("Using tablet %v as the source", vscw.sourceAlias)

	// get the tablet info for it
	vscw.sourceTablet, err = vscw.wr.TopoServer().GetTablet(vscw.sourceAlias)
	if err != nil {
		return fmt.Errorf("cannot read tablet %v: %v", vscw.sourceAlias, err)
	}

	// stop replication on it
	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	if err := vscw.wr.TabletManagerClient().StopSlave(ctx, vscw.sourceTablet); err != nil {
		return fmt.Errorf("cannot stop replication on tablet %v: %v", vscw.sourceAlias, err)
	}
	cancel()

	wrangler.RecordStartSlaveAction(vscw.cleaner, vscw.sourceTablet, 30*time.Second)
	action, err := wrangler.FindChangeSlaveTypeActionByTarget(vscw.cleaner, vscw.sourceAlias)
	if err != nil {
		return fmt.Errorf("cannot find ChangeSlaveType action for %v: %v", vscw.sourceAlias, err)
	}
	action.TabletType = topo.TYPE_SPARE

	return vscw.findMasterTargets()
}
func FindPeer(n *core.IpfsNode, cmdparts []string) (string, error) {
	out := new(bytes.Buffer)
	if len(cmdparts) < 3 {
		return fmt.Sprintln("findpeer: '# findpeer peerid'"), ErrArgCount
	}
	var search peer.ID
	if cmdparts[2][0] == '$' {
		// arguments like '$3' refer to a locally controlled peer by index
		idx, err := strconv.Atoi(cmdparts[2][1:])
		if err != nil {
			return "", err
		}
		if idx >= len(controllers) {
			return "", errors.New("specified peernum out of range")
		}
		search = controllers[idx].PeerID()
	} else {
		search = peer.ID(b58.Decode(cmdparts[2]))
	}
	fmt.Fprintf(out, "Searching for peer: %s\n", search)
	ctx, cancel := context.WithDeadline(context.TODO(), time.Now().Add(time.Second*5))
	defer cancel()
	p, err := n.Routing.FindPeer(ctx, search)
	if err != nil {
		return "", err
	}
	fmt.Fprintf(out, "Got peer: %s\n", p)
	return out.String(), nil
}
// replicaMigrateServedFrom handles the slave (replica, rdonly) migration.
func (wr *Wrangler) replicaMigrateServedFrom(ki *topo.KeyspaceInfo, sourceShard *topo.ShardInfo, destinationShard *topo.ShardInfo, servedType topo.TabletType, cells []string, reverse bool, tables []string, ev *events.MigrateServedFrom) error {
	// Save the destination keyspace (its ServedFrom has been changed)
	event.DispatchUpdate(ev, "updating keyspace")
	if err := topo.UpdateKeyspace(wr.ts, ki); err != nil {
		return err
	}

	// Save the source shard (its blacklisted tables field has changed)
	event.DispatchUpdate(ev, "updating source shard")
	if err := sourceShard.UpdateSourceBlacklistedTables(servedType, cells, reverse, tables); err != nil {
		return fmt.Errorf("UpdateSourceBlacklistedTables(%v/%v) failed: %v", sourceShard.Keyspace(), sourceShard.ShardName(), err)
	}
	if err := topo.UpdateShard(context.TODO(), wr.ts, sourceShard); err != nil {
		return fmt.Errorf("UpdateShard(%v/%v) failed: %v", sourceShard.Keyspace(), sourceShard.ShardName(), err)
	}

	// Now refresh the source servers so they reload their
	// blacklisted table list
	event.DispatchUpdate(ev, "refreshing sources tablets state so they update their blacklisted tables")
	if err := wr.RefreshTablesByShard(sourceShard, servedType, cells); err != nil {
		return err
	}

	return nil
}
func getSrvKeyspace(rpcClient *rpcplus.Client, cell, keyspace string, verbose bool) {
	req := &topo.GetSrvKeyspaceArgs{
		Cell:     cell,
		Keyspace: keyspace,
	}
	reply := &topo.SrvKeyspace{}
	if err := rpcClient.Call(context.TODO(), "TopoReader.GetSrvKeyspace", req, reply); err != nil {
		log.Fatalf("TopoReader.GetSrvKeyspace error: %v", err)
	}
	if verbose {
		tabletTypes := make([]string, 0, len(reply.Partitions))
		for t := range reply.Partitions {
			tabletTypes = append(tabletTypes, string(t))
		}
		sort.Strings(tabletTypes)
		for _, t := range tabletTypes {
			println(fmt.Sprintf("Partitions[%v] =", t))
			for i, s := range reply.Partitions[topo.TabletType(t)].Shards {
				println(fmt.Sprintf("  Shards[%v]=%v", i, s.KeyRange.String()))
			}
		}
		for i, s := range reply.Shards {
			println(fmt.Sprintf("Shards[%v]=%v", i, s.KeyRange.String()))
		}
		for i, t := range reply.TabletTypes {
			println(fmt.Sprintf("TabletTypes[%v] = %v", i, t))
		}
	}
}
// implement Exists using Get
// FIXME(alainjobart) Maybe we should add Exists in rpc API?
func (conn *ZkoccConn) Exists(path string) (stat Stat, err error) {
	zkPath := &ZkPath{path}
	zkNode := &ZkNode{}
	if err := conn.rpcClient.Call(context.TODO(), "ZkReader.Get", zkPath, zkNode); err != nil {
		return nil, err
	}
	return &zkNode.Stat, nil
}
func (conn *Conn) Rollback() error {
	if conn.TransactionId == 0 {
		return ErrBadRollback
	}
	// See note in Commit about the behavior of TransactionId.
	defer func() { conn.TransactionId = 0 }()
	return conn.fmtErr(conn.tabletConn.Rollback(context.TODO(), conn.TransactionId))
}
func Put(n *core.IpfsNode, cmdparts []string) (string, error) {
	if len(cmdparts) < 4 {
		return fmt.Sprintln("put: '# put key val'"), ErrArgCount
	}
	msg := fmt.Sprintf("putting value: '%s' for key '%s'\n", cmdparts[3], cmdparts[2])
	ctx, cancel := context.WithDeadline(context.TODO(), time.Now().Add(time.Second*5))
	defer cancel()
	return msg, n.Routing.PutValue(ctx, u.Key(cmdparts[2]), []byte(cmdparts[3]))
}
func (conn *ZkoccConn) Get(path string) (data string, stat Stat, err error) {
	zkPath := &ZkPath{path}
	zkNode := &ZkNode{}
	if err := conn.rpcClient.Call(context.TODO(), "ZkReader.Get", zkPath, zkNode); err != nil {
		return "", nil, err
	}
	return zkNode.Data, &zkNode.Stat, nil
}
func (conn *ZkoccConn) Children(path string) (children []string, stat Stat, err error) {
	zkPath := &ZkPath{path}
	zkNode := &ZkNode{}
	if err := conn.rpcClient.Call(context.TODO(), "ZkReader.Children", zkPath, zkNode); err != nil {
		return nil, nil, err
	}
	return zkNode.Children, &zkNode.Stat, nil
}
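// exampleZkoccReads is a hypothetical usage sketch (not from the original
// source) for the read-only ZkoccConn calls above; the zookeeper path is
// made up. Note that because Exists is implemented with Get (see the FIXME),
// a missing node surfaces as an error rather than a nil stat.
func exampleZkoccReads(conn *ZkoccConn) error {
	stat, err := conn.Exists("/zk/test/vt")
	if err != nil {
		return err
	}
	data, _, err := conn.Get("/zk/test/vt")
	if err != nil {
		return err
	}
	children, _, err := conn.Children("/zk/test/vt")
	if err != nil {
		return err
	}
	fmt.Printf("stat %v, data %q, children %v\n", stat, data, children)
	return nil
}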
// DialAuthHTTP connects to an authenticated go HTTP RPC server using
// the specified codec and credentials.
// use 0 as connectTimeout for no timeout
// use nil as config to not use TLS
func DialAuthHTTP(network, address, user, password, codecName string, cFactory ClientCodecFactory, connectTimeout time.Duration, config *tls.Config) (conn *rpc.Client, err error) {
	if conn, err = dialHTTP(network, address, codecName, cFactory, true, connectTimeout, config); err != nil {
		return
	}
	reply := new(auth.GetNewChallengeReply)
	if err = conn.Call(context.TODO(), "AuthenticatorCRAMMD5.GetNewChallenge", "", reply); err != nil {
		return
	}
	proof := auth.CRAMMD5GetExpected(user, password, reply.Challenge)

	if err = conn.Call(
		context.TODO(), "AuthenticatorCRAMMD5.Authenticate",
		auth.AuthenticateRequest{Proof: proof}, new(auth.AuthenticateReply)); err != nil {
		return
	}
	return
}
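// exampleDialAuth is a hypothetical usage sketch (not from the original
// source): dial an authenticated server with some codec factory. The
// address, credentials, and codec name are made-up placeholders.
func exampleDialAuth(cFactory ClientCodecFactory) (*rpc.Client, error) {
	conn, err := DialAuthHTTP("tcp", "localhost:12345", "user", "secret", "bson", cFactory, 30*time.Second, nil /*no TLS*/)
	if err != nil {
		return nil, err
	}
	// the returned client has completed the CRAM-MD5 challenge/response
	// exchange above and is ready for regular Call invocations
	return conn, nil
}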
// Restore actually performs the restore action on a tablet.
func (wr *Wrangler) Restore(srcTabletAlias topo.TabletAlias, srcFilePath string, dstTabletAlias, parentAlias topo.TabletAlias, fetchConcurrency, fetchRetryCount int, wasReserved, dontWaitForSlaveStart bool) error {
	// read our current tablet, verify its state before sending it
	// to the tablet itself
	tablet, err := wr.ts.GetTablet(dstTabletAlias)
	if err != nil {
		return err
	}
	if wasReserved {
		if tablet.Type != topo.TYPE_RESTORE {
			return fmt.Errorf("expected restore type, not %v: %v", tablet.Type, dstTabletAlias)
		}
	} else {
		if tablet.Type != topo.TYPE_IDLE {
			return fmt.Errorf("expected idle type, not %v: %v", tablet.Type, dstTabletAlias)
		}
	}

	// update the shard record if we need to, to update Cells
	srcTablet, err := wr.ts.GetTablet(srcTabletAlias)
	if err != nil {
		return err
	}
	si, err := wr.ts.GetShard(srcTablet.Keyspace, srcTablet.Shard)
	if err != nil {
		return fmt.Errorf("Cannot read shard: %v", err)
	}
	if err := wr.updateShardCellsAndMaster(si, tablet.Alias, topo.TYPE_SPARE, false); err != nil {
		return err
	}

	// do the work
	args := &actionnode.RestoreArgs{
		SrcTabletAlias:        srcTabletAlias,
		SrcFilePath:           srcFilePath,
		ParentAlias:           parentAlias,
		FetchConcurrency:      fetchConcurrency,
		FetchRetryCount:       fetchRetryCount,
		WasReserved:           wasReserved,
		DontWaitForSlaveStart: dontWaitForSlaveStart,
	}
	logStream, errFunc, err := wr.tmc.Restore(context.TODO(), tablet, args, wr.ActionTimeout())
	if err != nil {
		return err
	}
	for e := range logStream {
		wr.Logger().Infof("Restore(%v): %v", dstTabletAlias, e)
	}
	if err := errFunc(); err != nil {
		return err
	}

	// Restore moves us into the replication graph as a
	// spare. There are no consequences to the replication or
	// serving graphs, so no rebuild required.
	return nil
}
func (wr *Wrangler) ValidateVersionKeyspace(keyspace string) error {
	// find all the shards
	shards, err := wr.ts.GetShardNames(keyspace)
	if err != nil {
		return err
	}

	// corner cases
	if len(shards) == 0 {
		return fmt.Errorf("No shards in keyspace %v", keyspace)
	}
	sort.Strings(shards)
	if len(shards) == 1 {
		return wr.ValidateVersionShard(keyspace, shards[0])
	}

	// find the reference version using the first shard's master
	si, err := wr.ts.GetShard(keyspace, shards[0])
	if err != nil {
		return err
	}
	if si.MasterAlias.Uid == topo.NO_TABLET {
		return fmt.Errorf("No master in shard %v/%v", keyspace, shards[0])
	}
	referenceAlias := si.MasterAlias
	log.Infof("Gathering version for reference master %v", referenceAlias)
	referenceVersion, err := wr.GetVersion(referenceAlias)
	if err != nil {
		return err
	}

	// then diff with all tablets but master 0
	er := concurrency.AllErrorRecorder{}
	wg := sync.WaitGroup{}
	for _, shard := range shards {
		aliases, err := topo.FindAllTabletAliasesInShard(context.TODO(), wr.ts, keyspace, shard)
		if err != nil {
			er.RecordError(err)
			continue
		}

		for _, alias := range aliases {
			if alias == si.MasterAlias {
				continue
			}

			wg.Add(1)
			go wr.diffVersion(referenceVersion, referenceAlias, alias, &wg, &er)
		}
	}
	wg.Wait()
	if er.HasErrors() {
		return fmt.Errorf("Version diffs:\n%v", er.Error().Error())
	}
	return nil
}
// findMasterTargets looks up the masters for all destination shards,
// and sets the destinations appropriately. It should be used when
// vtworker only wants to write to masters.
func (scw *SplitCloneWorker) findMasterTargets() error {
	var err error

	scw.destinationAliases = make([][]topo.TabletAlias, len(scw.destinationShards))
	scw.destinationTablets = make([]map[topo.TabletAlias]*topo.TabletInfo, len(scw.destinationShards))
	scw.destinationMasterAliases = make([]topo.TabletAlias, len(scw.destinationShards))

	scw.reloadAliases = make([][]topo.TabletAlias, len(scw.destinationShards))
	scw.reloadTablets = make([]map[topo.TabletAlias]*topo.TabletInfo, len(scw.destinationShards))

	for shardIndex, si := range scw.destinationShards {
		scw.reloadAliases[shardIndex], err = topo.FindAllTabletAliasesInShard(context.TODO(), scw.wr.TopoServer(), si.Keyspace(), si.ShardName())
		if err != nil {
			return fmt.Errorf("cannot find all reload target tablets in %v/%v: %v", si.Keyspace(), si.ShardName(), err)
		}
		scw.wr.Logger().Infof("Found %v reload target aliases in shard %v/%v", len(scw.reloadAliases[shardIndex]), si.Keyspace(), si.ShardName())

		// get the TabletInfo for all targets
		scw.reloadTablets[shardIndex], err = topo.GetTabletMap(context.TODO(), scw.wr.TopoServer(), scw.reloadAliases[shardIndex])
		if err != nil {
			return fmt.Errorf("cannot read all reload target tablets in %v/%v: %v", si.Keyspace(), si.ShardName(), err)
		}

		// find and validate the master
		for tabletAlias, ti := range scw.reloadTablets[shardIndex] {
			if ti.Type == topo.TYPE_MASTER {
				if scw.destinationMasterAliases[shardIndex].IsZero() {
					scw.destinationMasterAliases[shardIndex] = tabletAlias
					scw.destinationAliases[shardIndex] = []topo.TabletAlias{tabletAlias}
					scw.destinationTablets[shardIndex] = map[topo.TabletAlias]*topo.TabletInfo{tabletAlias: ti}
				} else {
					return fmt.Errorf("multiple masters in destination shard: %v and %v at least", scw.destinationMasterAliases[shardIndex], tabletAlias)
				}
			}
		}
		if scw.destinationMasterAliases[shardIndex].IsZero() {
			return fmt.Errorf("no master in destination shard")
		}
		scw.wr.Logger().Infof("Found target master alias %v in shard %v/%v", scw.destinationMasterAliases[shardIndex], si.Keyspace(), si.ShardName())
	}

	return nil
}
func (tm *TabletManager) TabletExternallyReparented(ctx context.Context, args *gorpcproto.TabletExternallyReparentedArgs, reply *rpc.Unused) error {
	// TODO(alainjobart) we should forward the RPC deadline from
	// the original gorpc call. Until we support that, use a
	// reasonable hard-coded value.
	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	defer cancel()
	return tm.agent.RpcWrapLock(ctx, actionnode.TABLET_ACTION_EXTERNALLY_REPARENTED, args, reply, false, func() error {
		return tm.agent.TabletExternallyReparented(ctx, args.ExternalID)
	})
}
func (wr *Wrangler) shardReplicationStatuses(shardInfo *topo.ShardInfo) ([]*topo.TabletInfo, []*myproto.ReplicationStatus, error) {
	// FIXME(msolomon) this assumes no hierarchical replication, which is currently the case.
	tabletMap, err := topo.GetTabletMapForShard(context.TODO(), wr.ts, shardInfo.Keyspace(), shardInfo.ShardName())
	if err != nil {
		return nil, nil, err
	}
	tablets := topotools.CopyMapValues(tabletMap, []*topo.TabletInfo{}).([]*topo.TabletInfo)
	stats, err := wr.tabletReplicationStatuses(tablets)
	return tablets, stats, err
}
// SetSourceShards is a utility function to override the SourceShards fields
// on a Shard.
func (wr *Wrangler) SetSourceShards(keyspace, shard string, sources []topo.TabletAlias, tables []string) error {
	// read the shard
	shardInfo, err := wr.ts.GetShard(keyspace, shard)
	if err != nil {
		return err
	}

	// If the shard already has sources, maybe it's already been restored,
	// so let's be safe and abort right here.
	if len(shardInfo.SourceShards) > 0 {
		return fmt.Errorf("Shard %v/%v already has SourceShards, not overwriting them", keyspace, shard)
	}

	// read the source tablets
	sourceTablets, err := topo.GetTabletMap(context.TODO(), wr.TopoServer(), sources)
	if err != nil {
		return err
	}

	// Insert their KeyRange in the SourceShards array.
	// We use a linear 0-based id, that matches what mysqlctld/split.go
	// inserts into _vt.blp_checkpoint.
	shardInfo.SourceShards = make([]topo.SourceShard, len(sourceTablets))
	i := 0
	for _, ti := range sourceTablets {
		shardInfo.SourceShards[i] = topo.SourceShard{
			Uid:      uint32(i),
			Keyspace: ti.Keyspace,
			Shard:    ti.Shard,
			KeyRange: ti.KeyRange,
			Tables:   tables,
		}
		i++
	}

	// and write the shard
	if err = topo.UpdateShard(context.TODO(), wr.ts, shardInfo); err != nil {
		return err
	}

	return nil
}
func (conn *Conn) Exec(query string, bindVars map[string]interface{}) (db.Result, error) {
	if conn.stream {
		sr, errFunc, err := conn.tabletConn.StreamExecute(context.TODO(), query, bindVars, conn.TransactionId)
		if err != nil {
			return nil, conn.fmtErr(err)
		}
		// read the columns, or grab the error
		cols, ok := <-sr
		if !ok {
			return nil, conn.fmtErr(errFunc())
		}
		return &StreamResult{errFunc, sr, cols, nil, 0, nil}, nil
	}

	qr, err := conn.tabletConn.Execute(context.TODO(), query, bindVars, conn.TransactionId)
	if err != nil {
		return nil, conn.fmtErr(err)
	}
	return &Result{qr, 0, nil}, nil
}
func (wr *Wrangler) setShardServedTypes(keyspace, shard string, cells []string, servedType topo.TabletType, remove bool) error {
	si, err := wr.ts.GetShard(keyspace, shard)
	if err != nil {
		return err
	}

	if err := si.UpdateServedTypesMap(servedType, cells, remove); err != nil {
		return err
	}

	return topo.UpdateShard(context.TODO(), wr.ts, si)
}
func Provide(n *core.IpfsNode, cmdparts []string) (string, error) {
	if len(cmdparts) < 3 {
		return fmt.Sprintln("provide: '# provide key'"), ErrArgCount
	}
	ctx, cancel := context.WithDeadline(context.TODO(), time.Now().Add(time.Second*5))
	defer cancel()
	err := n.Routing.Provide(ctx, u.Key(cmdparts[2]))
	if err != nil {
		return "", err
	}
	return "", nil
}
func Get(n *core.IpfsNode, cmdparts []string) (string, error) {
	if len(cmdparts) < 3 {
		return fmt.Sprintln("get: '# get key'"), ErrArgCount
	}
	ctx, cancel := context.WithDeadline(context.TODO(), time.Now().Add(time.Second*5))
	defer cancel()
	val, err := n.Routing.GetValue(ctx, u.Key(cmdparts[2]))
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("Got value: '%s'\n", string(val)), nil
}
func (conn *Conn) Begin() (db.Tx, error) {
	if conn.TransactionId != 0 {
		return &Tx{}, ErrNoNestedTxn
	}
	transactionId, err := conn.tabletConn.Begin(context.TODO())
	if err != nil {
		return &Tx{}, conn.fmtErr(err)
	}
	conn.TransactionId = transactionId
	return &Tx{conn}, nil
}
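// exampleTransaction is a hypothetical usage sketch (not from the original
// source) of the transaction lifecycle across Begin, Exec, and Rollback
// above. It assumes the Tx returned by Begin exposes Commit, as the note in
// Rollback suggests; the query and bind variables are made up.
func exampleTransaction(conn *Conn) error {
	tx, err := conn.Begin()
	if err != nil {
		return err
	}
	if _, err := conn.Exec("update test set msg = :msg where pk = :pk",
		map[string]interface{}{"msg": "new", "pk": 1}); err != nil {
		// Rollback also resets conn.TransactionId, see above
		conn.Rollback()
		return err
	}
	return tx.Commit()
}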
// updateShardCellsAndMaster will update the 'Cells' and possibly
// MasterAlias records for the shard, if needed.
func (wr *Wrangler) updateShardCellsAndMaster(si *topo.ShardInfo, tabletAlias topo.TabletAlias, tabletType topo.TabletType, force bool) error {
	// See if we need to update the Shard:
	// - add the tablet's cell to the shard's Cells if needed
	// - change the master if needed
	shardUpdateRequired := false
	if !si.HasCell(tabletAlias.Cell) {
		shardUpdateRequired = true
	}
	if tabletType == topo.TYPE_MASTER && si.MasterAlias != tabletAlias {
		shardUpdateRequired = true
	}
	if !shardUpdateRequired {
		return nil
	}

	actionNode := actionnode.UpdateShard()
	keyspace := si.Keyspace()
	shard := si.ShardName()
	lockPath, err := wr.lockShard(keyspace, shard, actionNode)
	if err != nil {
		return err
	}

	// re-read the shard with the lock
	si, err = wr.ts.GetShard(keyspace, shard)
	if err != nil {
		return wr.unlockShard(keyspace, shard, actionNode, lockPath, err)
	}

	// update it
	wasUpdated := false
	if !si.HasCell(tabletAlias.Cell) {
		si.Cells = append(si.Cells, tabletAlias.Cell)
		wasUpdated = true
	}
	if tabletType == topo.TYPE_MASTER && si.MasterAlias != tabletAlias {
		if !si.MasterAlias.IsZero() && !force {
			return wr.unlockShard(keyspace, shard, actionNode, lockPath, fmt.Errorf("creating this tablet would override old master %v in shard %v/%v", si.MasterAlias, keyspace, shard))
		}
		si.MasterAlias = tabletAlias
		wasUpdated = true
	}

	if wasUpdated {
		// write it back
		if err := topo.UpdateShard(context.TODO(), wr.ts, si); err != nil {
			return wr.unlockShard(keyspace, shard, actionNode, lockPath, err)
		}
	}

	// and unlock
	return wr.unlockShard(keyspace, shard, actionNode, lockPath, err)
}