func commandVtGateSplitQuery(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	server := subFlags.String("server", "", "VtGate server to connect to")
	bindVariables := newBindvars(subFlags)
	connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vtgate client")
	splitCount := subFlags.Int("split_count", 16, "number of splits to generate")
	keyspace := subFlags.String("keyspace", "", "keyspace to send query to")
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("the <sql> argument is required for the VtGateSplitQuery command")
	}
	vtgateConn, err := vtgateconn.Dial(ctx, *server, *connectTimeout)
	if err != nil {
		return fmt.Errorf("error connecting to vtgate '%v': %v", *server, err)
	}
	defer vtgateConn.Close()
	r, err := vtgateConn.SplitQuery(ctx, *keyspace, tproto.BoundQuery{
		Sql:           subFlags.Arg(0),
		BindVariables: *bindVariables,
	}, *splitCount)
	if err != nil {
		return fmt.Errorf("SplitQuery failed: %v", err)
	}
	wr.Logger().Printf("%v\n", jscfg.ToJSON(r))
	return nil
}
func commandVtGateExecuteShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	server := subFlags.String("server", "", "VtGate server to connect to")
	bindVariables := newBindvars(subFlags)
	connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vtgate client")
	tabletType := subFlags.String("tablet_type", "master", "tablet type to query")
	keyspace := subFlags.String("keyspace", "", "keyspace to send query to")
	shardsStr := subFlags.String("shards", "", "comma-separated list of shards to send query to")
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("the <sql> argument is required for the VtGateExecuteShard command")
	}
	t, err := parseTabletType(*tabletType, []topo.TabletType{topo.TYPE_MASTER, topo.TYPE_REPLICA, topo.TYPE_RDONLY})
	if err != nil {
		return err
	}
	var shards []string
	if *shardsStr != "" {
		shards = strings.Split(*shardsStr, ",")
	}
	vtgateConn, err := vtgateconn.Dial(ctx, *server, *connectTimeout)
	if err != nil {
		return fmt.Errorf("error connecting to vtgate '%v': %v", *server, err)
	}
	defer vtgateConn.Close()
	qr, err := vtgateConn.ExecuteShard(ctx, subFlags.Arg(0), *keyspace, shards, *bindVariables, t)
	if err != nil {
		return fmt.Errorf("Execute failed: %v", err)
	}
	wr.Logger().Printf("%v\n", jscfg.ToJSON(qr))
	return nil
}
// NewVerticalSplitCloneWorker returns a new VerticalSplitCloneWorker object.
func NewVerticalSplitCloneWorker(wr *wrangler.Wrangler, cell, destinationKeyspace, destinationShard string, tables []string, strategyStr string, sourceReaderCount, destinationPackCount int, minTableSizeForSplit uint64, destinationWriterCount int) (Worker, error) {
	strategy, err := mysqlctl.NewSplitStrategy(wr.Logger(), strategyStr)
	if err != nil {
		return nil, err
	}
	return &VerticalSplitCloneWorker{
		StatusWorker:           NewStatusWorker(),
		wr:                     wr,
		cell:                   cell,
		destinationKeyspace:    destinationKeyspace,
		destinationShard:       destinationShard,
		tables:                 tables,
		strategy:               strategy,
		sourceReaderCount:      sourceReaderCount,
		destinationPackCount:   destinationPackCount,
		minTableSizeForSplit:   minTableSizeForSplit,
		destinationWriterCount: destinationWriterCount,
		cleaner:                &wrangler.Cleaner{},

		ev: &events.VerticalSplitClone{
			Cell:     cell,
			Keyspace: destinationKeyspace,
			Shard:    destinationShard,
			Tables:   tables,
			Strategy: strategy.String(),
		},
	}, nil
}
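// The sketch below is illustrative only and not part of the original source: it
// shows one way a caller could invoke NewVerticalSplitCloneWorker, based purely
// on the signature above. All argument values are placeholder assumptions.
func exampleNewVerticalSplitCloneWorker(wr *wrangler.Wrangler) (Worker, error) {
	return NewVerticalSplitCloneWorker(
		wr,
		"cell1",                  // cell to pick source tablets from
		"destination_keyspace",   // destinationKeyspace
		"0",                      // destinationShard
		[]string{"moving_table"}, // tables to copy vertically
		"",                       // strategyStr (assumed: empty selects the default strategy)
		10,                       // sourceReaderCount
		10,                       // destinationPackCount
		1024*1024,                // minTableSizeForSplit
		20,                       // destinationWriterCount
	)
}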
func commandWorkflowCreate(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	if WorkflowManager == nil {
		return fmt.Errorf("no workflow.Manager registered")
	}
	skipStart := subFlags.Bool("skip_start", false, "If set, the workflow will not be started.")
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() < 1 {
		return fmt.Errorf("the <factoryName> argument is required for the WorkflowCreate command")
	}
	factoryName := subFlags.Arg(0)

	uuid, err := WorkflowManager.Create(ctx, factoryName, subFlags.Args()[1:])
	if err != nil {
		return err
	}
	wr.Logger().Printf("uuid: %v\n", uuid)

	if !*skipStart {
		return WorkflowManager.Start(ctx, uuid)
	}
	return nil
}
// NewSplitCloneWorker returns a new SplitCloneWorker object.
func NewSplitCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, excludeTables []string, strategyStr string, sourceReaderCount, destinationPackCount int, minTableSizeForSplit uint64, destinationWriterCount int) (Worker, error) {
	strategy, err := newSplitStrategy(wr.Logger(), strategyStr)
	if err != nil {
		return nil, err
	}
	return &SplitCloneWorker{
		StatusWorker:           NewStatusWorker(),
		wr:                     wr,
		cell:                   cell,
		keyspace:               keyspace,
		shard:                  shard,
		excludeTables:          excludeTables,
		strategy:               strategy,
		sourceReaderCount:      sourceReaderCount,
		destinationPackCount:   destinationPackCount,
		minTableSizeForSplit:   minTableSizeForSplit,
		destinationWriterCount: destinationWriterCount,
		cleaner:                &wrangler.Cleaner{},

		ev: &events.SplitClone{
			Cell:          cell,
			Keyspace:      keyspace,
			Shard:         shard,
			ExcludeTables: excludeTables,
			Strategy:      strategy.String(),
		},
	}, nil
}
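// Illustrative sketch, not from the original source: a hypothetical call to
// NewSplitCloneWorker for a horizontal split, based purely on the signature
// above. All argument values are placeholder assumptions.
func exampleNewSplitCloneWorker(wr *wrangler.Wrangler) (Worker, error) {
	return NewSplitCloneWorker(
		wr,
		"cell1",                  // cell to pick source tablets from
		"source_keyspace",        // keyspace being resharded
		"-80",                    // source shard to clone
		[]string{"unused_table"}, // excludeTables
		"",                       // strategyStr (assumed: empty selects the default strategy)
		10,                       // sourceReaderCount
		10,                       // destinationPackCount
		1024*1024,                // minTableSizeForSplit
		20,                       // destinationWriterCount
	)
}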
func commandVtTabletBegin(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vttablet client")
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("the <tablet_alias> argument is required for the VtTabletBegin command")
	}
	tabletAlias, err := topoproto.ParseTabletAlias(subFlags.Arg(0))
	if err != nil {
		return err
	}
	tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias)
	if err != nil {
		return err
	}
	conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, *connectTimeout)
	if err != nil {
		return fmt.Errorf("cannot connect to tablet %v: %v", tabletAlias, err)
	}
	defer conn.Close()
	transactionID, err := conn.Begin(ctx)
	if err != nil {
		return fmt.Errorf("Begin failed: %v", err)
	}
	result := map[string]int64{
		"transaction_id": transactionID,
	}
	return printJSON(wr.Logger(), result)
}
func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "time to wait for slaves to catch up in reparenting")
	keyspaceShard := subFlags.String("keyspace_shard", "", "keyspace/shard of the shard that needs to be reparented")
	newMaster := subFlags.String("new_master", "", "alias of a tablet that should be the new master")
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() == 2 {
		// Legacy syntax: "<keyspace/shard> <tablet alias>".
		if *newMaster != "" {
			return fmt.Errorf("cannot use legacy syntax and flag -new_master for action EmergencyReparentShard at the same time")
		}
		*keyspaceShard = subFlags.Arg(0)
		*newMaster = subFlags.Arg(1)
	} else if subFlags.NArg() != 0 {
		return fmt.Errorf("action EmergencyReparentShard requires -keyspace_shard=<keyspace/shard> -new_master=<tablet alias>")
	}

	keyspace, shard, err := topoproto.ParseKeyspaceShard(*keyspaceShard)
	if err != nil {
		return err
	}
	tabletAlias, err := topoproto.ParseTabletAlias(*newMaster)
	if err != nil {
		return err
	}
	return wr.EmergencyReparentShard(ctx, keyspace, shard, tabletAlias, *waitSlaveTimeout)
}
func commandVtGateSplitQuery(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	server := subFlags.String("server", "", "VtGate server to connect to")
	bindVariables := newBindvars(subFlags)
	connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vtgate client")
	splitColumn := subFlags.String("split_column", "", "force the use of this column to split the query")
	splitCount := subFlags.Int("split_count", 16, "number of splits to generate")
	keyspace := subFlags.String("keyspace", "", "keyspace to send query to")
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("the <sql> argument is required for the VtGateSplitQuery command")
	}
	vtgateConn, err := vtgateconn.Dial(ctx, *server, *connectTimeout, "")
	if err != nil {
		return fmt.Errorf("error connecting to vtgate '%v': %v", *server, err)
	}
	defer vtgateConn.Close()
	r, err := vtgateConn.SplitQuery(ctx, *keyspace, subFlags.Arg(0), *bindVariables, *splitColumn, int64(*splitCount))
	if err != nil {
		return fmt.Errorf("SplitQuery failed: %v", err)
	}
	return printJSON(wr.Logger(), r)
}
func commandVtTabletRollback(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vttablet client")
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 2 {
		return fmt.Errorf("the <tablet_alias> and <transaction_id> arguments are required for the VtTabletRollback command")
	}
	transactionID, err := strconv.ParseInt(subFlags.Arg(1), 10, 64)
	if err != nil {
		return err
	}
	tabletAlias, err := topoproto.ParseTabletAlias(subFlags.Arg(0))
	if err != nil {
		return err
	}
	tabletInfo, err := wr.TopoServer().GetTablet(ctx, tabletAlias)
	if err != nil {
		return err
	}
	conn, err := tabletconn.GetDialer()(tabletInfo.Tablet, *connectTimeout)
	if err != nil {
		return fmt.Errorf("cannot connect to tablet %v: %v", tabletAlias, err)
	}
	defer conn.Close(ctx)
	return conn.Rollback(ctx, &querypb.Target{
		Keyspace:   tabletInfo.Tablet.Keyspace,
		Shard:      tabletInfo.Tablet.Shard,
		TabletType: tabletInfo.Tablet.Type,
	}, transactionID)
}
func getActions(wr *wrangler.Wrangler, zconn zk.Conn, actionPath string) ([]*actionnode.ActionNode, error) {
	actions, _, err := zconn.Children(actionPath)
	if err != nil {
		return nil, fmt.Errorf("getActions failed: %v %v", actionPath, err)
	}
	sort.Strings(actions)
	wg := sync.WaitGroup{}
	mu := sync.Mutex{}
	nodes := make([]*actionnode.ActionNode, 0, len(actions))
	for _, action := range actions {
		wg.Add(1)
		go func(action string) {
			defer wg.Done()
			actionNodePath := path.Join(actionPath, action)
			data, _, err := zconn.Get(actionNodePath)
			if err != nil && !zookeeper.IsError(err, zookeeper.ZNONODE) {
				wr.Logger().Warningf("getActions: %v %v", actionNodePath, err)
				return
			}
			actionNode, err := actionnode.ActionNodeFromJson(data, actionNodePath)
			if err != nil {
				wr.Logger().Warningf("getActions: %v %v", actionNodePath, err)
				return
			}
			mu.Lock()
			nodes = append(nodes, actionNode)
			mu.Unlock()
		}(action)
	}
	wg.Wait()

	return nodes, nil
}
func zkResolveWildcards(wr *wrangler.Wrangler, args []string) ([]string, error) {
	zkts, ok := wr.TopoServer().(*zktopo.Server)
	if !ok {
		return args, nil
	}
	return zk.ResolveWildcards(zkts.GetZConn(), args)
}
func commandVtGateExecute(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	server := subFlags.String("server", "", "VtGate server to connect to")
	bindVariables := newBindvars(subFlags)
	connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vtgate client")
	tabletType := subFlags.String("tablet_type", "master", "tablet type to query")
	json := subFlags.Bool("json", false, "Output JSON instead of human-readable table")
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("the <sql> argument is required for the VtGateExecute command")
	}
	t, err := parseTabletType(*tabletType, []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY})
	if err != nil {
		return err
	}
	vtgateConn, err := vtgateconn.Dial(ctx, *server, *connectTimeout)
	if err != nil {
		return fmt.Errorf("error connecting to vtgate '%v': %v", *server, err)
	}
	defer vtgateConn.Close()
	qr, err := vtgateConn.Execute(ctx, subFlags.Arg(0), *bindVariables, t)
	if err != nil {
		return fmt.Errorf("Execute failed: %v", err)
	}
	if *json {
		return printJSON(wr.Logger(), qr)
	}
	printQueryResult(loggerWriter{wr.Logger()}, qr)
	return nil
}
func commandListBackups(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("action ListBackups requires <keyspace/shard>")
	}
	keyspace, shard, err := topoproto.ParseKeyspaceShard(subFlags.Arg(0))
	if err != nil {
		return err
	}
	bucket := fmt.Sprintf("%v/%v", keyspace, shard)

	bs, err := backupstorage.GetBackupStorage()
	if err != nil {
		return err
	}
	defer bs.Close()
	bhs, err := bs.ListBackups(bucket)
	if err != nil {
		return err
	}
	for _, bh := range bhs {
		wr.Logger().Printf("%v\n", bh.Name())
	}
	return nil
}
// StartActionLoop will start the action loop for a fake tablet,
// using ft.FakeMysqlDaemon as the backing mysqld.
func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) {
	if ft.Agent != nil {
		t.Fatalf("Agent for %v is already running", ft.Tablet.Alias)
	}

	// Listen on a random port
	var err error
	ft.Listener, err = net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Cannot listen: %v", err)
	}
	port := ft.Listener.Addr().(*net.TCPAddr).Port

	// create a test agent on that port, and re-read the record
	// (it has new ports and IP)
	ft.Agent = tabletmanager.NewTestActionAgent(context.Background(), wr.TopoServer(), ft.Tablet.Alias, port, ft.FakeMysqlDaemon)
	ft.Tablet = ft.Agent.Tablet().Tablet

	// create the RPC server
	ft.RPCServer = rpcplus.NewServer()
	gorpctmserver.RegisterForTest(ft.RPCServer, ft.Agent)

	// create the HTTP server, serve the server from it
	handler := http.NewServeMux()
	bsonrpc.ServeCustomRPC(handler, ft.RPCServer, false)
	ft.HTTPServer = http.Server{
		Handler: handler,
	}
	go ft.HTTPServer.Serve(ft.Listener)
}
// FindHealthyRdonlyEndPoint returns a random healthy endpoint.
// Since we don't want to use them all, we require at least
// minHealthyEndPoints servers to be healthy.
// May block up to -wait_for_healthy_rdonly_endpoints_timeout.
func FindHealthyRdonlyEndPoint(ctx context.Context, wr *wrangler.Wrangler, cell, keyspace, shard string) (*topodatapb.TabletAlias, error) {
	busywaitCtx, busywaitCancel := context.WithTimeout(ctx, *WaitForHealthyEndPointsTimeout)
	defer busywaitCancel()

	// create a discovery healthcheck and wait for it to have at least
	// one rdonly endpoint at this point
	healthCheck := discovery.NewHealthCheck(*remoteActionsTimeout, *healthcheckRetryDelay, *healthCheckTimeout, "" /* statsSuffix */)
	watcher := discovery.NewShardReplicationWatcher(wr.TopoServer(), healthCheck, cell, keyspace, shard, *healthCheckTopologyRefresh, 5 /* topoReadConcurrency */)
	defer watcher.Stop()
	defer healthCheck.Close()
	if err := discovery.WaitForEndPoints(ctx, healthCheck, cell, keyspace, shard, []topodatapb.TabletType{topodatapb.TabletType_RDONLY}); err != nil {
		return nil, fmt.Errorf("error waiting for rdonly endpoints for (%v,%v/%v): %v", cell, keyspace, shard, err)
	}

	var healthyEndpoints []*topodatapb.EndPoint
	for {
		select {
		case <-busywaitCtx.Done():
			return nil, fmt.Errorf("Not enough endpoints to choose from in (%v,%v/%v), have %v healthy ones, need at least %v Context Error: %v", cell, keyspace, shard, len(healthyEndpoints), *minHealthyEndPoints, busywaitCtx.Err())
		default:
		}

		addrs := healthCheck.GetEndPointStatsFromTarget(keyspace, shard, topodatapb.TabletType_RDONLY)
		healthyEndpoints = make([]*topodatapb.EndPoint, 0, len(addrs))
		for _, addr := range addrs {
			// Note we do not check the 'Serving' flag here.
			// This is mainly to avoid the case where we run a
			// Diff between a source and destination, and the source
			// is not serving (disabled by TabletControl).
			// When we switch the tablet to 'worker', it will
			// go back to serving state.
			if addr.Stats == nil || addr.Stats.HealthError != "" || addr.Stats.SecondsBehindMaster > 30 {
				continue
			}
			healthyEndpoints = append(healthyEndpoints, addr.EndPoint)
		}
		if len(healthyEndpoints) >= *minHealthyEndPoints {
			break
		}

		deadlineForLog, _ := busywaitCtx.Deadline()
		wr.Logger().Infof("Waiting for enough endpoints to become available. available: %v required: %v Waiting up to %.1f more seconds.", len(healthyEndpoints), *minHealthyEndPoints, deadlineForLog.Sub(time.Now()).Seconds())
		// Block for 1 second because 2 seconds is the -health_check_interval flag value in integration tests.
		timer := time.NewTimer(1 * time.Second)
		select {
		case <-busywaitCtx.Done():
			timer.Stop()
		case <-timer.C:
		}
	}

	// random server in the list is what we want
	index := rand.Intn(len(healthyEndpoints))
	return &topodatapb.TabletAlias{
		Cell: cell,
		Uid:  healthyEndpoints[index].Uid,
	}, nil
}
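// Illustrative sketch, not from the original source: resolving the alias
// returned by FindHealthyRdonlyEndPoint into a full tablet record, as a worker
// might do before using the tablet as a diff or copy source. The cell,
// keyspace, and shard values are placeholder assumptions.
func examplePickHealthyRdonly(ctx context.Context, wr *wrangler.Wrangler) (*topo.TabletInfo, error) {
	alias, err := FindHealthyRdonlyEndPoint(ctx, wr, "cell1", "source_keyspace", "-80")
	if err != nil {
		return nil, err
	}
	// Look up the full tablet record for the chosen alias.
	return wr.TopoServer().GetTablet(ctx, alias)
}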
func commandExportZknsForKeyspace(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		log.Fatalf("action ExportZknsForKeyspace requires <keyspace|zk global keyspace path>")
	}

	keyspace := keyspaceParamToKeyspace(subFlags.Arg(0))
	return "", wr.ExportZknsForKeyspace(keyspace)
}
func commandReparentTablet(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		log.Fatalf("action ReparentTablet requires <tablet alias|zk tablet path>")
	}

	tabletAlias := tabletParamToTabletAlias(subFlags.Arg(0))
	return "", wr.ReparentTablet(tabletAlias)
}
func commandDemoteMaster(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		log.Fatalf("action DemoteMaster requires <tablet alias|zk tablet path>")
	}

	tabletAlias := tabletParamToTabletAlias(subFlags.Arg(0))
	return wr.ActionInitiator().DemoteMaster(tabletAlias)
}
func commandListShardActions(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		log.Fatalf("action ListShardActions requires <keyspace/shard|zk shard path>")
	}

	keyspace, shard := shardParamToKeyspaceShard(subFlags.Arg(0))
	return "", listActionsByShard(wr.TopoServer(), keyspace, shard)
}
func commandExportZkns(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		log.Fatalf("action ExportZkns requires <cell name|zk vt root path>")
	}

	cell := vtPathToCell(subFlags.Arg(0))
	return "", wr.ExportZkns(cell)
}
// StartActionLoop will start the action loop for a fake tablet,
// using ft.FakeMysqlDaemon as the backing mysqld.
func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) {
	if ft.Agent != nil {
		t.Fatalf("Agent for %v is already running", ft.Tablet.Alias)
	}

	// Listen on a random port for gRPC
	var err error
	ft.Listener, err = net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Cannot listen: %v", err)
	}
	gRPCPort := int32(ft.Listener.Addr().(*net.TCPAddr).Port)

	// if needed, listen on a random port for HTTP
	vtPort := ft.Tablet.PortMap["vt"]
	if ft.StartHTTPServer {
		ft.HTTPListener, err = net.Listen("tcp", ":0")
		if err != nil {
			t.Fatalf("Cannot listen on http port: %v", err)
		}
		handler := http.NewServeMux()
		ft.HTTPServer = http.Server{
			Handler: handler,
		}
		go ft.HTTPServer.Serve(ft.HTTPListener)
		vtPort = int32(ft.HTTPListener.Addr().(*net.TCPAddr).Port)
	}

	// create a test agent on that port, and re-read the record
	// (it has new ports and IP)
	ft.Agent = tabletmanager.NewTestActionAgent(context.Background(), wr.TopoServer(), ft.Tablet.Alias, vtPort, gRPCPort, ft.FakeMysqlDaemon)
	ft.Tablet = ft.Agent.Tablet()

	// create the gRPC server
	ft.RPCServer = grpc.NewServer()
	grpctmserver.RegisterForTest(ft.RPCServer, ft.Agent)
	go ft.RPCServer.Serve(ft.Listener)

	// and wait for it to serve, so we don't start using it before it's
	// ready.
	timeout := 5 * time.Second
	step := 10 * time.Millisecond
	c := tmclient.NewTabletManagerClient()
	for timeout >= 0 {
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
		err := c.Ping(ctx, topo.NewTabletInfo(ft.Agent.Tablet(), -1))
		cancel()
		if err == nil {
			break
		}
		time.Sleep(step)
		timeout -= step
	}
	if timeout < 0 {
		panic("StartActionLoop failed.")
	}
}
// newCloneWorker returns a new SplitCloneWorker object which is used both by
// the SplitClone and VerticalSplitClone command.
// TODO(mberlin): Rename SplitCloneWorker to cloneWorker.
func newCloneWorker(wr *wrangler.Wrangler, cloneType cloneType, cell, keyspace, shard string, online, offline bool, tables, excludeTables []string, strategyStr string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, writeQueryMaxRowsDelete, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS, maxReplicationLag int64) (Worker, error) {
	if cloneType != horizontalResharding && cloneType != verticalSplit {
		return nil, fmt.Errorf("unknown cloneType: %v This is a bug. Please report", cloneType)
	}
	if tables != nil && len(tables) == 0 {
		return nil, errors.New("list of tables to be split out must not be empty")
	}
	strategy, err := newSplitStrategy(wr.Logger(), strategyStr)
	if err != nil {
		return nil, err
	}
	if maxTPS != throttler.MaxRateModuleDisabled {
		wr.Logger().Infof("throttling enabled and set to a max of %v transactions/second", maxTPS)
	}
	if maxTPS != throttler.MaxRateModuleDisabled && maxTPS < int64(destinationWriterCount) {
		return nil, fmt.Errorf("-max_tps must be >= -destination_writer_count: %v >= %v", maxTPS, destinationWriterCount)
	}
	if !online && !offline {
		return nil, errors.New("at least one clone phase (-online, -offline) must be enabled (and not set to false)")
	}

	scw := &SplitCloneWorker{
		StatusWorker:            NewStatusWorker(),
		wr:                      wr,
		cloneType:               cloneType,
		cell:                    cell,
		destinationKeyspace:     keyspace,
		shard:                   shard,
		online:                  online,
		offline:                 offline,
		tables:                  tables,
		excludeTables:           excludeTables,
		strategy:                strategy,
		chunkCount:              chunkCount,
		minRowsPerChunk:         minRowsPerChunk,
		sourceReaderCount:       sourceReaderCount,
		writeQueryMaxRows:       writeQueryMaxRows,
		writeQueryMaxSize:       writeQueryMaxSize,
		writeQueryMaxRowsDelete: writeQueryMaxRowsDelete,
		destinationWriterCount:  destinationWriterCount,
		minHealthyRdonlyTablets: minHealthyRdonlyTablets,
		maxTPS:                  maxTPS,
		maxReplicationLag:       maxReplicationLag,
		cleaner:                 &wrangler.Cleaner{},
		tabletTracker:           NewTabletTracker(),
		throttlers:              make(map[string]*throttler.Throttler),

		destinationDbNames: make(map[string]string),

		tableStatusListOnline:  &tableStatusList{},
		tableStatusListOffline: &tableStatusList{},
	}
	scw.initializeEventDescriptor()
	return scw, nil
}
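// Illustrative sketch, not from the original source: how an exported
// constructor could wrap newCloneWorker for the horizontal resharding case.
// The wrapper name and the choice to pass nil tables are assumptions; the
// parameter names mirror newCloneWorker above.
func exampleNewHorizontalCloneWorker(wr *wrangler.Wrangler, cell, keyspace, shard string, online, offline bool, excludeTables []string, strategyStr string, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, writeQueryMaxRowsDelete, destinationWriterCount, minHealthyRdonlyTablets int, maxTPS, maxReplicationLag int64) (Worker, error) {
	// Horizontal resharding copies all tables, so no explicit table list is
	// passed (tables == nil) and the "must not be empty" check does not apply.
	return newCloneWorker(wr, horizontalResharding, cell, keyspace, shard, online, offline, nil /* tables */, excludeTables, strategyStr, chunkCount, minRowsPerChunk, sourceReaderCount, writeQueryMaxRows, writeQueryMaxSize, writeQueryMaxRowsDelete, destinationWriterCount, minHealthyRdonlyTablets, maxTPS, maxReplicationLag)
}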
func commandReparentShard(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	leaveMasterReadOnly := subFlags.Bool("leave-master-read-only", false, "leaves the master read-only after reparenting")
	force := subFlags.Bool("force", false, "will force the reparent even if the master is already correct")
	subFlags.Parse(args)
	if subFlags.NArg() != 2 {
		log.Fatalf("action ReparentShard requires <keyspace/shard|zk shard path> <tablet alias|zk tablet path>")
	}

	keyspace, shard := shardParamToKeyspaceShard(subFlags.Arg(0))
	tabletAlias := tabletParamToTabletAlias(subFlags.Arg(1))
	return "", wr.ReparentShard(keyspace, shard, tabletAlias, *leaveMasterReadOnly, *force)
}
func commandExportZknsForKeyspace(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("action ExportZknsForKeyspace requires <keyspace|zk global keyspace path>")
	}

	keyspace, err := keyspaceParamToKeyspace(subFlags.Arg(0))
	if err != nil {
		return err
	}
	return wr.ExportZknsForKeyspace(keyspace)
}
func commandReparentTablet(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	if err := subFlags.Parse(args); err != nil {
		return "", err
	}
	if subFlags.NArg() != 1 {
		return "", fmt.Errorf("action ReparentTablet requires <tablet alias|zk tablet path>")
	}

	tabletAlias, err := tabletParamToTabletAlias(subFlags.Arg(0))
	if err != nil {
		return "", err
	}
	return "", wr.ReparentTablet(tabletAlias)
}
func commandReparentTablet(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("action ReparentTablet requires <tablet alias>")
	}
	tabletAlias, err := topoproto.ParseTabletAlias(subFlags.Arg(0))
	if err != nil {
		return err
	}
	return wr.ReparentTablet(ctx, tabletAlias)
}
func commandDemoteMaster(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	if err := subFlags.Parse(args); err != nil {
		return "", err
	}
	if subFlags.NArg() != 1 {
		return "", fmt.Errorf("action DemoteMaster requires <tablet alias|zk tablet path>")
	}

	tabletAlias, err := tabletParamToTabletAlias(subFlags.Arg(0))
	if err != nil {
		return "", err
	}
	return wr.ActionInitiator().DemoteMaster(tabletAlias)
}
func commandExportZkns(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("action ExportZkns requires <cell name|zk vt root path>")
	}

	cell, err := vtPathToCell(subFlags.Arg(0))
	if err != nil {
		return err
	}
	return wr.ExportZkns(cell)
}
// FindHealthyRdonlyEndPoint returns a random healthy endpoint.
// Since we don't want to use them all, we require at least
// minHealthyEndPoints servers to be healthy.
// May block up to -wait_for_healthy_rdonly_endpoints_timeout.
func FindHealthyRdonlyEndPoint(ctx context.Context, wr *wrangler.Wrangler, cell, keyspace, shard string) (*topodatapb.TabletAlias, error) {
	busywaitCtx, busywaitCancel := context.WithTimeout(ctx, *WaitForHealthyEndPointsTimeout)
	defer busywaitCancel()

	var healthyEndpoints []*topodatapb.EndPoint
	for {
		select {
		case <-busywaitCtx.Done():
			return nil, fmt.Errorf("Not enough endpoints to choose from in (%v,%v/%v), have %v healthy ones, need at least %v Context Error: %v", cell, keyspace, shard, len(healthyEndpoints), *minHealthyEndPoints, busywaitCtx.Err())
		default:
		}

		shortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)
		endPoints, _, err := wr.TopoServer().GetEndPoints(shortCtx, cell, keyspace, shard, topodatapb.TabletType_RDONLY)
		cancel()
		if err != nil {
			if err == topo.ErrNoNode {
				// If the node doesn't exist, count that as 0 available rdonly instances.
				endPoints = &topodatapb.EndPoints{}
			} else {
				return nil, fmt.Errorf("GetEndPoints(%v,%v,%v,rdonly) failed: %v", cell, keyspace, shard, err)
			}
		}
		healthyEndpoints = make([]*topodatapb.EndPoint, 0, len(endPoints.Entries))
		for _, entry := range endPoints.Entries {
			if len(entry.HealthMap) == 0 {
				healthyEndpoints = append(healthyEndpoints, entry)
			}
		}
		if len(healthyEndpoints) < *minHealthyEndPoints {
			deadlineForLog, _ := busywaitCtx.Deadline()
			wr.Logger().Infof("Waiting for enough endpoints to become available. available: %v required: %v Waiting up to %.1f more seconds.", len(healthyEndpoints), *minHealthyEndPoints, deadlineForLog.Sub(time.Now()).Seconds())
			// Block for 1 second because 2 seconds is the -health_check_interval flag value in integration tests.
			timer := time.NewTimer(1 * time.Second)
			select {
			case <-busywaitCtx.Done():
				timer.Stop()
			case <-timer.C:
			}
		} else {
			break
		}
	}

	// random server in the list is what we want
	index := rand.Intn(len(healthyEndpoints))
	return &topodatapb.TabletAlias{
		Cell: cell,
		Uid:  healthyEndpoints[index].Uid,
	}, nil
}
func commandVtGateExecuteKeyspaceIds(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error {
	server := subFlags.String("server", "", "VtGate server to connect to")
	bindVariables := newBindvars(subFlags)
	connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vtgate client")
	tabletType := subFlags.String("tablet_type", "master", "tablet type to query")
	keyspace := subFlags.String("keyspace", "", "keyspace to send query to")
	keyspaceIDsStr := subFlags.String("keyspace_ids", "", "comma-separated list of keyspace ids (in hex) that will map into shards to send query to")
	options := subFlags.String("options", "", "execute options values as a text encoded proto of the ExecuteOptions structure")
	json := subFlags.Bool("json", false, "Output JSON instead of human-readable table")
	if err := subFlags.Parse(args); err != nil {
		return err
	}
	if subFlags.NArg() != 1 {
		return fmt.Errorf("the <sql> argument is required for the VtGateExecuteKeyspaceIds command")
	}
	t, err := parseTabletType(*tabletType, []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY})
	if err != nil {
		return err
	}
	var keyspaceIDs [][]byte
	if *keyspaceIDsStr != "" {
		keyspaceIDHexs := strings.Split(*keyspaceIDsStr, ",")
		keyspaceIDs = make([][]byte, len(keyspaceIDHexs))
		for i, keyspaceIDHex := range keyspaceIDHexs {
			keyspaceIDs[i], err = hex.DecodeString(keyspaceIDHex)
			if err != nil {
				return fmt.Errorf("cannot hex-decode value %v '%v': %v", i, keyspaceIDHex, err)
			}
		}
	}
	executeOptions, err := parseExecuteOptions(*options)
	if err != nil {
		return err
	}
	vtgateConn, err := vtgateconn.Dial(ctx, *server, *connectTimeout, "")
	if err != nil {
		return fmt.Errorf("error connecting to vtgate '%v': %v", *server, err)
	}
	defer vtgateConn.Close()
	qr, err := vtgateConn.ExecuteKeyspaceIds(ctx, subFlags.Arg(0), *keyspace, keyspaceIDs, *bindVariables, t, executeOptions)
	if err != nil {
		return fmt.Errorf("Execute failed: %v", err)
	}
	if *json {
		return printJSON(wr.Logger(), qr)
	}
	printQueryResult(loggerWriter{wr.Logger()}, qr)
	return nil
}