func main() { flag.Parse() servenv.Init("vt_binlog_player") if *startPosFile == "" { relog.Fatal("start-pos-file was not supplied.") } if *dbConfigFile == "" { relog.Fatal("Cannot start without db-config-file") } blp, err := initBinlogPlayer(*startPosFile, *dbConfigFile, *lookupConfigFile, *dbCredFile, *useCheckpoint, *debug, *port) if err != nil { relog.Fatal("Error in initializing binlog player - '%v'", err) } blp.txnBatch = *txnBatch blp.maxTxnInterval = time.Duration(*maxTxnInterval) * time.Second blp.execDdl = *execDdl if *tables != "" { tables := strings.Split(*tables, ",") blp.tables = make([]string, len(tables)) for i, table := range tables { blp.tables[i] = strings.TrimSpace(table) } relog.Info("len tables %v tables %v", len(blp.tables), blp.tables) } relog.Info("BinlogPlayer client for keyrange '%v:%v' starting @ '%v'", blp.startPosition.KeyrangeStart, blp.startPosition.KeyrangeEnd, blp.startPosition.Position) if *port != 0 { umgmt.AddStartupCallback(func() { umgmt.StartHttpServer(fmt.Sprintf(":%v", *port)) }) } umgmt.AddStartupCallback(func() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGTERM) go func() { for sig := range c { umgmt.SigTermHandler(sig) } }() }) umgmt.AddCloseCallback(func() { close(interrupted) }) //Make a request to the server and start processing the events. stdout = bufio.NewWriterSize(os.Stdout, 16*1024) err = blp.applyBinlogEvents() if err != nil { relog.Error("Error in applying binlog events, err %v", err) } relog.Info("vt_binlog_player done") }
// CopyKeyspaces will create the keyspaces in the destination topo.
func CopyKeyspaces(fromTS, toTS topo.Server) {
	keyspaces, err := fromTS.GetKeyspaces()
	if err != nil {
		relog.Fatal("fromTS.GetKeyspaces failed: %v", err)
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			if err := toTS.CreateKeyspace(keyspace); err != nil {
				if err == topo.ErrNodeExists {
					relog.Warning("keyspace %v already exists", keyspace)
				} else {
					rec.RecordError(err)
				}
			}
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		relog.Fatal("copyKeyspaces failed: %v", rec.Error())
	}
}
func main() {
	dbConfigsFile, dbCredentialsFile := dbconfigs.RegisterCommonFlags()
	flag.Parse()

	relog.Info("started vtaction %v", os.Args)

	rpc.HandleHTTP()
	jsonrpc.ServeHTTP()
	jsonrpc.ServeRPC()
	bsonrpc.ServeHTTP()
	bsonrpc.ServeRPC()

	logFile, err := os.OpenFile(*logFilename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		relog.Fatal("Can't open log file: %v", err)
	}
	relog.SetOutput(logFile)
	relog.SetPrefix(fmt.Sprintf("vtaction [%v] ", os.Getpid()))
	if err := relog.SetLevelByName(*logLevel); err != nil {
		relog.Fatal("%v", err)
	}
	relog.HijackLog(nil)
	relog.HijackStdio(logFile, logFile)

	mycnf, mycnfErr := mysqlctl.ReadMycnf(*mycnfFile)
	if mycnfErr != nil {
		relog.Fatal("mycnf read failed: %v", mycnfErr)
	}

	relog.Debug("mycnf: %v", jscfg.ToJson(mycnf))

	dbcfgs, cfErr := dbconfigs.Init(mycnf.SocketFile, *dbConfigsFile, *dbCredentialsFile)
	if cfErr != nil {
		relog.Fatal("%s", cfErr)
	}
	mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs.Dba, dbcfgs.Repl)

	topoServer := topo.GetServer()
	defer topo.CloseServers()

	actor := tabletmanager.NewTabletActor(mysqld, topoServer)

	// we delegate out startup to the micromanagement server so these actions
	// will occur after we have obtained our socket.
	bindAddr := fmt.Sprintf(":%v", *port)
	httpServer := &http.Server{Addr: bindAddr}
	go func() {
		if err := httpServer.ListenAndServe(); err != nil {
			relog.Error("httpServer.ListenAndServe err: %v", err)
		}
	}()

	actionErr := actor.HandleAction(*actionNode, *action, *actionGuid, *force)
	if actionErr != nil {
		relog.Fatal("action error: %v", actionErr)
	}

	relog.Info("finished vtaction %v", os.Args)
}
// multisnapshotCmd takes a split snapshot for each shard in the sharding spec.
func multisnapshotCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	concurrency := subFlags.Int("concurrency", 8, "how many compression jobs to run simultaneously")
	spec := subFlags.String("spec", "-", "shard specification")
	tablesString := subFlags.String("tables", "", "dump only this comma separated list of tables")
	skipSlaveRestart := subFlags.Bool("skip-slave-restart", false, "after the snapshot is done, do not restart slave replication")
	maximumFilesize := subFlags.Uint64("maximum-file-size", 128*1024*1024, "the maximum size for an uncompressed data file")
	subFlags.Parse(args)
	if subFlags.NArg() != 2 {
		relog.Fatal("action multisnapshot requires <db name> <key name>")
	}

	shards, err := key.ParseShardingSpec(*spec)
	if err != nil {
		relog.Fatal("multisnapshot failed: %v", err)
	}
	var tables []string
	if *tablesString != "" {
		tables = strings.Split(*tablesString, ",")
	}
	filenames, err := mysqld.CreateMultiSnapshot(shards, subFlags.Arg(0), subFlags.Arg(1), tabletAddr, false, *concurrency, tables, *skipSlaveRestart, *maximumFilesize, nil)
	if err != nil {
		relog.Fatal("multisnapshot failed: %v", err)
	} else {
		relog.Info("manifest locations: %v", filenames)
	}
}
// CopyTablets will create the tablets in the destination topo.
func CopyTablets(fromTS, toTS topo.Server) {
	cells, err := fromTS.GetKnownCells()
	if err != nil {
		relog.Fatal("fromTS.GetKnownCells failed: %v", err)
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range cells {
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			tabletAliases, err := fromTS.GetTabletsByCell(cell)
			if err != nil {
				rec.RecordError(err)
			} else {
				for _, tabletAlias := range tabletAliases {
					wg.Add(1)
					go func(tabletAlias topo.TabletAlias) {
						defer wg.Done()

						// read the source tablet
						ti, err := fromTS.GetTablet(tabletAlias)
						if err != nil {
							rec.RecordError(err)
							return
						}

						// try to create the destination
						err = toTS.CreateTablet(ti.Tablet)
						if err == topo.ErrNodeExists {
							// update the destination tablet
							_, err = toTS.UpdateTablet(ti, -1)
						}
						if err != nil {
							rec.RecordError(err)
							return
						}

						// create the replication paths
						// for masters only here
						if ti.Type == topo.TYPE_MASTER {
							if err = toTS.CreateReplicationPath(ti.Keyspace, ti.Shard, ti.Alias().String()); err != nil && err != topo.ErrNodeExists {
								rec.RecordError(err)
							}
						}
					}(tabletAlias)
				}
			}
		}(cell)
	}
	wg.Wait()
	if rec.HasErrors() {
		relog.Fatal("copyTablets failed: %v", rec.Error())
	}
}
// unmarshalFile reads the named file, if any, and unmarshals its JSON
// contents into val.
func unmarshalFile(name string, val interface{}) {
	if name != "" {
		data, err := ioutil.ReadFile(name)
		if err != nil {
			relog.Fatal("could not read %v: %v", name, err)
		}
		if err = json.Unmarshal(data, val); err != nil {
			relog.Fatal("could not unmarshal %v: %v", name, err)
		}
	}
}
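// Usage sketch for unmarshalFile (hypothetical, not from the original
// source): the config type and file name below are made up for illustration.
// On any read or parse error the helper exits via relog.Fatal.
func exampleUnmarshalFile() {
	type dbConfig struct {
		Host string `json:"host"`
		Port int    `json:"port"`
	}
	var cfg dbConfig
	unmarshalFile("db-config.json", &cfg) // a no-op when the name is empty
	relog.Info("loaded config: %+v", cfg)
}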
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [global parameters] command [command parameters]\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "\nThe global optional parameters are:\n")
		flag.PrintDefaults()
		fmt.Fprintf(os.Stderr, "\nThe commands are listed below. Use '%s <command> -h' for more help.\n\n", os.Args[0])
		for _, cmd := range commands {
			fmt.Fprintf(os.Stderr, " %s", cmd.name)
			if cmd.params != "" {
				fmt.Fprintf(os.Stderr, " %s", cmd.params)
			}
			fmt.Fprintf(os.Stderr, "\n")
		}
		fmt.Fprintf(os.Stderr, "\n")
	}
	dbConfigsFile, dbCredentialsFile := dbconfigs.RegisterCommonFlags()
	flag.Parse()

	if err := relog.SetLevelByName(*logLevel); err != nil {
		relog.Fatal("%v", err)
	}

	tabletAddr = fmt.Sprintf("%v:%v", "localhost", *port)
	mycnf := mysqlctl.NewMycnf(uint32(*tabletUid), *mysqlPort, mysqlctl.VtReplParams{})
	if *mysqlSocket != "" {
		mycnf.SocketFile = *mysqlSocket
	}

	dbcfgs, err := dbconfigs.Init(mycnf.SocketFile, *dbConfigsFile, *dbCredentialsFile)
	if err != nil {
		relog.Fatal("%v", err)
	}
	mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs.Dba, dbcfgs.Repl)

	action := flag.Arg(0)
	for _, cmd := range commands {
		if cmd.name == action {
			subFlags := flag.NewFlagSet(action, flag.ExitOnError)
			subFlags.Usage = func() {
				fmt.Fprintf(os.Stderr, "Usage: %s %s %s\n\n", os.Args[0], cmd.name, cmd.params)
				fmt.Fprintf(os.Stderr, "%s\n\n", cmd.help)
				subFlags.PrintDefaults()
			}
			cmd.method(mysqld, subFlags, flag.Args()[1:])
			return
		}
	}
	relog.Fatal("invalid action: %v", action)
}
// snapshotCmd takes a full snapshot of the database.
func snapshotCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	concurrency := subFlags.Int("concurrency", 4, "how many compression jobs to run simultaneously")
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		relog.Fatal("Command snapshot requires <db name>")
	}

	filename, _, _, err := mysqld.CreateSnapshot(subFlags.Arg(0), tabletAddr, false, *concurrency, false, nil)
	if err != nil {
		relog.Fatal("snapshot failed: %v", err)
	} else {
		relog.Info("manifest location: %v", filename)
	}
}
// tabletParamToTabletAlias takes either an old style ZK tablet path or a
// new style tablet alias as a string, and returns a TabletAlias.
func tabletParamToTabletAlias(param string) topo.TabletAlias {
	if param[0] == '/' {
		// old zookeeper path, convert to new-style string tablet alias
		zkPathParts := strings.Split(param, "/")
		if len(zkPathParts) != 6 || zkPathParts[0] != "" || zkPathParts[1] != "zk" || zkPathParts[3] != "vt" || zkPathParts[4] != "tablets" {
			relog.Fatal("Invalid tablet path: %v", param)
		}
		param = zkPathParts[2] + "-" + zkPathParts[5]
	}
	result, err := topo.ParseTabletAliasString(param)
	if err != nil {
		relog.Fatal("Invalid tablet alias %v: %v", param, err)
	}
	return result
}
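// Hypothetical example (assumed, not from the original source): both calls
// below should resolve to the same alias; the cell name and uid are made up.
//
//	tabletParamToTabletAlias("/zk/nyc/vt/tablets/0000062344") // old ZK path
//	tabletParamToTabletAlias("nyc-0000062344")                // new-style alias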
// StartHttpServer binds and starts an http server.
// usually it is called like:
//   umgmt.AddStartupCallback(func () { umgmt.StartHttpServer(addr) })
func StartHttpServer(addr string) {
	httpListener, httpErr := net.Listen("tcp", addr)
	if httpErr != nil {
		relog.Fatal("StartHttpServer failed: %v", httpErr)
	}
	go asyncListener(httpListener)
}
// zkocc: a proxy for zk
func main() {
	flag.Parse()
	if err := servenv.Init("zkocc"); err != nil {
		relog.Fatal("Error in servenv.Init: %v", err)
	}

	rpc.HandleHTTP()
	jsonrpc.ServeHTTP()
	jsonrpc.ServeRPC()
	bsonrpc.ServeHTTP()
	bsonrpc.ServeRPC()

	zk.RegisterZkReader(zkocc.NewZkReader(*resolveLocal, flag.Args()))

	// we delegate out startup to the micromanagement server so these actions
	// will occur after we have obtained our socket.
	umgmt.SetLameDuckPeriod(float32(*lameDuckPeriod))
	umgmt.SetRebindDelay(float32(*rebindDelay))
	umgmt.AddStartupCallback(func() {
		umgmt.StartHttpServer(fmt.Sprintf(":%v", *port))
	})

	relog.Info("started zkocc %v", *port)
	umgmtSocket := fmt.Sprintf("/tmp/zkocc-%08x-umgmt.sock", *port)
	if umgmtErr := umgmt.ListenAndServe(umgmtSocket); umgmtErr != nil {
		relog.Error("umgmt.ListenAndServe err: %v", umgmtErr)
	}
	relog.Info("done")
}
// partialRestoreCmd restores a server from a split snapshot manifest.
func partialRestoreCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	fetchConcurrency := subFlags.Int("fetch-concurrency", 3, "how many files to fetch simultaneously")
	fetchRetryCount := subFlags.Int("fetch-retry-count", 3, "how many times to retry a failed transfer")
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		relog.Fatal("Command partialrestore requires <split snapshot manifest file>")
	}

	rs, err := mysqlctl.ReadSplitSnapshotManifest(subFlags.Arg(0))
	if err == nil {
		err = mysqld.RestoreFromPartialSnapshot(rs, *fetchConcurrency, *fetchRetryCount)
	}
	if err != nil {
		relog.Fatal("partialrestore failed: %v", err)
	}
}
// partialSnapshotCmd takes a split snapshot of the given key range.
func partialSnapshotCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	start := subFlags.String("start", "", "start of the key range")
	end := subFlags.String("end", "", "end of the key range")
	concurrency := subFlags.Int("concurrency", 4, "how many compression jobs to run simultaneously")
	subFlags.Parse(args)
	if subFlags.NArg() != 2 {
		relog.Fatal("action partialsnapshot requires <db name> <key name>")
	}

	filename, err := mysqld.CreateSplitSnapshot(subFlags.Arg(0), subFlags.Arg(1), key.HexKeyspaceId(*start), key.HexKeyspaceId(*end), tabletAddr, false, *concurrency, nil)
	if err != nil {
		relog.Fatal("partialsnapshot failed: %v", err)
	} else {
		relog.Info("manifest location: %v", filename)
	}
}
// snapshotSourceStartCmd takes a snapshot and leaves the server in
// snapshot-source mode.
func snapshotSourceStartCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	concurrency := subFlags.Int("concurrency", 4, "how many checksum jobs to run simultaneously")
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		relog.Fatal("Command snapshotsourcestart requires <db name>")
	}

	filename, slaveStartRequired, readOnly, err := mysqld.CreateSnapshot(subFlags.Arg(0), tabletAddr, false, *concurrency, true, nil)
	if err != nil {
		relog.Fatal("snapshot failed: %v", err)
	} else {
		relog.Info("manifest location: %v", filename)
		relog.Info("slave start required: %v", slaveStartRequired)
		relog.Info("read only: %v", readOnly)
	}
}
func main() { flag.Parse() servenv.Init("vt_binlog_server") binlogServer := new(BinlogServer) if *mycnfFile == "" { relog.Fatal("Please specify the path for mycnf file.") } mycnf, err := mysqlctl.ReadMycnf(*mycnfFile) if err != nil { relog.Fatal("Error reading mycnf file %v", *mycnfFile) } binlogServer.mycnf = mycnf binlogServer.dbname = strings.ToLower(strings.TrimSpace(*dbname)) binlogServer.blpStats = NewBlpStats() rpc.Register(binlogServer) rpcwrap.RegisterAuthenticated(binlogServer) //bsonrpc.ServeAuthRPC() rpc.HandleHTTP() bsonrpc.ServeHTTP() bsonrpc.ServeRPC() umgmt.SetLameDuckPeriod(30.0) umgmt.SetRebindDelay(0.01) umgmt.AddStartupCallback(func() { umgmt.StartHttpServer(fmt.Sprintf(":%v", *port)) }) umgmt.AddStartupCallback(func() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGTERM) go func() { for sig := range c { umgmt.SigTermHandler(sig) } }() }) relog.Info("vt_binlog_server registered at port %v", *port) umgmtSocket := fmt.Sprintf("/tmp/vt_binlog_server-%08x-umgmt.sock", *port) if umgmtErr := umgmt.ListenAndServe(umgmtSocket); umgmtErr != nil { relog.Error("umgmt.ListenAndServe err: %v", umgmtErr) } relog.Info("done") }
// multiRestoreCmd restores a key range from one or more split snapshot sources.
func multiRestoreCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	start := subFlags.String("start", "", "start of the key range")
	end := subFlags.String("end", "", "end of the key range")
	fetchRetryCount := subFlags.Int("fetch-retry-count", 3, "how many times to retry a failed transfer")
	concurrency := subFlags.Int("concurrency", 8, "how many concurrent db inserts to run simultaneously")
	fetchConcurrency := subFlags.Int("fetch-concurrency", 4, "how many files to fetch simultaneously")
	insertTableConcurrency := subFlags.Int("insert-table-concurrency", 4, "how many myisam tables to load into a single destination table simultaneously")
	strategy := subFlags.String("strategy", "", "which strategy to use for restore, can contain:\n"+
		" skipAutoIncrement(TTT): we won't add the AUTO_INCREMENT back to that table\n"+
		" delayPrimaryKey: we won't add the primary key until after the table is populated\n"+
		" delaySecondaryIndexes: we won't add the secondary indexes until after the table is populated\n"+
		" useMyIsam: create the table as MyISAM, then convert it to InnoDB after population\n"+
		" writeBinLogs: write all operations to the binlogs")
	subFlags.Parse(args)

	s, err := key.HexKeyspaceId(*start).Unhex()
	if err != nil {
		relog.Fatal("Invalid start key %v: %v", *start, err)
	}
	e, err := key.HexKeyspaceId(*end).Unhex()
	if err != nil {
		relog.Fatal("Invalid end key %v: %v", *end, err)
	}
	keyRange := key.KeyRange{Start: s, End: e}

	if subFlags.NArg() < 2 {
		relog.Fatal("multirestore requires <destination_dbname> <source_host>[/<source_dbname>]... %v", args)
	}
	dbName, dbis := subFlags.Arg(0), subFlags.Args()[1:]
	sources := make([]*url.URL, len(dbis))
	uids := make([]uint32, len(dbis))
	for i, dbi := range dbis {
		if !strings.HasPrefix(dbi, "vttp://") && !strings.HasPrefix(dbi, "http://") {
			dbi = "vttp://" + dbi
		}
		dbUrl, err := url.Parse(dbi)
		if err != nil {
			relog.Fatal("incorrect source url: %v", err)
		}
		sources[i] = dbUrl
		uids[i] = uint32(i)
	}
	if err := mysqld.RestoreFromMultiSnapshot(dbName, keyRange, sources, uids, *concurrency, *fetchConcurrency, *insertTableConcurrency, *fetchRetryCount, *strategy); err != nil {
		relog.Fatal("multirestore failed: %v", err)
	}
}
// shutdownCmd shuts down the mysqld server.
func shutdownCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	waitTime := subFlags.Duration("wait-time", mysqlctl.MysqlWaitTime, "how long to wait for shutdown")
	subFlags.Parse(args)

	if mysqlErr := mysqlctl.Shutdown(mysqld, true, *waitTime); mysqlErr != nil {
		relog.Fatal("failed to shut down mysql: %v", mysqlErr)
	}
}
// restoreCmd restores a server from a snapshot manifest.
func restoreCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	dontWaitForSlaveStart := subFlags.Bool("dont-wait-for-slave-start", false, "won't wait for replication to start (useful when restoring from master server)")
	fetchConcurrency := subFlags.Int("fetch-concurrency", 3, "how many files to fetch simultaneously")
	fetchRetryCount := subFlags.Int("fetch-retry-count", 3, "how many times to retry a failed transfer")
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		relog.Fatal("Command restore requires <snapshot manifest file>")
	}

	rs, err := mysqlctl.ReadSnapshotManifest(subFlags.Arg(0))
	if err == nil {
		err = mysqld.RestoreFromSnapshot(rs, *fetchConcurrency, *fetchRetryCount, *dontWaitForSlaveStart, nil)
	}
	if err != nil {
		relog.Fatal("restore failed: %v", err)
	}
}
func commandExportZkns(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		relog.Fatal("action ExportZkns requires <cell name|zk vt root path>")
	}
	cell := vtPathToCell(subFlags.Arg(0))
	return "", wr.ExportZkns(cell)
}
func commandExportZknsForKeyspace(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		relog.Fatal("action ExportZknsForKeyspace requires <keyspace|zk global keyspace path>")
	}
	keyspace := keyspaceParamToKeyspace(subFlags.Arg(0))
	return "", wr.ExportZknsForKeyspace(keyspace)
}
func commandListShardActions(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		relog.Fatal("action ListShardActions requires <keyspace/shard|zk shard path>")
	}
	keyspace, shard := shardParamToKeyspaceShard(subFlags.Arg(0))
	return "", listActionsByShard(wr.TopoServer(), keyspace, shard)
}
// loadCustomRules reads the given file and unmarshals it into query rules.
// An empty file name yields an empty rule set.
func loadCustomRules(customrules string) *ts.QueryRules {
	if customrules == "" {
		return ts.NewQueryRules()
	}

	data, err := ioutil.ReadFile(customrules)
	if err != nil {
		relog.Fatal("Error reading file %v: %v", customrules, err)
	}

	qrs := ts.NewQueryRules()
	err = qrs.UnmarshalJSON(data)
	if err != nil {
		relog.Fatal("Error unmarshaling query rules: %v", err)
	}
	return qrs
}
func initCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	waitTime := subFlags.Duration("wait-time", mysqlctl.MysqlWaitTime, "how long to wait for startup")
	subFlags.Parse(args)

	if err := mysqlctl.Init(mysqld, *waitTime); err != nil {
		relog.Fatal("failed init mysql: %v", err)
	}
}
func teardownCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	force := subFlags.Bool("force", false, "will remove the root directory even if mysqld shutdown fails")
	subFlags.Parse(args)

	if err := mysqlctl.Teardown(mysqld, *force); err != nil {
		relog.Fatal("failed teardown mysql (forced? %v): %v", *force, err)
	}
}
// CopyShards will create the shards in the destination topo.
func CopyShards(fromTS, toTS topo.Server, deleteKeyspaceShards bool) {
	keyspaces, err := fromTS.GetKeyspaces()
	if err != nil {
		relog.Fatal("fromTS.GetKeyspaces failed: %v", err)
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			shards, err := fromTS.GetShardNames(keyspace)
			if err != nil {
				rec.RecordError(err)
				return
			}

			if deleteKeyspaceShards {
				if err := toTS.DeleteKeyspaceShards(keyspace); err != nil {
					rec.RecordError(err)
					return
				}
			}

			for _, shard := range shards {
				wg.Add(1)
				go func(keyspace, shard string) {
					defer wg.Done()
					if err := toTS.CreateShard(keyspace, shard); err != nil {
						if err == topo.ErrNodeExists {
							relog.Warning("shard %v/%v already exists", keyspace, shard)
						} else {
							rec.RecordError(err)
						}
					}
				}(keyspace, shard)
			}
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		relog.Fatal("copyShards failed: %v", rec.Error())
	}
}
func NewCachePool(commandLine []string, queryTimeout time.Duration, idleTimeout time.Duration) *CachePool {
	cp := &CachePool{idleTimeout: idleTimeout}
	http.Handle(statsURL, cp)
	if len(commandLine) == 0 {
		return cp
	}
	cp.commandLine = commandLine

	// Start with memcached defaults
	cp.capacity = 1024 - 50
	cp.port = "11211"
	for i := 0; i < len(commandLine); i++ {
		switch commandLine[i] {
		case "-p", "-s":
			i++
			if i == len(commandLine) {
				relog.Fatal("expecting value after %v", commandLine[i-1])
			}
			cp.port = commandLine[i]
		case "-c":
			i++
			if i == len(commandLine) {
				relog.Fatal("expecting value after -c")
			}
			capacity, err := strconv.Atoi(commandLine[i])
			if err != nil {
				relog.Fatal("%v", err)
			}
			if capacity <= 50 {
				relog.Fatal("insufficient capacity: %d", capacity)
			}
			cp.capacity = capacity - 50
		}
	}

	seconds := uint64(queryTimeout / time.Second)
	// Add an additional grace period for
	// memcache expiry of deleted items
	if seconds != 0 {
		cp.DeleteExpiry = 2*seconds + 15
	}
	return cp
}
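// Usage sketch for NewCachePool (hypothetical, not from the original source):
// the command line mirrors how memcached would be launched; -s/-p select the
// socket or port and -c sets the connection limit, of which 50 are reserved.
func exampleNewCachePool() {
	cp := NewCachePool(
		[]string{"memcached", "-s", "/tmp/memcache.sock", "-c", "1024"},
		30*time.Second, // queryTimeout: derives the memcache delete grace period
		10*time.Minute, // idleTimeout
	)
	relog.Info("cache pool port: %v capacity: %v", cp.port, cp.capacity)
}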
// readMycnf reads the mycnf file for the given tablet uid, falling back to
// the default path when no file was specified on the command line.
func readMycnf(tabletId uint32) *mysqlctl.Mycnf {
	if *mycnfFile == "" {
		*mycnfFile = mysqlctl.MycnfFile(tabletId)
	}
	mycnf, mycnfErr := mysqlctl.ReadMycnf(*mycnfFile)
	if mycnfErr != nil {
		relog.Fatal("mycnf read failed: %v", mycnfErr)
	}
	return mycnf
}
// snapshotSourceEndCmd takes the server out of snapshot-source mode.
func snapshotSourceEndCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	slaveStartRequired := subFlags.Bool("slave-start", false, "will restart replication")
	readWrite := subFlags.Bool("read-write", false, "will make the server read-write")
	subFlags.Parse(args)

	err := mysqld.SnapshotSourceEnd(*slaveStartRequired, !(*readWrite), true)
	if err != nil {
		relog.Fatal("snapshotsourceend failed: %v", err)
	}
}
func main() {
	defer func() {
		if panicErr := recover(); panicErr != nil {
			relog.Fatal("panic: %v", tb.Errorf("%v", panicErr))
		}
	}()

	flag.Parse()
	args := flag.Args()
	if len(args) != 0 {
		flag.Usage()
		os.Exit(1)
	}

	logLevel, err := relog.LogNameToLogLevel(*logLevel)
	if err != nil {
		relog.Fatal("%v", err)
	}
	relog.SetLevel(logLevel)

	if *fromTopo == "" || *toTopo == "" {
		relog.Fatal("Need both from and to topo")
	}

	fromTS := topo.GetServerByName(*fromTopo)
	toTS := topo.GetServerByName(*toTopo)

	if *doKeyspaces {
		topotools.CopyKeyspaces(fromTS, toTS)
	}
	if *doShards {
		topotools.CopyShards(fromTS, toTS, *deleteKeyspaceShards)
	}
	if *doTablets {
		topotools.CopyTablets(fromTS, toTS)
	}
}
func commandStaleActions(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
	maxStaleness := subFlags.Duration("max-staleness", 5*time.Minute, "how long since the last modification before an action is considered stale")
	purge := subFlags.Bool("purge", false, "purge stale actions")
	subFlags.Parse(args)
	if subFlags.NArg() == 0 {
		relog.Fatal("action StaleActions requires <zk action path>")
	}
	zkts, ok := wr.TopoServer().(*zktopo.Server)
	if !ok {
		return "", fmt.Errorf("StaleActions requires a zktopo.Server")
	}
	zkPaths, err := resolveWildcards(wr, subFlags.Args())
	if err != nil {
		return "", err
	}
	var errCount sync2.AtomicInt32
	wg := sync.WaitGroup{}
	for _, apath := range zkPaths {
		wg.Add(1)
		go func(zkActionPath string) {
			defer wg.Done()
			staleActions, err := staleActions(zkts, zkActionPath, *maxStaleness)
			if err != nil {
				errCount.Add(1)
				relog.Error("can't check stale actions: %v %v", zkActionPath, err)
				return
			}
			for _, action := range staleActions {
				fmt.Println(fmtAction(action))
			}
			if *purge && len(staleActions) > 0 {
				err := zkts.PurgeActions(zkActionPath, tm.ActionNodeCanBePurged)
				if err != nil {
					errCount.Add(1)
					relog.Error("can't purge stale actions: %v %v", zkActionPath, err)
					return
				}
			}
		}(apath)
	}
	wg.Wait()
	if errCount.Get() > 0 {
		return "", fmt.Errorf("some errors occurred, check the log")
	}
	return "", nil
}