// slavePosBehindReplication reports whether the current streaming position
// is at or behind the replication position, which is the expected state.
func (blp *Blp) slavePosBehindReplication() bool {
    repl, err := blp.logMetadata.GetCurrentReplicationPosition()
    if err != nil {
        relog.Error(err.Error())
        panic(NewBinlogParseError(fmt.Sprintf("Error in obtaining current replication position %v", err)))
    }
    currentCoord := blp.currentPosition.Position
    if repl.MasterFilename == currentCoord.MasterFilename {
        if currentCoord.MasterPosition <= repl.MasterPosition {
            return true
        }
    } else {
        // Binlog filenames carry a numeric extension (e.g. "vt-bin.000123");
        // when the filenames differ, compare the extensions instead.
        replExt, err := strconv.ParseUint(strings.Split(repl.MasterFilename, ".")[1], 10, 64)
        if err != nil {
            relog.Error(err.Error())
            panic(NewBinlogParseError(fmt.Sprintf("Error in extracting binlog file extension %v", err)))
        }
        parseExt, err := strconv.ParseUint(strings.Split(currentCoord.MasterFilename, ".")[1], 10, 64)
        if err != nil {
            relog.Error(err.Error())
            panic(NewBinlogParseError(fmt.Sprintf("Error in extracting binlog file extension %v", err)))
        }
        if replExt >= parseExt {
            return true
        }
    }
    return false
}
// A non-nil return signals that event processing should stop.
func (agent *ActionAgent) dispatchAction(actionPath, data string) error {
    relog.Info("action dispatch %v", actionPath)
    actionNode, err := ActionNodeFromJson(data, actionPath)
    if err != nil {
        relog.Error("action decode failed: %v %v", actionPath, err)
        return nil
    }

    logfile := flag.Lookup("logfile").Value.String()
    if !strings.HasPrefix(logfile, "/dev") {
        logfile = path.Join(path.Dir(logfile), "vtaction.log")
    }
    cmd := []string{
        agent.vtActionBinFile,
        "-action", actionNode.Action,
        "-action-node", actionPath,
        "-action-guid", actionNode.ActionGuid,
        "-mycnf-file", agent.MycnfFile,
        "-logfile", logfile,
    }
    cmd = append(cmd, agent.ts.GetSubprocessFlags()...)
    if agent.DbConfigsFile != "" {
        cmd = append(cmd, "-db-configs-file", agent.DbConfigsFile)
    }
    if agent.DbCredentialsFile != "" {
        cmd = append(cmd, "-db-credentials-file", agent.DbCredentialsFile)
    }
    relog.Info("action launch %v", cmd)
    vtActionCmd := exec.Command(cmd[0], cmd[1:]...)

    stdOut, vtActionErr := vtActionCmd.CombinedOutput()
    if vtActionErr != nil {
        relog.Error("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut)
        // If the action failed, preserve single execution path semantics.
        return vtActionErr
    }

    relog.Info("agent action completed %v %s", actionPath, stdOut)

    // Save the old tablet so callbacks can have a better idea of the precise
    // nature of the transition.
    oldTablet := agent.Tablet().Tablet

    // Actions should have side effects on the tablet, so reload the data.
    if err := agent.readTablet(); err != nil {
        relog.Warning("failed rereading tablet after action - services may be inconsistent: %v %v", actionPath, err)
    } else {
        agent.runChangeCallbacks(oldTablet, actionPath)
    }

    // Maybe invalidate the schema.
    // This adds a dependency between tabletmanager and tabletserver,
    // so it's not ideal. But I (alainjobart) think it's better
    // to have up to date schema in vtocc.
    if actionNode.Action == TABLET_ACTION_APPLY_SCHEMA {
        tabletserver.ReloadSchema()
    }

    return nil
}
// slavePosBehindReplication reports whether the current streaming position
// is at or behind the replication position, which is the expected state.
func (blp *Blp) slavePosBehindReplication() bool {
    repl, err := blp.globalState.getReplicationPosition()
    if err != nil {
        relog.Error(err.Error())
        panic(NewBinlogParseError(REPLICATION_ERROR, fmt.Sprintf("Error in obtaining current replication position %v", err)))
    }
    if repl.MasterFilename == blp.currentPosition.Position.MasterFilename {
        if blp.currentPosition.Position.MasterPosition <= repl.MasterPosition {
            return true
        }
    } else {
        // The filenames differ: compare the numeric binlog file extensions instead.
        replExt, err := strconv.ParseUint(strings.Split(repl.MasterFilename, ".")[1], 10, 64)
        if err != nil {
            relog.Error(err.Error())
            panic(NewBinlogParseError(CODE_ERROR, fmt.Sprintf("Error in extracting replication position %v", err)))
        }
        parseExt, err := strconv.ParseUint(strings.Split(blp.currentPosition.Position.MasterFilename, ".")[1], 10, 64)
        if err != nil {
            relog.Error(err.Error())
            panic(NewBinlogParseError(CODE_ERROR, fmt.Sprintf("Error in extracting replication position %v", err)))
        }
        if replExt >= parseExt {
            return true
        }
    }
    return false
}
func (zkd *Zkd) init(preserveData bool) error {
    relog.Info("zkd.Init")
    for _, path := range zkd.config.DirectoryList() {
        if err := os.MkdirAll(path, 0775); err != nil {
            relog.Error(err.Error())
            return err
        }
        // FIXME(msolomon) validate permissions?
    }

    configData, err := zkd.makeCfg()
    if err == nil {
        err = ioutil.WriteFile(zkd.config.ConfigFile(), []byte(configData), 0664)
    }
    if err != nil {
        relog.Error("failed creating %v: %v", zkd.config.ConfigFile(), err)
        return err
    }

    err = zkd.config.WriteMyid()
    if err != nil {
        relog.Error("failed creating %v: %v", zkd.config.MyidFile(), err)
        return err
    }

    if err = zkd.Start(); err != nil {
        relog.Error("failed starting, check %v", zkd.config.LogDir())
        return err
    }

    zkAddr := fmt.Sprintf("localhost:%v", zkd.config.ClientPort)
    zk, session, err := zookeeper.Dial(zkAddr, StartWaitTime*time.Second)
    if err != nil {
        return err
    }
    event := <-session
    if event.State != zookeeper.STATE_CONNECTED {
        // err is nil at this point, so return an explicit error.
        return fmt.Errorf("zk connect failed: %v", event.State)
    }
    defer zk.Close()

    if !preserveData {
        _, err = zk.Create("/zk", "", 0, zookeeper.WorldACL(zookeeper.PERM_ALL))
        if err != nil && !zookeeper.IsError(err, zookeeper.ZNODEEXISTS) {
            return err
        }
        if zkd.config.Global {
            _, err = zk.Create("/zk/global", "", 0, zookeeper.WorldACL(zookeeper.PERM_ALL))
            if err != nil && !zookeeper.IsError(err, zookeeper.ZNODEEXISTS) {
                return err
            }
        }
    }
    return nil
}
func (blp *Blp) streamBinlog(sendReply mysqlctl.SendUpdateStreamResponse) {
    var readErr error
    defer func() {
        reqIdentifier := fmt.Sprintf("%v, line: '%v'", blp.currentPosition.Position.String(), blp.currentLine)
        if x := recover(); x != nil {
            serr, ok := x.(*BinlogParseError)
            if !ok {
                relog.Error("[%v:%v] Uncaught panic for stream @ %v, err: %v", blp.keyspaceRange.Start.Hex(), blp.keyspaceRange.End.Hex(), reqIdentifier, x)
                panic(x)
            }
            err := *serr
            if readErr != nil {
                relog.Error("[%v:%v] StreamBinlog error @ %v, error: %v, readErr %v", blp.keyspaceRange.Start.Hex(), blp.keyspaceRange.End.Hex(), reqIdentifier, err, readErr)
                err = BinlogParseError{Msg: fmt.Sprintf("%v, readErr: %v", err, readErr)}
            } else {
                relog.Error("[%v:%v] StreamBinlog error @ %v, error: %v", blp.keyspaceRange.Start.Hex(), blp.keyspaceRange.End.Hex(), reqIdentifier, err)
            }
            sendError(sendReply, reqIdentifier, err, blp.currentPosition)
        }
    }()

    blr := mysqlctl.NewBinlogReader(blp.binlogPrefix)

    var binlogReader io.Reader
    var blrReader, blrWriter *os.File
    var err, pipeErr error

    blrReader, blrWriter, pipeErr = os.Pipe()
    if pipeErr != nil {
        panic(NewBinlogParseError(pipeErr.Error()))
    }
    defer blrWriter.Close()
    defer blrReader.Close()

    readErrChan := make(chan error, 1)
    // This reads the binlogs - read end of the data pipeline.
    go blp.getBinlogStream(blrWriter, blr, readErrChan)

    // Decode end of the data pipeline.
    binlogDecoder := new(mysqlctl.BinlogDecoder)
    binlogReader, err = binlogDecoder.DecodeMysqlBinlog(blrReader)
    if err != nil {
        panic(NewBinlogParseError(err.Error()))
    }

    // This goroutine monitors the exit of the read data pipeline.
    go func(readErr *error, readErrChan chan error, binlogDecoder *mysqlctl.BinlogDecoder) {
        *readErr = <-readErrChan
        if *readErr != nil {
            binlogDecoder.Kill()
        }
    }(&readErr, readErrChan, binlogDecoder)

    blp.parseBinlogEvents(sendReply, binlogReader)
}
func (pd *pdns) Serve(r io.Reader, w io.Writer) {
    relog.Info("starting zkns resolver")
    bufr := bufio.NewReader(r)
    needHandshake := true
    for {
        line, isPrefix, err := bufr.ReadLine()
        if err == nil && isPrefix {
            err = errLongLine
        }
        if err == io.EOF {
            return
        }
        if err != nil {
            relog.Error("failed reading request: %v", err)
            continue
        }

        if needHandshake {
            if !bytes.Equal(line, GREETING_ABI_V2) {
                relog.Error("handshake failed: %v != %v", line, GREETING_ABI_V2)
                write(w, FAIL_REPLY)
            } else {
                needHandshake = false
                write(w, GREETING_REPLY)
            }
            continue
        }

        requestCount.Add(1)
        req, err := parseReq(line)
        if err != nil {
            errorCount.Add(1)
            relog.Error("failed parsing request: %v", err)
            write(w, FAIL_REPLY)
            continue
        }

        switch req.kind {
        case KIND_Q:
            respLines, err := pd.handleQReq(req)
            if err != nil {
                errorCount.Add(1)
                relog.Error("failed query: %v %v", req.qname, err)
                write(w, FAIL_REPLY)
                continue
            }
            for _, line := range respLines {
                write(w, line)
            }
        case KIND_AXFR:
            // FIXME(mike) unimplemented
        }
        write(w, END_REPLY)
    }
}
// Operate on restore tablet.
// Check that the SnapshotManifest is valid and the master has not changed.
// Put Mysql in read-only mode.
// Load the snapshot from source tablet.
// FIXME(alainjobart) which state should the tablet be in? it is a slave,
// but with a much smaller keyspace. For now, do the same as snapshot,
// but this is very dangerous, it cannot be used as a real slave
// or promoted to master in the same shard!
// Put tablet into the replication graph as a spare.
func (ta *TabletActor) partialRestore(actionNode *ActionNode) error {
    args := actionNode.args.(*RestoreArgs)
    BackfillAlias(args.ZkSrcTabletPath, &args.SrcTabletAlias)
    BackfillAlias(args.ZkParentPath, &args.ParentAlias)

    // read our current tablet, verify its state
    tablet, err := ta.ts.GetTablet(ta.tabletAlias)
    if err != nil {
        return err
    }
    if tablet.Type != topo.TYPE_IDLE {
        return fmt.Errorf("expected idle type, not %v: %v", tablet.Type, ta.tabletAlias)
    }

    // read the source tablet
    sourceTablet, err := ta.ts.GetTablet(args.SrcTabletAlias)
    if err != nil {
        return err
    }

    // read the parent tablet, verify its state
    parentTablet, err := ta.ts.GetTablet(args.ParentAlias)
    if err != nil {
        return err
    }
    if parentTablet.Type != topo.TYPE_MASTER {
        return fmt.Errorf("restore expected master parent: %v %v", parentTablet.Type, args.ParentAlias)
    }

    // read & unpack the manifest
    ssm := new(mysqlctl.SplitSnapshotManifest)
    if err := fetchAndParseJsonFile(sourceTablet.Addr, args.SrcFilePath, ssm); err != nil {
        return err
    }

    // change our type to RESTORE and set all the other arguments.
    if err := ta.changeTypeToRestore(tablet, sourceTablet, parentTablet.Alias(), ssm.KeyRange); err != nil {
        return err
    }

    // do the work
    if err := ta.mysqld.RestoreFromPartialSnapshot(ssm, args.FetchConcurrency, args.FetchRetryCount); err != nil {
        relog.Error("RestoreFromPartialSnapshot failed: %v", err)
        if err := Scrap(ta.ts, ta.tabletAlias, false); err != nil {
            relog.Error("Failed to Scrap after failed RestoreFromPartialSnapshot: %v", err)
        }
        return err
    }

    // change to TYPE_SPARE, we're done!
    return ChangeType(ta.ts, ta.tabletAlias, topo.TYPE_SPARE, true)
}
func handleSnapshot(rw http.ResponseWriter, req *http.Request, snapshotDir string, allowedPaths []string) {
    // if we get any error, we'll try to write a server error
    // (it will fail if the header has already been written, but at least
    // we won't crash vttablet)
    defer func() {
        if x := recover(); x != nil {
            relog.Error("vttablet http server panic: %v", x)
            http.Error(rw, fmt.Sprintf("500 internal server error: %v", x), http.StatusInternalServerError)
        }
    }()

    // /snapshot must be rewritten to the actual location of the snapshot.
    relative, err := filepath.Rel(mysqlctl.SnapshotURLPath, req.URL.Path)
    if err != nil {
        relog.Error("bad snapshot relative path %v %v", req.URL.Path, err)
        http.Error(rw, "400 bad request", http.StatusBadRequest)
        return
    }

    // Make sure that realPath is absolute and resolve any escaping from
    // snapshotDir through a symlink.
    realPath, err := filepath.Abs(path.Join(snapshotDir, relative))
    if err != nil {
        relog.Error("bad snapshot absolute path %v %v", req.URL.Path, err)
        http.Error(rw, "400 bad request", http.StatusBadRequest)
        return
    }

    realPath, err = filepath.EvalSymlinks(realPath)
    if err != nil {
        relog.Error("bad snapshot symlink eval %v %v", req.URL.Path, err)
        http.Error(rw, "400 bad request", http.StatusBadRequest)
        return
    }

    // Resolve all the possible roots and make sure we're serving
    // from one of them
    for _, allowedPath := range allowedPaths {
        // eval the symlinks of the allowed path
        allowedPath, err := filepath.EvalSymlinks(allowedPath)
        if err != nil {
            continue
        }
        if strings.HasPrefix(realPath, allowedPath) {
            sendFile(rw, req, realPath)
            return
        }
    }

    relog.Error("bad snapshot real path %v %v", req.URL.Path, realPath)
    http.Error(rw, "400 bad request", http.StatusBadRequest)
}
// handleError is meant to be deferred: it recovers from a panic raised during
// binlog playback, stores the panic value in err if it is an error, and logs
// the last transaction position so playback can be resumed from there.
// Any non-error panic value is re-raised.
func handleError(err *error, blp *BinlogPlayer) {
    lastTxnPosition := blp.recoveryState.Position
    if x := recover(); x != nil {
        serr, ok := x.(error)
        if ok {
            *err = serr
            relog.Error("Last Txn Position '%v', error %v", lastTxnPosition, serr)
            return
        }
        relog.Error("uncaught panic %v", x)
        panic(x)
    }
}
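// Usage sketch (hypothetical caller, not from the original source): handleError
// only has an effect when deferred with a pointer to a named return value, so
// that a panic during playback is converted into an ordinary error.
func (blp *BinlogPlayer) playEvents() (err error) {
    defer handleError(&err, blp)
    // ... playback work that may panic with an error value ...
    return nil
}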
func (wr *Wrangler) SnapshotSourceEnd(tabletAlias topo.TabletAlias, slaveStartRequired, readWrite bool, originalType topo.TabletType) (err error) {
    var ti *topo.TabletInfo
    ti, err = wr.ts.GetTablet(tabletAlias)
    if err != nil {
        return
    }

    var actionPath string
    actionPath, err = wr.ai.SnapshotSourceEnd(tabletAlias, &tm.SnapshotSourceEndArgs{slaveStartRequired, !readWrite})
    if err != nil {
        return
    }

    // wait for completion, and save the error
    err = wr.ai.WaitForCompletion(actionPath, wr.actionTimeout())
    if err != nil {
        relog.Error("SnapshotSourceEnd failed (%v), leaving tablet type alone", err)
        return
    }

    if ti.Tablet.Parent.Uid == topo.NO_TABLET {
        ti.Tablet.Type = topo.TYPE_MASTER
        err = topo.UpdateTablet(wr.ts, ti)
    } else {
        err = wr.ChangeType(ti.Alias(), originalType, false)
    }

    return err
}
func (hh *httpHandler) ServeHTTP(c http.ResponseWriter, req *http.Request) {
    conn := &httpConnectionBroker{c, req.Body}
    codec := hh.cFactory(conn)
    if err := rpc.ServeRequestWithContext(codec, &proto.Context{RemoteAddr: req.RemoteAddr}); err != nil {
        relog.Error("rpcwrap: %v", err)
    }
}
// ConcurrentMap applies fun in a concurrent manner on integers from 0
// to n-1 (they are assumed to be indexes of some slice containing
// items to be processed). The last error returned by a fun
// application will be returned; earlier errors will only be
// logged. It will use concurrency goroutines.
func ConcurrentMap(concurrency, n int, fun MapFunc) error {
    errors := make(chan error)
    work := make(chan int, n)

    for i := 0; i < n; i++ {
        work <- i
    }
    close(work)

    for j := 0; j < concurrency; j++ {
        go func() {
            for i := range work {
                errors <- fun(i)
            }
        }()
    }

    var err error
    for i := 0; i < n; i++ {
        if e := <-errors; e != nil {
            if err != nil {
                relog.Error("multiple errors, this one happened but it won't be returned: %v", err)
            }
            err = e
        }
    }
    return err
}
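// Usage sketch (hypothetical, not from the original source): run a per-item
// check over a slice with at most 8 concurrent workers; checkTablet is an
// assumed helper, and any error it returns is surfaced by ConcurrentMap.
func checkAllTablets(tablets []*topo.TabletInfo) error {
    return ConcurrentMap(8, len(tablets), func(i int) error {
        return checkTablet(tablets[i])
    })
}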
// forceMasterSnapshot: Normally a master is not a viable tablet to snapshot.
// However, there are degenerate cases where you need to override this, for
// instance the initial clone of a new master.
func (wr *Wrangler) PartialSnapshot(tabletAlias topo.TabletAlias, keyName string, startKey, endKey key.HexKeyspaceId, forceMasterSnapshot bool, concurrency int) (manifest string, parent topo.TabletAlias, err error) {
    restoreAfterSnapshot, err := wr.prepareToSnapshot(tabletAlias, forceMasterSnapshot)
    if err != nil {
        return
    }
    defer func() {
        err = replaceError(err, restoreAfterSnapshot())
    }()

    actionPath, err := wr.ai.PartialSnapshot(tabletAlias, &tm.PartialSnapshotArgs{keyName, startKey, endKey, concurrency})
    if err != nil {
        return
    }

    results, actionErr := wr.ai.WaitForCompletionReply(actionPath, wr.actionTimeout())
    var reply *tm.SnapshotReply
    if actionErr != nil {
        relog.Error("PartialSnapshot failed, still restoring tablet type: %v", actionErr)
        reply = &tm.SnapshotReply{}
    } else {
        reply = results.(*tm.SnapshotReply)
        tm.BackfillAlias(reply.ZkParentPath, &reply.ParentAlias)
    }

    return reply.ManifestPath, reply.ParentAlias, actionErr
}
func main() {
    dbConfigsFile, dbCredentialsFile := dbconfigs.RegisterCommonFlags()
    flag.Parse()

    relog.Info("started vtaction %v", os.Args)

    rpc.HandleHTTP()
    jsonrpc.ServeHTTP()
    jsonrpc.ServeRPC()
    bsonrpc.ServeHTTP()
    bsonrpc.ServeRPC()

    logFile, err := os.OpenFile(*logFilename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
    if err != nil {
        relog.Fatal("Can't open log file: %v", err)
    }
    relog.SetOutput(logFile)
    relog.SetPrefix(fmt.Sprintf("vtaction [%v] ", os.Getpid()))
    if err := relog.SetLevelByName(*logLevel); err != nil {
        relog.Fatal("%v", err)
    }
    relog.HijackLog(nil)
    relog.HijackStdio(logFile, logFile)

    mycnf, mycnfErr := mysqlctl.ReadMycnf(*mycnfFile)
    if mycnfErr != nil {
        relog.Fatal("mycnf read failed: %v", mycnfErr)
    }

    relog.Debug("mycnf: %v", jscfg.ToJson(mycnf))

    dbcfgs, cfErr := dbconfigs.Init(mycnf.SocketFile, *dbConfigsFile, *dbCredentialsFile)
    if cfErr != nil {
        relog.Fatal("%s", cfErr)
    }
    mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs.Dba, dbcfgs.Repl)

    topoServer := topo.GetServer()
    defer topo.CloseServers()

    actor := tabletmanager.NewTabletActor(mysqld, topoServer)

    // we delegate out startup to the micromanagement server so these actions
    // will occur after we have obtained our socket.
    bindAddr := fmt.Sprintf(":%v", *port)
    httpServer := &http.Server{Addr: bindAddr}
    go func() {
        if err := httpServer.ListenAndServe(); err != nil {
            relog.Error("httpServer.ListenAndServe err: %v", err)
        }
    }()

    actionErr := actor.HandleAction(*actionNode, *action, *actionGuid, *force)
    if actionErr != nil {
        relog.Fatal("action error: %v", actionErr)
    }

    relog.Info("finished vtaction %v", os.Args)
}
func (rowCache *InvalidationProcessor) processEvent(event *mysqlctl.UpdateResponse) error {
    position := ""
    if event.BinlogPosition.Valid() {
        position = event.BinlogPosition.String()
    }
    if event.Error != "" {
        relog.Error("Update stream returned error '%v'", event.Error)
        // Check if update stream error is fatal, else record it and move on.
        if strings.HasPrefix(event.Error, mysqlctl.FATAL) {
            relog.Info("Returning Service Error")
            return NewInvalidationError(FATAL_ERROR, event.Error, position)
        }
        rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, event.Error, position))
        return nil
    }

    if !event.BinlogPosition.Valid() {
        rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, "no error, position is not set", ""))
        return nil
    }

    var err error
    switch event.EventData.SqlType {
    case mysqlctl.DDL:
        err = rowCache.handleDdlEvent(event)
        if err != nil {
            return err
        }
    case mysqlctl.BEGIN:
        rowCache.dmlBuffer = rowCache.dmlBuffer[:0]
        if rowCache.inTxn {
            rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, "Invalid 'BEGIN' event, transaction already in progress", position))
            return nil
        }
        rowCache.inTxn = true
    case mysqlctl.COMMIT:
        if !rowCache.inTxn {
            rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, "Invalid 'COMMIT' event for a non-transaction", position))
            return nil
        }
        err = rowCache.handleTxn(event)
        if err != nil {
            return err
        }
        rowCache.inTxn = false
        rowCache.dmlBuffer = rowCache.dmlBuffer[:0]
    case "insert", "update", "delete":
        dml, err := rowCache.buildDmlData(event)
        if err != nil {
            return err
        }
        if dml != nil {
            rowCache.dmlBuffer = append(rowCache.dmlBuffer, dml)
        }
    default:
        rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, fmt.Sprintf("Unknown SqlType, %v %v", event.EventData.SqlType, event.EventData.Sql), position))
    }
    return nil
}
// StreamBinlog is the main entry point for reading and parsing the binlog.
func (blp *Blp) StreamBinlog(sendReply SendUpdateStreamResponse, binlogPrefix string) {
    var err error
    for {
        err = blp.streamBinlog(sendReply, binlogPrefix)
        sErr, ok := err.(*BinlogParseError)
        if ok && sErr.IsEOF() {
            // Double check the current parse position against the replication
            // position: if the stream is behind, it is an error; otherwise this
            // is a true EOF, so wait and retry.
            relog.Info("EOF, retrying")
            ok, replErr := blp.isBehindReplication()
            if replErr != nil {
                err = replErr
            } else if !ok {
                err = NewBinlogParseError(REPLICATION_ERROR, "EOF, but parse position behind replication")
            } else {
                time.Sleep(5 * time.Second)
                blp.startPosition = &ReplicationCoordinates{
                    MasterFilename: blp.currentPosition.Position.MasterFilename,
                    MasterPosition: blp.currentPosition.Position.MasterPosition,
                    RelayFilename:  blp.currentPosition.Position.RelayFilename,
                    RelayPosition:  blp.currentPosition.Position.RelayPosition,
                }
                continue
            }
        }
        relog.Error("StreamBinlog error @ %v, error: %v", blp.currentPosition.String(), err.Error())
        SendError(sendReply, err, blp.currentPosition)
        break
    }
}
func main() {
    flag.Parse()
    servenv.Init("vt_binlog_player")

    if *startPosFile == "" {
        relog.Fatal("start-pos-file was not supplied.")
    }

    if *dbConfigFile == "" {
        relog.Fatal("Cannot start without db-config-file")
    }

    blp, err := initBinlogPlayer(*startPosFile, *dbConfigFile, *lookupConfigFile, *dbCredFile, *useCheckpoint, *debug, *port)
    if err != nil {
        relog.Fatal("Error in initializing binlog player - '%v'", err)
    }
    blp.txnBatch = *txnBatch
    blp.maxTxnInterval = time.Duration(*maxTxnInterval) * time.Second
    blp.execDdl = *execDdl

    if *tables != "" {
        tables := strings.Split(*tables, ",")
        blp.tables = make([]string, len(tables))
        for i, table := range tables {
            blp.tables[i] = strings.TrimSpace(table)
        }
        relog.Info("len tables %v tables %v", len(blp.tables), blp.tables)
    }

    relog.Info("BinlogPlayer client for keyrange '%v:%v' starting @ '%v'",
        blp.startPosition.KeyrangeStart,
        blp.startPosition.KeyrangeEnd,
        blp.startPosition.Position)

    if *port != 0 {
        umgmt.AddStartupCallback(func() {
            umgmt.StartHttpServer(fmt.Sprintf(":%v", *port))
        })
    }
    umgmt.AddStartupCallback(func() {
        c := make(chan os.Signal, 1)
        signal.Notify(c, syscall.SIGTERM)
        go func() {
            for sig := range c {
                umgmt.SigTermHandler(sig)
            }
        }()
    })
    umgmt.AddCloseCallback(func() {
        close(interrupted)
    })

    // Make a request to the server and start processing the events.
    stdout = bufio.NewWriterSize(os.Stdout, 16*1024)
    err = blp.applyBinlogEvents()
    if err != nil {
        relog.Error("Error in applying binlog events, err %v", err)
    }
    relog.Info("vt_binlog_player done")
}
// zkocc: a proxy for zk
func main() {
    flag.Parse()
    if err := servenv.Init("zkocc"); err != nil {
        relog.Fatal("Error in servenv.Init: %v", err)
    }

    rpc.HandleHTTP()
    jsonrpc.ServeHTTP()
    jsonrpc.ServeRPC()
    bsonrpc.ServeHTTP()
    bsonrpc.ServeRPC()

    zk.RegisterZkReader(zkocc.NewZkReader(*resolveLocal, flag.Args()))

    // we delegate out startup to the micromanagement server so these actions
    // will occur after we have obtained our socket.
    umgmt.SetLameDuckPeriod(float32(*lameDuckPeriod))
    umgmt.SetRebindDelay(float32(*rebindDelay))
    umgmt.AddStartupCallback(func() {
        umgmt.StartHttpServer(fmt.Sprintf(":%v", *port))
    })

    relog.Info("started zkocc %v", *port)
    umgmtSocket := fmt.Sprintf("/tmp/zkocc-%08x-umgmt.sock", *port)
    if umgmtErr := umgmt.ListenAndServe(umgmtSocket); umgmtErr != nil {
        relog.Error("umgmt.ListenAndServe err: %v", umgmtErr)
    }
    relog.Info("done")
}
func (rowCache *InvalidationProcessor) updateErrCounters(err *InvalidationError) {
    relog.Error(err.Error())
    if errorStats == nil {
        relog.Warning("errorStats is not initialized")
        return
    }
    errorStats.Add(err.errType, 1)
}
func commandStaleActions(wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (string, error) {
    maxStaleness := subFlags.Duration("max-staleness", 5*time.Minute, "how long since the last modification before an action is considered stale")
    purge := subFlags.Bool("purge", false, "purge stale actions")
    subFlags.Parse(args)
    if subFlags.NArg() == 0 {
        relog.Fatal("action StaleActions requires <zk action path>")
    }
    zkts, ok := wr.TopoServer().(*zktopo.Server)
    if !ok {
        return "", fmt.Errorf("StaleActions requires a zktopo.Server")
    }
    zkPaths, err := resolveWildcards(wr, subFlags.Args())
    if err != nil {
        return "", err
    }
    var errCount sync2.AtomicInt32
    wg := sync.WaitGroup{}
    for _, apath := range zkPaths {
        wg.Add(1)
        go func(zkActionPath string) {
            defer wg.Done()
            staleActions, err := staleActions(zkts, zkActionPath, *maxStaleness)
            if err != nil {
                errCount.Add(1)
                relog.Error("can't check stale actions: %v %v", zkActionPath, err)
                return
            }
            for _, action := range staleActions {
                fmt.Println(fmtAction(action))
            }
            if *purge && len(staleActions) > 0 {
                err := zkts.PurgeActions(zkActionPath, tm.ActionNodeCanBePurged)
                if err != nil {
                    errCount.Add(1)
                    relog.Error("can't purge stale actions: %v %v", zkActionPath, err)
                    return
                }
            }
        }(apath)
    }
    wg.Wait()
    if errCount.Get() > 0 {
        return "", fmt.Errorf("some errors occurred, check the log")
    }
    return "", nil
}
func (dc DBClient) Rollback() error {
    _, err := dc.dbConn.ExecuteFetch(ROLLBACK, 1, false)
    if err != nil {
        relog.Error("ROLLBACK failed w/ error %v", err)
        dc.dbConn.Close()
    }
    return err
}
func (dc DBClient) Commit() error {
    _, err := dc.dbConn.ExecuteFetch(COMMIT, 1, false)
    if err != nil {
        relog.Error("COMMIT failed w/ error %v", err)
        dc.dbConn.Close()
    }
    return err
}
func (dc DBClient) Begin() error {
    _, err := dc.dbConn.ExecuteFetch(BEGIN, 1, false)
    if err != nil {
        relog.Error("BEGIN failed w/ error %v", err)
        dc.handleError(err)
    }
    return err
}
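// Usage sketch (hypothetical caller, not from the original source): wrap a
// batch of statements in a transaction, rolling back on any failure.
func applyBatch(dc DBClient, queries []string) error {
    if err := dc.Begin(); err != nil {
        return err
    }
    for _, query := range queries {
        if _, err := dc.ExecuteFetch(query, 0, false); err != nil {
            // best effort: Rollback itself logs and closes the connection on failure
            dc.Rollback()
            return err
        }
    }
    return dc.Commit()
}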
// replaceError replaces original with recent if recent is not nil,
// logging original if it wasn't nil. This should be used in deferred
// cleanup functions if they change the returned error.
func replaceError(original, recent error) error {
    if recent == nil {
        return original
    }
    if original != nil {
        relog.Error("One of multiple errors: %v", original)
    }
    return recent
}
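// Usage sketch (hypothetical, mirroring the deferred pattern in PartialSnapshot
// above; acquireResource and doWork are assumed helpers): a deferred cleanup
// step merges its error into the named return value without silently losing
// either error.
func doWorkWithCleanup() (err error) {
    cleanup, err := acquireResource()
    if err != nil {
        return err
    }
    defer func() {
        err = replaceError(err, cleanup())
    }()
    return doWork()
}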
func (client *Client) CloseListeners() error {
    request := new(Request)
    reply := new(Reply)
    err := client.Call("UmgmtService.CloseListeners", request, reply)
    if err != nil {
        relog.Error("CloseListeners err: %v", err)
    }
    return err
}
func (client *Client) GracefulShutdown() error {
    request := new(Request)
    reply := new(Reply)
    err := client.Call("UmgmtService.GracefulShutdown", request, reply)
    if err != nil && err != io.ErrUnexpectedEOF {
        relog.Error("GracefulShutdown err: %v", err)
    }
    return err
}
// DecodeMysqlBinlog returns a Reader from which the decoded binlog can be read.
func (decoder *BinlogDecoder) DecodeMysqlBinlog(binlog *os.File) (io.Reader, error) {
    dir, err := findVtMysqlbinlogDir()
    if err != nil {
        return nil, err
    }
    dir = path.Join(dir, "bin")
    name := "vt_mysqlbinlog"
    arg := []string{"vt_mysqlbinlog", "-"}

    dataRdFile, dataWrFile, pipeErr := os.Pipe()
    if pipeErr != nil {
        relog.Error("DecodeMysqlBinlog: error in creating pipe %v", pipeErr)
        return nil, pipeErr
    }
    // the caller is responsible for the read end of the pipe;
    // the write end belongs to the child process, so close our copy on return.
    defer dataWrFile.Close()

    fds := []*os.File{
        binlog,
        dataWrFile,
        os.Stderr,
    }

    attrs := &os.ProcAttr{Dir: dir, Files: fds}
    process, err := os.StartProcess(name, arg, attrs)
    if err != nil {
        relog.Error("DecodeMysqlBinlog: error starting vt_mysqlbinlog %v", err)
        return nil, err
    }
    decoder.process = process

    go func() {
        // just make sure we don't spawn zombies
        waitMsg, err := decoder.process.Wait()
        if err != nil {
            relog.Error("vt_mysqlbinlog exited: %v err: %v", waitMsg, err)
        } else {
            relog.Info("vt_mysqlbinlog exited: %v err: %v", waitMsg, err)
        }
    }()

    return dataRdFile, nil
}
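// Usage sketch (hypothetical, not from the original source): decode a binlog
// file and copy the decoded statement stream to stdout, mirroring the pipeline
// wiring in streamBinlog above.
func dumpBinlog(filename string) error {
    f, err := os.Open(filename)
    if err != nil {
        return err
    }
    defer f.Close()

    decoder := new(mysqlctl.BinlogDecoder)
    r, err := decoder.DecodeMysqlBinlog(f)
    if err != nil {
        return err
    }
    // The caller must drain the returned read end of the pipe.
    _, err = io.Copy(os.Stdout, r)
    return err
}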
func (dc DBClient) ExecuteFetch(query string, maxrows int, wantfields bool) (*proto.QueryResult, error) {
    mqr, err := dc.dbConn.ExecuteFetch(query, maxrows, wantfields)
    if err != nil {
        relog.Error("ExecuteFetch failed w/ error %v", err)
        dc.handleError(err)
        return nil, err
    }
    qr := proto.QueryResult(*mqr)
    return &qr, nil
}
func (wr *Wrangler) UnreserveForRestoreMulti(dstTabletAliases []topo.TabletAlias) {
    for _, dstTabletAlias := range dstTabletAliases {
        ufrErr := wr.UnreserveForRestore(dstTabletAlias)
        if ufrErr != nil {
            relog.Error("Failed to UnreserveForRestore destination tablet after failed source snapshot: %v", ufrErr)
        } else {
            relog.Info("Un-reserved %v", dstTabletAlias)
        }
    }
}
func (client *Client) Ping() (string, error) {
    request := new(Request)
    reply := new(Reply)
    err := client.Call("UmgmtService.Ping", request, reply)
    if err != nil {
        relog.Error("rpc err: %v", err)
        return "ERROR", err
    }
    return reply.Message, nil
}