func TestUmgmt(t *testing.T) {
	go serve(t)
	<-ready

	client, err := Dial("/tmp/test-sock")
	if err != nil {
		t.Fatalf("can't connect %v", err)
	}
	request := new(Request)

	reply := new(Reply)
	callErr := client.Call("UmgmtService.Ping", request, reply)
	if callErr != nil {
		t.Fatalf("callErr: %v", callErr)
	}
	relog.Info("Ping reply: %v", reply.Message)

	reply = new(Reply)
	callErr = client.Call("UmgmtService.CloseListeners", reply, reply)
	if callErr != nil {
		t.Fatalf("callErr: %v", callErr)
	}
	relog.Info("CloseListeners reply: %v", reply.Message)

	reply = new(Reply)
	callErr = client.Call("UmgmtService.GracefulShutdown", reply, reply)
	if callErr != nil {
		t.Fatalf("callErr: %v", callErr)
	}
	relog.Info("GracefulShutdown reply: %v", reply.Message)
}
// zkocc: a proxy for zk
func main() {
	flag.Parse()
	if err := servenv.Init("zkocc"); err != nil {
		relog.Fatal("Error in servenv.Init: %v", err)
	}

	rpc.HandleHTTP()
	jsonrpc.ServeHTTP()
	jsonrpc.ServeRPC()
	bsonrpc.ServeHTTP()
	bsonrpc.ServeRPC()

	zk.RegisterZkReader(zkocc.NewZkReader(*resolveLocal, flag.Args()))

	// we delegate our startup to the micromanagement server so these actions
	// will occur after we have obtained our socket.
	umgmt.SetLameDuckPeriod(float32(*lameDuckPeriod))
	umgmt.SetRebindDelay(float32(*rebindDelay))
	umgmt.AddStartupCallback(func() {
		umgmt.StartHttpServer(fmt.Sprintf(":%v", *port))
	})

	relog.Info("started zkocc %v", *port)
	umgmtSocket := fmt.Sprintf("/tmp/zkocc-%08x-umgmt.sock", *port)
	if umgmtErr := umgmt.ListenAndServe(umgmtSocket); umgmtErr != nil {
		relog.Error("umgmt.ListenAndServe err: %v", umgmtErr)
	}
	relog.Info("done")
}
// A non-nil return signals that event processing should stop.
func (agent *ActionAgent) dispatchAction(actionPath, data string) error {
	relog.Info("action dispatch %v", actionPath)
	actionNode, err := ActionNodeFromJson(data, actionPath)
	if err != nil {
		relog.Error("action decode failed: %v %v", actionPath, err)
		return nil
	}

	logfile := flag.Lookup("logfile").Value.String()
	if !strings.HasPrefix(logfile, "/dev") {
		logfile = path.Join(path.Dir(logfile), "vtaction.log")
	}
	cmd := []string{
		agent.vtActionBinFile,
		"-action", actionNode.Action,
		"-action-node", actionPath,
		"-action-guid", actionNode.ActionGuid,
		"-mycnf-file", agent.MycnfFile,
		"-logfile", logfile,
	}
	cmd = append(cmd, agent.ts.GetSubprocessFlags()...)
	if agent.DbConfigsFile != "" {
		cmd = append(cmd, "-db-configs-file", agent.DbConfigsFile)
	}
	if agent.DbCredentialsFile != "" {
		cmd = append(cmd, "-db-credentials-file", agent.DbCredentialsFile)
	}
	relog.Info("action launch %v", cmd)
	vtActionCmd := exec.Command(cmd[0], cmd[1:]...)

	stdOut, vtActionErr := vtActionCmd.CombinedOutput()
	if vtActionErr != nil {
		relog.Error("agent action failed: %v %v\n%s", actionPath, vtActionErr, stdOut)
		// If the action failed, preserve single execution path semantics.
		return vtActionErr
	}

	relog.Info("agent action completed %v %s", actionPath, stdOut)

	// Save the old tablet so callbacks can have a better idea of the precise
	// nature of the transition.
	oldTablet := agent.Tablet().Tablet

	// Actions should have side effects on the tablet, so reload the data.
	if err := agent.readTablet(); err != nil {
		relog.Warning("failed rereading tablet after action - services may be inconsistent: %v %v", actionPath, err)
	} else {
		agent.runChangeCallbacks(oldTablet, actionPath)
	}

	// Maybe invalidate the schema.
	// This adds a dependency between tabletmanager and tabletserver,
	// so it's not ideal. But I (alainjobart) think it's better
	// to have up to date schema in vtocc.
	if actionNode.Action == TABLET_ACTION_APPLY_SCHEMA {
		tabletserver.ReloadSchema()
	}

	return nil
}
func main() { flag.Parse() servenv.Init("vt_binlog_player") if *startPosFile == "" { relog.Fatal("start-pos-file was not supplied.") } if *dbConfigFile == "" { relog.Fatal("Cannot start without db-config-file") } blp, err := initBinlogPlayer(*startPosFile, *dbConfigFile, *lookupConfigFile, *dbCredFile, *useCheckpoint, *debug, *port) if err != nil { relog.Fatal("Error in initializing binlog player - '%v'", err) } blp.txnBatch = *txnBatch blp.maxTxnInterval = time.Duration(*maxTxnInterval) * time.Second blp.execDdl = *execDdl if *tables != "" { tables := strings.Split(*tables, ",") blp.tables = make([]string, len(tables)) for i, table := range tables { blp.tables[i] = strings.TrimSpace(table) } relog.Info("len tables %v tables %v", len(blp.tables), blp.tables) } relog.Info("BinlogPlayer client for keyrange '%v:%v' starting @ '%v'", blp.startPosition.KeyrangeStart, blp.startPosition.KeyrangeEnd, blp.startPosition.Position) if *port != 0 { umgmt.AddStartupCallback(func() { umgmt.StartHttpServer(fmt.Sprintf(":%v", *port)) }) } umgmt.AddStartupCallback(func() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGTERM) go func() { for sig := range c { umgmt.SigTermHandler(sig) } }() }) umgmt.AddCloseCallback(func() { close(interrupted) }) //Make a request to the server and start processing the events. stdout = bufio.NewWriterSize(os.Stdout, 16*1024) err = blp.applyBinlogEvents() if err != nil { relog.Error("Error in applying binlog events, err %v", err) } relog.Info("vt_binlog_player done") }
func Start(mt *Mysqld, mysqlWaitTime time.Duration) error {
	var name string

	// try the mysqld start hook, if any
	h := hook.NewSimpleHook("mysqld_start")
	hr := h.Execute()
	switch hr.ExitStatus {
	case hook.HOOK_SUCCESS:
		// hook exists and worked, we can keep going
		name = "mysqld_start hook"
	case hook.HOOK_DOES_NOT_EXIST:
		// hook doesn't exist, run mysqld_safe ourselves
		relog.Info("No mysqld_start hook, running mysqld_safe directly")
		dir, err := vtenv.VtMysqlRoot()
		if err != nil {
			return err
		}
		name = path.Join(dir, "bin/mysqld_safe")
		arg := []string{
			"--defaults-file=" + mt.config.path,
		}
		env := []string{os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql")}

		cmd := exec.Command(name, arg...)
		cmd.Dir = dir
		cmd.Env = env
		relog.Info("mysqlctl.Start mysqlWaitTime:%v %#v", mysqlWaitTime, cmd)
		_, err = cmd.StderrPipe()
		if err != nil {
			return err
		}

		err = cmd.Start()
		if err != nil {
			return err
		}

		// wait so we don't get a bunch of defunct processes
		go cmd.Wait()
	default:
		// hook failed, we report error
		return fmt.Errorf("mysqld_start hook failed: %v", hr.String())
	}

	// give it some time to succeed - usually by the time the socket emerges
	// we are in good shape
	for i := mysqlWaitTime; i >= 0; i -= time.Second {
		_, statErr := os.Stat(mt.config.SocketFile)
		if statErr == nil {
			// Make sure the socket file isn't stale.
			conn, connErr := mt.createConnection()
			if connErr == nil {
				conn.Close()
				return nil
			}
		} else if !os.IsNotExist(statErr) {
			return statErr
		}
		time.Sleep(time.Second)
	}
	return errors.New(name + ": deadline exceeded waiting for " + mt.config.SocketFile)
}
func (ti *TableInfo) initRowCache(conn PoolConnection, tableType string, createTime sqltypes.Value, comment string, cachePool *CachePool) {
	if cachePool.IsClosed() {
		return
	}

	if strings.Contains(comment, "vtocc_nocache") {
		relog.Info("%s commented as vtocc_nocache. Will not be cached.", ti.Name)
		return
	}

	if tableType == "VIEW" {
		relog.Info("%s is a view. Will not be cached.", ti.Name)
		return
	}

	if ti.PKColumns == nil {
		relog.Info("Table %s has no primary key. Will not be cached.", ti.Name)
		return
	}

	for _, col := range ti.PKColumns {
		if ti.Columns[col].Category == schema.CAT_OTHER {
			relog.Info("Table %s pk has unsupported column types. Will not be cached.", ti.Name)
			return
		}
	}

	ti.CacheType = schema.CACHE_RW
	ti.Cache = NewRowCache(ti, cachePool)
}
func (mysqld *Mysqld) prepareToSnapshot(allowHierarchicalReplication bool) (slaveStartRequired, readOnly bool, replicationPosition, myMasterPosition *ReplicationPosition, masterAddr string, err error) {
	// save initial state so we can restore on Start()
	if slaveStatus, slaveErr := mysqld.slaveStatus(); slaveErr == nil {
		slaveStartRequired = (slaveStatus["Slave_IO_Running"] == "Yes" && slaveStatus["Slave_SQL_Running"] == "Yes")
	}

	// For masters, set read-only so we don't write anything during snapshot
	readOnly = true
	if readOnly, err = mysqld.IsReadOnly(); err != nil {
		return
	}

	relog.Info("Set Read Only")
	if !readOnly {
		mysqld.SetReadOnly(true)
	}
	relog.Info("Stop Slave")
	if err = mysqld.StopSlave(); err != nil {
		return
	}

	// If the source is a slave use the master replication position,
	// unless we are allowing hierarchical replicas.
	replicationPosition, err = mysqld.SlaveStatus()
	if err != nil {
		if err != ErrNotSlave {
			// this is a real error
			return
		}
		// we are really a master, so we need that position
		replicationPosition, err = mysqld.MasterStatus()
		if err != nil {
			return
		}
		masterAddr = mysqld.IpAddr()
	} else {
		// we are a slave, check our replication strategy
		if allowHierarchicalReplication {
			masterAddr = mysqld.IpAddr()
		} else {
			masterAddr, err = mysqld.GetMasterAddr()
			if err != nil {
				return
			}
		}
	}

	// get our master position, some targets may use it
	myMasterPosition, err = mysqld.MasterStatus()
	if err != nil && err != ErrNotMaster {
		// this is a real error
		return
	}

	relog.Info("Flush tables")
	if err = mysqld.executeSuperQuery("FLUSH TABLES WITH READ LOCK"); err != nil {
		return
	}
	return
}
func main() {
	dbConfigsFile, dbCredentialsFile := dbconfigs.RegisterCommonFlags()
	flag.Parse()

	relog.Info("started vtaction %v", os.Args)

	rpc.HandleHTTP()
	jsonrpc.ServeHTTP()
	jsonrpc.ServeRPC()
	bsonrpc.ServeHTTP()
	bsonrpc.ServeRPC()

	logFile, err := os.OpenFile(*logFilename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		relog.Fatal("Can't open log file: %v", err)
	}
	relog.SetOutput(logFile)
	relog.SetPrefix(fmt.Sprintf("vtaction [%v] ", os.Getpid()))
	if err := relog.SetLevelByName(*logLevel); err != nil {
		relog.Fatal("%v", err)
	}
	relog.HijackLog(nil)
	relog.HijackStdio(logFile, logFile)

	mycnf, mycnfErr := mysqlctl.ReadMycnf(*mycnfFile)
	if mycnfErr != nil {
		relog.Fatal("mycnf read failed: %v", mycnfErr)
	}

	relog.Debug("mycnf: %v", jscfg.ToJson(mycnf))

	dbcfgs, cfErr := dbconfigs.Init(mycnf.SocketFile, *dbConfigsFile, *dbCredentialsFile)
	if cfErr != nil {
		relog.Fatal("%s", cfErr)
	}
	mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs.Dba, dbcfgs.Repl)

	topoServer := topo.GetServer()
	defer topo.CloseServers()

	actor := tabletmanager.NewTabletActor(mysqld, topoServer)

	// we delegate our startup to the micromanagement server so these actions
	// will occur after we have obtained our socket.
	bindAddr := fmt.Sprintf(":%v", *port)
	httpServer := &http.Server{Addr: bindAddr}
	go func() {
		if err := httpServer.ListenAndServe(); err != nil {
			relog.Error("httpServer.ListenAndServe err: %v", err)
		}
	}()

	actionErr := actor.HandleAction(*actionNode, *action, *actionGuid, *force)
	if actionErr != nil {
		relog.Fatal("action error: %v", actionErr)
	}

	relog.Info("finished vtaction %v", os.Args)
}
func EnableUpdateStreamService(tabletType string, dbcfgs dbconfigs.DBConfigs) {
	defer logError()
	UpdateStreamRpcService.actionLock.Lock()
	defer UpdateStreamRpcService.actionLock.Unlock()

	if !dbcfgsCorrect(tabletType, dbcfgs) {
		relog.Warning("missing/incomplete db configs file, cannot enable update stream service")
		return
	}

	if UpdateStreamRpcService.mycnf.BinLogPath == "" {
		relog.Warning("Update stream service requires binlogs enabled")
		return
	}

	if UpdateStreamRpcService.isServiceEnabled() {
		relog.Warning("Update stream service is already enabled")
		return
	}

	UpdateStreamRpcService.setState(ENABLED)

	UpdateStreamRpcService.mysqld = NewMysqld(UpdateStreamRpcService.mycnf, dbcfgs.Dba, dbcfgs.Repl)
	UpdateStreamRpcService.dbname = dbcfgs.App.Dbname
	relog.Info("dbcfgs.App.Dbname %v DbName %v", dbcfgs.App.Dbname, UpdateStreamRpcService.dbname)
	relog.Info("mycnf.BinLogPath %v mycnf.RelayLogPath %v", UpdateStreamRpcService.mycnf.BinLogPath, UpdateStreamRpcService.mycnf.RelayLogPath)
	UpdateStreamRpcService.tabletType = tabletType
	UpdateStreamRpcService.binlogPrefix = UpdateStreamRpcService.mycnf.BinLogPath
	UpdateStreamRpcService.logsDir = path.Dir(UpdateStreamRpcService.binlogPrefix)

	relog.Info("Update Stream enabled, logsDir %v", UpdateStreamRpcService.logsDir)
}
func (si *SchemaInfo) createTable(conn PoolConnection, tableName string) {
	tables, err := conn.ExecuteFetch(fmt.Sprintf("%s and table_name = '%s'", base_show_tables, tableName), 1, false)
	if err != nil {
		panic(NewTabletError(FAIL, "Error fetching table %s: %v", tableName, err))
	}
	if len(tables.Rows) != 1 {
		panic(NewTabletError(FAIL, "rows for %s !=1: %v", tableName, len(tables.Rows)))
	}
	tableInfo := NewTableInfo(
		conn,
		tableName,
		tables.Rows[0][1].String(), // table_type
		tables.Rows[0][2],          // create_time
		tables.Rows[0][3].String(), // table_comment
		si.cachePool,
	)
	if tableInfo == nil {
		panic(NewTabletError(FATAL, "Could not read table info: %s", tableName))
	}
	if tableInfo.CacheType == schema.CACHE_NONE {
		relog.Info("Initialized table: %s", tableName)
	} else {
		relog.Info("Initialized cached table: %s", tableInfo.Cache.prefix)
	}
	si.mu.Lock()
	defer si.mu.Unlock()
	if _, ok := si.tables[tableName]; ok {
		panic(NewTabletError(FAIL, "Table %s already exists", tableName))
	}
	si.tables[tableName] = tableInfo
}
func StopRowCacheInvalidation() {
	if !CacheInvalidationProcessor.isServiceEnabled() {
		relog.Info("Invalidator is already disabled - NOP")
		return
	}
	CacheInvalidationProcessor.stopRowCacheInvalidation()
	relog.Info("Rowcache invalidator stopped")
}
func serve(t *testing.T) {
	AddStartupCallback(func() { ready <- true })
	AddShutdownCallback(func() { relog.Info("test server GracefulShutdown callback") })
	err := ListenAndServe("/tmp/test-sock")
	if err != nil {
		t.Fatalf("listen err: %v", err)
	}
	relog.Info("test server finished")
}
func CacheCreator(dbconfig dbconfigs.DBConfig) CreateCacheFunc {
	if dbconfig.Memcache == "" {
		relog.Info("rowcache not enabled")
		return nil
	}
	relog.Info("rowcache is enabled")
	return func() (*memcache.Connection, error) {
		return memcache.Connect(dbconfig.Memcache)
	}
}
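// Usage sketch (hypothetical caller, not part of the original code): the factory
// returned by CacheCreator is kept around and only invoked when a cache
// connection is actually needed, so a nil factory simply means "no rowcache".
// The openRowCache helper below is illustrative only.
func openRowCache(dbconfig dbconfigs.DBConfig) (*memcache.Connection, error) {
	create := CacheCreator(dbconfig)
	if create == nil {
		// rowcache disabled for this config
		return nil, nil
	}
	return create()
}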
func (blServer *BinlogServer) ServeBinlog(req *mysqlctl.BinlogServerRequest, sendReply mysqlctl.SendUpdateStreamResponse) error {
	defer func() {
		if x := recover(); x != nil {
			// Send the error to the client.
			_, ok := x.(*BinlogParseError)
			if !ok {
				relog.Error("Uncaught panic at top-most level: '%v'", x)
				//panic(x)
			}
			sendError(sendReply, req.StartPosition.String(), x.(error), nil)
		}
	}()

	relog.Info("received req: %v kr start %v end %v", req.StartPosition.String(), req.KeyspaceStart, req.KeyspaceEnd)
	if !isRequestValid(req) {
		panic(NewBinlogParseError("Invalid request, cannot serve the stream"))
	}

	usingRelayLogs := false
	var binlogPrefix, logsDir string
	if req.StartPosition.RelayFilename != "" {
		usingRelayLogs = true
		binlogPrefix = blServer.mycnf.RelayLogPath
		logsDir = path.Dir(binlogPrefix)
		if !mysqlctl.IsRelayPositionValid(&req.StartPosition, logsDir) {
			panic(NewBinlogParseError(fmt.Sprintf("Invalid start position %v, cannot serve the stream, cannot locate start position", req.StartPosition)))
		}
	} else {
		binlogPrefix = blServer.mycnf.BinLogPath
		logsDir = path.Dir(binlogPrefix)
		if !mysqlctl.IsMasterPositionValid(&req.StartPosition) {
			panic(NewBinlogParseError(fmt.Sprintf("Invalid start position %v, cannot serve the stream, cannot locate start position", req.StartPosition)))
		}
	}

	startKey, err := key.HexKeyspaceId(req.KeyspaceStart).Unhex()
	if err != nil {
		panic(NewBinlogParseError(fmt.Sprintf("Unhex on key '%v' failed", req.KeyspaceStart)))
	}
	endKey, err := key.HexKeyspaceId(req.KeyspaceEnd).Unhex()
	if err != nil {
		panic(NewBinlogParseError(fmt.Sprintf("Unhex on key '%v' failed", req.KeyspaceEnd)))
	}
	keyRange := &key.KeyRange{Start: startKey, End: endKey}

	blp := NewBlp(&req.StartPosition, blServer, keyRange)
	blp.usingRelayLogs = usingRelayLogs
	blp.binlogPrefix = binlogPrefix
	blp.logMetadata = mysqlctl.NewSlaveMetadata(logsDir, blServer.mycnf.RelayLogInfoPath)

	relog.Info("usingRelayLogs %v blp.binlogPrefix %v logsDir %v", blp.usingRelayLogs, blp.binlogPrefix, logsDir)
	blp.streamBinlog(sendReply)
	return nil
}
// helper method to asynchronously diff permissions
func (wr *Wrangler) diffPermissions(masterPermissions *mysqlctl.Permissions, masterAlias topo.TabletAlias, alias topo.TabletAlias, wg *sync.WaitGroup, er concurrency.ErrorRecorder) {
	defer wg.Done()
	relog.Info("Gathering permissions for %v", alias)
	slavePermissions, err := wr.GetPermissions(alias)
	if err != nil {
		er.RecordError(err)
		return
	}

	relog.Info("Diffing permissions for %v", alias)
	mysqlctl.DiffPermissions(masterAlias.String(), masterPermissions, alias.String(), slavePermissions, er)
}
// helper method to asynchronously diff a schema
func (wr *Wrangler) diffSchema(masterSchema *mysqlctl.SchemaDefinition, masterTabletAlias, alias topo.TabletAlias, includeViews bool, wg *sync.WaitGroup, er concurrency.ErrorRecorder) {
	defer wg.Done()
	relog.Info("Gathering schema for %v", alias)
	slaveSchema, err := wr.GetSchema(alias, nil, includeViews)
	if err != nil {
		er.RecordError(err)
		return
	}

	relog.Info("Diffing schema for %v", alias)
	mysqlctl.DiffSchema(masterTabletAlias.String(), masterSchema, alias.String(), slaveSchema, er)
}
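// A minimal fan-out sketch (hypothetical, not part of the original code) showing
// how the wg/er parameters of diffSchema and diffPermissions above are meant to
// be driven: one goroutine per slave alias, with errors accumulated in a shared
// ErrorRecorder. The slaveAliases parameter, the concrete
// concurrency.AllErrorRecorder type, and its Error() aggregation are assumptions
// for illustration.
func (wr *Wrangler) diffSchemaAgainstSlaves(masterSchema *mysqlctl.SchemaDefinition, masterAlias topo.TabletAlias, slaveAliases []topo.TabletAlias, includeViews bool) error {
	wg := &sync.WaitGroup{}
	er := &concurrency.AllErrorRecorder{}
	for _, alias := range slaveAliases {
		wg.Add(1)
		go wr.diffSchema(masterSchema, masterAlias, alias, includeViews, wg, er)
	}
	wg.Wait()
	return er.Error()
}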
/*
 waitForMysqld: should the function block until mysqld has stopped?
 This can actually take a *long* time if the buffer cache needs to be
 fully flushed - on the order of 20-30 minutes.
*/
func Shutdown(mt *Mysqld, waitForMysqld bool, mysqlWaitTime time.Duration) error {
	relog.Info("mysqlctl.Shutdown")
	// possibly mysql is already shutdown, check for a few files first
	_, socketPathErr := os.Stat(mt.config.SocketFile)
	_, pidPathErr := os.Stat(mt.config.PidFile)
	if socketPathErr != nil && pidPathErr != nil {
		relog.Warning("assuming shutdown - no socket, no pid file")
		return nil
	}

	// try the mysqld shutdown hook, if any
	h := hook.NewSimpleHook("mysqld_shutdown")
	hr := h.Execute()
	switch hr.ExitStatus {
	case hook.HOOK_SUCCESS:
		// hook exists and worked, we can keep going
	case hook.HOOK_DOES_NOT_EXIST:
		// hook doesn't exist, try mysqladmin
		relog.Info("No mysqld_shutdown hook, running mysqladmin directly")
		dir, err := vtenv.VtMysqlRoot()
		if err != nil {
			return err
		}
		name := path.Join(dir, "bin/mysqladmin")
		arg := []string{
			"-u", "vt_dba", "-S", mt.config.SocketFile,
			"shutdown",
		}
		env := []string{
			os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql"),
		}
		_, err = execCmd(name, arg, env, dir)
		if err != nil {
			return err
		}
	default:
		// hook failed, we report error
		return fmt.Errorf("mysqld_shutdown hook failed: %v", hr.String())
	}

	// wait for mysqld to really stop. use the sock file as a proxy for that since
	// we can't call wait() in a process we didn't start.
	if waitForMysqld {
		for i := mysqlWaitTime; i >= 0; i -= time.Second {
			_, statErr := os.Stat(mt.config.SocketFile)
			if statErr != nil && os.IsNotExist(statErr) {
				return nil
			}
			time.Sleep(time.Second)
		}
		return errors.New("gave up waiting for mysqld to stop")
	}
	return nil
}
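// A minimal restart sketch (hypothetical, not part of the original code)
// combining the Shutdown and Start functions above: stop mysqld and wait for it
// to go away, then bring it back up and wait for the socket to become usable.
// The same wait budget is reused for both phases.
func restartMysql(mt *Mysqld, mysqlWaitTime time.Duration) error {
	if err := Shutdown(mt, true, mysqlWaitTime); err != nil {
		return err
	}
	return Start(mt, mysqlWaitTime)
}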
func GcHandler(response http.ResponseWriter, request *http.Request) {
	go func() {
		// NOTE(msolomon) I'm not sure if this blocks or not - a cursory glance at the
		// code didn't reveal enough and I'm being lazy
		relog.Info("start forced garbage collection")
		runtime.GC()
		relog.Info("finished forced garbage collection")
	}()

	data := "forced gc\n"
	response.Write([]byte(data))
}
// background routine to initiate a connection sequence
// only connect if state == CELL_DISCONNECTED
// will change state to CELL_CONNECTING during the connection process
// will then change to CELL_CONNECTED (and broadcast the cond)
// or to CELL_BACKOFF (and schedule a new reconnection soon)
func (zcell *zkCell) connect() {
	// change our state, we're working on connecting
	zcell.mutex.Lock()
	if zcell.state != CELL_DISCONNECTED {
		// someone else is already connecting
		zcell.mutex.Unlock()
		return
	}
	zcell.setState(CELL_CONNECTING)
	zcell.mutex.Unlock()

	// now connect
	zconn, session, err := zk.DialZkTimeout(zcell.zkAddr, *baseTimeout, *connectTimeout)
	if err == nil {
		zcell.zconn = zconn
		go zcell.handleSessionEvents(session)
	}

	// and change our state
	zcell.mutex.Lock()
	if zcell.state != CELL_CONNECTING {
		panic(fmt.Errorf("Unexpected state: %v", zcell.state))
	}
	if err == nil {
		relog.Info("zk cell conn: cell %v connected", zcell.cellName)
		zcell.setState(CELL_CONNECTED)
		zcell.lastErr = nil
	} else {
		relog.Info("zk cell conn: cell %v connection failed: %v", zcell.cellName, err)
		zcell.setState(CELL_BACKOFF)
		zcell.lastErr = err

		go func() {
			// we're going to try to reconnect at some point
			// FIXME(alainjobart) backoff algorithm?
			<-time.NewTimer(time.Duration(*reconnectInterval) * time.Second).C

			// switch back to DISCONNECTED, and trigger a connect
			zcell.mutex.Lock()
			zcell.setState(CELL_DISCONNECTED)
			zcell.mutex.Unlock()
			zcell.connect()
		}()
	}

	// we broadcast on the condition to get everybody unstuck,
	// whether we succeeded to connect or not
	zcell.ready.Broadcast()
	zcell.mutex.Unlock()
}
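// A minimal waiter sketch (hypothetical, not part of the original code) showing
// how the broadcast at the end of connect() is typically consumed. It assumes
// zcell.ready is a sync.Cond whose Locker is zcell.mutex, which is why connect()
// broadcasts while still holding the lock.
func (zcell *zkCell) waitUntilSettled() error {
	zcell.mutex.Lock()
	defer zcell.mutex.Unlock()
	for zcell.state == CELL_DISCONNECTED || zcell.state == CELL_CONNECTING {
		zcell.ready.Wait()
	}
	if zcell.state == CELL_CONNECTED {
		return nil
	}
	// CELL_BACKOFF: report the last connection error
	return zcell.lastErr
}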
func (rowCache *InvalidationProcessor) runInvalidationLoop() {
	var err error

	purgeCache := false
	purgeReason := ""

	replPos, err := mysqlctl.GetReplicationPosition()
	if err != nil {
		rErr := NewInvalidationError(FATAL_ERROR, fmt.Sprintf("Cannot determine replication position %v", err), "")
		rowCache.updateErrCounters(rErr)
		rowCache.stopCache(rErr.Error())
		return
	}

	startPosition := &mysqlctl.BinlogPosition{Position: *replPos}
	checkpoint, ok := rowCache.getCheckpoint()

	// Cannot resume from last checkpoint position.
	// Purging the cache.
	if !ok {
		purgeCache = true
		purgeReason = "Error in locating invalidation checkpoint"
	} else if checkpoint == nil {
		// NOTE: not purging the cache here - since no checkpoint is found,
		// assuming cache is empty.
		relog.Info("No saved position found, invalidation starting at current replication position.")
	} else if !isCheckpointValid(&checkpoint.Position, replPos) {
		purgeCache = true
		purgeReason = "Invalidation checkpoint too old"
	} else {
		relog.Info("Starting at saved checkpoint %v", checkpoint.String())
		startPosition = checkpoint
	}

	if purgeCache {
		PurgeRowCache()
		startPosition = &mysqlctl.BinlogPosition{Position: *replPos}
		relog.Warning("Purging cache because '%v'", purgeReason)
	}

	relog.Info("Starting @ %v", startPosition.String())
	req := &mysqlctl.UpdateStreamRequest{StartPosition: *startPosition}
	err = mysqlctl.ServeUpdateStream(req, rowCache.receiveEvent)
	if err != nil {
		relog.Error("mysqlctl.ServeUpdateStream returned err '%v'", err.Error())
		if rErr, ok := err.(*InvalidationError); ok {
			rowCache.updateErrCounters(rErr)
		}
		rowCache.stopCache(fmt.Sprintf("Unexpected or fatal error, '%v'", err.Error()))
	}
}
func StartRowCacheInvalidation() {
	if !shouldInvalidatorRun() {
		relog.Warning("Row-cache invalidator not being enabled, criteria not met")
		CacheInvalidationProcessor.stopRowCacheInvalidation()
		return
	}

	if CacheInvalidationProcessor.isServiceEnabled() {
		relog.Warning("Row-cache invalidator service is already enabled")
		return
	}

	CacheInvalidationProcessor.stateLock.Lock()
	if shouldInvalidatorRun() {
		CacheInvalidationProcessor.setState(ENABLED)
		CacheInvalidationProcessor.stateLock.Unlock()
	} else {
		CacheInvalidationProcessor.setState(DISABLED)
		CacheInvalidationProcessor.stateLock.Unlock()
		return
	}

	relog.Info("Starting RowCacheInvalidation Service")
	CacheInvalidationProcessor.runInvalidationLoop()
}
func (rowCache *InvalidationProcessor) processEvent(event *mysqlctl.UpdateResponse) error {
	position := ""
	if event.BinlogPosition.Valid() {
		position = event.BinlogPosition.String()
	}
	if event.Error != "" {
		relog.Error("Update stream returned error '%v'", event.Error)
		// Check if update stream error is fatal, else record it and move on.
		if strings.HasPrefix(event.Error, mysqlctl.FATAL) {
			relog.Info("Returning Service Error")
			return NewInvalidationError(FATAL_ERROR, event.Error, position)
		}
		rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, event.Error, position))
		return nil
	}

	if !event.BinlogPosition.Valid() {
		rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, "no error, position is not set", ""))
		return nil
	}

	var err error
	switch event.EventData.SqlType {
	case mysqlctl.DDL:
		err = rowCache.handleDdlEvent(event)
		if err != nil {
			return err
		}
	case mysqlctl.BEGIN:
		rowCache.dmlBuffer = rowCache.dmlBuffer[:0]
		if rowCache.inTxn {
			rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, "Invalid 'BEGIN' event, transaction already in progress", position))
			return nil
		}
		rowCache.inTxn = true
	case mysqlctl.COMMIT:
		if !rowCache.inTxn {
			rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, "Invalid 'COMMIT' event for a non-transaction", position))
			return nil
		}
		err = rowCache.handleTxn(event)
		if err != nil {
			return err
		}
		rowCache.inTxn = false
		rowCache.dmlBuffer = rowCache.dmlBuffer[:0]
	case "insert", "update", "delete":
		dml, err := rowCache.buildDmlData(event)
		if err != nil {
			return err
		}
		if dml != nil {
			rowCache.dmlBuffer = append(rowCache.dmlBuffer, dml)
		}
	default:
		rowCache.updateErrCounters(NewInvalidationError(INVALID_EVENT, fmt.Sprintf("Unknown SqlType, %v %v", event.EventData.SqlType, event.EventData.Sql), position))
		//return NewInvalidationError(INVALID_EVENT, fmt.Sprintf("Unknown SqlType, %v %v", event.EventData.SqlType, event.EventData.Sql))
	}
	return nil
}
func (cp *CachePool) Open() {
	if len(cp.commandLine) == 0 {
		relog.Info("rowcache not enabled")
		return
	}
	cp.startMemcache()
	relog.Info("rowcache is enabled")
	f := func() (pools.Resource, error) {
		c, err := memcache.Connect(cp.port)
		if err != nil {
			return nil, err
		}
		return &Cache{c, cp}, nil
	}
	cp.pool = pools.NewResourcePool(f, cp.capacity, cp.capacity, cp.idleTimeout)
}
func multisnapshotCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	concurrency := subFlags.Int("concurrency", 8, "how many compression jobs to run simultaneously")
	spec := subFlags.String("spec", "-", "shard specification")
	tablesString := subFlags.String("tables", "", "dump only this comma separated list of tables")
	skipSlaveRestart := subFlags.Bool("skip-slave-restart", false, "after the snapshot is done, do not restart slave replication")
	maximumFilesize := subFlags.Uint64("maximum-file-size", 128*1024*1024, "the maximum size for an uncompressed data file")
	subFlags.Parse(args)
	if subFlags.NArg() != 2 {
		relog.Fatal("action multisnapshot requires <db name> <key name>")
	}

	shards, err := key.ParseShardingSpec(*spec)
	if err != nil {
		relog.Fatal("multisnapshot failed: %v", err)
	}
	var tables []string
	if *tablesString != "" {
		tables = strings.Split(*tablesString, ",")
	}
	filenames, err := mysqld.CreateMultiSnapshot(shards, subFlags.Arg(0), subFlags.Arg(1), tabletAddr, false, *concurrency, tables, *skipSlaveRestart, *maximumFilesize, nil)
	if err != nil {
		relog.Fatal("multisnapshot failed: %v", err)
	} else {
		relog.Info("manifest locations: %v", filenames)
	}
}
func snapshotSourceStartCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) {
	concurrency := subFlags.Int("concurrency", 4, "how many checksum jobs to run simultaneously")
	subFlags.Parse(args)
	if subFlags.NArg() != 1 {
		relog.Fatal("Command snapshotsourcestart requires <db name>")
	}

	filename, slaveStartRequired, readOnly, err := mysqld.CreateSnapshot(subFlags.Arg(0), tabletAddr, false, *concurrency, true, nil)
	if err != nil {
		relog.Fatal("snapshot failed: %v", err)
	} else {
		relog.Info("manifest location: %v", filename)
		relog.Info("slave start required: %v", slaveStartRequired)
		relog.Info("read only: %v", readOnly)
	}
}
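// A minimal dispatch sketch (hypothetical, not part of the original code)
// showing how subcommands like multisnapshotCmd and snapshotSourceStartCmd
// above are typically wired up: a name-to-function table consulted after the
// global flag.Parse(), with each command parsing its own subFlags. The
// commands map and dispatchCommand helper are illustrative only.
var commands = map[string]func(*mysqlctl.Mysqld, *flag.FlagSet, []string){
	"multisnapshot":       multisnapshotCmd,
	"snapshotsourcestart": snapshotSourceStartCmd,
}

func dispatchCommand(mysqld *mysqlctl.Mysqld, name string, args []string) {
	cmd, ok := commands[name]
	if !ok {
		relog.Fatal("unknown command: %v", name)
	}
	subFlags := flag.NewFlagSet(name, flag.ExitOnError)
	cmd(mysqld, subFlags, args)
}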
// Main entry function for reading and parsing the binlog.
func (blp *Blp) StreamBinlog(sendReply SendUpdateStreamResponse, binlogPrefix string) {
	var err error
	for {
		err = blp.streamBinlog(sendReply, binlogPrefix)
		sErr, ok := err.(*BinlogParseError)
		if ok && sErr.IsEOF() {
			// Double check the current parse position against the replication
			// position: if we are not behind replication it is an error,
			// otherwise it is a true EOF, so retry.
			relog.Info("EOF, retrying")
			ok, replErr := blp.isBehindReplication()
			if replErr != nil {
				err = replErr
			} else if !ok {
				err = NewBinlogParseError(REPLICATION_ERROR, "EOF, but parse position behind replication")
			} else {
				time.Sleep(5 * time.Second)
				blp.startPosition = &ReplicationCoordinates{
					MasterFilename: blp.currentPosition.Position.MasterFilename,
					MasterPosition: blp.currentPosition.Position.MasterPosition,
					RelayFilename:  blp.currentPosition.Position.RelayFilename,
					RelayPosition:  blp.currentPosition.Position.RelayPosition,
				}
				continue
			}
		}
		relog.Error("StreamBinlog error @ %v, error: %v", blp.currentPosition.String(), err.Error())
		SendError(sendReply, err, blp.currentPosition)
		break
	}
}
func (sq *SqlQuery) disallowQueries(forRestart bool) {
	sq.statemu.Lock()
	defer sq.statemu.Unlock()
	switch sq.state.Get() {
	case CONNECTING:
		sq.setState(ABORT)
		return
	case NOT_SERVING, CLOSED:
		if forRestart {
			sq.setState(CLOSED)
		} else {
			sq.setState(NOT_SERVING)
		}
		return
	case ABORT:
		return
	case INITIALIZING, SHUTTING_DOWN:
		panic("unreachable")
	}
	// state is OPEN
	sq.setState(SHUTTING_DOWN)
	defer func() {
		if forRestart {
			sq.setState(CLOSED)
		} else {
			sq.setState(NOT_SERVING)
		}
	}()

	relog.Info("Stopping query service: %d", sq.sessionId)
	sq.qe.Close(forRestart)
	sq.sessionId = 0
	sq.dbconfig = dbconfigs.DBConfig{}
}
func (si *SchemaInfo) DropTable(tableName string) {
	si.mu.Lock()
	defer si.mu.Unlock()
	delete(si.tables, tableName)
	si.queries.Clear()
	relog.Info("Table %s forgotten", tableName)
}
func (mysqld *Mysqld) SnapshotSourceEnd(slaveStartRequired, readOnly, deleteSnapshot bool) error {
	if deleteSnapshot {
		// clean out our files
		relog.Info("removing snapshot links: %v", mysqld.SnapshotDir)
		if err := os.RemoveAll(mysqld.SnapshotDir); err != nil {
			relog.Warning("failed to remove old snapshot: %v", err)
			return err
		}
	}

	// Try to restart mysqld
	if err := Start(mysqld, MysqlWaitTime); err != nil {
		return err
	}

	// Restore original mysqld state that we saved above.
	if slaveStartRequired {
		if err := mysqld.StartSlave(); err != nil {
			return err
		}
		// this should be quick, but we might as well just wait
		if err := mysqld.WaitForSlaveStart(SlaveStartDeadline); err != nil {
			return err
		}
	}

	// And set read-only mode
	if err := mysqld.SetReadOnly(readOnly); err != nil {
		return err
	}

	return nil
}
func (blp *Blp) handleCommitEvent(sendReply mysqlctl.SendUpdateStreamResponse, commitEvent *eventBuffer) {
	if !blp.dbmatch {
		return
	}

	if blp.usingRelayLogs {
		for !blp.slavePosBehindReplication() {
			relog.Info("[%v:%v] parsing is not behind replication, sleeping", blp.keyspaceRange.Start.Hex(), blp.keyspaceRange.End.Hex())
			time.Sleep(2 * time.Second)
		}
	}

	commitEvent.BlPosition.Xid = blp.currentPosition.Xid
	commitEvent.BlPosition.GroupId = blp.currentPosition.GroupId
	blp.txnLineBuffer = append(blp.txnLineBuffer, commitEvent)

	// txn block for DMLs, parse it and send events for a txn
	var dmlCount int64
	// This filters the dmls for keyrange supplied by the client.
	blp.responseStream, dmlCount = blp.buildTxnResponse()

	// No dmls matched the keyspace id so return.
	if dmlCount == 0 {
		return
	}

	if err := sendStream(sendReply, blp.responseStream); err != nil {
		panic(NewBinlogParseError(fmt.Sprintf("Error in sending event to client %v", err)))
	}

	blp.globalState.dmlCount.Add("DmlCount."+blp.keyrangeTag, dmlCount)
	blp.globalState.txnCount.Add("TxnCount."+blp.keyrangeTag, 1)
}