func (s *ManagerTestSuite) TestAddWatcher(t *check.C) {
	now = int64(1380330697385120263) // Fri Sep 27 18:11:37.385120 -0700 PDT 2013
	s.tickerFactory.Set([]ticker.Ticker{s.mockTicker})
	m := ticker.NewClock(s.tickerFactory, nowFunc)
	c := make(chan time.Time)
	m.Add(c, 79, true)

	if !test.WaitState(s.mockTicker.RunningChan) {
		t.Error("Starts ticker")
	}
	if ok, diff := test.IsDeeply(s.tickerFactory.Made, []uint{79}); !ok {
		t.Errorf("Make 79s ticker, got %#v", diff)
	}
	if len(s.mockTicker.Added) == 0 {
		t.Error("Ticker added watcher")
	}

	// Manager should call ticker's ETA() to return time to next tick.
	d := m.ETA(c)
	if d != 0.1 {
		t.Error("clock.Manager.ETA()")
	}

	m.Remove(c)
}
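// The test above assigns a package-level "now" and passes "nowFunc" to
// ticker.NewClock without showing their definitions.  Below is a minimal,
// assumed sketch of that fixture (not part of this excerpt): a fake clock with
// the same func() int64 shape as the real nowFunc built in run().
var now int64 // nanosecond timestamp the test controls

// nowFunc returns the test-controlled timestamp instead of the real time, so
// the clock under test behaves deterministically.
func nowFunc() int64 {
	return now
}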
func run() error { version := fmt.Sprintf("percona-agent %s rev %s", agent.VERSION, agent.REVISION) if flagVersion { fmt.Println(version) return nil } golog.Printf("Running %s pid %d\n", version, os.Getpid()) if err := pct.Basedir.Init(flagBasedir); err != nil { return err } // Start-lock file is used to let agent1 self-update, create start-lock, // start updated agent2, exit cleanly, then agent2 starts. agent1 may // not use a PID file, so this special file is required. if err := pct.WaitStartLock(); err != nil { return err } // NOTE: This must run last, and defer if LIFO, so it must be declared first. defer os.Remove(pct.Basedir.File("start-lock")) /** * Agent config (require API key and agent UUID) */ if !pct.FileExists(pct.Basedir.ConfigFile("agent")) { return fmt.Errorf("Agent config file %s does not exist", pct.Basedir.ConfigFile("agent")) } bytes, err := agent.LoadConfig() if err != nil { return fmt.Errorf("Invalid agent config: %s\n", err) } agentConfig := &agent.Config{} if err := json.Unmarshal(bytes, agentConfig); err != nil { return fmt.Errorf("Error parsing "+pct.Basedir.ConfigFile("agent")+": ", err) } golog.Println("ApiHostname: " + agentConfig.ApiHostname) golog.Println("AgentUuid: " + agentConfig.AgentUuid) /** * Ping and exit, maybe. */ if flagPing { t0 := time.Now() code, err := pct.Ping(agentConfig.ApiHostname, agentConfig.ApiKey) d := time.Now().Sub(t0) if err != nil || code != 200 { return fmt.Errorf("Ping FAIL (%d %d %s)", d, code, err) } else { golog.Printf("Ping OK (%s)", d) return nil } } /** * PID file */ if flagPidFile != "" { pidFile := pct.NewPidFile() if err := pidFile.Set(flagPidFile); err != nil { golog.Fatalln(err) } defer pidFile.Remove() } /** * REST API */ api, err := ConnectAPI(agentConfig) if err != nil { golog.Fatal(err) } /** * Log relay */ logChan := make(chan *proto.LogEntry, log.BUFFER_SIZE*3) // Log websocket client, possibly disabled later. 
logClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "log-ws"), api, "log") if err != nil { golog.Fatalln(err) } logManager := log.NewManager( logClient, logChan, ) if err := logManager.Start(); err != nil { return fmt.Errorf("Error starting logmanager: %s\n", err) } /** * Instance manager */ itManager := instance.NewManager( pct.NewLogger(logChan, "instance-manager"), pct.Basedir.Dir("config"), api, ) if err := itManager.Start(); err != nil { return fmt.Errorf("Error starting instance manager: %s\n", err) } /** * Data spooler and sender */ hostname, _ := os.Hostname() dataClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "data-ws"), api, "data") if err != nil { golog.Fatalln(err) } dataManager := data.NewManager( pct.NewLogger(logChan, "data"), pct.Basedir.Dir("data"), hostname, dataClient, ) if err := dataManager.Start(); err != nil { return fmt.Errorf("Error starting data manager: %s\n", err) } /** * Collecct/report ticker (master clock) */ nowFunc := func() int64 { return time.Now().UTC().UnixNano() } clock := ticker.NewClock(&ticker.RealTickerFactory{}, nowFunc) /** * Metric and system config monitors */ mmManager := mm.NewManager( pct.NewLogger(logChan, "mm"), mmMonitor.NewFactory(logChan, itManager.Repo()), clock, dataManager.Spooler(), itManager.Repo(), ) if err := mmManager.Start(); err != nil { return fmt.Errorf("Error starting mm manager: %s\n", err) } sysconfigManager := sysconfig.NewManager( pct.NewLogger(logChan, "sysconfig"), sysconfigMonitor.NewFactory(logChan, itManager.Repo()), clock, dataManager.Spooler(), itManager.Repo(), ) if err := sysconfigManager.Start(); err != nil { return fmt.Errorf("Error starting sysconfig manager: %s\n", err) } /** * Query Analytics */ qanManager := qan.NewManager( pct.NewLogger(logChan, "qan"), &mysql.RealConnectionFactory{}, clock, qan.NewFileIntervalIterFactory(logChan), qan.NewSlowLogWorkerFactory(logChan), dataManager.Spooler(), itManager.Repo(), ) if err := qanManager.Start(); err != nil { return fmt.Errorf("Error starting qan manager: %s\n", err) } /** * Signal handler */ // Generally the agent has a crash-only design, but QAN is so far the only service // which reconfigures MySQL: it enables the slow log, sets long_query_time, etc. // It's not terrible to leave slow log on, but it's nicer to turn it off. sigChan := make(chan os.Signal, 1) stopChan := make(chan error, 2) signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) go func() { sig := <-sigChan golog.Printf("Caught %s signal, shutting down...\n", sig) stopChan <- qanManager.Stop() }() /** * Agent */ cmdClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "agent-ws"), api, "cmd") if err != nil { golog.Fatal(err) } // The official list of services known to the agent. Adding a new service // requires a manager, starting the manager as above, and adding the manager // to this map. services := map[string]pct.ServiceManager{ "log": logManager, "data": dataManager, "qan": qanManager, "mm": mmManager, "instance": itManager, "sysconfig": sysconfigManager, } agent := agent.NewAgent( agentConfig, pct.NewLogger(logChan, "agent"), api, cmdClient, services, ) /** * Run agent, wait for it to stop or signal. */ go func() { stopChan <- agent.Run() }() stopErr := <-stopChan // agent or signal golog.Println("Agent stopped, shutting down...") qanManager.Stop() // see Signal handler ^ time.Sleep(2 * time.Second) // wait for final replies and log entries return stopErr }
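// The services map above is the single registration point for agent services.
// A minimal sketch (assumed, not from the source) of adding a hypothetical new
// service follows the same pattern: build a manager with its own logger, start
// it, and add it to the map passed to agent.NewAgent.  "foo", foo.NewManager,
// and fooManager are placeholder names; the only pct.ServiceManager behavior
// assumed here is the Start method that run() calls on every other manager.
//
//	fooManager := foo.NewManager(
//		pct.NewLogger(logChan, "foo"),
//	)
//	if err := fooManager.Start(); err != nil {
//		return fmt.Errorf("Error starting foo manager: %s\n", err)
//	}
//	services["foo"] = fooManager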
func run() error { version := fmt.Sprintf("percona-agent %s%s rev %s", agent.VERSION, agent.REL, agent.REVISION) if flagVersion { fmt.Println(version) return nil } golog.Printf("Running %s pid %d\n", version, os.Getpid()) if err := pct.Basedir.Init(flagBasedir); err != nil { return err } // Start-lock file is used to let agent1 self-update, create start-lock, // start updated agent2, exit cleanly, then agent2 starts. agent1 may // not use a PID file, so this special file is required. if err := pct.WaitStartLock(); err != nil { return err } // NOTE: This must run last, and defer if LIFO, so it must be declared first. defer os.Remove(pct.Basedir.File("start-lock")) /** * Agent config (require API key and agent UUID) */ if !pct.FileExists(pct.Basedir.ConfigFile("agent")) { return fmt.Errorf("Agent config file %s does not exist", pct.Basedir.ConfigFile("agent")) } bytes, err := agent.LoadConfig() if err != nil { return fmt.Errorf("Invalid agent config: %s\n", err) } agentConfig := &agent.Config{} if err := json.Unmarshal(bytes, agentConfig); err != nil { return fmt.Errorf("Error parsing "+pct.Basedir.ConfigFile("agent")+": ", err) } golog.Println("ApiHostname: " + agentConfig.ApiHostname) golog.Println("AgentUuid: " + agentConfig.AgentUuid) /** * Ping and exit, maybe. */ // Set for all connections to API. X-Percona-API-Key is set automatically // using the pct.APIConnector. headers := map[string]string{ "X-Percona-Agent-Version": agent.VERSION, } if flagPing { t0 := time.Now() code, err := pct.Ping(agentConfig.ApiHostname, agentConfig.ApiKey, headers) d := time.Now().Sub(t0) if err != nil || code != 200 { return fmt.Errorf("Ping FAIL (%d %d %s)", d, code, err) } else { golog.Printf("Ping OK (%s)", d) return nil } } /** * PID file */ pidFilePath := agentConfig.PidFile if flagPidFile != "" { pidFilePath = flagPidFile } if pidFilePath != "" { pidFile := pct.NewPidFile() if err := pidFile.Set(pidFilePath); err != nil { golog.Fatalln(err) } defer pidFile.Remove() } /** * REST API */ retry := -1 // unlimited if flagStatus { retry = 1 } api, err := ConnectAPI(agentConfig, retry) if err != nil { golog.Fatal(err) } // Get agent status via API and exit. if flagStatus { code, bytes, err := api.Get(agentConfig.ApiKey, api.AgentLink("self")+"/status") if err != nil { return err } if code == 404 { return fmt.Errorf("Agent not found") } status := make(map[string]string) if err := json.Unmarshal(bytes, &status); err != nil { return err } golog.Println(status) return nil } /** * Connection factory */ connFactory := &mysql.RealConnectionFactory{} /** * Log relay */ logChan := make(chan *proto.LogEntry, log.BUFFER_SIZE*3) // Log websocket client, possibly disabled later. 
logClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "log-ws"), api, "log", headers) if err != nil { golog.Fatalln(err) } logManager := log.NewManager( logClient, logChan, ) if err := logManager.Start(); err != nil { return fmt.Errorf("Error starting logmanager: %s\n", err) } /** * MRMS (MySQL Restart Monitoring Service) */ mrm := mrmsMonitor.NewMonitor( pct.NewLogger(logChan, "mrms-monitor"), connFactory, ) mrmsManager := mrms.NewManager( pct.NewLogger(logChan, "mrms-manager"), mrm, ) if err := mrmsManager.Start(); err != nil { return fmt.Errorf("Error starting mrms manager: %s\n", err) } /** * Instance manager */ itManager := instance.NewManager( pct.NewLogger(logChan, "instance-manager"), pct.Basedir.Dir("config"), api, mrm, ) if err := itManager.Start(); err != nil { return fmt.Errorf("Error starting instance manager: %s\n", err) } /** * Data spooler and sender */ hostname, _ := os.Hostname() dataClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "data-ws"), api, "data", headers) if err != nil { golog.Fatalln(err) } dataManager := data.NewManager( pct.NewLogger(logChan, "data"), pct.Basedir.Dir("data"), pct.Basedir.Dir("trash"), hostname, dataClient, ) if err := dataManager.Start(); err != nil { return fmt.Errorf("Error starting data manager: %s\n", err) } /** * Collecct/report ticker (master clock) */ nowFunc := func() int64 { return time.Now().UTC().UnixNano() } clock := ticker.NewClock(&ticker.RealTickerFactory{}, nowFunc) /** * Metric and system config monitors */ mmManager := mm.NewManager( pct.NewLogger(logChan, "mm"), mmMonitor.NewFactory(logChan, itManager.Repo(), mrm), clock, dataManager.Spooler(), itManager.Repo(), mrm, ) if err := mmManager.Start(); err != nil { return fmt.Errorf("Error starting mm manager: %s\n", err) } sysconfigManager := sysconfig.NewManager( pct.NewLogger(logChan, "sysconfig"), sysconfigMonitor.NewFactory(logChan, itManager.Repo()), clock, dataManager.Spooler(), itManager.Repo(), ) if err := sysconfigManager.Start(); err != nil { return fmt.Errorf("Error starting sysconfig manager: %s\n", err) } /** * Query service (real-time EXPLAIN, SHOW CREATE TABLE, etc.) 
	 */

	queryManager := query.NewManager(
		pct.NewLogger(logChan, "query"),
		itManager.Repo(),
		&mysql.RealConnectionFactory{},
	)
	if err := queryManager.Start(); err != nil {
		return fmt.Errorf("Error starting query manager: %s\n", err)
	}

	/**
	 * Query Analytics
	 */

	qanManager := qan.NewManager(
		pct.NewLogger(logChan, "qan"),
		clock,
		itManager.Repo(),
		mrm,
		connFactory,
		qanFactory.NewRealAnalyzerFactory(
			logChan,
			qanFactory.NewRealIntervalIterFactory(logChan),
			slowlog.NewRealWorkerFactory(logChan),
			perfschema.NewRealWorkerFactory(logChan),
			dataManager.Spooler(),
			clock,
		),
	)
	if err := qanManager.Start(); err != nil {
		return fmt.Errorf("Error starting qan manager: %s\n", err)
	}

	/**
	 * Sysinfo
	 */

	sysinfoManager := sysinfo.NewManager(
		pct.NewLogger(logChan, "sysinfo"),
	)

	// MySQL Sysinfo
	mysqlSysinfoService := mysqlSysinfo.NewMySQL(
		pct.NewLogger(logChan, "sysinfo-mysql"),
		itManager.Repo(),
	)
	if err := sysinfoManager.RegisterService("MySQLSummary", mysqlSysinfoService); err != nil {
		return fmt.Errorf("Error registering Mysql Sysinfo service: %s\n", err)
	}

	// System Sysinfo
	systemSysinfoService := systemSysinfo.NewSystem(
		pct.NewLogger(logChan, "sysinfo-system"),
	)
	if err := sysinfoManager.RegisterService("SystemSummary", systemSysinfoService); err != nil {
		return fmt.Errorf("Error registering System Sysinfo service: %s\n", err)
	}

	// Start Sysinfo manager
	if err := sysinfoManager.Start(); err != nil {
		return fmt.Errorf("Error starting Sysinfo manager: %s\n", err)
	}

	/**
	 * Signal handler
	 */

	// Generally the agent has a crash-only design, but QAN is so far the only service
	// which reconfigures MySQL: it enables the slow log, sets long_query_time, etc.
	// It's not terrible to leave slow log on, but it's nicer to turn it off.
	sigChan := make(chan os.Signal, 1)
	stopChan := make(chan error, 2)
	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
	go func() {
		sig := <-sigChan
		golog.Printf("Caught %s signal, shutting down...\n", sig)
		stopChan <- qanManager.Stop()
	}()

	/**
	 * Agent
	 */

	cmdClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "agent-ws"), api, "cmd", headers)
	if err != nil {
		golog.Fatal(err)
	}

	// The official list of services known to the agent.  Adding a new service
	// requires a manager, starting the manager as above, and adding the manager
	// to this map.
	services := map[string]pct.ServiceManager{
		"log":       logManager,
		"data":      dataManager,
		"qan":       qanManager,
		"mm":        mmManager,
		"instance":  itManager,
		"mrms":      mrmsManager,
		"sysconfig": sysconfigManager,
		"query":     queryManager,
		"sysinfo":   sysinfoManager,
	}

	// Set the global pct/cmd.Factory, used for the Restart cmd.
	pctCmd.Factory = &pctCmd.RealCmdFactory{}

	agentLogger := pct.NewLogger(logChan, "agent")
	agent := agent.NewAgent(
		agentConfig,
		agentLogger,
		api,
		cmdClient,
		services,
	)

	/**
	 * Run agent, wait for it to stop, signal, or crash.
	 */

	var stopErr error

	go func() {
		defer func() {
			if err := recover(); err != nil {
				errMsg := fmt.Sprintf("Agent crashed: %s", err)
				golog.Println(errMsg)
				agentLogger.Error(errMsg)
				stopChan <- fmt.Errorf("%s", errMsg)
			}
		}()
		stopChan <- agent.Run()
	}()

	// Wait for agent to stop, or for signals.
	agentRunning := true
	statusSigChan := make(chan os.Signal, 1)
	signal.Notify(statusSigChan, syscall.SIGUSR1) // kill -USR1 PID
	reconnectSigChan := make(chan os.Signal, 1)
	signal.Notify(reconnectSigChan, syscall.SIGHUP) // kill -HUP PID
	for agentRunning {
		select {
		case stopErr = <-stopChan: // agent or signal
			golog.Println("Agent stopped, shutting down...")
			agentLogger.Info("Agent stopped")
			agentRunning = false
		case <-statusSigChan:
			status := agent.AllStatus()
			golog.Printf("Status: %+v\n", status)
		case <-reconnectSigChan:
			u, _ := user.Current()
			cmd := &proto.Cmd{
				Ts:        time.Now().UTC(),
				User:      u.Username + " (SIGHUP)",
				AgentUuid: agentConfig.AgentUuid,
				Service:   "agent",
				Cmd:       "Reconnect",
			}
			agent.Handle(cmd)
		}
	}

	qanManager.Stop()           // see Signal handler ^
	time.Sleep(2 * time.Second) // wait for final replies and log entries

	return stopErr
}
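// A minimal usage sketch (not part of the source) showing how a separate Go
// program could exercise the signal handling above: SIGUSR1 makes the agent
// print its full status, SIGHUP makes it handle a Reconnect command.  The PID
// file path is a placeholder; use whatever PID file the agent was started with.
package main

import (
	"io/ioutil"
	"strconv"
	"strings"
	"syscall"
)

func main() {
	raw, err := ioutil.ReadFile("/var/run/percona-agent.pid") // placeholder path
	if err != nil {
		panic(err)
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err != nil {
		panic(err)
	}
	// Same as `kill -USR1 PID`: dump the agent's status to its log.
	if err := syscall.Kill(pid, syscall.SIGUSR1); err != nil {
		panic(err)
	}
	// Same as `kill -HUP PID`: ask the agent to reconnect to the API.
	if err := syscall.Kill(pid, syscall.SIGHUP); err != nil {
		panic(err)
	}
}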