func (s *ManagerTestSuite) TestStartStopManager(t *C) {
	/**
	 * mm is a proxy manager for monitors, so it's always running.
	 * It should implement the service manager interface anyway,
	 * but it doesn't actually start or stop. Its main work is done
	 * in Handle, starting and stopping monitors (tested later).
	 */
	mrm := mock.NewMrmsMonitor()
	m := mm.NewManager(s.logger, s.factory, s.clock, s.spool, s.im, mrm)
	if m == nil {
		t.Fatal("Make new mm.Manager")
	}

	// It shouldn't have added a tickChan yet.
	if len(s.clock.Added) != 0 {
		t.Error("tickChan not added yet")
	}

	// First the API marshals an mm.Config.
	config := &mm.Config{
		ServiceInstance: proto.ServiceInstance{
			Service:    "mysql",
			InstanceId: 1,
		},
		Collect: 1,
		Report:  60,
		// No monitor-specific config
	}
	err := pct.Basedir.WriteConfig("mm-mysql-1", config)
	t.Assert(err, IsNil)

	// The agent calls mm.Start().
	err = m.Start()
	t.Assert(err, IsNil)

	// There is a monitor, so there should be tickers.
	if ok, diff := test.IsDeeply(s.clock.Added, []uint{1}); !ok {
		test.Dump(s.clock.Added)
		t.Errorf("Does not add tickChan, got %#v", diff)
	}

	// Its status should be "Running".
	status := m.Status()
	t.Check(status["mm"], Equals, "Running")

	// Can't start mm twice.
	err = m.Start()
	t.Check(err, Not(Equals), "")

	// Stopping should be idempotent.
	err = m.Stop()
	t.Check(err, IsNil)
	err = m.Stop()
	t.Check(err, IsNil)

	status = m.Status()
	t.Check(status["mm"], Equals, "Stopped")
}
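// A minimal sketch of the service-manager contract the test above exercises,
// assuming the method set used here (Start, Stop, Status, Handle). The real
// pct.ServiceManager interface may have a different or larger method set;
// this is for illustration only.
type serviceManager interface {
	Start() error                     // start the manager (idempotence varies by manager)
	Stop() error                      // stop; should be safe to call twice
	Status() map[string]string        // e.g. {"mm": "Running"}
	Handle(cmd *proto.Cmd) *proto.Reply // StartService, StopService, GetConfig, ...
}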
func (s *ManagerTestSuite) TestGetConfig(t *C) {
	m := mm.NewManager(s.logger, s.factory, s.clock, s.spool, s.im)
	t.Assert(m, NotNil)
	err := m.Start()
	t.Assert(err, IsNil)

	/**
	 * Start a mock MySQL monitor.
	 */
	mysqlMonitorConfig := &mysql.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Collect: 1,
			Report:  60,
		},
		Status: map[string]string{
			"threads_connected": "gauge",
			"threads_running":   "gauge",
		},
	}
	mysqlData, err := json.Marshal(mysqlMonitorConfig)
	t.Assert(err, IsNil)
	cmd := &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StartService",
		Data:    mysqlData,
	}
	s.mysqlMonitor.SetConfig(mysqlMonitorConfig)
	reply := m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Assert(reply.Error, Equals, "")

	/**
	 * Start a mock system monitor.
	 */
	systemMonitorConfig := &system.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "server",
				InstanceId: 1,
			},
			Collect: 10,
			Report:  60,
		},
	}
	systemData, err := json.Marshal(systemMonitorConfig)
	t.Assert(err, IsNil)
	cmd = &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StartService",
		Data:    systemData,
	}
	s.systemMonitor.SetConfig(systemMonitorConfig)
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Assert(reply.Error, Equals, "")

	/**
	 * GetConfig from mm, which should return all monitors' configs.
	 */
	cmd = &proto.Cmd{
		Cmd:     "GetConfig",
		Service: "mm",
	}
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Assert(reply.Error, Equals, "")
	t.Assert(reply.Data, NotNil)
	gotConfig := []proto.AgentConfig{}
	if err := json.Unmarshal(reply.Data, &gotConfig); err != nil {
		t.Fatal(err)
	}
	expectConfig := []proto.AgentConfig{
		{
			InternalService: "mm",
			ExternalService: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Config:  string(mysqlData),
			Running: true,
		},
		{
			InternalService: "mm",
			ExternalService: proto.ServiceInstance{
				Service:    "server",
				InstanceId: 1,
			},
			Config:  string(systemData),
			Running: true,
		},
	}
	if same, diff := test.IsDeeply(gotConfig, expectConfig); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}
}
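// A hedged sketch of the aggregation the GetConfig path above implies: the
// manager collects each running monitor's marshaled config into a
// []proto.AgentConfig reply. The function name and the map parameter are
// hypothetical, not the real mm.Manager internals.
func marshalAgentConfigs(monitors map[proto.ServiceInstance][]byte) ([]byte, error) {
	configs := []proto.AgentConfig{}
	for si, raw := range monitors {
		configs = append(configs, proto.AgentConfig{
			InternalService: "mm",        // the manager that owns the monitor
			ExternalService: si,          // the monitored service instance
			Config:          string(raw), // the monitor's own config, as JSON
			Running:         true,
		})
	}
	return json.Marshal(configs)
}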
/**
 * Tests:
 *   - starting a monitor
 *   - stopping a monitor
 *   - starting the monitor again (restarting it)
 *   - sneaked in :) an unknown-cmd test
 */
func (s *ManagerTestSuite) TestRestartMonitor(t *C) {
	// Create and start mm, no monitors yet.
	m := mm.NewManager(s.logger, s.factory, s.clock, s.spool, s.im)
	t.Assert(m, NotNil)
	err := m.Start()
	t.Assert(err, IsNil)

	// Start a monitor by sending StartService + monitor config.
	// This is the config in test/mm/config/mm-mysql-1.conf.
	mmConfig := &mysql.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Collect: 1,
			Report:  60,
		},
		Status: map[string]string{
			"threads_connected": "gauge",
			"threads_running":   "gauge",
		},
	}
	mmConfigData, err := json.Marshal(mmConfig)
	t.Assert(err, IsNil)

	// If this were a real monitor, it would decode and set its own config.
	// The mock monitor doesn't have any real config type, so we set it manually.
	s.mysqlMonitor.SetConfig(mmConfig)

	// The agent calls mm.Handle() with the cmd (for logging and status) and the config data.
	cmd := &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StartService",
		Data:    mmConfigData,
	}
	reply := m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Equals, "")

	// The monitor should be running. The mock monitor returns "Running" if
	// Start() has been called; else it returns "Stopped".
	status := m.Status()
	t.Check(status["monitor"], Equals, "Running")

	// There should be a 1s collect ticker for the monitor.
	if ok, diff := test.IsDeeply(s.clock.Added, []uint{1}); !ok {
		t.Errorf("Make 1s ticker for collect interval\n%s", diff)
	}

	// After starting a monitor, mm should write its config to the dir
	// it learned when mm.LoadConfig() was called. Next time the agent starts,
	// it will have mm start the monitor with this config.
	data, err := ioutil.ReadFile(s.configDir + "/mm-mysql-1.conf")
	t.Check(err, IsNil)
	gotConfig := &mysql.Config{}
	err = json.Unmarshal(data, gotConfig)
	t.Check(err, IsNil)
	if same, diff := test.IsDeeply(gotConfig, mmConfig); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}

	/**
	 * Stop the monitor.
	 */
	cmd = &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StopService",
		Data:    mmConfigData,
	}

	// Handles StopService without error.
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Equals, "")

	// Stopping a monitor removes it from the manager's list of monitors,
	// so it's no longer present in a status request.
	status = m.Status()
	t.Check(status["monitor"], Equals, "")

	// After stopping the monitor, the manager should remove its tickChan.
	if len(s.clock.Removed) != 1 {
		t.Error("Remove monitor's tickChan from clock")
	}

	// After stopping a monitor, mm should remove its config file so the agent
	// doesn't start it on restart.
	file := s.configDir + "/mm-mysql-1.conf"
	if pct.FileExists(file) {
		t.Error("Stopping monitor removes its config; ", file, " exists")
	}

	/**
	 * Start the monitor again (restarting monitor).
	 */
	cmd = &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StartService",
		Data:    mmConfigData,
	}

	// If this were a real monitor, it would decode and set its own config.
	// The mock monitor doesn't have any real config type, so we set it manually.
	s.mysqlMonitor.SetConfig(mmConfig)

	// The agent calls mm.Handle() with the cmd (for logging and status) and the config data.
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Equals, "")

	// The monitor should be running. The mock monitor returns "Running" if
	// Start() has been called; else it returns "Stopped".
	status = m.Status()
	t.Check(status["monitor"], Equals, "Running")

	// There should be a 1s collect ticker for the monitor.
	// (Actually two in s.clock.Added, because this is a mock and we started the monitor twice.)
	if ok, diff := test.IsDeeply(s.clock.Added, []uint{1, 1}); !ok {
		t.Errorf("Make 1s ticker for collect interval\n%s", diff)
	}

	// After starting a monitor, mm should write its config to the dir
	// it learned when mm.LoadConfig() was called. Next time the agent starts,
	// it will have mm start the monitor with this config.
	data, err = ioutil.ReadFile(s.configDir + "/mm-mysql-1.conf")
	t.Check(err, IsNil)
	gotConfig = &mysql.Config{}
	err = json.Unmarshal(data, gotConfig)
	t.Check(err, IsNil)
	if same, diff := test.IsDeeply(gotConfig, mmConfig); !same {
		t.Logf("%+v", gotConfig)
		t.Error(diff)
	}

	/**
	 * While we're all set up and working, let's sneak in an unknown-cmd test.
	 */
	cmd = &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "Pontificate",
		Data:    mmConfigData,
	}

	// Unknown cmd causes an error.
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Not(Equals), "")
}
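// A minimal sketch of the mock-monitor behavior these tests rely on:
// Status() reports "Running" only after Start() has been called, and
// SetConfig() injects the config a real monitor would decode itself.
// Hypothetical; the real mock monitor in the test package may differ
// (for example, its Start() likely takes tick and collection channels).
type sketchMonitor struct {
	config  *mysql.Config
	running bool
}

func (m *sketchMonitor) Start() error { m.running = true; return nil }
func (m *sketchMonitor) Stop() error  { m.running = false; return nil }

func (m *sketchMonitor) SetConfig(c *mysql.Config) { m.config = c }

func (m *sketchMonitor) Status() map[string]string {
	if m.running {
		return map[string]string{"monitor": "Running"}
	}
	return map[string]string{"monitor": "Stopped"}
}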
func run() error { version := fmt.Sprintf("percona-agent %s rev %s", agent.VERSION, agent.REVISION) if flagVersion { fmt.Println(version) return nil } golog.Printf("Running %s pid %d\n", version, os.Getpid()) if err := pct.Basedir.Init(flagBasedir); err != nil { return err } // Start-lock file is used to let agent1 self-update, create start-lock, // start updated agent2, exit cleanly, then agent2 starts. agent1 may // not use a PID file, so this special file is required. if err := pct.WaitStartLock(); err != nil { return err } // NOTE: This must run last, and defer if LIFO, so it must be declared first. defer os.Remove(pct.Basedir.File("start-lock")) /** * Agent config (require API key and agent UUID) */ if !pct.FileExists(pct.Basedir.ConfigFile("agent")) { return fmt.Errorf("Agent config file %s does not exist", pct.Basedir.ConfigFile("agent")) } bytes, err := agent.LoadConfig() if err != nil { return fmt.Errorf("Invalid agent config: %s\n", err) } agentConfig := &agent.Config{} if err := json.Unmarshal(bytes, agentConfig); err != nil { return fmt.Errorf("Error parsing "+pct.Basedir.ConfigFile("agent")+": ", err) } golog.Println("ApiHostname: " + agentConfig.ApiHostname) golog.Println("AgentUuid: " + agentConfig.AgentUuid) /** * Ping and exit, maybe. */ if flagPing { t0 := time.Now() code, err := pct.Ping(agentConfig.ApiHostname, agentConfig.ApiKey) d := time.Now().Sub(t0) if err != nil || code != 200 { return fmt.Errorf("Ping FAIL (%d %d %s)", d, code, err) } else { golog.Printf("Ping OK (%s)", d) return nil } } /** * PID file */ if flagPidFile != "" { pidFile := pct.NewPidFile() if err := pidFile.Set(flagPidFile); err != nil { golog.Fatalln(err) } defer pidFile.Remove() } /** * REST API */ api, err := ConnectAPI(agentConfig) if err != nil { golog.Fatal(err) } /** * Log relay */ logChan := make(chan *proto.LogEntry, log.BUFFER_SIZE*3) // Log websocket client, possibly disabled later. 
	logClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "log-ws"), api, "log")
	if err != nil {
		golog.Fatalln(err)
	}
	logManager := log.NewManager(
		logClient,
		logChan,
	)
	if err := logManager.Start(); err != nil {
		return fmt.Errorf("Error starting log manager: %s\n", err)
	}

	/**
	 * Instance manager
	 */
	itManager := instance.NewManager(
		pct.NewLogger(logChan, "instance-manager"),
		pct.Basedir.Dir("config"),
		api,
	)
	if err := itManager.Start(); err != nil {
		return fmt.Errorf("Error starting instance manager: %s\n", err)
	}

	/**
	 * Data spooler and sender
	 */
	hostname, _ := os.Hostname()
	dataClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "data-ws"), api, "data")
	if err != nil {
		golog.Fatalln(err)
	}
	dataManager := data.NewManager(
		pct.NewLogger(logChan, "data"),
		pct.Basedir.Dir("data"),
		hostname,
		dataClient,
	)
	if err := dataManager.Start(); err != nil {
		return fmt.Errorf("Error starting data manager: %s\n", err)
	}

	/**
	 * Collect/report ticker (master clock)
	 */
	nowFunc := func() int64 { return time.Now().UTC().UnixNano() }
	clock := ticker.NewClock(&ticker.RealTickerFactory{}, nowFunc)

	/**
	 * Metric and system config monitors
	 */
	mmManager := mm.NewManager(
		pct.NewLogger(logChan, "mm"),
		mmMonitor.NewFactory(logChan, itManager.Repo()),
		clock,
		dataManager.Spooler(),
		itManager.Repo(),
	)
	if err := mmManager.Start(); err != nil {
		return fmt.Errorf("Error starting mm manager: %s\n", err)
	}

	sysconfigManager := sysconfig.NewManager(
		pct.NewLogger(logChan, "sysconfig"),
		sysconfigMonitor.NewFactory(logChan, itManager.Repo()),
		clock,
		dataManager.Spooler(),
		itManager.Repo(),
	)
	if err := sysconfigManager.Start(); err != nil {
		return fmt.Errorf("Error starting sysconfig manager: %s\n", err)
	}

	/**
	 * Query Analytics
	 */
	qanManager := qan.NewManager(
		pct.NewLogger(logChan, "qan"),
		&mysql.RealConnectionFactory{},
		clock,
		qan.NewFileIntervalIterFactory(logChan),
		qan.NewSlowLogWorkerFactory(logChan),
		dataManager.Spooler(),
		itManager.Repo(),
	)
	if err := qanManager.Start(); err != nil {
		return fmt.Errorf("Error starting qan manager: %s\n", err)
	}

	/**
	 * Signal handler
	 */
	// Generally the agent has a crash-only design, but QAN is so far the only service
	// which reconfigures MySQL: it enables the slow log, sets long_query_time, etc.
	// It's not terrible to leave the slow log on, but it's nicer to turn it off.
	sigChan := make(chan os.Signal, 1)
	stopChan := make(chan error, 2)
	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
	go func() {
		sig := <-sigChan
		golog.Printf("Caught %s signal, shutting down...\n", sig)
		stopChan <- qanManager.Stop()
	}()

	/**
	 * Agent
	 */
	cmdClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "agent-ws"), api, "cmd")
	if err != nil {
		golog.Fatal(err)
	}

	// The official list of services known to the agent. Adding a new service
	// requires a manager, starting the manager as above, and adding the manager
	// to this map.
	services := map[string]pct.ServiceManager{
		"log":       logManager,
		"data":      dataManager,
		"qan":       qanManager,
		"mm":        mmManager,
		"instance":  itManager,
		"sysconfig": sysconfigManager,
	}
	agent := agent.NewAgent(
		agentConfig,
		pct.NewLogger(logChan, "agent"),
		api,
		cmdClient,
		services,
	)

	/**
	 * Run agent, wait for it to stop or signal.
	 */
	go func() {
		stopChan <- agent.Run()
	}()
	stopErr := <-stopChan // agent or signal
	golog.Println("Agent stopped, shutting down...")
	qanManager.Stop()           // see Signal handler ^
	time.Sleep(2 * time.Second) // wait for final replies and log entries
	return stopErr
}
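// A hedged sketch of the start-lock wait described in run() above: the new
// agent (agent2) blocks until the old agent's deferred cleanup removes the
// start-lock file, then proceeds. This illustrates the mechanism only; the
// real pct.WaitStartLock may differ, and the timeout value is assumed.
func waitStartLock(lockFile string) error {
	// Poll for the previous agent's start-lock to disappear, with a timeout
	// so a stale lock can't block startup forever.
	deadline := time.Now().Add(1 * time.Minute)
	for time.Now().Before(deadline) {
		if _, err := os.Stat(lockFile); os.IsNotExist(err) {
			return nil // previous agent exited cleanly
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("timeout waiting for start-lock %s to be removed", lockFile)
}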
func run() error { version := fmt.Sprintf("percona-agent %s%s rev %s", agent.VERSION, agent.REL, agent.REVISION) if flagVersion { fmt.Println(version) return nil } golog.Printf("Running %s pid %d\n", version, os.Getpid()) if err := pct.Basedir.Init(flagBasedir); err != nil { return err } // Start-lock file is used to let agent1 self-update, create start-lock, // start updated agent2, exit cleanly, then agent2 starts. agent1 may // not use a PID file, so this special file is required. if err := pct.WaitStartLock(); err != nil { return err } // NOTE: This must run last, and defer if LIFO, so it must be declared first. defer os.Remove(pct.Basedir.File("start-lock")) /** * Agent config (require API key and agent UUID) */ if !pct.FileExists(pct.Basedir.ConfigFile("agent")) { return fmt.Errorf("Agent config file %s does not exist", pct.Basedir.ConfigFile("agent")) } bytes, err := agent.LoadConfig() if err != nil { return fmt.Errorf("Invalid agent config: %s\n", err) } agentConfig := &agent.Config{} if err := json.Unmarshal(bytes, agentConfig); err != nil { return fmt.Errorf("Error parsing "+pct.Basedir.ConfigFile("agent")+": ", err) } golog.Println("ApiHostname: " + agentConfig.ApiHostname) golog.Println("AgentUuid: " + agentConfig.AgentUuid) /** * Ping and exit, maybe. */ // Set for all connections to API. X-Percona-API-Key is set automatically // using the pct.APIConnector. headers := map[string]string{ "X-Percona-Agent-Version": agent.VERSION, } if flagPing { t0 := time.Now() code, err := pct.Ping(agentConfig.ApiHostname, agentConfig.ApiKey, headers) d := time.Now().Sub(t0) if err != nil || code != 200 { return fmt.Errorf("Ping FAIL (%d %d %s)", d, code, err) } else { golog.Printf("Ping OK (%s)", d) return nil } } /** * PID file */ pidFilePath := agentConfig.PidFile if flagPidFile != "" { pidFilePath = flagPidFile } if pidFilePath != "" { pidFile := pct.NewPidFile() if err := pidFile.Set(pidFilePath); err != nil { golog.Fatalln(err) } defer pidFile.Remove() } /** * REST API */ retry := -1 // unlimited if flagStatus { retry = 1 } api, err := ConnectAPI(agentConfig, retry) if err != nil { golog.Fatal(err) } // Get agent status via API and exit. if flagStatus { code, bytes, err := api.Get(agentConfig.ApiKey, api.AgentLink("self")+"/status") if err != nil { return err } if code == 404 { return fmt.Errorf("Agent not found") } status := make(map[string]string) if err := json.Unmarshal(bytes, &status); err != nil { return err } golog.Println(status) return nil } /** * Connection factory */ connFactory := &mysql.RealConnectionFactory{} /** * Log relay */ logChan := make(chan *proto.LogEntry, log.BUFFER_SIZE*3) // Log websocket client, possibly disabled later. 
	logClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "log-ws"), api, "log", headers)
	if err != nil {
		golog.Fatalln(err)
	}
	logManager := log.NewManager(
		logClient,
		logChan,
	)
	if err := logManager.Start(); err != nil {
		return fmt.Errorf("Error starting log manager: %s\n", err)
	}

	/**
	 * MRMS (MySQL Restart Monitoring Service)
	 */
	mrm := mrmsMonitor.NewMonitor(
		pct.NewLogger(logChan, "mrms-monitor"),
		connFactory,
	)
	mrmsManager := mrms.NewManager(
		pct.NewLogger(logChan, "mrms-manager"),
		mrm,
	)
	if err := mrmsManager.Start(); err != nil {
		return fmt.Errorf("Error starting mrms manager: %s\n", err)
	}

	/**
	 * Instance manager
	 */
	itManager := instance.NewManager(
		pct.NewLogger(logChan, "instance-manager"),
		pct.Basedir.Dir("config"),
		api,
		mrm,
	)
	if err := itManager.Start(); err != nil {
		return fmt.Errorf("Error starting instance manager: %s\n", err)
	}

	/**
	 * Data spooler and sender
	 */
	hostname, _ := os.Hostname()
	dataClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "data-ws"), api, "data", headers)
	if err != nil {
		golog.Fatalln(err)
	}
	dataManager := data.NewManager(
		pct.NewLogger(logChan, "data"),
		pct.Basedir.Dir("data"),
		pct.Basedir.Dir("trash"),
		hostname,
		dataClient,
	)
	if err := dataManager.Start(); err != nil {
		return fmt.Errorf("Error starting data manager: %s\n", err)
	}

	/**
	 * Collect/report ticker (master clock)
	 */
	nowFunc := func() int64 { return time.Now().UTC().UnixNano() }
	clock := ticker.NewClock(&ticker.RealTickerFactory{}, nowFunc)

	/**
	 * Metric and system config monitors
	 */
	mmManager := mm.NewManager(
		pct.NewLogger(logChan, "mm"),
		mmMonitor.NewFactory(logChan, itManager.Repo(), mrm),
		clock,
		dataManager.Spooler(),
		itManager.Repo(),
		mrm,
	)
	if err := mmManager.Start(); err != nil {
		return fmt.Errorf("Error starting mm manager: %s\n", err)
	}

	sysconfigManager := sysconfig.NewManager(
		pct.NewLogger(logChan, "sysconfig"),
		sysconfigMonitor.NewFactory(logChan, itManager.Repo()),
		clock,
		dataManager.Spooler(),
		itManager.Repo(),
	)
	if err := sysconfigManager.Start(); err != nil {
		return fmt.Errorf("Error starting sysconfig manager: %s\n", err)
	}

	/**
	 * Query service (real-time EXPLAIN, SHOW CREATE TABLE, etc.)
	 */
	queryManager := query.NewManager(
		pct.NewLogger(logChan, "query"),
		itManager.Repo(),
		&mysql.RealConnectionFactory{},
	)
	if err := queryManager.Start(); err != nil {
		return fmt.Errorf("Error starting query manager: %s\n", err)
	}

	/**
	 * Query Analytics
	 */
	qanManager := qan.NewManager(
		pct.NewLogger(logChan, "qan"),
		clock,
		itManager.Repo(),
		mrm,
		connFactory,
		qanFactory.NewRealAnalyzerFactory(
			logChan,
			qanFactory.NewRealIntervalIterFactory(logChan),
			slowlog.NewRealWorkerFactory(logChan),
			perfschema.NewRealWorkerFactory(logChan),
			dataManager.Spooler(),
			clock,
		),
	)
	if err := qanManager.Start(); err != nil {
		return fmt.Errorf("Error starting qan manager: %s\n", err)
	}

	/**
	 * Sysinfo
	 */
	sysinfoManager := sysinfo.NewManager(
		pct.NewLogger(logChan, "sysinfo"),
	)

	// MySQL Sysinfo
	mysqlSysinfoService := mysqlSysinfo.NewMySQL(
		pct.NewLogger(logChan, "sysinfo-mysql"),
		itManager.Repo(),
	)
	if err := sysinfoManager.RegisterService("MySQLSummary", mysqlSysinfoService); err != nil {
		return fmt.Errorf("Error registering MySQL Sysinfo service: %s\n", err)
	}

	// System Sysinfo
	systemSysinfoService := systemSysinfo.NewSystem(
		pct.NewLogger(logChan, "sysinfo-system"),
	)
	if err := sysinfoManager.RegisterService("SystemSummary", systemSysinfoService); err != nil {
		return fmt.Errorf("Error registering System Sysinfo service: %s\n", err)
	}

	// Start Sysinfo manager.
	if err := sysinfoManager.Start(); err != nil {
		return fmt.Errorf("Error starting Sysinfo manager: %s\n", err)
	}

	/**
	 * Signal handler
	 */
	// Generally the agent has a crash-only design, but QAN is so far the only service
	// which reconfigures MySQL: it enables the slow log, sets long_query_time, etc.
	// It's not terrible to leave the slow log on, but it's nicer to turn it off.
	sigChan := make(chan os.Signal, 1)
	stopChan := make(chan error, 2)
	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
	go func() {
		sig := <-sigChan
		golog.Printf("Caught %s signal, shutting down...\n", sig)
		stopChan <- qanManager.Stop()
	}()

	/**
	 * Agent
	 */
	cmdClient, err := client.NewWebsocketClient(pct.NewLogger(logChan, "agent-ws"), api, "cmd", headers)
	if err != nil {
		golog.Fatal(err)
	}

	// The official list of services known to the agent. Adding a new service
	// requires a manager, starting the manager as above, and adding the manager
	// to this map.
	services := map[string]pct.ServiceManager{
		"log":       logManager,
		"data":      dataManager,
		"qan":       qanManager,
		"mm":        mmManager,
		"instance":  itManager,
		"mrms":      mrmsManager,
		"sysconfig": sysconfigManager,
		"query":     queryManager,
		"sysinfo":   sysinfoManager,
	}

	// Set the global pct/cmd.Factory, used for the Restart cmd.
	pctCmd.Factory = &pctCmd.RealCmdFactory{}

	agentLogger := pct.NewLogger(logChan, "agent")
	agent := agent.NewAgent(
		agentConfig,
		agentLogger,
		api,
		cmdClient,
		services,
	)

	/**
	 * Run agent, wait for it to stop, signal, or crash.
	 */
	var stopErr error
	go func() {
		defer func() {
			if err := recover(); err != nil {
				errMsg := fmt.Sprintf("Agent crashed: %s", err)
				golog.Println(errMsg)
				agentLogger.Error(errMsg)
				stopChan <- fmt.Errorf("%s", errMsg)
			}
		}()
		stopChan <- agent.Run()
	}()

	// Wait for agent to stop, or for signals.
	agentRunning := true
	statusSigChan := make(chan os.Signal, 1)
	signal.Notify(statusSigChan, syscall.SIGUSR1) // kill -USR1 PID
	reconnectSigChan := make(chan os.Signal, 1)
	signal.Notify(reconnectSigChan, syscall.SIGHUP) // kill -HUP PID
	for agentRunning {
		select {
		case stopErr = <-stopChan: // agent or signal
			golog.Println("Agent stopped, shutting down...")
			agentLogger.Info("Agent stopped")
			agentRunning = false
		case <-statusSigChan:
			status := agent.AllStatus()
			golog.Printf("Status: %+v\n", status)
		case <-reconnectSigChan:
			u, _ := user.Current()
			cmd := &proto.Cmd{
				Ts:        time.Now().UTC(),
				User:      u.Username + " (SIGHUP)",
				AgentUuid: agentConfig.AgentUuid,
				Service:   "agent",
				Cmd:       "Reconnect",
			}
			agent.Handle(cmd)
		}
	}

	qanManager.Stop()           // see Signal handler ^
	time.Sleep(2 * time.Second) // wait for final replies and log entries
	return stopErr
}
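// Usage sketch for the signal interface above: from another process, SIGUSR1
// makes the agent print its full status and SIGHUP makes it reconnect to the
// API. A minimal illustration assuming the agent's PID is already known; the
// function name is hypothetical.
func signalAgent(pid int) error {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return err
	}
	if err := proc.Signal(syscall.SIGUSR1); err != nil { // dump status
		return err
	}
	return proc.Signal(syscall.SIGHUP) // force reconnect
}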