Example no. 1
// cmdHandler:@goroutine[3]
func (agent *Agent) Handle(cmd *proto.Cmd) *proto.Reply {
	agent.status.UpdateRe("agent-cmd-handler", "Handling", cmd)
	agent.logger.Info("Running", cmd)

	defer func() {
		agent.logger.Info("Done running", cmd)
	}()

	var data interface{}
	var err error
	var errs []error
	switch cmd.Cmd {
	case "StartService":
		data, err = agent.handleStartService(cmd)
	case "StopService":
		data, err = agent.handleStopService(cmd)
	case "GetConfig":
		data, errs = agent.handleGetConfig(cmd)
	case "GetAllConfigs":
		data, errs = agent.handleGetAllConfigs(cmd)
	case "SetConfig":
		data, errs = agent.handleSetConfig(cmd)
	case "Update":
		data, errs = agent.handleUpdate(cmd)
	case "Version":
		data, errs = agent.handleVersion(cmd)
	default:
		errs = append(errs, pct.UnknownCmdError{Cmd: cmd.Cmd})
	}

	if err != nil {
		errs = append(errs, err)
	}
	if len(errs) > 0 {
		for _, err := range errs {
			if err != nil {
				agent.logger.Error(err)
			}
		}
	}

	return cmd.Reply(data, errs...)
}
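In Example no. 1, every command, success or failure, ends in a single cmd.Reply call, so the caller can treat Handle as total: it always gets a reply. A minimal sketch of a dispatch loop that could drive it (the dispatch function and both channel names are assumptions, not part of the original code):

// Hypothetical dispatch loop: reads commands from a channel, hands each
// one to the agent, and forwards the reply. cmdChan and replyChan are
// illustrative names only.
func dispatch(agent *Agent, cmdChan <-chan *proto.Cmd, replyChan chan<- *proto.Reply) {
	for cmd := range cmdChan {
		// Handle never returns nil; errors travel inside the reply.
		replyChan <- agent.Handle(cmd)
	}
}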
Example no. 2
// @goroutine[0]
func (m *Manager) Handle(cmd *proto.Cmd) *proto.Reply {
	m.status.UpdateRe("instance", "Handling", cmd)
	defer m.status.Update("instance", "Running")

	it := &proto.ServiceInstance{}
	if err := json.Unmarshal(cmd.Data, it); err != nil {
		return cmd.Reply(nil, err)
	}

	switch cmd.Cmd {
	case "Add":
		err := m.repo.Add(it.Service, it.InstanceId, it.Instance, true) // true = write to disk
		return cmd.Reply(nil, err)
	case "Remove":
		err := m.repo.Remove(it.Service, it.InstanceId)
		return cmd.Reply(nil, err)
	case "GetInfo":
		info, err := m.handleGetInfo(it.Service, it.Instance)
		return cmd.Reply(info, err)
	default:
		return cmd.Reply(nil, pct.UnknownCmdError{Cmd: cmd.Cmd})
	}
}
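Example no. 2 expects cmd.Data to carry a JSON-encoded proto.ServiceInstance. A sketch of how a caller might build the "Add" command (the field values are invented, and the exact types of proto.Cmd's fields are assumptions based on how Handle reads them):

// Hypothetical helper that wraps a ServiceInstance in an "Add" command.
// The Service/Cmd/Data fields are read by Handle above; their exact
// types here are assumptions.
func newAddCmd(it *proto.ServiceInstance) (*proto.Cmd, error) {
	data, err := json.Marshal(it)
	if err != nil {
		return nil, err
	}
	return &proto.Cmd{
		Service: "instance",
		Cmd:     "Add",
		Data:    data,
	}, nil
}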
Example no. 3
// @goroutine[0]
func (m *Manager) Handle(cmd *proto.Cmd) *proto.Reply {
	m.status.UpdateRe("data", "Handling", cmd)
	defer m.status.Update("data", "Running")

	m.logger.Info("Handle", cmd)

	switch cmd.Cmd {
	case "GetConfig":
		config, errs := m.GetConfig()
		return cmd.Reply(config, errs...)
	case "SetConfig":
		newConfig, errs := m.handleSetConfig(cmd)
		return cmd.Reply(newConfig, errs...)
	default:
		return cmd.Reply(nil, pct.UnknownCmdError{Cmd: cmd.Cmd})
	}
}
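All of these handlers fall through to pct.UnknownCmdError for commands they do not recognize. Judging only from the literal pct.UnknownCmdError{Cmd: cmd.Cmd} used above, a plausible shape for that type is the sketch below; the real definition in the pct package may differ:

// Assumed shape of pct.UnknownCmdError: a struct with a Cmd field
// that satisfies the error interface.
type UnknownCmdError struct {
	Cmd string
}

func (e UnknownCmdError) Error() string {
	return "unknown command: " + e.Cmd
}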
Example no. 4
func (m *Manager) Handle(cmd *proto.Cmd) *proto.Reply {
	m.status.UpdateRe("qan", "Handling", cmd)
	defer m.status.Update("qan", "Running")

	switch cmd.Cmd {
	case "StartService":
		m.mux.Lock()
		defer m.mux.Unlock()
		if m.running {
			return cmd.Reply(nil, pct.ServiceIsRunningError{Service: "qan"})
		}

		config := &Config{}
		if err := json.Unmarshal(cmd.Data, config); err != nil {
			return cmd.Reply(nil, err)
		}

		// Start run()/qan-log-parser.
		if err := m.start(config); err != nil {
			return cmd.Reply(nil, err)
		}
		m.running = true

		// Save the config.
		m.config = config
		if err := pct.Basedir.WriteConfig("qan", config); err != nil {
			return cmd.Reply(nil, err)
		}

		return cmd.Reply(nil) // success
	case "StopService":
		m.mux.Lock()
		defer m.mux.Unlock()
		if !m.running {
			return cmd.Reply(nil)
		}
		errs := []error{}
		if err := m.stop(); err != nil {
			errs = append(errs, err)
		}
		m.running = false
		if err := pct.Basedir.RemoveConfig("qan"); err != nil {
			errs = append(errs, err)
		}
		return cmd.Reply(nil, errs...)
	case "GetConfig":
		config, errs := m.GetConfig()
		return cmd.Reply(config, errs...)
	default:
		// SetConfig does not work by design.  To re-configure QAN,
		// stop it then start it again with the new config.
		return cmd.Reply(nil, pct.UnknownCmdError{Cmd: cmd.Cmd})
	}
}
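The StartService branch in Example no. 4 is a start-once guard: hold the lock for the whole branch, refuse to start twice, and flip m.running only after start() succeeds. The same guard in isolation (a minimal sketch; the type and names are stand-ins, not the actual qan.Manager):

// Minimal sketch of the locked start-once guard used above. run stands
// in for whatever actually starts the service.
type guardedService struct {
	mux     sync.Mutex
	running bool
}

func (s *guardedService) start(run func() error) error {
	s.mux.Lock()
	defer s.mux.Unlock()
	if s.running {
		return errors.New("service is already running")
	}
	if err := run(); err != nil {
		return err
	}
	s.running = true
	return nil
}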
Example no. 5
// @goroutine[0]
func (m *Manager) Handle(cmd *proto.Cmd) *proto.Reply {
	m.status.UpdateRe("mm", "Handling", cmd)
	defer m.status.Update("mm", "Running")

	switch cmd.Cmd {
	case "StartService":
		mm, name, err := m.getMonitorConfig(cmd)
		if err != nil {
			return cmd.Reply(nil, err)
		}

		m.status.UpdateRe("mm", "Starting "+name, cmd)
		m.logger.Info("Start", name, cmd)

		// Monitor names must be unique.
		m.mux.RLock()
		_, haveMonitor := m.monitors[name]
		m.mux.RUnlock()
		if haveMonitor {
			return cmd.Reply(nil, errors.New("Duplicate monitor: "+name))
		}

		// Create the monitor based on its type.
		monitor, err := m.factory.Make(mm.Service, mm.InstanceId, cmd.Data)
		if err != nil {
			return cmd.Reply(nil, errors.New("Factory: "+err.Error()))
		}

		// Make a synchronized (3rd arg=true) ticker for the collect interval.  It's
		// synchronized so all data aligns in charts; otherwise we can get MySQL
		// metrics at 00:03, system metrics at 00:05, and other metrics at 00:06,
		// which makes it very difficult to see all metrics at a single point in
		// time or meaningfully compare a single interval, e.g. 00:00 to 00:05.
		tickChan := make(chan time.Time)
		m.clock.Add(tickChan, mm.Collect, true)

		// We need one aggregator for each unique report interval.  There's usually
		// just one: 60s.  Remember: report interval != collect interval.  Monitors
		// can collect at different intervals (typically 1s and 10s), yet all report
		// at the same 60s interval, or at different report intervals.
		a, ok := m.aggregators[mm.Report]
		if !ok {
			// Make new aggregator for this report interval.
			logger := pct.NewLogger(m.logger.LogChan(), fmt.Sprintf("mm-ag-%d", mm.Report))
			collectionChan := make(chan *Collection, 5)
			aggregator := NewAggregator(logger, int64(mm.Report), collectionChan, m.spool)
			aggregator.Start()

			// Save aggregator for other monitors with same report interval.
			a = &Binding{aggregator, collectionChan}
			m.aggregators[mm.Report] = a
			m.logger.Info("Created", mm.Report, "second aggregator")
		}

		// Start the monitor.
		if err := monitor.Start(tickChan, a.collectionChan); err != nil {
			return cmd.Reply(nil, errors.New("Start "+name+": "+err.Error()))
		}
		m.mux.Lock()
		m.monitors[name] = monitor
		m.mux.Unlock()

		// Save the monitor-specific config to disk so the monitor starts again when the agent restarts.
		monitorConfig := monitor.Config()
		if err := pct.Basedir.WriteConfig(name, monitorConfig); err != nil {
			return cmd.Reply(nil, errors.New("Write "+name+" config:"+err.Error()))
		}

		return cmd.Reply(nil) // success
	case "StopService":
		_, name, err := m.getMonitorConfig(cmd)
		if err != nil {
			return cmd.Reply(nil, err)
		}
		m.status.UpdateRe("mm", "Stopping "+name, cmd)
		m.logger.Info("Stop", name, cmd)
		m.mux.RLock()
		monitor, ok := m.monitors[name]
		m.mux.RUnlock()
		if !ok {
			return cmd.Reply(nil, errors.New("Unknown monitor: "+name))
		}
		if err := monitor.Stop(); err != nil {
			return cmd.Reply(nil, errors.New("Stop "+name+": "+err.Error()))
		}
		m.clock.Remove(monitor.TickChan())
		if err := pct.Basedir.RemoveConfig(name); err != nil {
			return cmd.Reply(nil, errors.New("Remove "+name+": "+err.Error()))
		}
		m.mux.Lock()
		delete(m.monitors, name)
		m.mux.Unlock()
		return cmd.Reply(nil) // success
	case "GetConfig":
		config, errs := m.GetConfig()
		return cmd.Reply(config, errs...)
	default:
		// SetConfig does not work by design.  To re-configure a monitor,
		// stop it then start it again with the new config.
		return cmd.Reply(nil, pct.UnknownCmdError{Cmd: cmd.Cmd})
	}
}
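The aggregator lookup in Example no. 5 is a get-or-create keyed on the report interval, so every monitor reporting at, say, 60s shares one aggregator and one collection channel. The idiom reduced to its core (a sketch: the int64 key and the newBinding constructor are simplified stand-ins for the wiring above):

// Get-or-create sketch: one shared Binding per report interval.
func getAggregator(aggregators map[int64]*Binding, interval int64) *Binding {
	a, ok := aggregators[interval]
	if !ok {
		a = newBinding(interval) // hypothetical constructor standing in for the NewAggregator wiring
		aggregators[interval] = a
	}
	return a
}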
Example no. 6
// @goroutine[0]
func (m *Manager) Handle(cmd *proto.Cmd) *proto.Reply {
	m.status.UpdateRe("log", "Handling", cmd)
	defer m.status.Update("log", "Running")

	switch cmd.Cmd {
	case "SetConfig":
		m.mux.Lock()
		defer m.mux.Unlock()

		// proto.Cmd[Service:log, Cmd:SetConfig, Data:log.Config]
		newConfig := &Config{}
		if err := json.Unmarshal(cmd.Data, newConfig); err != nil {
			return cmd.Reply(nil, err)
		}

		if err := m.validateConfig(newConfig); err != nil {
			return cmd.Reply(nil, err)
		}

		errs := []error{}
		if m.config.File != newConfig.File {
			select {
			case m.relay.LogFileChan() <- newConfig.File:
				m.config.File = newConfig.File
			case <-time.After(3 * time.Second):
				errs = append(errs, errors.New("Timeout setting new log file"))
			}
		}
		if m.config.Level != newConfig.Level {
			level := proto.LogLevelNumber[newConfig.Level] // already validated
			select {
			case m.relay.LogLevelChan() <- level:
				m.config.Level = newConfig.Level
			case <-time.After(3 * time.Second):
				errs = append(errs, errors.New("Timeout setting new log level"))
			}
		}

		// Write the new, updated config.  If this fails, the agent will use the old config if restarted.
		if err := pct.Basedir.WriteConfig("log", m.config); err != nil {
			errs = append(errs, errors.New("log.WriteConfig:"+err.Error()))
		}

		return cmd.Reply(m.config, errs...)
	case "GetConfig":
		config, errs := m.GetConfig()
		return cmd.Reply(config, errs...)
	case "Reconnect":
		err := m.client.Disconnect()
		return cmd.Reply(nil, err)
	default:
		return cmd.Reply(nil, pct.UnknownCmdError{Cmd: cmd.Cmd})
	}
}
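Both timeouts in Example no. 6 use the same idiom: select between the channel send and time.After, so a stalled relay cannot block Handle for more than three seconds. The idiom in isolation (a minimal sketch):

// Send-with-timeout idiom: try the send, but give up after the deadline
// instead of blocking the handler forever.
func sendWithTimeout(ch chan<- string, v string, d time.Duration) error {
	select {
	case ch <- v:
		return nil
	case <-time.After(d):
		return errors.New("timeout sending value")
	}
}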
Example no. 7
// @goroutine[0]
func (m *Manager) Handle(cmd *proto.Cmd) *proto.Reply {
	m.status.UpdateRe("sysconfig", "Handling", cmd)
	defer m.status.Update("sysconfig", "Running")

	switch cmd.Cmd {
	case "StartService":
		c, name, err := m.getMonitorConfig(cmd)
		if err != nil {
			return cmd.Reply(nil, err)
		}

		m.status.UpdateRe("sysconfig", "Starting "+name, cmd)
		m.logger.Info("Start", name, cmd)

		// Monitor names must be unique.
		m.mux.RLock()
		_, haveMonitor := m.monitors[name]
		m.mux.RUnlock()
		if haveMonitor {
			return cmd.Reply(nil, errors.New("Duplicate monitor: "+name))
		}

		// Create the monitor based on its type.
		var monitor Monitor
		if monitor, err = m.factory.Make(c.Service, c.InstanceId, cmd.Data); err != nil {
			return cmd.Reply(nil, errors.New("Factory: "+err.Error()))
		}

		// Make an unsynchronized (3rd arg=false) ticker for the collect interval.
		// It's unsynchronized because 1) we don't need sysconfig data to be
		// synchronized, and 2) sysconfig monitors usually collect very slowly,
		// e.g. every 1h, so if we synced it, it could wait a while before the 1st tick.
		tickChan := make(chan time.Time)
		m.clock.Add(tickChan, c.Report, false)

		// Start the monitor.
		if err = monitor.Start(tickChan, m.reportChan); err != nil {
			return cmd.Reply(nil, errors.New("Start "+name+": "+err.Error()))
		}
		m.mux.Lock()
		m.monitors[name] = monitor
		m.mux.Unlock()

		// Save the monitor-specific config to disk so the monitor starts again when the agent restarts.
		monitorConfig := monitor.Config()
		if err = pct.Basedir.WriteConfig(name, monitorConfig); err != nil {
			return cmd.Reply(nil, errors.New("Write "+name+" config:"+err.Error()))
		}
		return cmd.Reply(nil) // success
	case "StopService":
		_, name, err := m.getMonitorConfig(cmd)
		if err != nil {
			return cmd.Reply(nil, err)
		}
		m.status.UpdateRe("sysconfig", "Stopping "+name, cmd)
		m.logger.Info("Stop", name, cmd)
		m.mux.RLock()
		monitor, ok := m.monitors[name]
		m.mux.RUnlock()
		if !ok {
			return cmd.Reply(nil, errors.New("Unknown monitor: "+name))
		}
		if err = monitor.Stop(); err != nil {
			return cmd.Reply(nil, errors.New("Stop "+name+": "+err.Error()))
		}
		m.clock.Remove(monitor.TickChan())
		if err := pct.Basedir.RemoveConfig(name); err != nil {
			return cmd.Reply(nil, errors.New("Remove "+name+": "+err.Error()))
		}
		m.mux.Lock()
		delete(m.monitors, name)
		m.mux.Unlock()
		return cmd.Reply(nil) // success
	case "GetConfig":
		config, errs := m.GetConfig()
		return cmd.Reply(config, errs...)
	default:
		// SetConfig does not work by design.  To re-configure a monitor,
		// stop it then start it again with the new config.
		return cmd.Reply(nil, pct.UnknownCmdError{Cmd: cmd.Cmd})
	}
}
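Examples no. 5 and no. 7 differ chiefly in the third argument to m.clock.Add: mm asks for a synchronized ticker so metrics align across monitors, while sysconfig does not, since its long collect intervals make alignment pointless. A sketch of how a monitor might consume the tick channel that clock.Add feeds (the consumer function and the collect callback are assumptions):

// Hypothetical consumer of a tick channel registered with clock.Add.
// Each tick triggers one collection via the assumed collect callback.
func consumeTicks(tickChan <-chan time.Time, collect func(time.Time)) {
	for now := range tickChan {
		collect(now)
	}
}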