Code example #1
File: wait.go Project: hg3rdrock/percona-agent
func NewWaitTicker(atInterval uint) *WaitTicker {
	wt := &WaitTicker{
		atInterval: atInterval,
		sync:       pct.NewSyncChan(),
	}
	return wt
}
Code example #2
func NewMockIntervalIter(intervalChan chan *qan.Interval) *MockIntervalIter {
	iter := &MockIntervalIter{
		intervalChan:     make(chan *qan.Interval),
		testIntervalChan: intervalChan,
		sync:             pct.NewSyncChan(),
	}
	return iter
}
Code example #3
File: sender.go Project: huangzhiyong/percona-agent
func NewSender(logger *pct.Logger, client pct.WebsocketClient) *Sender {
	s := &Sender{
		logger: logger,
		client: client,
		sync:   pct.NewSyncChan(),
		status: pct.NewStatus([]string{"data-sender"}),
	}
	return s
}
Code example #4
File: ticker.go Project: hg3rdrock/percona-agent
func NewEvenTicker(atInterval uint, sleep func(time.Duration)) *EvenTicker {
	et := &EvenTicker{
		atInterval: atInterval,
		sleep:      sleep,
		watcher:    make(map[chan time.Time]bool),
		watcherMux: new(sync.Mutex),
		sync:       pct.NewSyncChan(),
	}
	return et
}
Code example #5
File: iter.go Project: hg3rdrock/percona-agent
func NewIter(logger *pct.Logger, tickChan chan time.Time) *Iter {
	iter := &Iter{
		logger:   logger,
		tickChan: tickChan,
		// --
		intervalChan: make(chan *qan.Interval, 1),
		sync:         pct.NewSyncChan(),
	}
	return iter
}
Code example #6
func NewIter(intervalChan chan *qan.Interval) *Iter {
	iter := &Iter{
		testIntervalChan: intervalChan,
		// --
		intervalChan: make(chan *qan.Interval, 1),
		sync:         pct.NewSyncChan(),
		tickChan:     make(chan time.Time),
		calls:        []string{},
	}
	return iter
}
Code example #7
File: sender.go Project: hg3rdrock/percona-agent
func NewSender(logger *pct.Logger, client pct.WebsocketClient) *Sender {
	s := &Sender{
		logger:     logger,
		client:     client,
		sync:       pct.NewSyncChan(),
		status:     pct.NewStatus([]string{"data-sender", "data-sender-last", "data-sender-1d"}),
		lastStats:  NewSenderStats(0),
		dailyStats: NewSenderStats(24 * time.Hour),
	}
	return s
}
Code example #8
File: aggregator.go Project: hg3rdrock/percona-agent
func NewAggregator(logger *pct.Logger, interval int64, collectionChan chan *Collection, spool data.Spooler) *Aggregator {
	a := &Aggregator{
		logger:         logger,
		interval:       interval,
		collectionChan: collectionChan,
		spool:          spool,
		// --
		sync: pct.NewSyncChan(),
	}
	return a
}
Code example #9
func NewFileIntervalIter(logger *pct.Logger, filename FilenameFunc, tickChan chan time.Time) *FileIntervalIter {
	iter := &FileIntervalIter{
		logger:   logger,
		filename: filename,
		tickChan: tickChan,
		// --
		intervalChan: make(chan *Interval, 1),
		running:      false,
		sync:         pct.NewSyncChan(),
	}
	return iter
}
Code example #10
File: ws.go Project: huangzhiyong/percona-agent
func NewWebsocketClient(logger *pct.Logger, api pct.APIConnector, link string) (*WebsocketClient, error) {
	name := logger.Service()
	c := &WebsocketClient{
		logger: logger,
		api:    api,
		link:   link,
		// --
		conn:        nil,
		recvChan:    make(chan *proto.Cmd, RECV_BUFFER_SIZE),
		sendChan:    make(chan *proto.Reply, SEND_BUFFER_SIZE),
		connectChan: make(chan bool, 1),
		errChan:     make(chan error, 2),
		backoff:     pct.NewBackoff(5 * time.Minute),
		sendSync:    pct.NewSyncChan(),
		recvSync:    pct.NewSyncChan(),
		mux:         new(sync.Mutex),
		name:        name,
		status:      pct.NewStatus([]string{name, name + "-link"}),
	}
	return c, nil
}
Code example #11
File: monitor.go Project: hg3rdrock/percona-agent
func NewMonitor(name string, config *Config, logger *pct.Logger, conn mysql.Connector) *Monitor {
	m := &Monitor{
		name:   name,
		config: config,
		logger: logger,
		conn:   conn,
		// --
		sync:   pct.NewSyncChan(),
		status: pct.NewStatus([]string{name, name + "-mysql"}),
	}
	return m
}
Code example #12
File: spooler.go Project: huangzhiyong/percona-agent
func NewDiskvSpooler(logger *pct.Logger, dataDir string, hostname string) *DiskvSpooler {
	s := &DiskvSpooler{
		logger:   logger,
		dataDir:  dataDir,
		hostname: hostname,
		// --
		dataChan: make(chan *proto.Data, WRITE_BUFFER),
		sync:     pct.NewSyncChan(),
		status:   pct.NewStatus([]string{"data-spooler"}),
		mux:      new(sync.Mutex),
	}
	return s
}
Code example #13
File: monitor.go Project: hg3rdrock/percona-agent
func NewMonitor(logger *pct.Logger, mysqlConnFactory mysql.ConnectionFactory) mrms.Monitor {
	m := &Monitor{
		logger:           logger,
		mysqlConnFactory: mysqlConnFactory,
		// --
		mysqlInstances: make(map[string]*MysqlInstance),
		// --
		status:     pct.NewStatus([]string{MONITOR_NAME}),
		sync:       pct.NewSyncChan(),
		globalChan: make(chan string, 100),
	}
	return m
}
Code example #14
File: monitor.go Project: hg3rdrock/percona-agent
func NewMonitor(name string, config *Config, logger *pct.Logger) *Monitor {
	m := &Monitor{
		name:   name,
		config: config,
		logger: logger,
		// --
		prevCPUval: make(map[string][]float64),
		prevCPUsum: make(map[string]float64),
		status:     pct.NewStatus([]string{name}),
		sync:       pct.NewSyncChan(),
	}
	return m
}
Code example #15
File: analyzer.go Project: hg3rdrock/percona-agent
func NewRealAnalyzer(logger *pct.Logger, config Config, iter IntervalIter, mysqlConn mysql.Connector, restartChan <-chan bool, worker Worker, clock ticker.Manager, spool data.Spooler) *RealAnalyzer {
	name := logger.Service()
	a := &RealAnalyzer{
		logger:      logger,
		config:      config,
		iter:        iter,
		mysqlConn:   mysqlConn,
		restartChan: restartChan,
		worker:      worker,
		clock:       clock,
		spool:       spool,
		// --
		name:                name,
		mysqlConfiguredChan: make(chan bool, 1),
		workerDoneChan:      make(chan *Interval, 1),
		status:              pct.NewStatus([]string{name, name + "-last-interval", name + "-next-interval"}),
		runSync:             pct.NewSyncChan(),
		configureMySQLSync:  pct.NewSyncChan(),
		mux:                 &sync.RWMutex{},
	}
	return a
}
Code example #16
File: monitor.go Project: hg3rdrock/percona-agent
func NewMonitor(name string, config *Config, logger *pct.Logger, conn mysql.Connector, mrm mrms.Monitor) *Monitor {
	m := &Monitor{
		name:   name,
		config: config,
		logger: logger,
		conn:   conn,
		// --
		connectedChan: make(chan bool, 1),
		restartChan:   nil,
		status:        pct.NewStatus([]string{name, name + "-mysql"}),
		sync:          pct.NewSyncChan(),
		collectLimit:  float64(config.Collect) * 0.1, // 10% of Collect time
		mrm:           mrm,
	}
	return m
}
Code example #17
File: spooler.go Project: hg3rdrock/percona-agent
func NewDiskvSpooler(logger *pct.Logger, dataDir, trashDir, hostname string, limits proto.DataSpoolLimits) *DiskvSpooler {
	s := &DiskvSpooler{
		logger:   logger,
		dataDir:  dataDir,
		trashDir: trashDir,
		hostname: hostname,
		limits:   limits,
		// --
		dataChan: make(chan *proto.Data, DEFAULT_DATA_MAX_FILES),
		sync:     pct.NewSyncChan(),
		status:   pct.NewStatus([]string{"data-spooler", "data-spooler-count", "data-spooler-size", "data-spooler-oldest"}),
		mux:      new(sync.Mutex),
		fileSize: make(map[string]int),
	}
	return s
}
Code example #18
File: manager.go Project: huangzhiyong/percona-agent
func NewManager(logger *pct.Logger, mysqlFactory mysql.ConnectionFactory, clock ticker.Manager, iterFactory IntervalIterFactory, workerFactory WorkerFactory, spool data.Spooler, im *instance.Repo) *Manager {
	m := &Manager{
		logger:        logger,
		mysqlFactory:  mysqlFactory,
		clock:         clock,
		iterFactory:   iterFactory,
		workerFactory: workerFactory,
		spool:         spool,
		im:            im,
		// --
		mux:            new(sync.RWMutex),
		tickChan:       make(chan time.Time),
		workers:        make(map[Worker]*Interval),
		workersMux:     new(sync.RWMutex),
		workerDoneChan: make(chan Worker, 2),
		status:         pct.NewStatus([]string{"qan", "qan-log-parser", "qan-last-interval", "qan-next-interval"}),
		sync:           pct.NewSyncChan(),
		oldSlowLogs:    make(map[string]int),
	}
	return m
}
Code example #19
File: agent.go Project: hg3rdrock/percona-agent
// percona-agent:@goroutine[0]
func (agent *Agent) Run() error {
	logger := agent.logger
	logger.Debug("Run:call")
	defer logger.Debug("Run:return")

	// Start client goroutines for sending/receiving cmd/reply via channels
	// so we can do non-blocking send/recv.  This only needs to be done once.
	// The chans are buffered, so they work for a while if not connected.
	client := agent.client
	client.Start()
	cmdChan := client.RecvChan()
	connected := false
	go agent.connect()

	/*
	 * Start the status and cmd handlers.  Most messages must be serialized because,
	 * for example, handling start-service and stop-service at the same
	 * time would cause weird problems.  The cmdChan serializes messages,
	 * so it's "first come, first served" (i.e. FIFO).  Concurrency has
	 * consequences: e.g. if user1 sends a start-service and it succeeds
	 * and user2 sends the same start-service, user2 will get a ServiceIsRunningError.
	 * Status requests are handled concurrently so the user can always see what
	 * the agent is doing even if it's busy processing commands.
	 */
	agent.cmdHandlerSync = pct.NewSyncChan()
	go agent.cmdHandler()

	agent.statusHandlerSync = pct.NewSyncChan()
	go agent.statusHandler()

	// Allow those ^ goroutines to crash up to MAX_ERRORS.  Any more and it's
	// probably a code bug rather than bad input, network error, etc.
	cmdHandlerErrors := 0
	statusHandlerErrors := 0

	// Send Pong to API to keep cmd ws open or detect if API end is closed.
	// https://jira.percona.com/browse/PCT-765
	agent.keepalive = time.NewTicker(time.Duration(agent.config.Keepalive) * time.Second)

	logger.Info("Started version: " + VERSION)

	for {
		logger.Debug("idle")
		agent.status.Update("agent", "Idle")

		select {
		case cmd := <-cmdChan: // from API
			if cmd.Cmd == "Abort" {
				panic(cmd)
			}
			switch cmd.Cmd {
			case "Restart":
				logger.Debug("cmd:restart")
				agent.status.UpdateRe("agent", "Restarting", cmd)

				// Secure the start-lock file.  This lets us start ourselves but
				// wait until this process has exited, at which time the start-lock
				// is removed and the 2nd self continues starting.
				if err := pct.MakeStartLock(); err != nil {
					agent.reply(cmd.Reply(nil, err))
					continue
				}

				// Start ourselves with the same args this process was started with.
				cwd, err := os.Getwd()
				if err != nil {
					agent.reply(cmd.Reply(nil, err))
					continue // can't build the restart script without the cwd
				}
				comment := fmt.Sprintf(
					"This script was created by percona-agent in response to this Restart command:\n"+
						"# %s\n"+
						"# It is safe to delete.", cmd)
				sh := fmt.Sprintf("#!/bin/sh\n# %s\ncd %s\n%s %s >> %s/percona-agent.log 2>&1 &\n",
					comment,
					cwd,
					os.Args[0],
					strings.Join(os.Args[1:], " "),
					pct.Basedir.Path(),
				)
				startScript := pct.Basedir.File("start-script")
				if err := ioutil.WriteFile(startScript, []byte(sh), os.FileMode(0754)); err != nil {
					agent.reply(cmd.Reply(nil, err))
					continue // don't try to run a script we failed to write
				}
				logger.Debug("Restart:sh")
				self := pctCmd.Factory.Make(startScript)
				output, err := self.Run()
				agent.reply(cmd.Reply(output, err))
				logger.Debug("Restart:done")
				return nil
			case "Stop":
				logger.Debug("cmd:stop")
				logger.Info("Stopping", cmd)
				agent.status.UpdateRe("agent", "Stopping", cmd)
				agent.stop()
				agent.reply(cmd.Reply(nil))
				logger.Info("Stopped", cmd)
				agent.status.UpdateRe("agent", "Stopped", cmd)
				return nil
			case "Status":
				logger.Debug("cmd:status")
				agent.status.UpdateRe("agent", "Queueing", cmd)
				select {
				case agent.statusChan <- cmd: // to statusHandler
				default:
					err := pct.QueueFullError{Cmd: cmd.Cmd, Name: "statusQueue", Size: STATUS_QUEUE_SIZE}
					agent.reply(cmd.Reply(nil, err))
				}
			default:
				logger.Debug("cmd")
				agent.status.UpdateRe("agent", "Queueing", cmd)
				select {
				case agent.cmdChan <- cmd: // to cmdHandler
				default:
					err := pct.QueueFullError{Cmd: cmd.Cmd, Name: "cmdQueue", Size: CMD_QUEUE_SIZE}
					agent.reply(cmd.Reply(nil, err))
				}
			}
		case <-agent.cmdHandlerSync.CrashChan:
			cmdHandlerErrors++
			if cmdHandlerErrors < MAX_ERRORS {
				logger.Error("cmdHandler crashed, restarting")
				go agent.cmdHandler()
			} else {
				logger.Fatal("Too many cmdHandler errors")
				// todo: return or exit?
			}
		case <-agent.statusHandlerSync.CrashChan:
			statusHandlerErrors++
			if statusHandlerErrors < MAX_ERRORS {
				logger.Error("statusHandler crashed, restarting")
				go agent.statusHandler()
			} else {
				logger.Fatal("Too many statusHandler errors")
				// todo: return or exit?
			}
		case err := <-client.ErrorChan():
			logger.Warn("ws error:", err)
		case connected = <-client.ConnectChan():
			if connected {
				logger.Info("Connected to API")
				cmdHandlerErrors = 0
				statusHandlerErrors = 0
			} else {
				// websocket closed/crashed/err
				logger.Warn("Lost connection to API")
				go agent.connect()
			}
		case <-agent.keepalive.C:
			// Send keepalive (i.e. check if ws cmd chan is still open on API end).
			logger.Debug("pong")
			if connected {
				cmd := &proto.Cmd{Cmd: "Pong"}
				agent.reply(cmd.Reply(nil, nil))
			}
		}
	}
}
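
All of the constructors above follow the same lifecycle pattern: the object stores a pct.SyncChan at construction time, and a background goroutine started later uses it to coordinate a clean stop, with abnormal exits reported on CrashChan (consumed in the agent's Run loop above via cmdHandlerSync.CrashChan and statusHandlerSync.CrashChan). These snippets only show NewSyncChan and CrashChan, so the sketch below is a minimal, hypothetical stand-in for pct.SyncChan written from scratch to illustrate the pattern; the SyncChan fields, buffer sizes, and the Worker type with its Start/Stop methods are assumptions for illustration, not percona-agent's actual API.

package main

import (
	"fmt"
	"time"
)

// SyncChan is a minimal stand-in for pct.SyncChan (assumption: the real
// type carries more state).  StopChan asks the goroutine to stop, DoneChan
// reports a clean exit, and CrashChan reports an abnormal one.
type SyncChan struct {
	StopChan  chan bool
	DoneChan  chan bool
	CrashChan chan bool
}

func NewSyncChan() *SyncChan {
	return &SyncChan{
		StopChan:  make(chan bool, 1),
		DoneChan:  make(chan bool, 1),
		CrashChan: make(chan bool, 1),
	}
}

// Worker mirrors the constructor pattern above: dependencies first,
// then the sync chan for lifecycle coordination.
type Worker struct {
	tickChan chan time.Time
	sync     *SyncChan
}

func NewWorker(tickChan chan time.Time) *Worker {
	return &Worker{
		tickChan: tickChan,
		sync:     NewSyncChan(),
	}
}

func (w *Worker) Start() {
	go w.run()
}

// Stop signals run() to return and waits until it has.
func (w *Worker) Stop() {
	w.sync.StopChan <- true
	select {
	case <-w.sync.DoneChan: // clean exit
	case <-w.sync.CrashChan: // run() panicked; a supervisor could restart it
	}
}

func (w *Worker) run() {
	defer func() {
		if r := recover(); r != nil {
			w.sync.CrashChan <- true // abnormal exit
			return
		}
		w.sync.DoneChan <- true // clean exit
	}()
	for {
		select {
		case t := <-w.tickChan:
			fmt.Println("tick:", t)
		case <-w.sync.StopChan:
			return
		}
	}
}

func main() {
	tickChan := make(chan time.Time)
	w := NewWorker(tickChan)
	w.Start()
	tickChan <- time.Now()
	w.Stop()
}

This is the same shape the agent's Run loop relies on: a supervisor selects on each worker's CrashChan and decides whether to restart the goroutine or give up after too many errors.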