Example 1
func (s *RelayTestSuite) TestOfflineBuffering(t *C) {
	l := s.logger

	// We're going to cause the relay's client Recv() to get an error
	// which will cause the relay to connect again.  We block this 2nd
	// connect by blocking this chan.  End result: relay remains offline.
	s.client.SetConnectChan(s.connectChan)
	doneChan := make(chan bool, 1)
	go func() {
		s.client.RecvError <- io.EOF
		doneChan <- true
	}()
	// Wait for the relay to receive the Recv() error.
	<-doneChan

	// Wait for the relay to call client.Connect().
	<-s.connectChan

	// Double-check that relay is offline.
	if !test.WaitStatus(1, s.relay, "ws", "Disconnected") {
		t.Fatal("Relay connects")
	}

	// Relay is offline and trying to connect again in another goroutine.
	// These entries should therefore not be sent.  There's a minor race
	// condition: when relay goes offline, it sends an internal log entry.
	// Sometimes we get that here (Service="log") and sometimes not
	// (len(got)==0).  Either condition is correct for this test.
	l.Error("err1")
	l.Error("err2")
	got := test.WaitLog(s.recvChan, 0)
	if len(got) > 0 && got[0].Service != "log" {
		t.Errorf("Log entries are not sent while offline: %+v", got)
	}

	// Unblock the relay's connect attempt.
	s.connectChan <- true
	if !test.WaitStatus(1, s.relay, "ws", "Connected") {
		t.Fatal("Relay connects")
	}

	// Wait for the relay to resend what it buffered above.
	got = test.WaitLog(s.recvChan, 3)
	expect := []proto.LogEntry{
		{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: false"},
		{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: "err1"},
		{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: "err2"},
		{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: true"},
	}
	if same, diff := test.IsDeeply(got, expect); !same {
		t.Error(diff)
	}
}
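
A minimal sketch of the buffering pattern this test exercises, assuming a hypothetical offlineBuffer type (not the relay's actual implementation): entries are held while the connection is down and resent in their original order on reconnect.

type offlineBuffer struct {
	connected bool
	held      []string         // entries held while offline
	send      func(msg string) // delivery to the websocket client
}

func (b *offlineBuffer) Log(msg string) {
	if !b.connected {
		b.held = append(b.held, msg) // buffer while offline
		return
	}
	b.send(msg)
}

func (b *offlineBuffer) OnReconnect() {
	b.connected = true
	for _, msg := range b.held { // resend in original order
		b.send(msg)
	}
	b.held = b.held[:0]
}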
Example 2
func (s *SenderTestSuite) TestConnectErrors(t *C) {
	spool := mock.NewSpooler(nil)

	spool.FilesOut = []string{"slow001.json"}
	spool.DataOut = map[string][]byte{"slow001.json": []byte("...")}

	sender := data.NewSender(s.logger, s.client)

	err := sender.Start(spool, s.tickerChan, 60, false)
	t.Assert(err, IsNil)

	// Any connect error will do.
	s.client.ConnectError = io.EOF
	defer func() { s.client.ConnectError = nil }()

	// Tick causes the sender to connect and send all files.
	s.tickerChan <- time.Now()
	t0 := time.Now()

	// Wait for sender to start trying to connect...
	if !test.WaitStatus(5, sender, "data-sender", "Connecting") {
		t.Fatal("Timeout waiting for data-sender status=Connecting")
	}
	// ...then wait for it to finish and return.
	if !test.WaitStatusPrefix(data.MAX_SEND_ERRORS*data.CONNECT_ERROR_WAIT, sender, "data-sender", "Idle") {
		t.Fatal("Timeout waiting for data-sender status=Idle")
	}
	d := time.Since(t0).Seconds()

	// It should wait between reconnects, but not too long.
	if d < float64((data.MAX_SEND_ERRORS-1)*data.CONNECT_ERROR_WAIT) {
		t.Error("Waits between reconnects")
	}
	if d > float64(data.MAX_SEND_ERRORS*data.CONNECT_ERROR_WAIT) {
		t.Error("Waited too long between reconnects")
	}

	err = sender.Stop()
	t.Assert(err, IsNil)

	// Couldn't connect, so it doesn't send or reject anything.
	t.Check(len(spool.DataOut), Equals, 1)
	t.Check(len(spool.RejectedFiles), Equals, 0)

	// It should have called ConnectOnce() several times, else it didn't
	// really try to reconnect.
	trace := test.DrainTraceChan(s.client.TraceChan)
	t.Check(trace, DeepEquals, []string{
		"ConnectOnce",
		"ConnectOnce",
		"ConnectOnce",
		"DisconnectOnce",
	})
}
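
The timing assertions above imply a retry loop shaped roughly like this sketch (hypothetical names; the real constants live in the data package): it sleeps between consecutive attempts but not before the first, so the total wait falls between (MAX_SEND_ERRORS-1) and MAX_SEND_ERRORS times the per-error wait.

// Hypothetical retry loop; maxSendErrors and connectErrorWait stand in
// for data.MAX_SEND_ERRORS and data.CONNECT_ERROR_WAIT.
func connectWithRetry(connect func() error, maxSendErrors int, connectErrorWait time.Duration) error {
	var err error
	for i := 0; i < maxSendErrors; i++ {
		if i > 0 {
			time.Sleep(connectErrorWait) // wait between attempts, not before the first
		}
		if err = connect(); err == nil {
			return nil // connected
		}
	}
	return err // give up after maxSendErrors failed attempts
}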
Example 3
func (s *ManagerTestSuite) TestStatus(t *C) {
	// Start a data manager.
	m := data.NewManager(s.logger, s.dataDir, s.trashDir, "localhost", s.client)
	t.Assert(m, NotNil)
	config := &data.Config{
		Encoding:     "gzip",
		SendInterval: 1,
	}
	pct.Basedir.WriteConfig("data", config)

	err := m.Start()
	t.Assert(err, IsNil)

	// Get its status directly.
	if !test.WaitStatus(5, m, "data", "Running") {
		t.Fatal("test.WaitStatus() timeout")
	}
	status := m.Status()
	t.Check(status["data"], Equals, "Running")
	t.Check(status["data-spooler"], Equals, "Idle")
	t.Check(status["data-sender"], Equals, "Idle")
}
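
test.WaitStatus is used throughout these tests; a sketch of the polling it presumably does (assumed implementation, with the timeout in seconds as the call sites suggest):

// Hypothetical poller in the spirit of test.WaitStatus: poll Status()
// until status[key] == want or the timeout (in seconds) elapses.
func waitStatus(timeoutSec int, svc interface{ Status() map[string]string }, key, want string) bool {
	deadline := time.Now().Add(time.Duration(timeoutSec) * time.Second)
	for time.Now().Before(deadline) {
		if svc.Status()[key] == want {
			return true
		}
		time.Sleep(50 * time.Millisecond)
	}
	return false
}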
Example 4
func (s *TestSuite) TestSlowResponse(t *C) {
	// https://jira.percona.com/browse/PCT-565
	config := &mysql.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Collect: 1,
			Report:  60,
		},
		UserStats: true,
	}

	slowCon := mock.NewSlowMySQL(dsn)
	slowCon.SetGlobalDelay(time.Duration(config.Collect+1) * time.Second)
	m := mysql.NewMonitor(s.name, config, s.logger, slowCon, s.mrm)
	if m == nil {
		t.Fatal("Make new mysql.Monitor")
	}

	err := m.Start(s.tickChan, s.collectionChan)
	if err != nil {
		t.Fatalf("Start monitor without error, got %s", err)
	}
	defer m.Stop()

	if ok := test.WaitStatus(5, m, s.name+"-mysql", "Connected"); !ok {
		t.Fatal("Monitor is ready")
	}

	s.tickChan <- time.Now()
	got := test.WaitCollection(s.collectionChan, 1)
	// If collecting took more than 10% longer than config.Collect, the
	// monitor must discard the metrics -> len(got) == 0.
	t.Check(got, HasLen, 0)
}
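
The discard rule described in that comment can be sketched like this (hypothetical guard; the real monitor's tolerance and mechanics may differ):

// Hypothetical staleness guard: time the collection and drop it if it ran
// more than 10% past the collect interval, since its values no longer line
// up with the tick they were meant for.
func collectWithDeadline(collect func() []mm.Metric, interval time.Duration) []mm.Metric {
	start := time.Now()
	metrics := collect()
	if time.Since(start) > interval+interval/10 {
		return nil // discard: collection took too long
	}
	return metrics
}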
Example 5
func (s *TestSuite) TestStartCollectStop(t *C) {
	// Create the monitor.
	config := &mysql.Config{
		Config: sysconfig.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
		},
	}
	m := mysql.NewMonitor(s.name, config, s.logger, mysqlConn.NewConnection(dsn))
	if m == nil {
		t.Fatal("Make new mysql.Monitor")
	}

	// Start the monitor.
	err := m.Start(s.tickChan, s.reportChan)
	if err != nil {
		t.Fatalf("Start monitor without error, got %s", err)
	}

	// monitor=Idle once it has successfully connected to MySQL.  This may
	// take a few seconds (hopefully < 5) on a slow test machine.
	if ok := test.WaitStatusPrefix(5, m, s.name, "Idle"); !ok {
		t.Fatal("Monitor is ready")
	}

	// Send tick to make the monitor collect.
	now := time.Now().UTC()
	s.tickChan <- now
	got := test.WaitSystemConfig(s.reportChan, 1)
	if len(got) == 0 {
		t.Fatal("Got a sysconfig after tick")
	}
	c := got[0]

	if c.Ts != now.Unix() {
		t.Error("Report.Ts set to %s; got %s", now.Unix(), c.Ts)
	}

	if len(c.Settings) < 100 {
		t.Fatal("Collect > 100 vars; got %+v", c.Settings)
	}

	haveWaitTimeout := false
	val := ""
	for _, setting := range c.Settings {
		if setting[0] == "wait_timeout" {
			haveWaitTimeout = true
			val = setting[1]
		}
	}
	if !haveWaitTimeout {
		t.Logf("%+v\n", c)
		t.Error("Got wait_timeout")
	}
	if val == "" {
		t.Error("wait_timeout has value")
	}

	/**
	 * Stop the monitor.
	 */

	m.Stop()

	if ok := test.WaitStatus(5, m, s.name, "Stopped"); !ok {
		t.Fatal("Monitor has stopped")
	}
}
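
The Settings asserted above are [name, value] string pairs; a sketch of how they could be gathered with database/sql (assumed collector, not the monitor's actual code):

// Hypothetical collector: scan SHOW GLOBAL VARIABLES into [name, value]
// pairs, the same shape as c.Settings in the test above.
func collectGlobalVariables(db *sql.DB) ([][]string, error) {
	rows, err := db.Query("SHOW GLOBAL VARIABLES")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var settings [][]string
	for rows.Next() {
		var name, value string
		if err := rows.Scan(&name, &value); err != nil {
			return nil, err
		}
		settings = append(settings, []string{name, value})
	}
	return settings, rows.Err()
}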
Example 6
func (s *ManagerTestSuite) TestStartCollectStop(t *C) {
	files := []string{"stat", "meminfo", "vmstat", "loadavg", "diskstats"}
	for _, file := range files {
		if !pct.FileExists("/proc/" + file) {
			t.Fatal("/proc/" + file + " does not exist")
		}
	}

	// Create the monitor.
	m := system.NewMonitor(s.name, &system.Config{}, s.logger)
	if m == nil {
		t.Fatal("Make new system.Monitor")
	}

	// Start the monitor.
	err := m.Start(s.tickChan, s.collectionChan)
	if err != nil {
		t.Fatalf("Start monitor without error, got %s", err)
	}

	// system-monitor=Idle once it has started its internals,
	// which should be very fast.
	if ok := test.WaitStatusPrefix(3, m, s.name, "Idle"); !ok {
		t.Fatal("Monitor is ready")
	}

	// The monitor should only collect and send metrics on ticks; we haven't ticked yet.
	got := test.WaitCollection(s.collectionChan, 0)
	if len(got) > 0 {
		t.Fatal("No tick, no collection; got %+v", got)
	}

	// Now tick.  This should make monitor collect.
	now := time.Now()
	s.tickChan <- now

	got = test.WaitCollection(s.collectionChan, 1)
	t.Assert(got, Not(HasLen), 0)
	t.Check(got, HasLen, 1)

	c := got[0]
	t.Check(c.Ts, Equals, now.Unix())

	t.Assert(c.Metrics, Not(HasLen), 0)

	// /proc/stat values are relative (current - prev) so there shouldn't be any
	// after one tick.
	haveCPU, _ := haveMetric("cpu/user", c.Metrics)
	t.Check(haveCPU, Equals, false)

	// But other metrics are not relative, so we should have them.
	metrics := []string{"memory/MemTotal", "vmstat/numa_local", "loadavg/running", "disk/sda/reads"}
	for _, metric := range metrics {
		ok, val := haveMetric(metric, c.Metrics)
		t.Check(ok, Equals, true)
		t.Check(val, Not(Equals), 0)
	}

	// Tick a 2nd time and now we should get CPU metrics.
	time.Sleep(200 * time.Millisecond)
	now = time.Now()
	s.tickChan <- now

	got = test.WaitCollection(s.collectionChan, 1)
	t.Assert(got, Not(HasLen), 0)
	t.Check(got, HasLen, 1)
	c = got[0]
	t.Check(c.Ts, Equals, now.Unix())
	t.Assert(c.Metrics, Not(HasLen), 0)

	metrics = []string{"cpu/user", "cpu/nice", "cpu/system", "cpu/idle"}
	for _, metric := range metrics {
		ok, val := haveMetric(metric, c.Metrics)
		t.Check(ok, Equals, true)

		// Running this test requires some CPU so user and idle shouldn't be zero.
		if metric == "cpu/user" || metric == "cpu/idle" {
			t.Check(val, Not(Equals), 0)
		}
	}

	/**
	 * Stop the monitor.
	 */

	m.Stop()

	if ok := test.WaitStatus(5, m, s.name, "Stopped"); !ok {
		t.Fatal("Monitor has stopped")
	}
}
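
The two-tick behavior checked above is the usual pattern for /proc/stat: its counters are cumulative, so per-interval CPU values need a previous sample. A sketch (hypothetical type; the monitor's actual bookkeeping may differ):

// Hypothetical delta tracker for cumulative /proc/stat counters: the first
// sample only seeds prev, which is why cpu/* metrics appear on the 2nd tick.
type cpuSampler struct {
	prev map[string]uint64 // e.g. "user", "system", "idle" -> cumulative jiffies
}

func (s *cpuSampler) Delta(curr map[string]uint64) map[string]uint64 {
	if s.prev == nil {
		s.prev = curr
		return nil // nothing to report on the first tick
	}
	d := make(map[string]uint64, len(curr))
	for k, v := range curr {
		d[k] = v - s.prev[k] // current - prev
	}
	s.prev = curr
	return d
}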
Example 7
func (s *TestSuite) TestStartCollectStop(t *C) {
	/**
	 * The mm manager uses a mm monitor factory to create monitors.  This is
	 * what the factory does...
	 */

	// First, monitors monitor an instance of some service (MySQL, RabbitMQ, etc.),
	// so the instance name and id are given.  Second, every monitor has its own
	// specific config info which is sent as the proto.Cmd.Data.  This config
	// embeds an mm.Config which embeds a proto.ServiceInstance:
	config := &mysql.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Collect: 1,
			Report:  60,
		},
		Status: map[string]string{
			"threads_connected": "gauge",
			"threads_running":   "gauge",
		},
	}

	// From the config, the factory determines the monitor's name based on
	// the service instance it's monitoring, and it creates a mysql.Connector
	// for the DSN of that service (since it's a MySQL monitor in this case).
	// It creates the monitor with these args:

	m := mysql.NewMonitor(s.name, config, s.logger, mysqlConn.NewConnection(dsn), s.mrm)
	if m == nil {
		t.Fatal("Make new mysql.Monitor")
	}

	// The factory returns the monitor to the manager which starts it with
	// the necessary channels:
	err := m.Start(s.tickChan, s.collectionChan)
	if err != nil {
		t.Fatalf("Start monitor without error, got %s", err)
	}

	// monitor-mysql=Connected once it has successfully connected to MySQL.
	// This may take a few seconds (hopefully < 5) on a slow test machine.
	if ok := test.WaitStatus(5, m, s.name+"-mysql", "Connected"); !ok {
		t.Fatal("Monitor is ready")
	}

	// The monitor should only collect and send metrics on ticks; we haven't ticked yet.
	got := test.WaitCollection(s.collectionChan, 0)
	if len(got) > 0 {
		t.Fatal("No tick, no collection; got %+v", got)
	}

	// Now tick.  This should make monitor collect.
	now := time.Now()
	s.tickChan <- now
	got = test.WaitCollection(s.collectionChan, 1)
	if len(got) == 0 {
		t.Fatal("Got a collection after tick")
	}
	c := got[0]

	if c.Ts != now.Unix() {
		t.Error("Collection.Ts set to %s; got %s", now.Unix(), c.Ts)
	}

	// Only two metrics should be reported, from the config ^: threads_connected,
	// threads_running.  Their values (from MySQL) are variable, but we know they
	// should be >= 1 because we're a thread connected and running.
	if len(c.Metrics) != 2 {
		t.Fatalf("Collected only configured metrics; got %+v", c.Metrics)
	}
	if c.Metrics[0].Name != "mysql/threads_connected" {
		t.Error("First metric is mysql/threads_connected; got ", c.Metrics[0].Name)
	}
	if c.Metrics[0].Number < 1 {
		t.Error("mysql/threads_connected >= 1; got ", c.Metrics[0].Number)
	}
	if c.Metrics[1].Name != "mysql/threads_running" {
		t.Error("Second metric is mysql/threads_running; got ", c.Metrics[1].Name)
	}
	if c.Metrics[1].Number < 1 {
		t.Error("mysql/threads_running >= 1; got ", c.Metrics[1].Number)
	}

	/**
	 * Stop the monitor.
	 */

	m.Stop()

	if ok := test.WaitStatus(5, m, s.name, "Stopped"); !ok {
		t.Fatal("Monitor has stopped")
	}
}
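
The factory steps narrated in the comments can be condensed into a sketch like this (hypothetical shape; the logger type and the nil mrm argument are assumptions, and the real factory resolves the DSN from the instance id):

// Hypothetical factory: derive the monitor name from the service instance,
// create a connection for its DSN, and construct the monitor.
func makeMonitor(cfg *mysql.Config, logger *pct.Logger, dsn string) *mysql.Monitor {
	name := fmt.Sprintf("%s-%d", cfg.Service, cfg.InstanceId) // e.g. "mysql-1"
	conn := mysqlConn.NewConnection(dsn)
	return mysql.NewMonitor(name, cfg, logger, conn, nil /* mrm omitted in this sketch */)
}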
Example 8
// This test is the same as TestCollectInnoDBStats, with the only difference that
// we now simulate a MySQL restart.  After a restart, we must still be able to
// collect InnoDB stats.
func (s *TestSuite) TestHandleMySQLRestarts(t *C) {
	/**
	 * Disable and reset InnoDB metrics so we can test that the monitor enables and sets them.
	 */
	if _, err := s.db.Exec("set global innodb_monitor_disable = '%'"); err != nil {
		t.Fatal(err)
	}
	if _, err := s.db.Exec("set global innodb_monitor_reset_all = '%'"); err != nil {
		t.Fatal(err)
	}

	s.db.Exec("drop database if exists percona_agent_test")
	s.db.Exec("create database percona_agent_test")
	s.db.Exec("create table percona_agent_test.t (i int) engine=innodb")
	defer s.db.Exec("drop database if exists percona_agent_test")

	config := &mysql.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Collect: 1,
			Report:  60,
		},
		Status: map[string]string{},
		InnoDB: []string{"dml_%"}, // same as above ^
	}

	m := mysql.NewMonitor(s.name, config, s.logger, mysqlConn.NewConnection(dsn), s.mrm)
	if m == nil {
		t.Fatal("Make new mysql.Monitor")
	}

	err := m.Start(s.tickChan, s.collectionChan)
	if err != nil {
		t.Fatalf("Start monitor without error, got %s", err)
	}

	if ok := test.WaitStatus(5, m, s.name+"-mysql", "Connected"); !ok {
		t.Fatal("Monitor is ready")
	}

	/**
	 * Simulate a MySQL restart by disabling InnoDB metrics and signaling the
	 * restart channel.  The monitor must enable them again.
	 */
	if _, err := s.db.Exec("set global innodb_monitor_disable = '%'"); err != nil {
		t.Fatal(err)
	}
	if _, err := s.db.Exec("set global innodb_monitor_reset_all = '%'"); err != nil {
		t.Fatal(err)
	}
	s.mrm.SimulateMySQLRestart()

	if ok := test.WaitStatus(5, m, s.name+"-mysql", "Connected"); !ok {
		t.Fatal("Monitor is ready")
	}

	// Do an INSERT to increment dml_inserts before the monitor collects.  If it
	// re-enabled the InnoDB metrics and collects them, we should get dml_inserts=1 below.
	s.db.Exec("insert into percona_agent_test.t (i) values (42)")

	s.tickChan <- time.Now()
	got := test.WaitCollection(s.collectionChan, 1)
	if len(got) == 0 {
		t.Fatal("Got a collection after tick")
	}
	c := got[0]

	/**
	 * ...monitor should have collected the InnoDB metrics:
	 *
	 * mysql> SELECT NAME, SUBSYSTEM, COUNT, TYPE FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE STATUS='enabled';
	 * +-------------+-----------+-------+----------------+
	 * | NAME        | SUBSYSTEM | COUNT | TYPE           |
	 * +-------------+-----------+-------+----------------+
	 * | dml_reads   | dml       |     0 | status_counter |
	 * | dml_inserts | dml       |     1 | status_counter |
	 * | dml_deletes | dml       |     0 | status_counter |
	 * | dml_updates | dml       |     0 | status_counter |
	 * +-------------+-----------+-------+----------------+
	 */
	if len(c.Metrics) != 4 {
		t.Fatal("Collect 4 InnoDB metrics; got %+v", c.Metrics)
	}
	expect := []mm.Metric{
		{Name: "mysql/innodb/dml/dml_reads", Type: "counter", Number: 0},
		{Name: "mysql/innodb/dml/dml_inserts", Type: "counter", Number: 1}, // <-- our INSERT
		{Name: "mysql/innodb/dml/dml_deletes", Type: "counter", Number: 0},
		{Name: "mysql/innodb/dml/dml_updates", Type: "counter", Number: 0},
	}
	if ok, diff := test.IsDeeply(c.Metrics, expect); !ok {
		t.Error(diff)
	}

	// Stop monitor, clean up.
	m.Stop()
}
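
A sketch of how the enabled counters shown in the comment can be read back (the INFORMATION_SCHEMA.INNODB_METRICS table is standard in MySQL 5.6+; the mapping to mm.Metric and the float64 Number type are assumptions):

// Read enabled InnoDB counters and name them like the expected metrics
// above, e.g. mysql/innodb/dml/dml_inserts.
func collectInnoDBMetrics(db *sql.DB) ([]mm.Metric, error) {
	rows, err := db.Query("SELECT NAME, SUBSYSTEM, COUNT FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE STATUS='enabled'")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var metrics []mm.Metric
	for rows.Next() {
		var name, subsystem string
		var count float64
		if err := rows.Scan(&name, &subsystem, &count); err != nil {
			return nil, err
		}
		metrics = append(metrics, mm.Metric{
			Name:   "mysql/innodb/" + subsystem + "/" + name,
			Type:   "counter",
			Number: count,
		})
	}
	return metrics, rows.Err()
}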
Example 9
func (s *TestSuite) TestCollectUserstats(t *C) {
	/**
	 * Disable and reset user stats.
	 */
	if _, err := s.db.Exec("set global userstat = off"); err != nil {
		t.Fatal(err)
	}
	if _, err := s.db.Exec("flush user_statistics"); err != nil {
		t.Fatal(err)
	}
	if _, err := s.db.Exec("flush index_statistics"); err != nil {
		t.Fatal(err)
	}

	config := &mysql.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Collect: 1,
			Report:  60,
		},
		UserStats: true,
	}

	m := mysql.NewMonitor(s.name, config, s.logger, mysqlConn.NewConnection(dsn), s.mrm)
	if m == nil {
		t.Fatal("Make new mysql.Monitor")
	}

	err := m.Start(s.tickChan, s.collectionChan)
	if err != nil {
		t.Fatalf("Start monitor without error, got %s", err)
	}

	if ok := test.WaitStatus(5, m, s.name+"-mysql", "Connected"); !ok {
		t.Fatal("Monitor is ready")
	}

	var user, host string
	err = s.db.QueryRow("SELECT SUBSTRING_INDEX(CURRENT_USER(),'@',1) AS 'user', SUBSTRING_INDEX(CURRENT_USER(),'@',-1) AS host").Scan(&user, &host)
	if err != nil {
		t.Fatal(err)
	}

	// To get index stats, we need to use an index: mysql.user PK <host, user>
	rows, err := s.db.Query("select * from mysql.user where host=? and user=?", host, user)
	if err != nil {
		t.Fatal(err)
	}
	defer rows.Close()

	s.tickChan <- time.Now()
	got := test.WaitCollection(s.collectionChan, 1)
	if len(got) == 0 {
		t.Fatal("Got a collection after tick")
	}
	c := got[0]

	/**
	 * Monitor should have collected the user stats: just table and index.
	 * Values vary a little, but there should be a table metric for mysql.user
	 * because login uses this table.
	 */
	if len(c.Metrics) < 1 {
		t.Fatalf("Collect at least 1 user stat metric; got %+v", c.Metrics)
	}

	var tblStat mm.Metric
	var idxStat mm.Metric
	for _, metric := range c.Metrics {
		switch metric.Name {
		case "mysql/db.mysql/t.user/rows_read":
			tblStat = metric
		case "mysql/db.mysql/t.user/idx.PRIMARY/rows_read":
			idxStat = metric
		}
	}

	// At least 2 rows should have been read from mysql.user:
	//   1: our db connection
	//   2: the monitor's db connection
	if tblStat.Number < 2 {
		t.Errorf("mysql/db.mysql/t.user/rows_read >= 2, got %+v", tblStat)
	}

	// At least 1 index read on mysql.user PK due to our SELECT ^.
	if idxStat.Number < 1 {
		t.Errorf("mysql/db.mysql/t.user/idx.PRIMARY/rows_read >= 1, got %+v", idxStat)
	}

	// Stop monitor, clean up.
	m.Stop()
}
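
The metric names checked above map onto Percona Server's userstat tables; a sketch of the table-stats read (column names from the userstat feature; the metric naming mirrors the test and is otherwise an assumption):

// Hypothetical userstat read: with userstat=ON, Percona Server exposes
// INFORMATION_SCHEMA.TABLE_STATISTICS (and INDEX_STATISTICS for indexes).
func collectTableRowsRead(db *sql.DB) (map[string]float64, error) {
	rows, err := db.Query("SELECT TABLE_SCHEMA, TABLE_NAME, ROWS_READ FROM INFORMATION_SCHEMA.TABLE_STATISTICS")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	stats := make(map[string]float64)
	for rows.Next() {
		var schema, table string
		var rowsRead float64
		if err := rows.Scan(&schema, &table, &rowsRead); err != nil {
			return nil, err
		}
		// Same naming scheme as the test: mysql/db.<schema>/t.<table>/rows_read
		stats["mysql/db."+schema+"/t."+table+"/rows_read"] = rowsRead
	}
	return stats, rows.Err()
}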
Example 10
func (s *RelayTestSuite) TestOfflineBufferOverflow(t *C) {
	// Same magic as in TestOfflineBuffering to force relay offline.
	l := s.logger
	s.client.SetConnectChan(s.connectChan)
	doneChan := make(chan bool, 1)
	go func() {
		s.client.RecvError <- io.EOF
		doneChan <- true
	}()
	<-doneChan
	<-s.connectChan
	// Relay is offline, trying to connect.

	// Overflow the first buffer but not the second.  We should get all
	// log entries back.
	for i := 0; i < log.BUFFER_SIZE+1; i++ {
		l.Error(fmt.Sprintf("a:%d", i))
	}
	if !test.WaitStatus(3, s.relay, "log-buf1", fmt.Sprintf("%d", log.BUFFER_SIZE)) {
		t.Error("First buffer full")
	}

	// Unblock the relay's connect attempt.
	s.connectChan <- true
	if !test.WaitStatus(1, s.relay, "ws", "Connected") {
		t.Fatal("Relay connects")
	}

	// Wait for the relay to resend what it buffered above.
	// +2 for "connected: false" and "connected: true".
	got := test.WaitLog(s.recvChan, log.BUFFER_SIZE+1+2)

	expect := make([]proto.LogEntry, log.BUFFER_SIZE+1+2)
	expect[0] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: false"}
	for i, n := 0, 1; i < log.BUFFER_SIZE+1; i, n = i+1, n+1 {
		expect[n] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: fmt.Sprintf("a:%d", i)}
	}
	expect[log.BUFFER_SIZE+1+1] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: true"}
	if same, diff := test.IsDeeply(got, expect); !same {
		t.Error(diff)
	}

	if !test.WaitStatus(3, s.relay, "log-buf2", "0") {
		status := s.relay.Status()
		t.Log(status)
		t.Fatal("1st buf empty")
	}
	expect = []proto.LogEntry{}

	// Force the relay offline again, then overflow both buffers. We should get
	// the first buffer, an entry about lost entries (from the second buffer),
	// then the second buffer with the very latest.
	go func() {
		s.client.RecvError <- io.EOF
		doneChan <- true
	}()
	<-doneChan
	<-s.connectChan
	// Relay is offline, trying to connect.

	overflow := 3
	for i := 0; i < (log.BUFFER_SIZE*2)+overflow; i++ {
		l.Error(fmt.Sprintf("b:%d", i))
	}
	if !test.WaitStatus(3, s.relay, "log-buf2", fmt.Sprintf("%d", overflow+1)) {
		status := s.relay.Status()
		t.Log(status)
		t.Fatal("2nd buf full")
	}

	// Unblock the relay's connect attempt.
	s.connectChan <- true
	if !test.WaitStatus(1, s.relay, "ws", "Connected") {
		t.Fatal("Relay connects")
	}

	// +3 for "connected: false", "Lost N entries", and "connected: true".
	got = test.WaitLog(s.recvChan, log.BUFFER_SIZE+overflow+3)

	expect = make([]proto.LogEntry, log.BUFFER_SIZE+overflow+3)
	expect[0] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: false"}
	n := 1
	// If buf size is 10, we should get "connected: false", entries 0-8, a
	// "Lost 10 log entries" message covering 9-18, then entries 19-22.
	/**
	 *  /10, first buffer
	 * 1		connected: false
	 * 2		entry 0
	 * 3		entry 1
	 * 4		entry 2
	 * 5		entry 3
	 * 6		entry 4
	 * 7		entry 5
	 * 8		entry 6
	 * 9		entry 7
	 * 10		entry 8
	 * Entries 9-18 go into the second buffer; entry 19 causes the overflow and reset:
	 *  /10, second buffer
	 * 1		entry 19
	 * 2		entry 20
	 * 3		entry 21
	 * 4		entry 22
	 */
	for i := 0; i < log.BUFFER_SIZE-1; i++ {
		expect[n] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: fmt.Sprintf("b:%d", i)}
		n++
	}
	expect[n] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: fmt.Sprintf("Lost %d log entries", log.BUFFER_SIZE)}
	n++
	for i, j := log.BUFFER_SIZE, log.BUFFER_SIZE*2-1; i < log.BUFFER_SIZE+overflow+1; i, j = i+1, j+1 {
		expect[n] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: fmt.Sprintf("b:%d", j)}
		n++
	}
	expect[log.BUFFER_SIZE+overflow+2] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: true"}
	if same, diff := test.IsDeeply(got, expect); !same {
		// @todo: this test may still be unstable
		n := len(got)
		if len(expect) > n {
			n = len(expect)
		}
		for i := 0; i < n; i++ {
			var gotL proto.LogEntry
			var expectL proto.LogEntry
			if i < len(got) {
				gotL = got[i]
			}
			if i < len(expect) {
				expectL = expect[i]
			}
			t.Logf("%+v %+v\n", gotL, expectL)
		}
		t.Error(diff)
	}
}
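
A sketch of the two-buffer scheme the expectations encode (hypothetical type with string entries, not the log package's actual code): the first buffer keeps the oldest entries and freezes when full; later entries spill into a second buffer which, on its own overflow, is cleared and counted as lost.

type doubleBuffer struct {
	size       int // log.BUFFER_SIZE in the real relay
	buf1, buf2 []string
	lost       int
}

func (d *doubleBuffer) Add(msg string) {
	if len(d.buf1) < d.size {
		d.buf1 = append(d.buf1, msg) // oldest entries, kept verbatim
		return
	}
	if len(d.buf2) == d.size {
		d.lost += len(d.buf2) // second buffer overflowed: drop it all
		d.buf2 = d.buf2[:0]
	}
	d.buf2 = append(d.buf2, msg) // newest entries
}

func (d *doubleBuffer) Flush(send func(string)) {
	for _, m := range d.buf1 {
		send(m)
	}
	if d.lost > 0 {
		send(fmt.Sprintf("Lost %d log entries", d.lost))
	}
	for _, m := range d.buf2 {
		send(m)
	}
	d.buf1, d.buf2, d.lost = d.buf1[:0], d.buf2[:0], 0
}

Walking the second half of the test through this sketch with size 10: "connected: false" plus b:0-b:8 fill buf1, b:9-b:18 fill buf2, and b:19 overflows it (lost=10), leaving buf2 = b:19-b:22; the flush order then matches the expected entries.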
Example 11
func (s *ManagerTestSuite) TestGetConfig(t *C) {
	m := data.NewManager(s.logger, s.dataDir, s.trashDir, "localhost", s.client)
	t.Assert(m, NotNil)

	config := &data.Config{
		Encoding:     "",
		SendInterval: 1,
		Limits: proto.DataSpoolLimits{
			MaxAge:   data.DEFAULT_DATA_MAX_AGE,
			MaxSize:  data.DEFAULT_DATA_MAX_SIZE,
			MaxFiles: data.DEFAULT_DATA_MAX_FILES,
		},
	}
	configBytes, _ := json.Marshal(config)
	// Write config to disk because manager reads it on start,
	// else it uses default config.
	pct.Basedir.WriteConfig("data", config)

	err := m.Start()
	t.Assert(err, IsNil)

	sender := m.Sender()
	t.Check(sender, NotNil)

	/**
	 * GetConfig
	 */

	cmd := &proto.Cmd{
		User:    "******",
		Service: "data",
		Cmd:     "GetConfig",
	}

	reply := m.Handle(cmd)
	t.Assert(reply.Error, Equals, "")
	t.Assert(reply.Data, NotNil)
	gotConfig := []proto.AgentConfig{}
	if err := json.Unmarshal(reply.Data, &gotConfig); err != nil {
		t.Fatal(err)
	}
	expectConfig := []proto.AgentConfig{
		{
			InternalService: "data",
			Config:          string(configBytes),
			Running:         true,
		},
	}
	if same, diff := test.IsDeeply(gotConfig, expectConfig); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}

	err = m.Stop()
	t.Assert(err, IsNil)
	if !test.WaitStatus(5, m, "data", "Stopped") {
		t.Fatal("test.WaitStatus() timeout")
	}
	status := m.Status()
	t.Check(status["data-spooler"], Equals, "Stopped")
	t.Check(status["data-sender"], Equals, "Stopped")

	// Config should report Running: false.
	reply = m.Handle(cmd)
	t.Assert(reply.Error, Equals, "")
	t.Assert(reply.Data, NotNil)
	if err := json.Unmarshal(reply.Data, &gotConfig); err != nil {
		t.Fatal(err)
	}
	expectConfig[0].Running = false
	if same, diff := test.IsDeeply(gotConfig, expectConfig); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}
}
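
A sketch of the GetConfig handling the test exercises (hypothetical handler body; proto.Reply's Data/Error fields are inferred from their use above):

// Marshal the running config as a single proto.AgentConfig, the same shape
// the test unmarshals from reply.Data.
func handleGetConfig(cfg *data.Config, running bool) (*proto.Reply, error) {
	cfgBytes, err := json.Marshal(cfg)
	if err != nil {
		return nil, err
	}
	agentConfigs := []proto.AgentConfig{
		{
			InternalService: "data",
			Config:          string(cfgBytes),
			Running:         running, // false once the manager is stopped
		},
	}
	payload, err := json.Marshal(agentConfigs)
	if err != nil {
		return nil, err
	}
	return &proto.Reply{Data: payload}, nil
}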