Example 1
func (s *AgentTestSuite) TestGetConfig(t *C) {
	cmd := &proto.Cmd{
		Ts:      time.Now(),
		User:    "******",
		Cmd:     "GetConfig",
		Service: "agent",
	}
	s.sendChan <- cmd

	got := test.WaitReply(s.recvChan)
	t.Assert(len(got), Equals, 1)
	gotConfig := []proto.AgentConfig{}
	if err := json.Unmarshal(got[0].Data, &gotConfig); err != nil {
		t.Fatal(err)
	}

	config := *s.config
	config.Links = nil
	bytes, _ := json.Marshal(config)
	expect := []proto.AgentConfig{
		{
			InternalService: "agent",
			Config:          string(bytes),
			Running:         true,
		},
	}

	if ok, diff := test.IsDeeply(gotConfig, expect); !ok {
		t.Logf("%+v", gotConfig)
		t.Error(diff)
	}
}
Example 2
func (s *AggregatorTestSuite) TestC002(t *C) {
	interval := int64(300)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c002-1 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	for i := 1; i <= 5; i++ {
		file := fmt.Sprintf("%s/c002-%d.json", sample, i)
		if err := sendCollection(file, s.collectionChan); err != nil {
			t.Fatal(file, err)
		}
	}
	// Next interval causes 1st to be reported.
	file := fmt.Sprintf("%s/c002-n.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))

	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c002r.json", expect); err != nil {
		t.Fatal("c002r.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}
}
Example 3
func (s *ManagerTestSuite) TestAddWatcher(t *check.C) {
	now = int64(1380330697385120263) // Fri Sep 27 18:11:37.385120 -0700 PDT 2013
	s.tickerFactory.Set([]ticker.Ticker{s.mockTicker})

	m := ticker.NewClock(s.tickerFactory, nowFunc)

	c := make(chan time.Time)
	m.Add(c, 79, true)

	if !test.WaitState(s.mockTicker.RunningChan) {
		t.Error("Starts ticker")
	}

	if ok, diff := test.IsDeeply(s.tickerFactory.Made, []uint{79}); !ok {
		t.Errorf("Make 79s ticker, got %#v", diff)
	}

	if len(s.mockTicker.Added) == 0 {
		t.Error("Ticker added watcher")
	}

	// Manager should call ticker's ETA() to return time to next tick.
	d := m.ETA(c)
	if d != 0.1 {
		t.Error("clock.Manager.ETA()")
	}

	m.Remove(c)
}
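The clock manager under test ties each watcher channel to a ticker so that ETA() can be answered per channel (0.1s from the mock ticker above). Below is a minimal sketch of that bookkeeping; the Ticker interface, the NewClock signature, and all names here are assumptions for illustration, not the real ticker package.

package clocksketch

import "time"

// Ticker is a minimal stand-in for the ticker interface assumed by this sketch.
type Ticker interface {
	Add(c chan time.Time)
	Remove(c chan time.Time)
	ETA(now time.Time) float64 // seconds until the next tick
}

// Clock maps each watcher channel to the ticker feeding it, so ETA() and
// Remove() can be answered per channel, as the test above expects.
type Clock struct {
	makeTicker func(interval uint) Ticker
	tickers    map[chan time.Time]Ticker
}

func NewClock(makeTicker func(uint) Ticker) *Clock {
	return &Clock{
		makeTicker: makeTicker,
		tickers:    make(map[chan time.Time]Ticker),
	}
}

// Add makes a ticker for the interval (79s in the test) and registers the
// watcher channel with it; the sync flag is ignored in this sketch.
func (m *Clock) Add(c chan time.Time, interval uint, sync bool) {
	t := m.makeTicker(interval)
	t.Add(c)
	m.tickers[c] = t
}

// ETA delegates to the watcher's ticker: seconds until its next tick.
func (m *Clock) ETA(c chan time.Time) float64 {
	t, ok := m.tickers[c]
	if !ok {
		return 0
	}
	return t.ETA(time.Now())
}

func (m *Clock) Remove(c chan time.Time) {
	if t, ok := m.tickers[c]; ok {
		t.Remove(c)
		delete(m.tickers, c)
	}
}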
Example 4
func (s *ProcMeminfoTestSuite) TestProcMeminfo001(t *C) {
	m := system.NewMonitor("", &system.Config{}, s.logger)
	content, err := ioutil.ReadFile(sample + "/proc/meminfo001.txt")
	if err != nil {
		t.Fatal(err)
	}
	got, err := m.ProcMeminfo(content)
	if err != nil {
		t.Fatal(err)
	}
	// Remember: the order of this array must match order in which each
	// stat appears in the input file:
	expect := []mm.Metric{
		{Name: "memory/MemTotal", Type: "gauge", Number: 8046892},  // ok
		{Name: "memory/MemFree", Type: "gauge", Number: 5273644},   // ok
		{Name: "memory/Buffers", Type: "gauge", Number: 300684},    // ok
		{Name: "memory/Cached", Type: "gauge", Number: 946852},     // ok
		{Name: "memory/SwapCached", Type: "gauge", Number: 0},      // ok
		{Name: "memory/Active", Type: "gauge", Number: 1936436},    // ok
		{Name: "memory/Inactive", Type: "gauge", Number: 598916},   // ok
		{Name: "memory/SwapTotal", Type: "gauge", Number: 8253436}, // ok
		{Name: "memory/SwapFree", Type: "gauge", Number: 8253436},  // ok
		{Name: "memory/Dirty", Type: "gauge", Number: 0},           // ok
	}
	if same, diff := test.IsDeeply(got, expect); !same {
		test.Dump(got)
		t.Error(diff)
	}
}
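The comment above stresses that metrics must come out in file order. As a rough illustration, /proc/meminfo lines look like "MemTotal: 8046892 kB", and a parser that walks the file top to bottom naturally preserves that order. This is a hedged sketch, not the actual system monitor: the real code presumably emits only the stats the test expects, while this version converts every line.

package meminfosketch

import (
	"bufio"
	"bytes"
	"strconv"
	"strings"
)

type Metric struct {
	Name   string
	Type   string
	Number float64
}

// parseMeminfo turns lines like "MemTotal:  8046892 kB" into gauge metrics,
// preserving the order of the input file (which the test depends on).
func parseMeminfo(content []byte) ([]Metric, error) {
	var metrics []Metric
	scanner := bufio.NewScanner(bytes.NewReader(content))
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) < 2 {
			continue
		}
		name := strings.TrimSuffix(fields[0], ":")
		val, err := strconv.ParseFloat(fields[1], 64)
		if err != nil {
			continue // skip malformed lines
		}
		metrics = append(metrics, Metric{
			Name:   "memory/" + name,
			Type:   "gauge",
			Number: val, // value is in kB as reported by the kernel
		})
	}
	return metrics, scanner.Err()
}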
Example 5
func (s *ProcVmstatTestSuite) TestProcVmstat001(t *C) {
	m := system.NewMonitor("", &system.Config{}, s.logger)
	content, err := ioutil.ReadFile(sample + "/proc/vmstat001.txt")
	if err != nil {
		t.Fatal(err)
	}
	got, err := m.ProcVmstat(content)
	if err != nil {
		t.Fatal(err)
	}
	// Remember: the order of this array must match order in which each
	// stat appears in the input file:
	expect := []mm.Metric{
		{Name: "vmstat/numa_hit", Type: "counter", Number: 42594095},    // ok
		{Name: "vmstat/numa_miss", Type: "counter", Number: 0},          // ok
		{Name: "vmstat/numa_foreign", Type: "counter", Number: 0},       // ok
		{Name: "vmstat/numa_interleave", Type: "counter", Number: 7297}, // ok
		{Name: "vmstat/numa_local", Type: "counter", Number: 42594095},  // ok
		{Name: "vmstat/numa_other", Type: "counter", Number: 0},         // ok
		{Name: "vmstat/pgpgin", Type: "counter", Number: 646645},        // ok
		{Name: "vmstat/pgpgout", Type: "counter", Number: 5401659},      // ok
		{Name: "vmstat/pswpin", Type: "counter", Number: 0},             // ok
		{Name: "vmstat/pswpout", Type: "counter", Number: 0},            // ok
	}
	if same, diff := test.IsDeeply(got, expect); !same {
		test.Dump(got)
		t.Error(diff)
	}
}
Example 6
func (s *RepoTestSuite) TestInit(t *C) {
	im := instance.NewRepo(s.logger, s.configDir, s.api)
	t.Assert(im, NotNil)

	err := im.Init()
	t.Check(err, IsNil)

	err = test.CopyFile(test.RootDir+"/mm/config/mysql-1.conf", s.configDir)
	t.Assert(err, IsNil)

	err = im.Init()
	t.Assert(err, IsNil)

	mysqlIt := &proto.MySQLInstance{}
	err = im.Get("mysql", 1, mysqlIt)
	t.Assert(err, IsNil)
	expect := &proto.MySQLInstance{
		Id:       1,
		Hostname: "db1",
		DSN:      "user:host@tcp:(127.0.0.1:3306)",
		Distro:   "Percona Server",
		Version:  "5.6.16",
	}

	if same, diff := test.IsDeeply(mysqlIt, expect); !same {
		test.Dump(mysqlIt)
		test.Dump(expect)
		t.Error(diff)
	}
}
Example 7
// All zero values
func (s *AggregatorTestSuite) TestC000(t *C) {
	interval := int64(60)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c000 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	file := sample + "/c000.json"
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}
	file = sample + "/c000-n.json"
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))

	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c000r.json", expect); err != nil {
		t.Fatal("c000r.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}
}
Example 8
func (s *ManagerTestSuite) TestStartStopManager(t *C) {
	/**
	 * mm is a proxy manager for monitors, so it's always running.
	 * It should implement the service manager interface anyway,
	 * but it doesn't actually start or stop.  Its main work is done
	 * in Handle, starting and stopping monitors (tested later).
	 */
	mrm := mock.NewMrmsMonitor()
	m := mm.NewManager(s.logger, s.factory, s.clock, s.spool, s.im, mrm)
	if m == nil {
		t.Fatal("Make new mm.Manager")
	}

	// It shouldn't have added a tickChan yet.
	if len(s.clock.Added) != 0 {
		t.Error("tickChan not added yet")
	}

	// First the API marshals an mm.Config.
	config := &mm.Config{
		ServiceInstance: proto.ServiceInstance{
			Service:    "mysql",
			InstanceId: 1,
		},
		Collect: 1,
		Report:  60,
		// No monitor-specific config
	}
	err := pct.Basedir.WriteConfig("mm-mysql-1", config)
	t.Assert(err, IsNil)

	// The agent calls mm.Start().
	err = m.Start()
	t.Assert(err, IsNil)

	// There is a monitor so there should be tickers.
	if ok, diff := test.IsDeeply(s.clock.Added, []uint{1}); !ok {
		test.Dump(s.clock.Added)
		t.Errorf("Does not add tickChan, got %#v", diff)
	}

	// Its status should be "Running".
	status := m.Status()
	t.Check(status["mm"], Equals, "Running")

	// Can't start mm twice.
	err = m.Start()
	t.Check(err, Not(Equals), "")

	// Stopping should be idempotent.
	err = m.Stop()
	t.Check(err, IsNil)
	err = m.Stop()
	t.Check(err, IsNil)

	status = m.Status()
	t.Check(status["mm"], Equals, "Stopped")
}
Example 9
func (s *AgentTestSuite) TestLoadConfig(t *C) {
	// Load a partial config to make sure LoadConfig() works in general but also
	// when the config has missing options (which is normal).
	os.Remove(s.configFile)
	test.CopyFile(sample+"/config001.json", s.configFile)
	bytes, err := agent.LoadConfig()
	t.Assert(err, IsNil)
	got := &agent.Config{}
	if err := json.Unmarshal(bytes, got); err != nil {
		t.Fatal(err)
	}
	expect := &agent.Config{
		AgentUuid:   "abc-123-def",
		ApiHostname: agent.DEFAULT_API_HOSTNAME,
		ApiKey:      "123",
		Keepalive:   agent.DEFAULT_KEEPALIVE,
		PidFile:     agent.DEFAULT_PIDFILE,
	}
	if same, diff := test.IsDeeply(got, expect); !same {
		// @todo: if expect is not ptr, IsDeeply dies with "got ptr, expected struct"
		test.Dump(got)
		t.Error(diff)
	}

	// Load a config with all options to make sure LoadConfig() hasn't missed any.
	os.Remove(s.configFile)
	test.CopyFile(sample+"/full_config.json", s.configFile)
	bytes, err = agent.LoadConfig()
	t.Assert(err, IsNil)
	got = &agent.Config{}
	if err := json.Unmarshal(bytes, got); err != nil {
		t.Fatal(err)
	}
	expect = &agent.Config{
		ApiHostname: "agent hostname",
		ApiKey:      "api key",
		AgentUuid:   "agent uuid",
		Keepalive:   agent.DEFAULT_KEEPALIVE,
		PidFile:     "pid file",
	}
	if same, diff := test.IsDeeply(got, expect); !same {
		test.Dump(got)
		t.Error(diff)
	}
}
Example 10
func (s *RelayTestSuite) TestOfflineBuffering(t *C) {
	l := s.logger

	// We're going to cause the relay's client Recv() to get an error
	// which will cause the relay to connect again.  We block this 2nd
	// connect by blocking this chan.  End result: relay remains offline.
	s.client.SetConnectChan(s.connectChan)
	doneChan := make(chan bool, 1)
	go func() {
		s.client.RecvError <- io.EOF
		doneChan <- true
	}()
	// Wait for the relay to recv the recv error.
	<-doneChan

	// Wait for the relay to call client.Connect().
	<-s.connectChan

	// Double-check that relay is offline.
	if !test.WaitStatus(1, s.relay, "ws", "Disconnected") {
		t.Fatal("Relay disconnects")
	}

	// Relay is offline and trying to connect again in another goroutine.
	// These entries should therefore not be sent.  There's a minor race
	// condition: when relay goes offline, it sends an internal log entry.
	// Sometimes we get that here (Service="log") and sometimes not
	// (len(got)==0).  Either condition is correct for this test.
	l.Error("err1")
	l.Error("err2")
	got := test.WaitLog(s.recvChan, 0)
	if len(got) > 0 && got[0].Service != "log" {
		t.Errorf("Log entries are not sent while offline: %+v", got)
	}

	// Unblock the relay's connect attempt.
	s.connectChan <- true
	if !test.WaitStatus(1, s.relay, "ws", "Connected") {
		t.Fatal("Relay connects")
	}

	// Wait for the relay to resend what it buffered above.
	got = test.WaitLog(s.recvChan, 3)
	expect := []proto.LogEntry{
		{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: false"},
		{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: "err1"},
		{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: "err2"},
		{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: true"},
	}
	if same, diff := test.IsDeeply(got, expect); !same {
		t.Error(diff)
	}
}
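What this test exercises is a buffer-while-offline, flush-on-reconnect pattern. The following is a minimal sketch of that behavior, assuming a hypothetical send callback and ignoring the internal "connected: true/false" entries the real relay also emits.

package relaysketch

// LogEntry is a minimal stand-in for proto.LogEntry.
type LogEntry struct {
	Service string
	Msg     string
}

// Relay sketches the buffering behavior exercised by the test: while the
// websocket is down, entries accumulate in memory; once the connection is
// re-established, the backlog is flushed in order before new entries.
type Relay struct {
	connected bool
	buf       []LogEntry
	send      func(LogEntry) error // e.g. write to the websocket client
}

func (r *Relay) Log(e LogEntry) {
	if !r.connected {
		r.buf = append(r.buf, e) // offline: buffer, do not send
		return
	}
	if err := r.send(e); err != nil {
		r.connected = false
		r.buf = append(r.buf, e)
	}
}

// OnConnect is called after a successful reconnect; it resends the backlog.
func (r *Relay) OnConnect() {
	r.connected = true
	for len(r.buf) > 0 {
		if err := r.send(r.buf[0]); err != nil {
			r.connected = false
			return // keep the unsent remainder for the next reconnect
		}
		r.buf = r.buf[1:]
	}
}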
Example 11
func (s *RepoTestSuite) TestAddRemove(t *C) {
	im := instance.NewRepo(s.logger, s.configDir, s.api)
	t.Assert(im, NotNil)

	t.Check(test.FileExists(s.configDir+"/mysql-1.conf"), Equals, false)

	mysqlIt := &proto.MySQLInstance{
		Id:       1,
		Hostname: "db1",
		DSN:      "user:host@tcp:(127.0.0.1:3306)",
		Distro:   "Percona Server",
		Version:  "5.6.16",
	}
	data, err := json.Marshal(mysqlIt)
	t.Assert(err, IsNil)
	err = im.Add("mysql", 1, data, true)
	t.Assert(err, IsNil)

	t.Check(test.FileExists(s.configDir+"/mysql-1.conf"), Equals, true)

	got := &proto.MySQLInstance{}
	err = im.Get("mysql", 1, got)
	t.Assert(err, IsNil)
	if same, diff := test.IsDeeply(got, mysqlIt); !same {
		t.Error(diff)
	}

	data, err = ioutil.ReadFile(s.configDir + "/mysql-1.conf")
	t.Assert(err, IsNil)

	got = &proto.MySQLInstance{}
	err = json.Unmarshal(data, got)
	t.Assert(err, IsNil)
	if same, diff := test.IsDeeply(got, mysqlIt); !same {
		t.Error(diff)
	}

	im.Remove("mysql", 1)
	t.Check(test.FileExists(s.configDir+"/mysql-1.conf"), Equals, false)
}
Example 12
func (s *SenderTestSuite) Test500Error(t *C) {
	spool := mock.NewSpooler(nil)
	spool.FilesOut = []string{"file1", "file2", "file3"}
	spool.DataOut = map[string][]byte{
		"file1": []byte("file1"),
		"file2": []byte("file2"),
		"file3": []byte("file3"),
	}

	sender := data.NewSender(s.logger, s.client)
	err := sender.Start(spool, s.tickerChan, 5, false)
	t.Assert(err, IsNil)

	s.tickerChan <- time.Now()

	got := test.WaitBytes(s.dataChan)
	if same, diff := test.IsDeeply(got[0], []byte("file1")); !same {
		t.Error(diff)
	}

	// 3 files before API error.
	t.Check(len(spool.DataOut), Equals, 3)

	// Simulate API error.
	select {
	case s.respChan <- &proto.Response{Code: 503}:
	case <-time.After(500 * time.Millisecond):
		t.Error("Sender receives proto.Response after sending data")
	}

	// Wait for it to finish and return.
	if !test.WaitStatusPrefix(data.MAX_SEND_ERRORS*data.CONNECT_ERROR_WAIT, sender, "data-sender", "Idle") {
		t.Fatal("Timeout waiting for data-sender status=Idle")
	}

	// Still 3 files after API error.
	t.Check(len(spool.DataOut), Equals, 3)
	t.Check(len(spool.RejectedFiles), Equals, 0)

	// There's only 1 call to SendBytes because after an API error
	// the send stops immediately.
	trace := test.DrainTraceChan(s.client.TraceChan)
	t.Check(trace, DeepEquals, []string{
		"ConnectOnce",
		"SendBytes",
		"Recv",
		"DisconnectOnce",
	})

	err = sender.Stop()
	t.Assert(err, IsNil)
}
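The key behavior asserted above is that a non-200 response halts the whole send pass, so only one SendBytes/Recv pair appears in the trace and nothing is removed from the spool. Here is a rough sketch of that loop, with hypothetical Client and Spool interfaces standing in for the real websocket client and spooler; it is not the data package's actual code.

package sendersketch

// Response mirrors the relevant part of proto.Response.
type Response struct {
	Code uint
}

// Client abstracts the websocket calls traced by the test
// (ConnectOnce, SendBytes, Recv, DisconnectOnce).
type Client interface {
	ConnectOnce() error
	SendBytes(data []byte) error
	Recv(resp *Response) error
	DisconnectOnce() error
}

// Spool abstracts the data spooler: list files, read data, remove on success.
type Spool interface {
	Files() []string
	Data(file string) ([]byte, error)
	Remove(file string) error
}

// sendOnce sketches one send pass: files are sent one at a time and the pass
// stops at the first non-200 response, which is why the trace in the test
// above shows a single SendBytes/Recv pair after the 503.
func sendOnce(client Client, spool Spool) error {
	if err := client.ConnectOnce(); err != nil {
		return err
	}
	defer client.DisconnectOnce()

	for _, file := range spool.Files() {
		data, err := spool.Data(file)
		if err != nil {
			return err
		}
		if err := client.SendBytes(data); err != nil {
			return err
		}
		var resp Response
		if err := client.Recv(&resp); err != nil {
			return err
		}
		if resp.Code != 200 {
			return nil // API error: stop immediately, keep remaining files spooled
		}
		spool.Remove(file) // 200 OK: data accepted, remove it from the spool
	}
	return nil
}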
Example 13
func (s *SenderTestSuite) TestSendData(t *C) {
	spool := mock.NewSpooler(nil)

	slow001, err := ioutil.ReadFile(sample + "slow001.json")
	if err != nil {
		t.Fatal(err)
	}

	spool.FilesOut = []string{"slow001.json"}
	spool.DataOut = map[string][]byte{"slow001.json": slow001}

	sender := data.NewSender(s.logger, s.client)

	err = sender.Start(spool, s.tickerChan, 5, false)
	if err != nil {
		t.Fatal(err)
	}

	data := test.WaitBytes(s.dataChan)
	if len(data) != 0 {
		t.Errorf("No data sent before tick; got %+v", data)
	}

	s.tickerChan <- time.Now()

	data = test.WaitBytes(s.dataChan)
	if same, diff := test.IsDeeply(data[0], slow001); !same {
		t.Error(diff)
	}

	t.Check(len(spool.DataOut), Equals, 1)

	select {
	case s.respChan <- &proto.Response{Code: 200}:
	case <-time.After(500 * time.Millisecond):
		t.Error("Sender receives proto.Response after sending data")
	}

	// Sender should include its websocket client status.  We're using a mock ws client
	// which reports itself as "data-client: ok".
	status := sender.Status()
	t.Check(status["data-client"], Equals, "ok")

	err = sender.Stop()
	t.Assert(err, IsNil)

	t.Check(len(spool.DataOut), Equals, 0)
	t.Check(len(spool.RejectedFiles), Equals, 0)
}
Example 14
// COUNTER
func (s *AggregatorTestSuite) TestC003(t *C) {
	interval := int64(5)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c003 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	for i := 1; i <= 5; i++ {
		file := fmt.Sprintf("%s/c003-%d.json", sample, i)
		if err := sendCollection(file, s.collectionChan); err != nil {
			t.Fatal(file, err)
		}
	}
	// Next interval causes 1st to be reported.
	file := fmt.Sprintf("%s/c003-n.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	/**
	 * Pretend we're monitoring Bytes_sent every second:
	 * first val = 100
	 *           prev this diff val/s
	 * next val  100   200  100   100
	 * next val  200   400  200   200
	 * next val  400   800  400   400
	 * next val  800  1600  800   800
	 *
	 * So min bytes/s = 100, max = 800, avg = 375.  These are
	 * the values in c003r.json.
	 */
	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))
	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c003r.json", expect); err != nil {
		t.Fatal("c003r.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}
}
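The arithmetic in the comment above is the whole point of the COUNTER test: consecutive raw values sampled one second apart are turned into per-second deltas, then summarized. Below is a small self-contained sketch of that calculation, not the real mm aggregator. For the sample values 100, 200, 400, 800, 1600 it yields deltas 100, 200, 400, 800, so min=100, max=800, avg=375, matching c003r.json.

package countersketch

// rates converts successive counter values sampled once per second into
// per-second deltas and summarizes them, matching the worked example above.
func rates(samples []float64) (vals []float64, min, max, avg float64) {
	for i := 1; i < len(samples); i++ {
		vals = append(vals, samples[i]-samples[i-1]) // 1s apart, so delta == rate
	}
	if len(vals) == 0 {
		return
	}
	min, max = vals[0], vals[0]
	sum := 0.0
	for _, v := range vals {
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
		sum += v
	}
	avg = sum / float64(len(vals))
	return
}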
Example 15
func (s *AgentTestSuite) TestGetAllConfigs(t *C) {
	cmd := &proto.Cmd{
		Ts:      time.Now(),
		User:    "******",
		Cmd:     "GetAllConfigs",
		Service: "agent",
	}
	s.sendChan <- cmd

	got := test.WaitReply(s.recvChan)
	t.Assert(len(got), Equals, 1)
	reply := got[0]
	t.Check(reply.Error, Equals, "")
	t.Assert(reply.Data, Not(HasLen), 0)

	gotConfigs := []proto.AgentConfig{}
	err := json.Unmarshal(reply.Data, &gotConfigs)
	t.Assert(err, IsNil)

	bytes, _ := json.Marshal(s.config)

	sort.Sort(test.ByInternalService(gotConfigs))
	expectConfigs := []proto.AgentConfig{
		{
			InternalService: "agent",
			Config:          string(bytes),
			Running:         true,
		},
		{
			InternalService: "mm",
			Config:          `{"Foo":"bar"}`,
			Running:         false,
		},
		{
			InternalService: "qan",
			Config:          `{"Foo":"bar"}`,
			Running:         false,
		},
	}
	if ok, diff := test.IsDeeply(gotConfigs, expectConfigs); !ok {
		test.Dump(gotConfigs)
		t.Error(diff)
	}
}
Example 16
func (s *AggregatorTestSuite) TestC003Lost(t *C) {
	interval := int64(5)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c003 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	// The full sequence is files 1-5, but we send only 1 and 5,
	// simulating monitor failure during 2-4.  More below...
	file := fmt.Sprintf("%s/c003-1.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}
	file = fmt.Sprintf("%s/c003-5.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}
	// Next interval causes 1st to be reported.
	file = fmt.Sprintf("%s/c003-n.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	/**
	 * Values we did get are 100 and 1600 and ts 00 to 04.  So that looks like
	 * 1500 bytes / 4s = 375.  And since there was only 1 interval, we expect
	 * 375 for all stat values.
	 */
	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))
	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c003rlost.json", expect); err != nil {
		t.Fatal("c003rlost.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		test.Dump(got.Stats)
		test.Dump(expect.Stats)
		t.Fatal(diff)
	}
}
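Here the same counter logic runs with a gap: only the first (100) and last (1600) values of the interval arrived, 4 seconds apart, so there is a single delta and every stat collapses to it. A worked version of that arithmetic:

package main

import "fmt"

// Worked example for the lost-sample case above: only the first (100) and
// last (1600) counter values survived, 4 seconds apart, so there is one delta.
func main() {
	first, last := 100.0, 1600.0
	elapsed := 4.0 // seconds between the two surviving samples
	rate := (last - first) / elapsed
	fmt.Println(rate) // 375 -> min, max, and avg are all 375 in c003rlost.json
}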
Example 17
func (s *ManagerTestSuite) TestStartStopManager(t *C) {
	m := sysconfig.NewManager(s.logger, s.factory, s.clock, s.spool, s.im)
	t.Assert(m, NotNil)

	// It shouldn't have added a tickChan yet.
	if len(s.clock.Added) != 0 {
		t.Error("tickChan not added yet")
	}

	// Write a sysconfig monitor config to disk.
	config := &sysconfig.Config{
		ServiceInstance: proto.ServiceInstance{Service: "mysql", InstanceId: 1},
		Report:          3600,
		// No monitor-specific config
	}
	pct.Basedir.WriteConfig("sysconfig-mysql-1", config)

	// The agent calls sysconfig.Start() to start manager which starts all monitors.
	err := m.Start()
	t.Assert(err, IsNil)

	// Starting the manager starts the monitor from its config on disk, so its 3600s report tickChan should be added to the clock.
	if ok, diff := test.IsDeeply(s.clock.Added, []uint{3600}); !ok {
		t.Errorf("Adds tickChan, got %#v", diff)
	}

	// Its status should be "Running".
	status := m.Status()
	t.Check(status["sysconfig"], Equals, "Running")

	// Can't start manager twice.
	err = m.Start()
	t.Check(err, NotNil)

	// Stopping is idempotent.
	err = m.Stop()
	t.Check(err, IsNil)
	err = m.Stop()
	t.Check(err, IsNil)

	status = m.Status()
	t.Check(status["sysconfig"], Equals, "Stopped")
}
Example 18
func (s *AggregatorTestSuite) TestC001(t *C) {
	interval := int64(300)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Load collection from file and send to aggregator.
	if err := sendCollection(sample+"/c001-1.json", s.collectionChan); err != nil {
		t.Fatal(err)
	}

	// Ts in c001-1 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	got := test.WaitMmReport(s.dataChan)
	if got != nil {
		t.Errorf("No report before 2nd interval, got: %+v", got)
	}

	// Ts in c001-2 is 2009-11-10 23:05:01, 1s into the next interval.
	if err := sendCollection(sample+"/c001-2.json", s.collectionChan); err != nil {
		t.Fatal(err)
	}

	got = test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))

	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c001r.json", expect); err != nil {
		t.Fatal(err)
	}
	t.Check(got.Ts, Equals, t1)
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		test.Dump(got.Stats)
		test.Dump(expect.Stats)
		t.Fatal(diff)
	}

}
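The timing rule the test relies on is that a report for an interval is only emitted once a collection arrives whose timestamp falls into the next interval (here c001-2 at 23:05:01). Below is a hedged sketch of the boundary calculation, assuming interval starts are aligned to Unix-time multiples of the interval; the real aggregator may compute this differently. With interval=300, both 23:00:00 and 23:04:59 map to 23:00:00, while 23:05:01 maps to 23:05:00, so its arrival closes out and reports the 23:00:00 interval.

package intervalsketch

import "time"

// intervalStart truncates a timestamp to the start of its aggregation
// interval. A collection whose interval start differs from the current one
// triggers a report for the interval that just ended.
func intervalStart(ts time.Time, interval int64) time.Time {
	return time.Unix(ts.Unix()-(ts.Unix()%interval), 0).UTC()
}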
Example 19
func (s *ProcLoadavgTestSuite) TestProcLoadavg001(t *C) {
	m := system.NewMonitor("", &system.Config{}, s.logger)
	content, err := ioutil.ReadFile(sample + "/proc/loadavg001.txt")
	if err != nil {
		t.Fatal(err)
	}
	got, err := m.ProcLoadavg(content)
	if err != nil {
		t.Fatal(err)
	}
	// Remember: the order of this array must match order in which each
	// stat appears in the input file:
	expect := []mm.Metric{
		{Name: "loadavg/1min", Type: "gauge", Number: 0.45},     // ok
		{Name: "loadavg/5min", Type: "gauge", Number: 0.56},     // ok
		{Name: "loadavg/15min", Type: "gauge", Number: 0.58},    // ok
		{Name: "loadavg/running", Type: "gauge", Number: 1},     // ok
		{Name: "loadavg/processes", Type: "gauge", Number: 598}, // ok
	}
	if same, diff := test.IsDeeply(got, expect); !same {
		test.Dump(got)
		t.Error(diff)
	}
}
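A /proc/loadavg line has the shape "0.45 0.56 0.58 1/598 12345": the three load averages, running/total processes, and the last PID. The expected metrics above (running=1, processes=598) follow directly from that layout. This is a hedged parsing sketch consistent with the expected output, not the monitor's actual code.

package loadavgsketch

import (
	"fmt"
	"strconv"
	"strings"
)

type Metric struct {
	Name   string
	Type   string
	Number float64
}

// parseLoadavg handles the single-line /proc/loadavg format: three load
// averages, then running/total processes, then the last PID (ignored here).
func parseLoadavg(content []byte) ([]Metric, error) {
	fields := strings.Fields(string(content))
	if len(fields) < 4 {
		return nil, fmt.Errorf("unexpected /proc/loadavg format: %q", content)
	}
	names := []string{"loadavg/1min", "loadavg/5min", "loadavg/15min"}
	metrics := make([]Metric, 0, 5)
	for i, name := range names {
		val, err := strconv.ParseFloat(fields[i], 64)
		if err != nil {
			return nil, err
		}
		metrics = append(metrics, Metric{Name: name, Type: "gauge", Number: val})
	}
	procs := strings.SplitN(fields[3], "/", 2)
	if len(procs) == 2 {
		running, _ := strconv.ParseFloat(procs[0], 64)
		total, _ := strconv.ParseFloat(procs[1], 64)
		metrics = append(metrics,
			Metric{Name: "loadavg/running", Type: "gauge", Number: running},
			Metric{Name: "loadavg/processes", Type: "gauge", Number: total},
		)
	}
	return metrics, nil
}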
Example 20
/**
 * Tests:
 * - starting monitor
 * - stopping monitor
 * - starting monitor again (restarting monitor)
 * - sneaked in:) unknown cmd test
 */
func (s *ManagerTestSuite) TestRestartMonitor(t *C) {
	// Create and start mm, no monitors yet.
	m := mm.NewManager(s.logger, s.factory, s.clock, s.spool, s.im)
	t.Assert(m, NotNil)
	err := m.Start()
	t.Assert(err, IsNil)

	// Start a monitor by sending StartService + monitor config.
	// This is the config in test/mm/config/mm-mysql-1.conf.
	mmConfig := &mysql.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Collect: 1,
			Report:  60,
		},
		Status: map[string]string{
			"threads_connected": "gauge",
			"threads_running":   "gauge",
		},
	}
	mmConfigData, err := json.Marshal(mmConfig)
	t.Assert(err, IsNil)

	// If this were a real monitor, it would decode and set its own config.
	// The mock monitor doesn't have any real config type, so we set it manually.
	s.mysqlMonitor.SetConfig(mmConfig)

	// The agent calls mm.Handle() with the cmd (for logging and status) and the config data.
	cmd := &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StartService",
		Data:    mmConfigData,
	}
	reply := m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Equals, "")

	// The monitor should be running.  The mock monitor returns "Running" if
	// Start() has been called; else it returns "Stopped".
	status := m.Status()
	t.Check(status["monitor"], Equals, "Running")

	// There should be a 1s collect ticker for the monitor.
	if ok, diff := test.IsDeeply(s.clock.Added, []uint{1}); !ok {
		t.Errorf("Make 1s ticker for collect interval\n%s", diff)
	}

	// After starting a monitor, mm should write its config to the dir
	// it learned when mm.LoadConfig() was called.  Next time agent starts,
	// it will have mm start the monitor with this config.
	data, err := ioutil.ReadFile(s.configDir + "/mm-mysql-1.conf")
	t.Check(err, IsNil)
	gotConfig := &mysql.Config{}
	err = json.Unmarshal(data, gotConfig)
	t.Check(err, IsNil)
	if same, diff := test.IsDeeply(gotConfig, mmConfig); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}

	/**
	 * Stop the monitor.
	 */

	cmd = &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StopService",
		Data:    mmConfigData,
	}

	// Handles StopService without error.
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Equals, "")

	// Stopping a monitor removes it from the manager's list of monitors.
	// So it's no longer present in a status request.
	status = m.Status()
	t.Check(status["monitor"], Equals, "")

	// After stopping the monitor, the manager should remove its tickChan.
	if len(s.clock.Removed) != 1 {
		t.Error("Removes monitor's tickChan from clock")
	}

	// After stopping a monitor, mm should remove its config file so agent
	// doesn't start it on restart.
	file := s.configDir + "/mm-mysql-1.conf"
	if pct.FileExists(file) {
		t.Error("Stopping monitor removes its config; ", file, " exists")
	}

	/**
	 * Start the monitor again (restarting monitor).
	 */
	cmd = &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StartService",
		Data:    mmConfigData,
	}

	// If this were a real monitor, it would decode and set its own config.
	// The mock monitor doesn't have any real config type, so we set it manually.
	s.mysqlMonitor.SetConfig(mmConfig)

	// The agent calls mm.Handle() with the cmd (for logging and status) and the config data.
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Equals, "")

	// The monitor should be running.  The mock monitor returns "Running" if
	// Start() has been called; else it returns "Stopped".
	status = m.Status()
	t.Check(status["monitor"], Equals, "Running")

	// There should be a 1s collect ticker for the monitor.
	// (Actually two in s.clock.Added, because this is a mock and we started the monitor twice.)
	if ok, diff := test.IsDeeply(s.clock.Added, []uint{1, 1}); !ok {
		t.Errorf("Make 1s ticker for collect interval\n%s", diff)
	}

	// After starting a monitor, mm should write its config to the dir
	// it learned when mm.LoadConfig() was called.  Next time agent starts,
	// it will have mm start the monitor with this config.
	data, err = ioutil.ReadFile(s.configDir + "/mm-mysql-1.conf")
	t.Check(err, IsNil)
	gotConfig = &mysql.Config{}
	err = json.Unmarshal(data, gotConfig)
	t.Check(err, IsNil)
	if same, diff := test.IsDeeply(gotConfig, mmConfig); !same {
		t.Logf("%+v", gotConfig)
		t.Error(diff)
	}

	/**
	 * While we're all setup and working, let's sneak in an unknown cmd test.
	 */

	cmd = &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "Pontificate",
		Data:    mmConfigData,
	}

	// Unknown cmd causes error.
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Not(Equals), "")
}
Example 21
func (s *ManagerTestSuite) TestSetConfig(t *C) {
	m := data.NewManager(s.logger, s.dataDir, s.trashDir, "localhost", s.client)
	t.Assert(m, NotNil)

	config := &data.Config{
		Encoding:     "",
		SendInterval: 1,
		Limits: proto.DataSpoolLimits{
			MaxAge:   data.DEFAULT_DATA_MAX_AGE,
			MaxSize:  data.DEFAULT_DATA_MAX_SIZE,
			MaxFiles: data.DEFAULT_DATA_MAX_FILES,
		},
	}
	pct.Basedir.WriteConfig("data", config)

	err := m.Start()
	t.Assert(err, IsNil)

	sender := m.Sender()
	t.Check(sender, NotNil)

	/**
	 * Change SendInterval
	 */
	config.SendInterval = 5
	configData, err := json.Marshal(config)
	t.Assert(err, IsNil)
	cmd := &proto.Cmd{
		User:    "******",
		Service: "data",
		Cmd:     "SetConfig",
		Data:    configData,
	}

	gotReply := m.Handle(cmd)
	t.Assert(gotReply.Error, Equals, "")

	cmd = &proto.Cmd{
		User:    "******",
		Service: "data",
		Cmd:     "GetConfig",
	}
	reply := m.Handle(cmd)
	t.Assert(reply.Error, Equals, "")
	t.Assert(reply.Data, NotNil)
	gotConfigRes := []proto.AgentConfig{}
	if err := json.Unmarshal(reply.Data, &gotConfigRes); err != nil {
		t.Fatal(err)
	}
	expectConfigRes := []proto.AgentConfig{
		{
			InternalService: "data",
			Config:          string(configData),
			Running:         true,
		},
	}
	if same, diff := test.IsDeeply(gotConfigRes, expectConfigRes); !same {
		test.Dump(gotConfigRes)
		t.Error(diff)
	}

	// Verify new config on disk.
	content, err := ioutil.ReadFile(pct.Basedir.ConfigFile("data"))
	t.Assert(err, IsNil)
	gotConfig := &data.Config{}
	if err := json.Unmarshal(content, gotConfig); err != nil {
		t.Fatal(err)
	}
	if same, diff := test.IsDeeply(gotConfig, config); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}

	/**
	 * Change Encoding
	 */
	config.Encoding = "gzip"
	configData, err = json.Marshal(config)
	t.Assert(err, IsNil)
	cmd = &proto.Cmd{
		User:    "******",
		Service: "data",
		Cmd:     "SetConfig",
		Data:    configData,
	}

	gotReply = m.Handle(cmd)
	t.Assert(gotReply.Error, Equals, "")

	cmd = &proto.Cmd{
		User:    "******",
		Service: "data",
		Cmd:     "GetConfig",
	}
	reply = m.Handle(cmd)
	t.Assert(reply.Error, Equals, "")
	t.Assert(reply.Data, NotNil)
	if err := json.Unmarshal(reply.Data, &gotConfigRes); err != nil {
		t.Fatal(err)
	}
	expectConfigRes = []proto.AgentConfig{
		{
			InternalService: "data",
			Config:          string(configData),
			Running:         true,
		},
	}
	if same, diff := test.IsDeeply(gotConfigRes, expectConfigRes); !same {
		test.Dump(gotConfigRes)
		t.Error(diff)
	}

	// Verify new config on disk.
	content, err = ioutil.ReadFile(pct.Basedir.ConfigFile("data"))
	t.Assert(err, IsNil)
	gotConfig = &data.Config{}
	if err := json.Unmarshal(content, gotConfig); err != nil {
		t.Fatal(err)
	}
	if same, diff := test.IsDeeply(gotConfig, config); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}
}
Example 22
func (s *ManagerTestSuite) TestGetConfig(t *C) {
	m := sysconfig.NewManager(s.logger, s.factory, s.clock, s.spool, s.im)
	t.Assert(m, NotNil)

	err := m.Start()
	t.Assert(err, IsNil)

	// Start a sysconfig monitor.
	sysconfigConfig := &mysql.Config{
		Config: sysconfig.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Report: 3600,
		},
	}
	sysconfigConfigData, err := json.Marshal(sysconfigConfig)
	t.Assert(err, IsNil)

	cmd := &proto.Cmd{
		User:    "******",
		Service: "sysconfig",
		Cmd:     "StartService",
		Data:    sysconfigConfigData,
	}
	reply := m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Equals, "")
	s.mockMonitor.SetConfig(sysconfigConfig)

	/**
	 * GetConfig from sysconfig which should return all monitors' configs.
	 */
	cmd = &proto.Cmd{
		Cmd:     "GetConfig",
		Service: "sysconfig",
	}
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Assert(reply.Error, Equals, "")
	t.Assert(reply.Data, NotNil)
	gotConfig := []proto.AgentConfig{}
	if err := json.Unmarshal(reply.Data, &gotConfig); err != nil {
		t.Fatal(err)
	}
	expectConfig := []proto.AgentConfig{
		{
			InternalService: "sysconfig",
			ExternalService: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Config:  string(sysconfigConfigData),
			Running: true,
		},
	}
	if same, diff := test.IsDeeply(gotConfig, expectConfig); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}
}
Example 23
func (s *ProcStatTestSuite) TestProcStat001(t *C) {
	files, err := filepath.Glob(sample + "/proc/stat001-*.txt")
	if err != nil {
		t.Fatal(err)
	}

	m := system.NewMonitor("", &system.Config{}, s.logger)

	metrics := []mm.Metric{}

	for _, file := range files {
		content, err := ioutil.ReadFile(file)
		if err != nil {
			t.Fatal(err)
		}
		got, err := m.ProcStat(content)
		if err != nil {
			t.Fatal(err)
		}
		metrics = append(metrics, got...)
	}

	/*
					Totals		Diff
		stat001-1
			cpu		390817611
			cpu0	97641434
			cpu1	97717127
		stat001-2
			cpu		391386603	568992
			cpu0	97783608	142174	These don't add up because the real input has 4 CPU.
			cpu1	97859411	142284  This does not affect the tests.
		stat001-3
			cpu		391759882	373279
			cpu0	97876875	93267
			cpu1	97952757	93346

			1    2    3      4    5      6   7       8     9     10
			user nice system idle iowait irq softirq steal guest guestlow
	*/
	expect := []mm.Metric{
		// First input, no CPU because that requires previous values, so only cpu-ext:
		{Name: "cpu-ext/intr", Type: "counter", Number: 39222211},    // ok
		{Name: "cpu-ext/ctxt", Type: "counter", Number: 122462971},   // ok
		{Name: "cpu-ext/processes", Type: "counter", Number: 227223}, // ok
		{Name: "cpu-ext/procs_running", Type: "gauge", Number: 1},    // ok
		{Name: "cpu-ext/procs_blocked", Type: "gauge", Number: 0},    // ok
		// Second input, now we have CPU values, plus more cpu-ext values.
		{Name: "cpu/user", Type: "gauge", Number: 0.041477},    // ok 5
		{Name: "cpu/nice", Type: "gauge", Number: 0},           // ok
		{Name: "cpu/system", Type: "gauge", Number: 0.017751},  // ok 7
		{Name: "cpu/idle", Type: "gauge", Number: 99.938488},   // ok
		{Name: "cpu/iowait", Type: "gauge", Number: 0.002285},  // ok 9
		{Name: "cpu/irq", Type: "gauge", Number: 0},            // ok
		{Name: "cpu/softirq", Type: "gauge", Number: 0},        // ok 11
		{Name: "cpu/steal", Type: "gauge", Number: 0},          // ok
		{Name: "cpu/guest", Type: "gauge", Number: 0},          // ok 13
		{Name: "cpu0/user", Type: "gauge", Number: 0.131529},   // ok
		{Name: "cpu0/nice", Type: "gauge", Number: 0},          // ok 15
		{Name: "cpu0/system", Type: "gauge", Number: 0.039388}, // ok
		{Name: "cpu0/idle", Type: "gauge", Number: 99.819939},  // ok 17
		{Name: "cpu0/iowait", Type: "gauge", Number: 0.009144},
		{Name: "cpu0/irq", Type: "gauge", Number: 0}, // 19
		{Name: "cpu0/softirq", Type: "gauge", Number: 0},
		{Name: "cpu0/steal", Type: "gauge", Number: 0}, // 21
		{Name: "cpu0/guest", Type: "gauge", Number: 0},
		{Name: "cpu1/user", Type: "gauge", Number: 0.026707}, // 23
		{Name: "cpu1/nice", Type: "gauge", Number: 0},
		{Name: "cpu1/system", Type: "gauge", Number: 0.023193}, // 25
		{Name: "cpu1/idle", Type: "gauge", Number: 99.950100},
		{Name: "cpu1/iowait", Type: "gauge", Number: 0}, // 27
		{Name: "cpu1/irq", Type: "gauge", Number: 0},
		{Name: "cpu1/softirq", Type: "gauge", Number: 0}, // 29
		{Name: "cpu1/steal", Type: "gauge", Number: 0},
		{Name: "cpu1/guest", Type: "gauge", Number: 0},               // ok 31
		{Name: "cpu-ext/intr", Type: "counter", Number: 39276666},    // ok
		{Name: "cpu-ext/ctxt", Type: "counter", Number: 122631533},   // ok 33
		{Name: "cpu-ext/processes", Type: "counter", Number: 227521}, // ok
		{Name: "cpu-ext/procs_running", Type: "gauge", Number: 2},    // ok 35
		{Name: "cpu-ext/procs_blocked", Type: "gauge", Number: 0},    // ok
		// Third input.
		{Name: "cpu/user", Type: "gauge", Number: 0.038309},   // ok 37
		{Name: "cpu/nice", Type: "gauge", Number: 0},          // ok
		{Name: "cpu/system", Type: "gauge", Number: 0.017681}, // ok 39
		{Name: "cpu/idle", Type: "gauge", Number: 99.941063},
		{Name: "cpu/iowait", Type: "gauge", Number: 0.002947}, // 41
		{Name: "cpu/irq", Type: "gauge", Number: 0},
		{Name: "cpu/softirq", Type: "gauge", Number: 0}, // 43
		{Name: "cpu/steal", Type: "gauge", Number: 0},
		{Name: "cpu/guest", Type: "gauge", Number: 0}, // 45
		{Name: "cpu0/user", Type: "gauge", Number: 0.122230},
		{Name: "cpu0/nice", Type: "gauge", Number: 0}, // 47
		{Name: "cpu0/system", Type: "gauge", Number: 0.041815},
		{Name: "cpu0/idle", Type: "gauge", Number: 99.824161}, // 49
		{Name: "cpu0/iowait", Type: "gauge", Number: 0.011794},
		{Name: "cpu0/irq", Type: "gauge", Number: 0}, // 51
		{Name: "cpu0/softirq", Type: "gauge", Number: 0},
		{Name: "cpu0/steal", Type: "gauge", Number: 0}, // 53
		{Name: "cpu0/guest", Type: "gauge", Number: 0},
		{Name: "cpu1/user", Type: "gauge", Number: 0.021426},   // ok 55
		{Name: "cpu1/nice", Type: "gauge", Number: 0},          // ok
		{Name: "cpu1/system", Type: "gauge", Number: 0.024640}, // -- 57
		{Name: "cpu1/idle", Type: "gauge", Number: 99.953935},
		{Name: "cpu1/iowait", Type: "gauge", Number: 0}, // 59
		{Name: "cpu1/irq", Type: "gauge", Number: 0},
		{Name: "cpu1/softirq", Type: "gauge", Number: 0}, // 61
		{Name: "cpu1/steal", Type: "gauge", Number: 0},
		{Name: "cpu1/guest", Type: "gauge", Number: 0},               // 63
		{Name: "cpu-ext/intr", Type: "counter", Number: 39312673},    // ok
		{Name: "cpu-ext/ctxt", Type: "counter", Number: 122742465},   // 65 ok
		{Name: "cpu-ext/processes", Type: "counter", Number: 227717}, // ok
		{Name: "cpu-ext/procs_running", Type: "gauge", Number: 3},    // 67 ok
		{Name: "cpu-ext/procs_blocked", Type: "gauge", Number: 0},    // ok
	}

	if same, diff := test.IsDeeply(metrics, expect); !same {
		test.Dump(metrics)
		t.Error(diff)
	}
}
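Each "cpu ..." line in /proc/stat holds cumulative jiffy counters, so percentages can only be formed from the difference between two samples, which is why the first input file yields no cpu/* metrics above. One plausible formula, consistent with the totals and diffs in the comment, is delta(state) / delta(total) * 100; the exact column handling (e.g. guest time) in the real monitor may differ, so treat this as a sketch.

package cpusketch

// cpuSample holds the cumulative jiffy counters from one "cpu ..." line of
// /proc/stat, in file order: user nice system idle iowait irq softirq steal
// guest (guest_nice omitted).
type cpuSample [9]float64

// percentages converts two consecutive samples into per-state percentages;
// a previous sample is required to form the deltas, so the first sample of a
// run produces no cpu/* metrics.
func percentages(prev, cur cpuSample) []float64 {
	var deltas [9]float64
	total := 0.0
	for i := range cur {
		deltas[i] = cur[i] - prev[i]
		total += deltas[i]
	}
	out := make([]float64, len(deltas))
	if total == 0 {
		return out
	}
	for i, d := range deltas {
		out[i] = d / total * 100
	}
	return out
}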
Example 24
func (s *ProcDiskstatsTestSuite) TestProcDiskstats001(t *C) {
	m := system.NewMonitor("", &system.Config{}, s.logger)
	content, err := ioutil.ReadFile(sample + "/proc/diskstats001.txt")
	if err != nil {
		t.Fatal(err)
	}
	got, err := m.ProcDiskstats(content)
	if err != nil {
		t.Fatal(err)
	}
	// Remember: the order of this array must match order in which each
	// stat appears in the input file:
	expect := []mm.Metric{
		{Name: "disk/sda/reads", Type: "counter", Number: 56058},
		{Name: "disk/sda/reads_merged", Type: "counter", Number: 2313},
		{Name: "disk/sda/sectors_read", Type: "counter", Number: 1270506},
		{Name: "disk/sda/read_time", Type: "counter", Number: 280760},
		{Name: "disk/sda/writes", Type: "counter", Number: 232825},
		{Name: "disk/sda/writes_merged", Type: "counter", Number: 256917},
		{Name: "disk/sda/sectors_written", Type: "counter", Number: 10804063},
		{Name: "disk/sda/write_time", Type: "counter", Number: 2097320},
		{Name: "disk/sda/io_time", Type: "counter", Number: 1163068},
		{Name: "disk/sda/io_time_weighted", Type: "counter", Number: 2378728},
		{Name: "disk/sda/iops", Type: "counter", Number: 56058 + 232825},
		// --
		{Name: "disk/sda1/reads", Type: "counter", Number: 385},
		{Name: "disk/sda1/reads_merged", Type: "counter", Number: 1138},
		{Name: "disk/sda1/sectors_read", Type: "counter", Number: 4518},
		{Name: "disk/sda1/read_time", Type: "counter", Number: 4480},
		{Name: "disk/sda1/writes", Type: "counter", Number: 1},
		{Name: "disk/sda1/writes_merged", Type: "counter", Number: 0},
		{Name: "disk/sda1/sectors_written", Type: "counter", Number: 1},
		{Name: "disk/sda1/write_time", Type: "counter", Number: 0},
		{Name: "disk/sda1/io_time", Type: "counter", Number: 2808},
		{Name: "disk/sda1/io_time_weighted", Type: "counter", Number: 4480},
		{Name: "disk/sda1/iops", Type: "counter", Number: 385 + 1},
		// --
		{Name: "disk/sda2/reads", Type: "counter", Number: 276},
		{Name: "disk/sda2/reads_merged", Type: "counter", Number: 240},
		{Name: "disk/sda2/sectors_read", Type: "counter", Number: 2104},
		{Name: "disk/sda2/read_time", Type: "counter", Number: 1692},
		{Name: "disk/sda2/writes", Type: "counter", Number: 15},
		{Name: "disk/sda2/writes_merged", Type: "counter", Number: 0},
		{Name: "disk/sda2/sectors_written", Type: "counter", Number: 30},
		{Name: "disk/sda2/write_time", Type: "counter", Number: 0},
		{Name: "disk/sda2/io_time", Type: "counter", Number: 1592},
		{Name: "disk/sda2/io_time_weighted", Type: "counter", Number: 1692},
		{Name: "disk/sda2/iops", Type: "counter", Number: 276 + 15},
		// --
		{Name: "disk/sda3/reads", Type: "counter", Number: 55223},
		{Name: "disk/sda3/reads_merged", Type: "counter", Number: 932},
		{Name: "disk/sda3/sectors_read", Type: "counter", Number: 1262468},
		{Name: "disk/sda3/read_time", Type: "counter", Number: 270204},
		{Name: "disk/sda3/writes", Type: "counter", Number: 184397},
		{Name: "disk/sda3/writes_merged", Type: "counter", Number: 256917},
		{Name: "disk/sda3/sectors_written", Type: "counter", Number: 10804032},
		{Name: "disk/sda3/write_time", Type: "counter", Number: 1436428},
		{Name: "disk/sda3/io_time", Type: "counter", Number: 512824},
		{Name: "disk/sda3/io_time_weighted", Type: "counter", Number: 1707280},
		{Name: "disk/sda3/iops", Type: "counter", Number: 55223 + 184397},
		// --
		{Name: "disk/sr0/reads", Type: "counter", Number: 0},
		{Name: "disk/sr0/reads_merged", Type: "counter", Number: 0},
		{Name: "disk/sr0/sectors_read", Type: "counter", Number: 0},
		{Name: "disk/sr0/read_time", Type: "counter", Number: 0},
		{Name: "disk/sr0/writes", Type: "counter", Number: 0},
		{Name: "disk/sr0/writes_merged", Type: "counter", Number: 0},
		{Name: "disk/sr0/sectors_written", Type: "counter", Number: 0},
		{Name: "disk/sr0/write_time", Type: "counter", Number: 0},
		{Name: "disk/sr0/io_time", Type: "counter", Number: 0},
		{Name: "disk/sr0/io_time_weighted", Type: "counter", Number: 0},
		{Name: "disk/sr0/iops", Type: "counter", Number: 0},
		// --
		{Name: "disk/dm-0/reads", Type: "counter", Number: 43661},
		{Name: "disk/dm-0/reads_merged", Type: "counter", Number: 0},
		{Name: "disk/dm-0/sectors_read", Type: "counter", Number: 1094074},
		{Name: "disk/dm-0/read_time", Type: "counter", Number: 262092},
		{Name: "disk/dm-0/writes", Type: "counter", Number: 132099},
		{Name: "disk/dm-0/writes_merged", Type: "counter", Number: 0},
		{Name: "disk/dm-0/sectors_written", Type: "counter", Number: 5731328},
		{Name: "disk/dm-0/write_time", Type: "counter", Number: 4209168},
		{Name: "disk/dm-0/io_time", Type: "counter", Number: 231792},
		{Name: "disk/dm-0/io_time_weighted", Type: "counter", Number: 4471268},
		{Name: "disk/dm-0/iops", Type: "counter", Number: 43661 + 132099},
		// --
		{Name: "disk/dm-1/reads", Type: "counter", Number: 287},
		{Name: "disk/dm-1/reads_merged", Type: "counter", Number: 0},
		{Name: "disk/dm-1/sectors_read", Type: "counter", Number: 2296},
		{Name: "disk/dm-1/read_time", Type: "counter", Number: 1692},
		{Name: "disk/dm-1/writes", Type: "counter", Number: 0},
		{Name: "disk/dm-1/writes_merged", Type: "counter", Number: 0},
		{Name: "disk/dm-1/sectors_written", Type: "counter", Number: 0},
		{Name: "disk/dm-1/write_time", Type: "counter", Number: 0},
		{Name: "disk/dm-1/io_time", Type: "counter", Number: 700},
		{Name: "disk/dm-1/io_time_weighted", Type: "counter", Number: 1692},
		{Name: "disk/dm-1/iops", Type: "counter", Number: 287 + 0},
		// --
		{Name: "disk/dm-2/reads", Type: "counter", Number: 12213},
		{Name: "disk/dm-2/reads_merged", Type: "counter", Number: 0},
		{Name: "disk/dm-2/sectors_read", Type: "counter", Number: 165618},
		{Name: "disk/dm-2/read_time", Type: "counter", Number: 42444},
		{Name: "disk/dm-2/writes", Type: "counter", Number: 310480},
		{Name: "disk/dm-2/writes_merged", Type: "counter", Number: 0},
		{Name: "disk/dm-2/sectors_written", Type: "counter", Number: 5072704},
		{Name: "disk/dm-2/write_time", Type: "counter", Number: 1084328},
		{Name: "disk/dm-2/io_time", Type: "counter", Number: 946036},
		{Name: "disk/dm-2/io_time_weighted", Type: "counter", Number: 1126764},
		{Name: "disk/dm-2/iops", Type: "counter", Number: 12213 + 310480},
	}
	if same, diff := test.IsDeeply(got, expect); !same {
		t.Logf("%+v\n", got)
		t.Error(diff)
	}
}
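Each /proc/diskstats line carries the device name plus eleven counters; the test's 10 counter metrics map to those fields in order (skipping "I/Os in progress", which is a gauge), and iops is derived as reads + writes, which is what the "56058 + 232825" literals spell out. The following is a hedged sketch of that mapping, assuming the standard /proc/diskstats layout; it is not the monitor's actual parser.

package diskstatssketch

import (
	"fmt"
	"strconv"
	"strings"
)

type Metric struct {
	Name   string
	Type   string
	Number float64
}

// metricNames are the per-device counters used by the test, in file order.
var metricNames = []string{
	"reads", "reads_merged", "sectors_read", "read_time",
	"writes", "writes_merged", "sectors_written", "write_time",
	"io_time", "io_time_weighted",
}

// parseLine turns one /proc/diskstats line into counters plus a derived iops
// metric (reads + writes). Field 12, I/Os currently in progress, is a gauge
// and is skipped here, which matches the 10 counters in the test.
func parseLine(line string) ([]Metric, error) {
	f := strings.Fields(line)
	if len(f) < 14 {
		return nil, fmt.Errorf("short diskstats line: %q", line)
	}
	dev := f[2]
	vals := make([]float64, 0, len(metricNames))
	// Fields 4-11 and 13-14 (1-based), i.e. indexes 3-10 and 12-13.
	for _, i := range []int{3, 4, 5, 6, 7, 8, 9, 10, 12, 13} {
		v, err := strconv.ParseFloat(f[i], 64)
		if err != nil {
			return nil, err
		}
		vals = append(vals, v)
	}
	metrics := make([]Metric, 0, len(vals)+1)
	for i, v := range vals {
		metrics = append(metrics, Metric{
			Name:   "disk/" + dev + "/" + metricNames[i],
			Type:   "counter",
			Number: v,
		})
	}
	// iops is derived: total read + write requests.
	metrics = append(metrics, Metric{
		Name:   "disk/" + dev + "/iops",
		Type:   "counter",
		Number: vals[0] + vals[4], // reads + writes
	})
	return metrics, nil
}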
Example 25
// This test is the same as TestCollectInnoDBStats, with the only difference that
// here we simulate a MySQL disconnection.
// After a disconnection, we must still be able to collect InnoDB stats.
func (s *TestSuite) TestHandleMySQLRestarts(t *C) {
	/**
	 * Disable and reset InnoDB metrics so we can test that the monitor enables and sets them.
	 */
	if _, err := s.db.Exec("set global innodb_monitor_disable = '%'"); err != nil {
		t.Fatal(err)
	}
	if _, err := s.db.Exec("set global innodb_monitor_reset_all = '%'"); err != nil {
		t.Fatal(err)
	}

	s.db.Exec("drop database if exists percona_agent_test")
	s.db.Exec("create database percona_agent_test")
	s.db.Exec("create table percona_agent_test.t (i int) engine=innodb")
	defer s.db.Exec("drop database if exists percona_agent_test")

	config := &mysql.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Collect: 1,
			Report:  60,
		},
		Status: map[string]string{},
		InnoDB: []string{"dml_%"}, // same as above ^
	}

	m := mysql.NewMonitor(s.name, config, s.logger, mysqlConn.NewConnection(dsn), s.mrm)
	if m == nil {
		t.Fatal("Make new mysql.Monitor")
	}

	err := m.Start(s.tickChan, s.collectionChan)
	if err != nil {
		t.Fatalf("Start monitor without error, got %s", err)
	}

	if ok := test.WaitStatus(5, m, s.name+"-mysql", "Connected"); !ok {
		t.Fatal("Monitor is ready")
	}

	/**
	 * Simulate a MySQL disconnection by disabling InnoDB metrics and putting a
	 * true into the restart channel. The monitor must enable them again
	 */
	if _, err := s.db.Exec("set global innodb_monitor_disable = '%'"); err != nil {
		t.Fatal(err)
	}
	if _, err := s.db.Exec("set global innodb_monitor_reset_all = '%'"); err != nil {
		t.Fatal(err)
	}
	s.mrm.SimulateMySQLRestart()

	if ok := test.WaitStatus(5, m, s.name+"-mysql", "Connected"); !ok {
		t.Fatal("Monitor is ready")
	}

	// Do an INSERT to increment dml_inserts before the monitor collects.  If it re-enabled
	// the InnoDB metrics and collects them, we should get dml_inserts=1 later.
	s.db.Exec("insert into percona_agent_test.t (i) values (42)")

	s.tickChan <- time.Now()
	got := test.WaitCollection(s.collectionChan, 1)
	if len(got) == 0 {
		t.Fatal("Got a collection after tick")
	}
	c := got[0]

	/**
	 * ...monitor should have collected the InnoDB metrics:
	 *
	 * mysql> SELECT NAME, SUBSYSTEM, COUNT, TYPE FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE STATUS='enabled';
	 * +-------------+-----------+-------+----------------+
	 * | NAME        | SUBSYSTEM | COUNT | TYPE           |
	 * +-------------+-----------+-------+----------------+
	 * | dml_reads   | dml       |     0 | status_counter |
	 * | dml_inserts | dml       |     1 | status_counter |
	 * | dml_deletes | dml       |     0 | status_counter |
	 * | dml_updates | dml       |     0 | status_counter |
	 * +-------------+-----------+-------+----------------+
	 */
	if len(c.Metrics) != 4 {
		t.Fatalf("Collect 4 InnoDB metrics; got %+v", c.Metrics)
	}
	expect := []mm.Metric{
		{Name: "mysql/innodb/dml/dml_reads", Type: "counter", Number: 0},
		{Name: "mysql/innodb/dml/dml_inserts", Type: "counter", Number: 1}, // <-- our INSERT
		{Name: "mysql/innodb/dml/dml_deletes", Type: "counter", Number: 0},
		{Name: "mysql/innodb/dml/dml_updates", Type: "counter", Number: 0},
	}
	if ok, diff := test.IsDeeply(c.Metrics, expect); !ok {
		t.Error(diff)
	}

	// Stop monitor, clean up.
	m.Stop()
}
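The comment block above already shows the query behind the collection: after the monitor re-enables the counters (presumably via SET GLOBAL innodb_monitor_enable for the configured patterns), it reads INFORMATION_SCHEMA.INNODB_METRICS and prefixes each name with its subsystem, hence mysql/innodb/dml/dml_inserts. Here is a hedged sketch of that read using database/sql directly rather than the agent's own connection wrapper.

package innodbsketch

import (
	"database/sql"
	"fmt"
)

type Metric struct {
	Name   string
	Type   string
	Number float64
}

// collectInnoDB reads the currently enabled InnoDB metrics and namespaces
// them by subsystem, as the expected metric names in the test show.
func collectInnoDB(db *sql.DB) ([]Metric, error) {
	rows, err := db.Query(
		"SELECT NAME, SUBSYSTEM, COUNT FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE STATUS='enabled'")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var metrics []Metric
	for rows.Next() {
		var name, subsystem string
		var count float64
		if err := rows.Scan(&name, &subsystem, &count); err != nil {
			return nil, err
		}
		metrics = append(metrics, Metric{
			Name:   fmt.Sprintf("mysql/innodb/%s/%s", subsystem, name),
			Type:   "counter",
			Number: count,
		})
	}
	return metrics, rows.Err()
}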
Example 26
func (s *RelayTestSuite) TestOfflineBufferOverflow(t *C) {
	// Same magic as in TestOfflineBuffering to force relay offline.
	l := s.logger
	s.client.SetConnectChan(s.connectChan)
	doneChan := make(chan bool, 1)
	go func() {
		s.client.RecvError <- io.EOF
		doneChan <- true
	}()
	<-doneChan
	<-s.connectChan
	// Relay is offline, trying to connect.

	// Overflow the first buffer but not the second.  We should get all
	// log entries back.
	for i := 0; i < log.BUFFER_SIZE+1; i++ {
		l.Error(fmt.Sprintf("a:%d", i))
	}
	if !test.WaitStatus(3, s.relay, "log-buf1", fmt.Sprintf("%d", log.BUFFER_SIZE)) {
		t.Error("First buffer full")
	}

	// Unblock the relay's connect attempt.
	s.connectChan <- true
	if !test.WaitStatus(1, s.relay, "ws", "Connected") {
		t.Fatal("Relay connects")
	}

	// Wait for the relay to resend what it buffered above.
	// +2 for "connected: false" and "connected: true".
	got := test.WaitLog(s.recvChan, log.BUFFER_SIZE+1+2)

	expect := make([]proto.LogEntry, log.BUFFER_SIZE+1+2)
	expect[0] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: false"}
	for i, n := 0, 1; i < log.BUFFER_SIZE+1; i, n = i+1, n+1 {
		expect[n] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: fmt.Sprintf("a:%d", i)}
	}
	expect[log.BUFFER_SIZE+1+1] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: true"}
	if same, diff := test.IsDeeply(got, expect); !same {
		t.Error(diff)
	}

	if !test.WaitStatus(3, s.relay, "log-buf2", "0") {
		status := s.relay.Status()
		t.Log(status)
		t.Fatal("2nd buf empty")
	}
	expect = []proto.LogEntry{}

	// Force the relay offline again, then overflow both buffers. We should get
	// the first buffer, an entry about lost entries (from the second buffer),
	// then the second buffer with the very latest.
	go func() {
		s.client.RecvError <- io.EOF
		doneChan <- true
	}()
	<-doneChan
	<-s.connectChan
	// Relay is offline, trying to connect.

	overflow := 3
	for i := 0; i < (log.BUFFER_SIZE*2)+overflow; i++ {
		l.Error(fmt.Sprintf("b:%d", i))
	}
	if !test.WaitStatus(3, s.relay, "log-buf2", fmt.Sprintf("%d", overflow+1)) {
		status := s.relay.Status()
		t.Log(status)
		t.Fatal("2nd buf full")
	}

	// Unblock the relay's connect attempt.
	s.connectChan <- true
	if !test.WaitStatus(1, s.relay, "ws", "Connected") {
		t.Fatal("Relay connects")
	}

	// +3 for "connected: false", "Lost N entries", and "connected: true".
	got = test.WaitLog(s.recvChan, log.BUFFER_SIZE+overflow+3)

	expect = make([]proto.LogEntry, log.BUFFER_SIZE+overflow+3)
	expect[0] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: false"}
	n := 1
	// If buf size is 10, then we should get "Lost connection" (connected: false), entries 0-8, a "Lost 10" message for 9-18, then 19-22.
	/**
	 *  /10, first buffer
	 * 1		Lost connection
	 * 2		entry 0
	 * 3		entry 1
	 * 4		entry 2
	 * 5		entry 3
	 * 6		entry 4
	 * 7		entry 5
	 * 8		entry 6
	 * 9		entry 7
	 * 10		entry 8
	 * Entry 9-18 into second buffer, entry 19 causes the overflow and reset:
	 *  /10, second buffer
	 * 1		entry 19
	 * 2		entry 20
	 * 3		entry 21
	 * 4		entry 22
	 */
	for i := 0; i < log.BUFFER_SIZE-1; i++ {
		expect[n] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: fmt.Sprintf("b:%d", i)}
		n++
	}
	expect[n] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: fmt.Sprintf("Lost %d log entries", log.BUFFER_SIZE)}
	n++
	for i, j := log.BUFFER_SIZE, log.BUFFER_SIZE*2-1; i < log.BUFFER_SIZE+overflow+1; i, j = i+1, j+1 {
		expect[n] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_ERROR, Service: "test", Msg: fmt.Sprintf("b:%d", j)}
		n++
	}
	expect[log.BUFFER_SIZE+overflow+2] = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log", Msg: "connected: true"}
	if same, diff := test.IsDeeply(got, expect); !same {
		// @todo: this test may still be unstable
		n := len(got)
		if len(expect) > n {
			n = len(expect)
		}
		for i := 0; i < n; i++ {
			var gotL proto.LogEntry
			var expectL proto.LogEntry
			if i < len(got) {
				gotL = got[i]
			}
			if i < len(expect) {
				expectL = expect[i]
			}
			t.Logf("%+v %+v\n", gotL, expectL)
		}
		t.Error(diff)
	}
}
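Reading the expectations backwards gives the buffering scheme: a first buffer holds the oldest BUFFER_SIZE entries, everything newer goes into a second buffer, and when that one overflows it is cleared and only the count of dropped entries survives as a "Lost N log entries" marker between the two halves. Below is a minimal sketch of that bookkeeping; the names and types are assumptions, not the real log relay. Feeding it "connected: false" plus b:0..b:22 with bufferSize=10 reproduces the expected sequence: the first ten entries, "Lost 10 log entries" for b:9..b:18, then b:19..b:22.

package logbufsketch

import "fmt"

const bufferSize = 10 // stand-in for log.BUFFER_SIZE

// buffers sketches the two-stage overflow scheme exercised above: the first
// buffer keeps the oldest entries, the second keeps the newest, and when the
// second buffer overflows it is reset and only a count of the discarded
// entries is kept, later reported as a "Lost N log entries" marker.
type buffers struct {
	first  []string
	second []string
	lost   int
}

func (b *buffers) add(entry string) {
	if len(b.first) < bufferSize {
		b.first = append(b.first, entry)
		return
	}
	if len(b.second) == bufferSize {
		b.lost += len(b.second) // drop the middle, remember how many were lost
		b.second = b.second[:0]
	}
	b.second = append(b.second, entry)
}

// flush returns everything to resend after reconnecting, oldest first.
func (b *buffers) flush() []string {
	out := append([]string{}, b.first...)
	if b.lost > 0 {
		out = append(out, fmt.Sprintf("Lost %d log entries", b.lost))
	}
	out = append(out, b.second...)
	b.first, b.second, b.lost = nil, nil, 0
	return out
}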
Example 27
func (s *ManagerTestSuite) TestGetConfig(t *C) {
	m := mm.NewManager(s.logger, s.factory, s.clock, s.spool, s.im)
	t.Assert(m, NotNil)
	err := m.Start()
	t.Assert(err, IsNil)

	/**
	 * Start a mock MySQL monitor.
	 */
	mysqlMonitorConfig := &mysql.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Collect: 1,
			Report:  60,
		},
		Status: map[string]string{
			"threads_connected": "gauge",
			"threads_running":   "gauge",
		},
	}
	mysqlData, err := json.Marshal(mysqlMonitorConfig)
	t.Assert(err, IsNil)
	cmd := &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StartService",
		Data:    mysqlData,
	}
	s.mysqlMonitor.SetConfig(mysqlMonitorConfig)
	reply := m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Assert(reply.Error, Equals, "")

	/**
	 * Start a mock system monitor.
	 */
	systemMonitorConfig := &system.Config{
		Config: mm.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "server",
				InstanceId: 1,
			},
			Collect: 10,
			Report:  60,
		},
	}
	systemData, err := json.Marshal(systemMonitorConfig)
	t.Assert(err, IsNil)
	cmd = &proto.Cmd{
		User:    "******",
		Service: "mm",
		Cmd:     "StartService",
		Data:    systemData,
	}
	s.systemMonitor.SetConfig(systemMonitorConfig)
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Assert(reply.Error, Equals, "")

	/**
	 * GetConfig from mm which should return all monitors' configs.
	 */
	cmd = &proto.Cmd{
		Cmd:     "GetConfig",
		Service: "mm",
	}
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Assert(reply.Error, Equals, "")
	t.Assert(reply.Data, NotNil)
	gotConfig := []proto.AgentConfig{}
	if err := json.Unmarshal(reply.Data, &gotConfig); err != nil {
		t.Fatal(err)
	}
	expectConfig := []proto.AgentConfig{
		{
			InternalService: "mm",
			ExternalService: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Config:  string(mysqlData),
			Running: true,
		},
		{
			InternalService: "mm",
			ExternalService: proto.ServiceInstance{
				Service:    "server",
				InstanceId: 1,
			},
			Config:  string(systemData),
			Running: true,
		},
	}
	if same, diff := test.IsDeeply(gotConfig, expectConfig); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}
}
Example 28
func (s *ManagerTestSuite) TestStartStopMonitor(t *C) {
	m := sysconfig.NewManager(s.logger, s.factory, s.clock, s.spool, s.im)
	t.Assert(m, NotNil)

	err := m.Start()
	t.Assert(err, IsNil)

	// Starting a monitor is like starting the manager: it requires
	// a "StartService" cmd and the monitor's config.  This is the
	// config in configDir/db1-mysql-monitor.conf.
	sysconfigConfig := &mysql.Config{
		Config: sysconfig.Config{
			ServiceInstance: proto.ServiceInstance{
				Service:    "mysql",
				InstanceId: 1,
			},
			Report: 3600,
		},
	}
	sysconfigConfigData, err := json.Marshal(sysconfigConfig)
	t.Assert(err, IsNil)

	cmd := &proto.Cmd{
		User:    "******",
		Service: "sysconfig",
		Cmd:     "StartService",
		Data:    sysconfigConfigData,
	}

	// If this were a real monitor, it would decode and set its own config.
	// The mock monitor doesn't have any real config type, so we set it manually.
	s.mockMonitor.SetConfig(sysconfigConfig)

	// The agent calls sysconfig.Handle() with the cmd (for logging and status) and the config data.
	reply := m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Equals, "")

	// The monitor should be running.  The mock monitor returns "Running" if
	// Start() has been called; else it returns "Stopped".
	status := s.mockMonitor.Status()
	if status["monitor"] != "Running" {
		t.Error("Monitor running")
	}

	// There should be a 3600s report ticker for the monitor.
	if ok, diff := test.IsDeeply(s.clock.Added, []uint{3600}); !ok {
		t.Errorf("Make 3600s ticker for report interval\n%s", diff)
	}

	// After starting a monitor, sysconfig should write its config to the dir
	// it learned when sysconfig.LoadConfig() was called.  Next time agent starts,
	// it will have sysconfig start the monitor with this config.
	data, err := ioutil.ReadFile(s.configDir + "/sysconfig-mysql-1.conf")
	t.Check(err, IsNil)
	gotConfig := &mysql.Config{}
	err = json.Unmarshal(data, gotConfig)
	t.Check(err, IsNil)
	if same, diff := test.IsDeeply(gotConfig, sysconfigConfig); !same {
		t.Logf("%+v", gotConfig)
		t.Error(diff)
	}

	/**
	 * Stop the monitor.
	 */

	cmd = &proto.Cmd{
		User:    "******",
		Service: "sysconfig",
		Cmd:     "StopService",
		Data:    sysconfigConfigData,
	}

	// Handles StopService without error.
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	t.Check(reply.Error, Equals, "")

	status = s.mockMonitor.Status()
	if status["monitor"] != "Stopped" {
		t.Error("Monitor stopped")
	}

	// After stopping the monitor, the manager should remove its tickChan.
	if len(s.clock.Removed) != 1 {
		t.Error("Removes monitor's tickChan from clock")
	}

	// After stopping a monitor, sysconfig should remove its config file so agent
	// doesn't start it on restart.
	file := s.configDir + "/sysconfig-mysql-1.conf"
	if pct.FileExists(file) {
		t.Error("Stopping monitor removes its config; ", file, " exists")
	}

	/**
	 * While we're all setup and working, let's sneak in an unknown cmd test.
	 */

	cmd = &proto.Cmd{
		User:    "******",
		Service: "sysconfig",
		Cmd:     "Pontificate",
		Data:    sysconfigConfigData,
	}

	// Unknown cmd causes error.
	reply = m.Handle(cmd)
	t.Assert(reply, NotNil)
	if reply.Error == "" {
		t.Fatalf("Unknown Cmd to Handle() causes error")
	}

	/**
	 * Clean up
	 */
	m.Stop()
}
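The persistence rule the test checks is simple: StartService writes sysconfig-<service>-<id>.conf into the basedir config dir so the agent restarts the monitor next time, and StopService deletes it. The helpers below sketch that rule; the file naming mirrors the test, but the functions themselves are illustrative, not the manager's real code.

package sysconfigsketch

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// writeMonitorConfig persists a started monitor's config, e.g.
// sysconfig-mysql-1.conf, so it is restarted on the next agent start.
func writeMonitorConfig(configDir, service string, id uint, config interface{}) error {
	data, err := json.Marshal(config)
	if err != nil {
		return err
	}
	file := filepath.Join(configDir, fmt.Sprintf("sysconfig-%s-%d.conf", service, id))
	return os.WriteFile(file, data, 0644)
}

// removeMonitorConfig deletes the file when the monitor is stopped, so the
// agent does not start it again on restart.
func removeMonitorConfig(configDir, service string, id uint) error {
	file := filepath.Join(configDir, fmt.Sprintf("sysconfig-%s-%d.conf", service, id))
	return os.Remove(file)
}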
Example 29
// COUNTER
func (s *AggregatorTestSuite) TestC003(t *C) {
	interval := int64(5)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c003 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	for i := 1; i <= 5; i++ {
		file := fmt.Sprintf("%s/c003-%d.json", sample, i)
		if err := sendCollection(file, s.collectionChan); err != nil {
			t.Fatal(file, err)
		}
	}
	// Next interval causes 1st to be reported.
	file := fmt.Sprintf("%s/c003-n.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	/**
	 * Pretend we're monitoring Bytes_sent every second:
	 * first val = 100
	 *           prev this diff val/s
	 * next val  100   200  100   100
	 * next val  200   400  200   200
	 * next val  400   800  400   400
	 * next val  800  1600  800   800
	 *
	 * So min bytes/s = 100, max = 800, avg = 375.  These are
	 * the values in c003r.json.
	 */
	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))
	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c003r.json", expect); err != nil {
		t.Fatal("c003r.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}

	// Get the collected stats
	// As got.Stats[0].Stats is a map, we run this empty 'for' loop just to get
	// the stats for the (only) key in the map into the stats variable.
	var stats *mm.Stats
	for _, stats = range got.Stats[0].Stats {
	}
	// First time, stats.Cnt must be equal to the number of seconds in the interval
	// minus 1 because the first value is used to bootstrap the aggregator
	t.Check(int64(stats.Cnt), Equals, interval-1)

	// Let's complete the second interval
	for i := 6; i <= 9; i++ {
		file := fmt.Sprintf("%s/c003-%d.json", sample, i)
		if err := sendCollection(file, s.collectionChan); err != nil {
			t.Fatal(file, err)
		}
	}
	// Sample #10 will be in the 3rd interval, so the 2nd will be reported
	file = fmt.Sprintf("%s/c003-%d.json", sample, 10)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	got = test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	// Get the collected stats
	for _, stats = range got.Stats[0].Stats {
	}
	// stats.Cnt must be equal to the number of seconds in the interval
	t.Check(int64(stats.Cnt), Equals, interval)
	if err := test.LoadMmReport(sample+"/c003r2.json", expect); err != nil {
		t.Fatal("c003r2.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}
}
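The two Cnt checks encode the bootstrap behaviour spelled out in the comments: the first sample of the run only seeds the previous value, so the first interval is one delta short, while later intervals get a delta for every second. A small worked version of that count follows; it is one consistent reading of the assertions, not the aggregator's code.

package main

import "fmt"

// Worked count for the Cnt assertions above: samples arrive once per second
// and every delta needs a previous value.
func main() {
	interval := int64(5)

	// 1st interval: 5 samples (c003-1..5), but the very first one only seeds
	// the previous value, so it yields interval-1 deltas.
	fmt.Println(interval - 1) // 4 == stats.Cnt in the first report

	// 2nd interval: every sample in it already has a previous value to diff
	// against, so Cnt equals the full interval.
	fmt.Println(interval) // 5 == stats.Cnt in the second report
}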
Example 30
func (s *ManagerTestSuite) TestLogService(t *C) {
	config := &log.Config{
		File:  s.logFile,
		Level: "info",
	}
	pct.Basedir.WriteConfig("log", config)

	m := log.NewManager(s.client, s.logChan)
	err := m.Start()
	t.Assert(err, IsNil)

	relay := m.Relay()
	t.Assert(relay, NotNil)

	logger := pct.NewLogger(relay.LogChan(), "log-svc-test")
	logger.Info("i'm a log entry")

	// Log entry should be sent to API.
	got := test.WaitLog(s.recvChan, 3)
	if len(got) == 0 {
		t.Fatal("No log entries")
	}
	var gotLog proto.LogEntry
	for _, l := range got {
		if l.Service == "log-svc-test" {
			gotLog = l
			break
		}
	}
	t.Assert(gotLog, NotNil)
	expectLog := proto.LogEntry{Ts: test.Ts, Level: proto.LOG_INFO, Service: "log-svc-test", Msg: "i'm a log entry"}
	if same, diff := test.IsDeeply(gotLog, expectLog); !same {
		t.Logf("%+v", got)
		t.Error(diff)
	}

	// Since there's a log file, entry should be written to it too.
	size, _ := test.FileSize(s.logFile)
	test.WaitFileSize(s.logFile, size)
	var content []byte
	content, err = ioutil.ReadFile(s.logFile)
	t.Assert(err, IsNil)

	if !strings.Contains(string(content), "i'm a log entry") {
		t.Error("Writes log entry to log file, got\n", string(content))
	}

	// Can't stop log service, but Stop() should work and not return error.
	err = m.Stop()
	t.Assert(err, IsNil)

	/**
	 * Change log level and file
	 */

	newLogFile := s.logFile + "-2"
	defer os.Remove(newLogFile)

	config = &log.Config{
		File:  newLogFile,
		Level: "warning",
	}
	configData, err := json.Marshal(config)
	t.Assert(err, IsNil)

	cmd := &proto.Cmd{
		User:    "******",
		Service: "log",
		Cmd:     "SetConfig",
		Data:    configData,
	}

	gotReply := m.Handle(cmd)
	expectReply := cmd.Reply(config)
	if same, diff := test.IsDeeply(gotReply, expectReply); !same {
		t.Logf("%+v", gotReply)
		t.Error(diff)
	}

	// Log entry should NOT be sent to API if log level was really changed.
	logger.Info("i'm lost")
	got = test.WaitLog(s.recvChan, 3)
	if len(got) != 0 {
		t.Logf("%+v", got)
		t.Error("Log level changed dynamically")
	}

	logger.Warn("blah")
	got = test.WaitLog(s.recvChan, 3)
	gotLog = proto.LogEntry{}
	for _, l := range got {
		if l.Service == "log-svc-test" {
			gotLog = l
			break
		}
	}
	expectLog = proto.LogEntry{Ts: test.Ts, Level: proto.LOG_WARNING, Service: "log-svc-test", Msg: "blah"}
	if same, diff := test.IsDeeply(gotLog, expectLog); !same {
		t.Logf("%+v", got)
		t.Error(diff)
	}

	// Entry should be written to new log file if it was really changed.
	size, _ = test.FileSize(newLogFile)
	test.WaitFileSize(newLogFile, size)
	content, err = ioutil.ReadFile(newLogFile)
	t.Assert(err, IsNil)
	if !strings.Contains(string(content), "blah") {
		t.Error("Log file changed dynamically, got\n", string(content))
	}

	// Verify new log config on disk.
	data, err := ioutil.ReadFile(pct.Basedir.ConfigFile("log"))
	t.Assert(err, IsNil)
	gotConfig := &log.Config{}
	if err := json.Unmarshal(data, gotConfig); err != nil {
		t.Fatal(err)
	}
	if same, diff := test.IsDeeply(gotConfig, config); !same {
		test.Dump(gotConfig)
		t.Error(diff)
	}

	/**
	 * GetConfig
	 */

	cmd = &proto.Cmd{
		User:    "******",
		Service: "log",
		Cmd:     "GetConfig",
	}
	reply := m.Handle(cmd)
	t.Assert(reply.Error, Equals, "")
	t.Assert(reply.Data, NotNil)
	gotConfigRes := []proto.AgentConfig{}
	if err := json.Unmarshal(reply.Data, &gotConfigRes); err != nil {
		t.Fatal(err)
	}
	expectConfigRes := []proto.AgentConfig{
		{
			InternalService: "log",
			Config:          string(configData),
			Running:         true,
		},
	}
	if same, diff := test.IsDeeply(gotConfigRes, expectConfigRes); !same {
		test.Dump(gotConfigRes)
		t.Error(diff)
	}

	/**
	 * Status (internal status of log and relay)
	 */

	status := m.Status()
	t.Check(status["ws"], Equals, "Connected")
	t.Check(status["log-file"], Equals, newLogFile)
	t.Check(status["log-level"], Equals, "warning")
}
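The level-change part of this test hinges on the relay filtering entries before they reach the websocket or the log file: once the threshold is "warning", Info() entries vanish while Warn() entries still go through. The sketch below shows that filtering; the numeric level values and names are assumptions for illustration, ordered like the proto.LOG_* constants used above (lower = more severe).

package logsketch

// Assumed severity ordering for this sketch: lower value = more severe.
const (
	logError = iota
	logWarning
	logInfo
	logDebug
)

// relay sketches the dynamic level filtering tested above: after SetConfig
// raises the threshold to "warning", Info() entries are dropped before they
// ever reach the websocket or the log file, while Warn() entries still pass.
type relay struct {
	level int // least severe level that is still sent, e.g. logWarning
	send  func(level int, msg string)
}

func (r *relay) log(level int, msg string) {
	if level > r.level {
		return // less severe than the configured threshold: not sent anywhere
	}
	r.send(level, msg)
}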