Beispiel #1
0
func (s *AggregatorTestSuite) TestBadMetric(t *C) {
	/**
	 * Bad metrics should not exist and certainly not aggregated because they
	 * can go undetected for a long time because they'll result in zero values
	 * which are valid in normal cases.  The metric is bad in the input because
	 * its type is "guage" instead of "gauge", and it's the only metric so the
	 * result should be zero metrics.
	 */
	a := mm.NewAggregator(s.logger, 60, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Send the interval's collection, then one in the next interval to
	// cause the first interval to be reported.
	file := fmt.Sprintf("%s/bad_metric.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}
	file = fmt.Sprintf("%s/bad_metric-n.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	got := test.WaitMmReport(s.dataChan)
	// Guard against a nil report before dereferencing it, consistent with
	// the other tests in this suite; without this a missing report panics
	// instead of failing the test.
	t.Assert(got, NotNil)
	t.Check(len(got.Stats), Equals, 1)          // instance
	t.Check(len(got.Stats[0].Stats), Equals, 0) // ^ its metrics
}
Beispiel #2
0
// All zero values
func (s *AggregatorTestSuite) TestC000(t *C) {
	interval := int64(60)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c000 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	// Send the interval's collection, then one in the next interval ("n")
	// which makes the aggregator report the first interval.
	for _, name := range []string{"/c000.json", "/c000-n.json"} {
		file := sample + name
		if err := sendCollection(file, s.collectionChan); err != nil {
			t.Fatal(file, err)
		}
	}

	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))

	// The report must match the expected stats byte-for-byte.
	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c000r.json", expect); err != nil {
		t.Fatal("c000r.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}
}
Beispiel #3
0
func (s *AggregatorTestSuite) TestC002(t *C) {
	interval := int64(300)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c002-1 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	// Send all 5 collections of the first interval, then one ("n") in the
	// next interval which causes the 1st interval to be reported.
	for _, suffix := range []string{"1", "2", "3", "4", "5", "n"} {
		file := fmt.Sprintf("%s/c002-%s.json", sample, suffix)
		if err := sendCollection(file, s.collectionChan); err != nil {
			t.Fatal(file, err)
		}
	}

	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))

	// Compare against the pre-computed expected report.
	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c002r.json", expect); err != nil {
		t.Fatal("c002r.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}
}
Beispiel #4
0
func (s *AggregatorTestSuite) TestMissingAllMetrics(t *C) {
	/*
		This test verifies that missing metrics are not reported as their
		previous values: https://jira.percona.com/browse/PCT-911
		See also TestMissingSomeMetrics.

		The first interval starts at 2009-11-10 23:00:00 and we get collections
		for seconds 00 and 01, but then we fake that 02 and 03 are missed,
		and the next collection is 04.  The duration for 04 should be 3s (4-1).
		c005-n is the next interval which causes the report for the 1st interval
		and its 3 collections.
	*/

	// First we do same as TestC001 which has 3 metrics:
	// host1/a, host1/b, host1/c.  Then we collect only 1
	// new metrics: host1/foo.  Metrics a-c shouldn't be
	// reported.

	interval := int64(300)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Seconds 00, 01, 04 of the interval, then "n" to flush the report.
	for _, suffix := range []string{"0", "1", "4", "n"} {
		sendErr := sendCollection(sample+"/c005-"+suffix+".json", s.collectionChan)
		t.Assert(sendErr, IsNil)
	}
	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)

	/*
		Values are:
			@	Val	Inc	Dur	Rate/s
			00	10
			01	100	90	1	90
			04	400	300	3	100
		So rate min=90, max=100, avg=95  for COUNTER (bar)
		    val min=10, max=400, avg=170 for GAUGE (foo)
	*/
	barStats := got.Stats[0].Stats["bar"]
	t.Check(barStats.Min, Equals, float64(90))
	t.Check(barStats.Max, Equals, float64(100))
	t.Check(barStats.Avg, Equals, float64(95))

	fooStats := got.Stats[0].Stats["foo"]
	t.Check(fooStats.Min, Equals, float64(10))
	t.Check(fooStats.Max, Equals, float64(400))
	t.Check(fooStats.Avg, Equals, float64(170))
}
Beispiel #5
0
// COUNTER
func (s *AggregatorTestSuite) TestC003(t *C) {
	interval := int64(5)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c003 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	// Send the 5 per-second collections of the first interval, then one
	// ("n") in the next interval which causes the 1st to be reported.
	for _, suffix := range []string{"1", "2", "3", "4", "5", "n"} {
		file := fmt.Sprintf("%s/c003-%s.json", sample, suffix)
		if err := sendCollection(file, s.collectionChan); err != nil {
			t.Fatal(file, err)
		}
	}

	/**
	 * Pretend we're monitoring Bytes_sents every second:
	 * first val = 100
	 *           prev this diff val/s
	 * next val  100   200  100   100
	 * next val  200   400  200   200
	 * next val  400   800  400   400
	 * next val  800  1600  800   800
	 *
	 * So min bytes/s = 100, max = 800, avg = 375.  These are
	 * the values in c003r.json.
	 */
	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))
	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c003r.json", expect); err != nil {
		t.Fatal("c003r.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}
}
Beispiel #6
0
func (s *AggregatorTestSuite) TestC003Lost(t *C) {
	interval := int64(5)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c003 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	// The full sequence is files 1-5, but we send only 1 and 5,
	// simulating monitor failure during 2-4.  More below...
	file := fmt.Sprintf("%s/c003-1.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}
	file = fmt.Sprintf("%s/c003-5.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}
	// Next interval causes 1st to be reported.
	file = fmt.Sprintf("%s/c003-n.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	/**
	 * Values we did get are 100 and 1600 and ts 00 to 04.  So that looks like
	 * 1500 bytes / 4s = 375.  And since there was only 1 interval, we expect
	 * 375 for all stat values.
	 */
	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))
	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c003rlost.json", expect); err != nil {
		// Report the file actually being loaded (was wrongly "c003r.json").
		t.Fatal("c003rlost.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		test.Dump(got.Stats)
		test.Dump(expect.Stats)
		t.Fatal(diff)
	}
}
Beispiel #7
0
func (s *AggregatorTestSuite) TestC001(t *C) {
	interval := int64(300)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Load collection from file and send to aggregator.
	if err := sendCollection(sample+"/c001-1.json", s.collectionChan); err != nil {
		t.Fatal(err)
	}

	// Ts in c001 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	got := test.WaitMmReport(s.dataChan)
	if got != nil {
		// Errorf, not Error: the message contains a %+v format verb.
		t.Errorf("No report before 2nd interval, got: %+v", got)
	}

	// Ts in c001 is 2009-11-10 23:05:01, 1s into the next interval.
	if err := sendCollection(sample+"/c001-2.json", s.collectionChan); err != nil {
		t.Fatal(err)
	}

	got = test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))

	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c001r.json", expect); err != nil {
		t.Fatal(err)
	}
	t.Check(got.Ts, Equals, t1)
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		test.Dump(got.Stats)
		test.Dump(expect.Stats)
		t.Fatal(diff)
	}
}
Beispiel #8
0
func (s *AggregatorTestSuite) TestMissingSomeMetrics(t *C) {
	/*
		This test verifies that missing metrics are not reported.  E.g. the
		sample data has collections with metrics a, b, c, foo, and various
		combinations of these.  A metric is reported only if it has values
		(i.e. a value was collected and sent to the aggregator).

		NOTE: This test is more strict than our actual assumption that
		      collections are all-or-nothing and therefore cannot be partial
			  like this.  In other words, either the collection for SHOW STATUS
			  has all metrics or it has no metrics; we assume that it cannot
			  have, for example, only 100 of the usual 500 (or however many).
			  See TestMissingAllMetrics for a more realistic example.
	*/

	// First we do same as TestC001 which has 3 metrics:
	// host1/a, host1/b, host1/c.  Then we collect only 1
	// new metrics: host1/foo.  Metrics a-c shouldn't be
	// reported.

	interval := int64(300)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	/*
		c001-1.json:  "Ts":1257894000,	2009-11-10 23:00:00	report 1
		c001-2.json:  "Ts":1257894301,	2009-11-10 23:05:01	report 2
		c004-1.json:  "Ts":1257894601,	2009-11-10 23:10:01	report 3
		c004-2.json:  "Ts":1257894901,  2009-11-10 23:15:01 report 4
		c004-3.json:  "Ts":1257895201,  2009-11-10 23:20:01
	*/

	// sortedStats returns the sorted metric names of a report's first
	// instance so each report's metric set can be compared exactly.
	sortedStats := func(report *mm.Report) []string {
		stats := []string{}
		for stat := range report.Stats[0].Stats {
			stats = append(stats, stat)
		}
		sort.Strings(stats)
		return stats
	}

	// Send c001-1 and -2.  -2 ts is >5m after -1 so it causes data
	// from -1 to be sent as 1st report.
	err := sendCollection(sample+"/c001-1.json", s.collectionChan)
	t.Assert(err, IsNil)
	err = sendCollection(sample+"/c001-2.json", s.collectionChan)
	t.Assert(err, IsNil)
	report1 := test.WaitMmReport(s.dataChan)
	t.Assert(report1, NotNil)
	t.Check(sortedStats(report1), DeepEquals, []string{"host1/a", "host1/b", "host1/c"})

	// The c004-1 ts is >5m after c001-2 so it causes data from c001-2
	// to be sent as 2nd report.
	err = sendCollection(sample+"/c004-1.json", s.collectionChan)
	t.Assert(err, IsNil)
	report2 := test.WaitMmReport(s.dataChan)
	t.Assert(report2, NotNil)
	t.Check(sortedStats(report2), DeepEquals, []string{"host1/a", "host1/b", "host1/c"})

	// The c004-2 ts is >5m after c004-1 so it causes data from c004-1
	// to be sent as 3rd report.
	err = sendCollection(sample+"/c004-2.json", s.collectionChan)
	t.Assert(err, IsNil)
	report3 := test.WaitMmReport(s.dataChan)
	t.Assert(report3, NotNil)
	t.Check(sortedStats(report3), DeepEquals, []string{"host1/foo"})

	// And c004-3 flushes c004-2 as the 4th report.
	err = sendCollection(sample+"/c004-3.json", s.collectionChan)
	t.Assert(err, IsNil)
	report4 := test.WaitMmReport(s.dataChan)
	t.Assert(report4, NotNil)
	t.Check(sortedStats(report4), DeepEquals, []string{"host1/a", "host1/b", "host1/foo"})
}
Beispiel #9
0
// COUNTER
func (s *AggregatorTestSuite) TestC003(t *C) {
	interval := int64(5)
	a := mm.NewAggregator(s.logger, interval, s.collectionChan, s.spool)
	go a.Start()
	defer a.Stop()

	// Ts in c003 is 2009-11-10 23:00:00.
	t1, _ := time.Parse("2006-01-02 15:04:05", "2009-11-10 23:00:00")

	for i := 1; i <= 5; i++ {
		file := fmt.Sprintf("%s/c003-%d.json", sample, i)
		if err := sendCollection(file, s.collectionChan); err != nil {
			t.Fatal(file, err)
		}
	}
	// Next interval causes 1st to be reported.
	file := fmt.Sprintf("%s/c003-n.json", sample)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	/**
	 * Pretend we're monitoring Bytes_sents every second:
	 * first val = 100
	 *           prev this diff val/s
	 * next val  100   200  100   100
	 * next val  200   400  200   200
	 * next val  400   800  400   400
	 * next val  800  1600  800   800
	 *
	 * So min bytes/s = 100, max = 800, avg = 375.  These are
	 * the values in c003r.json.
	 */
	got := test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	t.Check(got.Ts, Equals, t1)
	t.Check(uint64(got.Duration), Equals, uint64(interval))
	expect := &mm.Report{}
	if err := test.LoadMmReport(sample+"/c003r.json", expect); err != nil {
		t.Fatal("c003r.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}

	// Get the collected stats
	// As got.Stats[0].Stats is a map, we run this empty 'for' loop just to get
	// the stats for the first key in the map, into the stats variable.
	var stats *mm.Stats
	for _, stats = range got.Stats[0].Stats {
	}
	// First time, stats.Cnt must be equal to the number of seconds in the interval
	// minus 1 because the first value is used to bootstrap the aggregator
	t.Check(int64(stats.Cnt), Equals, interval-1)

	// Let's complete the second interval
	for i := 6; i <= 9; i++ {
		file := fmt.Sprintf("%s/c003-%d.json", sample, i)
		if err := sendCollection(file, s.collectionChan); err != nil {
			t.Fatal(file, err)
		}
	}
	// Sample #10 will be in the 3rd interval, so the 2nd will be reported
	file = fmt.Sprintf("%s/c003-%d.json", sample, 10)
	if err := sendCollection(file, s.collectionChan); err != nil {
		t.Fatal(file, err)
	}

	got = test.WaitMmReport(s.dataChan)
	t.Assert(got, NotNil)
	// Get the collected stats
	for _, stats = range got.Stats[0].Stats {
	}
	// stats.Cnt must be equal to the number of seconds in the interval
	t.Check(int64(stats.Cnt), Equals, interval)
	// Use a fresh report for the second comparison: decoding JSON into the
	// previously-used struct can leave stale fields/map entries from the
	// first decode, silently corrupting the expected values.
	expect = &mm.Report{}
	if err := test.LoadMmReport(sample+"/c003r2.json", expect); err != nil {
		t.Fatal("c003r2.json ", err)
	}
	if ok, diff := test.IsDeeply(got.Stats, expect.Stats); !ok {
		t.Fatal(diff)
	}
}