Example 1
func (s *WorkerTestSuite) TestEmptyDigest(t *C) {
	// Data set 004 exercises the empty-digest case. Only the first interval
	// is run here, so no result is expected.

	rows, err := s.loadData("004")
	t.Assert(err, IsNil)
	getRows := makeGetRowsFunc(rows)
	getText := makeGetTextFunc("select 1")
	w := perfschema.NewWorker(s.logger, s.nullmysql, getRows, getText)

	// First run doesn't produce a result because 2 snapshots are required.
	i := &qan.Interval{
		Number:    1,
		StartTime: time.Now().UTC(),
	}
	err = w.Setup(i)
	t.Assert(err, IsNil)

	res, err := w.Run()
	t.Assert(err, IsNil)
	t.Check(res, IsNil)

	err = w.Cleanup()
	t.Assert(err, IsNil)

}
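The makeGetRowsFunc and makeGetTextFunc helpers are defined elsewhere in the test suite and are not shown in these excerpts. Below is a minimal sketch of what such helpers could look like, assuming the worker pulls one pre-loaded snapshot of digest rows per run and resolves query text per digest; the type and signatures here are hypothetical and the real ones in the perfschema package may differ.

package helpers // hypothetical package for this sketch only

import "fmt"

// digestRow is a hypothetical stand-in for one row of
// performance_schema.events_statements_summary_by_digest.
type digestRow struct {
	Schema       string
	Digest       string
	CountStar    uint64
	SumTimerWait uint64
}

// makeGetRowsFunc returns a closure that hands out one pre-loaded snapshot
// per call, simulating successive reads of the summary table.
func makeGetRowsFunc(snapshots [][]digestRow) func() ([]digestRow, error) {
	n := 0
	return func() ([]digestRow, error) {
		if n >= len(snapshots) {
			return nil, fmt.Errorf("no more snapshots")
		}
		rows := snapshots[n]
		n++
		return rows, nil
	}
}

// makeGetTextFunc returns a closure that maps each new digest, in the order
// seen, to the next fixture query ("select 1", "select 2", ...).
func makeGetTextFunc(queries ...string) func(digest string) (string, error) {
	seen := map[string]string{}
	return func(digest string) (string, error) {
		if text, ok := seen[digest]; ok {
			return text, nil
		}
		if len(seen) < len(queries) {
			text := queries[len(seen)]
			seen[digest] = text
			return text, nil
		}
		return "", fmt.Errorf("no query text for digest %q", digest)
	}
}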
Example 2
func (s *WorkerTestSuite) Test001(t *C) {
	// This is the simplest possible input: one query in intervals 1 and 2.
	// The result is just the increase in its values.

	rows, err := s.loadData("001")
	t.Assert(err, IsNil)
	getRows := makeGetRowsFunc(rows)
	getText := makeGetTextFunc("select 1")
	w := perfschema.NewWorker(s.logger, s.nullmysql, getRows, getText)

	// First run doesn't produce a result because 2 snapshots are required.
	i := &qan.Interval{
		Number:    1,
		StartTime: time.Now().UTC(),
	}
	err = w.Setup(i)
	t.Assert(err, IsNil)

	res, err := w.Run()
	t.Assert(err, IsNil)
	t.Check(res, IsNil)

	err = w.Cleanup()
	t.Assert(err, IsNil)

	// The second run produces a result: the diff of 2nd - 1st.
	i = &qan.Interval{
		Number:    2,
		StartTime: time.Now().UTC(),
	}
	err = w.Setup(i)
	t.Assert(err, IsNil)

	res, err = w.Run()
	t.Assert(err, IsNil)
	normalizeResult(res)
	expect, err := s.loadResult("001/res01.json")
	t.Assert(err, IsNil)
	if same, diff := IsDeeply(res, expect); !same {
		Dump(res)
		t.Error(diff)
	}

	err = w.Cleanup()
	t.Assert(err, IsNil)

	// Quick side test that Status() works and reports last stats.
	status := w.Status()
	t.Logf("%+v", status)
	t.Check(strings.HasPrefix(status["qan-worker-last"], "rows: 1"), Equals, true)
}
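The "diff of 2nd - 1st" that Test001 checks is conceptually a per-digest subtraction of counters between two consecutive snapshots of the summary table. The following is a minimal sketch of that idea only, not the perfschema package's actual implementation; the counters type and field set are assumptions for illustration.

package main

import "fmt"

// counters is a hypothetical per-digest summary taken from one snapshot.
type counters struct {
	CountStar    uint64
	SumTimerWait uint64
}

// diffSnapshots subtracts the previous snapshot from the current one,
// per digest, yielding the activity that happened during the interval.
func diffSnapshots(prev, curr map[string]counters) map[string]counters {
	diff := map[string]counters{}
	for digest, c := range curr {
		p := prev[digest] // zero value if the digest is new
		if c.CountStar == p.CountStar {
			continue // no new executions in this interval
		}
		diff[digest] = counters{
			CountStar:    c.CountStar - p.CountStar,
			SumTimerWait: c.SumTimerWait - p.SumTimerWait,
		}
	}
	return diff
}

func main() {
	prev := map[string]counters{"d1": {CountStar: 10, SumTimerWait: 1000}}
	curr := map[string]counters{"d1": {CountStar: 15, SumTimerWait: 1800}}
	fmt.Println(diffSnapshots(prev, curr)) // map[d1:{5 800}]
}

Skipping digests whose COUNT_STAR did not change also mirrors what the fourth interval of Test003 exercises below, where an unchanged query contributes nothing to the result.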
Example 3
func (s *WorkerTestSuite) Test002(t *C) {
	// This is the next-simplest input after 001: two queries with the same
	// digest but different schemas. The result is the aggregate of their
	// value diffs from interval 1 to 2.

	rows, err := s.loadData("002")
	t.Assert(err, IsNil)
	getRows := makeGetRowsFunc(rows)
	getText := makeGetTextFunc("select 1")
	w := perfschema.NewWorker(s.logger, s.nullmysql, getRows, getText)

	// First run doesn't produce a result because 2 snapshots are required.
	i := &qan.Interval{
		Number:    1,
		StartTime: time.Now().UTC(),
	}
	err = w.Setup(i)
	t.Assert(err, IsNil)

	res, err := w.Run()
	t.Assert(err, IsNil)
	t.Check(res, IsNil)

	err = w.Cleanup()
	t.Assert(err, IsNil)

	// The second run produces a result: the diff of 2nd - 1st.
	i = &qan.Interval{
		Number:    2,
		StartTime: time.Now().UTC(),
	}
	err = w.Setup(i)
	t.Assert(err, IsNil)

	res, err = w.Run()
	t.Assert(err, IsNil)
	normalizeResult(res)
	expect, err := s.loadResult("002/res01.json")
	t.Assert(err, IsNil)
	if same, diff := IsDeeply(res, expect); !same {
		Dump(res)
		t.Error(diff)
	}

	err = w.Cleanup()
	t.Assert(err, IsNil)
}
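Test002's fixture has the same digest appearing under two schemas, and the expected result aggregates their per-interval diffs into a single class. A minimal sketch of that aggregation step is shown below; the row type and field names are assumptions for illustration, not the package's real types.

package main

import "fmt"

// row is a hypothetical (schema, digest) summary-table row diff.
type row struct {
	Schema, Digest string
	CountStar      uint64
	SumTimerWait   uint64
}

// aggregateByDigest folds per-schema rows into one class per digest,
// which is what the 002 expected result reflects.
func aggregateByDigest(rows []row) map[string]row {
	classes := map[string]row{}
	for _, r := range rows {
		c := classes[r.Digest]
		c.Digest = r.Digest
		c.CountStar += r.CountStar
		c.SumTimerWait += r.SumTimerWait
		classes[r.Digest] = c
	}
	return classes
}

func main() {
	rows := []row{
		{Schema: "db1", Digest: "d1", CountStar: 2, SumTimerWait: 200},
		{Schema: "db2", Digest: "d1", CountStar: 3, SumTimerWait: 500},
	}
	fmt.Println(aggregateByDigest(rows)) // map[d1:{ d1 5 700}]
}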
Example 4
func (s *WorkerTestSuite) Test003(t *C) {
	// This test has 4 iters:
	//   1: 2 queries
	//   2: 2 queries (res02)
	//   3: 4 queries (res03)
	//   4: 4 queries but 4th has same COUNT_STAR (res04)
	rows, err := s.loadData("003")
	t.Assert(err, IsNil)
	getRows := makeGetRowsFunc(rows)
	getText := makeGetTextFunc("select 1", "select 2", "select 3", "select 4")
	w := perfschema.NewWorker(s.logger, s.nullmysql, getRows, getText)

	// First interval doesn't produce a result because 2 snapshots are required.
	i := &qan.Interval{
		Number:    1,
		StartTime: time.Now().UTC(),
	}
	err = w.Setup(i)
	t.Assert(err, IsNil)

	res, err := w.Run()
	t.Assert(err, IsNil)
	t.Check(res, IsNil)

	err = w.Cleanup()
	t.Assert(err, IsNil)

	// Second interval produces a result: the diff of 2nd - 1st.
	i = &qan.Interval{
		Number:    2,
		StartTime: time.Now().UTC(),
	}
	err = w.Setup(i)
	t.Assert(err, IsNil)

	res, err = w.Run()
	t.Assert(err, IsNil)
	normalizeResult(res)
	expect, err := s.loadResult("003/res02.json")
	t.Assert(err, IsNil)
	if same, diff := IsDeeply(res, expect); !same {
		Dump(res)
		t.Error(diff)
	}

	err = w.Cleanup()
	t.Assert(err, IsNil)

	// Third interval...
	i = &qan.Interval{
		Number:    3,
		StartTime: time.Now().UTC(),
	}
	err = w.Setup(i)
	t.Assert(err, IsNil)

	res, err = w.Run()
	t.Assert(err, IsNil)
	normalizeResult(res)
	expect, err = s.loadResult("003/res03.json")
	t.Assert(err, IsNil)

	// Go map iteration order randomness combined with
	//   globalStats.Avg = (globalStats.Avg + classStats.Avg) / 2
	// in event.GlobalClass creates a different average depending on the
	// order in which class values are folded in. In the real world the
	// variation is small and acceptable, but it makes exact static tests
	// impossible.
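	// For example, folding class averages 2, 4, 6 with that formula gives
	// ((2+4)/2+6)/2 = 4.5, but in reverse order ((6+4)/2+2)/2 = 3.5, even
	// though the true mean is 4.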
	res.Global.Metrics.TimeMetrics["Query_time"].Avg = 0

	if same, diff := IsDeeply(res, expect); !same {
		Dump(res)
		t.Error(diff)
	}

	err = w.Cleanup()
	t.Assert(err, IsNil)

	// Fourth interval...
	i = &qan.Interval{
		Number:    4,
		StartTime: time.Now().UTC(),
	}
	err = w.Setup(i)
	t.Assert(err, IsNil)

	res, err = w.Run()
	t.Assert(err, IsNil)
	normalizeResult(res)
	expect, err = s.loadResult("003/res04.json")
	t.Assert(err, IsNil)
	res.Global.Metrics.TimeMetrics["Query_time"].Avg = 0
	if same, diff := IsDeeply(res, expect); !same {
		Dump(res)
		t.Error(diff)
	}

	err = w.Cleanup()
	t.Assert(err, IsNil)
}
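Each of these tests repeats the same Setup, Run, Cleanup sequence per interval. One possible refactor, sketched here and not part of the original suite, is a small helper that drives one interval and returns its result; it assumes the types implied by the calls above (qan.Interval, gocheck's *C, and Run() returning (*qan.Result, error)), which may not match the package exactly.

// runInterval is a hypothetical helper: it runs one interval through the
// worker and fails the test on any Setup/Run/Cleanup error.
func runInterval(t *C, w *perfschema.Worker, number int) *qan.Result {
	i := &qan.Interval{
		Number:    number,
		StartTime: time.Now().UTC(),
	}
	t.Assert(w.Setup(i), IsNil)
	res, err := w.Run()
	t.Assert(err, IsNil)
	t.Assert(w.Cleanup(), IsNil)
	return res
}

With such a helper, Test001's body would reduce to res := runInterval(t, w, 1) followed by res = runInterval(t, w, 2) plus the result comparison.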