Code example #1
File: data_test.go Project: hg3rdrock/percona-agent
func (s *ManagerTestSuite) TestPurge(t *C) {
	m := data.NewManager(s.logger, s.dataDir, s.trashDir, "localhost", s.client)
	t.Assert(m, NotNil)

	config := &data.Config{
		Encoding:     "",
		SendInterval: 1,
		Limits: proto.DataSpoolLimits{
			MaxAge:   300,
			MaxSize:  1024,
			MaxFiles: 2,
		},
	}
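	// Persist the config under the base dir, where the manager presumably reads it on Start().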
	pct.Basedir.WriteConfig("data", config)

	err := m.Start()
	t.Assert(err, IsNil)

	sender := m.Sender()
	t.Check(sender, NotNil)

	cmd := &proto.Cmd{
		Service: "data",
		Cmd:     "Purge",
		// No proto.DataSpoolLimits in Data causes a full purge.
	}

	spool := m.Spooler()
	now := time.Now()
	logEntry := &proto.LogEntry{
		Ts:  now,
		Msg: "1",
	}
	spool.Write("log", logEntry)
	spool.Write("log", logEntry)
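	// WaitFiles presumably polls the dir until the expected number of files appears (or times out).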
	files := test.WaitFiles(s.dataDir, 2)
	t.Assert(files, HasLen, 2)

	reply := m.Handle(cmd)
	t.Assert(reply.Error, Equals, "")
	t.Assert(reply.Data, NotNil)
	got := map[string][]string{}
	if err := json.Unmarshal(reply.Data, &got); err != nil {
		t.Fatal(err)
	}
	t.Check(got["purged"], HasLen, 2) // here it is
	t.Check(got["age"], HasLen, 0)
	t.Check(got["size"], HasLen, 0)
	t.Assert(got["files"], HasLen, 0)
}
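The comment on the Cmd above implies that Purge honors proto.DataSpoolLimits serialized into Cmd.Data, purging only down to those limits. A minimal sketch of such a partial purge, continuing from the test (the JSON encoding of the limits into Data is an assumption, not shown in this test):

	// Assumed: limits JSON-encoded into Cmd.Data trigger a partial purge.
	limitsData, err := json.Marshal(proto.DataSpoolLimits{MaxFiles: 1})
	t.Assert(err, IsNil)
	partialPurge := &proto.Cmd{
		Service: "data",
		Cmd:     "Purge",
		Data:    limitsData,
	}
	reply = m.Handle(partialPurge)
	t.Assert(reply.Error, Equals, "")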
Code example #2
File: data_test.go Project: hg3rdrock/percona-agent
func (s *DiskvSpoolerTestSuite) TestSpoolData(t *C) {
	sz := data.NewJsonSerializer()

	// Create and start the spooler.
	spool := data.NewDiskvSpooler(s.logger, s.dataDir, s.trashDir, "localhost", s.limits)
	t.Assert(spool, NotNil)

	err := spool.Start(sz)
	if err != nil {
		t.Fatal(err)
	}

	// Doesn't matter what data we spool; just send some bytes...
	now := time.Now()
	logEntry := &proto.LogEntry{
		Ts:      now,
		Level:   1,
		Service: "mm",
		Msg:     "hello world",
	}
	spool.Write("log", logEntry)

	// Spooler should wrap data in proto.Data and write to disk, in format of serializer.
	files := test.WaitFiles(s.dataDir, 1)
	if len(files) != 1 {
		t.Fatalf("Expected 1 file, got %d\n", len(files))
	}

	gotFiles := []string{}
	filesChan := spool.Files()
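	// Files presumably yields each spooled file name and closes the channel when done.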
	for file := range filesChan {
		gotFiles = append(gotFiles, file)
	}
	if len(gotFiles) != len(files) {
		t.Fatalf("Expected spooler to return %d file(s), got %d", len(files), len(gotFiles))
	}
	if gotFiles[0] != files[0].Name() {
		t.Errorf("Expected spooler to return file %s, got %s", files[0].Name(), gotFiles[0])
	}

	// The file holds a proto.Data envelope whose Data field is the
	// serialized proto.LogEntry.
	gotData, err := spool.Read(gotFiles[0])
	if err != nil {
		t.Error(err)
	}
	protoData := &proto.Data{}
	if err := json.Unmarshal(gotData, protoData); err != nil {
		t.Fatal(err)
	}
	t.Check(protoData.Service, Equals, "log")
	t.Check(protoData.ContentType, Equals, "application/json")
	t.Check(protoData.ContentEncoding, Equals, "")
	if protoData.Created.IsZero() || protoData.Created.Before(now) {
		// The proto.Data can't be created before the data it contains.
		t.Errorf("Expected proto.Data.Created at or after the data it wraps, got %s", protoData.Created)
	}

	// The LogEntry we get back should be identical to the one we spooled.
	gotLogEntry := &proto.LogEntry{}
	if err := json.Unmarshal(protoData.Data, gotLogEntry); err != nil {
		t.Fatal(err)
	}
	if same, diff := test.IsDeeply(gotLogEntry, logEntry); !same {
		t.Logf("%#v", gotLogEntry)
		t.Error(diff)
	}

	// Removing data from spooler should remove the file.
	spool.Remove(gotFiles[0])
	files = test.WaitFiles(s.dataDir, -1)
	if len(files) != 0 {
		t.Fatalf("Expected no files, got %d\n", len(files))
	}

	spool.Stop()
}
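The assertions above pin down the envelope the spooler writes. The sketch below shows only the fields the test checks; the exact field set is an assumption, and the authoritative definition lives in the proto package:

// Sketch of the envelope shape implied by the assertions above (assumed).
type Data struct {
	Created         time.Time // set by the spooler when it wraps the payload
	Service         string    // producer of the payload, e.g. "log"
	ContentType     string    // "application/json" for the JSON serializers
	ContentEncoding string    // "" for plain JSON, "gzip" when compressed
	Data            []byte    // the serialized payload, e.g. a proto.LogEntry
}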
Code example #3
File: data_test.go Project: hg3rdrock/percona-agent
func (s *DiskvSpoolerTestSuite) TestSpoolLimits(t *C) {
	// as of 1.0.13

	limits := proto.DataSpoolLimits{
		MaxAge:   10,   // seconds
		MaxSize:  1024, // bytes
		MaxFiles: 2,
	}

	sz := data.NewJsonSerializer()
	spool := data.NewDiskvSpooler(s.logger, s.dataDir, s.trashDir, "localhost", limits)
	t.Assert(spool, NotNil)
	err := spool.Start(sz)
	t.Assert(err, IsNil)

	// Spool 3 data files (doesn't matter what, any data works).
	now := time.Now()
	logEntry := &proto.LogEntry{
		Ts:  now,
		Msg: "1",
	}
	spool.Write("log", logEntry)
	logEntry.Msg = "2"
	spool.Write("log", logEntry)
	logEntry.Msg = "3"
	spool.Write("log", logEntry)

	// Wait for spooler to write the data files.
	files := test.WaitFiles(s.dataDir, 3)
	t.Assert(files, HasLen, 3)

	// Purge the spool; 1 data file should be dropped because
	// we set limits.MaxFiles=2.
	n, removed := spool.Purge(time.Now().UTC(), limits)
	t.Check(n, Equals, 1)
	t.Check(removed["purged"], HasLen, 0)
	t.Check(removed["age"], HasLen, 0)
	t.Check(removed["size"], HasLen, 0)
	t.Assert(removed["files"], HasLen, 1) // here it is

	// Find out how large the files are so we can purge based on MaxSize.
	totalSize := 0
	for file := range spool.Files() {
		data, err := spool.Read(file)
		t.Assert(err, IsNil)
		totalSize += len(data)
	}

	// Set MaxSize a few bytes less than the total, which should cause only one
	// data file to be purged.
	limits.MaxSize = uint64(totalSize - 10)

	n, removed = spool.Purge(time.Now().UTC(), limits)
	t.Check(n, Equals, 1)
	t.Check(removed["purged"], HasLen, 0)
	t.Check(removed["age"], HasLen, 0)
	t.Check(removed["size"], HasLen, 1) // here it is
	t.Assert(removed["files"], HasLen, 0)

	// To test MaxAge, pass in a now arg that's in the past and it should cause
	// the last file to be purged.
	n, removed = spool.Purge(time.Now().Add(-1*time.Minute).UTC(), limits)
	t.Check(n, Equals, 1)
	t.Check(removed["purged"], HasLen, 0)
	t.Check(removed["age"], HasLen, 1) // here it is
	t.Check(removed["size"], HasLen, 0)
	t.Assert(removed["files"], HasLen, 0)

	// Test a full spool purge by passing no limits.
	spool.Write("log", logEntry)
	spool.Write("log", logEntry)
	spool.Write("log", logEntry)

	files = test.WaitFiles(s.dataDir, 3)
	t.Assert(files, HasLen, 3)

	limits = proto.DataSpoolLimits{} // no limit = purge all
	n, removed = spool.Purge(time.Now().UTC(), limits)
	t.Check(n, Equals, 3)
	t.Check(removed["purged"], HasLen, 3) // here it is
	t.Check(removed["age"], HasLen, 0)
	t.Check(removed["size"], HasLen, 0)
	t.Assert(removed["files"], HasLen, 0)

	// Finally, test that the auto-purge works by sending a tick manually.
	limits = proto.DataSpoolLimits{
		MaxAge:   10,   // seconds
		MaxSize:  1024, // bytes
		MaxFiles: 2,
	}
	spool = data.NewDiskvSpooler(s.logger, s.dataDir, s.trashDir, "localhost", limits)
	t.Assert(spool, NotNil)

	purgeChan := make(chan time.Time, 1)
	spool.PurgeChan(purgeChan) // must set before calling Start

	err = spool.Start(sz)
	t.Assert(err, IsNil)

	spool.Write("log", logEntry)
	spool.Write("log", logEntry)
	spool.Write("log", logEntry) // one too many
	files = test.WaitFiles(s.dataDir, 3)
	t.Assert(files, HasLen, 3)

	purgeChan <- time.Now() // cause auto-purge in run()
	time.Sleep(200 * time.Millisecond)
	files = test.WaitFiles(s.dataDir, 2)
	t.Assert(files, HasLen, 2)
}
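In the test the purge tick is sent by hand; in real use the channel would presumably be fed by a ticker. A minimal sketch, assuming PurgeChan accepts a chan time.Time as above and that the interval is ours to choose:

	purgeChan := make(chan time.Time, 1)
	spool.PurgeChan(purgeChan) // must be set before Start, per the test above
	go func() {
		for now := range time.NewTicker(1 * time.Minute).C { // hypothetical interval
			purgeChan <- now // each tick triggers a purge in the spooler's run loop
		}
	}()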
Code example #4
File: data_test.go Project: hg3rdrock/percona-agent
func (s *DiskvSpoolerTestSuite) TestRejectData(t *C) {
	sz := data.NewJsonSerializer()

	// Create and start the spooler.
	spool := data.NewDiskvSpooler(s.logger, s.dataDir, s.trashDir, "localhost", s.limits)
	t.Assert(spool, NotNil)

	err := spool.Start(sz)
	t.Assert(err, IsNil)

	// Spooler should create the bad data dir.
	badDataDir := path.Join(s.trashDir, "data")
	ok := pct.FileExists(badDataDir)
	t.Assert(ok, Equals, true)

	// Spool any data...
	now := time.Now()
	logEntry := &proto.LogEntry{
		Ts:      now,
		Level:   1,
		Service: "mm",
		Msg:     "hello world",
	}
	err = spool.Write("log", logEntry)
	t.Check(err, IsNil)

	// Wait for spooler to write data to disk.
	files := test.WaitFiles(s.dataDir, 1)
	t.Assert(files, HasLen, 1)

	// Get the file name the spooler saved the data as.
	gotFiles := []string{}
	filesChan := spool.Files()
	for file := range filesChan {
		gotFiles = append(gotFiles, file)
	}
	t.Assert(gotFiles, HasLen, 1)

	// Reject the file.  The spooler should move it to the bad data dir
	// then remove it from the list.
	err = spool.Reject(gotFiles[0])
	t.Check(err, IsNil)

	ok = pct.FileExists(path.Join(s.dataDir, gotFiles[0]))
	t.Assert(ok, Equals, false)

	badFile := path.Join(badDataDir, gotFiles[0])
	ok = pct.FileExists(badFile)
	t.Assert(ok, Equals, true)

	spool.Stop()

	/**
	 * Start another spooler now that we have data/bad/file to ensure
	 * that the spooler does not read/index/cache bad files.
	 */

	spool = data.NewDiskvSpooler(s.logger, s.dataDir, s.trashDir, "localhost", s.limits)
	t.Assert(spool, NotNil)
	err = spool.Start(sz)
	t.Assert(err, IsNil)
	spool.Write("log", logEntry)
	files = test.WaitFiles(s.dataDir, 1)
	t.Assert(files, HasLen, 1)

	// There should only be 1 new file in the spool.
	gotFiles = []string{}
	filesChan = spool.Files()
	for file := range filesChan {
		t.Check(file, Not(Equals), badFile)
		gotFiles = append(gotFiles, file)
	}
	t.Assert(gotFiles, HasLen, 1)

	spool.Stop()
}
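Reject is presumably meant for the sender path: files the server refuses are quarantined rather than retried forever. A hypothetical flow (send and isBadData are stand-ins, not part of the package):

	for file := range spool.Files() {
		payload, err := spool.Read(file)
		if err != nil {
			continue
		}
		if err := send(payload); err != nil { // send() is hypothetical
			if isBadData(err) { // hypothetical error classification
				spool.Reject(file) // quarantine under <trashDir>/data
			}
			continue
		}
		spool.Remove(file) // sent successfully, drop it from the spool
	}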
Code example #5
File: data_test.go Project: hg3rdrock/percona-agent
func (s *DiskvSpoolerTestSuite) TestSpoolGzipData(t *C) {
	// Same as TestSpoolData, but use the gzip serializer.

	sz := data.NewJsonGzipSerializer()

	// See TestSpoolData() for description of these tasks.
	spool := data.NewDiskvSpooler(s.logger, s.dataDir, s.trashDir, "localhost", s.limits)
	t.Assert(spool, NotNil)

	err := spool.Start(sz)
	if err != nil {
		t.Fatal(err)
	}

	now := time.Now()
	logEntry := &proto.LogEntry{
		Ts:      now,
		Level:   1,
		Service: "mm",
		Msg:     "hello world",
	}
	spool.Write("log", logEntry)

	files := test.WaitFiles(s.dataDir, 1)
	if len(files) != 1 {
		t.Fatalf("Expected 1 file, got %d\n", len(files))
	}

	gotFiles := []string{}
	filesChan := spool.Files()
	for file := range filesChan {
		gotFiles = append(gotFiles, file)
	}

	gotData, err := spool.Read(gotFiles[0])
	if err != nil {
		t.Error(err)
	}
	if len(gotData) == 0 {
		t.Fatal("1st file is empty")
	}

	protoData := &proto.Data{}
	if err := json.Unmarshal(gotData, protoData); err != nil {
		t.Fatal(err)
	}
	t.Check(protoData.Service, Equals, "log")
	t.Check(protoData.ContentType, Equals, "application/json")
	t.Check(protoData.ContentEncoding, Equals, "gzip")

	// Decompress and decode and we should have the same LogEntry.
	b := bytes.NewBuffer(protoData.Data)
	g, err := gzip.NewReader(b)
	if err != nil {
		t.Error(err)
	}
	d := json.NewDecoder(g)
	gotLogEntry := &proto.LogEntry{}
	if err := d.Decode(gotLogEntry); err != nil {
		t.Error(err)
	}
	// A second decode should return io.EOF: the file holds exactly one entry.
	if err := d.Decode(&proto.LogEntry{}); err != io.EOF {
		t.Error("expected io.EOF, got", err)
	}

	if same, diff := test.IsDeeply(gotLogEntry, logEntry); !same {
		t.Error(diff)
	}

	/**
	 * Do it again to test that serialize is stateless, so to speak.
	 */

	logEntry2 := &proto.LogEntry{
		Ts:      now,
		Level:   2,
		Service: "mm",
		Msg:     "number 2",
	}
	spool.Write("log", logEntry2)

	files = test.WaitFiles(s.dataDir, 2)
	if len(files) != 2 {
		t.Fatalf("Expected 2 files, got %d\n", len(files))
	}
	}

	gotFiles = []string{}
	filesChan = spool.Files()
	for file := range filesChan {
		gotFiles = append(gotFiles, file)
	}

	gotData, err = spool.Read(gotFiles[1]) // 2nd data, 2nd file
	if err != nil {
		t.Error(err)
	}
	if len(gotData) == 0 {
		t.Fatal("2nd file is empty")
	}

	protoData = &proto.Data{}
	if err := json.Unmarshal(gotData, protoData); err != nil {
		t.Fatal(err)
	}
	t.Check(protoData.Service, Equals, "log")
	t.Check(protoData.ContentType, Equals, "application/json")
	t.Check(protoData.ContentEncoding, Equals, "gzip")

	b = bytes.NewBuffer(protoData.Data)
	g, err = gzip.NewReader(b)
	if err != nil {
		t.Error(err)
	}
	d = json.NewDecoder(g)
	gotLogEntry = &proto.LogEntry{}
	if err := d.Decode(gotLogEntry); err != nil {
		t.Error(err)
	}
	// Again, a second decode should return io.EOF.
	if err := d.Decode(&proto.LogEntry{}); err != io.EOF {
		t.Error("expected io.EOF, got", err)
	}

	if same, diff := test.IsDeeply(gotLogEntry, logEntry2); !same {
		t.Error(diff)
	}

	spool.Stop()
}
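The decompress-and-decode sequence appears twice above; a small helper could capture it. A sketch using only the standard library (the name decodeGzipJson is ours, not from the project):

// decodeGzipJson gunzips blob and JSON-decodes the result into v.
func decodeGzipJson(blob []byte, v interface{}) error {
	g, err := gzip.NewReader(bytes.NewReader(blob))
	if err != nil {
		return err
	}
	defer g.Close()
	return json.NewDecoder(g).Decode(v)
}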