Example #1
// NewTestStorage creates a storage instance backed by files in a temporary
// directory. The returned storage is already in serving state. Upon closing the
// returned testutil.Closer, the temporary directory is cleaned up.
func NewTestStorage(t testutil.T, encoding chunkEncoding) (*memorySeriesStorage, testutil.Closer) {
	DefaultChunkEncoding = encoding
	directory := testutil.NewTemporaryDirectory("test_storage", t)
	o := &MemorySeriesStorageOptions{
		MemoryChunks:               1000000,
		MaxChunksToPersist:         1000000,
		PersistenceRetentionPeriod: 24 * time.Hour * 365 * 100, // Enough to never trigger purging.
		PersistenceStoragePath:     directory.Path(),
		CheckpointInterval:         time.Hour,
		SyncStrategy:               Adaptive,
	}
	storage := NewMemorySeriesStorage(o)
	storage.(*memorySeriesStorage).archiveHighWatermark = model.Latest
	if err := storage.Start(); err != nil {
		directory.Close()
		t.Fatalf("Error creating storage: %s", err)
	}

	closer := &testStorageCloser{
		storage:   storage,
		directory: directory,
	}

	return storage.(*memorySeriesStorage), closer
}
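
A minimal usage sketch (not taken from the original repo) of how a test might consume NewTestStorage: the Closer is deferred so the temporary directory is removed even when the test fails. It assumes the test lives in the same package, that testutil.Closer exposes a Close() method like the temporary-directory handle above, and that encoding value 0 is a valid chunkEncoding.

func TestStorageSmoke(t *testing.T) {
	// Encoding 0 is assumed to be a valid chunkEncoding value.
	storage, closer := NewTestStorage(t, 0)
	defer closer.Close()

	// Append a single sample and wait until it has been indexed.
	storage.Append(&model.Sample{
		Timestamp: model.Time(1234),
		Value:     model.SampleValue(3.14),
	})
	storage.WaitForIndexing()
}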
Example #2
func TestLocking(t *testing.T) {
	dir := testutil.NewTemporaryDirectory("test_flock", t)
	defer dir.Close()

	fileName := filepath.Join(dir.Path(), "LOCK")

	if _, err := os.Stat(fileName); err == nil {
		t.Fatalf("File %q unexpectedly exists.", fileName)
	}

	lock, existed, err := New(fileName)
	if err != nil {
		t.Fatalf("Error locking file %q: %s", fileName, err)
	}
	if existed {
		t.Errorf("File %q reported as existing during locking.", fileName)
	}

	// File must now exist.
	if _, err := os.Stat(fileName); err != nil {
		t.Errorf("Could not stat file %q expected to exist: %s", fileName, err)
	}

	// Try to lock again.
	lockedAgain, existed, err := New(fileName)
	if err == nil {
		t.Fatalf("File %q locked twice.", fileName)
	}
	if lockedAgain != nil {
		t.Error("Unsuccessful locking did not return nil.")
	}
	if !existed {
		t.Errorf("Existing file %q not recognized.", fileName)
	}

	if err := lock.Release(); err != nil {
		t.Errorf("Error releasing lock for file %q: %s", fileName, err)
	}

	// File must still exist.
	if _, err := os.Stat(fileName); err != nil {
		t.Errorf("Could not stat file %q expected to exist: %s", fileName, err)
	}

	// Lock existing file.
	lock, existed, err = New(fileName)
	if err != nil {
		t.Fatalf("Error locking file %q: %s", fileName, err)
	}
	if !existed {
		t.Errorf("Existing file %q not recognized.", fileName)
	}

	if err := lock.Release(); err != nil {
		t.Errorf("Error releasing lock for file %q: %s", fileName, err)
	}
}
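
The test above pins down the contract of the flock-style API: New returns (lock, existed, err) and Release drops the lock. Below is a hedged sketch of how a process could guard its data directory with that API at startup; the import path and data directory are placeholders, and only the New/Release behaviour exercised by the test is assumed.

package main

import (
	"log"
	"path/filepath"

	"github.com/prometheus/prometheus/util/flock" // assumed import path
)

func main() {
	dataDir := "/var/lib/myapp" // placeholder data directory
	lockFile := filepath.Join(dataDir, "LOCK")

	// New fails if another process already holds the lock; existed reports
	// whether the lock file was left behind by an earlier run.
	lock, existed, err := flock.New(lockFile)
	if err != nil {
		log.Fatalf("could not acquire lock %q (another instance running?): %s", lockFile, err)
	}
	if existed {
		log.Printf("lock file %q already existed; previous shutdown may have been unclean", lockFile)
	}
	defer func() {
		if err := lock.Release(); err != nil {
			log.Printf("error releasing lock %q: %s", lockFile, err)
		}
	}()

	// ... run the application while holding the lock ...
}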
Example #3
func newTestPersistence(t *testing.T, encoding chunk.Encoding) (*persistence, testutil.Closer) {
	chunk.DefaultEncoding = encoding
	dir := testutil.NewTemporaryDirectory("test_persistence", t)
	p, err := newPersistence(dir.Path(), false, false, func() bool { return false }, 0.1)
	if err != nil {
		dir.Close()
		t.Fatal(err)
	}
	go p.run()
	return p, testutil.NewCallbackCloser(func() {
		p.close()
		dir.Close()
	})
}
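
testutil.NewCallbackCloser bundles the two cleanup steps above (stopping the persistence loop and removing the directory) into a single Closer. A plausible sketch of such a helper follows, assuming the Closer contract is just a parameterless Close() method; this is an illustration, not the actual testutil implementation.

// Closer is assumed to be anything with a parameterless Close method.
type Closer interface {
	Close()
}

// callbackCloser runs an arbitrary cleanup function when closed.
type callbackCloser struct {
	fn func()
}

func (c callbackCloser) Close() { c.fn() }

// NewCallbackCloser wraps fn so tests can defer it as a Closer.
func NewCallbackCloser(fn func()) Closer {
	return callbackCloser{fn: fn}
}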
Example #4
// TestLoop is just a smoke test for the loop method: it checks that we can
// switch the loop on and off without disaster.
func TestLoop(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
	}
	samples := make(model.Samples, 1000)
	for i := range samples {
		samples[i] = &model.Sample{
			Timestamp: model.Time(2 * i),
			Value:     model.SampleValue(float64(i) * 0.2),
		}
	}
	directory := testutil.NewTemporaryDirectory("test_storage", t)
	defer directory.Close()
	o := &MemorySeriesStorageOptions{
		MemoryChunks:               50,
		MaxChunksToPersist:         1000000,
		PersistenceRetentionPeriod: 24 * 7 * time.Hour,
		PersistenceStoragePath:     directory.Path(),
		CheckpointInterval:         250 * time.Millisecond,
		SyncStrategy:               Adaptive,
		MinShrinkRatio:             0.1,
	}
	storage := NewMemorySeriesStorage(o)
	if err := storage.Start(); err != nil {
		t.Errorf("Error starting storage: %s", err)
	}
	for _, s := range samples {
		storage.Append(s)
	}
	storage.WaitForIndexing()
	series, ok := storage.(*memorySeriesStorage).fpToSeries.get(model.Metric{}.FastFingerprint())
	if !ok {
		t.Fatal("could not find series for the empty metric")
	}
	cdsBefore := len(series.chunkDescs)
	time.Sleep(fpMaxWaitDuration + time.Second) // TODO(beorn7): Ugh, need to wait for maintenance to kick in.
	cdsAfter := len(series.chunkDescs)
	storage.Stop()
	if cdsBefore <= cdsAfter {
		t.Errorf(
			"Number of chunk descriptors should have gone down by now. Got before %d, after %d.",
			cdsBefore, cdsAfter,
		)
	}
}
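
The TODO above flags the fixed sleep as fragile. One hedged alternative (a sketch, not from the original repo) is to poll the chunk-descriptor count until it drops below its previous value or a deadline expires, so the test finishes as soon as maintenance has kicked in:

// waitForShrink polls count() every 50ms until it returns a value below
// before or the deadline passes, and returns the last observed value. The
// caller would supply e.g. func() int { return len(series.chunkDescs) },
// the same unsynchronized read the sleep-based version above performs.
func waitForShrink(count func() int, before int, deadline time.Duration) int {
	timeout := time.After(deadline)
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-timeout:
			return count()
		case <-ticker.C:
			if after := count(); after < before {
				return after
			}
		}
	}
}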
Example #5
// benchmarkFuzz is the benchmark version of testFuzz. The storage options are
// set such that evictions, checkpoints, and purging will happen concurrently,
// too. This benchmark will have a very long runtime (up to minutes). You can
// use it as an actual benchmark. Run it like this:
//
//	go test -cpu 1,2,4,8 -run=NONE -bench BenchmarkFuzzChunkType -benchmem
//
// You can also use it as a test for races. In that case, run it like this (will
// make things even slower):
//
//	go test -race -cpu 8 -short -bench BenchmarkFuzzChunkType
func benchmarkFuzz(b *testing.B, encoding chunkEncoding) {
	DefaultChunkEncoding = encoding
	const samplesPerRun = 100000
	rand.Seed(42)
	directory := testutil.NewTemporaryDirectory("test_storage", b)
	defer directory.Close()
	o := &MemorySeriesStorageOptions{
		MemoryChunks:               100,
		MaxChunksToPersist:         1000000,
		PersistenceRetentionPeriod: time.Hour,
		PersistenceStoragePath:     directory.Path(),
		CheckpointInterval:         time.Second,
		SyncStrategy:               Adaptive,
		MinShrinkRatio:             0.1,
	}
	s := NewMemorySeriesStorage(o)
	if err := s.Start(); err != nil {
		b.Fatalf("Error starting storage: %s", err)
	}
	defer s.Stop()

	samples := createRandomSamples("benchmark_fuzz", samplesPerRun*b.N)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		start := samplesPerRun * i
		end := samplesPerRun * (i + 1)
		middle := (start + end) / 2
		for _, sample := range samples[start:middle] {
			s.Append(sample)
		}
		verifyStorageRandom(b, s.(*memorySeriesStorage), samples[:middle])
		for _, sample := range samples[middle:end] {
			s.Append(sample)
		}
		verifyStorageRandom(b, s.(*memorySeriesStorage), samples[:end])
		verifyStorageSequential(b, s.(*memorySeriesStorage), samples)
	}
}
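
The -bench pattern in the doc comment implies per-encoding wrapper benchmarks on top of benchmarkFuzz. A minimal sketch of such wrappers follows; the concrete encoding values 0, 1, and 2 are assumptions, not taken from the original repo.

func BenchmarkFuzzChunkType0(b *testing.B) { benchmarkFuzz(b, 0) }
func BenchmarkFuzzChunkType1(b *testing.B) { benchmarkFuzz(b, 1) }
func BenchmarkFuzzChunkType2(b *testing.B) { benchmarkFuzz(b, 2) }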