Example #1
func testKVPutMultipleTimes(t *testing.T, f putFunc) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)

	for i := 0; i < 10; i++ {
		base := int64(i + 1)

		rev := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(base))
		if rev != base+1 {
			t.Errorf("#%d: rev = %d, want %d", i, rev, base+1)
		}

		kvs, _, err := s.Range([]byte("foo"), nil, 0, 0)
		if err != nil {
			t.Fatal(err)
		}
		wkvs := []storagepb.KeyValue{
			{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: base + 1, Version: base, Lease: base},
		}
		if !reflect.DeepEqual(kvs, wkvs) {
			t.Errorf("#%d: kvs = %+v, want %+v", i, kvs, wkvs)
		}
	}
}
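These table-driven tests take a putFunc callback and call a cleanup helper, neither of which is shown in the examples. A minimal sketch of what they plausibly look like, assuming the KV interface used throughout these examples (names and signatures here are illustrative, not authoritative):

type putFunc func(kv KV, key, value []byte, id lease.LeaseID) int64

func normalPutFunc(kv KV, key, value []byte, id lease.LeaseID) int64 {
	return kv.Put(key, value, id)
}

func txnPutFunc(kv KV, key, value []byte, id lease.LeaseID) int64 {
	// run the put inside a txn so the same test body also covers TxnPut
	txnID := kv.TxnBegin()
	defer kv.TxnEnd(txnID)
	rev, err := kv.TxnPut(txnID, key, value, id)
	if err != nil {
		panic("txn put error")
	}
	return rev
}

// cleanup closes the store and backend and removes the temporary
// file created by backend.NewDefaultTmpBackend.
func cleanup(s KV, b backend.Backend, path string) {
	s.Close()
	b.Close()
	os.Remove(path)
}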
Example #2
func TestKVTxnBlockNonTxnOperations(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)

	tests := []func(){
		func() { s.Range([]byte("foo"), nil, 0, 0) },
		func() { s.Put([]byte("foo"), nil, lease.NoLease) },
		func() { s.DeleteRange([]byte("foo"), nil) },
	}
	for i, tt := range tests {
		id := s.TxnBegin()
		done := make(chan struct{}, 1)
		go func() {
			tt()
			done <- struct{}{}
		}()
		select {
		case <-done:
			t.Fatalf("#%d: operation failed to be blocked", i)
		case <-time.After(10 * time.Millisecond):
		}

		s.TxnEnd(id)
		select {
		case <-done:
		case <-time.After(10 * time.Second):
			testutil.FatalStack(t, fmt.Sprintf("#%d: operation failed to be unblocked", i))
		}
	}

	// only close the backend once we know all the txns are finished
	cleanup(s, b, tmpPath)
}
Example #3
func testKVRangeRev(t *testing.T, f rangeFunc) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)

	kvs := put3TestKVs(s)

	tests := []struct {
		rev  int64
		wrev int64
		wkvs []storagepb.KeyValue
	}{
		{-1, 4, kvs},
		{0, 4, kvs},
		{2, 4, kvs[:1]},
		{3, 4, kvs[:2]},
		{4, 4, kvs},
	}

	for i, tt := range tests {
		kvs, rev, err := f(s, []byte("foo"), []byte("foo3"), 0, tt.rev)
		if err != nil {
			t.Fatal(err)
		}
		if rev != tt.wrev {
			t.Errorf("#%d: rev = %d, want %d", i, rev, tt.wrev)
		}
		if !reflect.DeepEqual(kvs, tt.wkvs) {
			t.Errorf("#%d: kvs = %+v, want %+v", i, kvs, tt.wkvs)
		}
	}
}
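Several of these tests seed the store through put3TestKVs, which is not shown either. A plausible sketch, assuming a fresh store whose first write lands at revision 2 (which is what the CreateRevision and ModRevision values in the expectation tables imply):

// put3TestKVs writes foo, foo1, foo2 and returns the KeyValue
// records the tests expect Range to report back.
func put3TestKVs(s KV) []storagepb.KeyValue {
	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
	s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
	s.Put([]byte("foo2"), []byte("bar2"), lease.NoLease)
	return []storagepb.KeyValue{
		{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		{Key: []byte("foo1"), Value: []byte("bar1"), CreateRevision: 3, ModRevision: 3, Version: 1},
		{Key: []byte("foo2"), Value: []byte("bar2"), CreateRevision: 4, ModRevision: 4, Version: 1},
	}
}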
Example #4
func TestTxnBlockBackendForceCommit(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{})
	defer os.Remove(tmpPath)

	id := s.TxnBegin()

	done := make(chan struct{})
	go func() {
		s.b.ForceCommit()
		done <- struct{}{}
	}()
	select {
	case <-done:
		t.Fatalf("failed to block ForceCommit")
	case <-time.After(100 * time.Millisecond):
	}

	s.TxnEnd(id)
	select {
	case <-done:
	case <-time.After(5 * time.Second): // wait 5 seconds for CI with slow IO
		testutil.FatalStack(t, "failed to execute ForceCommit")
	}
}
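The blocking being tested here presumably comes from lock ordering: TxnBegin takes the backend's batch-transaction lock and holds it until TxnEnd, while ForceCommit needs that same lock to commit, so the goroutine can only make progress once the transaction ends.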
Example #5
// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	oldMaxRevs := watchBatchMaxRevs
	defer func() {
		watchBatchMaxRevs = oldMaxRevs
		s.store.Close()
		os.Remove(tmpPath)
	}()
	batches := 3
	watchBatchMaxRevs = 4

	v := []byte("foo")
	for i := 0; i < watchBatchMaxRevs*batches; i++ {
		s.Put(v, v, lease.NoLease)
	}

	w := s.NewWatchStream()
	w.Watch(v, nil, 1)
	for i := 0; i < batches; i++ {
		if resp := <-w.Chan(); len(resp.Events) != watchBatchMaxRevs {
			t.Fatalf("len(events) = %d, want %d", len(resp.Events), watchBatchMaxRevs)
		}
	}

	s.store.mu.Lock()
	defer s.store.mu.Unlock()
	if size := s.synced.size(); size != 1 {
		t.Errorf("synced size = %d, want 1", size)
	}
}
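With watchBatchMaxRevs lowered to 4 and watchBatchMaxRevs*batches = 12 puts, the unsynced watcher's backlog is delivered as three batches of four events each; once the backlog drains, the sync loop moves the watcher into synced, which is why the final assertion expects a synced size of 1.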
Example #6
func TestKVTxnWrongID(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)

	id := s.TxnBegin()
	wrongid := id + 1

	tests := []func() error{
		func() error {
			_, _, err := s.TxnRange(wrongid, []byte("foo"), nil, 0, 0)
			return err
		},
		func() error {
			_, err := s.TxnPut(wrongid, []byte("foo"), nil, lease.NoLease)
			return err
		},
		func() error {
			_, _, err := s.TxnDeleteRange(wrongid, []byte("foo"), nil)
			return err
		},
		func() error { return s.TxnEnd(wrongid) },
	}
	for i, tt := range tests {
		err := tt()
		if err != ErrTxnIDMismatch {
			t.Fatalf("#%d: err = %+v, want %+v", i, err, ErrTxnIDMismatch)
		}
	}

	err := s.TxnEnd(id)
	if err != nil {
		t.Fatalf("end err = %+v, want %+v", err, nil)
	}
}
Example #7
func TestTxnPut(t *testing.T) {
	// assign arbitrary size
	bytesN := 30
	sliceN := 100
	keys := createBytesSlice(bytesN, sliceN)
	vals := createBytesSlice(bytesN, sliceN)

	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{})
	defer cleanup(s, b, tmpPath)

	for i := 0; i < sliceN; i++ {
		id := s.TxnBegin()
		base := int64(i + 2)

		rev, err := s.TxnPut(id, keys[i], vals[i], lease.NoLease)
		if err != nil {
			t.Error("txn put error")
		}
		if rev != base {
			t.Errorf("#%d: rev = %d, want %d", i, rev, base)
		}

		s.TxnEnd(id)
	}
}
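createBytesSlice is another helper the examples rely on but never define. A minimal sketch, assuming it simply fills sliceN slices of bytesN random bytes each (the use of math/rand is an assumption):

// createBytesSlice returns sliceN byte slices, each bytesN long,
// filled with random data.
func createBytesSlice(bytesN, sliceN int) [][]byte {
	rs := [][]byte{}
	for len(rs) != sliceN {
		v := make([]byte, bytesN)
		if _, err := rand.Read(v); err != nil {
			panic(err)
		}
		rs = append(rs, v)
	}
	return rs
}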
Example #8
func TestTxnBlockBackendForceCommit(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b)
	defer os.Remove(tmpPath)

	id := s.TxnBegin()

	done := make(chan struct{})
	go func() {
		s.b.ForceCommit()
		done <- struct{}{}
	}()
	select {
	case <-done:
		t.Fatalf("failed to block ForceCommit")
	case <-time.After(100 * time.Millisecond):
	}

	s.TxnEnd(id)
	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatalf("failed to execute ForceCommit")
	}
}
Example #9
func TestKVCompactBad(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)

	s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
	s.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
	s.Put([]byte("foo"), []byte("bar2"), lease.NoLease)

	// the rev values in tests are passed to Compact() one by one on the same store
	tests := []struct {
		rev  int64
		werr error
	}{
		{0, nil},
		{1, nil},
		{1, ErrCompacted},
		{4, nil},
		{5, ErrFutureRev},
		{100, ErrFutureRev},
	}
	for i, tt := range tests {
		_, err := s.Compact(tt.rev)
		if err != tt.werr {
			t.Errorf("#%d: compact error = %v, want %v", i, err, tt.werr)
		}
	}
}
Example #10
func TestKVTxnBlockNonTxnOperations(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b)
	defer cleanup(s, b, tmpPath)

	tests := []func(){
		func() { s.Range([]byte("foo"), nil, 0, 0) },
		func() { s.Put([]byte("foo"), nil, lease.NoLease) },
		func() { s.DeleteRange([]byte("foo"), nil) },
	}
	for i, tt := range tests {
		id := s.TxnBegin()
		done := make(chan struct{})
		go func() {
			tt()
			done <- struct{}{}
		}()
		select {
		case <-done:
			t.Fatalf("#%d: operation failed to be blocked", i)
		case <-time.After(10 * time.Millisecond):
		}

		s.TxnEnd(id)
		select {
		case <-done:
		case <-time.After(100 * time.Millisecond):
			t.Fatalf("#%d: operation failed to be unblocked", i)
		}
	}
}
Example #11
func TestKVSnapshot(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)

	wkvs := put3TestKVs(s)

	newPath := "new_test"
	f, err := os.Create(newPath)
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(newPath)

	snap := s.b.Snapshot()
	defer snap.Close()
	_, err = snap.WriteTo(f)
	if err != nil {
		t.Fatal(err)
	}
	f.Close()

	ns := NewStore(b, &lease.FakeLessor{}, nil)
	defer ns.Close()
	kvs, rev, err := ns.Range([]byte("a"), []byte("z"), 0, 0)
	if err != nil {
		t.Errorf("unexpect range error (%v)", err)
	}
	if !reflect.DeepEqual(kvs, wkvs) {
		t.Errorf("kvs = %+v, want %+v", kvs, wkvs)
	}
	if rev != 4 {
		t.Errorf("rev = %d, want %d", rev, 4)
	}
}
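Note that ns is opened on the same backend b rather than on the written snapshot file: writing to new_test only exercises the Snapshot/WriteTo path, while the Range check verifies that the data visible through a second store on that backend matches what was put in.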
Example #12
func testKVRangeBadRev(t *testing.T, f rangeFunc) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b)
	defer cleanup(s, b, tmpPath)

	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
	s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
	s.Put([]byte("foo2"), []byte("bar2"), lease.NoLease)
	if err := s.Compact(3); err != nil {
		t.Fatalf("compact error (%v)", err)
	}

	tests := []struct {
		rev  int64
		werr error
	}{
		{-1, ErrCompacted},
		{2, ErrCompacted},
		{3, ErrCompacted},
		{4, ErrFutureRev},
		{100, ErrFutureRev},
	}
	for i, tt := range tests {
		_, _, err := f(s, []byte("foo"), []byte("foo3"), 0, tt.rev)
		if err != tt.werr {
			t.Errorf("#%d: error = %v, want %v", i, err, tt.werr)
		}
	}
}
Example #13
// TestWatchStreamCancelWatcherByID ensures that Cancel calls the cancel
// func of the watcher with the given id inside watchStream.
func TestWatchStreamCancelWatcherByID(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := WatchableKV(newWatchableStore(b))
	defer cleanup(s, b, tmpPath)

	w := s.NewWatchStream()
	defer w.Close()

	id := w.Watch([]byte("foo"), false, 0)

	tests := []struct {
		cancelID WatchID
		werr     error
	}{
		// no error should be returned when canceling the created watcher.
		{id, nil},
		// a not-exist error should be returned when canceling again.
		{id, ErrWatcherNotExist},
		// a not-exist error should be returned when canceling a bad id.
		{id + 1, ErrWatcherNotExist},
	}

	for i, tt := range tests {
		gerr := w.Cancel(tt.cancelID)

		if gerr != tt.werr {
			t.Errorf("#%d: err = %v, want %v", i, gerr, tt.werr)
		}
	}

	if l := len(w.(*watchStream).cancels); l != 0 {
		t.Errorf("cancels = %d, want 0", l)
	}
}
Example #14
func TestTxnBlockBackendForceCommit(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{})
	defer os.Remove(tmpPath)

	id := s.TxnBegin()

	done := make(chan struct{})
	go func() {
		s.b.ForceCommit()
		done <- struct{}{}
	}()
	select {
	case <-done:
		t.Fatalf("failed to block ForceCommit")
	case <-time.After(100 * time.Millisecond):
	}

	s.TxnEnd(id)
	select {
	case <-done:
	case <-time.After(5 * time.Second): // wait 5 seconds for CI with slow IO
		// print out stack traces of all routines if there is a failure
		stackTrace := make([]byte, 8*1024)
		n := runtime.Stack(stackTrace, true)
		t.Error(string(stackTrace[:n]))
		t.Fatalf("failed to execute ForceCommit")
	}
}
Example #15
func testKVRangeLimit(t *testing.T, f rangeFunc) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)

	kvs := put3TestKVs(s)

	wrev := int64(4)
	tests := []struct {
		limit int64
		wkvs  []storagepb.KeyValue
	}{
		// no limit
		{-1, kvs},
		// no limit
		{0, kvs},
		{1, kvs[:1]},
		{2, kvs[:2]},
		{3, kvs},
		{100, kvs},
	}
	for i, tt := range tests {
		kvs, rev, err := f(s, []byte("foo"), []byte("foo3"), tt.limit, 0)
		if err != nil {
			t.Fatalf("#%d: range error (%v)", i, err)
		}
		if !reflect.DeepEqual(kvs, tt.wkvs) {
			t.Errorf("#%d: kvs = %+v, want %+v", i, kvs, tt.wkvs)
		}
		if rev != wrev {
			t.Errorf("#%d: rev = %d, want %d", i, rev, wrev)
		}
	}
}
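The rangeFunc parameter threaded through testKVRange, testKVRangeRev, testKVRangeBadRev, and testKVRangeLimit lets one table-driven body cover both the plain and the transactional read paths. A sketch of the type and its two likely drivers, assuming the KV interface used in these examples:

type rangeFunc func(kv KV, key, end []byte, limit, rangeRev int64) ([]storagepb.KeyValue, int64, error)

func normalRangeFunc(kv KV, key, end []byte, limit, rangeRev int64) ([]storagepb.KeyValue, int64, error) {
	return kv.Range(key, end, limit, rangeRev)
}

func txnRangeFunc(kv KV, key, end []byte, limit, rangeRev int64) ([]storagepb.KeyValue, int64, error) {
	// wrap the range in a txn so the same assertions cover TxnRange
	id := kv.TxnBegin()
	defer kv.TxnEnd(id)
	return kv.TxnRange(id, key, end, limit, rangeRev)
}

// each shared body then runs once per driver, e.g.:
func TestKVRange(t *testing.T)    { testKVRange(t, normalRangeFunc) }
func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }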
Example #16
func testKVRangeBadRev(t *testing.T, f rangeFunc) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)

	put3TestKVs(s)
	if _, err := s.Compact(4); err != nil {
		t.Fatalf("compact error (%v)", err)
	}

	tests := []struct {
		rev  int64
		werr error
	}{
		{-1, nil}, // <= 0 is most recent store
		{0, nil},
		{1, ErrCompacted},
		{2, ErrCompacted},
		{4, nil},
		{5, ErrFutureRev},
		{100, ErrFutureRev},
	}
	for i, tt := range tests {
		_, _, err := f(s, []byte("foo"), []byte("foo3"), 0, tt.rev)
		if err != tt.werr {
			t.Errorf("#%d: error = %v, want %v", i, err, tt.werr)
		}
	}
}
Example #17
func testKVRange(t *testing.T, f rangeFunc) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{})
	defer cleanup(s, b, tmpPath)

	kvs := put3TestKVs(s)

	wrev := int64(4)
	tests := []struct {
		key, end []byte
		wkvs     []storagepb.KeyValue
	}{
		// get no keys
		{
			[]byte("doo"), []byte("foo"),
			nil,
		},
		// get no keys when key == end
		{
			[]byte("foo"), []byte("foo"),
			nil,
		},
		// get no keys when ranging single key
		{
			[]byte("doo"), nil,
			nil,
		},
		// get all keys
		{
			[]byte("foo"), []byte("foo3"),
			kvs,
		},
		// get partial keys
		{
			[]byte("foo"), []byte("foo1"),
			kvs[:1],
		},
		// get single key
		{
			[]byte("foo"), nil,
			kvs[:1],
		},
	}

	for i, tt := range tests {
		kvs, rev, err := f(s, tt.key, tt.end, 0, 0)
		if err != nil {
			t.Fatal(err)
		}
		if rev != wrev {
			t.Errorf("#%d: rev = %d, want %d", i, rev, wrev)
		}
		if !reflect.DeepEqual(kvs, tt.wkvs) {
			t.Errorf("#%d: kvs = %+v, want %+v", i, kvs, tt.wkvs)
		}
	}
}
Example #18
// BenchmarkWatchableStoreUnsyncedCancel benchmarks cancel-function
// performance for unsynced watchers in a WatchableStore. It creates
// k*N watchers to populate unsynced with a reasonably large number of
// watchers, then measures the time it takes to cancel N watchers out
// of the k*N. The performance is expected to differ depending on the
// unsynced member implementation.
// TODO: k is an arbitrary constant. We need to figure out what factor
// we should put to simulate the real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(be, &lease.FakeLessor{})

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore starts the syncWatchersLoop goroutine,
	// which periodically syncs watchers in the unsynced map. We want
	// to keep watchers in unsynced for this benchmark.
	ws := &watchableStore{
		store:    s,
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		ws.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watcher's startRev to 1,
	// and force watchers to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := ws.NewWatchStream()

	const k int = 2
	benchSampleN := b.N
	watcherN := k * benchSampleN

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// non-0 value to keep watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}

	// random-cancel N watchers to make it not biased towards
	// data structures with an order, such as slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	// cancel N watchers
	for _, idx := range ix[:benchSampleN] {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}
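Because this watchableStore is built by hand, no sync loop runs, so all k*N watchers stay in unsynced for the whole benchmark; the timed loop therefore measures exactly the cost of removing N randomly chosen watchers from the unsynced watcherGroup.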
Example #19
// TestKVTxnOperationInSequence tests that txn range, put, and delete
// operations on a single key work correctly when repeated in sequence.
func TestKVTxnOperationInSequence(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)

	for i := 0; i < 10; i++ {
		id := s.TxnBegin()
		base := int64(i + 1)

		// put foo
		rev, err := s.TxnPut(id, []byte("foo"), []byte("bar"), lease.NoLease)
		if err != nil {
			t.Fatal(err)
		}
		if rev != base+1 {
			t.Errorf("#%d: put rev = %d, want %d", i, rev, base+1)
		}

		kvs, rev, err := s.TxnRange(id, []byte("foo"), nil, 0, base+1)
		if err != nil {
			t.Fatal(err)
		}
		wkvs := []storagepb.KeyValue{
			{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: base + 1, ModRevision: base + 1, Version: 1, Lease: int64(lease.NoLease)},
		}
		if !reflect.DeepEqual(kvs, wkvs) {
			t.Errorf("#%d: kvs = %+v, want %+v", i, kvs, wkvs)
		}
		if rev != base+1 {
			t.Errorf("#%d: range rev = %d, want %d", i, rev, base+1)
		}

		// delete foo
		n, rev, err := s.TxnDeleteRange(id, []byte("foo"), nil)
		if err != nil {
			t.Fatal(err)
		}
		if n != 1 || rev != base+1 {
			t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 1, base+1)
		}

		kvs, rev, err = s.TxnRange(id, []byte("foo"), nil, 0, base+1)
		if err != nil {
			t.Errorf("#%d: range error (%v)", i, err)
		}
		if kvs != nil {
			t.Errorf("#%d: kvs = %+v, want %+v", i, kvs, nil)
		}
		if rev != base+1 {
			t.Errorf("#%d: range rev = %d, want %d", i, rev, base+1)
		}

		s.TxnEnd(id)
	}
}
Example #20
// TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
func TestCancelUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore automatically runs syncWatchers, which
	// would move watchers out of the unsynced map. We want to keep
	// watchers in unsynced so we can test that Cancel removes them.
	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watcher's startRev to 1,
	// and force watchers to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number for watchers
	watcherN := 100

	// create watcherN of watch ids to cancel
	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// use 1 to keep watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}

	for _, idx := range watchIDs {
		if err := w.Cancel(idx); err != nil {
			t.Error(err)
		}
	}

	// After running CancelFunc
	//
	// unsynced should be empty
	// because cancel removes watcher from unsynced
	if size := s.unsynced.size(); size != 0 {
		t.Errorf("unsynced size = %d, want 0", size)
	}
}
Example #21
// TestWatcherRequestProgress ensures a synced watcher correctly
// reports its progress.
func TestWatcherRequestProgress(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore automatically runs syncWatchers to
	// sync watchers in the unsynced map. Here we want to call
	// syncWatchers manually to control when the watcher becomes synced.
	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),
		synced:   newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	notTestKey := []byte("bad")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	badID := WatchID(1000)
	w.RequestProgress(badID)
	select {
	case resp := <-w.Chan():
		t.Fatalf("unexpected %+v", resp)
	default:
	}

	id := w.Watch(notTestKey, nil, 1)
	w.RequestProgress(id)
	select {
	case resp := <-w.Chan():
		t.Fatalf("unexpected %+v", resp)
	default:
	}

	s.syncWatchers()

	w.RequestProgress(id)
	wrs := WatchResponse{WatchID: 0, Revision: 2}
	select {
	case resp := <-w.Chan():
		if !reflect.DeepEqual(resp, wrs) {
			t.Fatalf("got %+v, expect %+v", resp, wrs)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive progress")
	}
}
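The DeepEqual comparison against wrs assumes a WatchResponse shaped roughly like the sketch below; the field set is inferred from how these tests use it and is an assumption, not the package's authoritative definition:

// assumed shape, inferred from usage in these tests
type WatchResponse struct {
	// WatchID identifies the watcher this response belongs to.
	WatchID WatchID
	// Events is empty for a pure progress notification.
	Events []storagepb.Event
	// Revision is the store's current revision at response time.
	Revision int64
}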
Example #22
func TestKVCompactReserveLastValue(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)

	s.Put([]byte("foo"), []byte("bar0"), 1)
	s.Put([]byte("foo"), []byte("bar1"), 2)
	s.DeleteRange([]byte("foo"), nil)
	s.Put([]byte("foo"), []byte("bar2"), 3)

	// the rev values in tests are passed to Compact() one by one on the same store
	tests := []struct {
		rev int64
		// wanted kvs right after the compacted rev
		wkvs []storagepb.KeyValue
	}{
		{
			1,
			[]storagepb.KeyValue{
				{Key: []byte("foo"), Value: []byte("bar0"), CreateRevision: 2, ModRevision: 2, Version: 1, Lease: 1},
			},
		},
		{
			2,
			[]storagepb.KeyValue{
				{Key: []byte("foo"), Value: []byte("bar1"), CreateRevision: 2, ModRevision: 3, Version: 2, Lease: 2},
			},
		},
		{
			3,
			nil,
		},
		{
			4,
			[]storagepb.KeyValue{
				{Key: []byte("foo"), Value: []byte("bar2"), CreateRevision: 5, ModRevision: 5, Version: 1, Lease: 3},
			},
		},
	}
	for i, tt := range tests {
		_, err := s.Compact(tt.rev)
		if err != nil {
			t.Errorf("#%d: unexpect compact error %v", i, err)
		}
		kvs, _, err := s.Range([]byte("foo"), nil, 0, tt.rev+1)
		if err != nil {
			t.Errorf("#%d: unexpect range error %v", i, err)
		}
		if !reflect.DeepEqual(kvs, tt.wkvs) {
			t.Errorf("#%d: kvs = %+v, want %+v", i, kvs, tt.wkvs)
		}
	}
}
Example #23
func TestStoreRev(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{})
	defer os.Remove(tmpPath)

	for i := 1; i <= 3; i++ {
		s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
		if r := s.Rev(); r != int64(i+1) {
			t.Errorf("#%d: rev = %d, want %d", i, r, i+1)
		}
	}
}
Example #24
func TestConsistentWatchableStoreSkip(t *testing.T) {
	idx := indexVal(5)
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newConsistentWatchableStore(b, &idx)
	defer cleanup(s, b, tmpPath)

	s.Put([]byte("foo"), []byte("bar"), NoLease)

	// put is skipped
	rev := s.Put([]byte("foo"), []byte("bar"), NoLease)
	if rev != 0 {
		t.Errorf("rev = %d, want 0", rev)
	}
}
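indexVal here is presumably a fake ConsistentIndexGetter that always reports index 5. The first Put records that index; the second Put arrives with the same index, so the consistent store treats it as already applied and signals the skip by returning rev 0 instead of writing again.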
Example #25
func BenchmarkKVWatcherMemoryUsage(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	watchable := newWatchableStore(be, &lease.FakeLessor{})

	defer cleanup(watchable, be, tmpPath)

	w := watchable.NewWatchStream()

	b.ReportAllocs()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		w.Watch([]byte(fmt.Sprint("foo", i)), nil, 0)
	}
}
Example #26
// Applied > SnapCount should trigger a SaveSnap event
func TestTriggerSnap(t *testing.T) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	defer func() {
		os.RemoveAll(tmpPath)
	}()

	snapc := 10
	st := mockstore.NewRecorder()
	p := mockstorage.NewStorageRecorderStream("")
	srv := &EtcdServer{
		cfg:       &ServerConfig{TickMs: 1},
		snapCount: uint64(snapc),
		r: raftNode{
			Node:        newNodeCommitter(),
			raftStorage: raft.NewMemoryStorage(),
			storage:     p,
			transport:   rafthttp.NewNopTransporter(),
		},
		store:    st,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.kv = dstorage.New(be, &lease.FakeLessor{}, &srv.consistIndex)
	srv.be = be

	srv.start()

	donec := make(chan struct{})
	go func() {
		wcnt := 2 + snapc
		gaction, _ := p.Wait(wcnt)

		// each operation is recorded as a Save
		// (SnapCount+1) * Puts + SaveSnap = (SnapCount+1) * Save + SaveSnap
		if len(gaction) != wcnt {
			t.Fatalf("len(action) = %d, want %d", len(gaction), wcnt)
		}
		if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "SaveSnap"}) {
			t.Errorf("action = %s, want SaveSnap", gaction[wcnt-1])
		}
		close(donec)
	}()

	for i := 0; i < snapc+1; i++ {
		srv.Do(context.Background(), pb.Request{Method: "PUT"})
	}

	srv.Stop()
	<-donec
}
Example #27
// TestWatcherWatchID tests that each watcher provides a unique watchID,
// and that watched events attach the correct watchID.
func TestWatcherWatchID(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := WatchableKV(newWatchableStore(b))
	defer cleanup(s, b, tmpPath)

	w := s.NewWatchStream()
	defer w.Close()

	idm := make(map[WatchID]struct{})

	for i := 0; i < 10; i++ {
		id := w.Watch([]byte("foo"), false, 0)
		if _, ok := idm[id]; ok {
			t.Errorf("#%d: id %d exists", i, id)
		}
		idm[id] = struct{}{}

		s.Put([]byte("foo"), []byte("bar"), NoLease)

		resp := <-w.Chan()
		if resp.WatchID != id {
			t.Errorf("#%d: watch id in event = %d, want %d", i, resp.WatchID, id)
		}

		if err := w.Cancel(id); err != nil {
			t.Error(err)
		}
	}

	s.Put([]byte("foo2"), []byte("bar"), NoLease)

	// unsynced watchers
	for i := 10; i < 20; i++ {
		id := w.Watch([]byte("foo2"), false, 1)
		if _, ok := idm[id]; ok {
			t.Errorf("#%d: id %d exists", i, id)
		}
		idm[id] = struct{}{}

		resp := <-w.Chan()
		if resp.WatchID != id {
			t.Errorf("#%d: watch id in event = %d, want %d", i, resp.WatchID, id)
		}

		if err := w.Cancel(id); err != nil {
			t.Error(err)
		}
	}
}
Example #28
func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s0 := NewStore(b)
	defer os.Remove(tmpPath)

	s0.Put([]byte("foo"), []byte("bar"), NoLease)
	s0.Put([]byte("foo"), []byte("bar1"), NoLease)
	s0.Put([]byte("foo"), []byte("bar2"), NoLease)

	// write a scheduled compaction, but do not actually compact
	rbytes := newRevBytes()
	revToBytes(revision{main: 2}, rbytes)
	tx := s0.b.BatchTx()
	tx.Lock()
	tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
	tx.Unlock()

	s0.Close()

	s1 := NewStore(b)
	s1.Restore()

	// wait for scheduled compaction to be finished
	time.Sleep(100 * time.Millisecond)

	if _, _, err := s1.Range([]byte("foo"), nil, 0, 2); err != ErrCompacted {
		t.Errorf("range on compacted rev error = %v, want %v", err, ErrCompacted)
	}
	// check the key in backend is deleted
	revbytes := newRevBytes()
	// TODO: compact should delete main=2 key too
	revToBytes(revision{main: 1}, revbytes)

	// The disk compaction is done asynchronously and requires more time on slow disk.
	// try 5 times for CI with slow IO.
	for i := 0; i < 5; i++ {
		tx = s1.b.BatchTx()
		tx.Lock()
		ks, _ := tx.UnsafeRange(keyBucketName, revbytes, nil, 0)
		tx.Unlock()
		if len(ks) != 0 {
			time.Sleep(100 * time.Millisecond)
			continue
		}
		return
	}

	t.Errorf("key for rev %+v still exists, want deleted", bytesToRev(revbytes))
}
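newRevBytes, revToBytes, and bytesToRev are internal revision-encoding helpers. A sketch of a compatible encoding, assuming a big-endian main/sub layout with a one-byte separator (the '_' byte and the exact lengths are assumptions):

import "encoding/binary"

type revision struct {
	main int64 // main revision, incremented per transaction
	sub  int64 // sub index within a transaction
}

const revBytesLen = 8 + 1 + 8

func newRevBytes() []byte {
	return make([]byte, revBytesLen)
}

func revToBytes(rev revision, bytes []byte) {
	binary.BigEndian.PutUint64(bytes, uint64(rev.main))
	bytes[8] = '_'
	binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub))
}

func bytesToRev(bytes []byte) revision {
	return revision{
		main: int64(binary.BigEndian.Uint64(bytes[0:8])),
		sub:  int64(binary.BigEndian.Uint64(bytes[9:])),
	}
}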
Example #29
func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
	tests := []struct {
		key, end []byte

		wrev int64
		wN   int64
	}{
		{
			[]byte("foo"), nil,
			5, 1,
		},
		{
			[]byte("foo"), []byte("foo1"),
			5, 1,
		},
		{
			[]byte("foo"), []byte("foo2"),
			5, 2,
		},
		{
			[]byte("foo"), []byte("foo3"),
			5, 3,
		},
		{
			[]byte("foo3"), []byte("foo8"),
			4, 0,
		},
		{
			[]byte("foo3"), nil,
			4, 0,
		},
	}

	for i, tt := range tests {
		b, tmpPath := backend.NewDefaultTmpBackend()
		s := NewStore(b, &lease.FakeLessor{}, nil)

		s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
		s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
		s.Put([]byte("foo2"), []byte("bar2"), lease.NoLease)

		n, rev := f(s, tt.key, tt.end)
		if n != tt.wN || rev != tt.wrev {
			t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, tt.wN, tt.wrev)
		}

		cleanup(s, b, tmpPath)
	}
}
Example #30
func BenchmarkStorePut(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(be, &lease.FakeLessor{})
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Put(keys[i], vals[i], lease.NoLease)
	}
}