Example #1
// TestTimestampCacheLayeredIntervals verifies the maximum timestamp
// is chosen if previous entries have ranges which are layered over
// each other.
func TestTimestampCacheLayeredIntervals(t *testing.T) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxDrift(maxClockSkew)
	tc := NewTimestampCache(clock)
	manual = hlc.ManualClock(maxClockSkew.Nanoseconds() + 1)

	adTS := clock.Now()
	tc.Add(engine.Key("a"), engine.Key("d"), adTS)

	beTS := clock.Now()
	tc.Add(engine.Key("b"), engine.Key("e"), beTS)

	cTS := clock.Now()
	tc.Add(engine.Key("c"), nil, cTS)

	// Try different sub ranges.
	if !tc.GetMax(engine.Key("a"), nil).Equal(adTS) {
		t.Error("expected \"a\" to have adTS timestamp")
	}
	if !tc.GetMax(engine.Key("b"), nil).Equal(beTS) {
		t.Error("expected \"b\" to have beTS timestamp")
	}
	if !tc.GetMax(engine.Key("c"), nil).Equal(cTS) {
		t.Error("expected \"b\" to have cTS timestamp")
	}
	if !tc.GetMax(engine.Key("d"), nil).Equal(beTS) {
		t.Error("expected \"d\" to have beTS timestamp")
	}
	if !tc.GetMax(engine.Key("a"), engine.Key("b")).Equal(adTS) {
		t.Error("expected \"a\"-\"b\" to have adTS timestamp")
	}
	if !tc.GetMax(engine.Key("a"), engine.Key("c")).Equal(beTS) {
		t.Error("expected \"a\"-\"c\" to have beTS timestamp")
	}
	if !tc.GetMax(engine.Key("a"), engine.Key("d")).Equal(cTS) {
		t.Error("expected \"a\"-\"d\" to have cTS timestamp")
	}
	if !tc.GetMax(engine.Key("b"), engine.Key("d")).Equal(cTS) {
		t.Error("expected \"b\"-\"d\" to have cTS timestamp")
	}
	if !tc.GetMax(engine.Key("c"), engine.Key("d")).Equal(cTS) {
		t.Error("expected \"c\"-\"d\" to have cTS timestamp")
	}
	if !tc.GetMax(engine.Key("c0"), engine.Key("d")).Equal(beTS) {
		t.Error("expected \"c0\"-\"d\" to have beTS timestamp")
	}
}
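A minimal model of the layered-interval semantics exercised above, assuming only the standard bytes package: a GetMax-style lookup returns the maximum timestamp across all stored intervals overlapping the half-open query range, with a nil end key denoting a single-key lookup. This is an illustrative sketch with hypothetical names, not the real TimestampCache, which is backed by an interval cache with a high water mark and eviction.

// span models one cached interval; end == nil means the single key at start.
type span struct {
	start, end []byte
	wallTime   int64
}

// getMax returns the maximum wall time among spans overlapping
// [start, end); end == nil queries the single key at start.
func getMax(spans []span, start, end []byte) int64 {
	if end == nil {
		end = append(append([]byte(nil), start...), 0) // next possible key
	}
	var maxTS int64
	for _, s := range spans {
		sEnd := s.end
		if sEnd == nil {
			sEnd = append(append([]byte(nil), s.start...), 0)
		}
		// Half-open intervals overlap iff each starts before the other ends.
		if bytes.Compare(s.start, end) < 0 && bytes.Compare(start, sEnd) < 0 && s.wallTime > maxTS {
			maxTS = s.wallTime
		}
	}
	return maxTS
}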
Example #2
// createTestStore creates a test store using an in-memory
// engine. Returns the store and the manual unix nanos time of the
// store's clock. If createDefaultRange is true, creates a single
// range from key "a" to key "z" with a replica descriptor for
// StoreID = 1. The caller is responsible for closing the store on
// exit.
func createTestStore(createDefaultRange bool, t *testing.T) (*Store, *hlc.ManualClock) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := NewStore(clock, eng, nil, nil)
	if err := store.Bootstrap(proto.StoreIdent{StoreID: 1}); err != nil {
		t.Fatal(err)
	}
	// Set the store DB so new ranges can be allocated as needed for tests.
	db, _ := newTestDB(store)
	store.db = db
	// Create system key range for allocations.
	meta := store.BootstrapRangeMetadata()
	meta.StartKey = engine.KeySystemPrefix
	meta.EndKey = engine.PrefixEndKey(engine.KeySystemPrefix)
	_, err := store.CreateRange(meta)
	if err != nil {
		t.Fatal(err)
	}
	// Now that the system key range is available, initialize the store.
	if err := store.Init(); err != nil {
		t.Fatal(err)
	}
	// If requested, create a default range for tests from "a"-"z".
	if createDefaultRange {
		replica := proto.Replica{StoreID: 1}
		_, err := store.CreateRange(store.NewRangeMetadata(engine.Key("a"), engine.Key("z"), []proto.Replica{replica}))
		if err != nil {
			t.Fatal(err)
		}
	}
	return store, &manual
}
Example #3
// TestCoordinatorHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestCoordinatorHeartbeat(t *testing.T) {
	db, _, manual := createTestDB(t)
	defer db.Close()

	// Set heartbeat interval to 1ms for testing.
	db.coordinator.heartbeatInterval = 1 * time.Millisecond

	txnID := engine.Key("txn")
	<-db.Put(createPutRequest(engine.Key("a"), []byte("value"), txnID))

	// Verify 3 heartbeats.
	var heartbeatTS proto.Timestamp
	for i := 0; i < 3; i++ {
		if err := util.IsTrueWithin(func() bool {
			ok, txn, err := getTxn(db, engine.MakeKey(engine.KeyLocalTransactionPrefix, txnID))
			if !ok || err != nil {
				return false
			}
			// Advance clock by 1ns.
			// Locking the coordinator to prevent a data race.
			db.coordinator.Lock()
			*manual = hlc.ManualClock(*manual + 1)
			db.coordinator.Unlock()
			if heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return true
			}
			return false
		}, 50*time.Millisecond); err != nil {
			t.Error("expected initial heartbeat within 50ms")
		}
	}
}
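The test leans on util.IsTrueWithin to poll a condition until it holds or a time budget elapses. Below is a sketch of such a helper under that assumed contract (the real implementation's polling cadence may differ), using only the standard time and fmt packages:

// isTrueWithin polls trueFunc until it returns true or duration elapses,
// returning an error on timeout. Sketch of an IsTrueWithin-style helper.
func isTrueWithin(trueFunc func() bool, duration time.Duration) error {
	deadline := time.Now().Add(duration)
	for time.Now().Before(deadline) {
		if trueFunc() {
			return nil
		}
		time.Sleep(duration / 100) // poll at a small fraction of the budget
	}
	return fmt.Errorf("condition failed to evaluate true within %s", duration)
}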
Example #4
// TestCoordinatorGC verifies that the coordinator cleans up extant
// transactions after the client timeout has elapsed since lastUpdateTS.
func TestCoordinatorGC(t *testing.T) {
	db, _, manual := createTestDB(t)
	defer db.Close()

	// Set heartbeat interval to 1ms for testing.
	db.coordinator.heartbeatInterval = 1 * time.Millisecond

	txnID := engine.Key("txn")
	<-db.Put(createPutRequest(engine.Key("a"), []byte("value"), txnID))

	// Now, advance clock past the default client timeout.
	// Locking the coordinator to prevent a data race.
	db.coordinator.Lock()
	*manual = hlc.ManualClock(defaultClientTimeout.Nanoseconds() + 1)
	db.coordinator.Unlock()

	if err := util.IsTrueWithin(func() bool {
		// Locking the coordinator to prevent a data race.
		db.coordinator.Lock()
		_, ok := db.coordinator.txns[string(txnID)]
		db.coordinator.Unlock()
		return !ok
	}, 50*time.Millisecond); err != nil {
		t.Error("expected garbage collection")
	}
}
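Conceptually, the behavior verified here is a periodic sweep over the coordinator's transaction map. The sketch below models that sweep; the coordinatorModel and txnMetaModel types and the gcSweep name are hypothetical stand-ins, not the coordinator's actual internals (assumes standard sync and time plus the proto package):

// txnMetaModel is a stand-in for per-transaction coordinator metadata.
type txnMetaModel struct {
	lastUpdateTS proto.Timestamp
}

// coordinatorModel is a stand-in for the coordinator's txn bookkeeping.
type coordinatorModel struct {
	sync.Mutex
	txns map[string]*txnMetaModel
}

// gcSweep deletes metadata for transactions whose last update is older
// than timeout at the given current wall time.
func (c *coordinatorModel) gcSweep(nowWall int64, timeout time.Duration) {
	c.Lock()
	defer c.Unlock()
	for id, meta := range c.txns {
		if nowWall-meta.lastUpdateTS.WallTime > timeout.Nanoseconds() {
			delete(c.txns, id)
		}
	}
}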
Example #5
// TestCoordinatorAddRequest verifies that adding a request creates
// transaction metadata and that adding multiple requests with the
// same transaction ID updates the last update timestamp.
func TestCoordinatorAddRequest(t *testing.T) {
	db, clock, manual := createTestDB(t)
	defer db.Close()

	txnID := engine.Key("txn")
	putReq := createPutRequest(engine.Key("a"), []byte("value"), txnID)

	// Put request will create a new transaction.
	<-db.Put(putReq)
	// Locking the coordinator to prevent a data race.
	db.coordinator.Lock()
	txnMeta, ok := db.coordinator.txns[string(txnID)]
	db.coordinator.Unlock()
	if !ok {
		t.Fatal("expected a transaction to be created on coordinator")
	}
	ts := txnMeta.lastUpdateTS
	if !ts.Less(clock.Now()) {
		t.Errorf("expected earlier last update timestamp; got: %+v", ts)
	}

	// Advance time and send another put request.
	// Locking the coordinator to prevent a data race.
	db.coordinator.Lock()
	*manual = hlc.ManualClock(1)
	db.coordinator.Unlock()
	<-db.Put(putReq)
	// Locking the coordinator to prevent a data race.
	db.coordinator.Lock()
	if len(db.coordinator.txns) != 1 {
		t.Errorf("expected length of transactions map to be 1; got %d", len(db.coordinator.txns))
	}
	txnMeta = db.coordinator.txns[string(txnID)]
	db.coordinator.Unlock()
	if !ts.Less(txnMeta.lastUpdateTS) || txnMeta.lastUpdateTS.WallTime != int64(*manual) {
		t.Errorf("expected last update time to advance; got %+v", txnMeta.lastUpdateTS)
	}
}
Example #6
// createTestRangeWithClock creates a range using a blocking engine.
// Returns the range, the manual unix nanos time of the range's
// clock, the clock itself, and the blocking engine.
func createTestRangeWithClock(t *testing.T) (*Range, *hlc.ManualClock, *hlc.Clock, *blockingEngine) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := newBlockingEngine()
	rng := NewRange(&proto.RangeMetadata{}, clock, eng, nil, nil)
	rng.Start()
	return rng, &manual, clock, eng
}
Example #7
func TestLocalKVLookupReplica(t *testing.T) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	kv := NewLocalKV()
	db := NewDB(kv, clock)
	store := storage.NewStore(clock, eng, db, nil)
	if err := store.Bootstrap(proto.StoreIdent{StoreID: 1}); err != nil {
		t.Fatal(err)
	}
	kv.AddStore(store)
	meta := store.BootstrapRangeMetadata()
	meta.StartKey = engine.KeySystemPrefix
	meta.EndKey = engine.PrefixEndKey(engine.KeySystemPrefix)
	if _, err := store.CreateRange(meta); err != nil {
		t.Fatal(err)
	}
	if err := store.Init(); err != nil {
		t.Fatal(err)
	}
	// Create two new stores with ranges we care about.
	var s [2]*storage.Store
	ranges := []struct {
		storeID    int32
		start, end engine.Key
	}{
		{2, engine.Key("a"), engine.Key("c")},
		{3, engine.Key("x"), engine.Key("z")},
	}
	for i, rng := range ranges {
		s[i] = storage.NewStore(clock, eng, db, nil)
		s[i].Ident.StoreID = rng.storeID
		replica := proto.Replica{StoreID: rng.storeID}
		_, err := s[i].CreateRange(store.NewRangeMetadata(rng.start, rng.end, []proto.Replica{replica}))
		if err != nil {
			t.Fatal(err)
		}
		kv.AddStore(s[i])
	}

	if r, err := kv.lookupReplica(engine.Key("a"), engine.Key("c")); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if r, err := kv.lookupReplica(engine.Key("b"), nil); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if r, err := kv.lookupReplica(engine.Key("b"), engine.Key("d")); r != nil || err == nil {
		t.Errorf("expected store 0 and error got %d", r.StoreID)
	}
	if r, err := kv.lookupReplica(engine.Key("x"), engine.Key("z")); r.StoreID != s[1].Ident.StoreID {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
	if r, err := kv.lookupReplica(engine.Key("y"), nil); r.StoreID != s[1].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
}
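The contract exercised above: lookupReplica matches only when a store's range fully contains the queried span, which is why "b"-"d" fails while "b" alone succeeds. A sketch of that containment predicate (a hypothetical helper, assuming the standard bytes package):

// containsRange reports whether a range [rStart, rEnd) fully contains
// the query span [start, end); end == nil means the single key start.
func containsRange(rStart, rEnd, start, end engine.Key) bool {
	if bytes.Compare(start, rStart) < 0 {
		return false
	}
	if end == nil {
		return bytes.Compare(start, rEnd) < 0
	}
	return bytes.Compare(end, rEnd) <= 0
}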
Example #8
// TestTimestampCacheEviction verifies the eviction of
// timestamp cache entries after minCacheWindow interval.
func TestTimestampCacheEviction(t *testing.T) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxDrift(maxClockSkew)
	tc := NewTimestampCache(clock)

	// Increment time to the maxClockSkew high water mark + 1.
	manual = hlc.ManualClock(maxClockSkew.Nanoseconds() + 1)
	aTS := clock.Now()
	tc.Add(engine.Key("a"), nil, aTS)

	// Increment time by the minCacheWindow and add another key.
	manual = hlc.ManualClock(int64(manual) + minCacheWindow.Nanoseconds())
	tc.Add(engine.Key("b"), nil, clock.Now())

	// Verify looking up key "c" returns the new high water mark ("a"'s timestamp).
	if !tc.GetMax(engine.Key("c"), nil).Equal(aTS) {
		t.Errorf("expected high water mark %+v, got %+v", aTS, tc.GetMax(engine.Key("c"), nil))
	}
}
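The invariant behind this test: when an entry ages out of the cache, its timestamp is folded into a cache-wide high water mark, so lookups of unrelated keys such as "c" still observe it. A sketch of that fold, using a hypothetical sketchTSCache stand-in:

// sketchTSCache is a stand-in illustrating only the high water mark.
type sketchTSCache struct {
	highWater proto.Timestamp
}

// onEvict folds an evicted entry's timestamp into the high water mark,
// so no later lookup of any key can return less than an evicted entry's
// timestamp.
func (tc *sketchTSCache) onEvict(ts proto.Timestamp) {
	if tc.highWater.Less(ts) {
		tc.highWater = ts
	}
}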
Example #9
// createTestStore creates a test store using an in-memory
// engine. Returns the store and the manual unix nanos time of the
// store's clock. A single range from key "a" to key "z" is set up
// in the store with a default replica descriptor (i.e. StoreID = 0,
// RangeID = 1, etc.). The caller is responsible for closing the
// store on exit.
func createTestStore(t *testing.T) (*Store, *hlc.ManualClock) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := NewStore(clock, eng, nil)
	replica := proto.Replica{RangeID: 1}
	_, err := store.CreateRange(engine.Key("a"), engine.Key("z"), []proto.Replica{replica})
	if err != nil {
		t.Fatal(err)
	}
	return store, &manual
}
Example #10
func TestTimestampCacheClear(t *testing.T) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxDrift(maxClockSkew)
	tc := NewTimestampCache(clock)

	// Increment time to the maxClockSkew high water mark + 1.
	manual = hlc.ManualClock(maxClockSkew.Nanoseconds() + 1)
	ts := clock.Now()
	tc.Add(engine.Key("a"), nil, ts)

	// Clear the cache, which will reset the high water mark to
	// the current time + maxClockSkew.
	tc.Clear(clock)

	// Fetching any key should now return the current time + maxClockSkew.
	expTS := clock.Timestamp()
	expTS.WallTime += maxClockSkew.Nanoseconds()
	if !tc.GetMax(engine.Key("a"), nil).Equal(expTS) {
		t.Error("expected \"a\" to have cleared timestamp")
	}
}
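Clear's post-condition, per the test: the high water mark jumps to the clock's current time plus the maximum clock skew, so every subsequent lookup returns at least that timestamp. Expressed against the sketchTSCache stand-in from the eviction sketch above (the clear name is hypothetical):

// clear resets the sketch cache, raising the high water mark to the
// current time plus the maximum allowed clock skew.
func (tc *sketchTSCache) clear(clock *hlc.Clock, maxSkew time.Duration) {
	tc.highWater = clock.Timestamp()
	tc.highWater.WallTime += maxSkew.Nanoseconds()
}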
Example #11
// createTestDB creates a test kv.DB using a LocalKV object built with
// a store using an in-memory engine. Returns the created kv.DB, its
// clock, and the clock's manual time.
func createTestDB(t *testing.T) (*DB, *hlc.Clock, *hlc.ManualClock) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := storage.NewStore(clock, eng, nil)
	store.Ident.StoreID = 1
	replica := proto.Replica{StoreID: 1, RangeID: 1}
	_, err := store.CreateRange(engine.KeyMin, engine.KeyMax, []proto.Replica{replica})
	if err != nil {
		t.Fatal(err)
	}
	kv := NewLocalKV()
	kv.AddStore(store)
	db := NewDB(kv, clock)
	return db, clock, &manual
}
Example #12
// TestEndTransactionWithErrors verifies various error conditions
// are checked such as transaction already being committed or
// aborted, or timestamp or epoch regression.
func TestEndTransactionWithErrors(t *testing.T) {
	rng, mc, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()

	regressTS := clock.Now()
	*mc = hlc.ManualClock(1)
	txn := NewTransaction(engine.Key(""), 1, proto.SERIALIZABLE, clock)

	testCases := []struct {
		key          engine.Key
		existStatus  proto.TransactionStatus
		existEpoch   int32
		existTS      proto.Timestamp
		expErrRegexp string
	}{
		{engine.Key("a"), proto.COMMITTED, txn.Epoch, txn.Timestamp, "txn {.*}: already committed"},
		{engine.Key("b"), proto.ABORTED, txn.Epoch, txn.Timestamp, "txn {.*}: already aborted"},
		{engine.Key("c"), proto.PENDING, txn.Epoch + 1, txn.Timestamp, "txn {.*}: epoch regression: 0"},
		{engine.Key("d"), proto.PENDING, txn.Epoch, regressTS, "txn {.*}: timestamp regression: {WallTime:1 Logical:0 .*}"},
	}
	for _, test := range testCases {
		// Establish existing txn state by writing directly to range engine.
		var existTxn proto.Transaction
		gogoproto.Merge(&existTxn, txn)
		existTxn.ID = test.key
		existTxn.Status = test.existStatus
		existTxn.Epoch = test.existEpoch
		existTxn.Timestamp = test.existTS
		txnKey := engine.MakeKey(engine.KeyLocalTransactionPrefix, test.key)
		if err := engine.PutProto(rng.engine, txnKey, &existTxn); err != nil {
			t.Fatal(err)
		}

		// End the transaction, verify expected error.
		txn.ID = test.key
		args, reply := endTxnArgs(txn, true, 0)
		args.Timestamp = txn.Timestamp
		err := rng.ReadWriteCmd("EndTransaction", args, reply)
		if err == nil {
			t.Errorf("expected error matching %q", test.expErrRegexp)
		} else {
			if matched, regexpErr := regexp.MatchString(test.expErrRegexp, err.Error()); !matched || regexpErr != nil {
				t.Errorf("expected error to match %q (%v): %v", test.expErrRegexp, regexpErr, err.Error())
			}
		}
	}
}
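The four cases reduce to a short sequence of guards against the persisted transaction record. The sketch below is inferred from the expected error strings above, not taken from the range implementation; it assumes only fmt and the proto package:

// checkEndTransaction models the guards exercised above. Illustrative
// only; guard order and message formatting are inferred from the tests.
func checkEndTransaction(exist, args *proto.Transaction) error {
	if exist.Status == proto.COMMITTED {
		return fmt.Errorf("txn %+v: already committed", *exist)
	}
	if exist.Status == proto.ABORTED {
		return fmt.Errorf("txn %+v: already aborted", *exist)
	}
	if args.Epoch < exist.Epoch {
		return fmt.Errorf("txn %+v: epoch regression: %d", *exist, args.Epoch)
	}
	if exist.Timestamp.Less(args.Timestamp) {
		// The record can only ever be pushed forward; a record behind the
		// request's timestamp indicates regression.
		return fmt.Errorf("txn %+v: timestamp regression: %+v", *exist, args.Timestamp)
	}
	return nil
}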
Example #13
// TestStoreExecuteCmdWithClockDrift verifies that if the request
// specifies a timestamp further into the future than the node's
// maximum allowed clock drift, the cmd fails with an error.
func TestStoreExecuteCmdWithClockDrift(t *testing.T) {
	store, mc := createTestStore(t)
	defer store.Close()
	args, reply := getArgs("a", 1)

	// Set clock to time 1.
	*mc = hlc.ManualClock(1)
	// Set clock max drift to 250ms.
	maxDrift := 250 * time.Millisecond
	store.clock.SetMaxDrift(maxDrift)
	// Set args timestamp to exceed max drift.
	args.Timestamp = store.clock.Now()
	args.Timestamp.WallTime += maxDrift.Nanoseconds() + 1
	err := store.ExecuteCmd("Get", args, reply)
	if err == nil {
		t.Error("expected max drift clock error")
	}
}
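The guard under test: a request may not carry a timestamp further ahead of the node's clock than the configured maximum drift. As a predicate (an illustrative sketch, not the store's actual code path):

// exceedsMaxDrift reports whether a request timestamp is too far ahead
// of the local clock to be admitted.
func exceedsMaxDrift(args, now proto.Timestamp, maxDrift time.Duration) bool {
	return args.WallTime > now.WallTime+maxDrift.Nanoseconds()
}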
Example #14
// TestStoreExecuteCmdWithZeroTime verifies that a zero timestamp
// causes the command to assume the node's wall time.
func TestStoreExecuteCmdWithZeroTime(t *testing.T) {
	store, mc := createTestStore(t)
	defer store.Close()
	args, reply := getArgs("a", 1)

	// Set clock to time 1.
	*mc = hlc.ManualClock(1)
	err := store.ExecuteCmd("Get", args, reply)
	if err != nil {
		t.Fatal(err)
	}
	// The logical time will increase over the course of the command
	// execution, so we can only rely on comparing the WallTime.
	if reply.Timestamp.WallTime != store.clock.Timestamp().WallTime {
		t.Errorf("expected reply to have store clock time %+v; got %+v",
			store.clock.Timestamp(), reply.Timestamp)
	}
}
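The companion default: a request arriving with a zero timestamp is stamped with the node's current HLC time before execution. A hypothetical sketch of that substitution:

// fillTimestamp substitutes the node's current HLC time when a request
// carries a zero timestamp.
func fillTimestamp(args *proto.Timestamp, clock *hlc.Clock) {
	if args.WallTime == 0 && args.Logical == 0 {
		*args = clock.Now()
	}
}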
Example #15
// TestInternalPushTxnHeartbeatTimeout verifies that a txn which
// hasn't received a heartbeat within 2x the heartbeat interval can
// be aborted.
func TestInternalPushTxnHeartbeatTimeout(t *testing.T) {
	rng, mc, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()

	ts := proto.Timestamp{WallTime: 1}
	ns := DefaultHeartbeatInterval.Nanoseconds()
	testCases := []struct {
		heartbeat   *proto.Timestamp // nil indicates no heartbeat
		currentTime int64            // nanoseconds
		expSuccess  bool
	}{
		{nil, 0, false},
		{nil, ns, false},
		{nil, ns*2 - 1, false},
		{nil, ns * 2, false},
		{&ts, ns*2 + 1, false},
		{&ts, ns*2 + 2, true},
	}

	for i, test := range testCases {
		key := engine.Key(fmt.Sprintf("key-%d", i))
		pusher := NewTransaction(engine.MakeKey(key, []byte{1}), 1, proto.SERIALIZABLE, clock)
		pushee := NewTransaction(engine.MakeKey(key, []byte{2}), 1, proto.SERIALIZABLE, clock)
		pushee.Priority = 2
		pusher.Priority = 1 // Pusher won't win based on priority.

		// First, establish "start" of existing pushee's txn via heartbeat.
		if test.heartbeat != nil {
			hbArgs, hbReply := heartbeatArgs(pushee, 0)
			hbArgs.Timestamp = *test.heartbeat
			if err := rng.ReadWriteCmd("InternalHeartbeatTxn", hbArgs, hbReply); err != nil {
				t.Fatal(err)
			}
		}

		// Now, attempt to push the transaction with clock set to "currentTime".
		*mc = hlc.ManualClock(test.currentTime)
		args, reply := pushTxnArgs(pusher, pushee, true, 0)
		err := rng.ReadWriteCmd("InternalPushTxn", args, reply)
		if test.expSuccess != (err == nil) {
			t.Errorf("expected success on trial %d? %t; got err %v", i, test.expSuccess, err)
		}
	}
}
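The abandonment rule the cases encode: a pushee may be aborted only when the current time is strictly beyond its last heartbeat plus twice the heartbeat interval (so ns*2 + 1 after a heartbeat at wall time 1 is still too early). As a sketch:

// isExpired reports whether a pushee transaction may be aborted because
// it missed two heartbeat intervals. Illustrative sketch of the rule.
func isExpired(currentTime, lastHeartbeat int64, interval time.Duration) bool {
	return currentTime > lastHeartbeat+2*interval.Nanoseconds()
}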
Example #16
// TestEndTransactionWithPushedTimestamp verifies that a txn can be
// ended (both commit and abort) correctly when the commit timestamp
// is greater than the transaction timestamp, depending on the
// isolation level.
func TestEndTransactionWithPushedTimestamp(t *testing.T) {
	rng, mc, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()

	testCases := []struct {
		commit    bool
		isolation proto.IsolationType
		expErr    bool
	}{
		{true, proto.SERIALIZABLE, true},
		{true, proto.SNAPSHOT, false},
		{false, proto.SERIALIZABLE, false},
		{false, proto.SNAPSHOT, false},
	}
	key := []byte("a")
	for _, test := range testCases {
		txn := NewTransaction(key, 1, test.isolation, clock)
		// End the transaction with args timestamp moved forward in time.
		args, reply := endTxnArgs(txn, test.commit, 0)
		*mc = hlc.ManualClock(1)
		args.Timestamp = clock.Now()
		err := rng.ReadWriteCmd("EndTransaction", args, reply)
		if test.expErr {
			if err == nil {
				t.Errorf("expected error")
			} else if _, ok := err.(*proto.TransactionRetryError); !ok {
				t.Errorf("expected retry error; got %s", err)
			}
		} else {
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			expStatus := proto.COMMITTED
			if !test.commit {
				expStatus = proto.ABORTED
			}
			if reply.Txn.Status != expStatus {
				t.Errorf("expected transaction status to be %s; got %s", expStatus, reply.Txn.Status)
			}
		}
	}
}
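The decision table collapses to a single rule: only a SERIALIZABLE commit whose timestamp was pushed past the transaction's original timestamp must retry; SNAPSHOT commits and all aborts proceed. As a sketch:

// needsRetry reports whether ending a transaction at commitTS must fail
// with a retry error, per the table above. Illustrative sketch.
func needsRetry(commit bool, isolation proto.IsolationType, txnTS, commitTS proto.Timestamp) bool {
	return commit && isolation == proto.SERIALIZABLE && txnTS.Less(commitTS)
}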
Example #17
// createTestDB creates a test kv.DB using a LocalKV object built with
// a store using an in-memory engine. Returns the created kv.DB, its
// clock, and the clock's manual time.
func createTestDB(t *testing.T) (*DB, *hlc.Clock, *hlc.ManualClock) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	kv := NewLocalKV()
	db := NewDB(kv, clock)
	store := storage.NewStore(clock, eng, db, nil)
	if err := store.Bootstrap(proto.StoreIdent{StoreID: 1}); err != nil {
		t.Fatal(err)
	}
	kv.AddStore(store)
	_, err := store.CreateRange(store.BootstrapRangeMetadata())
	if err != nil {
		t.Fatal(err)
	}
	if err := store.Init(); err != nil {
		t.Fatal(err)
	}
	return db, clock, &manual
}
Example #18
// TestRangeUseTSCache verifies that write timestamps are upgraded
// based on the read timestamp cache.
func TestRangeUseTSCache(t *testing.T) {
	rng, mc, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()
	// Set clock to time 1s and do the read.
	t0 := 1 * time.Second
	*mc = hlc.ManualClock(t0.Nanoseconds())
	args, reply := getArgs("a", 0)
	args.Timestamp = clock.Now()
	err := rng.ReadOnlyCmd("Get", args, reply)
	if err != nil {
		t.Error(err)
	}
	pArgs, pReply := putArgs("a", "value", 0)
	err = rng.ReadWriteCmd("Put", pArgs, pReply)
	if err != nil {
		t.Fatal(err)
	}
	if pReply.Timestamp.WallTime != clock.Timestamp().WallTime {
		t.Errorf("expected write timestamp to upgrade to 1s; got %+v", pReply.Timestamp)
	}
}
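The upgrade rule verified here, sketched under the assumption that the cache consultation reduces to a timestamp comparison: a write at or below the latest read timestamp for its keys is advanced just past that read. Illustrative only:

// upgradeWriteTimestamp bumps a write's timestamp just past the latest
// read recorded for its key span. Hypothetical sketch of the rule.
func upgradeWriteTimestamp(writeTS, maxReadTS proto.Timestamp) proto.Timestamp {
	if !maxReadTS.Less(writeTS) {
		// Take the read timestamp and tick the logical component so the
		// write sorts strictly after the read.
		writeTS = maxReadTS
		writeTS.Logical++
	}
	return writeTS
}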
Example #19
// TestStoreInitAndBootstrap verifies store initialization and
// bootstrap.
func TestStoreInitAndBootstrap(t *testing.T) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := NewStore(clock, eng, nil)
	defer store.Close()

	// Can't init as haven't bootstrapped.
	if err := store.Init(); err == nil {
		t.Error("expected failure init'ing un-bootstrapped store")
	}

	// Bootstrap with a fake ident.
	if err := store.Bootstrap(testIdent); err != nil {
		t.Errorf("error bootstrapping store: %v", err)
	}

	// Try to get 1st range--non-existent.
	if _, err := store.GetRange(1); err == nil {
		t.Error("expected error fetching non-existent range")
	}

	// Create range and fetch.
	if _, err := store.CreateRange(engine.KeyMin, engine.KeyMax, []proto.Replica{}); err != nil {
		t.Errorf("failure to create first range: %v", err)
	}
	if _, err := store.GetRange(1); err != nil {
		t.Errorf("failure fetching 1st range: %v", err)
	}

	// Now, attempt to initialize a store with a now-bootstrapped engine.
	store = NewStore(clock, eng, nil)
	if err := store.Init(); err != nil {
		t.Errorf("failure initializing bootstrapped store: %v", err)
	}
	// 1st range should be available.
	if _, err := store.GetRange(1); err != nil {
		t.Errorf("failure fetching 1st range: %v", err)
	}
}
Example #20
// TestBootstrapOfNonEmptyStore verifies bootstrap failure if engine
// is not empty.
func TestBootstrapOfNonEmptyStore(t *testing.T) {
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)

	// Put some random garbage into the engine.
	if err := eng.Put(engine.Key("foo"), []byte("bar")); err != nil {
		t.Errorf("failure putting key foo into engine: %v", err)
	}
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	store := NewStore(clock, eng, nil)
	defer store.Close()

	// Can't init as haven't bootstrapped.
	if err := store.Init(); err == nil {
		t.Error("expected failure init'ing un-bootstrapped store")
	}

	// Bootstrap should fail on non-empty engine.
	if err := store.Bootstrap(testIdent); err == nil {
		t.Error("expected bootstrap error on non-empty store")
	}
}
Example #21
// TestRangeUpdateTSCache verifies that reads update the read
// timestamp cache.
func TestRangeUpdateTSCache(t *testing.T) {
	rng, mc, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()
	// Set clock to time 1s and do the read.
	t0 := 1 * time.Second
	*mc = hlc.ManualClock(t0.Nanoseconds())
	args, reply := getArgs("a", 0)
	args.Timestamp = clock.Now()
	err := rng.ReadOnlyCmd("Get", args, reply)
	if err != nil {
		t.Error(err)
	}
	// Verify the read timestamp cache has 1sec for "a".
	ts := rng.tsCache.GetMax(engine.Key("a"), nil)
	if ts.WallTime != t0.Nanoseconds() {
		t.Errorf("expected wall time to have 1s, but got %+v", ts)
	}
	// Verify another key ("b") has 0sec in timestamp cache.
	ts = rng.tsCache.GetMax(engine.Key("b"), nil)
	if ts.WallTime != 0 {
		t.Errorf("expected wall time to have 0s, but got %+v", ts)
	}
}
Example #22
func newTestDB(store *Store) (*testDB, *hlc.ManualClock) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	return &testDB{store: store, clock: clock}, &manual
}
Example #23
func TestReadTimestampCache(t *testing.T) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxDrift(maxClockSkew)
	rtc := NewReadTimestampCache(clock)

	// First simulate a read of just "a" at time 0.
	rtc.Add(engine.Key("a"), nil, clock.Now())
	// Verify GetMax returns the highWater mark which is maxClockSkew.
	if rtc.GetMax(engine.Key("a"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"a\"")
	}
	if rtc.GetMax(engine.Key("notincache"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"notincache\"")
	}

	// Advance the clock and verify same high water mark.
	manual = hlc.ManualClock(maxClockSkew.Nanoseconds() + 1)
	if rtc.GetMax(engine.Key("a"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"a\"")
	}
	if rtc.GetMax(engine.Key("notincache"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"notincache\"")
	}

	// Simulate a read of "b"-"c" at time maxClockSkew + 1.
	ts := clock.Now()
	rtc.Add(engine.Key("b"), engine.Key("c"), ts)

	// Verify all permutations of direct and range access.
	if !rtc.GetMax(engine.Key("b"), nil).Equal(ts) {
		t.Errorf("expected current time for key \"b\"; got %+v", rtc.GetMax(engine.Key("b"), nil))
	}
	if !rtc.GetMax(engine.Key("bb"), nil).Equal(ts) {
		t.Error("expected current time for key \"bb\"")
	}
	if rtc.GetMax(engine.Key("c"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"c\"")
	}
	if !rtc.GetMax(engine.Key("b"), engine.Key("c")).Equal(ts) {
		t.Error("expected current time for key \"b\"-\"c\"")
	}
	if !rtc.GetMax(engine.Key("bb"), engine.Key("bz")).Equal(ts) {
		t.Error("expected current time for key \"bb\"-\"bz\"")
	}
	if rtc.GetMax(engine.Key("a"), engine.Key("b")).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"a\"-\"b\"")
	}
	if !rtc.GetMax(engine.Key("a"), engine.Key("bb")).Equal(ts) {
		t.Error("expected current time for key \"a\"-\"bb\"")
	}
	if !rtc.GetMax(engine.Key("a"), engine.Key("d")).Equal(ts) {
		t.Error("expected current time for key \"a\"-\"d\"")
	}
	if !rtc.GetMax(engine.Key("bz"), engine.Key("c")).Equal(ts) {
		t.Error("expected current time for key \"bz\"-\"c\"")
	}
	if !rtc.GetMax(engine.Key("bz"), engine.Key("d")).Equal(ts) {
		t.Error("expected current time for key \"bz\"-\"d\"")
	}
	if rtc.GetMax(engine.Key("c"), engine.Key("d")).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"c\"-\"d\"")
	}
}
Example #24
func TestTimestampCache(t *testing.T) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxDrift(maxClockSkew)
	tc := NewTimestampCache(clock)

	// First simulate a read of just "a" at time 0.
	tc.Add(engine.Key("a"), nil, clock.Now())
	// Although we added "a" at time 0, the internal cache should still
	// be empty because t=0 is less than the highWater mark.
	if tc.cache.Len() > 0 {
		t.Errorf("expected cache to be empty, but contains %d elements", tc.cache.Len())
	}
	// Verify GetMax returns the highWater mark which is maxClockSkew.
	if tc.GetMax(engine.Key("a"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"a\"")
	}
	if tc.GetMax(engine.Key("notincache"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"notincache\"")
	}

	// Advance the clock and verify same high water mark.
	manual = hlc.ManualClock(maxClockSkew.Nanoseconds() + 1)
	if tc.GetMax(engine.Key("a"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"a\"")
	}
	if tc.GetMax(engine.Key("notincache"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"notincache\"")
	}

	// Simulate a read of "b"-"c" at time maxClockSkew + 1.
	ts := clock.Now()
	tc.Add(engine.Key("b"), engine.Key("c"), ts)

	// Verify all permutations of direct and range access.
	if !tc.GetMax(engine.Key("b"), nil).Equal(ts) {
		t.Errorf("expected current time for key \"b\"; got %+v", tc.GetMax(engine.Key("b"), nil))
	}
	if !tc.GetMax(engine.Key("bb"), nil).Equal(ts) {
		t.Error("expected current time for key \"bb\"")
	}
	if tc.GetMax(engine.Key("c"), nil).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"c\"")
	}
	if !tc.GetMax(engine.Key("b"), engine.Key("c")).Equal(ts) {
		t.Error("expected current time for key \"b\"-\"c\"")
	}
	if !tc.GetMax(engine.Key("bb"), engine.Key("bz")).Equal(ts) {
		t.Error("expected current time for key \"bb\"-\"bz\"")
	}
	if tc.GetMax(engine.Key("a"), engine.Key("b")).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"a\"-\"b\"")
	}
	if !tc.GetMax(engine.Key("a"), engine.Key("bb")).Equal(ts) {
		t.Error("expected current time for key \"a\"-\"bb\"")
	}
	if !tc.GetMax(engine.Key("a"), engine.Key("d")).Equal(ts) {
		t.Error("expected current time for key \"a\"-\"d\"")
	}
	if !tc.GetMax(engine.Key("bz"), engine.Key("c")).Equal(ts) {
		t.Error("expected current time for key \"bz\"-\"c\"")
	}
	if !tc.GetMax(engine.Key("bz"), engine.Key("d")).Equal(ts) {
		t.Error("expected current time for key \"bz\"-\"d\"")
	}
	if tc.GetMax(engine.Key("c"), engine.Key("d")).WallTime != maxClockSkew.Nanoseconds() {
		t.Error("expected maxClockSkew for key \"c\"-\"d\"")
	}
}