// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setupMultipleRanges(t, "b")
	defer s.Stop()

	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := []string{"a", "b"}
	ts := []time.Time{}
	for i, key := range keys {
		b := &client.Batch{}
		b.Put(key, "value")
		if err := db.Run(b); err != nil {
			t.Fatal(err)
		}
		ts = append(ts, b.Results[0].Rows[0].Timestamp())
		log.Infof("%d: %s", i, b.Results[0].Rows[0].Timestamp())
	}

	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to just below the second
	// key's timestamp to verify that only key "a" is read.
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())

	// Scan.
	sa := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ScanRequest)
	reply, err := client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, sa)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*roachpb.ScanResponse)

	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}

	// ReverseScan.
	rsa := roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ReverseScanRequest)
	reply, err = client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, rsa)
	if err != nil {
		t.Fatal(err)
	}
	rsr := reply.(*roachpb.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
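The essential step in the example above is pinning the DistSender's clock: INCONSISTENT reads are served at the sender's local clock rather than inside a transaction, so the manual clock controls which versions are visible. A standalone restatement of that step, reusing only identifiers that appear in the test (illustration only, not a separate test):

	// Read 1ns before "b" was written, so only key "a" is visible to the scan.
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())
	_ = ds // pinning the clock at or above ts[1] would presumably expose "b" as well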
Example #2
// TestRejectFutureCommand verifies that lease holders reject commands that
// would cause a large time jump.
func TestRejectFutureCommand(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const maxOffset = 100 * time.Millisecond
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(maxOffset)
	mtc := multiTestContext{
		clock: clock,
	}
	mtc.Start(t, 1)
	defer mtc.Stop()

	// First do a write. The first write will advance the clock by MaxOffset
	// because of the read cache's low water mark.
	args := putArgs([]byte("b"), []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &args); err != nil {
		t.Fatal(err)
	}
	if now := clock.Now(); now.WallTime != int64(maxOffset) {
		t.Fatalf("expected clock to advance to 100ms; got %s", now)
	}
	// The logical clock has advanced past the physical clock; increment
	// the "physical" clock to catch up.
	manual.Increment(int64(maxOffset))

	startTime := manual.UnixNano()

	// Commands with a future timestamp that is within the MaxOffset
	// bound will be accepted and will cause the clock to advance.
	for i := int64(0); i < 3; i++ {
		incArgs := incrementArgs([]byte("a"), 5)
		ts := hlc.ZeroTimestamp.Add(startTime+((i+1)*30)*int64(time.Millisecond), 0)
		if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
			t.Fatal(err)
		}
	}
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Fatalf("expected clock to advance to 190ms; got %s", now)
	}

	// Once the accumulated offset reaches MaxOffset, commands will be rejected.
	incArgs := incrementArgs([]byte("a"), 11)
	ts := hlc.ZeroTimestamp.Add(startTime+(maxOffset+1*time.Millisecond).Nanoseconds(), 0)
	if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err == nil {
		t.Fatalf("expected clock offset error but got nil")
	}

	// The clock remained at 190ms and the final command was not executed.
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Errorf("expected clock to advance to 190ms; got %s", now)
	}
	val, _, err := engine.MVCCGet(context.Background(), mtc.engines[0], roachpb.Key("a"), clock.Now(), true, nil)
	if err != nil {
		t.Fatal(err)
	}
	if v := mustGetInt(val); v != 15 {
		t.Errorf("expected 15, got %v", v)
	}
}
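The arithmetic behind the assertions above, spelled out as a sketch (illustration only; the constants mirror the test rather than coming from the package):

	// After the first write the HLC sits at MaxOffset (100ms) and the manual
	// clock is advanced to match, so startTime corresponds to 100ms. The three
	// accepted increments are stamped at 130ms, 160ms and 190ms, each no more
	// than MaxOffset ahead of the physical clock, so the HLC follows them up to
	// 190ms. A command stamped beyond physical time + MaxOffset (200ms here) is
	// rejected and the HLC stays at 190ms.
	const (
		sketchPhysical    = 100 * time.Millisecond
		sketchRejectAbove = sketchPhysical + 100*time.Millisecond // 200ms
	)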
Example #3
// TestScannerAddToQueues verifies that ranges are added to and
// removed from multiple queues.
func TestScannerAddToQueues(t *testing.T) {
	const count = 3
	iter := newTestIterator(count)
	q1, q2 := &testQueue{}, &testQueue{}
	s := newRangeScanner(1*time.Millisecond, iter)
	s.AddQueues(q1, q2)
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	stopper := util.NewStopper(0)

	// Start queue and verify that all ranges are added to both queues.
	s.Start(clock, stopper)
	if err := util.IsTrueWithin(func() bool {
		return q1.count() == count && q2.count() == count
	}, 50*time.Millisecond); err != nil {
		t.Error(err)
	}

	// Remove first range and verify it does not exist in either queue.
	rng := iter.remove(0)
	s.RemoveRange(rng)
	if err := util.IsTrueWithin(func() bool {
		return q1.count() == count-1 && q2.count() == count-1
	}, 10*time.Millisecond); err != nil {
		t.Error(err)
	}

	// Stop scanner and verify both queues are stopped.
	stopper.Stop()
	if !q1.isDone() || !q2.isDone() {
		t.Errorf("expected all queues to stop; got %t, %t", q1.isDone(), q2.isDone())
	}
}
Example #4
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	// Setup fake zone config handler.
	config.TestingSetupZoneConfigHook(stopper)
	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ctx.StorePool = NewStorePool(ctx.Gossip, TestTimeUntilStoreDeadOff, stopper)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(roachpb.Attributes{}, 10<<20, stopper)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	ctx.DB = client.NewDB(sender)
	store := NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(roachpb.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(nil); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
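A hypothetical caller of the helper above, shown only to illustrate the intended lifecycle (the helper leaves the store un-started so the test can adjust state first; store.Start is assumed to take the stopper, as in the later examples in this collection):

	func TestStoreSomethingHypothetical(t *testing.T) {
		store, manual, stopper := createTestStoreWithoutStart(t)
		defer stopper.Stop()
		// ... adjust store or clock state before startup ...
		if err := store.Start(stopper); err != nil {
			t.Fatal(err)
		}
		manual.Increment(1) // advance the store's clock as the test requires
		_ = store
	}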
Example #5
// TestScannerTiming verifies that ranges are scanned, regardless
// of how many, to match scanInterval.
//
// TODO(spencer): in order to make this test not take too much time,
// we're running these loops at speeds where clock ticks may be
// an issue on virtual machines used for continuous integration.
func TestScannerTiming(t *testing.T) {
	const count = 3
	const runTime = 50 * time.Millisecond
	const maxError = 7500 * time.Microsecond
	durations := []time.Duration{
		5 * time.Millisecond,
		12500 * time.Microsecond,
	}
	for i, duration := range durations {
		iter := newTestIterator(count)
		q := &testQueue{}
		s := newRangeScanner(duration, iter)
		s.AddQueues(q)
		mc := hlc.NewManualClock(0)
		clock := hlc.NewClock(mc.UnixNano)
		stopper := util.NewStopper(0)
		defer stopper.Stop()
		s.Start(clock, stopper)
		time.Sleep(runTime)

		avg := iter.avgScan()
		log.Infof("%d: average scan: %s\n", i, avg)
		if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
			duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
			t.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
		}
	}
}
Example #6
// createTestBookie creates a new bookie, stopper and manual clock for testing.
func createTestBookie(reservationTimeout time.Duration) (*stop.Stopper, *hlc.ManualClock, *bookie) {
	stopper := stop.NewStopper()
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	b := newBookie(clock, reservationTimeout, stopper)
	return stopper, mc, b
}
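A hypothetical use of the bookie helper above; the reservation timeout and the manual clock bump are illustrative values only:

	stopper, mc, b := createTestBookie(time.Millisecond)
	defer stopper.Stop()
	mc.Increment((2 * time.Millisecond).Nanoseconds()) // move past the reservation timeout
	_ = b // reservations made before the bump would now be considered expired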
Example #7
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(rootTestBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 10<<20)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	var err error
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
Example #8
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(m.clock, rpc.LoadInsecureTLSConfig())
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, "")
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport()
	}
	if m.sender == nil {
		m.sender = kv.NewLocalSender()
	}
	if m.db == nil {
		txnSender := kv.NewTxnCoordSender(m.sender, m.clock, false)
		m.db = client.NewKV(txnSender, nil)
		m.db.User = storage.UserRoot
	}

	for i := 0; i < numStores; i++ {
		m.addStore(t)
	}
}
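A hypothetical test body driving the Start method above; mtc.Stop and mtc.stores are used the same way in TestRejectFutureCommand earlier in this collection:

	mtc := multiTestContext{}
	mtc.Start(t, 3)
	defer mtc.Stop()
	// mtc.stores[0] through mtc.stores[2] are now bootstrapped and usable.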
Example #9
// TestTimestampCacheReadVsWrite verifies that the timestamp cache
// can differentiate between read and write timestamps.
func TestTimestampCacheReadVsWrite(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	tc := NewTimestampCache(clock)

	// Add read-only non-txn entry at current time.
	ts1 := clock.Now()
	tc.Add(roachpb.Key("a"), roachpb.Key("b"), ts1, nil, true)

	// Add two successive txn entries; one read-only and one read-write.
	txn1ID := uuid.NewUUID4()
	txn2ID := uuid.NewUUID4()
	ts2 := clock.Now()
	tc.Add(roachpb.Key("a"), nil, ts2, txn1ID, true)
	ts3 := clock.Now()
	tc.Add(roachpb.Key("a"), nil, ts3, txn2ID, false)

	// Fetching with no transaction gets latest values.
	if rTS, wTS := tc.GetMax(roachpb.Key("a"), nil, nil); !rTS.Equal(ts2) || !wTS.Equal(ts3) {
		t.Errorf("expected %s %s; got %s %s", ts2, ts3, rTS, wTS)
	}
	// Fetching with txn ID "1" gets original for read and most recent for write.
	if rTS, wTS := tc.GetMax(roachpb.Key("a"), nil, txn1ID); !rTS.Equal(ts1) || !wTS.Equal(ts3) {
		t.Errorf("expected %s %s; got %s %s", ts1, ts3, rTS, wTS)
	}
	// Fetching with txn ID "2" gets ts2 for read and low water mark for write.
	if rTS, wTS := tc.GetMax(roachpb.Key("a"), nil, txn2ID); !rTS.Equal(ts2) || !wTS.Equal(tc.lowWater) {
		t.Errorf("expected %s %s; got %s %s", ts2, tc.lowWater, rTS, wTS)
	}
}
Example #10
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)

	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		return ba.CreateReply().(*proto.BatchResponse), nil
	}), clock, false, nil, stopper)

	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()

	var ba proto.BatchRequest
	put := &proto.PutRequest{}
	put.Key = proto.Key("test")
	ba.Add(put)
	ba.Add(&proto.EndTransactionRequest{})
	ba.Txn = &proto.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
Example #11
func TestClockOffsetMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	// Create a RemoteClockMonitor with a hand-picked offset.
	offset := RemoteOffset{
		Offset:      13,
		Uncertainty: 7,
		MeasuredAt:  6,
	}
	clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano)
	clock.SetMaxOffset(20 * time.Nanosecond)
	monitor := newRemoteClockMonitor(clock, time.Hour)
	monitor.mu.offsets = map[string]RemoteOffset{
		"0": offset,
	}

	if err := monitor.VerifyClockOffset(); err != nil {
		t.Fatal(err)
	}

	reg := monitor.Registry()
	expLower := offset.Offset - offset.Uncertainty
	if a, e := reg.GetGauge("lower-bound-nanos").Value(), expLower; a != e {
		t.Errorf("lower bound %d != expected %d", a, e)
	}
	expHigher := offset.Offset + offset.Uncertainty
	if a, e := reg.GetGauge("upper-bound-nanos").Value(), expHigher; a != e {
		t.Errorf("upper bound %d != expected %d", a, e)
	}
}
Example #12
// TestTimestampCacheWithTxnID verifies that timestamps matching
// the specified txn ID are ignored.
func TestTimestampCacheWithTxnID(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	tc := NewTimestampCache(clock)

	// Add two successive txn entries.
	txn1ID := uuid.NewUUID4()
	txn2ID := uuid.NewUUID4()
	ts1 := clock.Now()
	tc.Add(roachpb.Key("a"), roachpb.Key("c"), ts1, txn1ID, true)
	ts2 := clock.Now()
	// Add an overlapping entry owned by the second txn at a later timestamp.
	tc.Add(roachpb.Key("b"), roachpb.Key("d"), ts2, txn2ID, true)

	// Fetching with no transaction gets latest value.
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, nil); !ts.Equal(ts2) {
		t.Errorf("expected %s; got %s", ts2, ts)
	}
	// Fetching with txn ID "1" gets most recent.
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, txn1ID); !ts.Equal(ts2) {
		t.Errorf("expected %s; got %s", ts2, ts)
	}
	// Fetching with txn ID "2" skips most recent.
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, txn2ID); !ts.Equal(ts1) {
		t.Errorf("expected %s; got %s", ts1, ts)
	}
}
Example #13
// TestBuildEndpointListRemoveStagnantClocks tests the side effect of removing
// older offsets when we build an endpoint list.
func TestBuildEndpointListRemoveStagnantClocks(t *testing.T) {
	defer leaktest.AfterTest(t)()
	offsets := map[string]RemoteOffset{
		"0":         {Offset: 0, Uncertainty: 10, MeasuredAt: 11},
		"stagnant0": {Offset: 1, Uncertainty: 10, MeasuredAt: 0},
		"1":         {Offset: 2, Uncertainty: 10, MeasuredAt: 20},
		"stagnant1": {Offset: 3, Uncertainty: 10, MeasuredAt: 9},
	}

	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(5 * time.Nanosecond)
	remoteClocks := newRemoteClockMonitor(clock)
	// The stagnant offsets older than this will be removed.
	remoteClocks.monitorInterval = 10 * time.Nanosecond
	remoteClocks.mu.offsets = offsets
	remoteClocks.mu.lastMonitoredAt = time.Unix(0, 10) // offsets measured before this will be removed.

	remoteClocks.buildEndpointList()

	_, ok0 := offsets["stagnant0"]
	_, ok1 := offsets["stagnant1"]

	if ok0 || ok1 {
		t.Errorf("expected stagant offsets removed, instead offsets: %v", offsets)
	}
}
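Applying the removal rule stated in the comments above to the fixture, as a sketch (illustration only, not how buildEndpointList is implemented):

	for name, o := range offsets {
		if o.MeasuredAt < 10 { // measured before lastMonitoredAt
			delete(offsets, name) // drops "stagnant0" (0) and "stagnant1" (9); keeps "0" (11) and "1" (20)
		}
	}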
Example #14
// TestScannerTiming verifies that ranges are scanned, regardless
// of how many, to match scanInterval.
func TestScannerTiming(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const count = 3
	const runTime = 100 * time.Millisecond
	const maxError = 7500 * time.Microsecond
	durations := []time.Duration{
		15 * time.Millisecond,
		25 * time.Millisecond,
	}
	for i, duration := range durations {
		util.SucceedsSoon(t, func() error {
			ranges := newTestRangeSet(count, t)
			q := &testQueue{}
			s := newReplicaScanner(duration, 0, ranges)
			s.AddQueues(q)
			mc := hlc.NewManualClock(0)
			clock := hlc.NewClock(mc.UnixNano)
			stopper := stop.NewStopper()
			s.Start(clock, stopper)
			time.Sleep(runTime)
			stopper.Stop()

			avg := s.avgScan()
			log.Infof("%d: average scan: %s", i, avg)
			if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
				duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
				return errors.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
			}
			return nil
		})
	}
}
Example #15
// TestBuildEndpointListRemoveStagnantClocks tests the side effect of removing
// older offsets when we build an endpoint list.
func TestBuildEndpointListRemoveStagnantClocks(t *testing.T) {
	offsets := map[string]proto.RemoteOffset{
		"0":         {Offset: 0, Error: 10, MeasuredAt: 11},
		"stagnant0": {Offset: 1, Error: 10, MeasuredAt: 0},
		"1":         {Offset: 2, Error: 10, MeasuredAt: 20},
		"stagnant1": {Offset: 3, Error: 10, MeasuredAt: 9},
	}

	// Stagnant offsets older than 10ns will be removed.
	monitorInterval = 10 * time.Nanosecond

	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(5 * time.Nanosecond)
	remoteClocks := &RemoteClockMonitor{
		offsets:         offsets,
		lClock:          clock,
		lastMonitoredAt: 10, // offsets measured before this will be removed.
	}

	remoteClocks.buildEndpointList()

	_, ok0 := offsets["stagnant0"]
	_, ok1 := offsets["stagnant1"]

	if ok0 || ok1 {
		t.Errorf("expected stagant offsets removed, instead offsets: %v", offsets)
	}
}
Example #16
// TestFindOffsetWithLargeError tests a case where offset errors are
// bigger than the max offset (e.g., a case where heartbeat messages
// to the node are having high latency).
func TestFindOffsetWithLargeError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	maxOffset := 100 * time.Nanosecond

	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(maxOffset)
	offsets := map[string]RemoteOffset{}
	// Offsets are bigger than maxOffset, but the Uncertainty on each is bigger still.
	offsets["0"] = RemoteOffset{Offset: 110, Uncertainty: 300}
	offsets["1"] = RemoteOffset{Offset: 120, Uncertainty: 300}
	offsets["2"] = RemoteOffset{Offset: 130, Uncertainty: 300}

	remoteClocks := newRemoteClockMonitor(clock)
	remoteClocks.mu.offsets = offsets

	interval, err := remoteClocks.findOffsetInterval()
	if err != nil {
		t.Fatal(err)
	}
	expectedInterval := clusterOffsetInterval{lowerbound: -270, upperbound: 510}
	if interval != expectedInterval {
		t.Errorf("expected interval %v, instead %v", expectedInterval, interval)
	}
	// The interval is still considered healthy.
	assertIntervalHealth(true, interval, maxOffset, t)
}
Example #17
// TestScannerStats verifies that stats accumulate from all ranges.
func TestScannerStats(t *testing.T) {
	defer leaktest.AfterTest(t)
	const count = 3
	ranges := newTestRangeSet(count, t)
	q := &testQueue{}
	stopper := util.NewStopper()
	defer stopper.Stop()
	s := newRangeScanner(1*time.Millisecond, 0, ranges, nil)
	s.AddQueues(q)
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	// At start, scanner stats should be blank for MVCC, but have accurate number of ranges.
	if rc := s.Stats().RangeCount; rc != count {
		t.Errorf("range count expected %d; got %d", count, rc)
	}
	if vb := s.Stats().MVCC.ValBytes; vb != 0 {
		t.Errorf("value bytes expected %d; got %d", 0, vb)
	}
	s.Start(clock, stopper)
	// We expect a full run to accumulate stats from all ranges.
	if err := util.IsTrueWithin(func() bool {
		if rc := s.Stats().RangeCount; rc != count {
			return false
		}
		if vb := s.Stats().MVCC.ValBytes; vb != count*2 {
			return false
		}
		return true
	}, 100*time.Millisecond); err != nil {
		t.Error(err)
	}
}
Example #18
func TestTimestampCacheMergeInto(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)

	testCases := []struct {
		useClear bool
		expLen   int
	}{
		{true, 3},
		{false, 5},
	}
	for _, test := range testCases {
		tc1 := NewTimestampCache(clock)
		tc2 := NewTimestampCache(clock)

		bfTS := clock.Now()
		tc2.Add(roachpb.Key("b"), roachpb.Key("f"), bfTS, nil, true)

		adTS := clock.Now()
		tc1.Add(roachpb.Key("a"), roachpb.Key("d"), adTS, nil, true)

		beTS := clock.Now()
		tc1.Add(roachpb.Key("b"), roachpb.Key("e"), beTS, nil, true)

		aaTS := clock.Now()
		tc2.Add(roachpb.Key("aa"), nil, aaTS, nil, true)

		cTS := clock.Now()
		tc1.Add(roachpb.Key("c"), nil, cTS, nil, true)

		tc1.MergeInto(tc2, test.useClear)

		if tc2.cache.Len() != test.expLen {
			t.Errorf("expected merged length of %d; got %d", test.expLen, tc2.cache.Len())
		}
		if !tc2.latest.Equal(tc1.latest) {
			t.Errorf("expected latest to be updated to %s; got %s", tc1.latest, tc2.latest)
		}

		if rTS, _ := tc2.GetMax(roachpb.Key("a"), nil, nil); !rTS.Equal(adTS) {
			t.Error("expected \"a\" to have adTS timestamp")
		}
		if rTS, _ := tc2.GetMax(roachpb.Key("b"), nil, nil); !rTS.Equal(beTS) {
			t.Error("expected \"b\" to have beTS timestamp")
		}
		if test.useClear {
			if rTS, _ := tc2.GetMax(roachpb.Key("aa"), nil, nil); !rTS.Equal(adTS) {
				t.Error("expected \"aa\" to have adTS timestamp")
			}
		} else {
			if rTS, _ := tc2.GetMax(roachpb.Key("aa"), nil, nil); !rTS.Equal(aaTS) {
				t.Error("expected \"aa\" to have aaTS timestamp")
			}
			if rTS, _ := tc2.GetMax(roachpb.Key("a"), roachpb.Key("c"), nil); !rTS.Equal(aaTS) {
				t.Error("expected \"a\"-\"c\" to have aaTS timestamp")
			}
		}
	}
}
Example #19
// TestTimestampCacheNoEviction verifies that even after
// the MinTSCacheWindow interval, if the cache has not hit
// its size threshold, it will not evict entries.
func TestTimestampCacheNoEviction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(maxClockOffset)
	tc := newTimestampCache(clock)

	// Increment time to the maxClockOffset low water mark + 1.
	manual.Set(maxClockOffset.Nanoseconds() + 1)
	aTS := clock.Now()
	tc.add(roachpb.Key("a"), nil, aTS, nil, true)
	tc.AddRequest(cacheRequest{
		reads:     []roachpb.Span{{Key: roachpb.Key("c")}},
		timestamp: aTS,
	})

	// Increment time by the MinTSCacheWindow and add another key.
	manual.Increment(MinTSCacheWindow.Nanoseconds())
	tc.add(roachpb.Key("b"), nil, clock.Now(), nil, true)
	tc.AddRequest(cacheRequest{
		reads:     []roachpb.Span{{Key: roachpb.Key("d")}},
		timestamp: clock.Now(),
	})

	// Verify that the cache still has 4 entries in it
	if l, want := tc.len(), 4; l != want {
		t.Errorf("expected %d entries to remain, got %d", want, l)
	}
}
Example #20
// TestBaseQueueAddRemove adds then removes a range; ensure range is not processed.
func TestBaseQueueAddRemove(t *testing.T) {
	defer leaktest.AfterTest(t)
	r := &Range{}
	if err := r.setDesc(&proto.RangeDescriptor{RaftID: 1}); err != nil {
		t.Fatal(err)
	}
	testQueue := &testQueueImpl{
		shouldQueueFn: func(now proto.Timestamp, r *Range) (shouldQueue bool, priority float64) {
			shouldQueue = true
			priority = 1.0
			return
		},
	}
	bq := newBaseQueue("test", testQueue, 2)
	stopper := stop.NewStopper()
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	bq.Start(clock, stopper)
	defer stopper.Stop()

	bq.MaybeAdd(r, proto.ZeroTimestamp)
	bq.MaybeRemove(r)

	time.Sleep(5 * time.Millisecond)
	if pc := atomic.LoadInt32(&testQueue.processed); pc > 0 {
		t.Errorf("expected processed count of 0; got %d", pc)
	}
}
Example #21
func TestVerifyClockOffset(t *testing.T) {
	defer leaktest.AfterTest(t)()

	clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano)
	clock.SetMaxOffset(50 * time.Nanosecond)
	monitor := newRemoteClockMonitor(clock, time.Hour)

	for idx, tc := range []struct {
		offsets       []RemoteOffset
		expectedError bool
	}{
		// no error if no offsets.
		{[]RemoteOffset{}, false},
		// no error when a majority of offsets are under the maximum offset.
		{[]RemoteOffset{{Offset: 20, Uncertainty: 10}, {Offset: 58, Uncertainty: 20}, {Offset: 71, Uncertainty: 25}, {Offset: 91, Uncertainty: 31}}, false},
		// error when less than a majority of offsets are under the maximum offset.
		{[]RemoteOffset{{Offset: 20, Uncertainty: 10}, {Offset: 58, Uncertainty: 20}, {Offset: 85, Uncertainty: 25}, {Offset: 91, Uncertainty: 31}}, true},
	} {
		monitor.mu.offsets = make(map[string]RemoteOffset)
		for i, offset := range tc.offsets {
			monitor.mu.offsets[strconv.Itoa(i)] = offset
		}

		if tc.expectedError {
			if err := monitor.VerifyClockOffset(); !testutils.IsError(err, errOffsetGreaterThanMaxOffset) {
				t.Errorf("%d: unexpected error %v", idx, err)
			}
		} else {
			if err := monitor.VerifyClockOffset(); err != nil {
				t.Errorf("%d: unexpected error %s", idx, err)
			}
		}
	}
}
Example #22
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)

	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		br := ba.CreateReply()
		br.Txn = ba.Txn.Clone()
		br.Txn.Writing = true
		return br, nil
	}), clock, false, nil, stopper)

	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()

	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
Example #23
func TestHeartbeatReply(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(5)
	clock := hlc.NewClock(manual.UnixNano)
	heartbeat := &HeartbeatService{
		clock:              clock,
		remoteClockMonitor: newRemoteClockMonitor(clock, time.Hour),
	}

	request := &PingRequest{
		Ping: "testPing",
	}
	response, err := heartbeat.Ping(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	if response.Pong != request.Ping {
		t.Errorf("expected %s to be equal to %s", response.Pong, request.Ping)
	}

	if response.ServerTime != 5 {
		t.Errorf("expected server time 5, instead %d", response.ServerTime)
	}
}
Example #24
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)

	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		txn := ba.Txn.Clone()
		txn.Writing = true
		pErr := roachpb.NewError(roachpb.NewTransactionRetryError())
		pErr.SetTxn(txn)
		return nil, pErr
	}), clock, false, nil, stopper)
	defer stopper.Stop()

	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	if _, pErr := ts.Send(context.Background(), ba); !testutils.IsPError(pErr, "retry txn") {
		t.Fatalf("unexpected error: %v", pErr)
	}

	defer teardownHeartbeats(ts)
	ts.Lock()
	defer ts.Unlock()
	if len(ts.txns) != 1 {
		t.Fatalf("expected transaction to be tracked")
	}
}
Example #25
// TestBootstrapOfNonEmptyStore verifies bootstrap failure if engine
// is not empty.
func TestBootstrapOfNonEmptyStore(t *testing.T) {
	defer leaktest.AfterTest(t)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)

	// Put some random garbage into the engine.
	if err := eng.Put(proto.EncodedKey("foo"), []byte("bar")); err != nil {
		t.Errorf("failure putting key foo into engine: %s", err)
	}
	ctx := TestStoreContext
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper := stop.NewStopper()
	stopper.AddCloser(ctx.Transport)
	defer stopper.Stop()
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})

	// Can't init as haven't bootstrapped.
	if err := store.Start(stopper); err == nil {
		t.Error("expected failure init'ing un-bootstrapped store")
	}

	// Bootstrap should fail on non-empty engine.
	if err := store.Bootstrap(testIdent, stopper); err == nil {
		t.Error("expected bootstrap error on non-empty store")
	}
}
Example #26
func TestHeartbeatReply(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(5)
	clock := hlc.NewClock(manual.UnixNano)
	heartbeat := &HeartbeatService{
		clock:              clock,
		remoteClockMonitor: newRemoteClockMonitor(clock),
	}

	request := &PingRequest{
		Ping: "testPing",
	}
	var response *PingResponse
	if responseI, err := heartbeat.Ping(request); err != nil {
		t.Fatal(err)
	} else {
		response = responseI.(*PingResponse)
	}

	if response.Pong != request.Ping {
		t.Errorf("expected %s to be equal to %s", response.Pong, request.Ping)
	}

	if response.ServerTime != 5 {
		t.Errorf("expected server time 5, instead %d", response.ServerTime)
	}
}
Example #27
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewRootTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)
	ltc.lSender = newRetryableLocalSender(NewLocalSender())
	ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, nil, ltc.Stopper)
	var err error
	if ltc.DB, err = client.Open("//root@", client.SenderOpt(ltc.Sender)); err != nil {
		t.Fatal(err)
	}
	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.lSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
Example #28
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {

	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20, ltc.Stopper)

	ltc.localSender = NewLocalSender()
	var rpcSend rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		// TODO(tschottdorf): remove getReply().
		br, pErr := ltc.localSender.Send(context.Background(), *getArgs(nil).(*proto.BatchRequest))
		if br == nil {
			br = &proto.BatchResponse{}
		}
		if br.Error != nil {
			panic(proto.ErrorUnexpectedlySet(ltc.localSender, br))
		}
		br.Error = pErr
		return []gogoproto.Message{br}, nil
	}
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock: ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &defaultRPCRetryOptions,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,         // defined above
		RangeDescriptorDB:        ltc.localSender, // for descriptor lookup
	}, ltc.Gossip)

	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, nil /* tracer */, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)

	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.localSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
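A hypothetical test built on the LocalTestCluster above (assuming *testing.T satisfies util.Tester, as these helpers intend); Stop() is the shutdown entry point the doc comment on Start refers to:

	func TestSomethingOnLocalClusterHypothetical(t *testing.T) {
		ltc := &LocalTestCluster{}
		ltc.Start(t)
		defer ltc.Stop()
		// ltc.DB, ltc.Store, ltc.Clock and ltc.Manual are ready for use here.
	}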
Example #29
func TestFailedOffsetMeasurement(t *testing.T) {
	defer leaktest.AfterTest(t)

	stopper := stop.NewStopper()
	defer stopper.Stop()

	serverManual := hlc.NewManualClock(0)
	serverClock := hlc.NewClock(serverManual.UnixNano)
	ctx := newNodeTestContext(serverClock, stopper)
	s, ln := newTestServer(t, ctx, true)

	heartbeat := &ManualHeartbeatService{
		clock:              serverClock,
		remoteClockMonitor: newRemoteClockMonitor(serverClock),
		ready:              make(chan struct{}),
		stopper:            stopper,
	}
	if err := heartbeat.Register(s); err != nil {
		t.Fatalf("Unable to register heartbeat service: %s", err)
	}

	// Create a client that never receives a heartbeat after the first.
	clientManual := hlc.NewManualClock(0)
	clientClock := hlc.NewClock(clientManual.UnixNano)
	context := newNodeTestContext(clientClock, stopper)
	context.heartbeatTimeout = 20 * context.heartbeatInterval
	c := NewClient(ln.Addr(), context)
	heartbeat.ready <- struct{}{} // Allow one heartbeat for initialization.
	<-c.Healthy()

	// Synchronously wait on missing the next heartbeat.
	if err := util.IsTrueWithin(func() bool {
		select {
		case <-c.Healthy():
			return false
		default:
			return true
		}
	}, context.heartbeatTimeout*10); err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(&c.remoteOffset, &RemoteOffset{}) {
		t.Errorf("expected offset %v, actual %v",
			RemoteOffset{}, c.remoteOffset)
	}
}
Example #30
func TestLocalSenderLookupReplica(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	ctx := storage.TestStoreContext
	manualClock := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manualClock.UnixNano)
	ls := NewLocalSender()

	// Create two new stores with ranges we care about.
	var e [2]engine.Engine
	var s [2]*storage.Store
	ranges := []struct {
		storeID    proto.StoreID
		start, end proto.Key
	}{
		{2, proto.Key("a"), proto.Key("c")},
		{3, proto.Key("x"), proto.Key("z")},
	}
	for i, rng := range ranges {
		e[i] = engine.NewInMem(proto.Attributes{}, 1<<20)
		ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
		defer ctx.Transport.Close()
		s[i] = storage.NewStore(ctx, e[i], &proto.NodeDescriptor{NodeID: 1})
		s[i].Ident.StoreID = rng.storeID

		desc := &proto.RangeDescriptor{
			RangeID:  proto.RangeID(i),
			StartKey: rng.start,
			EndKey:   rng.end,
			Replicas: []proto.Replica{{StoreID: rng.storeID}},
		}
		newRng, err := storage.NewReplica(desc, s[i])
		if err != nil {
			t.Fatal(err)
		}
		if err := s[i].AddRangeTest(newRng); err != nil {
			t.Error(err)
		}
		ls.AddStore(s[i])
	}

	if _, r, err := ls.lookupReplica(proto.Key("a"), proto.Key("c")); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("b"), nil); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("b"), proto.Key("d")); r != nil || err == nil {
		t.Errorf("expected store 0 and error got %d", r.StoreID)
	}
	if _, r, err := ls.lookupReplica(proto.Key("x"), proto.Key("z")); r.StoreID != s[1].Ident.StoreID {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("y"), nil); r.StoreID != s[1].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
}