Code Example #1
// TestRejectFutureCommand verifies that leaders reject commands that
// would cause a large time jump.
func TestRejectFutureCommand(t *testing.T) {
	defer leaktest.AfterTest(t)

	const maxOffset = 100 * time.Millisecond
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(maxOffset)
	mtc := multiTestContext{
		clock: clock,
	}
	mtc.Start(t, 1)
	defer mtc.Stop()

	// First do a write. The first write will advance the clock by MaxOffset
	// because of the read cache's low water mark.
	pArgs := putArgs([]byte("b"), []byte("b"), 1, mtc.stores[0].StoreID())
	if _, err := client.SendWrapped(mtc.stores[0], nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	if now := clock.Now(); now.WallTime != int64(maxOffset) {
		t.Fatalf("expected clock to advance to 100ms; got %s", now)
	}
	// The logical clock has advanced past the physical clock; increment
	// the "physical" clock to catch up.
	manual.Increment(int64(maxOffset))

	startTime := manual.UnixNano()

	// Commands with a future timestamp that is within the MaxOffset
	// bound will be accepted and will cause the clock to advance.
	for i := int64(0); i < 3; i++ {
		incArgs := incrementArgs([]byte("a"), 5, 1, mtc.stores[0].StoreID())
		ts := roachpb.ZeroTimestamp.Add(startTime+((i+1)*30)*int64(time.Millisecond), 0)
		if _, err := client.SendWrappedAt(mtc.stores[0], nil, ts, &incArgs); err != nil {
			t.Fatal(err)
		}
	}
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Fatalf("expected clock to advance to 190ms; got %s", now)
	}

	// Once the accumulated offset reaches MaxOffset, commands will be rejected.
	incArgs := incrementArgs([]byte("a"), 11, 1, mtc.stores[0].StoreID())
	ts := roachpb.ZeroTimestamp.Add(startTime+int64(maxOffset)+1, 0)
	if _, err := client.SendWrappedAt(mtc.stores[0], nil, ts, &incArgs); err == nil {
		t.Fatalf("expected clock offset error but got nil")
	}

	// The clock remained at 190ms and the final command was not executed.
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Errorf("expected clock to advance to 190ms; got %s", now)
	}
	val, _, err := engine.MVCCGet(mtc.engines[0], roachpb.Key("a"), clock.Now(), true, nil)
	if err != nil {
		t.Fatal(err)
	}
	if v := mustGetInt(val); v != 15 {
		t.Errorf("expected 15, got %v", v)
	}
}
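
For readers skimming the test above: the rule it exercises is a hybrid-logical-clock update that accepts any command timestamp within MaxOffset of the local physical clock (advancing the clock to it) and rejects anything further ahead. Below is a minimal, self-contained sketch of that rule, not CockroachDB's hlc package; hybridClock, physicalNanos, and update are invented names for illustration only.

package main

import (
	"errors"
	"fmt"
)

type hybridClock struct {
	physicalNanos func() int64 // physical time source, nanoseconds
	maxOffset     int64        // tolerated clock skew, nanoseconds
	wallTime      int64        // highest wall time observed so far
	logical       int32        // logical counter within one wall tick
}

// update folds a command's wall time into the clock, enforcing the
// max-offset bound the test above exercises.
func (c *hybridClock) update(remoteWall int64) error {
	if remoteWall > c.physicalNanos()+c.maxOffset {
		return errors.New("command timestamp too far ahead of local clock")
	}
	if remoteWall > c.wallTime {
		c.wallTime, c.logical = remoteWall, 0
	} else {
		c.logical++
	}
	return nil
}

func main() {
	phys := int64(100e6) // a fixed "physical" clock at 100ms
	c := &hybridClock{physicalNanos: func() int64 { return phys }, maxOffset: int64(100e6)}
	fmt.Println(c.update(190e6))     // within MaxOffset: accepted, clock jumps to 190ms
	fmt.Println(c.update(200e6 + 1)) // beyond MaxOffset: rejected
	fmt.Println(c.wallTime, c.logical)
}

In the test the physical clock sits at 100ms, which is why commands up to 190ms are accepted but the one just past 200ms is rejected and the clock stays at 190ms.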
Code Example #2
File: store_test.go  Project: GokulSrinivas/cockroach
// TestStoreResolveWriteIntentSnapshotIsolation verifies that the
// timestamp can always be pushed if txn has snapshot isolation.
func TestStoreResolveWriteIntentSnapshotIsolation(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	key := roachpb.Key("a")
	pusher := newTransaction("test", key, 1, roachpb.SERIALIZABLE, store.ctx.Clock)
	pushee := newTransaction("test", key, 1, roachpb.SNAPSHOT, store.ctx.Clock)
	pushee.Priority = 2
	pusher.Priority = 1 // Pusher would lose based on priority.

	// First, write original value.
	args := putArgs(key, []byte("value1"), 1, store.StoreID())
	ts := store.ctx.Clock.Now()
	if _, err := client.SendWrappedAt(store, nil, ts, &args); err != nil {
		t.Fatal(err)
	}

	// Lay down intent using the pushee's txn.
	ts = store.ctx.Clock.Now()
	args.Txn = pushee
	args.Value.Bytes = []byte("value2")
	if _, err := client.SendWrappedAt(store, nil, ts, &args); err != nil {
		t.Fatal(err)
	}

	// Now, try to read value using the pusher's txn.
	gArgs := getArgs(key, 1, store.StoreID())
	gTS := store.ctx.Clock.Now()
	gArgs.Txn = pusher
	if reply, err := client.SendWrappedAt(store, nil, gTS, &gArgs); err != nil {
		t.Errorf("expected read to succeed: %s", err)
	} else if gReply := reply.(*roachpb.GetResponse); !bytes.Equal(gReply.Value.Bytes, []byte("value1")) {
		t.Errorf("expected bytes to be %q, got %q", "value1", gReply.Value.Bytes)
	}

	// Finally, try to end the pushee's transaction; since it's got
	// SNAPSHOT isolation, the end should work, but verify that the txn
	// commit timestamp was pushed to the pusher's read timestamp (gTS)
	// plus one logical tick.
	etArgs := endTxnArgs(pushee, true, 1, store.StoreID())
	ts = pushee.Timestamp
	reply, err := client.SendWrappedAt(store, nil, ts, &etArgs)
	if err != nil {
		t.Fatal(err)
	}
	etReply := reply.(*roachpb.EndTransactionResponse)
	expTimestamp := gTS
	expTimestamp.Logical++
	if etReply.Txn.Status != roachpb.COMMITTED || !etReply.Txn.Timestamp.Equal(expTimestamp) {
		t.Errorf("txn commit didn't yield expected status (COMMITTED) or timestamp %s: %s",
			expTimestamp, etReply.Txn)
	}
}
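
The assumption this test leans on is that a read can always push a SNAPSHOT pushee's provisional commit timestamp just past the read's timestamp, because a snapshot transaction may commit at a later timestamp without restarting; SERIALIZABLE pushees can only be pushed by a higher-priority pusher. A rough, hypothetical sketch of that decision (txnMeta and pushTimestamp are invented names, not CockroachDB code):

package main

import "fmt"

type isolation int

const (
	serializable isolation = iota
	snapshot
)

type txnMeta struct {
	iso      isolation
	priority int32
	wall     int64 // provisional commit timestamp, wall part
	logical  int64 // provisional commit timestamp, logical part
}

// pushTimestamp moves the pushee's timestamp just past the reader's
// timestamp when the push is allowed, and reports whether it succeeded.
func pushTimestamp(pushee *txnMeta, pusherPriority int32, readWall, readLogical int64) bool {
	if pushee.iso != snapshot && pusherPriority <= pushee.priority {
		return false // SERIALIZABLE pushee with higher priority: cannot push
	}
	pushee.wall, pushee.logical = readWall, readLogical+1
	return true
}

func main() {
	pushee := &txnMeta{iso: snapshot, priority: 2}
	ok := pushTimestamp(pushee, 1, 100, 0)       // a lower-priority reader still pushes
	fmt.Println(ok, pushee.wall, pushee.logical) // true 100 1
}

In the test above the pushee has the higher priority (2 vs 1), so it is only the SNAPSHOT rule that lets the read push it and still commit afterwards.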
Code Example #3
// TestRangeCommandClockUpdate verifies that followers update their
// clocks when executing a command, even if the leader's clock is far
// in the future.
func TestRangeCommandClockUpdate(t *testing.T) {
	defer leaktest.AfterTest(t)

	const numNodes = 3
	var manuals []*hlc.ManualClock
	var clocks []*hlc.Clock
	for i := 0; i < numNodes; i++ {
		manuals = append(manuals, hlc.NewManualClock(1))
		clocks = append(clocks, hlc.NewClock(manuals[i].UnixNano))
		clocks[i].SetMaxOffset(100 * time.Millisecond)
	}
	mtc := multiTestContext{
		clocks: clocks,
	}
	mtc.Start(t, numNodes)
	defer mtc.Stop()
	mtc.replicateRange(1, 0, 1, 2)

	// Advance the leader's clock ahead of the followers (by more than
	// MaxOffset but less than the leader lease) and execute a command.
	manuals[0].Increment(int64(500 * time.Millisecond))
	incArgs := incrementArgs([]byte("a"), 5, 1, mtc.stores[0].StoreID())
	ts := clocks[0].Now()
	if _, err := client.SendWrappedAt(mtc.stores[0], nil, ts, &incArgs); err != nil {
		t.Fatal(err)
	}

	// Wait for that command to execute on all the followers.
	util.SucceedsWithin(t, 50*time.Millisecond, func() error {
		values := []int64{}
		for _, eng := range mtc.engines {
			val, _, err := engine.MVCCGet(eng, roachpb.Key("a"), clocks[0].Now(), true, nil)
			if err != nil {
				return err
			}
			values = append(values, mustGetInt(val))
		}
		if !reflect.DeepEqual(values, []int64{5, 5, 5}) {
			return util.Errorf("expected (5, 5, 5), got %v", values)
		}
		return nil
	})

	// Verify that all the followers have accepted the clock update from
	// node 0 even though it comes from outside the usual max offset.
	now := clocks[0].Now()
	for i, clock := range clocks {
		// Only compare the WallTimes: it's normal for clock 0 to be a few logical ticks ahead.
		if clock.Now().WallTime < now.WallTime {
			t.Errorf("clock %d is behind clock 0: %s vs %s", i, clock.Now(), now)
		}
	}
}
Code Example #4
File: store_test.go  Project: GokulSrinivas/cockroach
// TestStoreSendUpdateTime verifies that the node clock is updated to
// reflect the timestamp of an incoming request.
func TestStoreSendUpdateTime(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()
	args := getArgs([]byte("a"), 1, store.StoreID())
	reqTS := store.ctx.Clock.Now()
	reqTS.WallTime += (100 * time.Millisecond).Nanoseconds()
	_, err := client.SendWrappedAt(store, nil, reqTS, &args)
	if err != nil {
		t.Fatal(err)
	}
	ts := store.ctx.Clock.Timestamp()
	if ts.WallTime != reqTS.WallTime || ts.Logical <= reqTS.Logical {
		t.Errorf("expected store clock to advance to %s; got %s", reqTS, ts)
	}
}
Code Example #5
File: store_test.go  Project: GokulSrinivas/cockroach
// TestStoreSendWithClockOffset verifies that if the request
// specifies a timestamp further into the future than the node's
// maximum allowed clock offset, the cmd fails.
func TestStoreSendWithClockOffset(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, mc, stopper := createTestStore(t)
	defer stopper.Stop()
	args := getArgs([]byte("a"), 1, store.StoreID())

	// Set clock to time 1.
	mc.Set(1)
	// Set clock max offset to 250ms.
	maxOffset := 250 * time.Millisecond
	store.ctx.Clock.SetMaxOffset(maxOffset)
	// Set args timestamp to exceed max offset.
	ts := store.ctx.Clock.Now().Add(maxOffset.Nanoseconds()+1, 0)
	if _, err := client.SendWrappedAt(store, nil, ts, &args); err == nil {
		t.Error("expected max offset clock error")
	}
}
Code Example #6
File: store_test.go  Project: GokulSrinivas/cockroach
// TestStoreResolveWriteIntentPushOnRead verifies that resolving a
// write intent for a read will push the timestamp. On failure to
// push, verify a write intent error is returned with !Resolvable.
func TestStoreResolveWriteIntentPushOnRead(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()
	setTestRetryOptions(store)

	testCases := []struct {
		resolvable bool
		pusheeIso  roachpb.IsolationType
	}{
		// Resolvable is true, so we can read, but SERIALIZABLE means we can't commit.
		{true, roachpb.SERIALIZABLE},
		// Pushee is SNAPSHOT, meaning we can commit.
		{true, roachpb.SNAPSHOT},
		// Resolvable is false and SERIALIZABLE so can't read.
		{false, roachpb.SERIALIZABLE},
		// Resolvable is false, but SNAPSHOT means we can push it anyway, so can read.
		{false, roachpb.SNAPSHOT},
	}
	for i, test := range testCases {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		pusher := newTransaction("test", key, 1, roachpb.SERIALIZABLE, store.ctx.Clock)
		pushee := newTransaction("test", key, 1, test.pusheeIso, store.ctx.Clock)
		if test.resolvable {
			pushee.Priority = 1
			pusher.Priority = 2 // Pusher will win.
		} else {
			pushee.Priority = 2
			pusher.Priority = 1 // Pusher will lose.
		}

		// First, write original value.
		args := putArgs(key, []byte("value1"), 1, store.StoreID())
		if _, err := client.SendWrapped(store, nil, &args); err != nil {
			t.Fatal(err)
		}

		// Second, lay down intent using the pushee's txn.
		args.Txn = pushee
		args.Value.Bytes = []byte("value2")
		if _, err := client.SendWrapped(store, nil, &args); err != nil {
			t.Fatal(err)
		}

		// Now, try to read value using the pusher's txn.
		ts := store.ctx.Clock.Now()
		gArgs := getArgs(key, 1, store.StoreID())
		gArgs.Txn = pusher
		firstReply, err := client.SendWrappedAt(store, nil, ts, &gArgs)
		if test.resolvable {
			if err != nil {
				t.Errorf("%d: expected read to succeed: %s", i, err)
			} else if gReply := firstReply.(*roachpb.GetResponse); !bytes.Equal(gReply.Value.Bytes, []byte("value1")) {
				t.Errorf("%d: expected bytes to be %q, got %q", i, "value1", gReply.Value.Bytes)
			}

			// Finally, try to end the pushee's transaction; if we have
			// SNAPSHOT isolation, the commit should work: verify the txn
			// commit timestamp is equal to pusher's Timestamp + 1. Otherwise,
			// verify commit fails with TransactionRetryError.
			etArgs := endTxnArgs(pushee, true, 1, store.StoreID())
			reply, cErr := client.SendWrapped(store, nil, &etArgs)

			expTimestamp := pusher.Timestamp
			expTimestamp.Logical++
			if test.pusheeIso == roachpb.SNAPSHOT {
				if cErr != nil {
					t.Errorf("unexpected error on commit: %s", cErr)
				}
				etReply := reply.(*roachpb.EndTransactionResponse)
				if etReply.Txn.Status != roachpb.COMMITTED || !etReply.Txn.Timestamp.Equal(expTimestamp) {
					t.Errorf("txn commit didn't yield expected status (COMMITTED) or timestamp %s: %s",
						expTimestamp, etReply.Txn)
				}
			} else {
				if _, ok := cErr.(*roachpb.TransactionRetryError); !ok {
					t.Errorf("expected transaction retry error; got %s", cErr)
				}
			}
		} else {
			// If isolation of pushee is SNAPSHOT, we can always push, so
			// even a non-resolvable read will succeed. Otherwise, verify we
			// receive a transaction retry error (because we max out retries).
			if test.pusheeIso == roachpb.SNAPSHOT {
				if err != nil {
					t.Errorf("expected read to succeed: %s", err)
				} else if gReply := firstReply.(*roachpb.GetResponse); !bytes.Equal(gReply.Value.Bytes, []byte("value1")) {
					t.Errorf("expected bytes to be %q, got %q", "value1", gReply.Value.Bytes)
				}
			} else {
				if err == nil {
					t.Errorf("expected read to fail")
				}
				if _, ok := err.(*roachpb.TransactionRetryError); !ok {
					t.Errorf("expected transaction retry error; got %T", err)
				}
			}
		}
	}
}
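
As a compact restatement of the four test cases above (illustrative only, not code from the repository): the pusher's read is expected to succeed whenever the intent is resolvable or the pushee runs at SNAPSHOT, and a pushed SERIALIZABLE pushee is expected to hit a TransactionRetryError when it later commits. readSucceeds and commitNeedsRetry below are invented helper names.

package main

import "fmt"

// readSucceeds reports whether the pusher's Get is expected to return a value.
func readSucceeds(resolvable, pusheeIsSnapshot bool) bool {
	return resolvable || pusheeIsSnapshot
}

// commitNeedsRetry reports whether a pushed pushee is expected to hit a
// TransactionRetryError at commit time; only SERIALIZABLE pushees cannot
// commit at a pushed timestamp.
func commitNeedsRetry(pusheeIsSnapshot bool) bool {
	return !pusheeIsSnapshot
}

func main() {
	for _, tc := range []struct{ resolvable, snapshot bool }{
		{true, false}, {true, true}, {false, false}, {false, true},
	} {
		fmt.Println(tc, "read succeeds:", readSucceeds(tc.resolvable, tc.snapshot))
	}
	fmt.Println("pushed SERIALIZABLE commit retries:", commitNeedsRetry(false))
}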
Code Example #7
File: store_test.go  Project: GokulSrinivas/cockroach
// TestStoreScanIntents verifies that a scan across 10 intents resolves
// them in one fell swoop using both consistent and inconsistent reads.
func TestStoreScanIntents(t *testing.T) {
	defer leaktest.AfterTest(t)
	defer func() { TestingCommandFilter = nil }()

	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	var count int32
	countPtr := &count

	TestingCommandFilter = func(args roachpb.Request) error {
		if _, ok := args.(*roachpb.ScanRequest); ok {
			atomic.AddInt32(countPtr, 1)
		}
		return nil
	}

	testCases := []struct {
		consistent bool
		canPush    bool  // can the txn be pushed?
		expFinish  bool  // do we expect the scan to finish?
		expCount   int32 // how many times do we expect to scan?
	}{
		// Consistent which can push will make two loops.
		{true, true, true, 2},
		// Consistent but can't push will backoff and retry and not finish.
		{true, false, false, -1},
		// Inconsistent and can push will make one loop, with async resolves.
		{false, true, true, 1},
		// Inconsistent and can't push will just read inconsistent (will read nils).
		{false, false, true, 1},
	}
	for i, test := range testCases {
		// The command filter just counts the number of scan requests which are
		// submitted to the range.
		atomic.StoreInt32(countPtr, 0)

		// Lay down 10 intents to scan over.
		var txn *roachpb.Transaction
		keys := []roachpb.Key{}
		for j := 0; j < 10; j++ {
			key := roachpb.Key(fmt.Sprintf("key%d-%02d", i, j))
			keys = append(keys, key)
			if txn == nil {
				priority := int32(-1)
				if !test.canPush {
					priority = -roachpb.MaxPriority
				}
				txn = newTransaction(fmt.Sprintf("test-%d", i), key, priority, roachpb.SERIALIZABLE, store.ctx.Clock)
			}
			args := putArgs(key, []byte(fmt.Sprintf("value%02d", j)), 1, store.StoreID())
			args.Txn = txn
			if _, err := client.SendWrapped(store, nil, &args); err != nil {
				t.Fatal(err)
			}
		}

		// Scan the range and verify count. Do this in a goroutine in case
		// it isn't expected to finish.
		sArgs := scanArgs(keys[0], keys[9].Next(), 1, store.StoreID())
		var sReply *roachpb.ScanResponse
		ts := store.Clock().Now()
		if !test.consistent {
			sArgs.ReadConsistency = roachpb.INCONSISTENT
		}
		done := make(chan struct{})
		go func() {
			// Note: t.Fatal must not be called from a goroutine other than
			// the one running the test, so report errors with t.Error instead.
			if reply, err := client.SendWrappedAt(store, nil, ts, &sArgs); err != nil {
				t.Errorf("%d: scan failed: %s", i, err)
			} else {
				sReply = reply.(*roachpb.ScanResponse)
			}
			close(done)
		}()

		wait := 1 * time.Second
		if !test.expFinish {
			wait = 10 * time.Millisecond
		}
		select {
		case <-done:
			if sReply != nil && len(sReply.Rows) != 0 {
				t.Errorf("expected empty scan result; got %+v", sReply.Rows)
			}
			if countVal := atomic.LoadInt32(countPtr); countVal != test.expCount {
				t.Errorf("%d: expected scan count %d; got %d", i, test.expCount, countVal)
			}
		case <-time.After(wait):
			if test.expFinish {
				t.Errorf("%d: scan failed to finish after %s", i, wait)
			} else {
				// Commit the unpushable txn so the read can finish.
				etArgs := endTxnArgs(txn, true, 1, store.StoreID())
				for _, key := range keys {
					etArgs.Intents = append(etArgs.Intents, roachpb.Intent{Key: key})
				}
				if _, err := client.SendWrapped(store, nil, &etArgs); err != nil {
					t.Fatal(err)
				}
				<-done
			}
		}
	}
}
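
One generic pattern worth pulling out of the test above is how it handles a call that may legitimately never return: run it in a goroutine and select between completion and a timeout. A small self-contained sketch of that pattern (runWithTimeout is an invented helper, not part of the test suite):

package main

import (
	"fmt"
	"time"
)

// runWithTimeout runs call in its own goroutine and waits up to wait for it
// to finish, so a case that is expected to block does not hang the caller.
func runWithTimeout(call func(), wait time.Duration) bool {
	done := make(chan struct{})
	go func() {
		call()
		close(done)
	}()
	select {
	case <-done:
		return true
	case <-time.After(wait):
		return false
	}
}

func main() {
	finished := runWithTimeout(func() { time.Sleep(5 * time.Millisecond) }, time.Second)
	fmt.Println(finished) // true
}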
Code Example #8
File: store_test.go  Project: GokulSrinivas/cockroach
// TestStoreResolveWriteIntentNoTxn verifies that reads and writes
// which are not part of a transaction can push intents.
func TestStoreResolveWriteIntentNoTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	key := roachpb.Key("a")
	pushee := newTransaction("test", key, 1, roachpb.SERIALIZABLE, store.ctx.Clock)
	pushee.Priority = 0 // pushee should lose all conflicts

	// First, lay down intent from pushee.
	args := putArgs(key, []byte("value1"), 1, store.StoreID())
	args.Txn = pushee
	if _, err := client.SendWrapped(store, nil, &args); err != nil {
		t.Fatal(err)
	}

	// Now, try to read outside a transaction.
	gArgs := getArgs(key, 1, store.StoreID())
	getTS := store.ctx.Clock.Now()
	gArgs.UserPriority = proto.Int32(math.MaxInt32)
	if reply, err := client.SendWrappedAt(store, nil, getTS, &gArgs); err != nil {
		t.Errorf("expected read to succeed: %s", err)
	} else if gReply := reply.(*roachpb.GetResponse); gReply.Value != nil {
		t.Errorf("expected value to be nil, got %+v", gReply.Value)
	}

	// Next, try to write outside of a transaction. We will succeed in pushing txn.
	putTS := store.ctx.Clock.Now()
	args.Value.Bytes = []byte("value2")
	args.Txn = nil
	args.UserPriority = proto.Int32(math.MaxInt32)
	if _, err := client.SendWrappedAt(store, nil, putTS, &args); err != nil {
		t.Errorf("expected success aborting pushee's txn; got %s", err)
	}

	// Read pushee's txn.
	txnKey := keys.TransactionKey(pushee.Key, pushee.ID)
	var txn roachpb.Transaction
	if ok, err := engine.MVCCGetProto(store.Engine(), txnKey, roachpb.ZeroTimestamp, true, nil, &txn); !ok || err != nil {
		t.Fatalf("not found or err: %s", err)
	}
	if txn.Status != roachpb.ABORTED {
		t.Errorf("expected pushee to be aborted; got %s", txn.Status)
	}

	// Verify that the pushee's timestamp was moved forward by the earlier
	// read, whose timestamp was available in the write intent error.
	expTS := getTS
	expTS.Logical++
	if !txn.Timestamp.Equal(expTS) {
		t.Errorf("expected pushee timestamp pushed to %s; got %s", expTS, txn.Timestamp)
	}
	// Similarly, verify that pushee's priority was moved from 0
	// to math.MaxInt32-1 during push.
	if txn.Priority != math.MaxInt32-1 {
		t.Errorf("expected pushee priority to be pushed to %d; got %d", math.MaxInt32-1, txn.Priority)
	}

	// Finally, try to end the pushee's transaction; it should have
	// been aborted.
	etArgs := endTxnArgs(pushee, true, 1, store.StoreID())
	_, err := client.SendWrapped(store, nil, &etArgs)
	if err == nil {
		t.Errorf("unexpected success committing transaction")
	}
	if _, ok := err.(*roachpb.TransactionAbortedError); !ok {
		t.Errorf("expected transaction aborted error; got %s", err)
	}
}
Code Example #9
// TestTxnPutOutOfOrder tests a case where a put operation of an older
// timestamp comes after a put operation of a newer timestamp in a
// txn. The test ensures such an out-of-order put succeeds and
// overwrites the old value. The test uses a "Writer" and a "Reader"
// to reproduce an out-of-order put.
//
// 1) The Writer executes a put operation and writes a write intent with
//    time T in a txn.
// 2) Before the Writer's txn is committed, the Reader sends a high priority
//    get operation with time T+100. This pushes the Writer txn timestamp to
//    T+100 and triggers the restart of the Writer's txn. The original
//    write intent timestamp is also updated to T+100.
// 3) The Writer starts a new epoch of the txn, but before it writes, the
//    Reader sends another high priority get operation with time T+200. This
//    pushes the Writer txn timestamp to T+200 to trigger a restart of the
//    Writer txn. The Writer will not actually restart until it tries to commit
//    the current epoch of the transaction. The Reader updates the timestamp of
//    the write intent to T+200. The test deliberately fails the Reader get
//    operation, and cockroach doesn't update its read timestamp cache.
// 4) The Writer executes the put operation again. This put operation comes
//    out-of-order since its timestamp is T+100, while the intent timestamp
//    updated at Step 3 is T+200.
// 5) The put operation overwrites the old value using timestamp T+100.
// 6) When the Writer attempts to commit its txn, the txn will be restarted
//    again at a new epoch timestamp T+200, which will finally succeed.
func TestTxnPutOutOfOrder(t *testing.T) {
	defer leaktest.AfterTest(t)

	key := "key"
	// Set up a filter so that the get operation at Step 3 returns an error.
	var numGets int32
	storage.TestingCommandFilter = func(args roachpb.Request) error {
		if _, ok := args.(*roachpb.GetRequest); ok &&
			args.Header().Key.Equal(roachpb.Key(key)) &&
			args.Header().Txn == nil {
			// The Reader executes two get operations, each of which triggers two get requests
			// (the first request fails and triggers txn push, and then the second request
			// succeeds). Returns an error for the fourth get request to avoid timestamp cache
			// update after the third get operation pushes the txn timestamp.
			if atomic.AddInt32(&numGets, 1) == 4 {
				return util.Errorf("Test")
			}
		}
		return nil
	}
	defer func() {
		storage.TestingCommandFilter = nil
	}()

	manualClock := hlc.NewManualClock(0)
	clock := hlc.NewClock(manualClock.UnixNano)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	store := createTestStoreWithEngine(t,
		engine.NewInMem(roachpb.Attributes{}, 10<<20, stopper),
		clock,
		true,
		nil,
		stopper)

	// Put an initial value.
	initVal := []byte("initVal")
	err := store.DB().Put(key, initVal)
	if err != nil {
		t.Fatalf("failed to put: %s", err)
	}

	waitPut := make(chan struct{})
	waitFirstGet := make(chan struct{})
	waitTxnRestart := make(chan struct{})
	waitSecondGet := make(chan struct{})
	waitTxnComplete := make(chan struct{})

	// Start the Writer.
	go func() {
		epoch := -1
		// Start a txn that does read-after-write.
		// The txn will be restarted twice, and the out-of-order put
		// will happen in the second epoch.
		if err := store.DB().Txn(func(txn *client.Txn) error {
			epoch++

			if epoch == 1 {
				// Wait until the second get operation is issued.
				close(waitTxnRestart)
				<-waitSecondGet
			}

			updatedVal := []byte("updatedVal")
			if err := txn.Put(key, updatedVal); err != nil {
				return err
			}

			// Make sure a get will return the value that was just written.
			actual, err := txn.Get(key)
			if err != nil {
				return err
			}
			if !bytes.Equal(actual.ValueBytes(), updatedVal) {
				t.Fatalf("unexpected get result: %s", actual)
			}

			if epoch == 0 {
				// Wait until the first get operation has pushed the txn timestamp.
				close(waitPut)
				<-waitFirstGet
			}

			b := &client.Batch{}
			err = txn.CommitInBatch(b)
			return err
		}); err != nil {
			t.Fatal(err)
		}

		if epoch != 2 {
			t.Fatalf("unexpected number of txn retries: %d", epoch)
		}

		close(waitTxnComplete)
	}()

	<-waitPut

	// Start the Reader.

	// Advance the clock and send a get operation with higher
	// priority to trigger the txn restart.
	manualClock.Increment(100)

	priority := int32(math.MaxInt32)
	requestHeader := roachpb.RequestHeader{
		Key:          roachpb.Key(key),
		RangeID:      1,
		Replica:      roachpb.ReplicaDescriptor{StoreID: store.StoreID()},
		UserPriority: &priority,
	}
	ts := clock.Now()
	if _, err := client.SendWrappedAt(store, nil, ts, &roachpb.GetRequest{RequestHeader: requestHeader}); err != nil {
		t.Fatalf("failed to get: %s", err)
	}

	// Wait until the writer restarts the txn.
	close(waitFirstGet)
	<-waitTxnRestart

	// Advance the clock and send a get operation again. This time
	// we use TestingCommandFilter so that a get operation is not
	// processed after the write intent is resolved (to prevent the
	// timestamp cache from being updated).
	manualClock.Increment(100)

	ts = clock.Now()
	if _, err := client.SendWrappedAt(store, nil, ts, &roachpb.GetRequest{RequestHeader: requestHeader}); err == nil {
		t.Fatal("unexpected success of get")
	}

	close(waitSecondGet)
	<-waitTxnComplete
}
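
The interleaving described in the numbered steps above is forced purely with channels: each side closes a "stage" channel to hand control to the other. A stripped-down sketch of that choreography, with invented channel names and print statements standing in for the actual Put/Get operations:

package main

import "fmt"

func main() {
	writerReady := make(chan struct{})
	readerDone := make(chan struct{})
	writerDone := make(chan struct{})

	go func() {
		fmt.Println("writer: first step (e.g. lay down an intent)")
		close(writerReady) // hand control to the reader
		<-readerDone       // block until the reader has run
		fmt.Println("writer: second step (e.g. retry and commit)")
		close(writerDone)
	}()

	<-writerReady
	fmt.Println("reader: runs strictly between the writer's steps")
	close(readerDone)
	<-writerDone
}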
Code Example #10
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := StartTestServer(t)
	defer s.Stop()
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
	tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, nil, s.stopper)

	if err := s.node.ctx.DB.AdminSplit("m"); err != nil {
		t.Fatal(err)
	}
	writes := []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}
	get := &roachpb.GetRequest{
		RequestHeader: roachpb.RequestHeader{Key: writes[0]},
	}
	get.EndKey = writes[len(writes)-1]
	if _, err := client.SendWrapped(tds, nil, get); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	var delTS roachpb.Timestamp
	for i, k := range writes {
		put := roachpb.NewPut(k, roachpb.Value{Bytes: k})
		reply, err := client.SendWrapped(tds, nil, put)
		if err != nil {
			t.Fatal(err)
		}
		scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*roachpb.ScanRequest)
		// The Put's timestamp may have been pushed by the timestamp cache,
		// so scan at that timestamp to make sure we see the written values.
		delTS = reply.(*roachpb.PutResponse).Timestamp
		reply, err = client.SendWrappedAt(tds, nil, delTS, scan)
		if err != nil {
			t.Fatal(err)
		}
		sr := reply.(*roachpb.ScanResponse)
		if sr.Txn != nil {
			// This was the other way around at some point in the past.
			// Same below for Delete, etc.
			t.Errorf("expected no transaction in response header")
		}
		if rows := sr.Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}

	del := &roachpb.DeleteRangeRequest{
		RequestHeader: roachpb.RequestHeader{
			Key:    writes[0],
			EndKey: roachpb.Key(writes[len(writes)-1]).Next(),
		},
	}
	reply, err := client.SendWrappedAt(tds, nil, delTS, del)
	if err != nil {
		t.Fatal(err)
	}
	dr := reply.(*roachpb.DeleteRangeResponse)
	if dr.Txn != nil {
		t.Errorf("expected no transaction in response header")
	}
	if n := dr.NumDeleted; n != int64(len(writes)) {
		t.Errorf("expected %d keys to be deleted, but got %d instead",
			len(writes), n)
	}

	scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*roachpb.ScanRequest)
	scan.Txn = &roachpb.Transaction{Name: "MyTxn"}
	reply, err = client.SendWrappedAt(tds, nil, dr.Timestamp, scan)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*roachpb.ScanResponse)
	if txn := sr.Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := sr.Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}
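
For context on what "commands which access multiple ranges" means here: after the AdminSplit at "m", a scan from "a" through "z" has to be broken into one sub-span per range before the pieces are dispatched. A toy sketch of that partitioning (splitSpan is a hypothetical helper, not DistSender):

package main

import "fmt"

// splitSpan partitions the half-open span [start, end) at every split key
// that falls strictly inside it, yielding one sub-span per range.
func splitSpan(start, end string, splits []string) [][2]string {
	var parts [][2]string
	cur := start
	for _, s := range splits {
		if s > cur && s < end {
			parts = append(parts, [2]string{cur, s})
			cur = s
		}
	}
	return append(parts, [2]string{cur, end})
}

func main() {
	// The test splits at "m", so a scan from "a" through "z" touches two ranges.
	fmt.Println(splitSpan("a", "z\x00", []string{"m"})) // [[a m] [m z<NUL>]]
}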
Code Example #11
// TestGCQueueProcess creates test data in the range over various time
// scales and verifies that the GC queue process properly GCs the test data.
func TestGCQueueProcess(t *testing.T) {
	defer leaktest.AfterTest(t)
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	const now int64 = 48 * 60 * 60 * 1E9 // 2d past the epoch
	tc.manualClock.Set(now)

	ts1 := makeTS(now-2*24*60*60*1E9+1, 0)                     // 2d old (add one nanosecond so we're not using zero timestamp)
	ts2 := makeTS(now-25*60*60*1E9, 0)                         // 25h old; past the default 24h GC TTL
	ts3 := makeTS(now-intentAgeThreshold.Nanoseconds(), 0)     // 2h old
	ts4 := makeTS(now-(intentAgeThreshold.Nanoseconds()-1), 0) // 2h-1ns old
	ts5 := makeTS(now-1E9, 0)                                  // 1s old
	key1 := roachpb.Key("a")
	key2 := roachpb.Key("b")
	key3 := roachpb.Key("c")
	key4 := roachpb.Key("d")
	key5 := roachpb.Key("e")
	key6 := roachpb.Key("f")
	key7 := roachpb.Key("g")
	key8 := roachpb.Key("h")
	key9 := roachpb.Key("i")

	data := []struct {
		key roachpb.Key
		ts  roachpb.Timestamp
		del bool
		txn bool
	}{
		// For key1, we expect first two values to GC.
		{key1, ts1, false, false},
		{key1, ts2, false, false},
		{key1, ts5, false, false},
		// For key2, we expect all values to GC, because most recent is deletion.
		{key2, ts1, false, false},
		{key2, ts2, false, false},
		{key2, ts5, true, false},
		// For key3, we expect just ts1 to GC, because most recent deletion is intent.
		{key3, ts1, false, false},
		{key3, ts2, false, false},
		{key3, ts5, true, true},
		// For key4, expect oldest value to GC.
		{key4, ts1, false, false},
		{key4, ts2, false, false},
		// For key5, expect all values to GC (most recent value deleted).
		{key5, ts1, false, false},
		{key5, ts2, true, false},
		// For key6, expect no values to GC because most recent value is intent.
		{key6, ts1, false, false},
		{key6, ts5, false, true},
		// For key7, expect no values to GC because the intent is just under 2h old.
		{key7, ts2, false, false},
		{key7, ts4, false, true},
		// For key8, expect most recent value to resolve by aborting, which will clean it up.
		{key8, ts2, false, false},
		{key8, ts3, true, true},
		// For key9, resolve naked intent with no remaining values.
		{key9, ts3, true, false},
	}

	for i, datum := range data {
		if datum.del {
			dArgs := deleteArgs(datum.key, tc.rng.Desc().RangeID, tc.store.StoreID())
			if datum.txn {
				dArgs.Txn = newTransaction("test", datum.key, 1, roachpb.SERIALIZABLE, tc.clock)
				dArgs.Txn.OrigTimestamp = datum.ts
				dArgs.Txn.Timestamp = datum.ts
			}
			if _, err := client.SendWrappedAt(tc.rng, tc.rng.context(), datum.ts, &dArgs); err != nil {
				t.Fatalf("%d: could not delete data: %s", i, err)
			}
		} else {
			pArgs := putArgs(datum.key, []byte("value"), tc.rng.Desc().RangeID, tc.store.StoreID())
			if datum.txn {
				pArgs.Txn = newTransaction("test", datum.key, 1, roachpb.SERIALIZABLE, tc.clock)
				pArgs.Txn.OrigTimestamp = datum.ts
				pArgs.Txn.Timestamp = datum.ts
			}
			if _, err := client.SendWrappedAt(tc.rng, tc.rng.context(), datum.ts, &pArgs); err != nil {
				t.Fatalf("%d: could not put data: %s", i, err)
			}
		}
	}

	cfg := tc.gossip.GetSystemConfig()
	if cfg == nil {
		t.Fatal("nil config")
	}

	// Process through a scan queue.
	gcQ := newGCQueue(tc.gossip)
	if err := gcQ.process(tc.clock.Now(), tc.rng, cfg); err != nil {
		t.Fatal(err)
	}

	expKVs := []struct {
		key roachpb.Key
		ts  roachpb.Timestamp
	}{
		{key1, roachpb.ZeroTimestamp},
		{key1, ts5},
		{key3, roachpb.ZeroTimestamp},
		{key3, ts5},
		{key3, ts2},
		{key4, roachpb.ZeroTimestamp},
		{key4, ts2},
		{key6, roachpb.ZeroTimestamp},
		{key6, ts5},
		{key6, ts1},
		{key7, roachpb.ZeroTimestamp},
		{key7, ts4},
		{key7, ts2},
		{key8, roachpb.ZeroTimestamp},
		{key8, ts2},
	}
	// Read data directly from engine to avoid intent errors from MVCC.
	kvs, err := engine.Scan(tc.store.Engine(), engine.MVCCEncodeKey(key1), engine.MVCCEncodeKey(keys.TableDataPrefix), 0)
	if err != nil {
		t.Fatal(err)
	}
	for i, kv := range kvs {
		key, ts, isValue, err := engine.MVCCDecodeKey(kv.Key)
		if err != nil {
			t.Fatal(err)
		}
		if isValue {
			if log.V(1) {
				log.Infof("%d: %q, ts=%s", i, key, ts)
			}
		} else {
			if log.V(1) {
				log.Infof("%d: %q meta", i, key)
			}
		}
	}
	if len(kvs) != len(expKVs) {
		t.Fatalf("expected length %d; got %d", len(expKVs), len(kvs))
	}
	for i, kv := range kvs {
		key, ts, isValue, err := engine.MVCCDecodeKey(kv.Key)
		if err != nil {
			t.Fatal(err)
		}
		if !key.Equal(expKVs[i].key) {
			t.Errorf("%d: expected key %q; got %q", i, expKVs[i].key, key)
		}
		if !ts.Equal(expKVs[i].ts) {
			t.Errorf("%d: expected ts=%s; got %s", i, expKVs[i].ts, ts)
		}
		if isValue {
			if log.V(1) {
				log.Infof("%d: %q, ts=%s", i, key, ts)
			}
		} else {
			if log.V(1) {
				log.Infof("%d: %q meta", i, key)
			}
		}
	}

	// Verify the GC metadata: last scan time and oldest extant intent age.
	gcMeta, err := tc.rng.GetGCMetadata()
	if err != nil {
		t.Fatal(err)
	}
	if gcMeta.LastScanNanos != now {
		t.Errorf("expected last scan nanos=%d; got %d", now, gcMeta.LastScanNanos)
	}
	if *gcMeta.OldestIntentNanos != ts4.WallTime {
		t.Errorf("expected oldest intent nanos=%d; got %d", ts4.WallTime, gcMeta.OldestIntentNanos)
	}

	// Verify that the last verification timestamp was updated as the whole range was scanned.
	ts, err := tc.rng.GetLastVerificationTimestamp()
	if err != nil {
		t.Fatal(err)
	}
	if gcMeta.LastScanNanos != ts.WallTime {
		t.Errorf("expected walltime nanos %d; got %d", gcMeta.LastScanNanos, ts.WallTime)
	}
}
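
The timestamps in the test data above are chosen around two cut-offs: versions older than the GC TTL (the default 24h zone-config value this test assumes) become collectable once superseded or deleted, and intents are only considered once they are older than the 2h intentAgeThreshold. A simplified sketch of those cut-off calculations, not the gcQueue implementation:

package main

import (
	"fmt"
	"time"
)

// gcBefore returns the wall time (nanoseconds) at or below which superseded
// versions are eligible for collection.
func gcBefore(nowNanos int64, ttl time.Duration) int64 {
	return nowNanos - ttl.Nanoseconds()
}

// intentBefore returns the wall time below which intents are old enough for
// the GC pass to try to push or resolve them.
func intentBefore(nowNanos int64, ageThreshold time.Duration) int64 {
	return nowNanos - ageThreshold.Nanoseconds()
}

func main() {
	now := int64(48 * 60 * 60 * 1e9) // 2d past the epoch, as in the test
	fmt.Println("collect superseded values at or below:", gcBefore(now, 24*time.Hour))
	fmt.Println("consider intents older than:", intentBefore(now, 2*time.Hour))
}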