Code example #1
// TestKVClientEmptyValues verifies that empty values are preserved
// for both empty []byte and integer=0. This used to fail when we
// allowed the protobufs to be gob-encoded using the default Go RPC
// gob codec, because gob treats pointer values and non-pointer values
// as equivalent and elides zero-valued defaults on decode.
func TestKVClientEmptyValues(t *testing.T) {
	s := StartTestServer(t)
	defer s.Stop()
	kvClient := createTestClient(s.HTTPAddr)
	kvClient.User = storage.UserRoot

	kvClient.Call(proto.Put, proto.PutArgs(proto.Key("a"), []byte{}), &proto.PutResponse{})
	kvClient.Call(proto.Put, &proto.PutRequest{
		RequestHeader: proto.RequestHeader{
			Key: proto.Key("b"),
		},
		Value: proto.Value{
			Integer: gogoproto.Int64(0),
		},
	}, &proto.PutResponse{})

	getResp := &proto.GetResponse{}
	kvClient.Call(proto.Get, proto.GetArgs(proto.Key("a")), getResp)
	if bytes := getResp.Value.Bytes; bytes == nil || len(bytes) != 0 {
		t.Errorf("expected non-nil empty byte slice; got %q", bytes)
	}
	kvClient.Call(proto.Get, proto.GetArgs(proto.Key("b")), getResp)
	if intVal := getResp.Value.Integer; intVal == nil || *intVal != 0 {
		t.Errorf("expected non-nil 0-valued integer; got %p, %d", getResp.Value.Integer, getResp.Value.GetInteger())
	}
}
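
A minimal, stdlib-only sketch of the gob behavior described in the comment above: gob flattens pointers and omits zero-valued fields on encode, so a *int64 pointing at 0 decodes as nil. The payload type here is a hypothetical stand-in for proto.Value.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

type payload struct {
	Integer *int64 // stand-in for proto.Value.Integer
}

func main() {
	zero := int64(0)
	in := payload{Integer: &zero}

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		panic(err)
	}
	var out payload
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	// Prints <nil>: the pointer-to-zero field was elided in transit.
	fmt.Println(out.Integer)
}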
Code example #2
// TestKVClientRunTransaction verifies some simple transaction isolation
// semantics.
func TestKVClientRunTransaction(t *testing.T) {
	client.TxnRetryOptions.Backoff = 1 * time.Millisecond

	s := StartTestServer(t)
	defer s.Stop()
	kvClient := createTestClient(s.HTTPAddr)
	kvClient.User = storage.UserRoot

	for _, commit := range []bool{true, false} {
		value := []byte("value")
		key := []byte(fmt.Sprintf("key-%t", commit))

		// Use snapshot isolation so non-transactional read can always push.
		err := kvClient.RunTransaction(&client.TransactionOptions{Isolation: proto.SNAPSHOT}, func(txn *client.KV) error {
			// Put transactional value.
			if err := txn.Call(proto.Put, proto.PutArgs(key, value), &proto.PutResponse{}); err != nil {
				return err
			}
			// Attempt to read outside of txn.
			gr := &proto.GetResponse{}
			if err := kvClient.Call(proto.Get, proto.GetArgs(key), gr); err != nil {
				return err
			}
			if gr.Value != nil {
				return util.Errorf("expected nil value; got %+v", gr.Value)
			}
			// Read within the transaction.
			if err := txn.Call(proto.Get, proto.GetArgs(key), gr); err != nil {
				return err
			}
			if gr.Value == nil || !bytes.Equal(gr.Value.Bytes, value) {
				return util.Errorf("expected value %q; got %q", value, gr.Value.Bytes)
			}
			if !commit {
				return errors.New("purposefully failing transaction")
			}
			return nil
		})

		if commit != (err == nil) {
			t.Errorf("expected success? %t; got %s", commit, err)
		} else if !commit && err.Error() != "purposefully failing transaction" {
			t.Errorf("unexpected failure with !commit: %s", err)
		}

		// Verify the value is now visible on commit == true, and not visible otherwise.
		gr := &proto.GetResponse{}
		err = kvClient.Call(proto.Get, proto.GetArgs(key), gr)
		if commit {
			if err != nil || gr.Value == nil || !bytes.Equal(gr.Value.Bytes, value) {
				t.Errorf("expected success reading value: %+v, %s", gr.Value, err)
			}
		} else {
			if err != nil || gr.Value != nil {
				t.Errorf("expected success and nil value: %+v, %s", gr.Value, err)
			}
		}
	}
}
Code example #3
File: txn_test.go Project: josephwinston/cockroach
// Indirectly this tests that the transaction correctly remembers the NodeID
// of the node being read from, at least in this simple case. Not remembering
// the node would lead to thousands of transaction restarts and almost
// certainly a test timeout.
func TestUncertaintyRestarts(t *testing.T) {
	{
		db, eng, clock, mClock, _, transport, err := createTestDB()
		if err != nil {
			t.Fatal(err)
		}
		defer transport.Close()
		// Set a large offset so that a busy restart-loop
		// really shows. Also makes sure that the values
		// we write in the future below don't actually
		// wind up in the past.
		offset := 4000 * time.Millisecond
		clock.SetMaxOffset(offset)
		key := proto.Key("key")
		value := proto.Value{
			Bytes: nil, // Set for each Put
		}
		// With the correct restart behaviour, we see only one restart
		// and the value read is the very first one (as nothing else
		// has been written).
		wantedBytes := []byte("value-0")

		txnOpts := &client.TransactionOptions{
			Name: "uncertainty",
		}
		gr := &proto.GetResponse{}
		i := -1
		tErr := db.RunTransaction(txnOpts, func(txn *client.KV) error {
			i++
			mClock.Increment(1)
			futureTS := clock.Now()
			futureTS.WallTime++
			value.Bytes = []byte(fmt.Sprintf("value-%d", i))
			err = engine.MVCCPut(eng, nil, key, futureTS, value, nil)
			if err != nil {
				t.Fatal(err)
			}
			gr.Reset()
			if err := txn.Call(proto.Get, proto.GetArgs(key), gr); err != nil {
				return err
			}
			if gr.Value == nil || !bytes.Equal(gr.Value.Bytes, wantedBytes) {
				t.Fatalf("%d: read wrong value: %v, wanted %q", i,
					gr.Value, wantedBytes)
			}
			return nil
		})
		if i != 1 {
			t.Errorf("txn restarted %d times, expected only one restart", i)
		}
		if tErr != nil {
			t.Fatal(tErr)
		}
	}
}
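
The restart condition exercised above can be reduced to plain timestamp arithmetic. The helper below is an illustrative sketch, not the real kv package API: a value whose timestamp falls within (readTS, readTS+maxOffset] may have been written before the read in absolute time, so the transaction must restart.

package main

import "fmt"

// isUncertain sketches the uncertainty check: values in the window
// (readTS, readTS+maxOffset] force a restart at a forwarded timestamp.
func isUncertain(readTS, valueTS, maxOffset int64) bool {
	return valueTS > readTS && valueTS <= readTS+maxOffset
}

func main() {
	readTS, maxOffset := int64(100), int64(4000)
	fmt.Println(isUncertain(readTS, readTS+1, maxOffset))    // true: restart
	fmt.Println(isUncertain(readTS, readTS+5000, maxOffset)) // false: definitely in the future
}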
Code example #4
// This is an example of using the Call() method to Put and then Get
// a value for a given key.
func ExampleKV_Call() {
	// Using built-in test server for this example code.
	serv := StartTestServer(nil)
	defer serv.Stop()

	// Replace with an actual host:port address string (e.g. "localhost:8080") for a server cluster.
	serverAddress := serv.HTTPAddr

	// Key Value Client initialization.
	sender := client.NewHTTPSender(serverAddress, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	})
	kvClient := client.NewKV(sender, nil)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()

	key := proto.Key("a")
	value := []byte{1, 2, 3, 4}

	// Store test value.
	putResp := &proto.PutResponse{}
	if err := kvClient.Call(proto.Put, proto.PutArgs(key, value), putResp); err != nil {
		log.Fatal(err)
	}

	// Retrieve test value using same key.
	getResp := &proto.GetResponse{}
	if err := kvClient.Call(proto.Get, proto.GetArgs(key), getResp); err != nil {
		log.Fatal(err)
	}

	// Data validation.
	if getResp.Value == nil {
		log.Fatal("No value returned.")
	}
	if !bytes.Equal(value, getResp.Value.Bytes) {
		log.Fatal("Data mismatch on retrieved value.")
	}

	fmt.Println("Client example done.")
	// Output: Client example done.
}
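
As an aside, ExampleKV_Call is a Go testable example: the go test runner executes it, captures stdout, and compares the result against the trailing // Output: comment, failing the test on a mismatch. A minimal standalone instance (the package name is hypothetical):

package example_test

import "fmt"

func ExampleHello() {
	fmt.Println("hello")
	// Output: hello
}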
Code example #5
// TestMultiRangeScanDeleteRange verifies that commands which access
// multiple ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	s := startTestServer(t)
	defer s.Stop()
	tds := kv.NewTxnCoordSender(kv.NewDistSender(s.Gossip()), s.Clock(), testContext.Linearizable)
	defer tds.Close()

	if err := s.node.db.Call(proto.AdminSplit,
		&proto.AdminSplitRequest{
			RequestHeader: proto.RequestHeader{
				Key: proto.Key("m"),
			},
			SplitKey: proto.Key("m"),
		}, &proto.AdminSplitResponse{}); err != nil {
		t.Fatal(err)
	}
	writes := []proto.Key{proto.Key("a"), proto.Key("z")}
	get := &client.Call{
		Method: proto.Get,
		Args:   proto.GetArgs(writes[0]),
		Reply:  &proto.GetResponse{},
	}
	get.Args.Header().User = storage.UserRoot
	get.Args.Header().EndKey = writes[len(writes)-1]
	tds.Send(get)
	if err := get.Reply.Header().GoError(); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	var call *client.Call
	for i, k := range writes {
		call = &client.Call{
			Method: proto.Put,
			Args:   proto.PutArgs(k, k),
			Reply:  &proto.PutResponse{},
		}
		call.Args.Header().User = storage.UserRoot
		tds.Send(call)
		if err := call.Reply.Header().GoError(); err != nil {
			t.Fatal(err)
		}
		scan := &client.Call{
			Method: proto.Scan,
			Args:   proto.ScanArgs(writes[0], writes[len(writes)-1].Next(), 0),
			Reply:  &proto.ScanResponse{},
		}
		// The Puts' timestamps may have been pushed by the tsCache,
		// so make sure we see their values in our Scan.
		scan.Args.Header().Timestamp = call.Reply.Header().Timestamp
		scan.Args.Header().User = storage.UserRoot
		tds.Send(scan)
		if err := scan.Reply.Header().GoError(); err != nil {
			t.Fatal(err)
		}
		if scan.Reply.Header().Txn == nil {
			t.Errorf("expected Scan to be wrapped in a Transaction")
		}
		if rows := scan.Reply.(*proto.ScanResponse).Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}
	del := &client.Call{
		Method: proto.DeleteRange,
		Args: &proto.DeleteRangeRequest{
			RequestHeader: proto.RequestHeader{
				User:      storage.UserRoot,
				Key:       writes[0],
				EndKey:    proto.Key(writes[len(writes)-1]).Next(),
				Timestamp: call.Reply.Header().Timestamp,
			},
		},
		Reply: &proto.DeleteRangeResponse{},
	}
	tds.Send(del)
	if err := del.Reply.Header().GoError(); err != nil {
		t.Fatal(err)
	}
	if del.Reply.Header().Txn == nil {
		t.Errorf("expected DeleteRange to be wrapped in a Transaction")
	}
	if n := del.Reply.(*proto.DeleteRangeResponse).NumDeleted; n != int64(len(writes)) {
		t.Errorf("expected %d keys to be deleted, but got %d instead",
			len(writes), n)
	}

	scan := &client.Call{
		Method: proto.Scan,
		Args:   proto.ScanArgs(writes[0], writes[len(writes)-1].Next(), 0),
		Reply:  &proto.ScanResponse{},
	}
	scan.Args.Header().Timestamp = del.Reply.Header().Timestamp
	scan.Args.Header().User = storage.UserRoot
	scan.Args.Header().Txn = &proto.Transaction{Name: "MyTxn"}
	tds.Send(scan)
	if err := scan.Reply.Header().GoError(); err != nil {
		t.Fatal(err)
	}
	if txn := scan.Reply.Header().Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := scan.Reply.(*proto.ScanResponse).Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}
Code example #6
// TestKVClientRetryNonTxn verifies that a non-transactional client will
// succeed despite write/write and read/write conflicts. In the case
// where the non-transactional put can push the txn, we expect the
// transaction's value to be written after all retries are complete.
func TestKVClientRetryNonTxn(t *testing.T) {
	s := StartTestServer(t)
	defer s.Stop()
	s.SetRangeRetryOptions(util.RetryOptions{
		Backoff:     1 * time.Millisecond,
		MaxBackoff:  5 * time.Millisecond,
		Constant:    2,
		MaxAttempts: 2,
	})
	kvClient := createTestClient(s.HTTPAddr)
	kvClient.User = storage.UserRoot

	testCases := []struct {
		method      string
		isolation   proto.IsolationType
		canPush     bool
		expAttempts int
	}{
		// Write/write conflicts.
		{proto.Put, proto.SNAPSHOT, true, 2},
		{proto.Put, proto.SERIALIZABLE, true, 2},
		{proto.Put, proto.SNAPSHOT, false, 1},
		{proto.Put, proto.SERIALIZABLE, false, 1},
		// Read/write conflicts.
		{proto.Get, proto.SNAPSHOT, true, 1},
		{proto.Get, proto.SERIALIZABLE, true, 2},
		{proto.Get, proto.SNAPSHOT, false, 1},
		{proto.Get, proto.SERIALIZABLE, false, 1},
	}
	// Lay down a write intent using a txn and attempt to write to same
	// key. Try this twice--once with priorities which will allow the
	// intent to be pushed and once with priorities which will not.
	for i, test := range testCases {
		log.Infof("starting test case %d", i)
		key := proto.Key(fmt.Sprintf("key-%d", i))
		txnPri := int32(-1)
		clientPri := int32(-1)
		if test.canPush {
			clientPri = -2
		} else {
			txnPri = -2
		}
		kvClient.UserPriority = clientPri

		// doneCall signals when the non-txn read or write has completed.
		doneCall := make(chan struct{})
		count := 0 // keeps track of retries
		if err := kvClient.RunTransaction(&client.TransactionOptions{Isolation: test.isolation}, func(txn *client.KV) error {
			txn.UserPriority = txnPri
			count++
			// Lay down the intent.
			if err := txn.Call(proto.Put, proto.PutArgs(key, []byte("txn-value")), &proto.PutResponse{}); err != nil {
				return err
			}
			// The wait group lets us pause txn until after the non-txn method has run once.
			wg := sync.WaitGroup{}
			// On the first true, send the non-txn put or get.
			if count == 1 {
				// We use a "notifying" sender here, which allows us to know exactly when the
				// call has been processed; otherwise, we'd be dependent on timing.
				kvClient.Sender().(*notifyingSender).reset(&wg)
				// We must try the non-txn put or get in a goroutine because
				// it might have to retry and will only succeed immediately in
				// the event we can push.
				go func() {
					args, reply, err := proto.CreateArgsAndReply(test.method)
					if err != nil {
						t.Errorf("error creating args and reply for method %s: %s", test.method, err)
					}
					args.Header().Key = key
					if test.method == proto.Put {
						args.(*proto.PutRequest).Value.Bytes = []byte("value")
					}
					for i := 0; ; i++ {
						err = kvClient.Call(test.method, args, reply)
						if _, ok := err.(*proto.WriteIntentError); !ok {
							break
						}
					}
					close(doneCall)
					if err != nil {
						t.Fatalf("%d: expected success on non-txn call to %s; got %s", i, err, test.method)
					}
				}()
				kvClient.Sender().(*notifyingSender).wait()
			}
			return nil
		}); err != nil {
			t.Fatalf("%d: expected success writing transactionally; got %s", i, err)
		}

		// Make sure non-txn put or get has finished.
		<-doneCall

		// Get the current value to verify whether the txn happened first.
		getReply := &proto.GetResponse{}
		if err := kvClient.Call(proto.Get, proto.GetArgs(key), getReply); err != nil {
			t.Fatalf("%d: expected success getting %q: %s", i, key, err)
		}
		if test.canPush || test.method == proto.Get {
			if !bytes.Equal(getReply.Value.Bytes, []byte("txn-value")) {
				t.Errorf("%d: expected \"txn-value\"; got %q", i, getReply.Value.Bytes)
			}
		} else {
			if !bytes.Equal(getReply.Value.Bytes, []byte("value")) {
				t.Errorf("%d: expected \"value\"; got %q", i, getReply.Value.Bytes)
			}
		}
		if count != test.expAttempts {
			t.Errorf("%d: expected %d attempt(s); got %d", i, test.expAttempts, count)
		}
	}
}
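
The inner loop above, which retries while the error is a *proto.WriteIntentError, is a general Go pattern: keep retrying only while the failure has a specific retryable type. A self-contained sketch, with retryableErr as a hypothetical stand-in for the write-intent error:

package main

import "fmt"

// retryableErr is a hypothetical stand-in for proto.WriteIntentError.
type retryableErr struct{}

func (*retryableErr) Error() string { return "write intent conflict" }

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return &retryableErr{} // conflict: the caller should retry
		}
		return nil
	}

	var err error
	for {
		err = op()
		// Stop as soon as the error is nil or of a non-retryable type.
		if _, ok := err.(*retryableErr); !ok {
			break
		}
	}
	fmt.Printf("attempts=%d err=%v\n", attempts, err) // attempts=3 err=<nil>
}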
Code example #7
// concurrentIncrements starts two goroutines in parallel, each of which
// reads the integer stored at the other's key and adds it to its own.
// It checks that the outcome is serializable, i.e. that exactly one of
// the two goroutines (the later writer) sees the other's earlier write.
func concurrentIncrements(kvClient *client.KV, t *testing.T) {
	// wgStart waits for all transactions to line up, wgEnd has the main
	// function wait for them to finish.
	var wgStart, wgEnd sync.WaitGroup
	wgStart.Add(2 + 1)
	wgEnd.Add(2)

	for i := 0; i < 2; i++ {
		go func(i int) {
			// Read the other key, write key i.
			readKey := []byte(fmt.Sprintf("value-%d", (i+1)%2))
			writeKey := []byte(fmt.Sprintf("value-%d", i))
			defer wgEnd.Done()
			wgStart.Done()
			// Wait until the other goroutines are running.
			wgStart.Wait()

			txnOpts := &client.TransactionOptions{
				Name: fmt.Sprintf("test-%d", i),
			}
			if err := kvClient.RunTransaction(txnOpts, func(txn *client.KV) error {
				// Retrieve the other key.
				gr := &proto.GetResponse{}
				if err := txn.Call(proto.Get, proto.GetArgs(readKey), gr); err != nil {
					return err
				}

				otherValue := int64(0)
				if gr.Value != nil && gr.Value.Integer != nil {
					otherValue = *gr.Value.Integer
				}

				pr := &proto.IncrementResponse{}
				pa := proto.IncrementArgs(writeKey, 1+otherValue)
				if err := txn.Call(proto.Increment, pa, pr); err != nil {
					return err
				}

				return nil
			}); err != nil {
				t.Error(err)
			}
		}(i)
	}

	// Kick the goroutines loose.
	wgStart.Done()
	// Wait for the goroutines to finish.
	wgEnd.Wait()
	// Verify that both keys contain something and, more importantly, that
	// one key actually contains the value of the first writer and not only
	// its own.
	total := int64(0)
	results := []int64(nil)
	for i := 0; i < 2; i++ {
		readKey := []byte(fmt.Sprintf("value-%d", i))
		gr := &proto.GetResponse{}
		if err := kvClient.Call(proto.Get, proto.GetArgs(readKey), gr); err != nil {
			t.Fatal(err)
		}
		if gr.Value == nil || gr.Value.Integer == nil {
			t.Fatalf("unexpected empty key: %v=%v", readKey, gr.Value)
		}
		total += *gr.Value.Integer
		results = append(results, *gr.Value.Integer)
	}

	// The first writer should end up with 1, the second with 2.
	if total != 3 {
		t.Fatalf("got unserializable values %v", results)
	}
}
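
The wgStart.Add(2 + 1) bookkeeping above is a start barrier: each worker plus the coordinator checks in with Done() and then blocks in Wait() until all have arrived, so the two transactions begin at roughly the same time. A minimal sketch of the same pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 2
	var wgStart, wgEnd sync.WaitGroup
	wgStart.Add(workers + 1) // the workers plus the main goroutine
	wgEnd.Add(workers)

	for i := 0; i < workers; i++ {
		go func(i int) {
			defer wgEnd.Done()
			wgStart.Done()
			wgStart.Wait() // block until everyone has lined up
			fmt.Printf("worker %d running\n", i)
		}(i)
	}

	wgStart.Done() // the main goroutine checks in, releasing the barrier
	wgEnd.Wait()   // wait for the workers to finish
}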
Code example #8
// This is an example of using the RunTransaction() method to submit
// multiple Key Value API operations inside a transaction.
func ExampleKV_RunTransaction() {
	// Using built-in test server for this example code.
	serv := StartTestServer(nil)
	defer serv.Stop()

	// Replace with an actual host:port address string (e.g. "localhost:8080") for a server cluster.
	serverAddress := serv.HTTPAddr

	// Key Value Client initialization.
	sender := client.NewHTTPSender(serverAddress, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	})
	kvClient := client.NewKV(sender, nil)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()

	// Create test data.
	numKVPairs := 10
	keys := make([]string, numKVPairs)
	values := make([][]byte, numKVPairs)
	for i := 0; i < numKVPairs; i++ {
		keys[i] = fmt.Sprintf("testkey-%03d", i)
		values[i] = []byte(fmt.Sprintf("testvalue-%03d", i))
	}

	// Insert all KV pairs inside a transaction.
	putOpts := client.TransactionOptions{Name: "example put"}
	err := kvClient.RunTransaction(&putOpts, func(txn *client.KV) error {
		for i := 0; i < numKVPairs; i++ {
			txn.Prepare(proto.Put, proto.PutArgs(proto.Key(keys[i]), values[i]), &proto.PutResponse{})
		}
		// Note that the KV client is flushed automatically on transaction
		// commit. Invoking Flush after individual API methods is only
		// required if the result needs to be received to take conditional
		// action.
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read back KV pairs inside a transaction.
	getResponses := make([]proto.GetResponse, numKVPairs)
	getOpts := client.TransactionOptions{Name: "example get"}
	err = kvClient.RunTransaction(&getOpts, func(txn *client.KV) error {
		for i := 0; i < numKVPairs; i++ {
			txn.Prepare(proto.Get, proto.GetArgs(proto.Key(keys[i])), &getResponses[i])
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Check results.
	for i, getResp := range getResponses {
		if getResp.Value == nil {
			log.Fatal("No value returned for ", keys[i])
		} else {
			if !bytes.Equal(values[i], getResp.Value.Bytes) {
				log.Fatal("Data mismatch for ", keys[i], ", got: ", getResp.Value.Bytes)
			}
		}
	}

	fmt.Println("Transaction example done.")
	// Output: Transaction example done.
}
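
Note the contrast with ExampleKV_Call earlier: Call sends each request synchronously, while Prepare only queues it. As the comment in the transaction body explains, the queued calls are flushed as a batch when the transaction commits, so an explicit Flush is only needed when a result must be inspected mid-transaction.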
Code example #9
File: txn_test.go Project: josephwinston/cockroach
// TestTxnDBBasics verifies that a simple transaction can be run and
// either committed or aborted. On commit, mutations are visible; on
// abort, mutations are never visible. During the txn, verify that
// uncommitted writes cannot be read outside of the txn but can be
// read from inside the txn.
func TestTxnDBBasics(t *testing.T) {
	db, _, _, _, _, transport, err := createTestDB()
	if err != nil {
		t.Fatal(err)
	}
	defer transport.Close()
	value := []byte("value")

	for _, commit := range []bool{true, false} {
		key := []byte(fmt.Sprintf("key-%t", commit))

		// Use snapshot isolation so non-transactional read can always push.
		txnOpts := &client.TransactionOptions{
			Name:      "test",
			Isolation: proto.SNAPSHOT,
		}
		err := db.RunTransaction(txnOpts, func(txn *client.KV) error {
			// Put transactional value.
			if err := txn.Call(proto.Put, proto.PutArgs(key, value), &proto.PutResponse{}); err != nil {
				return err
			}

			// Attempt to read outside of txn.
			gr := &proto.GetResponse{}
			if err := db.Call(proto.Get, proto.GetArgs(key), gr); err != nil {
				return err
			}
			if gr.Value != nil {
				return util.Errorf("expected nil value; got %+v", gr.Value)
			}

			// Read within the transaction.
			if err := txn.Call(proto.Get, proto.GetArgs(key), gr); err != nil {
				return err
			}
			if gr.Value == nil || !bytes.Equal(gr.Value.Bytes, value) {
				return util.Errorf("expected value %q; got %q", value, gr.Value.Bytes)
			}

			if !commit {
				return errors.New("purposefully failing transaction")
			}
			return nil
		})

		if commit != (err == nil) {
			t.Errorf("expected success? %t; got %s", commit, err)
		} else if !commit && err.Error() != "purposefully failing transaction" {
			t.Errorf("unexpected failure with !commit: %s", err)
		}

		// Verify the value is now visible on commit == true, and not visible otherwise.
		gr := &proto.GetResponse{}
		err = db.Call(proto.Get, proto.GetArgs(key), gr)
		if commit {
			if err != nil || gr.Value == nil || !bytes.Equal(gr.Value.Bytes, value) {
				t.Errorf("expected success reading value: %+v, %s", gr.Value, err)
			}
		} else {
			if err != nil || gr.Value != nil {
				t.Errorf("expected success and nil value: %+v, %s", gr.Value, err)
			}
		}
	}
}
Code example #10
File: txn_test.go Project: josephwinston/cockroach
// TestUncertaintyMaxTimestampForwarding checks that we correctly read from
// hosts for which we control the uncertainty, by verifying that when a
// transaction restarts after an uncertain read, it also takes into account
// the target node's clock at the time of the failed read when forwarding the
// read timestamp.
// This is a prerequisite for being able to prevent further uncertainty
// restarts for that node and transaction without sacrificing correctness.
// See proto.Transaction.CertainNodes for details.
func TestUncertaintyMaxTimestampForwarding(t *testing.T) {
	db, eng, clock, mClock, _, transport, err := createTestDB()
	if err != nil {
		t.Fatal(err)
	}
	defer transport.Close()
	// Large offset so that any value in the future is an uncertain read.
	// Also makes sure that the values we write in the future below don't
	// actually wind up in the past.
	clock.SetMaxOffset(50000 * time.Millisecond)

	txnOpts := &client.TransactionOptions{
		Name: "uncertainty",
	}

	offsetNS := int64(100)
	keySlow := proto.Key("slow")
	keyFast := proto.Key("fast")
	valSlow := []byte("wols")
	valFast := []byte("tsaf")

	// Write keySlow at now+offset, keyFast at now+2*offset
	futureTS := clock.Now()
	futureTS.WallTime += offsetNS
	err = engine.MVCCPut(eng, nil, keySlow, futureTS,
		proto.Value{Bytes: valSlow}, nil)
	if err != nil {
		t.Fatal(err)
	}
	futureTS.WallTime += offsetNS
	err = engine.MVCCPut(eng, nil, keyFast, futureTS,
		proto.Value{Bytes: valFast}, nil)
	if err != nil {
		t.Fatal(err)
	}

	i := 0
	if tErr := db.RunTransaction(txnOpts, func(txn *client.KV) error {
		i++
		// The first command serves to start a Txn, fixing the timestamps.
		// There will be a restart, but this is idempotent.
		sr := &proto.ScanResponse{}
		if err = txn.Call(proto.Scan, proto.ScanArgs(proto.Key("t"), proto.Key("t"),
			0), sr); err != nil {
			t.Fatal(err)
		}

		// The server's clock suddenly jumps ahead of keyFast's timestamp.
		// There will be a restart, but this is idempotent.
		mClock.Set(2*offsetNS + 1)

		// Now read slowKey first. It should read at 0, catch an uncertainty error,
		// and get keySlow's timestamp in that error, but upgrade it to the larger
		// node clock (which is ahead of keyFast as well). If the last part does
		// not happen, the read of keyFast should fail (i.e. read nothing).
		// There will be exactly one restart here.
		gr := &proto.GetResponse{}
		if err = txn.Call(proto.Get, proto.GetArgs(keySlow), gr); err != nil {
			if i != 1 {
				t.Errorf("unexpected transaction error: %v", err)
			}
			return err
		}
		if gr.Value == nil || !bytes.Equal(gr.Value.Bytes, valSlow) {
			t.Errorf("read of %q returned %v, wanted value %q", keySlow, gr.Value,
				valSlow)
		}

		gr.Reset()
		// The node should already be certain, so we expect no restart here
		// and to read the correct key.
		if err = txn.Call(proto.Get, proto.GetArgs(keyFast), gr); err != nil {
			t.Errorf("second Get failed with %v", err)
		}
		if gr.Value == nil || !bytes.Equal(gr.Value.Bytes, valFast) {
			t.Errorf("read of %q returned %v, wanted value %q", keyFast, gr.Value,
				valFast)
		}
		return nil
	}); tErr != nil {
		t.Fatal(tErr)
	}
}
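
The forwarding rule the test verifies reduces to simple timestamp arithmetic. The helper below is an illustrative sketch, not the real API: after an uncertain read, the transaction restarts at the node clock observed during the failed read, so that node cannot trigger another uncertainty restart.

package main

import "fmt"

// forwardReadTS sketches the rule being tested: push the restart
// timestamp up to the node clock observed during the failed read.
func forwardReadTS(readTS, observedNodeClock int64) int64 {
	if observedNodeClock > readTS {
		return observedNodeClock
	}
	return readTS
}

func main() {
	// In the test, the node clock jumps to 2*offsetNS+1 = 201, ahead of
	// both future writes (at +100 and +200), so the forwarded timestamp
	// makes both keys readable with certainty.
	fmt.Println(forwardReadTS(0, 201)) // 201
}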