Example 1
// TestKVClientPrepareAndFlush prepares a sequence of increment calls,
// flushes them, and verifies the results.
func TestKVClientPrepareAndFlush(t *testing.T) {
	s := StartTestServer(t)
	defer s.Stop()
	kvClient := createTestClient(s.HTTPAddr)
	kvClient.User = storage.UserRoot

	replies := []*proto.IncrementResponse{}
	keys := []proto.Key{}
	for i := 0; i < 10; i++ {
		key := proto.Key(fmt.Sprintf("key %02d", i))
		keys = append(keys, key)
		reply := &proto.IncrementResponse{}
		replies = append(replies, reply)
		kvClient.Prepare(proto.Increment, proto.IncrementArgs(key, int64(i)), reply)
	}

	if err := kvClient.Flush(); err != nil {
		t.Fatal(err)
	}

	for i, reply := range replies {
		if reply.NewValue != int64(i) {
			t.Errorf("%d: expected %d; got %d", i, i, reply.NewValue)
		}
	}

	// Now try 2 scans.
	scan1 := &proto.ScanResponse{}
	scan2 := &proto.ScanResponse{}
	kvClient.Prepare(proto.Scan, proto.ScanArgs(proto.Key("key 00"), proto.Key("key 05"), 0), scan1)
	kvClient.Prepare(proto.Scan, proto.ScanArgs(proto.Key("key 05"), proto.Key("key 10"), 0), scan2)

	if err := kvClient.Flush(); err != nil {
		t.Fatal(err)
	}

	if len(scan1.Rows) != 5 || len(scan2.Rows) != 5 {
		t.Errorf("expected scan results to include 5 and 5 rows; got %d and %d",
			len(scan1.Rows), len(scan2.Rows))
	}
	for i := 0; i < 5; i++ {
		if key := scan1.Rows[i].Key; !key.Equal(keys[i]) {
			t.Errorf("expected scan1 key %d to be %q; got %q", i, keys[i], key)
		}
		if val := scan1.Rows[i].Value.GetInteger(); val != int64(i) {
			t.Errorf("expected scan1 result %d to be %d; got %d", i, i, val)
		}

		if key := scan2.Rows[i].Key; !key.Equal(keys[i+5]) {
			t.Errorf("expected scan2 key %d to be %q; got %q", i, keys[i+5], key)
		}
		if val := scan2.Rows[i].Value.GetInteger(); val != int64(i+5) {
			t.Errorf("expected scan2 result %d to be %d; got %d", i, i+5, val)
		}
	}
}
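
For comparison, here is a minimal sketch of issuing the same increments sequentially, assuming kvClient is the *client.KV built by createTestClient above (the concrete type is taken from Example 3's client.NewKV); each Call round-trips on its own instead of being batched by Prepare/Flush.

// incrementSequentially is a sketch, not part of the test above: it performs
// the ten increments one Call at a time rather than preparing and flushing
// them as a single batch.
func incrementSequentially(kvClient *client.KV) error {
	for i := 0; i < 10; i++ {
		key := proto.Key(fmt.Sprintf("key %02d", i))
		reply := &proto.IncrementResponse{}
		if err := kvClient.Call(proto.Increment, proto.IncrementArgs(key, int64(i)), reply); err != nil {
			return err
		}
	}
	return nil
}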
Example 2
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	s := startTestServer(t)
	defer s.Stop()
	tds := kv.NewTxnCoordSender(kv.NewDistSender(s.Gossip()), s.Clock(), testContext.Linearizable)
	defer tds.Close()

	if err := s.node.db.Call(proto.AdminSplit,
		&proto.AdminSplitRequest{
			RequestHeader: proto.RequestHeader{
				Key: proto.Key("m"),
			},
			SplitKey: proto.Key("m"),
		}, &proto.AdminSplitResponse{}); err != nil {
		t.Fatal(err)
	}
	writes := []proto.Key{proto.Key("a"), proto.Key("z")}
	get := &client.Call{
		Method: proto.Get,
		Args:   proto.GetArgs(writes[0]),
		Reply:  &proto.GetResponse{},
	}
	get.Args.Header().User = storage.UserRoot
	get.Args.Header().EndKey = writes[len(writes)-1]
	tds.Send(get)
	if err := get.Reply.Header().GoError(); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	var call *client.Call
	for i, k := range writes {
		call = &client.Call{
			Method: proto.Put,
			Args:   proto.PutArgs(k, k),
			Reply:  &proto.PutResponse{},
		}
		call.Args.Header().User = storage.UserRoot
		tds.Send(call)
		if err := call.Reply.Header().GoError(); err != nil {
			t.Fatal(err)
		}
		scan := &client.Call{
			Method: proto.Scan,
			Args:   proto.ScanArgs(writes[0], writes[len(writes)-1].Next(), 0),
			Reply:  &proto.ScanResponse{},
		}
		// The Put's timestamp may have been pushed by the timestamp cache,
		// so make sure we see the written values in our Scan.
		scan.Args.Header().Timestamp = call.Reply.Header().Timestamp
		scan.Args.Header().User = storage.UserRoot
		tds.Send(scan)
		if err := scan.Reply.Header().GoError(); err != nil {
			t.Fatal(err)
		}
		if scan.Reply.Header().Txn == nil {
			t.Errorf("expected Scan to be wrapped in a Transaction")
		}
		if rows := scan.Reply.(*proto.ScanResponse).Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}
	del := &client.Call{
		Method: proto.DeleteRange,
		Args: &proto.DeleteRangeRequest{
			RequestHeader: proto.RequestHeader{
				User:      storage.UserRoot,
				Key:       writes[0],
				EndKey:    writes[len(writes)-1].Next(),
				Timestamp: call.Reply.Header().Timestamp,
			},
		},
		Reply: &proto.DeleteRangeResponse{},
	}
	tds.Send(del)
	if err := del.Reply.Header().GoError(); err != nil {
		t.Fatal(err)
	}
	if del.Reply.Header().Txn == nil {
		t.Errorf("expected DeleteRange to be wrapped in a Transaction")
	}
	if n := del.Reply.(*proto.DeleteRangeResponse).NumDeleted; n != int64(len(writes)) {
		t.Errorf("expected %d keys to be deleted, but got %d instead",
			len(writes), n)
	}

	scan := &client.Call{
		Method: proto.Scan,
		Args:   proto.ScanArgs(writes[0], writes[len(writes)-1].Next(), 0),
		Reply:  &proto.ScanResponse{},
	}
	scan.Args.Header().Timestamp = del.Reply.Header().Timestamp
	scan.Args.Header().User = storage.UserRoot
	scan.Args.Header().Txn = &proto.Transaction{Name: "MyTxn"}
	tds.Send(scan)
	if err := scan.Reply.Header().GoError(); err != nil {
		t.Fatal(err)
	}
	if txn := scan.Reply.Header().Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := scan.Reply.(*proto.ScanResponse).Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}
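
The per-call pattern above (build a client.Call, set the root user, send it, check GoError) repeats several times; a hypothetical helper, sketched below under the assumption that kv.NewTxnCoordSender returns a *kv.TxnCoordSender, would condense it.

// sendRootCall is a hypothetical helper, not part of the test above: it sets
// the root user on the request header, sends the call through the sender, and
// returns any error carried in the reply header.
func sendRootCall(tds *kv.TxnCoordSender, call *client.Call) error {
	call.Args.Header().User = storage.UserRoot
	tds.Send(call)
	return call.Reply.Header().GoError()
}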
Example 3
// This example uses the Prepare() method to submit multiple Key Value API
// operations to be run in parallel. Flush() is then used to begin execution
// of all the prepared operations.
func ExampleKV_Prepare() {
	// Using built-in test server for this example code.
	serv := StartTestServer(nil)
	defer serv.Stop()

	// Replace with the actual host:port address string (e.g. "localhost:8080") of the server cluster.
	serverAddress := serv.HTTPAddr

	// Key Value Client initialization.
	sender := client.NewHTTPSender(serverAddress, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	})
	kvClient := client.NewKV(sender, nil)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()

	// Insert test data.
	batchSize := 12
	keys := make([]string, batchSize)
	values := make([][]byte, batchSize)
	for i := 0; i < batchSize; i++ {
		keys[i] = fmt.Sprintf("key-%03d", i)
		values[i] = []byte(fmt.Sprintf("value-%03d", i))

		putReq := proto.PutArgs(proto.Key(keys[i]), values[i])
		putResp := &proto.PutResponse{}
		kvClient.Prepare(proto.Put, putReq, putResp)
	}

	// Flush all puts for parallel execution.
	if err := kvClient.Flush(); err != nil {
		log.Fatal(err)
	}

	// Scan for the newly inserted rows in parallel.
	numScans := 3
	rowsPerScan := batchSize / numScans
	scanResponses := make([]proto.ScanResponse, numScans)
	for i := 0; i < numScans; i++ {
		firstKey := proto.Key(keys[i*rowsPerScan])
		lastKey := proto.Key(keys[((i+1)*rowsPerScan)-1])
		kvClient.Prepare(proto.Scan, proto.ScanArgs(firstKey, lastKey.Next(), int64(rowsPerScan)), &scanResponses[i])
	}
	// Flush all scans for parallel execution.
	if err := kvClient.Flush(); err != nil {
		log.Fatal(err)
	}

	// Check the results, which may be returned out of order relative to creation.
	var matchCount int
	for i := 0; i < numScans; i++ {
		for _, keyVal := range scanResponses[i].Rows {
			currKey := keyVal.Key
			currValue := keyVal.Value.Bytes
			for j, origKey := range keys {
				if bytes.Equal(currKey, proto.Key(origKey)) && bytes.Equal(currValue, values[j]) {
					matchCount++
				}
			}
		}
	}
	if matchCount != batchSize {
		log.Fatal("Data mismatch.")
	}

	fmt.Println("Prepare Flush example done.")
	// Output: Prepare Flush example done.
}
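
The example only counts key/value matches across all scans. A small additional check, sketched below against the scanResponses slice and rowsPerScan value defined above, would also assert that each scan returned exactly the expected number of rows.

// checkScanRowCounts is a sketch of an extra assertion for the example above:
// every scan is expected to return exactly rowsPerScan rows.
func checkScanRowCounts(scanResponses []proto.ScanResponse, rowsPerScan int) error {
	for i := range scanResponses {
		if got := len(scanResponses[i].Rows); got != rowsPerScan {
			return fmt.Errorf("scan %d returned %d rows; expected %d", i, got, rowsPerScan)
		}
	}
	return nil
}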
Example 4
// TestUncertaintyMaxTimestampForwarding checks that we correctly read from
// hosts for which we control the uncertainty, by verifying that when a
// transaction restarts after an uncertain read, it also takes into account
// the target node's clock at the time of the failed read when forwarding the
// read timestamp.
// This is a prerequisite for being able to prevent further uncertainty
// restarts for that node and transaction without sacrificing correctness.
// See proto.Transaction.CertainNodes for details.
func TestUncertaintyMaxTimestampForwarding(t *testing.T) {
	db, eng, clock, mClock, _, transport, err := createTestDB()
	if err != nil {
		t.Fatal(err)
	}
	defer transport.Close()
	// Large offset so that any value in the future is an uncertain read.
	// Also makes sure that the values we write in the future below don't
	// actually wind up in the past.
	clock.SetMaxOffset(50000 * time.Millisecond)

	txnOpts := &client.TransactionOptions{
		Name: "uncertainty",
	}

	offsetNS := int64(100)
	keySlow := proto.Key("slow")
	keyFast := proto.Key("fast")
	valSlow := []byte("wols")
	valFast := []byte("tsaf")

	// Write keySlow at now+offset, keyFast at now+2*offset
	futureTS := clock.Now()
	futureTS.WallTime += offsetNS
	err = engine.MVCCPut(eng, nil, keySlow, futureTS,
		proto.Value{Bytes: valSlow}, nil)
	if err != nil {
		t.Fatal(err)
	}
	futureTS.WallTime += offsetNS
	err = engine.MVCCPut(eng, nil, keyFast, futureTS,
		proto.Value{Bytes: valFast}, nil)
	if err != nil {
		t.Fatal(err)
	}

	i := 0
	if tErr := db.RunTransaction(txnOpts, func(txn *client.KV) error {
		i++
		// The first command serves to start a Txn, fixing the timestamps.
		// There will be a restart, but this is idempotent.
		sr := &proto.ScanResponse{}
		if err = txn.Call(proto.Scan, proto.ScanArgs(proto.Key("t"), proto.Key("t"),
			0), sr); err != nil {
			t.Fatal(err)
		}

		// The server's clock suddenly jumps ahead of keyFast's timestamp.
		// There will be a restart, but this is idempotent.
		mClock.Set(2*offsetNS + 1)

		// Now read keySlow first. It should read at 0, catch an uncertainty error,
		// and get keySlow's timestamp in that error, but upgrade it to the larger
		// node clock (which is ahead of keyFast as well). If the last part does
		// not happen, the read of keyFast should fail (i.e. read nothing).
		// There will be exactly one restart here.
		gr := &proto.GetResponse{}
		if err = txn.Call(proto.Get, proto.GetArgs(keySlow), gr); err != nil {
			if i != 1 {
				t.Errorf("unexpected transaction error: %v", err)
			}
			return err
		}
		if gr.Value == nil || !bytes.Equal(gr.Value.Bytes, valSlow) {
			t.Errorf("read of %q returned %v, wanted value %q", keySlow, gr.Value,
				valSlow)
		}

		gr.Reset()
		// The node should already be certain, so we expect no restart here
		// and to read the correct key.
		if err = txn.Call(proto.Get, proto.GetArgs(keyFast), gr); err != nil {
			t.Errorf("second Get failed with %v", err)
		}
		if gr.Value == nil || !bytes.Equal(gr.Value.Bytes, valFast) {
			t.Errorf("read of %q returned %v, wanted value %q", keyFast, gr.Value,
				valFast)
		}
		return nil
	}); tErr != nil {
		t.Fatal(tErr)
	}
}
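
The test above relies on RunTransaction re-invoking its closure after each restart, using the counter i to tell the first attempt apart from retries. The sketch below isolates that pattern, assuming db is the *client.KV returned by createTestDB.

// countTxnAttempts is a sketch, not part of the test above: it counts how many
// times the transaction closure runs. The body must be idempotent because each
// restart re-runs the closure from the top.
func countTxnAttempts(db *client.KV, opts *client.TransactionOptions) (int, error) {
	attempts := 0
	err := db.RunTransaction(opts, func(txn *client.KV) error {
		attempts++
		// Transaction operations would go here.
		return nil
	})
	return attempts, err
}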