Example #1
// TestTxnCoordSenderEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestTxnCoordSenderEndTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	key := proto.Key("a")
	txn := newTxn(s.Clock, key)
	put := createPutRequest(key, []byte("value"), txn)
	reply, err := batchutil.SendWrapped(s.Sender, put)
	if err != nil {
		t.Fatal(err)
	}
	pReply := reply.(*proto.PutResponse)
	if _, err := batchutil.SendWrapped(s.Sender, &proto.EndTransactionRequest{
		RequestHeader: proto.RequestHeader{
			Key:       txn.Key,
			Timestamp: txn.Timestamp,
			Txn:       pReply.Header().Txn,
		},
		Commit: true,
	}); err != nil {
		t.Fatal(err)
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
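These tests rely on a few small helpers (newTxn, createPutRequest, verifyCleanup, teardownHeartbeats) whose definitions live elsewhere in the test files and are not shown here. A rough sketch of the two request-construction helpers, inferred purely from how the examples call them (the real definitions may differ):

// Hypothetical sketch of newTxn: a fresh transaction anchored at key and
// timestamped from the supplied clock. Everything beyond the fields the
// examples read back (Name, Key, Isolation, Timestamp) is an assumption,
// including the choice of proto.SERIALIZABLE.
func newTxn(clock *hlc.Clock, key proto.Key) *proto.Transaction {
	now := clock.Now()
	return &proto.Transaction{
		Name:          "test",
		Key:           key,
		Isolation:     proto.SERIALIZABLE,
		Timestamp:     now,
		OrigTimestamp: now,
	}
}

// Hypothetical sketch of createPutRequest: a Put carrying the transaction in
// its request header, which is what the examples expect when they inspect
// reply.Header().Txn afterwards.
func createPutRequest(key proto.Key, value []byte, txn *proto.Transaction) *proto.PutRequest {
	return &proto.PutRequest{
		RequestHeader: proto.RequestHeader{
			Key:       key,
			Timestamp: txn.Timestamp,
			Txn:       txn,
		},
		Value: proto.Value{Bytes: value},
	}
}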
Example #2
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setupMultipleRanges(t, "b")
	defer s.Stop()

	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := []string{"a", "b"}
	ts := []time.Time{}
	b := &client.Batch{}
	for _, key := range keys {
		b.Put(key, "value")
	}
	if err := db.Run(b); err != nil {
		t.Fatal(err)
	}
	for i := range keys {
		ts = append(ts, b.Results[i].Rows[0].Timestamp())
		log.Infof("%d: %s", i, b.Results[i].Rows[0].Timestamp())
	}

	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to just below the timestamp
	// of the second key ("b") so that the read sees only key "a".
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())

	// Scan.
	sa := proto.NewScan(proto.Key("a"), proto.Key("c"), 0).(*proto.ScanRequest)
	sa.ReadConsistency = proto.INCONSISTENT
	reply, err := batchutil.SendWrapped(ds, sa)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*proto.ScanResponse)

	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}

	// ReverseScan.
	rsa := proto.NewReverseScan(proto.Key("a"), proto.Key("c"), 0).(*proto.ReverseScanRequest)
	rsa.ReadConsistency = proto.INCONSISTENT
	reply, err = batchutil.SendWrapped(ds, rsa)
	if err != nil {
		t.Fatal(err)
	}
	rsr := reply.(*proto.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
Example #3
// TestMultiRangeScanWithMaxResults tests that a Scan spanning multiple
// ranges with a MaxResults limit returns the expected number of rows.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		splitKeys []proto.Key
		keys      []proto.Key
	}{
		{[]proto.Key{proto.Key("m")},
			[]proto.Key{proto.Key("a"), proto.Key("z")}},
		{[]proto.Key{proto.Key("h"), proto.Key("q")},
			[]proto.Key{proto.Key("b"), proto.Key("f"), proto.Key("k"),
				proto.Key("r"), proto.Key("w"), proto.Key("y")}},
	}

	for i, tc := range testCases {
		s := StartTestServer(t)
		defer s.Stop()
		ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
		tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, nil, s.stopper)

		for _, sk := range tc.splitKeys {
			if err := s.node.ctx.DB.AdminSplit(sk); err != nil {
				t.Fatal(err)
			}
		}

		var reply proto.Response
		for _, k := range tc.keys {
			put := proto.NewPut(k, proto.Value{Bytes: k})
			var err error
			reply, err = batchutil.SendWrapped(tds, put)
			if err != nil {
				t.Fatal(err)
			}
		}

		// Try every possible ScanRequest startKey.
		for start := 0; start < len(tc.keys); start++ {
			// Try every possible maxResults, from 1 to beyond the size of the key array.
			for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
				scan := proto.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
					int64(maxResults))
				scan.Header().Timestamp = reply.Header().Timestamp
				reply, err := batchutil.SendWrapped(tds, scan)
				if err != nil {
					t.Fatal(err)
				}
				rows := reply.(*proto.ScanResponse).Rows
				if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
					t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
				} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
					t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
				}
			}
		}
	}
}
Example #4
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	for i, tc := range []struct {
		args    proto.Request
		writing bool
		ok      bool
	}{
		{proto.NewGet(proto.Key("a")), true, true},
		{proto.NewGet(proto.Key("a")), false, true},
		{proto.NewPut(proto.Key("a"), proto.Value{}), false, true},
		{proto.NewPut(proto.Key("a"), proto.Value{}), true, false},
	} {
		{
			txn := newTxn(s.Clock, proto.Key("a"))
			txn.Writing = tc.writing
			tc.args.Header().Txn = txn
		}
		reply, err := batchutil.SendWrapped(s.Sender, tc.args)
		if (err == nil) != tc.ok {
			t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
				i, tc.args, tc.writing, tc.ok, err)
		}
		if err != nil {
			continue
		}

		txn := reply.Header().Txn
		// The transaction should come back in writing state if it started
		// that way or if we just performed a write.
		isWrite := proto.IsTransactionWrite(tc.args)
		if (tc.writing || isWrite) != txn.Writing {
			t.Errorf("%d: unexpected writing state: %s", i, txn)
		}
		if !isWrite {
			continue
		}
		// Abort for clean shutdown.
		if _, err := batchutil.SendWrapped(s.Sender, &proto.EndTransactionRequest{
			RequestHeader: proto.RequestHeader{
				Key:       txn.Key,
				Timestamp: txn.Timestamp,
				Txn:       txn,
			},
			Commit: false,
		}); err != nil {
			t.Fatal(err)
		}
	}
}
Example #5
// TestTxnCoordSenderCleanupOnAborted verifies that if a txn receives a
// TransactionAbortedError, the coordinator cleans up the transaction.
func TestTxnCoordSenderCleanupOnAborted(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Create a transaction with intent at "a".
	key := proto.Key("a")
	txn := newTxn(s.Clock, key)
	txn.Priority = 1
	put := createPutRequest(key, []byte("value"), txn)
	if reply, err := batchutil.SendWrapped(s.Sender, put); err != nil {
		t.Fatal(err)
	} else {
		txn = reply.Header().Txn
	}

	// Push the transaction to abort it.
	txn2 := newTxn(s.Clock, key)
	txn2.Priority = 2
	pushArgs := &proto.PushTxnRequest{
		RequestHeader: proto.RequestHeader{
			Key: txn.Key,
		},
		Now:       s.Clock.Now(),
		PusherTxn: txn2,
		PusheeTxn: *txn,
		PushType:  proto.ABORT_TXN,
	}
	if _, err := batchutil.SendWrapped(s.Sender, pushArgs); err != nil {
		t.Fatal(err)
	}

	// Now end the transaction and verify we've cleaned up, even though
	// the EndTransaction request itself fails.
	etArgs := &proto.EndTransactionRequest{
		RequestHeader: proto.RequestHeader{
			Key:       txn.Key,
			Timestamp: txn.Timestamp,
			Txn:       txn,
		},
		Commit: true,
	}
	_, err := batchutil.SendWrapped(s.Sender, etArgs)
	switch err.(type) {
	case *proto.TransactionAbortedError:
		// Expected
	default:
		t.Fatalf("expected transaction aborted error; got %s", err)
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
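Many of the TxnCoordSender examples share the fixture returned by createTestDB. Its definition is not included in this collection; judging from the fields the tests touch, its shape is roughly the following sketch (names and types are assumptions wherever the examples do not reveal them):

// Hypothetical shape of the createTestDB fixture, inferred from usage. The
// real type, its constructor, and the wiring between the fields live in the
// test support code and may differ.
type testDBFixture struct {
	Sender  *TxnCoordSender  // coordinator under test (s.Sender)
	Clock   *hlc.Clock       // cluster clock (s.Clock)
	Manual  *hlc.ManualClock // manual time source backing Clock (s.Manual)
	Eng     engine.Engine    // underlying storage engine (s.Eng); type assumed
	Stopper *stop.Stopper    // owns background tasks (s.Stopper)
}

// Stop shuts the fixture down; the examples invoke it via defer s.Stop().
func (s *testDBFixture) Stop() { s.Stopper.Stop() }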
Example #6
// TestRangeLookupOptionOnReverseScan verifies that a lookup triggered by a
// ReverseScan request has the `useReverseScan` option specified.
func TestRangeLookupOptionOnReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()

	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		return []gogoproto.Message{getReply()}, nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(k proto.Key, opts lookupOptions) ([]proto.RangeDescriptor, error) {
			if len(k) > 0 && !opts.useReverseScan {
				t.Fatalf("expected useReverseScan to be set")
			}
			return []proto.RangeDescriptor{testRangeDescriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	rScan := &proto.ReverseScanRequest{
		RequestHeader: proto.RequestHeader{Key: proto.Key("a"), EndKey: proto.Key("b")},
	}
	if _, err := batchutil.SendWrapped(ds, rScan); err != nil {
		t.Fatal(err)
	}
}
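Several DistSender examples stub out range descriptor lookups through mockRangeDescriptorDB. The conversions above suggest it is a function type satisfying the RangeDescriptorDB hook of DistSenderContext; a heavily hedged sketch follows (the method name and exact interface are assumptions, and the real interface may require more methods):

// Hypothetical sketch of mockRangeDescriptorDB, used to stub
// DistSenderContext.RangeDescriptorDB in these tests. The real definition
// lives in the dist_sender test code.
type mockRangeDescriptorDB func(proto.Key, lookupOptions) ([]proto.RangeDescriptor, error)

// getRangeDescriptors is an assumed method name; it simply delegates the
// lookup to the wrapped function.
func (mdb mockRangeDescriptorDB) getRangeDescriptors(key proto.Key, options lookupOptions) ([]proto.RangeDescriptor, error) {
	return mdb(key, options)
}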
Example #7
// TestTxnCoordSenderBeginTransaction verifies that a command sent with a
// non-nil Txn that has an empty ID gets a new transaction initialized.
func TestTxnCoordSenderBeginTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	key := proto.Key("key")
	reply, err := batchutil.SendWrapped(s.Sender, &proto.PutRequest{
		RequestHeader: proto.RequestHeader{
			Key:          key,
			UserPriority: gogoproto.Int32(-10), // negative user priority is translated into positive priority
			Txn: &proto.Transaction{
				Name:      "test txn",
				Isolation: proto.SNAPSHOT,
			},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	pr := reply.(*proto.PutResponse)
	if pr.Txn.Name != "test txn" {
		t.Errorf("expected txn name to be %q; got %q", "test txn", pr.Txn.Name)
	}
	if pr.Txn.Priority != 10 {
		t.Errorf("expected txn priority 10; got %d", pr.Txn.Priority)
	}
	if !bytes.Equal(pr.Txn.Key, key) {
		t.Errorf("expected txn Key to match %q != %q", key, pr.Txn.Key)
	}
	if pr.Txn.Isolation != proto.SNAPSHOT {
		t.Errorf("expected txn isolation to be SNAPSHOT; got %s", pr.Txn.Isolation)
	}
}
Example #8
// TestTxnCoordSenderGC verifies that the coordinator cleans up extant
// transactions after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	txn := newTxn(s.Clock, proto.Key("a"))
	put := createPutRequest(proto.Key("a"), []byte("value"), txn)
	if _, err := batchutil.SendWrapped(s.Sender, put); err != nil {
		t.Fatal(err)
	}

	// Now, advance clock past the default client timeout.
	// Locking the TxnCoordSender to prevent a data race.
	s.Sender.Lock()
	s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
	s.Sender.Unlock()

	if err := util.IsTrueWithin(func() bool {
		// Locking the TxnCoordSender to prevent a data race.
		s.Sender.Lock()
		_, ok := s.Sender.txns[string(txn.ID)]
		s.Sender.Unlock()
		return !ok
	}, 50*time.Millisecond); err != nil {
		t.Error("expected garbage collection")
	}
}
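util.IsTrueWithin, used here and again in the heartbeat test further down, presumably polls the supplied condition until it returns true or the given duration elapses. A local stand-in under that assumption (the real implementation lives in the util package and may differ):

// isTrueWithin is a hypothetical stand-in for util.IsTrueWithin as these
// tests use it: poll cond until it reports true or the timeout expires,
// returning an error on timeout.
func isTrueWithin(cond func() bool, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return nil
		}
		time.Sleep(time.Millisecond)
	}
	return fmt.Errorf("condition failed to evaluate to true within %s", timeout)
}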
Example #9
// TestSendRPCRetry verifies that when sendRPC fails on the first address but
// succeeds on the second, the reply from the second address is returned.
func TestSendRPCRetry(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	if err := g.SetNodeDescriptor(&proto.NodeDescriptor{NodeID: 1}); err != nil {
		t.Fatal(err)
	}
	// Fill RangeDescriptor with 2 replicas
	var descriptor = proto.RangeDescriptor{
		RangeID:  1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("z"),
	}
	for i := 1; i <= 2; i++ {
		addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
		nd := &proto.NodeDescriptor{
			NodeID:  proto.NodeID(i),
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}
		if err := g.AddInfoProto(gossip.MakeNodeIDKey(proto.NodeID(i)), nd, time.Hour); err != nil {
			t.Fatal(err)
		}

		descriptor.Replicas = append(descriptor.Replicas, proto.Replica{
			NodeID:  proto.NodeID(i),
			StoreID: proto.StoreID(i),
		})
	}
	// Define our rpcSend stub which returns success on the second address.
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		if method == "Node.Batch" {
			// The reply from the first address fails; discard it.
			_ = getReply()
			// The reply from the second address succeeds.
			batchReply := getReply().(*proto.BatchResponse)
			reply := &proto.ScanResponse{}
			batchReply.Add(reply)
			reply.Rows = []proto.KeyValue{{Key: proto.Key("b"), Value: proto.Value{}}}
			return []gogoproto.Message{batchReply}, nil
		}
		return nil, util.Errorf("unexpected method %v", method)
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{descriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	scan := proto.NewScan(proto.Key("a"), proto.Key("d"), 1)
	sr, err := batchutil.SendWrapped(ds, scan)
	if err != nil {
		t.Fatal(err)
	}
	if l := len(sr.(*proto.ScanResponse).Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
}
Example #10
// TestTxnCoordSenderKeyRanges verifies that multiple requests to the same
// or overlapping key ranges cause the coordinator to keep track of only
// the minimum number of ranges.
func TestTxnCoordSenderKeyRanges(t *testing.T) {
	defer leaktest.AfterTest(t)
	ranges := []struct {
		start, end proto.Key
	}{
		{proto.Key("a"), proto.Key(nil)},
		{proto.Key("a"), proto.Key(nil)},
		{proto.Key("aa"), proto.Key(nil)},
		{proto.Key("b"), proto.Key(nil)},
		{proto.Key("aa"), proto.Key("c")},
		{proto.Key("b"), proto.Key("c")},
	}

	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)
	txn := newTxn(s.Clock, proto.Key("a"))

	for _, rng := range ranges {
		if rng.end != nil {
			delRangeReq := createDeleteRangeRequest(rng.start, rng.end, txn)
			if _, err := batchutil.SendWrapped(s.Sender, delRangeReq); err != nil {
				t.Fatal(err)
			}
		} else {
			putReq := createPutRequest(rng.start, []byte("value"), txn)
			if _, err := batchutil.SendWrapped(s.Sender, putReq); err != nil {
				t.Fatal(err)
			}
		}
		txn.Writing = true // required for all but first req
	}

	// Verify that the transaction metadata contains only two entries
	// in its "keys" interval cache: key "a" and the range "aa"-"c".
	txnMeta, ok := s.Sender.txns[string(txn.ID)]
	if !ok {
		t.Fatalf("expected a transaction to be created on coordinator")
	}
	if txnMeta.keys.Len() != 2 {
		t.Errorf("expected 2 entries in keys interval cache; got %v", txnMeta.keys)
	}
}
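createDeleteRangeRequest is another helper not shown in this collection. By analogy with createPutRequest and the DeleteRangeRequest fields used in the last example (#26), it presumably looks something like this sketch (assumed, not the actual definition):

// Hypothetical sketch of createDeleteRangeRequest: a DeleteRange over
// [start, end) carrying the transaction in its request header.
func createDeleteRangeRequest(start, end proto.Key, txn *proto.Transaction) *proto.DeleteRangeRequest {
	return &proto.DeleteRangeRequest{
		RequestHeader: proto.RequestHeader{
			Key:       start,
			EndKey:    end,
			Timestamp: txn.Timestamp,
			Txn:       txn,
		},
	}
}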
Example #11
// TestRetryOnDescriptorLookupError verifies that the DistSender retries a descriptor
// lookup on retryable errors.
func TestRetryOnDescriptorLookupError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()

	var testFn rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr, _ func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		return []gogoproto.Message{getReply()}, nil
	}

	errors := []error{
		errors.New("fatal boom"),
		&proto.RangeKeyMismatchError{}, // retryable
		nil,
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(k proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			// Return the next error and pop it off the front of the errors slice.
			var err error
			if k != nil {
				err = errors[0]
				errors = errors[1:]
			}
			return []proto.RangeDescriptor{testRangeDescriptor}, err
		}),
	}
	ds := NewDistSender(ctx, g)
	put := proto.NewPut(proto.Key("a"), proto.Value{Bytes: []byte("value")})
	// Fatal error on descriptor lookup, propagated to reply.
	if _, err := batchutil.SendWrapped(ds, put); err.Error() != "fatal boom" {
		t.Errorf("unexpected error: %s", err)
	}
	// Retryable error on descriptor lookup, second attempt successful.
	if _, err := batchutil.SendWrapped(ds, put); err != nil {
		t.Errorf("unexpected error: %s", err)
	}
	if len(errors) != 0 {
		t.Fatalf("expected more descriptor lookups, leftover errors: %+v", errors)
	}
}
Example #12
// TestTxnCoordSenderMultipleTxns verifies correct operation with
// multiple outstanding transactions.
func TestTxnCoordSenderMultipleTxns(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn1 := newTxn(s.Clock, proto.Key("a"))
	txn2 := newTxn(s.Clock, proto.Key("b"))
	put1 := createPutRequest(proto.Key("a"), []byte("value"), txn1)
	if _, err := batchutil.SendWrapped(s.Sender, put1); err != nil {
		t.Fatal(err)
	}
	put2 := createPutRequest(proto.Key("b"), []byte("value"), txn2)
	if _, err := batchutil.SendWrapped(s.Sender, put2); err != nil {
		t.Fatal(err)
	}

	if len(s.Sender.txns) != 2 {
		t.Errorf("expected length of transactions map to be 2; got %d", len(s.Sender.txns))
	}
}
Example #13
// TestTxnCoordSenderAddRequest verifies that adding a request creates
// transaction metadata, and that adding multiple requests with the same
// transaction ID updates the last update timestamp.
func TestTxnCoordSenderAddRequest(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn := newTxn(s.Clock, proto.Key("a"))
	put := createPutRequest(proto.Key("a"), []byte("value"), txn)

	// Put request will create a new transaction.
	reply, err := batchutil.SendWrapped(s.Sender, put)
	if err != nil {
		t.Fatal(err)
	}
	txnMeta, ok := s.Sender.txns[string(txn.ID)]
	if !ok {
		t.Fatal("expected a transaction to be created on coordinator")
	}
	if !reply.Header().Txn.Writing {
		t.Fatal("response Txn is not marked as writing")
	}
	ts := atomic.LoadInt64(&txnMeta.lastUpdateNanos)

	// Advance time and send another put request. Lock the coordinator
	// to prevent a data race.
	s.Sender.Lock()
	s.Manual.Set(1)
	s.Sender.Unlock()
	put.Txn.Writing = true
	if _, err := batchutil.SendWrapped(s.Sender, put); err != nil {
		t.Fatal(err)
	}
	if len(s.Sender.txns) != 1 {
		t.Errorf("expected length of transactions map to be 1; got %d", len(s.Sender.txns))
	}
	txnMeta = s.Sender.txns[string(txn.ID)]
	if lu := atomic.LoadInt64(&txnMeta.lastUpdateNanos); ts >= lu || lu != s.Manual.UnixNano() {
		t.Errorf("expected last update time to advance; got %d", lu)
	}
}
Example #14
// TestRetryOnWrongReplicaError sets up a DistSender on a minimal gossip
// network and a mock of rpc.Send, and verifies that the DistSender correctly
// retries upon encountering a stale entry in its range descriptor cache.
func TestRetryOnWrongReplicaError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	// Updated below, after it has first been returned.
	badStartKey := proto.Key("m")
	newRangeDescriptor := testRangeDescriptor
	goodStartKey := newRangeDescriptor.StartKey
	newRangeDescriptor.StartKey = badStartKey
	descStale := true

	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		ba := getArgs(testAddress).(*proto.BatchRequest)
		if _, ok := ba.GetArg(proto.RangeLookup); ok {
			if !descStale && bytes.HasPrefix(ba.Key, keys.Meta2Prefix) {
				t.Errorf("unexpected extra lookup for non-stale replica descriptor at %s",
					ba.Key)
			}

			br := getReply().(*proto.BatchResponse)
			r := &proto.RangeLookupResponse{}
			r.Ranges = append(r.Ranges, newRangeDescriptor)
			br.Add(r)
			// If we just returned the stale descriptor, set up returning the
			// good one next time.
			if bytes.HasPrefix(ba.Key, keys.Meta2Prefix) {
				if newRangeDescriptor.StartKey.Equal(badStartKey) {
					newRangeDescriptor.StartKey = goodStartKey
				} else {
					descStale = false
				}
			}
			return []gogoproto.Message{br}, nil
		}
		// While the descriptor is still stale, the Scan hits the wrong range;
		// return a RangeKeyMismatchError so the DistSender refreshes its cache.
		if !newRangeDescriptor.StartKey.Equal(goodStartKey) {
			return nil, &proto.RangeKeyMismatchError{RequestStartKey: ba.Key,
				RequestEndKey: ba.EndKey}
		}
		return []gogoproto.Message{ba.CreateReply().(*proto.BatchResponse)}, nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
	}
	ds := NewDistSender(ctx, g)
	scan := proto.NewScan(proto.Key("a"), proto.Key("d"), 0)
	if _, err := batchutil.SendWrapped(ds, scan); err != nil {
		t.Errorf("scan encountered error: %s", err)
	}
}
Example #15
// getTxn sends a HeartbeatTxn request for the given transaction and returns
// the updated transaction record.
func getTxn(coord *TxnCoordSender, txn *proto.Transaction) (bool, *proto.Transaction, error) {
	hb := &proto.HeartbeatTxnRequest{
		RequestHeader: proto.RequestHeader{
			Key: txn.Key,
			Txn: txn,
		},
	}
	reply, err := batchutil.SendWrapped(coord, hb)
	if err != nil {
		return false, nil, err
	}
	return true, reply.(*proto.HeartbeatTxnResponse).Txn, nil
}
Example #16
// TestTxnCoordIdempotentCleanup verifies that cleanupTxn is idempotent.
func TestTxnCoordIdempotentCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	key := proto.Key("a")
	txn := newTxn(s.Clock, key)
	put := createPutRequest(key, []byte("value"), txn)
	if _, err := batchutil.SendWrapped(s.Sender, put); err != nil {
		t.Fatal(err)
	}
	s.Sender.cleanupTxn(nil, *txn) // first call
	if _, err := batchutil.SendWrapped(s.Sender, &proto.EndTransactionRequest{
		RequestHeader: proto.RequestHeader{
			Key:       txn.Key,
			Timestamp: txn.Timestamp,
			Txn:       txn,
		},
		Commit: true,
	}); /* second call */ err != nil {
		t.Fatal(err)
	}
}
Example #17
// TestRetryOnNotLeaderError verifies that the DistSender correctly updates the
// leader cache and retries when receiving a NotLeaderError.
func TestRetryOnNotLeaderError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	leader := proto.Replica{
		NodeID:  99,
		StoreID: 999,
	}
	first := true

	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		if first {
			reply := getReply()
			reply.(proto.Response).Header().SetGoError(
				&proto.NotLeaderError{Leader: &leader, Replica: &proto.Replica{}})
			first = false
			return []gogoproto.Message{reply}, nil
		}
		return []gogoproto.Message{getReply()}, nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{testRangeDescriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	put := proto.NewPut(proto.Key("a"), proto.Value{Bytes: []byte("value")})
	if _, err := batchutil.SendWrapped(ds, put); err != nil {
		t.Errorf("put encountered error: %s", err)
	}
	if first {
		t.Errorf("The command did not retry")
	}
	if cur := ds.leaderCache.Lookup(1); cur.StoreID != leader.StoreID {
		t.Errorf("leader cache was not updated: expected %v, got %v",
			&leader, cur)
	}
}
Example #18
// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	initialTxn := newTxn(s.Clock, proto.Key("a"))
	put := createPutRequest(proto.Key("a"), []byte("value"), initialTxn)
	if reply, err := batchutil.SendWrapped(s.Sender, put); err != nil {
		t.Fatal(err)
	} else {
		*initialTxn = *reply.Header().Txn
	}

	// Verify 3 heartbeats.
	var heartbeatTS proto.Timestamp
	for i := 0; i < 3; i++ {
		if err := util.IsTrueWithin(func() bool {
			ok, txn, err := getTxn(s.Sender, initialTxn)
			if !ok || err != nil {
				return false
			}
			// Advance clock by 1ns.
			// Locking the TxnCoordSender to prevent a data race.
			s.Sender.Lock()
			s.Manual.Increment(1)
			s.Sender.Unlock()
			if heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return true
			}
			return false
		}, 50*time.Millisecond); err != nil {
			t.Error("expected initial heartbeat within 50ms")
		}
	}
}
Example #19
// TestTxnCoordSenderBeginTransactionMinPriority verifies that when starting
// a new transaction, a non-zero priority is treated as a minimum value.
func TestTxnCoordSenderBeginTransactionMinPriority(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	reply, err := batchutil.SendWrapped(s.Sender, &proto.PutRequest{
		RequestHeader: proto.RequestHeader{
			Key:          proto.Key("key"),
			UserPriority: gogoproto.Int32(-10), // negative user priority is translated into positive priority
			Txn: &proto.Transaction{
				Name:      "test txn",
				Isolation: proto.SNAPSHOT,
				Priority:  11,
			},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	if prio := reply.(*proto.PutResponse).Txn.Priority; prio != 11 {
		t.Errorf("expected txn priority 11; got %d", prio)
	}
}
Example #20
// TestTxnDrainingNode tests that a pending transaction's intents are resolved
// if it commits while the node is draining, and that a NodeUnavailableError is
// received when attempting to run a new transaction on a draining node.
func TestTxnDrainingNode(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)

	done := make(chan struct{})
	// Dummy task that keeps the node in draining state.
	if !s.Stopper.RunAsyncTask(func() {
		<-done
	}) {
		t.Fatal("stopper draining prematurely")
	}

	key := proto.Key("a")
	txn := newTxn(s.Clock, key)
	beginTxn := func() {
		put := createPutRequest(key, []byte("value"), txn)
		if reply, err := batchutil.SendWrapped(s.Sender, put); err != nil {
			t.Fatal(err)
		} else {
			txn = reply.Header().Txn
		}
	}
	endTxn := func() {
		if _, err := batchutil.SendWrapped(s.Sender, &proto.EndTransactionRequest{
			RequestHeader: proto.RequestHeader{
				Timestamp: txn.Timestamp,
				Txn:       txn,
			},
			Commit: true}); err != nil {
			t.Fatal(err)
		}
	}

	beginTxn() // begin before draining
	go func() {
		s.Stopper.Stop()
	}()

	util.SucceedsWithin(t, time.Second, func() error {
		if s.Stopper.RunTask(func() {}) {
			return errors.New("stopper not yet draining")
		}
		return nil
	})
	endTxn()                               // commit after draining
	verifyCleanup(key, s.Sender, s.Eng, t) // make sure intent gets resolved

	// Attempt to start another transaction, but it should be too late.
	key = proto.Key("key")
	_, err := batchutil.SendWrapped(s.Sender, &proto.PutRequest{
		RequestHeader: proto.RequestHeader{
			Key: key,
			Txn: &proto.Transaction{
				Name: "test txn",
			},
		},
	})
	if _, ok := err.(*proto.NodeUnavailableError); !ok {
		teardownHeartbeats(s.Sender)
		t.Fatal(err)
	}
	close(done)
	<-s.Stopper.IsStopped()
}
Example #21
// TestTxnCoordSenderTxnUpdatedOnError verifies that errors adjust the
// response transaction's timestamp and priority as appropriate.
func TestTxnCoordSenderTxnUpdatedOnError(t *testing.T) {
	defer leaktest.AfterTest(t)
	t.Skip("TODO(tschottdorf): fix up and re-enable. It depends on each logical clock tick, so not fun.")
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)

	testCases := []struct {
		err       error
		expEpoch  int32
		expPri    int32
		expTS     proto.Timestamp
		expOrigTS proto.Timestamp
		nodeSeen  bool
	}{
		{nil, 0, 1, makeTS(0, 1), makeTS(0, 1), false},
		{&proto.ReadWithinUncertaintyIntervalError{
			ExistingTimestamp: makeTS(10, 10)}, 1, 1, makeTS(10, 11),
			makeTS(10, 11), true},
		{&proto.TransactionAbortedError{Txn: proto.Transaction{
			Timestamp: makeTS(20, 10), Priority: 10}}, 0, 10, makeTS(20, 10),
			makeTS(0, 1), false},
		{&proto.TransactionPushError{PusheeTxn: proto.Transaction{
			Timestamp: makeTS(10, 10), Priority: int32(10)}}, 1, 9,
			makeTS(10, 11), makeTS(10, 11), false},
		{&proto.TransactionRetryError{Txn: proto.Transaction{
			Timestamp: makeTS(10, 10), Priority: int32(10)}}, 1, 10,
			makeTS(10, 10), makeTS(10, 10), false},
	}

	var testPutReq = &proto.PutRequest{
		RequestHeader: proto.RequestHeader{
			Key:          proto.Key("test-key"),
			UserPriority: gogoproto.Int32(-1),
			Txn: &proto.Transaction{
				Name: "test txn",
			},
			Replica: proto.Replica{
				NodeID: 12345,
			},
		},
	}

	for i, test := range testCases {
		stopper := stop.NewStopper()
		ts := NewTxnCoordSender(senderFn(func(_ context.Context, _ proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
			return nil, proto.NewError(test.err)
		}), clock, false, nil, stopper)
		var reply *proto.PutResponse
		if r, err := batchutil.SendWrapped(ts, gogoproto.Clone(testPutReq).(proto.Request)); err != nil {
			t.Fatal(err)
		} else {
			reply = r.(*proto.PutResponse)
		}
		teardownHeartbeats(ts)
		stopper.Stop()

		if reflect.TypeOf(test.err) != reflect.TypeOf(reply.GoError()) {
			t.Fatalf("%d: expected %T; got %T: %v", i, test.err, reply.GoError(), reply.GoError())
		}
		if reply.Txn.Epoch != test.expEpoch {
			t.Errorf("%d: expected epoch = %d; got %d",
				i, test.expEpoch, reply.Txn.Epoch)
		}
		if reply.Txn.Priority != test.expPri {
			t.Errorf("%d: expected priority = %d; got %d",
				i, test.expPri, reply.Txn.Priority)
		}
		if !reply.Txn.Timestamp.Equal(test.expTS) {
			t.Errorf("%d: expected timestamp to be %s; got %s",
				i, test.expTS, reply.Txn.Timestamp)
		}
		if !reply.Txn.OrigTimestamp.Equal(test.expOrigTS) {
			t.Errorf("%d: expected orig timestamp to be %s + 1; got %s",
				i, test.expOrigTS, reply.Txn.OrigTimestamp)
		}
		if nodes := reply.Txn.CertainNodes.Nodes; (len(nodes) != 0) != test.nodeSeen {
			t.Errorf("%d: expected nodeSeen=%t, but list of hosts is %v",
				i, test.nodeSeen, nodes)
		}
	}
}
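This test and the next wrap an in-memory function in senderFn and build timestamps with makeTS; both helpers are defined elsewhere in the package. Sketches inferred from their call sites, with signatures and field names assumed where the examples do not show them:

// senderFn presumably adapts a plain function to the sender interface that
// NewTxnCoordSender wraps; the method name Send is an assumption.
type senderFn func(context.Context, proto.BatchRequest) (*proto.BatchResponse, *proto.Error)

func (f senderFn) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	return f(ctx, ba)
}

// makeTS presumably builds a proto.Timestamp from wall-clock and logical
// components (field names assumed).
func makeTS(walltime, logical int64) proto.Timestamp {
	return proto.Timestamp{WallTime: walltime, Logical: int32(logical)}
}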
Example #22
// TestTxnCoordSenderBatchTransaction tests that it is possible to send
// one-off transactional calls within a batch under certain circumstances.
func TestTxnCoordSenderBatchTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	t.Skip("TODO(tschottdorf): remove this test; behavior is more transparent now")
	stopper := stop.NewStopper()
	defer stopper.Stop()
	clock := hlc.NewClock(hlc.UnixNano)
	var called bool
	var alwaysError = errors.New("success")
	ts := NewTxnCoordSender(senderFn(func(_ context.Context, _ proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		called = true
		// Returning this error is an easy way of preventing heartbeats
		// from being started for otherwise "successful" calls.
		return nil, proto.NewError(alwaysError)
	}), clock, false, nil, stopper)

	pushArg := &proto.PushTxnRequest{}
	putArg := &proto.PutRequest{}
	getArg := &proto.GetRequest{}
	testCases := []struct {
		req            proto.Request
		batch, arg, ok bool
	}{
		// Lays intents: can't have this on individual calls at all.
		{putArg, false, false, true},
		{putArg, true, false, true},
		{putArg, true, true, false},
		{putArg, false, true, false},

		// No intents: all ok, except when batch and arg have different txns.
		{pushArg, false, false, true},
		{pushArg, true, false, true},
		{pushArg, true, true, false},
		{pushArg, false, true, true},
		{getArg, false, false, true},
		{getArg, true, false, true},
		{getArg, true, true, false},
		{getArg, false, true, true},
	}

	txn1 := &proto.Transaction{ID: []byte("txn1")}
	txn2 := &proto.Transaction{ID: []byte("txn2")}

	for i, tc := range testCases {
		called = false
		tc.req.Reset()
		ba := &proto.BatchRequest{}

		if tc.arg {
			tc.req.Header().Txn = txn1
		}
		ba.Add(tc.req)
		if tc.batch {
			ba.Txn = txn2
		}
		_, err := batchutil.SendWrapped(ts, ba)
		if !tc.ok && err == alwaysError {
			t.Fatalf("%d: expected error%s", i, err)
		} else if tc.ok != called {
			t.Fatalf("%d: wanted call: %t, got call: %t", i, tc.ok, called)
		}
	}
}
Example #23
func TestEvictCacheOnError(t *testing.T) {
	defer leaktest.AfterTest(t)
	// If rpcError is true, the first attempt gets an RPC error; otherwise
	// the RPC call succeeds but there is an error in the RequestHeader.
	// Currently the leader cache and the cached range descriptor are treated equally.
	testCases := []struct{ rpcError, retryable, shouldClearLeader, shouldClearReplica bool }{
		{false, false, false, false}, // non-retryable replica error
		{false, true, false, false},  // retryable replica error
		{true, false, true, true},    // RPC error aka all nodes dead
		{true, true, false, false},   // retryable RPC error
	}

	for i, tc := range testCases {
		g, s := makeTestGossip(t)
		defer s()
		leader := proto.Replica{
			NodeID:  99,
			StoreID: 999,
		}
		first := true

		var testFn rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr, _ func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
			if !first {
				return []gogoproto.Message{getReply()}, nil
			}
			first = false
			if tc.rpcError {
				return nil, rpc.NewSendError("boom", tc.retryable)
			}
			var err error
			if tc.retryable {
				err = &proto.RangeKeyMismatchError{}
			} else {
				err = errors.New("boom")
			}
			reply := getReply()
			reply.(proto.Response).Header().SetGoError(err)
			return []gogoproto.Message{reply}, nil
		}

		ctx := &DistSenderContext{
			RPCSend: testFn,
			RangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
				return []proto.RangeDescriptor{testRangeDescriptor}, nil
			}),
		}
		ds := NewDistSender(ctx, g)
		ds.updateLeaderCache(1, leader)

		put := proto.NewPut(proto.Key("a"), proto.Value{Bytes: []byte("value")}).(*proto.PutRequest)

		if _, err := batchutil.SendWrapped(ds, put); err != nil && !testutils.IsError(err, "boom") {
			t.Errorf("put encountered unexpected error: %s", err)
		}
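		// Verify the eviction behaviour: the leader cache entry and the cached
		// range descriptor should only have been cleared when the test case
		// expects it (i.e. only for the non-retryable RPC error case).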
		if cur := ds.leaderCache.Lookup(1); reflect.DeepEqual(cur, &proto.Replica{}) && !tc.shouldClearLeader {
			t.Errorf("%d: leader cache eviction: shouldClearLeader=%t, but value is %v", i, tc.shouldClearLeader, cur)
		}
		_, cachedDesc := ds.rangeCache.getCachedRangeDescriptor(put.Key, false /* !inclusive */)
		if (cachedDesc == nil) != tc.shouldClearReplica {
			t.Errorf("%d: unexpected second replica lookup behaviour: wanted=%t", i, tc.shouldClearReplica)
		}
	}
}
Example #24
// TestSendRPCOrder verifies that sendRPC correctly takes into account the
// leader, attributes and required consistency to determine where to send
// remote requests.
func TestSendRPCOrder(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	rangeID := proto.RangeID(99)

	nodeAttrs := map[int32][]string{
		1: {}, // Node 1 carries no attributes; the local node's attributes are set per test case below.
		2: {"us", "west", "gpu"},
		3: {"eu", "dublin", "pdu2", "gpu"},
		4: {"us", "east", "gpu"},
		5: {"us", "east", "gpu", "flaky"},
	}

	// Gets filled below to identify the replica by its address.
	addrToNode := make(map[string]int32)
	makeVerifier := func(expOrder rpc.OrderingPolicy,
		expAddrs []int32) func(rpc.Options, []net.Addr) error {
		return func(o rpc.Options, addrs []net.Addr) error {
			if o.Ordering != expOrder {
				return util.Errorf("unexpected ordering, wanted %v, got %v",
					expOrder, o.Ordering)
			}
			var actualAddrs []int32
			for i, a := range addrs {
				if len(expAddrs) <= i {
					return util.Errorf("got unexpected address: %s", a)
				}
				if expAddrs[i] == 0 {
					actualAddrs = append(actualAddrs, 0)
				} else {
					actualAddrs = append(actualAddrs, addrToNode[a.String()])
				}
			}
			if !reflect.DeepEqual(expAddrs, actualAddrs) {
				return util.Errorf("expected %d, but found %d", expAddrs, actualAddrs)
			}
			return nil
		}
	}

	testCases := []struct {
		args       proto.Request
		attrs      []string
		order      rpc.OrderingPolicy
		expReplica []int32
		leader     int32 // 0 for not caching a leader.
		// Naming is somewhat off, as eventually consistent reads usually
		// do not have to go to the leader when a node has a read lease.
		// Would really want CONSENSUS here, but that is not implemented.
		// Likely a test setup here will never have a read lease, but good
		// to keep in mind.
		consistent bool
	}{
		// Inconsistent Scan without matching attributes.
		{
			args:       &proto.ScanRequest{},
			attrs:      []string{},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
		},
		// Inconsistent Scan with matching attributes.
		// Should move the two nodes matching the attributes to the front and
		// go stable.
		{
			args:  &proto.ScanRequest{},
			attrs: nodeAttrs[5],
			order: rpc.OrderStable,
			// Compare only the first two resulting addresses.
			expReplica: []int32{5, 4, 0, 0, 0},
		},

		// Scan without matching attributes that requires but does not find
		// a leader.
		{
			args:       &proto.ScanRequest{},
			attrs:      []string{},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
			consistent: true,
		},
		// Put without matching attributes that requires but does not find leader.
		// Should go random and not change anything.
		{
			args:       &proto.PutRequest{},
			attrs:      []string{"nomatch"},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
		},
		// Put with matching attributes but no leader.
		// Should move the two nodes matching the attributes to the front and
		// go stable.
		{
			args:  &proto.PutRequest{},
			attrs: append(nodeAttrs[5], "irrelevant"),
			// Compare only the first two resulting addresses.
			order:      rpc.OrderStable,
			expReplica: []int32{5, 4, 0, 0, 0},
		},
		// Put with matching attributes that finds the leader (node 2).
		// Should address the leader and the two nodes matching the attributes
		// (the last and second to last) in that order.
		{
			args:  &proto.PutRequest{},
			attrs: append(nodeAttrs[5], "irrelevant"),
			// Compare only the first resulting addresses as we have a leader
			// and that means we're only trying to send there.
			order:      rpc.OrderStable,
			expReplica: []int32{2, 5, 4, 0, 0},
			leader:     2,
		},
		// Inconsistent Get without matching attributes but with a cached leader
		// (node 2). Should just go random as the leader does not matter.
		{
			args:       &proto.GetRequest{},
			attrs:      []string{},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
			leader:     2,
		},
	}

	descriptor := proto.RangeDescriptor{
		StartKey: proto.KeyMin,
		EndKey:   proto.KeyMax,
		RangeID:  rangeID,
		Replicas: nil,
	}

	// Stub to be changed in each test case.
	var verifyCall func(rpc.Options, []net.Addr) error

	var testFn rpcSendFn = func(opts rpc.Options, method string,
		addrs []net.Addr, _ func(addr net.Addr) gogoproto.Message,
		getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		if err := verifyCall(opts, addrs); err != nil {
			return nil, err
		}
		return []gogoproto.Message{getReply()}, nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(proto.Key, lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{descriptor}, nil
		}),
	}

	ds := NewDistSender(ctx, g)

	for n, tc := range testCases {
		verifyCall = makeVerifier(tc.order, tc.expReplica)
		descriptor.Replicas = nil // could do this once above, but more convenient here
		for i := int32(1); i <= 5; i++ {
			addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
			addrToNode[addr.String()] = i
			nd := &proto.NodeDescriptor{
				NodeID:  proto.NodeID(i),
				Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
				Attrs: proto.Attributes{
					Attrs: nodeAttrs[i],
				},
			}
			if err := g.AddInfoProto(gossip.MakeNodeIDKey(proto.NodeID(i)), nd, time.Hour); err != nil {
				t.Fatal(err)
			}
			descriptor.Replicas = append(descriptor.Replicas, proto.Replica{
				NodeID:  proto.NodeID(i),
				StoreID: proto.StoreID(i),
			})
		}

		{
			// The local node needs to get its attributes during sendRPC.
			nd := &proto.NodeDescriptor{
				NodeID: 6,
				Attrs: proto.Attributes{
					Attrs: tc.attrs,
				},
			}
			if err := g.SetNodeDescriptor(nd); err != nil {
				t.Fatal(err)
			}
		}

		ds.leaderCache.Update(rangeID, proto.Replica{})
		if tc.leader > 0 {
			ds.leaderCache.Update(rangeID, descriptor.Replicas[tc.leader-1])
		}

		args := tc.args
		args.Header().RangeID = rangeID // Not used in this test, but why not.
		args.Header().Key = proto.Key("a")
		if proto.IsRange(args) {
			args.Header().EndKey = proto.Key("b")
		}
		if !tc.consistent {
			args.Header().ReadConsistency = proto.INCONSISTENT
		}
		// Kill the cached NodeDescriptor, enforcing a lookup from Gossip.
		ds.nodeDescriptor = nil
		if _, err := batchutil.SendWrapped(ds, args); err != nil {
			t.Errorf("%d: %s", n, err)
		}
	}
}
Example #25
// TestMultiRangeMergeStaleDescriptor simulates the situation in which the
// DistSender executes a multi-range scan which encounters the stale descriptor
// of a range which has since incorporated its right neighbor by means of a
// merge. It is verified that the DistSender scans the correct key range exactly
// once.
func TestMultiRangeMergeStaleDescriptor(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	// Assume we have two ranges, [a-b) and [b-KeyMax).
	merged := false
	// The stale first range descriptor which is unaware of the merge.
	var firstRange = proto.RangeDescriptor{
		RangeID:  1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("b"),
		Replicas: []proto.Replica{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// The merged descriptor, which will be looked up after having processed
	// the stale range [a,b).
	var mergedRange = proto.RangeDescriptor{
		RangeID:  1,
		StartKey: proto.Key("a"),
		EndKey:   proto.KeyMax,
		Replicas: []proto.Replica{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// Assume we have two key-value pairs, a=1 and c=2.
	existingKVs := []proto.KeyValue{
		{Key: proto.Key("a"), Value: proto.Value{Bytes: []byte("1")}},
		{Key: proto.Key("c"), Value: proto.Value{Bytes: []byte("2")}},
	}
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		if method != "Node.Batch" {
			t.Fatalf("unexpected method:%s", method)
		}
		header := getArgs(testAddress).(proto.Request).Header()
		batchReply := getReply().(*proto.BatchResponse)
		reply := &proto.ScanResponse{}
		batchReply.Add(reply)
		results := []proto.KeyValue{}
		for _, curKV := range existingKVs {
			if header.Key.Less(curKV.Key.Next()) && curKV.Key.Less(header.EndKey) {
				results = append(results, curKV)
			}
		}
		reply.Rows = results
		return []gogoproto.Message{batchReply}, nil
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(key proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			if !merged {
				// Assume a range merge operation happened.
				merged = true
				return []proto.RangeDescriptor{firstRange}, nil
			}
			return []proto.RangeDescriptor{mergedRange}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	scan := proto.NewScan(proto.Key("a"), proto.Key("d"), 10).(*proto.ScanRequest)
	// Set the Txn info to avoid an OpRequiresTxnError.
	scan.Txn = &proto.Transaction{}
	reply, err := batchutil.SendWrapped(ds, scan)
	if err != nil {
		t.Fatalf("scan encountered error: %s", err)
	}
	sr := reply.(*proto.ScanResponse)
	if !reflect.DeepEqual(existingKVs, sr.Rows) {
		t.Fatalf("expect get %v, actual get %v", existingKVs, sr.Rows)
	}
}
Example #26
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := StartTestServer(t)
	defer s.Stop()
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
	tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, nil, s.stopper)

	if err := s.node.ctx.DB.AdminSplit("m"); err != nil {
		t.Fatal(err)
	}
	writes := []proto.Key{proto.Key("a"), proto.Key("z")}
	get := &proto.GetRequest{
		RequestHeader: proto.RequestHeader{Key: writes[0]},
	}
	get.EndKey = writes[len(writes)-1]
	if _, err := batchutil.SendWrapped(tds, get); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	var delTS proto.Timestamp
	for i, k := range writes {
		put := proto.NewPut(k, proto.Value{Bytes: k})
		reply, err := batchutil.SendWrapped(tds, put)
		if err != nil {
			t.Fatal(err)
		}
		scan := proto.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*proto.ScanRequest)
		// The Put timestamp may have been pushed by the timestamp cache,
		// so use it for the Scan to make sure we see the written values.
		delTS = reply.(*proto.PutResponse).Timestamp
		scan.Timestamp = delTS
		reply, err = batchutil.SendWrapped(tds, scan)
		if err != nil {
			t.Fatal(err)
		}
		sr := reply.(*proto.ScanResponse)
		if sr.Txn != nil {
			// This was the other way around at some point in the past.
			// Same below for Delete, etc.
			t.Errorf("expected no transaction in response header")
		}
		if rows := sr.Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}

	del := &proto.DeleteRangeRequest{
		RequestHeader: proto.RequestHeader{
			Key:       writes[0],
			EndKey:    writes[len(writes)-1].Next(),
			Timestamp: delTS,
		},
	}
	reply, err := batchutil.SendWrapped(tds, del)
	if err != nil {
		t.Fatal(err)
	}
	dr := reply.(*proto.DeleteRangeResponse)
	if dr.Txn != nil {
		t.Errorf("expected no transaction in response header")
	}
	if n := dr.NumDeleted; n != int64(len(writes)) {
		t.Errorf("expected %d keys to be deleted, but got %d instead",
			len(writes), n)
	}

	scan := proto.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*proto.ScanRequest)
	scan.Timestamp = dr.Timestamp
	scan.Txn = &proto.Transaction{Name: "MyTxn"}
	reply, err = batchutil.SendWrapped(tds, scan)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*proto.ScanResponse)
	if txn := sr.Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := sr.Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}