Example #1
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

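	// Each case pre-sets the txn's Writing flag before sending args; ok says
	// whether the send is expected to succeed.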
	for i, tc := range []struct {
		args    proto.Request
		writing bool
		ok      bool
	}{
		{proto.NewGet(proto.Key("a")), true, true},
		{proto.NewGet(proto.Key("a")), false, true},
		{proto.NewPut(proto.Key("a"), proto.Value{}), false, true},
		{proto.NewPut(proto.Key("a"), proto.Value{}), true, false},
	} {
		{
			txn := newTxn(s.Clock, proto.Key("a"))
			txn.Writing = tc.writing
			tc.args.Header().Txn = txn
		}
		reply, err := batchutil.SendWrapped(s.Sender, tc.args)
		if (err == nil) != tc.ok {
			t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
				i, tc.args, tc.writing, tc.ok, err)
		}
		if err != nil {
			continue
		}

		txn := reply.Header().Txn
		// The transaction should come back with Writing=true if it started
		// that way or if we just wrote.
		isWrite := proto.IsTransactionWrite(tc.args)
		if (tc.writing || isWrite) != txn.Writing {
			t.Errorf("%d: unexpected writing state: %s", i, txn)
		}
		if !isWrite {
			continue
		}
		// Abort for clean shutdown.
		if _, err := batchutil.SendWrapped(s.Sender, &proto.EndTransactionRequest{
			RequestHeader: proto.RequestHeader{
				Key:       txn.Key,
				Timestamp: txn.Timestamp,
				Txn:       txn,
			},
			Commit: false,
		}); err != nil {
			t.Fatal(err)
		}
	}
}
Example #2
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		splitKeys []proto.Key
		keys      []proto.Key
	}{
		{[]proto.Key{proto.Key("m")},
			[]proto.Key{proto.Key("a"), proto.Key("z")}},
		{[]proto.Key{proto.Key("h"), proto.Key("q")},
			[]proto.Key{proto.Key("b"), proto.Key("f"), proto.Key("k"),
				proto.Key("r"), proto.Key("w"), proto.Key("y")}},
	}

	for i, tc := range testCases {
		s := StartTestServer(t)
		defer s.Stop()
		ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
		tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, nil, s.stopper)

		for _, sk := range tc.splitKeys {
			if err := s.node.ctx.DB.AdminSplit(sk); err != nil {
				t.Fatal(err)
			}
		}

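		// Write all keys; keep the last reply so the scans below can use its timestamp.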
		var reply proto.Response
		for _, k := range tc.keys {
			put := proto.NewPut(k, proto.Value{Bytes: k})
			var err error
			reply, err = batchutil.SendWrapped(tds, put)
			if err != nil {
				t.Fatal(err)
			}
		}

		// Try every possible ScanRequest startKey.
		for start := 0; start < len(tc.keys); start++ {
			// Try every possible maxResults, from 1 to one beyond the number of
			// remaining keys.
			for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
				scan := proto.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
					int64(maxResults))
				scan.Header().Timestamp = reply.Header().Timestamp
				reply, err := batchutil.SendWrapped(tds, scan)
				if err != nil {
					t.Fatal(err)
				}
				rows := reply.(*proto.ScanResponse).Rows
				if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
					t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
				} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
					t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
				}
			}
		}
	}
}
Example #3
// Put sets the value for a key.
//
// A new result will be appended to the batch; it will contain a single row,
// and Result.Err will indicate success or failure.
//
// key can be either a byte slice or a string. value can be any key type, a
// proto.Message or any Go primitive type (bool, int, etc).
func (b *Batch) Put(key, value interface{}) {
	k, err := marshalKey(key)
	if err != nil {
		b.initResult(0, 1, err)
		return
	}
	v, err := marshalValue(value)
	if err != nil {
		b.initResult(0, 1, err)
		return
	}
	b.reqs = append(b.reqs, proto.NewPut(k, v))
	b.initResult(1, 1, nil)
}
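A minimal usage sketch of Batch.Put (illustrative, not taken from the examples above: it assumes the surrounding client package exposes a Batch literal and a DB.Run entry point, and that a *client.DB named db is available):

// putExample stages two Put calls in a batch and runs them in one round trip.
// Per the doc comment above, each Put appends one Result holding a single row,
// and Result.Err reports per-call success or failure.
func putExample(db *client.DB) error {
	b := &client.Batch{}
	b.Put("a", "hello")    // string key, string value
	b.Put([]byte("b"), 42) // byte-slice key, Go primitive value
	return db.Run(b)
}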
Example #4
// TestRetryOnDescriptorLookupError verifies that the DistSender retries a descriptor
// lookup on retryable errors.
func TestRetryOnDescriptorLookupError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()

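	// The RPC layer itself never fails here; the errors are injected below
	// through the descriptor lookup instead.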
	var testFn rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr, _ func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		return []gogoproto.Message{getReply()}, nil
	}

	lookupErrs := []error{
		errors.New("fatal boom"),
		&proto.RangeKeyMismatchError{}, // retryable
		nil,
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(k proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			// Return the next injected error and pop it off the front of the slice.
			var err error
			if k != nil {
				err = lookupErrs[0]
				lookupErrs = lookupErrs[1:]
			}
			return []proto.RangeDescriptor{testRangeDescriptor}, err
		}),
	}
	ds := NewDistSender(ctx, g)
	put := proto.NewPut(proto.Key("a"), proto.Value{Bytes: []byte("value")})
	// Fatal error on descriptor lookup, propagated to reply.
	if _, err := batchutil.SendWrapped(ds, put); err == nil || err.Error() != "fatal boom" {
		t.Errorf("unexpected error: %s", err)
	}
	// Retryable error on descriptor lookup, second attempt successful.
	if _, err := batchutil.SendWrapped(ds, put); err != nil {
		t.Errorf("unexpected error: %s", err)
	}
	if len(lookupErrs) != 0 {
		t.Fatalf("expected more descriptor lookups, leftover errors: %+v", lookupErrs)
	}
}
Example #5
// TestRetryOnNotLeaderError verifies that the DistSender correctly updates the
// leader cache and retries when receiving a NotLeaderError.
func TestRetryOnNotLeaderError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	leader := proto.Replica{
		NodeID:  99,
		StoreID: 999,
	}
	first := true

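	// The first RPC replies with a NotLeaderError pointing at leader; every
	// subsequent RPC succeeds.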
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		if first {
			reply := getReply()
			reply.(proto.Response).Header().SetGoError(
				&proto.NotLeaderError{Leader: &leader, Replica: &proto.Replica{}})
			first = false
			return []gogoproto.Message{reply}, nil
		}
		return []gogoproto.Message{getReply()}, nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{testRangeDescriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	put := proto.NewPut(proto.Key("a"), proto.Value{Bytes: []byte("value")})
	if _, err := batchutil.SendWrapped(ds, put); err != nil {
		t.Errorf("put encountered error: %s", err)
	}
	if first {
		t.Errorf("The command did not retry")
	}
	if cur := ds.leaderCache.Lookup(1); cur.StoreID != leader.StoreID {
		t.Errorf("leader cache was not updated: expected %v, got %v",
			&leader, cur)
	}
}
Example #6
func TestNodeEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)

	nodeDesc := proto.NodeDescriptor{
		NodeID: proto.NodeID(99),
	}

	// A testCase corresponds to a single node event type. Each case contains a
	// method which publishes a single event to the given NodeEventFeed, and the
	// expected event which should match the event actually produced.
	testCases := []struct {
		publishTo func(status.NodeEventFeed)
		expected  interface{}
	}{
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.StartNode(nodeDesc, 100)
			},
			expected: &status.StartNodeEvent{
				Desc:      nodeDesc,
				StartedAt: 100,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewGet(proto.Key("abc"))), nil)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewPut(proto.Key("abc"), proto.Value{Bytes: []byte("def")})), nil)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Put,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewGet(proto.Key("abc"))), proto.NewError(util.Errorf("error")))
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Batch,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewGet(proto.Key("abc"))), &proto.Error{
					Index:   &proto.ErrPosition{Index: 0},
					Message: "boo",
				})
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
	}

	// Compile expected events into a single slice.
	expectedEvents := make([]interface{}, len(testCases))
	for i := range testCases {
		expectedEvents[i] = testCases[i].expected
	}

	events := make([]interface{}, 0, len(expectedEvents))

	// Run test cases directly through a feed.
	stopper := stop.NewStopper()
	defer stopper.Stop()
	feed := util.NewFeed(stopper)
	feed.Subscribe(func(event interface{}) {
		events = append(events, event)
	})

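	// The feed is bound to NodeID 1, which is why the expected events above
	// carry NodeID 1 rather than nodeDesc's NodeID 99.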
	nodefeed := status.NewNodeEventFeed(proto.NodeID(1), feed)
	for _, tc := range testCases {
		tc.publishTo(nodefeed)
	}

	feed.Flush()

	if a, e := events, expectedEvents; !reflect.DeepEqual(a, e) {
		t.Errorf("received incorrect events.\nexpected: %v\nactual: %v", e, a)
	}
}
Example #7
func TestEvictCacheOnError(t *testing.T) {
	defer leaktest.AfterTest(t)
	// If rpcError is true, the first attempt gets an RPC error; otherwise the
	// RPC call succeeds but there is an error in the response header.
	// Currently the leader and the cached range descriptor are treated equally.
	testCases := []struct{ rpcError, retryable, shouldClearLeader, shouldClearReplica bool }{
		{false, false, false, false}, // non-retryable replica error
		{false, true, false, false},  // retryable replica error
		{true, false, true, true},    // RPC error aka all nodes dead
		{true, true, false, false},   // retryable RPC error
	}

	for i, tc := range testCases {
		g, s := makeTestGossip(t)
		defer s()
		leader := proto.Replica{
			NodeID:  99,
			StoreID: 999,
		}
		first := true

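		// The first RPC fails in whichever way the test case dictates; every
		// subsequent RPC succeeds.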
		var testFn rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr, _ func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
			if !first {
				return []gogoproto.Message{getReply()}, nil
			}
			first = false
			if tc.rpcError {
				return nil, rpc.NewSendError("boom", tc.retryable)
			}
			var err error
			if tc.retryable {
				err = &proto.RangeKeyMismatchError{}
			} else {
				err = errors.New("boom")
			}
			reply := getReply()
			reply.(proto.Response).Header().SetGoError(err)
			return []gogoproto.Message{reply}, nil
		}

		ctx := &DistSenderContext{
			RPCSend: testFn,
			RangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
				return []proto.RangeDescriptor{testRangeDescriptor}, nil
			}),
		}
		ds := NewDistSender(ctx, g)
		ds.updateLeaderCache(1, leader)

		put := proto.NewPut(proto.Key("a"), proto.Value{Bytes: []byte("value")}).(*proto.PutRequest)

		if _, err := batchutil.SendWrapped(ds, put); err != nil && !testutils.IsError(err, "boom") {
			t.Errorf("put encountered unexpected error: %s", err)
		}
		if cur := ds.leaderCache.Lookup(1); reflect.DeepEqual(cur, &proto.Replica{}) && !tc.shouldClearLeader {
			t.Errorf("%d: leader cache eviction: shouldClearLeader=%t, but value is %v", i, tc.shouldClearLeader, cur)
		}
		_, cachedDesc := ds.rangeCache.getCachedRangeDescriptor(put.Key, false /* !inclusive */)
		if (cachedDesc == nil) != tc.shouldClearReplica {
			t.Errorf("%d: unexpected second replica lookup behaviour: wanted=%t", i, tc.shouldClearReplica)
		}
	}
}
Example #8
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := StartTestServer(t)
	defer s.Stop()
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
	tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, nil, s.stopper)

	if err := s.node.ctx.DB.AdminSplit("m"); err != nil {
		t.Fatal(err)
	}
	writes := []proto.Key{proto.Key("a"), proto.Key("z")}
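	// A Get request may not span a key range; sending one must fail.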
	get := &proto.GetRequest{
		RequestHeader: proto.RequestHeader{Key: writes[0]},
	}
	get.EndKey = writes[len(writes)-1]
	if _, err := batchutil.SendWrapped(tds, get); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	var delTS proto.Timestamp
	for i, k := range writes {
		put := proto.NewPut(k, proto.Value{Bytes: k})
		reply, err := batchutil.SendWrapped(tds, put)
		if err != nil {
			t.Fatal(err)
		}
		scan := proto.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*proto.ScanRequest)
		// The Put's timestamp may have been pushed by the timestamp cache, so
		// scan at that timestamp to make sure we see the written values.
		delTS = reply.(*proto.PutResponse).Timestamp
		scan.Timestamp = delTS
		reply, err = batchutil.SendWrapped(tds, scan)
		if err != nil {
			t.Fatal(err)
		}
		sr := reply.(*proto.ScanResponse)
		if sr.Txn != nil {
			// The response to a non-transactional request should carry no
			// transaction (this was the other way around at some point in the
			// past). Same below for Delete, etc.
			t.Errorf("expected no transaction in response header")
		}
		if rows := sr.Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}

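	// Delete both written keys with a single DeleteRange; the response should
	// report two deletions and carry no transaction.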
	del := &proto.DeleteRangeRequest{
		RequestHeader: proto.RequestHeader{
			Key:       writes[0],
			EndKey:    writes[len(writes)-1].Next(),
			Timestamp: delTS,
		},
	}
	reply, err := batchutil.SendWrapped(tds, del)
	if err != nil {
		t.Fatal(err)
	}
	dr := reply.(*proto.DeleteRangeResponse)
	if dr.Txn != nil {
		t.Errorf("expected no transaction in response header")
	}
	if n := dr.NumDeleted; n != int64(len(writes)) {
		t.Errorf("expected %d keys to be deleted, but got %d instead",
			len(writes), n)
	}

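	// Scan once more, now within an explicit transaction: the transaction
	// should be echoed back in the response and no rows should remain.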
	scan := proto.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*proto.ScanRequest)
	scan.Timestamp = dr.Timestamp
	scan.Txn = &proto.Transaction{Name: "MyTxn"}
	reply, err = batchutil.SendWrapped(tds, scan)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*proto.ScanResponse)
	if txn := sr.Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := sr.Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}