Example 1
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	for i, tc := range []struct {
		call    proto.Call
		writing bool
		ok      bool
	}{
		{proto.GetCall(proto.Key("a")), true, true},
		{proto.GetCall(proto.Key("a")), false, true},
		{proto.PutCall(proto.Key("a"), proto.Value{}), false, true},
		{proto.PutCall(proto.Key("a"), proto.Value{}), true, false},
	} {
		{
			txn := newTxn(s.Clock, proto.Key("a"))
			txn.Writing = tc.writing
			tc.call.Args.Header().Txn = txn
		}
		err := sendCall(s.Sender, tc.call)
		if (err == nil) != tc.ok {
			t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
				i, tc.call.Args, tc.writing, tc.ok, err)
		}
		if err != nil {
			continue
		}

		txn := tc.call.Reply.Header().Txn
		// The transaction should come back rw if it started rw or if we just
		// wrote.
		isWrite := proto.IsTransactionWrite(tc.call.Args)
		if (tc.writing || isWrite) != txn.Writing {
			t.Errorf("%d: unexpected writing state: %s", i, txn)
		}
		if !isWrite {
			continue
		}
		// Abort for clean shutdown.
		etReply := &proto.EndTransactionResponse{}
		if err := sendCall(s.Sender, proto.Call{
			Args: &proto.EndTransactionRequest{
				RequestHeader: proto.RequestHeader{
					Key:       txn.Key,
					Timestamp: txn.Timestamp,
					Txn:       txn,
				},
				Commit: false,
			},
			Reply: etReply,
		}); err != nil {
			log.Warning(err)
			t.Fatal(err)
		}
	}
}
Example 2
func TestEvictCacheOnError(t *testing.T) {
	defer leaktest.AfterTest(t)
	// If rpcError is true, the first attempt gets an RPC error; otherwise
	// the RPC call succeeds but there is an error in the ResponseHeader.
	// Currently, the leader and the cached range descriptor are treated equally.
	testCases := []struct{ rpcError, retryable, shouldClearLeader, shouldClearReplica bool }{
		{false, false, false, false}, // non-retryable replica error
		{false, true, false, false},  // retryable replica error
		{true, false, true, true},    // RPC error aka all nodes dead
		{true, true, false, false},   // retryable RPC error
	}

	for i, tc := range testCases {
		g, s := makeTestGossip(t)
		defer s()
		leader := proto.Replica{
			NodeID:  99,
			StoreID: 999,
		}
		first := true

		var testFn rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr, _ func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
			if !first {
				return []gogoproto.Message{getReply()}, nil
			}
			first = false
			err := rpc.NewSendError("boom", tc.retryable)
			if tc.rpcError {
				return nil, err
			}
			reply := getReply()
			reply.(proto.Response).Header().SetGoError(err)
			return []gogoproto.Message{reply}, nil
		}

		ctx := &DistSenderContext{
			RPCSend: testFn,
			RangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
				return []proto.RangeDescriptor{testRangeDescriptor}, nil
			}),
		}
		ds := NewDistSender(ctx, g)
		ds.updateLeaderCache(1, leader)

		call := proto.PutCall(proto.Key("a"), proto.Value{Bytes: []byte("value")})
		reply := call.Reply.(*proto.PutResponse)
		client.SendCallConverted(ds, context.Background(), call)
		if err := reply.GoError(); err != nil && !testutils.IsError(err, "boom") {
			t.Errorf("put encountered unexpected error: %s", err)
		}
		if cur := ds.leaderCache.Lookup(1); reflect.DeepEqual(cur, &proto.Replica{}) && !tc.shouldClearLeader {
			t.Errorf("%d: leader cache eviction: shouldClearLeader=%t, but value is %v", i, tc.shouldClearLeader, cur)
		}
		_, cachedDesc := ds.rangeCache.getCachedRangeDescriptor(call.Args.Header().Key, false /* !inclusive */)
		if (cachedDesc == nil) != tc.shouldClearReplica {
			t.Errorf("%d: unexpected second replica lookup behaviour: wanted=%t", i, tc.shouldClearReplica)
		}
	}
}
Example 3
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		splitKeys []proto.Key
		keys      []proto.Key
	}{
		{[]proto.Key{proto.Key("m")},
			[]proto.Key{proto.Key("a"), proto.Key("z")}},
		{[]proto.Key{proto.Key("h"), proto.Key("q")},
			[]proto.Key{proto.Key("b"), proto.Key("f"), proto.Key("k"),
				proto.Key("r"), proto.Key("w"), proto.Key("y")}},
	}

	for i, tc := range testCases {
		s := StartTestServer(t)
		defer s.Stop()
		ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
		tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, s.stopper)

		for _, sk := range tc.splitKeys {
			if err := s.node.ctx.DB.AdminSplit(sk); err != nil {
				t.Fatal(err)
			}
		}

		var call proto.Call
		for _, k := range tc.keys {
			call = proto.PutCall(k, proto.Value{Bytes: k})
			call.Args.Header().User = storage.UserRoot
			tds.Send(context.Background(), call)
			if err := call.Reply.Header().GoError(); err != nil {
				t.Fatal(err)
			}
		}

		// Try every possible ScanRequest startKey.
		for start := 0; start < len(tc.keys); start++ {
			// Try every possible maxResults, from 1 to beyond the size of key array.
			for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
				scan := proto.ScanCall(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
					int64(maxResults))
				scan.Args.Header().Timestamp = call.Reply.Header().Timestamp
				scan.Args.Header().User = storage.UserRoot
				tds.Send(context.Background(), scan)
				if err := scan.Reply.Header().GoError(); err != nil {
					t.Fatal(err)
				}
				rows := scan.Reply.(*proto.ScanResponse).Rows
				if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
					t.Fatalf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
				} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
					t.Fatalf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
				}
			}
		}
	}
}
Example 4
// Put sets the value for a key.
//
// A new result will be appended to the batch; it will contain a single row,
// and Result.Err will indicate success or failure.
//
// key can be either a byte slice, a string, a fmt.Stringer or an
// encoding.BinaryMarshaler. value can be any key type or a proto.Message.
func (b *Batch) Put(key, value interface{}) {
	k, err := marshalKey(key)
	if err != nil {
		b.initResult(0, 1, err)
		return
	}
	v, err := marshalValue(reflect.ValueOf(value))
	if err != nil {
		b.initResult(0, 1, err)
		return
	}
	b.calls = append(b.calls, proto.PutCall(proto.Key(k), v))
	b.initResult(1, 1, nil)
}
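
For illustration only, a minimal usage sketch of the key and value types that Put accepts per the comment above. The client.Batch construction, the db value, and the Run call are assumptions made for this sketch and are not taken from the snippet itself.

// examplePutUsage is a hypothetical sketch; only Batch.Put's signature comes
// from the code above, while the client.DB value and the Run call are assumed.
func examplePutUsage(db *client.DB) error {
	b := &client.Batch{}
	b.Put([]byte("raw-key"), "string value") // byte-slice key, string value
	b.Put("string-key", []byte{0x01, 0x02})  // string key, byte-slice value
	// Each Put appends one call and one pending Result to the batch;
	// Result.Err reports per-call success or failure after the batch runs.
	return db.Run(b)
}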
Example 5
// TestRetryOnDescriptorLookupError verifies that the DistSender retries a descriptor
// lookup on retryable errors.
func TestRetryOnDescriptorLookupError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()

	var testFn rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr, _ func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		return []gogoproto.Message{getReply()}, nil
	}

	errors := []error{
		&proto.Error{
			Message:   "fatal boom",
			Retryable: false,
		},
		&proto.Error{
			Message:   "temporary boom",
			Retryable: true,
		},
		nil,
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(k proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			// Return the next error and pop it off the front of the errors slice.
			var err error
			if k != nil {
				err = errors[0]
				errors = errors[1:]
			}
			return []proto.RangeDescriptor{testRangeDescriptor}, err
		}),
	}
	ds := NewDistSender(ctx, g)
	call := proto.PutCall(proto.Key("a"), proto.Value{Bytes: []byte("value")})
	reply := call.Reply.(*proto.PutResponse)
	// Fatal error on descriptor lookup, propagated to reply.
	client.SendCallConverted(ds, context.Background(), call)
	if err := reply.Header().Error; err.GetMessage() != "fatal boom" {
		t.Errorf("unexpected error: %s", err)
	}
	// Retryable error on descriptor lookup, second attempt successful.
	client.SendCallConverted(ds, context.Background(), call)
	if err := reply.GoError(); err != nil {
		t.Errorf("unexpected error: %s", err)
	}
	if len(errors) != 0 {
		t.Fatalf("expected more descriptor lookups, leftover errors: %+v", errors)
	}
}
Example 6
// PutStruct sets the specified columns in the structured table identified by
// obj. The primary key columns within obj are used to identify which row to
// modify. The obj type must have previously been bound to a table using
// BindModel. If columns is empty, all of the columns are set.
func (b *Batch) PutStruct(obj interface{}, columns ...string) {
	v := reflect.Indirect(reflect.ValueOf(obj))
	m, err := b.DB.getModel(v.Type(), false)
	if err != nil {
		b.initResult(0, 0, err)
		return
	}

	primaryKey, err := m.encodePrimaryKey(v)
	if err != nil {
		b.initResult(0, 0, err)
		return
	}

	if len(columns) == 0 {
		columns = m.otherColumnNames
	} else {
		lowerStrings(columns)
	}

	var calls []proto.Call
	for _, colName := range columns {
		col, ok := m.columnsByName[colName]
		if !ok {
			b.initResult(0, 0, fmt.Errorf("%s: unable to find column %s", m.name, colName))
			return
		}

		key := m.encodeColumnKey(primaryKey, col.ID)
		value := v.FieldByIndex(col.field.Index)
		if log.V(2) {
			log.Infof("Put %q -> %v", key, value.Interface())
		}

		pv, err := marshalValue(value)
		if err != nil {
			b.initResult(0, 0, err)
			return
		}

		calls = append(calls, proto.PutCall(key, pv))
	}

	b.calls = append(b.calls, calls...)
	b.initResult(len(calls), len(calls), nil)
}
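
Similarly, a rough sketch of PutStruct based only on its doc comment. The User model, its prior binding to a table via BindModel, and how the Batch is obtained are all assumptions here; only the PutStruct call itself mirrors the code above.

// User is a hypothetical model type assumed to have been bound to a table via
// BindModel, with ID acting as the primary key column.
type User struct {
	ID   int
	Name string
}

// examplePutStructUsage is a sketch only; the Batch is assumed to come from a
// DB on which the User type was already bound with BindModel.
func examplePutStructUsage(b *client.Batch) {
	// Write only the "name" column of the row whose primary key is ID == 1.
	// Omitting column names would write all non-primary-key columns.
	b.PutStruct(User{ID: 1, Name: "gopher"}, "name")
}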
Example 7
// TestRetryOnNotLeaderError verifies that the DistSender correctly updates the
// leader cache and retries when receiving a NotLeaderError.
func TestRetryOnNotLeaderError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	leader := proto.Replica{
		NodeID:  99,
		StoreID: 999,
	}
	first := true

	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) interface{}, getReply func() interface{}, _ *rpc.Context) ([]interface{}, error) {
		if first {
			reply := getReply()
			reply.(proto.Response).Header().SetGoError(
				&proto.NotLeaderError{Leader: &leader, Replica: &proto.Replica{}})
			first = false
			return []interface{}{reply}, nil
		}
		return []interface{}{getReply()}, nil
	}

	ctx := &DistSenderContext{
		rpcSend: testFn,
		rangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{testRangeDescriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	call := proto.PutCall(proto.Key("a"), proto.Value{Bytes: []byte("value")})
	reply := call.Reply.(*proto.PutResponse)
	ds.Send(context.Background(), call)
	if err := reply.GoError(); err != nil {
		t.Errorf("put encountered error: %s", err)
	}
	if first {
		t.Errorf("The command did not retry")
	}
	if cur := ds.leaderCache.Lookup(1); cur.StoreID != leader.StoreID {
		t.Errorf("leader cache was not updated: expected %v, got %v",
			&leader, cur)
	}
}
Example 8
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := StartTestServer(t)
	defer s.Stop()
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
	tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, s.stopper)

	if err := s.node.ctx.DB.AdminSplit("m"); err != nil {
		t.Fatal(err)
	}
	writes := []proto.Key{proto.Key("a"), proto.Key("z")}
	get := proto.Call{
		Args: &proto.GetRequest{
			RequestHeader: proto.RequestHeader{
				Key: writes[0],
			},
		},
		Reply: &proto.GetResponse{},
	}
	get.Args.Header().User = storage.UserRoot
	get.Args.Header().EndKey = writes[len(writes)-1]
	tds.Send(context.Background(), get)
	if err := get.Reply.Header().GoError(); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	var call proto.Call
	for i, k := range writes {
		call = proto.PutCall(k, proto.Value{Bytes: k})
		call.Args.Header().User = storage.UserRoot
		tds.Send(context.Background(), call)
		if err := call.Reply.Header().GoError(); err != nil {
			t.Fatal(err)
		}
		scan := proto.ScanCall(writes[0], writes[len(writes)-1].Next(), 0)
		// The Put timestamps may have been pushed by the timestamp cache,
		// so make sure we see their values in our Scan.
		scan.Args.Header().Timestamp = call.Reply.Header().Timestamp
		scan.Args.Header().User = storage.UserRoot
		tds.Send(context.Background(), scan)
		if err := scan.Reply.Header().GoError(); err != nil {
			t.Fatal(err)
		}
		if scan.Reply.Header().Txn == nil {
			t.Errorf("expected Scan to be wrapped in a Transaction")
		}
		if rows := scan.Reply.(*proto.ScanResponse).Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}

	del := proto.Call{
		Args: &proto.DeleteRangeRequest{
			RequestHeader: proto.RequestHeader{
				User:      storage.UserRoot,
				Key:       writes[0],
				EndKey:    writes[len(writes)-1].Next(),
				Timestamp: call.Reply.Header().Timestamp,
			},
		},
		Reply: &proto.DeleteRangeResponse{},
	}
	tds.Send(context.Background(), del)
	if err := del.Reply.Header().GoError(); err != nil {
		t.Fatal(err)
	}
	if del.Reply.Header().Txn == nil {
		t.Errorf("expected DeleteRange to be wrapped in a Transaction")
	}
	if n := del.Reply.(*proto.DeleteRangeResponse).NumDeleted; n != int64(len(writes)) {
		t.Errorf("expected %d keys to be deleted, but got %d instead",
			len(writes), n)
	}

	scan := proto.ScanCall(writes[0], writes[len(writes)-1].Next(), 0)
	scan.Args.Header().Timestamp = del.Reply.Header().Timestamp
	scan.Args.Header().User = storage.UserRoot
	scan.Args.Header().Txn = &proto.Transaction{Name: "MyTxn"}
	tds.Send(context.Background(), scan)
	if err := scan.Reply.Header().GoError(); err != nil {
		t.Fatal(err)
	}
	if txn := scan.Reply.Header().Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := scan.Reply.(*proto.ScanResponse).Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}
Example 9
func TestNodeEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)

	nodeDesc := proto.NodeDescriptor{
		NodeID: proto.NodeID(99),
	}

	// A testCase corresponds to a single node event type. Each case contains a
	// method which publishes a single event to the given NodeEventFeed, and an
	// expected result interface which should match the produced event.
	testCases := []struct {
		name      string
		publishTo func(status.NodeEventFeed)
		expected  interface{}
	}{
		{
			name: "Start",
			publishTo: func(nef status.NodeEventFeed) {
				nef.StartNode(nodeDesc, 100)
			},
			expected: &status.StartNodeEvent{
				Desc:      nodeDesc,
				StartedAt: 100,
			},
		},
		{
			name: "Get",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.GetCall(proto.Key("abc"))
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
		{
			name: "Put",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.PutCall(proto.Key("abc"), proto.Value{Bytes: []byte("def")})
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Put,
			},
		},
		{
			name: "Get Error",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.GetCall(proto.Key("abc"))
				call.Reply.Header().SetGoError(util.Errorf("error"))
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
	}

	// Compile expected events into a single slice.
	expectedEvents := make([]interface{}, len(testCases))
	for i := range testCases {
		expectedEvents[i] = testCases[i].expected
	}

	events := make([]interface{}, 0, len(expectedEvents))

	// Run test cases directly through a feed.
	stopper := stop.NewStopper()
	defer stopper.Stop()
	feed := util.NewFeed(stopper)
	feed.Subscribe(func(event interface{}) {
		events = append(events, event)
	})

	nodefeed := status.NewNodeEventFeed(proto.NodeID(1), feed)
	for _, tc := range testCases {
		tc.publishTo(nodefeed)
	}

	feed.Flush()

	if a, e := events, expectedEvents; !reflect.DeepEqual(a, e) {
		t.Errorf("received incorrect events.\nexpected: %v\nactual: %v", e, a)
	}
}
Example 10
func TestNodeEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)

	// A testCase corresponds to a single node event type. Each case contains a
	// method which publishes a single event to the given NodeEventFeed, and an
	// expected result interface which should match the produced event.
	testCases := []struct {
		name      string
		publishTo func(status.NodeEventFeed)
		expected  interface{}
	}{
		{
			name: "Get",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.GetCall(proto.Key("abc"))
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
		{
			name: "Put",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.PutCall(proto.Key("abc"), proto.Value{Bytes: []byte("def")})
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Put,
			},
		},
		{
			name: "Get Error",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.GetCall(proto.Key("abc"))
				call.Reply.Header().SetGoError(util.Errorf("error"))
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
	}

	// Compile expected events into a single slice.
	expectedEvents := make([]interface{}, len(testCases))
	for i := range testCases {
		expectedEvents[i] = testCases[i].expected
	}

	// verifyEventSlice verifies that the given set of events is equal to the
	// expectedEvents.
	verifyEventSlice := func(source string, events []interface{}) {
		if a, e := len(events), len(expectedEvents); a != e {
			t.Errorf("%s had wrong number of events %d, expected %d", source, a, e)
			return
		}

		for i := range events {
			if a, e := events[i], expectedEvents[i]; !reflect.DeepEqual(a, e) {
				t.Errorf("%s had wrong event for case %s: got %v, expected %v", source, testCases[i].name, a, e)
			}
		}
	}

	// Run test cases directly through a feed.
	stopper, feed, consumers := startConsumerSet(3)
	nodefeed := status.NewNodeEventFeed(proto.NodeID(1), feed)
	for _, tc := range testCases {
		tc.publishTo(nodefeed)
	}
	feed.Close()
	waitForStopper(t, stopper)
	for i, c := range consumers {
		verifyEventSlice(fmt.Sprintf("feed direct consumer %d", i), c.received)
	}
}