// compareBiogoNode compares a biogo node and a range tree node to determine if both
// contain the same values in the same order.  It recursively calls itself on
// both children if they exist.
func compareBiogoNode(db *client.DB, biogoNode *llrb.Node, key *proto.Key) error {
	// Retrieve the node from the range tree.
	rtNode := &proto.RangeTreeNode{}
	if err := db.GetProto(keys.RangeTreeNodeKey(*key), rtNode); err != nil {
		return err
	}

	bNode := &proto.RangeTreeNode{
		Key:       proto.Key(biogoNode.Elem.(Key)),
		ParentKey: proto.KeyMin,
		Black:     bool(biogoNode.Color),
	}
	if biogoNode.Left != nil {
		leftKey := proto.Key(biogoNode.Left.Elem.(Key))
		bNode.LeftKey = &leftKey
	}
	if biogoNode.Right != nil {
		rightKey := proto.Key(biogoNode.Right.Elem.(Key))
		bNode.RightKey = &rightKey
	}
	if err := nodesEqual(*key, *bNode, *rtNode); err != nil {
		return err
	}
	if rtNode.LeftKey != nil {
		if err := compareBiogoNode(db, biogoNode.Left, rtNode.LeftKey); err != nil {
			return err
		}
	}
	if rtNode.RightKey != nil {
		if err := compareBiogoNode(db, biogoNode.Right, rtNode.RightKey); err != nil {
			return err
		}
	}
	return nil
}
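
// A minimal, self-contained sketch of the recursive-comparison technique
// above, using a hypothetical node type in place of the llrb/proto types.
// This is a sketch under those assumptions, not the production comparison;
// it assumes "fmt" is imported.
type sketchNode struct {
	key         string
	left, right *sketchNode
}

// compareSketchNodes reports the first structural or key mismatch between
// two trees, recursing into children exactly as compareBiogoNode does.
func compareSketchNodes(a, b *sketchNode) error {
	if (a == nil) != (b == nil) {
		return fmt.Errorf("shape mismatch: %v vs %v", a, b)
	}
	if a == nil {
		return nil
	}
	if a.key != b.key {
		return fmt.Errorf("key mismatch: %q vs %q", a.key, b.key)
	}
	if err := compareSketchNodes(a.left, b.left); err != nil {
		return err
	}
	return compareSketchNodes(a.right, b.right)
}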
Example #2
func TestGetFirstRangeDescriptor(t *testing.T) {
	n := simulation.NewNetwork(3, "unix", gossip.TestInterval, gossip.TestBootstrap)
	ds := NewDistSender(n.Nodes[0].Gossip)
	if _, err := ds.getFirstRangeDescriptor(); err == nil {
		t.Errorf("expected not to find first range descriptor")
	}
	expectedDesc := &proto.RangeDescriptor{}
	expectedDesc.StartKey = proto.Key("a")
	expectedDesc.EndKey = proto.Key("c")

	// Add first RangeDescriptor to a node different from the node for
	// this dist sender and ensure that this dist sender has the
	// information within a given time.
	n.Nodes[1].Gossip.AddInfo(
		gossip.KeyFirstRangeDescriptor, *expectedDesc, time.Hour)
	maxCycles := 10
	n.SimulateNetwork(func(cycle int, network *simulation.Network) bool {
		desc, err := ds.getFirstRangeDescriptor()
		if err != nil {
			if cycle >= maxCycles {
				t.Errorf("could not get range descriptor after %d cycles", cycle)
				return false
			}
			return true
		}
		if !bytes.Equal(desc.StartKey, expectedDesc.StartKey) ||
			!bytes.Equal(desc.EndKey, expectedDesc.EndKey) {
			t.Errorf("expected first range descriptor %v, instead was %v",
				expectedDesc, desc)
		}
		return false
	})
	n.Stop()
}
Example #3
// TestTxnCoordSenderEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestTxnCoordSenderEndTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	txn := newTxn(s.Clock, proto.Key("a"))
	key := proto.Key("a")
	put := createPutRequest(key, []byte("value"), txn)
	reply, err := batchutil.SendWrapped(s.Sender, put)
	if err != nil {
		t.Fatal(err)
	}
	pReply := reply.(*proto.PutResponse)
	if _, err := batchutil.SendWrapped(s.Sender, &proto.EndTransactionRequest{
		RequestHeader: proto.RequestHeader{
			Key:       txn.Key,
			Timestamp: txn.Timestamp,
			Txn:       pReply.Header().Txn,
		},
		Commit: true,
	}); err != nil {
		t.Fatal(err)
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
Example #4
// TestRangeSplit executes various splits and checks that all created
// intents are resolved. This includes both intents resolved synchronously
// with EndTransaction and those resolved asynchronously via RPC.
func TestRangeSplit(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	splitKeys := []proto.Key{proto.Key("G"), keys.RangeMetaKey(proto.Key("F")),
		keys.RangeMetaKey(proto.Key("K")), keys.RangeMetaKey(proto.Key("H"))}

	// Execute the consecutive splits.
	for _, splitKey := range splitKeys {
		log.Infof("starting split at key %q...", splitKey)
		if err := s.DB.AdminSplit(splitKey); err != nil {
			t.Fatal(err)
		}
		log.Infof("split at key %q complete", splitKey)
	}

	if err := util.IsTrueWithin(func() bool {
		if _, _, err := engine.MVCCScan(s.Eng, keys.LocalMax, proto.KeyMax, 0, proto.MaxTimestamp, true, nil); err != nil {
			log.Infof("mvcc scan should be clean: %s", err)
			return false
		}
		return true
	}, 500*time.Millisecond); err != nil {
		t.Error("failed to verify no dangling intents within 500ms")
	}
}
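
// A hedged sketch of what a helper like util.IsTrueWithin plausibly does:
// poll the condition until it returns true or the duration elapses. The
// 1ms polling interval is an assumption, not the library's actual value;
// assumes "fmt" and "time" are imported.
func isTrueWithinSketch(fn func() bool, d time.Duration) error {
	deadline := time.Now().Add(d)
	for time.Now().Before(deadline) {
		if fn() {
			return nil
		}
		time.Sleep(time.Millisecond)
	}
	return fmt.Errorf("condition not true within %s", d)
}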
Example #5
// Example_rmUsers creates a series of user configs and verifies that
// user-rm works by deleting some, then all, of them, checking via
// user-ls that the entries have been removed. It also verifies that
// the default user config cannot be removed.
func Example_rmUsers() {
	_, stopper := startAdminServer()
	defer stopper.Stop()

	keys := []proto.Key{
		proto.Key("user1"),
		proto.Key("user2"),
	}

	for _, key := range keys {
		prefix := url.QueryEscape(string(key))
		RunSetUser(testContext, prefix, testUserConfigBytes)
	}

	for _, key := range keys {
		prefix := url.QueryEscape(string(key))
		RunRmUser(testContext, prefix)
		RunLsUser(testContext, "")
	}
	// Output:
	// set user config for key prefix "user1"
	// set user config for key prefix "user2"
	// removed user config for key prefix "user1"
	// [default]
	// user2
	// removed user config for key prefix "user2"
	// [default]
}
Example #6
func TestMetaScanBounds(t *testing.T) {
	defer leaktest.AfterTest(t)

	testCases := []struct {
		key, expStart, expEnd proto.Key
	}{
		{
			key:      proto.Key{},
			expStart: Meta1Prefix,
			expEnd:   Meta1Prefix.PrefixEnd(),
		},
		{
			key:      proto.Key("foo"),
			expStart: proto.Key("foo").Next(),
			expEnd:   proto.Key("foo")[:len(Meta1Prefix)].PrefixEnd(),
		},
		{
			key:      proto.MakeKey(Meta1Prefix, proto.KeyMax),
			expStart: proto.MakeKey(Meta1Prefix, proto.KeyMax),
			expEnd:   Meta1Prefix.PrefixEnd(),
		},
	}
	for i, test := range testCases {
		resStart, resEnd := MetaScanBounds(test.key)
		if !resStart.Equal(test.expStart) || !resEnd.Equal(test.expEnd) {
			t.Errorf("%d: range bounds %q-%q don't match expected bounds %q-%q for key %q", i, resStart, resEnd, test.expStart, test.expEnd, test.key)
		}
	}
}
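
// prefixEndSketch illustrates the PrefixEnd semantics the expectations
// above rely on: the smallest key sorting after every key carrying the
// given prefix, formed by incrementing the last non-0xff byte. This is a
// sketch inferred from the test cases, not the keys package itself.
func prefixEndSketch(prefix []byte) []byte {
	end := append([]byte(nil), prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] != 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// All bytes are 0xff: no strict upper bound exists for this prefix.
	return prefix
}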
Example #7
// TestTxnCoordSenderGC verifies that the coordinator cleans up extant
// transactions after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	txn := newTxn(s.Clock, proto.Key("a"))
	call := proto.Call{
		Args:  createPutRequest(proto.Key("a"), []byte("value"), txn),
		Reply: &proto.PutResponse{},
	}
	if err := sendCall(s.Sender, call); err != nil {
		t.Fatal(err)
	}

	// Now, advance clock past the default client timeout.
	// Locking the TxnCoordSender to prevent a data race.
	s.Sender.Lock()
	s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
	s.Sender.Unlock()

	if err := util.IsTrueWithin(func() bool {
		// Locking the TxnCoordSender to prevent a data race.
		s.Sender.Lock()
		_, ok := s.Sender.txns[string(txn.ID)]
		s.Sender.Unlock()
		return !ok
	}, 50*time.Millisecond); err != nil {
		t.Error("expected garbage collection")
	}
}
Example #8
// TestSendRPCRetry verifies that when sendRPC fails on the first address
// but succeeds on the second, the second reply is returned successfully.
func TestSendRPCRetry(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	if err := g.SetNodeDescriptor(&proto.NodeDescriptor{NodeID: 1}); err != nil {
		t.Fatal(err)
	}
	// Fill RangeDescriptor with 2 replicas
	var descriptor = proto.RangeDescriptor{
		RaftID:   1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("z"),
	}
	for i := 1; i <= 2; i++ {
		addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
		nd := &proto.NodeDescriptor{
			NodeID: proto.NodeID(i),
			Address: proto.Addr{
				Network: addr.Network(),
				Address: addr.String(),
			},
		}
		if err := g.AddInfo(gossip.MakeNodeIDKey(proto.NodeID(i)), nd, time.Hour); err != nil {
			t.Fatal(err)
		}

		descriptor.Replicas = append(descriptor.Replicas, proto.Replica{
			NodeID:  proto.NodeID(i),
			StoreID: proto.StoreID(i),
		})
	}
	// Define our rpcSend stub which returns success on the second address.
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) interface{}, getReply func() interface{}, _ *rpc.Context) ([]interface{}, error) {
		if method == "Node.Scan" {
			// Discard the reply from the first address to simulate failure.
			_ = getReply()
			// The reply from the second address succeeds.
			reply := getReply()
			reply.(*proto.ScanResponse).Rows = append([]proto.KeyValue{}, proto.KeyValue{Key: proto.Key("b"), Value: proto.Value{}})
			return []interface{}{reply}, nil
		}
		return nil, util.Errorf("unexpected method %v", method)
	}
	ctx := &DistSenderContext{
		rpcSend: testFn,
		rangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{descriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	call := proto.ScanCall(proto.Key("a"), proto.Key("d"), 1)
	sr := call.Reply.(*proto.ScanResponse)
	ds.Send(context.Background(), call)
	if err := sr.GoError(); err != nil {
		t.Fatal(err)
	}
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
}
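
// A minimal sketch of the stubbing pattern used above: because the sender
// receives its transport as a function value (rpcSendFn), a test can
// inject canned replies without any network. All names here are
// hypothetical; assumes "fmt" is imported.
type sendFnSketch func(method string) (string, error)

type senderSketch struct{ send sendFnSketch }

func (s *senderSketch) scan() (string, error) { return s.send("Node.Scan") }

func newStubbedSender() *senderSketch {
	return &senderSketch{send: func(method string) (string, error) {
		if method == "Node.Scan" {
			return "row-b", nil // canned success, as from the second replica
		}
		return "", fmt.Errorf("unexpected method %s", method)
	}}
}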
Example #9
// TestPrefixConfigSort verifies sorting of keys.
func TestPrefixConfigSort(t *testing.T) {
	defer leaktest.AfterTest(t)
	keys := []proto.Key{
		proto.KeyMax,
		proto.Key("c"),
		proto.Key("a"),
		proto.Key("b"),
		proto.Key("aa"),
		proto.Key("\xfe"),
		proto.KeyMin,
	}
	expKeys := []proto.Key{
		proto.KeyMin,
		proto.Key("a"),
		proto.Key("aa"),
		proto.Key("b"),
		proto.Key("c"),
		proto.Key("\xfe"),
		proto.KeyMax,
	}
	pcc := &PrefixConfigMap{}
	for _, key := range keys {
		pcc.Configs = append(pcc.Configs, PrefixConfig{Prefix: key})
	}
	sort.Sort(pcc)
	for i, pc := range pcc.Configs {
		if !bytes.Equal(pc.Prefix, expKeys[i]) {
			t.Errorf("order for index %d incorrect; expected %q, got %q", i, expKeys[i], pc.Prefix)
		}
	}
}
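
// A hedged sketch of the sort.Interface that PrefixConfigMap plausibly
// implements to produce the ordering verified above: byte-wise comparison
// of the prefixes. Names are hypothetical; assumes "bytes" is imported.
type prefixSliceSketch [][]byte

func (p prefixSliceSketch) Len() int           { return len(p) }
func (p prefixSliceSketch) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p prefixSliceSketch) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) < 0 }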
Example #10
// TestTxnCoordIdempotentCleanup verifies that cleanupTxn is idempotent.
func TestTxnCoordIdempotentCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	txn := newTxn(s.Clock, proto.Key("a"))
	pReply := &proto.PutResponse{}
	key := proto.Key("a")
	call := proto.Call{
		Args:  createPutRequest(key, []byte("value"), txn),
		Reply: pReply,
	}
	if err := sendCall(s.Sender, call); err != nil {
		t.Fatal(err)
	}

	if pReply.Error != nil {
		t.Fatal(pReply.GoError())
	}
	s.Sender.cleanupTxn(nil, *txn) // first call
	etReply := &proto.EndTransactionResponse{}
	if err := sendCall(s.Sender, proto.Call{
		Args: &proto.EndTransactionRequest{
			RequestHeader: proto.RequestHeader{
				Key:       txn.Key,
				Timestamp: txn.Timestamp,
				Txn:       txn,
			},
			Commit: true,
		},
		Reply: etReply,
	}); /* second call */ err != nil {
		t.Fatal(err)
	}
}
Example #11
// TestTxnCoordSenderMultipleTxns verifies correct operation with
// multiple outstanding transactions.
func TestTxnCoordSenderMultipleTxns(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn1 := newTxn(s.Clock, proto.Key("a"))
	txn2 := newTxn(s.Clock, proto.Key("b"))
	call := proto.Call{
		Args:  createPutRequest(proto.Key("a"), []byte("value"), txn1),
		Reply: &proto.PutResponse{}}
	if err := sendCall(s.Sender, call); err != nil {
		t.Fatal(err)
	}
	call = proto.Call{
		Args:  createPutRequest(proto.Key("b"), []byte("value"), txn2),
		Reply: &proto.PutResponse{}}
	if err := sendCall(s.Sender, call); err != nil {
		t.Fatal(err)
	}

	if len(s.Sender.txns) != 2 {
		t.Errorf("expected length of transactions map to be 2; got %d", len(s.Sender.txns))
	}
}
Example #12
// ValidateRangeMetaKey validates that the given key is a valid Range Metadata
// key.
func ValidateRangeMetaKey(key proto.Key) error {
	// KeyMin is a valid key.
	if key.Equal(proto.KeyMin) {
		return nil
	}
	// Key must be at least as long as Meta1Prefix.
	if len(key) < len(Meta1Prefix) {
		return NewInvalidRangeMetaKeyError("too short", key)
	}

	prefix, body := proto.Key(key[:len(Meta1Prefix)]), proto.Key(key[len(Meta1Prefix):])

	if prefix.Equal(Meta2Prefix) {
		if body.Less(proto.KeyMax) {
			return nil
		}
		return NewInvalidRangeMetaKeyError("body of meta2 range lookup is >= KeyMax", key)
	}

	if prefix.Equal(Meta1Prefix) {
		if proto.KeyMax.Less(body) {
			return NewInvalidRangeMetaKeyError("body of meta1 range lookup is > KeyMax", key)
		}
		return nil
	}
	return NewInvalidRangeMetaKeyError("not a meta key", key)
}
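
// A hedged usage sketch of ValidateRangeMetaKey, assuming the Meta1Prefix
// and Meta2Prefix values from this keys package and "fmt" imported: KeyMin
// and well-formed meta keys validate, ordinary keys do not.
func exampleValidateRangeMetaKey() {
	for _, k := range []proto.Key{
		proto.KeyMin, // valid: explicitly allowed
		proto.MakeKey(Meta2Prefix, proto.Key("a")), // valid meta2 lookup key
		proto.Key("a"), // invalid: fails validation
	} {
		fmt.Println(k, ValidateRangeMetaKey(k))
	}
}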
Example #13
// TestTxnCoordSenderEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestTxnCoordSenderEndTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	txn := newTxn(s.Clock, proto.Key("a"))
	pReply := &proto.PutResponse{}
	key := proto.Key("a")
	call := proto.Call{
		Args:  createPutRequest(key, []byte("value"), txn),
		Reply: pReply,
	}
	if err := sendCall(s.Sender, call); err != nil {
		t.Fatal(err)
	}
	if pReply.GoError() != nil {
		t.Fatal(pReply.GoError())
	}
	etReply := &proto.EndTransactionResponse{}
	s.Sender.Send(context.Background(), proto.Call{
		Args: &proto.EndTransactionRequest{
			RequestHeader: proto.RequestHeader{
				Key:       txn.Key,
				Timestamp: txn.Timestamp,
				Txn:       txn,
			},
			Commit: true,
		},
		Reply: etReply,
	})
	if etReply.Error != nil {
		t.Fatal(etReply.GoError())
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
Example #14
// TestKVClientEmptyValues verifies that empty values are preserved
// for both empty []byte and integer=0. This used to fail when we
// allowed the protobufs to be gob-encoded using the default go rpc
// gob codec because gob treats pointer values and non-pointer values
// as equivalent and elides zero-valued defaults on decode.
func TestKVClientEmptyValues(t *testing.T) {
	s := StartTestServer(t)
	defer s.Stop()
	kvClient := createTestClient(s.HTTPAddr)
	kvClient.User = storage.UserRoot

	kvClient.Call(proto.Put, proto.PutArgs(proto.Key("a"), []byte{}), &proto.PutResponse{})
	kvClient.Call(proto.Put, &proto.PutRequest{
		RequestHeader: proto.RequestHeader{
			Key: proto.Key("b"),
		},
		Value: proto.Value{
			Integer: gogoproto.Int64(0),
		},
	}, &proto.PutResponse{})

	getResp := &proto.GetResponse{}
	kvClient.Call(proto.Get, proto.GetArgs(proto.Key("a")), getResp)
	if bytes := getResp.Value.Bytes; bytes == nil || len(bytes) != 0 {
		t.Errorf("expected non-nil empty byte slice; got %q", bytes)
	}
	kvClient.Call(proto.Get, proto.GetArgs(proto.Key("b")), getResp)
	if intVal := getResp.Value.Integer; intVal == nil || *intVal != 0 {
		t.Errorf("expected non-nil 0-valued integer; got %p, %d", getResp.Value.Integer, getResp.Value.GetInteger())
	}
}
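
// A sketch of the gob behavior the comment above describes: gob flattens
// pointers and omits zero-valued fields from the transmission, so a
// pointer to 0 decodes as nil. Assumes "bytes", "encoding/gob" and "fmt"
// are imported; the gobMsg type is hypothetical.
type gobMsg struct {
	N *int64
}

func exampleGobElidesZero() {
	zero := int64(0)
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(gobMsg{N: &zero}); err != nil {
		panic(err)
	}
	var out gobMsg
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.N) // <nil>: the zero-valued field was elided on the wire
}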
Example #15
// TestRangeLookupOptionOnReverseScan verifies that a lookup triggered by a
// ReverseScan request has the `useReverseScan` option specified.
func TestRangeLookupOptionOnReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()

	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		return []gogoproto.Message{getReply()}, nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(k proto.Key, opts lookupOptions) ([]proto.RangeDescriptor, error) {
			if len(k) > 0 && !opts.useReverseScan {
				t.Fatalf("expected useReverseScan to be set")
			}
			return []proto.RangeDescriptor{testRangeDescriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	rScan := &proto.ReverseScanRequest{
		RequestHeader: proto.RequestHeader{Key: proto.Key("a"), EndKey: proto.Key("b")},
	}
	if _, err := batchutil.SendWrapped(ds, rScan); err != nil {
		t.Fatal(err)
	}
}
Example #16
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setupMultipleRanges(t, "b")
	defer s.Stop()

	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := []string{"a", "b"}
	ts := []time.Time{}
	b := &client.Batch{}
	for _, key := range keys {
		b.Put(key, "value")
	}
	if err := db.Run(b); err != nil {
		t.Fatal(err)
	}
	for i := range keys {
		ts = append(ts, b.Results[i].Rows[0].Timestamp())
		log.Infof("%d: %s", i, b.Results[i].Rows[0].Timestamp())
	}

	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock just below the second
	// key's timestamp so the inconsistent read sees only key "a".
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())

	// Scan.
	sa := proto.NewScan(proto.Key("a"), proto.Key("c"), 0).(*proto.ScanRequest)
	sa.ReadConsistency = proto.INCONSISTENT
	reply, err := batchutil.SendWrapped(ds, sa)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*proto.ScanResponse)

	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}

	// ReverseScan.
	rsa := proto.NewReverseScan(proto.Key("a"), proto.Key("c"), 0).(*proto.ReverseScanRequest)
	rsa.ReadConsistency = proto.INCONSISTENT
	reply, err = batchutil.SendWrapped(ds, rsa)
	if err != nil {
		t.Fatal(err)
	}
	rsr := reply.(*proto.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
Example #17
// CopyFrom copies all the cached results from the originRangeID
// response cache into this one. Note that the cache will not be
// locked while copying is in progress. Failures decoding individual
// cache entries return an error. The copy is done directly using the
// engine instead of interpreting values through MVCC for efficiency.
func (rc *ResponseCache) CopyFrom(e engine.Engine, originRangeID proto.RangeID) error {
	prefix := keys.ResponseCacheKey(originRangeID, nil) // response cache prefix
	start := engine.MVCCEncodeKey(prefix)
	end := engine.MVCCEncodeKey(prefix.PrefixEnd())

	return e.Iterate(start, end, func(kv proto.RawKeyValue) (bool, error) {
		// Decode the key into a command ID; any decoding failure aborts
		// the copy. Otherwise, write the value to the corresponding key
		// in the new cache.
		cmdID, err := rc.decodeResponseCacheKey(kv.Key)
		if err != nil {
			return false, util.Errorf("could not decode a response cache key %s: %s",
				proto.Key(kv.Key), err)
		}
		key := keys.ResponseCacheKey(rc.rangeID, &cmdID)
		encKey := engine.MVCCEncodeKey(key)
		// Decode the value, update the checksum and re-encode.
		meta := &engine.MVCCMetadata{}
		if err := gogoproto.Unmarshal(kv.Value, meta); err != nil {
			return false, util.Errorf("could not decode response cache value %s [% x]: %s",
				proto.Key(kv.Key), kv.Value, err)
		}
		meta.Value.Checksum = nil
		meta.Value.InitChecksum(key)
		_, _, err = engine.PutProto(e, encKey, meta)
		return false, err
	})
}
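
// A minimal sketch of the Iterate contract relied on above: the callback
// returns (done, err), and a true done or non-nil err stops the scan. A
// sorted in-memory map stands in for the engine; assumes "sort" imported.
func iterateSketch(kvs map[string]string, fn func(k, v string) (bool, error)) error {
	keys := make([]string, 0, len(kvs))
	for k := range kvs {
		keys = append(keys, k)
	}
	sort.Strings(keys) // engines iterate in key order
	for _, k := range keys {
		done, err := fn(k, kvs[k])
		if err != nil || done {
			return err
		}
	}
	return nil
}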
Example #18
func TestCommandQueueMultiplePendingCommands(t *testing.T) {
	defer leaktest.AfterTest(t)
	cq := NewCommandQueue()
	wg1 := sync.WaitGroup{}
	wg2 := sync.WaitGroup{}
	wg3 := sync.WaitGroup{}

	// Add a command which will overlap all commands.
	wk := cq.Add(proto.Key("a"), proto.Key("d"), false)
	cq.GetWait(proto.Key("a"), nil, false, &wg1)
	cq.GetWait(proto.Key("b"), nil, false, &wg2)
	cq.GetWait(proto.Key("c"), nil, false, &wg3)
	cmdDone1 := waitForCmd(&wg1)
	cmdDone2 := waitForCmd(&wg2)
	cmdDone3 := waitForCmd(&wg3)

	if testCmdDone(cmdDone1, 1*time.Millisecond) ||
		testCmdDone(cmdDone2, 1*time.Millisecond) ||
		testCmdDone(cmdDone3, 1*time.Millisecond) {
		t.Fatal("no commands should finish with command outstanding")
	}
	cq.Remove(wk)
	if !testCmdDone(cmdDone1, 5*time.Millisecond) ||
		!testCmdDone(cmdDone2, 5*time.Millisecond) ||
		!testCmdDone(cmdDone3, 5*time.Millisecond) {
		t.Fatal("commands should finish with no commands outstanding")
	}
}
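
// A heavily simplified sketch of the wait-group technique this test
// exercises: each pending command owns a WaitGroup that waiters block on,
// and remove releases them. No interval tree or span overlap here; every
// waiter waits on every pending command. Names are hypothetical; assumes
// "sync" is imported.
type queueSketch struct {
	mu      sync.Mutex
	pending map[int]*sync.WaitGroup
	nextID  int
}

func (q *queueSketch) add() int {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.pending == nil {
		q.pending = map[int]*sync.WaitGroup{}
	}
	wg := &sync.WaitGroup{}
	wg.Add(1)
	id := q.nextID
	q.nextID++
	q.pending[id] = wg
	return id
}

func (q *queueSketch) remove(id int) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.pending[id].Done()
	delete(q.pending, id)
}

func (q *queueSketch) wait() {
	q.mu.Lock()
	wgs := make([]*sync.WaitGroup, 0, len(q.pending))
	for _, wg := range q.pending {
		wgs = append(wgs, wg)
	}
	q.mu.Unlock()
	for _, wg := range wgs {
		wg.Wait()
	}
}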
Example #19
// TestKVDBEndTransactionWithTriggers verifies that triggers are
// disallowed on call to EndTransaction.
func TestKVDBEndTransactionWithTriggers(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := server.StartTestServer(t)
	defer s.Stop()

	db := createTestClient(t, s.ServingAddr())
	err := db.Txn(func(txn *client.Txn) error {
		// Make an EndTransaction request which would fail if not
		// stripped. In this case, we set the start key to "bar" for a
		// split of the default range; start key must be "" in this case.
		b := &client.Batch{}
		b.Put("foo", "only here to make this a rw transaction")
		b.InternalAddCall(proto.Call{
			Args: &proto.EndTransactionRequest{
				RequestHeader: proto.RequestHeader{Key: proto.Key("foo")},
				Commit:        true,
				InternalCommitTrigger: &proto.InternalCommitTrigger{
					SplitTrigger: &proto.SplitTrigger{
						UpdatedDesc: proto.RangeDescriptor{StartKey: proto.Key("bar")},
					},
				},
			},
			Reply: &proto.EndTransactionResponse{},
		})
		return txn.Run(b)
	})
	if err == nil {
		t.Errorf("expected 400 bad request error on commit")
	}
}
Example #20
// runClientScan first creates test data (and resets the benchmarking
// timer). It then performs b.N client scans in increments of numRows
// keys over all of the data, restarting at the beginning of the
// keyspace, as many times as necessary.
func runClientScan(useRPC, useSSL bool, numRows, numVersions int, b *testing.B) {
	const numKeys = 100000

	s, db := setupClientBenchData(useRPC, useSSL, numVersions, numKeys, b)
	defer s.Stop()

	b.SetBytes(int64(numRows * valueSize))
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		startKeyBuf := append(make([]byte, 0, 64), []byte("key-")...)
		endKeyBuf := append(make([]byte, 0, 64), []byte("key-")...)
		for pb.Next() {
			// Choose a random key to start scan.
			keyIdx := rand.Int31n(int32(numKeys - numRows))
			startKey := proto.Key(encoding.EncodeUvarint(startKeyBuf, uint64(keyIdx)))
			endKey := proto.Key(encoding.EncodeUvarint(endKeyBuf, uint64(keyIdx)+uint64(numRows)))
			rows, err := db.Scan(startKey, endKey, int64(numRows))
			if err != nil {
				b.Fatalf("failed scan: %s", err)
			}
			if len(rows) != numRows {
				b.Fatalf("failed to scan: %d != %d", len(rows), numRows)
			}
		}
	})

	b.StopTimer()
}
Example #21
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	// Initialize the gossip network.
	var wg sync.WaitGroup
	wg.Add(len(mtc.stores))
	key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ []byte) { wg.Done() })
	for _, s := range mtc.stores {
		s.GossipStore()
	}
	wg.Wait()

	// Once we know our peers, trigger a scan.
	mtc.stores[0].ForceReplicationScan(t)

	// The range should become available on every node.
	if err := util.IsTrueWithin(func() bool {
		for _, s := range mtc.stores {
			r := s.LookupReplica(proto.Key("a"), proto.Key("b"))
			if r == nil {
				return false
			}
		}
		return true
	}, 1*time.Second); err != nil {
		t.Fatal(err)
	}
}
Example #22
// newTestRangeSet creates a new range set containing count ranges.
func newTestRangeSet(count int, t *testing.T) *testRangeSet {
	rs := &testRangeSet{rangesByKey: btree.New(64 /* degree */)}
	for i := 0; i < count; i++ {
		desc := &proto.RangeDescriptor{
			RaftID:   proto.RaftID(i),
			StartKey: proto.Key(fmt.Sprintf("%03d", i)),
			EndKey:   proto.Key(fmt.Sprintf("%03d", i+1)),
		}
		// Initialize the range stats so the scanner can use them.
		rng := &Range{
			stats: &rangeStats{
				raftID: desc.RaftID,
				MVCCStats: engine.MVCCStats{
					KeyBytes:  1,
					ValBytes:  2,
					KeyCount:  1,
					LiveCount: 1,
				},
			},
		}
		if err := rng.setDesc(desc); err != nil {
			t.Fatal(err)
		}
		if exRngItem := rs.rangesByKey.ReplaceOrInsert(rng); exRngItem != nil {
			t.Fatalf("failed to insert range %s", rng)
		}
	}
	return rs
}
Example #23
// TestRangeCacheClearOverlappingMeta prevents regression of a bug which caused
// a panic when clearing overlapping descriptors for [KeyMin, Meta2Key). The
// issue was that when attempting to clear out descriptors which were subsumed
// by the above range, an iteration over the corresponding meta keys was
// performed, with the left endpoint excluded. This exclusion was incorrect: it
// first incremented the start key (KeyMin) and then formed the meta key; for
// KeyMin this leads to Meta2Prefix\x00. For the above EndKey, the meta key is
// a Meta1key which sorts before Meta2Prefix\x00, causing a panic. The fix was
// simply to increment the meta key for StartKey, not StartKey itself.
func TestRangeCacheClearOverlappingMeta(t *testing.T) {
	defer leaktest.AfterTest(t)

	firstDesc := &proto.RangeDescriptor{
		StartKey: proto.KeyMin,
		EndKey:   proto.Key("zzz"),
	}
	restDesc := &proto.RangeDescriptor{
		StartKey: firstDesc.StartKey,
		EndKey:   proto.KeyMax,
	}

	cache := newRangeDescriptorCache(nil, 2<<10)
	cache.rangeCache.Add(rangeCacheKey(keys.RangeMetaKey(firstDesc.EndKey)),
		firstDesc)
	cache.rangeCache.Add(rangeCacheKey(keys.RangeMetaKey(restDesc.EndKey)),
		restDesc)

	// Add new range, corresponding to splitting the first range at a meta key.
	metaSplitDesc := &proto.RangeDescriptor{
		StartKey: proto.KeyMin,
		EndKey:   proto.Key(keys.RangeMetaKey(proto.Key("foo"))),
	}
	func() {
		defer func() {
			if r := recover(); r != nil {
				t.Fatalf("invocation of clearOverlappingCachedRangeDescriptors panicked: %v", r)
			}
		}()
		cache.clearOverlappingCachedRangeDescriptors(metaSplitDesc.EndKey, keys.RangeMetaKey(metaSplitDesc.EndKey), metaSplitDesc)
	}()
}
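
// A hedged illustration of the ordering pitfall described above, assuming
// the conventional prefixes Meta1Prefix = "\x00\x00meta1" and Meta2Prefix =
// "\x00\x00meta2", and that Key.Next() appends a zero byte. The meta key
// formed from KeyMin.Next() starts with Meta2Prefix, while the meta key
// for an EndKey that is itself a meta2 key is a Meta1 key, so the naive
// [start, end) interval is inverted. Assumes "bytes" and "fmt" imported.
func exampleInvertedMetaInterval() {
	meta1 := []byte("\x00\x00meta1")
	meta2 := []byte("\x00\x00meta2")
	// metaKey(KeyMin.Next()) == Meta2Prefix + "\x00"
	start := append(append([]byte(nil), meta2...), 0)
	// metaKey(Meta2Prefix + "foo") == Meta1Prefix + "foo"
	end := append(append([]byte(nil), meta1...), []byte("foo")...)
	fmt.Println(bytes.Compare(start, end) > 0) // true: start sorts after end
}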
Example #24
// Example_setAndGetPerms sets perm configs for a variety of key
// prefixes and verifies they can be fetched directly.
func Example_setAndGetPerms() {
	_, stopper := startAdminServer()
	defer stopper.Stop()

	testConfigFn := createTestConfigFile(testPermConfig)
	defer util.CleanupDir(testConfigFn)

	testData := []struct {
		prefix proto.Key
		yaml   string
	}{
		{proto.KeyMin, testPermConfig},
		{proto.Key("db1"), testPermConfig},
		{proto.Key("db 2"), testPermConfig},
		{proto.Key("\xfe"), testPermConfig},
	}

	for _, test := range testData {
		prefix := url.QueryEscape(string(test.prefix))
		RunSetPerm(testContext, prefix, testConfigFn)
		RunGetPerm(testContext, prefix)
	}
	// Output:
	// set permission config for key prefix ""
	// permission config for key prefix "":
	// read:
	// - readonly
	// - readwrite
	// write:
	// - readwrite
	// - writeonly
	//
	// set permission config for key prefix "db1"
	// permission config for key prefix "db1":
	// read:
	// - readonly
	// - readwrite
	// write:
	// - readwrite
	// - writeonly
	//
	// set permission config for key prefix "db+2"
	// permission config for key prefix "db+2":
	// read:
	// - readonly
	// - readwrite
	// write:
	// - readwrite
	// - writeonly
	//
	// set permission config for key prefix "%FE"
	// permission config for key prefix "%FE":
	// read:
	// - readonly
	// - readwrite
	// write:
	// - readwrite
	// - writeonly
}
Example #25
// Example_setAndGetZone sets zone configs for a variety of key
// prefixes and verifies they can be fetched directly.
func Example_setAndGetZone() {
	_, stopper := startAdminServer()
	defer stopper.Stop()

	testConfigFn := createTestConfigFile(testZoneConfig)
	defer util.CleanupDir(testConfigFn)

	testData := []struct {
		prefix proto.Key
		yaml   string
	}{
		{proto.KeyMin, testZoneConfig},
		{proto.Key("db1"), testZoneConfig},
		{proto.Key("db 2"), testZoneConfig},
		{proto.Key("\xfe"), testZoneConfig},
	}

	for _, test := range testData {
		prefix := url.QueryEscape(string(test.prefix))
		RunSetZone(testContext, prefix, testConfigFn)
		RunGetZone(testContext, prefix)
	}
	// Output:
	// set zone config for key prefix ""
	// zone config for key prefix "":
	// replicas:
	// - attrs: [dc1, ssd]
	// - attrs: [dc2, ssd]
	// - attrs: [dc3, ssd]
	// range_min_bytes: 1048576
	// range_max_bytes: 67108864
	//
	// set zone config for key prefix "db1"
	// zone config for key prefix "db1":
	// replicas:
	// - attrs: [dc1, ssd]
	// - attrs: [dc2, ssd]
	// - attrs: [dc3, ssd]
	// range_min_bytes: 1048576
	// range_max_bytes: 67108864
	//
	// set zone config for key prefix "db+2"
	// zone config for key prefix "db+2":
	// replicas:
	// - attrs: [dc1, ssd]
	// - attrs: [dc2, ssd]
	// - attrs: [dc3, ssd]
	// range_min_bytes: 1048576
	// range_max_bytes: 67108864
	//
	// set zone config for key prefix "%FE"
	// zone config for key prefix "%FE":
	// replicas:
	// - attrs: [dc1, ssd]
	// - attrs: [dc2, ssd]
	// - attrs: [dc3, ssd]
	// range_min_bytes: 1048576
	// range_max_bytes: 67108864
}
Example #26
// TestUpdateRangeAddressingSplitMeta1 verifies that it's an error to
// attempt to update range addressing records that would allow a split
// of meta1 records.
func TestUpdateRangeAddressingSplitMeta1(t *testing.T) {
	store := createTestStore(t)
	left := &proto.RangeDescriptor{StartKey: engine.KeyMin, EndKey: meta1Key(proto.Key("a"))}
	right := &proto.RangeDescriptor{StartKey: meta1Key(proto.Key("a")), EndKey: engine.KeyMax}
	if err := storage.SplitRangeAddressing(store.DB(), left, right); err == nil {
		t.Error("expected failure trying to update addressing records for meta1 split")
	}
}
Example #27
// TestUpdateRangeAddressingSplitMeta1 verifies that it's an error to
// attempt to update range addressing records that would allow a split
// of meta1 records.
func TestUpdateRangeAddressingSplitMeta1(t *testing.T) {
	defer leaktest.AfterTest(t)
	left := &proto.RangeDescriptor{StartKey: proto.KeyMin, EndKey: meta1Key(proto.Key("a"))}
	right := &proto.RangeDescriptor{StartKey: meta1Key(proto.Key("a")), EndKey: proto.KeyMax}
	if err := splitRangeAddressing(&client.Batch{}, left, right); err == nil {
		t.Error("expected failure trying to update addressing records for meta1 split")
	}
}
Example #28
// TestRocksDBCompaction verifies that a garbage collector can be
// installed on a RocksDB engine and will properly compact response
// cache and transaction entries.
func TestRocksDBCompaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	gob.Register(proto.Timestamp{})
	rocksdb := newMemRocksDB(proto.Attributes{Attrs: []string{"ssd"}}, testCacheSize)
	err := rocksdb.Open()
	if err != nil {
		t.Fatalf("could not create new in-memory rocksdb db instance: %v", err)
	}
	rocksdb.SetGCTimeouts(1, 2)
	defer rocksdb.Close()

	cmdID := &proto.ClientCmdID{WallTime: 1, Random: 1}

	// Write two transaction values and two response cache values such
	// that exactly one of each should be GC'd based on our GC timeouts.
	kvs := []proto.KeyValue{
		{
			Key:   keys.ResponseCacheKey(1, cmdID),
			Value: proto.Value{Bytes: encodePutResponse(makeTS(2, 0), t)},
		},
		{
			Key:   keys.ResponseCacheKey(2, cmdID),
			Value: proto.Value{Bytes: encodePutResponse(makeTS(3, 0), t)},
		},
		{
			Key:   keys.TransactionKey(proto.Key("a"), proto.Key(uuid.NewUUID4())),
			Value: proto.Value{Bytes: encodeTransaction(makeTS(1, 0), t)},
		},
		{
			Key:   keys.TransactionKey(proto.Key("b"), proto.Key(uuid.NewUUID4())),
			Value: proto.Value{Bytes: encodeTransaction(makeTS(2, 0), t)},
		},
	}
	for _, kv := range kvs {
		if err := MVCCPut(rocksdb, nil, kv.Key, proto.ZeroTimestamp, kv.Value, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Compact range and scan remaining values to compare.
	rocksdb.CompactRange(nil, nil)
	actualKVs, _, err := MVCCScan(rocksdb, proto.KeyMin, proto.KeyMax,
		0, proto.ZeroTimestamp, true, nil)
	if err != nil {
		t.Fatalf("could not run scan: %v", err)
	}
	var keys []proto.Key
	for _, kv := range actualKVs {
		keys = append(keys, kv.Key)
	}
	expKeys := []proto.Key{
		kvs[1].Key,
		kvs[3].Key,
	}
	if !reflect.DeepEqual(expKeys, keys) {
		t.Errorf("expected keys %+v, got keys %+v", expKeys, keys)
	}
}
Example #29
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	for i, tc := range []struct {
		call    proto.Call
		writing bool
		ok      bool
	}{
		{proto.GetCall(proto.Key("a")), true, true},
		{proto.GetCall(proto.Key("a")), false, true},
		{proto.PutCall(proto.Key("a"), proto.Value{}), false, true},
		{proto.PutCall(proto.Key("a"), proto.Value{}), true, false},
	} {
		{
			txn := newTxn(s.Clock, proto.Key("a"))
			txn.Writing = tc.writing
			tc.call.Args.Header().Txn = txn
		}
		err := sendCall(s.Sender, tc.call)
		if err == nil != tc.ok {
			t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
				i, tc.call.Args, tc.writing, tc.ok, err)
		}
		if err != nil {
			continue
		}

		txn := tc.call.Reply.Header().Txn
		// The transaction should come back rw if it started rw or if we just
		// wrote.
		isWrite := proto.IsTransactionWrite(tc.call.Args)
		if (tc.writing || isWrite) != txn.Writing {
			t.Errorf("%d: unexpected writing state: %s", i, txn)
		}
		if !isWrite {
			continue
		}
		// Abort for clean shutdown.
		etReply := &proto.EndTransactionResponse{}
		if err := sendCall(s.Sender, proto.Call{
			Args: &proto.EndTransactionRequest{
				RequestHeader: proto.RequestHeader{
					Key:       txn.Key,
					Timestamp: txn.Timestamp,
					Txn:       txn,
				},
				Commit: false,
			},
			Reply: etReply,
		}); err != nil {
			log.Warning(err)
			t.Fatal(err)
		}
	}
}
Example #30
func doLookup(t *testing.T, rc *RangeDescriptorCache, key string) {
	r, err := rc.LookupRangeDescriptor(proto.Key(key))
	if err != nil {
		t.Fatalf("Unexpected error from LookupRangeDescriptor: %s", err.Error())
	}
	if !r.ContainsKey(engine.KeyAddress(proto.Key(key))) {
		t.Fatalf("Returned range did not contain key: %s-%s, %s", r.StartKey, r.EndKey, key)
	}
}