Example #1
func TestEntryCache(t *testing.T) {
	defer leaktest.AfterTest(t)()
	rec := newRaftEntryCache(100)
	rangeID := roachpb.RangeID(2)
	// Add entries for range 2, indexes (1-10).
	ents := addEntries(rec, rangeID, 1, 11)
	// Fetch all data with an exact match.
	verifyGet(t, rec, rangeID, 1, 11, ents, 11)
	// Fetch point entry.
	verifyGet(t, rec, rangeID, 1, 2, ents[0:1], 2)
	// Fetch overlapping first half.
	verifyGet(t, rec, rangeID, 0, 5, []raftpb.Entry{}, 0)
	// Fetch overlapping second half.
	verifyGet(t, rec, rangeID, 9, 12, ents[8:], 11)
	// Fetch data from earlier range.
	verifyGet(t, rec, roachpb.RangeID(1), 1, 11, []raftpb.Entry{}, 1)
	// Fetch data from later range.
	verifyGet(t, rec, roachpb.RangeID(3), 1, 11, []raftpb.Entry{}, 1)
	// Create a gap in the entries.
	rec.delEntries(rangeID, 4, 8)
	// Fetch all data; verify we get only first three.
	verifyGet(t, rec, rangeID, 1, 11, ents[0:3], 4)
	// Try to fetch from within the gap; expect no entries.
	verifyGet(t, rec, rangeID, 5, 11, []raftpb.Entry{}, 5)
	// Fetch after the gap.
	verifyGet(t, rec, rangeID, 8, 11, ents[7:], 11)
	// Delete the prefix of entries.
	rec.delEntries(rangeID, 1, 3)
	// Verify entries are gone.
	verifyGet(t, rec, rangeID, 1, 5, []raftpb.Entry{}, 1)
	// Delete the suffix of entries.
	rec.delEntries(rangeID, 10, 11)
	// Verify get of entries at end of range.
	verifyGet(t, rec, rangeID, 8, 11, ents[7:9], 10)
}
Example #2
func localRangeIDKeyParse(input string) (remainder string, key roachpb.Key) {
	var rangeID int64
	var err error
	input = mustShiftSlash(input)
	if endPos := strings.Index(input, "/"); endPos > 0 {
		rangeID, err = strconv.ParseInt(input[:endPos], 10, 64)
		if err != nil {
			panic(err)
		}
		input = input[endPos:]
	} else {
		panic(errors.Errorf("illegal RangeID: %q", input))
	}
	input = mustShiftSlash(input)
	var infix string
	infix, input = mustShift(input)
	var replicated bool
	switch {
	case bytes.Equal(localRangeIDUnreplicatedInfix, []byte(infix)):
	case bytes.Equal(localRangeIDReplicatedInfix, []byte(infix)):
		replicated = true
	default:
		panic(errors.Errorf("invalid infix: %q", infix))
	}

	input = mustShiftSlash(input)
	// Get the suffix.
	var suffix roachpb.RKey
	for _, s := range rangeIDSuffixDict {
		if strings.HasPrefix(input, s.name) {
			input = input[len(s.name):]
			if s.psFunc != nil {
				remainder, key = s.psFunc(roachpb.RangeID(rangeID), input)
				return
			}
			suffix = roachpb.RKey(s.suffix)
			break
		}
	}
	maker := MakeRangeIDUnreplicatedKey
	if replicated {
		maker = MakeRangeIDReplicatedKey
	}
	if suffix != nil {
		if input != "" {
			panic(&errUglifyUnsupported{errors.New("nontrivial detail")})
		}
		var detail roachpb.RKey
		// TODO(tschottdorf): can't do this, init cycle:
		// detail, err := UglyPrint(input)
		// if err != nil {
		// 	return "", nil, err
		// }
		remainder = ""
		key = maker(roachpb.RangeID(rangeID), suffix, detail)
		return
	}
	panic(&errUglifyUnsupported{errors.New("unhandled general range key")})
}
Example #3
func TestRangeIDChunk(t *testing.T) {
	defer leaktest.AfterTest(t)()

	var c rangeIDChunk
	if c.Len() != 0 {
		t.Fatalf("expected empty chunk, but found %d", c.Len())
	}
	if c.WriteCap() != rangeIDChunkSize {
		t.Fatalf("expected %d, but found %d", rangeIDChunkSize, c.WriteCap())
	}
	if _, ok := c.PopFront(); ok {
		t.Fatalf("successfully popped from empty chunk")
	}

	for i := 1; i <= rangeIDChunkSize; i++ {
		if !c.PushBack(roachpb.RangeID(i)) {
			t.Fatalf("%d: failed to push", i)
		}
		if e := i; e != c.Len() {
			t.Fatalf("expected %d, but found %d", e, c.Len())
		}
		if e := rangeIDChunkSize - i; e != c.WriteCap() {
			t.Fatalf("expected %d, but found %d", e, c.WriteCap())
		}
	}
	if c.PushBack(0) {
		t.Fatalf("successfully pushed to full chunk")
	}

	for i := 1; i <= rangeIDChunkSize; i++ {
		id, ok := c.PopFront()
		if !ok {
			t.Fatalf("%d: failed to pop", i)
		}
		if roachpb.RangeID(i) != id {
			t.Fatalf("expected %d, but found %d", i, id)
		}
		if e := rangeIDChunkSize - i; e != c.Len() {
			t.Fatalf("expected %d, but found %d", e, c.Len())
		}
		if c.WriteCap() != 0 {
			t.Fatalf("expected full chunk, but found %d", c.WriteCap())
		}
	}
	if c.Len() != 0 {
		t.Fatalf("expected empty chunk, but found %d", c.Len())
	}
	if c.WriteCap() != 0 {
		t.Fatalf("expected full chunk, but found %d", c.WriteCap())
	}
	if _, ok := c.PopFront(); ok {
		t.Fatalf("successfully popped from empty chunk")
	}
}
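The rangeIDChunk type itself is not shown on this page; the following is a minimal sketch inferred only from the invariants the test above exercises (fixed capacity, FIFO order, and a write capacity that never recovers after pops). It is an illustrative stand-in, not the actual CockroachDB implementation.

// chunkSketch is a hypothetical fixed-capacity FIFO matching the behavior
// the test exercises. Popped slots are never reused, so WriteCap only shrinks.
type chunkSketch struct {
	buf        [rangeIDChunkSize]roachpb.RangeID
	head, tail int
}

// PushBack appends id, returning false once the write capacity is exhausted.
func (c *chunkSketch) PushBack(id roachpb.RangeID) bool {
	if c.tail == len(c.buf) {
		return false
	}
	c.buf[c.tail] = id
	c.tail++
	return true
}

// PopFront removes and returns the oldest element, if any.
func (c *chunkSketch) PopFront() (roachpb.RangeID, bool) {
	if c.head == c.tail {
		return 0, false
	}
	id := c.buf[c.head]
	c.head++
	return id, true
}

func (c *chunkSketch) Len() int      { return c.tail - c.head }
func (c *chunkSketch) WriteCap() int { return len(c.buf) - c.tail }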
Example #4
// newTestRangeSet creates a new range set containing count ranges.
func newTestRangeSet(count int, t *testing.T) *testRangeSet {
	rs := &testRangeSet{replicasByKey: btree.New(64 /* degree */)}
	for i := 0; i < count; i++ {
		desc := &roachpb.RangeDescriptor{
			RangeID:  roachpb.RangeID(i),
			StartKey: roachpb.RKey(fmt.Sprintf("%03d", i)),
			EndKey:   roachpb.RKey(fmt.Sprintf("%03d", i+1)),
		}
		// Initialize the range stats so the scanner can use them.
		repl := &Replica{
			RangeID: desc.RangeID,
		}
		repl.mu.TimedMutex = syncutil.MakeTimedMutex(defaultMuLogger)
		repl.cmdQMu.TimedMutex = syncutil.MakeTimedMutex(defaultMuLogger)
		repl.mu.state.Stats = enginepb.MVCCStats{
			KeyBytes:  1,
			ValBytes:  2,
			KeyCount:  1,
			LiveCount: 1,
		}

		if err := repl.setDesc(desc); err != nil {
			t.Fatal(err)
		}
		if exRngItem := rs.replicasByKey.ReplaceOrInsert(repl); exRngItem != nil {
			t.Fatalf("failed to insert range %s", repl)
		}
	}
	return rs
}
Example #5
func parseRangeID(arg string) (roachpb.RangeID, error) {
	rangeIDInt, err := strconv.ParseInt(arg, 10, 64)
	if err != nil {
		return 0, err
	}
	if rangeIDInt < 1 {
		return 0, fmt.Errorf("illegal RangeID: %d", rangeIDInt)
	}
	return roachpb.RangeID(rangeIDInt), nil
}
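A minimal usage sketch for the helper above; mustParseRangeID is a hypothetical call site (not from the source) and assumes the standard library log package and roachpb are imported.

// mustParseRangeID is a hypothetical wrapper that converts a CLI argument
// into a RangeID and exits on non-numeric or non-positive input.
func mustParseRangeID(arg string) roachpb.RangeID {
	id, err := parseRangeID(arg)
	if err != nil {
		log.Fatalf("invalid range ID %q: %v", arg, err)
	}
	return id
}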
Example #6
// addRange adds a new range to the cluster but does not attach it to any
// store.
func (c *Cluster) addRange() *Range {
	rangeID := roachpb.RangeID(len(c.ranges))
	newRng := newRange(rangeID, c.allocator)
	c.ranges[rangeID] = newRng

	// Save a sorted array of range IDs to avoid having to calculate them
	// multiple times.
	c.rangeIDs = append(c.rangeIDs, rangeID)
	sort.Sort(c.rangeIDs)

	return newRng
}
Example #7
// TestReplicaGCQueueDropReplicaDirect verifies that a removed replica is
// immediately cleaned up.
func TestReplicaGCQueueDropReplicaDirect(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := &multiTestContext{}
	const numStores = 3
	rangeID := roachpb.RangeID(1)

	// In this test, the Replica on the second Node is removed, and the test
	// verifies that that Node adds this Replica to its RangeGCQueue. However,
	// the queue does a consistent lookup which will usually be read from
	// Node 1. Hence, if Node 1 hasn't processed the removal when Node 2 has,
	// no GC will take place since the consistent RangeLookup hits the first
	// Node. We use the TestingCommandFilter to make sure that the second Node
	// waits for the first.
	cfg := storage.TestStoreConfig(nil)
	mtc.storeConfig = &cfg
	mtc.storeConfig.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			et, ok := filterArgs.Req.(*roachpb.EndTransactionRequest)
			if !ok || filterArgs.Sid != 2 {
				return nil
			}
			crt := et.InternalCommitTrigger.GetChangeReplicasTrigger()
			if crt == nil || crt.ChangeType != roachpb.REMOVE_REPLICA {
				return nil
			}
			testutils.SucceedsSoon(t, func() error {
				r, err := mtc.stores[0].GetReplica(rangeID)
				if err != nil {
					return err
				}
				if _, ok := r.Desc().GetReplicaDescriptor(2); ok {
					return errors.New("expected second node gone from first node's known replicas")
				}
				return nil
			})
			return nil
		}

	defer mtc.Stop()
	mtc.Start(t, numStores)

	mtc.replicateRange(rangeID, 1, 2)
	mtc.unreplicateRange(rangeID, 1)

	// Make sure the range is removed from the store.
	testutils.SucceedsSoon(t, func() error {
		if _, err := mtc.stores[1].GetReplica(rangeID); !testutils.IsError(err, "range .* was not found") {
			return errors.Errorf("expected range removal: %v", err) // NB: errors.Wrapf(nil, ...) returns nil.
		}
		return nil
	})
}
Example #8
func TestRangeIDQueue(t *testing.T) {
	defer leaktest.AfterTest(t)()

	var q rangeIDQueue
	if q.Len() != 0 {
		t.Fatalf("expected empty queue, but found %d", q.Len())
	}
	if _, ok := q.PopFront(); ok {
		t.Fatalf("successfully popped from empty queue")
	}

	const count = 3 * rangeIDChunkSize
	for i := 1; i <= count; i++ {
		q.PushBack(roachpb.RangeID(i))
		if e := i; e != q.Len() {
			t.Fatalf("expected %d, but found %d", e, q.Len())
		}
	}

	for i := 1; i <= count; i++ {
		id, ok := q.PopFront()
		if !ok {
			t.Fatalf("%d: failed to pop", i)
		}
		if roachpb.RangeID(i) != id {
			t.Fatalf("expected %d, but found %d", i, id)
		}
		if e := count - i; e != q.Len() {
			t.Fatalf("expected %d, but found %d", e, q.Len())
		}
	}
	if q.Len() != 0 {
		t.Fatalf("expected empty queue, but found %d", q.Len())
	}
	if _, ok := q.PopFront(); ok {
		t.Fatalf("successfully popped from empty queue")
	}
}
Example #9
// TestBookieReserveMaxBytes ensures that over-booking doesn't occur when trying
// to reserve more bytes than maxReservedBytes.
func TestBookieReserveMaxBytes(t *testing.T) {
	defer leaktest.AfterTest(t)()

	previousReservedBytes := 10

	stopper, _, b := createTestBookie(time.Hour, previousReservedBytes*2, int64(previousReservedBytes))
	defer stopper.Stop()

	// Load up reservations with a size of 1 each.
	for i := 1; i <= previousReservedBytes; i++ {
		req := ReservationRequest{
			StoreRequestHeader: StoreRequestHeader{
				StoreID: roachpb.StoreID(i),
				NodeID:  roachpb.NodeID(i),
			},
			RangeID:   roachpb.RangeID(i),
			RangeSize: 1,
		}
		if !b.Reserve(context.Background(), req, nil).Reserved {
			t.Errorf("%d: could not add reservation", i)
		}
		verifyBookie(t, b, i, i, int64(i))
	}

	overbookedReq := ReservationRequest{
		StoreRequestHeader: StoreRequestHeader{
			StoreID: roachpb.StoreID(previousReservedBytes + 1),
			NodeID:  roachpb.NodeID(previousReservedBytes + 1),
		},
		RangeID:   roachpb.RangeID(previousReservedBytes + 1),
		RangeSize: 1,
	}
	if b.Reserve(context.Background(), overbookedReq, nil).Reserved {
		t.Errorf("expected reservation to fail due to too many already existing reservations, but it succeeded")
	}
	// The same numbers from the last call to verifyBookie.
	verifyBookie(t, b, previousReservedBytes, previousReservedBytes, int64(previousReservedBytes))
}
Example #10
func TestEntryCacheClearTo(t *testing.T) {
	defer leaktest.AfterTest(t)()
	rangeID := roachpb.RangeID(1)
	rec := newRaftEntryCache(100)
	rec.addEntries(rangeID, []raftpb.Entry{newEntry(2, 1)})
	rec.addEntries(rangeID, []raftpb.Entry{newEntry(20, 1), newEntry(21, 1)})
	rec.clearTo(rangeID, 21)
	if ents, _, _ := rec.getEntries(rangeID, 2, 21, 0); len(ents) != 0 {
		t.Errorf("expected no entries after clearTo")
	}
	if ents, _, _ := rec.getEntries(rangeID, 21, 22, 0); len(ents) != 1 {
		t.Errorf("expected entry 22 to remain in the cache clearTo")
	}
}
Example #11
func TestLeaseHolderCache(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.TODO()
	lc := newLeaseHolderCache(3)
	if repDesc, ok := lc.Lookup(ctx, 12); ok {
		t.Errorf("lookup of missing key returned: %+v", repDesc)
	}
	rangeID := roachpb.RangeID(5)
	replica := roachpb.ReplicaDescriptor{StoreID: 1}
	lc.Update(ctx, rangeID, replica)
	if repDesc, ok := lc.Lookup(ctx, rangeID); !ok {
		t.Fatalf("expected %+v", replica)
	} else if repDesc != replica {
		t.Errorf("expected %+v, got %+v", replica, repDesc)
	}
	newReplica := roachpb.ReplicaDescriptor{StoreID: 7}
	lc.Update(ctx, rangeID, newReplica)
	if repDesc, ok := lc.Lookup(ctx, rangeID); !ok {
		t.Fatalf("expected %+v", replica)
	} else if repDesc != newReplica {
		t.Errorf("expected %+v, got %+v", newReplica, repDesc)
	}
	lc.Update(ctx, rangeID, roachpb.ReplicaDescriptor{})
	if repDesc, ok := lc.Lookup(ctx, rangeID); ok {
		t.Errorf("lookup of evicted key returned: %+v", repDesc)
	}

	for i := 10; i < 20; i++ {
		lc.Update(ctx, roachpb.RangeID(i), replica)
	}
	_, ok16 := lc.Lookup(ctx, 16)
	_, ok17 := lc.Lookup(ctx, 17)
	if ok16 || !ok17 {
		t.Fatalf("unexpected policy used in cache")
	}
}
Example #12
func TestEntryCacheEviction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	rangeID := roachpb.RangeID(1)
	rec := newRaftEntryCache(100)
	rec.addEntries(rangeID, []raftpb.Entry{newEntry(1, 40), newEntry(2, 40)})
	ents, _, hi := rec.getEntries(rangeID, 1, 3, 0)
	if len(ents) != 2 || hi != 3 {
		t.Errorf("expected both entries; got %+v, %d", ents, hi)
	}
	// Add another entry to evict the first.
	rec.addEntries(rangeID, []raftpb.Entry{newEntry(3, 40)})
	ents, _, hi = rec.getEntries(rangeID, 2, 4, 0)
	if len(ents) != 2 || hi != 4 {
		t.Errorf("expected only two entries; got %+v, %d", ents, hi)
	}
}
Example #13
func init() {
	lastKey := roachpb.RKey(keys.MinKey)
	for i, b := 0, byte('a'); b <= byte('z'); i, b = i+1, b+1 {
		key := roachpb.RKey([]byte{b})
		alphaRangeDescriptors = append(alphaRangeDescriptors, &roachpb.RangeDescriptor{
			RangeID:  roachpb.RangeID(i + 2),
			StartKey: lastKey,
			EndKey:   key,
			Replicas: []roachpb.ReplicaDescriptor{
				{
					NodeID:  1,
					StoreID: 1,
				},
			},
		})
		lastKey = key
	}
}
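A small hedged sketch of how the descriptors built above could be searched for the one whose span contains a given key; lookupAlphaDescriptor is a hypothetical helper, not part of the source.

// lookupAlphaDescriptor returns the descriptor whose [StartKey, EndKey)
// span contains key, or nil if no descriptor covers it.
func lookupAlphaDescriptor(key roachpb.RKey) *roachpb.RangeDescriptor {
	for _, desc := range alphaRangeDescriptors {
		if bytes.Compare(key, desc.StartKey) >= 0 && bytes.Compare(key, desc.EndKey) < 0 {
			return desc
		}
	}
	return nil
}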
Example #14
// TestStorePoolDefaultState verifies that the default state of a
// store is neither alive nor dead. This is a regression test for a
// bug in which a call to deadReplicas involving an unknown store
// would have the side effect of marking that store as alive and
// eligible for return by getStoreList. It is therefore significant
// that the two methods are tested in the same test, and in this
// order.
func TestStorePoolDefaultState(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper, _, _, sp := createTestStorePool(TestTimeUntilStoreDeadOff, false /* deterministic */)
	defer stopper.Stop()

	if dead := sp.deadReplicas(0, []roachpb.ReplicaDescriptor{{StoreID: 1}}); len(dead) > 0 {
		t.Errorf("expected 0 dead replicas; got %v", dead)
	}

	sl, alive, throttled := sp.getStoreList(roachpb.RangeID(0))
	if len(sl.stores) > 0 {
		t.Errorf("expected no live stores; got list of %v", sl)
	}
	if alive != 0 {
		t.Errorf("expected no live stores; got an alive count of %d", alive)
	}
	if throttled != 0 {
		t.Errorf("expected no live stores; got a throttled count of %d", throttled)
	}
}
Example #15
// DecodeRangeIDKey parses a local range ID key into range ID, infix,
// suffix, and detail.
func DecodeRangeIDKey(
	key roachpb.Key,
) (rangeID roachpb.RangeID, infix, suffix, detail roachpb.Key, err error) {
	if !bytes.HasPrefix(key, LocalRangeIDPrefix) {
		return 0, nil, nil, nil, errors.Errorf("key %s does not have %s prefix", key, LocalRangeIDPrefix)
	}
	// Cut the prefix, the Range ID, and the infix specifier.
	b := key[len(LocalRangeIDPrefix):]
	b, rangeInt, err := encoding.DecodeUvarintAscending(b)
	if err != nil {
		return 0, nil, nil, nil, err
	}
	if len(b) < localSuffixLength+1 {
		return 0, nil, nil, nil, errors.Errorf("malformed key does not contain range ID infix and suffix")
	}
	infix = b[:1]
	b = b[1:]
	suffix = b[:localSuffixLength]
	b = b[localSuffixLength:]

	return roachpb.RangeID(rangeInt), infix, suffix, b, nil
}
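A hedged sketch of a caller that uses DecodeRangeIDKey to report whether a range-ID-local key sits in the replicated or unreplicated keyspace. describeRangeIDKey is a hypothetical helper: it reuses the package-level infix byte slices referenced in Example #2 and assumes the bytes and fmt packages are imported; it is illustrative only, not the library's actual call site.

// describeRangeIDKey decodes key and renders its components for debugging.
func describeRangeIDKey(key roachpb.Key) (string, error) {
	rangeID, infix, suffix, detail, err := DecodeRangeIDKey(key)
	if err != nil {
		return "", err
	}
	kind := "unreplicated"
	if bytes.Equal(infix, localRangeIDReplicatedInfix) {
		kind = "replicated"
	}
	return fmt.Sprintf("r%d/%s suffix=%q detail=%q", rangeID, kind, suffix, detail), nil
}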
Example #16
// TestReplicaGCQueueDropReplicaGCOnScan verifies that the replica GC queue
// removes a range from a store that should no longer have a replica.
func TestReplicaGCQueueDropReplicaGCOnScan(t *testing.T) {
	defer leaktest.AfterTest(t)()

	mtc := &multiTestContext{}
	defer mtc.Stop()
	mtc.Start(t, 3)
	// Disable the replica gc queue to prevent direct removal of replica.
	mtc.stores[1].SetReplicaGCQueueActive(false)

	rangeID := roachpb.RangeID(1)
	mtc.replicateRange(rangeID, 1, 2)
	mtc.unreplicateRange(rangeID, 1)

	// Wait long enough for the direct replica GC to have had a chance and been
	// discarded because the queue is disabled.
	time.Sleep(10 * time.Millisecond)
	if _, err := mtc.stores[1].GetReplica(rangeID); err != nil {
		t.Error("unexpected range removal")
	}

	// Enable the queue.
	mtc.stores[1].SetReplicaGCQueueActive(true)

	// Increment the clock's timestamp to make the replica GC queue process the range.
	mtc.expireLeases()
	mtc.manualClock.Increment(int64(storage.ReplicaGCQueueInactivityThreshold + 1))

	// Make sure the range is removed from the store.
	testutils.SucceedsSoon(t, func() error {
		store := mtc.stores[1]
		store.ForceReplicaGCScanAndProcess()
		if _, err := store.GetReplica(rangeID); !testutils.IsError(err, "range .* was not found") {
			return errors.Errorf("expected range removal: %v", err) // NB: errors.Wrapf(nil, ...) returns nil.
		}
		return nil
	})
}
Example #17
// executeScriptedActions applies all scripted actions to the cluster and
// returns true if the exit action is encountered.
func (c *Cluster) executeScriptedActions() bool {
	var lastEpoch bool
	actions := c.script.getActions(c.epoch)
	for _, action := range actions {
		switch action.operation {
		case OpExit:
			{
				fmt.Fprintf(c.actionWriter, "%d:\tAction:Exit - this will be the last epoch.\n", c.epoch)
				lastEpoch = true
			}
		case OpSplitRange:
			{
				switch action.variant {
				case OpVarValue:
					fmt.Fprintf(c.actionWriter, "%d:\tAction:SplitRange - splitting the range %d.\n", c.epoch, action.value)
					c.splitRange(roachpb.RangeID(action.value))
				case OpVarRandom:
					fmt.Fprintf(c.actionWriter, "%d:\tAction:SplitRange - splitting a random range.\n", c.epoch)
					c.splitRangeRandom()
				case OpVarFirst:
					fmt.Fprintf(c.actionWriter, "%d:\tAction:SplitRange - splitting the first range.\n", c.epoch)
					c.splitRange(c.rangeIDs[0])
				case OpVarLast:
					fmt.Fprintf(c.actionWriter, "%d:\tAction:SplitRange - splitting the last range.\n", c.epoch)
					c.splitRange(c.rangeIDs[len(c.rangeIDs)-1])
				}
			}
		case OpAddNode:
			{
				fmt.Fprintf(c.actionWriter, "%d:\tAction:AddNode - Adding a new node with a new store.\n", c.epoch)
				c.addNewNodeWithStore()
			}
		}
	}
	return lastEpoch
}
Example #18
// TestStatusSummaries verifies that status summaries are written correctly for
// both the Node and stores within the node.
func TestStatusSummaries(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// ========================================
	// Start test server and wait for full initialization.
	// ========================================
	srv, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{
		DisableEventLog: true,
	})
	defer srv.Stopper().Stop()
	ts := srv.(*TestServer)
	ctx := context.TODO()

	// Retrieve the first store from the Node.
	s, err := ts.node.stores.GetStore(roachpb.StoreID(1))
	if err != nil {
		t.Fatal(err)
	}

	s.WaitForInit()

	content := "junk"
	leftKey := "a"

	// Scan over all keys to "wake up" all replicas (force a lease holder election).
	if _, err := kvDB.Scan(context.TODO(), keys.MetaMax, keys.MaxKey, 0); err != nil {
		t.Fatal(err)
	}

	// Wait for full replication of initial ranges.
	initialRanges := ExpectedInitialRangeCount()
	util.SucceedsSoon(t, func() error {
		for i := 1; i <= int(initialRanges); i++ {
			if s.RaftStatus(roachpb.RangeID(i)) == nil {
				return errors.Errorf("Store %d replica %d is not present in raft", s.StoreID(), i)
			}
		}
		return nil
	})

	// ========================================
	// Construct an initial expectation for NodeStatus to compare to the first
	// status produced by the server.
	// ========================================
	expectedNodeStatus := &status.NodeStatus{
		Desc:      ts.node.Descriptor,
		StartedAt: 0,
		UpdatedAt: 0,
		Metrics: map[string]float64{
			"exec.success": 0,
			"exec.error":   0,
		},
	}

	expectedStoreStatuses := make(map[roachpb.StoreID]status.StoreStatus)
	if err := ts.node.stores.VisitStores(func(s *storage.Store) error {
		desc, err := s.Descriptor()
		if err != nil {
			t.Fatal(err)
		}
		expectedReplicas := 0
		if s.StoreID() == roachpb.StoreID(1) {
			expectedReplicas = initialRanges
		}
		stat := status.StoreStatus{
			Desc: *desc,
			Metrics: map[string]float64{
				"replicas":              float64(expectedReplicas),
				"replicas.leaseholders": float64(expectedReplicas),
				"livebytes":             0,
				"keybytes":              0,
				"valbytes":              0,
				"livecount":             0,
				"keycount":              0,
				"valcount":              0,
			},
		}
		expectedNodeStatus.StoreStatuses = append(expectedNodeStatus.StoreStatuses, stat)
		expectedStoreStatuses[s.StoreID()] = stat
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Function to force summaries to be written synchronously, including all
	// data currently in the event pipeline. Only one of the stores has
	// replicas, so there are no concerns related to quorum writes; if there
	// were multiple replicas, more care would need to be taken in the initial
	// syncFeed().
	forceWriteStatus := func() {
		if err := ts.node.computePeriodicMetrics(ctx, 0); err != nil {
			t.Fatalf("error publishing store statuses: %s", err)
		}

		if err := ts.WriteSummaries(); err != nil {
			t.Fatalf("error writing summaries: %s", err)
		}
	}

	// Verify initial status.
	forceWriteStatus()
	expectedNodeStatus = compareNodeStatus(t, ts, expectedNodeStatus, 1)
	for _, s := range expectedNodeStatus.StoreStatuses {
		expectedStoreStatuses[s.Desc.StoreID] = s
	}

	// ========================================
	// Put some data into the K/V store and confirm change to status.
	// ========================================

	splitKey := "b"
	rightKey := "c"

	// Write some values left and right of the proposed split key.
	if err := ts.db.Put(ctx, leftKey, content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put(ctx, rightKey, content); err != nil {
		t.Fatal(err)
	}

	// Increment metrics on the node
	expectedNodeStatus.Metrics["exec.success"] += 2

	// Increment metrics on the first store.
	store1 := expectedStoreStatuses[roachpb.StoreID(1)].Metrics
	store1["livecount"]++
	store1["keycount"]++
	store1["valcount"]++
	store1["livebytes"]++
	store1["keybytes"]++
	store1["valbytes"]++

	forceWriteStatus()
	expectedNodeStatus = compareNodeStatus(t, ts, expectedNodeStatus, 2)
	for _, s := range expectedNodeStatus.StoreStatuses {
		expectedStoreStatuses[s.Desc.StoreID] = s
	}

	// ========================================
	// Perform an admin split and verify that status is updated.
	// ========================================

	// Split the range.
	if err := ts.db.AdminSplit(context.TODO(), splitKey); err != nil {
		t.Fatal(err)
	}

	// Write on both sides of the split to ensure that the raft machinery
	// is running.
	if err := ts.db.Put(ctx, leftKey, content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put(ctx, rightKey, content); err != nil {
		t.Fatal(err)
	}

	// Increment metrics on the node
	expectedNodeStatus.Metrics["exec.success"] += 2

	// Increment metrics on the first store.
	store1 = expectedStoreStatuses[roachpb.StoreID(1)].Metrics
	store1["replicas"]++
	store1["replicas.leaders"]++
	store1["replicas.leaseholders"]++
	store1["ranges"]++

	forceWriteStatus()
	expectedNodeStatus = compareNodeStatus(t, ts, expectedNodeStatus, 3)
	for _, s := range expectedNodeStatus.StoreStatuses {
		expectedStoreStatuses[s.Desc.StoreID] = s
	}
}
Example #19
func (m *CollectChecksumRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowApi
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CollectChecksumRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CollectChecksumRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StoreRequestHeader", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowApi
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthApi
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.StoreRequestHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RangeID", wireType)
			}
			m.RangeID = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowApi
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RangeID |= (github_com_cockroachdb_cockroach_pkg_roachpb.RangeID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ChecksumID", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowApi
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthApi
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ChecksumID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowApi
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthApi
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Checksum = append(m.Checksum[:0], dAtA[iNdEx:postIndex]...)
			if m.Checksum == nil {
				m.Checksum = []byte{}
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipApi(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthApi
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
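Every field loop in the generated Unmarshal code above follows the same base-128 varint pattern; the standalone sketch below isolates that decoding step for readability. It is illustrative only, is not part of the generated file, and omits the 64-bit overflow guard the generated code applies.

// decodeUvarint reads a protobuf base-128 varint from data, returning the
// decoded value and the number of bytes consumed (0 if data is truncated).
func decodeUvarint(data []byte) (uint64, int) {
	var v uint64
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		b := data[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0
}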
Example #20
func (m *ReplicatedProposalData) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowProposerKv
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ReplicatedProposalData: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ReplicatedProposalData: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RangeID", wireType)
			}
			m.RangeID = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowProposerKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.RangeID |= (github_com_cockroachdb_cockroach_pkg_roachpb.RangeID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field OriginReplica", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowProposerKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthProposerKv
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.OriginReplica.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Cmd", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowProposerKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthProposerKv
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Cmd == nil {
				m.Cmd = &cockroach_roachpb3.BatchRequest{}
			}
			if err := m.Cmd.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field MaxLeaseIndex", wireType)
			}
			m.MaxLeaseIndex = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowProposerKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.MaxLeaseIndex |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 10001:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field BlockReads", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowProposerKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.BlockReads = bool(v != 0)
		case 10002:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowProposerKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthProposerKv
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.State.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 10003:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Split", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowProposerKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthProposerKv
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Split == nil {
				m.Split = &Split{}
			}
			if err := m.Split.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 10004:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Merge", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowProposerKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthProposerKv
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Merge == nil {
				m.Merge = &Merge{}
			}
			if err := m.Merge.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 10005:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ComputeChecksum", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowProposerKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthProposerKv
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ComputeChecksum == nil {
				m.ComputeChecksum = &cockroach_roachpb3.ComputeChecksumRequest{}
			}
			if err := m.ComputeChecksum.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipProposerKv(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthProposerKv
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
Example #21
// TestStorePoolGetStoreList ensures that the store list returns only stores
// that are live and match the attribute criteria.
func TestStorePoolGetStoreList(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// We're going to manually mark stores dead in this test.
	stopper, g, _, sp, mnl := createTestStorePool(
		TestTimeUntilStoreDead, false /* deterministic */, false /* defaultNodeLiveness */)
	defer stopper.Stop()
	sg := gossiputil.NewStoreGossiper(g)
	constraints := config.Constraints{Constraints: []config.Constraint{{Value: "ssd"}, {Value: "dc"}}}
	required := []string{"ssd", "dc"}
	// Nothing yet.
	sl, _, _ := sp.getStoreList(roachpb.RangeID(0))
	sl = sl.filter(constraints)
	if len(sl.stores) != 0 {
		t.Errorf("expected no stores, instead %+v", sl.stores)
	}

	matchingStore := roachpb.StoreDescriptor{
		StoreID: 1,
		Node:    roachpb.NodeDescriptor{NodeID: 1},
		Attrs:   roachpb.Attributes{Attrs: required},
	}
	supersetStore := roachpb.StoreDescriptor{
		StoreID: 2,
		Node:    roachpb.NodeDescriptor{NodeID: 2},
		Attrs:   roachpb.Attributes{Attrs: append(required, "db")},
	}
	unmatchingStore := roachpb.StoreDescriptor{
		StoreID: 3,
		Node:    roachpb.NodeDescriptor{NodeID: 3},
		Attrs:   roachpb.Attributes{Attrs: []string{"ssd", "otherdc"}},
	}
	emptyStore := roachpb.StoreDescriptor{
		StoreID: 4,
		Node:    roachpb.NodeDescriptor{NodeID: 4},
		Attrs:   roachpb.Attributes{},
	}
	deadStore := roachpb.StoreDescriptor{
		StoreID: 5,
		Node:    roachpb.NodeDescriptor{NodeID: 5},
		Attrs:   roachpb.Attributes{Attrs: required},
	}
	declinedStore := roachpb.StoreDescriptor{
		StoreID: 6,
		Node:    roachpb.NodeDescriptor{NodeID: 6},
		Attrs:   roachpb.Attributes{Attrs: required},
	}
	corruptReplicaStore := roachpb.StoreDescriptor{
		StoreID: 7,
		Node:    roachpb.NodeDescriptor{NodeID: 7},
		Attrs:   roachpb.Attributes{Attrs: required},
	}

	corruptedRangeID := roachpb.RangeID(1)

	// Gossip and mark all alive initially.
	sg.GossipStores([]*roachpb.StoreDescriptor{
		&matchingStore,
		&supersetStore,
		&unmatchingStore,
		&emptyStore,
		&deadStore,
		&declinedStore,
		&corruptReplicaStore,
	}, t)
	for i := 1; i <= 7; i++ {
		mnl.setLive(roachpb.NodeID(i), true)
	}

	// Add some corrupt replicas that should not affect getStoreList().
	sp.mu.Lock()
	sp.mu.storeDetails[matchingStore.StoreID].deadReplicas[roachpb.RangeID(10)] =
		[]roachpb.ReplicaDescriptor{{
			StoreID: matchingStore.StoreID,
			NodeID:  matchingStore.Node.NodeID,
		}}
	sp.mu.storeDetails[matchingStore.StoreID].deadReplicas[roachpb.RangeID(11)] =
		[]roachpb.ReplicaDescriptor{{
			StoreID: matchingStore.StoreID,
			NodeID:  matchingStore.Node.NodeID,
		}}
	sp.mu.storeDetails[corruptReplicaStore.StoreID].deadReplicas[roachpb.RangeID(10)] =
		[]roachpb.ReplicaDescriptor{{
			StoreID: corruptReplicaStore.StoreID,
			NodeID:  corruptReplicaStore.Node.NodeID,
		}}
	sp.mu.Unlock()

	if err := verifyStoreList(
		sp,
		constraints,
		corruptedRangeID,
		[]int{
			int(matchingStore.StoreID),
			int(supersetStore.StoreID),
			int(deadStore.StoreID),
			int(declinedStore.StoreID),
			int(corruptReplicaStore.StoreID),
		},
		/* expectedAliveStoreCount */ 7,
		/* expectedThrottledStoreCount */ 0,
	); err != nil {
		t.Error(err)
	}

	// Set deadStore as dead.
	mnl.setLive(deadStore.Node.NodeID, false)
	sp.mu.Lock()
	// Set declinedStore as throttled.
	sp.mu.storeDetails[declinedStore.StoreID].throttledUntil = sp.clock.Now().GoTime().Add(time.Hour)
	// Add a corrupt replica to corruptReplicaStore.
	sp.mu.storeDetails[corruptReplicaStore.StoreID].deadReplicas[roachpb.RangeID(1)] =
		[]roachpb.ReplicaDescriptor{{
			StoreID: corruptReplicaStore.StoreID,
			NodeID:  corruptReplicaStore.Node.NodeID,
		}}
	sp.mu.Unlock()

	if err := verifyStoreList(
		sp,
		constraints,
		corruptedRangeID,
		[]int{
			int(matchingStore.StoreID),
			int(supersetStore.StoreID),
		},
		/* expectedAliveStoreCount */ 6,
		/* expectedThrottledStoreCount */ 1,
	); err != nil {
		t.Error(err)
	}
}
Example #22
// TestRequestToUninitializedRange tests the behavior when a request
// is sent to a node which should be a replica of the correct range
// but has not yet received its initial snapshot. This would
// previously panic due to a malformed error response from the server,
// as seen in https://github.com/cockroachdb/cockroach/issues/6027.
//
// Prior to the other changes in the commit that introduced it, this
// test would reliably trigger the panic from #6027. However, it
// relies on some hacky tricks to both trigger the panic and shut down
// cleanly. If this test needs a lot of maintenance in the future we
// should be willing to get rid of it.
func TestRequestToUninitializedRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	srv, _, _ := serverutils.StartServer(t, base.TestServerArgs{
		StoreSpecs: []base.StoreSpec{
			base.DefaultTestStoreSpec,
			base.DefaultTestStoreSpec,
		},
	})
	defer srv.Stopper().Stop()
	s := srv.(*server.TestServer)

	// Choose a range ID that is much larger than any that would be
	// created by initial splits.
	const rangeID = roachpb.RangeID(1000)

	// Set up a range with replicas on two stores of the same node. This
	// ensures that the DistSender will consider both replicas healthy
	// and will try to talk to both (so we can get a non-retryable error
	// from the second store).
	replica1 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   1,
		ReplicaID: 1,
	}
	replica2 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   2,
		ReplicaID: 2,
	}

	// HACK: remove the second store from the node to generate a
	// non-retryable error when we try to talk to it.
	store2, err := s.Stores().GetStore(2)
	if err != nil {
		t.Fatal(err)
	}
	s.Stores().RemoveStore(store2)

	// Create the uninitialized range by sending an isolated raft
	// message to the first store.
	conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
	if err != nil {
		t.Fatal(err)
	}
	raftClient := storage.NewMultiRaftClient(conn)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := raftClient.RaftMessageBatch(ctx)
	if err != nil {
		t.Fatal(err)
	}
	msg := storage.RaftMessageRequestBatch{
		Requests: []storage.RaftMessageRequest{
			{
				RangeID:     rangeID,
				ToReplica:   replica1,
				FromReplica: replica2,
				Message: raftpb.Message{
					Type: raftpb.MsgApp,
					To:   1,
				},
			},
		},
	}
	if err := stream.Send(&msg); err != nil {
		t.Fatal(err)
	}

	// Make sure the replica was created.
	store1, err := s.Stores().GetStore(1)
	if err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		if replica, err := store1.GetReplica(rangeID); err != nil {
			return errors.Errorf("failed to look up replica: %s", err)
		} else if replica.IsInitialized() {
			return errors.Errorf("expected replica to be uninitialized")
		}
		return nil
	})

	// Create our own DistSender so we can force some requests to the
	// bogus range. The DistSender needs to be in scope for its own
	// MockRangeDescriptorDB closure.
	var sender *kv.DistSender
	sender = kv.NewDistSender(kv.DistSenderConfig{
		Clock:      s.Clock(),
		RPCContext: s.RPCContext(),
		RangeDescriptorDB: kv.MockRangeDescriptorDB(
			func(key roachpb.RKey, useReverseScan bool,
			) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, *roachpb.Error) {
				if key.Equal(roachpb.RKeyMin) {
					// Pass through requests for the first range to the real sender.
					desc, err := sender.FirstRange()
					if err != nil {
						return nil, nil, roachpb.NewError(err)
					}
					return []roachpb.RangeDescriptor{*desc}, nil, nil
				}
				return []roachpb.RangeDescriptor{{
					RangeID:  rangeID,
					StartKey: roachpb.RKey(keys.Meta2Prefix),
					EndKey:   roachpb.RKeyMax,
					Replicas: []roachpb.ReplicaDescriptor{replica1, replica2},
				}}, nil, nil
			}),
	}, s.Gossip())
	// Only inconsistent reads triggered the panic in #6027.
	hdr := roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}
	req := roachpb.NewGet(roachpb.Key("asdf"))
	// Repeat the test a few times: due to the randomization between the
	// two replicas, each attempt only had a 50% chance of triggering
	// the panic.
	for i := 0; i < 5; i++ {
		_, pErr := client.SendWrappedWith(context.Background(), sender, hdr, req)
		// Each attempt fails with "store 2 not found" because that is the
		// non-retryable error.
		if !testutils.IsPError(pErr, "store 2 not found") {
			t.Fatal(pErr)
		}
	}
}
Example #23
func TestPrettyPrint(t *testing.T) {

	tm, _ := time.Parse(time.RFC3339Nano, "2016-03-30T13:40:35.053725008Z")
	duration := duration.Duration{Months: 1, Days: 1, Nanos: 1 * time.Second.Nanoseconds()}
	durationAsc, _ := encoding.EncodeDurationAscending(nil, duration)
	durationDesc, _ := encoding.EncodeDurationDescending(nil, duration)
	txnID := uuid.MakeV4()

	// The following test cases encode keys with a mixture of ascending and descending direction,
	// but always decode keys in the ascending direction. This is why some of the decoded values
	// seem bizarre.
	testCases := []struct {
		key roachpb.Key
		exp string
	}{
		// local
		{StoreIdentKey(), "/Local/Store/storeIdent"},
		{StoreGossipKey(), "/Local/Store/gossipBootstrap"},

		{AbortCacheKey(roachpb.RangeID(1000001), txnID), fmt.Sprintf(`/Local/RangeID/1000001/r/AbortCache/%q`, txnID)},
		{RaftTombstoneKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RaftTombstone"},
		{RaftAppliedIndexKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RaftAppliedIndex"},
		{LeaseAppliedIndexKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/LeaseAppliedIndex"},
		{RaftTruncatedStateKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RaftTruncatedState"},
		{RangeLeaseKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeLease"},
		{RangeStatsKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeStats"},
		{RangeTxnSpanGCThresholdKey(roachpb.RangeID(1000001)), `/Local/RangeID/1000001/r/RangeTxnSpanGCThreshold`},
		{RangeFrozenStatusKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeFrozenStatus"},
		{RangeLastGCKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/r/RangeLastGC"},

		{RaftHardStateKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RaftHardState"},
		{RaftLastIndexKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RaftLastIndex"},
		{RaftLogKey(roachpb.RangeID(1000001), uint64(200001)), "/Local/RangeID/1000001/u/RaftLog/logIndex:200001"},
		{RangeLastReplicaGCTimestampKey(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RangeLastReplicaGCTimestamp"},
		{RangeLastVerificationTimestampKeyDeprecated(roachpb.RangeID(1000001)), "/Local/RangeID/1000001/u/RangeLastVerificationTimestamp"},

		{MakeRangeKeyPrefix(roachpb.RKey("ok")), `/Local/Range/"ok"`},
		{RangeDescriptorKey(roachpb.RKey("111")), `/Local/Range/"111"/RangeDescriptor`},
		{TransactionKey(roachpb.Key("111"), txnID), fmt.Sprintf(`/Local/Range/"111"/Transaction/addrKey:/id:%q`, txnID)},

		{LocalMax, `/Meta1/""`}, // LocalMax == Meta1Prefix

		// system
		{makeKey(Meta2Prefix, roachpb.Key("foo")), `/Meta2/"foo"`},
		{makeKey(Meta1Prefix, roachpb.Key("foo")), `/Meta1/"foo"`},
		{RangeMetaKey(roachpb.RKey("f")), `/Meta2/"f"`},

		{NodeLivenessKey(10033), "/System/NodeLiveness/10033"},
		{NodeStatusKey(1111), "/System/StatusNode/1111"},

		{SystemMax, "/System/Max"},

		// key of key
		{RangeMetaKey(roachpb.RKey(MakeRangeKeyPrefix(roachpb.RKey("ok")))), `/Meta2/Local/Range/"ok"`},
		{RangeMetaKey(roachpb.RKey(makeKey(MakeTablePrefix(42), roachpb.RKey("foo")))), `/Meta2/Table/42/"foo"`},
		{RangeMetaKey(roachpb.RKey(makeKey(Meta2Prefix, roachpb.Key("foo")))), `/Meta1/"foo"`},

		// table
		{UserTableDataMin, "/Table/50"},
		{MakeTablePrefix(111), "/Table/111"},
		{makeKey(MakeTablePrefix(42), roachpb.RKey("foo")), `/Table/42/"foo"`},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeFloatAscending(nil, float64(233.221112)))),
			"/Table/42/233.221112"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeFloatDescending(nil, float64(-233.221112)))),
			"/Table/42/233.221112"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeFloatAscending(nil, math.Inf(1)))),
			"/Table/42/+Inf"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeFloatAscending(nil, math.NaN()))),
			"/Table/42/NaN"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222)),
			roachpb.RKey(encoding.EncodeStringAscending(nil, "handsome man"))),
			`/Table/42/1222/"handsome man"`},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeVarintAscending(nil, 1222))),
			`/Table/42/1222`},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeVarintDescending(nil, 1222))),
			`/Table/42/-1223`},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255}))),
			`/Table/42/"\x01\x02\b\xff"`},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeBytesAscending(nil, []byte{1, 2, 8, 255})),
			roachpb.RKey("bar")), `/Table/42/"\x01\x02\b\xff"/"bar"`},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeBytesDescending(nil, []byte{1, 2, 8, 255})),
			roachpb.RKey("bar")), `/Table/42/"\x01\x02\b\xff"/"bar"`},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeNullAscending(nil))), "/Table/42/NULL"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeNotNullAscending(nil))), "/Table/42/#"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeTimeAscending(nil, tm))),
			"/Table/42/2016-03-30T13:40:35.053725008Z"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeTimeDescending(nil, tm))),
			"/Table/42/1923-10-04T10:19:23.946274991Z"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeDecimalAscending(nil, inf.NewDec(1234, 2)))),
			"/Table/42/12.34"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(encoding.EncodeDecimalDescending(nil, inf.NewDec(1234, 2)))),
			"/Table/42/-12.34"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(durationAsc)),
			"/Table/42/1m1d1s"},
		{makeKey(MakeTablePrefix(42),
			roachpb.RKey(durationDesc)),
			"/Table/42/-2m-2d743h59m58.999999999s"},

		// others
		{makeKey([]byte("")), "/Min"},
		{Meta1KeyMax, "/Meta1/Max"},
		{Meta2KeyMax, "/Meta2/Max"},
		{makeKey(MakeTablePrefix(42), roachpb.RKey([]byte{0x12, 'a', 0x00, 0x02})), "/Table/42/<unknown escape sequence: 0x0 0x2>"},
	}
	for i, test := range testCases {
		keyInfo := MassagePrettyPrintedSpanForTest(PrettyPrint(test.key), nil)
		exp := MassagePrettyPrintedSpanForTest(test.exp, nil)
		if exp != keyInfo {
			t.Errorf("%d: expected %s, got %s", i, exp, keyInfo)
		}

		if exp != MassagePrettyPrintedSpanForTest(test.key.String(), nil) {
			t.Errorf("%d: expected %s, got %s", i, exp, test.key.String())
		}

		parsed, err := UglyPrint(keyInfo)
		if err != nil {
			if _, ok := err.(*errUglifyUnsupported); !ok {
				t.Errorf("%d: %s: %s", i, keyInfo, err)
			} else {
				t.Logf("%d: skipping parsing of %s; key is unsupported: %v", i, keyInfo, err)
			}
		} else if exp, act := test.key, parsed; !bytes.Equal(exp, act) {
			t.Errorf("%d: expected %q, got %q", i, exp, act)
		}
		if t.Failed() {
			return
		}
	}
}
Example #24
// TestUpdateRangeAddressing verifies range addressing records are
// correctly updated on creation of new range descriptors.
func TestUpdateRangeAddressing(t *testing.T) {
	defer leaktest.AfterTest(t)()
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	// When split is false, merging treats the right range as the merged
	// range. With merging, leftExpNew indicates the addressing keys we
	// expect to be removed.
	testCases := []struct {
		split                   bool
		leftStart, leftEnd      roachpb.RKey
		rightStart, rightEnd    roachpb.RKey
		leftExpNew, rightExpNew [][]byte
	}{
		// Start out with whole range.
		{false, roachpb.RKeyMin, roachpb.RKeyMax, roachpb.RKeyMin, roachpb.RKeyMax,
			[][]byte{}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKeyMax)}},
		// Split KeyMin-KeyMax at key "a".
		{true, roachpb.RKeyMin, roachpb.RKey("a"), roachpb.RKey("a"), roachpb.RKeyMax,
			[][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}, [][]byte{meta2Key(roachpb.RKeyMax)}},
		// Split "a"-KeyMax at key "z".
		{true, roachpb.RKey("a"), roachpb.RKey("z"), roachpb.RKey("z"), roachpb.RKeyMax,
			[][]byte{meta2Key(roachpb.RKey("z"))}, [][]byte{meta2Key(roachpb.RKeyMax)}},
		// Split "a"-"z" at key "m".
		{true, roachpb.RKey("a"), roachpb.RKey("m"), roachpb.RKey("m"), roachpb.RKey("z"),
			[][]byte{meta2Key(roachpb.RKey("m"))}, [][]byte{meta2Key(roachpb.RKey("z"))}},
		// Split KeyMin-"a" at meta2(m).
		{true, roachpb.RKeyMin, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("m")), roachpb.RKey("a"),
			[][]byte{meta1Key(roachpb.RKey("m"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}},
		// Split meta2(m)-"a" at meta2(z).
		{true, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("z")), metaKey(roachpb.RKey("z")), roachpb.RKey("a"),
			[][]byte{meta1Key(roachpb.RKey("z"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}},
		// Split meta2(m)-meta2(z) at meta2(r).
		{true, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("r")), metaKey(roachpb.RKey("r")), metaKey(roachpb.RKey("z")),
			[][]byte{meta1Key(roachpb.RKey("r"))}, [][]byte{meta1Key(roachpb.RKey("z"))}},

		// Now, merge all of our splits backwards...

		// Merge meta2(m)-meta2(z).
		{false, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("r")), metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("z")),
			[][]byte{meta1Key(roachpb.RKey("r"))}, [][]byte{meta1Key(roachpb.RKey("z"))}},
		// Merge meta2(m)-"a".
		{false, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("z")), metaKey(roachpb.RKey("m")), roachpb.RKey("a"),
			[][]byte{meta1Key(roachpb.RKey("z"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}},
		// Merge KeyMin-"a".
		{false, roachpb.RKeyMin, metaKey(roachpb.RKey("m")), roachpb.RKeyMin, roachpb.RKey("a"),
			[][]byte{meta1Key(roachpb.RKey("m"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}},
		// Merge "a"-"z".
		{false, roachpb.RKey("a"), roachpb.RKey("m"), roachpb.RKey("a"), roachpb.RKey("z"),
			[][]byte{meta2Key(roachpb.RKey("m"))}, [][]byte{meta2Key(roachpb.RKey("z"))}},
		// Merge "a"-KeyMax.
		{false, roachpb.RKey("a"), roachpb.RKey("z"), roachpb.RKey("a"), roachpb.RKeyMax,
			[][]byte{meta2Key(roachpb.RKey("z"))}, [][]byte{meta2Key(roachpb.RKeyMax)}},
		// Merge KeyMin-KeyMax.
		{false, roachpb.RKeyMin, roachpb.RKey("a"), roachpb.RKeyMin, roachpb.RKeyMax,
			[][]byte{meta2Key(roachpb.RKey("a"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKeyMax)}},
	}
	expMetas := metaSlice{}

	for i, test := range testCases {
		left := &roachpb.RangeDescriptor{RangeID: roachpb.RangeID(i * 2), StartKey: test.leftStart, EndKey: test.leftEnd}
		right := &roachpb.RangeDescriptor{RangeID: roachpb.RangeID(i*2 + 1), StartKey: test.rightStart, EndKey: test.rightEnd}
		b := &client.Batch{}
		if test.split {
			if err := splitRangeAddressing(b, left, right); err != nil {
				t.Fatal(err)
			}
		} else {
			if err := mergeRangeAddressing(b, left, right); err != nil {
				t.Fatal(err)
			}
		}
		if err := store.DB().Run(context.TODO(), b); err != nil {
			t.Fatal(err)
		}
		// Scan meta keys directly from engine.
		kvs, _, _, err := engine.MVCCScan(context.Background(), store.Engine(), keys.MetaMin, keys.MetaMax, math.MaxInt64, hlc.MaxTimestamp, true, nil)
		if err != nil {
			t.Fatal(err)
		}
		metas := metaSlice{}
		for _, kv := range kvs {
			scannedDesc := &roachpb.RangeDescriptor{}
			if err := kv.Value.GetProto(scannedDesc); err != nil {
				t.Fatal(err)
			}
			metas = append(metas, metaRecord{key: kv.Key, desc: scannedDesc})
		}

		// Continue to build up the expected metas slice, replacing any earlier
		// version of same key.
		addOrRemoveNew := func(keys [][]byte, desc *roachpb.RangeDescriptor, add bool) {
			for _, n := range keys {
				found := -1
				for i := range expMetas {
					if expMetas[i].key.Equal(roachpb.Key(n)) {
						found = i
						expMetas[i].desc = desc
						break
					}
				}
				if found == -1 && add {
					expMetas = append(expMetas, metaRecord{key: n, desc: desc})
				} else if found != -1 && !add {
					expMetas = append(expMetas[:found], expMetas[found+1:]...)
				}
			}
		}
		addOrRemoveNew(test.leftExpNew, left, test.split /* on split, add; on merge, remove */)
		addOrRemoveNew(test.rightExpNew, right, true)
		sort.Sort(expMetas)

		if test.split {
			if log.V(1) {
				log.Infof(context.Background(), "test case %d: split %q-%q at %q", i, left.StartKey, right.EndKey, left.EndKey)
			}
		} else {
			if log.V(1) {
				log.Infof(context.Background(), "test case %d: merge %q-%q + %q-%q", i, left.StartKey, left.EndKey, left.EndKey, right.EndKey)
			}
		}
		for _, meta := range metas {
			if log.V(1) {
				log.Infof(context.Background(), "%q", meta.key)
			}
		}

		if !reflect.DeepEqual(expMetas, metas) {
			t.Errorf("expected metas don't match")
			if len(expMetas) != len(metas) {
				t.Errorf("len(expMetas) != len(metas); %d != %d", len(expMetas), len(metas))
			} else {
				for j, meta := range expMetas {
					if !meta.key.Equal(metas[j].key) {
						fmt.Printf("%d: expected %q vs %q\n", j, meta.key, metas[j].key)
					}
					if !reflect.DeepEqual(meta.desc, metas[j].desc) {
						fmt.Printf("%d: expected %q vs %q and %s vs %s\n", j, meta.key, metas[j].key, meta.desc, metas[j].desc)
					}
				}
			}
		}
	}
}
Example #25
func TestLogRebalances(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, db := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// Use a client to get the RangeDescriptor for the first range. We will use
	// this range's information to log fake rebalance events.
	desc := &roachpb.RangeDescriptor{}
	if err := db.GetProto(context.TODO(), keys.RangeDescriptorKey(roachpb.RKeyMin), desc); err != nil {
		t.Fatal(err)
	}

	// This code assumes that there is only one TestServer, and thus that
	// StoreID 1 is present on the testserver. If this assumption changes in the
	// future, *any* store will work, but a new method will need to be added to
	// Stores (or a creative usage of VisitStores could suffice).
	store, err := s.(*server.TestServer).Stores().GetStore(roachpb.StoreID(1))
	if err != nil {
		t.Fatal(err)
	}

	// Log several fake events using the store.
	logEvent := func(changeType roachpb.ReplicaChangeType) {
		if err := db.Txn(context.TODO(), func(txn *client.Txn) error {
			return store.LogReplicaChangeTest(txn, changeType, desc.Replicas[0], *desc)
		}); err != nil {
			t.Fatal(err)
		}
	}
	checkMetrics := func(expAdds, expRemoves int64) {
		if a, e := store.Metrics().RangeAdds.Count(), expAdds; a != e {
			t.Errorf("range adds %d != expected %d", a, e)
		}
		if a, e := store.Metrics().RangeRemoves.Count(), expRemoves; a != e {
			t.Errorf("range removes %d != expected %d", a, e)
		}
	}
	logEvent(roachpb.ADD_REPLICA)
	checkMetrics(1 /*add*/, 0 /*remove*/)
	logEvent(roachpb.ADD_REPLICA)
	checkMetrics(2 /*adds*/, 0 /*remove*/)
	logEvent(roachpb.REMOVE_REPLICA)
	checkMetrics(2 /*adds*/, 1 /*remove*/)

	// Open a SQL connection to verify that the events have been logged.
	pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingAddr(), "TestLogRebalances", url.User(security.RootUser))
	defer cleanupFn()

	sqlDB, err := gosql.Open("postgres", pgURL.String())
	if err != nil {
		t.Fatal(err)
	}
	defer sqlDB.Close()

	// Verify that two add-replica events have been logged.
	// TODO(mrtracy): placeholders still appear to be broken, this query should
	// be using a string placeholder for the eventType value.
	rows, err := sqlDB.Query(`SELECT rangeID, info FROM system.rangelog WHERE eventType = 'add'`)
	if err != nil {
		t.Fatal(err)
	}
	var count int
	for rows.Next() {
		count++
		var rangeID int64
		var infoStr gosql.NullString
		if err := rows.Scan(&rangeID, &infoStr); err != nil {
			t.Fatal(err)
		}

		if a, e := roachpb.RangeID(rangeID), desc.RangeID; a != e {
			t.Errorf("wrong rangeID %d recorded for add event, expected %d", a, e)
		}
		// Verify that the info column was recorded as a JSON document.
		if !infoStr.Valid {
			t.Errorf("info not recorded for add replica of range %d", rangeID)
		}
		var info struct {
			AddReplica  roachpb.ReplicaDescriptor
			UpdatedDesc roachpb.RangeDescriptor
		}
		if err := json.Unmarshal([]byte(infoStr.String), &info); err != nil {
			t.Errorf("error unmarshalling info string for add replica %d: %s", rangeID, err)
			continue
		}
		if int64(info.UpdatedDesc.RangeID) != rangeID {
			t.Errorf("recorded wrong updated descriptor %s for add replica of range %d", info.UpdatedDesc, rangeID)
		}
		if a, e := info.AddReplica, desc.Replicas[0]; a != e {
			t.Errorf("recorded wrong updated replica %s for add replica of range %d, expected %s",
				a, rangeID, e)
		}
	}
	if rows.Err() != nil {
		t.Fatal(rows.Err())
	}
	if a, e := count, 2; a != e {
		t.Errorf("expected %d AddReplica events logged, found %d", e, a)
	}

	// Verify that one remove-replica event was logged.
	rows, err = sqlDB.Query(`SELECT rangeID, info FROM system.rangelog WHERE eventType = 'remove'`)
	if err != nil {
		t.Fatal(err)
	}
	count = 0
	for rows.Next() {
		count++
		var rangeID int64
		var infoStr gosql.NullString
		if err := rows.Scan(&rangeID, &infoStr); err != nil {
			t.Fatal(err)
		}

		if a, e := roachpb.RangeID(rangeID), desc.RangeID; a != e {
			t.Errorf("wrong rangeID %d recorded for remove event, expected %d", a, e)
		}
		// Verify that the info column was recorded as a JSON document.
		if !infoStr.Valid {
			t.Errorf("info not recorded for remove replica of range %d", rangeID)
		}
		var info struct {
			RemovedReplica roachpb.ReplicaDescriptor
			UpdatedDesc    roachpb.RangeDescriptor
		}
		if err := json.Unmarshal([]byte(infoStr.String), &info); err != nil {
			t.Errorf("error unmarshalling info string for remove replica %d: %s", rangeID, err)
			continue
		}
		if int64(info.UpdatedDesc.RangeID) != rangeID {
			t.Errorf("recorded wrong updated descriptor %s for remove replica of range %d", info.UpdatedDesc, rangeID)
		}
		if a, e := info.RemovedReplica, desc.Replicas[0]; a != e {
			t.Errorf("recorded wrong updated replica %s for remove replica of range %d, expected %s",
				a, rangeID, e)
		}
	}
	if rows.Err() != nil {
		t.Fatal(rows.Err())
	}
	if a, e := count, 1; a != e {
		t.Errorf("expected %d RemoveReplica events logged, found %d", e, a)
	}
}
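For reference, the info column decoded above is plain JSON whose top-level field names match the ad-hoc structs in the test (AddReplica, RemovedReplica, UpdatedDesc). A minimal sketch of that shape, assuming the encoding/json import already used above and substituting hypothetical trimmed-down descriptor types for the roachpb ones:

// replicaInfo and rangeInfo are hypothetical stand-ins for
// roachpb.ReplicaDescriptor and roachpb.RangeDescriptor, carrying only enough
// fields to show the JSON layout the test expects in system.rangelog's info
// column.
type replicaInfo struct {
	NodeID, StoreID, ReplicaID int32
}

type rangeInfo struct {
	RangeID  int64
	Replicas []replicaInfo
}

func exampleAddEventInfo() ([]byte, error) {
	info := struct {
		AddReplica  replicaInfo
		UpdatedDesc rangeInfo
	}{
		AddReplica: replicaInfo{NodeID: 1, StoreID: 1, ReplicaID: 1},
		UpdatedDesc: rangeInfo{
			RangeID:  1,
			Replicas: []replicaInfo{{NodeID: 1, StoreID: 1, ReplicaID: 1}},
		},
	}
	// Produces roughly {"AddReplica":{...},"UpdatedDesc":{"RangeID":1,...}},
	// which is the shape json.Unmarshal decodes in the loops above.
	return json.Marshal(info)
}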
Example No. 26
func TestStoresLookupReplica(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	cfg := TestStoreConfig()
	manualClock := hlc.NewManualClock(0)
	cfg.Clock = hlc.NewClock(manualClock.UnixNano)
	ls := NewStores(log.AmbientContext{}, cfg.Clock)

	// Create two new stores with ranges we care about.
	var e [2]engine.Engine
	var s [2]*Store
	var d [2]*roachpb.RangeDescriptor
	ranges := []struct {
		storeID    roachpb.StoreID
		start, end roachpb.RKey
	}{
		{2, roachpb.RKeyMin, roachpb.RKey("c")},
		{3, roachpb.RKey("x"), roachpb.RKey("z")},
	}
	for i, rng := range ranges {
		e[i] = engine.NewInMem(roachpb.Attributes{}, 1<<20)
		stopper.AddCloser(e[i])
		cfg.Transport = NewDummyRaftTransport()
		s[i] = NewStore(cfg, e[i], &roachpb.NodeDescriptor{NodeID: 1})
		s[i].Ident.StoreID = rng.storeID

		d[i] = &roachpb.RangeDescriptor{
			RangeID:  roachpb.RangeID(i),
			StartKey: rng.start,
			EndKey:   rng.end,
			Replicas: []roachpb.ReplicaDescriptor{{StoreID: rng.storeID}},
		}
		newRng, err := NewReplica(d[i], s[i], 0)
		if err != nil {
			t.Fatal(err)
		}
		if err := s[i].AddReplica(newRng); err != nil {
			t.Error(err)
		}
		ls.AddStore(s[i])
	}

	testCases := []struct {
		start, end roachpb.RKey
		expStoreID roachpb.StoreID
		expError   string
	}{
		{
			start:      roachpb.RKey("a"),
			end:        roachpb.RKey("c"),
			expStoreID: s[0].Ident.StoreID,
		},
		{
			start:      roachpb.RKey("b"),
			end:        nil,
			expStoreID: s[0].Ident.StoreID,
		},
		{
			start:    roachpb.RKey("b"),
			end:      roachpb.RKey("d"),
			expError: "outside of bounds of range",
		},
		{
			start:      roachpb.RKey("x"),
			end:        roachpb.RKey("z"),
			expStoreID: s[1].Ident.StoreID,
		},
		{
			start:      roachpb.RKey("y"),
			end:        nil,
			expStoreID: s[1].Ident.StoreID,
		},
		{
			start:    roachpb.RKey("z1"),
			end:      roachpb.RKey("z2"),
			expError: "range 0 was not found",
		},
	}
	for testIdx, tc := range testCases {
		_, r, err := ls.LookupReplica(tc.start, tc.end)
		if tc.expError != "" {
			if !testutils.IsError(err, tc.expError) {
				t.Errorf("%d: got error %v (expected %s)", testIdx, err, tc.expError)
			}
		} else if err != nil {
			t.Errorf("%d: %s", testIdx, err)
		} else if r.StoreID != tc.expStoreID {
			t.Errorf("%d: expected store %d; got %d", testIdx, tc.expStoreID, r.StoreID)
		}
	}

	if desc, err := ls.FirstRange(); err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(desc, d[0]) {
		t.Fatalf("expected first range %+v; got %+v", desc, d[0])
	}
}
Example No. 27
// splitRangeRandom splits a random range from within the cluster.
func (c *Cluster) splitRangeRandom() {
	rangeID := roachpb.RangeID(c.rand.Int63n(int64(len(c.ranges))))
	c.splitRange(rangeID)
}
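The Int63n draw above assumes range IDs are dense and start at zero. A minimal sketch of an alternative that draws a key directly, under the (hypothetical) assumption that the cluster keeps its ranges in a map keyed by roachpb.RangeID and that rng is a *rand.Rand from math/rand:

// pickRandomRangeID is a hypothetical helper: it picks a random key from a
// map of ranges instead of assuming IDs form a dense 0..n-1 sequence.
func pickRandomRangeID(rng *rand.Rand, ranges map[roachpb.RangeID]struct{}) roachpb.RangeID {
	if len(ranges) == 0 {
		return 0 // no ranges; 0 is the unused/invalid ID
	}
	ids := make([]roachpb.RangeID, 0, len(ranges))
	for id := range ranges {
		ids = append(ids, id)
	}
	return ids[rng.Intn(len(ids))]
}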
Example No. 28
// TestBookieReserve ensures that you can never have more than one reservation
// for a specific rangeID at a time, and that both `Reserve` and `Fill` function
// correctly.
func TestBookieReserve(t *testing.T) {
	defer leaktest.AfterTest(t)()
	b := createTestBookie(5, defaultMaxReservedBytes)

	testCases := []struct {
		rangeID      int
		reserve      bool                   // true for reserve, false for fill
		expSuc       bool                   // is the operation expected to succeed
		expOut       int                    // expected number of reserved replicas
		expBytes     int64                  // expected number of bytes being reserved
		deadReplicas []roachpb.ReplicaIdent // dead replicas that we should not reserve over
	}{
		{rangeID: 1, reserve: true, expSuc: true, expOut: 1, expBytes: 1},
		{rangeID: 1, reserve: true, expSuc: false, expOut: 1, expBytes: 1},
		{rangeID: 1, reserve: false, expSuc: true, expOut: 0, expBytes: 0},
		{rangeID: 1, reserve: false, expSuc: false, expOut: 0, expBytes: 0},
		{rangeID: 2, reserve: true, expSuc: true, expOut: 1, expBytes: 2},
		{rangeID: 3, reserve: true, expSuc: true, expOut: 2, expBytes: 5},
		{rangeID: 1, reserve: true, expSuc: true, expOut: 3, expBytes: 6},
		{rangeID: 2, reserve: true, expSuc: false, expOut: 3, expBytes: 6},
		{rangeID: 2, reserve: false, expSuc: true, expOut: 2, expBytes: 4},
		{rangeID: 2, reserve: false, expSuc: false, expOut: 2, expBytes: 4},
		{rangeID: 3, reserve: false, expSuc: true, expOut: 1, expBytes: 1},
		{rangeID: 1, reserve: false, expSuc: true, expOut: 0, expBytes: 0},
		{rangeID: 2, reserve: false, expSuc: false, expOut: 0, expBytes: 0},
		{rangeID: 0, reserve: true, expSuc: false, expOut: 0, expBytes: 0, deadReplicas: []roachpb.ReplicaIdent{{RangeID: 0}}},
		{rangeID: 0, reserve: true, expSuc: true, expOut: 1, expBytes: 0, deadReplicas: []roachpb.ReplicaIdent{{RangeID: 1}}},
		{rangeID: 0, reserve: false, expSuc: true, expOut: 0, expBytes: 0},
	}

	ctx := context.Background()
	for i, testCase := range testCases {
		if testCase.reserve {
			// Try to reserve the range.
			req := reservationRequest{
				StoreRequestHeader: StoreRequestHeader{
					StoreID: roachpb.StoreID(i),
					NodeID:  roachpb.NodeID(i),
				},
				RangeID:   roachpb.RangeID(testCase.rangeID),
				RangeSize: int64(testCase.rangeID),
			}
			if resp := b.Reserve(ctx, req, testCase.deadReplicas); resp.Reserved != testCase.expSuc {
				if testCase.expSuc {
					t.Errorf("%d: expected a successful reservation, was rejected", i)
				} else {
					t.Errorf("%d: expected no reservation, but it was accepted", i)
				}
			}
		} else {
			// Fill the reservation.
			if filled := b.Fill(ctx, roachpb.RangeID(testCase.rangeID)); filled != testCase.expSuc {
				if testCase.expSuc {
					t.Errorf("%d: expected a successful filled reservation, was rejected", i)
				} else {
					t.Errorf("%d: expected no reservation to be filled, but it was accepted", i)
				}
			}
		}

		verifyBookie(t, b, testCase.expOut, testCase.expBytes)
	}

	// Test that repeated requests with the same store and node number extend
	// the timeout of the pre-existing reservation.
	repeatReq := reservationRequest{
		StoreRequestHeader: StoreRequestHeader{
			StoreID: 100,
			NodeID:  100,
		},
		RangeID:   100,
		RangeSize: 100,
	}
	for i := 1; i < 10; i++ {
		if !b.Reserve(context.Background(), repeatReq, nil).Reserved {
			t.Errorf("%d: could not add repeated reservation", i)
		}
		verifyBookie(t, b, 1, 100)
	}

	// Test rejecting a reservation due to disk space constraints.
	overfilledReq := reservationRequest{
		StoreRequestHeader: StoreRequestHeader{
			StoreID: 200,
			NodeID:  200,
		},
		RangeID:   200,
		RangeSize: 200,
	}

	b.mu.Lock()
	// Set the available bytes to one less than the reservation needs.
	b.metrics.Available.Update(b.mu.size + (2 * overfilledReq.RangeSize) - 1)
	b.mu.Unlock()

	if b.Reserve(context.Background(), overfilledReq, nil).Reserved {
		t.Errorf("expected reservation to fail due to disk space constraints, but it succeeded")
	}
	verifyBookie(t, b, 1, 100) // The same numbers from the last call to verifyBookie.
}
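The invariant this table of cases exercises — at most one open reservation per RangeID, released by a matching Fill — can be sketched without the store plumbing. A minimal, hypothetical tracker (not the bookie's actual implementation; sync and roachpb imports assumed):

// reservationTracker is a hypothetical, minimal stand-in for the bookie's
// bookkeeping: one outstanding reservation per range, plus a running byte
// total.
type reservationTracker struct {
	mu       sync.Mutex
	reserved map[roachpb.RangeID]int64 // rangeID -> reserved bytes
	bytes    int64
}

func newReservationTracker() *reservationTracker {
	return &reservationTracker{reserved: map[roachpb.RangeID]int64{}}
}

// Reserve succeeds only if rangeID has no outstanding reservation.
func (rt *reservationTracker) Reserve(rangeID roachpb.RangeID, size int64) bool {
	rt.mu.Lock()
	defer rt.mu.Unlock()
	if _, ok := rt.reserved[rangeID]; ok {
		return false // a second reservation for the same range is rejected
	}
	rt.reserved[rangeID] = size
	rt.bytes += size
	return true
}

// Fill releases an outstanding reservation; filling an absent one fails.
func (rt *reservationTracker) Fill(rangeID roachpb.RangeID) bool {
	rt.mu.Lock()
	defer rt.mu.Unlock()
	size, ok := rt.reserved[rangeID]
	if !ok {
		return false
	}
	delete(rt.reserved, rangeID)
	rt.bytes -= size
	return true
}

Reserving an already-reserved range fails and filling an absent reservation fails, mirroring the expSuc: false rows in the table above.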
Example No. 29
// RemoveTarget returns a suitable replica to remove from the provided replica
// set. It first attempts to randomly select a target from the set of stores
// that have greater than the average number of replicas. Failing that, it
// falls back to selecting a random target from any of the existing
// replicas. It will also exclude any replica that lives on leaseStoreID.
//
// TODO(mrtracy): removeTarget eventually needs to accept the attributes from
// the zone config associated with the provided replicas. This will allow it to
// make correct decisions in the case of ranges with heterogeneous replica
// requirements (i.e. multiple data centers).
func (a Allocator) RemoveTarget(
	constraints config.Constraints,
	existing []roachpb.ReplicaDescriptor,
	leaseStoreID roachpb.StoreID,
) (roachpb.ReplicaDescriptor, error) {
	if len(existing) == 0 {
		return roachpb.ReplicaDescriptor{}, errors.Errorf("must supply at least one replica to allocator.RemoveTarget()")
	}

	if a.options.UseRuleSolver {
		// TODO(bram): #10275 Is this getStoreList call required? Computing a
		// candidate requires a store list, but we should be able to create one
		// using only the stores that belong to the range.
		// Use an invalid range ID here since we don't care about corrupt
		// replicas: we are removing a replica, not trying to add one.
		sl, _, _ := a.storePool.getStoreList(roachpb.RangeID(0))

		var worst roachpb.ReplicaDescriptor
		worstScore := math.Inf(0)
		for _, exist := range existing {
			if exist.StoreID == leaseStoreID {
				continue
			}
			desc, ok := a.storePool.getStoreDescriptor(exist.StoreID)
			if !ok {
				continue
			}

			candidate, valid := a.ruleSolver.computeCandidate(solveState{
				constraints: constraints,
				store:       desc,
				existing:    nil,
				sl:          sl,
				tierOrder:   canonicalTierOrder(sl),
				tiers:       storeTierMap(sl),
			})
			// When a candidate is not valid, it means that it can be
			// considered the worst existing replica.
			if !valid {
				return exist, nil
			}

			if candidate.score < worstScore {
				worstScore = candidate.score
				worst = exist
			}

		}

		if !math.IsInf(worstScore, 0) {
			return worst, nil
		}

		return roachpb.ReplicaDescriptor{}, errors.New("could not select an appropriate replica to be removed")
	}

	// Retrieve store descriptors for the provided replicas from the StorePool.
	descriptors := make([]roachpb.StoreDescriptor, 0, len(existing))
	for _, exist := range existing {
		if desc, ok := a.storePool.getStoreDescriptor(exist.StoreID); ok {
			if exist.StoreID == leaseStoreID {
				continue
			}
			descriptors = append(descriptors, desc)
		}
	}

	sl := makeStoreList(descriptors)
	if bad := a.selectBad(sl); bad != nil {
		for _, exist := range existing {
			if exist.StoreID == bad.StoreID {
				return exist, nil
			}
		}
	}
	return roachpb.ReplicaDescriptor{}, errors.New("could not select an appropriate replica to be removed")
}
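Per the doc comment, the non-rule-solver path prefers to shed a replica from an over-full store while never touching the lease holder's store. A deliberately simplified, hypothetical sketch of that idea — it deterministically picks the store with the highest range count instead of sampling above-average stores the way selectBad does:

// pickRemovalTarget is a hypothetical illustration: among the existing
// replicas (excluding the lease holder's store), return the one whose store
// reports the highest range count.
func pickRemovalTarget(
	existing []roachpb.ReplicaDescriptor,
	rangeCounts map[roachpb.StoreID]int32, // store -> range count (assumed input)
	leaseStoreID roachpb.StoreID,
) (roachpb.ReplicaDescriptor, error) {
	var best roachpb.ReplicaDescriptor
	bestCount := int32(-1)
	for _, rep := range existing {
		if rep.StoreID == leaseStoreID {
			continue
		}
		if c, ok := rangeCounts[rep.StoreID]; ok && c > bestCount {
			bestCount, best = c, rep
		}
	}
	if bestCount < 0 {
		return roachpb.ReplicaDescriptor{}, errors.New("no removable replica found")
	}
	return best, nil
}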
Example No. 30
// TODO(bram): This test suite is not even close to exhaustive. The scores are
// not checked and each rule should have many more test cases. Also add a
// corrupt replica test and remove the 0 range ID used when calling
// getStoreList.
func TestRuleSolver(t *testing.T) {
	defer leaktest.AfterTest(t)()

	stopper, _, _, storePool := createTestStorePool(
		TestTimeUntilStoreDeadOff,
		/* deterministic */ false,
	)
	defer stopper.Stop()

	storeUSa15 := roachpb.StoreID(1) // us-a-1-5
	storeUSa1 := roachpb.StoreID(2)  // us-a-1
	storeUSb := roachpb.StoreID(3)   // us-b
	storeDead := roachpb.StoreID(4)
	storeEurope := roachpb.StoreID(5) // eur-a-1-5

	mockStorePool(storePool, []roachpb.StoreID{storeUSa15, storeUSa1, storeUSb, storeEurope}, []roachpb.StoreID{storeDead}, nil)

	// tierSetup returns a tier struct constructed using the passed in values.
	// If any value is an empty string, it is not included.
	tierSetup := func(datacenter, floor, rack, slot string) []roachpb.Tier {
		var tiers []roachpb.Tier
		if datacenter != "" {
			tiers = append(tiers, roachpb.Tier{Key: "datacenter", Value: datacenter})
		}
		if floor != "" {
			tiers = append(tiers, roachpb.Tier{Key: "floor", Value: floor})
		}
		if rack != "" {
			tiers = append(tiers, roachpb.Tier{Key: "rack", Value: rack})
		}
		if slot != "" {
			tiers = append(tiers, roachpb.Tier{Key: "slot", Value: slot})
		}
		return tiers
	}

	// capacitySetup returns a store capacity whose total capacity is always
	// 100; the available bytes and range count are passed in.
	capacitySetup := func(available int64, rangeCount int32) roachpb.StoreCapacity {
		return roachpb.StoreCapacity{
			Capacity:   100,
			Available:  available,
			RangeCount: rangeCount,
		}
	}

	storePool.mu.Lock()

	storePool.mu.storeDetails[storeUSa15].desc.Attrs.Attrs = []string{"a"}
	storePool.mu.storeDetails[storeUSa15].desc.Node.Locality.Tiers = tierSetup("us", "a", "1", "5")
	storePool.mu.storeDetails[storeUSa15].desc.Capacity = capacitySetup(1, 99)
	storePool.mu.nodeLocalities[roachpb.NodeID(storeUSa15)] = storePool.mu.storeDetails[storeUSa15].desc.Node.Locality

	storePool.mu.storeDetails[storeUSa1].desc.Attrs.Attrs = []string{"a", "b"}
	storePool.mu.storeDetails[storeUSa1].desc.Node.Locality.Tiers = tierSetup("us", "a", "1", "")
	storePool.mu.storeDetails[storeUSa1].desc.Capacity = capacitySetup(100, 0)
	storePool.mu.nodeLocalities[roachpb.NodeID(storeUSa1)] = storePool.mu.storeDetails[storeUSa1].desc.Node.Locality

	storePool.mu.storeDetails[storeUSb].desc.Attrs.Attrs = []string{"a", "b", "c"}
	storePool.mu.storeDetails[storeUSb].desc.Node.Locality.Tiers = tierSetup("us", "b", "", "")
	storePool.mu.storeDetails[storeUSb].desc.Capacity = capacitySetup(50, 50)
	storePool.mu.nodeLocalities[roachpb.NodeID(storeUSb)] = storePool.mu.storeDetails[storeUSb].desc.Node.Locality

	storePool.mu.storeDetails[storeEurope].desc.Node.Locality.Tiers = tierSetup("eur", "a", "1", "5")
	storePool.mu.storeDetails[storeEurope].desc.Capacity = capacitySetup(60, 40)
	storePool.mu.nodeLocalities[roachpb.NodeID(storeEurope)] = storePool.mu.storeDetails[storeEurope].desc.Node.Locality

	storePool.mu.Unlock()

	testCases := []struct {
		name     string
		rule     rule
		c        config.Constraints
		existing []roachpb.ReplicaDescriptor
		expected []roachpb.StoreID
	}{
		{
			name:     "no constraints or rules",
			expected: []roachpb.StoreID{storeUSa15, storeUSa1, storeUSb, storeEurope},
		},
		{
			name: "white list rule",
			rule: func(state solveState) (float64, bool) {
				switch state.store.StoreID {
				case storeUSa15:
					return 0, true
				case storeUSb:
					return 1, true
				default:
					return 0, false
				}
			},
			expected: []roachpb.StoreID{storeUSb, storeUSa15},
		},
		{
			name: "ruleReplicasUniqueNodes - 2 available nodes",
			rule: ruleReplicasUniqueNodes,
			existing: []roachpb.ReplicaDescriptor{
				{NodeID: roachpb.NodeID(storeUSa15)},
				{NodeID: roachpb.NodeID(storeUSb)},
			},
			expected: []roachpb.StoreID{storeUSa1, storeEurope},
		},
		{
			name: "ruleReplicasUniqueNodes - 0 available nodes",
			rule: ruleReplicasUniqueNodes,
			existing: []roachpb.ReplicaDescriptor{
				{NodeID: roachpb.NodeID(storeUSa15)},
				{NodeID: roachpb.NodeID(storeUSa1)},
				{NodeID: roachpb.NodeID(storeUSb)},
				{NodeID: roachpb.NodeID(storeEurope)},
			},
			expected: nil,
		},
		{
			name: "ruleConstraints - required constraints",
			rule: ruleConstraints,
			c: config.Constraints{
				Constraints: []config.Constraint{
					{Value: "b", Type: config.Constraint_REQUIRED},
				},
			},
			expected: []roachpb.StoreID{storeUSa1, storeUSb},
		},
		{
			name: "ruleConstraints - required locality constraints",
			rule: ruleConstraints,
			c: config.Constraints{
				Constraints: []config.Constraint{
					{Key: "datacenter", Value: "us", Type: config.Constraint_REQUIRED},
				},
			},
			expected: []roachpb.StoreID{storeUSa15, storeUSa1, storeUSb},
		},
		{
			name: "ruleConstraints - prohibited constraints",
			rule: ruleConstraints,
			c: config.Constraints{
				Constraints: []config.Constraint{
					{Value: "b", Type: config.Constraint_PROHIBITED},
				},
			},
			expected: []roachpb.StoreID{storeUSa15, storeEurope},
		},
		{
			name: "ruleConstraints - prohibited locality constraints",
			rule: ruleConstraints,
			c: config.Constraints{
				Constraints: []config.Constraint{
					{Key: "datacenter", Value: "us", Type: config.Constraint_PROHIBITED},
				},
			},
			expected: []roachpb.StoreID{storeEurope},
		},
		{
			name: "ruleConstraints - positive constraints",
			rule: ruleConstraints,
			c: config.Constraints{
				Constraints: []config.Constraint{
					{Value: "a"},
					{Value: "b"},
					{Value: "c"},
				},
			},
			expected: []roachpb.StoreID{storeUSb, storeUSa1, storeUSa15, storeEurope},
		},
		{
			name: "ruleConstraints - positive locality constraints",
			rule: ruleConstraints,
			c: config.Constraints{
				Constraints: []config.Constraint{
					{Key: "datacenter", Value: "eur"},
				},
			},
			expected: []roachpb.StoreID{storeEurope, storeUSa15, storeUSa1, storeUSb},
		},
		{
			name:     "ruleDiversity - no existing replicas",
			rule:     ruleDiversity,
			existing: nil,
			expected: []roachpb.StoreID{storeUSa15, storeUSa1, storeUSb, storeEurope},
		},
		{
			name: "ruleDiversity - one existing replicas",
			rule: ruleDiversity,
			existing: []roachpb.ReplicaDescriptor{
				{NodeID: roachpb.NodeID(storeUSa15)},
			},
			expected: []roachpb.StoreID{storeEurope, storeUSb, storeUSa1, storeUSa15},
		},
		{
			name: "ruleDiversity - two existing replicas",
			rule: ruleDiversity,
			existing: []roachpb.ReplicaDescriptor{
				{NodeID: roachpb.NodeID(storeUSa15)},
				{NodeID: roachpb.NodeID(storeEurope)},
			},
			expected: []roachpb.StoreID{storeUSb, storeUSa1, storeUSa15, storeEurope},
		},
		{
			name:     "ruleCapacity",
			rule:     ruleCapacity,
			expected: []roachpb.StoreID{storeUSa1, storeEurope, storeUSb},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			var solver ruleSolver
			if tc.rule != nil {
				solver = ruleSolver{tc.rule}
			}
			sl, _, _ := storePool.getStoreList(roachpb.RangeID(0))
			candidates, err := solver.Solve(
				sl,
				tc.c,
				tc.existing,
				storePool.getNodeLocalities(tc.existing),
			)
			if err != nil {
				t.Fatal(err)
			}
			sort.Sort(byScoreAndID(candidates))
			if len(candidates) != len(tc.expected) {
				t.Fatalf("length of %+v should match %+v", candidates, tc.expected)
			}
			for i, expected := range tc.expected {
				if actual := candidates[i].store.StoreID; actual != expected {
					t.Errorf("candidates[%d].store.StoreID = %d; not %d; %+v",
						i, actual, expected, candidates)
				}
			}
		})
	}
}
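The rule signature exercised above, func(solveState) (float64, bool), composes into a solver: a store rejected by any rule is dropped, otherwise its rule scores accumulate and candidates are ordered best-first (compare the byScoreAndID sort in the test). A minimal, self-contained sketch of that composition, using hypothetical local types in place of solveState and ruleSolver (sort and roachpb imports assumed):

// storeState, scoringRule, and candidateScore are hypothetical stand-ins for
// the package's solver types; they carry only what the sketch needs.
type storeState struct {
	storeID roachpb.StoreID
}

type scoringRule func(s storeState) (score float64, valid bool)

type candidateScore struct {
	storeID roachpb.StoreID
	score   float64
}

// solve applies every rule to every store: a store rejected by any rule is
// dropped, otherwise its rule scores are summed, and candidates are sorted
// with the highest total score first.
func solve(stores []storeState, rules []scoringRule) []candidateScore {
	var out []candidateScore
	for _, s := range stores {
		total, ok := 0.0, true
		for _, r := range rules {
			score, valid := r(s)
			if !valid {
				ok = false
				break
			}
			total += score
		}
		if ok {
			out = append(out, candidateScore{storeID: s.storeID, score: total})
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].score > out[j].score })
	return out
}

With a single whitelist-style rule like the one in the test (score 1 for one store, 0 for another, invalid for the rest), solve returns exactly those two stores, higher score first.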