Example #1
func TestStorePoolGetStoreDetails(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper, g, _, sp := createTestStorePool(TestTimeUntilStoreDeadOff)
	defer stopper.Stop()
	sg := gossiputil.NewStoreGossiper(g)
	sg.GossipStores(uniqueStore, t)

	if detail := sp.getStoreDetail(roachpb.StoreID(1)); detail.dead {
		t.Errorf("Present storeDetail came back as dead, expected it to be alive. %+v", detail)
	}

	if detail := sp.getStoreDetail(roachpb.StoreID(2)); detail.dead {
		t.Errorf("Absent storeDetail came back as dead, expected it to be alive. %+v", detail)
	}
}
Example #2
// getStoreList returns a storeList that contains all active stores that
// contain the required attributes and their associated stats.
// TODO(embark, spencer): consider using a reverse index map from
// Attr->stores, for efficiency. Ensure that entries in this map still
// have an opportunity to be garbage collected.
func (sp *StorePool) getStoreList(required roachpb.Attributes, excludeNodes []roachpb.NodeID, deterministic bool) StoreList {
	sp.mu.RLock()
	defer sp.mu.RUnlock()

	// Convert list of excluded nodes to a map for quick lookup.
	excludeMap := map[roachpb.NodeID]struct{}{}
	for _, nodeID := range excludeNodes {
		excludeMap[nodeID] = struct{}{}
	}

	var storeIDs roachpb.StoreIDSlice
	for storeID := range sp.stores {
		storeIDs = append(storeIDs, storeID)
	}
	// Sort the stores by key if deterministic is requested. This is only for
	// unit testing.
	if deterministic {
		sort.Sort(storeIDs)
	}
	sl := StoreList{}
	for _, storeID := range storeIDs {
		detail := sp.stores[roachpb.StoreID(storeID)]
		if _, ok := excludeMap[detail.desc.Node.NodeID]; ok {
			continue
		}
		if !detail.dead && required.IsSubset(*detail.desc.CombinedAttrs()) {
			desc := detail.desc
			sl.add(&desc)
		}
	}
	return sl
}
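A minimal sketch of how a package-internal caller might invoke the method above; the helper name, the attribute string, and the excluded node ID are invented for illustration and are not part of the snippet.

func exampleGetStoreList(sp *StorePool) {
	// Ask for live stores carrying the "ssd" attribute, skip anything on node 2,
	// and request deterministic ordering so results are stable (test-only).
	required := roachpb.Attributes{Attrs: []string{"ssd"}}
	sl := sp.getStoreList(required, []roachpb.NodeID{2}, true /* deterministic */)
	_ = sl // sl now holds the descriptors of the matching stores.
}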
Example #3
// updateCountString describes the update counts that were recorded by
// storeEventReader. The formatting is appropriate to paste into this test as
// a new expected value.
func (ser *storeEventReader) updateCountString() string {
	var buffer bytes.Buffer
	w := tabwriter.NewWriter(&buffer, 2, 1, 2, ' ', 0)

	var storeIDs sort.IntSlice
	for storeID := range ser.perStoreUpdateCount {
		storeIDs = append(storeIDs, int(storeID))
	}
	sort.Sort(storeIDs)

	for _, storeID := range storeIDs {
		if countset, ok := ser.perStoreUpdateCount[roachpb.StoreID(storeID)]; ok {
			fmt.Fprintf(w, "%T(%d): {\n", storeID, storeID)

			var methodIDs sort.IntSlice
			for methodID := range countset {
				methodIDs = append(methodIDs, int(methodID))
			}
			sort.Sort(methodIDs)

			for _, methodID := range methodIDs {
				method := roachpb.Method(methodID)
				if count, okCount := countset[method]; okCount {
					fmt.Fprintf(w, "\tproto.%s:\t%d,\n", method, count)
				} else {
					panic("unreachable!")
				}
			}
		} else {
			panic("unreachable!")
		}
		fmt.Fprintf(w, "},\n")
	}
	// Flush the tabwriter so its buffered, aligned output reaches the buffer.
	_ = w.Flush()
	return buffer.String()
}
Example #4
func TestStoresVisitStores(t *testing.T) {
	defer leaktest.AfterTest(t)
	ls := NewStores()
	numStores := 10
	for i := 0; i < numStores; i++ {
		ls.AddStore(&Store{Ident: roachpb.StoreIdent{StoreID: roachpb.StoreID(i)}})
	}

	visit := make([]bool, numStores)
	err := ls.VisitStores(func(s *Store) error { visit[s.Ident.StoreID] = true; return nil })
	if err != nil {
		t.Errorf("unexpected error on visit: %s", err.Error())
	}

	for i, visited := range visit {
		if !visited {
			t.Errorf("store %d was not visited", i)
		}
	}

	err = ls.VisitStores(func(s *Store) error { return errors.New("") })
	if err == nil {
		t.Errorf("expected visit error")
	}
}
Example #5
// getStoreList returns a storeList that contains all active stores that
// contain the required attributes and their associated stats. It also returns
// the total number of alive stores.
// TODO(embark, spencer): consider using a reverse index map from
// Attr->stores, for efficiency. Ensure that entries in this map still
// have an opportunity to be garbage collected.
func (sp *StorePool) getStoreList(required roachpb.Attributes, deterministic bool) (StoreList, int) {
	sp.mu.RLock()
	defer sp.mu.RUnlock()

	var storeIDs roachpb.StoreIDSlice
	for storeID := range sp.stores {
		storeIDs = append(storeIDs, storeID)
	}
	// Sort the stores by key if deterministic is requested. This is only for
	// unit testing.
	if deterministic {
		sort.Sort(storeIDs)
	}
	sl := StoreList{}
	var aliveStoreCount int
	for _, storeID := range storeIDs {
		detail := sp.stores[roachpb.StoreID(storeID)]
		if !detail.dead && detail.desc != nil {
			aliveStoreCount++
			if required.IsSubset(*detail.desc.CombinedAttrs()) {
				sl.add(detail.desc)
			}
		}
	}
	return sl, aliveStoreCount
}
Example #6
// ReplicaDescriptor implements the Storage interface by returning a
// dummy descriptor.
func (m *MemoryStorage) ReplicaDescriptor(groupID roachpb.RangeID, replicaID roachpb.ReplicaID) (roachpb.ReplicaDescriptor, error) {
	return roachpb.ReplicaDescriptor{
		ReplicaID: replicaID,
		NodeID:    roachpb.NodeID(replicaID),
		StoreID:   roachpb.StoreID(replicaID),
	}, nil
}
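For illustration only (the range and replica IDs are invented): because the descriptor is a dummy, the node and store IDs simply mirror the replica ID.

func exampleDummyDescriptor(m *MemoryStorage) {
	desc, err := m.ReplicaDescriptor(roachpb.RangeID(1), roachpb.ReplicaID(7))
	// err is always nil here; desc is {ReplicaID: 7, NodeID: 7, StoreID: 7}.
	_, _ = desc, err
}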
Example #7
func TestStoresVisitStores(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ls := NewStores(hlc.NewClock(hlc.UnixNano))
	numStores := 10
	for i := 0; i < numStores; i++ {
		ls.AddStore(&Store{Ident: roachpb.StoreIdent{StoreID: roachpb.StoreID(i)}})
	}

	visit := make([]bool, numStores)
	err := ls.VisitStores(func(s *Store) error { visit[s.Ident.StoreID] = true; return nil })
	if err != nil {
		t.Errorf("unexpected error on visit: %s", err.Error())
	}

	for i, visited := range visit {
		if !visited {
			t.Errorf("store %d was not visited", i)
		}
	}

	errBoom := errors.New("boom")
	if err := ls.VisitStores(func(s *Store) error {
		return errBoom
	}); err != errBoom {
		t.Errorf("got unexpected error %v", err)
	}
}
Example #8
// getStoreList returns a storeList that contains all active stores that
// contain the required attributes and their associated stats. It also returns
// the total number of alive stores.
// TODO(embark, spencer): consider using a reverse index map from
// Attr->stores, for efficiency. Ensure that entries in this map still
// have an opportunity to be garbage collected.
func (sp *StorePool) getStoreList(required roachpb.Attributes, deterministic bool) (StoreList, int) {
	sp.mu.RLock()
	defer sp.mu.RUnlock()

	var storeIDs roachpb.StoreIDSlice
	for storeID := range sp.mu.stores {
		storeIDs = append(storeIDs, storeID)
	}
	// Sort the stores by key if deterministic is requested. This is only for
	// unit testing.
	if deterministic {
		sort.Sort(storeIDs)
	}
	now := sp.clock.Now().GoTime()
	sl := StoreList{}
	var aliveStoreCount int
	for _, storeID := range storeIDs {
		detail := sp.mu.stores[roachpb.StoreID(storeID)]
		matched := detail.match(now, required)
		if matched >= storeMatchAlive {
			aliveStoreCount++
		}
		if matched == storeMatchMatched {
			sl.add(detail.desc)
		}
	}
	return sl, aliveStoreCount
}
Example #9
// BootstrapCluster bootstraps multiple stores using the provided engines and
// cluster ID. The first bootstrapped store contains a single range spanning
// all keys. Initial range lookup metadata is populated for the range.
//
// Returns a KV client for unit-test purposes. The caller should close the
// returned client.
func BootstrapCluster(clusterID string, engines []engine.Engine, stopper *stop.Stopper) (*client.DB, error) {
	ctx := storage.StoreContext{}
	ctx.ScanInterval = 10 * time.Minute
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	// Create a KV DB with a local sender.
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, ctx.Clock, false, nil, stopper)
	ctx.DB = client.NewDB(sender)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	for i, eng := range engines {
		sIdent := roachpb.StoreIdent{
			ClusterID: clusterID,
			NodeID:    1,
			StoreID:   roachpb.StoreID(i + 1),
		}

		// The bootstrapping store will not connect to other nodes so its
		// StoreConfig doesn't really matter.
		s := storage.NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})

		// Verify the store isn't already part of a cluster.
		if len(s.Ident.ClusterID) > 0 {
			return nil, util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
		}

		// Bootstrap store to persist the store ident.
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			return nil, err
		}
		// Create first range, writing directly to engine. Note this does
		// not create the range, just its data.  Only do this if this is the
		// first store.
		if i == 0 {
			// TODO(marc): this is better than having storage/ import sql, but still
			// not great. Find a better place to keep those.
			initialValues := sql.GetInitialSystemValues()
			if err := s.BootstrapRange(initialValues); err != nil {
				return nil, err
			}
		}
		if err := s.Start(stopper); err != nil {
			return nil, err
		}

		lSender.AddStore(s)

		// Initialize node and store ids.  Only initialize the node once.
		if i == 0 {
			if nodeID, err := allocateNodeID(ctx.DB); nodeID != sIdent.NodeID || err != nil {
				return nil, util.Errorf("expected to initialize node id allocator to %d, got %d: %s",
					sIdent.NodeID, nodeID, err)
			}
		}
		if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, ctx.DB); storeID != sIdent.StoreID || err != nil {
			return nil, util.Errorf("expected to initialize store id allocator to %d, got %d: %s",
				sIdent.StoreID, storeID, err)
		}
	}
	return ctx.DB, nil
}
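A hypothetical call site for BootstrapCluster, sketched under the assumption that an engine.Engine value has already been constructed elsewhere; the cluster ID string is made up.

func exampleBootstrap(eng engine.Engine) (*client.DB, *stop.Stopper, error) {
	stopper := stop.NewStopper()
	db, err := BootstrapCluster("test-cluster", []engine.Engine{eng}, stopper)
	if err != nil {
		stopper.Stop()
		return nil, nil, err
	}
	// The returned KV client is intended for unit tests; per the comment above,
	// the caller is responsible for cleaning it up when finished.
	return db, stopper, nil
}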
Example #10
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate "inc" new, unique store ids. The
// first ID in a contiguous range is returned on success.
func allocateStoreIDs(nodeID roachpb.NodeID, inc int64, db *client.DB) (roachpb.StoreID, error) {
	r, err := db.Inc(keys.StoreIDGenerator, inc)
	if err != nil {
		return 0, util.Errorf("unable to allocate %d store IDs for node %d: %s", inc, nodeID, err)
	}
	return roachpb.StoreID(r.ValueInt() - inc + 1), nil
}
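A worked illustration of the contiguous-range semantics documented above: asking for three IDs returns only the first, and the caller derives the rest by incrementing. The node ID and count are invented for the example.

func exampleAllocateStoreIDs(db *client.DB) error {
	first, err := allocateStoreIDs(roachpb.NodeID(1), 3, db)
	if err != nil {
		return err
	}
	// If first is, say, 5, the three allocated store IDs are 5, 6 and 7.
	for i := roachpb.StoreID(0); i < 3; i++ {
		_ = first + i
	}
	return nil
}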
Example #11
func createReplicaSlice() ReplicaSlice {
	rs := ReplicaSlice(nil)
	for i := 0; i < 5; i++ {
		rs = append(rs, ReplicaInfo{ReplicaDescriptor: roachpb.ReplicaDescriptor{StoreID: roachpb.StoreID(i + 1)}})
	}
	return rs
}
Example #12
// TestSendRPCRetry verifies that when sendRPC fails on the first address but
// succeeds on the second, the reply from the second address is returned.
func TestSendRPCRetry(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	g.SetNodeID(1)
	if err := g.SetNodeDescriptor(&roachpb.NodeDescriptor{NodeID: 1}); err != nil {
		t.Fatal(err)
	}
	// Fill RangeDescriptor with 2 replicas
	var descriptor = roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKey("a"),
		EndKey:   roachpb.RKey("z"),
	}
	for i := 1; i <= 2; i++ {
		addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
		nd := &roachpb.NodeDescriptor{
			NodeID:  roachpb.NodeID(i),
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}
		if err := g.AddInfoProto(gossip.MakeNodeIDKey(roachpb.NodeID(i)), nd, time.Hour); err != nil {
			t.Fatal(err)
		}

		descriptor.Replicas = append(descriptor.Replicas, roachpb.ReplicaDescriptor{
			NodeID:  roachpb.NodeID(i),
			StoreID: roachpb.StoreID(i),
		})
	}
	// Define our rpcSend stub which returns success on the second address.
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
		if method == "Node.Batch" {
			// The reply from the first address fails and is discarded.
			_ = getReply()
			// The reply from the second address succeeds.
			batchReply := getReply().(*roachpb.BatchResponse)
			reply := &roachpb.ScanResponse{}
			batchReply.Add(reply)
			reply.Rows = append([]roachpb.KeyValue{}, roachpb.KeyValue{Key: roachpb.Key("b"), Value: roachpb.Value{}})
			return []proto.Message{batchReply}, nil
		}
		return nil, util.Errorf("unexpected method %v", method)
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(_ roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
			return []roachpb.RangeDescriptor{descriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), 1)
	sr, err := client.SendWrapped(ds, nil, scan)
	if err != nil {
		t.Fatal(err)
	}
	if l := len(sr.(*roachpb.ScanResponse).Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
}
Example #13
func TestMembershipChange(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	cluster := newTestCluster(nil, 4, stopper, t)
	defer stopper.Stop()

	// Create a group with a single member, cluster.nodes[0].
	groupID := roachpb.RangeID(1)
	cluster.createGroup(groupID, 0, 1)
	// An automatic election is triggered since this is a single-node Raft group,
	// so we don't need to call triggerElection.

	// Consume and apply the membership change events.
	for i := 0; i < 4; i++ {
		go func(i int) {
			for {
				e, ok := <-cluster.events[i].MembershipChangeCommitted
				if !ok {
					return
				}
				e.Callback(nil)
			}
		}(i)
	}

	// Add each of the other three nodes to the cluster.
	for i := 1; i < 4; i++ {
		ch := cluster.nodes[0].ChangeGroupMembership(groupID, makeCommandID(),
			raftpb.ConfChangeAddNode,
			roachpb.ReplicaDescriptor{
				NodeID:    cluster.nodes[i].nodeID,
				StoreID:   roachpb.StoreID(cluster.nodes[i].nodeID),
				ReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),
			}, nil)
		<-ch
	}

	// TODO(bdarnell): verify that the channel events are sent out correctly.
	/*
		for i := 0; i < 10; i++ {
			log.Infof("tick %d", i)
			cluster.tickers[0].Tick()
			time.Sleep(5 * time.Millisecond)
		}

		// Each node is notified of each other node's joining.
		for i := 0; i < 4; i++ {
			for j := 1; j < 4; j++ {
				select {
				case e := <-cluster.events[i].MembershipChangeCommitted:
					if e.NodeID != cluster.nodes[j].nodeID {
						t.Errorf("node %d expected event for %d, got %d", i, j, e.NodeID)
					}
				default:
					t.Errorf("node %d did not get expected event for %d", i, j)
				}
			}
		}*/
}
Example #14
// getRemoveTarget queries the allocator for the store that contains a replica
// that can be removed.
func (r *Range) getRemoveTarget() (roachpb.StoreID, error) {
	// Pass in an invalid store ID since we don't consider range leases as part
	// of the simulator.
	removeStore, err := r.allocator.RemoveTarget(r.desc.Replicas, roachpb.StoreID(-1))
	if err != nil {
		return 0, err
	}
	return removeStore.StoreID, nil
}
Example #15
// OutputEpoch writes the current free capacity of every store to the
// epochWriter.
func (c *Cluster) OutputEpoch() {
	fmt.Fprintf(c.epochWriter, "%d:\t", c.epoch)

	for _, storeID := range c.storeIDs {
		store := c.stores[roachpb.StoreID(storeID)]
		capacity := store.getCapacity(len(c.rangeIDsByStore[storeID]))
		fmt.Fprintf(c.epochWriter, "%d/%.0f%%\t", len(c.rangeIDsByStore[storeID]), float64(capacity.Available)/float64(capacity.Capacity)*100)
	}
	fmt.Fprintf(c.epochWriter, "\n")
}
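For reference, a single line produced by this method looks roughly like the following (numbers invented): the epoch number, then one rangeCount/percent-free cell per store, separated by tabs.

5:	12/87%	9/91%	14/82%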
Example #16
// TestSendRPCRetry verifies that when sendRPC fails on the first address but
// succeeds on the second, the reply from the second address is returned.
func TestSendRPCRetry(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	g.SetNodeID(1)
	if err := g.SetNodeDescriptor(&roachpb.NodeDescriptor{NodeID: 1}); err != nil {
		t.Fatal(err)
	}
	// Fill RangeDescriptor with 2 replicas
	var descriptor = roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKey("a"),
		EndKey:   roachpb.RKey("z"),
	}
	for i := 1; i <= 2; i++ {
		addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
		nd := &roachpb.NodeDescriptor{
			NodeID:  roachpb.NodeID(i),
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}
		if err := g.AddInfoProto(gossip.MakeNodeIDKey(roachpb.NodeID(i)), nd, time.Hour); err != nil {
			t.Fatal(err)
		}

		descriptor.Replicas = append(descriptor.Replicas, roachpb.ReplicaDescriptor{
			NodeID:  roachpb.NodeID(i),
			StoreID: roachpb.StoreID(i),
		})
	}
	var testFn rpcSendFn = func(_ SendOptions, _ ReplicaSlice,
		args roachpb.BatchRequest, _ *rpc.Context) (proto.Message, error) {
		batchReply := &roachpb.BatchResponse{}
		reply := &roachpb.ScanResponse{}
		batchReply.Add(reply)
		reply.Rows = append([]roachpb.KeyValue{}, roachpb.KeyValue{Key: roachpb.Key("b"), Value: roachpb.Value{}})
		return batchReply, nil
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(_ roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
			return []roachpb.RangeDescriptor{descriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), 1)
	sr, err := client.SendWrapped(ds, nil, scan)
	if err != nil {
		t.Fatal(err)
	}
	if l := len(sr.(*roachpb.ScanResponse).Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
}
Example #17
// GetFirstStoreID is a utility function returning the StoreID of the first
// store on this node.
func (ts *TestServer) GetFirstStoreID() roachpb.StoreID {
	firstStoreID := roachpb.StoreID(-1)
	err := ts.Stores().VisitStores(func(s *storage.Store) error {
		if firstStoreID == -1 {
			firstStoreID = s.Ident.StoreID
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
	return firstStoreID
}
Example #18
// TestReproposeConfigChange verifies the behavior when multiple
// configuration changes are in flight at once. Raft prohibits this,
// but any configuration changes that are dropped by this rule should
// be reproposed when the previous change completes.
func TestReproposeConfigChange(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	const clusterSize = 4
	const groupSize = 3
	cluster := newTestCluster(nil, clusterSize, stopper, t)

	const groupID = roachpb.RangeID(1)
	const leader = 0
	const proposer = 1
	cluster.createGroup(groupID, leader, groupSize)
	cluster.elect(leader, groupID)

	targetDesc := roachpb.ReplicaDescriptor{
		NodeID:    cluster.nodes[groupSize].nodeID,
		StoreID:   roachpb.StoreID(cluster.nodes[groupSize].nodeID),
		ReplicaID: roachpb.ReplicaID(cluster.nodes[groupSize].nodeID),
	}
	// Add a node and immediately remove it without waiting for the
	// first change to commit.
	addErrCh := cluster.nodes[proposer].ChangeGroupMembership(groupID, makeCommandID(),
		raftpb.ConfChangeAddNode, targetDesc, nil)
	removeErrCh := cluster.nodes[proposer].ChangeGroupMembership(groupID, makeCommandID(),
		raftpb.ConfChangeRemoveNode, targetDesc, nil)

	// The add command will commit first; then it needs to be applied.
	// Apply it on the proposer node before the leader.
	e := <-cluster.events[proposer].MembershipChangeCommitted
	e.Callback(nil)
	e = <-cluster.events[leader].MembershipChangeCommitted
	e.Callback(nil)

	// Now wait for both commands to commit.
	select {
	case err := <-addErrCh:
		if err != nil {
			t.Errorf("add failed: %s", err)
		}
	case <-time.After(time.Second):
		t.Errorf("add timed out")
	}
	select {
	case err := <-removeErrCh:
		if err != nil {
			t.Errorf("remove failed: %s", err)
		}
	case <-time.After(time.Second):
		t.Errorf("remove timed out")
	}
}
Example #19
// TestBookieReserveMaxBytes ensures that over-booking doesn't occur when trying
// to reserve more bytes than maxReservedBytes.
func TestBookieReserveMaxBytes(t *testing.T) {
	defer leaktest.AfterTest(t)()

	previousReservedBytes := 10

	stopper, _, b := createTestBookie(time.Hour, previousReservedBytes*2, int64(previousReservedBytes))
	defer stopper.Stop()

	// Load up reservations with a size of 1 each.
	for i := 1; i <= previousReservedBytes; i++ {
		req := roachpb.ReservationRequest{
			StoreRequestHeader: roachpb.StoreRequestHeader{
				StoreID: roachpb.StoreID(i),
				NodeID:  roachpb.NodeID(i),
			},
			RangeID:   roachpb.RangeID(i),
			RangeSize: 1,
		}
		if !b.Reserve(context.Background(), req, nil).Reserved {
			t.Errorf("%d: could not add reservation", i)
		}
		verifyBookie(t, b, i, i, int64(i))
	}

	overbookedReq := roachpb.ReservationRequest{
		StoreRequestHeader: roachpb.StoreRequestHeader{
			StoreID: roachpb.StoreID(previousReservedBytes + 1),
			NodeID:  roachpb.NodeID(previousReservedBytes + 1),
		},
		RangeID:   roachpb.RangeID(previousReservedBytes + 1),
		RangeSize: 1,
	}
	if b.Reserve(context.Background(), overbookedReq, nil).Reserved {
		t.Errorf("expected reservation to fail due to too many already existing reservations, but it succeeded")
	}
	// The same numbers from the last call to verifyBookie.
	verifyBookie(t, b, previousReservedBytes, previousReservedBytes, int64(previousReservedBytes))
}
Example #20
func TestStoresRemoveStore(t *testing.T) {
	defer leaktest.AfterTest(t)
	ls := NewStores()

	storeID := roachpb.StoreID(89)

	ls.AddStore(&Store{Ident: roachpb.StoreIdent{StoreID: storeID}})

	ls.RemoveStore(&Store{Ident: roachpb.StoreIdent{StoreID: storeID}})

	if ls.HasStore(storeID) {
		t.Errorf("expted local sender to remove storeID=%d", storeID)
	}
}
Example #21
func TestStoresGetStoreCount(t *testing.T) {
	defer leaktest.AfterTest(t)
	ls := NewStores()
	if ls.GetStoreCount() != 0 {
		t.Errorf("expected 0 stores in new local sender")
	}

	expectedCount := 10
	for i := 0; i < expectedCount; i++ {
		ls.AddStore(&Store{Ident: roachpb.StoreIdent{StoreID: roachpb.StoreID(i)}})
	}
	if count := ls.GetStoreCount(); count != expectedCount {
		t.Errorf("expected store count to be %d but was %d", expectedCount, count)
	}
}
Example #22
// TestAllocatorRelaxConstraints verifies that attribute constraints
// will be relaxed in order to match nodes lacking required attributes,
// if necessary to find an allocation target.
func TestAllocatorRelaxConstraints(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper, g, _, a, _ := createTestAllocator()
	defer stopper.Stop()
	gossiputil.NewStoreGossiper(g).GossipStores(multiDCStores, t)

	testCases := []struct {
		required         []string // attribute strings
		existing         []int    // existing store/node ID
		relaxConstraints bool     // allow constraints to be relaxed?
		expID            int      // expected store/node ID on allocate
		expErr           bool
	}{
		// The two stores in the system have attributes:
		//  storeID=1 {"a", "ssd"}
		//  storeID=2 {"b", "ssd"}
		{[]string{"a", "ssd"}, []int{}, true, 1, false},
		{[]string{"a", "ssd"}, []int{1}, true, 2, false},
		{[]string{"a", "ssd"}, []int{1}, false, 0, true},
		{[]string{"a", "ssd"}, []int{1, 2}, true, 0, true},
		{[]string{"b", "ssd"}, []int{}, true, 2, false},
		{[]string{"b", "ssd"}, []int{1}, true, 2, false},
		{[]string{"b", "ssd"}, []int{2}, false, 0, true},
		{[]string{"b", "ssd"}, []int{2}, true, 1, false},
		{[]string{"b", "ssd"}, []int{1, 2}, true, 0, true},
		{[]string{"b", "hdd"}, []int{}, true, 2, false},
		{[]string{"b", "hdd"}, []int{2}, true, 1, false},
		{[]string{"b", "hdd"}, []int{2}, false, 0, true},
		{[]string{"b", "hdd"}, []int{1, 2}, true, 0, true},
		{[]string{"b", "ssd", "gpu"}, []int{}, true, 2, false},
		{[]string{"b", "hdd", "gpu"}, []int{}, true, 2, false},
	}
	for i, test := range testCases {
		var existing []roachpb.ReplicaDescriptor
		for _, id := range test.existing {
			existing = append(existing, roachpb.ReplicaDescriptor{NodeID: roachpb.NodeID(id), StoreID: roachpb.StoreID(id)})
		}
		result, err := a.AllocateTarget(roachpb.Attributes{Attrs: test.required}, existing, test.relaxConstraints, nil)
		if haveErr := (err != nil); haveErr != test.expErr {
			t.Errorf("%d: expected error %t; got %t: %s", i, test.expErr, haveErr, err)
		} else if err == nil && roachpb.StoreID(test.expID) != result.StoreID {
			t.Errorf("%d: expected result to have store %d; got %+v", i, test.expID, result)
		}
	}
}
Example #23
func TestReplicaSetMoveToFront(t *testing.T) {
	defer leaktest.AfterTest(t)
	rs := replicaSlice(nil)
	for i := 0; i < 5; i++ {
		rs = append(rs, replicaInfo{ReplicaDescriptor: roachpb.ReplicaDescriptor{StoreID: roachpb.StoreID(i + 1)}})
	}
	rs.MoveToFront(0)
	exp := []roachpb.StoreID{1, 2, 3, 4, 5}
	if stores := getStores(rs); !reflect.DeepEqual(stores, exp) {
		t.Errorf("expected order %s, got %s", exp, stores)
	}
	rs.MoveToFront(2)
	exp = []roachpb.StoreID{3, 1, 2, 4, 5}
	if stores := getStores(rs); !reflect.DeepEqual(stores, exp) {
		t.Errorf("expected order %s, got %s", exp, stores)
	}
	rs.MoveToFront(4)
	exp = []roachpb.StoreID{5, 3, 1, 2, 4}
	if stores := getStores(rs); !reflect.DeepEqual(stores, exp) {
		t.Errorf("expected order %s, got %s", exp, stores)
	}
}
Example #24
// RaftMessage proxies the incoming request to the listening server interface.
func (t *rpcTransport) RaftMessage(args proto.Message, callback func(proto.Message, error)) {
	req := args.(*multiraft.RaftMessageRequest)

	t.mu.Lock()
	server, ok := t.servers[roachpb.StoreID(req.Message.To)]
	t.mu.Unlock()

	if !ok {
		callback(nil, util.Errorf("Unable to proxy message to node: %d", req.Message.To))
		return
	}

	// Raft responses are empty so we don't actually need to convert
	// between multiraft's internal struct and the external proto
	// representation. In fact, we don't even need to wait for the
	// message to be processed to invoke the callback. We are just
	// (ab)using the async handler mechanism to get this (synchronous)
	// handler called in the RPC server's goroutine so we can preserve
	// order of incoming messages.
	resp, err := server.RaftMessage(req)
	callback(resp, err)
}
Example #25
// getStoreList returns a storeList that contains all active stores that
// contain the required attributes and their associated stats.
// TODO(embark, spencer): consider using a reverse index map from
// Attr->stores, for efficiency. Ensure that entries in this map still
// have an opportunity to be garbage collected.
func (sp *StorePool) getStoreList(required roachpb.Attributes, deterministic bool) *StoreList {
	sp.mu.RLock()
	defer sp.mu.RUnlock()

	var storeIDs roachpb.StoreIDSlice
	for storeID := range sp.stores {
		storeIDs = append(storeIDs, storeID)
	}
	// Sort the stores by key if deterministic is requested. This is only for
	// unit testing.
	if deterministic {
		sort.Sort(storeIDs)
	}
	sl := new(StoreList)
	for _, storeID := range storeIDs {
		detail := sp.stores[roachpb.StoreID(storeID)]
		if !detail.dead && required.IsSubset(*detail.desc.CombinedAttrs()) {
			desc := detail.desc
			sl.add(&desc)
		}
	}
	return sl
}
Example #26
// eventFeedString describes the event information that was recorded by
// storeEventReader. The formatting is appropriate to paste into this test as
// a new expected value.
func (ser *storeEventReader) eventFeedString() string {
	var buffer bytes.Buffer
	w := tabwriter.NewWriter(&buffer, 2, 1, 2, ' ', 0)

	var storeIDs sort.IntSlice
	for storeID := range ser.perStoreFeeds {
		storeIDs = append(storeIDs, int(storeID))
	}
	sort.Sort(storeIDs)

	for _, storeID := range storeIDs {
		if feed, ok := ser.perStoreFeeds[roachpb.StoreID(storeID)]; ok {
			fmt.Fprintf(w, "%T(%d): {\n", storeID, storeID)
			for _, evt := range feed {
				fmt.Fprintf(w, "\t\"%s\",\n", evt)
			}
			fmt.Fprintf(w, "},\n")
		} else {
			panic("unreachable!")
		}
	}
	// Flush the tabwriter so its buffered, aligned output reaches the buffer.
	_ = w.Flush()
	return buffer.String()
}
Example #27
func newTestCluster(transport Transport, size int, stopper *stop.Stopper, t *testing.T) *testCluster {
	if transport == nil {
		transport = NewLocalRPCTransport(stopper)
	}
	stopper.AddCloser(transport)
	cluster := &testCluster{
		t:         t,
		transport: transport,
		groups:    map[roachpb.RangeID][]int{},
	}

	for i := 0; i < size; i++ {
		ticker := newManualTicker()
		storage := &BlockableStorage{storage: NewMemoryStorage()}
		config := &Config{
			Transport:              transport,
			Storage:                storage,
			Ticker:                 ticker,
			ElectionTimeoutTicks:   2,
			HeartbeatIntervalTicks: 1,
			TickInterval:           time.Hour, // not in use
		}
		mr, err := NewMultiRaft(roachpb.NodeID(i+1), roachpb.StoreID(i+1), config, stopper)
		if err != nil {
			t.Fatal(err)
		}
		state := newState(mr)
		demux := newEventDemux(state.Events)
		demux.start(stopper)
		cluster.nodes = append(cluster.nodes, state)
		cluster.tickers = append(cluster.tickers, ticker)
		cluster.events = append(cluster.events, demux)
		cluster.storages = append(cluster.storages, storage)
	}
	cluster.start()
	return cluster
}
Example #28
// TestSendRPCOrder verifies that sendRPC correctly takes into account the
// leader, attributes and required consistency to determine where to send
// remote requests.
func TestSendRPCOrder(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	rangeID := roachpb.RangeID(99)

	nodeAttrs := map[int32][]string{
		1: {}, // The local node, set in each test case.
		2: {"us", "west", "gpu"},
		3: {"eu", "dublin", "pdu2", "gpu"},
		4: {"us", "east", "gpu"},
		5: {"us", "east", "gpu", "flaky"},
	}

	// Gets filled below to identify the replica by its address.
	addrToNode := make(map[string]int32)
	makeVerifier := func(expOrder rpc.OrderingPolicy,
		expAddrs []int32) func(rpc.Options, []net.Addr) error {
		return func(o rpc.Options, addrs []net.Addr) error {
			if o.Ordering != expOrder {
				return util.Errorf("unexpected ordering, wanted %v, got %v",
					expOrder, o.Ordering)
			}
			var actualAddrs []int32
			for i, a := range addrs {
				if len(expAddrs) <= i {
					return util.Errorf("got unexpected address: %s", a)
				}
				if expAddrs[i] == 0 {
					actualAddrs = append(actualAddrs, 0)
				} else {
					actualAddrs = append(actualAddrs, addrToNode[a.String()])
				}
			}
			if !reflect.DeepEqual(expAddrs, actualAddrs) {
				return util.Errorf("expected %d, but found %d", expAddrs, actualAddrs)
			}
			return nil
		}
	}

	testCases := []struct {
		args       roachpb.Request
		attrs      []string
		order      rpc.OrderingPolicy
		expReplica []int32
		leader     int32 // 0 for not caching a leader.
		// Naming is somewhat off, as eventually consistent reads usually
		// do not have to go to the leader when a node has a read lease.
		// Would really want CONSENSUS here, but that is not implemented.
		// Likely a test setup here will never have a read lease, but good
		// to keep in mind.
		consistent bool
	}{
		// Inconsistent Scan without matching attributes.
		{
			args:       &roachpb.ScanRequest{},
			attrs:      []string{},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
		},
		// Inconsistent Scan with matching attributes.
		// Should move the two nodes matching the attributes to the front and
		// go stable.
		{
			args:  &roachpb.ScanRequest{},
			attrs: nodeAttrs[5],
			order: rpc.OrderStable,
			// Compare only the first two resulting addresses.
			expReplica: []int32{5, 4, 0, 0, 0},
		},

		// Scan without matching attributes that requires but does not find
		// a leader.
		{
			args:       &roachpb.ScanRequest{},
			attrs:      []string{},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
			consistent: true,
		},
		// Put without matching attributes that requires but does not find leader.
		// Should go random and not change anything.
		{
			args:       &roachpb.PutRequest{},
			attrs:      []string{"nomatch"},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
		},
		// Put with matching attributes but no leader.
		// Should move the two nodes matching the attributes to the front and
		// go stable.
		{
			args:  &roachpb.PutRequest{},
			attrs: append(nodeAttrs[5], "irrelevant"),
			// Compare only the first two resulting addresses.
			order:      rpc.OrderStable,
			expReplica: []int32{5, 4, 0, 0, 0},
		},
		// Put with matching attributes that finds the leader (node 3).
		// Should address the leader and the two nodes matching the attributes
		// (the last and second to last) in that order.
		{
			args:  &roachpb.PutRequest{},
			attrs: append(nodeAttrs[5], "irrelevant"),
			// Compare only the first resulting addresses as we have a leader
			// and that means we're only trying to send there.
			order:      rpc.OrderStable,
			expReplica: []int32{2, 5, 4, 0, 0},
			leader:     2,
		},
		// Inconsistent Get without matching attributes but leader (node 3). Should just
		// go random as the leader does not matter.
		{
			args:       &roachpb.GetRequest{},
			attrs:      []string{},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
			leader:     2,
		},
	}

	descriptor := roachpb.RangeDescriptor{
		StartKey: roachpb.RKeyMin,
		EndKey:   roachpb.RKeyMax,
		RangeID:  rangeID,
		Replicas: nil,
	}

	// Stub to be changed in each test case.
	var verifyCall func(rpc.Options, []net.Addr) error

	var testFn rpcSendFn = func(opts rpc.Options, method string,
		addrs []net.Addr, getArgs func(addr net.Addr) proto.Message,
		getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
		if err := verifyCall(opts, addrs); err != nil {
			return nil, err
		}
		return []proto.Message{getArgs(addrs[0]).(*roachpb.BatchRequest).CreateReply()}, nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(roachpb.RKey, bool, bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
			return []roachpb.RangeDescriptor{descriptor}, nil
		}),
	}

	ds := NewDistSender(ctx, g)

	for n, tc := range testCases {
		verifyCall = makeVerifier(tc.order, tc.expReplica)
		descriptor.Replicas = nil // could do this once above, but more convenient here
		for i := int32(1); i <= 5; i++ {
			addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
			addrToNode[addr.String()] = i
			nd := &roachpb.NodeDescriptor{
				NodeID:  roachpb.NodeID(i),
				Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
				Attrs: roachpb.Attributes{
					Attrs: nodeAttrs[i],
				},
			}
			if err := g.AddInfoProto(gossip.MakeNodeIDKey(roachpb.NodeID(i)), nd, time.Hour); err != nil {
				t.Fatal(err)
			}
			descriptor.Replicas = append(descriptor.Replicas, roachpb.ReplicaDescriptor{
				NodeID:  roachpb.NodeID(i),
				StoreID: roachpb.StoreID(i),
			})
		}

		{
			// The local node needs to get its attributes during sendRPC.
			nd := &roachpb.NodeDescriptor{
				NodeID: 6,
				Attrs: roachpb.Attributes{
					Attrs: tc.attrs,
				},
			}
			g.SetNodeID(nd.NodeID)
			if err := g.SetNodeDescriptor(nd); err != nil {
				t.Fatal(err)
			}
		}

		ds.leaderCache.Update(roachpb.RangeID(rangeID), roachpb.ReplicaDescriptor{})
		if tc.leader > 0 {
			ds.leaderCache.Update(roachpb.RangeID(rangeID), descriptor.Replicas[tc.leader-1])
		}

		args := tc.args
		args.Header().Key = roachpb.Key("a")
		if roachpb.IsRange(args) {
			args.Header().EndKey = roachpb.Key("b")
		}
		consistency := roachpb.CONSISTENT
		if !tc.consistent {
			consistency = roachpb.INCONSISTENT
		}
		// Kill the cached NodeDescriptor, enforcing a lookup from Gossip.
		ds.nodeDescriptor = nil
		if _, err := client.SendWrappedWith(ds, nil, roachpb.Header{
			RangeID:         rangeID, // Not used in this test, but why not.
			ReadConsistency: consistency,
		}, args); err != nil {
			t.Errorf("%d: %s", n, err)
		}
	}
}
Example #29
func Example_rebalancing() {
	stopper := stop.NewStopper()
	defer stopper.Stop()

	// Model a set of stores in a cluster,
	// randomly adding / removing stores and adding bytes.
	g := gossip.New(nil, nil, stopper)
	// g.SetNodeID must be called before g.AddInfo.
	g.SetNodeID(roachpb.NodeID(1))
	sp := NewStorePool(
		g,
		hlc.NewClock(hlc.UnixNano),
		nil,
		/* reservationsEnabled */ true,
		TestTimeUntilStoreDeadOff,
		stopper,
	)
	alloc := MakeAllocator(sp, AllocatorOptions{AllowRebalance: true, Deterministic: true})

	var wg sync.WaitGroup
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ roachpb.Value) { wg.Done() })

	const generations = 100
	const nodes = 20

	// Initialize testStores.
	var testStores [nodes]testStore
	for i := 0; i < len(testStores); i++ {
		testStores[i].StoreID = roachpb.StoreID(i)
		testStores[i].Node = roachpb.NodeDescriptor{NodeID: roachpb.NodeID(i)}
		testStores[i].Capacity = roachpb.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
	}
	// Initialize the cluster with a single range.
	testStores[0].add(alloc.randGen.Int63n(1 << 20))

	for i := 0; i < generations; i++ {
		// First loop through test stores and add data.
		wg.Add(len(testStores))
		for j := 0; j < len(testStores); j++ {
			// Add a pretend range to the testStore if there's already one.
			if testStores[j].Capacity.RangeCount > 0 {
				testStores[j].add(alloc.randGen.Int63n(1 << 20))
			}
			if err := g.AddInfoProto(gossip.MakeStoreKey(roachpb.StoreID(j)), &testStores[j].StoreDescriptor, 0); err != nil {
				panic(err)
			}
		}
		wg.Wait()

		// Next loop through test stores and maybe rebalance.
		for j := 0; j < len(testStores); j++ {
			ts := &testStores[j]
			if alloc.ShouldRebalance(ts.StoreID) {
				target := alloc.RebalanceTarget(ts.StoreID, roachpb.Attributes{}, []roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
				if target != nil {
					testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
				}
			}
		}

		// Output each store's used capacity as a 3-digit value, scaled so that
		// the fullest store prints 999.
		if i%(generations/50) == 0 {
			var maxBytes int64
			for j := 0; j < len(testStores); j++ {
				bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
				if bytes > maxBytes {
					maxBytes = bytes
				}
			}
			if maxBytes > 0 {
				for j := 0; j < len(testStores); j++ {
					endStr := " "
					if j == len(testStores)-1 {
						endStr = ""
					}
					bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
					fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
				}
				fmt.Printf("\n")
			}
		}
	}

	var totBytes int64
	var totRanges int32
	for i := 0; i < len(testStores); i++ {
		totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
		totRanges += testStores[i].Capacity.RangeCount
	}
	fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)

	// Output:
	// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
	// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
	// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
	// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
	// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
	// 999 000 000 000 000 000 000 000 000 000 045 140 000 000 000 000 000 105 000 000
	// 999 014 143 000 000 000 000 039 017 000 112 071 000 088 009 000 097 134 000 151
	// 999 196 213 000 000 000 143 098 210 039 262 260 077 139 078 087 237 316 281 267
	// 999 394 368 391 000 393 316 356 364 263 474 262 214 321 345 374 403 445 574 220
	// 999 337 426 577 023 525 459 426 229 315 495 327 310 370 363 423 390 473 587 308
	// 999 481 529 533 132 563 519 496 396 363 636 337 414 408 425 533 445 605 559 405
	// 999 572 585 507 256 609 570 586 513 341 660 347 544 443 488 525 446 596 556 462
	// 999 580 575 603 325 636 590 549 495 337 698 386 663 526 518 511 517 572 546 533
	// 999 576 601 637 374 629 573 558 520 391 684 446 692 555 510 461 552 593 568 564
	// 999 573 636 671 441 643 619 629 628 452 705 525 795 590 542 525 589 658 589 655
	// 999 585 625 651 467 686 606 662 611 508 654 516 746 594 542 528 591 646 569 642
	// 999 636 690 728 501 704 638 700 619 539 688 555 738 592 556 568 659 669 602 649
	// 999 655 749 773 519 790 713 781 698 604 758 601 755 634 580 661 716 735 607 660
	// 999 648 716 726 549 813 748 766 693 606 784 568 749 655 579 642 692 711 587 632
	// 999 688 734 731 553 805 736 779 701 575 763 562 722 647 599 631 691 732 598 608
	// 999 679 770 719 590 815 754 799 687 613 748 540 715 664 590 638 703 720 621 588
	// 999 736 775 724 614 813 771 829 703 679 782 560 754 692 624 658 756 763 636 643
	// 999 759 792 737 688 847 782 872 761 695 841 617 756 730 607 664 762 807 677 666
	// 999 793 837 754 704 876 803 897 753 742 880 639 758 766 653 684 785 850 720 670
	// 999 815 864 778 735 921 843 927 778 752 896 696 775 796 698 681 775 859 730 693
	// 999 827 876 759 759 911 838 938 781 798 920 708 778 794 698 711 804 870 732 710
	// 999 815 893 733 790 924 849 940 755 777 901 720 794 832 704 721 834 851 722 748
	// 999 820 905 772 807 941 884 938 781 788 888 738 835 849 735 742 865 884 743 791
	// 999 828 889 768 828 939 865 936 789 805 913 751 841 860 751 759 895 889 730 814
	// 999 829 893 794 840 933 883 943 805 830 929 735 842 871 778 788 886 912 746 845
	// 999 848 892 820 824 963 913 978 832 828 952 755 860 890 784 814 905 905 755 855
	// 999 847 880 846 847 963 939 984 851 835 958 777 862 880 799 829 912 895 772 870
	// 999 850 886 859 871 950 921 998 847 823 925 759 877 861 787 810 908 915 798 840
	// 982 854 891 854 900 956 945 999 833 804 929 767 896 861 781 797 911 932 791 855
	// 961 849 884 846 881 949 928 999 829 796 906 768 868 858 797 804 883 897 774 834
	// 965 863 924 874 903 988 953 999 864 831 924 786 876 886 821 804 903 940 799 843
	// 963 873 936 880 915 997 966 999 885 832 935 799 891 919 854 801 916 953 802 866
	// 951 886 938 873 900 990 972 999 898 822 915 795 871 917 853 798 928 953 779 850
	// 932 880 939 866 897 999 948 970 884 837 912 805 877 893 866 807 922 933 791 846
	// 925 896 935 885 899 999 963 965 886 858 897 820 894 876 876 811 918 921 793 856
	// 926 881 933 876 896 999 952 942 857 859 878 812 898 884 883 791 920 894 783 853
	// 951 890 947 898 919 999 959 952 863 871 895 845 902 898 893 816 934 920 790 881
	// 962 895 959 919 921 999 982 951 883 877 901 860 911 910 899 835 949 923 803 883
	// 957 886 970 905 915 999 970 974 888 894 924 879 938 930 909 847 955 937 830 899
	// 941 881 958 889 914 999 957 953 885 890 900 870 946 919 885 822 950 927 832 875
	// 937 888 962 897 934 999 963 950 902 900 905 890 952 920 895 831 963 930 852 872
	// 916 888 967 881 924 999 970 946 912 890 901 889 958 910 911 830 966 928 834 866
	// 900 859 959 877 895 999 955 931 893 868 894 881 929 893 885 813 937 909 819 849
	// 902 857 960 875 896 999 944 929 911 867 911 895 946 897 897 812 926 921 815 859
	// 902 855 951 867 893 999 949 938 901 867 911 892 949 898 903 803 935 930 809 868
	// Total bytes=909881714, ranges=1745
}
Example #30
func TestStoreEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)

	// Construct a set of fake ranges to synthesize events correctly. They do
	// not need to be added to a Store.
	desc1 := &roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKey("a"),
		EndKey:   roachpb.RKey("b"),
	}
	desc2 := &roachpb.RangeDescriptor{
		RangeID:  2,
		StartKey: roachpb.RKey("b"),
		EndKey:   roachpb.RKey("c"),
	}
	rng1 := &Replica{
		stats: &rangeStats{
			rangeID: desc1.RangeID,
			MVCCStats: engine.MVCCStats{
				LiveBytes:       400,
				KeyBytes:        40,
				ValBytes:        360,
				LastUpdateNanos: 10 * 1E9,
			},
		},
	}
	if err := rng1.setDesc(desc1); err != nil {
		t.Fatal(err)
	}
	rng2 := &Replica{
		stats: &rangeStats{
			rangeID: desc2.RangeID,
			MVCCStats: engine.MVCCStats{
				LiveBytes:       200,
				KeyBytes:        30,
				ValBytes:        170,
				LastUpdateNanos: 20 * 1E9,
			},
		},
	}
	if err := rng2.setDesc(desc2); err != nil {
		t.Fatal(err)
	}
	storeDesc := &roachpb.StoreDescriptor{
		StoreID: roachpb.StoreID(1),
		Node: roachpb.NodeDescriptor{
			NodeID: roachpb.NodeID(1),
		},
		Capacity: roachpb.StoreCapacity{
			Capacity:   100,
			Available:  100,
			RangeCount: 1,
		},
	}
	diffStats := &engine.MVCCStats{
		IntentBytes: 30,
		IntentAge:   20,
	}

	// A testCase corresponds to a single Store event type. Each case contains a
	// method which publishes a single event to the given storeEventPublisher,
	// and an expected result interface which should match the produced
	// event.
	testCases := []struct {
		name      string
		publishTo func(StoreEventFeed)
		expected  interface{}
	}{
		{
			"NewRange",
			func(feed StoreEventFeed) {
				feed.registerRange(rng1, false /* scan */)
			},
			&RegisterRangeEvent{
				StoreID: roachpb.StoreID(1),
				Desc: &roachpb.RangeDescriptor{
					RangeID:  1,
					StartKey: roachpb.RKey("a"),
					EndKey:   roachpb.RKey("b"),
				},
				Stats: engine.MVCCStats{
					LiveBytes:       400,
					KeyBytes:        40,
					ValBytes:        360,
					LastUpdateNanos: 10 * 1E9,
				},
			},
		},
		{
			"UpdateRange",
			func(feed StoreEventFeed) {
				feed.updateRange(rng1, roachpb.Put, diffStats)
			},
			&UpdateRangeEvent{
				StoreID: roachpb.StoreID(1),
				Desc: &roachpb.RangeDescriptor{
					RangeID:  1,
					StartKey: roachpb.RKey("a"),
					EndKey:   roachpb.RKey("b"),
				},
				Stats: engine.MVCCStats{
					LiveBytes:       400,
					KeyBytes:        40,
					ValBytes:        360,
					LastUpdateNanos: 10 * 1E9,
				},
				Method: roachpb.Put,
				Delta: engine.MVCCStats{
					IntentBytes: 30,
					IntentAge:   20,
				},
			},
		},
		{
			"RemoveRange",
			func(feed StoreEventFeed) {
				feed.removeRange(rng2)
			},
			&RemoveRangeEvent{
				StoreID: roachpb.StoreID(1),
				Desc: &roachpb.RangeDescriptor{
					RangeID:  2,
					StartKey: roachpb.RKey("b"),
					EndKey:   roachpb.RKey("c"),
				},
				Stats: engine.MVCCStats{
					LiveBytes:       200,
					KeyBytes:        30,
					ValBytes:        170,
					LastUpdateNanos: 20 * 1E9,
				},
			},
		},
		{
			"SplitRange",
			func(feed StoreEventFeed) {
				feed.splitRange(rng1, rng2)
			},
			&SplitRangeEvent{
				StoreID: roachpb.StoreID(1),
				Original: UpdateRangeEvent{
					Desc: &roachpb.RangeDescriptor{
						RangeID:  1,
						StartKey: roachpb.RKey("a"),
						EndKey:   roachpb.RKey("b"),
					},
					Stats: engine.MVCCStats{
						LiveBytes:       400,
						KeyBytes:        40,
						ValBytes:        360,
						LastUpdateNanos: 10 * 1E9,
					},
					Delta: engine.MVCCStats{
						LiveBytes:       -200,
						KeyBytes:        -30,
						ValBytes:        -170,
						LastUpdateNanos: 20 * 1E9,
					},
				},
				New: RegisterRangeEvent{
					Desc: &roachpb.RangeDescriptor{
						RangeID:  2,
						StartKey: roachpb.RKey("b"),
						EndKey:   roachpb.RKey("c"),
					},
					Stats: engine.MVCCStats{
						LiveBytes:       200,
						KeyBytes:        30,
						ValBytes:        170,
						LastUpdateNanos: 20 * 1E9,
					},
				},
			},
		},
		{
			"MergeRange",
			func(feed StoreEventFeed) {
				feed.mergeRange(rng1, rng2)
			},
			&MergeRangeEvent{
				StoreID: roachpb.StoreID(1),
				Merged: UpdateRangeEvent{
					Desc: &roachpb.RangeDescriptor{
						RangeID:  1,
						StartKey: roachpb.RKey("a"),
						EndKey:   roachpb.RKey("b"),
					},
					Stats: engine.MVCCStats{
						LiveBytes:       400,
						KeyBytes:        40,
						ValBytes:        360,
						LastUpdateNanos: 10 * 1E9,
					},
					Delta: engine.MVCCStats{
						LiveBytes:       200,
						KeyBytes:        30,
						ValBytes:        170,
						LastUpdateNanos: 20 * 1E9,
					},
				},
				Removed: RemoveRangeEvent{
					Desc: &roachpb.RangeDescriptor{
						RangeID:  2,
						StartKey: roachpb.RKey("b"),
						EndKey:   roachpb.RKey("c"),
					},
					Stats: engine.MVCCStats{
						LiveBytes:       200,
						KeyBytes:        30,
						ValBytes:        170,
						LastUpdateNanos: 20 * 1E9,
					},
				},
			},
		},
		{
			"StoreStatus",
			func(feed StoreEventFeed) {
				feed.storeStatus(storeDesc)
			},
			&StoreStatusEvent{
				Desc: storeDesc,
			},
		},
		{
			"ReplicationStatus",
			func(feed StoreEventFeed) {
				feed.replicationStatus(3, 2, 1)
			},
			&ReplicationStatusEvent{
				StoreID:              roachpb.StoreID(1),
				LeaderRangeCount:     3,
				ReplicatedRangeCount: 2,
				AvailableRangeCount:  1,
			},
		},
		{
			"StartStore",
			func(feed StoreEventFeed) {
				feed.startStore(100)
			},
			&StartStoreEvent{
				StoreID:   roachpb.StoreID(1),
				StartedAt: 100,
			},
		},
		{
			"BeginScanRanges",
			func(feed StoreEventFeed) {
				feed.beginScanRanges()
			},
			&BeginScanRangesEvent{
				StoreID: roachpb.StoreID(1),
			},
		},
		{
			"EndScanRanges",
			func(feed StoreEventFeed) {
				feed.endScanRanges()
			},
			&EndScanRangesEvent{
				StoreID: roachpb.StoreID(1),
			},
		},
	}

	// Compile expected events into a single slice.
	expectedEvents := make([]interface{}, len(testCases))
	for i := range testCases {
		expectedEvents[i] = testCases[i].expected
	}

	events := make([]interface{}, 0, len(expectedEvents))

	// Run test cases directly through a feed.
	stopper := stop.NewStopper()
	defer stopper.Stop()
	feed := util.NewFeed(stopper)
	feed.Subscribe(func(event interface{}) {
		events = append(events, event)
	})

	storefeed := NewStoreEventFeed(roachpb.StoreID(1), feed)
	for _, tc := range testCases {
		tc.publishTo(storefeed)
	}

	feed.Flush()

	if a, e := events, expectedEvents; !reflect.DeepEqual(a, e) {
		t.Errorf("received incorrect events.\nexpected: %v\nactual: %v", e, a)
	}
}