Example #1
// TestInfoStoreDistant verifies selection of infos from store with
// Hops > maxHops.
func TestInfoStoreDistant(t *testing.T) {
	defer leaktest.AfterTest(t)
	nodes := []proto.NodeID{
		proto.NodeID(1),
		proto.NodeID(2),
		proto.NodeID(3),
	}
	is := newInfoStore(1, emptyAddr)
	// Add one info per node, with hop count equal to index+1.
	for i := 0; i < len(nodes); i++ {
		inf := is.newInfo(nil, time.Second)
		inf.Hops = uint32(i + 1)
		inf.NodeID = nodes[i]
		if err := is.addInfo(fmt.Sprintf("b.%d", i), inf); err != nil {
			t.Fatal(err)
		}
	}

	for i := 0; i < len(nodes); i++ {
		nodesLen := is.distant(uint32(i)).len()
		if nodesLen != 3-i {
			t.Errorf("%d nodes (not %d) should be over maxHops = %d", 3-i, nodesLen, i)
		}
	}
}
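// A plausible sketch of infoStore.distant consistent with the test above.
// The Infos map, the field names, and the nodeSet growth behavior are
// assumptions; the real implementation may iterate differently.
func (is *infoStore) distant(maxHops uint32) nodeSet {
	ns := makeNodeSet(len(is.Infos))
	for _, inf := range is.Infos {
		if inf.Hops > maxHops {
			ns.addNode(inf.NodeID)
		}
	}
	return ns
}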
Example #2
func TestIsFresh(t *testing.T) {
	defer leaktest.AfterTest(t)
	const seq = 10
	now := time.Now().UnixNano()
	node1 := proto.NodeID(1)
	node2 := proto.NodeID(2)
	node3 := proto.NodeID(3)
	i := info{"a", float64(1), now, now + int64(time.Millisecond), 0, node1, node2, seq}
	if !i.isFresh(node3, seq-1) {
		t.Error("info should be fresh:", i)
	}
	if i.isFresh(node3, seq+1) {
		t.Error("info should not be fresh:", i)
	}
	if i.isFresh(node1, seq-1) {
		t.Error("info should not be fresh:", i)
	}
	if i.isFresh(node2, seq-1) {
		t.Error("info should not be fresh:", i)
	}
	// Using node 0 will always yield fresh data.
	if !i.isFresh(0, 0) {
		t.Error("info should be fresh from node0:", i)
	}
}
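// A minimal isFresh sketch consistent with the assertions above. The field
// roles (NodeID = originator, peerID = forwarding peer, seq = sequence) are
// inferred from the struct literal; the real gossip code may differ.
func (i info) isFresh(nodeID proto.NodeID, highWaterSeq int64) bool {
	// Node 0 is a wildcard: all infos are fresh to it.
	if nodeID == 0 {
		return true
	}
	// Already seen at or beyond this sequence number.
	if i.seq <= highWaterSeq {
		return false
	}
	// Never hand an info back to its originator or to the peer that
	// forwarded it to us.
	if i.NodeID == nodeID || i.peerID == nodeID {
		return false
	}
	return true
}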
Example #3
// TestSendRPCRetry verifies that when sendRPC fails on the first
// address but succeeds on the second, the reply from the second
// address is successfully returned.
func TestSendRPCRetry(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	if err := g.SetNodeDescriptor(&proto.NodeDescriptor{NodeID: 1}); err != nil {
		t.Fatal(err)
	}
	// Fill the RangeDescriptor with two replicas.
	var descriptor = proto.RangeDescriptor{
		RaftID:   1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("z"),
	}
	for i := 1; i <= 2; i++ {
		addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
		nd := &proto.NodeDescriptor{
			NodeID: proto.NodeID(i),
			Address: proto.Addr{
				Network: addr.Network(),
				Address: addr.String(),
			},
		}
		if err := g.AddInfo(gossip.MakeNodeIDKey(proto.NodeID(i)), nd, time.Hour); err != nil {
			t.Fatal(err)
		}

		descriptor.Replicas = append(descriptor.Replicas, proto.Replica{
			NodeID:  proto.NodeID(i),
			StoreID: proto.StoreID(i),
		})
	}
	// Define our rpcSend stub which returns success on the second address.
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) interface{}, getReply func() interface{}, _ *rpc.Context) ([]interface{}, error) {
		if method == "Node.Scan" {
			// The reply from the first address is discarded, simulating a
			// failed response.
			_ = getReply()
			// The reply from the second address succeeds.
			reply := getReply()
			reply.(*proto.ScanResponse).Rows = append([]proto.KeyValue{}, proto.KeyValue{Key: proto.Key("b"), Value: proto.Value{}})
			return []interface{}{reply}, nil
		}
		return nil, util.Errorf("unexpected method %v", method)
	}
	ctx := &DistSenderContext{
		rpcSend: testFn,
		rangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{descriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	call := proto.ScanCall(proto.Key("a"), proto.Key("d"), 1)
	sr := call.Reply.(*proto.ScanResponse)
	ds.Send(context.Background(), call)
	if err := sr.GoError(); err != nil {
		t.Fatal(err)
	}
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
}
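// The stub types used above are defined elsewhere in the package;
// hypothetical declarations consistent with this usage:
type rpcSendFn func(opts rpc.Options, method string, addrs []net.Addr,
	getArgs func(addr net.Addr) interface{}, getReply func() interface{},
	ctx *rpc.Context) ([]interface{}, error)

// mockRangeDescriptorDB adapts a plain function to whatever range-lookup
// interface DistSenderContext expects (interface name assumed).
type mockRangeDescriptorDB func(proto.Key, lookupOptions) ([]proto.RangeDescriptor, error)

func (mdb mockRangeDescriptorDB) getRangeDescriptors(key proto.Key, opts lookupOptions) ([]proto.RangeDescriptor, error) {
	return mdb(key, opts)
}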
Example #4
// addStore creates a new store on the same Transport but doesn't create any ranges.
func (m *multiTestContext) addStore() {
	idx := len(m.stores)
	var clock *hlc.Clock
	if len(m.clocks) > idx {
		clock = m.clocks[idx]
	} else {
		clock = m.clock
		m.clocks = append(m.clocks, clock)
	}
	var eng engine.Engine
	var needBootstrap bool
	if len(m.engines) > idx {
		eng = m.engines[idx]
	} else {
		eng = engine.NewInMem(proto.Attributes{}, 1<<20)
		m.engines = append(m.engines, eng)
		needBootstrap = true
		// Add an extra refcount to the engine so the underlying rocksdb instances
		// aren't closed when stopping and restarting the stores.
		// These refcounts are removed in Stop().
		if err := eng.Open(); err != nil {
			m.t.Fatal(err)
		}
	}

	stopper := stop.NewStopper()
	ctx := m.makeContext(idx)
	store := storage.NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: proto.NodeID(idx + 1)})
	if needBootstrap {
		err := store.Bootstrap(proto.StoreIdent{
			NodeID:  proto.NodeID(idx + 1),
			StoreID: proto.StoreID(idx + 1),
		}, stopper)
		if err != nil {
			m.t.Fatal(err)
		}

		// Bootstrap the initial range on the first store
		if idx == 0 {
			if err := store.BootstrapRange(nil); err != nil {
				m.t.Fatal(err)
			}
		}
	}
	if err := store.Start(stopper); err != nil {
		m.t.Fatal(err)
	}
	store.WaitForInit()
	m.stores = append(m.stores, store)
	if len(m.senders) == idx {
		m.senders = append(m.senders, kv.NewLocalSender())
	}
	m.senders[idx].AddStore(store)
	// Save the store identities for later so we can use them in
	// replication operations even while the store is stopped.
	m.idents = append(m.idents, store.Ident)
	m.stoppers = append(m.stoppers, stopper)
}
Example #5
// TestNodeEventFeedTransactionRestart verifies that calls which indicate a
// transaction restart are counted as successful.
func TestNodeEventFeedTransactionRestart(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper, feed, consumers := startConsumerSet(1)
	nodefeed := status.NewNodeEventFeed(proto.NodeID(1), feed)
	ner := &nodeEventReader{}
	sub := feed.Subscribe()
	stopper.RunWorker(func() {
		ner.readEvents(sub)
	})
	nodeID := proto.NodeID(1)

	nodefeed.CallComplete(&proto.GetRequest{}, &proto.GetResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_BACKOFF,
			},
		},
	})
	nodefeed.CallComplete(&proto.GetRequest{}, &proto.GetResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_IMMEDIATE,
			},
		},
	})
	nodefeed.CallComplete(&proto.PutRequest{}, &proto.PutResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_ABORT,
			},
		},
	})
	feed.Close()
	stopper.Stop()

	c := consumers[0]
	exp := []interface{}{
		&status.CallSuccessEvent{
			NodeID: nodeID,
			Method: proto.Get,
		},
		&status.CallSuccessEvent{
			NodeID: nodeID,
			Method: proto.Get,
		},
		&status.CallErrorEvent{
			NodeID: nodeID,
			Method: proto.Put,
		},
	}

	if !reflect.DeepEqual(exp, c.received) {
		t.Fatalf("received unexpected events: %s", ner.eventFeedString())
	}
}
Example #6
// TestLeastUseful verifies that the least-contributing peer node
// can be determined.
func TestLeastUseful(t *testing.T) {
	defer leaktest.AfterTest(t)
	nodes := []proto.NodeID{
		proto.NodeID(1),
		proto.NodeID(2),
	}
	is := newInfoStore(1, emptyAddr)

	set := makeNodeSet(3)
	if is.leastUseful(set) != 0 {
		t.Error("not expecting a node from an empty set")
	}

	inf1 := is.newInfo(nil, time.Second)
	inf1.peerID = 1
	if err := is.addInfo("a1", inf1); err != nil {
		t.Fatal(err)
	}
	if is.leastUseful(set) != 0 {
		t.Error("not expecting a node from an empty set")
	}

	set.addNode(nodes[0])
	if is.leastUseful(set) != nodes[0] {
		t.Error("expecting nodes[0] as least useful")
	}

	inf2 := is.newInfo(nil, time.Second)
	inf2.peerID = 1
	if err := is.addInfo("a2", inf2); err != nil {
		t.Fatal(err)
	}
	if is.leastUseful(set) != nodes[0] {
		t.Error("expecting nodes[0] as least useful")
	}

	set.addNode(nodes[1])
	if is.leastUseful(set) != nodes[1] {
		t.Error("expecting nodes[1] as least useful")
	}

	inf3 := is.newInfo(nil, time.Second)
	inf3.peerID = 2
	if err := is.addInfo("a3", inf3); err != nil {
		t.Fatal(err)
	}
	if is.leastUseful(set) != nodes[1] {
		t.Error("expecting nodes[1] as least useful")
	}
}
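// A sketch of leastUseful consistent with the test: the node in the set
// that has contributed the fewest infos (tracked via peerID) is least
// useful. The Infos map and nodeSet internals are assumptions.
func (is *infoStore) leastUseful(nodes nodeSet) proto.NodeID {
	contrib := make(map[proto.NodeID]int)
	for node := range nodes.nodes {
		contrib[node] = 0
	}
	for _, inf := range is.Infos {
		if _, ok := contrib[inf.peerID]; ok {
			contrib[inf.peerID]++
		}
	}
	// An empty set yields the zero NodeID.
	least, leastCount := proto.NodeID(0), -1
	for node, count := range contrib {
		if leastCount == -1 || count < leastCount {
			least, leastCount = node, count
		}
	}
	return least
}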
Example #7
// allocateNodeID increments the node id generator key to allocate
// a new, unique node id.
func allocateNodeID(db *client.DB) (proto.NodeID, error) {
	r, err := db.Inc(keys.NodeIDGenerator, 1)
	if err != nil {
		return 0, util.Errorf("unable to allocate node ID: %s", err)
	}
	return proto.NodeID(r.ValueInt()), nil
}
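// Hypothetical call site during node bootstrap (illustrative only):
//
//	nodeID, err := allocateNodeID(db)
//	if err != nil {
//		log.Fatalf("node ID allocation failed: %s", err)
//	}
//	log.Infof("allocated node ID %d", nodeID)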
Example #8
func TestNodeSetAsSlice(t *testing.T) {
	defer leaktest.AfterTest(t)
	nodes := newNodeSet(2)
	node0 := proto.NodeID(1)
	node1 := proto.NodeID(2)
	nodes.addNode(node0)
	nodes.addNode(node1)

	nodeArr := nodes.asSlice()
	if len(nodeArr) != 2 {
		t.Error("expected slice of length 2:", nodeArr)
	}
	if (nodeArr[0] != node0 && nodeArr[0] != node1) ||
		(nodeArr[1] != node1 && nodeArr[1] != node0) {
		t.Error("expected slice to contain both node0 and node1:", nodeArr)
	}
}
Example #9
func TestNodeSetFilter(t *testing.T) {
	defer leaktest.AfterTest(t)
	nodes1 := newNodeSet(2)
	node0 := proto.NodeID(1)
	node1 := proto.NodeID(2)
	nodes1.addNode(node0)
	nodes1.addNode(node1)

	nodes2 := newNodeSet(1)
	nodes2.addNode(node1)

	filtered := nodes1.filter(func(a proto.NodeID) bool {
		return !nodes2.hasNode(a)
	})
	if filtered.len() != 1 || filtered.hasNode(node1) || !filtered.hasNode(node0) {
		t.Errorf("expected filter to leave node0: %+v", filtered)
	}
}
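// filter can be implemented as a simple rebuild of the set; a sketch
// assuming a map-backed nodeSet with a maxSize field:
func (as nodeSet) filter(filterFn func(node proto.NodeID) bool) nodeSet {
	avail := newNodeSet(as.maxSize)
	for node := range as.nodes {
		if filterFn(node) {
			avail.addNode(node)
		}
	}
	return avail
}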
Example #10
func TestRuntimeStatRecorder(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(100)
	recorder := NewRuntimeStatRecorder(proto.NodeID(1), hlc.NewClock(manual.UnixNano))

	data := recorder.GetTimeSeriesData()
	if a, e := len(data), 10; a != e {
		t.Fatalf("expected %d series generated, got %d", e, a)
	}
}
Example #11
func TestNodeSetAddAndRemoveNode(t *testing.T) {
	defer leaktest.AfterTest(t)
	nodes := newNodeSet(2)
	node0 := proto.NodeID(1)
	node1 := proto.NodeID(2)
	nodes.addNode(node0)
	nodes.addNode(node1)
	if !nodes.hasNode(node0) || !nodes.hasNode(node1) {
		t.Error("failed to locate added nodes")
	}
	nodes.removeNode(node0)
	if nodes.hasNode(node0) || !nodes.hasNode(node1) {
		t.Error("failed to remove node0", nodes)
	}
	nodes.removeNode(node1)
	if nodes.hasNode(node0) || nodes.hasNode(node1) {
		t.Error("failed to remove node1", nodes)
	}
}
Example #12
// NewNetwork creates nodeCount gossip nodes. The networkType should
// be set to either "tcp" or "unix". The gossipInterval should be set
// to a compressed simulation timescale, though large enough to give
// the concurrent goroutines enough time to pass data back and forth
// in order to yield accurate estimates of how old data actually ends
// up being at the various nodes (e.g. DefaultTestGossipInterval).
// TODO: This method should take `stopper` as an argument.
func NewNetwork(nodeCount int, networkType string,
	gossipInterval time.Duration) *Network {
	clock := hlc.NewClock(hlc.UnixNano)

	log.Infof("simulating gossip network with %d nodes", nodeCount)

	stopper := stop.NewStopper()

	rpcContext := rpc.NewContext(&base.Context{Insecure: true}, clock, stopper)

	nodes := make([]*Node, nodeCount)
	for i := range nodes {
		server := rpc.NewServer(util.CreateTestAddr(networkType), rpcContext)
		if err := server.Start(); err != nil {
			log.Fatal(err)
		}
		nodes[i] = &Node{Server: server}
	}

	var numResolvers int
	if len(nodes) > 3 {
		numResolvers = 3
	} else {
		numResolvers = len(nodes)
	}

	for i, leftNode := range nodes {
		// Build new resolvers for each instance or we'll get data races.
		var resolvers []resolver.Resolver
		for _, rightNode := range nodes[:numResolvers] {
			resolvers = append(resolvers, resolver.NewResolverFromAddress(rightNode.Server.Addr()))
		}

		gossipNode := gossip.New(rpcContext, gossipInterval, resolvers)
		addr := leftNode.Server.Addr()
		if err := gossipNode.SetNodeDescriptor(&proto.NodeDescriptor{
			NodeID:  proto.NodeID(i + 1),
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}); err != nil {
			log.Fatal(err)
		}

		gossipNode.Start(leftNode.Server, stopper)
		stopper.AddCloser(leftNode.Server)

		leftNode.Gossip = gossipNode
	}

	return &Network{
		Nodes:          nodes,
		NetworkType:    networkType,
		GossipInterval: gossipInterval,
		Stopper:        stopper,
	}
}
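// Hypothetical usage (illustrative; DefaultTestGossipInterval is the
// constant referenced in the doc comment above):
//
//	network := NewNetwork(16, "unix", DefaultTestGossipInterval)
//	defer network.Stopper.Stop()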
Example #13
func TestNodeSetMaxSize(t *testing.T) {
	defer leaktest.AfterTest(t)
	nodes := newNodeSet(1)
	if !nodes.hasSpace() {
		t.Error("set should have space")
	}
	nodes.addNode(proto.NodeID(1))
	if nodes.hasSpace() {
		t.Error("set should have no space")
	}
}
Example #14
// restartStore restarts a store previously stopped with StopStore.
func (m *multiTestContext) restartStore(i int) {
	m.stoppers[i] = stop.NewStopper()

	ctx := m.makeContext(i)
	m.stores[i] = storage.NewStore(ctx, m.engines[i], &proto.NodeDescriptor{NodeID: proto.NodeID(i + 1)})
	if err := m.stores[i].Start(m.stoppers[i]); err != nil {
		m.t.Fatal(err)
	}
	// The sender is assumed to still exist.
	m.senders[i].AddStore(m.stores[i])
}
Example #15
func TestNodeSetHasNode(t *testing.T) {
	defer leaktest.AfterTest(t)
	nodes := newNodeSet(2)
	node := proto.NodeID(1)
	if nodes.hasNode(node) {
		t.Error("node wasn't added and should not be valid")
	}
	// Add node and verify it's valid.
	nodes.addNode(node)
	if !nodes.hasNode(node) {
		t.Error("empty node wasn't added and should not be valid")
	}
}
Example #16
// allocateNodeID increments the node id generator key to allocate
// a new, unique node id.
func allocateNodeID(db *client.KV) (proto.NodeID, error) {
	iReply := &proto.IncrementResponse{}
	if err := db.Call(proto.Increment, &proto.IncrementRequest{
		RequestHeader: proto.RequestHeader{
			Key:  engine.KeyNodeIDGenerator,
			User: storage.UserRoot,
		},
		Increment: 1,
	}, iReply); err != nil {
		return 0, util.Errorf("unable to allocate node ID: %v", err)
	}
	return proto.NodeID(iReply.NewValue), nil
}
Example #17
// String prints out the current status of the cluster.
func (c *Cluster) String() string {
	storesRangeCounts := make(map[proto.StoreID]int)
	for _, r := range c.ranges {
		for _, storeID := range r.getStoreIDs() {
			storesRangeCounts[storeID]++
		}
	}

	var nodeIDs []int
	for nodeID := range c.nodes {
		nodeIDs = append(nodeIDs, int(nodeID))
	}
	sort.Ints(nodeIDs)

	var buf bytes.Buffer
	buf.WriteString("Node Info:\n")
	for _, nodeID := range nodeIDs {
		n := c.nodes[proto.NodeID(nodeID)]
		buf.WriteString(n.String())
		buf.WriteString("\n")
	}

	var storeIDs []int
	for storeID := range c.stores {
		storeIDs = append(storeIDs, int(storeID))
	}
	sort.Ints(storeIDs)

	buf.WriteString("Store Info:\n")
	for _, storeID := range storeIDs {
		s := c.stores[proto.StoreID(storeID)]
		buf.WriteString(s.String(storesRangeCounts[proto.StoreID(storeID)]))
		buf.WriteString("\n")
	}

	var rangeIDs []int
	for rangeID := range c.ranges {
		rangeIDs = append(rangeIDs, int(rangeID))
	}
	sort.Ints(rangeIDs)

	buf.WriteString("Range Info:\n")
	for _, rangeID := range rangeIDs {
		r := c.ranges[proto.RangeID(rangeID)]
		buf.WriteString(r.String())
		buf.WriteString("\n")
	}

	return buf.String()
}
Example #18
// allocateNodeID increments the node id generator key to allocate
// a new, unique node id. It will retry indefinitely on retryable
// errors.
func allocateNodeID(db *client.DB) (proto.NodeID, error) {
	var id proto.NodeID
	err := retry.WithBackoff(allocRetryOptions, func() (retry.Status, error) {
		r, err := db.Inc(keys.NodeIDGenerator, 1)
		if err != nil {
			status := retry.Break
			if _, ok := err.(util.Retryable); ok {
				status = retry.Continue
			}
			return status, util.Errorf("unable to allocate node ID: %s", err)
		}
		id = proto.NodeID(r.ValueInt())
		return retry.Break, nil
	})
	return id, err
}
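// allocRetryOptions is defined elsewhere; a hypothetical definition with
// illustrative values (field names and values are assumptions, not the
// actual definition):
//
//	var allocRetryOptions = retry.Options{
//		Backoff:    50 * time.Millisecond,
//		MaxBackoff: 5 * time.Second,
//		Constant:   2,
//	}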
Example #19
// TestAllocatorRelaxConstraints verifies that attribute constraints
// will be relaxed in order to match nodes lacking required attributes,
// if necessary to find an allocation target.
func TestAllocatorRelaxConstraints(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, _, stopper := createTestStore(t)
	defer stopper.Stop()
	newStoreGossiper(s.Gossip()).gossipStores(multiDCStores, t)

	testCases := []struct {
		required         []string // attribute strings
		existing         []int    // existing store/node ID
		relaxConstraints bool     // allow constraints to be relaxed?
		expID            int      // expected store/node ID on allocate
		expErr           bool
	}{
		// The two stores in the system have attributes:
		//  storeID=1 {"a", "ssd"}
		//  storeID=2 {"b", "ssd"}
		{[]string{"a", "ssd"}, []int{}, true, 1, false},
		{[]string{"a", "ssd"}, []int{1}, true, 2, false},
		{[]string{"a", "ssd"}, []int{1}, false, 0, true},
		{[]string{"a", "ssd"}, []int{1, 2}, true, 0, true},
		{[]string{"b", "ssd"}, []int{}, true, 2, false},
		{[]string{"b", "ssd"}, []int{1}, true, 2, false},
		{[]string{"b", "ssd"}, []int{2}, false, 0, true},
		{[]string{"b", "ssd"}, []int{2}, true, 1, false},
		{[]string{"b", "ssd"}, []int{1, 2}, true, 0, true},
		{[]string{"b", "hdd"}, []int{}, true, 2, false},
		{[]string{"b", "hdd"}, []int{2}, true, 1, false},
		{[]string{"b", "hdd"}, []int{2}, false, 0, true},
		{[]string{"b", "hdd"}, []int{1, 2}, true, 0, true},
		{[]string{"b", "ssd", "gpu"}, []int{}, true, 2, false},
		{[]string{"b", "hdd", "gpu"}, []int{}, true, 2, false},
	}
	for i, test := range testCases {
		var existing []proto.Replica
		for _, id := range test.existing {
			existing = append(existing, proto.Replica{NodeID: proto.NodeID(id), StoreID: proto.StoreID(id)})
		}
		result, err := s.allocator().AllocateTarget(proto.Attributes{Attrs: test.required}, existing, test.relaxConstraints)
		if haveErr := (err != nil); haveErr != test.expErr {
			t.Errorf("%d: expected error %t; got %t: %s", i, test.expErr, haveErr, err)
		} else if err == nil && proto.StoreID(test.expID) != result.StoreID {
			t.Errorf("%d: expected result to have store %d; got %+v", i, test.expID, result)
		}
	}
}
Example #20
// TestNodeEventFeedTransactionRestart verifies that calls which indicate a
// transaction restart are counted as successful.
func TestNodeEventFeedTransactionRestart(t *testing.T) {
	defer leaktest.AfterTest(t)

	stopper := stop.NewStopper()
	feed := util.NewFeed(stopper)
	nodeID := proto.NodeID(1)
	nodefeed := status.NewNodeEventFeed(nodeID, feed)
	ner := nodeEventReader{}
	ner.readEvents(feed)

	nodefeed.CallComplete(&proto.GetRequest{}, &proto.GetResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_BACKOFF,
			},
		},
	})
	nodefeed.CallComplete(&proto.GetRequest{}, &proto.GetResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_IMMEDIATE,
			},
		},
	})
	nodefeed.CallComplete(&proto.PutRequest{}, &proto.PutResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_ABORT,
			},
		},
	})

	feed.Flush()
	stopper.Stop()

	exp := []string{
		"Get",
		"Get",
		"failed Put",
	}

	if !reflect.DeepEqual(exp, ner.perNodeFeeds[nodeID]) {
		t.Fatalf("received unexpected events: %s", ner.eventFeedString())
	}
}
Example #21
// addStore creates a new store on the same Transport but doesn't create any ranges.
func (m *multiTestContext) addStore(t *testing.T) {
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := storage.NewStore(m.clock, eng, m.db, m.gossip, m.transport)
	err := store.Bootstrap(proto.StoreIdent{
		NodeID:  proto.NodeID(len(m.stores) + 1),
		StoreID: proto.StoreID(len(m.stores) + 1),
	})
	if err != nil {
		t.Fatal(err)
	}

	if len(m.stores) == 0 {
		// Bootstrap the initial range on the first store
		if err := store.BootstrapRange(); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(); err != nil {
		t.Fatal(err)
	}
	m.engines = append(m.engines, eng)
	m.stores = append(m.stores, store)
	m.sender.AddStore(store)
}
Example #22
func TestNodeEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)

	nodeDesc := proto.NodeDescriptor{
		NodeID: proto.NodeID(99),
	}

	// A testCase corresponds to a single Store event type. Each case contains a
	// method which publishes a single event to the given storeEventPublisher,
	// and an expected result interface which should match the produced
	// event.
	testCases := []struct {
		publishTo func(status.NodeEventFeed)
		expected  interface{}
	}{
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.StartNode(nodeDesc, 100)
			},
			expected: &status.StartNodeEvent{
				Desc:      nodeDesc,
				StartedAt: 100,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewGet(proto.Key("abc"))), nil)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewPut(proto.Key("abc"), proto.Value{Bytes: []byte("def")})), nil)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Put,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewGet(proto.Key("abc"))), proto.NewError(util.Errorf("error")))
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Batch,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewGet(proto.Key("abc"))), &proto.Error{
					Index:   &proto.ErrPosition{Index: 0},
					Message: "boo",
				})
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
	}

	// Compile expected events into a single slice.
	expectedEvents := make([]interface{}, len(testCases))
	for i := range testCases {
		expectedEvents[i] = testCases[i].expected
	}

	events := make([]interface{}, 0, len(expectedEvents))

	// Run test cases directly through a feed.
	stopper := stop.NewStopper()
	defer stopper.Stop()
	feed := util.NewFeed(stopper)
	feed.Subscribe(func(event interface{}) {
		events = append(events, event)
	})

	nodefeed := status.NewNodeEventFeed(proto.NodeID(1), feed)
	for _, tc := range testCases {
		tc.publishTo(nodefeed)
	}

	feed.Flush()

	if a, e := events, expectedEvents; !reflect.DeepEqual(a, e) {
		t.Errorf("received incorrect events.\nexpected: %v\nactual: %v", e, a)
	}
}
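// wrap is assumed to batch a single request, which would explain why an
// un-indexed error above reports Method proto.Batch while an error carrying
// an Index resolves to the wrapped call. A plausible sketch:
func wrap(args proto.Request) *proto.BatchRequest {
	ba := &proto.BatchRequest{}
	ba.Add(args)
	return ba
}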
Example #23
func (m *StoreStatus) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + msglen
			if msglen < 0 {
				return ErrInvalidLengthStatus
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Desc.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
			}
			m.NodeID = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.NodeID |= (github_com_cockroachdb_cockroach_proto.NodeID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RangeCount", wireType)
			}
			m.RangeCount = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.RangeCount |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
			}
			m.StartedAt = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.StartedAt |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
			}
			m.UpdatedAt = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.UpdatedAt |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + msglen
			if msglen < 0 {
				return ErrInvalidLengthStatus
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Stats.Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 7:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LeaderRangeCount", wireType)
			}
			m.LeaderRangeCount = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.LeaderRangeCount |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 8:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReplicatedRangeCount", wireType)
			}
			m.ReplicatedRangeCount = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.ReplicatedRangeCount |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 9:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field AvailableRangeCount", wireType)
			}
			m.AvailableRangeCount = 0
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.AvailableRangeCount |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			var sizeOfWire int
			for {
				sizeOfWire++
				wire >>= 7
				if wire == 0 {
					break
				}
			}
			iNdEx -= sizeOfWire
			skippy, err := skipStatus(data[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStatus
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	return nil
}
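// The nested loops in this generated method all decode protobuf base-128
// varints. The same logic as a standalone sketch: each byte carries seven
// payload bits (least-significant group first); the high bit flags
// continuation.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}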
Example #24
// addNewNodeWithStore adds a new node with a single store.
func (c *Cluster) addNewNodeWithStore() {
	nodeID := proto.NodeID(len(c.nodes))
	c.nodes[nodeID] = newNode(nodeID)
	c.addStore(nodeID)
}
Example #25
func (m *LogEntry) Unmarshal(data []byte) error {
	l := len(data)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := data[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Severity", wireType)
			}
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Severity |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
			}
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Time |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ThreadID", wireType)
			}
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.ThreadID |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field File", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + int(stringLen)
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.File = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
			}
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				m.Line |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + int(stringLen)
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Format = string(data[iNdEx:postIndex])
			iNdEx = postIndex
		case 7:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Args = append(m.Args, LogEntry_Arg{})
			if err := m.Args[len(m.Args)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 8:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
			}
			var v github_com_cockroachdb_cockroach_proto.NodeID
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (github_com_cockroachdb_cockroach_proto.NodeID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NodeID = &v
		case 9:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field StoreID", wireType)
			}
			var v github_com_cockroachdb_cockroach_proto.StoreID
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (github_com_cockroachdb_cockroach_proto.StoreID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.StoreID = &v
		case 10:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RangeID", wireType)
			}
			var v github_com_cockroachdb_cockroach_proto.RangeID
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (github_com_cockroachdb_cockroach_proto.RangeID(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.RangeID = &v
		case 11:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
			}
			var v github_com_cockroachdb_cockroach_proto.Method
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				v |= (github_com_cockroachdb_cockroach_proto.Method(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Method = &v
		case 12:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append([]byte{}, data[iNdEx:postIndex]...)
			iNdEx = postIndex
		case 13:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Stacks", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := data[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Stacks = append([]byte{}, data[iNdEx:postIndex]...)
			iNdEx = postIndex
		default:
			var sizeOfWire int
			for {
				sizeOfWire++
				wire >>= 7
				if wire == 0 {
					break
				}
			}
			iNdEx -= sizeOfWire
			skippy, err := skipLog(data[iNdEx:])
			if err != nil {
				return err
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	return nil
}
Example #26
// TestNodeStatusRecorder verifies that the time series data generated by a
// recorder matches the data added to the monitor.
func TestNodeStatusRecorder(t *testing.T) {
	defer leaktest.AfterTest(t)
	nodeDesc := proto.NodeDescriptor{
		NodeID: proto.NodeID(1),
	}
	storeDesc1 := proto.StoreDescriptor{
		StoreID: proto.StoreID(1),
		Capacity: proto.StoreCapacity{
			Capacity:  100,
			Available: 50,
		},
	}
	storeDesc2 := proto.StoreDescriptor{
		StoreID: proto.StoreID(2),
		Capacity: proto.StoreCapacity{
			Capacity:  200,
			Available: 75,
		},
	}
	desc1 := &proto.RangeDescriptor{
		RangeID:  1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("b"),
	}
	desc2 := &proto.RangeDescriptor{
		RangeID:  2,
		StartKey: proto.Key("b"),
		EndKey:   proto.Key("c"),
	}
	stats := engine.MVCCStats{
		LiveBytes:       1,
		KeyBytes:        2,
		ValBytes:        3,
		IntentBytes:     4,
		LiveCount:       5,
		KeyCount:        6,
		ValCount:        7,
		IntentCount:     8,
		IntentAge:       9,
		GCBytesAge:      10,
		LastUpdateNanos: 1 * 1E9,
	}

	// Create a monitor and a recorder which uses the monitor.
	monitor := NewNodeStatusMonitor()
	manual := hlc.NewManualClock(100)
	recorder := NewNodeStatusRecorder(monitor, hlc.NewClock(manual.UnixNano))

	// Initialization events.
	monitor.OnStartNode(&StartNodeEvent{
		Desc:      nodeDesc,
		StartedAt: 50,
	})
	monitor.OnStartStore(&storage.StartStoreEvent{
		StoreID:   proto.StoreID(1),
		StartedAt: 60,
	})
	monitor.OnStartStore(&storage.StartStoreEvent{
		StoreID:   proto.StoreID(2),
		StartedAt: 70,
	})
	monitor.OnStoreStatus(&storage.StoreStatusEvent{
		Desc: &storeDesc1,
	})
	monitor.OnStoreStatus(&storage.StoreStatusEvent{
		Desc: &storeDesc2,
	})

	// Add some data to the monitor by simulating incoming events.
	monitor.OnBeginScanRanges(&storage.BeginScanRangesEvent{
		StoreID: proto.StoreID(1),
	})
	monitor.OnBeginScanRanges(&storage.BeginScanRangesEvent{
		StoreID: proto.StoreID(2),
	})
	monitor.OnRegisterRange(&storage.RegisterRangeEvent{
		StoreID: proto.StoreID(1),
		Desc:    desc1,
		Stats:   stats,
		Scan:    true,
	})
	monitor.OnRegisterRange(&storage.RegisterRangeEvent{
		StoreID: proto.StoreID(1),
		Desc:    desc2,
		Stats:   stats,
		Scan:    true,
	})
	monitor.OnRegisterRange(&storage.RegisterRangeEvent{
		StoreID: proto.StoreID(2),
		Desc:    desc1,
		Stats:   stats,
		Scan:    true,
	})
	monitor.OnEndScanRanges(&storage.EndScanRangesEvent{
		StoreID: proto.StoreID(1),
	})
	monitor.OnEndScanRanges(&storage.EndScanRangesEvent{
		StoreID: proto.StoreID(2),
	})
	monitor.OnUpdateRange(&storage.UpdateRangeEvent{
		StoreID: proto.StoreID(1),
		Desc:    desc1,
		Delta:   stats,
	})
	// Periodically published events.
	monitor.OnReplicationStatus(&storage.ReplicationStatusEvent{
		StoreID:              proto.StoreID(1),
		LeaderRangeCount:     1,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 0,
	})
	monitor.OnReplicationStatus(&storage.ReplicationStatusEvent{
		StoreID:              proto.StoreID(2),
		LeaderRangeCount:     1,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 0,
	})
	// Node Events.
	monitor.OnCallSuccess(&CallSuccessEvent{
		NodeID: proto.NodeID(1),
		Method: proto.Get,
	})
	monitor.OnCallSuccess(&CallSuccessEvent{
		NodeID: proto.NodeID(1),
		Method: proto.Put,
	})
	monitor.OnCallError(&CallErrorEvent{
		NodeID: proto.NodeID(1),
		Method: proto.Scan,
	})

	generateNodeData := func(nodeID int, name string, time, val int64) proto.TimeSeriesData {
		return proto.TimeSeriesData{
			Name: fmt.Sprintf(nodeTimeSeriesNameFmt, name, proto.NodeID(nodeID)),
			Datapoints: []*proto.TimeSeriesDatapoint{
				{
					TimestampNanos: time,
					Value:          float64(val),
				},
			},
		}
	}

	generateStoreData := func(storeID int, name string, time, val int64) proto.TimeSeriesData {
		return proto.TimeSeriesData{
			Name: fmt.Sprintf(storeTimeSeriesNameFmt, name, proto.StoreID(storeID)),
			Datapoints: []*proto.TimeSeriesDatapoint{
				{
					TimestampNanos: time,
					Value:          float64(val),
				},
			},
		}
	}

	// Generate the expected return value of recorder.GetTimeSeriesData(). This
	// data was manually generated, but is based on a simple multiple of the
	// "stats" collection above.
	expected := []proto.TimeSeriesData{
		// Store 1 should have accumulated 3x stats from two ranges.
		generateStoreData(1, "livebytes", 100, 3),
		generateStoreData(1, "keybytes", 100, 6),
		generateStoreData(1, "valbytes", 100, 9),
		generateStoreData(1, "intentbytes", 100, 12),
		generateStoreData(1, "livecount", 100, 15),
		generateStoreData(1, "keycount", 100, 18),
		generateStoreData(1, "valcount", 100, 21),
		generateStoreData(1, "intentcount", 100, 24),
		generateStoreData(1, "intentage", 100, 27),
		generateStoreData(1, "gcbytesage", 100, 30),
		generateStoreData(1, "lastupdatenanos", 100, 1*1e9),
		generateStoreData(1, "ranges", 100, 2),
		generateStoreData(1, "ranges.leader", 100, 1),
		generateStoreData(1, "ranges.available", 100, 2),
		generateStoreData(1, "ranges.replicated", 100, 0),
		generateStoreData(1, "capacity", 100, 100),
		generateStoreData(1, "capacity.available", 100, 50),

		// Store 2 should have accumulated one copy of the stats.
		generateStoreData(2, "livebytes", 100, 1),
		generateStoreData(2, "keybytes", 100, 2),
		generateStoreData(2, "valbytes", 100, 3),
		generateStoreData(2, "intentbytes", 100, 4),
		generateStoreData(2, "livecount", 100, 5),
		generateStoreData(2, "keycount", 100, 6),
		generateStoreData(2, "valcount", 100, 7),
		generateStoreData(2, "intentcount", 100, 8),
		generateStoreData(2, "intentage", 100, 9),
		generateStoreData(2, "gcbytesage", 100, 10),
		generateStoreData(2, "lastupdatenanos", 100, 1*1e9),
		generateStoreData(2, "ranges", 100, 1),
		generateStoreData(2, "ranges.leader", 100, 1),
		generateStoreData(2, "ranges.available", 100, 2),
		generateStoreData(2, "ranges.replicated", 100, 0),
		generateStoreData(2, "capacity", 100, 200),
		generateStoreData(2, "capacity.available", 100, 75),

		// Node stats.
		generateNodeData(1, "calls.success", 100, 2),
		generateNodeData(1, "calls.error", 100, 1),
	}

	actual := recorder.GetTimeSeriesData()
	sort.Sort(byTimeAndName(actual))
	sort.Sort(byTimeAndName(expected))
	if a, e := actual, expected; !reflect.DeepEqual(a, e) {
		t.Errorf("recorder did not yield expected time series collection; expected %v, got %v", e, a)
	}

	expectedNodeSummary := &NodeStatus{
		Desc:      nodeDesc,
		StartedAt: 50,
		UpdatedAt: 100,
		StoreIDs: []proto.StoreID{
			proto.StoreID(1),
			proto.StoreID(2),
		},
		RangeCount:           3,
		LeaderRangeCount:     2,
		AvailableRangeCount:  4,
		ReplicatedRangeCount: 0,
	}
	expectedStoreSummaries := []storage.StoreStatus{
		{
			Desc:                 storeDesc1,
			NodeID:               proto.NodeID(1),
			UpdatedAt:            100,
			StartedAt:            60,
			RangeCount:           2,
			LeaderRangeCount:     1,
			AvailableRangeCount:  2,
			ReplicatedRangeCount: 0,
		},
		{
			Desc:                 storeDesc2,
			NodeID:               proto.NodeID(1),
			StartedAt:            70,
			UpdatedAt:            100,
			RangeCount:           1,
			LeaderRangeCount:     1,
			AvailableRangeCount:  2,
			ReplicatedRangeCount: 0,
		},
	}
	// Use base stats to generate expected summary stat values.
	for i := 0; i < 3; i++ {
		expectedStoreSummaries[0].Stats.Add(&stats)
	}
	expectedStoreSummaries[1].Stats.Add(&stats)
	for _, ss := range expectedStoreSummaries {
		expectedNodeSummary.Stats.Add(&ss.Stats)
	}

	nodeSummary, storeSummaries := recorder.GetStatusSummaries()
	sort.Sort(byStoreDescID(storeSummaries))
	sort.Sort(byStoreID(nodeSummary.StoreIDs))
	if a, e := nodeSummary, expectedNodeSummary; !reflect.DeepEqual(a, e) {
		t.Errorf("recorder did not produce expected NodeSummary; expected %v, got %v", e, a)
	}
	if a, e := storeSummaries, expectedStoreSummaries; !reflect.DeepEqual(a, e) {
		t.Errorf("recorder did not produce expected StoreSummaries; expected %v, got %v", e, a)
	}
}
Example #27
func Example_rebalancing() {
	// Model a set of stores in a cluster,
	// randomly adding / removing stores and adding bytes.
	g := gossip.New(nil, 0, nil)
	alloc := newAllocator(g)
	alloc.randGen = rand.New(rand.NewSource(0))
	alloc.deterministic = true

	var wg sync.WaitGroup
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { wg.Done() })

	const generations = 100
	const nodes = 20

	// Initialize testStores.
	var testStores [nodes]testStore
	for i := 0; i < len(testStores); i++ {
		testStores[i].StoreID = proto.StoreID(i)
		testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
		testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
	}
	// Initialize the cluster with a single range.
	testStores[0].Add(alloc.randGen.Int63n(1 << 20))

	for i := 0; i < generations; i++ {
		// First loop through test stores and add data.
		wg.Add(len(testStores))
		for j := 0; j < len(testStores); j++ {
			// Add a pretend range to the testStore if it already has at least one.
			if testStores[j].Capacity.RangeCount > 0 {
				testStores[j].Add(alloc.randGen.Int63n(1 << 20))
			}
			key := gossip.MakeStoreKey(proto.StoreID(j))
			if err := g.AddInfoProto(key, &testStores[j].StoreDescriptor, 0); err != nil {
				panic(err)
			}
		}
		wg.Wait()

		// Next loop through test stores and maybe rebalance.
		for j := 0; j < len(testStores); j++ {
			ts := &testStores[j]
			if alloc.ShouldRebalance(&testStores[j].StoreDescriptor) {
				target := alloc.RebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
				if target != nil {
					testStores[j].Rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
				}
			}
		}

		// Output store capacities as 3-digit decimal values scaled to a max of 999.
		if i%(generations/50) == 0 {
			var maxBytes int64
			for j := 0; j < len(testStores); j++ {
				bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
				if bytes > maxBytes {
					maxBytes = bytes
				}
			}
			if maxBytes > 0 {
				for j := 0; j < len(testStores); j++ {
					endStr := " "
					if j == len(testStores)-1 {
						endStr = ""
					}
					bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
					fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
				}
				fmt.Printf("\n")
			}
		}
	}

	var totBytes int64
	var totRanges int32
	for i := 0; i < len(testStores); i++ {
		totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
		totRanges += testStores[i].Capacity.RangeCount
	}
	fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)

	// Output:
	// 999 000 000 000 000 000 000 739 000 000 000 000 000 000 000 000 000 000 000 000
	// 999 107 000 000 204 000 000 375 000 000 000 000 000 000 000 000 000 000 536 000
	// 999 310 000 262 872 000 000 208 000 705 000 526 000 000 439 000 000 607 933 000
	// 812 258 000 220 999 673 402 480 000 430 516 374 000 431 318 000 551 714 917 000
	// 582 625 185 334 720 589 647 619 000 300 483 352 279 502 208 665 816 684 999 374
	// 751 617 771 542 738 676 665 525 309 435 612 449 457 616 306 837 993 754 999 445
	// 759 659 828 478 693 622 594 591 349 458 630 538 526 613 462 827 879 787 999 550
	// 861 658 828 559 801 660 681 560 487 529 652 686 642 716 575 999 989 875 989 581
	// 775 647 724 557 779 662 670 494 535 502 681 676 624 695 561 961 999 772 888 592
	// 856 712 753 661 767 658 717 606 529 615 755 699 672 700 576 955 999 755 861 671
	// 882 735 776 685 844 643 740 578 610 688 787 741 661 767 587 999 955 809 803 731
	// 958 716 789 719 861 689 821 608 634 724 800 782 694 799 619 994 999 851 812 818
	// 949 726 788 664 873 633 749 599 680 714 790 728 663 842 628 999 978 816 823 791
	// 923 698 792 712 816 605 774 651 661 728 802 718 670 819 714 999 966 801 829 791
	// 962 779 847 737 900 675 811 691 745 778 835 812 680 894 790 999 989 872 923 799
	// 967 812 826 772 891 685 828 683 761 808 864 820 643 873 783 969 999 873 910 781
	// 923 813 837 739 867 672 792 664 773 772 879 803 610 845 740 957 999 867 912 732
	// 952 803 866 759 881 655 765 668 803 772 929 762 601 844 751 973 999 892 864 731
	// 970 777 867 800 859 639 774 662 787 760 906 751 595 854 732 989 999 853 859 762
	// 943 776 872 787 861 686 780 663 789 793 926 784 612 832 733 999 968 868 827 767
	// 914 801 912 802 878 704 800 685 818 808 939 759 627 844 717 999 976 872 828 757
	// 935 806 911 797 887 710 798 711 826 824 938 775 614 870 716 999 986 886 803 767
	// 991 851 898 856 872 795 828 782 826 852 963 797 710 868 775 994 999 923 896 794
	// 999 924 866 877 884 883 886 836 846 869 953 851 762 887 858 985 949 900 917 836
	// 999 910 887 878 897 890 906 868 906 903 983 947 801 895 913 976 924 890 904 898
	// 955 884 888 916 886 879 901 872 898 883 999 874 829 888 892 937 918 889 891 862
	// 974 952 957 990 950 976 945 946 980 961 999 975 942 926 957 994 965 946 960 960
	// 949 929 952 999 929 961 943 946 993 918 984 961 952 919 953 950 952 941 949 934
	// 907 999 916 935 903 903 909 907 960 939 973 912 901 885 916 910 941 911 906 913
	// 939 999 948 948 945 962 951 954 952 964 996 942 975 962 962 956 971 969 975 969
	// 940 974 964 947 971 975 949 954 953 970 992 971 981 973 948 962 999 969 978 975
	// 950 971 953 938 962 967 930 964 953 978 999 945 974 972 951 950 998 951 949 962
	// 934 946 943 936 942 949 929 956 928 970 989 944 945 923 987 927 999 942 931 944
	// 939 957 942 958 951 970 937 946 930 950 940 959 963 937 973 943 999 931 949 940
	// 933 935 945 929 933 960 937 935 919 918 930 931 950 924 969 935 999 943 949 926
	// 959 941 948 952 948 957 936 937 943 930 955 962 953 949 980 948 999 934 980 942
	// 950 973 954 962 949 964 935 949 925 936 951 962 979 962 999 942 990 948 969 959
	// 937 993 958 949 960 960 942 954 969 950 951 952 974 970 999 927 979 964 975 944
	// 981 986 971 968 964 984 954 959 985 979 966 963 994 963 999 970 991 971 988 965
	// 967 997 961 957 959 985 956 940 955 955 957 955 970 952 979 964 999 951 960 968
	// 937 969 931 950 945 954 932 925 954 946 944 926 955 938 957 949 999 934 947 938
	// 958 967 954 955 971 973 946 934 979 947 944 958 954 954 960 948 999 936 960 951
	// 950 948 940 958 937 955 928 927 953 923 935 939 934 921 934 934 999 922 940 938
	// 960 960 929 962 955 955 926 935 957 928 939 941 938 926 941 924 999 923 957 942
	// 979 958 947 987 980 972 945 943 984 939 951 943 944 946 942 942 999 928 970 943
	// 981 941 931 961 969 962 927 935 985 925 964 945 946 939 946 938 999 933 964 928
	// 980 944 929 970 973 955 942 937 977 920 955 929 937 946 935 933 999 947 956 926
	// 980 948 926 981 938 939 936 936 963 949 965 935 943 946 933 933 999 947 955 943
	// 968 959 945 941 929 926 924 941 970 951 959 941 924 952 931 943 999 941 951 950
	// 961 946 930 923 933 932 953 937 954 940 964 944 931 952 939 935 999 936 945 948
	// Total bytes=996294324, ranges=1897
}
Example #28
func testContext() context.Context {
	ctx := context.Background()
	return Add(ctx, NodeID, proto.NodeID(1), StoreID, proto.StoreID(2), RangeID, proto.RangeID(3), Method, proto.Get, Key, proto.Key("key"))
}
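// Add is assumed to attach alternating key/value pairs to the context;
// a minimal sketch (the real implementation may differ):
func Add(ctx context.Context, kvs ...interface{}) context.Context {
	for i := 0; i+1 < len(kvs); i += 2 {
		ctx = context.WithValue(ctx, kvs[i], kvs[i+1])
	}
	return ctx
}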
Example #29
// ExampleAllocatorRebalancing models a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
func ExampleAllocatorRebalancing() {
	g := gossip.New(nil, 0, nil)
	alloc := newAllocator(g)
	alloc.randGen = rand.New(rand.NewSource(0))
	alloc.deterministic = true

	var wg sync.WaitGroup
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyCapacityPrefix), func(_ string, _ bool) { wg.Done() })

	const generations = 100
	const nodes = 20

	// Initialize testStores.
	var testStores [nodes]testStore
	for i := 0; i < len(testStores); i++ {
		testStores[i].StoreID = proto.StoreID(i)
		testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
		testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
	}
	// Initialize the cluster with a single range.
	testStores[0].Add(alloc.randGen.Int63n(1 << 20))

	for i := 0; i < generations; i++ {
		// First loop through test stores and add data.
		wg.Add(len(testStores))
		for j := 0; j < len(testStores); j++ {
			// Add a pretend range to the testStore if it already has at least one.
			if testStores[j].Capacity.RangeCount > 0 {
				testStores[j].Add(alloc.randGen.Int63n(1 << 20))
			}
			key := gossip.MakeCapacityKey(proto.NodeID(j), proto.StoreID(j))
			if err := g.AddInfo(key, testStores[j].StoreDescriptor, 0); err != nil {
				panic(err)
			}
		}
		wg.Wait()

		// Next loop through test stores and maybe rebalance.
		for j := 0; j < len(testStores); j++ {
			ts := &testStores[j]
			if alloc.ShouldRebalance(&testStores[j].StoreDescriptor) {
				target := alloc.RebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
				if target != nil {
					testStores[j].Rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
				}
			}
		}

		// Output store capacities as 3-digit decimal values scaled to a max of 999.
		if i%(generations/50) == 0 {
			var maxBytes int64
			for j := 0; j < len(testStores); j++ {
				bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
				if bytes > maxBytes {
					maxBytes = bytes
				}
			}
			if maxBytes > 0 {
				for j := 0; j < len(testStores); j++ {
					endStr := " "
					if j == len(testStores)-1 {
						endStr = ""
					}
					bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
					fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
				}
				fmt.Printf("\n")
			}
		}
	}

	var totBytes int64
	var totRanges int32
	for i := 0; i < len(testStores); i++ {
		totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
		totRanges += testStores[i].Capacity.RangeCount
	}
	fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)

	// Output:
	// 999 000 000 000 000 000 000 739 000 000 000 000 000 000 000 000 000 000 000 000
	// 999 000 000 000 204 000 000 375 000 000 107 000 000 000 000 000 000 000 000 536
	// 942 000 000 463 140 000 000 646 000 288 288 000 442 000 058 647 000 000 316 999
	// 880 000 412 630 365 745 445 565 122 407 380 570 276 000 271 709 000 718 299 999
	// 925 000 667 600 555 975 704 552 272 491 773 890 584 000 407 974 000 930 476 999
	// 990 967 793 579 493 999 698 453 616 608 777 755 709 425 455 984 483 698 267 931
	// 965 999 869 606 635 908 630 585 567 577 818 870 740 621 550 868 805 790 411 913
	// 953 995 990 624 617 947 562 609 670 658 909 952 835 851 641 958 924 999 526 987
	// 999 923 901 571 687 915 636 636 674 685 831 881 847 820 702 905 897 983 509 981
	// 999 884 809 585 691 826 640 572 748 641 754 887 758 848 643 927 865 897 541 956
	// 999 856 891 594 691 745 602 615 766 663 814 834 719 886 733 925 882 911 593 926
	// 999 890 900 653 707 759 642 697 771 732 851 858 748 869 842 953 903 928 655 923
	// 999 924 909 696 748 797 693 689 806 766 841 902 705 897 874 914 913 916 730 892
	// 999 948 892 704 740 821 685 656 859 772 893 911 690 878 824 935 928 941 741 860
	// 999 948 931 697 770 782 697 666 893 761 944 869 658 902 816 925 923 983 742 831
	// 999 878 901 736 750 737 677 647 869 731 930 825 631 880 775 947 949 930 687 810
	// 999 890 910 764 778 757 709 663 849 777 964 837 672 891 814 978 944 946 721 868
	// 985 895 968 806 791 791 720 694 883 819 999 847 652 888 790 995 950 947 692 843
	// 960 903 956 794 815 779 746 706 891 824 958 830 665 886 757 999 931 969 701 861
	// 999 928 954 805 807 822 764 734 910 829 952 827 678 927 785 980 936 962 677 836
	// 999 903 924 800 769 822 776 730 886 815 935 781 668 890 805 948 929 965 676 837
	// 999 926 935 836 782 836 809 756 897 835 937 781 690 894 804 979 951 978 667 832
	// 999 937 936 875 843 872 854 793 908 873 950 808 714 901 860 981 975 962 693 866
	// 988 957 938 898 922 912 916 886 905 912 964 867 764 915 911 992 999 985 776 896
	// 945 959 922 910 937 913 938 944 957 921 993 916 898 957 928 999 976 997 855 957
	// 980 986 944 956 963 920 966 967 999 966 991 956 981 973 955 998 990 954 994 981
	// 956 985 942 945 950 900 933 949 981 969 946 935 963 951 931 999 936 941 972 963
	// 940 999 964 949 941 974 967 937 970 975 965 951 976 968 949 993 944 949 977 964
	// 926 999 973 932 944 952 933 944 963 965 927 940 964 960 938 995 932 935 968 951
	// 907 999 919 957 941 958 934 935 930 941 940 926 966 933 920 973 937 923 938 946
	// 924 999 914 963 976 945 911 936 929 951 930 930 972 935 941 977 932 960 939 958
	// 942 999 950 961 987 942 928 945 938 941 939 936 985 937 969 985 952 958 957 948
	// 956 999 950 947 943 939 949 934 929 935 940 942 943 957 988 974 933 936 938 951
	// 967 990 950 949 964 952 951 922 943 940 954 956 962 946 982 999 945 949 940 954
	// 970 999 952 959 970 955 957 974 937 965 968 947 950 958 947 993 953 938 958 950
	// 945 964 954 963 965 959 967 961 925 978 954 944 968 937 960 999 947 947 961 960
	// 930 957 938 974 956 944 968 930 944 972 930 946 958 974 940 999 961 945 953 947
	// 966 980 954 989 979 960 969 995 961 986 954 980 980 971 968 999 968 977 979 972
	// 963 953 958 986 990 947 973 955 955 983 974 981 961 964 977 999 984 982 966 964
	// 964 968 975 993 999 955 965 958 972 995 978 981 956 966 981 987 978 976 985 966
	// 967 957 954 999 963 940 968 966 941 966 971 969 957 961 949 940 968 963 988 947
	// 951 939 952 980 937 948 964 970 941 965 979 966 941 940 952 938 973 955 999 934
	// 939 958 941 998 942 951 962 942 962 951 972 978 946 935 958 935 950 947 999 953
	// 959 952 938 999 936 957 961 950 937 954 975 971 958 930 938 930 944 939 978 950
	// 957 943 963 999 947 965 953 937 966 953 978 972 963 937 933 945 944 937 979 952
	// 945 951 956 999 926 948 958 923 947 934 951 961 955 941 949 936 945 929 960 947
	// 956 960 975 999 945 977 956 934 954 943 961 956 956 954 960 954 958 929 969 938
	// 947 966 993 999 944 963 942 939 963 935 952 957 968 947 962 946 962 947 959 942
	// 940 961 999 992 935 946 938 932 968 939 957 938 970 949 964 934 948 957 952 939
	// 944 955 999 978 940 932 937 944 957 936 957 945 958 955 947 933 956 948 947 942
	// Total bytes=1003302292, ranges=1899
}
Example #30
func TestNodeStatusMonitor(t *testing.T) {
	defer leaktest.AfterTest(t)
	desc1 := &proto.RangeDescriptor{
		RaftID:   1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("b"),
	}
	desc2 := &proto.RangeDescriptor{
		RaftID:   2,
		StartKey: proto.Key("b"),
		EndKey:   proto.Key("c"),
	}
	stats := engine.MVCCStats{
		LiveBytes:       1,
		KeyBytes:        2,
		ValBytes:        2,
		IntentBytes:     1,
		LiveCount:       1,
		KeyCount:        1,
		ValCount:        1,
		IntentCount:     1,
		IntentAge:       1,
		GCBytesAge:      1,
		LastUpdateNanos: 1 * 1E9,
	}

	monitorStopper := stop.NewStopper()
	storeStopper := stop.NewStopper()
	feed := &util.Feed{}
	monitor := NewNodeStatusMonitor()
	sub := feed.Subscribe()
	monitorStopper.RunWorker(func() {
		for event := range sub.Events() {
			storage.ProcessStoreEvent(monitor, event)
			ProcessNodeEvent(monitor, event)
		}
	})

	for i := 0; i < 3; i++ {
		id := proto.StoreID(i + 1)
		eventList := []interface{}{
			&storage.StartStoreEvent{
				StoreID: id,
			},
			&storage.BeginScanRangesEvent{ // Begin scan phase.
				StoreID: id,
			},
			&storage.UpdateRangeEvent{ // Update during scan, expect it to be ignored.
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.RegisterRangeEvent{
				StoreID: id,
				Desc:    desc2,
				Stats:   stats,
				Scan:    false, // should lead to this being ignored
			},
			&storage.RegisterRangeEvent{
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Scan:    true, // not ignored
			},
			&storage.UpdateRangeEvent{ // Update during scan after register, should be picked up
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.EndScanRangesEvent{ // End Scan.
				StoreID: id,
			},
			&storage.RegisterRangeEvent{
				StoreID: id,
				Desc:    desc2,
				Stats:   stats,
				Scan:    true, // ignored, not in ScanRanges mode
			},
			&storage.UpdateRangeEvent{
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.UpdateRangeEvent{
				StoreID: id,
				Desc:    desc1,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.SplitRangeEvent{
				StoreID: id,
				Original: storage.UpdateRangeEvent{
					StoreID: id,
					Desc:    desc1,
					Stats:   stats,
					Delta:   stats,
				},
				New: storage.RegisterRangeEvent{
					StoreID: id,
					Desc:    desc2,
					Stats:   stats,
					Scan:    false,
				},
			},
			&storage.UpdateRangeEvent{
				StoreID: id,
				Desc:    desc2,
				Stats:   stats,
				Delta:   stats,
			},
			&storage.UpdateRangeEvent{
				StoreID: id,
				Desc:    desc2,
				Stats:   stats,
				Delta:   stats,
			},
			&CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
			&CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Put,
			},
			&CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Scan,
			},
		}
		storeStopper.RunWorker(func() {
			for _, event := range eventList {
				feed.Publish(event)
			}
		})
	}

	storeStopper.Stop()
	feed.Close()
	monitorStopper.Stop()

	expectedStats := engine.MVCCStats{
		LiveBytes:       6,
		KeyBytes:        12,
		ValBytes:        12,
		IntentBytes:     6,
		LiveCount:       6,
		KeyCount:        6,
		ValCount:        6,
		IntentCount:     6,
		IntentAge:       6,
		GCBytesAge:      6,
		LastUpdateNanos: 1 * 1E9,
	}

	if a, e := len(monitor.stores), 3; a != e {
		t.Fatalf("unexpected number of stores recorded by monitor; expected %d, got %d", e, a)
	}
	for id, store := range monitor.stores {
		if a, e := store.stats, expectedStats; !reflect.DeepEqual(a, e) {
			t.Errorf("monitored stats for store %d did not match expectation: %v != %v", id, a, e)
		}
		if a, e := store.rangeCount, int64(2); a != e {
			t.Errorf("monitored range count for store %d did not match expectation: %d != %d", id, a, e)
		}
	}

	if a, e := monitor.callCount, int64(6); a != e {
		t.Errorf("monitored stats for node recorded wrong number of ops %d, expected %d", a, e)
	}
	if a, e := monitor.callErrors, int64(3); a != e {
		t.Errorf("monitored stats for node recorded wrong number of errors %d, expected %d", a, e)
	}
}