func TestIsFresh(t *testing.T) { defer leaktest.AfterTest(t) node1 := roachpb.NodeID(1) node2 := roachpb.NodeID(2) node3 := roachpb.NodeID(3) i := newInfo(float64(1)) i.NodeID = node1 if !i.isFresh(node3, i.OrigStamp-1) { t.Error("info should be fresh:", i) } if i.isFresh(node3, i.OrigStamp+1) { t.Error("info should not be fresh:", i) } if i.isFresh(node1, i.OrigStamp-1) { t.Error("info should not be fresh:", i) } if !i.isFresh(node2, i.OrigStamp-1) { t.Error("info should be fresh:", i) } // Using node 0 will always yield fresh data. if !i.isFresh(0, 0) { t.Error("info should be fresh from node0:", i) } }
func TestIsFresh(t *testing.T) { defer leaktest.AfterTest(t) const seq = 10 node1 := roachpb.NodeID(1) node2 := roachpb.NodeID(2) node3 := roachpb.NodeID(3) i := newInfo(float64(1)) i.NodeID = node1 i.peerID = node2 i.seq = seq if !i.isFresh(node3, seq-1) { t.Error("info should be fresh:", i) } if i.isFresh(node3, seq+1) { t.Error("info should not be fresh:", i) } if i.isFresh(node1, seq-1) { t.Error("info should not be fresh:", i) } if i.isFresh(node2, seq-1) { t.Error("info should not be fresh:", i) } // Using node 0 will always yield fresh data. if !i.isFresh(0, 0) { t.Error("info should be fresh from node0:", i) } }
// TestInfoStoreDistant verifies selection of infos from store with // Hops > maxHops. func TestInfoStoreDistant(t *testing.T) { defer leaktest.AfterTest(t) nodes := []roachpb.NodeID{ roachpb.NodeID(1), roachpb.NodeID(2), roachpb.NodeID(3), } is := newInfoStore(1, emptyAddr) // Add info from each address, with hop count equal to index+1. for i := 0; i < len(nodes); i++ { inf := is.newInfo(nil, time.Second) inf.Hops = uint32(i + 1) inf.NodeID = nodes[i] if err := is.addInfo(fmt.Sprintf("b.%d", i), inf); err != nil { t.Fatal(err) } } for i := 0; i < len(nodes); i++ { nodesLen := is.distant(uint32(i)).len() if nodesLen != 3-i { t.Errorf("%d nodes (not %d) should be over maxHops = %d", 3-i, nodesLen, i) } } }
// TestInfoStoreMostDistant verifies selection of most distant node & // associated hops. func TestInfoStoreMostDistant(t *testing.T) { defer leaktest.AfterTest(t)() nodes := []roachpb.NodeID{ roachpb.NodeID(1), roachpb.NodeID(2), roachpb.NodeID(3), } stopper := stop.NewStopper() defer stopper.Stop() is := newInfoStore(context.TODO(), 1, emptyAddr, stopper) // Add info from each address, with hop count equal to index+1. for i := 0; i < len(nodes); i++ { inf := is.newInfo(nil, time.Second) inf.Hops = uint32(i + 1) inf.NodeID = nodes[i] if err := is.addInfo(fmt.Sprintf("b.%d", i), inf); err != nil { t.Fatal(err) } nodeID, hops := is.mostDistant() if nodeID != inf.NodeID { t.Errorf("%d: expected node %d; got %d", i, inf.NodeID, nodeID) } if hops != inf.Hops { t.Errorf("%d: expected node %d; got %d", i, inf.Hops, hops) } } }
// TestSendRPCRetry verifies that sendRPC failed on first address but succeed on // second address, the second reply should be successfully returned back. func TestSendRPCRetry(t *testing.T) { defer leaktest.AfterTest(t) g, s := makeTestGossip(t) defer s() g.SetNodeID(1) if err := g.SetNodeDescriptor(&roachpb.NodeDescriptor{NodeID: 1}); err != nil { t.Fatal(err) } // Fill RangeDescriptor with 2 replicas var descriptor = roachpb.RangeDescriptor{ RangeID: 1, StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("z"), } for i := 1; i <= 2; i++ { addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i)) nd := &roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(i), Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()), } if err := g.AddInfoProto(gossip.MakeNodeIDKey(roachpb.NodeID(i)), nd, time.Hour); err != nil { t.Fatal(err) } descriptor.Replicas = append(descriptor.Replicas, roachpb.ReplicaDescriptor{ NodeID: roachpb.NodeID(i), StoreID: roachpb.StoreID(i), }) } // Define our rpcSend stub which returns success on the second address. 
var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) { if method == "Node.Batch" { // reply from first address failed _ = getReply() // reply from second address succeed batchReply := getReply().(*roachpb.BatchResponse) reply := &roachpb.ScanResponse{} batchReply.Add(reply) reply.Rows = append([]roachpb.KeyValue{}, roachpb.KeyValue{Key: roachpb.Key("b"), Value: roachpb.Value{}}) return []proto.Message{batchReply}, nil } return nil, util.Errorf("unexpected method %v", method) } ctx := &DistSenderContext{ RPCSend: testFn, RangeDescriptorDB: mockRangeDescriptorDB(func(_ roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) { return []roachpb.RangeDescriptor{descriptor}, nil }), } ds := NewDistSender(ctx, g) scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), 1) sr, err := client.SendWrapped(ds, nil, scan) if err != nil { t.Fatal(err) } if l := len(sr.(*roachpb.ScanResponse).Rows); l != 1 { t.Fatalf("expected 1 row; got %d", l) } }
// TestRaftAfterRemoveRange verifies that the raft state removes
// a remote node correctly after the Replica was removed from the Store.
func TestRaftAfterRemoveRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	// Make the split.
	splitArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &splitArgs); err != nil {
		t.Fatal(err)
	}
	// Replicate the second range to stores 1 and 2, then remove both
	// replicas again so the range only lives on store 0.
	rangeID := roachpb.RangeID(2)
	mtc.replicateRange(rangeID, 1, 2)
	mtc.unreplicateRange(rangeID, 2)
	mtc.unreplicateRange(rangeID, 1)
	// Wait for the removal to be processed: GetReplica on store 1 must
	// report RangeNotFoundError before we poke raft below.
	util.SucceedsWithin(t, time.Second, func() error {
		_, err := mtc.stores[1].GetReplica(rangeID)
		if _, ok := err.(*roachpb.RangeNotFoundError); ok {
			return nil
		} else if err != nil {
			return err
		}
		return util.Errorf("range still exists")
	})
	// Descriptors for the two removed replicas; IDs are derived from the
	// store IDs (the multi-test context assigns matching node/store IDs).
	replica1 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[1].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[1].StoreID()),
		StoreID:   mtc.stores[1].StoreID(),
	}
	replica2 := roachpb.ReplicaDescriptor{
		ReplicaID: roachpb.ReplicaID(mtc.stores[2].StoreID()),
		NodeID:    roachpb.NodeID(mtc.stores[2].StoreID()),
		StoreID:   mtc.stores[2].StoreID(),
	}
	// Send a raft heartbeat addressed to the now-removed replica; the
	// store must tolerate it rather than resurrecting state.
	if err := mtc.transport.Send(&storage.RaftMessageRequest{
		GroupID:     0,
		ToReplica:   replica1,
		FromReplica: replica2,
		Message: raftpb.Message{
			From: uint64(replica2.ReplicaID),
			To:   uint64(replica1.ReplicaID),
			Type: raftpb.MsgHeartbeat,
		}}); err != nil {
		t.Fatal(err)
	}
	// Execute another replica change to ensure that raft has processed
	// the heartbeat just sent.
	mtc.replicateRange(roachpb.RangeID(1), 1)
	// Expire leases to ensure any remaining intent resolutions can complete.
	// TODO(bdarnell): understand why some tests need this.
	mtc.expireLeaderLeases()
}
// TestLeastUseful verifies that the least-contributing peer node // can be determined. func TestLeastUseful(t *testing.T) { defer leaktest.AfterTest(t)() nodes := []roachpb.NodeID{ roachpb.NodeID(1), roachpb.NodeID(2), } stopper := stop.NewStopper() defer stopper.Stop() is := newInfoStore(context.TODO(), 1, emptyAddr, stopper) set := makeNodeSet(3, metric.NewGauge(metric.Metadata{Name: ""})) if is.leastUseful(set) != 0 { t.Error("not expecting a node from an empty set") } inf1 := is.newInfo(nil, time.Second) inf1.NodeID = 1 inf1.PeerID = 1 if err := is.addInfo("a1", inf1); err != nil { t.Fatal(err) } if is.leastUseful(set) != 0 { t.Error("not expecting a node from an empty set") } set.addNode(nodes[0]) if is.leastUseful(set) != nodes[0] { t.Error("expecting nodes[0] as least useful") } inf2 := is.newInfo(nil, time.Second) inf2.NodeID = 2 inf2.PeerID = 1 if err := is.addInfo("a2", inf2); err != nil { t.Fatal(err) } if is.leastUseful(set) != nodes[0] { t.Error("expecting nodes[0] as least useful") } set.addNode(nodes[1]) if is.leastUseful(set) != nodes[1] { t.Error("expecting nodes[1] as least useful") } inf3 := is.newInfo(nil, time.Second) inf3.NodeID = 2 inf3.PeerID = 2 if err := is.addInfo("a3", inf3); err != nil { t.Fatal(err) } if is.leastUseful(set) != nodes[1] { t.Error("expecting nodes[1] as least useful") } }
// NewNetwork creates nodeCount gossip nodes. The networkType should // be set to either "tcp" or "unix". func NewNetwork(nodeCount int, networkType string) *Network { clock := hlc.NewClock(hlc.UnixNano) log.Infof("simulating gossip network with %d nodes", nodeCount) stopper := stop.NewStopper() rpcContext := rpc.NewContext(&base.Context{Insecure: true}, clock, stopper) tlsConfig, err := rpcContext.GetServerTLSConfig() if err != nil { log.Fatal(err) } nodes := make([]*Node, nodeCount) for i := range nodes { server := rpc.NewServer(rpcContext) testAddr := util.CreateTestAddr(networkType) ln, err := util.ListenAndServe(stopper, server, testAddr, tlsConfig) if err != nil { log.Fatal(err) } nodes[i] = &Node{Server: server, Addr: ln.Addr()} } for i, leftNode := range nodes { // Build new resolvers for each instance or we'll get data races. resolvers := []resolver.Resolver{resolver.NewResolverFromAddress(nodes[0].Addr)} gossipNode := gossip.New(rpcContext, resolvers) addr := leftNode.Addr gossipNode.SetNodeID(roachpb.NodeID(i + 1)) if err := gossipNode.SetNodeDescriptor(&roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(i + 1), Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()), }); err != nil { log.Fatal(err) } if err := gossipNode.AddInfo(addr.String(), encoding.EncodeUint64(nil, 0), time.Hour); err != nil { log.Fatal(err) } gossipNode.Start(leftNode.Server, addr, stopper) gossipNode.EnableSimulationCycler(true) leftNode.Gossip = gossipNode } return &Network{ Nodes: nodes, NetworkType: networkType, Stopper: stopper, } }
// TestLeastUseful verifies that the least-contributing peer node // can be determined. func TestLeastUseful(t *testing.T) { defer leaktest.AfterTest(t) nodes := []roachpb.NodeID{ roachpb.NodeID(1), roachpb.NodeID(2), } is := newInfoStore(1, emptyAddr) set := makeNodeSet(3) if is.leastUseful(set) != 0 { t.Error("not expecting a node from an empty set") } inf1 := is.newInfo(nil, time.Second) inf1.NodeID = 1 inf1.PeerID = 1 if err := is.addInfo("a1", inf1); err != nil { t.Fatal(err) } if is.leastUseful(set) != 0 { t.Error("not expecting a node from an empty set") } set.addNode(nodes[0]) if is.leastUseful(set) != nodes[0] { t.Error("expecting nodes[0] as least useful") } inf2 := is.newInfo(nil, time.Second) inf2.NodeID = 2 inf2.PeerID = 1 if err := is.addInfo("a2", inf2); err != nil { t.Fatal(err) } if is.leastUseful(set) != nodes[0] { t.Error("expecting nodes[0] as least useful") } set.addNode(nodes[1]) if is.leastUseful(set) != nodes[1] { t.Error("expecting nodes[1] as least useful") } inf3 := is.newInfo(nil, time.Second) inf3.NodeID = 2 inf3.PeerID = 2 if err := is.addInfo("a3", inf3); err != nil { t.Fatal(err) } if is.leastUseful(set) != nodes[1] { t.Error("expecting nodes[1] as least useful") } }
// TestSendRPCRetry verifies that sendRPC failed on first address but succeed on // second address, the second reply should be successfully returned back. func TestSendRPCRetry(t *testing.T) { defer leaktest.AfterTest(t) g, s := makeTestGossip(t) defer s() g.SetNodeID(1) if err := g.SetNodeDescriptor(&roachpb.NodeDescriptor{NodeID: 1}); err != nil { t.Fatal(err) } // Fill RangeDescriptor with 2 replicas var descriptor = roachpb.RangeDescriptor{ RangeID: 1, StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("z"), } for i := 1; i <= 2; i++ { addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i)) nd := &roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(i), Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()), } if err := g.AddInfoProto(gossip.MakeNodeIDKey(roachpb.NodeID(i)), nd, time.Hour); err != nil { t.Fatal(err) } descriptor.Replicas = append(descriptor.Replicas, roachpb.ReplicaDescriptor{ NodeID: roachpb.NodeID(i), StoreID: roachpb.StoreID(i), }) } var testFn rpcSendFn = func(_ SendOptions, _ ReplicaSlice, args roachpb.BatchRequest, _ *rpc.Context) (proto.Message, error) { batchReply := &roachpb.BatchResponse{} reply := &roachpb.ScanResponse{} batchReply.Add(reply) reply.Rows = append([]roachpb.KeyValue{}, roachpb.KeyValue{Key: roachpb.Key("b"), Value: roachpb.Value{}}) return batchReply, nil } ctx := &DistSenderContext{ RPCSend: testFn, RangeDescriptorDB: mockRangeDescriptorDB(func(_ roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) { return []roachpb.RangeDescriptor{descriptor}, nil }), } ds := NewDistSender(ctx, g) scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), 1) sr, err := client.SendWrapped(ds, nil, scan) if err != nil { t.Fatal(err) } if l := len(sr.(*roachpb.ScanResponse).Rows); l != 1 { t.Fatalf("expected 1 row; got %d", l) } }
func TestIsFresh(t *testing.T) { defer leaktest.AfterTest(t) node1 := roachpb.NodeID(1) node2 := roachpb.NodeID(2) node3 := roachpb.NodeID(3) i := newInfo(float64(1)) i.NodeID = node1 i.Hops = 3 if !i.isFresh(node3, nil) { t.Error("info should be fresh:", i) } if i.isFresh(node1, nil) { t.Error("info should not be fresh:", i) } if !i.isFresh(node3, &Node{i.OrigStamp - 1, 3}) { t.Error("info should be fresh:", i) } if !i.isFresh(node3, &Node{i.OrigStamp - 1, 4}) { t.Error("info should be fresh:", i) } if i.isFresh(node3, &Node{i.OrigStamp, 3}) { t.Error("info should not be fresh:", i) } if i.isFresh(node3, &Node{i.OrigStamp, 4}) { t.Error("info should not be fresh:", i) } if !i.isFresh(node3, &Node{i.OrigStamp, 5}) { t.Error("info should be fresh:", i) } if i.isFresh(node3, &Node{i.OrigStamp, 2}) { t.Error("info should not be fresh (hops + 1 will not be better):", i) } if i.isFresh(node3, &Node{i.OrigStamp + 1, 3}) { t.Error("info should not be fresh:", i) } if i.isFresh(node3, &Node{i.OrigStamp + 1, 2}) { t.Error("info should not be fresh:", i) } if i.isFresh(node1, &Node{i.OrigStamp - 1, 2}) { t.Error("info should not be fresh:", i) } if !i.isFresh(node2, &Node{i.OrigStamp - 1, 3}) { t.Error("info should be fresh:", i) } // Using node 0 will always yield fresh data. if !i.isFresh(0, &Node{0, 0}) { t.Error("info should be fresh from node0:", i) } }
// TestGossipCullNetwork verifies that a client will be culled from // the network periodically (at cullInterval duration intervals). func TestGossipCullNetwork(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() defer stopper.Stop() local := startGossip(1, stopper, t) local.SetCullInterval(5 * time.Millisecond) local.mu.Lock() for i := 0; i < minPeers; i++ { peer := startGossip(roachpb.NodeID(i+2), stopper, t) local.startClient(&peer.is.NodeAddr, stopper) } local.mu.Unlock() util.SucceedsSoon(t, func() error { if len(local.Outgoing()) == minPeers { return nil } return errors.New("some peers not yet connected") }) local.manage() util.SucceedsSoon(t, func() error { // Verify that a client is closed within the cull interval. if len(local.Outgoing()) == minPeers-1 { return nil } return errors.New("no network culling occurred") }) }
func gossipForTest(t *testing.T) (*gossip.Gossip, *stop.Stopper) { stopper := stop.NewStopper() // Setup fake zone config handler. config.TestingSetupZoneConfigHook(stopper) rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper) g := gossip.New(rpcContext, gossip.TestBootstrap) // Have to call g.SetNodeID before call g.AddInfo g.SetNodeID(roachpb.NodeID(1)) // Put an empty system config into gossip. if err := g.AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil { t.Fatal(err) } // Wait for SystemConfig. if err := util.IsTrueWithin(func() bool { return g.GetSystemConfig() != nil }, 100*time.Millisecond); err != nil { t.Fatal(err) } return g, stopper }
// TestClientNodeID verifies a client's gossip request with correct NodeID. func TestClientNodeID(t *testing.T) { defer leaktest.AfterTest(t)() local, remote, stopper := startFakeServerGossips(t) nodeID := roachpb.NodeID(1) local.SetNodeID(nodeID) disconnected := make(chan *client, 1) // Use an insecure context. We're talking to tcp socket which are not in the certs. lclock := hlc.NewClock(hlc.UnixNano) rpcContext := rpc.NewContext(&base.Context{Insecure: true}, lclock, stopper) // Start a gossip client. c := newClient(&remote.nodeAddr) defer func() { stopper.Stop() if c != <-disconnected { t.Errorf("expected client disconnect after remote close") } }() c.start(local, disconnected, rpcContext, stopper) // Wait for c.gossip to start. if receivedNodeID := <-remote.nodeIDChan; receivedNodeID != nodeID { t.Errorf("client should send NodeID with %v, got %v", nodeID, receivedNodeID) } }
func gossipForTest(t *testing.T) (*gossip.Gossip, *stop.Stopper) { stopper := stop.NewStopper() // Setup fake zone config handler. config.TestingSetupZoneConfigHook(stopper) rpcContext := rpc.NewContext(nil, nil, stopper) g := gossip.New(rpcContext, nil, stopper) // Have to call g.SetNodeID before call g.AddInfo g.SetNodeID(roachpb.NodeID(1)) // Put an empty system config into gossip. if err := g.AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil { t.Fatal(err) } // Wait for SystemConfig. util.SucceedsSoon(t, func() error { if _, ok := g.GetSystemConfig(); !ok { return util.Errorf("expected system config to be set") } return nil }) return g, stopper }
// allocateNodeID increments the node id generator key to allocate // a new, unique node id. func allocateNodeID(db *client.DB) (roachpb.NodeID, error) { r, err := db.Inc(keys.NodeIDGenerator, 1) if err != nil { return 0, util.Errorf("unable to allocate node ID: %s", err) } return roachpb.NodeID(r.ValueInt()), nil }
// TestGossipCullNetwork verifies that a client will be culled from // the network periodically (at cullInterval duration intervals). func TestGossipCullNetwork(t *testing.T) { defer leaktest.AfterTest(t)() // Create the local gossip and minPeers peers. stopper := stop.NewStopper() defer stopper.Stop() local := startGossip(1, stopper, t) local.SetCullInterval(5 * time.Millisecond) peers := []*Gossip{} for i := 0; i < minPeers; i++ { peers = append(peers, startGossip(roachpb.NodeID(i+2), stopper, t)) } // Start clients to all peers and start the local gossip's manage routine. local.mu.Lock() for _, p := range peers { pAddr := p.is.NodeAddr local.startClient(&pAddr, stopper) } local.mu.Unlock() local.manage() util.SucceedsSoon(t, func() error { // Verify that a client is closed within the cull interval. if len(local.Outgoing()) == minPeers-1 { return nil } return errors.New("no network culling occurred") }) }
// ReplicaDescriptor implements the Storage interface by returning a // dummy descriptor. func (m *MemoryStorage) ReplicaDescriptor(groupID roachpb.RangeID, replicaID roachpb.ReplicaID) (roachpb.ReplicaDescriptor, error) { return roachpb.ReplicaDescriptor{ ReplicaID: replicaID, NodeID: roachpb.NodeID(replicaID), StoreID: roachpb.StoreID(replicaID), }, nil }
func gossipForTest(t *testing.T) (*gossip.Gossip, *stop.Stopper) { stopper := stop.NewStopper() // Setup fake zone config handler. config.TestingSetupZoneConfigHook(stopper) rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper) g := gossip.New(rpcContext, gossip.TestBootstrap, stopper) // Have to call g.SetNodeID before call g.AddInfo g.SetNodeID(roachpb.NodeID(1)) // Put an empty system config into gossip. if err := g.AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil { t.Fatal(err) } // Wait for SystemConfig. util.SucceedsSoon(t, func() error { if g.GetSystemConfig() == nil { return util.Errorf("expected non-nil system config") } return nil }) return g, stopper }
// fanoutHeartbeatResponse sends the given heartbeat response to all groups
// which overlap with the sender's groups and consider themselves leader.
func (s *state) fanoutHeartbeatResponse(req *RaftMessageRequest) {
	// Hold the storage lock for the whole fanout; ReplicaIDForStore and
	// the raft group Step calls below depend on it.
	s.lockStorage()
	defer s.unlockStorage()
	fromID := roachpb.NodeID(req.Message.From)
	originNode, ok := s.nodes[fromID]
	if !ok {
		log.Warningf("node %v: not fanning out heartbeat response from unknown node %v", s.nodeID, fromID)
		return
	}
	// cnt counts the groups for which a MsgHeartbeatResp was stepped.
	cnt := 0
	for groupID := range originNode.groupIDs {
		// If we don't think that the local node is leader, don't propagate.
		// Also skip self-addressed responses (fromID == s.nodeID).
		if s.groups[groupID].leader.NodeID != s.nodeID || fromID == s.nodeID {
			if log.V(8) {
				log.Infof("node %v: not fanning out heartbeat response to %v, msg is from %v and leader is %v", s.nodeID, groupID, fromID, s.groups[groupID].leader)
			}
			continue
		}
		// Translate the sending store into this group's replica ID; skip
		// the group if the store holds no replica of it.
		fromRepID, err := s.Storage().ReplicaIDForStore(groupID, req.FromReplica.StoreID)
		if err != nil {
			if log.V(3) {
				log.Infof("node %s: not fanning out heartbeat to %s, could not find replica id for sending store %s", s.nodeID, groupID, req.FromReplica.StoreID)
			}
			continue
		}
		// Same translation for the receiving store.
		toRepID, err := s.Storage().ReplicaIDForStore(groupID, req.ToReplica.StoreID)
		if err != nil {
			if log.V(3) {
				log.Infof("node %s: not fanning out heartbeat to %s, could not find replica id for receiving store %s", s.nodeID, groupID, req.ToReplica.StoreID)
			}
			continue
		}
		// Re-synthesize a per-group heartbeat response from the coalesced one.
		msg := raftpb.Message{
			Type: raftpb.MsgHeartbeatResp,
			From: uint64(fromRepID),
			To:   uint64(toRepID),
		}
		// Step errors are only logged; one group's failure must not stop
		// the fanout to the remaining groups.
		if err := s.groups[groupID].raftGroup.Step(msg); err != nil {
			if log.V(4) {
				log.Infof("node %v: coalesced heartbeat response step to group %v failed", s.nodeID, groupID)
			}
		}
		cnt++
	}
	if log.V(7) {
		log.Infof("node %v: received coalesced heartbeat response from node %v; "+
			"fanned out to %d leaders in %d overlapping groups", s.nodeID, fromID, cnt, len(originNode.groupIDs))
	}
}
func TestNodeSetAsSlice(t *testing.T) { defer leaktest.AfterTest(t) nodes := makeNodeSet(2) node0 := roachpb.NodeID(1) node1 := roachpb.NodeID(2) nodes.addNode(node0) nodes.addNode(node1) nodeArr := nodes.asSlice() if len(nodeArr) != 2 { t.Error("expected slice of length 2:", nodeArr) } if (nodeArr[0] != node0 && nodeArr[0] != node1) || (nodeArr[1] != node1 && nodeArr[1] != node0) { t.Error("expected slice to contain both node0 and node1:", nodeArr) } }
func TestNodeSetFilter(t *testing.T) { defer leaktest.AfterTest(t) nodes1 := makeNodeSet(2) node0 := roachpb.NodeID(1) node1 := roachpb.NodeID(2) nodes1.addNode(node0) nodes1.addNode(node1) nodes2 := makeNodeSet(1) nodes2.addNode(node1) filtered := nodes1.filter(func(a roachpb.NodeID) bool { return !nodes2.hasNode(a) }) if filtered.len() != 1 || filtered.hasNode(node1) || !filtered.hasNode(node0) { t.Errorf("expected filter to leave node0: %+v", filtered) } }
// addNewNodeWithStore adds new node with a single store. func (c *Cluster) addNewNodeWithStore() { nodeID := roachpb.NodeID(len(c.nodes)) c.nodes[nodeID] = newNode(nodeID, c.gossip) // Only output if we're running the simulation. if c.epoch >= 0 { fmt.Fprintf(c.actionWriter, "%d:\tNode %d added\n", c.epoch, nodeID) } c.addStore(nodeID) }
// createTestStorePool creates a stopper, gossip and storePool for use in // tests. Stopper must be stopped by the caller. func createTestStorePool(timeUntilStoreDead time.Duration) (*stop.Stopper, *gossip.Gossip, *StorePool) { stopper := stop.NewStopper() rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper) g := gossip.New(rpcContext, gossip.TestBootstrap) // Have to call g.SetNodeID before call g.AddInfo g.SetNodeID(roachpb.NodeID(1)) storePool := NewStorePool(g, timeUntilStoreDead, stopper) return stopper, g, storePool }
func TestRuntimeStatRecorder(t *testing.T) { defer leaktest.AfterTest(t)() manual := hlc.NewManualClock(100) recorder := NewRuntimeStatRecorder(roachpb.NodeID(1), hlc.NewClock(manual.UnixNano)) data := recorder.GetTimeSeriesData() if a, e := len(data), 10; a != e { t.Fatalf("Expected %d series generated, got %d", a, e) } }
func TestNodeSetAddAndRemoveNode(t *testing.T) { defer leaktest.AfterTest(t) nodes := makeNodeSet(2) node0 := roachpb.NodeID(1) node1 := roachpb.NodeID(2) nodes.addNode(node0) nodes.addNode(node1) if !nodes.hasNode(node0) || !nodes.hasNode(node1) { t.Error("failed to locate added nodes") } nodes.removeNode(node0) if nodes.hasNode(node0) || !nodes.hasNode(node1) { t.Error("failed to remove node0", nodes) } nodes.removeNode(node1) if nodes.hasNode(node0) || nodes.hasNode(node1) { t.Error("failed to remove node1", nodes) } }
// createTestAllocator creates a stopper, gossip, store pool and allocator for // use in tests. Stopper must be stopped by the caller. func createTestAllocator() (*stop.Stopper, *gossip.Gossip, *StorePool, Allocator) { stopper := stop.NewStopper() rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper) g := gossip.New(rpcContext, gossip.TestBootstrap) // Have to call g.SetNodeID before call g.AddInfo g.SetNodeID(roachpb.NodeID(1)) storePool := NewStorePool(g, TestTimeUntilStoreDeadOff, stopper) a := MakeAllocator(storePool, AllocatorOptions{AllowRebalance: true}) return stopper, g, storePool, a }
// NewNetwork creates nodeCount gossip nodes. The networkType should // be set to either "tcp" or "unix". The gossipInterval should be set // to a compressed simulation timescale, though large enough to give // the concurrent goroutines enough time to pass data back and forth // in order to yield accurate estimates of how old data actually ends // up being at the various nodes (e.g. DefaultTestGossipInterval). // TODO: This method should take `stopper` as an argument. func NewNetwork(nodeCount int, networkType string, gossipInterval time.Duration) *Network { clock := hlc.NewClock(hlc.UnixNano) log.Infof("simulating gossip network with %d nodes", nodeCount) stopper := stop.NewStopper() rpcContext := rpc.NewContext(&base.Context{Insecure: true}, clock, stopper) nodes := make([]*Node, nodeCount) for i := range nodes { server := rpc.NewServer(util.CreateTestAddr(networkType), rpcContext) if err := server.Start(); err != nil { log.Fatal(err) } nodes[i] = &Node{Server: server} } var numResolvers int if len(nodes) > 3 { numResolvers = 3 } else { numResolvers = len(nodes) } for i, leftNode := range nodes { // Build new resolvers for each instance or we'll get data races. var resolvers []resolver.Resolver for _, rightNode := range nodes[:numResolvers] { resolvers = append(resolvers, resolver.NewResolverFromAddress(rightNode.Server.Addr())) } gossipNode := gossip.New(rpcContext, gossipInterval, resolvers) addr := leftNode.Server.Addr() if err := gossipNode.SetNodeDescriptor(&roachpb.NodeDescriptor{ NodeID: roachpb.NodeID(i + 1), Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()), }); err != nil { log.Fatal(err) } gossipNode.Start(leftNode.Server, stopper) stopper.AddCloser(leftNode.Server) leftNode.Gossip = gossipNode } return &Network{ Nodes: nodes, NetworkType: networkType, GossipInterval: gossipInterval, Stopper: stopper, } }
func TestNodeSetMaxSize(t *testing.T) { defer leaktest.AfterTest(t) nodes := makeNodeSet(1) if !nodes.hasSpace() { t.Error("set should have space") } nodes.addNode(roachpb.NodeID(1)) if nodes.hasSpace() { t.Error("set should have no space") } }
// createTestStorePool creates a stopper, gossip and storePool for use in // tests. Stopper must be stopped by the caller. func createTestStorePool(timeUntilStoreDead time.Duration) (*stop.Stopper, *gossip.Gossip, *hlc.ManualClock, *StorePool) { stopper := stop.NewStopper() mc := hlc.NewManualClock(0) clock := hlc.NewClock(mc.UnixNano) rpcContext := rpc.NewContext(nil, clock, stopper) g := gossip.New(rpcContext, nil, stopper) // Have to call g.SetNodeID before call g.AddInfo g.SetNodeID(roachpb.NodeID(1)) storePool := NewStorePool(g, clock, timeUntilStoreDead, stopper) return stopper, g, mc, storePool }