// startFakeServerGossips creates a local gossip instance and a remote
// faked gossip instance. The remote instance launches its faked gossip
// service purely so the test can inspect the client messages it receives.
// All resources are registered with the returned stopper.
func startFakeServerGossips(t *testing.T) (local *Gossip, remote *fakeGossipServer, stopper *stop.Stopper) {
	stopper = stop.NewStopper()
	// Local side: a real gossip instance served over gRPC.
	lRPCContext := rpc.NewContext(&base.Context{Insecure: true}, nil, stopper)
	lserver := rpc.NewServer(lRPCContext)
	lln, err := util.ListenAndServeGRPC(stopper, lserver, util.TestAddr)
	if err != nil {
		t.Fatal(err)
	}
	local = New(lRPCContext, nil, stopper)
	local.start(lserver, lln.Addr())
	// Remote side: a fake gossip server on its own gRPC server.
	rRPCContext := rpc.NewContext(&base.Context{Insecure: true}, nil, stopper)
	rserver := rpc.NewServer(rRPCContext)
	rln, err := util.ListenAndServeGRPC(stopper, rserver, util.TestAddr)
	if err != nil {
		t.Fatal(err)
	}
	remote = newFakeGossipServer(rserver, stopper)
	// Record the actual (possibly ephemeral) listen address on the fake.
	addr := rln.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())
	return
}
// startFakeServerGossip creates a local gossip instance and a remote
// faked gossip instance. The remote instance launches its faked gossip
// service purely so the test can inspect the client messages it receives.
func startFakeServerGossip(t *testing.T) (local *Gossip, remote *fakeGossipServer, stopper *stop.Stopper) {
	// Local side: a real gossip instance on its own RPC server.
	lclock := hlc.NewClock(hlc.UnixNano)
	stopper = stop.NewStopper()
	lRPCContext := rpc.NewContext(&base.Context{Insecure: true}, lclock, stopper)
	laddr := util.CreateTestAddr("tcp")
	lserver := rpc.NewServer(laddr, lRPCContext)
	if err := lserver.Start(); err != nil {
		t.Fatal(err)
	}
	local = New(lRPCContext, TestBootstrap)
	local.start(lserver, stopper)
	// Remote side: a fake gossip server on a second RPC server.
	rclock := hlc.NewClock(hlc.UnixNano)
	raddr := util.CreateTestAddr("tcp")
	rRPCContext := rpc.NewContext(&base.Context{Insecure: true}, rclock, stopper)
	rserver := rpc.NewServer(raddr, rRPCContext)
	if err := rserver.Start(); err != nil {
		t.Fatal(err)
	}
	remote, err := newFakeGossipServer(rserver, stopper)
	if err != nil {
		t.Fatal(err)
	}
	// Record the actual (possibly ephemeral) listen address on the fake.
	addr := rserver.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())
	// Give the gossip goroutines a moment to start up.
	time.Sleep(time.Millisecond)
	return
}
// startGossip creates local and remote gossip instances.
// The remote gossip instance launches its gossip service.
func startGossip(t *testing.T) (local, remote *Gossip, lserver, rserver *rpc.Server) {
	laddr := &net.UnixAddr{Net: "unix", Name: tempUnixFile()}
	lserver = rpc.NewServer(laddr)
	go lserver.ListenAndServe()
	local = New(lserver)
	raddr := &net.UnixAddr{Net: "unix", Name: tempUnixFile()}
	rserver = rpc.NewServer(raddr)
	go rserver.ListenAndServe()
	remote = New(rserver)
	// Only the remote instance serves; local acts as the client side.
	go remote.serve()
	// Give the serving goroutines a moment to start accepting.
	time.Sleep(time.Millisecond)
	return
}
// startGossip creates local and remote gossip instances.
// Both remote and local instances launch the gossip service.
func startGossip(t *testing.T) (local, remote *Gossip, stopper *stop.Stopper) {
	stopper = stop.NewStopper()
	// Local instance: node ID 1, served over TLS on a test TCP address.
	lclock := hlc.NewClock(hlc.UnixNano)
	lRPCContext := rpc.NewContext(&base.Context{Insecure: true}, lclock, stopper)
	laddr := util.CreateTestAddr("tcp")
	lserver := rpc.NewServer(lRPCContext)
	lTLSConfig, err := lRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	lln, err := util.ListenAndServe(stopper, lserver, laddr, lTLSConfig)
	if err != nil {
		t.Fatal(err)
	}
	local = New(lRPCContext, TestBootstrap)
	local.SetNodeID(1)
	if err := local.SetNodeDescriptor(&roachpb.NodeDescriptor{
		NodeID:  1,
		Address: util.MakeUnresolvedAddr(laddr.Network(), laddr.String()),
	}); err != nil {
		t.Fatal(err)
	}
	// Remote instance: node ID 2, mirroring the local setup.
	rclock := hlc.NewClock(hlc.UnixNano)
	rRPCContext := rpc.NewContext(&base.Context{Insecure: true}, rclock, stopper)
	raddr := util.CreateTestAddr("tcp")
	rserver := rpc.NewServer(rRPCContext)
	rTLSConfig, err := rRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	rln, err := util.ListenAndServe(stopper, rserver, raddr, rTLSConfig)
	if err != nil {
		t.Fatal(err)
	}
	remote = New(rRPCContext, TestBootstrap)
	remote.SetNodeID(2)
	if err := remote.SetNodeDescriptor(&roachpb.NodeDescriptor{
		NodeID:  2,
		Address: util.MakeUnresolvedAddr(raddr.Network(), raddr.String()),
	}); err != nil {
		t.Fatal(err)
	}
	// Start both services only after descriptors are in place.
	local.start(lserver, lln.Addr(), stopper)
	remote.start(rserver, rln.Addr(), stopper)
	// Give the gossip goroutines a moment to start up.
	time.Sleep(time.Millisecond)
	return
}
// startGossip creates local and remote gossip instances. // The remote gossip instance launches its gossip service. func startGossip(t *testing.T) (local, remote *Gossip, lserver, rserver *rpc.Server) { laddr := util.CreateTestAddr("unix") lserver = rpc.NewServer(laddr) lserver.Start() local = New() raddr := util.CreateTestAddr("unix") rserver = rpc.NewServer(raddr) rserver.Start() remote = New() local.start(lserver) remote.start(rserver) time.Sleep(time.Millisecond) return }
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	// Effectively disable the scanner for the duration of the test.
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(addr, nodeRPCContext)
	if err := rpcServer.Start(); err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipInterval, testContext.GossipBootstrapResolvers)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS == addr {
			gossipBS = rpcServer.Addr()
		}
		g.SetResolvers([]resolver.Resolver{resolver.NewResolverFromAddress(gossipBS)})
		g.Start(rpcServer, stopper)
	}
	ctx.Gossip = g
	sender := kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock}, g)
	ctx.DB = client.NewDB(sender)
	// TODO(bdarnell): arrange to have the transport closed.
	// (or attach LocalRPCTransport.Close to the stopper)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	node := NewNode(ctx)
	return rpcServer, ctx.Clock, node, stopper
}
// Listen registers an RPC server for the given store ID, wiring the raft
// message method through to the supplied ServerInterface. It fatals if a
// server is already registered for the ID, and returns any error from
// registering the handler or starting the server.
func (lt *localRPCTransport) Listen(id roachpb.StoreID, server ServerInterface) error {
	addr := util.CreateTestAddr("tcp")
	rpcServer := crpc.NewServer(addr, &crpc.Context{
		Context: base.Context{
			Insecure: true,
		},
		Stopper:      lt.stopper,
		DisableCache: true,
	})
	// Forward raft messages asynchronously to the store's handler.
	err := rpcServer.RegisterAsync(raftMessageName, false, /*not public*/
		func(argsI proto.Message, callback func(proto.Message, error)) {
			args := argsI.(*RaftMessageRequest)
			resp, err := server.RaftMessage(args)
			callback(resp, err)
		}, &RaftMessageRequest{})
	if err != nil {
		return err
	}
	// Guard the servers map; double registration is a programming error.
	lt.mu.Lock()
	if _, ok := lt.servers[id]; ok {
		log.Fatalf("node %d already listening", id)
	}
	lt.servers[id] = rpcServer
	lt.mu.Unlock()
	return rpcServer.Start()
}
func (lt *localRPCTransport) Listen(id proto.RaftNodeID, server ServerInterface) error { addr := util.CreateTestAddr("tcp") rpcServer := crpc.NewServer(addr, &crpc.Context{ Context: base.Context{ Insecure: true, }, Stopper: lt.stopper, DisableCache: true, }) err := rpcServer.RegisterAsync(raftMessageName, func(argsI gogoproto.Message, callback func(gogoproto.Message, error)) { protoArgs := argsI.(*proto.RaftMessageRequest) args := RaftMessageRequest{ GroupID: protoArgs.GroupID, } if err := args.Message.Unmarshal(protoArgs.Msg); err != nil { callback(nil, err) } err := server.RaftMessage(&args, &RaftMessageResponse{}) callback(&proto.RaftMessageResponse{}, err) }, &proto.RaftMessageRequest{}) if err != nil { return err } lt.mu.Lock() if _, ok := lt.servers[id]; ok { log.Fatalf("node %d already listening", id) } lt.servers[id] = rpcServer lt.mu.Unlock() return rpcServer.Start() }
// newServer constructs the top-level server along with its gossip, KV,
// REST, node, admin, status, and structured-data subsystems. It returns
// an error if the configured RPC address cannot be resolved.
func newServer() (*server, error) {
	// Determine hostname in case it hasn't been specified in -rpc or -http.
	host, err := os.Hostname()
	if err != nil {
		// Fall back to loopback if the hostname is unavailable.
		host = "127.0.0.1"
	}

	// Resolve the RPC address, prefixing the hostname if only a port
	// (":<port>") was supplied.
	if strings.HasPrefix(*rpcAddr, ":") {
		*rpcAddr = host + *rpcAddr
	}
	addr, err := net.ResolveTCPAddr("tcp", *rpcAddr)
	if err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", *rpcAddr, err)
	}

	s := &server{
		host: host,
		mux:  http.NewServeMux(),
		rpc:  rpc.NewServer(addr),
	}
	// Wire up the dependent subsystems; all share the KV database.
	s.gossip = gossip.New()
	s.kvDB = kv.NewDB(s.gossip)
	s.kvREST = rest.NewRESTServer(s.kvDB)
	s.node = NewNode(s.kvDB, s.gossip)
	s.admin = newAdminServer(s.kvDB)
	s.status = newStatusServer(s.kvDB)
	s.structuredDB = structured.NewDB(s.kvDB)
	s.structuredREST = structured.NewRESTServer(s.structuredDB)
	return s, nil
}
// startGossip creates and starts a gossip instance for the given node
// ID, serving it over TLS on an ephemeral test TCP address. All
// resources are registered with the supplied stopper.
func startGossip(nodeID roachpb.NodeID, stopper *stop.Stopper, t *testing.T) *Gossip {
	clock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(&base.Context{Insecure: true}, clock, stopper)
	addr := util.CreateTestAddr("tcp")
	server := rpc.NewServer(rpcContext)
	tlsConfig, err := rpcContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	ln, err := util.ListenAndServe(stopper, server, addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}
	g := New(rpcContext, TestBootstrap, stopper)
	g.SetNodeID(nodeID)
	// Register this node's descriptor before starting the service.
	if err := g.SetNodeDescriptor(&roachpb.NodeDescriptor{
		NodeID:  nodeID,
		Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
	}); err != nil {
		t.Fatal(err)
	}
	g.start(server, ln.Addr())
	// Give the gossip goroutines a moment to start up.
	time.Sleep(time.Millisecond)
	return g
}
// TestGossipGroupsInfoStore verifies gossiping of groups via the // gossip instance infostore. func TestGossipGroupsInfoStore(t *testing.T) { g := New(rpc.NewServer(testAddr("test-addr:0"))) // For int64. g.RegisterGroup("i", 3, MinGroup) for i := 0; i < 3; i++ { g.AddInfo(fmt.Sprintf("i.%d", i), int64(i), time.Hour) } values, err := g.GetGroupInfos("i") if err != nil { t.Errorf("error fetching int64 group: %v", err) } if len(values) != 3 { t.Errorf("incorrect number of values in group: %v", values) } for i := 0; i < 3; i++ { if values[i].(int64) != int64(i) { t.Errorf("index %d has incorrect value: %d, expected %d", i, values[i].(int64), i) } } if _, err := g.GetGroupInfos("i2"); err == nil { t.Errorf("expected error fetching nonexistent key \"i2\"") } // For float64. g.RegisterGroup("f", 3, MinGroup) for i := 0; i < 3; i++ { g.AddInfo(fmt.Sprintf("f.%d", i), float64(i), time.Hour) } values, err = g.GetGroupInfos("f") if err != nil { t.Errorf("error fetching float64 group: %v", err) } if len(values) != 3 { t.Errorf("incorrect number of values in group: %v", values) } for i := 0; i < 3; i++ { if values[i].(float64) != float64(i) { t.Errorf("index %d has incorrect value: %f, expected %d", i, values[i].(float64), i) } } // For string. g.RegisterGroup("s", 3, MinGroup) for i := 0; i < 3; i++ { g.AddInfo(fmt.Sprintf("s.%d", i), fmt.Sprintf("%d", i), time.Hour) } values, err = g.GetGroupInfos("s") if err != nil { t.Errorf("error fetching string group: %v", err) } if len(values) != 3 { t.Errorf("incorrect number of values in group: %v", values) } for i := 0; i < 3; i++ { if values[i].(string) != fmt.Sprintf("%d", i) { t.Errorf("index %d has incorrect value: %d, expected %s", i, values[i], fmt.Sprintf("%d", i)) } } }
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server and node are returned. If gossipBS is not
// nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, *Node) {
	tlsConfig, err := rpc.LoadTestTLSConfig("..")
	if err != nil {
		t.Fatal(err)
	}
	rpcServer := rpc.NewServer(addr, tlsConfig)
	if err := rpcServer.Start(); err != nil {
		t.Fatal(err)
	}
	g := gossip.New(tlsConfig)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS == addr {
			gossipBS = rpcServer.Addr()
		}
		g.SetBootstrap([]net.Addr{gossipBS})
		g.Start(rpcServer)
	}
	clock := hlc.NewClock(hlc.UnixNano)
	db := kv.NewDB(kv.NewDistKV(g), clock)
	node := NewNode(db, g)
	// Start the node against the provided engines; attributes are empty
	// for tests.
	if err := node.start(rpcServer, clock, engines, proto.Attributes{}); err != nil {
		t.Fatal(err)
	}
	return rpcServer, node
}
// SimulateNetwork creates nodeCount gossip nodes. The network should
// be set to either "tcp" or "unix". The gossipInterval should be set
// to a compressed simulation timescale, though large enough to give
// the concurrent goroutines enough time to pass data back and forth
// in order to yield accurate estimates of how old data actually ends
// up being at the various nodes. After each gossipInterval period,
// simCallback is invoked; when it returns false, the simulation
// ends. If it returns true, the simulation continues another cycle.
//
// Node0 gossips the node count as well as the gossip sentinel. The
// gossip bootstrap hosts are set to the first three nodes (or fewer if
// less than three are available).
//
// At each cycle of the simulation, node 0 gossips the sentinel. If
// the simulation requires other nodes to gossip, this should be done
// via simCallback.
//
// The simulation callback receives a map of nodes, keyed by node address.
func SimulateNetwork(nodeCount int, network string, gossipInterval time.Duration,
	simCallback func(cycle int, nodes map[string]*Gossip) bool) {
	// Seed the random number generator for non-determinism across
	// multiple runs.
	rand.Seed(time.Now().UTC().UnixNano())
	tlsConfig := rpc.LoadInsecureTLSConfig()
	log.Infof("simulating network with %d nodes", nodeCount)
	// Start one RPC server per simulated node.
	servers := make([]*rpc.Server, nodeCount)
	addrs := make([]net.Addr, nodeCount)
	for i := 0; i < nodeCount; i++ {
		addr := util.CreateTestAddr(network)
		servers[i] = rpc.NewServer(addr, tlsConfig)
		if err := servers[i].Start(); err != nil {
			log.Fatal(err)
		}
		addrs[i] = servers[i].Addr()
	}
	// Bootstrap from the first three nodes (or all of them if fewer).
	var bootstrap []net.Addr
	if nodeCount < 3 {
		bootstrap = addrs
	} else {
		bootstrap = addrs[:3]
	}
	nodes := make(map[string]*Gossip, nodeCount)
	for i := 0; i < nodeCount; i++ {
		node := New(tlsConfig)
		node.Name = fmt.Sprintf("Node%d", i)
		node.SetBootstrap(bootstrap)
		node.SetInterval(gossipInterval)
		node.Start(servers[i])
		// Node 0 gossips node count.
		if i == 0 {
			node.AddInfo(KeyNodeCount, int64(nodeCount), time.Hour)
		}
		nodes[addrs[i].String()] = node
	}
	// Drive simulation cycles off the gossip interval ticker.
	gossipTimeout := time.Tick(gossipInterval)
	var complete bool
	for cycle := 0; !complete; cycle++ {
		select {
		case <-gossipTimeout:
			// Node 0 gossips sentinel every cycle.
			nodes[addrs[0].String()].AddInfo(KeySentinel, int64(cycle), time.Hour)
			if !simCallback(cycle, nodes) {
				complete = true
			}
		}
	}
	// Stop all servers & nodes.
	for i := 0; i < nodeCount; i++ {
		servers[i].Close()
		nodes[addrs[i].String()].Stop()
	}
}
// createTestRange creates a new range initialized to the full extent // of the keyspace. The gossip instance is also returned for testing. func createTestRange(engine Engine, t *testing.T) (*Range, *gossip.Gossip) { rm := RangeMetadata{ RangeID: 0, StartKey: KeyMin, EndKey: KeyMax, Replicas: testRangeLocations, } g := gossip.New(rpc.NewServer(&net.UnixAddr{"fake", "unix"})) return NewRange(rm, engine, nil, g), g }
func newTestServer(t *testing.T, ctx *rpc.Context) (*grpc.Server, net.Listener) { s := rpc.NewServer(ctx) ln, err := util.ListenAndServeGRPC(ctx.Stopper, s, util.TestAddr) if err != nil { t.Fatal(err) } return s, ln }
// SimulateNetwork creates nodeCount gossip nodes. The network should
// be set to either "tcp" or "unix". The gossipInterval should be set
// to a compressed simulation timescale, though large enough to give
// the concurrent goroutines enough time to pass data back and forth
// in order to yield accurate estimates of how old data actually ends
// up being at the various nodes. After each gossipInterval period,
// simCallback is invoked; when it returns false, the simulation
// ends. If it returns true, the simulation continues another cycle.
//
// Node0 gossips the node count as well as the gossip sentinel. The
// gossip bootstrap hosts are set to the first three nodes (or fewer if
// less than three are available).
//
// At each cycle of the simulation, node 0 gossips the sentinel. If
// the simulation requires other nodes to gossip, this should be done
// via simCallback.
//
// The simulation callback receives a map of nodes, keyed by node address.
func SimulateNetwork(nodeCount int, network string, gossipInterval time.Duration,
	simCallback func(cycle int, nodes map[string]*Gossip) bool) {
	glog.Infof("simulating network with %d nodes", nodeCount)
	// Start one RPC server per simulated node.
	servers := make([]*rpc.Server, nodeCount)
	addrs := make([]net.Addr, nodeCount)
	for i := 0; i < nodeCount; i++ {
		addr, err := createSimAddr(network)
		if err != nil {
			glog.Fatalf("failed to create address: %s", err)
		}
		servers[i] = rpc.NewServer(addr)
		go servers[i].ListenAndServe()
		addrs[i] = addr
	}
	// Bootstrap from the first three nodes (or all of them if fewer).
	var bootstrap []net.Addr
	if nodeCount < 3 {
		bootstrap = addrs
	} else {
		bootstrap = addrs[:3]
	}
	nodes := make(map[string]*Gossip, nodeCount)
	for i := 0; i < nodeCount; i++ {
		node := New(servers[i])
		node.Name = fmt.Sprintf("Node%d", i)
		node.SetBootstrap(bootstrap)
		node.SetInterval(gossipInterval)
		// Node 0 gossips node count.
		if i == 0 {
			node.AddInfo(KeyNodeCount, int64(nodeCount), time.Hour)
		}
		node.Start()
		nodes[addrs[i].String()] = node
	}
	// Drive simulation cycles off the gossip interval ticker.
	gossipTimeout := time.Tick(gossipInterval)
	var complete bool
	for cycle := 0; !complete; cycle++ {
		select {
		case <-gossipTimeout:
			// Node 0 gossips sentinel every cycle.
			nodes[addrs[0].String()].AddInfo(KeySentinel, int64(cycle), time.Hour)
			if !simCallback(cycle, nodes) {
				complete = true
			}
		}
	}
	// Stop all servers & nodes.
	for i := 0; i < nodeCount; i++ {
		servers[i].Close()
		nodes[addrs[i].String()].Stop()
	}
}
// NewNetwork creates nodeCount gossip nodes. The networkType should
// be set to either "tcp" or "unix". The gossipInterval should be set
// to a compressed simulation timescale, though large enough to give
// the concurrent goroutines enough time to pass data back and forth
// in order to yield accurate estimates of how old data actually ends
// up being at the various nodes (e.g. DefaultTestGossipInterval).
// TODO: This method should take `stopper` as an argument.
func NewNetwork(nodeCount int, networkType string, gossipInterval time.Duration) *Network {
	clock := hlc.NewClock(hlc.UnixNano)
	log.Infof("simulating gossip network with %d nodes", nodeCount)
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{Insecure: true}, clock, stopper)
	// Start one RPC server per simulated node.
	nodes := make([]*Node, nodeCount)
	for i := range nodes {
		server := rpc.NewServer(util.CreateTestAddr(networkType), rpcContext)
		if err := server.Start(); err != nil {
			log.Fatal(err)
		}
		nodes[i] = &Node{Server: server}
	}
	// Resolvers point at the first three nodes (or all if fewer).
	var numResolvers int
	if len(nodes) > 3 {
		numResolvers = 3
	} else {
		numResolvers = len(nodes)
	}
	for i, leftNode := range nodes {
		// Build new resolvers for each instance or we'll get data races.
		var resolvers []resolver.Resolver
		for _, rightNode := range nodes[:numResolvers] {
			resolvers = append(resolvers, resolver.NewResolverFromAddress(rightNode.Server.Addr()))
		}
		gossipNode := gossip.New(rpcContext, gossipInterval, resolvers)
		addr := leftNode.Server.Addr()
		// Node IDs are 1-based.
		if err := gossipNode.SetNodeDescriptor(&roachpb.NodeDescriptor{
			NodeID:  roachpb.NodeID(i + 1),
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}); err != nil {
			log.Fatal(err)
		}
		gossipNode.Start(leftNode.Server, stopper)
		stopper.AddCloser(leftNode.Server)
		leftNode.Gossip = gossipNode
	}
	return &Network{
		Nodes:          nodes,
		NetworkType:    networkType,
		GossipInterval: gossipInterval,
		Stopper:        stopper,
	}
}
// startGossip creates local and remote gossip instances.
// The remote gossip instance launches its gossip service.
func startGossip(t *testing.T) (local, remote *Gossip, lserver, rserver *rpc.Server) {
	// Insecure TLS config shared by both servers.
	tlsConfig := rpc.LoadInsecureTLSConfig()
	laddr := util.CreateTestAddr("unix")
	lserver = rpc.NewServer(laddr, tlsConfig)
	if err := lserver.Start(); err != nil {
		t.Fatal(err)
	}
	local = New(tlsConfig)
	raddr := util.CreateTestAddr("unix")
	rserver = rpc.NewServer(raddr, tlsConfig)
	if err := rserver.Start(); err != nil {
		t.Fatal(err)
	}
	remote = New(tlsConfig)
	local.start(lserver)
	remote.start(rserver)
	// Give the gossip goroutines a moment to start up.
	time.Sleep(time.Millisecond)
	return
}
// CreateNode creates a simulation node and starts an RPC server for it. func (n *Network) CreateNode() (*Node, error) { server := rpc.NewServer(n.rpcContext) ln, err := util.ListenAndServeGRPC(n.Stopper, server, util.TestAddr) if err != nil { return nil, err } node := &Node{Server: server, Addr: ln.Addr()} node.Gossip = gossip.New(n.rpcContext, nil, n.Stopper) n.Nodes = append(n.Nodes, node) return node, nil }
// NewNetwork creates nodeCount gossip nodes. The networkType should
// be set to either "tcp" or "unix".
func NewNetwork(nodeCount int, networkType string) *Network {
	clock := hlc.NewClock(hlc.UnixNano)
	log.Infof("simulating gossip network with %d nodes", nodeCount)
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{Insecure: true}, clock, stopper)
	tlsConfig, err := rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}
	// Start one server + listener per simulated node.
	nodes := make([]*Node, nodeCount)
	for i := range nodes {
		server := rpc.NewServer(rpcContext)
		testAddr := util.CreateTestAddr(networkType)
		ln, err := util.ListenAndServe(stopper, server, testAddr, tlsConfig)
		if err != nil {
			log.Fatal(err)
		}
		nodes[i] = &Node{Server: server, Addr: ln.Addr()}
	}
	for i, leftNode := range nodes {
		// Build new resolvers for each instance or we'll get data races.
		// All nodes resolve through node 0.
		resolvers := []resolver.Resolver{resolver.NewResolverFromAddress(nodes[0].Addr)}
		gossipNode := gossip.New(rpcContext, resolvers)
		addr := leftNode.Addr
		// Node IDs are 1-based.
		gossipNode.SetNodeID(roachpb.NodeID(i + 1))
		if err := gossipNode.SetNodeDescriptor(&roachpb.NodeDescriptor{
			NodeID:  roachpb.NodeID(i + 1),
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}); err != nil {
			log.Fatal(err)
		}
		// Seed the infostore with an entry keyed by this node's address.
		if err := gossipNode.AddInfo(addr.String(), encoding.EncodeUint64(nil, 0), time.Hour); err != nil {
			log.Fatal(err)
		}
		gossipNode.Start(leftNode.Server, addr, stopper)
		gossipNode.EnableSimulationCycler(true)
		leftNode.Gossip = gossipNode
	}
	return &Network{
		Nodes:       nodes,
		NetworkType: networkType,
		Stopper:     stopper,
	}
}
// startGossip creates local and remote gossip instances. // The remote gossip instance launches its gossip service. // We use insecure contexts since we do not have certificates for unix sockets. func startGossip(t *testing.T) (local, remote *Gossip, stopper *util.Stopper) { lclock := hlc.NewClock(hlc.UnixNano) stopper = util.NewStopper() lRPCContext := rpc.NewContext(insecureTestBaseContext, lclock, stopper) laddr := util.CreateTestAddr("unix") lserver := rpc.NewServer(laddr, lRPCContext) if err := lserver.Start(); err != nil { t.Fatal(err) } local = New(lRPCContext, gossipInterval, TestBootstrap) if err := local.SetNodeDescriptor(&proto.NodeDescriptor{ NodeID: 1, Address: proto.Addr{ Network: laddr.Network(), Address: laddr.String(), }}); err != nil { t.Fatal(err) } rclock := hlc.NewClock(hlc.UnixNano) raddr := util.CreateTestAddr("unix") rRPCContext := rpc.NewContext(insecureTestBaseContext, rclock, stopper) rserver := rpc.NewServer(raddr, rRPCContext) if err := rserver.Start(); err != nil { t.Fatal(err) } remote = New(rRPCContext, gossipInterval, TestBootstrap) if err := local.SetNodeDescriptor(&proto.NodeDescriptor{ NodeID: 2, Address: proto.Addr{ Network: raddr.Network(), Address: raddr.String(), }, }); err != nil { t.Fatal(err) } local.start(lserver, stopper) remote.start(rserver, stopper) time.Sleep(time.Millisecond) return }
// newRaftTransportTestContext builds the shared fixture for raft
// transport tests: a stopper, an insecure node RPC context, and a
// gossip instance (node ID 1) backed by a server that is intentionally
// never started.
func newRaftTransportTestContext(t testing.TB) *raftTransportTestContext {
	rttc := &raftTransportTestContext{
		t:          t,
		stopper:    stop.NewStopper(),
		transports: map[roachpb.NodeID]*storage.RaftTransport{},
	}
	rttc.nodeRPCContext = rpc.NewContext(testutils.NewNodeTestBaseContext(), nil, rttc.stopper)
	server := rpc.NewServer(rttc.nodeRPCContext) // never started
	rttc.gossip = gossip.New(rttc.nodeRPCContext, server, nil, rttc.stopper, metric.NewRegistry())
	rttc.gossip.SetNodeID(1)
	return rttc
}
// CreateNode creates a simulation node and starts an RPC server for it. func (n *Network) CreateNode() (*Node, error) { server := rpc.NewServer(n.rpcContext) testAddr := util.CreateTestAddr("tcp") ln, err := util.ListenAndServe(n.Stopper, server, testAddr, n.tlsConfig) if err != nil { return nil, err } node := &Node{Server: server, Addr: ln.Addr()} node.Gossip = gossip.New(n.rpcContext, nil) n.Nodes = append(n.Nodes, node) return node, nil }
// startFakeServerGossips creates a local gossip instance and a remote
// faked gossip instance. The remote instance launches its faked gossip
// service purely so the test can inspect the client messages it receives.
func startFakeServerGossips(t *testing.T) (local *Gossip, remote *fakeGossipServer, stopper *stop.Stopper) {
	stopper = stop.NewStopper()
	// Local side: a real gossip instance served over TLS.
	lclock := hlc.NewClock(hlc.UnixNano)
	lRPCContext := rpc.NewContext(&base.Context{Insecure: true}, lclock, stopper)
	laddr := util.CreateTestAddr("tcp")
	lserver := rpc.NewServer(lRPCContext)
	lTLSConfig, err := lRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	lln, err := util.ListenAndServe(stopper, lserver, laddr, lTLSConfig)
	if err != nil {
		t.Fatal(err)
	}
	local = New(lRPCContext, TestBootstrap, stopper)
	local.start(lserver, lln.Addr())
	// Remote side: a fake gossip server on a second server.
	rclock := hlc.NewClock(hlc.UnixNano)
	rRPCContext := rpc.NewContext(&base.Context{Insecure: true}, rclock, stopper)
	raddr := util.CreateTestAddr("tcp")
	rserver := rpc.NewServer(rRPCContext)
	rTLSConfig, err := rRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	rln, err := util.ListenAndServe(stopper, rserver, raddr, rTLSConfig)
	if err != nil {
		t.Fatal(err)
	}
	if remote, err = newFakeGossipServer(rserver, stopper); err != nil {
		t.Fatal(err)
	}
	// Record the actual (possibly ephemeral) listen address on the fake.
	addr := rln.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())
	// Give the gossip goroutines a moment to start up.
	time.Sleep(time.Millisecond)
	return
}
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	// Effectively disable periodic background work for the test.
	ctx.ScanInterval = 10 * time.Hour
	ctx.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCtx := makeTestContext()
	g := gossip.New(
		context.Background(),
		nodeRPCContext,
		grpcServer,
		serverCtx.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry())
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(ln.Addr())
	}
	ctx.Gossip = g
	// Retries should give up when the stopper begins quiescing.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(&kv.DistSenderConfig{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	ctx.Ctx = tracing.WithTracer(context.Background(), tracing.NewTracer())
	sender := kv.NewTxnCoordSender(ctx.Ctx, distSender, ctx.Clock, false, stopper, kv.MakeTxnMetrics())
	ctx.DB = client.NewDB(sender)
	ctx.Transport = storage.NewDummyRaftTransport()
	node := NewNode(ctx, status.NewMetricsRecorder(ctx.Clock), metric.NewRegistry(), stopper, kv.MakeTxnMetrics(), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	return grpcServer, ln.Addr(), ctx.Clock, node, stopper
}
// AddNodeWithoutGossip registers a node with the cluster. Nodes must
// be added before they can be used in other methods of
// raftTransportTestContext. Unless you are testing the effects of
// delaying gossip, use AddNode instead.
func (rttc *raftTransportTestContext) AddNodeWithoutGossip(
	nodeID roachpb.NodeID,
) (*storage.RaftTransport, net.Addr) {
	grpcServer := rpc.NewServer(rttc.nodeRPCContext)
	ln, err := netutil.ListenAndServeGRPC(rttc.stopper, grpcServer, util.TestAddr)
	if err != nil {
		rttc.t.Fatal(err)
	}
	// The transport resolves peer addresses through the shared gossip.
	transport := storage.NewRaftTransport(storage.GossipAddressResolver(rttc.gossip),
		grpcServer, rttc.nodeRPCContext)
	rttc.transports[nodeID] = transport
	return transport, ln.Addr()
}
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	// Effectively disable the scanner for the duration of the test.
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(nodeRPCContext)
	grpcServer := grpc.NewServer()
	tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	// Multiplex gRPC and the legacy RPC server on one listener.
	ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipBootstrapResolvers, stopper)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(grpcServer, ln.Addr())
	}
	ctx.Gossip = g
	// Retries should give up when the stopper begins draining.
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	distSender := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	tracer := tracing.NewTracer()
	sender := kv.NewTxnCoordSender(distSender, ctx.Clock, false, tracer, stopper)
	ctx.DB = client.NewDB(sender)
	// TODO(bdarnell): arrange to have the transport closed.
	// (or attach LocalRPCTransport.Close to the stopper)
	ctx.Transport = storage.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	ctx.Tracer = tracer
	node := NewNode(ctx, metric.NewRegistry(), stopper, nil)
	return rpcServer, ln.Addr(), ctx.Clock, node, stopper
}
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Context, initSender InitSenderFn) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	tracer := tracing.NewTracer()
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(baseCtx, ltc.Clock, ltc.Stopper)
	server := rpc.NewServer(rpcContext) // never started
	ltc.Gossip = gossip.New(
		context.Background(), rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
	// In-memory engine capped at 50 MiB.
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)
	ltc.Stores = storage.NewStores(ltc.Clock)
	ltc.Sender = initSender(nodeDesc, tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper, ltc.Gossip)
	if ltc.DBContext == nil {
		dbCtx := client.DefaultDBContext()
		ltc.DBContext = &dbCtx
	}
	ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	if ltc.RangeRetryOptions != nil {
		ctx.RangeRetryOptions = *ltc.RangeRetryOptions
	}
	ctx.Ctx = tracing.WithTracer(context.Background(), tracer)
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	// Bootstrap the store, its first range, and start it.
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	// Publish the node's identity to gossip last.
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
// startGossip creates local and remote gossip instances. // The remote gossip instance launches its gossip service. func startGossip(t *testing.T) (local, remote *Gossip, stopper *stop.Stopper) { lclock := hlc.NewClock(hlc.UnixNano) stopper = stop.NewStopper() lRPCContext := rpc.NewContext(&base.Context{Insecure: true}, lclock, stopper) laddr := util.CreateTestAddr("tcp") lserver := rpc.NewServer(laddr, lRPCContext) if err := lserver.Start(); err != nil { t.Fatal(err) } local = New(lRPCContext, gossipInterval, TestBootstrap) if err := local.SetNodeDescriptor(&proto.NodeDescriptor{ NodeID: 1, Address: util.MakeUnresolvedAddr(laddr.Network(), laddr.String()), }); err != nil { t.Fatal(err) } rclock := hlc.NewClock(hlc.UnixNano) raddr := util.CreateTestAddr("tcp") rRPCContext := rpc.NewContext(&base.Context{Insecure: true}, rclock, stopper) rserver := rpc.NewServer(raddr, rRPCContext) if err := rserver.Start(); err != nil { t.Fatal(err) } remote = New(rRPCContext, gossipInterval, TestBootstrap) if err := local.SetNodeDescriptor(&proto.NodeDescriptor{ NodeID: 2, Address: util.MakeUnresolvedAddr(raddr.Network(), raddr.String()), }); err != nil { t.Fatal(err) } local.start(lserver, stopper) remote.start(rserver, stopper) time.Sleep(time.Millisecond) return }
// startFakeServerGossips creates a local gossip instance and a remote
// faked gossip instance. The remote instance launches its faked gossip
// service purely so the test can inspect the client messages it receives.
func startFakeServerGossips(t *testing.T) (*Gossip, *fakeGossipServer, *stop.Stopper) {
	stopper := stop.NewStopper()
	// Local side: a real gossip instance served over gRPC.
	lRPCContext := rpc.NewContext(&base.Context{Insecure: true}, nil, stopper)
	lserver := rpc.NewServer(lRPCContext)
	local := New(context.TODO(), lRPCContext, lserver, nil, stopper, metric.NewRegistry())
	lln, err := netutil.ListenAndServeGRPC(stopper, lserver, util.TestAddr)
	if err != nil {
		t.Fatal(err)
	}
	local.start(lln.Addr())
	// Remote side: a fake gossip server on its own gRPC server.
	rRPCContext := rpc.NewContext(&base.Context{Insecure: true}, nil, stopper)
	rserver := rpc.NewServer(rRPCContext)
	rln, err := netutil.ListenAndServeGRPC(stopper, rserver, util.TestAddr)
	if err != nil {
		t.Fatal(err)
	}
	remote := newFakeGossipServer(rserver, stopper)
	// Record the actual (possibly ephemeral) listen address on the fake.
	addr := rln.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())
	return local, remote, stopper
}