// startFakeServerGossip creates a local gossip instance and a remote,
// faked gossip instance. The remote gossip instance launches its faked
// gossip service just to check the client message.
func startFakeServerGossip(t *testing.T) (local *Gossip, remote *fakeGossipServer, stopper *stop.Stopper) {
	lclock := hlc.NewClock(hlc.UnixNano)
	stopper = stop.NewStopper()
	lRPCContext := rpc.NewContext(&base.Context{Insecure: true}, lclock, stopper)

	laddr := util.CreateTestAddr("tcp")
	lserver := rpc.NewServer(laddr, lRPCContext)
	if err := lserver.Start(); err != nil {
		t.Fatal(err)
	}
	local = New(lRPCContext, TestBootstrap)
	local.start(lserver, stopper)

	rclock := hlc.NewClock(hlc.UnixNano)
	raddr := util.CreateTestAddr("tcp")
	rRPCContext := rpc.NewContext(&base.Context{Insecure: true}, rclock, stopper)
	rserver := rpc.NewServer(raddr, rRPCContext)
	if err := rserver.Start(); err != nil {
		t.Fatal(err)
	}

	remote, err := newFakeGossipServer(rserver, stopper)
	if err != nil {
		t.Fatal(err)
	}
	addr := rserver.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())
	time.Sleep(time.Millisecond)
	return
}
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, *hlc.Clock, *Node, *util.Stopper) {
	var err error
	ctx := storage.StoreContext{}

	stopper := util.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	serverContext := rpc.NewContext(serverTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(addr, serverContext)
	if err := rpcServer.Start(); err != nil {
		t.Fatal(err)
	}
	rpcContext := rpc.NewContext(serverTestBaseContext, ctx.Clock, stopper)
	g := gossip.New(rpcContext, testContext.GossipInterval, testContext.GossipBootstrapResolvers)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS == addr {
			gossipBS = rpcServer.Addr()
		}
		g.SetResolvers([]resolver.Resolver{resolver.NewResolverFromAddress(gossipBS)})
		g.Start(rpcServer, stopper)
	}
	ctx.Gossip = g
	sender := kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock}, g)
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	// TODO(bdarnell): arrange to have the transport closed.
	ctx.Transport = multiraft.NewLocalRPCTransport()
	ctx.EventFeed = &util.Feed{}
	node := NewNode(ctx)
	return rpcServer, ctx.Clock, node, stopper
}
// startFakeServerGossips creates a local gossip instance and a remote,
// faked gossip instance. The remote gossip instance launches its
// faked gossip service just to check the client message.
func startFakeServerGossips(t *testing.T) (local *Gossip, remote *fakeGossipServer, stopper *stop.Stopper) {
	stopper = stop.NewStopper()
	lRPCContext := rpc.NewContext(&base.Context{Insecure: true}, nil, stopper)

	lserver := rpc.NewServer(lRPCContext)
	lln, err := util.ListenAndServeGRPC(stopper, lserver, util.TestAddr)
	if err != nil {
		t.Fatal(err)
	}

	local = New(lRPCContext, nil, stopper)
	local.start(lserver, lln.Addr())

	rRPCContext := rpc.NewContext(&base.Context{Insecure: true}, nil, stopper)

	rserver := rpc.NewServer(rRPCContext)
	rln, err := util.ListenAndServeGRPC(stopper, rserver, util.TestAddr)
	if err != nil {
		t.Fatal(err)
	}

	remote = newFakeGossipServer(rserver, stopper)
	addr := rln.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())
	return
}
// startGossip creates local and remote gossip instances.
// Both remote and local instances launch the gossip service.
func startGossip(t *testing.T) (local, remote *Gossip, stopper *stop.Stopper) {
	stopper = stop.NewStopper()
	lclock := hlc.NewClock(hlc.UnixNano)
	lRPCContext := rpc.NewContext(&base.Context{Insecure: true}, lclock, stopper)

	laddr := util.CreateTestAddr("tcp")
	lserver := rpc.NewServer(lRPCContext)
	lTLSConfig, err := lRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	lln, err := util.ListenAndServe(stopper, lserver, laddr, lTLSConfig)
	if err != nil {
		t.Fatal(err)
	}
	local = New(lRPCContext, TestBootstrap)
	local.SetNodeID(1)
	if err := local.SetNodeDescriptor(&roachpb.NodeDescriptor{
		NodeID:  1,
		Address: util.MakeUnresolvedAddr(laddr.Network(), laddr.String()),
	}); err != nil {
		t.Fatal(err)
	}

	rclock := hlc.NewClock(hlc.UnixNano)
	rRPCContext := rpc.NewContext(&base.Context{Insecure: true}, rclock, stopper)

	raddr := util.CreateTestAddr("tcp")
	rserver := rpc.NewServer(rRPCContext)
	rTLSConfig, err := rRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	rln, err := util.ListenAndServe(stopper, rserver, raddr, rTLSConfig)
	if err != nil {
		t.Fatal(err)
	}
	remote = New(rRPCContext, TestBootstrap)
	remote.SetNodeID(2)
	if err := remote.SetNodeDescriptor(&roachpb.NodeDescriptor{
		NodeID:  2,
		Address: util.MakeUnresolvedAddr(raddr.Network(), raddr.String()),
	}); err != nil {
		t.Fatal(err)
	}
	local.start(lserver, lln.Addr(), stopper)
	remote.start(rserver, rln.Addr(), stopper)
	time.Sleep(time.Millisecond)
	return
}
// createTestStoreWithEngine creates a test store using the given engine and clock.
// The caller is responsible for closing the store on exit.
func createTestStoreWithEngine(t *testing.T, eng engine.Engine, clock *hlc.Clock, bootstrap bool) *storage.Store {
	rpcContext := rpc.NewContext(hlc.NewClock(hlc.UnixNano), rpc.LoadInsecureTLSConfig())
	g := gossip.New(rpcContext, gossip.TestInterval, "")
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, clock, false)
	db := client.NewKV(sender, nil)
	db.User = storage.UserRoot
	// TODO(bdarnell): arrange to have the transport closed.
	store := storage.NewStore(clock, eng, db, g, multiraft.NewLocalRPCTransport())
	if bootstrap {
		if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}); err != nil {
			t.Fatal(err)
		}
	}
	lSender.AddStore(store)
	if bootstrap {
		if err := store.BootstrapRange(); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(); err != nil {
		t.Fatal(err)
	}
	return store
}
// TestClientNodeID verifies that a client sends its gossip request with the correct NodeID.
func TestClientNodeID(t *testing.T) {
	defer leaktest.AfterTest(t)()

	local, remote, stopper := startFakeServerGossips(t)
	nodeID := roachpb.NodeID(1)
	local.SetNodeID(nodeID)

	disconnected := make(chan *client, 1)

	// Use an insecure context. We're talking to a TCP socket which is not in the certs.
	lclock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(&base.Context{Insecure: true}, lclock, stopper)

	// Start a gossip client.
	c := newClient(&remote.nodeAddr)
	defer func() {
		stopper.Stop()
		if c != <-disconnected {
			t.Errorf("expected client disconnect after remote close")
		}
	}()
	c.start(local, disconnected, rpcContext, stopper)

	// Wait for c.gossip to start.
	if receivedNodeID := <-remote.nodeIDChan; receivedNodeID != nodeID {
		t.Errorf("client should send NodeID with %v, got %v", nodeID, receivedNodeID)
	}
}
// NewNetwork creates nodeCount gossip nodes.
func NewNetwork(nodeCount int) *Network {
	clock := hlc.NewClock(hlc.UnixNano)

	log.Infof("simulating gossip network with %d nodes", nodeCount)

	n := &Network{
		Nodes:   []*Node{},
		Stopper: stop.NewStopper(),
	}
	n.rpcContext = rpc.NewContext(&base.Context{Insecure: true}, clock, n.Stopper)
	var err error
	n.tlsConfig, err = n.rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}

	for i := 0; i < nodeCount; i++ {
		node, err := n.CreateNode()
		if err != nil {
			log.Fatal(err)
		}
		// Build a resolver for each instance or we'll get data races.
		r, err := resolver.NewResolverFromAddress(n.Nodes[0].Addr)
		if err != nil {
			log.Fatalf("bad gossip address %s: %s", n.Nodes[0].Addr, err)
		}
		node.Gossip.SetResolvers([]resolver.Resolver{r})
		if err := n.StartNode(node); err != nil {
			log.Fatal(err)
		}
	}
	return n
}
// createTestStoreWithEngine creates a test store using the given engine and clock.
// The caller is responsible for closing the store on exit.
func createTestStoreWithEngine(t *testing.T, eng engine.Engine, clock *hlc.Clock,
	bootstrap bool, context *storage.StoreContext) (*storage.Store, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	if context == nil {
		// make a copy
		ctx := storage.TestStoreContext
		context = &ctx
	}
	context.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, clock, false, nil, stopper)
	context.Clock = clock
	context.DB = client.NewDB(sender)
	context.Transport = multiraft.NewLocalRPCTransport(stopper)
	// TODO(bdarnell): arrange to have the transport closed.
	store := storage.NewStore(*context, eng, &proto.NodeDescriptor{NodeID: 1})
	if bootstrap {
		if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
			t.Fatal(err)
		}
	}
	lSender.AddStore(store)
	if bootstrap {
		if err := store.BootstrapRange(nil); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(stopper); err != nil {
		t.Fatal(err)
	}
	return store, stopper
}
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()

	// Set up a fake zone config handler.
	config.TestingSetupZoneConfigHook(stopper)

	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ctx.StorePool = NewStorePool(ctx.Gossip, TestTimeUntilStoreDeadOff, stopper)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(roachpb.Attributes{}, 10<<20, stopper)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	ctx.DB = client.NewDB(sender)
	store := NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(roachpb.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(nil); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
// createTestStoreWithEngine creates a test store using the given engine and clock.
// The caller is responsible for closing the store on exit.
func createTestStoreWithEngine(t *testing.T, eng engine.Engine, clock *hlc.Clock,
	bootstrap bool, sCtx *storage.StoreContext) (*storage.Store, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{}, clock, stopper)
	if sCtx == nil {
		// make a copy
		ctx := storage.TestStoreContext
		sCtx = &ctx
	}
	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	sCtx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	localSender := kv.NewLocalSender()
	rpcSend := func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message,
		getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		call := proto.Call{
			Args:  getArgs(nil /* net.Addr */).(proto.Request),
			Reply: getReply().(proto.Response),
		}
		localSender.Send(context.Background(), call)
		return []gogoproto.Message{call.Reply}, call.Reply.Header().GoError()
	}

	// Mostly makes sure that we don't see a warning per request.
	{
		if err := sCtx.Gossip.AddInfoProto(gossip.MakeNodeIDKey(nodeDesc.NodeID), nodeDesc, time.Hour); err != nil {
			t.Fatal(err)
		}
		if err := sCtx.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
			t.Fatal(err)
		}
	}

	distSender := kv.NewDistSender(&kv.DistSenderContext{
		Clock:             clock,
		RPCSend:           rpcSend,     // defined above
		RangeDescriptorDB: localSender, // for descriptor lookup
	}, sCtx.Gossip)
	sender := kv.NewTxnCoordSender(distSender, clock, false, nil, stopper)
	sCtx.Clock = clock
	sCtx.DB = client.NewDB(sender)
	sCtx.Transport = multiraft.NewLocalRPCTransport(stopper)
	// TODO(bdarnell): arrange to have the transport closed.
	store := storage.NewStore(*sCtx, eng, nodeDesc)
	if bootstrap {
		if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
			t.Fatal(err)
		}
	}
	localSender.AddStore(store)
	if bootstrap {
		if err := store.BootstrapRange(sql.GetInitialSystemValues()); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(stopper); err != nil {
		t.Fatal(err)
	}
	return store, stopper
}
// NewNetwork creates nodeCount gossip nodes.
func NewNetwork(nodeCount int, createResolvers bool) *Network {
	log.Infof(context.TODO(), "simulating gossip network with %d nodes", nodeCount)

	n := &Network{
		Nodes:   []*Node{},
		Stopper: stop.NewStopper(),
	}
	n.rpcContext = rpc.NewContext(&base.Context{Insecure: true}, nil, n.Stopper)
	var err error
	n.tlsConfig, err = n.rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(context.TODO(), err)
	}

	for i := 0; i < nodeCount; i++ {
		node, err := n.CreateNode()
		if err != nil {
			log.Fatal(context.TODO(), err)
		}
		// Build a resolver for each instance or we'll get data races.
		if createResolvers {
			r, err := resolver.NewResolverFromAddress(n.Nodes[0].Addr())
			if err != nil {
				log.Fatalf(context.TODO(), "bad gossip address %s: %s", n.Nodes[0].Addr(), err)
			}
			node.Gossip.SetResolvers([]resolver.Resolver{r})
		}
	}
	return n
}
// TestGossipInfoStore verifies operation of gossip instance infostore.
func TestGossipInfoStore(t *testing.T) {
	defer leaktest.AfterTest(t)
	rpcContext := rpc.NewContext(rootTestBaseContext, hlc.NewClock(hlc.UnixNano), nil)
	g := New(rpcContext, TestInterval, TestBootstrap)
	if err := g.AddInfo("i", int64(1), time.Hour); err != nil {
		t.Fatal(err)
	}
	if val, err := g.GetInfo("i"); val.(int64) != int64(1) || err != nil {
		t.Errorf("error fetching int64: %v", err)
	}
	if _, err := g.GetInfo("i2"); err == nil {
		t.Errorf("expected error fetching nonexistent key \"i2\"")
	}
	if err := g.AddInfo("f", float64(3.14), time.Hour); err != nil {
		t.Fatal(err)
	}
	if val, err := g.GetInfo("f"); val.(float64) != float64(3.14) || err != nil {
		t.Errorf("error fetching float64: %v", err)
	}
	if _, err := g.GetInfo("f2"); err == nil {
		t.Errorf("expected error fetching nonexistent key \"f2\"")
	}
	if err := g.AddInfo("s", "b", time.Hour); err != nil {
		t.Fatal(err)
	}
	if val, err := g.GetInfo("s"); val.(string) != "b" || err != nil {
		t.Errorf("error fetching string: %v", err)
	}
	if _, err := g.GetInfo("s2"); err == nil {
		t.Errorf("expected error fetching nonexistent key \"s2\"")
	}
}
// createTestStorePool creates a stopper, gossip and storePool for use in
// tests. Stopper must be stopped by the caller.
func createTestStorePool(timeUntilStoreDead time.Duration) (*stop.Stopper, *gossip.Gossip, *StorePool) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	g := gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	storePool := NewStorePool(g, timeUntilStoreDead, stopper)
	return stopper, g, storePool
}
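A minimal usage sketch (an assumption, not taken from the source): a test calls the helper with the TestTimeUntilStoreDeadOff constant used elsewhere in this collection and defers the stopper to clean up.

// TestStorePoolExample is a hypothetical caller of createTestStorePool.
func TestStorePoolExample(t *testing.T) {
	stopper, g, storePool := createTestStorePool(TestTimeUntilStoreDeadOff)
	// Stopping the stopper shuts down the gossip instance and store pool goroutines.
	defer stopper.Stop()
	_, _ = g, storePool // exercise gossip and the pool here
}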
// TestClientGossip verifies that a client can gossip a delta to the server.
func TestClientGossip(t *testing.T) {
	defer leaktest.AfterTest(t)
	local, remote, stopper := startGossip(t)
	if err := local.AddInfo("local-key", "local value", time.Second); err != nil {
		t.Fatal(err)
	}
	if err := remote.AddInfo("remote-key", "remote value", time.Second); err != nil {
		t.Fatal(err)
	}
	disconnected := make(chan *client, 1)
	client := newClient(remote.is.NodeAddr)

	// Use an insecure context. We're talking to a unix socket which is not in the certs.
	lclock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(insecureTestBaseContext, lclock, stopper)
	client.start(local, disconnected, rpcContext, stopper)

	if err := util.IsTrueWithin(func() bool {
		_, lerr := remote.GetInfo("local-key")
		_, rerr := local.GetInfo("remote-key")
		return lerr == nil && rerr == nil
	}, 500*time.Millisecond); err != nil {
		t.Errorf("gossip exchange failed or took too long")
	}

	stopper.Stop()
	log.Info("done serving")
	if client != <-disconnected {
		t.Errorf("expected client disconnect after remote close")
	}
}
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}

	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(addr, nodeRPCContext)
	if err := rpcServer.Start(); err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipInterval, testContext.GossipBootstrapResolvers)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS == addr {
			gossipBS = rpcServer.Addr()
		}
		g.SetResolvers([]resolver.Resolver{resolver.NewResolverFromAddress(gossipBS)})
		g.Start(rpcServer, stopper)
	}
	ctx.Gossip = g
	sender := (*callDistSender)(kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock}, g))
	ctx.DB = client.NewDB(sender)
	// TODO(bdarnell): arrange to have the transport closed.
	// (or attach LocalRPCTransport.Close to the stopper)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	node := NewNode(ctx)
	return rpcServer, ctx.Clock, node, stopper
}
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(m.clock, rpc.LoadInsecureTLSConfig())
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, "")
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport()
	}
	if m.sender == nil {
		m.sender = kv.NewLocalSender()
	}
	if m.db == nil {
		txnSender := kv.NewTxnCoordSender(m.sender, m.clock, false)
		m.db = client.NewKV(txnSender, nil)
		m.db.User = storage.UserRoot
	}
	for i := 0; i < numStores; i++ {
		m.addStore(t)
	}
}
func TestSpanStatsGRPCResponse(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ts := startServer(t)
	defer ts.Stopper().Stop()

	rpcStopper := stop.NewStopper()
	defer rpcStopper.Stop()
	rpcContext := rpc.NewContext(ts.RPCContext().Context, ts.Clock(), rpcStopper)
	request := serverpb.SpanStatsRequest{
		NodeID:   "1",
		StartKey: []byte(roachpb.RKeyMin),
		EndKey:   []byte(roachpb.RKeyMax),
	}

	url := ts.ServingAddr()
	conn, err := rpcContext.GRPCDial(url)
	if err != nil {
		t.Fatal(err)
	}
	client := serverpb.NewStatusClient(conn)

	response, err := client.SpanStats(context.Background(), &request)
	if err != nil {
		t.Fatal(err)
	}
	if a, e := int(response.RangeCount), ExpectedInitialRangeCount(); a != e {
		t.Errorf("expected %d ranges, found %d", e, a)
	}
}
func gossipForTest(t *testing.T) (*gossip.Gossip, *stop.Stopper) {
	stopper := stop.NewStopper()

	// Set up a fake zone config handler.
	config.TestingSetupZoneConfigHook(stopper)

	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	g := gossip.New(rpcContext, gossip.TestBootstrap)
	// g.SetNodeID must be called before g.AddInfo.
	g.SetNodeID(roachpb.NodeID(1))

	// Put an empty system config into gossip.
	if err := g.AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil {
		t.Fatal(err)
	}

	// Wait for SystemConfig.
	if err := util.IsTrueWithin(func() bool {
		return g.GetSystemConfig() != nil
	}, 100*time.Millisecond); err != nil {
		t.Fatal(err)
	}

	return g, stopper
}
// startGossip creates and starts a gossip instance.
func startGossip(nodeID roachpb.NodeID, stopper *stop.Stopper, t *testing.T) *Gossip {
	clock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(&base.Context{Insecure: true}, clock, stopper)

	addr := util.CreateTestAddr("tcp")
	server := rpc.NewServer(rpcContext)
	tlsConfig, err := rpcContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	ln, err := util.ListenAndServe(stopper, server, addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}

	g := New(rpcContext, TestBootstrap, stopper)
	g.SetNodeID(nodeID)
	if err := g.SetNodeDescriptor(&roachpb.NodeDescriptor{
		NodeID:  nodeID,
		Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
	}); err != nil {
		t.Fatal(err)
	}
	g.start(server, ln.Addr())
	time.Sleep(time.Millisecond)
	return g
}
func gossipForTest(t *testing.T) (*gossip.Gossip, *stop.Stopper) {
	stopper := stop.NewStopper()

	// Set up a fake zone config handler.
	config.TestingSetupZoneConfigHook(stopper)

	rpcContext := rpc.NewContext(nil, nil, stopper)
	g := gossip.New(rpcContext, nil, stopper)
	// g.SetNodeID must be called before g.AddInfo.
	g.SetNodeID(roachpb.NodeID(1))

	// Put an empty system config into gossip.
	if err := g.AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil {
		t.Fatal(err)
	}

	// Wait for SystemConfig.
	util.SucceedsSoon(t, func() error {
		if _, ok := g.GetSystemConfig(); !ok {
			return util.Errorf("expected system config to be set")
		}
		return nil
	})

	return g, stopper
}
func newKVNative(b *testing.B) kvInterface {
	enableTracing := tracing.Disable()
	s, _, _ := serverutils.StartServer(b, base.TestServerArgs{})

	// TestServer.DB() returns the TxnCoordSender wrapped client. But that isn't
	// a fair comparison with SQL as we want these client requests to be sent
	// over the network.
	sender, err := client.NewSender(
		rpc.NewContext(&base.Context{
			User:       security.NodeUser,
			SSLCA:      filepath.Join(security.EmbeddedCertsDir, security.EmbeddedCACert),
			SSLCert:    filepath.Join(security.EmbeddedCertsDir, "node.crt"),
			SSLCertKey: filepath.Join(security.EmbeddedCertsDir, "node.key"),
		}, nil, s.Stopper()),
		s.ServingAddr())
	if err != nil {
		b.Fatal(err)
	}

	return &kvNative{
		db: client.NewDB(sender),
		doneFn: func() {
			s.Stopper().Stop()
			enableTracing()
		},
	}
}
// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(stopper *stop.Stopper, nodeCount int) *Cluster {
	rand, seed := randutil.NewPseudoRand()
	clock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(&base.Context{}, clock, stopper)
	g := gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	storePool := storage.NewStorePool(g, storage.TestTimeUntilStoreDeadOff, stopper)
	c := &Cluster{
		stopper:       stopper,
		clock:         clock,
		rpc:           rpcContext,
		gossip:        g,
		storePool:     storePool,
		allocator:     storage.MakeAllocator(storePool, storage.RebalancingOptions{}),
		storeGossiper: gossiputil.NewStoreGossiper(g),
		nodes:         make(map[proto.NodeID]*Node),
		stores:        make(map[proto.StoreID]*Store),
		ranges:        make(map[proto.RangeID]*Range),
		rand:          rand,
		seed:          seed,
	}

	// Add the nodes.
	for i := 0; i < nodeCount; i++ {
		c.addNewNodeWithStore()
	}

	// Add a single range and attach it to the first node's first store.
	firstRange := c.addRange()
	firstRange.attachRangeToStore(c.stores[proto.StoreID(0)])
	return c
}
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(rootTestBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 10<<20)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	var err error
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewRootTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)
	ltc.lSender = newRetryableLocalSender(NewLocalSender())
	ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, nil, ltc.Stopper)
	var err error
	if ltc.DB, err = client.Open("//root@", client.SenderOpt(ltc.Sender)); err != nil {
		t.Fatal(err)
	}
	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.lSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
func gossipForTest(t *testing.T) (*gossip.Gossip, *stop.Stopper) {
	stopper := stop.NewStopper()

	// Set up a fake zone config handler.
	config.TestingSetupZoneConfigHook(stopper)

	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	g := gossip.New(rpcContext, gossip.TestBootstrap, stopper)
	// g.SetNodeID must be called before g.AddInfo.
	g.SetNodeID(roachpb.NodeID(1))

	// Put an empty system config into gossip.
	if err := g.AddInfoProto(gossip.KeySystemConfig, &config.SystemConfig{}, 0); err != nil {
		t.Fatal(err)
	}

	// Wait for SystemConfig.
	util.SucceedsSoon(t, func() error {
		if g.GetSystemConfig() == nil {
			return util.Errorf("expected non-nil system config")
		}
		return nil
	})

	return g, stopper
}
// TestClientGossip verifies that a client can gossip a delta to the server.
func TestClientGossip(t *testing.T) {
	defer leaktest.AfterTest(t)
	local, remote, stopper := startGossip(t)
	disconnected := make(chan *client, 1)
	client := newClient(remote.is.NodeAddr)

	defer func() {
		stopper.Stop()
		if client != <-disconnected {
			t.Errorf("expected client disconnect after remote close")
		}
	}()

	if err := local.AddInfo("local-key", nil, time.Second); err != nil {
		t.Fatal(err)
	}
	if err := remote.AddInfo("remote-key", nil, time.Second); err != nil {
		t.Fatal(err)
	}

	// Use an insecure context. We're talking to a TCP socket which is not in the certs.
	lclock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(&base.Context{Insecure: true}, lclock, stopper)
	client.start(local, disconnected, rpcContext, stopper)

	util.SucceedsWithin(t, 500*time.Millisecond, func() error {
		if _, err := remote.GetInfo("local-key"); err != nil {
			return err
		}
		if _, err := local.GetInfo("remote-key"); err != nil {
			return err
		}
		return nil
	})
}
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20, ltc.Stopper)

	ltc.localSender = NewLocalSender()
	var rpcSend rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message,
		getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		// TODO(tschottdorf): remove getReply().
		br, pErr := ltc.localSender.Send(context.Background(), *getArgs(nil).(*proto.BatchRequest))
		if br == nil {
			br = &proto.BatchResponse{}
		}
		if br.Error != nil {
			panic(proto.ErrorUnexpectedlySet(ltc.localSender, br))
		}
		br.Error = pErr
		return []gogoproto.Message{br}, nil
	}
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock: ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &defaultRPCRetryOptions,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,         // defined above
		RangeDescriptorDB:        ltc.localSender, // for descriptor lookup
	}, ltc.Gossip)
	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, nil /* tracer */, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)

	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.localSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
// createTestAllocator creates a stopper, gossip, store pool and allocator for
// use in tests. Stopper must be stopped by the caller.
func createTestAllocator() (*stop.Stopper, *gossip.Gossip, *StorePool, Allocator) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	g := gossip.New(rpcContext, gossip.TestBootstrap)
	storePool := NewStorePool(g, TestTimeUntilStoreDeadOff, stopper)
	a := MakeAllocator(storePool, RebalancingOptions{AllowRebalance: true})
	return stopper, g, storePool, a
}
// createTestAllocator creates a stopper, gossip, store pool and allocator for
// use in tests. Stopper must be stopped by the caller.
func createTestAllocator() (*stop.Stopper, *gossip.Gossip, *StorePool, allocator) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	g := gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	storePool := NewStorePool(g, TestTimeUntilStoreDeadOff, stopper)
	a := makeAllocator(storePool)
	return stopper, g, storePool, a
}
// newNodeTestContext returns an rpc.Context for testing.
// It is meant to be used by nodes.
func newNodeTestContext(clock *hlc.Clock, stopper *stop.Stopper) *rpc.Context {
	if clock == nil {
		clock = hlc.NewClock(hlc.UnixNano)
	}
	ctx := rpc.NewContext(testutils.NewNodeTestBaseContext(), clock, stopper)
	ctx.HeartbeatInterval = 10 * time.Millisecond
	ctx.HeartbeatTimeout = 5 * time.Second
	return ctx
}
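A minimal usage sketch (an assumption, not taken from the source): a test builds the context from a fresh stopper and lets the stopper handle cleanup; passing a nil clock makes the helper fall back to a real-time hlc.Clock.

// TestNodeContextExample is a hypothetical caller of newNodeTestContext.
func TestNodeContextExample(t *testing.T) {
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeCtx := newNodeTestContext(nil /* clock */, stopper)
	_ = nodeCtx // dial peers or start servers using the shortened heartbeat settings
}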