// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(
	addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	cfg := storage.StoreConfig{}

	stopper := stop.NewStopper()
	cfg.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
	// Use very long scan/consistency-check intervals so these background
	// queues stay effectively idle for the lifetime of a test.
	cfg.ScanInterval = 10 * time.Hour
	cfg.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCfg := makeTestConfig()
	cfg.Gossip = gossip.NewTest(
		0, // node ID; NOTE(review): presumably 0 means "not yet allocated" — confirm against gossip.NewTest.
		nodeRPCContext,
		grpcServer,
		serverCfg.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry(),
	)
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			// The caller asked us to gossip to ourselves; substitute the
			// actual address the listener bound to.
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		cfg.Gossip.SetResolvers([]resolver.Resolver{r})
		cfg.Gossip.Start(ln.Addr())
	}
	// Stop retry loops promptly on shutdown instead of spinning against a
	// dead server.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(kv.DistSenderConfig{
		Clock:           cfg.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, cfg.Gossip)
	cfg.AmbientCtx.Tracer = tracing.NewTracer()
	sender := kv.NewTxnCoordSender(
		cfg.AmbientCtx,
		distSender,
		cfg.Clock,
		false, // linearizable
		stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval),
	)
	cfg.DB = client.NewDB(sender)
	cfg.Transport = storage.NewDummyRaftTransport()
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
// TestCorruptedClusterID verifies that a node fails to start when a // store's cluster ID is empty. func TestCorruptedClusterID(t *testing.T) { defer leaktest.AfterTest(t)() e := engine.NewInMem(roachpb.Attributes{}, 1<<20) defer e.Close() if _, err := bootstrapCluster( storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval), ); err != nil { t.Fatal(err) } // Set the cluster ID to the empty UUID. sIdent := roachpb.StoreIdent{ ClusterID: uuid.UUID{}, NodeID: 1, StoreID: 1, } if err := engine.MVCCPutProto(context.Background(), e, nil, keys.StoreIdentKey(), hlc.ZeroTimestamp, nil, &sIdent); err != nil { t.Fatal(err) } engines := []engine.Engine{e} _, serverAddr, _, node, stopper := createTestNode(util.TestAddr, engines, nil, t) stopper.Stop() if err := node.start(context.Background(), serverAddr, engines, roachpb.Attributes{}, roachpb.Locality{}); !testutils.IsError(err, "unidentified store") { t.Errorf("unexpected error %v", err) } }
// TestBootstrapNewStore starts a cluster with two unbootstrapped // stores and verifies both stores are added and started. func TestBootstrapNewStore(t *testing.T) { defer leaktest.AfterTest(t)() e := engine.NewInMem(roachpb.Attributes{}, 1<<20) if _, err := bootstrapCluster( storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval), ); err != nil { t.Fatal(err) } // Start a new node with two new stores which will require bootstrapping. engines := Engines([]engine.Engine{ e, engine.NewInMem(roachpb.Attributes{}, 1<<20), engine.NewInMem(roachpb.Attributes{}, 1<<20), }) defer engines.Close() _, _, node, stopper := createAndStartTestNode( util.TestAddr, engines, util.TestAddr, roachpb.Locality{}, t, ) defer stopper.Stop() // Non-initialized stores (in this case the new in-memory-based // store) will be bootstrapped by the node upon start. This happens // in a goroutine, so we'll have to wait a bit until we can find the // new node. util.SucceedsSoon(t, func() error { if n := node.stores.GetStoreCount(); n != 3 { return errors.Errorf("expected 3 stores but got %d", n) } return nil }) // Check whether all stores are started properly. if err := node.stores.VisitStores(func(s *storage.Store) error { if !s.IsStarted() { return errors.Errorf("fail to start store: %s", s) } return nil }); err != nil { t.Error(err) } }
// TestBootstrapCluster verifies the results of bootstrapping a // cluster. Uses an in memory engine. func TestBootstrapCluster(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() defer stopper.Stop() e := engine.NewInMem(roachpb.Attributes{}, 1<<20) stopper.AddCloser(e) if _, err := bootstrapCluster( storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval), ); err != nil { t.Fatal(err) } // Scan the complete contents of the local database directly from the engine. rows, _, _, err := engine.MVCCScan(context.Background(), e, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, true, nil) if err != nil { t.Fatal(err) } var foundKeys keySlice for _, kv := range rows { foundKeys = append(foundKeys, kv.Key) } var expectedKeys = keySlice{ testutils.MakeKey(roachpb.Key("\x02"), roachpb.KeyMax), testutils.MakeKey(roachpb.Key("\x03"), roachpb.KeyMax), roachpb.Key("\x04node-idgen"), roachpb.Key("\x04store-idgen"), } // Add the initial keys for sql. for _, kv := range GetBootstrapSchema().GetInitialValues() { expectedKeys = append(expectedKeys, kv.Key) } // Resort the list. The sql values are not sorted. sort.Sort(expectedKeys) if !reflect.DeepEqual(foundKeys, expectedKeys) { t.Errorf("expected keys mismatch:\n%s\n -- vs. -- \n\n%s", formatKeys(foundKeys), formatKeys(expectedKeys)) } // TODO(spencer): check values. }
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		splitKeys []roachpb.Key // range split points installed before writing
		keys      []roachpb.Key // keys written, in sorted order
	}{
		{[]roachpb.Key{roachpb.Key("m")},
			[]roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
		{[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
			[]roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
				roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
	}

	for i, tc := range testCases {
		s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
		// NOTE(review): defer inside a loop — every server stays up until the
		// whole test returns. Harmless with two cases, but worth confirming
		// this was intentional rather than extracting the body into a helper.
		defer s.Stopper().Stop()
		ts := s.(*TestServer)
		retryOpts := base.DefaultRetryOptions()
		retryOpts.Closer = ts.stopper.ShouldQuiesce()
		ds := kv.NewDistSender(kv.DistSenderConfig{
			Clock:           s.Clock(),
			RPCContext:      s.RPCContext(),
			RPCRetryOptions: &retryOpts,
		}, ts.Gossip())
		ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
		tds := kv.NewTxnCoordSender(
			ambient,
			ds,
			ts.Clock(),
			ts.Cfg.Linearizable,
			ts.stopper,
			kv.MakeTxnMetrics(metric.TestSampleInterval),
		)

		// Install the splits so scans below span multiple ranges.
		for _, sk := range tc.splitKeys {
			if err := ts.node.storeCfg.DB.AdminSplit(context.TODO(), sk); err != nil {
				t.Fatal(err)
			}
		}

		// Write each key with itself as the value.
		for _, k := range tc.keys {
			put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
			if _, err := client.SendWrapped(context.Background(), tds, put); err != nil {
				t.Fatal(err)
			}
		}

		// Try every possible ScanRequest startKey.
		for start := 0; start < len(tc.keys); start++ {
			// Try every possible maxResults, from 1 to beyond the size of key array.
			for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
				scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next())
				reply, err := client.SendWrappedWith(
					context.Background(), tds, roachpb.Header{MaxSpanRequestKeys: int64(maxResults)}, scan,
				)
				if err != nil {
					t.Fatal(err)
				}
				rows := reply.(*roachpb.ScanResponse).Rows
				// When maxResults fits within the remaining keys we expect
				// exactly maxResults rows; one past the end yields all
				// remaining (maxResults-1) rows.
				if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
					t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
				} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
					t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
				}
			}
		}
	}
}
// TestMultiRangeScanDeleteRange tests that commands which access multiple // ranges are carried out properly. func TestMultiRangeScanDeleteRange(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() ts := s.(*TestServer) retryOpts := base.DefaultRetryOptions() retryOpts.Closer = ts.stopper.ShouldQuiesce() ds := kv.NewDistSender(kv.DistSenderConfig{ Clock: s.Clock(), RPCContext: s.RPCContext(), RPCRetryOptions: &retryOpts, }, ts.Gossip()) ambient := log.AmbientContext{Tracer: tracing.NewTracer()} tds := kv.NewTxnCoordSender( ambient, ds, s.Clock(), ts.Cfg.Linearizable, ts.stopper, kv.MakeTxnMetrics(metric.TestSampleInterval), ) if err := ts.node.storeCfg.DB.AdminSplit(context.TODO(), "m"); err != nil { t.Fatal(err) } writes := []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")} get := &roachpb.GetRequest{ Span: roachpb.Span{Key: writes[0]}, } get.EndKey = writes[len(writes)-1] if _, err := client.SendWrapped(context.Background(), tds, get); err == nil { t.Errorf("able to call Get with a key range: %v", get) } var delTS hlc.Timestamp for i, k := range writes { put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k)) if _, err := client.SendWrapped(context.Background(), tds, put); err != nil { t.Fatal(err) } scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next()) reply, err := client.SendWrapped(context.Background(), tds, scan) if err != nil { t.Fatal(err) } sr := reply.(*roachpb.ScanResponse) if sr.Txn != nil { // This was the other way around at some point in the past. // Same below for Delete, etc. 
t.Errorf("expected no transaction in response header") } if rows := sr.Rows; len(rows) != i+1 { t.Fatalf("expected %d rows, but got %d", i+1, len(rows)) } } del := &roachpb.DeleteRangeRequest{ Span: roachpb.Span{ Key: writes[0], EndKey: roachpb.Key(writes[len(writes)-1]).Next(), }, ReturnKeys: true, } reply, err := client.SendWrappedWith(context.Background(), tds, roachpb.Header{Timestamp: delTS}, del) if err != nil { t.Fatal(err) } dr := reply.(*roachpb.DeleteRangeResponse) if dr.Txn != nil { t.Errorf("expected no transaction in response header") } if !reflect.DeepEqual(dr.Keys, writes) { t.Errorf("expected %d keys to be deleted, but got %d instead", writes, dr.Keys) } scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next()) txn := &roachpb.Transaction{Name: "MyTxn"} reply, err = client.SendWrappedWith(context.Background(), tds, roachpb.Header{Txn: txn}, scan) if err != nil { t.Fatal(err) } sr := reply.(*roachpb.ScanResponse) if txn := sr.Txn; txn == nil || txn.Name != "MyTxn" { t.Errorf("wanted Txn to persist, but it changed to %v", txn) } if rows := sr.Rows; len(rows) > 0 { t.Fatalf("scan after delete returned rows: %v", rows) } }
// NewServer creates a Server from a server.Context. func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { if _, err := net.ResolveTCPAddr("tcp", cfg.AdvertiseAddr); err != nil { return nil, errors.Errorf("unable to resolve RPC address %q: %v", cfg.AdvertiseAddr, err) } if cfg.AmbientCtx.Tracer == nil { cfg.AmbientCtx.Tracer = tracing.NewTracer() } // Try loading the TLS configs before anything else. if _, err := cfg.GetServerTLSConfig(); err != nil { return nil, err } if _, err := cfg.GetClientTLSConfig(); err != nil { return nil, err } s := &Server{ mux: http.NewServeMux(), clock: hlc.NewClock(hlc.UnixNano, cfg.MaxOffset), stopper: stopper, cfg: cfg, } // Add a dynamic log tag value for the node ID. // // We need to pass an ambient context to the various server components, but we // won't know the node ID until we Start(). At that point it's too late to // change the ambient contexts in the components (various background processes // will have already started using them). // // NodeIDContainer allows us to add the log tag to the context now and update // the value asynchronously. It's not significantly more expensive than a // regular tag since it's just doing an (atomic) load when a log/trace message // is constructed. The node ID is set by the Store if this host was // bootstrapped; otherwise a new one is allocated in Node. s.cfg.AmbientCtx.AddLogTag("n", &s.nodeIDContainer) ctx := s.AnnotateCtx(context.Background()) if s.cfg.Insecure { log.Warning(ctx, "running in insecure mode, this is strongly discouraged. 
See --insecure.") } s.rpcContext = rpc.NewContext(s.cfg.AmbientCtx, s.cfg.Config, s.clock, s.stopper) s.rpcContext.HeartbeatCB = func() { if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil { log.Fatal(ctx, err) } } s.grpc = rpc.NewServer(s.rpcContext) s.registry = metric.NewRegistry() s.gossip = gossip.New( s.cfg.AmbientCtx, &s.nodeIDContainer, s.rpcContext, s.grpc, s.cfg.GossipBootstrapResolvers, s.stopper, s.registry, ) s.storePool = storage.NewStorePool( s.cfg.AmbientCtx, s.gossip, s.clock, s.rpcContext, s.cfg.TimeUntilStoreDead, s.stopper, /* deterministic */ false, ) // A custom RetryOptions is created which uses stopper.ShouldQuiesce() as // the Closer. This prevents infinite retry loops from occurring during // graceful server shutdown // // Such a loop loop occurs with the DistSender attempts a connection to the // local server during shutdown, and receives an internal server error (HTTP // Code 5xx). This is the correct error for a server to return when it is // shutting down, and is normally retryable in a cluster environment. // However, on a single-node setup (such as a test), retries will never // succeed because the only server has been shut down; thus, thus the // DistSender needs to know that it should not retry in this situation. retryOpts := base.DefaultRetryOptions() retryOpts.Closer = s.stopper.ShouldQuiesce() distSenderCfg := kv.DistSenderConfig{ AmbientCtx: s.cfg.AmbientCtx, Clock: s.clock, RPCContext: s.rpcContext, RPCRetryOptions: &retryOpts, } s.distSender = kv.NewDistSender(distSenderCfg, s.gossip) txnMetrics := kv.MakeTxnMetrics(s.cfg.MetricsSampleInterval) s.registry.AddMetricStruct(txnMetrics) s.txnCoordSender = kv.NewTxnCoordSender( s.cfg.AmbientCtx, s.distSender, s.clock, s.cfg.Linearizable, s.stopper, txnMetrics, ) s.db = client.NewDB(s.txnCoordSender) // Use the range lease expiration and renewal durations as the node // liveness expiration and heartbeat interval. 
active, renewal := storage.RangeLeaseDurations( storage.RaftElectionTimeout(s.cfg.RaftTickInterval, s.cfg.RaftElectionTimeoutTicks)) s.nodeLiveness = storage.NewNodeLiveness( s.cfg.AmbientCtx, s.clock, s.db, s.gossip, active, renewal, ) s.registry.AddMetricStruct(s.nodeLiveness.Metrics()) s.raftTransport = storage.NewRaftTransport( s.cfg.AmbientCtx, storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext, ) s.kvDB = kv.NewDBServer(s.cfg.Config, s.txnCoordSender, s.stopper) roachpb.RegisterExternalServer(s.grpc, s.kvDB) // Set up internal memory metrics for use by internal SQL executors. s.internalMemMetrics = sql.MakeMemMetrics("internal") s.registry.AddMetricStruct(s.internalMemMetrics) // Set up Lease Manager var lmKnobs sql.LeaseManagerTestingKnobs if cfg.TestingKnobs.SQLLeaseManager != nil { lmKnobs = *s.cfg.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs) } s.leaseMgr = sql.NewLeaseManager(&s.nodeIDContainer, *s.db, s.clock, lmKnobs, s.stopper, &s.internalMemMetrics) s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip) // Set up the DistSQL server distSQLCfg := distsql.ServerConfig{ AmbientContext: s.cfg.AmbientCtx, DB: s.db, RPCContext: s.rpcContext, Stopper: s.stopper, } s.distSQLServer = distsql.NewServer(distSQLCfg) distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer) // Set up admin memory metrics for use by admin SQL executors. 
s.adminMemMetrics = sql.MakeMemMetrics("admin") s.registry.AddMetricStruct(s.adminMemMetrics) // Set up Executor execCfg := sql.ExecutorConfig{ AmbientCtx: s.cfg.AmbientCtx, NodeID: &s.nodeIDContainer, DB: s.db, Gossip: s.gossip, LeaseManager: s.leaseMgr, Clock: s.clock, DistSQLSrv: s.distSQLServer, MetricsSampleInterval: s.cfg.MetricsSampleInterval, } if s.cfg.TestingKnobs.SQLExecutor != nil { execCfg.TestingKnobs = s.cfg.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs) } else { execCfg.TestingKnobs = &sql.ExecutorTestingKnobs{} } if s.cfg.TestingKnobs.SQLSchemaChanger != nil { execCfg.SchemaChangerTestingKnobs = s.cfg.TestingKnobs.SQLSchemaChanger.(*sql.SchemaChangerTestingKnobs) } else { execCfg.SchemaChangerTestingKnobs = &sql.SchemaChangerTestingKnobs{} } s.sqlExecutor = sql.NewExecutor(execCfg, s.stopper, &s.adminMemMetrics) s.registry.AddMetricStruct(s.sqlExecutor) s.pgServer = pgwire.MakeServer( s.cfg.AmbientCtx, s.cfg.Config, s.sqlExecutor, &s.internalMemMetrics, s.cfg.SQLMemoryPoolSize, ) s.registry.AddMetricStruct(s.pgServer.Metrics()) s.tsDB = ts.NewDB(s.db) s.tsServer = ts.MakeServer(s.cfg.AmbientCtx, s.tsDB, s.cfg.TimeSeriesServerConfig, s.stopper) // TODO(bdarnell): make StoreConfig configurable. 
storeCfg := storage.StoreConfig{ AmbientCtx: s.cfg.AmbientCtx, Clock: s.clock, DB: s.db, Gossip: s.gossip, NodeLiveness: s.nodeLiveness, Transport: s.raftTransport, RaftTickInterval: s.cfg.RaftTickInterval, ScanInterval: s.cfg.ScanInterval, ScanMaxIdleTime: s.cfg.ScanMaxIdleTime, ConsistencyCheckInterval: s.cfg.ConsistencyCheckInterval, ConsistencyCheckPanicOnFailure: s.cfg.ConsistencyCheckPanicOnFailure, MetricsSampleInterval: s.cfg.MetricsSampleInterval, StorePool: s.storePool, SQLExecutor: sql.InternalExecutor{ LeaseManager: s.leaseMgr, }, LogRangeEvents: s.cfg.EventLogEnabled, AllocatorOptions: storage.AllocatorOptions{ AllowRebalance: true, }, RangeLeaseActiveDuration: active, RangeLeaseRenewalDuration: renewal, TimeSeriesDataStore: s.tsDB, } if s.cfg.TestingKnobs.Store != nil { storeCfg.TestingKnobs = *s.cfg.TestingKnobs.Store.(*storage.StoreTestingKnobs) } s.recorder = status.NewMetricsRecorder(s.clock) s.registry.AddMetricStruct(s.rpcContext.RemoteClocks.Metrics()) s.runtime = status.MakeRuntimeStatSampler(s.clock) s.registry.AddMetricStruct(s.runtime) s.node = NewNode(storeCfg, s.recorder, s.registry, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr)) roachpb.RegisterInternalServer(s.grpc, s.node) storage.RegisterConsistencyServer(s.grpc, s.node.storesServer) storage.RegisterFreezeServer(s.grpc, s.node.storesServer) s.admin = newAdminServer(s) s.status = newStatusServer( s.cfg.AmbientCtx, s.db, s.gossip, s.recorder, s.rpcContext, s.node.stores, ) for _, gw := range []grpcGatewayServer{s.admin, s.status, &s.tsServer} { gw.RegisterService(s.grpc) } return s, nil }
// TestStartNodeWithLocality creates a new node and store and starts them with a // collection of different localities. func TestStartNodeWithLocality(t *testing.T) { defer leaktest.AfterTest(t)() testLocalityWitNewNode := func(locality roachpb.Locality) { e := engine.NewInMem(roachpb.Attributes{}, 1<<20) defer e.Close() if _, err := bootstrapCluster( storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval), ); err != nil { t.Fatal(err) } _, _, node, stopper := createAndStartTestNode( util.TestAddr, []engine.Engine{e}, util.TestAddr, locality, t, ) defer stopper.Stop() // Check the node to make sure the locality was propagated to its // nodeDescriptor. if !reflect.DeepEqual(node.Descriptor.Locality, locality) { t.Fatalf("expected node locality to be %s, but it was %s", locality, node.Descriptor.Locality) } // Check the store to make sure the locality was propagated to its // nodeDescriptor. if err := node.stores.VisitStores(func(store *storage.Store) error { desc, err := store.Descriptor() if err != nil { t.Fatal(err) } if !reflect.DeepEqual(desc.Node.Locality, locality) { t.Fatalf("expected store's node locality to be %s, but it was %s", locality, desc.Node.Locality) } return nil }); err != nil { t.Fatal(err) } } testCases := []roachpb.Locality{ {}, { Tiers: []roachpb.Tier{ {Key: "a", Value: "b"}, }, }, { Tiers: []roachpb.Tier{ {Key: "a", Value: "b"}, {Key: "c", Value: "d"}, {Key: "e", Value: "f"}, }, }, } for _, testCase := range testCases { testLocalityWitNewNode(testCase) } }
// TestNodeJoin verifies a new node is able to join a bootstrapped
// cluster consisting of one node.
func TestNodeJoin(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Engines outlive the per-node stoppers below, so they get their own
	// stopper whose Stop runs last (deferred first).
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	engineStopper.AddCloser(e)
	if _, err := bootstrapCluster(
		storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
	); err != nil {
		t.Fatal(err)
	}

	// Start the bootstrap node.
	engines1 := []engine.Engine{e}
	_, server1Addr, node1, stopper1 := createAndStartTestNode(
		util.TestAddr,
		engines1,
		util.TestAddr,
		roachpb.Locality{},
		t,
	)
	defer stopper1.Stop()

	// Create a new node. It gossips to the first node's address so it can
	// join the existing cluster.
	e2 := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	engineStopper.AddCloser(e2)
	engines2 := []engine.Engine{e2}
	_, server2Addr, node2, stopper2 := createAndStartTestNode(
		util.TestAddr,
		engines2,
		server1Addr,
		roachpb.Locality{},
		t,
	)
	defer stopper2.Stop()

	// Verify new node is able to bootstrap its store.
	util.SucceedsSoon(t, func() error {
		if sc := node2.stores.GetStoreCount(); sc != 1 {
			return errors.Errorf("GetStoreCount() expected 1; got %d", sc)
		}
		return nil
	})

	// Verify node1 sees node2 via gossip and vice versa.
	node1Key := gossip.MakeNodeIDKey(node1.Descriptor.NodeID)
	node2Key := gossip.MakeNodeIDKey(node2.Descriptor.NodeID)
	util.SucceedsSoon(t, func() error {
		var nodeDesc1 roachpb.NodeDescriptor
		if err := node1.storeCfg.Gossip.GetInfoProto(node2Key, &nodeDesc1); err != nil {
			return err
		}
		if addr2Str, server2AddrStr := nodeDesc1.Address.String(), server2Addr.String(); addr2Str != server2AddrStr {
			return errors.Errorf("addr2 gossip %s doesn't match addr2 address %s", addr2Str, server2AddrStr)
		}
		var nodeDesc2 roachpb.NodeDescriptor
		if err := node2.storeCfg.Gossip.GetInfoProto(node1Key, &nodeDesc2); err != nil {
			return err
		}
		if addr1Str, server1AddrStr := nodeDesc2.Address.String(), server1Addr.String(); addr1Str != server1AddrStr {
			return errors.Errorf("addr1 gossip %s doesn't match addr1 address %s", addr1Str, server1AddrStr)
		}
		return nil
	})
}