// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(stopper *stop.Stopper, nodeCount int) *Cluster {
	rand, seed := randutil.NewPseudoRand()
	clock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(&base.Context{}, clock, stopper)
	g := gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	storePool := storage.NewStorePool(g, storage.TestTimeUntilStoreDeadOff, stopper)
	c := &Cluster{
		stopper:       stopper,
		clock:         clock,
		rpc:           rpcContext,
		gossip:        g,
		storePool:     storePool,
		allocator:     storage.MakeAllocator(storePool, storage.RebalancingOptions{}),
		storeGossiper: gossiputil.NewStoreGossiper(g),
		nodes:         make(map[proto.NodeID]*Node),
		stores:        make(map[proto.StoreID]*Store),
		ranges:        make(map[proto.RangeID]*Range),
		rand:          rand,
		seed:          seed,
	}

	// Add the nodes.
	for i := 0; i < nodeCount; i++ {
		c.addNewNodeWithStore()
	}

	// Add a single range and attach it to the first node's first store.
	firstRange := c.addRange()
	firstRange.attachRangeToStore(c.stores[proto.StoreID(0)])
	return c
}
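// Illustrative usage sketch (not from the original source): how createCluster
// above might be driven from a simulation test. Only createCluster,
// stop.NewStopper, and stopper.Stop are taken from the snippet above; the
// runEpoch helper and the epoch count are assumptions made for this example.
func exampleRunClusterSim() {
	stopper := stop.NewStopper()
	defer stopper.Stop()
	c := createCluster(stopper, 3 /* nodeCount */)
	// Hypothetical main loop: advance the simulated cluster a fixed number
	// of epochs.
	for i := 0; i < 10; i++ {
		c.runEpoch() // assumed helper; not defined in the snippet above
	}
}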
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	m.t = t
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(&base.Context{}, m.clock, nil)
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	}
	if m.clientStopper == nil {
		m.clientStopper = stop.NewStopper()
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport(m.clientStopper)
	}
	if m.storePool == nil {
		if m.timeUntilStoreDead == 0 {
			m.timeUntilStoreDead = storage.TestTimeUntilStoreDeadOff
		}
		m.storePool = storage.NewStorePool(m.gossip, m.timeUntilStoreDead, m.clientStopper)
	}

	// Always create the first sender.
	m.senders = append(m.senders, kv.NewLocalSender())

	rpcSend := func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message,
		getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		call := proto.Call{
			Args:  getArgs(nil /* net.Addr */).(proto.Request),
			Reply: getReply().(proto.Response),
		}
		m.senders[0].Send(context.Background(), call)
		return []gogoproto.Message{call.Reply}, call.Reply.Header().GoError()
	}

	if m.db == nil {
		distSender := kv.NewDistSender(&kv.DistSenderContext{
			Clock:             m.clock,
			RangeDescriptorDB: m.senders[0],
			RPCSend:           rpcSend,
		}, m.gossip)
		sender := kv.NewTxnCoordSender(distSender, m.clock, false, nil, m.clientStopper)
		m.db = client.NewDB(sender)
	}

	for i := 0; i < numStores; i++ {
		m.addStore()
	}
	if m.transportStopper == nil {
		m.transportStopper = stop.NewStopper()
	}
	m.transportStopper.AddCloser(m.transport)
}
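// Illustrative usage sketch (not from the original source): a test would
// typically zero-initialize a multiTestContext, start the desired number of
// stores, and tear everything down afterwards. The Stop method is assumed to
// exist as the counterpart to Start.
func TestExampleThreeStores(t *testing.T) {
	mtc := multiTestContext{}
	mtc.Start(t, 3)
	defer mtc.Stop() // assumed teardown counterpart to Start
	// ... exercise mtc.db, mtc.senders, or the individual stores here ...
}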
// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(
	stopper *stop.Stopper,
	nodeCount int,
	epochWriter, actionWriter io.Writer,
	script Script,
	rand *rand.Rand,
) *Cluster {
	clock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(nil, clock, stopper)
	g := gossip.New(rpcContext, nil, stopper)
	// NodeID is required for Gossip, so set it to -1 for the cluster Gossip
	// instance to prevent conflicts with real NodeIDs.
	g.SetNodeID(-1)
	storePool := storage.NewStorePool(g, clock, storage.TestTimeUntilStoreDeadOff, stopper)
	c := &Cluster{
		stopper:   stopper,
		clock:     clock,
		rpc:       rpcContext,
		gossip:    g,
		storePool: storePool,
		allocator: storage.MakeAllocator(storePool, storage.AllocatorOptions{
			AllowRebalance: true,
			Deterministic:  true,
		}),
		storeGossiper:   gossiputil.NewStoreGossiper(g),
		nodes:           make(map[roachpb.NodeID]*Node),
		stores:          make(map[roachpb.StoreID]*Store),
		ranges:          make(map[roachpb.RangeID]*Range),
		rangeIDsByStore: make(map[roachpb.StoreID]roachpb.RangeIDSlice),
		rand:            rand,
		epochWriter:     tabwriter.NewWriter(epochWriter, 8, 1, 2, ' ', 0),
		actionWriter:    tabwriter.NewWriter(actionWriter, 8, 1, 2, ' ', 0),
		script:          script,
		epoch:           -1,
	}

	// Add the nodes.
	for i := 0; i < nodeCount; i++ {
		c.addNewNodeWithStore()
	}

	// Add a single range and attach it to the first node's first store.
	firstRange := c.addRange()
	firstRange.addReplica(c.stores[0])

	c.calculateRangeIDsByStore()

	// Output the first epoch header.
	c.epoch = 0
	c.OutputEpochHeader()
	c.OutputEpoch()
	c.flush()

	return c
}
// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(stopper *stop.Stopper, nodeCount int, epochWriter, actionWriter io.Writer, script Script) *Cluster {
	rand, seed := randutil.NewPseudoRand()
	clock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(&base.Context{}, clock, stopper)
	g := gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	storePool := storage.NewStorePool(g, storage.TestTimeUntilStoreDeadOff, stopper)
	c := &Cluster{
		stopper:   stopper,
		clock:     clock,
		rpc:       rpcContext,
		gossip:    g,
		storePool: storePool,
		allocator: storage.MakeAllocator(storePool, storage.RebalancingOptions{
			AllowRebalance: true,
			Deterministic:  true,
		}),
		storeGossiper:   gossiputil.NewStoreGossiper(g),
		nodes:           make(map[roachpb.NodeID]*Node),
		stores:          make(map[roachpb.StoreID]*Store),
		ranges:          make(map[roachpb.RangeID]*Range),
		rangeIDsByStore: make(map[roachpb.StoreID]roachpb.RangeIDSlice),
		rand:            rand,
		seed:            seed,
		epochWriter:     tabwriter.NewWriter(epochWriter, 8, 1, 2, ' ', 0),
		actionWriter:    tabwriter.NewWriter(actionWriter, 8, 1, 2, ' ', 0),
		script:          script,
		epoch:           -1,
	}

	// Add the nodes.
	for i := 0; i < nodeCount; i++ {
		c.addNewNodeWithStore()
	}

	// Add a single range and attach it to the first node's first store.
	firstRange := c.addRange()
	firstRange.addReplica(c.stores[0])

	c.calculateRangeIDsByStore()

	// Output the first epoch header.
	c.epoch = 0
	c.OutputEpochHeader()
	c.OutputEpoch()
	c.flush()

	return c
}
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	m.t = t
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(&base.Context{}, m.clock, nil)
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	}
	if m.clientStopper == nil {
		m.clientStopper = stop.NewStopper()
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport(m.clientStopper)
	}
	if m.storePool == nil {
		m.storePool = storage.NewStorePool(m.gossip, storage.TestTimeUntilStoreDeadOff, m.clientStopper)
	}

	// Always create the first sender.
	m.senders = append(m.senders, kv.NewLocalSender())

	if m.db == nil {
		sender := kv.NewTxnCoordSender(m.senders[0], m.clock, false, nil, m.clientStopper)
		var err error
		if m.db, err = client.Open("//", client.SenderOpt(sender)); err != nil {
			t.Fatal(err)
		}
	}

	for i := 0; i < numStores; i++ {
		m.addStore()
	}
	if m.transportStopper == nil {
		m.transportStopper = stop.NewStopper()
	}
	m.transportStopper.AddCloser(m.transport)
}
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}

	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}

	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})

	s.rpc = crpc.NewServer(s.rpcContext)

	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)

	feed := util.NewFeed(stopper)
	tracer := tracer.NewTracer(feed, ctx.Addr)

	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.clock, RPCContext: s.rpcContext}, s.gossip)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, tracer, s.stopper)
	s.db = client.NewDB(sender)

	var err error
	s.raftTransport, err = newRPCTransport(s.gossip, s.rpc, s.rpcContext)
	if err != nil {
		return nil, err
	}
	s.stopper.AddCloser(s.raftTransport)

	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender)
	if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}

	leaseMgr := sql.NewLeaseManager(0, *s.db, s.clock)
	leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	s.sqlServer = sql.MakeServer(&s.ctx.Context, *s.db, s.gossip, leaseMgr)
	if err := s.sqlServer.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}

	s.pgServer = pgwire.NewServer(&pgwire.Context{
		Context:  &s.ctx.Context,
		Executor: s.sqlServer.Executor,
		Stopper:  stopper,
	})

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          tracer,
		StorePool:       s.storePool,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
			Mode:           s.ctx.BalanceMode,
		},
	}
	s.node = NewNode(nCtx)
	s.admin = newAdminServer(s.db, s.stopper)
	s.status = newStatusServer(s.db, s.gossip, ctx)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.NewServer(s.tsDB)
	return s, nil
}
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}

	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}

	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})

	s.rpc = crpc.NewServer(s.rpcContext)

	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers, stopper)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)

	feed := util.NewFeed(stopper)

	// A custom RetryOptions is created which uses stopper.ShouldDrain() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown and receives an internal server error
	// (HTTP Code 5xx). This is the correct error for a server to return when
	// it is shutting down, and is normally retryable in a cluster
	// environment. However, on a single-node setup (such as a test), retries
	// will never succeed because the only server has been shut down; thus,
	// the DistSender needs to know that it should not retry in this
	// situation.
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	ds := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	txnRegistry := metric.NewRegistry()
	txnMetrics := kv.NewTxnMetrics(txnRegistry)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, s.Tracer, s.stopper, txnMetrics)
	s.db = client.NewDB(sender)

	s.grpc = grpc.NewServer()
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender, stopper)
	if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}

	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)

	sqlRegistry := metric.NewRegistry()
	s.sqlExecutor = sql.NewExecutor(*s.db, s.gossip, s.leaseMgr, s.stopper, sqlRegistry)

	s.pgServer = pgwire.MakeServer(&s.ctx.Context, s.sqlExecutor, sqlRegistry)

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          s.Tracer,
		StorePool:       s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
			Mode:           s.ctx.BalanceMode,
		},
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.recorder.AddNodeRegistry("sql.%s", sqlRegistry)
	s.recorder.AddNodeRegistry("txn.%s", txnRegistry)

	s.node = NewNode(nCtx, s.recorder, s.stopper, txnMetrics)
	s.admin = newAdminServer(s.db, s.stopper, s.sqlExecutor)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.NewServer(s.tsDB)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx)

	return s, nil
}
// NewServer creates a Server from a server.Context.
func NewServer(srvCtx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", srvCtx.Addr); err != nil {
		return nil, errors.Errorf("unable to resolve RPC address %q: %v", srvCtx.Addr, err)
	}

	if srvCtx.Ctx == nil {
		srvCtx.Ctx = context.Background()
	}
	if srvCtx.Ctx.Done() != nil {
		panic("context with cancel or deadline")
	}
	if tracing.TracerFromCtx(srvCtx.Ctx) == nil {
		// TODO(radu): instead of modifying srvCtx.Ctx, we should have a separate
		// context.Context inside Server. We will need to rename server.Context
		// though.
		srvCtx.Ctx = tracing.WithTracer(srvCtx.Ctx, tracing.NewTracer())
	}

	if srvCtx.Insecure {
		log.Warning(srvCtx.Ctx, "running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := srvCtx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := srvCtx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}

	// Add a dynamic log tag value for the node ID.
	//
	// We need to pass the server's Ctx as a base context for the various server
	// components, but we won't know the node ID until we Start(). At that point
	// it's too late to change the contexts in the components (various background
	// processes will have already started using the contexts).
	//
	// The dynamic value allows us to add the log tag to the context now and
	// update the value asynchronously. It's not significantly more expensive than
	// a regular tag since it's just doing an (atomic) load when a log/trace
	// message is constructed.
	s.nodeLogTagVal.Set(log.DynamicIntValueUnknown)
	srvCtx.Ctx = log.WithLogTag(srvCtx.Ctx, "n", &s.nodeLogTagVal)
	s.ctx = srvCtx

	s.clock.SetMaxOffset(srvCtx.MaxOffset)

	s.rpcContext = rpc.NewContext(srvCtx.Context, s.clock, s.stopper)
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(s.Ctx(), err)
		}
	}
	s.grpc = rpc.NewServer(s.rpcContext)

	s.registry = metric.NewRegistry()
	s.gossip = gossip.New(
		s.Ctx(), s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
	s.storePool = storage.NewStorePool(
		s.gossip,
		s.clock,
		s.rpcContext,
		srvCtx.ReservationsEnabled,
		srvCtx.TimeUntilStoreDead,
		s.stopper,
	)

	// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown and receives an internal server error
	// (HTTP Code 5xx). This is the correct error for a server to return when
	// it is shutting down, and is normally retryable in a cluster
	// environment. However, on a single-node setup (such as a test), retries
	// will never succeed because the only server has been shut down; thus,
	// the DistSender needs to know that it should not retry in this
	// situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	distSenderCfg := kv.DistSenderConfig{
		Ctx:             s.Ctx(),
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}
	s.distSender = kv.NewDistSender(&distSenderCfg, s.gossip)

	txnMetrics := kv.MakeTxnMetrics()
	s.registry.AddMetricStruct(txnMetrics)
	s.txnCoordSender = kv.NewTxnCoordSender(s.Ctx(), s.distSender, s.clock, srvCtx.Linearizable,
		s.stopper, txnMetrics)
	s.db = client.NewDB(s.txnCoordSender)

	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(s.ctx.Context, s.txnCoordSender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)

	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if srvCtx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *srvCtx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)

	// Set up the DistSQL server
	distSQLCfg := distsql.ServerConfig{
		Context:    s.Ctx(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCfg)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)

	// Set up Executor
	execCfg := sql.ExecutorConfig{
		Context:      s.Ctx(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if srvCtx.TestingKnobs.SQLExecutor != nil {
		execCfg.TestingKnobs = srvCtx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		execCfg.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}

	s.sqlExecutor = sql.NewExecutor(execCfg, s.stopper)
	s.registry.AddMetricStruct(s.sqlExecutor)

	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor)
	s.registry.AddMetricStruct(s.pgServer.Metrics())

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Ctx:                            s.Ctx(),
		Clock:                          s.clock,
		DB:                             s.db,
		Gossip:                         s.gossip,
		Transport:                      s.raftTransport,
		RaftTickInterval:               s.ctx.RaftTickInterval,
		ScanInterval:                   s.ctx.ScanInterval,
		ScanMaxIdleTime:                s.ctx.ScanMaxIdleTime,
		ConsistencyCheckInterval:       s.ctx.ConsistencyCheckInterval,
		ConsistencyCheckPanicOnFailure: s.ctx.ConsistencyCheckPanicOnFailure,
		StorePool:                      s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
		},
	}
	if srvCtx.TestingKnobs.Store != nil {
		nCtx.TestingKnobs = *srvCtx.TestingKnobs.Store.(*storage.StoreTestingKnobs)
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.registry.AddMetricStruct(s.rpcContext.RemoteClocks.Metrics())

	s.runtime = status.MakeRuntimeStatSampler(s.clock)
	s.registry.AddMetricStruct(s.runtime)

	s.node = NewNode(nCtx, s.recorder, s.registry, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr))
	roachpb.RegisterInternalServer(s.grpc, s.node)
	storage.RegisterStoresServer(s.grpc, s.node.storesServer)

	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.MakeServer(s.tsDB)

	s.admin = makeAdminServer(s)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx.Context, s.rpcContext, s.node.stores)
	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		gw.RegisterService(s.grpc)
	}

	return s, nil
}
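// Illustrative usage sketch (not from the original source): constructing and
// starting a server from a Context. The Start method, its signature, and the
// shutdown flow shown here are assumptions for this example only.
func exampleStartServer(srvCtx Context) error {
	stopper := stop.NewStopper()
	s, err := NewServer(srvCtx, stopper)
	if err != nil {
		return err
	}
	// Start is assumed to bind listeners and bootstrap or join the cluster.
	if err := s.Start(); err != nil {
		return err
	}
	// ... serve until shutdown is requested ...
	stopper.Stop() // releases resources registered via AddCloser/RunWorker
	return nil
}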
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}

	addr := ctx.Addr
	_, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", addr, err)
	}

	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	rpcContext := rpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})

	s.rpc = rpc.NewServer(util.MakeUnresolvedAddr("tcp", addr), rpcContext)
	s.stopper.AddCloser(s.rpc)
	s.gossip = gossip.New(rpcContext, s.ctx.GossipInterval, s.ctx.GossipBootstrapResolvers)
	s.storePool = storage.NewStorePool(s.gossip, ctx.TimeUntilStoreDead, stopper)

	feed := util.NewFeed(stopper)
	tracer := tracer.NewTracer(feed, addr)

	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.clock}, s.gossip)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, tracer, s.stopper)
	if s.db, err = client.Open("//", client.SenderOpt(sender)); err != nil {
		return nil, err
	}

	s.raftTransport, err = newRPCTransport(s.gossip, s.rpc, rpcContext)
	if err != nil {
		return nil, err
	}
	s.stopper.AddCloser(s.raftTransport)

	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender)
	if s.ctx.ExperimentalRPCServer {
		if err = s.kvDB.RegisterRPC(s.rpc); err != nil {
			return nil, err
		}
	}

	s.sqlServer = sql.MakeHTTPServer(&s.ctx.Context, *s.db)

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          tracer,
		StorePool:       s.storePool,
	}
	s.node = NewNode(nCtx)
	s.admin = newAdminServer(s.db, s.stopper)
	s.status = newStatusServer(s.db, s.gossip, ctx)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.NewServer(s.tsDB)
	return s, nil
}
// NewServer creates a Server from a server.Context.
func NewServer(ctx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, errors.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}

	if ctx.Insecure {
		log.Warning(context.TODO(), "running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	s.rpcContext = rpc.NewContext(ctx.Context, s.clock, s.stopper)
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(context.TODO(), err)
		}
	}
	s.grpc = rpc.NewServer(s.rpcContext)

	s.registry = metric.NewRegistry()
	s.gossip = gossip.New(s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
	s.storePool = storage.NewStorePool(
		s.gossip,
		s.clock,
		s.rpcContext,
		ctx.ReservationsEnabled,
		ctx.TimeUntilStoreDead,
		s.stopper,
	)

	// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown and receives an internal server error
	// (HTTP Code 5xx). This is the correct error for a server to return when
	// it is shutting down, and is normally retryable in a cluster
	// environment. However, on a single-node setup (such as a test), retries
	// will never succeed because the only server has been shut down; thus,
	// the DistSender needs to know that it should not retry in this
	// situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	s.distSender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)

	txnMetrics := kv.NewTxnMetrics(s.registry)
	sender := kv.NewTxnCoordSender(s.distSender, s.clock, ctx.Linearizable, s.Tracer,
		s.stopper, txnMetrics)
	s.db = client.NewDB(sender)

	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(s.ctx.Context, sender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)

	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if ctx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *ctx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)

	// Set up the DistSQL server
	distSQLCtx := distsql.ServerContext{
		Context:    context.Background(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCtx)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)

	// Set up Executor
	eCtx := sql.ExecutorContext{
		Context:      context.Background(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if ctx.TestingKnobs.SQLExecutor != nil {
		eCtx.TestingKnobs = ctx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		eCtx.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}

	s.sqlExecutor = sql.NewExecutor(eCtx, s.stopper, s.registry)
	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor, s.registry)

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:                          s.clock,
		DB:                             s.db,
		Gossip:                         s.gossip,
		Transport:                      s.raftTransport,
		RaftTickInterval:               s.ctx.RaftTickInterval,
		ScanInterval:                   s.ctx.ScanInterval,
		ScanMaxIdleTime:                s.ctx.ScanMaxIdleTime,
		ConsistencyCheckInterval:       s.ctx.ConsistencyCheckInterval,
		ConsistencyCheckPanicOnFailure: s.ctx.ConsistencyCheckPanicOnFailure,
		Tracer:                         s.Tracer,
		StorePool:                      s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
		},
	}
	if ctx.TestingKnobs.Store != nil {
		nCtx.TestingKnobs = *ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs)
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.rpcContext.RemoteClocks.RegisterMetrics(s.registry)

	s.runtime = status.MakeRuntimeStatSampler(s.clock, s.registry)

	s.node = NewNode(nCtx, s.recorder, s.registry, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr))
	roachpb.RegisterInternalServer(s.grpc, s.node)
	roachpb.RegisterInternalStoresServer(s.grpc, s.node.InternalStoresServer)

	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.MakeServer(s.tsDB)

	s.admin = makeAdminServer(s)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx.Context, s.rpcContext, s.node.stores)
	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		gw.RegisterService(s.grpc)
	}

	return s, nil
}