func (t *leaseTest) node(nodeID uint32) *csql.LeaseManager {
	mgr := t.nodes[nodeID]
	if mgr == nil {
		mgr = csql.NewLeaseManager(nodeID, *t.server.DB(), t.server.Clock())
		t.nodes[nodeID] = mgr
	}
	return mgr
}
func (t *leaseTest) node(nodeID uint32) *csql.LeaseManager {
	mgr := t.nodes[nodeID]
	if mgr == nil {
		mgr = csql.NewLeaseManager(
			nodeID,
			*t.kvDB,
			t.server.Clock(),
			t.leaseManagerTestingKnobs,
			t.server.Stopper(),
		)
		t.nodes[nodeID] = mgr
	}
	return mgr
}
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}

	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}

	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})

	s.rpc = crpc.NewServer(s.rpcContext)

	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)

	feed := util.NewFeed(stopper)
	tracer := tracer.NewTracer(feed, ctx.Addr)

	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.clock, RPCContext: s.rpcContext}, s.gossip)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, tracer, s.stopper)
	s.db = client.NewDB(sender)

	var err error
	s.raftTransport, err = newRPCTransport(s.gossip, s.rpc, s.rpcContext)
	if err != nil {
		return nil, err
	}
	s.stopper.AddCloser(s.raftTransport)

	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender)
	if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}

	leaseMgr := sql.NewLeaseManager(0, *s.db, s.clock)
	leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	s.sqlServer = sql.MakeServer(&s.ctx.Context, *s.db, s.gossip, leaseMgr)
	if err := s.sqlServer.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}

	s.pgServer = pgwire.NewServer(&pgwire.Context{
		Context:  &s.ctx.Context,
		Executor: s.sqlServer.Executor,
		Stopper:  stopper,
	})

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          tracer,
		StorePool:       s.storePool,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
			Mode:           s.ctx.BalanceMode,
		},
	}

	s.node = NewNode(nCtx)
	s.admin = newAdminServer(s.db, s.stopper)
	s.status = newStatusServer(s.db, s.gossip, ctx)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.NewServer(s.tsDB)

	return s, nil
}
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}

	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}

	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})

	s.rpc = crpc.NewServer(s.rpcContext)

	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers, stopper)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)

	feed := util.NewFeed(stopper)

	// A custom RetryOptions is created which uses stopper.ShouldDrain() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus, the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	ds := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	txnRegistry := metric.NewRegistry()
	txnMetrics := kv.NewTxnMetrics(txnRegistry)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, s.Tracer, s.stopper, txnMetrics)
	s.db = client.NewDB(sender)

	s.grpc = grpc.NewServer()
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender, stopper)
	if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}

	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	sqlRegistry := metric.NewRegistry()
	s.sqlExecutor = sql.NewExecutor(*s.db, s.gossip, s.leaseMgr, s.stopper, sqlRegistry)

	s.pgServer = pgwire.MakeServer(&s.ctx.Context, s.sqlExecutor, sqlRegistry)

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          s.Tracer,
		StorePool:       s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
			Mode:           s.ctx.BalanceMode,
		},
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.recorder.AddNodeRegistry("sql.%s", sqlRegistry)
	s.recorder.AddNodeRegistry("txn.%s", txnRegistry)

	s.node = NewNode(nCtx, s.recorder, s.stopper, txnMetrics)
	s.admin = newAdminServer(s.db, s.stopper, s.sqlExecutor)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.NewServer(s.tsDB)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx)

	return s, nil
}
func TestSchemaChangeProcess(t *testing.T) {
	defer leaktest.AfterTest(t)
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	// Disable external processing of mutations.
	defer csql.TestDisableAsyncSchemaChangeExec()()
	server, sqlDB, kvDB := setup(t)
	defer cleanup(server, sqlDB)

	var id = csql.ID(keys.MaxReservedDescID + 2)
	var node = roachpb.NodeID(2)
	db := server.DB()
	leaseMgr := csql.NewLeaseManager(0, *db, hlc.NewClock(hlc.UnixNano))
	changer := csql.NewSchemaChangerForTesting(id, 0, node, *db, leaseMgr)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo(v));
INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd');
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	nameKey := csql.MakeNameMetadataKey(keys.MaxReservedDescID+1, "test")
	gr, err := kvDB.Get(nameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !gr.Exists() {
		t.Fatalf("Name entry %q does not exist", nameKey)
	}
	descKey := csql.MakeDescMetadataKey(csql.ID(gr.ValueInt()))
	desc := &csql.Descriptor{}

	// Check that MaybeIncrementVersion doesn't increment the version
	// when the up_version bit is not set.
	if err := kvDB.GetProto(descKey, desc); err != nil {
		t.Fatal(err)
	}
	expectedVersion := desc.GetTable().Version

	if err := changer.MaybeIncrementVersion(); err != nil {
		t.Fatal(err)
	}
	if err := kvDB.GetProto(descKey, desc); err != nil {
		t.Fatal(err)
	}
	newVersion := desc.GetTable().Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err := changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", desc.GetTable())
	}

	// Check that MaybeIncrementVersion increments the version
	// correctly.
	expectedVersion++
	desc.GetTable().UpVersion = true
	if err := kvDB.Put(descKey, desc); err != nil {
		t.Fatal(err)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", desc.GetTable())
	}

	if err := changer.MaybeIncrementVersion(); err != nil {
		t.Fatal(err)
	}
	if err := kvDB.GetProto(descKey, desc); err != nil {
		t.Fatal(err)
	}
	newVersion = desc.GetTable().Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", desc.GetTable())
	}

	// Check that RunStateMachineBeforeBackfill doesn't do anything
	// if there are no mutations queued.
	if err := changer.RunStateMachineBeforeBackfill(); err != nil {
		t.Fatal(err)
	}
	if err := kvDB.GetProto(descKey, desc); err != nil {
		t.Fatal(err)
	}
	newVersion = desc.GetTable().Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}

	// Check that RunStateMachineBeforeBackfill functions properly.
	if err := kvDB.GetProto(descKey, desc); err != nil {
		t.Fatal(err)
	}
	table := desc.GetTable()
	expectedVersion = table.Version
	// Make a copy of the index for use in a mutation.
	index := proto.Clone(&table.Indexes[0]).(*csql.IndexDescriptor)
	index.Name = "bar"
	index.ID = table.NextIndexID
	table.NextIndexID++
	changer = csql.NewSchemaChangerForTesting(id, table.NextMutationID, node, *db, leaseMgr)
	table.Mutations = append(table.Mutations, csql.DescriptorMutation{
		Descriptor_: &csql.DescriptorMutation_Index{Index: index},
		Direction:   csql.DescriptorMutation_ADD,
		State:       csql.DescriptorMutation_DELETE_ONLY,
		MutationID:  table.NextMutationID,
	})
	table.NextMutationID++

	// Run state machine in both directions.
	for _, direction := range []csql.DescriptorMutation_Direction{csql.DescriptorMutation_ADD, csql.DescriptorMutation_DROP} {
		table.Mutations[0].Direction = direction
		expectedVersion++
		if err := kvDB.Put(descKey, desc); err != nil {
			t.Fatal(err)
		}
		// The expected end state.
		expectedState := csql.DescriptorMutation_WRITE_ONLY
		if direction == csql.DescriptorMutation_DROP {
			expectedState = csql.DescriptorMutation_DELETE_ONLY
		}
		// Run two times to ensure idempotency of operations.
		for i := 0; i < 2; i++ {
			if err := changer.RunStateMachineBeforeBackfill(); err != nil {
				t.Fatal(err)
			}
			if err := kvDB.GetProto(descKey, desc); err != nil {
				t.Fatal(err)
			}
			table = desc.GetTable()
			newVersion = table.Version
			if newVersion != expectedVersion {
				t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
			}
			state := table.Mutations[0].State
			if state != expectedState {
				t.Fatalf("bad state; e = %d, v = %d", expectedState, state)
			}
		}
	}

	// RunStateMachineBeforeBackfill() doesn't complete the schema change.
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", desc.GetTable())
	}
}
func TestSchemaChangeProcess(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()

	params, _ := createTestServerParams()
	// Disable external processing of mutations.
	params.Knobs.SQLSchemaChangeManager = &csql.SchemaChangeManagerTestingKnobs{
		AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	var id = sqlbase.ID(keys.MaxReservedDescID + 2)
	var node = roachpb.NodeID(2)
	stopper := stop.NewStopper()
	leaseMgr := csql.NewLeaseManager(
		0, *kvDB, hlc.NewClock(hlc.UnixNano), csql.LeaseManagerTestingKnobs{}, stopper)
	defer stopper.Stop()
	changer := csql.NewSchemaChangerForTesting(id, 0, node, *kvDB, leaseMgr)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo(v));
INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd');
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	expectedVersion := tableDesc.Version

	desc, err := changer.MaybeIncrementVersion()
	if err != nil {
		t.Fatal(err)
	}
	tableDesc = desc.GetTable()
	newVersion := tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err := changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", tableDesc)
	}

	// Check that MaybeIncrementVersion increments the version
	// correctly.
	expectedVersion++
	tableDesc.UpVersion = true
	if err := kvDB.Put(
		sqlbase.MakeDescMetadataKey(tableDesc.ID),
		sqlbase.WrapDescriptor(tableDesc),
	); err != nil {
		t.Fatal(err)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", desc.GetTable())
	}

	desc, err = changer.MaybeIncrementVersion()
	if err != nil {
		t.Fatal(err)
	}
	tableDesc = desc.GetTable()
	savedTableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion = tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version in returned desc; e = %d, v = %d", expectedVersion, newVersion)
	}
	newVersion = savedTableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version in saved desc; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", tableDesc)
	}

	// Check that RunStateMachineBeforeBackfill doesn't do anything
	// if there are no mutations queued.
	if err := changer.RunStateMachineBeforeBackfill(); err != nil {
		t.Fatal(err)
	}
	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion = tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}

	// Check that RunStateMachineBeforeBackfill functions properly.
	expectedVersion = tableDesc.Version
	// Make a copy of the index for use in a mutation.
	index := protoutil.Clone(&tableDesc.Indexes[0]).(*sqlbase.IndexDescriptor)
	index.Name = "bar"
	index.ID = tableDesc.NextIndexID
	tableDesc.NextIndexID++
	changer = csql.NewSchemaChangerForTesting(id, tableDesc.NextMutationID, node, *kvDB, leaseMgr)
	tableDesc.Mutations = append(tableDesc.Mutations, sqlbase.DescriptorMutation{
		Descriptor_: &sqlbase.DescriptorMutation_Index{Index: index},
		Direction:   sqlbase.DescriptorMutation_ADD,
		State:       sqlbase.DescriptorMutation_DELETE_ONLY,
		MutationID:  tableDesc.NextMutationID,
	})
	tableDesc.NextMutationID++

	// Run state machine in both directions.
	for _, direction := range []sqlbase.DescriptorMutation_Direction{sqlbase.DescriptorMutation_ADD, sqlbase.DescriptorMutation_DROP} {
		tableDesc.Mutations[0].Direction = direction
		expectedVersion++
		if err := kvDB.Put(
			sqlbase.MakeDescMetadataKey(tableDesc.ID),
			sqlbase.WrapDescriptor(tableDesc),
		); err != nil {
			t.Fatal(err)
		}
		// The expected end state.
		expectedState := sqlbase.DescriptorMutation_WRITE_ONLY
		if direction == sqlbase.DescriptorMutation_DROP {
			expectedState = sqlbase.DescriptorMutation_DELETE_ONLY
		}
		// Run two times to ensure idempotency of operations.
		for i := 0; i < 2; i++ {
			if err := changer.RunStateMachineBeforeBackfill(); err != nil {
				t.Fatal(err)
			}
			tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
			newVersion = tableDesc.Version
			if newVersion != expectedVersion {
				t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
			}
			state := tableDesc.Mutations[0].State
			if state != expectedState {
				t.Fatalf("bad state; e = %d, v = %d", expectedState, state)
			}
		}
	}

	// RunStateMachineBeforeBackfill() doesn't complete the schema change.
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", tableDesc)
	}
}
// NewServer creates a Server from a server.Context.
func NewServer(srvCtx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", srvCtx.Addr); err != nil {
		return nil, errors.Errorf("unable to resolve RPC address %q: %v", srvCtx.Addr, err)
	}

	if srvCtx.Ctx == nil {
		srvCtx.Ctx = context.Background()
	}
	if srvCtx.Ctx.Done() != nil {
		panic("context with cancel or deadline")
	}
	if tracing.TracerFromCtx(srvCtx.Ctx) == nil {
		// TODO(radu): instead of modifying srvCtx.Ctx, we should have a separate
		// context.Context inside Server. We will need to rename server.Context
		// though.
		srvCtx.Ctx = tracing.WithTracer(srvCtx.Ctx, tracing.NewTracer())
	}

	if srvCtx.Insecure {
		log.Warning(srvCtx.Ctx, "running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := srvCtx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := srvCtx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}

	// Add a dynamic log tag value for the node ID.
	//
	// We need to pass the server's Ctx as a base context for the various server
	// components, but we won't know the node ID until we Start(). At that point
	// it's too late to change the contexts in the components (various background
	// processes will have already started using the contexts).
	//
	// The dynamic value allows us to add the log tag to the context now and
	// update the value asynchronously. It's not significantly more expensive than
	// a regular tag since it's just doing an (atomic) load when a log/trace
	// message is constructed.
	s.nodeLogTagVal.Set(log.DynamicIntValueUnknown)
	srvCtx.Ctx = log.WithLogTag(srvCtx.Ctx, "n", &s.nodeLogTagVal)
	s.ctx = srvCtx

	s.clock.SetMaxOffset(srvCtx.MaxOffset)

	s.rpcContext = rpc.NewContext(srvCtx.Context, s.clock, s.stopper)
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(s.Ctx(), err)
		}
	}
	s.grpc = rpc.NewServer(s.rpcContext)

	s.registry = metric.NewRegistry()
	s.gossip = gossip.New(
		s.Ctx(), s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
	s.storePool = storage.NewStorePool(
		s.gossip,
		s.clock,
		s.rpcContext,
		srvCtx.ReservationsEnabled,
		srvCtx.TimeUntilStoreDead,
		s.stopper,
	)

	// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus, the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	distSenderCfg := kv.DistSenderConfig{
		Ctx:             s.Ctx(),
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}
	s.distSender = kv.NewDistSender(&distSenderCfg, s.gossip)

	txnMetrics := kv.MakeTxnMetrics()
	s.registry.AddMetricStruct(txnMetrics)
	s.txnCoordSender = kv.NewTxnCoordSender(s.Ctx(), s.distSender, s.clock, srvCtx.Linearizable,
		s.stopper, txnMetrics)
	s.db = client.NewDB(s.txnCoordSender)

	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(s.ctx.Context, s.txnCoordSender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)

	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if srvCtx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *srvCtx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)

	// Set up the DistSQL server
	distSQLCfg := distsql.ServerConfig{
		Context:    s.Ctx(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCfg)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)

	// Set up Executor
	execCfg := sql.ExecutorConfig{
		Context:      s.Ctx(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if srvCtx.TestingKnobs.SQLExecutor != nil {
		execCfg.TestingKnobs = srvCtx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		execCfg.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}
	s.sqlExecutor = sql.NewExecutor(execCfg, s.stopper)
	s.registry.AddMetricStruct(s.sqlExecutor)

	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor)
	s.registry.AddMetricStruct(s.pgServer.Metrics())

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Ctx:                            s.Ctx(),
		Clock:                          s.clock,
		DB:                             s.db,
		Gossip:                         s.gossip,
		Transport:                      s.raftTransport,
		RaftTickInterval:               s.ctx.RaftTickInterval,
		ScanInterval:                   s.ctx.ScanInterval,
		ScanMaxIdleTime:                s.ctx.ScanMaxIdleTime,
		ConsistencyCheckInterval:       s.ctx.ConsistencyCheckInterval,
		ConsistencyCheckPanicOnFailure: s.ctx.ConsistencyCheckPanicOnFailure,
		StorePool:                      s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
		},
	}
	if srvCtx.TestingKnobs.Store != nil {
		nCtx.TestingKnobs = *srvCtx.TestingKnobs.Store.(*storage.StoreTestingKnobs)
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.registry.AddMetricStruct(s.rpcContext.RemoteClocks.Metrics())

	s.runtime = status.MakeRuntimeStatSampler(s.clock)
	s.registry.AddMetricStruct(s.runtime)

	s.node = NewNode(nCtx, s.recorder, s.registry, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr))
	roachpb.RegisterInternalServer(s.grpc, s.node)
	storage.RegisterStoresServer(s.grpc, s.node.storesServer)

	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.MakeServer(s.tsDB)

	s.admin = makeAdminServer(s)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx.Context, s.rpcContext, s.node.stores)
	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		gw.RegisterService(s.grpc)
	}

	return s, nil
}
// NewServer creates a Server from a server.Context.
func NewServer(ctx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, errors.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}

	if ctx.Insecure {
		log.Warning(context.TODO(), "running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	s.rpcContext = rpc.NewContext(ctx.Context, s.clock, s.stopper)
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(context.TODO(), err)
		}
	}
	s.grpc = rpc.NewServer(s.rpcContext)

	s.registry = metric.NewRegistry()
	s.gossip = gossip.New(s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
	s.storePool = storage.NewStorePool(
		s.gossip,
		s.clock,
		s.rpcContext,
		ctx.ReservationsEnabled,
		ctx.TimeUntilStoreDead,
		s.stopper,
	)

	// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus, the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	s.distSender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)

	txnMetrics := kv.NewTxnMetrics(s.registry)
	sender := kv.NewTxnCoordSender(s.distSender, s.clock, ctx.Linearizable, s.Tracer,
		s.stopper, txnMetrics)
	s.db = client.NewDB(sender)

	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(s.ctx.Context, sender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)

	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if ctx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *ctx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)

	// Set up the DistSQL server
	distSQLCtx := distsql.ServerContext{
		Context:    context.Background(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCtx)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)

	// Set up Executor
	eCtx := sql.ExecutorContext{
		Context:      context.Background(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if ctx.TestingKnobs.SQLExecutor != nil {
		eCtx.TestingKnobs = ctx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		eCtx.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}
	s.sqlExecutor = sql.NewExecutor(eCtx, s.stopper, s.registry)

	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor, s.registry)

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:                          s.clock,
		DB:                             s.db,
		Gossip:                         s.gossip,
		Transport:                      s.raftTransport,
		RaftTickInterval:               s.ctx.RaftTickInterval,
		ScanInterval:                   s.ctx.ScanInterval,
		ScanMaxIdleTime:                s.ctx.ScanMaxIdleTime,
		ConsistencyCheckInterval:       s.ctx.ConsistencyCheckInterval,
		ConsistencyCheckPanicOnFailure: s.ctx.ConsistencyCheckPanicOnFailure,
		Tracer:                         s.Tracer,
		StorePool:                      s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
		},
	}
	if ctx.TestingKnobs.Store != nil {
		nCtx.TestingKnobs = *ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs)
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.rpcContext.RemoteClocks.RegisterMetrics(s.registry)

	s.runtime = status.MakeRuntimeStatSampler(s.clock, s.registry)

	s.node = NewNode(nCtx, s.recorder, s.registry, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr))
	roachpb.RegisterInternalServer(s.grpc, s.node)
	roachpb.RegisterInternalStoresServer(s.grpc, s.node.InternalStoresServer)

	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.MakeServer(s.tsDB)

	s.admin = makeAdminServer(s)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx.Context, s.rpcContext, s.node.stores)
	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		gw.RegisterService(s.grpc)
	}

	return s, nil
}