// recordJoinEvent begins an asynchronous task which attempts to log a "node
// join" or "node restart" event. This query will retry until it succeeds or
// the server stops.
func (n *Node) recordJoinEvent() {
	if !n.ctx.LogRangeEvents {
		return
	}

	logEventType := sql.EventLogNodeRestart
	if n.initialBoot {
		logEventType = sql.EventLogNodeJoin
	}

	n.stopper.RunWorker(func() {
		retryOpts := base.DefaultRetryOptions()
		retryOpts.Closer = n.stopper.ShouldStop()
		for r := retry.Start(retryOpts); r.Next(); {
			if err := n.ctx.DB.Txn(func(txn *client.Txn) error {
				return n.eventLogger.InsertEventRecord(txn,
					logEventType,
					int32(n.Descriptor.NodeID),
					int32(n.Descriptor.NodeID),
					struct {
						Descriptor roachpb.NodeDescriptor
						ClusterID  uuid.UUID
						StartedAt  int64
					}{n.Descriptor, n.ClusterID, n.startedAt},
				)
			}); err != nil {
				log.Warningc(n.context(context.TODO()), "unable to log %s event for node %d: %s",
					logEventType, n.Descriptor.NodeID, err)
			} else {
				return
			}
		}
	})
}
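For reference, a minimal standalone sketch of the retry idiom that recurs throughout these snippets: base.DefaultRetryOptions() paired with a stopper channel as Closer, so the backoff loop exits cleanly on shutdown. The package name, import paths (pre-`pkg/` tree layout), and the doWork helper are illustrative assumptions, not part of the original code.

package retrydemo

import (
	"errors"

	"github.com/cockroachdb/cockroach/base"
	"github.com/cockroachdb/cockroach/util/retry"
	"github.com/cockroachdb/cockroach/util/stop"
)

// doWork is a hypothetical fallible operation.
func doWork() error { return errors.New("transient failure") }

// runWithRetry retries doWork with exponential backoff until it succeeds
// or the stopper begins shutting down.
func runWithRetry(stopper *stop.Stopper) {
	retryOpts := base.DefaultRetryOptions()
	// Closing this channel (on shutdown) makes r.Next() return false.
	retryOpts.Closer = stopper.ShouldStop()
	for r := retry.Start(retryOpts); r.Next(); {
		if err := doWork(); err == nil {
			return // success
		}
		// Backoff between attempts happens inside r.Next().
	}
}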
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		splitKeys []roachpb.Key
		keys      []roachpb.Key
	}{
		{[]roachpb.Key{roachpb.Key("m")},
			[]roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
		{[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
			[]roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
				roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
	}

	for i, tc := range testCases {
		s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
		defer s.Stopper().Stop()
		ts := s.(*TestServer)
		retryOpts := base.DefaultRetryOptions()
		retryOpts.Closer = ts.stopper.ShouldDrain()
		ds := kv.NewDistSender(&kv.DistSenderContext{
			Clock:           s.Clock(),
			RPCContext:      s.RPCContext(),
			RPCRetryOptions: &retryOpts,
		}, ts.Gossip())
		tds := kv.NewTxnCoordSender(ds, ts.Clock(), ts.Ctx.Linearizable,
			tracing.NewTracer(), ts.stopper, kv.NewTxnMetrics(metric.NewRegistry()))

		for _, sk := range tc.splitKeys {
			if err := ts.node.ctx.DB.AdminSplit(sk); err != nil {
				t.Fatal(err)
			}
		}

		for _, k := range tc.keys {
			put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
			if _, err := client.SendWrapped(tds, nil, put); err != nil {
				t.Fatal(err)
			}
		}

		// Try every possible ScanRequest startKey.
		for start := 0; start < len(tc.keys); start++ {
			// Try every possible maxResults, from 1 to beyond the size of key array.
			for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
				scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
					int64(maxResults))
				reply, err := client.SendWrapped(tds, nil, scan)
				if err != nil {
					t.Fatal(err)
				}
				rows := reply.(*roachpb.ScanResponse).Rows
				if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
					t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
				} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
					t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
				}
			}
		}
	}
}
// NewDistSender returns a batch.Sender instance which connects to the
// Cockroach cluster via the supplied gossip instance. Supplying a
// DistSenderContext or the fields within is optional. For omitted values, sane
// defaults will be used.
func NewDistSender(ctx *DistSenderContext, gossip *gossip.Gossip) *DistSender {
	if ctx == nil {
		ctx = &DistSenderContext{}
	}
	clock := ctx.Clock
	if clock == nil {
		clock = hlc.NewClock(hlc.UnixNano)
	}
	ds := &DistSender{
		clock:  clock,
		gossip: gossip,
	}
	if ctx.nodeDescriptor != nil {
		atomic.StorePointer(&ds.nodeDescriptor, unsafe.Pointer(ctx.nodeDescriptor))
	}
	rcSize := ctx.RangeDescriptorCacheSize
	if rcSize <= 0 {
		rcSize = defaultRangeDescriptorCacheSize
	}
	rdb := ctx.RangeDescriptorDB
	if rdb == nil {
		rdb = ds
	}
	ds.rangeCache = newRangeDescriptorCache(rdb, int(rcSize))
	lcSize := ctx.LeaderCacheSize
	if lcSize <= 0 {
		lcSize = defaultLeaderCacheSize
	}
	ds.leaderCache = newLeaderCache(int(lcSize))
	if ctx.RangeLookupMaxRanges <= 0 {
		ds.rangeLookupMaxRanges = defaultRangeLookupMaxRanges
	}
	if ctx.TransportFactory != nil {
		ds.transportFactory = ctx.TransportFactory
	}
	ds.rpcRetryOptions = base.DefaultRetryOptions()
	if ctx.RPCRetryOptions != nil {
		ds.rpcRetryOptions = *ctx.RPCRetryOptions
	}
	if ctx.RPCContext != nil {
		ds.rpcContext = ctx.RPCContext
		if ds.rpcRetryOptions.Closer == nil {
			ds.rpcRetryOptions.Closer = ds.rpcContext.Stopper.ShouldDrain()
		}
	}
	if ctx.Tracer != nil {
		ds.Tracer = ctx.Tracer
	} else {
		ds.Tracer = tracing.NewTracer()
	}
	if ctx.SendNextTimeout != 0 {
		ds.sendNextTimeout = ctx.SendNextTimeout
	} else {
		ds.sendNextTimeout = defaultSendNextTimeout
	}
	return ds
}
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, address, clock, node, and stopper are
// returned. If gossipBS is not nil, the gossip bootstrap address is
// set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}

	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	ctx.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCtx := makeTestContext()
	g := gossip.New(
		context.Background(),
		nodeRPCContext,
		grpcServer,
		serverCtx.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry())
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(ln.Addr())
	}
	ctx.Gossip = g
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(&kv.DistSenderConfig{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	ctx.Ctx = tracing.WithTracer(context.Background(), tracing.NewTracer())
	sender := kv.NewTxnCoordSender(ctx.Ctx, distSender, ctx.Clock, false, stopper,
		kv.MakeTxnMetrics())
	ctx.DB = client.NewDB(sender)
	ctx.Transport = storage.NewDummyRaftTransport()
	node := NewNode(ctx, status.NewMetricsRecorder(ctx.Clock), metric.NewRegistry(),
		stopper, kv.MakeTxnMetrics(), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	return grpcServer, ln.Addr(), ctx.Clock, node, stopper
}
// execSchemaChanges releases schema leases and runs the queued
// schema changers. This needs to be run after the transaction
// scheduling the schema change has finished.
//
// The list of closures is cleared after (attempting) execution.
//
// Args:
//  results: The results from all statements in the group that scheduled the
//    schema changes we're about to execute. Results corresponding to the
//    schema change statements will be changed in case an error occurs.
func (scc *schemaChangerCollection) execSchemaChanges(
	e *Executor, planMaker *planner, results ResultList,
) {
	if planMaker.txn != nil {
		panic("trying to execute schema changes while still in a transaction")
	}
	// Release the leases once a transaction is complete.
	planMaker.releaseLeases()
	if e.ctx.TestingKnobs.SyncSchemaChangersFilter != nil {
		e.ctx.TestingKnobs.SyncSchemaChangersFilter(TestingSchemaChangerCollection{scc})
	}
	// Execute any schema changes that were scheduled, in the order of the
	// statements that scheduled them.
	for _, scEntry := range scc.schemaChangers {
		sc := &scEntry.sc
		sc.db = *e.ctx.DB
		for r := retry.Start(base.DefaultRetryOptions()); r.Next(); {
			if done, err := sc.IsDone(); err != nil {
				log.Warning(e.ctx.Context, err)
				break
			} else if done {
				break
			}
			if err := sc.exec(
				e.ctx.TestingKnobs.SchemaChangersStartBackfillNotification,
				e.ctx.TestingKnobs.SyncSchemaChangersRenameOldNameNotInUseNotification,
			); err != nil {
				if isSchemaChangeRetryError(err) {
					// Try again.
					continue
				}
				// All other errors can be reported; we report it as the result
				// corresponding to the statement that enqueued this changer.
				// There's some sketchiness here: we assume there's a single result
				// per statement and we clobber the result/error of the corresponding
				// statement.
				// There's also another subtlety: we can only report results for
				// statements in the current batch; we can't modify the results of older
				// statements.
				if scEntry.epoch == scc.curGroupNum {
					results[scEntry.idx] = Result{Err: err}
				}
				log.Warningf(e.ctx.Context, "Error executing schema change: %s", err)
			}
			break
		}
	}
	scc.schemaChangers = scc.schemaChangers[:0]
}
func (ia *idAllocator) start() {
	ia.stopper.RunWorker(func() {
		defer close(ia.ids)

		for {
			var newValue int64
			for newValue <= int64(ia.minID) {
				var (
					err error
					res client.KeyValue
				)
				for r := retry.Start(base.DefaultRetryOptions()); r.Next(); {
					idKey := ia.idKey.Load().(roachpb.Key)
					if !ia.stopper.RunTask(func() {
						res, err = ia.db.Inc(idKey, int64(ia.blockSize))
					}) {
						return
					}
					if err == nil {
						newValue = res.ValueInt()
						break
					}
					log.Warningf("unable to allocate %d ids from %s: %s", ia.blockSize, idKey, err)
				}
				if err != nil {
					panic(fmt.Sprintf("unexpectedly exited id allocation retry loop: %s", err))
				}
			}

			end := newValue + 1
			start := end - int64(ia.blockSize)

			if start < int64(ia.minID) {
				start = int64(ia.minID)
			}

			// Add all new ids to the channel for consumption.
			for i := start; i < end; i++ {
				select {
				case ia.ids <- uint32(i):
				case <-ia.stopper.ShouldStop():
					return
				}
			}
		}
	})
}
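The half-open interval each Inc result produces above can be easy to misread, so here is a minimal sketch of the same arithmetic with assumed values (blockSize = 10, minID = 1, and the Inc results are made up for illustration):

package main

import "fmt"

// Demonstrates the id-block arithmetic from idAllocator.start: each Inc by
// blockSize reserves the ids (newValue-blockSize, newValue], emitted as the
// half-open range [start, end).
func main() {
	const blockSize, minID = 10, 1
	for _, newValue := range []int64{10, 20} { // successive assumed Inc results
		end := newValue + 1
		start := end - blockSize
		if start < minID {
			start = minID
		}
		fmt.Printf("Inc -> %d allocates ids [%d, %d)\n", newValue, start, end)
		// Output:
		// Inc -> 10 allocates ids [1, 11)
		// Inc -> 20 allocates ids [11, 21)
	}
}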
// InitSenderForLocalTestCluster initializes a TxnCoordSender that can be used
// with LocalTestCluster.
func InitSenderForLocalTestCluster(
	nodeDesc *roachpb.NodeDescriptor,
	tracer opentracing.Tracer,
	clock *hlc.Clock,
	latency time.Duration,
	stores client.Sender,
	stopper *stop.Stopper,
	gossip *gossip.Gossip,
) client.Sender {
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	senderTransportFactory := SenderTransportFactory(tracer, stores)
	distSender := NewDistSender(&DistSenderConfig{
		Clock: clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaseHolderCacheSize:     defaultLeaseHolderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		TransportFactory: func(
			opts SendOptions,
			rpcContext *rpc.Context,
			replicas ReplicaSlice,
			args roachpb.BatchRequest,
		) (Transport, error) {
			transport, err := senderTransportFactory(opts, rpcContext, replicas, args)
			if err != nil {
				return nil, err
			}
			return &localTestClusterTransport{transport, latency}, nil
		},
		RangeDescriptorDB: stores.(RangeDescriptorDB), // for descriptor lookup
	}, gossip)

	ctx := tracing.WithTracer(context.Background(), tracer)
	return NewTxnCoordSender(ctx, distSender, clock, false /* !linearizable */, stopper,
		MakeTxnMetrics())
}
// Publish updates a table descriptor. It also maintains the invariant that
// there are at most two versions of the descriptor out in the wild at any time
// by first waiting for all nodes to be on the current (pre-update) version of
// the table desc.
// The update closure is called after the wait, and it provides the new version
// of the descriptor to be written. In a multi-step schema operation, this
// update should perform a single step.
// The closure may be called multiple times if retries occur; make sure it does
// not have side effects.
// Returns the updated version of the descriptor.
func (s LeaseStore) Publish(
	tableID sqlbase.ID,
	update func(*sqlbase.TableDescriptor) error,
	logEvent func(*client.Txn) error,
) (*sqlbase.Descriptor, error) {
	errLeaseVersionChanged := errors.New("lease version changed")
	// Retry while getting errLeaseVersionChanged.
	for r := retry.Start(base.DefaultRetryOptions()); r.Next(); {
		// Wait until there are no unexpired leases on the previous version
		// of the table.
		expectedVersion, err := s.waitForOneVersion(tableID, base.DefaultRetryOptions())
		if err != nil {
			return nil, err
		}

		desc := &sqlbase.Descriptor{}
		// There should be only one version of the descriptor, but it's
		// a race now to update to the next version.
		err = s.db.Txn(context.TODO(), func(txn *client.Txn) error {
			descKey := sqlbase.MakeDescMetadataKey(tableID)

			// Re-read the current version of the table descriptor, this time
			// transactionally.
			if err := txn.GetProto(descKey, desc); err != nil {
				return err
			}
			tableDesc := desc.GetTable()
			if tableDesc == nil {
				return errors.Errorf("ID %d is not a table", tableID)
			}
			if expectedVersion != tableDesc.Version {
				// The version changed out from under us. Someone else must be
				// performing a schema change operation.
				if log.V(3) {
					log.Infof(context.TODO(), "publish (version changed): %d != %d",
						expectedVersion, tableDesc.Version)
				}
				return errLeaseVersionChanged
			}

			// Run the update closure.
			if err := update(tableDesc); err != nil {
				return err
			}

			// Bump the version and modification time.
			tableDesc.Version++
			now := s.clock.Now()
			tableDesc.ModificationTime = now
			if log.V(3) {
				log.Infof(context.TODO(), "publish: descID=%d (%s) version=%d mtime=%s",
					tableDesc.ID, tableDesc.Name, tableDesc.Version, now.GoTime())
			}
			if err := tableDesc.ValidateTable(); err != nil {
				return err
			}

			// Write the updated descriptor.
			txn.SetSystemConfigTrigger()
			b := txn.NewBatch()
			b.Put(descKey, desc)
			if logEvent != nil {
				// If an event log is required for this update, ensure that the
				// descriptor change occurs first in the transaction. This is
				// necessary to ensure that the System configuration change is
				// gossiped. See the documentation for
				// transaction.SetSystemConfigTrigger() for more information.
				if err := txn.Run(b); err != nil {
					return err
				}
				if err := logEvent(txn); err != nil {
					return err
				}
				return txn.Commit()
			}
			// More efficient batching can be used if no event log message
			// is required.
			return txn.CommitInBatch(b)
		})

		switch err {
		case nil, errDidntUpdateDescriptor:
			return desc, nil
		case errLeaseVersionChanged:
			// will loop around to retry
		default:
			return nil, err
		}
	}

	panic("not reached")
}
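A hedged sketch of how a single schema-change step might call Publish. It assumes it lives in the same package as LeaseStore; the helper name and the column rename are made up for illustration. The point is that the closure only mutates the descriptor it is handed, keeping it side-effect free so it is safe to re-run if Publish retries.

// renameFirstColumn is a hypothetical single-step schema change.
func renameFirstColumn(s LeaseStore, tableID sqlbase.ID) error {
	_, err := s.Publish(tableID,
		func(desc *sqlbase.TableDescriptor) error {
			// The closure may be called multiple times on retry, so it must
			// stay idempotent: mutate only the descriptor passed in.
			desc.Columns[0].Name = "renamed_col" // assumed new name
			return nil
		},
		nil, // no event log entry in this sketch
	)
	return err
}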
// NewDistSender returns a batch.Sender instance which connects to the
// Cockroach cluster via the supplied gossip instance. Supplying a
// DistSenderConfig or the fields within is optional. For omitted values, sane
// defaults will be used.
func NewDistSender(cfg *DistSenderConfig, g *gossip.Gossip) *DistSender {
	if cfg == nil {
		cfg = &DistSenderConfig{}
	}

	ds := &DistSender{gossip: g}

	ds.Ctx = cfg.Ctx
	if ds.Ctx == nil {
		ds.Ctx = context.Background()
	}
	if ds.Ctx.Done() != nil {
		panic("context with cancel or deadline")
	}
	if tracing.TracerFromCtx(ds.Ctx) == nil {
		ds.Ctx = tracing.WithTracer(ds.Ctx, tracing.NewTracer())
	}

	ds.clock = cfg.Clock
	if ds.clock == nil {
		ds.clock = hlc.NewClock(hlc.UnixNano)
	}
	if cfg.nodeDescriptor != nil {
		atomic.StorePointer(&ds.nodeDescriptor, unsafe.Pointer(cfg.nodeDescriptor))
	}
	rcSize := cfg.RangeDescriptorCacheSize
	if rcSize <= 0 {
		rcSize = defaultRangeDescriptorCacheSize
	}
	rdb := cfg.RangeDescriptorDB
	if rdb == nil {
		rdb = ds
	}
	ds.rangeCache = newRangeDescriptorCache(rdb, int(rcSize))
	lcSize := cfg.LeaseHolderCacheSize
	if lcSize <= 0 {
		lcSize = defaultLeaseHolderCacheSize
	}
	ds.leaseHolderCache = newLeaseHolderCache(int(lcSize))
	if cfg.RangeLookupMaxRanges <= 0 {
		ds.rangeLookupMaxRanges = defaultRangeLookupMaxRanges
	}
	if cfg.TransportFactory != nil {
		ds.transportFactory = cfg.TransportFactory
	}
	ds.rpcRetryOptions = base.DefaultRetryOptions()
	if cfg.RPCRetryOptions != nil {
		ds.rpcRetryOptions = *cfg.RPCRetryOptions
	}
	if cfg.RPCContext != nil {
		ds.rpcContext = cfg.RPCContext
		if ds.rpcRetryOptions.Closer == nil {
			ds.rpcRetryOptions.Closer = ds.rpcContext.Stopper.ShouldQuiesce()
		}
	}
	if cfg.SendNextTimeout != 0 {
		ds.sendNextTimeout = cfg.SendNextTimeout
	} else {
		ds.sendNextTimeout = defaultSendNextTimeout
	}

	if g != nil {
		g.RegisterCallback(gossip.KeyFirstRangeDescriptor,
			func(_ string, value roachpb.Value) {
				if log.V(1) {
					var desc roachpb.RangeDescriptor
					if err := value.GetProto(&desc); err != nil {
						log.Errorf(ds.Ctx, "unable to parse gossiped first range descriptor: %s", err)
					} else {
						log.Infof(ds.Ctx, "gossiped first range descriptor: %+v", desc.Replicas)
					}
				}
				err := ds.rangeCache.EvictCachedRangeDescriptor(roachpb.RKeyMin, nil, false)
				if err != nil {
					log.Warningf(ds.Ctx, "failed to evict first range descriptor: %s", err)
				}
			})
	}
	return ds
}
// DefaultDBContext returns (a copy of) the default options for
// NewDBWithContext.
func DefaultDBContext() DBContext {
	return DBContext{
		UserPriority:    roachpb.NormalUserPriority,
		TxnRetryOptions: base.DefaultRetryOptions(),
	}
}
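A short sketch of customizing the returned copy before constructing a DB. NewDBWithContext is the constructor the doc comment above refers to; the exact signature, the sender parameter, and the MaxRetries tweak are assumptions for illustration.

// newDBWithCappedRetries is a sketch: it assumes an existing client.Sender
// (e.g. a TxnCoordSender) and caps transaction retries before building a DB.
func newDBWithCappedRetries(sender client.Sender) *client.DB {
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions.MaxRetries = 5 // assumed cap; default retries indefinitely
	return client.NewDBWithContext(sender, dbCtx)
}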
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = ts.stopper.ShouldQuiesce()
	ds := kv.NewDistSender(&kv.DistSenderConfig{
		Clock:           s.Clock(),
		RPCContext:      s.RPCContext(),
		RPCRetryOptions: &retryOpts,
	}, ts.Gossip())
	ctx := tracing.WithTracer(context.Background(), tracing.NewTracer())
	tds := kv.NewTxnCoordSender(ctx, ds, s.Clock(), ts.Ctx.Linearizable, ts.stopper,
		kv.MakeTxnMetrics())

	if err := ts.node.ctx.DB.AdminSplit("m"); err != nil {
		t.Fatal(err)
	}
	writes := []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}
	get := &roachpb.GetRequest{
		Span: roachpb.Span{Key: writes[0]},
	}
	get.EndKey = writes[len(writes)-1]
	if _, err := client.SendWrapped(tds, nil, get); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	var delTS hlc.Timestamp
	for i, k := range writes {
		put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
		reply, err := client.SendWrapped(tds, nil, put)
		if err != nil {
			t.Fatal(err)
		}
		scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next())
		reply, err = client.SendWrapped(tds, nil, scan)
		if err != nil {
			t.Fatal(err)
		}
		sr := reply.(*roachpb.ScanResponse)
		if sr.Txn != nil {
			// This was the other way around at some point in the past.
			// Same below for Delete, etc.
			t.Errorf("expected no transaction in response header")
		}
		if rows := sr.Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}

	del := &roachpb.DeleteRangeRequest{
		Span: roachpb.Span{
			Key:    writes[0],
			EndKey: roachpb.Key(writes[len(writes)-1]).Next(),
		},
		ReturnKeys: true,
	}
	reply, err := client.SendWrappedWith(tds, nil, roachpb.Header{Timestamp: delTS}, del)
	if err != nil {
		t.Fatal(err)
	}
	dr := reply.(*roachpb.DeleteRangeResponse)
	if dr.Txn != nil {
		t.Errorf("expected no transaction in response header")
	}
	if !reflect.DeepEqual(dr.Keys, writes) {
		t.Errorf("expected %v keys to be deleted, but got %v instead", writes, dr.Keys)
	}

	scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next())
	txn := &roachpb.Transaction{Name: "MyTxn"}
	reply, err = client.SendWrappedWith(tds, nil, roachpb.Header{Txn: txn}, scan)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*roachpb.ScanResponse)
	if txn := sr.Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := sr.Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}
// NewServer creates a Server from a server.Context.
func NewServer(srvCtx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", srvCtx.Addr); err != nil {
		return nil, errors.Errorf("unable to resolve RPC address %q: %v", srvCtx.Addr, err)
	}

	if srvCtx.Ctx == nil {
		srvCtx.Ctx = context.Background()
	}
	if srvCtx.Ctx.Done() != nil {
		panic("context with cancel or deadline")
	}
	if tracing.TracerFromCtx(srvCtx.Ctx) == nil {
		// TODO(radu): instead of modifying srvCtx.Ctx, we should have a separate
		// context.Context inside Server. We will need to rename server.Context
		// though.
		srvCtx.Ctx = tracing.WithTracer(srvCtx.Ctx, tracing.NewTracer())
	}

	if srvCtx.Insecure {
		log.Warning(srvCtx.Ctx, "running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := srvCtx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := srvCtx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}

	// Add a dynamic log tag value for the node ID.
	//
	// We need to pass the server's Ctx as a base context for the various server
	// components, but we won't know the node ID until we Start(). At that point
	// it's too late to change the contexts in the components (various background
	// processes will have already started using the contexts).
	//
	// The dynamic value allows us to add the log tag to the context now and
	// update the value asynchronously. It's not significantly more expensive than
	// a regular tag since it's just doing an (atomic) load when a log/trace
	// message is constructed.
	s.nodeLogTagVal.Set(log.DynamicIntValueUnknown)
	srvCtx.Ctx = log.WithLogTag(srvCtx.Ctx, "n", &s.nodeLogTagVal)
	s.ctx = srvCtx

	s.clock.SetMaxOffset(srvCtx.MaxOffset)

	s.rpcContext = rpc.NewContext(srvCtx.Context, s.clock, s.stopper)
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(s.Ctx(), err)
		}
	}
	s.grpc = rpc.NewServer(s.rpcContext)

	s.registry = metric.NewRegistry()
	s.gossip = gossip.New(
		s.Ctx(), s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
	s.storePool = storage.NewStorePool(
		s.gossip,
		s.clock,
		s.rpcContext,
		srvCtx.ReservationsEnabled,
		srvCtx.TimeUntilStoreDead,
		s.stopper,
	)

	// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus, the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	distSenderCfg := kv.DistSenderConfig{
		Ctx:             s.Ctx(),
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}
	s.distSender = kv.NewDistSender(&distSenderCfg, s.gossip)

	txnMetrics := kv.MakeTxnMetrics()
	s.registry.AddMetricStruct(txnMetrics)
	s.txnCoordSender = kv.NewTxnCoordSender(s.Ctx(), s.distSender, s.clock, srvCtx.Linearizable,
		s.stopper, txnMetrics)
	s.db = client.NewDB(s.txnCoordSender)

	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(s.ctx.Context, s.txnCoordSender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)

	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if srvCtx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *srvCtx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)

	// Set up the DistSQL server
	distSQLCfg := distsql.ServerConfig{
		Context:    s.Ctx(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCfg)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)

	// Set up Executor
	execCfg := sql.ExecutorConfig{
		Context:      s.Ctx(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if srvCtx.TestingKnobs.SQLExecutor != nil {
		execCfg.TestingKnobs = srvCtx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		execCfg.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}

	s.sqlExecutor = sql.NewExecutor(execCfg, s.stopper)
	s.registry.AddMetricStruct(s.sqlExecutor)

	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor)
	s.registry.AddMetricStruct(s.pgServer.Metrics())

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Ctx:                            s.Ctx(),
		Clock:                          s.clock,
		DB:                             s.db,
		Gossip:                         s.gossip,
		Transport:                      s.raftTransport,
		RaftTickInterval:               s.ctx.RaftTickInterval,
		ScanInterval:                   s.ctx.ScanInterval,
		ScanMaxIdleTime:                s.ctx.ScanMaxIdleTime,
		ConsistencyCheckInterval:       s.ctx.ConsistencyCheckInterval,
		ConsistencyCheckPanicOnFailure: s.ctx.ConsistencyCheckPanicOnFailure,
		StorePool:                      s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
		},
	}
	if srvCtx.TestingKnobs.Store != nil {
		nCtx.TestingKnobs = *srvCtx.TestingKnobs.Store.(*storage.StoreTestingKnobs)
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.registry.AddMetricStruct(s.rpcContext.RemoteClocks.Metrics())

	s.runtime = status.MakeRuntimeStatSampler(s.clock)
	s.registry.AddMetricStruct(s.runtime)

	s.node = NewNode(nCtx, s.recorder, s.registry, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr))
	roachpb.RegisterInternalServer(s.grpc, s.node)
	storage.RegisterStoresServer(s.grpc, s.node.storesServer)

	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.MakeServer(s.tsDB)

	s.admin = makeAdminServer(s)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx.Context, s.rpcContext, s.node.stores)
	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		gw.RegisterService(s.grpc)
	}

	return s, nil
}
// waitForStoreFrozen polls the given stores until they all report having no
// unfrozen Replicas (or an error or timeout occurs).
func (s *adminServer) waitForStoreFrozen(
	stream serverpb.Admin_ClusterFreezeServer,
	stores map[roachpb.StoreID]roachpb.NodeID,
	wantFrozen bool,
) error {
	mu := struct {
		sync.Mutex
		oks map[roachpb.StoreID]bool
	}{
		oks: make(map[roachpb.StoreID]bool),
	}

	opts := base.DefaultRetryOptions()
	opts.Closer = s.server.stopper.ShouldDrain()
	opts.MaxRetries = 20
	sem := make(chan struct{}, 256)
	errChan := make(chan error, 1)
	sendErr := func(err error) {
		select {
		case errChan <- err:
		default:
		}
	}

	numWaiting := len(stores) // loop until this drops to zero
	var err error
	for r := retry.Start(opts); r.Next(); {
		mu.Lock()
		for storeID, nodeID := range stores {
			storeID, nodeID := storeID, nodeID // loop-local copies for goroutine
			var nodeDesc roachpb.NodeDescriptor
			if err := s.server.gossip.GetInfoProto(gossip.MakeNodeIDKey(nodeID), &nodeDesc); err != nil {
				sendErr(err)
				break
			}
			addr := nodeDesc.Address.String()

			if _, inflightOrSucceeded := mu.oks[storeID]; inflightOrSucceeded {
				continue
			}
			mu.oks[storeID] = false // mark as inflight

			action := func() (err error) {
				var resp *roachpb.PollFrozenResponse
				defer func() {
					message := fmt.Sprintf("node %d, store %d: ", nodeID, storeID)

					if err != nil {
						message += err.Error()
					} else {
						numMismatching := len(resp.Results)
						mu.Lock()
						if numMismatching == 0 {
							// If the Store is in the right state, mark it as such.
							// This means we won't try it again.
							message += "ready"
							mu.oks[storeID] = true
						} else {
							// Otherwise, forget that we tried the Store so that
							// the retry loop picks it up again.
							message += fmt.Sprintf("%d replicas report wrong status", numMismatching)
							if limit := 10; numMismatching > limit {
								message += " [truncated]: "
								resp.Results = resp.Results[:limit]
							} else {
								message += ": "
							}
							message += fmt.Sprintf("%+v", resp.Results)
							delete(mu.oks, storeID)
						}
						mu.Unlock()
						err = stream.Send(&serverpb.ClusterFreezeResponse{
							Message: message,
						})
					}
				}()
				conn, err := s.server.rpcContext.GRPCDial(addr)
				if err != nil {
					return err
				}
				client := roachpb.NewInternalClient(conn)
				resp, err = client.PollFrozen(context.Background(),
					&roachpb.PollFrozenRequest{
						StoreRequestHeader: roachpb.StoreRequestHeader{
							NodeID:  nodeID,
							StoreID: storeID,
						},
						// If we are looking to freeze everything, we want to
						// collect thawed Replicas, and vice versa.
						CollectFrozen: !wantFrozen,
					})
				return err
			}
			// Run a limited, non-blocking task. That means the task simply
			// won't run if the semaphore is full (or the node is draining).
			// Both are handled by the surrounding retry loop.
			if !s.server.stopper.RunLimitedAsyncTask(sem, func() {
				if err := action(); err != nil {
					sendErr(err)
				}
			}) {
				// Node draining.
				sendErr(errors.New("node is shutting down"))
				break
			}
		}

		numWaiting = len(stores)
		for _, ok := range mu.oks {
			if ok {
				// Store has reported that it is frozen.
				numWaiting--
				continue
			}
		}
		mu.Unlock()

		select {
		case err = <-errChan:
		default:
		}

		// Keep going unless there's been an error or everyone's frozen.
		if err != nil || numWaiting == 0 {
			break
		}
		if err := stream.Send(&serverpb.ClusterFreezeResponse{
			Message: fmt.Sprintf("waiting for %d store%s to apply operation",
				numWaiting, util.Pluralize(int64(numWaiting))),
		}); err != nil {
			return err
		}
	}
	if err != nil {
		return err
	}

	if numWaiting > 0 {
		err = fmt.Errorf("timed out waiting for %d store%s to report freeze",
			numWaiting, util.Pluralize(int64(numWaiting)))
	}
	return err
}
// NewServer creates a Server from a server.Context.
func NewServer(ctx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, errors.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}

	if ctx.Insecure {
		log.Warning(context.TODO(), "running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	s.rpcContext = rpc.NewContext(ctx.Context, s.clock, s.stopper)
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(context.TODO(), err)
		}
	}
	s.grpc = rpc.NewServer(s.rpcContext)

	s.registry = metric.NewRegistry()
	s.gossip = gossip.New(s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
	s.storePool = storage.NewStorePool(
		s.gossip,
		s.clock,
		s.rpcContext,
		ctx.ReservationsEnabled,
		ctx.TimeUntilStoreDead,
		s.stopper,
	)

	// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus, the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	s.distSender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	txnMetrics := kv.NewTxnMetrics(s.registry)
	sender := kv.NewTxnCoordSender(s.distSender, s.clock, ctx.Linearizable, s.Tracer,
		s.stopper, txnMetrics)
	s.db = client.NewDB(sender)

	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(s.ctx.Context, sender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)

	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if ctx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *ctx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)

	// Set up the DistSQL server
	distSQLCtx := distsql.ServerContext{
		Context:    context.Background(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCtx)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)

	// Set up Executor
	eCtx := sql.ExecutorContext{
		Context:      context.Background(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if ctx.TestingKnobs.SQLExecutor != nil {
		eCtx.TestingKnobs = ctx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		eCtx.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}

	s.sqlExecutor = sql.NewExecutor(eCtx, s.stopper, s.registry)

	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor, s.registry)

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:                          s.clock,
		DB:                             s.db,
		Gossip:                         s.gossip,
		Transport:                      s.raftTransport,
		RaftTickInterval:               s.ctx.RaftTickInterval,
		ScanInterval:                   s.ctx.ScanInterval,
		ScanMaxIdleTime:                s.ctx.ScanMaxIdleTime,
		ConsistencyCheckInterval:       s.ctx.ConsistencyCheckInterval,
		ConsistencyCheckPanicOnFailure: s.ctx.ConsistencyCheckPanicOnFailure,
		Tracer:                         s.Tracer,
		StorePool:                      s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
		},
	}
	if ctx.TestingKnobs.Store != nil {
		nCtx.TestingKnobs = *ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs)
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.rpcContext.RemoteClocks.RegisterMetrics(s.registry)

	s.runtime = status.MakeRuntimeStatSampler(s.clock, s.registry)

	s.node = NewNode(nCtx, s.recorder, s.registry, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr))
	roachpb.RegisterInternalServer(s.grpc, s.node)
	roachpb.RegisterInternalStoresServer(s.grpc, s.node.InternalStoresServer)

	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.MakeServer(s.tsDB)

	s.admin = makeAdminServer(s)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx.Context, s.rpcContext, s.node.stores)
	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		gw.RegisterService(s.grpc)
	}

	return s, nil
}