// Start starts the TestServer by bootstrapping an in-memory store
// (defaults to maximum of 100M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.ServingAddr() after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ts *TestServer) Start() error {
	if ts.Ctx == nil {
		ts.Ctx = NewTestContext()
	}

	var err error
	ts.Server, err = NewServer(ts.Ctx, stop.NewStopper())
	if err != nil {
		return err
	}

	// Ensure we have the correct number of engines. Add in-memory ones where
	// needed. There must be at least one store/engine.
	if ts.StoresPerNode < 1 {
		ts.StoresPerNode = 1
	}
	for i := len(ts.Ctx.Engines); i < ts.StoresPerNode; i++ {
		ts.Ctx.Engines = append(ts.Ctx.Engines, engine.NewInMem(proto.Attributes{}, 100<<20))
	}

	if !ts.SkipBootstrap {
		stopper := stop.NewStopper()
		_, err := BootstrapCluster("cluster-1", ts.Ctx.Engines, stopper)
		if err != nil {
			return util.Errorf("could not bootstrap cluster: %s", err)
		}
		stopper.Stop()
	}

	if err := ts.Server.Start(true); err != nil {
		return err
	}

	if err := configutil.SetDefaultRangeReplicaNum(ts.db, 1); err != nil {
		return err
	}

	return nil
}
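// The test below is a minimal sketch of how Start, ServingAddr and Stop
// (all referenced in the doc comment above) might be exercised from a Go
// test in the same package (importing "testing"); the test name and the
// empty-address check are illustrative assumptions, not part of the
// original code.
func TestServerStartStop(t *testing.T) {
	ts := &TestServer{}
	if err := ts.Start(); err != nil {
		t.Fatalf("could not start test server: %s", err)
	}
	// Stop shuts the server down once the test completes.
	defer ts.Stop()

	// Clients should connect using the address the server is serving on.
	if addr := ts.ServingAddr(); addr == "" {
		t.Fatal("expected a non-empty serving address")
	}
}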
// runStart starts the cockroach node using --stores as the list of
// storage devices ("stores") on this machine and --gossip as the list
// of "well-known" hosts used to join this node to the cockroach
// cluster via the gossip network.
func runStart(cmd *cobra.Command, args []string) {
	info := util.GetBuildInfo()
	log.Infof("build Vers: %s", info.Vers)
	log.Infof("build Tag: %s", info.Tag)
	log.Infof("build Time: %s", info.Time)
	log.Infof("build Deps: %s", info.Deps)

	// Default user for servers.
	context.User = security.NodeUser

	if context.EphemeralSingleNode {
		context.Stores = "mem=1073741824"
		context.GossipBootstrap = server.SelfGossipAddr

		runInit(cmd, args)
	} else {
		if err := context.InitStores(); err != nil {
			log.Errorf("failed to initialize stores: %s", err)
			return
		}
	}

	if err := context.InitNode(); err != nil {
		log.Errorf("failed to initialize node: %s", err)
		return
	}

	log.Info("starting cockroach cluster")
	stopper := stop.NewStopper()
	s, err := server.NewServer(context, stopper)
	if err != nil {
		log.Errorf("failed to start Cockroach server: %s", err)
		return
	}

	if err := s.Start(false); err != nil {
		log.Errorf("cockroach server exited with error: %s", err)
		return
	}

	if context.EphemeralSingleNode {
		// TODO(tamird): pass this to BootstrapRange rather than doing it
		// at runtime. This was quicker, though.
		db, clientStopper := makeDBClient()

		if err := configutil.SetDefaultRangeReplicaNum(db, 1); err != nil {
			log.Errorf("failed to set default replica number: %s", err)
		}

		clientStopper.Stop()
	}

	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt, os.Kill)
	// TODO(spencer): move this behind a build tag.
	signal.Notify(signalCh, syscall.SIGTERM)

	// Block until one of the signals above is received or the stopper
	// is stopped externally (for example, via the quit endpoint).
	select {
	case <-stopper.ShouldStop():
	case <-signalCh:
		go s.Stop()
	}

	log.Info("initiating graceful shutdown of server")

	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if log.V(1) {
					log.Infof("running tasks:\n%s", stopper.RunningTasks())
				}
				log.Infof("%d running tasks", stopper.NumTasks())
			case <-stopper.ShouldStop():
				return
			}
		}
	}()

	select {
	case <-signalCh:
		log.Warningf("second signal received, initiating hard shutdown")
	case <-time.After(time.Minute):
		log.Warningf("time limit reached, initiating hard shutdown")
	case <-stopper.IsStopped():
		log.Infof("server drained and shutdown completed")
	}
	log.Flush()
}
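// For reference, a node started by runStart is launched from the command
// line with the two flags described in the doc comment above. The example
// below is only a sketch: the gossip address is a placeholder, and the
// store spec is the same "mem=1073741824" value the EphemeralSingleNode
// branch sets (a single in-memory store of 1073741824 bytes, i.e. 1 GiB):
//
//   cockroach start --stores=mem=1073741824 --gossip=<host:port>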