// StartWithStopper is the same as Start, but allows passing a stopper
// explicitly.
func (ts *TestServer) StartWithStopper(stopper *stop.Stopper) error {
	if ts.Ctx == nil {
		ts.Ctx = NewTestContext()
	}

	if stopper == nil {
		stopper = stop.NewStopper()
	}

	// Change the replication requirements so we don't get log spam about
	// ranges not being replicated enough.
	// TODO(marc): set this in the zones table when we have an entry for the
	// default cluster-wide zone config and remove these shenanigans about
	// mutating the global default.
	oldDefaultZC := proto.Clone(config.DefaultZoneConfig).(*config.ZoneConfig)
	config.DefaultZoneConfig.ReplicaAttrs = []roachpb.Attributes{{}}
	stopper.AddCloser(stop.CloserFn(func() {
		config.DefaultZoneConfig = oldDefaultZC
	}))

	var err error
	ts.Server, err = NewServer(ts.Ctx, stopper)
	if err != nil {
		return err
	}

	// Ensure we have the correct number of engines. Add in-memory engines
	// where needed. There must be at least one store/engine.
	if ts.StoresPerNode < 1 {
		ts.StoresPerNode = 1
	}
	for i := len(ts.Ctx.Engines); i < ts.StoresPerNode; i++ {
		ts.Ctx.Engines = append(ts.Ctx.Engines,
			engine.NewInMem(roachpb.Attributes{}, 100<<20, ts.Server.stopper))
	}

	if !ts.SkipBootstrap {
		// Bootstrap with a short-lived stopper that intentionally shadows the
		// function argument; it must not outlive the bootstrap step.
		stopper := stop.NewStopper()
		if _, err := BootstrapCluster("cluster-1", ts.Ctx.Engines, stopper); err != nil {
			return util.Errorf("could not bootstrap cluster: %s", err)
		}
		stopper.Stop()
	}
	if err := ts.Server.Start(true); err != nil {
		return err
	}

	// If enabled, wait for initial splits to complete before returning
	// control. If initial splits do not complete, the server is stopped
	// before returning.
	if config.TestingTableSplitsDisabled() {
		return nil
	}
	if err := ts.WaitForInitialSplits(); err != nil {
		ts.Stop()
		return err
	}

	return nil
}
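// exampleStartWithExplicitStopper is a minimal usage sketch (hypothetical,
// not part of the original file) showing why a test would call
// StartWithStopper rather than Start: the caller keeps ownership of the
// stopper and can coordinate shutdown with goroutines it manages itself.
func exampleStartWithExplicitStopper(t testing.TB) {
	stopper := stop.NewStopper()
	ts := &TestServer{}
	if err := ts.StartWithStopper(stopper); err != nil {
		t.Fatal(err)
	}
	// The caller supplied the stopper, so the caller stops it.
	defer stopper.Stop()
	// ... exercise ts.Server here ...
}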
// waitForStopper stops the supplied stop.Stopper and waits up to five seconds
// for it to complete.
func waitForStopper(t testing.TB, stopper *stop.Stopper) {
	stopper.Stop()
	select {
	case <-stopper.IsStopped():
	case <-time.After(5 * time.Second):
		t.Fatalf("Stopper failed to stop after 5 seconds")
	}
}
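// exampleShutdownWithTimeout is a usage sketch (hypothetical, not part of the
// original file): wrapping shutdown in waitForStopper turns a hung closer or
// task into a test failure after five seconds instead of a silent deadlock.
func exampleShutdownWithTimeout(t testing.TB) {
	stopper := stop.NewStopper()
	// ... run tasks and register closers on the stopper ...
	waitForStopper(t, stopper) // fails the test if shutdown exceeds 5s
}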