Example No. 1
// Start starts the TestServer by bootstrapping an in-memory store
// (defaults to a maximum of 100 MiB). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.ServingAddr() after Start() for client connections.
// Use TestServer.Stopper().Stop() to shut down the server after the test
// completes.
func (ts *TestServer) Start(params base.TestServerArgs) error {
	if ts.Ctx == nil {
		panic("Ctx not set")
	}

	if params.Stopper == nil {
		params.Stopper = stop.NewStopper()
	}

	if !params.PartOfCluster {
		// Change the replication requirements so we don't get log spam about ranges
		// not being replicated enough.
		cfg := config.DefaultZoneConfig()
		cfg.ReplicaAttrs = []roachpb.Attributes{{}}
		fn := config.TestingSetDefaultZoneConfig(cfg)
		params.Stopper.AddCloser(stop.CloserFn(fn))
	}

	// Needs to be called before NewServer to ensure resolvers are initialized.
	if err := ts.Ctx.InitNode(); err != nil {
		return err
	}

	// Ensure we have the correct number of engines. Add in-memory ones where
	// needed. There must be at least one store/engine.
	if params.StoresPerNode < 1 {
		params.StoresPerNode = 1
	}
	for i := len(ts.Ctx.Engines); i < params.StoresPerNode; i++ {
		ts.Ctx.Engines = append(ts.Ctx.Engines, engine.NewInMem(roachpb.Attributes{}, 100<<20, params.Stopper))
	}

	var err error
	ts.Server, err = NewServer(*ts.Ctx, params.Stopper)
	if err != nil {
		return err
	}
	// Our context must be shared with our server.
	ts.Ctx = &ts.Server.ctx

	if err := ts.Server.Start(); err != nil {
		return err
	}

	// Wait for initial splits to complete before returning control, unless
	// the split queue has been disabled via testing knobs, in which case the
	// splits will never happen. If the initial splits do not complete, the
	// server is stopped before returning.
	if stk, ok := ts.ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs); ok &&
		stk.DisableSplitQueue {
		return nil
	}
	if err := ts.WaitForInitialSplits(); err != nil {
		ts.Stop()
		return err
	}

	return nil
}
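
A minimal usage sketch, assuming the package layout above. The test name and the context construction are illustrative only; in particular, NewTestContext is an assumed helper, not part of the original example — substitute whatever context setup your test harness uses.

func TestStartServer(t *testing.T) {
	// NewTestContext is an assumed constructor for ts.Ctx; Start panics if
	// Ctx is nil, so it must be set before calling Start.
	ts := &TestServer{Ctx: NewTestContext()}
	if err := ts.Start(base.TestServerArgs{}); err != nil {
		t.Fatal(err)
	}
	// Per the doc comment: stop the server via Stopper().Stop() once the
	// test completes, and point clients at ServingAddr().
	defer ts.Stopper().Stop()
	t.Log(ts.ServingAddr())
}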
Example No. 2
// SetupMultinodeTestCluster starts a cluster made up of `nodes` in-memory
// testing servers, creates database `name`, and returns open gosql.DB
// connections to each node (scoped to the named database), as well as a
// stopper that shuts down and cleans up all nodes and connections.
func SetupMultinodeTestCluster(
	t testing.TB, nodes int, name string,
) (MultinodeTestCluster, []*gosql.DB, *stop.Stopper) {
	if nodes < 1 {
		t.Fatal("invalid cluster size: ", nodes)
	}
	stopper := stop.NewStopper()

	// Force all ranges to be replicated everywhere. This is needed until
	// #7297 is fixed; otherwise, starting a cluster takes forever.
	cfg := config.DefaultZoneConfig()
	cfg.ReplicaAttrs = make([]roachpb.Attributes, nodes)
	fn := config.TestingSetDefaultZoneConfig(cfg)
	stopper.AddCloser(stop.CloserFn(fn))

	var servers []serverutils.TestServerInterface
	var conns []*gosql.DB
	args := base.TestServerArgs{
		Stopper:       stopper,
		PartOfCluster: true,
		UseDatabase:   name,
	}
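	// Start the first node on its own, then have every subsequent node join
	// it via its serving address.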
	first, conn, _ := serverutils.StartServer(t, args)
	servers = append(servers, first)
	conns = append(conns, conn)
	args.JoinAddr = first.ServingAddr()
	for i := 1; i < nodes; i++ {
		s, conn, _ := serverutils.StartServer(t, args)
		servers = append(servers, s)
		conns = append(conns, conn)
	}

	if _, err := conns[0].Exec(fmt.Sprintf(`CREATE DATABASE %s`, name)); err != nil {
		t.Fatal(err)
	}

	testCluster := MultinodeTestCluster{Servers: servers}
	return testCluster, conns, first.Stopper()
}
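
A usage sketch under the same assumptions; the node count and the table schema are illustrative:

func TestMultinodeCluster(t *testing.T) {
	// Start a three-node cluster. Every returned connection is already
	// scoped to the "test" database via UseDatabase, and the returned
	// stopper tears down all nodes and connections.
	_, conns, stopper := SetupMultinodeTestCluster(t, 3, "test")
	defer stopper.Stop()

	if _, err := conns[0].Exec(`CREATE TABLE kv (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
}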