Example #1
// TestBootstrapNewStore starts a cluster with two unbootstrapped
// stores and verifies both stores are added.
func TestBootstrapNewStore(t *testing.T) {
	e := engine.NewInMem(engine.Attributes{}, 1<<20)
	localDB, err := BootstrapCluster("cluster-1", e)
	if err != nil {
		t.Fatal(err)
	}
	localDB.Close()

	// Start a new node with two new stores which will require bootstrapping.
	engines := []engine.Engine{
		e,
		engine.NewInMem(engine.Attributes{}, 1<<20),
		engine.NewInMem(engine.Attributes{}, 1<<20),
	}
	server, node := createTestNode(util.CreateTestAddr("tcp"), engines, nil, t)
	defer server.Close()

	// Non-initialized stores (in this case the new in-memory-based
	// store) will be bootstrapped by the node upon start. This happens
	// in a goroutine, so we'll have to wait a bit (maximum 50ms) until
	// we can find the new node.
	if err := util.IsTrueWithin(func() bool { return node.localDB.GetStoreCount() == 3 }, 50*time.Millisecond); err != nil {
		t.Error(err)
	}
}
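// util.IsTrueWithin, used above, polls a closure until it returns true or
// the window elapses. A hedged sketch of equivalent logic (the real
// helper's polling interval and error text may differ):
func isTrueWithin(fn func() bool, d time.Duration) error {
	deadline := time.Now().Add(d)
	for time.Now().Before(deadline) {
		if fn() {
			return nil
		}
		time.Sleep(time.Millisecond)
	}
	return fmt.Errorf("condition failed to evaluate true within %s", d)
}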
Example #2
// TestNodeJoin verifies a new node is able to join a bootstrapped
// cluster consisting of one node.
func TestNodeJoin(t *testing.T) {
	defer leaktest.AfterTest(t)
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	stopper := stop.NewStopper()
	_, err := BootstrapCluster("cluster-1", []engine.Engine{e}, stopper)
	if err != nil {
		t.Fatal(err)
	}
	stopper.Stop()

	// Set an aggressive gossip interval to make sure information is exchanged right away.
	testContext.GossipInterval = gossip.TestInterval
	// Start the bootstrap node.
	engines1 := []engine.Engine{e}
	addr1 := util.CreateTestAddr("tcp")
	server1, node1, stopper1 := createAndStartTestNode(addr1, engines1, addr1, t)
	defer stopper1.Stop()

	// Create a new node.
	engines2 := []engine.Engine{engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)}
	server2, node2, stopper2 := createAndStartTestNode(util.CreateTestAddr("tcp"), engines2, server1.Addr(), t)
	defer stopper2.Stop()

	// Verify new node is able to bootstrap its store.
	if err := util.IsTrueWithin(func() bool { return node2.lSender.GetStoreCount() == 1 }, 50*time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// Verify node1 sees node2 via gossip and vice versa.
	node1Key := gossip.MakeNodeIDKey(node1.Descriptor.NodeID)
	node2Key := gossip.MakeNodeIDKey(node2.Descriptor.NodeID)
	if err := util.IsTrueWithin(func() bool {
		nodeDesc1 := &roachpb.NodeDescriptor{}
		if err := node1.ctx.Gossip.GetInfoProto(node2Key, nodeDesc1); err != nil {
			return false
		}
		if addr2 := nodeDesc1.Address.AddressField; addr2 != server2.Addr().String() {
			t.Errorf("addr2 gossip %s doesn't match addr2 address %s", addr2, server2.Addr().String())
		}
		nodeDesc2 := &roachpb.NodeDescriptor{}
		if err := node2.ctx.Gossip.GetInfoProto(node1Key, nodeDesc2); err != nil {
			return false
		}
		if addr1 := nodeDesc2.Address.AddressField; addr1 != server1.Addr().String() {
			t.Errorf("addr1 gossip %s doesn't match addr1 address %s", addr1, server1.Addr().String())
		}
		return true
	}, 50*time.Millisecond); err != nil {
		t.Error(err)
	}
}
Example #3
// TestNodeJoin verifies a new node is able to join a bootstrapped
// cluster consisting of one node.
func TestNodeJoin(t *testing.T) {
	defer leaktest.AfterTest(t)()
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	if _, err := bootstrapCluster([]engine.Engine{e}, kv.NewTxnMetrics(metric.NewRegistry())); err != nil {
		t.Fatal(err)
	}

	// Start the bootstrap node.
	engines1 := []engine.Engine{e}
	addr1 := util.CreateTestAddr("tcp")
	_, server1Addr, node1, stopper1 := createAndStartTestNode(addr1, engines1, addr1, t)
	defer stopper1.Stop()

	// Create a new node.
	engines2 := []engine.Engine{engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)}
	addr2 := util.CreateTestAddr("tcp")
	_, server2Addr, node2, stopper2 := createAndStartTestNode(addr2, engines2, server1Addr, t)
	defer stopper2.Stop()

	// Verify new node is able to bootstrap its store.
	util.SucceedsSoon(t, func() error {
		if sc := node2.stores.GetStoreCount(); sc != 1 {
			return util.Errorf("GetStoreCount() expected 1; got %d", sc)
		}
		return nil
	})

	// Verify node1 sees node2 via gossip and vice versa.
	node1Key := gossip.MakeNodeIDKey(node1.Descriptor.NodeID)
	node2Key := gossip.MakeNodeIDKey(node2.Descriptor.NodeID)
	util.SucceedsSoon(t, func() error {
		var nodeDesc1 roachpb.NodeDescriptor
		if err := node1.ctx.Gossip.GetInfoProto(node2Key, &nodeDesc1); err != nil {
			return err
		}
		if addr2Str, server2AddrStr := nodeDesc1.Address.String(), server2Addr.String(); addr2Str != server2AddrStr {
			return util.Errorf("addr2 gossip %s doesn't match addr2 address %s", addr2Str, server2AddrStr)
		}
		var nodeDesc2 roachpb.NodeDescriptor
		if err := node2.ctx.Gossip.GetInfoProto(node1Key, &nodeDesc2); err != nil {
			return err
		}
		if addr1Str, server1AddrStr := nodeDesc2.Address.String(), server1Addr.String(); addr1Str != server1AddrStr {
			return util.Errorf("addr1 gossip %s doesn't match addr1 address %s", addr1Str, server1AddrStr)
		}
		return nil
	})
}
Example #4
// TestResponseCacheGC verifies that response cache entries are
// garbage collected periodically.
func TestResponseCacheGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	eng := engine.NewInMem(proto.Attributes{Attrs: []string{"ssd"}}, 1<<30)
	defer eng.Close()

	rc := NewResponseCache(1, eng)
	cmdID := makeCmdID(1, 1)

	// Add response for cmdID with timestamp at time=1ns.
	copyIncR := incR
	copyIncR.Timestamp.WallTime = 1
	if err := rc.PutResponse(cmdID, &copyIncR); err != nil {
		t.Fatalf("unexpected error putting responpse: %v", err)
	}
	eng.SetGCTimeouts(0, 0) // avoids GC
	eng.CompactRange(nil, nil)
	val := proto.IncrementResponse{}
	if ok, err := rc.GetResponse(cmdID, &val); !ok || err != nil || val.NewValue != 1 {
		t.Fatalf("unexpected response or error: %t, %v, %+v", ok, err, val)
	}

	// Now set minRCacheTS to 1, which will GC.
	eng.SetGCTimeouts(0, 1)
	eng.CompactRange(nil, nil)
	if ok, err := rc.GetResponse(cmdID, &val); ok || err != nil {
		t.Errorf("unexpected response or error: %t, %v", ok, err)
	}
}
Example #5
func startNewServer() *kvTestServer {
	s := &kvTestServer{}

	// Initialize engine, store, and localDB.
	e := engine.NewInMem(engine.Attributes{}, 1<<20)
	localDB, err := server.BootstrapCluster("test-cluster", e)
	if err != nil {
		panic(err)
	}
	s.db = localDB

	// Rip through the stores (there should be just one) and grab the first range (there should also just be one).
	localDB.VisitStores(func(store *storage.Store) error {
		rs := store.GetRanges()
		if len(rs) > 0 {
			s.firstRange = rs[0]
		}
		return nil
	})
	if s.firstRange == nil {
		panic("Internal Error: Expected to find a range while initializing test server!")
	}

	// Initialize the REST server.
	s.rest = rest.NewRESTServer(s.db)
	s.httpServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		s.rest.HandleAction(w, r)
	}))

	return s
}
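// A hedged usage sketch for startNewServer; the field names come from the
// helper itself, while the Close-based cleanup is an assumption:
func exampleRESTServer() {
	s := startNewServer()
	defer s.httpServer.Close()
	// s.db, s.firstRange, and s.httpServer.URL are now available for
	// REST-driven KV tests against the single bootstrapped range.
}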
Example #6
// startAdminServer launches a new admin server using minimal engine
// and local database setup. Returns the new http test server, which
// should be cleaned up by caller via httptest.Server.Close(). The
// Cockroach KV client address is set to the address of the test server.
func startAdminServer() (string, *stop.Stopper) {
	stopper := stop.NewStopper()
	db, err := BootstrapCluster("cluster-1", []engine.Engine{engine.NewInMem(proto.Attributes{}, 1<<20)}, stopper)
	if err != nil {
		log.Fatal(err)
	}
	admin := newAdminServer(db, stopper)
	mux := http.NewServeMux()
	mux.Handle(adminEndpoint, admin)
	mux.Handle(debugEndpoint, admin)
	httpServer := httptest.NewUnstartedServer(mux)
	tlsConfig, err := testContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}
	httpServer.TLS = tlsConfig
	httpServer.StartTLS()
	stopper.AddCloser(httpServer)

	if strings.HasPrefix(httpServer.URL, "http://") {
		testContext.Addr = strings.TrimPrefix(httpServer.URL, "http://")
	} else if strings.HasPrefix(httpServer.URL, "https://") {
		testContext.Addr = strings.TrimPrefix(httpServer.URL, "https://")
	}
	return httpServer.URL, stopper
}
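// A hedged usage sketch for startAdminServer. The server speaks TLS with a
// test certificate, so the InsecureSkipVerify transport below is an
// illustrative shortcut, not part of the original helper:
func exampleAdminRequest() {
	url, stopper := startAdminServer()
	defer stopper.Stop()
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := client.Get(url + adminEndpoint)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}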
Example #7
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	// Set up the fake zone config handler.
	config.TestingSetupZoneConfigHook(stopper)
	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ctx.StorePool = NewStorePool(ctx.Gossip, TestTimeUntilStoreDeadOff, stopper)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(roachpb.Attributes{}, 10<<20, stopper)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	ctx.DB = client.NewDB(sender)
	store := NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(roachpb.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(nil); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
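// A hedged sketch of how the helper above is typically consumed; the
// explicit store.Start(stopper) call mirrors the Start signature used
// elsewhere in these examples:
func exampleDeferredStart(t *testing.T) {
	store, _, stopper := createTestStoreWithoutStart(t)
	defer stopper.Stop()
	if err := store.Start(stopper); err != nil {
		t.Fatal(err)
	}
}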
Example #8
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)
	eagerStopper := stop.NewStopper()
	e := engine.NewInMem(proto.Attributes{}, 1<<20)
	_, err := BootstrapCluster("cluster-1", []engine.Engine{e}, eagerStopper)
	if err != nil {
		t.Fatal(err)
	}
	eagerStopper.Stop()

	// Set the cluster ID to an empty string.
	sIdent := proto.StoreIdent{
		ClusterID: "",
		NodeID:    1,
		StoreID:   1,
	}
	if err = engine.MVCCPutProto(e, nil, keys.StoreIdentKey(), proto.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}

	engines := []engine.Engine{e}
	server, _, node, stopper := createTestNode(util.CreateTestAddr("tcp"), engines, nil, t)
	if err := node.start(server, engines, proto.Attributes{}, stopper); err == nil {
		t.Errorf("unexpected success")
	}
	stopper.Stop()
}
Example #9
// createTestStore creates a test store using an in-memory
// engine. Returns the store and the store clock's manual unix
// nanos time. If createDefaultRange is true, creates a single range
// from key "a" to key "z" with a default replica descriptor (i.e.
// StoreID = 1, RangeID = 1, etc.). The caller is responsible for
// closing the store on exit.
func createTestStore(createDefaultRange bool, t *testing.T) (*Store, *hlc.ManualClock) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := NewStore(clock, eng, nil, nil)
	if err := store.Bootstrap(proto.StoreIdent{StoreID: 1}); err != nil {
		t.Fatal(err)
	}
	db, _ := newTestDB(store)
	store.db = db
	replica := proto.Replica{StoreID: 1, RangeID: 1}
	// Create system key range for allocations.
	meta := store.BootstrapRangeMetadata()
	meta.StartKey = engine.KeySystemPrefix
	meta.EndKey = engine.PrefixEndKey(engine.KeySystemPrefix)
	_, err := store.CreateRange(meta)
	if err != nil {
		t.Fatal(err)
	}
	if err := store.Init(); err != nil {
		t.Fatal(err)
	}
	// Now that the system key range is available and the store DB is set,
	// new ranges can be allocated as needed for tests.
	// If requested, create a default range for tests from "a"-"z".
	if createDefaultRange {
		replica = proto.Replica{StoreID: 1}
		_, err := store.CreateRange(store.NewRangeMetadata(engine.Key("a"), engine.Key("z"), []proto.Replica{replica}))
		if err != nil {
			t.Fatal(err)
		}
	}
	return store, &manual
}
Example #10
// TestResponseCacheGC verifies that response cache entries are
// garbage collected periodically.
func TestResponseCacheGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	eng := engine.NewInMem(proto.Attributes{Attrs: []string{"ssd"}}, 1<<30)
	defer eng.Close()

	rc := NewResponseCache(1)
	cmdID := makeCmdID(1, 1)

	// Add response for cmdID with timestamp at time=1ns.
	copyIncR := incR
	copyIncR.Timestamp.WallTime = 1
	if err := rc.PutResponse(eng, cmdID, proto.ResponseWithError{Reply: &copyIncR, Err: nil}); err != nil {
		t.Fatalf("unexpected error putting responpse: %s", err)
	}
	eng.SetGCTimeouts(0, 0) // avoids GC
	eng.CompactRange(nil, nil)
	replyWithErr, readErr := rc.GetResponse(eng, cmdID)
	if readErr != nil {
		t.Fatalf("unexpected read error: %s", readErr)
	} else if replyWithErr.Reply == nil || replyWithErr.Reply.(*proto.IncrementResponse).NewValue != 1 {
		t.Fatalf("unexpected response or error: %s, %+v", replyWithErr.Err, replyWithErr.Reply)
	}

	// Now set minRCacheTS to 1, which will GC.
	eng.SetGCTimeouts(0, 1)
	eng.CompactRange(nil, nil)
	if replyWithErr, readErr := rc.GetResponse(eng, cmdID); readErr != nil {
		t.Fatalf("unexpected read error: %s", readErr)
	} else if replyWithErr.Reply != nil || replyWithErr.Err != nil {
		t.Fatalf("unexpected response or error: %s, %+v", replyWithErr.Err, replyWithErr.Reply)
	}
}
Example #11
// TestBootstrapOfNonEmptyStore verifies bootstrap failure if engine
// is not empty.
func TestBootstrapOfNonEmptyStore(t *testing.T) {
	defer leaktest.AfterTest(t)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)

	// Put some random garbage into the engine.
	if err := eng.Put(proto.EncodedKey("foo"), []byte("bar")); err != nil {
		t.Errorf("failure putting key foo into engine: %s", err)
	}
	ctx := TestStoreContext
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper := stop.NewStopper()
	stopper.AddCloser(ctx.Transport)
	defer stopper.Stop()
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})

	// Can't init as haven't bootstrapped.
	if err := store.Start(stopper); err == nil {
		t.Error("expected failure init'ing un-bootstrapped store")
	}

	// Bootstrap should fail on non-empty engine.
	if err := store.Bootstrap(testIdent, stopper); err == nil {
		t.Error("expected bootstrap error on non-empty store")
	}
}
Example #12
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)()
	engineStopper := stop.NewStopper()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	defer engineStopper.Stop()
	if _, err := bootstrapCluster([]engine.Engine{e}, kv.NewTxnMetrics(metric.NewRegistry())); err != nil {
		t.Fatal(err)
	}

	// Set the cluster ID to the empty UUID.
	sIdent := roachpb.StoreIdent{
		ClusterID: *uuid.EmptyUUID,
		NodeID:    1,
		StoreID:   1,
	}
	if err := engine.MVCCPutProto(context.Background(), e, nil, keys.StoreIdentKey(), roachpb.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}

	engines := []engine.Engine{e}
	_, serverAddr, _, node, stopper := createTestNode(util.TestAddr, engines, nil, t)
	stopper.Stop()
	if err := node.start(serverAddr, engines, roachpb.Attributes{}); !testutils.IsError(err, "unidentified store") {
		t.Errorf("unexpected error %v", err)
	}
}
Example #13
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(rootTestBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 10<<20)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	var err error
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
Example #14
func TestPutGetDeleteSchema(t *testing.T) {
	s, err := createTestSchema()
	if err != nil {
		t.Fatalf("could not create test schema: %v", err)
	}
	e := engine.NewInMem(proto.Attributes{}, 1<<20)
	localDB, err := server.BootstrapCluster("test-cluster", e)
	if err != nil {
		t.Fatalf("unable to boostrap cluster: %v", err)
	}
	db := structured.NewDB(localDB)
	if err := db.PutSchema(s); err != nil {
		t.Fatalf("could not register schema: %v", err)
	}
	if s, err = db.GetSchema(s.Key); err != nil {
		t.Errorf("could not get schema with key %q: %v", s.Key, err)
	}
	expectedName := "PhotoDB"
	if s.Name != expectedName {
		t.Errorf("expected schema to be named %q; got %q", expectedName, s.Name)
	}
	if err := db.DeleteSchema(s); err != nil {
		t.Errorf("could not delete schema: %v", err)
	}
	if s, err = db.GetSchema(s.Key); err != nil {
		t.Errorf("could not get schema with key %q: %v", s.Key, err)
	}
	if s != nil {
		t.Errorf("expected schema to be nil; got %+v", s)
	}
}
Example #15
// TestResponseCacheGC verifies that response cache entries are
// garbage collected periodically.
func TestResponseCacheGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	eng := engine.NewInMem(roachpb.Attributes{Attrs: []string{"ssd"}}, 1<<30, stopper)

	rc := NewResponseCache(1)
	cmdID := makeCmdID(1, 1)

	// Add response for cmdID with timestamp at time=1ns.
	copyR := batchR
	copyR.Timestamp.WallTime = 1
	if err := rc.PutResponse(eng, cmdID, roachpb.ResponseWithError{Reply: &copyR, Err: nil}); err != nil {
		t.Fatalf("unexpected error putting responpse: %s", err)
	}
	eng.SetGCTimeouts(0, 0) // avoids GC
	eng.CompactRange(nil, nil)
	replyWithErr, readErr := rc.GetResponse(eng, cmdID)
	if readErr != nil {
		t.Fatalf("unxpected read error :%s", readErr)
	} else if replyWithErr.Reply == nil || replyWithErr.Err != nil {
		t.Fatalf("unexpected empty response or error: %s, %+v", replyWithErr.Err, replyWithErr.Reply)
	} else if inc := replyWithErr.Reply.(*roachpb.BatchResponse).Responses[0].GetInner().(*roachpb.IncrementResponse); inc.NewValue != 1 {
		t.Fatalf("unexpected value for increment: %+v", inc)
	}

	// Now set minRCacheTS to 1, which will GC.
	eng.SetGCTimeouts(0, 1)
	eng.CompactRange(nil, nil)
	if replyWithErr, readErr := rc.GetResponse(eng, cmdID); readErr != nil {
		t.Fatalf("unexpected read error: %s", readErr)
	} else if replyWithErr.Reply != nil || replyWithErr.Err != nil {
		t.Fatalf("unexpected response or error: %s, %+v", replyWithErr.Err, replyWithErr.Reply)
	}
}
Example #16
func newBlockingEngine() *blockingEngine {
	be := &blockingEngine{
		InMem: engine.NewInMem(proto.Attributes{}, 1<<20),
	}
	be.cvar = sync.NewCond(&be.mu)
	return be
}
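// The blockingEngine type itself is not shown in this snippet. A plausible
// shape, inferred from the constructor above (the block flag is a guess at
// how tests would toggle blocking):
type blockingEngine struct {
	*engine.InMem
	mu    sync.Mutex
	cvar  *sync.Cond
	block bool
}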
Example #17
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewRootTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)
	ltc.lSender = newRetryableLocalSender(NewLocalSender())
	ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, nil, ltc.Stopper)
	var err error
	if ltc.DB, err = client.Open("//root@", client.SenderOpt(ltc.Sender)); err != nil {
		t.Fatal(err)
	}
	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.lSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
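// A hedged usage sketch for LocalTestCluster; stopping via the exported
// Stopper field is an assumption consistent with the code above:
func exampleLocalTestCluster(t util.Tester) {
	ltc := &LocalTestCluster{}
	ltc.Start(t)
	defer ltc.Stopper.Stop()
	// ltc.DB can now issue KV requests against the single local store.
}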
Example #18
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)
	engineStopper := stop.NewStopper()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	defer engineStopper.Stop()
	if _, err := bootstrapCluster([]engine.Engine{e}); err != nil {
		t.Fatal(err)
	}

	// Set the cluster ID to an empty string.
	sIdent := roachpb.StoreIdent{
		ClusterID: "",
		NodeID:    1,
		StoreID:   1,
	}
	if err := engine.MVCCPutProto(e, nil, keys.StoreIdentKey(), roachpb.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}

	engines := []engine.Engine{e}
	server, serverAddr, _, node, stopper := createTestNode(util.CreateTestAddr("tcp"), engines, nil, t)
	stopper.Stop()
	if err := node.start(server, serverAddr, engines, roachpb.Attributes{}); !testutils.IsError(err, "unidentified store") {
		t.Errorf("unexpected error %v", err)
	}
}
Example #19
// StartWithStopper is the same as Start, but allows passing a stopper
// explicitly.
func (ts *TestServer) StartWithStopper(stopper *stop.Stopper) error {
	if ts.Ctx == nil {
		ts.Ctx = NewTestContext()
	}

	if stopper == nil {
		stopper = stop.NewStopper()
	}

	// Change the replication requirements so we don't get log spam
	// about ranges not being replicated enough.
	// TODO(marc): set this in the zones table when we have an entry
	// for the default cluster-wide zone config and remove these
	// shenanigans about mutating the global default.
	oldDefaultZC := proto.Clone(config.DefaultZoneConfig).(*config.ZoneConfig)
	config.DefaultZoneConfig.ReplicaAttrs = []roachpb.Attributes{{}}
	stopper.AddCloser(stop.CloserFn(func() {
		config.DefaultZoneConfig = oldDefaultZC
	}))

	var err error
	ts.Server, err = NewServer(ts.Ctx, stopper)
	if err != nil {
		return err
	}

	// Ensure we have the correct number of engines. Add in-memory ones where
	// needed. There must be at least one store/engine.
	if ts.StoresPerNode < 1 {
		ts.StoresPerNode = 1
	}
	for i := len(ts.Ctx.Engines); i < ts.StoresPerNode; i++ {
		ts.Ctx.Engines = append(ts.Ctx.Engines, engine.NewInMem(roachpb.Attributes{}, 100<<20, ts.Server.stopper))
	}

	if !ts.SkipBootstrap {
		stopper := stop.NewStopper()
		_, err := BootstrapCluster("cluster-1", ts.Ctx.Engines, stopper)
		if err != nil {
			return util.Errorf("could not bootstrap cluster: %s", err)
		}
		stopper.Stop()
	}
	if err := ts.Server.Start(true); err != nil {
		return err
	}

	// If enabled, wait for initial splits to complete before returning control.
	// If initial splits do not complete, the server is stopped before
	// returning.
	if config.TestingTableSplitsDisabled() {
		return nil
	}
	if err := ts.WaitForInitialSplits(); err != nil {
		ts.Stop()
		return err
	}

	return nil
}
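// A hedged usage sketch: per the checks at the top of StartWithStopper, a
// nil context and nil stopper are both replaced with fresh instances.
func exampleTestServer() error {
	ts := &TestServer{}
	if err := ts.StartWithStopper(nil); err != nil {
		return err
	}
	defer ts.Stop()
	return nil
}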
Example #20
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20, ltc.Stopper)

	ltc.localSender = NewLocalSender()
	var rpcSend rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		// TODO(tschottdorf): remove getReply().
		br, pErr := ltc.localSender.Send(context.Background(), *getArgs(nil).(*proto.BatchRequest))
		if br == nil {
			br = &proto.BatchResponse{}
		}
		if br.Error != nil {
			panic(proto.ErrorUnexpectedlySet(ltc.localSender, br))
		}
		br.Error = pErr
		return []gogoproto.Message{br}, nil
	}
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock: ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &defaultRPCRetryOptions,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,         // defined above
		RangeDescriptorDB:        ltc.localSender, // for descriptor lookup
	}, ltc.Gossip)

	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, nil /* tracer */, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)

	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.localSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
Example #21
// addStore creates a new store on the same Transport but doesn't create any ranges.
func (m *multiTestContext) addStore() {
	idx := len(m.stores)
	var clock *hlc.Clock
	if len(m.clocks) > idx {
		clock = m.clocks[idx]
	} else {
		clock = m.clock
		m.clocks = append(m.clocks, clock)
	}
	var eng engine.Engine
	var needBootstrap bool
	if len(m.engines) > idx {
		eng = m.engines[idx]
	} else {
		eng = engine.NewInMem(proto.Attributes{}, 1<<20)
		m.engines = append(m.engines, eng)
		needBootstrap = true
		// Add an extra refcount to the engine so the underlying rocksdb instances
		// aren't closed when stopping and restarting the stores.
		// These refcounts are removed in Stop().
		if err := eng.Open(); err != nil {
			m.t.Fatal(err)
		}
	}

	stopper := stop.NewStopper()
	ctx := m.makeContext(idx)
	store := storage.NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: proto.NodeID(idx + 1)})
	if needBootstrap {
		err := store.Bootstrap(proto.StoreIdent{
			NodeID:  proto.NodeID(idx + 1),
			StoreID: proto.StoreID(idx + 1),
		}, stopper)
		if err != nil {
			m.t.Fatal(err)
		}

		// Bootstrap the initial range on the first store
		if idx == 0 {
			if err := store.BootstrapRange(nil); err != nil {
				m.t.Fatal(err)
			}
		}
	}
	if err := store.Start(stopper); err != nil {
		m.t.Fatal(err)
	}
	store.WaitForInit()
	m.stores = append(m.stores, store)
	if len(m.senders) == idx {
		m.senders = append(m.senders, kv.NewLocalSender())
	}
	m.senders[idx].AddStore(store)
	// Save the store identities for later so we can use them in
	// replication operations even while the store is stopped.
	m.idents = append(m.idents, store.Ident)
	m.stoppers = append(m.stoppers, stopper)
}
Example #22
func TestLocalSenderLookupReplica(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	ctx := storage.TestStoreContext
	manualClock := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manualClock.UnixNano)
	ls := NewLocalSender()

	// Create two new stores with ranges we care about.
	var e [2]engine.Engine
	var s [2]*storage.Store
	ranges := []struct {
		storeID    proto.StoreID
		start, end proto.Key
	}{
		{2, proto.Key("a"), proto.Key("c")},
		{3, proto.Key("x"), proto.Key("z")},
	}
	for i, rng := range ranges {
		e[i] = engine.NewInMem(proto.Attributes{}, 1<<20)
		ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
		defer ctx.Transport.Close()
		s[i] = storage.NewStore(ctx, e[i], &proto.NodeDescriptor{NodeID: 1})
		s[i].Ident.StoreID = rng.storeID

		desc := &proto.RangeDescriptor{
			RangeID:  proto.RangeID(i),
			StartKey: rng.start,
			EndKey:   rng.end,
			Replicas: []proto.Replica{{StoreID: rng.storeID}},
		}
		newRng, err := storage.NewReplica(desc, s[i])
		if err != nil {
			t.Fatal(err)
		}
		if err := s[i].AddRangeTest(newRng); err != nil {
			t.Error(err)
		}
		ls.AddStore(s[i])
	}

	if _, r, err := ls.lookupReplica(proto.Key("a"), proto.Key("c")); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("b"), nil); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("b"), proto.Key("d")); r != nil || err == nil {
		t.Errorf("expected store 0 and error got %d", r.StoreID)
	}
	if _, r, err := ls.lookupReplica(proto.Key("x"), proto.Key("z")); r.StoreID != s[1].Ident.StoreID {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("y"), nil); r.StoreID != s[1].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
}
Example #23
// Start starts the TestServer by bootstrapping an in-memory store
// (defaults to maximum of 100M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.ServingAddr() after Start() for client connections.
// Use TestServer.Stopper().Stop() to shutdown the server after the test
// completes.
func (ts *TestServer) Start(params base.TestServerArgs) error {
	if ts.Ctx == nil {
		panic("Ctx not set")
	}

	if params.Stopper == nil {
		params.Stopper = stop.NewStopper()
	}

	if !params.PartOfCluster {
		// Change the replication requirements so we don't get log spam about ranges
		// not being replicated enough.
		cfg := config.DefaultZoneConfig()
		cfg.ReplicaAttrs = []roachpb.Attributes{{}}
		fn := config.TestingSetDefaultZoneConfig(cfg)
		params.Stopper.AddCloser(stop.CloserFn(fn))
	}

	// Needs to be called before NewServer to ensure resolvers are initialized.
	if err := ts.Ctx.InitNode(); err != nil {
		return err
	}

	// Ensure we have the correct number of engines. Add in-memory ones where
	// needed. There must be at least one store/engine.
	if params.StoresPerNode < 1 {
		params.StoresPerNode = 1
	}
	for i := len(ts.Ctx.Engines); i < params.StoresPerNode; i++ {
		ts.Ctx.Engines = append(ts.Ctx.Engines, engine.NewInMem(roachpb.Attributes{}, 100<<20, params.Stopper))
	}

	var err error
	ts.Server, err = NewServer(*ts.Ctx, params.Stopper)
	if err != nil {
		return err
	}
	// Our context must be shared with our server.
	ts.Ctx = &ts.Server.ctx

	if err := ts.Server.Start(); err != nil {
		return err
	}

	// If enabled, wait for initial splits to complete before returning control.
	// If initial splits do not complete, the server is stopped before
	// returning.
	if stk, ok := ts.ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs); ok &&
		stk.DisableSplitQueue {
		return nil
	}
	if err := ts.WaitForInitialSplits(); err != nil {
		ts.Stop()
		return err
	}

	return nil
}
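// A hedged usage sketch for the newer Start signature; TestServerArgs
// fields are left at their defaults and the Ctx construction mirrors the
// earlier StartWithStopper example:
func exampleTestServerStart() error {
	ts := &TestServer{Ctx: NewTestContext()}
	if err := ts.Start(base.TestServerArgs{}); err != nil {
		return err
	}
	defer ts.Stopper().Stop()
	return nil
}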
Example #24
// TestNodeJoin verifies a new node is able to join a bootstrapped
// cluster consisting of one node.
func TestNodeJoin(t *testing.T) {
	e := engine.NewInMem(engine.Attributes{}, 1<<20)
	localDB, err := BootstrapCluster("cluster-1", e)
	if err != nil {
		t.Fatal(err)
	}
	localDB.Close()

	// Set an aggressive gossip interval to make sure information is exchanged right away.
	*gossip.GossipInterval = 10 * time.Millisecond
	// Start the bootstrap node.
	engines1 := []engine.Engine{e}
	addr1 := util.CreateTestAddr("tcp")
	server1, node1 := createTestNode(addr1, engines1, addr1, t)
	defer server1.Close()

	// Create a new node.
	engines2 := []engine.Engine{engine.NewInMem(engine.Attributes{}, 1<<20)}
	server2, node2 := createTestNode(util.CreateTestAddr("tcp"), engines2, server1.Addr(), t)
	defer server2.Close()

	// Verify new node is able to bootstrap its store.
	if err := util.IsTrueWithin(func() bool { return node2.localDB.GetStoreCount() == 1 }, 50*time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// Verify node1 sees node2 via gossip and vice versa.
	node1Key := gossip.MakeNodeIDGossipKey(node1.Descriptor.NodeID)
	node2Key := gossip.MakeNodeIDGossipKey(node2.Descriptor.NodeID)
	if err := util.IsTrueWithin(func() bool {
		if val, err := node1.gossip.GetInfo(node2Key); err != nil {
			return false
		} else if val.(net.Addr).String() != server2.Addr().String() {
			t.Error("addr2 gossip %s doesn't match addr2 address %s", val.(net.Addr).String(), server2.Addr().String())
		}
		if val, err := node2.gossip.GetInfo(node1Key); err != nil {
			return false
		} else if val.(net.Addr).String() != server1.Addr().String() {
			t.Error("addr1 gossip %s doesn't match addr1 address %s", val.(net.Addr).String(), server1.Addr().String())
		}
		return true
	}, 50*time.Millisecond); err != nil {
		t.Error(err)
	}
}
Example #25
func TestLocalKVLookupReplica(t *testing.T) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	kv := NewLocalKV()
	db := NewDB(kv, clock)
	store := storage.NewStore(clock, eng, db, nil)
	if err := store.Bootstrap(proto.StoreIdent{StoreID: 1}); err != nil {
		t.Fatal(err)
	}
	kv.AddStore(store)
	meta := store.BootstrapRangeMetadata()
	meta.StartKey = engine.KeySystemPrefix
	meta.EndKey = engine.PrefixEndKey(engine.KeySystemPrefix)
	if _, err := store.CreateRange(meta); err != nil {
		t.Fatal(err)
	}
	if err := store.Init(); err != nil {
		t.Fatal(err)
	}
	// Create two new stores with ranges we care about.
	var s [2]*storage.Store
	ranges := []struct {
		storeID    int32
		start, end engine.Key
	}{
		{2, engine.Key("a"), engine.Key("c")},
		{3, engine.Key("x"), engine.Key("z")},
	}
	for i, rng := range ranges {
		s[i] = storage.NewStore(clock, eng, db, nil)
		s[i].Ident.StoreID = rng.storeID
		replica := proto.Replica{StoreID: rng.storeID}
		_, err := s[i].CreateRange(store.NewRangeMetadata(rng.start, rng.end, []proto.Replica{replica}))
		if err != nil {
			t.Fatal(err)
		}
		kv.AddStore(s[i])
	}

	if r, err := kv.lookupReplica(engine.Key("a"), engine.Key("c")); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if r, err := kv.lookupReplica(engine.Key("b"), nil); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if r, err := kv.lookupReplica(engine.Key("b"), engine.Key("d")); r != nil || err == nil {
		t.Errorf("expected store 0 and error got %d", r.StoreID)
	}
	if r, err := kv.lookupReplica(engine.Key("x"), engine.Key("z")); r.StoreID != s[1].Ident.StoreID {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
	if r, err := kv.lookupReplica(engine.Key("y"), nil); r.StoreID != s[1].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
}
Example #26
// initEngine parses the store attributes as a colon-separated list
// and instantiates an engine based on the dir parameter. If dir parses
// to an integer, it's taken to mean an in-memory engine; otherwise,
// dir is treated as a path and a RocksDB engine is created.
func (ctx *Context) initEngine(attrsStr, path string, stopper *stop.Stopper) (engine.Engine, error) {
	attrs := parseAttributes(attrsStr)
	if size, err := strconv.ParseUint(path, 10, 64); err == nil {
		if size == 0 {
			return nil, errUnsizedInMemStore
		}
		return engine.NewInMem(attrs, int64(size), stopper), nil
	}
	return engine.NewRocksDB(attrs, path, ctx.CacheSize, stopper), nil
}
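// A hedged sketch of the two parsing paths in initEngine (the attribute
// strings, size, and path below are illustrative values only):
func exampleInitEngine(ctx *Context, stopper *stop.Stopper) error {
	// A numeric dir yields an in-memory engine of that many bytes...
	if _, err := ctx.initEngine("ssd", "1048576", stopper); err != nil {
		return err
	}
	// ...anything else is treated as an on-disk RocksDB path.
	if _, err := ctx.initEngine("hdd", "/tmp/cockroach-data", stopper); err != nil {
		return err
	}
	return nil
}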
Example #27
// TestStoreRecoverWithErrors verifies that even commands that fail are marked as
// applied so they are not retried after recovery.
func TestStoreRecoverWithErrors(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)

	numIncrements := 0

	storage.TestingCommandFilter = func(args proto.Request, reply proto.Response) bool {
		if _, ok := args.(*proto.IncrementRequest); ok && args.Header().Key.Equal(proto.Key("a")) {
			numIncrements++
		}
		return false
	}
	defer func() {
		storage.TestingCommandFilter = nil
	}()

	func() {
		store, stopper := createTestStoreWithEngine(t, eng, clock, true, nil)
		defer stopper.Stop()

		// Write a bytes value so the increment will fail.
		putArgs, putReply := putArgs(proto.Key("a"), []byte("asdf"), 1, store.StoreID())
		if err := store.ExecuteCmd(context.Background(), proto.Call{Args: putArgs, Reply: putReply}); err != nil {
			t.Fatal(err)
		}

		// Try and fail to increment the key. It is important for this test that the
		// failure be the last thing in the raft log when the store is stopped.
		incArgs, incReply := incrementArgs(proto.Key("a"), 42, 1, store.StoreID())
		if err := store.ExecuteCmd(context.Background(), proto.Call{Args: incArgs, Reply: incReply}); err == nil {
			t.Fatal("did not get expected error")
		}
	}()

	if numIncrements != 1 {
		t.Fatalf("expected 1 increments; was %d", numIncrements)
	}

	// Recover from the engine.
	store, stopper := createTestStoreWithEngine(t, eng, clock, false, nil)
	defer stopper.Stop()

	// Issue a no-op write to lazily initialize raft on the range.
	incArgs, incReply := incrementArgs(proto.Key("b"), 0, 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: incArgs, Reply: incReply}); err != nil {
		t.Fatal(err)
	}

	// No additional increments were performed on key A during recovery.
	if numIncrements != 1 {
		t.Fatalf("expected 1 increments; was %d", numIncrements)
	}
}
Example #28
// TestStoreRecoverWithErrors verifies that even commands that fail are marked as
// applied so they are not retried after recovery.
func TestStoreRecoverWithErrors(t *testing.T) {
	defer leaktest.AfterTest(t)
	defer func() { storage.TestingCommandFilter = nil }()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)

	numIncrements := 0

	storage.TestingCommandFilter = func(_ roachpb.StoreID, args roachpb.Request, _ roachpb.Header) error {
		if _, ok := args.(*roachpb.IncrementRequest); ok && args.Header().Key.Equal(roachpb.Key("a")) {
			numIncrements++
		}
		return nil
	}

	func() {
		stopper := stop.NewStopper()
		defer stopper.Stop()
		store := createTestStoreWithEngine(t, eng, clock, true, nil, stopper)

		// Write a bytes value so the increment will fail.
		putArgs := putArgs(roachpb.Key("a"), []byte("asdf"))
		if _, err := client.SendWrapped(rg1(store), nil, &putArgs); err != nil {
			t.Fatal(err)
		}

		// Try and fail to increment the key. It is important for this test that the
		// failure be the last thing in the raft log when the store is stopped.
		incArgs := incrementArgs(roachpb.Key("a"), 42)
		if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err == nil {
			t.Fatal("did not get expected error")
		}
	}()

	if numIncrements != 1 {
		t.Fatalf("expected 1 increments; was %d", numIncrements)
	}

	// Recover from the engine.
	store := createTestStoreWithEngine(t, eng, clock, false, nil, engineStopper)

	// Issue a no-op write to lazily initialize raft on the range.
	incArgs := incrementArgs(roachpb.Key("b"), 0)
	if _, err := client.SendWrapped(rg1(store), nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	// No additional increments were performed on key A during recovery.
	if numIncrements != 1 {
		t.Fatalf("expected 1 increments; was %d", numIncrements)
	}
}
Example #29
// startStatusServer launches a new status server using minimal engine
// and local database setup. Returns the new http test server, which
// should be cleaned up by caller via httptest.Server.Close(). The
// Cockroach KV client address is set to the address of the test server.
func startStatusServer() *httptest.Server {
	db, err := BootstrapCluster("cluster-1", engine.NewInMem(proto.Attributes{}, 1<<20))
	if err != nil {
		log.Fatal(err)
	}
	status := newStatusServer(db, nil)
	mux := http.NewServeMux()
	status.RegisterHandlers(mux)
	httpServer := httptest.NewServer(mux)
	return httpServer
}
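// A hedged usage sketch; the exact paths registered by RegisterHandlers
// are not shown here, so the request path below is hypothetical:
func exampleStatusRequest() {
	s := startStatusServer()
	defer s.Close()
	resp, err := http.Get(s.URL + "/_status/nodes") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}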
Example #30
// TestBootstrapNewStore starts a cluster with two unbootstrapped
// stores and verifies both stores are added and started.
func TestBootstrapNewStore(t *testing.T) {
	defer leaktest.AfterTest(t)
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	eagerStopper := stop.NewStopper()
	if _, err := BootstrapCluster("cluster-1", []engine.Engine{e}, eagerStopper); err != nil {
		t.Fatal(err)
	}
	eagerStopper.Stop()

	// Start a new node with two new stores which will require bootstrapping.
	engines := []engine.Engine{
		e,
		engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper),
		engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper),
	}
	_, _, node, stopper := createAndStartTestNode(util.CreateTestAddr("tcp"), engines, util.CreateTestAddr("tcp"), t)
	defer stopper.Stop()

	// Non-initialized stores (in this case the new in-memory-based
	// store) will be bootstrapped by the node upon start. This happens
	// in a goroutine, so we'll have to wait a bit until we can find the
	// new node.
	util.SucceedsWithin(t, testTimeout, func() error {
		if n := node.stores.GetStoreCount(); n != 3 {
			return util.Errorf("expected 3 stores but got %d", n)
		}
		return nil
	})

	// Check whether all stores are started properly.
	if err := node.stores.VisitStores(func(s *storage.Store) error {
		if !s.IsStarted() {
			return util.Errorf("failed to start store: %s", s)
		}
		return nil
	}); err != nil {
		t.Error(err)
	}
}