Example 1
// TestScannerAddToQueues verifies that ranges are added to and
// removed from multiple queues.
func TestScannerAddToQueues(t *testing.T) {
	const count = 3
	iter := newTestIterator(count)
	q1, q2 := &testQueue{}, &testQueue{}
	s := newRangeScanner(1*time.Millisecond, iter)
	s.AddQueues(q1, q2)
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	stopper := util.NewStopper(0)

	// Start queue and verify that all ranges are added to both queues.
	s.Start(clock, stopper)
	if err := util.IsTrueWithin(func() bool {
		return q1.count() == count && q2.count() == count
	}, 50*time.Millisecond); err != nil {
		t.Error(err)
	}

	// Remove the first range and verify it no longer exists in either queue.
	rng := iter.remove(0)
	s.RemoveRange(rng)
	if err := util.IsTrueWithin(func() bool {
		return q1.count() == count-1 && q2.count() == count-1
	}, 10*time.Millisecond); err != nil {
		t.Error(err)
	}

	// Stop scanner and verify both queues are stopped.
	stopper.Stop()
	if !q1.isDone() || !q2.isDone() {
		t.Errorf("expected all queues to stop; got %t, %t", q1.isDone(), q2.isDone())
	}
}
Example 2
// TestScannerTiming verifies that ranges are scanned at a frequency
// that matches scanInterval, regardless of how many ranges there are.
func TestScannerTiming(t *testing.T) {
	defer leaktest.AfterTest(t)
	const count = 3
	const runTime = 100 * time.Millisecond
	const maxError = 7500 * time.Microsecond
	durations := []time.Duration{
		10 * time.Millisecond,
		25 * time.Millisecond,
	}
	for i, duration := range durations {
		ranges := newTestRangeSet(count, t)
		q := &testQueue{}
		s := newRangeScanner(duration, 0, ranges, nil)
		s.AddQueues(q)
		mc := hlc.NewManualClock(0)
		clock := hlc.NewClock(mc.UnixNano)
		stopper := util.NewStopper()
		defer stopper.Stop()
		s.Start(clock, stopper)
		time.Sleep(runTime)

		avg := s.avgScan()
		log.Infof("%d: average scan: %s", i, avg)
		if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
			duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
			t.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
		}
	}
}
Example 3
// TestUnretryableError verifies that Send returns an unretryable
// error when it hits a critical error.
func TestUnretryableError(t *testing.T) {
	defer leaktest.AfterTest(t)

	stopper := util.NewStopper()
	defer stopper.Stop()

	nodeContext := NewNodeTestContext(nil, stopper)
	s := createAndStartNewServer(t, nodeContext)

	opts := Options{
		N:               1,
		Ordering:        OrderStable,
		SendNextTimeout: 1 * time.Second,
		Timeout:         5 * time.Second,
	}
	getArgs := func(addr net.Addr) interface{} {
		return &proto.PingRequest{}
	}
	// Make getReply return a non-proto value so that the proto
	// integrity check fails.
	getReply := func() interface{} {
		return 0
	}
	_, err := Send(opts, "Heartbeat.Ping", []net.Addr{s.Addr()}, getArgs, getReply, nodeContext)
	if err == nil {
		t.Fatalf("Unexpected success")
	}
	retryErr, ok := err.(util.Retryable)
	if !ok {
		t.Fatalf("Unexpected error type: %v", err)
	}
	if retryErr.CanRetry() {
		t.Errorf("Unexpected retryable error: %v", retryErr)
	}
}
Example 4
// TestRetryableError verifies that Send returns a retryable error
// when it hits an RPC error.
func TestRetryableError(t *testing.T) {
	defer leaktest.AfterTest(t)

	stopper := util.NewStopper()
	defer stopper.Stop()

	nodeContext := NewNodeTestContext(nil, stopper)
	s := createAndStartNewServer(t, nodeContext)

	// Wait until the client connection is ready, then shut down both the client and the server.
	c := NewClient(s.Addr(), nil, nodeContext)
	<-c.Ready
	// Directly call Close() to close the connection without
	// removing the client from the cache.
	c.Client.Close()
	s.Close()

	opts := Options{
		N:               1,
		Ordering:        OrderStable,
		SendNextTimeout: 1 * time.Second,
		Timeout:         1 * time.Second,
	}
	if _, err := sendPing(opts, []net.Addr{s.Addr()}, nodeContext); err != nil {
		retryErr, ok := err.(util.Retryable)
		if !ok {
			t.Fatalf("Unexpected error type: %v", err)
		}
		if !retryErr.CanRetry() {
			t.Errorf("Expected retryable error: %v", retryErr)
		}
	} else {
		t.Fatalf("Unexpected success")
	}
}
Example 5
// startAdminServer launches a new admin server using a minimal engine
// and local database setup. It returns the URL of the new http test
// server and a stopper; the caller should call Stop() on the stopper,
// which also closes the test server (it is registered via AddCloser).
// The Cockroach KV client address is set to the address of the test server.
func startAdminServer() (string, *util.Stopper) {
	stopper := util.NewStopper()
	db, err := BootstrapCluster("cluster-1", []engine.Engine{engine.NewInMem(proto.Attributes{}, 1<<20)}, stopper)
	if err != nil {
		log.Fatal(err)
	}
	admin := newAdminServer(db, stopper)
	mux := http.NewServeMux()
	mux.Handle(adminEndpoint, admin)
	mux.Handle(debugEndpoint, admin)
	httpServer := httptest.NewUnstartedServer(mux)
	tlsConfig, err := testContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}
	httpServer.TLS = tlsConfig
	httpServer.StartTLS()
	stopper.AddCloser(httpServer)

	if strings.HasPrefix(httpServer.URL, "http://") {
		testContext.Addr = strings.TrimPrefix(httpServer.URL, "http://")
	} else if strings.HasPrefix(httpServer.URL, "https://") {
		testContext.Addr = strings.TrimPrefix(httpServer.URL, "https://")
	}
	return httpServer.URL, stopper
}
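
A minimal caller sketch for the helper above (hypothetical test body): the returned stopper owns the httptest server via AddCloser, so a single Stop() cleans up everything.

func TestAdminServerUsageSketch(t *testing.T) {
	url, stopper := startAdminServer()
	// Stopping the stopper also closes the httptest server, which was
	// registered above via stopper.AddCloser.
	defer stopper.Stop()
	log.Infof("admin server available at %s", url)
	// Issue HTTPS requests against url here.
}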
Example 6
// TestBaseQueueAddRemove adds, then removes a range and ensures the range is not processed.
func TestBaseQueueAddRemove(t *testing.T) {
	defer leaktest.AfterTest(t)
	r := &Range{}
	if err := r.setDesc(&proto.RangeDescriptor{RaftID: 1}); err != nil {
		t.Fatal(err)
	}
	testQueue := &testQueueImpl{
		shouldQueueFn: func(now proto.Timestamp, r *Range) (shouldQueue bool, priority float64) {
			shouldQueue = true
			priority = 1.0
			return
		},
	}
	bq := newBaseQueue("test", testQueue, 2)
	stopper := util.NewStopper()
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	bq.Start(clock, stopper)
	defer stopper.Stop()

	bq.MaybeAdd(r, proto.ZeroTimestamp)
	bq.MaybeRemove(r)

	time.Sleep(5 * time.Millisecond)
	if pc := atomic.LoadInt32(&testQueue.processed); pc > 0 {
		t.Errorf("expected processed count of 0; got %d", pc)
	}
}
Example 7
// TestSendToMultipleClients verifies that Send correctly sends
// multiple requests to multiple servers using the heartbeat RPC.
func TestSendToMultipleClients(t *testing.T) {
	defer leaktest.AfterTest(t)

	stopper := util.NewStopper()
	defer stopper.Stop()

	nodeContext := NewNodeTestContext(nil, stopper)

	numServers := 4
	var addrs []net.Addr
	for i := 0; i < numServers; i++ {
		s := createAndStartNewServer(t, nodeContext)
		addrs = append(addrs, s.Addr())
	}
	for n := 1; n < numServers; n++ {
		// Send n requests.
		opts := Options{
			N:               n,
			Ordering:        OrderStable,
			SendNextTimeout: 1 * time.Second,
			Timeout:         1 * time.Second,
		}
		replies, err := sendPing(opts, addrs, nodeContext)
		if err != nil {
			t.Fatal(err)
		}
		if len(replies) != n {
			t.Errorf("%v replies are expected, but got %v", n, len(replies))
		}
	}
}
Example 8
func TestSlowStorage(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := proto.RaftID(1)
	cluster.createGroup(groupID, 0, 3)

	cluster.triggerElection(0, groupID)
	cluster.waitForElection(0)

	// Block the storage on the last node.
	// TODO(bdarnell): there appear to still be issues if the storage is blocked during
	// the election.
	cluster.storages[2].Block()

	// Submit a command to the leader.
	cluster.nodes[0].SubmitCommand(groupID, makeCommandID(), []byte("command"))

	// Even with the third node blocked, the other nodes can make progress.
	for i := 0; i < 2; i++ {
		events := cluster.events[i]
		log.Infof("waiting for event to be commited on node %v", i)
		commit := <-events.CommandCommitted
		if string(commit.Command) != "command" {
			t.Errorf("unexpected value in committed command: %v", commit.Command)
		}
	}

	// Ensure that node 2 is in fact blocked.
	time.Sleep(time.Millisecond)
	select {
	case commit := <-cluster.events[2].CommandCommitted:
		t.Errorf("didn't expect commits on node 2 but got %v", commit)
	default:
	}

	// After unblocking the third node, it will catch up.
	cluster.storages[2].Unblock()
	log.Infof("waiting for event to be commited on node 2")
	// When we unblock, the backlog is not guaranteed to be processed in order,
	// and in some cases the leader may need to retransmit some messages.
	for i := 0; i < 3; i++ {
		select {
		case commit := <-cluster.events[2].CommandCommitted:
			if string(commit.Command) != "command" {
				t.Errorf("unexpected value in committed command: %v", commit.Command)
			}
			return

		case <-time.After(5 * time.Millisecond):
			// Tick both nodes' clocks. The ticks on the follower node don't
			// really do anything, but they do ensure that its goroutine is
			// getting scheduled (and the real-time delay allows RPC responses
			// to pass between the nodes).
			cluster.tickers[0].Tick()
			cluster.tickers[2].Tick()
		}
	}
}
Example 9
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The cluster is wired up with a local
// sender and txn coordinator; client operations run through
// LocalTestCluster.DB. Use Stop() to shut down the cluster after the
// test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = util.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)
	ltc.lSender = newRetryableLocalSender(NewLocalSender())
	ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, ltc.Stopper)
	var err error
	if ltc.DB, err = client.Open("//root@", client.SenderOpt(ltc.Sender)); err != nil {
		t.Fatal(err)
	}
	transport := multiraft.NewLocalRPCTransport()
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.lSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
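
A sketch of the intended call pattern, assuming LocalTestCluster exposes a Stop() method as the doc comment above implies:

func TestLocalTestClusterUsageSketch(t *testing.T) {
	ltc := &LocalTestCluster{}
	ltc.Start(t)
	// Stop() is assumed as the counterpart to Start(), per the doc comment.
	defer ltc.Stop()
	// KV operations in the test run through ltc.DB.
}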
Example 10
// TestBootstrapOfNonEmptyStore verifies bootstrap failure if engine
// is not empty.
func TestBootstrapOfNonEmptyStore(t *testing.T) {
	defer leaktest.AfterTest(t)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)

	// Put some random garbage into the engine.
	if err := eng.Put(proto.EncodedKey("foo"), []byte("bar")); err != nil {
		t.Errorf("failure putting key foo into engine: %s", err)
	}
	ctx := TestStoreContext
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper := util.NewStopper()
	stopper.AddCloser(ctx.Transport)
	defer stopper.Stop()
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})

	// Can't start as we haven't bootstrapped.
	if err := store.Start(stopper); err == nil {
		t.Error("expected failure init'ing un-bootstrapped store")
	}

	// Bootstrap should fail on non-empty engine.
	if err := store.Bootstrap(testIdent, stopper); err == nil {
		t.Error("expected bootstrap error on non-empty store")
	}
}
Example 11
// TestClientHeartbeatBadServer verifies that the client is not marked
// as "ready" until a heartbeat request succeeds.
func TestClientHeartbeatBadServer(t *testing.T) {
	defer leaktest.AfterTest(t)

	stopper := util.NewStopper()
	defer stopper.Stop()

	// Create a server without registering a heartbeat service.
	serverClock := hlc.NewClock(hlc.UnixNano)
	s := createTestServer(serverClock, stopper, t)

	// Create a client. It should attempt a heartbeat and fail.
	c := NewClient(s.Addr(), nil, s.context)

	// Register a heartbeat service.
	heartbeat := &HeartbeatService{
		clock:              serverClock,
		remoteClockMonitor: newRemoteClockMonitor(serverClock),
	}

	// The client should fail the heartbeat and be recycled.
	<-c.Closed

	if err := s.RegisterName("Heartbeat", heartbeat); err != nil {
		t.Fatalf("Unable to register heartbeat service: %s", err)
	}
	// Same thing, but now the heartbeat service exists.
	c = NewClient(s.Addr(), nil, s.context)

	// A heartbeat should succeed and the client should become ready.
	<-c.Ready
}
Example 12
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the manual
// clock backing the store's clock, and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *util.Stopper) {
	stopper := util.NewStopper()
	rpcContext := rpc.NewContext(testBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 10<<20)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	var err error
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
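
A caller sketch built only from the pieces visible in these examples: the store is created unstarted, so the caller performs any pre-start setup and then starts it with the returned stopper.

func TestStoreWithoutStartUsageSketch(t *testing.T) {
	store, manual, stopper := createTestStoreWithoutStart(t)
	defer stopper.Stop()
	// Pre-start setup would go here; the manual clock controls the
	// store's notion of time.
	_ = manual
	if err := store.Start(stopper); err != nil {
		t.Fatal(err)
	}
}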
Example 13
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock, node and stopper are returned. If
// gossipBS is not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, *hlc.Clock, *Node, *util.Stopper) {
	var err error
	ctx := storage.StoreContext{}

	stopper := util.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(addr, nodeRPCContext)
	if err := rpcServer.Start(); err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipInterval, testContext.GossipBootstrapResolvers)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS == addr {
			gossipBS = rpcServer.Addr()
		}
		g.SetResolvers([]resolver.Resolver{resolver.NewResolverFromAddress(gossipBS)})
		g.Start(rpcServer, stopper)
	}
	ctx.Gossip = g
	sender := kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock}, g)
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	// TODO(bdarnell): arrange to have the transport closed.
	ctx.Transport = multiraft.NewLocalRPCTransport()
	ctx.EventFeed = &util.Feed{}
	node := NewNode(ctx)
	return rpcServer, ctx.Clock, node, stopper
}
Example 14
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)
	eagerStopper := util.NewStopper()
	e := engine.NewInMem(proto.Attributes{}, 1<<20)
	_, err := BootstrapCluster("cluster-1", []engine.Engine{e}, eagerStopper)
	if err != nil {
		t.Fatal(err)
	}
	eagerStopper.Stop()

	// Set the cluster ID to an empty string.
	sIdent := proto.StoreIdent{
		ClusterID: "",
		NodeID:    1,
		StoreID:   1,
	}
	if err = engine.MVCCPutProto(e, nil, keys.StoreIdentKey(), proto.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}

	engines := []engine.Engine{e}
	server, _, node, stopper := createTestNode(util.CreateTestAddr("tcp"), engines, nil, t)
	if err := node.start(server, engines, proto.Attributes{}, stopper); err == nil {
		t.Errorf("unexpected success")
	}
	stopper.Stop()
}
Example 15
// runStart starts the cockroach node using --stores as the list of
// storage devices ("stores") on this machine and --gossip as the list
// of "well-known" hosts used to join this node to the cockroach
// cluster via the gossip network.
func runStart(cmd *cobra.Command, args []string) {
	info := util.GetBuildInfo()
	log.Infof("build Vers: %s", info.Vers)
	log.Infof("build Tag:  %s", info.Tag)
	log.Infof("build Time: %s", info.Time)
	log.Infof("build Deps: %s", info.Deps)

	// Default user for servers.
	Context.User = security.NodeUser
	// First initialize the Context as it is used in other places.
	err := Context.Init("start")
	if err != nil {
		log.Errorf("failed to initialize context: %s", err)
		return
	}

	log.Info("starting cockroach cluster")
	stopper := util.NewStopper()
	stopper.AddWorker()
	s, err := server.NewServer(Context, stopper)
	if err != nil {
		log.Errorf("failed to start Cockroach server: %s", err)
		return
	}

	err = s.Start(false)
	if err != nil {
		log.Errorf("cockroach server exited with error: %s", err)
		return
	}

	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt, os.Kill)
	// TODO(spencer): move this behind a build tag.
	signal.Notify(signalCh, syscall.SIGTERM)

	// Block until one of the signals above is received or the stopper
	// is stopped externally (for example, via the quit endpoint).
	select {
	case <-stopper.ShouldStop():
		stopper.SetStopped()
	case <-signalCh:
		log.Infof("initiating graceful shutdown of server")
		stopper.SetStopped()
		go func() {
			s.Stop()
		}()
	}

	select {
	case <-signalCh:
		log.Warningf("second signal received, initiating hard shutdown")
	case <-time.After(time.Minute):
		log.Warningf("time limit reached, initiating hard shutdown")
		return
	case <-stopper.IsStopped():
		log.Infof("server drained and shutdown completed")
	}
	log.Flush()
}
Example 16
// TestScannerStats verifies that stats accumulate from all ranges.
func TestScannerStats(t *testing.T) {
	defer leaktest.AfterTest(t)
	const count = 3
	ranges := newTestRangeSet(count, t)
	q := &testQueue{}
	stopper := util.NewStopper()
	defer stopper.Stop()
	s := newRangeScanner(1*time.Millisecond, 0, ranges, nil)
	s.AddQueues(q)
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	// At start, scanner stats should be blank for MVCC, but show an accurate range count.
	if rc := s.Stats().RangeCount; rc != count {
		t.Errorf("range count expected %d; got %d", count, rc)
	}
	if vb := s.Stats().MVCC.ValBytes; vb != 0 {
		t.Errorf("value bytes expected %d; got %d", 0, vb)
	}
	s.Start(clock, stopper)
	// We expect a full run to accumulate stats from all ranges.
	if err := util.IsTrueWithin(func() bool {
		if rc := s.Stats().RangeCount; rc != count {
			return false
		}
		if vb := s.Stats().MVCC.ValBytes; vb != count*2 {
			return false
		}
		return true
	}, 100*time.Millisecond); err != nil {
		t.Error(err)
	}
}
Example 17
// addStore creates a new store on the same Transport but doesn't create any ranges.
func (m *multiTestContext) addStore() {
	idx := len(m.stores)
	var clock *hlc.Clock
	if len(m.clocks) > idx {
		clock = m.clocks[idx]
	} else {
		clock = m.clock
		m.clocks = append(m.clocks, clock)
	}
	var eng engine.Engine
	var needBootstrap bool
	if len(m.engines) > idx {
		eng = m.engines[idx]
	} else {
		eng = engine.NewInMem(proto.Attributes{}, 1<<20)
		m.engines = append(m.engines, eng)
		needBootstrap = true
		// Add an extra refcount to the engine so the underlying rocksdb instances
		// aren't closed when stopping and restarting the stores.
		// These refcounts are removed in Stop().
		if err := eng.Open(); err != nil {
			m.t.Fatal(err)
		}
	}

	stopper := util.NewStopper()
	ctx := m.makeContext(idx)
	store := storage.NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: proto.NodeID(idx + 1)})
	if needBootstrap {
		err := store.Bootstrap(proto.StoreIdent{
			NodeID:  proto.NodeID(idx + 1),
			StoreID: proto.StoreID(idx + 1),
		}, stopper)
		if err != nil {
			m.t.Fatal(err)
		}

		// Bootstrap the initial range on the first store.
		if idx == 0 {
			if err := store.BootstrapRange(); err != nil {
				m.t.Fatal(err)
			}
		}
	}
	if err := store.Start(stopper); err != nil {
		m.t.Fatal(err)
	}
	store.WaitForInit()
	m.stores = append(m.stores, store)
	if len(m.senders) == idx {
		m.senders = append(m.senders, kv.NewLocalSender())
	}
	m.senders[idx].AddStore(store)
	// Save the store identities for later so we can use them in
	// replication operations even while the store is stopped.
	m.idents = append(m.idents, store.Ident)
	m.stoppers = append(m.stoppers, stopper)
}
Example 18
func newEventDemux(events <-chan interface{}) *eventDemux {
	return &eventDemux{
		make(chan *EventLeaderElection, 1000),
		make(chan *EventCommandCommitted, 1000),
		make(chan *EventMembershipChangeCommitted, 1000),
		events,
		util.NewStopper(1),
	}
}
Example 19
func TestUpdateOffsetOnHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)

	stopper := util.NewStopper()
	defer stopper.Stop()

	sContext := NewServerTestContext(nil, stopper)
	serverAddr := util.CreateTestAddr("tcp")
	// Start heartbeat.
	s := NewServer(serverAddr, sContext)
	if err := s.Start(); err != nil {
		t.Fatal(err)
	}
	// Create a client and set its remote offset. On first heartbeat,
	// it will update the server's remote clocks map. We create the
	// client manually here to allow us to set the remote offset
	// before the first heartbeat.
	client := &Client{
		addr:         s.Addr(),
		Ready:        make(chan struct{}),
		Closed:       make(chan struct{}),
		clock:        sContext.localClock,
		remoteClocks: sContext.RemoteClocks,
		offset: proto.RemoteOffset{
			Offset:      10,
			Uncertainty: 5,
			MeasuredAt:  20,
		},
	}
	if err := client.connect(nil, sContext); err != nil {
		t.Fatal(err)
	}

	sContext.RemoteClocks.mu.Lock()
	remoteAddr := client.Addr().String()
	o := sContext.RemoteClocks.offsets[remoteAddr]
	sContext.RemoteClocks.mu.Unlock()
	expServerOffset := proto.RemoteOffset{Offset: -10, Uncertainty: 5, MeasuredAt: 20}
	if !o.Equal(expServerOffset) {
		t.Errorf("expected updated offset %v, instead %v", expServerOffset, o)
	}
	s.Close()

	// Remove the offset from RemoteClocks and simulate the remote end
	// closing the client connection. A new offset for the server should
	// not be added to the clock monitor.
	sContext.RemoteClocks.mu.Lock()
	delete(sContext.RemoteClocks.offsets, remoteAddr)
	client.Client.Close()
	sContext.RemoteClocks.mu.Unlock()

	sContext.RemoteClocks.mu.Lock()
	if offset, ok := sContext.RemoteClocks.offsets[remoteAddr]; ok {
		t.Errorf("unexpected updated offset: %v", offset)
	}
	sContext.RemoteClocks.mu.Unlock()
}
Example 20
// newWriteTask creates a writeTask. The caller should start the task after creating it.
func newWriteTask(storage Storage) *writeTask {
	return &writeTask{
		storage: storage,
		stopper: util.NewStopper(1),
		ready:   make(chan struct{}),
		in:      make(chan *writeRequest, 1),
		out:     make(chan *writeResponse, 1),
	}
}
Example 21
// Start starts the TestServer by bootstrapping an in-memory store
// (defaults to maximum of 100M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.ServingAddr() after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ts *TestServer) Start() error {
	if ts.Ctx == nil {
		ts.Ctx = NewTestContext()
	}

	var err error
	ts.Server, err = NewServer(ts.Ctx, util.NewStopper())
	if err != nil {
		return err
	}

	// Ensure we have the correct number of engines, adding in-memory
	// engines where needed. There must be at least one store/engine.
	if ts.StoresPerNode < 1 {
		ts.StoresPerNode = 1
	}
	if ts.Engines == nil {
		ts.Engines = []engine.Engine{}
	}
	for i := 0; i < ts.StoresPerNode; i++ {
		if len(ts.Engines) < i+1 {
			ts.Engines = append(ts.Engines, nil)
		}
		if ts.Engines[i] == nil {
			ts.Engines[i] = engine.NewInMem(proto.Attributes{}, 100<<20)
		}
	}
	ts.Ctx.Engines = ts.Engines

	if !ts.SkipBootstrap {
		stopper := util.NewStopper()
		_, err := BootstrapCluster("cluster-1", ts.Ctx.Engines, stopper)
		if err != nil {
			return util.Errorf("could not bootstrap cluster: %s", err)
		}
		stopper.Stop()
	}
	err = ts.Server.Start(true)
	if err != nil {
		return err
	}

	return nil
}
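
A usage sketch matching the doc comment above, which points callers at TestServer.ServingAddr() and Stop() (hypothetical test body):

func TestServerUsageSketch(t *testing.T) {
	ts := &TestServer{}
	if err := ts.Start(); err != nil {
		t.Fatal(err)
	}
	// Stop() shuts down the server, per the doc comment on Start().
	defer ts.Stop()
	log.Infof("test server serving at %s", ts.ServingAddr())
}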
Example 22
// startConsumerSet starts an event feed and a number of associated
// simple consumers.
func startConsumerSet(count int) (*util.Stopper, *util.Feed, []*simpleEventConsumer) {
	stopper := util.NewStopper()
	feed := &util.Feed{}
	consumers := make([]*simpleEventConsumer, count)
	for i := range consumers {
		consumers[i] = newSimpleEventConsumer(feed)
		stopper.RunWorker(consumers[i].process)
	}
	return stopper, feed, consumers
}
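
A caller sketch grounded in the signature above; the feed's publish API isn't shown in these examples, so that step stays a comment.

func TestConsumerSetUsageSketch(t *testing.T) {
	stopper, feed, consumers := startConsumerSet(3)
	defer stopper.Stop()
	// Events published on feed fan out to each consumer's process
	// worker started above; assert on the consumers afterwards.
	_, _ = feed, consumers
}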
Example 23
// TestProposeBadGroup ensures that unknown group IDs are an error, not a panic.
func TestProposeBadGroup(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	err := <-cluster.nodes[1].SubmitCommand(7, "asdf", []byte{})
	if err == nil {
		t.Fatal("did not get expected error")
	}
}
Example 24
// TestHeartbeatResponseFanout checks two raft groups with the same node
// distribution but different terms; a heartbeat response from one group
// should not disturb the other group's term or leadership.
func TestHeartbeatResponseFanout(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	defer stopper.Stop()

	cluster := newTestCluster(nil, 3, stopper, t)
	groupID1 := proto.RaftID(1)
	cluster.createGroup(groupID1, 0, 3 /* replicas */)

	groupID2 := proto.RaftID(2)
	cluster.createGroup(groupID2, 0, 3 /* replicas */)

	leaderIndex := 0

	cluster.triggerElection(leaderIndex, groupID1)
	event := cluster.waitForElection(leaderIndex)
	// Drain off the election event from other nodes.
	_ = cluster.waitForElection((leaderIndex + 1) % 3)
	_ = cluster.waitForElection((leaderIndex + 2) % 3)

	if event.GroupID != groupID1 {
		t.Fatalf("election event had incorrect groupid %v", event.GroupID)
	}
	if event.NodeID != cluster.nodes[leaderIndex].nodeID {
		t.Fatalf("expected %v to win election, but was %v", cluster.nodes[leaderIndex].nodeID, event.NodeID)
	}
	// groupID2 will go through three rounds of elections, so it will have
	// a different term than groupID1, but both leaders are on the same node.
	for i := 2; i >= 0; i-- {
		leaderIndex = i
		cluster.triggerElection(leaderIndex, groupID2)
		event = cluster.waitForElection(leaderIndex)
		_ = cluster.waitForElection((leaderIndex + 1) % 3)
		_ = cluster.waitForElection((leaderIndex + 2) % 3)

		if event.GroupID != groupID2 {
			t.Fatalf("election event had incorrect groupid %v", event.GroupID)
		}
		if event.NodeID != cluster.nodes[leaderIndex].nodeID {
			t.Fatalf("expected %v to win election, but was %v", cluster.nodes[leaderIndex].nodeID, event.NodeID)
		}
	}
	// Send a coalesced heartbeat. The heartbeat response from groupID2
	// will have a bigger term than the one from groupID1.
	cluster.nodes[0].coalescedHeartbeat()
	// Submit a command to check whether groupID1's leader has changed.
	cluster.nodes[0].SubmitCommand(groupID1, makeCommandID(), []byte("command"))

	select {
	case <-cluster.events[0].CommandCommitted:
		log.Infof("SubmitCommand succeeded after heartbeat response fanout")
	case <-time.After(500 * time.Millisecond):
		t.Fatalf("No leader after Heartbeat Response fanout")
	}
}
Example 25
func TestMembershipChange(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 4, stopper, t)
	defer stopper.Stop()

	// Create a group with a single member, cluster.nodes[0].
	groupID := proto.RaftID(1)
	cluster.createGroup(groupID, 0, 1)
	// An automatic election is triggered since this is a single-node Raft group.
	cluster.waitForElection(0)

	// Consume and apply the membership change events.
	for i := 0; i < 4; i++ {
		go func(i int) {
			for {
				e, ok := <-cluster.events[i].MembershipChangeCommitted
				if !ok {
					return
				}
				e.Callback(nil)
			}
		}(i)
	}

	// Add each of the other three nodes to the cluster.
	for i := 1; i < 4; i++ {
		ch := cluster.nodes[0].ChangeGroupMembership(groupID, makeCommandID(),
			raftpb.ConfChangeAddNode,
			cluster.nodes[i].nodeID, nil)
		<-ch
	}

	// TODO(bdarnell): verify that the channel events are sent out correctly.
	/*
		for i := 0; i < 10; i++ {
			log.Infof("tick %d", i)
			cluster.tickers[0].Tick()
			time.Sleep(5 * time.Millisecond)
		}

		// Each node is notified of each other node's joining.
		for i := 0; i < 4; i++ {
			for j := 1; j < 4; j++ {
				select {
				case e := <-cluster.events[i].MembershipChangeCommitted:
					if e.NodeID != cluster.nodes[j].nodeID {
						t.Errorf("node %d expected event for %d, got %d", i, j, e.NodeID)
					}
				default:
					t.Errorf("node %d did not get expected event for %d", i, j)
				}
			}
		}*/
}
Example 26
func TestRetryStop(t *testing.T) {
	stopper := util.NewStopper()
	// Create a retry loop that will never stop on its own; only the stopper ends it.
	opts := Options{"test", time.Microsecond * 10, time.Second, 2, 0, false, stopper}
	if err := WithBackoff(opts, func() (Status, error) {
		go stopper.Stop()
		return Continue, nil
	}); err == nil {
		t.Errorf("expected retry loop to exit from being stopped")
	}
}
Example 27
// restartStore restarts a store previously stopped with StopStore.
func (m *multiTestContext) restartStore(i int) {
	m.stoppers[i] = util.NewStopper()

	ctx := m.makeContext(i)
	m.stores[i] = storage.NewStore(ctx, m.engines[i], &proto.NodeDescriptor{NodeID: proto.NodeID(i + 1)})
	if err := m.stores[i].Start(m.stoppers[i]); err != nil {
		m.t.Fatal(err)
	}
	// The sender is assumed to still exist.
	m.senders[i].AddStore(m.stores[i])
}
Example 28
func TestLeaderElectionEvent(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Leader election events are fired when the leader commits an entry, not when it
	// issues a call for votes.
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := proto.RaftID(1)
	cluster.createGroup(groupID, 0, 3)

	// Process a Ready with a new leader but no new commits.
	// This happens while an election is in progress.
	cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
		&raft.Ready{
			SoftState: &raft.SoftState{
				Lead: 3,
			},
		})

	// No events are sent.
	select {
	case e := <-cluster.events[1].LeaderElection:
		t.Fatalf("got unexpected event %v", e)
	case <-time.After(time.Millisecond):
	}

	// Now there are new committed entries. A new leader always commits an entry
	// to conclude the election.
	entry := raftpb.Entry{
		Index: 42,
		Term:  42,
	}
	cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
		&raft.Ready{
			Entries:          []raftpb.Entry{entry},
			CommittedEntries: []raftpb.Entry{entry},
		})

	// Now we get an event.
	select {
	case e := <-cluster.events[1].LeaderElection:
		if !reflect.DeepEqual(e, &EventLeaderElection{
			GroupID: groupID,
			NodeID:  3,
			Term:    42,
		}) {
			t.Errorf("election event did not match expectations: %+v", e)
		}
	case <-time.After(time.Millisecond):
		t.Fatal("didn't get expected event")
	}
}
Example 29
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	m.t = t
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(rootTestBaseContext, m.clock, nil)
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport()
	}

	if m.clientStopper == nil {
		m.clientStopper = util.NewStopper()
	}

	// Always create the first sender.
	m.senders = append(m.senders, kv.NewLocalSender())

	if m.db == nil {
		sender := kv.NewTxnCoordSender(m.senders[0], m.clock, false, nil, m.clientStopper)
		var err error
		if m.db, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
			t.Fatal(err)
		}
	}

	for i := 0; i < numStores; i++ {
		m.addStore()
	}
	if m.transportStopper == nil {
		m.transportStopper = util.NewStopper()
	}
	m.transportStopper.AddCloser(m.transport)
}
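
A minimal sketch of driving the multi-store context; Stop() is assumed as the counterpart to Start() (the addStore comment above mentions engine refcounts being removed in Stop()).

func TestMultiStoreUsageSketch(t *testing.T) {
	mtc := &multiTestContext{}
	mtc.Start(t, 3)
	// Assumed: Stop() releases the per-store stoppers and engine refcounts.
	defer mtc.Stop()
	// All three stores are now started and registered with senders.
}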
Example 30
// TestPollSource verifies that polled data sources are called as expected.
func TestPollSource(t *testing.T) {
	defer leaktest.AfterTest(t)
	tm := newTestModel(t)
	tm.Start()
	defer tm.Stop()

	testSource := &modelDataSource{
		model:   tm,
		r:       Resolution10s,
		stopper: util.NewStopper(),
		datasets: [][]proto.TimeSeriesData{
			{
				{
					Name:   "test.metric.float",
					Source: "cpu01",
					Datapoints: []*proto.TimeSeriesDatapoint{
						datapoint(1428713843000000000, 100.0),
						datapoint(1428713843000000001, 50.2),
						datapoint(1428713843000000002, 90.9),
					},
				},
				{
					Name:   "test.metric.float",
					Source: "cpu02",
					Datapoints: []*proto.TimeSeriesDatapoint{
						datapoint(1428713843000000000, 900.8),
						datapoint(1428713843000000001, 30.12),
						datapoint(1428713843000000002, 72.324),
					},
				},
			},
			{
				{
					Name: "test.metric",
					Datapoints: []*proto.TimeSeriesDatapoint{
						datapoint(-446061360000000000, 100),
					},
				},
			},
		},
	}

	tm.DB.PollSource(testSource, time.Millisecond, Resolution10s, testSource.stopper)
	<-testSource.stopper.IsStopped()
	if a, e := testSource.calledCount, 2; a != e {
		t.Errorf("testSource was called %d times, expected %d", a, e)
	}
	tm.assertKeyCount(3)
	tm.assertModelCorrect()
}