Code example #1
File: status_test.go Project: zhaoyta/cockroach
// startServer will start a server with a short scan interval, wait for
// the scan to complete, and return the server. The caller is
// responsible for stopping the server.
// TODO(Bram): Add more nodes.
func startServer(t *testing.T, keyPrefix string) TestServer {
	var ts TestServer
	ts.Ctx = NewTestContext()
	ts.Ctx.ScanInterval = 5 * time.Millisecond
	ts.StoresPerNode = 3
	if err := ts.Start(); err != nil {
		t.Fatal(err)
	}

	// Make sure the range is spun up with an arbitrary read command. We do not
	// expect a specific response.
	if _, err := ts.db.Get("a"); err != nil {
		t.Fatal(err)
	}

	// Make sure the node status is available. This is done by forcing stores to
	// publish their status, synchronizing to the event feed with a canary
	// event, and then forcing the server to write summaries immediately.
	if err := ts.node.publishStoreStatuses(); err != nil {
		t.Fatalf("error publishing store statuses: %s", err)
	}
	syncEvent := status.NewTestSyncEvent(1)
	ts.EventFeed().Publish(syncEvent)
	if err := syncEvent.Sync(5 * time.Second); err != nil {
		t.Fatal(err)
	}
	if err := ts.writeSummaries(); err != nil {
		t.Fatalf("error writing summaries: %s", err)
	}

	return ts
}
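Since the doc comment says the caller is responsible for stopping the server, a typical call site would look roughly like the sketch below. This is a hypothetical example, not code from the project: the test name and key prefix are illustrative, and ts.Stop() is assumed to mirror ts.Start(), as seen in the TestStatusSummaries examples that follow.

// Hypothetical caller of startServer. The test name and the key prefix are
// illustrative; ts.Stop() is assumed from the TestServer usage in the
// TestStatusSummaries examples below.
func TestNodeStatusEndpoint(t *testing.T) {
	ts := startServer(t, "test-status-prefix")
	defer ts.Stop() // the caller must stop the server

	// ... exercise the node/store status endpoints against ts here ...
}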
Code example #2
File: node_test.go Project: backend2use/cockroachdb
// TestStatusSummaries verifies that status summaries are written correctly for
// both the Node and stores within the node.
func TestStatusSummaries(t *testing.T) {
	defer leaktest.AfterTest(t)
	ts := &TestServer{}
	ts.Ctx = NewTestContext()
	ts.StoresPerNode = 3
	if err := ts.Start(); err != nil {
		t.Fatal(err)
	}
	defer ts.Stop()

	// Retrieve the first store from the Node.
	s, err := ts.node.lSender.GetStore(proto.StoreID(1))
	if err != nil {
		t.Fatal(err)
	}

	s.WaitForInit()
	// Perform a read from the range to ensure that the raft election has
	// completed. We do not expect a specific response.
	if _, err := ts.db.Get("a"); err != nil {
		t.Fatal(err)
	}

	storeDesc, err := s.Descriptor()
	if err != nil {
		t.Fatal(err)
	}

	expectedNodeStatus := &status.NodeStatus{
		RangeCount:           1,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            0,
		UpdatedAt:            0,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 0,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: 1,
			KeyCount:  1,
			ValCount:  1,
		},
	}
	expectedStoreStatus := &storage.StoreStatus{
		Desc:                 *storeDesc,
		NodeID:               1,
		RangeCount:           1,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 0,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: 1,
			KeyCount:  1,
			ValCount:  1,
		},
	}

	// Function to ensure that the event feed has been fully flushed.
	syncFeed := func() {
		syncEvent := status.NewTestSyncEvent(1)
		ts.EventFeed().Publish(syncEvent)
		if err := syncEvent.Sync(5 * time.Second); err != nil {
			t.Fatal(err)
		}
	}

	// Function to force summaries to be written synchronously, including all
	// data currently in the event pipeline. Only one of the stores has
	// replicas, so there are no concerns related to quorum writes; if there
	// were multiple replicas, more care would need to be taken in the initial
	// syncFeed().
	forceWriteStatus := func() {
		syncFeed()
		if err := ts.node.publishStoreStatuses(); err != nil {
			t.Fatalf("error publishing store statuses: %s", err)
		}
		syncFeed()
		if err := ts.writeSummaries(); err != nil {
			t.Fatalf("error writing summaries: %s", err)
		}
	}

	forceWriteStatus()
	oldNodeStats := compareNodeStatus(t, ts, expectedNodeStatus, 0)
	oldStoreStats := compareStoreStatus(t, ts, s, expectedStoreStatus, 0)

	// Write some values left and right of the proposed split key.
	content := proto.Key("test content")
	if err := ts.db.Put("a", content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put("c", content); err != nil {
		t.Fatal(err)
	}

	expectedNodeStatus = &status.NodeStatus{
		RangeCount:           1,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            oldNodeStats.StartedAt,
		UpdatedAt:            oldNodeStats.UpdatedAt,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 0,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldNodeStats.Stats.LiveCount + 1,
			KeyCount:  oldNodeStats.Stats.KeyCount + 1,
			ValCount:  oldNodeStats.Stats.ValCount + 1,
		},
	}
	expectedStoreStatus = &storage.StoreStatus{
		Desc:                 oldStoreStats.Desc,
		NodeID:               1,
		RangeCount:           1,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 0,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldStoreStats.Stats.LiveCount + 1,
			KeyCount:  oldStoreStats.Stats.KeyCount + 1,
			ValCount:  oldStoreStats.Stats.ValCount + 1,
		},
	}

	forceWriteStatus()
	oldNodeStats = compareNodeStatus(t, ts, expectedNodeStatus, 1)
	oldStoreStats = compareStoreStatus(t, ts, s, expectedStoreStatus, 1)

	// Split the range.
	splitKey := proto.Key("b")
	rng := s.LookupRange(splitKey, nil)
	args := &proto.AdminSplitRequest{
		RequestHeader: proto.RequestHeader{
			Key:     proto.KeyMin,
			RaftID:  rng.Desc().RaftID,
			Replica: proto.Replica{StoreID: s.Ident.StoreID},
		},
		SplitKey: splitKey,
	}
	var reply *proto.AdminSplitResponse
	if replyI, err := ts.node.executeCmd(args); err != nil {
		t.Fatal(err)
	} else {
		reply = replyI.(*proto.AdminSplitResponse)
	}
	if reply.Error != nil {
		t.Fatal(reply.Error)
	}

	expectedNodeStatus = &status.NodeStatus{
		RangeCount:           2,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            oldNodeStats.StartedAt,
		UpdatedAt:            oldNodeStats.UpdatedAt,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     2,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 0,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldNodeStats.Stats.LiveCount,
			KeyCount:  oldNodeStats.Stats.KeyCount,
			ValCount:  oldNodeStats.Stats.ValCount,
		},
	}
	expectedStoreStatus = &storage.StoreStatus{
		Desc:                 oldStoreStats.Desc,
		NodeID:               1,
		RangeCount:           2,
		LeaderRangeCount:     2,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 0,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldStoreStats.Stats.LiveCount,
			KeyCount:  oldStoreStats.Stats.KeyCount,
			ValCount:  oldStoreStats.Stats.ValCount,
		},
	}
	forceWriteStatus()
	oldNodeStats = compareNodeStatus(t, ts, expectedNodeStatus, 2)
	oldStoreStats = compareStoreStatus(t, ts, s, expectedStoreStatus, 2)

	// Write some values left and right of the split key.
	if err := ts.db.Put("aa", content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put("cc", content); err != nil {
		t.Fatal(err)
	}

	expectedNodeStatus = &status.NodeStatus{
		RangeCount:           2,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            oldNodeStats.StartedAt,
		UpdatedAt:            oldNodeStats.UpdatedAt,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     2,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 0,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldNodeStats.Stats.LiveCount + 1,
			KeyCount:  oldNodeStats.Stats.KeyCount + 1,
			ValCount:  oldNodeStats.Stats.ValCount + 1,
		},
	}
	expectedStoreStatus = &storage.StoreStatus{
		Desc:                 oldStoreStats.Desc,
		NodeID:               1,
		RangeCount:           2,
		LeaderRangeCount:     2,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 0,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldStoreStats.Stats.LiveCount + 1,
			KeyCount:  oldStoreStats.Stats.KeyCount + 1,
			ValCount:  oldStoreStats.Stats.ValCount + 1,
		},
	}
	forceWriteStatus()
	compareNodeStatus(t, ts, expectedNodeStatus, 3)
	compareStoreStatus(t, ts, s, expectedStoreStatus, 3)
}
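Both examples above flush the event pipeline by publishing a "canary" sync event and waiting for it to be consumed before writing summaries. The following is a minimal, self-contained sketch of that pattern using only the standard library; the names (feed, syncEvent) are illustrative and not the CockroachDB API.

// Sketch of the canary-event flush pattern: publish a marker onto the same
// channel the consumer drains, then wait (with a timeout) until the consumer
// has processed it, proving everything published earlier was handled.
package main

import (
	"errors"
	"fmt"
	"time"
)

type syncEvent struct{ done chan struct{} }

// Sync blocks until the consumer has seen this event or the timeout expires.
func (e *syncEvent) Sync(timeout time.Duration) error {
	select {
	case <-e.done:
		return nil
	case <-time.After(timeout):
		return errors.New("timed out waiting for canary event")
	}
}

func main() {
	feed := make(chan interface{}, 16)

	// Consumer goroutine: processes events in order; on seeing the canary it
	// closes the done channel.
	go func() {
		for ev := range feed {
			if s, ok := ev.(*syncEvent); ok {
				close(s.done)
				continue
			}
			_ = ev // handle a regular event here
		}
	}()

	feed <- "store status update" // regular events published first
	canary := &syncEvent{done: make(chan struct{})}
	feed <- canary
	if err := canary.Sync(5 * time.Second); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("feed flushed")
}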
Code example #3
File: node_test.go Project: routhcr/cockroach
// TestStatusSummaries verifies that status summaries are written correctly for
// both the Node and stores within the node.
func TestStatusSummaries(t *testing.T) {
	defer leaktest.AfterTest(t)
	ts := &TestServer{}
	ts.Ctx = NewTestContext()
	ts.StoresPerNode = 3
	if err := ts.Start(); err != nil {
		t.Fatal(err)
	}
	defer ts.Stop()

	// Retrieve the first store from the Node.
	s, err := ts.node.lSender.GetStore(proto.StoreID(1))
	if err != nil {
		t.Fatal(err)
	}

	s.WaitForInit()

	content := "junk"
	leftKey := "a"

	// Write to the range to ensure that the raft machinery is running.
	if err := ts.db.Put(leftKey, content); err != nil {
		t.Fatal(err)
	}

	storeDesc, err := s.Descriptor()
	if err != nil {
		t.Fatal(err)
	}

	expectedNodeStatus := &status.NodeStatus{
		RangeCount:           1,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            0,
		UpdatedAt:            0,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 1,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: 1,
			KeyCount:  1,
			ValCount:  1,
		},
	}
	expectedStoreStatus := &storage.StoreStatus{
		Desc:                 *storeDesc,
		NodeID:               1,
		RangeCount:           1,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 1,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: 1,
			KeyCount:  1,
			ValCount:  1,
		},
	}

	// Function to force summaries to be written synchronously, including all
	// data currently in the event pipeline. Only one of the stores has
	// replicas, so there are no concerns related to quorum writes; if there
	// were multiple replicas, more care would need to be taken with the
	// initial event feed synchronization below.
	forceWriteStatus := func() {
		if err := ts.node.publishStoreStatuses(); err != nil {
			t.Fatalf("error publishing store statuses: %s", err)
		}

		// Ensure that the event feed has been fully flushed.
		syncEvent := status.NewTestSyncEvent(1)
		ts.EventFeed().Publish(syncEvent)
		if err := syncEvent.Sync(5 * time.Second); err != nil {
			t.Fatal(err)
		}

		if err := ts.writeSummaries(); err != nil {
			t.Fatalf("error writing summaries: %s", err)
		}
	}

	forceWriteStatus()
	oldNodeStats := compareNodeStatus(t, ts, expectedNodeStatus, 0)
	oldStoreStats := compareStoreStatus(t, ts, s, expectedStoreStatus, 0)

	splitKey := "b"
	rightKey := "c"

	// Write some values left and right of the proposed split key. The
	// specific keys are not significant.
	if err := ts.db.Put(leftKey, content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put(rightKey, content); err != nil {
		t.Fatal(err)
	}

	expectedNodeStatus = &status.NodeStatus{
		RangeCount:           1,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            oldNodeStats.StartedAt,
		UpdatedAt:            oldNodeStats.UpdatedAt,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 1,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldNodeStats.Stats.LiveCount + 1,
			KeyCount:  oldNodeStats.Stats.KeyCount + 1,
			ValCount:  oldNodeStats.Stats.ValCount + 1,
		},
	}
	expectedStoreStatus = &storage.StoreStatus{
		Desc:                 oldStoreStats.Desc,
		NodeID:               1,
		RangeCount:           1,
		LeaderRangeCount:     1,
		AvailableRangeCount:  1,
		ReplicatedRangeCount: 1,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldStoreStats.Stats.LiveCount + 1,
			KeyCount:  oldStoreStats.Stats.KeyCount + 1,
			ValCount:  oldStoreStats.Stats.ValCount + 1,
		},
	}

	forceWriteStatus()
	oldNodeStats = compareNodeStatus(t, ts, expectedNodeStatus, 1)
	oldStoreStats = compareStoreStatus(t, ts, s, expectedStoreStatus, 1)

	// Split the range.
	if err := ts.db.AdminSplit(splitKey); err != nil {
		t.Fatal(err)
	}

	// Write on both sides of the split to ensure that the raft machinery
	// is running.
	if err := ts.db.Put(leftKey, content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put(rightKey, content); err != nil {
		t.Fatal(err)
	}

	expectedNodeStatus = &status.NodeStatus{
		RangeCount:           2,
		StoreIDs:             []proto.StoreID{1, 2, 3},
		StartedAt:            oldNodeStats.StartedAt,
		UpdatedAt:            oldNodeStats.UpdatedAt,
		Desc:                 ts.node.Descriptor,
		LeaderRangeCount:     2,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 2,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldNodeStats.Stats.LiveCount,
			KeyCount:  oldNodeStats.Stats.KeyCount,
			ValCount:  oldNodeStats.Stats.ValCount,
		},
	}
	expectedStoreStatus = &storage.StoreStatus{
		Desc:                 oldStoreStats.Desc,
		NodeID:               1,
		RangeCount:           2,
		LeaderRangeCount:     2,
		AvailableRangeCount:  2,
		ReplicatedRangeCount: 2,
		Stats: engine.MVCCStats{
			LiveBytes: 1,
			KeyBytes:  1,
			ValBytes:  1,
			LiveCount: oldStoreStats.Stats.LiveCount,
			KeyCount:  oldStoreStats.Stats.KeyCount,
			ValCount:  oldStoreStats.Stats.ValCount,
		},
	}
	forceWriteStatus()
	compareNodeStatus(t, ts, expectedNodeStatus, 3)
	compareStoreStatus(t, ts, s, expectedStoreStatus, 3)
}