// TestRequestToUninitializedRange tests the behavior when a request
// is sent to a node which should be a replica of the correct range
// but has not yet received its initial snapshot. This would
// previously panic due to a malformed error response from the server,
// as seen in https://github.com/cockroachdb/cockroach/issues/6027.
//
// Prior to the other changes in the commit that introduced it, this
// test would reliably trigger the panic from #6027. However, it
// relies on some hacky tricks to both trigger the panic and shut down
// cleanly. If this test needs a lot of maintenance in the future, we
// should be willing to get rid of it.
func TestRequestToUninitializedRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.TestServer{StoresPerNode: 2}
	if err := s.Start(); err != nil {
		t.Fatalf("Could not start server: %v", err)
	}
	defer s.Stop()

	// Choose a range ID that is much larger than any that would be
	// created by initial splits.
	const rangeID = roachpb.RangeID(1000)

	// Set up a range with replicas on two stores of the same node. This
	// ensures that the DistSender will consider both replicas healthy
	// and will try to talk to both (so we can get a non-retryable error
	// from the second store).
	replica1 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   1,
		ReplicaID: 1,
	}
	replica2 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   2,
		ReplicaID: 2,
	}

	// HACK: remove the second store from the node to generate a
	// non-retryable error when we try to talk to it.
	store2, err := s.Stores().GetStore(2)
	if err != nil {
		t.Fatal(err)
	}
	s.Stores().RemoveStore(store2)
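	// Lookups of store 2 on this node will now fail with a "store 2 not
	// found" error, which is the non-retryable error asserted below.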

	// Create the uninitialized range by sending an isolated raft
	// message to the first store.
	conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
	if err != nil {
		t.Fatal(err)
	}
	raftClient := storage.NewMultiRaftClient(conn)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := raftClient.RaftMessage(ctx)
	if err != nil {
		t.Fatal(err)
	}
	msg := storage.RaftMessageRequest{
		GroupID:     rangeID,
		ToReplica:   replica1,
		FromReplica: replica2,
		Message: raftpb.Message{
			Type: raftpb.MsgApp,
			To:   1,
		},
	}
	if err := stream.Send(&msg); err != nil {
		t.Fatal(err)
	}

	// Make sure the replica was created.
	store1, err := s.Stores().GetStore(1)
	if err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		if replica, err := store1.GetReplica(rangeID); err != nil {
			return util.Errorf("failed to look up replica: %s", err)
		} else if replica.IsInitialized() {
			return util.Errorf("expected replica to be uninitialized")
		}
		return nil
	})

	// Create our own DistSender so we can force some requests to the
	// bogus range. The DistSender needs to be in scope for its own
	// MockRangeDescriptorDB closure.
	var sender *kv.DistSender
	sender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:      s.Clock(),
		RPCContext: s.RPCContext(),
		RangeDescriptorDB: kv.MockRangeDescriptorDB(
			func(key roachpb.RKey, considerIntents, useReverseScan bool,
			) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, *roachpb.Error) {
				if key.Equal(roachpb.RKeyMin) {
					// Pass through requests for the first range to the real sender.
					desc, err := sender.FirstRange()
					if err != nil {
						return nil, nil, roachpb.NewError(err)
					}
					return []roachpb.RangeDescriptor{*desc}, nil, nil
				}
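				// All other keys resolve to a descriptor for the bogus range,
				// whose replica on store 1 is uninitialized and whose replica
				// on store 2 no longer exists.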
				return []roachpb.RangeDescriptor{{
					RangeID:  rangeID,
					StartKey: roachpb.RKey(keys.Meta2Prefix),
					EndKey:   roachpb.RKeyMax,
					Replicas: []roachpb.ReplicaDescriptor{replica1, replica2},
				}}, nil, nil
			}),
	}, s.Gossip())
	// Only inconsistent reads triggered the panic in #6027.
	hdr := roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}
	req := roachpb.NewGet(roachpb.Key("asdf"))
	// Repeat the test a few times: due to the randomization between the
	// two replicas, each attempt only had a 50% chance of triggering
	// the panic.
	for i := 0; i < 5; i++ {
		_, pErr := client.SendWrappedWith(sender, context.Background(), hdr, req)
		// Each attempt fails with "store 2 not found" because that is the
		// non-retryable error.
		if !testutils.IsPError(pErr, "store 2 not found") {
			t.Fatal(pErr)
		}
	}
}
// TestQuery verifies that a basic time series query against data written
// directly to the time series DB returns the expected downsampled,
// aggregated, and differentiated results.
func TestQuery(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tsrv server.TestServer
	if err := tsrv.Start(); err != nil {
		t.Fatal(err)
	}
	defer tsrv.Stop()

	// Populate data directly.
	tsdb := tsrv.TsDB()
	if err := tsdb.StoreData(ts.Resolution10s, []tspb.TimeSeriesData{
		{
			Name:   "test.metric",
			Source: "source1",
			Datapoints: []tspb.TimeSeriesDatapoint{
				{
					TimestampNanos: 400 * 1e9,
					Value:          100.0,
				},
				{
					TimestampNanos: 500 * 1e9,
					Value:          200.0,
				},
				{
					TimestampNanos: 520 * 1e9,
					Value:          300.0,
				},
			},
		},
		{
			Name:   "test.metric",
			Source: "source2",
			Datapoints: []tspb.TimeSeriesDatapoint{
				{
					TimestampNanos: 400 * 1e9,
					Value:          100.0,
				},
				{
					TimestampNanos: 500 * 1e9,
					Value:          200.0,
				},
				{
					TimestampNanos: 510 * 1e9,
					Value:          250.0,
				},
				{
					TimestampNanos: 530 * 1e9,
					Value:          350.0,
				},
			},
		},
		{
			Name: "other.metric",
			Datapoints: []tspb.TimeSeriesDatapoint{
				{
					TimestampNanos: 400 * 1e9,
					Value:          100.0,
				},
				{
					TimestampNanos: 500 * 1e9,
					Value:          200.0,
				},
				{
					TimestampNanos: 510 * 1e9,
					Value:          250.0,
				},
			},
		},
	}); err != nil {
		t.Fatal(err)
	}

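	// The expected values below can be checked by hand, assuming 10-second
	// downsampling buckets reported at their midpoints: the default AVG/SUM
	// query averages each source within a bucket (interpolating buckets with
	// no datapoints from their neighbors) and sums across sources, e.g. at
	// 505s: avg(source1)=200 + avg(source2)=200 = 400. The MAX/MAX/DERIVATIVE
	// query reports the per-second rate of change between consecutive
	// downsampled values, e.g. at 515s: (250-200)/10s = 5.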
	expectedResult := &tspb.TimeSeriesQueryResponse{
		Results: []tspb.TimeSeriesQueryResponse_Result{
			{
				Query: tspb.Query{
					Name:             "test.metric",
					Sources:          []string{"source1", "source2"},
					Downsampler:      tspb.TimeSeriesQueryAggregator_AVG.Enum(),
					SourceAggregator: tspb.TimeSeriesQueryAggregator_SUM.Enum(),
					Derivative:       tspb.TimeSeriesQueryDerivative_NONE.Enum(),
				},
				Datapoints: []tspb.TimeSeriesDatapoint{
					{
						TimestampNanos: 505 * 1e9,
						Value:          400.0,
					},
					{
						TimestampNanos: 515 * 1e9,
						Value:          500.0,
					},
					{
						TimestampNanos: 525 * 1e9,
						Value:          600.0,
					},
				},
			},
			{
				Query: tspb.Query{
					Name:             "other.metric",
					Sources:          []string{""},
					Downsampler:      tspb.TimeSeriesQueryAggregator_AVG.Enum(),
					SourceAggregator: tspb.TimeSeriesQueryAggregator_SUM.Enum(),
					Derivative:       tspb.TimeSeriesQueryDerivative_NONE.Enum(),
				},
				Datapoints: []tspb.TimeSeriesDatapoint{
					{
						TimestampNanos: 505 * 1e9,
						Value:          200.0,
					},
					{
						TimestampNanos: 515 * 1e9,
						Value:          250.0,
					},
				},
			},
			{
				Query: tspb.Query{
					Name:             "test.metric",
					Sources:          []string{"source1", "source2"},
					Downsampler:      tspb.TimeSeriesQueryAggregator_MAX.Enum(),
					SourceAggregator: tspb.TimeSeriesQueryAggregator_MAX.Enum(),
					Derivative:       tspb.TimeSeriesQueryDerivative_DERIVATIVE.Enum(),
				},
				Datapoints: []tspb.TimeSeriesDatapoint{
					{
						TimestampNanos: 505 * 1e9,
						Value:          1.0,
					},
					{
						TimestampNanos: 515 * 1e9,
						Value:          5.0,
					},
					{
						TimestampNanos: 525 * 1e9,
						Value:          5.0,
					},
				},
			},
		},
	}

	conn, err := tsrv.RPCContext().GRPCDial(tsrv.Ctx.Addr)
	if err != nil {
		t.Fatal(err)
	}
	response, err := tspb.NewTimeSeriesClient(conn).Query(context.Background(), &tspb.TimeSeriesQueryRequest{
		StartNanos: 500 * 1e9,
		EndNanos:   526 * 1e9,
		Queries: []tspb.Query{
			{
				Name: "test.metric",
			},
			{
				Name: "other.metric",
			},
			{
				Name:             "test.metric",
				Downsampler:      tspb.TimeSeriesQueryAggregator_MAX.Enum(),
				SourceAggregator: tspb.TimeSeriesQueryAggregator_MAX.Enum(),
				Derivative:       tspb.TimeSeriesQueryDerivative_DERIVATIVE.Enum(),
			},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	for _, r := range response.Results {
		sort.Strings(r.Sources)
	}
	if !proto.Equal(response, expectedResult) {
		t.Fatalf("actual response \n%v\n did not match expected response \n%v",
			response, expectedResult)
	}
}