Example #1
// TestMixedDirections resolves a batch that mixes an ascending and a
// descending span in a single call.
func TestMixedDirections(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db, cdb := serverutils.StartServer(t, base.TestServerArgs{
		UseDatabase: "t",
	})
	defer s.Stopper().Stop()

	rowRanges, tableDesc := setupRanges(db, s.(*server.TestServer), cdb, t)
	lr := distsql.NewSpanResolver(
		s.DistSender(), s.Gossip(),
		s.(*server.TestServer).GetNode().Descriptor,
		distsql.BinPackingLeaseHolderChoice)

	ctx := context.Background()
	it := lr.NewSpanResolverIterator()

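	// One ascending span contained entirely in the second range, and one
	// descending span crossing from the second range back into the first.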
	spans := []spanWithDir{
		orient(kv.Ascending, makeSpan(tableDesc, 11, 15))[0],
		orient(kv.Descending, makeSpan(tableDesc, 1, 14))[0],
	}
	replicas, err := resolveSpans(ctx, it, spans...)
	if err != nil {
		t.Fatal(err)
	}
	expected := [][]rngInfo{
		{onlyReplica(rowRanges[1])},
		{onlyReplica(rowRanges[1]), onlyReplica(rowRanges[0])},
	}
	if err = expectResolved(replicas, expected...); err != nil {
		t.Fatal(err)
	}
}
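The spanWithDir type and the orient helper are not part of this listing. Judging from the literal construction of spanWithDir in Example #3 below, and from the way Example #4 reverses its expected results for descending scans, a minimal sketch (inferred, not taken from the original source) could look like this:

// spanWithDir pairs a key span with the direction in which it is scanned.
// The embedded roachpb.Span and the dir field mirror the literal
// construction in TestSpanResolverUsesCaches below.
type spanWithDir struct {
	roachpb.Span
	dir kv.ScanDirection
}

// orient tags every span with the given scan direction. Flipping the span
// order for descending scans is an inference from Example #4, which reverses
// the order of its expected results in the descending case.
func orient(dir kv.ScanDirection, spans ...roachpb.Span) []spanWithDir {
	res := make([]spanWithDir, 0, len(spans))
	for _, span := range spans {
		res = append(res, spanWithDir{Span: span, dir: dir})
	}
	if dir == kv.Descending {
		for i, j := 0, len(res)-1; i < j; i, j = i+1, j-1 {
			res[i], res[j] = res[j], res[i]
		}
	}
	return res
}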
Example #2
// newDistSQLPlanner creates a distSQLPlanner for the given node descriptor
// and the services it depends on.
func newDistSQLPlanner(
	nodeDesc roachpb.NodeDescriptor,
	rpcCtx *rpc.Context,
	distSQLSrv *distsql.ServerImpl,
	distSender *kv.DistSender,
	gossip *gossip.Gossip,
) *distSQLPlanner {
	return &distSQLPlanner{
		nodeDesc:     nodeDesc,
		rpcContext:   rpcCtx,
		distSQLSrv:   distSQLSrv,
		spanResolver: distsql.NewSpanResolver(distSender, gossip, nodeDesc, resolverPolicy),
	}
}
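newDistSQLPlanner refers to a resolverPolicy that is not shown in this snippet. Since the tests in this listing pass distsql.BinPackingLeaseHolderChoice explicitly, it is presumably a package-level constant along these lines:

// resolverPolicy is assumed to be a package-level constant selecting how the
// SpanResolver picks replicas; the value matches the choice used explicitly
// by the tests above and below.
const resolverPolicy = distsql.BinPackingLeaseHolderChoice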
Example #3
// Test that resolving spans uses a node's range cache and lease holder
// cache. The goal is to check that resolution is not random, but predictable
// given the state of the caches.
func TestSpanResolverUsesCaches(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testcluster.StartTestCluster(t, 4,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationManual,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "t",
			},
		})
	defer tc.Stopper().Stop()

	rowRanges, _ := setupRanges(
		tc.Conns[0], tc.Servers[0], tc.Servers[0].KVClient().(*client.DB), t)

	// Replicate the row ranges on all of the first 3 nodes. Leave the 4th node
	// in a pristine state, with empty caches.
	for i := 0; i < 3; i++ {
		var err error
		rowRanges[i], err = tc.AddReplicas(
			rowRanges[i].StartKey.AsRawKey(), tc.Target(1), tc.Target(2))
		if err != nil {
			t.Fatal(err)
		}
	}

	// Scatter the leases around; node i gets range i.
	for i := 0; i < 3; i++ {
		if err := tc.TransferRangeLease(rowRanges[i], tc.Target(i)); err != nil {
			t.Fatal(err)
		}
		// Wait for everybody to apply the new lease, so that we can rely on the
		// lease discovery done later by the SpanResolver to be up to date.
		testutils.SucceedsSoon(t, func() error {
			for j := 0; j < 3; j++ {
				target := tc.Target(j)
				rt, err := tc.FindRangeLeaseHolder(rowRanges[i], &target)
				if err != nil {
					return err
				}
				if rt != tc.Target(i) {
					return errors.Errorf("node %d hasn't applied the lease yet", j)
				}
			}
			return nil
		})
	}

	// Create a SpanResolver using the 4th node, with empty caches.
	s3 := tc.Servers[3]

	lr := distsql.NewSpanResolver(
		s3.DistSender(), s3.Gossip(), s3.GetNode().Descriptor,
		distsql.BinPackingLeaseHolderChoice)

	var spans []spanWithDir
	for i := 0; i < 3; i++ {
		spans = append(
			spans,
			spanWithDir{
				Span: roachpb.Span{
					Key:    rowRanges[i].StartKey.AsRawKey(),
					EndKey: rowRanges[i].EndKey.AsRawKey(),
				},
				dir: kv.Ascending,
			})
	}

	// Resolve the spans. Since the LeaseHolderCache is empty, all the ranges
	// should be grouped and "assigned" to replica 0.
	replicas, err := resolveSpans(context.TODO(), lr.NewSpanResolverIterator(), spans...)
	if err != nil {
		t.Fatal(err)
	}
	if len(replicas) != 3 {
		t.Fatalf("expected replies for 3 spans, got %d: %+v", len(replicas), replicas)
	}
	si := tc.Servers[0]

	nodeID := si.GetNode().Descriptor.NodeID
	storeID := si.GetFirstStoreID()
	for i := 0; i < 3; i++ {
		if len(replicas[i]) != 1 {
			t.Fatalf("expected 1 range for span %s, got %d (%+v)",
				spans[i].Span, len(replicas[i]), replicas[i])
		}
		rd := replicas[i][0].ReplicaDescriptor
		if rd.NodeID != nodeID || rd.StoreID != storeID {
			t.Fatalf("expected span %s to be on replica (%d, %d) but was on %s",
				spans[i].Span, nodeID, storeID, rd)
		}
	}

	// Now populate the caches on node 4 and query again. This time, we expect
	// to see each span on its own range.
	if err := populateCache(tc.Conns[3], 3 /* expectedNumRows */); err != nil {
		t.Fatal(err)
	}
	replicas, err = resolveSpans(context.TODO(), lr.NewSpanResolverIterator(), spans...)
	if err != nil {
		t.Fatal(err)
	}

	var expected [][]rngInfo
	for i := 0; i < 3; i++ {
		expected = append(expected, []rngInfo{selectReplica(tc.Servers[i].NodeID(), rowRanges[i])})
	}
	if err = expectResolved(replicas, expected...); err != nil {
		t.Fatal(err)
	}
}
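The resolveSpans helper and the rngInfo type are not shown in this listing either. Based on how the iterator is handed to resolveSpans and on the replicas[i][0].ReplicaDescriptor access in the test above, a plausible sketch is the following; the exact SpanResolverIterator method set (Seek, Valid, Error, ReplicaInfo, Desc, NeedAnother, Next) is an assumption:

// rngInfo is assumed to bundle the replica picked for a range with that
// range's descriptor; embedding kv.ReplicaInfo (which itself embeds
// roachpb.ReplicaDescriptor) matches the field access in the test above.
type rngInfo struct {
	kv.ReplicaInfo
	rngDesc roachpb.RangeDescriptor
}

// resolveSpans walks each span with the iterator and collects the replica
// chosen for every range the span touches.
func resolveSpans(
	ctx context.Context, it distsql.SpanResolverIterator, spans ...spanWithDir,
) ([][]rngInfo, error) {
	res := make([][]rngInfo, 0, len(spans))
	for _, span := range spans {
		var repls []rngInfo
		for it.Seek(ctx, span.Span, span.dir); ; it.Next(ctx) {
			if !it.Valid() {
				return nil, it.Error()
			}
			repl, err := it.ReplicaInfo(ctx)
			if err != nil {
				return nil, err
			}
			repls = append(repls, rngInfo{ReplicaInfo: repl, rngDesc: it.Desc()})
			if !it.NeedAnother() {
				break
			}
		}
		res = append(res, repls)
	}
	return res, nil
}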
Example #4
// TestSpanResolver exercises resolving spans of various shapes against a
// table split into three ranges, in both scan directions.
func TestSpanResolver(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db, cdb := serverutils.StartServer(t, base.TestServerArgs{
		UseDatabase: "t",
	})
	defer s.Stopper().Stop()

	rowRanges, tableDesc := setupRanges(db, s.(*server.TestServer), cdb, t)
	lr := distsql.NewSpanResolver(
		s.DistSender(), s.Gossip(),
		s.(*server.TestServer).GetNode().Descriptor,
		distsql.BinPackingLeaseHolderChoice)

	ctx := context.Background()
	it := lr.NewSpanResolverIterator()

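	// setupRanges (not shown in this listing) evidently splits the test table
	// into three ranges at rows 10 and 20: rowRanges[0] covers rows [0, 10),
	// rowRanges[1] covers [10, 20), and rowRanges[2] everything from row 20 on.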
	testCases := []struct {
		spans    []roachpb.Span
		expected [][]rngInfo
	}{
		{
			[]roachpb.Span{makeSpan(tableDesc, 0, 10000)},
			[][]rngInfo{{
				onlyReplica(rowRanges[0]),
				onlyReplica(rowRanges[1]),
				onlyReplica(rowRanges[2])}},
		},
		{
			[]roachpb.Span{
				makeSpan(tableDesc, 0, 9),
				makeSpan(tableDesc, 11, 19),
				makeSpan(tableDesc, 21, 29),
			},
			[][]rngInfo{
				{onlyReplica(rowRanges[0])},
				{onlyReplica(rowRanges[1])},
				{onlyReplica(rowRanges[2])},
			},
		},
		{
			[]roachpb.Span{
				makeSpan(tableDesc, 0, 20),
				makeSpan(tableDesc, 20, 29),
			},
			[][]rngInfo{
				{onlyReplica(rowRanges[0]), onlyReplica(rowRanges[1])},
				{onlyReplica(rowRanges[2])},
			},
		},
		{
			[]roachpb.Span{
				makeSpan(tableDesc, 0, 1),
				makeSpan(tableDesc, 1, 2),
				makeSpan(tableDesc, 2, 3),
				makeSpan(tableDesc, 3, 4),
				makeSpan(tableDesc, 5, 11),
				makeSpan(tableDesc, 20, 29),
			},
			[][]rngInfo{
				{onlyReplica(rowRanges[0])},
				{onlyReplica(rowRanges[0])},
				{onlyReplica(rowRanges[0])},
				{onlyReplica(rowRanges[0])},
				{onlyReplica(rowRanges[0]), onlyReplica(rowRanges[1])},
				{onlyReplica(rowRanges[2])},
			},
		},
	}
	for i, tc := range testCases {
		for _, dir := range []kv.ScanDirection{kv.Ascending, kv.Descending} {
			t.Run(fmt.Sprintf("%d-direction:%d", i, dir), func(t *testing.T) {
				replicas, err := resolveSpans(ctx, it, orient(dir, tc.spans...)...)
				if err != nil {
					t.Fatal(err)
				}
				if dir == kv.Descending {
				// When testing descending resolution, reverse the expected
				// results: both the order of the spans' results and the order
				// of the ranges within each result.
					for i, j := 0, len(tc.expected)-1; i <= j; i, j = i+1, j-1 {
						reverse(tc.expected[i])
						if i != j {
							reverse(tc.expected[j])
						}
						tc.expected[i], tc.expected[j] = tc.expected[j], tc.expected[i]
					}
				}
				if err = expectResolved(replicas, tc.expected...); err != nil {
					t.Fatal(err)
				}
			})
		}
	}
}
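Finally, the reverse helper used when flipping the expected results above is not shown; it presumably just reverses a slice of rngInfo in place:

// reverse reverses a slice of rngInfo in place; a minimal sketch of the
// helper the descending case above relies on.
func reverse(r []rngInfo) {
	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
		r[i], r[j] = r[j], r[i]
	}
}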