Example #1
// Range returns a key range encompassing all the keys in the Batch.
func Range(ba roachpb.BatchRequest) (roachpb.RSpan, error) {
	from := roachpb.RKeyMax
	to := roachpb.RKeyMin
	for _, arg := range ba.Requests {
		req := arg.GetInner()
		if _, ok := req.(*roachpb.NoopRequest); ok {
			continue
		}
		h := req.Header()
		if !roachpb.IsRange(req) && len(h.EndKey) != 0 {
			return roachpb.RSpan{}, errors.Errorf("end key specified for non-range operation: %s", req)
		}

		key, err := Addr(h.Key)
		if err != nil {
			return roachpb.RSpan{}, err
		}
		if key.Less(from) {
			// Key is smaller than `from`.
			from = key
		}
		if !key.Less(to) {
			// Key.Next() is larger than `to`.
			if bytes.Compare(key, roachpb.RKeyMax) > 0 {
				return roachpb.RSpan{}, errors.Errorf("%s must be less than KeyMax", key)
			}
			to = key.Next()
		}

		if len(h.EndKey) == 0 {
			continue
		}
		endKey, err := AddrUpperBound(h.EndKey)
		if err != nil {
			return roachpb.RSpan{}, err
		}
		if bytes.Compare(roachpb.RKeyMax, endKey) < 0 {
			return roachpb.RSpan{}, errors.Errorf("%s must be less than or equal to KeyMax", endKey)
		}
		if to.Less(endKey) {
			// EndKey is larger than `to`.
			to = endKey
		}
	}
	return roachpb.RSpan{Key: from, EndKey: to}, nil
}
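
A minimal usage sketch for Range, assuming this vintage of the roachpb API (variadic BatchRequest.Add and Span-embedded request headers); the key literals and the expected result are illustrative:

func exampleRange() (roachpb.RSpan, error) {
	var ba roachpb.BatchRequest
	// A point Get on "a" plus a Scan over ["b", "d").
	ba.Add(&roachpb.GetRequest{Span: roachpb.Span{Key: roachpb.Key("a")}})
	ba.Add(&roachpb.ScanRequest{Span: roachpb.Span{Key: roachpb.Key("b"), EndKey: roachpb.Key("d")}})
	// Should yield ["a", "d"): "a" is the smallest addressed key, and the
	// Scan's EndKey "d" dominates the Get's "a".Next().
	return Range(ba)
}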
Example #2
func TestKVDBInternalMethods(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	testCases := []roachpb.Request{
		&roachpb.HeartbeatTxnRequest{},
		&roachpb.GCRequest{},
		&roachpb.PushTxnRequest{},
		&roachpb.RangeLookupRequest{},
		&roachpb.ResolveIntentRequest{},
		&roachpb.ResolveIntentRangeRequest{},
		&roachpb.MergeRequest{},
		&roachpb.TruncateLogRequest{},
		&roachpb.RequestLeaseRequest{},

		&roachpb.EndTransactionRequest{
			InternalCommitTrigger: &roachpb.InternalCommitTrigger{},
		},
	}
	// Verify internal methods experience bad request errors.
	db := createTestClient(t, s.Stopper(), s.ServingAddr())
	for i, args := range testCases {
		{
			header := args.Header()
			header.Key = roachpb.Key("a")
			args.SetHeader(header)
		}
		if roachpb.IsRange(args) {
			header := args.Header()
			header.EndKey = args.Header().Key.Next()
			args.SetHeader(header)
		}
		b := &client.Batch{}
		b.AddRawRequest(args)
		err := db.Run(b)
		if err == nil {
			t.Errorf("%d: unexpected success calling %s", i, args.Method())
		} else if !testutils.IsError(err, "contains an internal request|contains commit trigger") {
			t.Errorf("%d: unexpected error for %s: %s", i, args.Method(), err)
		}
	}
}
Example #3
// TestKVDBInternalMethods verifies that no internal methods are
// available via the HTTP DB interface.
func TestKVDBInternalMethods(t *testing.T) {
	defer leaktest.AfterTest(t)
	t.Skip("test broken & disabled; obsolete after #2271")
	s := server.StartTestServer(t)
	defer s.Stop()

	testCases := []roachpb.Request{
		&roachpb.HeartbeatTxnRequest{},
		&roachpb.GCRequest{},
		&roachpb.PushTxnRequest{},
		&roachpb.RangeLookupRequest{},
		&roachpb.ResolveIntentRequest{},
		&roachpb.ResolveIntentRangeRequest{},
		&roachpb.MergeRequest{},
		&roachpb.TruncateLogRequest{},
		&roachpb.LeaderLeaseRequest{},

		&roachpb.EndTransactionRequest{
			InternalCommitTrigger: &roachpb.InternalCommitTrigger{},
		},
	}
	// Verify internal methods experience bad request errors.
	db := createTestClient(t, s.Stopper(), s.ServingAddr())
	for i, args := range testCases {
		args.Header().Key = roachpb.Key("a")
		if roachpb.IsRange(args) {
			args.Header().EndKey = args.Header().Key.Next()
		}
		b := &client.Batch{}
		b.InternalAddRequest(args)
		err := db.Run(b)
		if err == nil {
			t.Errorf("%d: unexpected success calling %s", i, args.Method())
		} else if !testutils.IsError(err, "(couldn't find method|contains commit trigger)") {
			t.Errorf("%d: expected missing method %s; got %s", i, args.Method(), err)
		}
	}
}
Example #4
// TestSendRPCOrder verifies that sendRPC correctly takes into account the
// leader, attributes and required consistency to determine where to send
// remote requests.
func TestSendRPCOrder(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	rangeID := roachpb.RangeID(99)

	nodeAttrs := map[int32][]string{
		1: {}, // The local node, set in each test case.
		2: {"us", "west", "gpu"},
		3: {"eu", "dublin", "pdu2", "gpu"},
		4: {"us", "east", "gpu"},
		5: {"us", "east", "gpu", "flaky"},
	}

	// Gets filled below to identify the replica by its address.
	addrToNode := make(map[string]int32)
	makeVerifier := func(expOrder rpc.OrderingPolicy,
		expAddrs []int32) func(rpc.Options, []net.Addr) error {
		return func(o rpc.Options, addrs []net.Addr) error {
			if o.Ordering != expOrder {
				return util.Errorf("unexpected ordering, wanted %v, got %v",
					expOrder, o.Ordering)
			}
			var actualAddrs []int32
			for i, a := range addrs {
				if len(expAddrs) <= i {
					return util.Errorf("got unexpected address: %s", a)
				}
				if expAddrs[i] == 0 {
					actualAddrs = append(actualAddrs, 0)
				} else {
					actualAddrs = append(actualAddrs, addrToNode[a.String()])
				}
			}
			if !reflect.DeepEqual(expAddrs, actualAddrs) {
				return util.Errorf("expected %d, but found %d", expAddrs, actualAddrs)
			}
			return nil
		}
	}

	testCases := []struct {
		args       roachpb.Request
		attrs      []string
		order      rpc.OrderingPolicy
		expReplica []int32
		leader     int32 // 0 for not caching a leader.
		// Naming is somewhat off, as eventually consistent reads usually
		// do not have to go to the leader when a node has a read lease.
		// Would really want CONSENSUS here, but that is not implemented.
		// Likely a test setup here will never have a read lease, but good
		// to keep in mind.
		consistent bool
	}{
		// Inconsistent Scan without matching attributes.
		{
			args:       &roachpb.ScanRequest{},
			attrs:      []string{},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
		},
		// Inconsistent Scan with matching attributes.
		// Should move the two nodes matching the attributes to the front and
		// go stable.
		{
			args:  &roachpb.ScanRequest{},
			attrs: nodeAttrs[5],
			order: rpc.OrderStable,
			// Compare only the first two resulting addresses.
			expReplica: []int32{5, 4, 0, 0, 0},
		},

		// Scan without matching attributes that requires but does not find
		// a leader.
		{
			args:       &roachpb.ScanRequest{},
			attrs:      []string{},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
			consistent: true,
		},
		// Put without matching attributes that requires but does not find a leader.
		// Should go random and not change anything.
		{
			args:       &roachpb.PutRequest{},
			attrs:      []string{"nomatch"},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
		},
		// Put with matching attributes but no leader.
		// Should move the two nodes matching the attributes to the front and
		// go stable.
		{
			args:  &roachpb.PutRequest{},
			attrs: append(nodeAttrs[5], "irrelevant"),
			// Compare only the first two resulting addresses.
			order:      rpc.OrderStable,
			expReplica: []int32{5, 4, 0, 0, 0},
		},
		// Put with matching attributes that finds the leader (node 2).
		// Should address the leader and the two nodes matching the attributes
		// (the last and second to last) in that order.
		{
			args:  &roachpb.PutRequest{},
			attrs: append(nodeAttrs[5], "irrelevant"),
			// Compare only the first resulting addresses as we have a leader
			// and that means we're only trying to send there.
			order:      rpc.OrderStable,
			expReplica: []int32{2, 5, 4, 0, 0},
			leader:     2,
		},
		// Inconsistent Get without matching attributes but with a cached
		// leader (node 2). Should just go random as the leader does not matter.
		{
			args:       &roachpb.GetRequest{},
			attrs:      []string{},
			order:      rpc.OrderRandom,
			expReplica: []int32{1, 2, 3, 4, 5},
			leader:     2,
		},
	}

	descriptor := roachpb.RangeDescriptor{
		StartKey: roachpb.RKeyMin,
		EndKey:   roachpb.RKeyMax,
		RangeID:  rangeID,
		Replicas: nil,
	}

	// Stub to be changed in each test case.
	var verifyCall func(rpc.Options, []net.Addr) error

	var testFn rpcSendFn = func(opts rpc.Options, method string,
		addrs []net.Addr, getArgs func(addr net.Addr) proto.Message,
		getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
		if err := verifyCall(opts, addrs); err != nil {
			return nil, err
		}
		return []proto.Message{getArgs(addrs[0]).(*roachpb.BatchRequest).CreateReply()}, nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(roachpb.RKey, bool, bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
			return []roachpb.RangeDescriptor{descriptor}, nil
		}),
	}

	ds := NewDistSender(ctx, g)

	for n, tc := range testCases {
		verifyCall = makeVerifier(tc.order, tc.expReplica)
		descriptor.Replicas = nil // could do this once above, but more convenient here
		for i := int32(1); i <= 5; i++ {
			addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
			addrToNode[addr.String()] = i
			nd := &roachpb.NodeDescriptor{
				NodeID:  roachpb.NodeID(i),
				Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
				Attrs: roachpb.Attributes{
					Attrs: nodeAttrs[i],
				},
			}
			if err := g.AddInfoProto(gossip.MakeNodeIDKey(roachpb.NodeID(i)), nd, time.Hour); err != nil {
				t.Fatal(err)
			}
			descriptor.Replicas = append(descriptor.Replicas, roachpb.ReplicaDescriptor{
				NodeID:  roachpb.NodeID(i),
				StoreID: roachpb.StoreID(i),
			})
		}

		{
			// The local node needs to get its attributes during sendRPC.
			nd := &roachpb.NodeDescriptor{
				NodeID: 6,
				Attrs: roachpb.Attributes{
					Attrs: tc.attrs,
				},
			}
			g.SetNodeID(nd.NodeID)
			if err := g.SetNodeDescriptor(nd); err != nil {
				t.Fatal(err)
			}
		}

		ds.leaderCache.Update(roachpb.RangeID(rangeID), roachpb.ReplicaDescriptor{})
		if tc.leader > 0 {
			ds.leaderCache.Update(roachpb.RangeID(rangeID), descriptor.Replicas[tc.leader-1])
		}

		args := tc.args
		args.Header().Key = roachpb.Key("a")
		if roachpb.IsRange(args) {
			args.Header().EndKey = roachpb.Key("b")
		}
		consistency := roachpb.CONSISTENT
		if !tc.consistent {
			consistency = roachpb.INCONSISTENT
		}
		// Kill the cached NodeDescriptor, enforcing a lookup from Gossip.
		ds.nodeDescriptor = nil
		if _, err := client.SendWrappedWith(ds, nil, roachpb.Header{
			RangeID:         rangeID, // Not used in this test, but why not.
			ReadConsistency: consistency,
		}, args); err != nil {
			t.Errorf("%d: %s", n, err)
		}
	}
}
Example #5
// fillSkippedResponses replaces NoopResponses with typed empty responses
// and sets the ResumeSpan on range requests that were skipped or only
// partially processed once the batch key max limit was met.
func fillSkippedResponses(
	ba roachpb.BatchRequest, br *roachpb.BatchResponse, nextSpan roachpb.RSpan,
) {
	// Some requests might have NoopResponses; we must replace them with empty
	// responses of the proper type.
	for i, req := range ba.Requests {
		if _, ok := br.Responses[i].GetInner().(*roachpb.NoopResponse); !ok {
			continue
		}
		var reply roachpb.Response
		switch t := req.GetInner().(type) {
		case *roachpb.ScanRequest:
			reply = &roachpb.ScanResponse{}

		case *roachpb.ReverseScanRequest:
			reply = &roachpb.ReverseScanResponse{}

		case *roachpb.DeleteRangeRequest:
			reply = &roachpb.DeleteRangeResponse{}

		case *roachpb.BeginTransactionRequest, *roachpb.EndTransactionRequest:
			continue

		default:
			panic(fmt.Sprintf("bad type %T", t))
		}
		union := roachpb.ResponseUnion{}
		union.MustSetInner(reply)
		br.Responses[i] = union
	}
	// Set the ResumeSpan for future batch requests.
	isReverse := ba.IsReverse()
	for i, resp := range br.Responses {
		req := ba.Requests[i].GetInner()
		if !roachpb.IsRange(req) {
			continue
		}
		hdr := resp.GetInner().Header()
		origSpan := req.Header()
		if isReverse {
			if hdr.ResumeSpan != nil {
				// The ResumeSpan.Key might be set to the StartKey of a range;
				// correctly set it to the Key of the original request span.
				hdr.ResumeSpan.Key = origSpan.Key
			} else if roachpb.RKey(origSpan.Key).Less(nextSpan.EndKey) {
				// Some keys have yet to be processed.
				hdr.ResumeSpan = &origSpan
				if nextSpan.EndKey.Less(roachpb.RKey(origSpan.EndKey)) {
					// The original span has been partially processed.
					hdr.ResumeSpan.EndKey = nextSpan.EndKey.AsRawKey()
				}
			}
		} else {
			if hdr.ResumeSpan != nil {
				// The ResumeSpan.EndKey might be set to the EndKey of a
				// range; correctly set it to the EndKey of the original
				// request span.
				hdr.ResumeSpan.EndKey = origSpan.EndKey
			} else if nextSpan.Key.Less(roachpb.RKey(origSpan.EndKey)) {
				// Some keys have yet to be processed.
				hdr.ResumeSpan = &origSpan
				if roachpb.RKey(origSpan.Key).Less(nextSpan.Key) {
					// The original span has been partially processed.
					hdr.ResumeSpan.Key = nextSpan.Key.AsRawKey()
				}
			}
		}
		br.Responses[i].GetInner().SetHeader(hdr)
	}
}
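
A sketch of how a caller might consume the ResumeSpan that fillSkippedResponses leaves behind; the send parameter stands in for whatever transport actually issues the request and is purely hypothetical:

// scanAll repeatedly re-issues a Scan over the ResumeSpan until the
// original span ["start", "end") has been fully processed.
func scanAll(send func(*roachpb.ScanRequest) *roachpb.ScanResponse, start, end roachpb.Key) {
	span := roachpb.Span{Key: start, EndKey: end}
	for {
		resp := send(&roachpb.ScanRequest{Span: span})
		// ... consume resp.Rows ...
		if resp.ResumeSpan == nil {
			return // the whole original span has been processed
		}
		span = *resp.ResumeSpan // resume where the previous batch stopped
	}
}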
Example #6
// truncate restricts all contained requests to the given key range.
// Even on error, the returned closure must be executed; it undoes any
// truncations performed.
// All requests contained in the batch are "truncated" to the given
// span, inserting NoopRequest appropriately to replace requests which
// are left without a key range to operate on. The number of non-noop
// requests after truncation is returned along with a closure which
// must be executed to undo the truncation, even in case of an error.
// TODO(tschottdorf): Consider returning a new BatchRequest, which has more
// overhead in the common case of a batch which never needs truncation but is
// less magical.
func truncate(br *roachpb.BatchRequest, rs roachpb.RSpan) (func(), int, error) {
	truncateOne := func(args roachpb.Request) (bool, []func(), error) {
		if _, ok := args.(*roachpb.NoopRequest); ok {
			return true, nil, nil
		}
		header := args.Header()
		if !roachpb.IsRange(args) {
			// This is a point request.
			if len(header.EndKey) > 0 {
				return false, nil, util.Errorf("%T is not a range command, but EndKey is set", args)
			}
			if !rs.ContainsKey(keys.Addr(header.Key)) {
				return true, nil, nil
			}
			return false, nil, nil
		}
		// We're dealing with a range-spanning request.
		var undo []func()
		keyAddr, endKeyAddr := keys.Addr(header.Key), keys.Addr(header.EndKey)
		if l, r := !keyAddr.Equal(header.Key), !endKeyAddr.Equal(header.EndKey); l || r {
			if !rs.ContainsKeyRange(keyAddr, endKeyAddr) {
				return false, nil, util.Errorf("local key range must not span ranges")
			}
			if !l || !r {
				return false, nil, util.Errorf("local key mixed with global key in range")
			}
			return false, nil, nil
		}
		// Below, {end,}keyAddr equals header.{End,}Key, so nothing is local.
		if keyAddr.Less(rs.Key) {
			{
				origKey := header.Key
				undo = append(undo, func() { header.Key = origKey })
			}
			header.Key = rs.Key.AsRawKey() // "key" can't be local
			keyAddr = rs.Key
		}
		if !endKeyAddr.Less(rs.EndKey) {
			{
				origEndKey := header.EndKey
				undo = append(undo, func() { header.EndKey = origEndKey })
			}
			header.EndKey = rs.EndKey.AsRawKey() // "endKey" can't be local
			endKeyAddr = rs.EndKey
		}
		// Check whether the truncation has left any keys in the range. If not,
		// we need to cut it out of the request.
		return !keyAddr.Less(endKeyAddr), undo, nil
	}

	var fns []func()
	gUndo := func() {
		for _, f := range fns {
			f()
		}
	}

	var numNoop int
	for pos, arg := range br.Requests {
		omit, undo, err := truncateOne(arg.GetInner())
		if omit {
			numNoop++
			nReq := &roachpb.RequestUnion{}
			if !nReq.SetValue(&roachpb.NoopRequest{}) {
				panic("RequestUnion excludes NoopRequest")
			}
			oReq := br.Requests[pos]
			br.Requests[pos] = *nReq
			posCpy := pos // for closure
			undo = append(undo, func() {
				br.Requests[posCpy] = oReq
			})
		}
		fns = append(fns, undo...)
		if err != nil {
			return gUndo, 0, err
		}
	}
	return gUndo, len(br.Requests) - numNoop, nil
}
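
The undo contract above implies a caller shaped roughly like the following sketch; sendTruncated and the elided send step are hypothetical:

func sendTruncated(ba *roachpb.BatchRequest, rs roachpb.RSpan) error {
	undo, numActive, err := truncate(ba, rs)
	// Per the contract, undo must run even if truncate failed; it restores
	// truncated headers and any requests that were swapped for Noops.
	defer undo()
	if err != nil {
		return err
	}
	if numActive == 0 {
		return nil // everything was truncated away; nothing to send
	}
	// ... send ba to the range owning rs ...
	return nil
}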
Example #7
// truncate restricts all contained requests to the given key range
// and returns a new BatchRequest.
// All requests contained in that batch are "truncated" to the given
// span, inserting NoopRequest appropriately to replace requests which
// are left without a key range to operate on. The number of non-noop
// requests after truncation is returned.
func truncate(ba roachpb.BatchRequest, rs roachpb.RSpan) (roachpb.BatchRequest, int, error) {
	truncateOne := func(args roachpb.Request) (bool, roachpb.Span, error) {
		if _, ok := args.(*roachpb.NoopRequest); ok {
			return true, emptySpan, nil
		}
		header := args.Header()
		if !roachpb.IsRange(args) {
			// This is a point request.
			if len(header.EndKey) > 0 {
				return false, emptySpan, errors.Errorf("%T is not a range command, but EndKey is set", args)
			}
			keyAddr, err := keys.Addr(header.Key)
			if err != nil {
				return false, emptySpan, err
			}
			if !rs.ContainsKey(keyAddr) {
				return false, emptySpan, nil
			}
			return true, header, nil
		}
		// We're dealing with a range-spanning request.
		local := false
		keyAddr, err := keys.Addr(header.Key)
		if err != nil {
			return false, emptySpan, err
		}
		endKeyAddr, err := keys.Addr(header.EndKey)
		if err != nil {
			return false, emptySpan, err
		}
		if l, r := !keyAddr.Equal(header.Key), !endKeyAddr.Equal(header.EndKey); l || r {
			if !l || !r {
				return false, emptySpan, errors.Errorf("local key mixed with global key in range")
			}
			local = true
		}
		if keyAddr.Less(rs.Key) {
			// rs.Key can't be local because it contains range split points, which
			// are never local.
			if !local {
				header.Key = rs.Key.AsRawKey()
			} else {
				// The local start key should be truncated to the boundary of local keys which
				// address to rs.Key.
				header.Key = keys.MakeRangeKeyPrefix(rs.Key)
			}
		}
		if !endKeyAddr.Less(rs.EndKey) {
			// rs.EndKey can't be local because it contains range split points, which
			// are never local.
			if !local {
				header.EndKey = rs.EndKey.AsRawKey()
			} else {
				// The local end key should be truncated to the boundary of local keys which
				// address to rs.EndKey.
				header.EndKey = keys.MakeRangeKeyPrefix(rs.EndKey)
			}
		}
		// Check whether the truncation has left any keys in the range. If not,
		// we need to cut it out of the request.
		if header.Key.Compare(header.EndKey) >= 0 {
			return false, emptySpan, nil
		}
		return true, header, nil
	}

	var numNoop int
	origRequests := ba.Requests
	ba.Requests = make([]roachpb.RequestUnion, len(ba.Requests))
	for pos, arg := range origRequests {
		hasRequest, newHeader, err := truncateOne(arg.GetInner())
		if !hasRequest {
			// We omit this one, i.e. replace it with a Noop.
			numNoop++
			union := roachpb.RequestUnion{}
			union.MustSetInner(&noopRequest)
			ba.Requests[pos] = union
		} else {
			// Keep the old one. If we must adjust the header, we must copy.
			if inner := origRequests[pos].GetInner(); newHeader.Equal(inner.Header()) {
				ba.Requests[pos] = origRequests[pos]
			} else {
				shallowCopy := inner.ShallowCopy()
				shallowCopy.SetHeader(newHeader)
				union := &ba.Requests[pos] // avoid operating on copy
				union.MustSetInner(shallowCopy)
			}
		}
		if err != nil {
			return roachpb.BatchRequest{}, 0, err
		}
	}
	return ba, len(ba.Requests) - numNoop, nil
}
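
Unlike the closure-based variant above, this version leaves the caller's batch untouched and returns a truncated copy; a sketch of a caller, with the send step elided:

func sendToRange(ba roachpb.BatchRequest, rs roachpb.RSpan) error {
	truncBA, numActive, err := truncate(ba, rs)
	if err != nil {
		return err
	}
	if numActive > 0 {
		// ... send truncBA; ba still holds the original, untruncated requests ...
		_ = truncBA // placeholder for the hypothetical send
	}
	return nil
}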
Example #8
// truncate restricts all contained requests to the given key range.
// Even on error, the returned closure must be executed; it undoes any
// truncations performed.
// First, the boundaries of the truncation are obtained: This is the
// intersection between [from,to) and the descriptor's range.
// Secondly, all requests contained in the batch are "truncated" to
// the resulting range, inserting NoopRequest appropriately to
// replace requests which are left without a key range to operate on.
// The number of non-noop requests after truncation is returned along
// with a closure which must be executed to undo the truncation, even
// in case of an error.
// TODO(tschottdorf): Consider returning a new BatchRequest, which has more
// overhead in the common case of a batch which never needs truncation but is
// less magical.
func truncate(br *roachpb.BatchRequest, desc *roachpb.RangeDescriptor, from, to roachpb.Key) (func(), int, error) {
	if !desc.ContainsKey(from) {
		from = desc.StartKey
	}
	if !desc.ContainsKeyRange(desc.StartKey, to) || to == nil {
		to = desc.EndKey
	}
	truncateOne := func(args roachpb.Request) (bool, []func(), error) {
		if _, ok := args.(*roachpb.NoopRequest); ok {
			return true, nil, nil
		}
		header := args.Header()
		if !roachpb.IsRange(args) {
			if len(header.EndKey) > 0 {
				return false, nil, util.Errorf("%T is not a range command, but EndKey is set", args)
			}
			if !desc.ContainsKey(keys.KeyAddress(header.Key)) {
				return true, nil, nil
			}
			return false, nil, nil
		}
		var undo []func()
		key, endKey := header.Key, header.EndKey
		keyAddr, endKeyAddr := keys.KeyAddress(key), keys.KeyAddress(endKey)
		if keyAddr.Less(from) {
			undo = append(undo, func() { header.Key = key })
			header.Key = from
			keyAddr = from
		}
		if !endKeyAddr.Less(to) {
			undo = append(undo, func() { header.EndKey = endKey })
			header.EndKey = to
			endKeyAddr = to
		}
		// Check whether the truncation has left any keys in the range. If not,
		// we need to cut it out of the request.
		return !keyAddr.Less(endKeyAddr), undo, nil
	}

	var fns []func()
	gUndo := func() {
		for _, f := range fns {
			f()
		}
	}

	var numNoop int
	for pos, arg := range br.Requests {
		omit, undo, err := truncateOne(arg.GetInner())
		if omit {
			numNoop++
			nReq := &roachpb.RequestUnion{}
			if !nReq.SetValue(&roachpb.NoopRequest{}) {
				panic("RequestUnion excludes NoopRequest")
			}
			oReq := br.Requests[pos]
			br.Requests[pos] = *nReq
			posCpy := pos // for closure
			undo = append(undo, func() {
				br.Requests[posCpy] = oReq
			})
		}
		fns = append(fns, undo...)
		if err != nil {
			return gUndo, 0, err
		}
	}
	return gUndo, len(br.Requests) - numNoop, nil
}
Example #9
// truncate restricts all contained requests to the given key range
// and returns a new BatchRequest.
// All requests contained in that batch are "truncated" to the given
// span, inserting NoopRequest appropriately to replace requests which
// are left without a key range to operate on. The number of non-noop
// requests after truncation is returned.
func truncate(ba roachpb.BatchRequest, rs roachpb.RSpan) (roachpb.BatchRequest, int, error) {
	truncateOne := func(args roachpb.Request) (bool, roachpb.Span, error) {
		if _, ok := args.(*roachpb.NoopRequest); ok {
			return true, emptySpan, nil
		}
		header := *args.Header()
		if !roachpb.IsRange(args) {
			// This is a point request.
			if len(header.EndKey) > 0 {
				return false, emptySpan, util.Errorf("%T is not a range command, but EndKey is set", args)
			}
			if !rs.ContainsKey(keys.Addr(header.Key)) {
				return false, emptySpan, nil
			}
			return true, header, nil
		}
		// We're dealing with a range-spanning request.
		keyAddr, endKeyAddr := keys.Addr(header.Key), keys.Addr(header.EndKey)
		if l, r := !keyAddr.Equal(header.Key), !endKeyAddr.Equal(header.EndKey); l || r {
			if !rs.ContainsKeyRange(keyAddr, endKeyAddr) {
				return false, emptySpan, util.Errorf("local key range must not span ranges")
			}
			if !l || !r {
				return false, emptySpan, util.Errorf("local key mixed with global key in range")
			}
			// Range-local local key range.
			return true, header, nil
		}
		// Below, {end,}keyAddr equals header.{End,}Key, so nothing is local.
		if keyAddr.Less(rs.Key) {
			header.Key = rs.Key.AsRawKey() // "key" can't be local
			keyAddr = rs.Key
		}
		if !endKeyAddr.Less(rs.EndKey) {
			header.EndKey = rs.EndKey.AsRawKey() // "endKey" can't be local
			endKeyAddr = rs.EndKey
		}
		// Check whether the truncation has left any keys in the range. If not,
		// we need to cut it out of the request.
		if !keyAddr.Less(endKeyAddr) {
			return false, emptySpan, nil
		}
		return true, header, nil
	}

	var numNoop int
	origRequests := ba.Requests
	ba.Requests = make([]roachpb.RequestUnion, len(ba.Requests))
	for pos, arg := range origRequests {
		hasRequest, newHeader, err := truncateOne(arg.GetInner())
		if !hasRequest {
			// We omit this one, i.e. replace it with a Noop.
			numNoop++
			nReq := roachpb.RequestUnion{}
			if !nReq.SetValue(&roachpb.NoopRequest{}) {
				panic("RequestUnion excludes NoopRequest")
			}
			ba.Requests[pos] = nReq
		} else {
			// Keep the old one. If we must adjust the header, we must copy.
			// TODO(tschottdorf): this could wind up cloning big chunks of data.
			// Can optimize by creating a new Request manually, but with the old
			// data.
			if newHeader.Equal(*origRequests[pos].GetInner().Header()) {
				ba.Requests[pos] = origRequests[pos]
			} else {
				ba.Requests[pos] = *proto.Clone(&origRequests[pos]).(*roachpb.RequestUnion)
				*ba.Requests[pos].GetInner().Header() = newHeader
			}
		}
		if err != nil {
			return roachpb.BatchRequest{}, 0, err
		}
	}
	return ba, len(ba.Requests) - numNoop, nil
}
Example #10
// truncate restricts all contained requests to the given key range
// and returns a new BatchRequest.
// All requests contained in that batch are "truncated" to the given
// span, inserting NoopRequest appropriately to replace requests which
// are left without a key range to operate on. The number of non-noop
// requests after truncation is returned.
func truncate(ba roachpb.BatchRequest, rs roachpb.RSpan) (roachpb.BatchRequest, int, error) {
	truncateOne := func(args roachpb.Request) (bool, roachpb.Span, error) {
		if _, ok := args.(*roachpb.NoopRequest); ok {
			return true, emptySpan, nil
		}
		header := args.Header()
		if !roachpb.IsRange(args) {
			// This is a point request.
			if len(header.EndKey) > 0 {
				return false, emptySpan, util.Errorf("%T is not a range command, but EndKey is set", args)
			}
			if !rs.ContainsKey(keys.Addr(header.Key)) {
				return false, emptySpan, nil
			}
			return true, header, nil
		}
		// We're dealing with a range-spanning request.
		keyAddr, endKeyAddr := keys.Addr(header.Key), keys.Addr(header.EndKey)
		if l, r := !keyAddr.Equal(header.Key), !endKeyAddr.Equal(header.EndKey); l || r {
			if !rs.ContainsKeyRange(keyAddr, endKeyAddr) {
				return false, emptySpan, util.Errorf("local key range must not span ranges")
			}
			if !l || !r {
				return false, emptySpan, util.Errorf("local key mixed with global key in range")
			}
			// Range-local local key range.
			return true, header, nil
		}
		// Below, {end,}keyAddr equals header.{End,}Key, so nothing is local.
		if keyAddr.Less(rs.Key) {
			header.Key = rs.Key.AsRawKey() // "key" can't be local
			keyAddr = rs.Key
		}
		if !endKeyAddr.Less(rs.EndKey) {
			header.EndKey = rs.EndKey.AsRawKey() // "endKey" can't be local
			endKeyAddr = rs.EndKey
		}
		// Check whether the truncation has left any keys in the range. If not,
		// we need to cut it out of the request.
		if !keyAddr.Less(endKeyAddr) {
			return false, emptySpan, nil
		}
		return true, header, nil
	}

	var numNoop int
	origRequests := ba.Requests
	ba.Requests = make([]roachpb.RequestUnion, len(ba.Requests))
	for pos, arg := range origRequests {
		hasRequest, newHeader, err := truncateOne(arg.GetInner())
		if !hasRequest {
			// We omit this one, i.e. replace it with a Noop.
			numNoop++
			union := roachpb.RequestUnion{}
			if !union.SetInner(&noopRequest) {
				panic(fmt.Sprintf("%T excludes %T", union, noopRequest))
			}
			ba.Requests[pos] = union
		} else {
			// Keep the old one. If we must adjust the header, we must copy.
			if inner := origRequests[pos].GetInner(); newHeader.Equal(inner.Header()) {
				ba.Requests[pos] = origRequests[pos]
			} else {
				shallowCopy := inner.ShallowCopy()
				shallowCopy.SetHeader(newHeader)
				if union := &ba.Requests[pos]; !union.SetInner(shallowCopy) {
					panic(fmt.Sprintf("%T excludes %T", union, shallowCopy))
				}
			}
		}
		if err != nil {
			return roachpb.BatchRequest{}, 0, err
		}
	}
	return ba, len(ba.Requests) - numNoop, nil
}