// rangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's SendBatch() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) rangeLookup(key proto.Key, options lookupOptions,
	desc *proto.RangeDescriptor) ([]proto.RangeDescriptor, error) {
	args, unwrap := client.MaybeWrap(&proto.RangeLookupRequest{
		RequestHeader: proto.RequestHeader{
			Key:             key,
			ReadConsistency: proto.INCONSISTENT,
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: options.considerIntents,
		Reverse:         options.useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	// TODO(tschottdorf) consider a Trace here, potentially that of the request
	// that had the cache miss and waits for the result.
	reply, err := ds.sendRPC(nil /* Trace */, desc.RangeID, replicas, rpc.OrderRandom, args)
	reply = unwrap(reply)
	if err != nil {
		return nil, err
	}
	rlReply := reply.(*proto.RangeLookupResponse)
	if rlReply.Error != nil {
		return nil, rlReply.GoError()
	}
	return rlReply.Ranges, nil
}
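// The sketch below is a minimal, stand-alone illustration of the wrap/unwrap
// pattern used in rangeLookup above: MaybeWrap bundles a single request into
// a batch and hands back a closure that extracts the matching single response
// again. Every name in the sketch (maybeWrap, batchRequest, and so on) is a
// hypothetical stand-in for illustration only, not the real client/proto API.
package main

import "fmt"

type singleRequest struct{ key string }
type singleResponse struct{ ranges []string }

type batchRequest struct{ reqs []singleRequest }
type batchResponse struct{ resps []singleResponse }

// maybeWrap wraps one request into a batch and returns an unwrap closure
// that pulls the corresponding response back out of the batch response.
func maybeWrap(req singleRequest) (batchRequest, func(batchResponse) singleResponse) {
	ba := batchRequest{reqs: []singleRequest{req}}
	unwrap := func(br batchResponse) singleResponse {
		// A single wrapped request always yields a single response.
		return br.resps[0]
	}
	return ba, unwrap
}

func main() {
	ba, unwrap := maybeWrap(singleRequest{key: "meta2-key"})
	// Stand-in for sending the batch and receiving its batch response.
	br := batchResponse{resps: make([]singleResponse, len(ba.reqs))}
	br.resps[0] = singleResponse{ranges: []string{"descriptor-1"}}
	fmt.Println(unwrap(br).ranges) // [descriptor-1]
}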
// rangeLookup implements the rangeDescriptorDB interface. It looks up
// the descriptors for the given (meta) key.
func (ls *LocalSender) rangeLookup(key proto.Key, options lookupOptions,
	_ *proto.RangeDescriptor) ([]proto.RangeDescriptor, error) {
	ba, unwrap := client.MaybeWrap(&proto.RangeLookupRequest{
		RequestHeader: proto.RequestHeader{
			Key:             key,
			ReadConsistency: proto.INCONSISTENT,
		},
		MaxRanges:       1,
		ConsiderIntents: options.considerIntents,
		Reverse:         options.useReverseScan,
	})
	br, err := ls.SendBatch(context.Background(), *ba)
	if err != nil {
		return nil, err
	}
	return unwrap(br).(*proto.RangeLookupResponse).Ranges, nil
}
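// The rangeDescriptorDB interface mentioned in the comment above is not part
// of this excerpt; the declaration below is inferred from the two rangeLookup
// implementations and is an assumption, not the actual definition (the real
// interface may declare additional methods).
type rangeDescriptorDB interface {
	// rangeLookup returns the range descriptors addressed by the given
	// (meta) key, honoring the provided lookup options. The descriptor
	// argument identifies the range holding the meta key; LocalSender's
	// implementation ignores it.
	rangeLookup(proto.Key, lookupOptions, *proto.RangeDescriptor) ([]proto.RangeDescriptor, error)
}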
// executeCmd interprets the given message as a proto.Request, wraps it into
// a batch if it is not one already, and sends it via our local sender.
func (s *DBServer) executeCmd(argsI gogoproto.Message) (gogoproto.Message, error) {
	args := argsI.(proto.Request)
	ba, unwrap := client.MaybeWrap(args)
	if err := verifyRequest(args); err != nil {
		return nil, err
	}
	br, pErr := s.sender.Send(context.TODO(), *ba)
	if pErr != nil {
		br = &proto.BatchResponse{}
	}
	if br.Error != nil {
		// The sender must report errors via its return value only; the
		// response's Error field is reserved for the fold-in below.
		panic(proto.ErrorUnexpectedlySet(s.sender, br))
	}
	br.Error = pErr
	return unwrap(br), nil
}
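// Below is a minimal, stand-alone sketch of the error-folding convention
// enforced by executeCmd above: the sender reports failures only through its
// error return value, never by pre-setting the response's Error field, and
// the server folds that error into the response before returning it. The
// types and names here are hypothetical stand-ins, not the real proto
// definitions.
package main

import (
	"errors"
	"fmt"
)

type response struct{ err error }

// send simulates a sender: on failure it returns no response and an error,
// mirroring how Send returns (*proto.BatchResponse, *proto.Error).
func send(fail bool) (*response, error) {
	if fail {
		return nil, errors.New("range not found")
	}
	return &response{}, nil
}

func execute(fail bool) *response {
	br, err := send(fail)
	if err != nil {
		br = &response{} // replace the (possibly nil) response with an empty one
	}
	if br.err != nil {
		panic("sender must not set the response error itself")
	}
	br.err = err // fold the sender's error into the outgoing response
	return br
}

func main() {
	fmt.Println(execute(false).err) // <nil>
	fmt.Println(execute(true).err)  // range not found
}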
// executeCmd interprets the given message as a proto.Request, wraps it into
// a batch if it is not one already, and sends it via the node's local sender.
func (n *Node) executeCmd(argsI gogoproto.Message) (gogoproto.Message, error) {
	args := argsI.(proto.Request)
	// TODO(tschottdorf) get a hold of the client's ID, add it to the
	// context before dispatching, and create an ID for tracing the request.
	header := args.Header()
	header.CmdID = header.GetOrCreateCmdID(n.ctx.Clock.PhysicalNow())
	trace := n.ctx.Tracer.NewTrace(header)
	defer trace.Finalize()
	defer trace.Epoch("node")()
	ctx := tracer.ToCtx((*Node)(n).context(), trace)
	ba, unwrap := client.MaybeWrap(args)
	br, pErr := n.lSender.Send(ctx, *ba)
	if pErr != nil {
		br = &proto.BatchResponse{}
		trace.Event(fmt.Sprintf("error: %T", pErr.GoError()))
	}
	if br.Error != nil {
		// As in DBServer.executeCmd, the sender reports errors via its
		// return value; a pre-set response error indicates a bug.
		panic(proto.ErrorUnexpectedlySet(n.lSender, br))
	}
	br.Error = pErr
	n.feed.CallComplete(ba, br)
	return unwrap(br), nil
}
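// A small stand-alone sketch of the `defer trace.Epoch("node")()` idiom used
// above: Epoch opens a named span immediately and returns a closure that
// closes it, so deferring the returned function brackets the remainder of
// executeCmd. The sketchTrace type below is a hypothetical stand-in, not the
// real tracer package.
package main

import "fmt"

type sketchTrace struct{}

// Epoch opens a named span right away and returns a func that closes it.
func (sketchTrace) Epoch(name string) func() {
	fmt.Println("begin", name)
	return func() { fmt.Println("end", name) }
}

func main() {
	var t sketchTrace
	defer t.Epoch("node")() // Epoch runs now; the returned closure runs at return
	fmt.Println("work")
	// Output:
	// begin node
	// work
	// end node
}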