// recvResponse receives and parses an RPC response.
// On error, it returns the error and indicates whether the call should be retried.
//
// TODO(zhaoq): Check whether the received message sequence is valid.
func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
	// Try to acquire header metadata from the server if there is any.
	var err error
	defer func() {
		if err != nil {
			if _, ok := err.(transport.ConnectionError); !ok {
				t.CloseStream(stream, err)
			}
		}
	}()
	c.headerMD, err = stream.Header()
	if err != nil {
		return err
	}
	p := &parser{r: stream}
	for {
		if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
	}
	c.trailerMD = stream.Trailer()
	return nil
}
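Before moving on, it helps to see what the parser actually pulls off the stream. gRPC frames every message as a 1-byte compression flag, a 4-byte big-endian payload length, and then the payload; recv keeps reading such frames until io.EOF. The sketch below reads one frame under those assumptions; readFrame is an illustrative helper, not part of the grpc package.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readFrame reads one gRPC-framed message: a 1-byte compression flag,
// a 4-byte big-endian payload length, then the payload itself.
func readFrame(r io.Reader) (compressed byte, msg []byte, err error) {
	var hdr [5]byte
	if _, err = io.ReadFull(r, hdr[:]); err != nil {
		return 0, nil, err
	}
	compressed = hdr[0]
	msg = make([]byte, binary.BigEndian.Uint32(hdr[1:]))
	if _, err = io.ReadFull(r, msg); err != nil {
		return 0, nil, err
	}
	return compressed, msg, nil
}

func main() {
	// Frame a 3-byte payload by hand and read it back.
	frame := append([]byte{0, 0, 0, 0, 3}, []byte("abc")...)
	flag, msg, err := readFrame(bytes.NewReader(frame))
	fmt.Println(flag, string(msg), err) // 0 abc <nil>
}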
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
		Flush:  desc.ServerStreams && desc.ClientStreams,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		cp:      cc.dopts.cp,
		dc:      cc.dopts.dc,
		tracing: EnableTracing,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
		cs.cbuf = new(bytes.Buffer)
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		cs.finish(err)
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{r: s}
	// Listen on ctx.Done() to detect cancellation when there is no pending
	// I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// Incur transport error, simply exit.
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		conn *Conn
		t    transport.ClientTransport
		err  error
	)
	for {
		conn, err = cc.dopts.picker.Pick()
		if err != nil {
			return nil, toRPCErr(err)
		}
		t, err = conn.Wait(ctx)
		if err != nil {
			if err == ErrTransientFailure {
				continue
			}
			return nil, toRPCErr(err)
		}
		break
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   conn.authority,
		Method: method,
	}
	cs := &clientStream{
		desc:    desc,
		codec:   conn.dopts.codec,
		tracing: EnableTracing,
	}
	if cs.tracing {
		cs.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.traceInfo.tr.LazyLog(&cs.traceInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.traceInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{s: s}
	return cs, nil
}
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		tracing: EnableTracing,
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{s: s}
	// Listen on ctx.Done() to detect cancellation when there is no pending
	// I/O operations on this stream.
	go func() {
		<-s.Context().Done()
		cs.closeTransportStream(transport.ContextErr(s.Context().Err()))
	}()
	return cs, nil
}
// sendRequest writes out various information of an RPC such as Context and Message.
func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
	stream, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			// If err is connection error, t will be closed, no need to close stream here.
			if _, ok := err.(transport.ConnectionError); !ok {
				t.CloseStream(stream, err)
			}
		}
	}()
	var cbuf *bytes.Buffer
	if compressor != nil {
		cbuf = new(bytes.Buffer)
	}
	outBuf, err := encode(codec, args, compressor, cbuf)
	if err != nil {
		return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
	}
	err = t.Write(stream, outBuf, opts)
	// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
	// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
	// recvResponse to get the final status.
	if err != nil && err != io.EOF {
		return nil, err
	}
	// Sent successfully.
	return stream, nil
}
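On the send path, encode(codec, args, compressor, cbuf) produces the same kind of frame seen on the receive side: the marshaled message, optionally compressed into cbuf, behind a 5-byte header whose first byte records whether the payload is compressed. The sketch below illustrates just that framing step, with gzip standing in for the configured Compressor; frame is a made-up helper for illustration, not the package's actual encode.

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/binary"
	"fmt"
)

// frame prepends the 5-byte gRPC message header to payload. If compress is true,
// the payload is gzipped first and the compression flag byte is set to 1.
func frame(payload []byte, compress bool) ([]byte, error) {
	body := payload
	flag := byte(0)
	if compress {
		var cbuf bytes.Buffer
		zw := gzip.NewWriter(&cbuf)
		if _, err := zw.Write(payload); err != nil {
			return nil, err
		}
		if err := zw.Close(); err != nil {
			return nil, err
		}
		body = cbuf.Bytes()
		flag = 1
	}
	out := make([]byte, 5+len(body))
	out[0] = flag
	binary.BigEndian.PutUint32(out[1:5], uint32(len(body)))
	copy(out[5:], body)
	return out, nil
}

func main() {
	buf, err := frame([]byte("hello"), true)
	fmt.Println(len(buf), err)
}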
// sendRequest writes out various information of an RPC such as Context and Message.
func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
	stream, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			if _, ok := err.(transport.ConnectionError); !ok {
				t.CloseStream(stream, err)
			}
		}
	}()
	var cbuf *bytes.Buffer
	if compressor != nil {
		cbuf = new(bytes.Buffer)
	}
	outBuf, err := encode(codec, args, compressor, cbuf)
	if err != nil {
		return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
	}
	err = t.Write(stream, outBuf, opts)
	if err != nil {
		return nil, err
	}
	// Sent successfully.
	return stream, nil
}
// recvResponse receives and parses an RPC response.
// On error, it returns the error and indicates whether the call should be retried.
//
// TODO(zhaoq): Check whether the received message sequence is valid.
// TODO ctx is used for stats collection and processing. It is the context passed from the application.
func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
	// Try to acquire header metadata from the server if there is any.
	defer func() {
		if err != nil {
			if _, ok := err.(transport.ConnectionError); !ok {
				t.CloseStream(stream, err)
			}
		}
	}()
	c.headerMD, err = stream.Header()
	if err != nil {
		return
	}
	p := &parser{r: stream}
	var inPayload *stats.InPayload
	if stats.On() {
		inPayload = &stats.InPayload{
			Client: true,
		}
	}
	for {
		if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32, inPayload); err != nil {
			if err == io.EOF {
				break
			}
			return
		}
	}
	if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK {
		// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
		// Fix the order if necessary.
		stats.Handle(ctx, inPayload)
	}
	c.trailerMD = stream.Trailer()
	return nil
}
// sendRequest writes out various information of an RPC such as Context and Message.
func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
	stream, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			if _, ok := err.(transport.ConnectionError); !ok {
				t.CloseStream(stream, err)
			}
		}
	}()
	// TODO(zhaoq): Support compression.
	outBuf, err := encode(codec, args, compressionNone)
	if err != nil {
		return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
	}
	err = t.Write(stream, outBuf, opts)
	if err != nil {
		return nil, err
	}
	// Sent successfully.
	return stream, nil
}
// Invoke is called by the generated code. It sends the RPC request on the
// wire and returns after response is received.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
	var c callInfo
	// before/after hooks:
	// 1. First run every CallOption's before hook (they operate on the callInfo).
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	// Tracing (not the focus here).
	if EnableTracing {
		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		defer c.traceInfo.tr.Finish()
		c.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
		defer func() {
			if err != nil {
				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
				c.traceInfo.tr.SetError()
			}
		}()
	}
	// Unary call: this is the last message, and it should be written out immediately.
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	var (
		lastErr error // record the error that happened
	)
	// Why does a single Invoke loop forever here? To retry the call after transport-level failures.
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
		)
		// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
		// 1. Handle failFast: if the previous attempt failed and the call is fail-fast, give up.
		if lastErr != nil && c.failFast {
			return toRPCErr(lastErr)
		}
		// 2. Build the call header.
		callHdr := &transport.CallHdr{
			Host:   cc.authority,
			Method: method,
		}
		// 3. Advertise the sender-side compression algorithm (the receiver needs to detect it).
		if cc.dopts.cp != nil {
			callHdr.SendCompress = cc.dopts.cp.Type()
		}
		// 4. Pick a transport (how the picker does this is a separate question).
		t, err = cc.dopts.picker.Pick(ctx)
		// 4.1 If Pick itself fails there is no point in retrying; the underlying network is probably broken.
		if err != nil {
			// If this was already a retry, report the failure directly.
			if lastErr != nil {
				// This was a retry; return the error from the last attempt.
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		// Tracing (not the focus here).
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
		}
		// 5. Send the request; args is the key parameter.
		stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
		// How is the error handled?
		if err != nil {
			// 5.1 On a connection error, keep retrying.
			if _, ok := err.(transport.ConnectionError); ok {
				lastErr = err
				continue
			}
			// 5.2 Any other error aborts the call.
			if lastErr != nil {
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		// 6. Receive the response.
		lastErr = recvResponse(cc.dopts, t, &c, stream, reply)
		// 6.1 On a connection error, retry.
		if _, ok := lastErr.(transport.ConnectionError); ok {
			continue
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
		}
		// 7. Close the stream.
		t.CloseStream(stream, lastErr)
		if lastErr != nil {
			return toRPCErr(lastErr)
		}
		// 8. Report the final RPC status.
		return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
	}
}
// Invoke sends the RPC request on the wire and returns after response is received.
// Invoke is called by generated code. Also users can call Invoke directly when it
// is really needed in their use cases.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
	c := defaultCallInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	if EnableTracing {
		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		defer c.traceInfo.tr.Finish()
		c.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
		defer func() {
			if err != nil {
				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
				c.traceInfo.tr.SetError()
			}
		}()
	}
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
			// Record the put handler from Balancer.Get(...). It is called once the
			// RPC has completed or failed.
			put func()
		)
		// TODO(zhaoq): Need a formal spec of fail-fast.
		callHdr := &transport.CallHdr{
			Host:   cc.authority,
			Method: method,
		}
		if cc.dopts.cp != nil {
			callHdr.SendCompress = cc.dopts.cp.Type()
		}
		gopts := BalancerGetOptions{
			BlockingWait: !c.failFast,
		}
		t, put, err = cc.getTransport(ctx, gopts)
		if err != nil {
			// TODO(zhaoq): Probably revisit the error handling.
			if _, ok := err.(*rpcError); ok {
				return err
			}
			if err == errConnClosing {
				if c.failFast {
					return Errorf(codes.Unavailable, "%v", errConnClosing)
				}
				continue
			}
			// All the other errors are treated as Internal errors.
			return Errorf(codes.Internal, "%v", err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
		}
		stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			if _, ok := err.(transport.ConnectionError); ok {
				if c.failFast {
					return toRPCErr(err)
				}
				continue
			}
			return toRPCErr(err)
		}
		// Receive the response
		err = recvResponse(cc.dopts, t, &c, stream, reply)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			if _, ok := err.(transport.ConnectionError); ok {
				if c.failFast {
					return toRPCErr(err)
				}
				continue
			}
			t.CloseStream(stream, err)
			return toRPCErr(err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
		}
		t.CloseStream(stream, nil)
		if put != nil {
			put()
			put = nil
		}
		return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
	}
}
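Since this version's doc comment notes that users may call Invoke directly, here is a minimal sketch of such a call, roughly what a generated unary stub does. The service path, message types, and the pb package are hypothetical placeholders.

package main

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pb "example.com/hello/proto" // hypothetical generated package
)

func callSayHello(cc *grpc.ClientConn) (*pb.HelloReply, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	in := &pb.HelloRequest{Name: "world"}
	out := new(pb.HelloReply)
	// Invoke runs the CallOption before/after hooks, obtains a transport,
	// sends the request, and blocks until the response (or an error) arrives.
	if err := grpc.Invoke(ctx, "/hello.Greeter/SayHello", in, out, cc); err != nil {
		return nil, err
	}
	return out, nil
}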
// Invoke is called by the generated code. It sends the RPC request on the
// wire and returns after response is received.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
	var c callInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	if EnableTracing {
		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		defer c.traceInfo.tr.Finish()
		c.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
		defer func() {
			if err != nil {
				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
				c.traceInfo.tr.SetError()
			}
		}()
	}
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	var (
		lastErr error // record the error that happened
	)
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
		)
		// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
		if lastErr != nil && c.failFast {
			return toRPCErr(lastErr)
		}
		callHdr := &transport.CallHdr{
			Host:   cc.authority,
			Method: method,
		}
		t, err = cc.dopts.picker.Pick(ctx)
		if err != nil {
			if lastErr != nil {
				// This was a retry; return the error from the last attempt.
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
		}
		stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts)
		if err != nil {
			if _, ok := err.(transport.ConnectionError); ok {
				lastErr = err
				continue
			}
			if lastErr != nil {
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		// Receive the response
		lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply)
		if _, ok := lastErr.(transport.ConnectionError); ok {
			continue
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
		}
		t.CloseStream(stream, lastErr)
		if lastErr != nil {
			return toRPCErr(lastErr)
		}
		return Errorf(stream.StatusCode(), stream.StatusDesc())
	}
}
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
	var (
		t   transport.ClientTransport
		s   *transport.Stream
		put func()
	)
	c := defaultCallInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
		Flush:  desc.ServerStreams && desc.ClientStreams,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	var trInfo traceInfo
	if EnableTracing {
		trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, trInfo.tr)
		defer func() {
			if err != nil {
				// Need to call tr.finish() if error is returned.
				// Because tr will not be returned to caller.
				trInfo.tr.LazyPrintf("RPC: [%v]", err)
				trInfo.tr.SetError()
				trInfo.tr.Finish()
			}
		}()
	}
	if stats.On() {
		ctx = stats.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
		begin := &stats.Begin{
			Client:    true,
			BeginTime: time.Now(),
			FailFast:  c.failFast,
		}
		stats.HandleRPC(ctx, begin)
	}
	defer func() {
		if err != nil && stats.On() {
			// Only handle end stats if err != nil.
			end := &stats.End{
				Client: true,
				Error:  err,
			}
			stats.HandleRPC(ctx, end)
		}
	}()
	gopts := BalancerGetOptions{
		BlockingWait: !c.failFast,
	}
	for {
		t, put, err = cc.getTransport(ctx, gopts)
		if err != nil {
			// TODO(zhaoq): Probably revisit the error handling.
			if _, ok := err.(*rpcError); ok {
				return nil, err
			}
			if err == errConnClosing || err == errConnUnavailable {
				if c.failFast {
					return nil, Errorf(codes.Unavailable, "%v", err)
				}
				continue
			}
			// All the other errors are treated as Internal errors.
			return nil, Errorf(codes.Internal, "%v", err)
		}
		s, err = t.NewStream(ctx, callHdr)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
				if c.failFast {
					return nil, toRPCErr(err)
				}
				continue
			}
			return nil, toRPCErr(err)
		}
		break
	}
	cs := &clientStream{
		opts:     opts,
		c:        c,
		desc:     desc,
		codec:    cc.dopts.codec,
		cp:       cc.dopts.cp,
		dc:       cc.dopts.dc,
		put:      put,
		t:        t,
		s:        s,
		p:        &parser{r: s},
		tracing:  EnableTracing,
		trInfo:   trInfo,
		statsCtx: ctx,
	}
	if cc.dopts.cp != nil {
		cs.cbuf = new(bytes.Buffer)
	}
	// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
	// when there is no pending I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// Incur transport error, simply exit.
		case <-s.Done():
			// TODO: The trace of the RPC is terminated here when there is no pending
			// I/O, which is probably not the optimal solution.
			if s.StatusCode() == codes.OK {
				cs.finish(nil)
			} else {
				cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
			}
			cs.closeTransportStream(nil)
		case <-s.GoAway():
			cs.finish(errConnDrain)
			cs.closeTransportStream(errConnDrain)
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}
func (cc *Conn) resetTransport(closeTransport bool) error {
	var retries int
	start := time.Now()
	for {
		cc.mu.Lock()
		cc.printf("connecting")
		if cc.state == Shutdown {
			// cc.Close() has been invoked.
			cc.mu.Unlock()
			return ErrClientConnClosing
		}
		cc.state = Connecting
		cc.stateCV.Broadcast()
		cc.mu.Unlock()
		if closeTransport {
			cc.transport.Close()
		}
		// Adjust timeout for the current try.
		copts := cc.dopts.copts
		if copts.Timeout < 0 {
			cc.Close()
			return ErrClientConnTimeout
		}
		if copts.Timeout > 0 {
			copts.Timeout -= time.Since(start)
			if copts.Timeout <= 0 {
				cc.Close()
				return ErrClientConnTimeout
			}
		}
		sleepTime := backoff(retries)
		timeout := sleepTime
		if timeout < minConnectTimeout {
			timeout = minConnectTimeout
		}
		if copts.Timeout == 0 || copts.Timeout > timeout {
			copts.Timeout = timeout
		}
		connectTime := time.Now()
		addr, err := cc.dopts.picker.PickAddr()
		var newTransport transport.ClientTransport
		if err == nil {
			newTransport, err = transport.NewClientTransport(addr, &copts)
		}
		if err != nil {
			cc.mu.Lock()
			if cc.state == Shutdown {
				// cc.Close() has been invoked.
				cc.mu.Unlock()
				return ErrClientConnClosing
			}
			cc.errorf("transient failure: %v", err)
			cc.state = TransientFailure
			cc.stateCV.Broadcast()
			if cc.ready != nil {
				close(cc.ready)
				cc.ready = nil
			}
			cc.mu.Unlock()
			sleepTime -= time.Since(connectTime)
			if sleepTime < 0 {
				sleepTime = 0
			}
			// Fail early before falling into sleep.
			if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) {
				cc.mu.Lock()
				cc.errorf("connection timeout")
				cc.mu.Unlock()
				cc.Close()
				return ErrClientConnTimeout
			}
			closeTransport = false
			time.Sleep(sleepTime)
			retries++
			grpclog.Printf("grpc: Conn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target)
			continue
		}
		cc.mu.Lock()
		cc.printf("ready")
		if cc.state == Shutdown {
			// cc.Close() has been invoked.
			cc.mu.Unlock()
			newTransport.Close()
			return ErrClientConnClosing
		}
		cc.state = Ready
		cc.stateCV.Broadcast()
		cc.transport = newTransport
		if cc.ready != nil {
			close(cc.ready)
			cc.ready = nil
		}
		cc.mu.Unlock()
		return nil
	}
}
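Between attempts, resetTransport sleeps for backoff(retries). The sketch below is an illustrative exponential backoff with jitter; the constants (1s base, 1.6 factor, 0.2 jitter, 120s cap) follow gRPC's documented connection-backoff defaults, but this is not necessarily the exact backoff function used by this snapshot.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff returns how long to sleep before the given retry attempt:
// exponential growth capped at maxDelay, randomized by +/-20% jitter
// so reconnecting clients do not stampede the server at the same instant.
func backoff(retries int) time.Duration {
	const (
		factor = 1.6
		jitter = 0.2
	)
	baseDelay := float64(time.Second)
	maxDelay := float64(120 * time.Second)
	if retries == 0 {
		return time.Duration(baseDelay)
	}
	d := baseDelay
	for i := 0; i < retries && d < maxDelay; i++ {
		d *= factor
	}
	if d > maxDelay {
		d = maxDelay
	}
	d *= 1 + jitter*(rand.Float64()*2-1)
	if d < 0 {
		d = 0
	}
	return time.Duration(d)
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(i, backoff(i))
	}
}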
// Invoke is called by the generated code. It sends the RPC request on the
// wire and returns after response is received.
func Invoke(ctx context.Context, method string, args, reply proto.Message, cc *ClientConn, opts ...CallOption) error {
	var c callInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	host, _, err := net.SplitHostPort(cc.target)
	if err != nil {
		return toRPCErr(err)
	}
	callHdr := &transport.CallHdr{
		Host:   host,
		Method: method,
	}
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	ts := 0
	var lastErr error // record the error that happened
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
		)
		// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
		if lastErr != nil && c.failFast {
			return lastErr
		}
		t, ts, err = cc.wait(ctx, ts)
		if err != nil {
			if lastErr != nil {
				// This was a retry; return the error from the last attempt.
				return lastErr
			}
			return err
		}
		stream, err = sendRPC(ctx, callHdr, t, args, topts)
		if err != nil {
			if _, ok := err.(transport.ConnectionError); ok {
				lastErr = err
				continue
			}
			if lastErr != nil {
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		// Receive the response
		lastErr = recv(t, &c, stream, reply)
		if _, ok := lastErr.(transport.ConnectionError); ok {
			continue
		}
		t.CloseStream(stream, lastErr)
		if lastErr != nil {
			return toRPCErr(lastErr)
		}
		return Errorf(stream.StatusCode(), stream.StatusDesc())
	}
}
// Invoke is called by the generated code. It sends the RPC request on the
// wire and returns after response is received.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
	var c callInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
	}
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	var (
		ts      int   // track the transport sequence number
		lastErr error // record the error that happened
	)
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
		)
		// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
		if lastErr != nil && c.failFast {
			return toRPCErr(lastErr)
		}
		t, ts, err = cc.wait(ctx, ts)
		if err != nil {
			if lastErr != nil {
				// This was a retry; return the error from the last attempt.
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts)
		if err != nil {
			if _, ok := err.(transport.ConnectionError); ok {
				lastErr = err
				continue
			}
			if lastErr != nil {
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		// Receive the response
		lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply)
		if _, ok := lastErr.(transport.ConnectionError); ok {
			continue
		}
		t.CloseStream(stream, lastErr)
		if lastErr != nil {
			return toRPCErr(lastErr)
		}
		return Errorf(stream.StatusCode(), stream.StatusDesc())
	}
}
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
	c := defaultCallInfo
	if mc, ok := cc.getMethodConfig(method); ok {
		c.failFast = !mc.WaitForReady
		if mc.Timeout > 0 {
			var cancel context.CancelFunc
			ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
			defer cancel()
		}
	}
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	if EnableTracing {
		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		defer c.traceInfo.tr.Finish()
		c.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
		defer func() {
			if e != nil {
				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
				c.traceInfo.tr.SetError()
			}
		}()
	}
	sh := cc.dopts.copts.StatsHandler
	if sh != nil {
		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
		begin := &stats.Begin{
			Client:    true,
			BeginTime: time.Now(),
			FailFast:  c.failFast,
		}
		sh.HandleRPC(ctx, begin)
	}
	defer func() {
		if sh != nil {
			end := &stats.End{
				Client:  true,
				EndTime: time.Now(),
				Error:   e,
			}
			sh.HandleRPC(ctx, end)
		}
	}()
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
			// Record the put handler from Balancer.Get(...). It is called once the
			// RPC has completed or failed.
			put func()
		)
		// TODO(zhaoq): Need a formal spec of fail-fast.
		callHdr := &transport.CallHdr{
			Host:   cc.authority,
			Method: method,
		}
		if cc.dopts.cp != nil {
			callHdr.SendCompress = cc.dopts.cp.Type()
		}
		gopts := BalancerGetOptions{
			BlockingWait: !c.failFast,
		}
		t, put, err = cc.getTransport(ctx, gopts)
		if err != nil {
			// TODO(zhaoq): Probably revisit the error handling.
			if _, ok := err.(*rpcError); ok {
				return err
			}
			if err == errConnClosing || err == errConnUnavailable {
				if c.failFast {
					return Errorf(codes.Unavailable, "%v", err)
				}
				continue
			}
			// All the other errors are treated as Internal errors.
			return Errorf(codes.Internal, "%v", err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
		}
		stream, err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, t, args, topts)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			// Retry a non-failfast RPC when
			// i) there is a connection error; or
			// ii) the server started to drain before this RPC was initiated.
			if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
				if c.failFast {
					return toRPCErr(err)
				}
				continue
			}
			return toRPCErr(err)
		}
		err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
				if c.failFast {
					return toRPCErr(err)
				}
				continue
			}
			return toRPCErr(err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
		}
		t.CloseStream(stream, nil)
		if put != nil {
			put()
			put = nil
		}
		return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
	}
}
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		s   *transport.Stream
		err error
		put func()
	)
	c := defaultCallInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
		Flush:  desc.ServerStreams && desc.ClientStreams,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	cs := &clientStream{
		opts:    opts,
		c:       c,
		desc:    desc,
		codec:   cc.dopts.codec,
		cp:      cc.dopts.cp,
		dc:      cc.dopts.dc,
		tracing: EnableTracing,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
		cs.cbuf = new(bytes.Buffer)
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	gopts := BalancerGetOptions{
		BlockingWait: !c.failFast,
	}
	for {
		t, put, err = cc.getTransport(ctx, gopts)
		if err != nil {
			// TODO(zhaoq): Probably revisit the error handling.
			if _, ok := err.(*rpcError); ok {
				return nil, err
			}
			if err == errConnClosing {
				if c.failFast {
					return nil, Errorf(codes.Unavailable, "%v", errConnClosing)
				}
				continue
			}
			// All the other errors are treated as Internal errors.
			return nil, Errorf(codes.Internal, "%v", err)
		}
		s, err = t.NewStream(ctx, callHdr)
		if err != nil {
			if put != nil {
				put()
				put = nil
			}
			if _, ok := err.(transport.ConnectionError); ok {
				if c.failFast {
					cs.finish(err)
					return nil, toRPCErr(err)
				}
				continue
			}
			return nil, toRPCErr(err)
		}
		break
	}
	cs.put = put
	cs.t = t
	cs.s = s
	cs.p = &parser{r: s}
	// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
	// when there is no pending I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// Incur transport error, simply exit.
		case <-s.Done():
			// TODO: The trace of the RPC is terminated here when there is no pending
			// I/O, which is probably not the optimal solution.
			if s.StatusCode() == codes.OK {
				cs.finish(nil)
			} else {
				cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
			}
			cs.closeTransportStream(nil)
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}
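To see how the returned ClientStream is driven, here is a sketch of a client-streaming call in the shape generated stubs use: SendMsg for each request, CloseSend to half-close the send direction, then RecvMsg for the single reply. The StreamDesc, method path, and pb message types are hypothetical placeholders.

package main

import (
	"io"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pb "example.com/routeguide/proto" // hypothetical generated package
)

// recordRouteDesc describes a client-streaming method (placeholder values).
var recordRouteDesc = grpc.StreamDesc{
	StreamName:    "RecordRoute",
	ClientStreams: true,
}

func recordRoute(ctx context.Context, cc *grpc.ClientConn, points []*pb.Point) (*pb.RouteSummary, error) {
	stream, err := grpc.NewClientStream(ctx, &recordRouteDesc, cc, "/routeguide.RouteGuide/RecordRoute")
	if err != nil {
		return nil, err
	}
	// Send each request message on the stream.
	for _, p := range points {
		if err := stream.SendMsg(p); err != nil && err != io.EOF {
			return nil, err
		}
	}
	// Half-close the send direction, then read the single reply.
	if err := stream.CloseSend(); err != nil {
		return nil, err
	}
	out := new(pb.RouteSummary)
	if err := stream.RecvMsg(out); err != nil {
		return nil, err
	}
	return out, nil
}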