// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		tracing: EnableTracing,
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{s: s}
	// Listen on ctx.Done() to detect cancellation when there are no pending
	// I/O operations on this stream.
	go func() {
		<-s.Context().Done()
		cs.closeTransportStream(transport.ContextErr(s.Context().Err()))
	}()
	return cs, nil
}
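
// A minimal sketch of how generated streaming code might call
// NewClientStream, included for illustration only. The echoClient,
// EchoService_EchoStreamClient, and echoStreamClient names are
// hypothetical placeholders, not types defined in this package.
func (c *echoClient) EchoStream(ctx context.Context, opts ...CallOption) (EchoService_EchoStreamClient, error) {
	// Describe the method as bidirectional streaming.
	desc := &StreamDesc{
		StreamName:    "EchoStream",
		ClientStreams: true,
		ServerStreams: true,
	}
	stream, err := NewClientStream(ctx, desc, c.cc, "/echo.EchoService/EchoStream", opts...)
	if err != nil {
		return nil, err
	}
	// Wrap the raw ClientStream in a typed wrapper that exposes
	// Send/Recv methods for the service's message types.
	return &echoStreamClient{stream}, nil
}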
// Invoke is called by the generated code. It sends the RPC request on the
// wire and returns after the response is received.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
	var c callInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	if EnableTracing {
		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		defer c.traceInfo.tr.Finish()
		c.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
		defer func() {
			if err != nil {
				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
				c.traceInfo.tr.SetError()
			}
		}()
	}
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	var (
		lastErr error // record the error returned by the previous attempt
	)
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
		)
		// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
		if lastErr != nil && c.failFast {
			return toRPCErr(lastErr)
		}
		callHdr := &transport.CallHdr{
			Host:   cc.authority,
			Method: method,
		}
		t, err = cc.dopts.picker.Pick(ctx)
		if err != nil {
			if lastErr != nil {
				// This was a retry; return the error from the last attempt.
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
		}
		stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts)
		if err != nil {
			if _, ok := err.(transport.ConnectionError); ok {
				lastErr = err
				continue
			}
			if lastErr != nil {
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		// Receive the response.
		lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply)
		if _, ok := lastErr.(transport.ConnectionError); ok {
			continue
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
		}
		t.CloseStream(stream, lastErr)
		if lastErr != nil {
			return toRPCErr(lastErr)
		}
		return Errorf(stream.StatusCode(), stream.StatusDesc())
	}
}
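
// A minimal sketch of the unary wrapper that generated code emits around
// Invoke, included for illustration only. The greeterClient, HelloRequest,
// and HelloReply names are hypothetical message and client types, not
// defined in this package.
func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...CallOption) (*HelloReply, error) {
	out := new(HelloReply)
	// Invoke serializes in, sends the request, and unmarshals the
	// response into out, retrying on connection errors as shown above.
	if err := Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...); err != nil {
		return nil, err
	}
	return out, nil
}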
// Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to
// them. Serve returns when lis.Accept fails.
func (s *Server) Serve(lis net.Listener) error {
	s.mu.Lock()
	s.printf("serving")
	if s.lis == nil {
		s.mu.Unlock()
		return ErrServerStopped
	}
	s.lis[lis] = true
	s.mu.Unlock()
	defer func() {
		lis.Close()
		s.mu.Lock()
		delete(s.lis, lis)
		s.mu.Unlock()
	}()
	for {
		c, err := lis.Accept()
		if err != nil {
			s.mu.Lock()
			s.printf("done serving; Accept = %v", err)
			s.mu.Unlock()
			return err
		}
		var authInfo credentials.AuthInfo
		if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok {
			var conn net.Conn
			conn, authInfo, err = creds.ServerHandshake(c)
			if err != nil {
				s.mu.Lock()
				s.errorf("ServerHandshake(%q) failed: %v", c.RemoteAddr(), err)
				s.mu.Unlock()
				grpclog.Println("grpc: Server.Serve failed to complete security handshake.")
				continue
			}
			c = conn
		}
		s.mu.Lock()
		if s.conns == nil {
			s.mu.Unlock()
			c.Close()
			return nil
		}
		st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
		if err != nil {
			s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
			s.mu.Unlock()
			c.Close()
			grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
			continue
		}
		s.conns[st] = true
		s.mu.Unlock()
		go func() {
			var wg sync.WaitGroup
			st.HandleStreams(func(stream *transport.Stream) {
				var trInfo *traceInfo
				if EnableTracing {
					trInfo = &traceInfo{
						tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
					}
					trInfo.firstLine.client = false
					trInfo.firstLine.remoteAddr = st.RemoteAddr()
					stream.TraceContext(trInfo.tr)
					if dl, ok := stream.Context().Deadline(); ok {
						trInfo.firstLine.deadline = dl.Sub(time.Now())
					}
				}
				wg.Add(1)
				go func() {
					s.handleStream(st, stream, trInfo)
					wg.Done()
				}()
			})
			wg.Wait()
			s.mu.Lock()
			delete(s.conns, st)
			s.mu.Unlock()
		}()
	}
}
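
// A minimal sketch of typical server startup around Serve, included for
// illustration only. The RegisterGreeterServer helper and greeterServer
// type are hypothetical stand-ins for a registered service implementation.
func serveExample() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}
	s := NewServer()
	// Register the service implementation before accepting connections.
	RegisterGreeterServer(s, &greeterServer{})
	// Serve blocks until lis.Accept fails, e.g. after Stop closes the
	// listener.
	if err := s.Serve(lis); err != nil {
		grpclog.Fatalf("failed to serve: %v", err)
	}
}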