// WaitForStateChange blocks until the state changes to something other than
// the sourceState.
func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	if sourceState != cc.state {
		return cc.state, nil
	}
	done := make(chan struct{})
	var err error
	go func() {
		select {
		case <-ctx.Done():
			cc.mu.Lock()
			err = ctx.Err()
			cc.stateCV.Broadcast()
			cc.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for sourceState == cc.state {
		cc.stateCV.Wait()
		if err != nil {
			return cc.state, err
		}
	}
	return cc.state, nil
}
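// Usage sketch (illustrative, not part of the original source): spin until
// the connection reaches Ready or ctx expires. Assumes a State() accessor on
// *Conn and the Ready ConnectivityState constant.
func waitUntilReady(ctx context.Context, cc *Conn) error {
	for {
		state := cc.State()
		if state == Ready {
			return nil
		}
		// Blocks until the state moves off `state` or ctx is done.
		if _, err := cc.WaitForStateChange(ctx, state); err != nil {
			return err
		}
	}
}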
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc
// is closed.
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, ErrClientConnClosing
		case cc.state == Ready:
			cc.mu.Unlock()
			return cc.transport, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
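// Producer-side sketch (an assumption, not from the original source): the
// code that moves cc into Ready is expected to close cc.ready, since closing
// a channel is the one-to-many broadcast that wakes every goroutine parked
// in the select inside Wait.
func (cc *Conn) transportReady(t transport.ClientTransport) {
	cc.mu.Lock()
	cc.transport = t
	cc.state = Ready
	if cc.ready != nil {
		close(cc.ready) // wake all Wait callers at once
		cc.ready = nil  // the next waiter allocates a fresh channel
	}
	cc.mu.Unlock()
}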
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		tracing: EnableTracing,
	}
	if cs.tracing {
		cs.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.traceInfo.tr.LazyLog(&cs.traceInfo.firstLine, false)
	}
	t, _, err := cc.wait(ctx, 0)
	if err != nil {
		return nil, toRPCErr(err)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{s: s}
	return cs, nil
}
// wait blocks until either the new transport is up or ClientConn is closing.
// It is used to avoid working on a dying transport and, when there is no
// error, returns the transport and its version.
func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, 0, ErrClientConnClosing
		case ts < cc.transportSeq:
			// Worked on a dying transport. Try the new one immediately.
			defer cc.mu.Unlock()
			return cc.transport, cc.transportSeq, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, 0, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
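// Usage sketch (an assumption, not from the original source): threading the
// version returned by wait through a retry loop. Passing the failed
// transport's version back in makes wait return immediately once a newer
// transport exists, instead of re-running the RPC on the dying one.
func (cc *ClientConn) withTransport(ctx context.Context, f func(transport.ClientTransport) error) error {
	ts := 0
	for {
		t, seq, err := cc.wait(ctx, ts)
		if err != nil {
			return err
		}
		if err := f(t); err == nil {
			return nil
		}
		// A real caller would retry only on connection errors; this sketch
		// retries unconditionally for brevity.
		ts = seq
	}
}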
// Do sends an HTTP request with the provided http.Client and returns an HTTP
// response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
	cancel := canceler(client, req)

	type responseAndError struct {
		resp *http.Response
		err  error
	}
	result := make(chan responseAndError, 1)

	go func() {
		resp, err := client.Do(req)
		result <- responseAndError{resp, err}
	}()

	select {
	case <-ctx.Done():
		cancel()
		return nil, ctx.Err()
	case r := <-result:
		return r.resp, r.err
	}
}
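// Usage sketch (not part of the original source): a GET with a two-second
// budget. If the deadline fires before the response arrives, Do cancels the
// in-flight request and returns ctx.Err().
func fetchWithTimeout(url string) (*http.Response, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	// A nil client selects http.DefaultClient.
	return Do(ctx, nil, req)
}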
// wait blocks until it can receive from ctx.Done, closing, or proceed.
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
// If it receives from closing, it returns 0, ErrConnClosing.
// If it receives from proceed, it returns the received integer, nil.
func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ContextErr(ctx.Err())
	case <-closing:
		return 0, ErrConnClosing
	case i := <-proceed:
		return i, nil
	}
}
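// Usage sketch (an assumption, not from the original source), mirroring how
// NewStream below consumes this helper: receive the whole quota balance,
// keep one unit for the new stream, and hand the remainder back. The
// quotaPool type with acquire()/add(...) is assumed from the surrounding
// transport package.
func takeOneStreamSlot(ctx context.Context, shutdownChan <-chan struct{}, quota *quotaPool) error {
	sq, err := wait(ctx, shutdownChan, quota.acquire())
	if err != nil {
		// Either ctx expired (a StreamError) or the connection is closing.
		return err
	}
	if sq > 1 {
		quota.add(sq - 1) // return everything but the one slot we consume
	}
	return nil
}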
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		tracing: EnableTracing,
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		cs.finish(err)
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{s: s}
	// Listen on ctx.Done() to detect cancellation when there are no pending
	// I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// The transport reported an error; simply exit.
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}
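// Usage sketch (an assumption, not from the original source): roughly the
// shape of a generated streaming stub. The descriptor values and the method
// path are hypothetical.
var greeterStreamDesc = StreamDesc{
	StreamName:    "SayHelloStream",
	ClientStreams: true,
	ServerStreams: true,
}

func newGreeterStream(ctx context.Context, cc *ClientConn) (ClientStream, error) {
	return NewClientStream(ctx, &greeterStreamDesc, cc, "/helloworld.Greeter/SayHelloStream")
}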
// FromContext returns the Trace bound to the context, if any.
func FromContext(ctx context.Context) (tr Trace, ok bool) {
	tr, ok = ctx.Value(contextKey).(Trace)
	return
}
// FromContext returns the peer information in ctx if it exists.
func FromContext(ctx context.Context) (p *Peer, ok bool) {
	p, ok = ctx.Value(peerKey{}).(*Peer)
	return
}
// FromContext returns the MD in ctx if it exists.
func FromContext(ctx context.Context) (md MD, ok bool) {
	md, ok = ctx.Value(mdKey{}).(MD)
	return
}
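// Usage sketch (not part of the original source): the FromContext-style
// accessors in this section all share the same comma-ok shape over context
// values. For instance, pulling a request header out of the metadata MD
// (a map[string][]string) inside a handler:
func userAgent(ctx context.Context) string {
	md, ok := FromContext(ctx)
	if !ok {
		return ""
	}
	if ua := md["user-agent"]; len(ua) > 0 {
		return ua[0]
	}
	return ""
}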
// Invoke is called by the generated code. It sends the RPC request on the
// wire and returns after the response is received.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
	var c callInfo
	for _, o := range opts {
		if err := o.before(&c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(&c)
		}
	}()
	if EnableTracing {
		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		defer c.traceInfo.tr.Finish()
		c.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
		defer func() {
			if err != nil {
				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
				c.traceInfo.tr.SetError()
			}
		}()
	}
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	var (
		lastErr error // record the error that happened
	)
	for {
		var (
			err    error
			t      transport.ClientTransport
			stream *transport.Stream
		)
		// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
		if lastErr != nil && c.failFast {
			return toRPCErr(lastErr)
		}
		callHdr := &transport.CallHdr{
			Host:   cc.authority,
			Method: method,
		}
		t, err = cc.dopts.picker.Pick(ctx)
		if err != nil {
			if lastErr != nil {
				// This was a retry; return the error from the last attempt.
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
		}
		stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts)
		if err != nil {
			if _, ok := err.(transport.ConnectionError); ok {
				lastErr = err
				continue
			}
			if lastErr != nil {
				return toRPCErr(lastErr)
			}
			return toRPCErr(err)
		}
		// Receive the response.
		lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply)
		if _, ok := lastErr.(transport.ConnectionError); ok {
			continue
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
		}
		t.CloseStream(stream, lastErr)
		if lastErr != nil {
			return toRPCErr(lastErr)
		}
		return Errorf(stream.StatusCode(), stream.StatusDesc())
	}
}
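// Usage sketch (an assumption, not from the original source): this is
// roughly what a generated unary stub does with Invoke. The request/reply
// types and the method path are hypothetical.
func sayHello(ctx context.Context, cc *ClientConn, in *HelloRequest) (*HelloReply, error) {
	out := new(HelloReply)
	if err := Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, cc); err != nil {
		return nil, err
	}
	return out, nil
}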
// StreamFromContext returns the stream saved in ctx.
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
	s, ok = ctx.Value(streamKey).(*Stream)
	return
}
// NewStream creates a stream and registers it with the transport as an
// "active" stream.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
	// Record the timeout value on the context.
	var timeout time.Duration
	if dl, ok := ctx.Deadline(); ok {
		timeout = dl.Sub(time.Now())
		if timeout <= 0 {
			return nil, ContextErr(context.DeadlineExceeded)
		}
	}
	pr := &peer.Peer{
		Addr: t.conn.RemoteAddr(),
	}
	// Attach Auth info if there is any.
	if t.authInfo != nil {
		pr.AuthInfo = t.authInfo
	}
	ctx = peer.NewContext(ctx, pr)
	authData := make(map[string]string)
	for _, c := range t.authCreds {
		// Construct the URI required to get auth request metadata.
		var port string
		if pos := strings.LastIndex(t.target, ":"); pos != -1 {
			// Omit port if it is the default one.
			if t.target[pos+1:] != "443" {
				port = ":" + t.target[pos+1:]
			}
		}
		pos := strings.LastIndex(callHdr.Method, "/")
		if pos == -1 {
			return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
		}
		audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
		data, err := c.GetRequestMetadata(ctx, audience)
		if err != nil {
			return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err)
		}
		for k, v := range data {
			authData[k] = v
		}
	}
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return nil, ErrConnClosing
	}
	checkStreamsQuota := t.streamsQuota != nil
	t.mu.Unlock()
	if checkStreamsQuota {
		sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire())
		if err != nil {
			return nil, err
		}
		// Return the unused quota balance.
		if sq > 1 {
			t.streamsQuota.add(sq - 1)
		}
	}
	if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil {
		// t.streamsQuota will be updated when t.CloseStream is invoked.
		return nil, err
	}
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return nil, ErrConnClosing
	}
	s := t.newStream(ctx, callHdr)
	t.activeStreams[s.id] = s
	// This stream is not counted when applySettings(...) initializes
	// t.streamsQuota. Reset t.streamsQuota to the right value.
	var reset bool
	if !checkStreamsQuota && t.streamsQuota != nil {
		reset = true
	}
	t.mu.Unlock()
	if reset {
		t.streamsQuota.reset(-1)
	}
	// HPACK encodes various headers. Note that once WriteField(...) is
	// called, the corresponding headers/continuation frame has to be sent
	// because hpack.Encoder is stateful.
	t.hBuf.Reset()
	t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
	t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
	t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
	t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
	t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
	t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
	t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
	if timeout > 0 {
		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
	}
	for k, v := range authData {
		t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
	}
	var (
		hasMD      bool
		endHeaders bool
	)
	if md, ok := metadata.FromContext(ctx); ok {
		hasMD = true
		for k, v := range md {
			for _, entry := range v {
				t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
			}
		}
	}
	first := true
	// Send the headers in a single batch even when they span multiple frames.
	for !endHeaders {
		size := t.hBuf.Len()
		if size > http2MaxFrameLen {
			size = http2MaxFrameLen
		} else {
			endHeaders = true
		}
		if first {
			// Send a HeadersFrame to the server to start a new stream.
			p := http2.HeadersFrameParam{
				StreamID:      s.id,
				BlockFragment: t.hBuf.Next(size),
				EndStream:     false,
				EndHeaders:    endHeaders,
			}
			// Force-flush the buffered frames iff this is the last headers
			// frame and there is header metadata to be sent. Otherwise,
			// there is no flushing until the corresponding data frame is
			// written.
			err = t.framer.writeHeaders(hasMD && endHeaders, p)
			first = false
		} else {
			// Send CONTINUATION frames for the leftover headers.
			err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size))
		}
		if err != nil {
			t.notifyError(err)
			return nil, ConnectionErrorf("transport: %v", err)
		}
	}
	t.writableChan <- 0
	return s, nil
}
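// Design note as a sketch (an assumption, not from the original source):
// writableChan acts as a one-token semaphore guarding the framer. A writer
// takes the token via wait(...) and must put it back when finished, which is
// why NewStream ends with `t.writableChan <- 0` after the last header frame.
func (t *http2Client) withWriteToken(ctx context.Context, f func() error) error {
	if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil {
		return err
	}
	defer func() { t.writableChan <- 0 }()
	return f()
}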
// FromContext returns the authInfo in ctx if it exists.
func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) {
	authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo)
	return
}