// wait blocks until it can receive from ctx.Done, closing, or proceed.
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
// If it receives from closing, it returns 0, ErrConnClosing.
// If it receives from proceed, it returns the received integer, nil.
func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ContextErr(ctx.Err())
	case <-closing:
		return 0, ErrConnClosing
	case i := <-proceed:
		return i, nil
	}
}
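// A hedged usage sketch of the receive-with-cancellation shape above.
// waitDemo and errClosing are stand-ins invented for illustration, not the
// transport's real types.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// errClosing stands in for ErrConnClosing in this sketch.
var errClosing = errors.New("connection is closing")

// waitDemo mirrors wait: block until the context is done, the closing
// channel fires, or a token arrives on proceed.
func waitDemo(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-closing:
		return 0, errClosing
	case i := <-proceed:
		return i, nil
	}
}

func main() {
	proceed := make(chan int, 1)
	proceed <- 42

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	i, err := waitDemo(ctx, make(chan struct{}), proceed)
	fmt.Println(i, err) // 42 <nil>
}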
func (mn *multiNode) step(ctx context.Context, m multiMessage) error {
	ch := mn.recvc
	if m.msg.Type == pb.MsgProp {
		ch = mn.propc
	}

	select {
	case ch <- m:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-mn.done:
		return ErrStopped
	}
}
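// step is the send-side counterpart of wait: deliver on a channel unless
// the caller gives up or the component shuts down first. A hedged sketch
// with invented names (send, errStopped):
package main

import (
	"context"
	"errors"
	"fmt"
)

var errStopped = errors.New("stopped") // stand-in for ErrStopped

// send delivers m on ch unless ctx is cancelled or done closes first,
// mirroring the shape of step above.
func send(ctx context.Context, ch chan<- string, done <-chan struct{}, m string) error {
	select {
	case ch <- m:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-done:
		return errStopped
	}
}

func main() {
	ch := make(chan string, 1)
	fmt.Println(send(context.Background(), ch, make(chan struct{}), "MsgProp")) // <nil>

	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(send(ctx, make(chan string), make(chan struct{}), "MsgProp")) // context canceled
}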
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		err := c.Sync(ctx)
		if err != nil {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
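// AutoSync is a sync-then-wait loop: one Sync per tick, stopping as soon as
// the context is cancelled. A hedged, self-contained sketch of the same
// shape; autoSync and the sync callback are invented stand-ins for the
// method and c.Sync.
package main

import (
	"context"
	"fmt"
	"time"
)

func autoSync(ctx context.Context, interval time.Duration, sync func(context.Context) error) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if err := sync(ctx); err != nil {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	err := autoSync(ctx, 100*time.Millisecond, func(context.Context) error {
		fmt.Println("synced")
		return nil
	})
	fmt.Println(err) // context deadline exceeded
}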
func contextClient(ctx context.Context) (*http.Client, error) {
	for _, fn := range contextClientFuncs {
		c, err := fn(ctx)
		if err != nil {
			return nil, err
		}
		if c != nil {
			return c, nil
		}
	}
	if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
		return hc, nil
	}
	return http.DefaultClient, nil
}
func handleKeyWatch(ctx context.Context, w http.ResponseWriter, wa store.Watcher, stream bool, rt etcdserver.RaftTimer) {
	defer wa.Remove()
	ech := wa.EventChan()
	var nch <-chan bool
	if x, ok := w.(http.CloseNotifier); ok {
		nch = x.CloseNotify()
	}

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex()))
	w.Header().Set("X-Raft-Index", fmt.Sprint(rt.Index()))
	w.Header().Set("X-Raft-Term", fmt.Sprint(rt.Term()))
	w.WriteHeader(http.StatusOK)

	// Ensure headers are flushed early, in case of long polling.
	w.(http.Flusher).Flush()

	for {
		select {
		case <-nch:
			// Client closed the connection. Nothing to do.
			return
		case <-ctx.Done():
			// Timed out. net/http will close the connection for us, so nothing to do.
			return
		case ev, ok := <-ech:
			if !ok {
				// A closed channel may indicate that notifications are arriving
				// far faster than we can send them to the client in time, so we
				// simply end streaming.
				return
			}
			ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
			if err := json.NewEncoder(w).Encode(ev); err != nil {
				// Should never be reached.
				plog.Warningf("error writing event (%v)", err)
				return
			}
			if !stream {
				return
			}
			w.(http.Flusher).Flush()
		}
	}
}
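// handleKeyWatch's skeleton — flush headers early, then loop selecting
// between client disconnect, context expiry, and new events — generalizes
// beyond etcd. A hedged sketch using the request's own context (Go 1.7+)
// instead of the deprecated http.CloseNotifier; streamEvents and the
// events channel are invented for illustration.
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"time"
)

// streamEvents writes one JSON object per event until the client goes away
// or the source channel closes.
func streamEvents(w http.ResponseWriter, r *http.Request, events <-chan interface{}) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.(http.Flusher).Flush() // flush headers early, as handleKeyWatch does

	for {
		select {
		case <-r.Context().Done():
			// Client closed the connection or the request timed out.
			return
		case ev, ok := <-events:
			if !ok {
				return
			}
			if err := json.NewEncoder(w).Encode(ev); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		}
	}
}

func main() {
	http.HandleFunc("/watch", func(w http.ResponseWriter, r *http.Request) {
		events := make(chan interface{})
		go func() {
			defer close(events)
			for i := 0; i < 3; i++ {
				events <- map[string]int{"seq": i}
				time.Sleep(100 * time.Millisecond)
			}
		}()
		streamEvents(w, r, events)
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}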
func (s *EtcdServer) V3DemoDo(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
	r.ID = s.reqIDGen.Next()
	data, err := r.Marshal()
	if err != nil {
		return &pb.EmptyResponse{}, err
	}
	ch := s.w.Register(r.ID)
	s.r.Propose(ctx, data)
	select {
	case x := <-ch:
		result := x.(*applyResult)
		return result.resp, result.err
	case <-ctx.Done():
		s.w.Trigger(r.ID, nil) // GC wait
		return &pb.EmptyResponse{}, ctx.Err()
	case <-s.done:
		return &pb.EmptyResponse{}, ErrStopped
	}
}
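// V3DemoDo's shape — register a wait channel keyed by a unique request ID,
// propose, then select on the result, the caller's context, and server
// shutdown — is a reusable pattern. A hedged sketch with an invented
// waitRegistry standing in for s.w:
package main

import (
	"context"
	"fmt"
	"sync"
)

// waitRegistry maps request IDs to one-shot result channels.
type waitRegistry struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWaitRegistry() *waitRegistry {
	return &waitRegistry{m: make(map[uint64]chan interface{})}
}

func (w *waitRegistry) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

// Trigger delivers a result, or nil to garbage-collect an abandoned wait.
func (w *waitRegistry) Trigger(id uint64, x interface{}) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if ch, ok := w.m[id]; ok {
		ch <- x
		delete(w.m, id)
	}
}

func main() {
	w := newWaitRegistry()
	ch := w.Register(1)

	// Elsewhere, an apply loop publishes the result once it is committed.
	go w.Trigger(1, "applied")

	ctx := context.Background()
	select {
	case x := <-ch:
		fmt.Println(x) // applied
	case <-ctx.Done():
		w.Trigger(1, nil) // GC the abandoned wait, as V3DemoDo does
		fmt.Println(ctx.Err())
	}
}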
// FromContext returns the MD in ctx if it exists.
func FromContext(ctx context.Context) (md MD, ok bool) {
	md, ok = ctx.Value(mdKey{}).(MD)
	return
}
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)

	if err := printcURL(req); err != nil {
		return nil, nil, err
	}

	hctx, hcancel := context.WithCancel(ctx)
	if c.headerTimeout > 0 {
		hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
	}
	defer hcancel()

	reqcancel := requestCanceler(c.transport, req)

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-hctx.Done():
		// cancel and wait for request to actually exit before continuing
		reqcancel()
		rtresp := <-rtchan
		resp = rtresp.resp
		switch {
		case ctx.Err() != nil:
			err = ctx.Err()
		case hctx.Err() != nil:
			err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
		default:
			panic("failed to get error from context")
		}
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		done <- struct{}{}
	}()

	select {
	case <-ctx.Done():
		resp.Body.Close()
		<-done
		return nil, nil, ctx.Err()
	case <-done:
	}

	return resp, body, err
}
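// The cancel-and-drain move above — on cancellation, abort the in-flight
// round trip, then receive from rtchan so the goroutine has really exited
// before the response is touched — is the heart of Do. A hedged sketch
// using Request.WithContext (Go 1.7+) instead of the older requestCanceler
// helper; doWithContext and roundTripResult are invented names.
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

type roundTripResult struct {
	resp *http.Response
	err  error
}

// doWithContext runs the round trip in a goroutine and, on cancellation,
// waits for that goroutine to actually exit before returning.
func doWithContext(ctx context.Context, rt http.RoundTripper, req *http.Request) (*http.Response, error) {
	rtchan := make(chan roundTripResult, 1) // buffered so the goroutine never leaks
	go func() {
		resp, err := rt.RoundTrip(req.WithContext(ctx))
		rtchan <- roundTripResult{resp, err}
	}()

	select {
	case r := <-rtchan:
		return r.resp, r.err
	case <-ctx.Done():
		// Cancellation aborts the round trip; drain before returning.
		if r := <-rtchan; r.resp != nil {
			r.resp.Body.Close()
		}
		return nil, ctx.Err()
	}
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
		time.Sleep(2 * time.Second) // slow handler to force the timeout path
	}))
	defer srv.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	req, _ := http.NewRequest("GET", srv.URL, nil)
	_, err := doWithContext(ctx, http.DefaultTransport, req)
	fmt.Println(err) // context deadline exceeded
}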
// StreamFromContext returns the stream saved in ctx.
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
	s, ok = ctx.Value(streamKey).(*Stream)
	return
}
// cc returns the internal *cloudContext (cc) state for a context.Context.
// It panics if the user did it wrong.
func cc(ctx context.Context) *cloudContext {
	if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {
		return c
	}
	panic("invalid context.Context type; it should be created with cloud.NewContext")
}
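// FromContext, StreamFromContext, and cc are all instances of the same
// context.Value idiom: an unexported key type makes collisions with other
// packages impossible, and the lookup either reports ok or panics on
// misuse. A hedged, self-contained sketch with invented types:
package main

import (
	"context"
	"fmt"
)

type state struct{ project string }

// contextKey is unexported, so no other package can construct the same key.
type contextKey struct{}

// NewContext returns a derived context carrying s.
func NewContext(ctx context.Context, s *state) context.Context {
	return context.WithValue(ctx, contextKey{}, s)
}

// fromContext is the tolerant variant: it reports whether the value exists.
func fromContext(ctx context.Context) (*state, bool) {
	s, ok := ctx.Value(contextKey{}).(*state)
	return s, ok
}

// mustState is the strict variant, like cc above: misuse is a programmer
// error, so it panics rather than returning an error.
func mustState(ctx context.Context) *state {
	if s, ok := fromContext(ctx); ok {
		return s
	}
	panic("invalid context.Context; create it with NewContext")
}

func main() {
	ctx := NewContext(context.Background(), &state{project: "demo"})
	fmt.Println(mustState(ctx).project) // demo

	if _, ok := fromContext(context.Background()); !ok {
		fmt.Println("no state attached")
	}
}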
// NewStream creates a stream and registers it with the transport as an
// "active" stream.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
	// Record the timeout value on the context.
	var timeout time.Duration
	if dl, ok := ctx.Deadline(); ok {
		timeout = dl.Sub(time.Now())
		if timeout <= 0 {
			return nil, ContextErr(context.DeadlineExceeded)
		}
	}
	authData := make(map[string]string)
	for _, c := range t.authCreds {
		data, err := c.GetRequestMetadata(ctx)
		if err != nil {
			return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err)
		}
		for k, v := range data {
			authData[k] = v
		}
	}
	if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil {
		return nil, err
	}
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return nil, ErrConnClosing
	}
	if uint32(len(t.activeStreams)) >= t.maxStreams {
		t.mu.Unlock()
		t.writableChan <- 0
		return nil, StreamErrorf(codes.Unavailable, "transport: failed to create new stream because the limit has been reached.")
	}
	s := t.newStream(ctx, callHdr)
	t.activeStreams[s.id] = s
	t.mu.Unlock()
	// HPACK encodes various headers. Note that once WriteField(...) is
	// called, the corresponding headers/continuation frame has to be sent
	// because hpack.Encoder is stateful.
	t.hBuf.Reset()
	t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
	t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
	t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
	t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
	t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
	t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
	if timeout > 0 {
		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
	}
	for k, v := range authData {
		t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
	}
	var (
		hasMD      bool
		endHeaders bool
	)
	if md, ok := metadata.FromContext(ctx); ok {
		hasMD = true
		for k, v := range md {
			t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
		}
	}
	first := true
	// Send the headers in a single batch even when they span multiple frames.
	for !endHeaders {
		size := t.hBuf.Len()
		if size > http2MaxFrameLen {
			size = http2MaxFrameLen
		} else {
			endHeaders = true
		}
		if first {
			// Send a HeadersFrame to the server to start a new stream.
			p := http2.HeadersFrameParam{
				StreamID:      s.id,
				BlockFragment: t.hBuf.Next(size),
				EndStream:     false,
				EndHeaders:    endHeaders,
			}
			// Force a flush of the buffered frames iff this is the last headers
			// frame and there is header metadata to be sent. Otherwise, defer
			// flushing until the corresponding data frame is written.
			err = t.framer.writeHeaders(hasMD && endHeaders, p)
			first = false
		} else {
			// Send Continuation frames for the leftover headers.
			err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size))
		}
		if err != nil {
			t.notifyError(err)
			return nil, ConnectionErrorf("transport: %v", err)
		}
	}
	t.writableChan <- 0
	return s, nil
}
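// The WriteField calls above come from golang.org/x/net/http2/hpack. A
// hedged, standalone sketch of the stateful encoder (frame writing elided;
// the header names here are illustrative):
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var hBuf bytes.Buffer
	hEnc := hpack.NewEncoder(&hBuf)

	// The encoder is stateful: once WriteField is called, the bytes
	// accumulated in hBuf must go out as one HEADERS frame, plus
	// CONTINUATION frames if they exceed the max frame size, exactly
	// as the loop in NewStream does.
	hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
	hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: "/service/Method"})
	hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})

	fmt.Printf("%d bytes of encoded header block\n", hBuf.Len())
}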