// WaitForStateChange blocks until the state changes to something other
// than the sourceState.
func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	if sourceState != cc.state {
		return cc.state, nil
	}
	done := make(chan struct{})
	var err error
	go func() {
		select {
		case <-ctx.Done():
			cc.mu.Lock()
			err = ctx.Err()
			cc.stateCV.Broadcast()
			cc.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for sourceState == cc.state {
		cc.stateCV.Wait()
		if err != nil {
			return cc.state, err
		}
	}
	return cc.state, nil
}
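// A hypothetical caller sketch (not from the source): block until the
// connection reaches Ready, giving up after a timeout. The initial
// Connecting state and the Ready/Shutdown constants are assumed to be
// this package's ConnectivityState values.
func waitUntilReady(ctx context.Context, cc *Conn) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	state := Connecting
	for state != Ready {
		var err error
		if state, err = cc.WaitForStateChange(ctx, state); err != nil {
			return err // timed out or cancelled while waiting
		}
		if state == Shutdown {
			return ErrClientConnClosing
		}
	}
	return nil
}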
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, ErrClientConnClosing
		case cc.state == Ready:
			ct := cc.transport
			cc.mu.Unlock()
			return ct, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	r.ID = s.reqIDGen.Next()
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		var raftReq pb.InternalRaftRequest
		raftReq.V2 = &r
		data, err := raftReq.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
		// TODO: benchmark the cost of time.Now()
		// might be sampling?
		start := time.Now()
		s.r.Propose(ctx, data)
		proposePending.Inc()
		defer proposePending.Dec()
		select {
		case x := <-ch:
			proposeDurations.Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			proposeFailed.Inc()
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, parseCtxErr(ctx.Err())
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	case "HEAD":
		ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
		if err != nil {
			return Response{}, err
		}
		return Response{Event: ev}, nil
	default:
		return Response{}, ErrUnknownMethod
	}
}
// Watch posts a watch request to run() and waits for a new watcher channel.
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
	ow := opWatch(key, opts...)
	wr := ow.toWatchRequest()
	wr.ctx = ctx
	retc := make(chan chan WatchResponse, 1)
	wr.retc = retc
	ok := false
	// submit request
	select {
	case w.reqc <- wr:
		ok = true
	case <-wr.ctx.Done():
	case <-w.donec:
	}
	// receive channel
	if ok {
		select {
		case ret := <-retc:
			return ret
		case <-ctx.Done():
		case <-w.donec:
		}
	}
	// couldn't create channel; return closed channel
	ch := make(chan WatchResponse)
	close(ch)
	return ch
}
func (l *lessor) keepAliveCtxCloser(id lease.LeaseID, ctx context.Context, donec <-chan struct{}) {
	select {
	case <-donec:
		return
	case <-l.donec:
		return
	case <-ctx.Done():
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[id]
	if !ok {
		return
	}

	// close channel and remove context if still associated with keep alive
	for i, c := range ka.ctxs {
		if c == ctx {
			close(ka.chs[i])
			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
			break
		}
	}
	// remove the keep alive if no listeners remain
	if len(ka.chs) == 0 {
		delete(l.keepAlives, id)
	}
}
// When wait returns, either the new transport is up or ClientConn is
// closing. Used to avoid working on a dying transport. It updates and
// returns the transport and its version when there is no error.
func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.closing:
			cc.mu.Unlock()
			return nil, 0, ErrClientConnClosing
		case ts < cc.transportSeq:
			// Worked on a dying transport. Try the new one immediately.
			defer cc.mu.Unlock()
			return cc.transport, cc.transportSeq, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, 0, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	r.ID = s.reqIDGen.Next()

	data, err := r.Marshal()
	if err != nil {
		return nil, err
	}

	if len(data) > maxRequestBytes {
		return nil, ErrRequestTooLarge
	}

	ch := s.w.Register(r.ID)
	s.r.Propose(ctx, data)

	select {
	case x := <-ch:
		return x.(*applyResult), nil
	case <-ctx.Done():
		s.w.Trigger(r.ID, nil) // GC wait
		return nil, ctx.Err()
	case <-s.done:
		return nil, ErrStopped
	}
}
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	var (
		t   transport.ClientTransport
		err error
	)
	t, err = cc.dopts.picker.Pick(ctx)
	if err != nil {
		return nil, toRPCErr(err)
	}
	// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
		Flush:  desc.ServerStreams && desc.ClientStreams,
	}
	if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	cs := &clientStream{
		desc:    desc,
		codec:   cc.dopts.codec,
		cp:      cc.dopts.cp,
		dc:      cc.dopts.dc,
		tracing: EnableTracing,
	}
	if cc.dopts.cp != nil {
		cs.cbuf = new(bytes.Buffer)
	}
	if cs.tracing {
		cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		cs.trInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, cs.trInfo.tr)
	}
	s, err := t.NewStream(ctx, callHdr)
	if err != nil {
		cs.finish(err)
		return nil, toRPCErr(err)
	}
	cs.t = t
	cs.s = s
	cs.p = &parser{r: s}
	// Listen on ctx.Done() to detect cancellation when there are no pending
	// I/O operations on this stream.
	go func() {
		select {
		case <-t.Error():
			// Incurred a transport error; simply exit.
		case <-s.Context().Done():
			err := s.Context().Err()
			cs.finish(err)
			cs.closeTransportStream(transport.ContextErr(err))
		}
	}()
	return cs, nil
}
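// A hypothetical sketch (not from the source) of the kind of wrapper
// generated code might emit around NewClientStream; the echoClient type,
// service name, and method path are invented for illustration.
func (c *echoClient) BidiEcho(ctx context.Context, opts ...CallOption) (ClientStream, error) {
	desc := &StreamDesc{
		StreamName:    "BidiEcho",
		ClientStreams: true,
		ServerStreams: true,
	}
	return NewClientStream(ctx, desc, c.cc, "/echo.Echo/BidiEcho", opts...)
}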
func waitUpdate(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()
	wresp, ok := <-client.Watch(cctx, key, opts...)
	if !ok {
		return ctx.Err()
	}
	return wresp.Err()
}
// wait blocks until it can receive from ctx.Done, closing, or proceed.
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
// If it receives from closing, it returns 0, ErrConnClosing.
// If it receives from proceed, it returns the received integer, nil.
func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ContextErr(ctx.Err())
	case <-closing:
		return 0, ErrConnClosing
	case i := <-proceed:
		return i, nil
	}
}
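// A hypothetical caller sketch (not from the source): gating a write on a
// quota value delivered over proceed, while honoring cancellation and
// connection shutdown. The channels are assumed to be owned elsewhere.
func sendWithQuota(ctx context.Context, closing <-chan struct{}, proceed <-chan int, send func(n int)) error {
	n, err := wait(ctx, closing, proceed)
	if err != nil {
		return err // ContextErr(ctx.Err()) or ErrConnClosing
	}
	send(n) // n is the quota granted by whoever feeds proceed
	return nil
}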
// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		log.Panicf("request ID should never be 0")
	}
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, parseCtxErr(ctx.Err())
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	case "HEAD":
		ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
		if err != nil {
			return Response{}, err
		}
		return Response{Event: ev}, nil
	default:
		return Response{}, ErrUnknownMethod
	}
}
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)
	if err := printcURL(req); err != nil {
		return nil, nil, err
	}

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-ctx.Done():
		// cancel and wait for request to actually exit before continuing
		c.transport.CancelRequest(req)
		rtresp := <-rtchan
		resp = rtresp.resp
		err = ctx.Err()
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		done <- struct{}{}
	}()

	select {
	case <-ctx.Done():
		err = resp.Body.Close()
		<-done
		if err == nil {
			err = ctx.Err()
		}
	case <-done:
	}

	return resp, body, err
}
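// A hypothetical caller sketch (not from the source): running an action
// with a one-second budget. act is assumed to be a value implementing
// httpAction; on timeout, Do cancels the in-flight request and returns
// ctx.Err().
func doWithTimeout(c *simpleHTTPClient, act httpAction) (*http.Response, []byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	return c.Do(ctx, act)
}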
func contextClient(ctx context.Context) (*http.Client, error) {
	for _, fn := range contextClientFuncs {
		c, err := fn(ctx)
		if err != nil {
			return nil, err
		}
		if c != nil {
			return c, nil
		}
	}
	if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
		return hc, nil
	}
	return http.DefaultClient, nil
}
func (mn *multiNode) step(ctx context.Context, m multiMessage) error {
	ch := mn.recvc
	if m.msg.Type == pb.MsgProp {
		ch = mn.propc
	}

	select {
	case ch <- m:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-mn.done:
		return ErrStopped
	}
}
// Step advances the state machine using msgs. The ctx.Err() will be returned,
// if any.
func (n *node) step(ctx context.Context, m pb.Message) error {
	ch := n.recvc
	if m.Type == pb.MsgProp {
		ch = n.propc
	}

	select {
	case ch <- m:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
}
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		err := c.Sync(ctx)
		if err != nil {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
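// A hypothetical caller sketch (not from the source): running AutoSync in
// the background for the life of the process; cancelling ctx makes it
// return ctx.Err(), which the caller treats as a clean shutdown.
func startAutoSync(ctx context.Context, c *httpClusterClient) {
	go func() {
		err := c.AutoSync(ctx, 10*time.Second)
		if err != nil && err != context.Canceled {
			log.Printf("autosync failed: %v", err)
		}
	}()
}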
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()
	wch := client.Watch(cctx, key, v3.WithRev(rev))
	for wr := range wch {
		for _, ev := range wr.Events {
			if ev.Type == storagepb.DELETE {
				return nil
			}
		}
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	return fmt.Errorf("lost watcher waiting for delete")
}
func waitUpdate(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
	w := v3.NewWatcher(client)
	defer w.Close()
	wc := w.Watch(ctx, key, opts...)
	if wc == nil {
		return ctx.Err()
	}
	wresp, ok := <-wc
	if !ok {
		return ctx.Err()
	}
	if len(wresp.Events) == 0 {
		return v3rpc.ErrCompacted
	}
	return nil
}
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean up its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s, err := NewSession(m.client)
	if err != nil {
		return err
	}
	// put self in lock waiters via myKey; oldest waiter holds lock
	m.myKey, m.myRev, err = NewUniqueKey(ctx, m.client, m.pfx, v3.WithLease(s.Lease()))
	if err == nil {
		// wait for deletion of revisions prior to myKey
		err = waitDeletes(ctx, m.client, m.pfx, v3.WithPrefix(), v3.WithRev(m.myRev-1))
	}
	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock()
	default:
	}
	return err
}
func handleKeyWatch(ctx context.Context, w http.ResponseWriter, wa store.Watcher, stream bool, rt etcdserver.RaftTimer) {
	defer wa.Remove()
	ech := wa.EventChan()
	var nch <-chan bool
	if x, ok := w.(http.CloseNotifier); ok {
		nch = x.CloseNotify()
	}

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex()))
	w.Header().Set("X-Raft-Index", fmt.Sprint(rt.Index()))
	w.Header().Set("X-Raft-Term", fmt.Sprint(rt.Term()))
	w.WriteHeader(http.StatusOK)

	// Ensure headers are flushed early, in case of long polling.
	w.(http.Flusher).Flush()

	for {
		select {
		case <-nch:
			// Client closed connection. Nothing to do.
			return
		case <-ctx.Done():
			// Timed out. net/http will close the connection for us, so nothing to do.
			return
		case ev, ok := <-ech:
			if !ok {
				// If the channel is closed, notifications are arriving faster
				// than we can send them to the client, so simply end streaming.
				return
			}
			ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
			if err := json.NewEncoder(w).Encode(ev); err != nil {
				// Should never be reached.
				plog.Warningf("error writing event (%v)", err)
				return
			}
			if !stream {
				return
			}
			w.(http.Flusher).Flush()
		}
	}
}
// Description:
//	A helper routine to renew the TTL of a given key. This routine starts a
//	timer and at every tick attempts to renew the TTL of the given key.
//
// Parameters:
//	@ctx     - A context object used to stop the renewal operation.
//	@key     - The key for which the TTL needs to be renewed.
//	@val     - The value associated with the key.
//	@ttl     - TTL value in seconds.
//	@refresh - Interval, in seconds, at which the TTL will be renewed.
//
// Return value:
//	@out - A channel on which errors will be reported back to the caller.
func (ec *EtcdConnector) RenewTTL(ctx context.Context, key, val string, ttl, refresh time.Duration) <-chan error {
	out := make(chan error)

	// Start a ticker to trigger at every @refresh seconds.
	ticker := time.NewTicker(refresh)

	// Launch a goroutine to handle the periodic TTL renewal of @key.
	go func() {
		defer ticker.Stop()
		opts := &client.SetOptions{PrevExist: client.PrevExist, TTL: ttl}

		// Wait for activity either on the ticker channel or @ctx's channel.
		for {
			select {
			case <-ticker.C:
				// Update the TTL of @key.
				if _, err := ec.Set(ctx, key, val, opts); err != nil {
					// In case of an error, put the error info on the
					// outward channel and return.
					out <- err
					close(out)
					return
				}
			case <-ctx.Done():
				// The caller has asked us to stop the renewal operation, so
				// delete @key, close the outward channel, and return.
				//
				// Deletion is best effort here and also an optimization. We
				// end up here only when the program is being shut down or
				// when a catastrophic failure occurs. If we succeed in
				// deleting @key, entities watching @key will get notified
				// quickly; otherwise they will have to wait for the TTL to
				// expire. And if we are here because of a catastrophic
				// failure, the delete can also fail.
				ec.Delete(context.Background(), key, &client.DeleteOptions{})
				close(out)
				return
			}
		}
	}()

	return out
}
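// A hypothetical caller sketch (not from the source): keeping a liveness
// key alive until shutdown. The key, value, and intervals are invented
// for illustration; the returned stop function ends the renewal.
func holdLivenessKey(ec *EtcdConnector) (stop func()) {
	ctx, cancel := context.WithCancel(context.Background())
	errc := ec.RenewTTL(ctx, "/services/worker-1", "alive", 30*time.Second, 10*time.Second)
	go func() {
		if err, ok := <-errc; ok && err != nil {
			log.Printf("TTL renewal failed: %v", err)
		}
	}()
	return cancel
}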
// watch posts a watch request to run() and waits for a new watcher channel.
func (w *watcher) watch(ctx context.Context, key, prefix string, rev int64) <-chan WatchResponse {
	retc := make(chan chan WatchResponse, 1)
	wr := &watchRequest{ctx: ctx, key: key, prefix: prefix, rev: rev, retc: retc}
	// submit request
	select {
	case w.reqc <- wr:
	case <-wr.ctx.Done():
		return nil
	case <-w.donec:
		return nil
	}
	// receive channel
	select {
	case ret := <-retc:
		return ret
	case <-ctx.Done():
		return nil
	case <-w.donec:
		return nil
	}
}
// Step advances the state machine using msgs. The ctx.Err() will be returned,
// if any.
func (n *node) step(ctx context.Context, m pb.Message) error {
	ch := n.recvc
	if m.Type == pb.MsgProp {
		ch = n.propc
	}

	select {
	case ch <- m:
		if m.Type == pb.MsgProp {
			// NOTE: this case is reached before the quorum check
			// plog.Infof("message is in channel")
		}
		return nil
	case <-ctx.Done():
		// plog.Infof("context is done, should not happen")
		return ctx.Err()
	case <-n.done:
		// plog.Infof("node stopped")
		return ErrStopped
	}
}
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean up its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s, err := NewSession(m.client)
	if err != nil {
		return err
	}
	// put self in lock waiters via myKey; oldest waiter holds lock
	m.myKey, m.myRev, err = NewUniqueKey(ctx, m.kv, m.pfx, v3.WithLease(s.Lease()))
	// wait for lock to become available
	for err == nil {
		// find oldest element in waiters via revision of insertion
		var resp *v3.GetResponse
		resp, err = m.kv.Get(ctx, m.pfx, v3.WithFirstRev()...)
		if err != nil {
			break
		}
		if m.myRev == resp.Kvs[0].CreateRevision {
			// myKey is oldest in waiters; myKey holds the lock now
			return nil
		}
		// otherwise myKey isn't lowest, so there must be a pfx prior to myKey
		opts := append(v3.WithLastRev(), v3.WithRev(m.myRev-1))
		resp, err = m.kv.Get(ctx, m.pfx, opts...)
		if err != nil {
			break
		}
		lastKey := string(resp.Kvs[0].Key)
		// wait for release on prior pfx
		err = waitUpdate(ctx, m.client, lastKey, v3.WithRev(m.myRev))
		// try again in case lastKey left the wait list before acquiring the lock;
		// myKey can only hold the lock if it's the oldest in the list
	}
	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock()
	default:
	}
	return err
}
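// A hypothetical caller sketch (not from the source): bounding lock
// acquisition with a timeout so a stalled lock holder cannot block the
// caller forever.
func withLock(m *Mutex, critical func() error) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := m.Lock(ctx); err != nil {
		return err // timed out or cancelled while waiting for the lock
	}
	defer m.Unlock()
	return critical()
}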
// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.Panicf("return type should always be error")
		}
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return parseCtxErr(ctx.Err())
	case <-s.done:
		return ErrStopped
	}
}
func (s *EtcdServer) V3DemoDo(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
	r.ID = s.reqIDGen.Next()

	data, err := r.Marshal()
	if err != nil {
		return &pb.EmptyResponse{}, err
	}
	ch := s.w.Register(r.ID)

	s.r.Propose(ctx, data)

	select {
	case x := <-ch:
		resp := x.(proto.Message)
		return resp, nil
	case <-ctx.Done():
		s.w.Trigger(r.ID, nil) // GC wait
		return &pb.EmptyResponse{}, ctx.Err()
	case <-s.done:
		return &pb.EmptyResponse{}, ErrStopped
	}
}
// Watch posts a watch request to run() and waits for a new watcher channel.
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
	ow := opWatch(key, opts...)
	retc := make(chan chan WatchResponse, 1)
	wr := &watchRequest{
		ctx:            ctx,
		key:            string(ow.key),
		end:            string(ow.end),
		rev:            ow.rev,
		progressNotify: ow.progressNotify,
		retc:           retc,
	}
	ok := false
	// submit request
	select {
	case w.reqc <- wr:
		ok = true
	case <-wr.ctx.Done():
	case <-w.donec:
	}
	// receive channel
	if ok {
		select {
		case ret := <-retc:
			return ret
		case <-ctx.Done():
		case <-w.donec:
		}
	}
	// couldn't create channel; return closed channel
	ch := make(chan WatchResponse)
	close(ch)
	return ch
}
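// A hypothetical caller sketch (not from the source): ranging over the
// WatchChan until the context is cancelled. Because Watch returns a
// closed channel when the watcher cannot be registered, the loop also
// exits cleanly in that case. The Events, Type, and Kv fields are
// assumed from the WatchResponse/event types used above.
func printEvents(ctx context.Context, w *watcher, key string) {
	for wresp := range w.Watch(ctx, key) {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}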
func (c *httpClient) Do(ctx context.Context, act HTTPAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-ctx.Done():
		c.transport.CancelRequest(req)
		// wait for request to actually exit before continuing
		<-rtchan
		err = ctx.Err()
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	body, err := ioutil.ReadAll(resp.Body)
	return resp, body, err
}
// Campaign puts a value as eligible for the election. It blocks until
// it is elected, an error occurs, or the context is cancelled.
func (e *Election) Campaign(ctx context.Context, val string) error {
	s, serr := NewSession(e.client)
	if serr != nil {
		return serr
	}
	k, rev, err := NewUniqueKV(ctx, e.client, e.keyPrefix, val, v3.WithLease(s.Lease()))
	if err == nil {
		err = waitDeletes(ctx, e.client, e.keyPrefix, v3.WithPrefix(), v3.WithRev(rev-1))
	}
	if err != nil {
		// clean up in case of context cancel
		select {
		case <-ctx.Done():
			e.client.Delete(e.client.Ctx(), k)
		default:
		}
		return err
	}
	e.leaderKey, e.leaderRev, e.leaderSession = k, rev, s
	return nil
}
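// A hypothetical caller sketch (not from the source): campaigning for
// leadership, then running a leader-only task once elected. The
// candidate value is invented for illustration.
func runWhenElected(ctx context.Context, e *Election, lead func(context.Context) error) error {
	if err := e.Campaign(ctx, "candidate-1"); err != nil {
		return err // cancelled or failed while waiting to be elected
	}
	return lead(ctx)
}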
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
	cancel := canceler(client, req)

	type responseAndError struct {
		resp *http.Response
		err  error
	}
	result := make(chan responseAndError, 1)

	go func() {
		resp, err := client.Do(req)
		testHookDoReturned()
		result <- responseAndError{resp, err}
	}()

	var resp *http.Response

	select {
	case <-ctx.Done():
		testHookContextDoneBeforeHeaders()
		cancel()
		// Clean up after the goroutine calling client.Do:
		go func() {
			if r := <-result; r.resp != nil {
				testHookDidBodyClose()
				r.resp.Body.Close()
			}
		}()
		return nil, ctx.Err()
	case r := <-result:
		var err error
		resp, err = r.resp, r.err
		if err != nil {
			return resp, err
		}
	}

	c := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			cancel()
		case <-c:
			// The response's Body is closed.
		}
	}()
	resp.Body = &notifyingReader{resp.Body, c}

	return resp, nil
}
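// A hypothetical caller sketch (not from the source): fetching a URL with
// a five-second budget. Passing a nil client selects http.DefaultClient,
// and a timeout that fires mid-read also aborts the body read via the
// notifyingReader wiring above.
func fetch(url string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := Do(ctx, nil, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}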