// Just checking behavior of the context package func TestSanity(t *testing.T) { Logger.Level = logrus.InfoLevel levels := 10 root, cancel := context.WithDeadline(context.Background(), time.Time{}) defer cancel() var ctxFunc func(parent context.Context, level int) context.Context ctxFunc = func(parent context.Context, level int) context.Context { if level == levels { return parent } child, cancel := context.WithDeadline(parent, time.Now().Add(time.Hour)) defer cancel() return ctxFunc(child, level+1) } child := ctxFunc(root, 0) if !assert.Error(t, child.Err()) { t.FailNow() } err := root.Err() if !assert.Error(t, err) { return } }
// DeadlineHandler returns a Handler which adds a deadline to the context. // // Child handlers are responsible for obeying the context deadline and returning // an appropriate error (or not) response in case of timeout. func DeadlineHandler(deadline time.Time) func(http.Handler) http.Handler { return func(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx, cancel := context.WithDeadline(req.Context(), deadline) req = req.WithContext(ctx) defer cancel() h.ServeHTTP(w, req) }) } }
// tryOneName does a lookup for a single name, which must be rooted
// (otherwise answer will not find the answers).
//
// It queries each configured server in order, cfg.attempts times over,
// and returns the canonical name, the matching resource records, and the
// last error seen when no server produced a usable answer.
func tryOneName(ctx context.Context, cfg *dnsConfig, name string, qtype uint16) (string, []dnsRR, error) {
	if len(cfg.servers) == 0 {
		return "", nil, &DNSError{Err: "no DNS servers", Name: name}
	}
	// Tighten the context deadline to cfg.timeout from now, but never
	// loosen an earlier deadline already set by the caller.
	deadline := time.Now().Add(cfg.timeout)
	if old, ok := ctx.Deadline(); !ok || deadline.Before(old) {
		var cancel context.CancelFunc
		ctx, cancel = context.WithDeadline(ctx, deadline)
		defer cancel()
	}
	var lastErr error
	for i := 0; i < cfg.attempts; i++ {
		for _, server := range cfg.servers {
			msg, err := exchange(ctx, server, name, qtype)
			if err != nil {
				lastErr = &DNSError{
					Err:    err.Error(),
					Name:   name,
					Server: server,
				}
				// Mark timeouts so callers can tell them apart from
				// other transport failures.
				if nerr, ok := err.(Error); ok && nerr.Timeout() {
					lastErr.(*DNSError).IsTimeout = true
				}
				continue
			}
			// libresolv continues to the next server when it receives
			// an invalid referral response. See golang.org/issue/15434.
			if msg.rcode == dnsRcodeSuccess && !msg.authoritative && !msg.recursion_available && len(msg.answer) == 0 && len(msg.extra) == 0 {
				lastErr = &DNSError{Err: "lame referral", Name: name, Server: server}
				continue
			}
			cname, rrs, err := answer(name, server, msg, qtype)
			// If answer errored for rcodes dnsRcodeSuccess or dnsRcodeNameError,
			// it means the response in msg was not useful and trying another
			// server probably won't help. Return now in those cases.
			// TODO: indicate this in a more obvious way, such as a field on DNSError?
			if err == nil || msg.rcode == dnsRcodeSuccess || msg.rcode == dnsRcodeNameError {
				return cname, rrs, err
			}
			lastErr = err
		}
	}
	return "", nil, lastErr
}
func (s *Stream) waitForData(ctx context.Context) error { if !s.rDeadline.IsZero() { dctx, cancel := context.WithDeadline(ctx, s.rDeadline) defer cancel() ctx = dctx } select { case read, ok := <-s.dataIn: if !ok { return io.EOF } s.extra = read s.exbuf = read return nil case <-ctx.Done(): return ctx.Err() } }
// This example passes a context with a arbitrary deadline to tell a blocking
// function that it should abandon its work as soon as it gets to it.
func ExampleWithDeadline() {
	deadline := time.Now().Add(50 * time.Millisecond)
	ctx, cancel := context.WithDeadline(context.Background(), deadline)
	// Even though ctx will be expired, it is good practice to call its
	// cancelation function in any case. Failure to do so may keep the
	// context and its parent alive longer than necessary.
	defer cancel()

	// The 50ms deadline fires long before the one-second timer, so the
	// Done branch always wins.
	select {
	case <-ctx.Done():
		fmt.Println(ctx.Err())
	case <-time.After(1 * time.Second):
		fmt.Println("overslept")
	}

	// Output:
	// context deadline exceeded
}
// dialSerial connects to a list of addresses in sequence, returning
// either the first successful connection, or the first error.
func dialSerial(ctx context.Context, dp *dialParam, ras addrList) (Conn, error) {
	var firstErr error // The error from the first address is most relevant.

	for i, ra := range ras {
		// Bail out promptly if the caller's context was canceled.
		select {
		case <-ctx.Done():
			return nil, &OpError{Op: "dial", Net: dp.network, Source: dp.LocalAddr, Addr: ra, Err: mapErr(ctx.Err())}
		default:
		}

		// Split the remaining time budget across the addresses not yet
		// tried (len(ras)-i of them) — presumably partialDeadline shares
		// it evenly; an error means the budget is exhausted.
		deadline, _ := ctx.Deadline()
		partialDeadline, err := partialDeadline(time.Now(), deadline, len(ras)-i)
		if err != nil {
			// Ran out of time.
			if firstErr == nil {
				firstErr = &OpError{Op: "dial", Net: dp.network, Source: dp.LocalAddr, Addr: ra, Err: err}
			}
			break
		}
		dialCtx := ctx
		if partialDeadline.Before(deadline) {
			var cancel context.CancelFunc
			dialCtx, cancel = context.WithDeadline(ctx, partialDeadline)
			// NOTE(review): deferred inside the loop, so these cancels
			// only run when dialSerial returns; the per-address contexts
			// are held until then. Bounded by len(ras), so presumably an
			// accepted trade-off here.
			defer cancel()
		}

		c, err := dialSingle(dialCtx, dp, ra)
		if err == nil {
			return c, nil
		}
		if firstErr == nil {
			firstErr = err
		}
	}

	if firstErr == nil {
		firstErr = &OpError{Op: "dial", Net: dp.network, Source: nil, Addr: nil, Err: errMissingAddress}
	}
	return nil, firstErr
}
// exchange sends a query on the connection and hopes for a response. func exchange(ctx context.Context, server, name string, qtype uint16, timeout time.Duration) (*dnsMsg, error) { d := testHookDNSDialer() out := dnsMsg{ dnsMsgHdr: dnsMsgHdr{ recursion_desired: true, }, question: []dnsQuestion{ {name, qtype, dnsClassINET}, }, } for _, network := range []string{"udp", "tcp"} { // TODO(mdempsky): Refactor so defers from UDP-based // exchanges happen before TCP-based exchange. ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout)) defer cancel() c, err := d.dialDNS(ctx, network, server) if err != nil { return nil, err } defer c.Close() if d, ok := ctx.Deadline(); ok && !d.IsZero() { c.SetDeadline(d) } out.id = uint16(rand.Int()) ^ uint16(time.Now().UnixNano()) in, err := c.dnsRoundTrip(&out) if err != nil { return nil, mapErr(err) } if in.truncated { // see RFC 5966 continue } return in, nil } return nil, errors.New("no answer from DNS server") }
// WithDeadline returns a copy of the parent context with the deadline adjusted // to be no later than d. If the parent's deadline is already earlier than d, // WithDeadline(parent, d) is semantically equivalent to parent. The returned // context's Done channel is closed when the deadline expires, when the returned // cancel function is called, or when the parent context's Done channel is // closed, whichever happens first. // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete. func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { ctx, f := context.WithDeadline(parent, deadline) return ctx, CancelFunc(f) }
// DialContext connects to the address on the named network using
// the provided context.
//
// The provided Context must be non-nil. If the context expires before
// the connection is complete, an error is returned. Once successfully
// connected, any expiration of the context will not affect the
// connection.
//
// See func Dial for a description of the network and address
// parameters.
func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn, error) {
	if ctx == nil {
		panic("nil context")
	}
	// Tighten the context with the dialer's effective deadline, but only
	// when it is stricter than any deadline the caller already set.
	// (Note: the inner `d` here shadows the receiver.)
	deadline := d.deadline(ctx, time.Now())
	if !deadline.IsZero() {
		if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
			subCtx, cancel := context.WithDeadline(ctx, deadline)
			defer cancel()
			ctx = subCtx
		}
	}
	// Bridge the legacy Dialer.Cancel channel onto context cancelation;
	// the helper goroutine exits when either side fires.
	if oldCancel := d.Cancel; oldCancel != nil {
		subCtx, cancel := context.WithCancel(ctx)
		defer cancel()
		go func() {
			select {
			case <-oldCancel:
				cancel()
			case <-subCtx.Done():
			}
		}()
		ctx = subCtx
	}

	// Shadow the nettrace (if any) during resolve so Connect events don't fire for DNS lookups.
	resolveCtx := ctx
	if trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace); trace != nil {
		shadow := *trace
		shadow.ConnectStart = nil
		shadow.ConnectDone = nil
		resolveCtx = context.WithValue(resolveCtx, nettrace.TraceKey{}, &shadow)
	}

	addrs, err := d.resolver().resolveAddrList(resolveCtx, "dial", network, address, d.LocalAddr)
	if err != nil {
		return nil, &OpError{Op: "dial", Net: network, Source: nil, Addr: nil, Err: err}
	}

	dp := &dialParam{
		Dialer:  *d,
		network: network,
		address: address,
	}

	// With DualStack on TCP, IPv4 addresses become primaries and the rest
	// fallbacks, dialed in parallel below (Happy Eyeballs-style);
	// otherwise all addresses are dialed serially.
	var primaries, fallbacks addrList
	if d.DualStack && network == "tcp" {
		primaries, fallbacks = addrs.partition(isIPv4)
	} else {
		primaries = addrs
	}

	var c Conn
	if len(fallbacks) > 0 {
		c, err = dialParallel(ctx, dp, primaries, fallbacks)
	} else {
		c, err = dialSerial(ctx, dp, primaries)
	}
	if err != nil {
		return nil, err
	}

	// Enable TCP keep-alives on the new connection when requested.
	if tc, ok := c.(*TCPConn); ok && d.KeepAlive > 0 {
		setKeepAlive(tc.fd, true)
		setKeepAlivePeriod(tc.fd, d.KeepAlive)
		testHookSetKeepAlive()
	}
	return c, nil
}
{Criteria{Metric: "cpu", From: xmltime("2014-10-25T09:41:00Z"), Interval: time.Minute}, ResultSet{}}, // `until` is before the first point {Criteria{Metric: "cpu", From: xmltime("2014-10-24T08:00:00Z"), Until: xmltime("2014-10-24T08:59:00Z"), Interval: time.Minute}, ResultSet{}}, } for _, test := range tests { res, err := subject.Query(context.Background(), &test.crit) Expect(err).NotTo(HaveOccurred(), "for %+v", test.crit) Expect(res).To(Equal(test.res), "for %+v", test.crit) } }) It("should query with context", func() { subject.Set([]Point{point("cpu,a,b 1414141200 1")}) ctx, _ := context.WithDeadline(context.Background(), time.Now().Add(-time.Millisecond)) _, err := subject.Query(ctx, &Criteria{ Metric: "cpu", From: xmltime("2014-10-24T09:00:00Z"), Interval: time.Hour, }) Expect(err).To(Equal(context.DeadlineExceeded)) }) It("should query points", func() { subject.Set([]Point{ point("cpu,a,b 1414141200 1"), // 2014-10-24T09:00:00Z point("cpu,a,c 1414141300 2"), // 2014-10-24T09:01:40Z point("cpu,a,c 1414142000 4"), // 2014-10-24T09:13:20Z point("cpu,b,c 1414146000 8"), // 2014-10-24T10:20:00Z })
// WithDeadline func WithDeadline(parent *Operation, expiration time.Time, format string, args ...interface{}) (Operation, context.CancelFunc) { ctx, cancelFunc := context.WithDeadline(parent.Context, expiration) op := parent.newChild(ctx, fmt.Sprintf(format, args...)) return op, cancelFunc }
// Test fixture for vet's lostcancel analyzer: each call discards the
// returned CancelFunc, and each // ERROR comment is the exact diagnostic
// vet is expected to report on that line. Do not "fix" the code — the
// leaks are the point.
func _() {
	ctx, _ := context.WithCancel()  // ERROR "the cancel function returned by context.WithCancel should be called, not discarded, to avoid a context leak"
	ctx, _ = context.WithTimeout()  // ERROR "the cancel function returned by context.WithTimeout should be called, not discarded, to avoid a context leak"
	ctx, _ = context.WithDeadline() // ERROR "the cancel function returned by context.WithDeadline should be called, not discarded, to avoid a context leak"
}
// Test fixture for vet's lostcancel analyzer: cancel2 is bound but never
// called, and the second // ERROR anchors a diagnostic at the function
// exit. NOTE(review): the "line 21" in the expected message refers to the
// fixture file's own numbering — this block is position-sensitive, so
// inserting or removing lines around it will break the vet test.
func _() {
	ctx, cancel2 := context.WithDeadline() // ERROR "the cancel2 function is not used..."
} // ERROR "may be reached without using the cancel2 var defined on line 21"
// performWatchOnPrefixes stress-tests watchers: it concurrently puts keys
// under a set of prefixes (rate-limited, for a bounded duration) while
// several watchers per prefix observe events from a fixed start revision,
// then verifies every watch channel is closed once its context is
// canceled, and finally cleans up the keys it wrote.
func performWatchOnPrefixes(ctx context.Context, getClient getClientFunc, round int) {
	runningTime := 60 * time.Second // time for which operation should be performed
	noOfPrefixes := 36              // total number of prefixes which will be watched upon
	watchPerPrefix := 10            // number of watchers per prefix
	reqRate := 30                   // put request per second
	keyPrePrefix := 30              // max number of keyPrePrefixs for put operation

	prefixes := stringutil.UniqueStrings(5, noOfPrefixes)
	keys := stringutil.RandomStrings(10, keyPrePrefix)

	// Namespace this round's keys so rounds don't interfere.
	roundPrefix := fmt.Sprintf("%16x", round)

	var (
		revision int64
		wg       sync.WaitGroup
		gr       *clientv3.GetResponse
		err      error
	)

	client := getClient()
	defer client.Close()

	// Probe a key that should not exist just to read the cluster's current
	// revision from the response header; watchers start from it.
	gr, err = getKey(ctx, client, "non-existent")
	if err != nil {
		log.Fatalf("failed to get the initial revision: %v", err)
	}
	revision = gr.Header.Revision

	// Bound the whole put phase by runningTime.
	ctxt, cancel := context.WithDeadline(ctx, time.Now().Add(runningTime))
	defer cancel()

	// generate and put keys in cluster
	limiter := rate.NewLimiter(rate.Limit(reqRate), reqRate)

	// Writer goroutine: puts every prefix/key combination, paced by the
	// limiter; it stops when ctxt expires or a put fails.
	go func() {
		for _, key := range keys {
			for _, prefix := range prefixes {
				if err = limiter.Wait(ctxt); err != nil {
					return
				}
				if err = putKeyAtMostOnce(ctxt, client, roundPrefix+"-"+prefix+"-"+key); err != nil {
					log.Fatalf("failed to put key: %v", err)
					return
				}
			}
		}
	}()

	// Separate cancelable context for the watchers, independent of the
	// put-phase deadline.
	ctxc, cancelc := context.WithCancel(ctx)

	wcs := make([]clientv3.WatchChan, 0)
	rcs := make([]*clientv3.Client, 0)

	for _, prefix := range prefixes {
		for j := 0; j < watchPerPrefix; j++ {
			// Each watcher gets its own client connection.
			rc := getClient()
			rcs = append(rcs, rc)

			watchPrefix := roundPrefix + "-" + prefix

			wc := rc.Watch(ctxc, watchPrefix, clientv3.WithPrefix(), clientv3.WithRev(revision))
			wcs = append(wcs, wc)

			wg.Add(1)
			go func() {
				defer wg.Done()
				checkWatchResponse(wc, watchPrefix, keys)
			}()
		}
	}
	// Wait for every watcher goroutine to finish checking, then cancel
	// the watch context so the channels close.
	wg.Wait()

	cancelc()

	// verify all watch channels are closed
	for e, wc := range wcs {
		if _, ok := <-wc; ok {
			log.Fatalf("expected wc to be closed, but received %v", e)
		}
	}

	for _, rc := range rcs {
		rc.Close()
	}

	if err = deletePrefix(ctx, client, roundPrefix); err != nil {
		log.Fatalf("failed to clean up keys after test: %v", err)
	}
}