func (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) {
	if ctx == nil || ctx.Done() == nil {
		// A nil ctx or nil Done channel means the request cannot be canceled.
		return c.Client.Do(req)
	}
	resc := make(chan *http.Response, 1)
	errc := make(chan error, 1)
	// Perform the request from a separate goroutine.
	go func() {
		res, err := c.Client.Do(req)
		if err != nil {
			errc <- err
		} else {
			resc <- res
		}
	}()
	// Wait for request completion or context expiry.
	select {
	case <-ctx.Done():
		c.t.CancelRequest(req)
		return nil, ctx.Err()
	case err := <-errc:
		return nil, err
	case res := <-resc:
		return res, nil
	}
}
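// A minimal alternative sketch, assuming Go 1.7 or newer: attaching the context
// to the request lets net/http cancel the round trip itself, avoiding the extra
// goroutine and Transport.CancelRequest used above. doWithContext is a
// hypothetical name, not part of the original client.
func (c *Client) doWithContext(ctx context.Context, req *http.Request) (*http.Response, error) {
	if ctx == nil || ctx.Done() == nil {
		// Nothing to cancel on; fall back to a plain request.
		return c.Client.Do(req)
	}
	return c.Client.Do(req.WithContext(ctx))
}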
func paintLines(ctx context.Context, r io.Reader, source string) {
	scanner := bufio.NewScanner(r)
	doneC := ctx.Done()
scan:
	for scanner.Scan() {
		lsp.Println(source, scanner.Text())
		select {
		case <-doneC:
			// Stop scanning once the context is done; a plain break would only
			// exit the select, not the loop.
			break scan
		default:
			// do nothing
		}
	}
	if err := ctx.Err(); err != nil {
		errC <- err
		return
	}
	if err := scanner.Err(); err != nil {
		errC <- err
		return
	}
	errC <- nil
}
// WaitForResult wraps govmomi operations, waits for the operation to complete,
// and returns the operation result.
// Sample usage:
//    info, err := WaitForResult(ctx, func(ctx) (*TaskInfo, error) {
//        return vm, vm.Reconfigure(ctx, config)
//    })
func WaitForResult(ctx context.Context, f func(context.Context) (Task, error)) (*types.TaskInfo, error) {
	var err error
	var info *types.TaskInfo
	var backoffFactor int64 = 1
	for {
		var t Task
		if t, err = f(ctx); err == nil {
			info, err = t.WaitForResult(ctx, nil)
			if err == nil {
				return info, err
			}
		}
		if !isTaskInProgress(err) {
			return info, err
		}
		sleepValue := time.Duration(backoffFactor * (rand.Int63n(100) + int64(50)))
		select {
		case <-time.After(sleepValue * time.Millisecond):
			backoffFactor *= 2
			if backoffFactor > maxBackoffFactor {
				backoffFactor = maxBackoffFactor
			}
		case <-ctx.Done():
			return info, ctx.Err()
		}
		log.Warnf("retrying task")
	}
}
func cgoLookupPTR(ctx context.Context, addr string) (names []string, err error, completed bool) {
	var zone string
	ip := parseIPv4(addr)
	if ip == nil {
		ip, zone = parseIPv6(addr, true)
	}
	if ip == nil {
		return nil, &DNSError{Err: "invalid address", Name: addr}, true
	}
	sa, salen := cgoSockaddr(ip, zone)
	if sa == nil {
		return nil, &DNSError{Err: "invalid address " + ip.String(), Name: addr}, true
	}
	if ctx.Done() == nil {
		names, err := cgoLookupAddrPTR(addr, sa, salen)
		return names, err, true
	}
	result := make(chan reverseLookupResult, 1)
	go cgoReverseLookup(result, addr, sa, salen)
	select {
	case r := <-result:
		return r.names, r.err, true
	case <-ctx.Done():
		return nil, mapErr(ctx.Err()), false
	}
}
func dialPlan9(ctx context.Context, net string, laddr, raddr Addr) (fd *netFD, err error) {
	defer func() { fixErr(err) }()
	type res struct {
		fd  *netFD
		err error
	}
	resc := make(chan res)
	go func() {
		testHookDialChannel()
		fd, err := dialPlan9Blocking(ctx, net, laddr, raddr)
		select {
		case resc <- res{fd, err}:
		case <-ctx.Done():
			if fd != nil {
				fd.Close()
			}
		}
	}()
	select {
	case res := <-resc:
		return res.fd, res.err
	case <-ctx.Done():
		return nil, mapErr(ctx.Err())
	}
}
// ScanFilesToBytes takes a channel of files and returns a channel of their contents.
func ScanFilesToBytes(ctx context.Context, in chan File) chan Content {
	out := make(chan Content)
	go func() {
		defer close(out)
		for {
			select {
			case value, open := <-in:
				if !open {
					return
				}
				if value.Err != nil {
					out <- Content{"", nil, kerr.Wrap("PQUCOUYLJE", value.Err)}
					return
				}
				bytes, err := ProcessFile(value.File)
				// ProcessFile returns nil bytes for non-JSON files, so we skip them.
				if bytes != nil || err != nil {
					out <- Content{value.File, bytes, err}
				}
			case <-ctx.Done():
				out <- Content{"", nil, kerr.Wrap("AFBJCTFOKX", ctx.Err())}
				return
			}
		}
	}()
	return out
}
func ctxDriverBegin(ctx context.Context, ci driver.Conn) (driver.Tx, error) {
	if ciCtx, is := ci.(driver.ConnBeginContext); is {
		return ciCtx.BeginContext(ctx)
	}
	if ctx.Done() == context.Background().Done() {
		return ci.Begin()
	}
	// Check the transaction level in ctx. If set and non-default
	// then return an error here as the BeginContext driver value is not supported.
	if level, ok := driver.IsolationFromContext(ctx); ok && level != driver.IsolationLevel(LevelDefault) {
		return nil, errors.New("sql: driver does not support non-default isolation level")
	}
	// Check for a read-only parameter in ctx. If a read-only transaction is
	// requested return an error as the BeginContext driver value is not supported.
	if ro := driver.ReadOnlyFromContext(ctx); ro {
		return nil, errors.New("sql: driver does not support read-only transactions")
	}
	txi, err := ci.Begin()
	if err == nil {
		select {
		default:
		case <-ctx.Done():
			txi.Rollback()
			return nil, ctx.Err()
		}
	}
	return txi, err
}
func (s *subscriber) Loop(ctx context.Context, q Queue) error {
	var (
		opened bool = true
		m      *protocol.Message
	)
	sCtx, cancel := context.WithCancel(ctx)
	s.cancel = cancel
	defer func() { s.cancel = nil }()
	for opened {
		select {
		case m, opened = <-s.route.MessagesChannel():
			if !opened {
				break
			}
			q.Push(NewRequest(s, m))
		case <-sCtx.Done():
			// If the parent context is still running then only this subscriber context
			// has been cancelled.
			if ctx.Err() == nil {
				return sCtx.Err()
			}
			return nil
		}
	}
	// TODO Cosmin Bogdan: returning this error can mean 2 things: overflow of the
	// route's channel, or intentional stopping of the router / gubled.
	return ErrRouteChannelClosed
}
// Update replaces an item with a new one in the mongo collection.
func (m *Handler) Update(ctx context.Context, item *resource.Item, original *resource.Item) error {
	mItem := newMongoItem(item)
	c, err := m.c(ctx)
	if err != nil {
		return err
	}
	defer m.close(c)
	err = c.Update(bson.M{"_id": original.ID, "_etag": original.ETag}, mItem)
	if err == mgo.ErrNotFound {
		// Determine whether the item is not found or found but with a mismatched etag.
		var count int
		count, err = c.FindId(original.ID).Count()
		if err != nil {
			// The find returned an unexpected err, just forward it with no mapping.
		} else if count == 0 {
			err = resource.ErrNotFound
		} else if ctx.Err() != nil {
			err = ctx.Err()
		} else {
			// If the item was found, it means that its etag didn't match.
			err = resource.ErrConflict
		}
	}
	return err
}
// lookupProtocol looks up an IP protocol name and returns the corresponding protocol number.
func lookupProtocol(ctx context.Context, name string) (int, error) {
	// GetProtoByName's return value is stored in thread-local storage.
	// Start a new OS thread before the call to prevent races.
	type result struct {
		proto int
		err   error
	}
	ch := make(chan result) // unbuffered
	go func() {
		acquireThread()
		defer releaseThread()
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		proto, err := getprotobyname(name)
		select {
		case ch <- result{proto: proto, err: err}:
		case <-ctx.Done():
		}
	}()
	select {
	case r := <-ch:
		if r.err != nil {
			if proto, err := lookupProtocolMap(name); err == nil {
				return proto, nil
			}
			r.err = &DNSError{Err: r.err.Error(), Name: name}
		}
		return r.proto, r.err
	case <-ctx.Done():
		return 0, mapErr(ctx.Err())
	}
}
func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *NodeOption {
	out := make(chan *NodeOption, len(keys))
	blocks := ds.Blocks.GetBlocks(ctx, keys)
	var count int
	go func() {
		defer close(out)
		for {
			select {
			case b, ok := <-blocks:
				if !ok {
					if count != len(keys) {
						out <- &NodeOption{Err: fmt.Errorf("failed to fetch all nodes")}
					}
					return
				}
				nd, err := decodeBlock(b)
				if err != nil {
					out <- &NodeOption{Err: err}
					return
				}
				out <- &NodeOption{Node: nd}
				count++
			case <-ctx.Done():
				out <- &NodeOption{Err: ctx.Err()}
				return
			}
		}
	}()
	return out
}
func (e *apiClient) PerformRequests(ctx context.Context, queries []QueryToSend) (*tsdb.QueryResult, error) {
	queryResult := &tsdb.QueryResult{}
	queryCount := len(queries)
	jobsChan := make(chan QueryToSend, queryCount)
	resultChan := make(chan []*tsdb.TimeSeries, queryCount)
	errorsChan := make(chan error, 1)
	for w := 1; w <= MaxWorker; w++ {
		go e.spawnWorker(ctx, w, jobsChan, resultChan, errorsChan)
	}
	for _, v := range queries {
		jobsChan <- v
	}
	close(jobsChan)
	resultCounter := 0
	for {
		select {
		case timeseries := <-resultChan:
			queryResult.Series = append(queryResult.Series, timeseries...)
			resultCounter++
			if resultCounter == queryCount {
				close(resultChan)
				return queryResult, nil
			}
		case err := <-errorsChan:
			return nil, err
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}
func EnumerateChildrenAsync(ctx context.Context, ds DAGService, c *cid.Cid, visit func(*cid.Cid) bool) error {
	toprocess := make(chan []*cid.Cid, 8)
	nodes := make(chan *NodeOption, 8)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	defer close(toprocess)

	go fetchNodes(ctx, ds, toprocess, nodes)

	root, err := ds.Get(ctx, c)
	if err != nil {
		return err
	}

	nodes <- &NodeOption{Node: root}
	live := 1

	for {
		select {
		case opt, ok := <-nodes:
			if !ok {
				return nil
			}
			if opt.Err != nil {
				return opt.Err
			}
			nd := opt.Node

			// A node has been fetched.
			live--

			var cids []*cid.Cid
			for _, lnk := range nd.Links() {
				c := lnk.Cid
				if visit(c) {
					live++
					cids = append(cids, c)
				}
			}

			if live == 0 {
				return nil
			}

			if len(cids) > 0 {
				select {
				case toprocess <- cids:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
// populateShard gets data for predicate pred from the server with id serverId and
// writes it to RocksDB.
func populateShard(ctx context.Context, pl *pool, group uint32) (int, error) {
	gkeys, err := generateGroup(group)
	if err != nil {
		return 0, x.Wrapf(err, "While generating keys group")
	}

	conn, err := pl.Get()
	if err != nil {
		return 0, err
	}
	defer pl.Put(conn)
	c := NewWorkerClient(conn)

	stream, err := c.PredicateData(context.Background(), gkeys)
	if err != nil {
		return 0, err
	}
	x.Trace(ctx, "Streaming data for group: %v", group)

	kvs := make(chan *task.KV, 1000)
	che := make(chan error)
	go writeBatch(ctx, kvs, che)

	// We can use count to check the number of posting lists returned in tests.
	count := 0
	for {
		kv, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			close(kvs)
			return count, err
		}
		count++

		// We check for errors; if there are no errors, we send the value to the channel.
		select {
		case kvs <- kv:
			// OK
		case <-ctx.Done():
			x.TraceError(ctx, x.Errorf("Context timed out while streaming group: %v", group))
			close(kvs)
			return count, ctx.Err()
		case err := <-che:
			x.TraceError(ctx, x.Errorf("Error while doing a batch write for group: %v", group))
			close(kvs)
			return count, err
		}
	}
	close(kvs)

	if err := <-che; err != nil {
		x.TraceError(ctx, x.Errorf("Error while doing a batch write for group: %v", group))
		return count, err
	}
	x.Trace(ctx, "Streaming complete for group: %v", group)
	return count, nil
}
func deletePrefix(ctx context.Context, client *clientv3.Client, key string) error {
	for ctx.Err() == nil {
		if _, err := client.Delete(ctx, key, clientv3.WithPrefix()); err == nil {
			return nil
		}
	}
	return ctx.Err()
}
func getKey(ctx context.Context, client *clientv3.Client, key string) (*clientv3.GetResponse, error) {
	for ctx.Err() == nil {
		if gr, err := client.Get(ctx, key); err == nil {
			return gr, nil
		}
	}
	return nil, ctx.Err()
}
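// Hypothetical usage sketch for the two etcd helpers above: both retry until the
// context expires, so callers normally bound them with a deadline. The
// deleteWithTimeout name and the timeout value are assumptions for illustration.
func deleteWithTimeout(client *clientv3.Client, key string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return deletePrefix(ctx, client, key)
}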
// writeResult blocks until it can write the result r to the channel c or until
// the context times out.
func writeResult(ctx context.Context, c chan result, r result) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case c <- r:
		return nil
	}
}
func waitOnErrChan(ctx context.Context, errs chan error) error {
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
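// Hypothetical sketch combining the two helpers above: a producer reports its
// outcome through writeResult so it never blocks past the caller's deadline,
// while the caller waits with waitOnErrChan. produceAndWait is an assumed name
// for illustration.
func produceAndWait(ctx context.Context, out chan result) error {
	errs := make(chan error, 1) // buffered so the producer never blocks on reporting
	go func() {
		var r result // produce the result here
		errs <- writeResult(ctx, out, r)
	}()
	return waitOnErrChan(ctx, errs)
}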
func (r *Resolver) lookupIP(ctx context.Context, name string) ([]IPAddr, error) {
	// TODO(bradfitz,brainman): use ctx more. See TODO below.
	type ret struct {
		addrs []IPAddr
		err   error
	}
	ch := make(chan ret, 1)
	go func() {
		acquireThread()
		defer releaseThread()
		hints := syscall.AddrinfoW{
			Family:   syscall.AF_UNSPEC,
			Socktype: syscall.SOCK_STREAM,
			Protocol: syscall.IPPROTO_IP,
		}
		var result *syscall.AddrinfoW
		e := syscall.GetAddrInfoW(syscall.StringToUTF16Ptr(name), nil, &hints, &result)
		if e != nil {
			ch <- ret{err: &DNSError{Err: winError("getaddrinfow", e).Error(), Name: name}}
			return
		}
		defer syscall.FreeAddrInfoW(result)
		addrs := make([]IPAddr, 0, 5)
		for ; result != nil; result = result.Next {
			addr := unsafe.Pointer(result.Addr)
			switch result.Family {
			case syscall.AF_INET:
				a := (*syscall.RawSockaddrInet4)(addr).Addr
				addrs = append(addrs, IPAddr{IP: IPv4(a[0], a[1], a[2], a[3])})
			case syscall.AF_INET6:
				a := (*syscall.RawSockaddrInet6)(addr).Addr
				zone := zoneToString(int((*syscall.RawSockaddrInet6)(addr).Scope_id))
				addrs = append(addrs, IPAddr{IP: IP{a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]}, Zone: zone})
			default:
				ch <- ret{err: &DNSError{Err: syscall.EWINDOWS.Error(), Name: name}}
				return
			}
		}
		ch <- ret{addrs: addrs}
	}()
	select {
	case r := <-ch:
		return r.addrs, r.err
	case <-ctx.Done():
		// TODO(bradfitz,brainman): cancel the ongoing
		// GetAddrInfoW? It would require conditionally using
		// GetAddrInfoEx with lpOverlapped, which requires
		// Windows 8 or newer. I guess we'll need oldLookupIP,
		// newLookupIP, and newerLookUP.
		//
		// For now we just let it finish and write to the
		// buffered channel.
		return nil, &DNSError{
			Name:      name,
			Err:       ctx.Err().Error(),
			IsTimeout: ctx.Err() == context.DeadlineExceeded,
		}
	}
}
func (s storageWrapper) Clear(ctx context.Context, lookup *Lookup) (deleted int, err error) {
	if s.Storer == nil {
		return 0, ErrNoStorage
	}
	if ctx.Err() != nil {
		return 0, ctx.Err()
	}
	return s.Storer.Clear(ctx, lookup)
}

func (s storageWrapper) Delete(ctx context.Context, item *Item) (err error) {
	if s.Storer == nil {
		return ErrNoStorage
	}
	if ctx.Err() != nil {
		return ctx.Err()
	}
	return s.Storer.Delete(ctx, item)
}

func (s storageWrapper) Update(ctx context.Context, item *Item, original *Item) (err error) {
	if s.Storer == nil {
		return ErrNoStorage
	}
	if ctx.Err() != nil {
		return ctx.Err()
	}
	return s.Storer.Update(ctx, item, original)
}

func (s storageWrapper) Insert(ctx context.Context, items []*Item) (err error) {
	if s.Storer == nil {
		return ErrNoStorage
	}
	if ctx.Err() != nil {
		return ctx.Err()
	}
	return s.Storer.Insert(ctx, items)
}
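// A possible refactoring sketch: the nil-Storer and ctx.Err() guards repeated in
// Clear, Delete, Update, and Insert above could be factored into one helper.
// checkStorer is a hypothetical name, not part of the original wrapper.
func (s storageWrapper) checkStorer(ctx context.Context) error {
	if s.Storer == nil {
		return ErrNoStorage
	}
	return ctx.Err()
}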
// WaitForTimeoutRaw waits past the context deadline and then returns the context
// error. yarpc should interpret this as a handler timeout, which in turn
// should be forwarded to the yarpc client as a remote handler timeout.
func WaitForTimeoutRaw(ctx context.Context, reqMeta yarpc.ReqMeta, body []byte) ([]byte, yarpc.ResMeta, error) {
	if _, ok := ctx.Deadline(); !ok {
		return nil, nil, fmt.Errorf("no deadline set in context")
	}
	select {
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	}
}
func HandleRequest(ctx context.Context, req *Request) (*Response, error) {
	context := NewQueryContext(req.Queries, req.TimeRange)

	batches, err := getBatches(req)
	if err != nil {
		return nil, err
	}

	currentlyExecuting := 0
	for _, batch := range batches {
		if len(batch.Depends) == 0 {
			currentlyExecuting += 1
			batch.Started = true
			go batch.process(ctx, context)
		}
	}

	response := &Response{}
	for currentlyExecuting != 0 {
		select {
		case batchResult := <-context.ResultsChan:
			currentlyExecuting -= 1
			response.BatchTimings = append(response.BatchTimings, batchResult.Timings)

			if batchResult.Error != nil {
				return nil, batchResult.Error
			}

			for refId, result := range batchResult.QueryResults {
				context.Results[refId] = result
			}

			for _, batch := range batches {
				// Not interested in started batches.
				if batch.Started {
					continue
				}

				if batch.allDependenciesAreIn(context) {
					currentlyExecuting += 1
					batch.Started = true
					go batch.process(ctx, context)
				}
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	response.Results = context.Results
	return response, nil
}
// Fsync flushes the content in the file to disk, but does not
// update the dag tree internally.
func (fi *FileNode) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
	errs := make(chan error, 1)
	go func() {
		errs <- fi.fi.Sync()
	}()
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
// CallOneway makes a oneway request.
func (o *Outbound) CallOneway(ctx context.Context, treq *transport.Request) (transport.Ack, error) {
	if !o.started.Load() {
		// Panic because there's no recovery from this.
		panic(errOutboundNotStarted)
	}

	peer, err := o.getPeerForRequest(ctx, treq)
	if err != nil {
		return nil, err
	}

	endRequest := peer.StartRequest()
	defer endRequest()

	req, err := o.createRequest(peer, treq)
	if err != nil {
		return nil, err
	}

	start := time.Now()
	var ttl time.Duration

	req.Header = applicationHeaders.ToHTTPHeaders(treq.Headers, nil)
	ctx, req, span := o.withOpentracingSpan(ctx, req, treq, start)
	defer span.Finish()
	req = o.withCoreHeaders(req, treq, ttl)

	client, err := o.getHTTPClient(peer)
	if err != nil {
		return nil, err
	}

	_, err = client.Do(req.WithContext(ctx))
	if err != nil {
		// Workaround borrowed from ctxhttp until
		// https://github.com/golang/go/issues/17711 is resolved.
		select {
		case <-ctx.Done():
			err = ctx.Err()
		default:
		}
	}
	sent := time.Now()

	if err != nil {
		span.SetTag("error", true)
		span.LogEvent(err.Error())
		return nil, err
	}

	return ack{time: sent}, nil
}
func (s *stresser) run(ctx context.Context) {
	defer s.wg.Done()

	for {
		if err := s.rateLimiter.Wait(ctx); err == context.Canceled || ctx.Err() == context.Canceled {
			return
		}
		s.mu.Lock()
		s.success++
		s.mu.Unlock()
	}
}
func slowProcess(ctx context.Context) error {
	for i := 0; i < 10; i++ {
		log.Println("doing something...", i)
		select {
		case <-time.After(1 * time.Second):
		case <-ctx.Done():
			log.Println("slowProcess done.", i)
			return ctx.Err()
		}
	}
	log.Println("something is done")
	return nil
}
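// Minimal usage sketch for slowProcess above: cancel it after three seconds.
// runSlowProcess and the timeout value are assumptions for illustration.
func runSlowProcess() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if err := slowProcess(ctx); err != nil {
		log.Println("stopped early:", err)
	}
}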
func backchannel(ctx context.Context, conn net.Conn) error {
	defer trace.End(trace.Begin("establish tether backchannel"))

	// HACK: currently RawConn doesn't implement timeouts, so throttle the spinning.
	// It does implement the Timeout methods so the intermediary code can be written
	// to support it, but they are stub implementations in the rawconn impl.

	// This needs to tick *faster* than the ticker in connection.go on the
	// portlayer side. The PL sends the first syn and if this isn't waiting,
	// alignment will take a few rounds (or it may never happen).
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	// We run this in a separate goroutine because HandshakeServer
	// calls a Read on rawconn, which is a blocking call that causes
	// the caller to block as well, so this is the only way to cancel.
	// Calling Close() will unblock us, and on the next tick we will
	// return ctx.Err().
	go func() {
		select {
		case <-ctx.Done():
			conn.Close()
		}
	}()

	for {
		select {
		case <-ticker.C:
			if ctx.Err() != nil {
				return ctx.Err()
			}
			deadline, ok := ctx.Deadline()
			if ok {
				conn.SetReadDeadline(deadline)
			}

			err := serial.HandshakeServer(conn)
			if err == nil {
				conn.SetReadDeadline(time.Time{})
				return nil
			}

			switch et := err.(type) {
			case *serial.HandshakeError:
				log.Debugf("HandshakeServer: %v", et)
			default:
				log.Errorf("HandshakeServer: %v", err)
			}
		}
	}
}