// exchange sends a query on the connection and hopes for a response.
func exchange(ctx context.Context, server, name string, qtype uint16) (*dnsMsg, error) {
	d := testHookDNSDialer()
	out := dnsMsg{
		dnsMsgHdr: dnsMsgHdr{
			recursion_desired: true,
		},
		question: []dnsQuestion{
			{name, qtype, dnsClassINET},
		},
	}
	for _, network := range []string{"udp", "tcp"} {
		c, err := d.dialDNS(ctx, network, server)
		if err != nil {
			return nil, err
		}
		defer c.Close()
		if d, ok := ctx.Deadline(); ok && !d.IsZero() {
			c.SetDeadline(d)
		}
		out.id = uint16(rand.Int()) ^ uint16(time.Now().UnixNano())
		in, err := c.dnsRoundTrip(&out)
		if err != nil {
			return nil, mapErr(err)
		}
		if in.truncated { // see RFC 5966
			continue
		}
		return in, nil
	}
	return nil, errors.New("no answer from DNS server")
}
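// The ctx-deadline plumbing in exchange generalizes to any net.Conn: copy
// the context's deadline onto the connection before blocking I/O. A minimal
// sketch under that assumption (applyCtxDeadline is our name, not from the
// source above):
func applyCtxDeadline(ctx context.Context, c net.Conn) error {
	if d, ok := ctx.Deadline(); ok && !d.IsZero() {
		return c.SetDeadline(d) // reads and writes now fail once d passes
	}
	return nil
}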
func fetchNodes(ctx context.Context, ds DAGService, in <-chan []*cid.Cid, out chan<- *NodeOption) {
	var wg sync.WaitGroup
	defer func() {
		// wait for all 'get' calls to complete so we don't accidentally send
		// on a closed channel
		wg.Wait()
		close(out)
	}()

	get := func(ks []*cid.Cid) {
		defer wg.Done()
		nodes := ds.GetMany(ctx, ks)
		for opt := range nodes {
			select {
			case out <- opt:
			case <-ctx.Done():
				return
			}
		}
	}

	for ks := range in {
		wg.Add(1)
		go get(ks)
	}
}
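// A hypothetical caller sketch for fetchNodes: feed batches of CIDs on one
// channel, then drain results until fetchNodes closes out. Names here
// (fetchAll, batches) are illustrative, not from the source. The deferred
// close(in) runs even on cancellation, which lets fetchNodes unwind and
// close out so the range below terminates.
func fetchAll(ctx context.Context, ds DAGService, batches [][]*cid.Cid) []*NodeOption {
	in := make(chan []*cid.Cid)
	out := make(chan *NodeOption)
	go fetchNodes(ctx, ds, in, out)
	go func() {
		defer close(in) // closing in lets fetchNodes drain and close out
		for _, b := range batches {
			select {
			case in <- b:
			case <-ctx.Done():
				return
			}
		}
	}()
	var res []*NodeOption
	for opt := range out {
		res = append(res, opt)
	}
	return res
}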
// FindProvidersAsync returns a channel of providers for the given key.
func (bsnet *impl) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) <-chan peer.ID {
	// Since routing queries are expensive, give bitswap the peers to which we
	// have open connections. Note that this may cause issues if bitswap starts
	// precisely tracking which peers provide certain keys. This optimization
	// would be misleading. In the long run, this may not be the most
	// appropriate place for this optimization, but it won't cause any harm in
	// the short term.
	connectedPeers := bsnet.host.Network().Peers()
	out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers
	for _, id := range connectedPeers {
		if id == bsnet.host.ID() {
			continue // ignore self as provider
		}
		out <- id
	}

	go func() {
		defer close(out)
		providers := bsnet.routing.FindProvidersAsync(ctx, k, max)
		for info := range providers {
			if info.ID == bsnet.host.ID() {
				continue // ignore self as provider
			}
			bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, pstore.TempAddrTTL)
			select {
			case <-ctx.Done():
				return
			case out <- info.ID:
			}
		}
	}()
	return out
}
func FromContext(ctx context.Context) *App {
	app, ok := ctx.Value(ctxKey).(*App)
	if !ok {
		panic(kerr.New("EJRTLPWCKH", "No app in ctx").Error())
	}
	return app
}
func RequestContextFromContext(ctx context.Context) *RequestContext {
	val := ctx.Value(requestContextCtxKey)
	if val == nil {
		return nil
	}
	return val.(*RequestContext)
}
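// The accessors above all follow the standard unexported-key pattern for
// context values. A self-contained sketch of the full round trip, with an
// illustrative key and helpers that are not part of the source:
type requestIDKey struct{}

func WithRequestID(ctx context.Context, id string) context.Context {
	return context.WithValue(ctx, requestIDKey{}, id)
}

func RequestIDFromContext(ctx context.Context) (string, bool) {
	// the type assertion also fails safely when the key is absent
	id, ok := ctx.Value(requestIDKey{}).(string)
	return id, ok
}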
func (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) {
	// A nil context, or one that can never be cancelled (Done() == nil, as
	// for context.Background()), adds nothing, so skip the request copy.
	if ctx == nil || ctx.Done() == nil {
		return c.Client.Do(req)
	}
	return c.Client.Do(req.WithContext(ctx))
}
func (bs *Bitswap) provideCollector(ctx context.Context) {
	defer close(bs.provideKeys)
	var toProvide []*cid.Cid
	var nextKey *cid.Cid
	var keysOut chan *cid.Cid

	for {
		select {
		case blkey, ok := <-bs.newBlocks:
			if !ok {
				log.Debug("newBlocks channel closed")
				return
			}
			if keysOut == nil {
				nextKey = blkey
				keysOut = bs.provideKeys
			} else {
				toProvide = append(toProvide, blkey)
			}
		case keysOut <- nextKey:
			if len(toProvide) > 0 {
				nextKey = toProvide[0]
				toProvide = toProvide[1:]
			} else {
				keysOut = nil
			}
		case <-ctx.Done():
			return
		}
	}
}
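// provideCollector relies on the fact that sending on a nil channel blocks
// forever, which disables that select case until there is something to send.
// A stripped-down sketch of the same buffering pattern (pump and its names
// are ours, not from the source):
func pump(ctx context.Context, in <-chan int, out chan<- int) {
	var queue []int
	var next int
	var sendCh chan<- int // nil until we have something to send
	for {
		select {
		case v, ok := <-in:
			if !ok {
				return
			}
			if sendCh == nil {
				next, sendCh = v, out // enable the send case
			} else {
				queue = append(queue, v)
			}
		case sendCh <- next:
			if len(queue) > 0 {
				next, queue = queue[0], queue[1:]
			} else {
				sendCh = nil // nothing buffered; disable the send case
			}
		case <-ctx.Done():
			return
		}
	}
}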
// bloomCached returns a Blockstore that caches Has requests using a Bloom
// filter. bloomSize is the size of the Bloom filter in bytes.
func bloomCached(bs Blockstore, ctx context.Context, bloomSize, hashCount int) (*bloomcache, error) {
	bl, err := bloom.New(float64(bloomSize), float64(hashCount))
	if err != nil {
		return nil, err
	}
	bc := &bloomcache{blockstore: bs, bloom: bl}
	bc.hits = metrics.NewCtx(ctx, "bloom.hits_total",
		"Number of cache hits in bloom cache").Counter()
	bc.total = metrics.NewCtx(ctx, "bloom_total",
		"Total number of requests to bloom cache").Counter()

	bc.Invalidate()
	go bc.Rebuild(ctx)
	if metrics.Active() {
		go func() {
			fill := metrics.NewCtx(ctx, "bloom_fill_ratio",
				"Ratio of bloom filter fullness (updated once a minute)").Gauge()

			<-bc.rebuildChan
			t := time.NewTicker(1 * time.Minute)
			for {
				select {
				case <-ctx.Done():
					t.Stop()
					return
				case <-t.C:
					fill.Set(bc.bloom.FillRatio())
				}
			}
		}()
	}
	return bc, nil
}
func (b *bloomcache) Rebuild(ctx context.Context) {
	evt := log.EventBegin(ctx, "bloomcache.Rebuild")
	defer evt.Done()

	ch, err := b.blockstore.AllKeysChan(ctx)
	if err != nil {
		log.Errorf("AllKeysChan failed in bloomcache rebuild with: %v", err)
		return
	}
	finish := false
	for !finish {
		select {
		case key, ok := <-ch:
			if ok {
				b.bloom.AddTS(key.Bytes()) // Use binary key, the more compact the better
			} else {
				finish = true
			}
		case <-ctx.Done():
			log.Warning("Cache rebuild closed by context finishing.")
			return
		}
	}
	close(b.rebuildChan)
	atomic.StoreInt32(&b.active, 1)
}
func (e *offlineExchange) GetBlocks(ctx context.Context, ks []*cid.Cid) (<-chan blocks.Block, error) {
	out := make(chan blocks.Block)
	go func() {
		defer close(out)
		var misses []*cid.Cid
		for _, k := range ks {
			hit, err := e.bs.Get(k)
			if err != nil {
				misses = append(misses, k)
				// a long line of misses should abort when context is cancelled.
				select {
				// TODO case send misses down channel
				case <-ctx.Done():
					return
				default:
					continue
				}
			}
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out, nil
}
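// A hedged consumer sketch for GetBlocks: the exchange closes the channel
// when it is done or cancelled, so a plain range loop collects whatever
// arrived, and checking ctx.Err afterwards distinguishes the two cases.
// (collectBlocks is our name, not from the source.)
func collectBlocks(ctx context.Context, e *offlineExchange, ks []*cid.Cid) ([]blocks.Block, error) {
	ch, err := e.GetBlocks(ctx, ks)
	if err != nil {
		return nil, err
	}
	var got []blocks.Block
	for b := range ch {
		got = append(got, b)
	}
	return got, ctx.Err()
}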
func (m *mdnsService) pollForEntries(ctx context.Context) {
	ticker := time.NewTicker(m.interval)
	defer ticker.Stop() // release the ticker when the service halts
	for {
		select {
		case <-ticker.C:
			entriesCh := make(chan *mdns.ServiceEntry, 16)
			go func() {
				for entry := range entriesCh {
					m.handleEntry(entry)
				}
			}()

			log.Debug("starting mdns query")
			qp := &mdns.QueryParam{
				Domain:  "local",
				Entries: entriesCh,
				Service: ServiceTag,
				Timeout: time.Second * 5,
			}

			err := mdns.Query(qp)
			if err != nil {
				log.Error("mdns lookup error: ", err)
			}
			close(entriesCh)
			log.Debug("mdns query complete")
		case <-ctx.Done():
			log.Debug("mdns service halting")
			return
		}
	}
}
// WaitForResult wraps govmomi operations, waits for the operation to
// complete, and returns the operation result.
// Sample usage:
//    info, err := WaitForResult(ctx, func(ctx context.Context) (Task, error) {
//        return vm, vm.Reconfigure(ctx, config)
//    })
func WaitForResult(ctx context.Context, f func(context.Context) (Task, error)) (*types.TaskInfo, error) {
	var err error
	var info *types.TaskInfo
	var backoffFactor int64 = 1

	for {
		var t Task
		if t, err = f(ctx); err == nil {
			info, err = t.WaitForResult(ctx, nil)
			if err == nil {
				return info, err
			}
		}

		if !isTaskInProgress(err) {
			return info, err
		}

		sleepValue := time.Duration(backoffFactor * (rand.Int63n(100) + int64(50)))
		select {
		case <-time.After(sleepValue * time.Millisecond):
			backoffFactor *= 2
			if backoffFactor > maxBackoffFactor {
				backoffFactor = maxBackoffFactor
			}
		case <-ctx.Done():
			return info, ctx.Err()
		}

		log.Warnf("retrying task")
	}
}
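// The retry loop above combines capped exponential backoff, jitter, and
// context cancellation. The same skeleton in isolation, with every name
// ours rather than the source's:
func retryWithBackoff(ctx context.Context, op func() error, retryable func(error) bool) error {
	backoff := int64(1)
	const maxBackoff = 16
	for {
		err := op()
		if err == nil || !retryable(err) {
			return err
		}
		// 50-150ms of jittered delay, doubled (up to the cap) on each retry
		delay := time.Duration(backoff*(rand.Int63n(100)+50)) * time.Millisecond
		select {
		case <-time.After(delay):
			if backoff *= 2; backoff > maxBackoff {
				backoff = maxBackoff
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}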
// MountLabel performs a mount with the label and target being absolute paths
func (t *BaseOperations) MountLabel(ctx context.Context, label, target string) error {
	defer trace.End(trace.Begin(fmt.Sprintf("Mounting %s on %s", label, target)))

	if err := os.MkdirAll(target, 0600); err != nil {
		return fmt.Errorf("unable to create mount point %s: %s", target, err)
	}

	// convert the label to a filesystem path
	label = "/dev/disk/by-label/" + label

	// do..while !timedout
	var timeout bool
	for timeout = false; !timeout; {
		_, err := os.Stat(label)
		if err == nil || !os.IsNotExist(err) {
			break
		}
		// brief pause so we poll for the device node instead of busy-spinning
		time.Sleep(10 * time.Millisecond)

		deadline, ok := ctx.Deadline()
		timeout = ok && time.Now().After(deadline)
	}
	if timeout {
		detail := fmt.Sprintf("timed out waiting for %s to appear", label)
		return errors.New(detail)
	}

	if err := Sys.Syscall.Mount(label, target, "ext4", syscall.MS_NOATIME, ""); err != nil {
		detail := fmt.Sprintf("mounting %s on %s failed: %s", label, target, err)
		return errors.New(detail)
	}

	return nil
}
func (d *Diagnostics) getDiagnosticFromPeers(ctx context.Context, peers map[peer.ID]int, pmes *pb.Message) (<-chan *DiagInfo, error) {
	respdata := make(chan *DiagInfo)
	wg := sync.WaitGroup{}
	for p := range peers {
		wg.Add(1)
		log.Debugf("Sending diagnostic request to peer: %s", p)
		go func(p peer.ID) {
			defer wg.Done()
			out, err := d.getDiagnosticFromPeer(ctx, p, pmes)
			if err != nil {
				log.Debugf("Error getting diagnostic from %s: %s", p, err)
				return
			}
			for d := range out {
				select {
				case respdata <- d:
				case <-ctx.Done():
					return
				}
			}
		}(p)
	}

	go func() {
		wg.Wait()
		close(respdata)
	}()

	return respdata, nil
}
// FromContext returns the Cmd value stored in ctx, if any.
func FromContext(ctx context.Context) *Cmd {
	e, ok := ctx.Value(cmdKey).(*Cmd)
	if !ok {
		panic(kerr.New("OQVLBQFQJW", "No cmd in ctx").Error())
	}
	return e
}
func GetPoster(ctx context.Context) Poster {
	poster := ctx.Value(posterKey{})
	if poster == nil {
		logger := log.G(ctx)
		tx, _ := getTx(ctx)
		topic := getTopic(ctx)

		// likely means we don't have a configured event system. Just return
		// the default poster, which merely logs events.
		return posterFunc(func(ctx context.Context, event Event) {
			fields := logrus.Fields{"event": event}

			if topic != "" {
				fields["topic"] = topic
			}

			if tx != nil {
				fields["tx.id"] = tx.id
				if tx.parent != nil {
					fields["tx.parent.id"] = tx.parent.id
				}
			}

			logger.WithFields(fields).Info("event posted")
		})
	}

	return poster.(Poster)
}
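// GetPoster falls back to a usable default instead of panicking, unlike the
// stricter FromContext accessors above. A minimal sketch of the same
// value-or-default shape (loggerKey and LoggerFromContext are illustrative
// names of ours):
type loggerKey struct{}

func LoggerFromContext(ctx context.Context) *logrus.Logger {
	if l, ok := ctx.Value(loggerKey{}).(*logrus.Logger); ok {
		return l
	}
	return logrus.StandardLogger() // safe default: callers never get nil
}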
func FromContextOrNil(ctx context.Context) *Cmd {
	e, ok := ctx.Value(cmdKey).(*Cmd)
	if ok {
		return e
	}
	return nil
}
func absFilepath(ctx context.Context, filename string) <-chan string {
	ch := make(chan string)
	go func() {
		var err error
		defer func() {
			if err != nil {
				// no result: close so a receiver unblocks with the zero
				// value rather than waiting for the context to be cancelled
				close(ch)
				return
			}
			select {
			case ch <- filename:
			case <-ctx.Done():
			}
			close(ch)
		}()

		if !filepath.IsAbs(filename) {
			err = errors.New("filename is not abs")
			return
		}
		fi, err := os.Lstat(filename)
		if err != nil {
			return
		}
		if fi.IsDir() {
			err = ErrNotFile
			return
		}
	}()
	return ch
}
func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
	idmap := logging.LoggableMap{"ID": id}
	defer log.Info("bitswap task worker shutting down...")
	for {
		log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap)
		select {
		case nextEnvelope := <-bs.engine.Outbox():
			select {
			case envelope, ok := <-nextEnvelope:
				if !ok {
					continue
				}
				log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{
					"ID":     id,
					"Target": envelope.Peer.Pretty(),
					"Block":  envelope.Block.Cid().String(),
				})
				bs.wm.SendBlock(ctx, envelope)
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}
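// taskWorker's nested select is the usual way to consume a channel of
// channels without ever blocking past cancellation: each receive gets its
// own ctx.Done() case. A minimal sketch (drain and the int payload are
// ours):
func drain(ctx context.Context, outbox <-chan <-chan int, handle func(int)) {
	for {
		select {
		case next := <-outbox:
			select {
			case v, ok := <-next:
				if !ok {
					continue // inner channel closed; wait for the next one
				}
				handle(v)
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}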
func relativeFilepath(ctx context.Context, base, filename string) <-chan string {
	ch := make(chan string)
	go func() {
		var err error
		defer func() {
			if err != nil {
				// no result: close so a receiver unblocks with the zero
				// value rather than waiting for the context to be cancelled
				close(ch)
				return
			}
			select {
			case ch <- filename:
			case <-ctx.Done():
			}
			close(ch)
		}()

		// resolve the relative path against base
		filename = filepath.Join(base, filename)
		fi, err := os.Lstat(filename)
		if err != nil {
			return
		}
		if fi.IsDir() {
			err = ErrNotFile
			return
		}
	}()
	return ch
}
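// With both helpers above, the channel either delivers one path or is
// closed empty, so a caller can rely on the two-value receive. A hedged
// usage sketch (resolveOne is our name):
func resolveOne(ctx context.Context, base, name string) (string, bool) {
	p, ok := <-relativeFilepath(ctx, base, name)
	return p, ok // ok is false when no usable file was found
}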
// dialSingle attempts to establish and returns a single connection to
// the destination address.
func dialSingle(ctx context.Context, dp *dialParam, ra Addr) (c Conn, err error) {
	trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace)
	if trace != nil {
		raStr := ra.String()
		if trace.ConnectStart != nil {
			trace.ConnectStart(dp.network, raStr)
		}
		if trace.ConnectDone != nil {
			defer func() { trace.ConnectDone(dp.network, raStr, err) }()
		}
	}
	la := dp.LocalAddr
	switch ra := ra.(type) {
	case *TCPAddr:
		la, _ := la.(*TCPAddr)
		c, err = dialTCP(ctx, dp.network, la, ra)
	case *UDPAddr:
		la, _ := la.(*UDPAddr)
		c, err = dialUDP(ctx, dp.network, la, ra)
	case *IPAddr:
		la, _ := la.(*IPAddr)
		c, err = dialIP(ctx, dp.network, la, ra)
	case *UnixAddr:
		la, _ := la.(*UnixAddr)
		c, err = dialUnix(ctx, dp.network, la, ra)
	default:
		return nil, &OpError{Op: "dial", Net: dp.network, Source: la, Addr: ra, Err: &AddrError{Err: "unexpected address type", Addr: dp.address}}
	}
	if err != nil {
		return nil, &OpError{Op: "dial", Net: dp.network, Source: la, Addr: ra, Err: err} // c is non-nil interface containing nil pointer
	}
	return c, nil
}
/*
Path returns the path that the Goji router uses to perform the PathPrefix
optimization. While this function does not distinguish between the absence
of a path and an empty path, Goji will automatically extract a path from
the request if none is present.

By convention, paths are stored in their escaped form (i.e., the value
returned by net/url.URL.EscapedPath, and not URL.Path) to ensure that
Patterns have as much discretion as possible (e.g., to behave differently
for '/' and '%2f').
*/
func Path(ctx context.Context) string {
	pi := ctx.Value(internal.Path)
	if pi == nil {
		return ""
	}
	return pi.(string)
}
func (mq *msgQueue) doWork(ctx context.Context) {
	if mq.sender == nil {
		err := mq.openSender(ctx)
		if err != nil {
			log.Infof("can't open message sender to peer %s: %s", mq.p, err)
			// TODO: can't connect, what now?
			return
		}
	}

	// grab outgoing message
	mq.outlk.Lock()
	wlm := mq.out
	if wlm == nil || wlm.Empty() {
		mq.outlk.Unlock()
		return
	}
	mq.out = nil
	mq.outlk.Unlock()

	// send wantlist updates
	for { // try to send this message until we fail.
		err := mq.sender.SendMsg(wlm)
		if err == nil {
			return
		}

		log.Infof("bitswap send error: %s", err)
		mq.sender.Close()
		mq.sender = nil

		select {
		case <-mq.done:
			return
		case <-ctx.Done():
			return
		case <-time.After(time.Millisecond * 100):
			// wait 100ms in case disconnect notifications are still propagating
			log.Warning("SendMsg errored but neither 'done' nor context.Done() were set")
		}

		err = mq.openSender(ctx)
		if err != nil {
			log.Errorf("couldn't open sender again after SendMsg(%s) failed: %s", mq.p, err)
			// TODO(why): what do we do now?
			// I think the *right* answer is to probably put the message we're
			// trying to send back, and then return to waiting for new work or
			// a disconnect.
			return
		}

		// TODO: Is this the same instance for the remote peer?
		// If it's not, we should resend our entire wantlist to them
		/*
			if mq.sender.InstanceID() != mq.lastSeenInstanceID {
				wlm = mq.getFullWantlistMessage()
			}
		*/
	}
}
func msgToStream(ctx context.Context, s inet.Stream, msg bsmsg.BitSwapMessage) error {
	deadline := time.Now().Add(sendMessageTimeout)
	if dl, ok := ctx.Deadline(); ok {
		deadline = dl
	}
	if err := s.SetWriteDeadline(deadline); err != nil {
		log.Warningf("error setting deadline: %s", err)
	}

	switch s.Protocol() {
	case ProtocolBitswap:
		if err := msg.ToNetV1(s); err != nil {
			log.Debugf("error: %s", err)
			return err
		}
	case ProtocolBitswapOne, ProtocolBitswapNoVers:
		if err := msg.ToNetV0(s); err != nil {
			log.Debugf("error: %s", err)
			return err
		}
	default:
		return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol())
	}

	if err := s.SetWriteDeadline(time.Time{}); err != nil {
		log.Warningf("error resetting deadline: %s", err)
	}
	return nil
}
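// Setting the write deadline from the context (with a fallback) and then
// clearing it, as msgToStream does, works for any net.Conn. A minimal
// sketch under that assumption (writeWithCtx is our name):
func writeWithCtx(ctx context.Context, c net.Conn, p []byte, fallback time.Duration) error {
	deadline := time.Now().Add(fallback)
	if dl, ok := ctx.Deadline(); ok {
		deadline = dl // prefer the caller's deadline when one is set
	}
	if err := c.SetWriteDeadline(deadline); err != nil {
		return err
	}
	defer c.SetWriteDeadline(time.Time{}) // clear so later writes aren't cut short
	_, err := c.Write(p)
	return err
}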
// UserFromContext returns a user from a context.Context if one is present.
func UserFromContext(ctx context.Context) *empire.User {
	u, ok := ctx.Value(userKey).(*empire.User)
	if !ok {
		panic("expected user to be authenticated")
	}
	return u
}
// ExtractUser extracts the user from context.
func ExtractUser(ctx context.Context) (*User, error) {
	u, ok := ctx.Value(userKey).(*User)
	if !ok {
		return nil, errors.New("user not found in context")
	}
	return u, nil
}
func FromContextOrNil(ctx context.Context) *App {
	e, ok := ctx.Value(ctxKey).(*App)
	if ok {
		return e
	}
	return nil
}
// ExtractTokenKey extracts the user's token from context.
func ExtractTokenKey(ctx context.Context) (string, error) {
	t, ok := ctx.Value(tokenKey).(string)
	if !ok {
		return "", errors.New("token not found in context")
	}
	return t, nil
}
// FromContext retrieves the request's xstats client from a given context if any.
// If no xstats is embedded in the context, a nop instance is returned so you can
// use it safely without having to test for its presence.
func FromContext(ctx context.Context) XStater {
	rc, ok := ctx.Value(xstatsKey).(XStater)
	if ok {
		return rc
	}
	return nop
}
func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *NodeOption {
	out := make(chan *NodeOption, len(keys))
	blocks := ds.Blocks.GetBlocks(ctx, keys)
	var count int

	go func() {
		defer close(out)
		for {
			select {
			case b, ok := <-blocks:
				if !ok {
					if count != len(keys) {
						out <- &NodeOption{Err: fmt.Errorf("failed to fetch all nodes")}
					}
					return
				}
				nd, err := decodeBlock(b)
				if err != nil {
					out <- &NodeOption{Err: err}
					return
				}
				out <- &NodeOption{Node: nd}
				count++
			case <-ctx.Done():
				out <- &NodeOption{Err: ctx.Err()}
				return
			}
		}
	}()
	return out
}
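// Since GetMany reports per-key errors in-band, a caller typically stops at
// the first error. A hedged sketch, assuming NodeOption carries the Node
// (of the IPLD node.Node type) and Err fields used above; nodesOrErr is our
// name, not from the source:
func nodesOrErr(ctx context.Context, ds *dagService, keys []*cid.Cid) ([]node.Node, error) {
	var nds []node.Node
	for opt := range ds.GetMany(ctx, keys) {
		if opt.Err != nil {
			return nil, opt.Err
		}
		nds = append(nds, opt.Node)
	}
	return nds, nil
}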