func (ps *PingService) Ping(ctx context.Context, p peer.ID) (<-chan time.Duration, error) {
	s, err := ps.Host.NewStream(ctx, ID, p)
	if err != nil {
		return nil, err
	}

	out := make(chan time.Duration)
	go func() {
		defer close(out)
		for {
			select {
			case <-ctx.Done():
				return
			default:
				t, err := ping(s)
				if err != nil {
					log.Debugf("ping error: %s", err)
					return
				}

				ps.Host.Peerstore().RecordLatency(p, t)
				select {
				case out <- t:
				case <-ctx.Done():
					return
				}
			}
		}
	}()

	return out, nil
}
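// Hypothetical caller sketch, not part of the original source: it shows one
// way the channel returned by Ping above is typically consumed. The function
// name, the *PingService value and the 10-second bound are illustrative
// assumptions only.
func pingOnce(ctx context.Context, ps *PingService, p peer.ID) (time.Duration, error) {
	// Bound the whole exchange; cancelling this context stops the ping loop
	// running inside Ping's goroutine.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	results, err := ps.Ping(ctx, p)
	if err != nil {
		return 0, err
	}

	select {
	case rtt, ok := <-results:
		if !ok {
			return 0, fmt.Errorf("ping stream closed before a result arrived")
		}
		return rtt, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}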
func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
		}

		outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()

		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
			return
		}

		var done bool
		var total time.Duration
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
				break
			case t, ok := <-pings:
				if !ok {
					done = true
					break
				}

				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				time.Sleep(time.Second)
			}
		}

		averagems := total.Seconds() * 1000 / float64(numPings)
		outChan <- &PingResult{
			Text: fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}
func readMsgCtx(ctx context.Context, r msgio.Reader, p proto.Message) ([]byte, error) {
	var msg []byte

	// read in a goroutine so we can exit when our context is cancelled.
	done := make(chan error)
	go func() {
		var err error
		msg, err = r.ReadMsg()
		select {
		case done <- err:
		case <-ctx.Done():
		}
	}()

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case e := <-done:
		if e != nil {
			return nil, e
		}
	}

	return msg, proto.Unmarshal(msg, p)
}
func (b *bloomcache) Rebuild(ctx context.Context) {
	evt := log.EventBegin(ctx, "bloomcache.Rebuild")
	defer evt.Done()

	ch, err := b.blockstore.AllKeysChan(ctx)
	if err != nil {
		log.Errorf("AllKeysChan failed in bloomcache rebuild with: %v", err)
		return
	}
	finish := false
	for !finish {
		select {
		case key, ok := <-ch:
			if ok {
				b.bloom.AddTS([]byte(key)) // Use binary key, the more compact the better
			} else {
				finish = true
			}
		case <-ctx.Done():
			log.Warning("Cache rebuild closed by context finishing.")
			return
		}
	}
	close(b.rebuildChan)
	atomic.StoreInt32(&b.active, 1)
}
// GetBlocks returns a channel where the caller may receive blocks that
// correspond to the provided |keys|. Returns an error if BitSwap is unable to
// begin this request within the deadline enforced by the context.
//
// NB: Your request remains open until the context expires. To conserve
// resources, provide a context with a reasonably short deadline (i.e., not one
// that lasts throughout the lifetime of the server).
func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) {
	select {
	case <-bs.process.Closing():
		return nil, errors.New("bitswap is closed")
	default:
	}
	promise := bs.notifications.Subscribe(ctx, keys...)

	for _, k := range keys {
		log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	}

	bs.wm.WantBlocks(keys)

	req := &blockRequest{
		keys: keys,
		ctx:  ctx,
	}
	select {
	case bs.findKeys <- req:
		return promise, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// GetBlocks returns a channel where the caller may receive blocks that
// correspond to the provided |keys|. Returns an error if BitSwap is unable to
// begin this request within the deadline enforced by the context.
//
// NB: Your request remains open until the context expires. To conserve
// resources, provide a context with a reasonably short deadline (i.e., not one
// that lasts throughout the lifetime of the server).
func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks.Block, error) {
	if len(keys) == 0 {
		out := make(chan blocks.Block)
		close(out)
		return out, nil
	}

	select {
	case <-bs.process.Closing():
		return nil, errors.New("bitswap is closed")
	default:
	}
	promise := bs.notifications.Subscribe(ctx, keys...)

	for _, k := range keys {
		log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	}

	bs.wm.WantBlocks(ctx, keys)

	// NB: Optimization. Assumes that providers of key[0] are likely to
	// be able to provide for all keys. This currently holds true in most
	// every situation. Later, this assumption may not hold as true.
	req := &wantlist.Entry{
		Key: keys[0],
		Ctx: ctx,
	}
	select {
	case bs.findKeys <- req:
		return promise, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
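// Hypothetical caller sketch, not part of the original source: the doc comment
// on GetBlocks above asks for a context with a reasonably short deadline,
// because the request stays open until that context expires. The helper name
// and the one-minute deadline are illustrative assumptions.
func getBlocksWithDeadline(parent context.Context, bs *Bitswap, keys []key.Key) ([]blocks.Block, error) {
	ctx, cancel := context.WithTimeout(parent, time.Minute)
	defer cancel() // also releases the subscription backing the returned channel

	ch, err := bs.GetBlocks(ctx, keys)
	if err != nil {
		return nil, err
	}

	var got []blocks.Block
	for blk := range ch { // the channel is closed once all keys arrive or ctx expires
		got = append(got, blk)
	}
	if len(got) < len(keys) {
		return got, fmt.Errorf("fetched %d of %d blocks before the deadline", len(got), len(keys))
	}
	return got, nil
}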
func (cq *ChanQueue) process(ctx context.Context) {
	// construct the channels here to be able to use them bidirectionally
	enqChan := make(chan peer.ID)
	deqChan := make(chan peer.ID)

	cq.EnqChan = enqChan
	cq.DeqChan = deqChan

	go func() {
		log.Debug("processing")
		defer log.Debug("closed")
		defer close(deqChan)

		var next peer.ID
		var item peer.ID
		var more bool

		for {
			if cq.Queue.Len() == 0 {
				// log.Debug("wait for enqueue")
				select {
				case next, more = <-enqChan:
					if !more {
						return
					}
					// log.Debug("got", next)

				case <-ctx.Done():
					return
				}

			} else {
				next = cq.Queue.Dequeue()
				// log.Debug("peek", next)
			}

			select {
			case item, more = <-enqChan:
				if !more {
					if cq.Queue.Len() > 0 {
						return // we're done done.
					}
					enqChan = nil // closed, so no use.
				}
				// log.Debug("got", item)
				cq.Queue.Enqueue(item)
				cq.Queue.Enqueue(next) // order may have changed.
				next = ""

			case deqChan <- next:
				// log.Debug("dequeued", next)
				next = ""

			case <-ctx.Done():
				return
			}
		}
	}()
}
func fetchNodes(ctx context.Context, ds DAGService, in <-chan []key.Key, out chan<- *Node, errs chan<- error) {
	defer close(out)

	get := func(g NodeGetter) {
		nd, err := g.Get(ctx)
		if err != nil {
			select {
			case errs <- err:
			case <-ctx.Done():
			}
			return
		}

		select {
		case out <- nd:
		case <-ctx.Done():
			return
		}
	}

	for ks := range in {
		ng := ds.GetNodes(ctx, ks)
		for _, g := range ng {
			go get(g)
		}
	}
}
// FindProvidersAsync returns a channel of providers for the given key
func (bsnet *impl) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.ID {

	// Since routing queries are expensive, give bitswap the peers to which we
	// have open connections. Note that this may cause issues if bitswap starts
	// precisely tracking which peers provide certain keys. This optimization
	// would be misleading. In the long run, this may not be the most
	// appropriate place for this optimization, but it won't cause any harm in
	// the short term.
	connectedPeers := bsnet.host.Network().Peers()
	out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers
	for _, id := range connectedPeers {
		if id == bsnet.host.ID() {
			continue // ignore self as provider
		}
		out <- id
	}

	go func() {
		defer close(out)
		providers := bsnet.routing.FindProvidersAsync(ctx, k, max)
		for info := range providers {
			if info.ID == bsnet.host.ID() {
				continue // ignore self as provider
			}
			bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peer.TempAddrTTL)
			select {
			case <-ctx.Done():
				return
			case out <- info.ID:
			}
		}
	}()
	return out
}
func fetchNodes(ctx context.Context, ds DAGService, in <-chan []key.Key, out chan<- *NodeOption) {
	var wg sync.WaitGroup
	defer func() {
		// wait for all 'get' calls to complete so we don't accidentally send
		// on a closed channel
		wg.Wait()
		close(out)
	}()

	get := func(ks []key.Key) {
		defer wg.Done()
		nodes := ds.GetMany(ctx, ks)
		for opt := range nodes {
			select {
			case out <- opt:
			case <-ctx.Done():
				return
			}
		}
	}

	for ks := range in {
		wg.Add(1)
		go get(ks)
	}
}
func (bs *Bitswap) provideCollector(ctx context.Context) {
	defer close(bs.provideKeys)
	var toProvide []key.Key
	var nextKey key.Key
	var keysOut chan key.Key

	for {
		select {
		case blk, ok := <-bs.newBlocks:
			if !ok {
				log.Debug("newBlocks channel closed")
				return
			}
			if keysOut == nil {
				nextKey = blk.Key()
				keysOut = bs.provideKeys
			} else {
				toProvide = append(toProvide, blk.Key())
			}
		case keysOut <- nextKey:
			if len(toProvide) > 0 {
				nextKey = toProvide[0]
				toProvide = toProvide[1:]
			} else {
				keysOut = nil
			}
		case <-ctx.Done():
			return
		}
	}
}
// connects to providers for the given keys
func (bs *Bitswap) providerConnector(parent context.Context) {
	defer log.Info("bitswap client worker shutting down...")

	for {
		log.Event(parent, "Bitswap.ProviderConnector.Loop")
		select {
		case req := <-bs.findKeys:
			keys := req.keys
			if len(keys) == 0 {
				log.Warning("Received batch request for zero blocks")
				continue
			}
			log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys})

			// NB: Optimization. Assumes that providers of key[0] are likely to
			// be able to provide for all keys. This currently holds true in most
			// every situation. Later, this assumption may not hold as true.
			child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
			providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
			for p := range providers {
				go bs.network.ConnectTo(req.ctx, p)
			}
			cancel()

		case <-parent.Done():
			return
		}
	}
}
func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	broadcastSignal := time.NewTicker(rebroadcastDelay.Get())
	defer broadcastSignal.Stop()

	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()

	for {
		log.Event(ctx, "Bitswap.Rebroadcast.idle")
		select {
		case <-tick.C:
			n := bs.wm.wl.Len()
			if n > 0 {
				log.Debug(n, "keys in bitswap wantlist")
			}
		case <-broadcastSignal.C: // resend unfulfilled wantlist keys
			log.Event(ctx, "Bitswap.Rebroadcast.active")
			entries := bs.wm.wl.Entries()
			if len(entries) > 0 {
				bs.connectToProviders(ctx, entries)
			}
		case <-parent.Done():
			return
		}
	}
}
func (ds *dagService) GetMany(ctx context.Context, keys []key.Key) <-chan *NodeOption {
	out := make(chan *NodeOption, len(keys))
	blocks := ds.Blocks.GetBlocks(ctx, keys)
	var count int

	go func() {
		defer close(out)
		for {
			select {
			case b, ok := <-blocks:
				if !ok {
					if count != len(keys) {
						out <- &NodeOption{Err: fmt.Errorf("failed to fetch all nodes")}
					}
					return
				}
				nd, err := DecodeProtobuf(b.Data())
				if err != nil {
					out <- &NodeOption{Err: err}
					return
				}
				nd.cached = b.Key().ToMultihash()

				// buffered, no need to select
				out <- &NodeOption{Node: nd}
				count++

			case <-ctx.Done():
				out <- &NodeOption{Err: ctx.Err()}
				return
			}
		}
	}()
	return out
}
// Subscribe returns a channel of blocks for the given |keys|. The returned
// channel is closed if the |ctx| times out or is cancelled, or after sending
// len(keys) blocks.
func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan *blocks.Block {
	blocksCh := make(chan *blocks.Block, len(keys))
	valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking
	if len(keys) == 0 {
		close(blocksCh)
		return blocksCh
	}
	ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...)
	go func() {
		defer close(blocksCh)
		defer ps.wrapped.Unsub(valuesCh) // with a len(keys) buffer, this is an optimization
		for {
			select {
			case <-ctx.Done():
				return
			case val, ok := <-valuesCh:
				if !ok {
					return
				}
				block, ok := val.(*blocks.Block)
				if !ok {
					return
				}
				select {
				case <-ctx.Done():
					return
				case blocksCh <- block: // continue
				}
			}
		}
	}()

	return blocksCh
}
func (e *offlineExchange) GetBlocks(ctx context.Context, ks []key.Key) (<-chan *blocks.Block, error) {
	out := make(chan *blocks.Block, 0)
	go func() {
		defer close(out)
		var misses []key.Key
		for _, k := range ks {
			hit, err := e.bs.Get(k)
			if err != nil {
				misses = append(misses, k)
				// a long line of misses should abort when context is cancelled.
				select {
				// TODO case send misses down channel
				case <-ctx.Done():
					return
				default:
					continue
				}
			}
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out, nil
}
// GetBlocks gets a list of blocks asynchronously and delivers them on the
// returned channel.
// NB: No guarantees are made about order.
func (s *BlockService) GetBlocks(ctx context.Context, ks []key.Key) <-chan *blocks.Block {
	out := make(chan *blocks.Block, 0)
	go func() {
		defer close(out)
		var misses []key.Key
		for _, k := range ks {
			hit, err := s.Blockstore.Get(k)
			if err != nil {
				misses = append(misses, k)
				continue
			}
			log.Debug("Blockservice: Got data in datastore.")
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}

		rblocks, err := s.Exchange.GetBlocks(ctx, misses)
		if err != nil {
			log.Debugf("Error with GetBlocks: %s", err)
			return
		}

		for b := range rblocks {
			select {
			case out <- b:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}
func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
	idmap := logging.LoggableMap{"ID": id}
	defer log.Info("bitswap task worker shutting down...")
	for {
		log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap)
		select {
		case nextEnvelope := <-bs.engine.Outbox():
			select {
			case envelope, ok := <-nextEnvelope:
				if !ok {
					continue
				}
				log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{
					"ID":     id,
					"Target": envelope.Peer.Pretty(),
					"Block":  envelope.Block.Multihash.B58String(),
				})

				bs.wm.SendBlock(ctx, envelope)
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}
func waitOnErrChan(ctx context.Context, errs chan error) error {
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
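// Hypothetical usage sketch, not part of the original source: waitOnErrChan is
// the receiving half of the common "run a blocking call in a goroutine, then
// wait on its error or the context" pattern (Fsync below uses the same shape).
// The helper name and the blocking `op` parameter are illustrative assumptions.
func runWithContext(ctx context.Context, op func() error) error {
	// Buffer of one so the goroutine can always deliver its result and exit,
	// even if the context wins the race and the error is never read.
	errs := make(chan error, 1)
	go func() {
		errs <- op()
	}()
	return waitOnErrChan(ctx, errs)
}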
func (it *interrupt) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	select {
	case it.hanging <- struct{}{}:
	default:
	}
	<-ctx.Done()
	return fuse.EINTR
}
// GetNodes returns an array of 'NodeGetter' promises, each corresponding to
// the key at the same index in the passed-in keys.
func GetNodes(ctx context.Context, ds DAGService, keys []key.Key) []NodeGetter {

	// Early out if no work to do
	if len(keys) == 0 {
		return nil
	}

	promises := make([]NodeGetter, len(keys))
	for i := range keys {
		promises[i] = newNodePromise(ctx)
	}

	dedupedKeys := dedupeKeys(keys)
	go func() {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		nodechan := ds.GetMany(ctx, dedupedKeys)

		for count := 0; count < len(keys); {
			select {
			case opt, ok := <-nodechan:
				if !ok {
					for _, p := range promises {
						p.Fail(ErrNotFound)
					}
					return
				}

				if opt.Err != nil {
					for _, p := range promises {
						p.Fail(opt.Err)
					}
					return
				}

				nd := opt.Node

				k, err := nd.Key()
				if err != nil {
					log.Error("Failed to get node key: ", err)
					continue
				}

				is := FindLinks(keys, k, 0)
				for _, i := range is {
					count++
					promises[i].Send(nd)
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return promises
}
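// Hypothetical caller sketch, not part of the original source: the promises
// returned by GetNodes resolve lazily, so a caller usually blocks on each one
// with a context of its own choosing. The helper name is an illustrative
// assumption; NodeGetter.Get is used as in fetchNodes above.
func resolveAll(ctx context.Context, ds DAGService, keys []key.Key) ([]*Node, error) {
	promises := GetNodes(ctx, ds, keys)
	nodes := make([]*Node, 0, len(promises))
	for _, p := range promises {
		nd, err := p.Get(ctx) // returns promptly with ctx.Err() if ctx is cancelled
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, nd)
	}
	return nodes, nil
}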
func (pm *ProviderManager) AddProvider(ctx context.Context, k key.Key, val peer.ID) {
	prov := &addProv{
		k:   k,
		val: val,
	}
	select {
	case pm.newprovs <- prov:
	case <-ctx.Done():
	}
}
func EnumerateChildrenAsync(ctx context.Context, ds DAGService, root *Node, set key.KeySet) error {
	toprocess := make(chan []key.Key, 8)
	nodes := make(chan *NodeOption, 8)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	defer close(toprocess)

	go fetchNodes(ctx, ds, toprocess, nodes)

	nodes <- &NodeOption{Node: root}
	live := 1

	for {
		select {
		case opt, ok := <-nodes:
			if !ok {
				return nil
			}

			if opt.Err != nil {
				return opt.Err
			}

			nd := opt.Node

			// a node has been fetched
			live--

			var keys []key.Key
			for _, lnk := range nd.Links {
				k := key.Key(lnk.Hash)
				if !set.Has(k) {
					set.Add(k)
					live++
					keys = append(keys, k)
				}
			}

			if live == 0 {
				return nil
			}

			if len(keys) > 0 {
				select {
				case toprocess <- keys:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (<-chan *DiagInfo, error) {
	s, err := d.host.NewStream(ctx, ProtocolDiag, p)
	if err != nil {
		return nil, err
	}

	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)

	start := time.Now()

	if err := w.WriteMsg(pmes); err != nil {
		return nil, err
	}

	out := make(chan *DiagInfo)
	go func() {
		defer func() {
			close(out)
			s.Close()
			rtt := time.Since(start)
			log.Infof("diagnostic request took: %s", rtt.String())
		}()

		for {
			rpmes := new(pb.Message)
			if err := r.ReadMsg(rpmes); err != nil {
				log.Debugf("Error reading diagnostic from stream: %s", err)
				return
			}
			if rpmes == nil {
				log.Debug("got no response back from diag request")
				return
			}

			di, err := decodeDiagJson(rpmes.GetData())
			if err != nil {
				log.Debug(err)
				return
			}

			select {
			case out <- di:
			case <-ctx.Done():
				return
			}
		}
	}()

	return out, nil
}
// Fsync flushes the content in the file to disk, but does not
// update the dag tree internally
func (fi *FileNode) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
	errs := make(chan error, 1)
	go func() {
		errs <- fi.fi.Sync()
	}()
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
func (mq *msgQueue) runQueue(ctx context.Context) {
	for {
		select {
		case <-mq.work: // there is work to be done
			mq.doWork(ctx)
		case <-mq.done:
			return
		case <-ctx.Done():
			return
		}
	}
}
// Run runs the query at hand. Pass in a list of peers to use first.
func (q *dhtQuery) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	runner := newQueryRunner(q)
	return runner.Run(ctx, peers)
}
func (np *nodePromise) Get(ctx context.Context) (*Node, error) {
	if np.cache != nil {
		return np.cache, nil
	}

	select {
	case blk := <-np.recv:
		np.cache = blk
	case <-np.ctx.Done():
		return nil, np.ctx.Err()
	case <-ctx.Done():
		return nil, ctx.Err()
	}

	return np.cache, nil
}
func PublishQueryEvent(ctx context.Context, ev *QueryEvent) {
	ich := ctx.Value(RoutingQueryKey)
	if ich == nil {
		return
	}

	ch, ok := ich.(chan<- *QueryEvent)
	if !ok {
		return
	}

	select {
	case ch <- ev:
	case <-ctx.Done():
	}
}
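// Hypothetical wiring sketch, not part of the original source: PublishQueryEvent
// only delivers events when a send-only channel has been stored in the context
// under RoutingQueryKey, so a caller interested in query events might attach
// one like this. The helper name and buffer size are illustrative assumptions;
// the caller must keep draining the returned channel while the query runs.
func withQueryEvents(ctx context.Context) (context.Context, <-chan *QueryEvent) {
	events := make(chan *QueryEvent, 16)
	// Store the channel as chan<- *QueryEvent so the type assertion in
	// PublishQueryEvent succeeds.
	return context.WithValue(ctx, RoutingQueryKey, (chan<- *QueryEvent)(events)), events
}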
func (c *client) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.PeerInfo {
	out := make(chan peer.PeerInfo)
	go func() {
		defer close(out)
		for i, p := range c.server.Providers(k) {
			if max <= i {
				return
			}
			select {
			case out <- p:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}