func (bs *Bitswap) provideCollector(ctx context.Context) {
	defer close(bs.provideKeys)
	var toProvide []key.Key
	var nextKey key.Key
	var keysOut chan key.Key

	for {
		select {
		case blk, ok := <-bs.newBlocks:
			if !ok {
				log.Debug("newBlocks channel closed")
				return
			}
			if keysOut == nil {
				nextKey = blk.Key()
				keysOut = bs.provideKeys
			} else {
				toProvide = append(toProvide, blk.Key())
			}
		case keysOut <- nextKey:
			if len(toProvide) > 0 {
				nextKey = toProvide[0]
				toProvide = toProvide[1:]
			} else {
				keysOut = nil
			}
		case <-ctx.Done():
			return
		}
	}
}

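// A minimal, self-contained sketch of the nil-channel gating pattern used by
// provideCollector above. All names here (forward, in, dst, pending) are
// hypothetical; the point is only that a send on a nil channel blocks
// forever, so setting the output channel to nil disables that select case
// until there is something to send.
func forward(ctx context.Context, in <-chan int, dst chan int) {
	defer close(dst)
	var pending []int
	var next int
	var out chan int // nil: the send case below is disabled

	for {
		select {
		case v, ok := <-in:
			if !ok {
				return
			}
			if out == nil {
				next = v
				out = dst // something to send: enable the send case
			} else {
				pending = append(pending, v)
			}
		case out <- next:
			if len(pending) > 0 {
				next, pending = pending[0], pending[1:]
			} else {
				out = nil // nothing left to send: disable the case again
			}
		case <-ctx.Done():
			return
		}
	}
}
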
// FindProvidersAsync returns a channel of providers for the given key.
func (bsnet *impl) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.ID {

	// Since routing queries are expensive, give bitswap the peers to which we
	// have open connections. Note that this may cause issues if bitswap starts
	// precisely tracking which peers provide certain keys. This optimization
	// would be misleading. In the long run, this may not be the most
	// appropriate place for this optimization, but it won't cause any harm in
	// the short term.
	connectedPeers := bsnet.host.Network().Peers()
	out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers
	for _, id := range connectedPeers {
		if id == bsnet.host.ID() {
			continue // ignore self as provider
		}
		out <- id
	}

	go func() {
		defer close(out)
		providers := bsnet.routing.FindProvidersAsync(ctx, k, max)
		for info := range providers {
			if info.ID == bsnet.host.ID() {
				continue // ignore self as provider
			}
			bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peer.TempAddrTTL)
			select {
			case <-ctx.Done():
				return
			case out <- info.ID:
			}
		}
	}()
	return out
}

// providerConnector connects to providers for the given keys.
func (bs *Bitswap) providerConnector(parent context.Context) {
	defer log.Info("bitswap client worker shutting down...")

	for {
		log.Event(parent, "Bitswap.ProviderConnector.Loop")
		select {
		case req := <-bs.findKeys:
			keys := req.keys
			if len(keys) == 0 {
				log.Warning("Received batch request for zero blocks")
				continue
			}
			log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys})

			// NB: Optimization. Assumes that providers of key[0] are likely to
			// be able to provide for all keys. This currently holds true in
			// almost every situation. Later, this assumption may not hold as true.
			child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
			providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
			for p := range providers {
				go bs.network.ConnectTo(req.ctx, p)
			}
			cancel()

		case <-parent.Done():
			return
		}
	}
}

func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
		}

		outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()
		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
			return
		}

		var done bool
		var total time.Duration
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
				break
			case t, ok := <-pings:
				if !ok {
					done = true
					break
				}
				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				time.Sleep(time.Second)
			}
		}

		averagems := total.Seconds() * 1000 / float64(numPings)
		outChan <- &PingResult{
			Text: fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}

// Subscribe returns a channel of blocks for the given |keys|. The returned
// channel is closed if the |ctx| times out or is cancelled, or after sending
// len(keys) blocks.
func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan *blocks.Block {
	blocksCh := make(chan *blocks.Block, len(keys))
	valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking
	if len(keys) == 0 {
		close(blocksCh)
		return blocksCh
	}
	ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...)
	go func() {
		defer close(blocksCh)
		defer ps.wrapped.Unsub(valuesCh) // with a len(keys) buffer, this is an optimization
		for {
			select {
			case <-ctx.Done():
				return
			case val, ok := <-valuesCh:
				if !ok {
					return
				}
				block, ok := val.(*blocks.Block)
				if !ok {
					return
				}

				select {
				case <-ctx.Done():
					return
				case blocksCh <- block: // continue
				}
			}
		}
	}()

	return blocksCh
}

// GetBlocks gets a list of blocks asynchronously and returns them through
// the returned channel.
// NB: No guarantees are made about order.
func (s *BlockService) GetBlocks(ctx context.Context, ks []key.Key) <-chan *blocks.Block {
	out := make(chan *blocks.Block, 0)
	go func() {
		defer close(out)
		var misses []key.Key
		for _, k := range ks {
			hit, err := s.Blockstore.Get(k)
			if err != nil {
				misses = append(misses, k)
				continue
			}
			log.Debug("Blockservice: Got data in datastore.")
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}

		rblocks, err := s.Exchange.GetBlocks(ctx, misses)
		if err != nil {
			log.Debugf("Error with GetBlocks: %s", err)
			return
		}

		for b := range rblocks {
			select {
			case out <- b:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}

// Run is the main republisher loop.
func (np *Republisher) Run(ctx context.Context) {
	for {
		select {
		case <-np.Publish:
			quick := time.After(np.TimeoutShort)
			longer := time.After(np.TimeoutLong)

		wait:
			select {
			case <-ctx.Done():
				return
			case <-np.Publish:
				quick = time.After(np.TimeoutShort)
				goto wait
			case <-quick:
			case <-longer:
			}

			log.Info("Publishing Changes!")
			err := np.root.Publish(ctx)
			if err != nil {
				log.Errorf("republishRoot error: %s", err)
			}

		case <-ctx.Done():
			return
		}
	}
}

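// A minimal, self-contained sketch of the debounce pattern used by
// Republisher.Run above. The names debounce, events, short, long, and fire
// are hypothetical. Each new event restarts the short timer, while the long
// timer caps how far a steady stream of events can delay the action.
func debounce(ctx context.Context, events <-chan struct{}, short, long time.Duration, fire func()) {
	for {
		select {
		case <-events:
			quick := time.After(short)
			deadline := time.After(long)
		wait:
			select {
			case <-ctx.Done():
				return
			case <-events:
				quick = time.After(short) // another event: restart the short timer
				goto wait
			case <-quick: // quiet for `short`: act now
			case <-deadline: // events kept arriving for `long`: act anyway
			}
			fire()
		case <-ctx.Done():
			return
		}
	}
}
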
func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	broadcastSignal := time.NewTicker(rebroadcastDelay.Get())
	defer broadcastSignal.Stop()

	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()

	for {
		log.Event(ctx, "Bitswap.Rebroadcast.idle")
		select {
		case <-tick.C:
			n := bs.wm.wl.Len()
			if n > 0 {
				log.Debug(n, "keys in bitswap wantlist")
			}
		case <-broadcastSignal.C: // resend unfulfilled wantlist keys
			log.Event(ctx, "Bitswap.Rebroadcast.active")
			entries := bs.wm.wl.Entries()
			if len(entries) > 0 {
				bs.connectToProviders(ctx, entries)
			}
		case <-parent.Done():
			return
		}
	}
}

func (e *offlineExchange) GetBlocks(ctx context.Context, ks []key.Key) (<-chan *blocks.Block, error) {
	out := make(chan *blocks.Block, 0)
	go func() {
		defer close(out)
		var misses []key.Key
		for _, k := range ks {
			hit, err := e.bs.Get(k)
			if err != nil {
				misses = append(misses, k)
				// a long line of misses should abort when context is cancelled.
				select {
				// TODO case send misses down channel
				case <-ctx.Done():
					return
				default:
					continue
				}
			}
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out, nil
}

func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
	idmap := logging.LoggableMap{"ID": id}
	defer log.Info("bitswap task worker shutting down...")
	for {
		log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap)
		select {
		case nextEnvelope := <-bs.engine.Outbox():
			select {
			case envelope, ok := <-nextEnvelope:
				if !ok {
					continue
				}
				log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{
					"ID":     id,
					"Target": envelope.Peer.Pretty(),
					"Block":  envelope.Block.Multihash.B58String(),
				})
				bs.wm.SendBlock(ctx, envelope)
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}

func (ps *PingService) Ping(ctx context.Context, p peer.ID) (<-chan time.Duration, error) {
	s, err := ps.Host.NewStream(ID, p)
	if err != nil {
		return nil, err
	}

	out := make(chan time.Duration)
	go func() {
		defer close(out)
		for {
			select {
			case <-ctx.Done():
				return
			default:
				t, err := ping(s)
				if err != nil {
					log.Debugf("ping error: %s", err)
					return
				}

				select {
				case out <- t:
				case <-ctx.Done():
					return
				}
			}
		}
	}()

	return out, nil
}

// FetchGraph asynchronously fetches all nodes that are children of the given
// node, and returns a channel that may be waited upon for the fetch to complete.
func FetchGraph(ctx context.Context, root *Node, serv DAGService) chan struct{} {
	log.Warning("Untested.")

	var wg sync.WaitGroup
	done := make(chan struct{})

	for _, l := range root.Links {
		wg.Add(1)
		go func(lnk *Link) {

			// Signal child is done on way out
			defer wg.Done()

			// Bail out early if the context has already been cancelled.
			// (Without the default case, this select would block until
			// cancellation and the fetch would never run.)
			select {
			case <-ctx.Done():
				return
			default:
			}

			nd, err := lnk.GetNode(ctx, serv)
			if err != nil {
				log.Debug(err)
				return
			}

			// Wait for children to finish
			<-FetchGraph(ctx, nd, serv)
		}(l)
	}

	go func() {
		wg.Wait()
		done <- struct{}{}
	}()

	return done
}

func GarbageCollectAsync(n *core.IpfsNode, ctx context.Context) (<-chan *KeyRemoved, error) {
	keychan, err := n.Blockstore.AllKeysChan(ctx)
	if err != nil {
		return nil, err
	}

	output := make(chan *KeyRemoved)
	go func() {
		defer close(output)
		for {
			select {
			case k, ok := <-keychan:
				if !ok {
					return
				}
				if !n.Pinning.IsPinned(k) {
					err := n.Blockstore.DeleteBlock(k)
					if err != nil {
						log.Debugf("Error removing key from blockstore: %s", err)
						continue
					}
					select {
					case output <- &KeyRemoved{k}:
					case <-ctx.Done():
					}
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return output, nil
}

func (cq *ChanQueue) process(ctx context.Context) {
	// construct the channels here to be able to use them bidirectionally
	enqChan := make(chan peer.ID)
	deqChan := make(chan peer.ID)

	cq.EnqChan = enqChan
	cq.DeqChan = deqChan

	go func() {
		log.Debug("processing")
		defer log.Debug("closed")
		defer close(deqChan)

		var next peer.ID
		var item peer.ID
		var more bool

		for {
			if cq.Queue.Len() == 0 {
				// log.Debug("wait for enqueue")
				select {
				case next, more = <-enqChan:
					if !more {
						return
					}
					// log.Debug("got", next)
				case <-ctx.Done():
					return
				}
			} else {
				next = cq.Queue.Dequeue()
				// log.Debug("peek", next)
			}

			select {
			case item, more = <-enqChan:
				if !more {
					if cq.Queue.Len() > 0 {
						return // we're done done.
					}
					enqChan = nil // closed, so no use.
				}
				// log.Debug("got", item)
				cq.Queue.Enqueue(item)
				cq.Queue.Enqueue(next) // order may have changed.
				next = ""

			case deqChan <- next:
				// log.Debug("dequeued", next)
				next = ""

			case <-ctx.Done():
				return
			}
		}
	}()
}

// GetBlocks returns a channel where the caller may receive blocks that
// correspond to the provided |keys|. Returns an error if BitSwap is unable to
// begin this request within the deadline enforced by the context.
//
// NB: Your request remains open until the context expires. To conserve
// resources, provide a context with a reasonably short deadline (i.e. not one
// that lasts throughout the lifetime of the server).
func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) {
	select {
	case <-bs.process.Closing():
		return nil, errors.New("bitswap is closed")
	default:
	}
	promise := bs.notifications.Subscribe(ctx, keys...)

	for _, k := range keys {
		log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	}

	bs.wm.WantBlocks(keys)

	req := &blockRequest{
		keys: keys,
		ctx:  ctx,
	}
	select {
	case bs.findKeys <- req:
		return promise, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// LogError logs the error to the owner of the context.
//
// If this context was created with ContextWithErrorLog, then this method
// passes the error to the context creator over an unbuffered channel.
//
// If this context was created by other means, this method is a no-op.
func LogError(ctx context.Context, err error) {
	v := ctx.Value(errLogKey)
	errs, ok := v.(privateChanType)
	if !ok {
		return
	}
	errs <- err
}

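// A hedged sketch of how a context that LogError understands might be
// constructed, assuming privateChanType is defined as
// `type privateChanType chan error`. The WithErrorLog name and that
// assumption are illustrative only; they are not taken from the code above
// (which refers to ContextWithErrorLog). The creator keeps the receive side
// and drains it; LogError then delivers errors to that owner.
func WithErrorLog(ctx context.Context) (context.Context, <-chan error) {
	errs := make(privateChanType) // unbuffered: LogError blocks until the owner reads
	return context.WithValue(ctx, errLogKey, errs), errs
}
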
func (it *interrupt) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	// Signal that we are hanging, without blocking if no one is listening.
	select {
	case it.hanging <- struct{}{}:
	default:
	}

	// Block until the request is interrupted, then report the interruption.
	<-ctx.Done()
	return fuse.EINTR
}

// gatedDialAttempt is an attempt to dial a node. It is gated by the swarm's
// dial synchronization systems: dialsync and dialbackoff.
func (s *Swarm) gatedDialAttempt(ctx context.Context, p peer.ID) (*Conn, error) {
	var logdial = lgbl.Dial("swarm", s.LocalPeer(), p, nil, nil)
	defer log.EventBegin(ctx, "swarmDialAttemptSync", logdial).Done()

	// check if we already have an open connection first
	conn := s.bestConnectionToPeer(p)
	if conn != nil {
		return conn, nil
	}

	// check if there's an ongoing dial to this peer
	if ok, wait := s.dsync.Lock(p); ok {
		// ok, we have been charged to dial! let's do it.
		// if it succeeds, dial will add the conn to the swarm itself.

		defer log.EventBegin(ctx, "swarmDialAttemptStart", logdial).Done()
		ctxT, cancel := context.WithTimeout(ctx, s.dialT)
		conn, err := s.dial(ctxT, p)
		cancel()
		s.dsync.Unlock(p)
		log.Debugf("dial end %s", conn)
		if err != nil {
			log.Event(ctx, "swarmDialBackoffAdd", logdial)
			s.backf.AddBackoff(p) // let others know to backoff

			// ok, we failed. try again. (if loop is done, our error is output)
			return nil, fmt.Errorf("dial attempt failed: %s", err)
		}
		log.Event(ctx, "swarmDialBackoffClear", logdial)
		s.backf.Clear(p) // okay, no longer need to backoff
		return conn, nil

	} else {
		// we did not dial. we must wait for someone else to dial.

		// check whether we should backoff first...
		if s.backf.Backoff(p) {
			log.Event(ctx, "swarmDialBackoff", logdial)
			return nil, ErrDialBackoff
		}

		defer log.EventBegin(ctx, "swarmDialWait", logdial).Done()
		select {
		case <-wait: // wait for that other dial to finish.
			// see if it worked, OR we got an incoming dial in the meantime...
			conn := s.bestConnectionToPeer(p)
			if conn != nil {
				return conn, nil
			}
			return nil, ErrDialFailed
		case <-ctx.Done(): // or we may have to bail...
			return nil, ctx.Err()
		}
	}
}

func (pm *ProviderManager) AddProvider(ctx context.Context, k key.Key, val peer.ID) {
	prov := &addProv{
		k:   k,
		val: val,
	}
	select {
	case pm.newprovs <- prov:
	case <-ctx.Done():
	}
}

func MetadataFromContext(ctx context.Context) (Metadata, error) {
	value := ctx.Value(metadataKey)
	if value != nil {
		metadata, ok := value.(Metadata)
		if ok {
			return metadata, nil
		}
	}
	return nil, errors.New("context contains no metadata")
}

func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (<-chan *DiagInfo, error) {
	s, err := d.host.NewStream(ProtocolDiag, p)
	if err != nil {
		return nil, err
	}

	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)

	start := time.Now()

	if err := w.WriteMsg(pmes); err != nil {
		return nil, err
	}

	out := make(chan *DiagInfo)
	go func() {
		defer func() {
			close(out)
			s.Close()
			rtt := time.Since(start)
			log.Infof("diagnostic request took: %s", rtt.String())
		}()

		for {
			rpmes := new(pb.Message)
			if err := r.ReadMsg(rpmes); err != nil {
				log.Debugf("Error reading diagnostic from stream: %s", err)
				return
			}
			if rpmes == nil {
				log.Debug("Got no response back from diag request.")
				return
			}

			di, err := decodeDiagJson(rpmes.GetData())
			if err != nil {
				log.Debug(err)
				return
			}

			select {
			case out <- di:
			case <-ctx.Done():
				return
			}
		}
	}()

	return out, nil
}

// Fsync flushes the content in the file to disk, but does not
// update the dag tree internally.
func (fi *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
	errs := make(chan error, 1)
	go func() {
		errs <- fi.fi.Sync()
	}()
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

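// A minimal, self-contained sketch of the pattern used by Fsync above: run a
// blocking call in its own goroutine and race its result against ctx.Done().
// The helper name withContext is hypothetical. The result channel is buffered
// so the goroutine can always finish and be garbage collected even if the
// caller has already given up.
func withContext(ctx context.Context, blocking func() error) error {
	errs := make(chan error, 1) // buffered: the send below never blocks
	go func() {
		errs <- blocking()
	}()
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
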
func (mq *msgQueue) runQueue(ctx context.Context) {
	for {
		select {
		case <-mq.work: // there is work to be done
			mq.doWork(ctx)
		case <-mq.done:
			return
		case <-ctx.Done():
			return
		}
	}
}

// Run runs the query at hand. Pass in a list of peers to use first.
func (q *dhtQuery) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	runner := newQueryRunner(q)
	return runner.Run(ctx, peers)
}

func (np *nodePromise) Get(ctx context.Context) (*Node, error) {
	if np.cache != nil {
		return np.cache, nil
	}

	select {
	case blk := <-np.recv:
		np.cache = blk
	case <-np.ctx.Done():
		return nil, np.ctx.Err()
	case <-ctx.Done():
		return nil, ctx.Err()
	}
	return np.cache, nil
}

// WithDeadlineFraction returns a Context with a fraction of the
// original context's timeout. This is useful in sequential pipelines
// of work, where one might try options and fall back to others
// depending on the time available, or failure to respond. For example:
//
//   // getPicture returns a picture from our encrypted database
//   // we have a pipeline of multiple steps. we need to:
//   //   - get the data from a database
//   //   - decrypt it
//   //   - apply many transforms
//   //
//   // we **know** that each step takes increasingly more time.
//   // The transforms are much more expensive than decryption, and
//   // decryption is more expensive than the database lookup.
//   // If our database takes too long (i.e. >0.2 of available time),
//   // there's no use in continuing.
//   func getPicture(ctx context.Context, key string) ([]byte, error) {
//     // fractional timeout contexts to the rescue!
//
//     // try the database with 0.2 of remaining time.
//     ctx1, _ := ctxext.WithDeadlineFraction(ctx, 0.2)
//     val, err := db.Get(ctx1, key)
//     if err != nil {
//       return nil, err
//     }
//
//     // try decryption with 0.3 of remaining time.
//     ctx2, _ := ctxext.WithDeadlineFraction(ctx, 0.3)
//     if val, err = decryptor.Decrypt(ctx2, val); err != nil {
//       return nil, err
//     }
//
//     // try transforms with all remaining time. hopefully it's enough!
//     return transformer.Transform(ctx, val)
//   }
//
func WithDeadlineFraction(ctx context.Context, fraction float64) (
	context.Context, context.CancelFunc) {

	d, found := ctx.Deadline()
	if !found { // no deadline
		return context.WithCancel(ctx)
	}

	left := d.Sub(time.Now())
	if left < 0 { // already passed...
		return context.WithCancel(ctx)
	}

	left = time.Duration(float64(left) * fraction)
	return context.WithTimeout(ctx, left)
}

func PublishQueryEvent(ctx context.Context, ev *QueryEvent) {
	ich := ctx.Value(RoutingQueryKey)
	if ich == nil {
		return
	}

	ch, ok := ich.(chan<- *QueryEvent)
	if !ok {
		return
	}

	select {
	case ch <- ev:
	case <-ctx.Done():
	}
}

func (pm *ProviderManager) GetProviders(ctx context.Context, k key.Key) []peer.ID {
	gp := &getProv{
		k:    k,
		resp: make(chan []peer.ID, 1), // buffered to prevent sender from blocking
	}
	select {
	case <-ctx.Done():
		return nil
	case pm.getprovs <- gp:
	}
	select {
	case <-ctx.Done():
		return nil
	case peers := <-gp.resp:
		return peers
	}
}

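// A minimal, self-contained sketch of the request/response pattern used by
// AddProvider and GetProviders above. The names request, server, and Lookup
// are hypothetical; the point is that both the send of the request and the
// wait for the reply select on ctx.Done(), so the caller cannot get stuck if
// the serving goroutine is slow or has already exited.
type request struct {
	k    string
	resp chan []string // buffered (size 1) so the server never blocks when replying
}

type server struct {
	requests chan *request
}

func (s *server) Lookup(ctx context.Context, k string) []string {
	req := &request{k: k, resp: make(chan []string, 1)}

	select {
	case s.requests <- req:
	case <-ctx.Done():
		return nil
	}

	select {
	case vals := <-req.resp:
		return vals
	case <-ctx.Done():
		return nil
	}
}
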
func (c *client) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.PeerInfo {
	out := make(chan peer.PeerInfo)
	go func() {
		defer close(out)
		for i, p := range c.server.Providers(k) {
			if max <= i {
				return
			}
			select {
			case out <- p:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}

// GetNodes returns an array of 'NodeGetter' promises, with each corresponding
// to the key with the same index as the passed in keys.
func (ds *dagService) GetNodes(ctx context.Context, keys []key.Key) []NodeGetter {

	// Early out if no work to do
	if len(keys) == 0 {
		return nil
	}

	promises := make([]NodeGetter, len(keys))
	sendChans := make([]chan<- *Node, len(keys))
	for i := range keys {
		promises[i], sendChans[i] = newNodePromise(ctx)
	}

	dedupedKeys := dedupeKeys(keys)
	go func() {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		blkchan := ds.Blocks.GetBlocks(ctx, dedupedKeys)

		for count := 0; count < len(keys); {
			select {
			case blk, ok := <-blkchan:
				if !ok {
					return
				}

				nd, err := Decoded(blk.Data)
				if err != nil {
					// NB: can happen with improperly formatted input data
					log.Debug("Got back bad block!")
					return
				}

				is := FindLinks(keys, blk.Key(), 0)
				for _, i := range is {
					count++
					sendChans[i] <- nd
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return promises
}