// connects to providers for the given keys
func (bs *Bitswap) providerConnector(parent context.Context) {
	defer log.Info("bitswap client worker shutting down...")

	for {
		log.Event(parent, "Bitswap.ProviderConnector.Loop")
		select {
		case req := <-bs.findKeys:
			keys := req.keys
			if len(keys) == 0 {
				log.Warning("Received batch request for zero blocks")
				continue
			}
			log.Event(parent, "Bitswap.ProviderConnector.Work", eventlog.LoggableMap{"Keys": keys})

			// NB: Optimization. Assumes that providers of key[0] are likely to
			// be able to provide for all keys. This currently holds true in most
			// every situation. Later, this assumption may not hold as true.
			child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
			providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
			for p := range providers {
				go bs.network.ConnectTo(req.ctx, p)
			}
			cancel()

		case <-parent.Done():
			return
		}
	}
}
func (cq *ChanQueue) process(ctx context.Context) {
	// construct the channels here to be able to use them bidirectionally
	enqChan := make(chan peer.ID)
	deqChan := make(chan peer.ID)

	cq.EnqChan = enqChan
	cq.DeqChan = deqChan

	go func() {
		log.Debug("processing")
		defer log.Debug("closed")
		defer close(deqChan)

		var next peer.ID
		var item peer.ID
		var more bool

		for {
			if cq.Queue.Len() == 0 {
				// log.Debug("wait for enqueue")
				select {
				case next, more = <-enqChan:
					if !more {
						return
					}
					// log.Debug("got", next)

				case <-ctx.Done():
					return
				}
			} else {
				next = cq.Queue.Dequeue()
				// log.Debug("peek", next)
			}

			select {
			case item, more = <-enqChan:
				if !more {
					if cq.Queue.Len() > 0 {
						return // we're done done.
					}
					enqChan = nil // closed, so no use.
				}
				// log.Debug("got", item)
				cq.Queue.Enqueue(item)
				cq.Queue.Enqueue(next) // order may have changed.
				next = ""

			case deqChan <- next:
				// log.Debug("dequeued", next)
				next = ""

			case <-ctx.Done():
				return
			}
		}
	}()
}
func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	broadcastSignal := time.NewTicker(rebroadcastDelay.Get())
	defer broadcastSignal.Stop()

	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()

	for {
		log.Event(ctx, "Bitswap.Rebroadcast.idle")
		select {
		case <-tick.C:
			n := bs.wm.wl.Len()
			if n > 0 {
				log.Debug(n, "keys in bitswap wantlist")
			}
		case <-broadcastSignal.C: // resend unfulfilled wantlist keys
			log.Event(ctx, "Bitswap.Rebroadcast.active")
			entries := bs.wm.wl.Entries()
			if len(entries) > 0 {
				bs.connectToProviders(ctx, entries)
			}
		case <-parent.Done():
			return
		}
	}
}
// GetBlocks returns a channel where the caller may receive blocks that
// correspond to the provided |keys|. Returns an error if BitSwap is unable to
// begin this request within the deadline enforced by the context.
//
// NB: Your request remains open until the context expires. To conserve
// resources, provide a context with a reasonably short deadline (i.e. not one
// that lasts throughout the lifetime of the server).
func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) {
	select {
	case <-bs.process.Closing():
		return nil, errors.New("bitswap is closed")
	default:
	}

	promise := bs.notifications.Subscribe(ctx, keys...)
	for _, k := range keys {
		log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	}

	bs.wm.WantBlocks(keys)

	req := &blockRequest{
		keys: keys,
		ctx:  ctx,
	}
	select {
	case bs.findKeys <- req:
		return promise, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// GetBlocks gets a list of blocks asynchronously and returns them through
// the returned channel.
// NB: No guarantees are made about order.
func (s *BlockService) GetBlocks(ctx context.Context, ks []key.Key) <-chan *blocks.Block {
	out := make(chan *blocks.Block)
	go func() {
		defer close(out)

		var misses []key.Key
		for _, k := range ks {
			hit, err := s.Blockstore.Get(k)
			if err != nil {
				misses = append(misses, k)
				continue
			}
			log.Debug("Blockservice: Got data in datastore.")
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}

		rblocks, err := s.Exchange.GetBlocks(ctx, misses)
		if err != nil {
			log.Debugf("Error with GetBlocks: %s", err)
			return
		}

		for b := range rblocks {
			select {
			case out <- b:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}
// Subscribe returns a channel of blocks for the given |keys|. The returned
// channel is closed if the |ctx| times out or is cancelled, or after sending
// len(keys) blocks.
func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan *blocks.Block {
	blocksCh := make(chan *blocks.Block, len(keys))
	valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking
	if len(keys) == 0 {
		close(blocksCh)
		return blocksCh
	}
	ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...)
	go func() {
		defer close(blocksCh)
		defer ps.wrapped.Unsub(valuesCh) // with a len(keys) buffer, this is an optimization

		for {
			select {
			case <-ctx.Done():
				return
			case val, ok := <-valuesCh:
				if !ok {
					return
				}
				block, ok := val.(*blocks.Block)
				if !ok {
					return
				}
				select {
				case <-ctx.Done():
					return
				case blocksCh <- block: // continue
				}
			}
		}
	}()

	return blocksCh
}
func (e *offlineExchange) GetBlocks(ctx context.Context, ks []key.Key) (<-chan *blocks.Block, error) {
	out := make(chan *blocks.Block)
	go func() {
		defer close(out)
		var misses []key.Key
		for _, k := range ks {
			hit, err := e.bs.Get(k)
			if err != nil {
				misses = append(misses, k)
				// a long line of misses should abort when context is cancelled.
				select {
				// TODO case send misses down channel
				case <-ctx.Done():
					return
				default:
					continue
				}
			}
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out, nil
}
func (bs *Bitswap) provideCollector(ctx context.Context) {
	defer close(bs.provideKeys)
	var toProvide []key.Key
	var nextKey key.Key
	var keysOut chan key.Key

	for {
		select {
		case blk, ok := <-bs.newBlocks:
			if !ok {
				log.Debug("newBlocks channel closed")
				return
			}
			if keysOut == nil {
				nextKey = blk.Key()
				keysOut = bs.provideKeys
			} else {
				toProvide = append(toProvide, blk.Key())
			}
		case keysOut <- nextKey:
			if len(toProvide) > 0 {
				nextKey = toProvide[0]
				toProvide = toProvide[1:]
			} else {
				keysOut = nil
			}
		case <-ctx.Done():
			return
		}
	}
}
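// The collector above relies on the nil-channel select idiom: a case that
// sends on (or receives from) a nil channel never fires, so keysOut is only
// non-nil while there is a key ready to hand off. What follows is NOT part of
// the original code base; it is a minimal, self-contained sketch of the same
// idiom with made-up names (collect, pending, out). Unlike provideCollector,
// this sketch drains its buffer when the input closes.
package main

import "fmt"

// collect forwards values from in on the returned channel, buffering
// internally so the producer is never blocked by a slow consumer.
func collect(in <-chan int) <-chan int {
	results := make(chan int)
	go func() {
		defer close(results)
		var pending []int
		var next int
		var out chan int // nil disables the send case below
		for in != nil || out != nil {
			select {
			case v, ok := <-in:
				if !ok {
					in = nil // input finished; keep draining what is pending
					continue
				}
				if out == nil {
					next = v
					out = results // a value is ready: enable the send case
				} else {
					pending = append(pending, v)
				}
			case out <- next:
				if len(pending) > 0 {
					next, pending = pending[0], pending[1:]
				} else {
					out = nil // nothing left to send: disable the case
				}
			}
		}
	}()
	return results
}

func main() {
	in := make(chan int)
	go func() {
		for i := 0; i < 5; i++ {
			in <- i
		}
		close(in)
	}()
	for v := range collect(in) {
		fmt.Println("delivered", v)
	}
}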
// LogError logs the error to the owner of the context.
//
// If this context was created with ContextWithErrorLog, then this method
// passes the error to the context creator over an unbuffered channel.
//
// If this context was created by other means, this method is a no-op.
func LogError(ctx context.Context, err error) {
	v := ctx.Value(errLogKey)
	errs, ok := v.(privateChanType)
	if !ok {
		return
	}
	errs <- err
}
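// ContextWithErrorLog and the unexported errLogKey/privateChanType are not
// shown above. The self-contained sketch below is a guess at the general
// pattern, not the actual implementation: a channel is stored in a context
// value under a private key type, and LogError-style helpers look it up and
// send on it. All names here (withErrorLog, logError, errChan) are invented.
package main

import (
	"context"
	"errors"
	"fmt"
)

type errLogKeyType struct{}
type errChan chan error

// withErrorLog returns a derived context carrying errs, so code holding the
// context can report errors back to whoever created it.
func withErrorLog(ctx context.Context, errs errChan) context.Context {
	return context.WithValue(ctx, errLogKeyType{}, errs)
}

// logError sends err to the context creator, or does nothing if the context
// carries no error channel. The send on the unbuffered channel blocks until
// the creator receives, so the creator must keep reading.
func logError(ctx context.Context, err error) {
	errs, ok := ctx.Value(errLogKeyType{}).(errChan)
	if !ok {
		return
	}
	errs <- err
}

func main() {
	errs := make(errChan)
	ctx := withErrorLog(context.Background(), errs)

	go logError(ctx, errors.New("something went wrong"))

	fmt.Println("received:", <-errs)
}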
// gatedDialAttempt is an attempt to dial a node. It is gated by the swarm's
// dial synchronization systems: dialsync and dialbackoff.
func (s *Swarm) gatedDialAttempt(ctx context.Context, p peer.ID) (*Conn, error) {
	var logdial = lgbl.Dial("swarm", s.LocalPeer(), p, nil, nil)
	defer log.EventBegin(ctx, "swarmDialAttemptSync", logdial).Done()

	// check if we already have an open connection first
	conn := s.bestConnectionToPeer(p)
	if conn != nil {
		return conn, nil
	}

	// check if there's an ongoing dial to this peer
	if ok, wait := s.dsync.Lock(p); ok {
		// ok, we have been charged to dial! let's do it.
		// if it succeeds, dial will add the conn to the swarm itself.
		defer log.EventBegin(ctx, "swarmDialAttemptStart", logdial).Done()
		ctxT, cancel := context.WithTimeout(ctx, s.dialT)
		conn, err := s.dial(ctxT, p)
		cancel()
		s.dsync.Unlock(p)
		log.Debugf("dial end %s", conn)
		if err != nil {
			log.Event(ctx, "swarmDialBackoffAdd", logdial)
			s.backf.AddBackoff(p) // let others know to backoff

			// ok, we failed. try again. (if loop is done, our error is output)
			return nil, fmt.Errorf("dial attempt failed: %s", err)
		}
		log.Event(ctx, "swarmDialBackoffClear", logdial)
		s.backf.Clear(p) // okay, no longer need to backoff
		return conn, nil
	} else {
		// we did not dial. we must wait for someone else to dial.

		// check whether we should backoff first...
		if s.backf.Backoff(p) {
			log.Event(ctx, "swarmDialBackoff", logdial)
			return nil, ErrDialBackoff
		}

		defer log.EventBegin(ctx, "swarmDialWait", logdial).Done()
		select {
		case <-wait: // wait for that other dial to finish.
			// see if it worked, OR we got an incoming dial in the meantime...
			conn := s.bestConnectionToPeer(p)
			if conn != nil {
				return conn, nil
			}
			return nil, ErrDialFailed

		case <-ctx.Done(): // or we may have to bail...
			return nil, ctx.Err()
		}
	}
}
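// s.dsync.Lock(p) above implements a single-dialer handoff: exactly one caller
// gets ok==true and must perform the dial; everyone else gets a wait channel
// that closes when that dial finishes. The dialsync package itself is not
// shown, so the following is a generic, self-contained sketch of the same idea
// with reconstructed names (dialSync, Lock, Unlock), not the real API.
package main

import (
	"fmt"
	"sync"
	"time"
)

type dialSync struct {
	mu   sync.Mutex
	open map[string]chan struct{}
}

// Lock returns (true, nil) if the caller is now responsible for dialing id,
// or (false, wait) where wait closes once the responsible caller Unlocks.
func (ds *dialSync) Lock(id string) (bool, <-chan struct{}) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	if ch, found := ds.open[id]; found {
		return false, ch
	}
	ch := make(chan struct{})
	ds.open[id] = ch
	return true, nil
}

// Unlock signals that the dial for id has finished, waking all waiters.
func (ds *dialSync) Unlock(id string) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	if ch, found := ds.open[id]; found {
		close(ch)
		delete(ds.open, id)
	}
}

func main() {
	ds := &dialSync{open: make(map[string]chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			ok, wait := ds.Lock("peerA")
			if ok {
				fmt.Println(n, "dialing")
				time.Sleep(10 * time.Millisecond) // pretend to dial
				ds.Unlock("peerA")
				return
			}
			<-wait
			fmt.Println(n, "waited for the other dial")
		}(i)
	}
	wg.Wait()
}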
func MetadataFromContext(ctx context.Context) (Metadata, error) {
	value := ctx.Value(metadataKey)
	if value != nil {
		metadata, ok := value.(Metadata)
		if ok {
			return metadata, nil
		}
	}
	return nil, errors.New("context contains no metadata")
}
func (pm *ProviderManager) AddProvider(ctx context.Context, k key.Key, val peer.ID) {
	prov := &addProv{
		k:   k,
		val: val,
	}
	select {
	case pm.newprovs <- prov:
	case <-ctx.Done():
	}
}
// WithContextAndTeardown is a helper function to set teardown at initiation
// of WithContext.
func WithContextAndTeardown(ctx context.Context, tf goprocess.TeardownFunc) goprocess.Process {
	if ctx == nil {
		panic("nil Context")
	}
	p := goprocess.WithTeardown(tf)
	go func() {
		<-ctx.Done()
		p.Close()
	}()
	return p
}
func (mq *msgQueue) runQueue(ctx context.Context) {
	for {
		select {
		case <-mq.work: // there is work to be done
			mq.doWork(ctx)
		case <-mq.done:
			return
		case <-ctx.Done():
			return
		}
	}
}
// CloseAfterContext schedules the process to close after the given
// context is done. It is the equivalent of:
//
//   func CloseAfterContext(p goprocess.Process, ctx context.Context) {
//     go func() {
//       <-ctx.Done()
//       p.Close()
//     }()
//   }
//
func CloseAfterContext(p goprocess.Process, ctx context.Context) {
	if p == nil {
		panic("nil Process")
	}
	if ctx == nil {
		panic("nil Context")
	}

	go func() {
		<-ctx.Done()
		p.Close()
	}()
}
// WithDeadlineFraction returns a Context with a fraction of the
// original context's timeout. This is useful in sequential pipelines
// of work, where one might try options and fall back to others
// depending on the time available, or failure to respond. For example:
//
//   // getPicture returns a picture from our encrypted database
//   // we have a pipeline of multiple steps. we need to:
//   //   - get the data from a database
//   //   - decrypt it
//   //   - apply many transforms
//   //
//   // we **know** that each step takes increasingly more time.
//   // The transforms are much more expensive than decryption, and
//   // decryption is more expensive than the database lookup.
//   // If our database takes too long (i.e. >0.2 of available time),
//   // there's no use in continuing.
//   func getPicture(ctx context.Context, key string) ([]byte, error) {
//     // fractional timeout contexts to the rescue!
//
//     // try the database with 0.2 of remaining time.
//     ctx1, _ := ctxext.WithDeadlineFraction(ctx, 0.2)
//     val, err := db.Get(ctx1, key)
//     if err != nil {
//       return nil, err
//     }
//
//     // try decryption with 0.3 of remaining time.
//     ctx2, _ := ctxext.WithDeadlineFraction(ctx, 0.3)
//     if val, err = decryptor.Decrypt(ctx2, val); err != nil {
//       return nil, err
//     }
//
//     // try transforms with all remaining time. hopefully it's enough!
//     return transformer.Transform(ctx, val)
//   }
//
func WithDeadlineFraction(ctx context.Context, fraction float64) (context.Context, context.CancelFunc) {
	d, found := ctx.Deadline()
	if !found { // no deadline
		return context.WithCancel(ctx)
	}

	left := d.Sub(time.Now())
	if left < 0 { // already passed...
		return context.WithCancel(ctx)
	}

	left = time.Duration(float64(left) * fraction)
	return context.WithTimeout(ctx, left)
}
func PublishQueryEvent(ctx context.Context, ev *QueryEvent) {
	ich := ctx.Value(RoutingQueryKey)
	if ich == nil {
		return
	}

	ch, ok := ich.(chan<- *QueryEvent)
	if !ok {
		return
	}

	select {
	case ch <- ev:
	case <-ctx.Done():
	}
}
func (pm *ProviderManager) GetProviders(ctx context.Context, k key.Key) []peer.ID {
	gp := &getProv{
		k:    k,
		resp: make(chan []peer.ID, 1), // buffered to prevent sender from blocking
	}
	select {
	case <-ctx.Done():
		return nil
	case pm.getprovs <- gp:
	}
	select {
	case <-ctx.Done():
		return nil
	case peers := <-gp.resp:
		return peers
	}
}
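// AddProvider and GetProviders both hand their request to a single goroutine
// that owns the underlying provider data, which lets the ProviderManager avoid
// locks. That owner loop is not shown above, so the following is a generic,
// self-contained sketch of the pattern (store, getReq, run are made-up names),
// not the real ProviderManager internals.
package main

import (
	"context"
	"fmt"
)

type getReq struct {
	key  string
	resp chan []string // buffered(1) so the owner never blocks on a gone caller
}

type store struct {
	adds chan [2]string
	gets chan getReq
}

// run owns the map; all reads and writes happen on this one goroutine.
func (s *store) run(ctx context.Context) {
	data := make(map[string][]string)
	for {
		select {
		case kv := <-s.adds:
			data[kv[0]] = append(data[kv[0]], kv[1])
		case req := <-s.gets:
			req.resp <- data[req.key]
		case <-ctx.Done():
			return
		}
	}
}

// Get mirrors GetProviders: send the request, then wait for the reply,
// giving up at either step if the context is cancelled.
func (s *store) Get(ctx context.Context, key string) []string {
	req := getReq{key: key, resp: make(chan []string, 1)}
	select {
	case s.gets <- req:
	case <-ctx.Done():
		return nil
	}
	select {
	case vals := <-req.resp:
		return vals
	case <-ctx.Done():
		return nil
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s := &store{adds: make(chan [2]string), gets: make(chan getReq)}
	go s.run(ctx)

	s.adds <- [2]string{"block1", "peerA"}
	fmt.Println(s.Get(ctx, "block1"))
}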
func (c *client) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.PeerInfo {
	out := make(chan peer.PeerInfo)
	go func() {
		defer close(out)
		for i, p := range c.server.Providers(k) {
			if max <= i {
				return
			}
			select {
			case out <- p:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}
// Bootstrap ensures the dht routing table remains healthy as peers come and go.
// It builds up a list of peers by requesting random peer IDs. The Bootstrap
// process will run a number of queries each time, and run every time the signal
// fires. These parameters are configurable.
//
// As opposed to BootstrapWithConfig, Bootstrap satisfies the routing interface.
func (dht *IpfsDHT) Bootstrap(ctx context.Context) error {
	proc, err := dht.BootstrapWithConfig(DefaultBootstrapConfig)
	if err != nil {
		return err
	}

	// wait till ctx or dht.Context exits.
	// we have to do it this way to satisfy the Routing interface (contexts)
	go func() {
		defer proc.Close()
		select {
		case <-ctx.Done():
		case <-dht.Context().Done():
		}
	}()

	return nil
}
func (rp *Reprovider) ProvideEvery(ctx context.Context, tick time.Duration) {
	// don't reprovide immediately.
	// the daemon may have just been started, only to be shut down again right away.
	// probability( up another minute | uptime ) increases with uptime.
	after := time.After(time.Minute)
	for {
		select {
		case <-ctx.Done():
			return
		case <-after:
			err := rp.Reprovide(ctx)
			if err != nil {
				log.Debug(err)
			}
			after = time.After(tick)
		}
	}
}
func (e *Engine) taskWorker(ctx context.Context) {
	defer close(e.outbox) // because taskWorker uses the channel exclusively
	for {
		oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking
		select {
		case <-ctx.Done():
			return
		case e.outbox <- oneTimeUse:
		}
		// receiver is ready for an outgoing envelope. let's prepare one. first,
		// we must acquire a task from the PQ...
		envelope, err := e.nextEnvelope(ctx)
		if err != nil {
			close(oneTimeUse)
			return // ctx cancelled
		}
		oneTimeUse <- envelope // buffered. won't block
		close(oneTimeUse)
	}
}
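// The engine worker above hands each consumer a one-shot buffered channel
// through e.outbox (a channel of channels); the consuming side of that
// handshake appears in the Bitswap taskWorker further below. The following is
// a self-contained sketch of the same handoff with invented names
// (produceWork, outbox); it is illustrative only, not the engine's actual API.
package main

import (
	"context"
	"fmt"
	"time"
)

// produceWork sends a fresh one-shot channel for every unit of work. The
// receiver commits to wanting work by receiving the inner channel; only then
// is the (possibly expensive) work item prepared and delivered.
func produceWork(ctx context.Context) <-chan chan string {
	outbox := make(chan chan string)
	go func() {
		defer close(outbox)
		for i := 0; ; i++ {
			oneTimeUse := make(chan string, 1) // buffered: the send below never blocks
			select {
			case outbox <- oneTimeUse:
			case <-ctx.Done():
				return
			}
			// the receiver is ready; now prepare the item and deliver it.
			oneTimeUse <- fmt.Sprintf("envelope %d", i)
			close(oneTimeUse)
		}
	}()
	return outbox
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	outbox := produceWork(ctx)
	for i := 0; i < 3; i++ {
		next, ok := <-outbox
		if !ok {
			return
		}
		if env, ok := <-next; ok {
			fmt.Println("got", env)
		}
	}
}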
func readMsgCtx(ctx context.Context, r msgio.Reader, p proto.Message) ([]byte, error) {
	var msg []byte

	// read in a goroutine so we can exit when our context is cancelled.
	done := make(chan error)
	go func() {
		var err error
		msg, err = r.ReadMsg()
		select {
		case done <- err:
		case <-ctx.Done():
		}
	}()

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case e := <-done:
		if e != nil {
			return nil, e
		}
	}

	return msg, proto.Unmarshal(msg, p)
}
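// readMsgCtx shows a common way to bolt context cancellation onto a blocking
// call: run the call in a goroutine and select on its result and on
// ctx.Done(). Note that the abandoned read keeps running after the caller
// gives up. The sketch below applies the same pattern to a plain io.Reader;
// readFullCtx is an invented helper, not part of any package shown above.
package main

import (
	"context"
	"fmt"
	"io"
	"strings"
	"time"
)

// readFullCtx behaves like io.ReadFull but returns early with ctx.Err() if
// the context is cancelled first. The abandoned read may still complete in
// the background; buf must not be reused until it does.
func readFullCtx(ctx context.Context, r io.Reader, buf []byte) (int, error) {
	type result struct {
		n   int
		err error
	}
	done := make(chan result, 1) // buffered so the goroutine never leaks blocked
	go func() {
		n, err := io.ReadFull(r, buf)
		done <- result{n, err}
	}()

	select {
	case res := <-done:
		return res.n, res.err
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	buf := make([]byte, 5)
	n, err := readFullCtx(ctx, strings.NewReader("hello world"), buf)
	fmt.Println(n, err, string(buf[:n]))
}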
func (bs *Bitswap) provideWorker(ctx context.Context, id int) {
	idmap := eventlog.LoggableMap{"ID": id}
	for {
		log.Event(ctx, "Bitswap.ProvideWorker.Loop", idmap)
		select {
		case k, ok := <-bs.provideKeys:
			log.Event(ctx, "Bitswap.ProvideWorker.Work", idmap, &k)
			if !ok {
				log.Debug("provideKeys channel closed")
				return
			}
			ctx, cancel := context.WithTimeout(ctx, provideTimeout)
			err := bs.network.Provide(ctx, k)
			if err != nil {
				log.Error(err)
			}
			cancel()
		case <-ctx.Done():
			return
		}
	}
}
// GetBlock attempts to retrieve a particular block from peers within the
// deadline enforced by the context.
func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, error) {
	// Any async work initiated by this function must end when this function
	// returns. To ensure this, derive a new context. Note that it is okay to
	// listen on parent in this scope, but NOT okay to pass |parent| to
	// functions called by this one. Otherwise those functions won't return
	// when this context's cancel func is executed. This is difficult to
	// enforce. May this comment keep you safe.
	ctx, cancelFunc := context.WithCancel(parent)

	ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest"))
	log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k)

	defer func() {
		cancelFunc()
	}()

	promise, err := bs.GetBlocks(ctx, []key.Key{k})
	if err != nil {
		return nil, err
	}

	select {
	case block, ok := <-promise:
		if !ok {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			default:
				return nil, errors.New("promise channel was closed")
			}
		}
		return block, nil
	case <-parent.Done():
		return nil, parent.Err()
	}
}
func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
	idmap := eventlog.LoggableMap{"ID": id}
	defer log.Info("bitswap task worker shutting down...")
	for {
		log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap)
		select {
		case nextEnvelope := <-bs.engine.Outbox():
			select {
			case envelope, ok := <-nextEnvelope:
				if !ok {
					continue
				}
				log.Event(ctx, "Bitswap.TaskWorker.Work", eventlog.LoggableMap{
					"ID":     id,
					"Target": envelope.Peer.Pretty(),
					"Block":  envelope.Block.Multihash.B58String(),
				})
				bs.wm.SendBlock(ctx, envelope)
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}
func (c *Client) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.PeerInfo {
	ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("findProviders"))
	defer log.EventBegin(ctx, "findProviders", &k).Done()

	ch := make(chan peer.PeerInfo)
	go func() {
		defer close(ch)
		request := pb.NewMessage(pb.Message_GET_PROVIDERS, string(k), 0)
		response, err := c.proxy.SendRequest(ctx, request)
		if err != nil {
			log.Debug(err)
			return
		}
		for _, p := range pb.PBPeersToPeerInfos(response.GetProviderPeers()) {
			select {
			case <-ctx.Done():
				log.Debug(ctx.Err())
				return
			case ch <- p:
			}
		}
	}()
	return ch
}
// HasBlock announces the existence of a block to this bitswap service. The
// service will potentially notify its peers.
func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error {
	select {
	case <-bs.process.Closing():
		return errors.New("bitswap is closed")
	default:
	}

	err := bs.tryPutBlock(blk, 4) // attempt to store block up to four times
	if err != nil {
		log.Errorf("Error writing block to datastore: %s", err)
		return err
	}

	bs.notifications.Publish(blk)

	select {
	case bs.newBlocks <- blk: // send block off to be reprovided
	case <-ctx.Done():
		return ctx.Err()
	}

	return nil
}
func echoListen(ctx context.Context, listener Listener) {
	for {
		c, err := listener.Accept()
		if err != nil {
			select {
			case <-ctx.Done():
				return
			default:
			}

			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				<-time.After(time.Microsecond * 10)
				continue
			}

			log.Debugf("echoListen: listener appears to be closing")
			return
		}

		go echo(c.(Conn))
	}
}
// nextEnvelope runs in the taskWorker goroutine. Returns an error if the
// context is cancelled before the next Envelope can be created.
func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
	for {
		nextTask := e.peerRequestQueue.Pop()
		for nextTask == nil {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-e.workSignal:
				nextTask = e.peerRequestQueue.Pop()
			}
		}

		// with a task in hand, we're ready to prepare the envelope...
		block, err := e.bs.Get(nextTask.Entry.Key)
		if err != nil {
			// If we don't have the block, don't hold that against the peer.
			// make sure to update that the task has been 'completed'
			nextTask.Done()
			continue
		}

		return &Envelope{
			Peer:  nextTask.Target,
			Block: block,
			Sent: func() {
				nextTask.Done()
				select {
				case e.workSignal <- struct{}{}:
					// work completing may mean that our queue will provide new
					// work to be done.
				default:
				}
			},
		}, nil
	}
}