// GetBlocks returns a channel where the caller may receive blocks that
// correspond to the provided |keys|. Returns an error if BitSwap is unable to
// begin this request within the deadline enforced by the context.
//
// NB: Your request remains open until the context expires. To conserve
// resources, provide a context with a reasonably short deadline (i.e. not one
// that lasts throughout the lifetime of the server).
func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) {
	select {
	case <-bs.process.Closing():
		return nil, errors.New("bitswap is closed")
	default:
	}
	promise := bs.notifications.Subscribe(ctx, keys...)

	for _, k := range keys {
		log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	}

	bs.wm.WantBlocks(keys)

	req := &blockRequest{
		keys: keys,
		ctx:  ctx,
	}
	select {
	case bs.findKeys <- req:
		return promise, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
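// A hedged usage sketch for GetBlocks, following the doc comment's advice to
// pass a context with a short deadline. The fetchBlocks name and the
// ten-second timeout are illustrative assumptions; the promise channel is
// assumed to close once the request ends, so the range terminates.
func fetchBlocks(parent context.Context, bs *Bitswap, keys []key.Key) ([]*blocks.Block, error) {
	ctx, cancel := context.WithTimeout(parent, 10*time.Second)
	defer cancel() // releases the request's resources when we return

	ch, err := bs.GetBlocks(ctx, keys)
	if err != nil {
		return nil, err
	}

	var got []*blocks.Block
	for blk := range ch {
		got = append(got, blk)
	}
	return got, nil
}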
func readMsgCtx(ctx context.Context, r msgio.Reader, p proto.Message) ([]byte, error) {
	var msg []byte

	// read in a goroutine so we can exit when our context is cancelled.
	done := make(chan error)
	go func() {
		var err error
		msg, err = r.ReadMsg()
		select {
		case done <- err:
		case <-ctx.Done():
		}
	}()

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case e := <-done:
		if e != nil {
			return nil, e
		}
	}

	return msg, proto.Unmarshal(msg, p)
}
// gatedDialAttempt is an attempt to dial a node. It is gated by the swarm's
// dial synchronization systems: dialsync and dialbackoff.
func (s *Swarm) gatedDialAttempt(ctx context.Context, p peer.ID) (*Conn, error) {
	var logdial = lgbl.Dial("swarm", s.LocalPeer(), p, nil, nil)
	defer log.EventBegin(ctx, "swarmDialAttemptSync", logdial).Done()

	// check if we already have an open connection first
	conn := s.bestConnectionToPeer(p)
	if conn != nil {
		return conn, nil
	}

	// check if there's an ongoing dial to this peer
	if ok, wait := s.dsync.Lock(p); ok {
		// ok, we have been charged to dial! let's do it.
		// if it succeeds, dial will add the conn to the swarm itself.

		defer log.EventBegin(ctx, "swarmDialAttemptStart", logdial).Done()
		ctxT, cancel := context.WithTimeout(ctx, s.dialT)
		conn, err := s.dial(ctxT, p)
		cancel()
		s.dsync.Unlock(p)
		log.Debugf("dial end %s", conn)
		if err != nil {
			log.Event(ctx, "swarmDialBackoffAdd", logdial)
			s.backf.AddBackoff(p) // let others know to backoff

			// ok, we failed. try again. (if loop is done, our error is output)
			return nil, fmt.Errorf("dial attempt failed: %s", err)
		}
		log.Event(ctx, "swarmDialBackoffClear", logdial)
		s.backf.Clear(p) // okay, no longer need to backoff
		return conn, nil

	} else {
		// we did not dial. we must wait for someone else to dial.

		// check whether we should backoff first...
		if s.backf.Backoff(p) {
			log.Event(ctx, "swarmDialBackoff", logdial)
			return nil, ErrDialBackoff
		}

		defer log.EventBegin(ctx, "swarmDialWait", logdial).Done()
		select {
		case <-wait: // wait for that other dial to finish.

			// see if it worked, OR we got an incoming dial in the meantime...
			conn := s.bestConnectionToPeer(p)
			if conn != nil {
				return conn, nil
			}
			return nil, ErrDialFailed

		case <-ctx.Done(): // or we may have to bail...
			return nil, ctx.Err()
		}
	}
}
// Fsync flushes the content in the file to disk, but does not
// update the dag tree internally
func (fi *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
	errs := make(chan error, 1)
	go func() {
		errs <- fi.fi.Sync()
	}()
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
// Run runs the query at hand. pass in a list of peers to use first.
func (q *dhtQuery) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	runner := newQueryRunner(q)
	return runner.Run(ctx, peers)
}
// Get returns the promised node, blocking until it is received or until
// either the promise's own context or the caller's context is cancelled.
func (np *nodePromise) Get(ctx context.Context) (*Node, error) {
	if np.cache != nil {
		return np.cache, nil
	}

	select {
	case blk := <-np.recv:
		np.cache = blk
	case <-np.ctx.Done():
		return nil, np.ctx.Err()
	case <-ctx.Done():
		return nil, ctx.Err()
	}
	return np.cache, nil
}
// ContextDo runs f in a goroutine and waits for it to finish, returning
// early with ctx.Err() if the context is cancelled first.
func ContextDo(ctx context.Context, f func() error) error {
	ch := make(chan error)

	go func() {
		select {
		case <-ctx.Done():
		case ch <- f():
		}
	}()

	select {
	case <-ctx.Done():
		return ctx.Err()
	case val := <-ch:
		return val
	}
}
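// A minimal sketch of calling ContextDo so a blocking operation is abandoned
// once the context is cancelled. The contextDoExample name, the timeout value,
// and the sleep standing in for real work are illustrative assumptions; note
// that f itself keeps running in its goroutine after ContextDo returns early.
func contextDoExample(parent context.Context) error {
	ctx, cancel := context.WithTimeout(parent, time.Second)
	defer cancel()

	return ContextDo(ctx, func() error {
		time.Sleep(5 * time.Second) // stand-in for a blocking call
		return nil
	})
}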
// Read implements the FUSE read handler: it seeks to req.Offset and fills
// resp.Data with up to req.Size bytes, bailing out early if ctx is cancelled.
func (fi *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	_, err := fi.fi.Seek(req.Offset, os.SEEK_SET)
	if err != nil {
		return err
	}

	fisize, err := fi.fi.Size()
	if err != nil {
		return err
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	readsize := min(req.Size, int(fisize-req.Offset))
	n, err := fi.fi.CtxReadFull(ctx, resp.Data[:readsize])
	resp.Data = resp.Data[:n]
	return err
}
// GetBlock attempts to retrieve a particular block from peers within the
// deadline enforced by the context.
func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, error) {

	// Any async work initiated by this function must end when this function
	// returns. To ensure this, derive a new context. Note that it is okay to
	// listen on parent in this scope, but NOT okay to pass |parent| to
	// functions called by this one. Otherwise those functions won't return
	// when this context's cancel func is executed. This is difficult to
	// enforce. May this comment keep you safe.
	ctx, cancelFunc := context.WithCancel(parent)

	ctx = logging.ContextWithLoggable(ctx, logging.Uuid("GetBlockRequest"))
	log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k)

	defer func() {
		cancelFunc()
	}()

	promise, err := bs.GetBlocks(ctx, []key.Key{k})
	if err != nil {
		return nil, err
	}

	select {
	case block, ok := <-promise:
		if !ok {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			default:
				return nil, errors.New("promise channel was closed")
			}
		}
		return block, nil
	case <-parent.Done():
		return nil, parent.Err()
	}
}
// FindProvidersAsync sends a GET_PROVIDERS request through the proxy and
// returns a channel of provider PeerInfos. The channel is closed once the
// response has been relayed or the context is cancelled.
func (c *Client) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.PeerInfo {
	ctx = logging.ContextWithLoggable(ctx, logging.Uuid("findProviders"))
	defer log.EventBegin(ctx, "findProviders", &k).Done()
	ch := make(chan peer.PeerInfo)
	go func() {
		defer close(ch)
		request := pb.NewMessage(pb.Message_GET_PROVIDERS, string(k), 0)
		response, err := c.proxy.SendRequest(ctx, request)
		if err != nil {
			log.Debug(err)
			return
		}
		for _, p := range pb.PBPeersToPeerInfos(response.GetProviderPeers()) {
			select {
			case <-ctx.Done():
				log.Debug(ctx.Err())
				return
			case ch <- p:
			}
		}
	}()
	return ch
}
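// A hedged usage sketch for FindProvidersAsync: bound the request with a
// timeout and drain the channel, which the sending goroutine closes when it
// is done. The printProviders name, the one-minute timeout, and the max of 20
// are illustrative assumptions.
func printProviders(parent context.Context, c *Client, k key.Key) {
	ctx, cancel := context.WithTimeout(parent, time.Minute)
	defer cancel()

	for p := range c.FindProvidersAsync(ctx, k, 20) {
		fmt.Println("provider:", p.ID)
	}
}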
// writeMsgCtx marshals msg and writes it to w. The write happens in a
// goroutine so the caller can return early if ctx is cancelled.
func writeMsgCtx(ctx context.Context, w msgio.Writer, msg proto.Message) ([]byte, error) {
	enc, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}

	// write in a goroutine so we can exit when our context is cancelled.
	done := make(chan error)
	go func(m []byte) {
		err := w.WriteMsg(m)
		select {
		case done <- err:
		case <-ctx.Done():
		}
	}(enc)

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case e := <-done:
		return enc, e
	}
}
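// A round-trip sketch combining writeMsgCtx and readMsgCtx on one stream,
// under the assumption that rw is any msgio.ReadWriter and req/resp are any
// proto.Message values; the requestResponse name is illustrative. Both halves
// abandon the stream operation if ctx is cancelled first.
func requestResponse(ctx context.Context, rw msgio.ReadWriter, req, resp proto.Message) error {
	if _, err := writeMsgCtx(ctx, rw, req); err != nil {
		return err
	}
	_, err := readMsgCtx(ctx, rw, resp)
	return err
}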
// nextEnvelope runs in the taskWorker goroutine. Returns an error if the
// context is cancelled before the next Envelope can be created.
func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
	for {
		nextTask := e.peerRequestQueue.Pop()
		for nextTask == nil {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-e.workSignal:
				nextTask = e.peerRequestQueue.Pop()
			}
		}

		// with a task in hand, we're ready to prepare the envelope...

		block, err := e.bs.Get(nextTask.Entry.Key)
		if err != nil {
			// If we don't have the block, don't hold that against the peer
			// make sure to update that the task has been 'completed'
			nextTask.Done()
			continue
		}

		return &Envelope{
			Peer:  nextTask.Target,
			Block: block,
			Sent: func() {
				nextTask.Done()
				select {
				case e.workSignal <- struct{}{}:
					// work completing may mean that our queue will provide new
					// work to be done.
				default:
				}
			},
		}, nil
	}
}
// dialPeer opens a connection to peer, and makes sure to identify
// the connection once it has been opened.
func (h *BasicHost) dialPeer(ctx context.Context, p peer.ID) error {
	log.Debugf("host %s dialing %s", h.ID, p)
	c, err := h.Network().DialPeer(ctx, p)
	if err != nil {
		return err
	}

	// identify the connection before returning.
	done := make(chan struct{})
	go func() {
		h.ids.IdentifyConn(c)
		close(done)
	}()

	// respect the context: bail if it is cancelled before identify finishes.
	select {
	case <-done:
	case <-ctx.Done():
		return ctx.Err()
	}

	log.Debugf("host %s finished dialing %s", h.ID, p)
	return nil
}
// Dial connects to a peer over a particular address.
// Ensures raddr is part of peer.Addresses()
// Example: d.DialAddr(ctx, peer.Addresses()[0], peer)
func (d *Dialer) Dial(ctx context.Context, raddr ma.Multiaddr, remote peer.ID) (Conn, error) {
	logdial := lgbl.Dial("conn", d.LocalPeer, remote, nil, raddr)
	logdial["encrypted"] = (d.PrivateKey != nil) // log whether this will be an encrypted dial or not.
	defer log.EventBegin(ctx, "connDial", logdial).Done()

	var connOut Conn
	var errOut error
	done := make(chan struct{})

	// do it async to ensure we respect context cancellation
	go func() {
		defer func() {
			select {
			case done <- struct{}{}:
			case <-ctx.Done():
			}
		}()

		maconn, err := d.rawConnDial(ctx, raddr, remote)
		if err != nil {
			errOut = err
			return
		}

		if d.Wrapper != nil {
			maconn = d.Wrapper(maconn)
		}

		c, err := newSingleConn(ctx, d.LocalPeer, remote, maconn)
		if err != nil {
			maconn.Close()
			errOut = err
			return
		}

		if d.PrivateKey == nil || !EncryptConnections {
			log.Warningf("dialer %s dialing INSECURELY %s at %s!", d, remote, raddr)
			connOut = c
			return
		}

		c2, err := newSecureConn(ctx, d.PrivateKey, c)
		if err != nil {
			errOut = err
			c.Close()
			return
		}

		connOut = c2
	}()

	select {
	case <-ctx.Done():
		logdial["error"] = ctx.Err()
		logdial["dial"] = "failure"
		return nil, ctx.Err()
	case <-done:
		// whew, finished.
	}

	if errOut != nil {
		logdial["error"] = errOut
		logdial["dial"] = "failure"
		return nil, errOut
	}

	logdial["dial"] = "success"
	return connOut, nil
}
// FindPeersConnectedToPeer searches for peers directly connected to a given peer.
func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (<-chan peer.PeerInfo, error) {

	peerchan := make(chan peer.PeerInfo, asyncQueryBuffer)
	peersSeen := peer.Set{}

	peers := dht.routingTable.NearestPeers(kb.ConvertPeerID(id), AlphaValue)
	if len(peers) == 0 {
		return nil, kb.ErrLookupFailure
	}

	// setup the Query
	query := dht.newQuery(key.Key(id), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {

		pmes, err := dht.findPeerSingle(ctx, p, id)
		if err != nil {
			return nil, err
		}

		var clpeers []peer.PeerInfo
		closer := pmes.GetCloserPeers()
		for _, pbp := range closer {
			pi := pb.PBPeerToPeerInfo(pbp)

			// skip peers already seen
			if _, found := peersSeen[pi.ID]; found {
				continue
			}
			peersSeen[pi.ID] = struct{}{}

			// if peer is connected, send it to our client.
			if pb.Connectedness(*pbp.Connection) == inet.Connected {
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case peerchan <- pi:
				}
			}

			// if peer is the peer we're looking for, don't bother querying it.
			// TODO maybe query it?
			if pb.Connectedness(*pbp.Connection) != inet.Connected {
				clpeers = append(clpeers, pi)
			}
		}

		return &dhtQueryResult{closerPeers: clpeers}, nil
	})

	// run it! run it asynchronously to yield peers as results are found.
	// this does no error checking
	go func() {
		if _, err := query.Run(ctx, peers); err != nil {
			log.Debug(err)
		}

		// close the peerchan channel when done.
		close(peerchan)
	}()

	return peerchan, nil
}
// findProvidersAsyncRoutine sends up to count providers for key on peerOut,
// first from the local provider store and then by querying the DHT. It closes
// peerOut when finished.
func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key key.Key, count int, peerOut chan peer.PeerInfo) {
	defer log.EventBegin(ctx, "findProvidersAsync", &key).Done()
	defer close(peerOut)

	ps := pset.NewLimited(count)
	provs := dht.providers.GetProviders(ctx, key)
	for _, p := range provs {
		// NOTE: assuming that this list of peers is unique
		if ps.TryAdd(p) {
			select {
			case peerOut <- dht.peerstore.PeerInfo(p):
			case <-ctx.Done():
				return
			}
		}

		// If we have enough peers locally, don't bother with remote RPC
		if ps.Size() >= count {
			return
		}
	}

	// setup the Query
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})
		pmes, err := dht.findProvidersSingle(ctx, p, key)
		if err != nil {
			return nil, err
		}

		log.Debugf("%d provider entries", len(pmes.GetProviderPeers()))
		provs := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
		log.Debugf("%d provider entries decoded", len(provs))

		// Add unique providers from request, up to 'count'
		for _, prov := range provs {
			log.Debugf("got provider: %s", prov)
			if ps.TryAdd(prov.ID) {
				log.Debugf("using provider: %s", prov)
				select {
				case peerOut <- prov:
				case <-ctx.Done():
					log.Debug("Context timed out sending more providers")
					return nil, ctx.Err()
				}
			}
			if ps.Size() >= count {
				log.Debugf("got enough providers (%d/%d)", ps.Size(), count)
				return &dhtQueryResult{success: true}, nil
			}
		}

		// Give closer peers back to the query to be queried
		closer := pmes.GetCloserPeers()
		clpeers := pb.PBPeersToPeerInfos(closer)
		log.Debugf("got closer peers: %d %s", len(clpeers), clpeers)

		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(clpeers),
		})

		return &dhtQueryResult{closerPeers: clpeers}, nil
	})

	peers := dht.routingTable.NearestPeers(kb.ConvertKey(key), AlphaValue)
	_, err := query.Run(ctx, peers)
	if err != nil {
		log.Debugf("Query error: %s", err)
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:  notif.QueryError,
			Extra: err.Error(),
		})
	}
}
// GetClosestPeers is the Kademlia 'node lookup' operation. It returns a
// channel of the K closest peers to the given key.
func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key key.Key) (<-chan peer.ID, error) {
	e := log.EventBegin(ctx, "getClosestPeers", &key)
	tablepeers := dht.routingTable.NearestPeers(kb.ConvertKey(key), AlphaValue)
	if len(tablepeers) == 0 {
		return nil, kb.ErrLookupFailure
	}

	out := make(chan peer.ID, KValue)
	peerset := pset.NewLimited(KValue)

	for _, p := range tablepeers {
		select {
		case out <- p:
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		peerset.Add(p)
	}

	// since the query doesn't actually pass our context down
	// we have to hack this here. whyrusleeping isn't a huge fan of goprocess
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		// For DHT query command
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})

		closer, err := dht.closerPeersSingle(ctx, key, p)
		if err != nil {
			log.Debugf("error getting closer peers: %s", err)
			return nil, err
		}

		var filtered []peer.PeerInfo
		for _, clp := range closer {
			if kb.Closer(clp, dht.self, key) && peerset.TryAdd(clp) {
				select {
				case out <- clp:
				case <-ctx.Done():
					return nil, ctx.Err()
				}
				filtered = append(filtered, dht.peerstore.PeerInfo(clp))
			}
		}

		// For DHT query command
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(filtered),
		})

		return &dhtQueryResult{closerPeers: filtered}, nil
	})

	go func() {
		defer close(out)
		defer e.Done()
		// run it!
		_, err := query.Run(ctx, tablepeers)
		if err != nil {
			log.Debugf("closestPeers query run error: %s", err)
		}
	}()

	return out, nil
}
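// A consumption sketch for GetClosestPeers: collect the (up to K) closest
// peers into a slice. The query goroutine closes the channel when it finishes,
// so the range terminates; the collectClosest name is an illustrative
// assumption.
func collectClosest(ctx context.Context, dht *IpfsDHT, k key.Key) ([]peer.ID, error) {
	ch, err := dht.GetClosestPeers(ctx, k)
	if err != nil {
		return nil, err
	}

	var closest []peer.ID
	for p := range ch {
		closest = append(closest, p)
	}
	return closest, nil
}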