// Bootstrap kicks off IpfsNode bootstrapping. This function will periodically
// check the number of open connections and -- if there are too few -- initiate
// connections to well-known bootstrap peers. It also kicks off subsystem
// bootstrapping (i.e. routing).
func Bootstrap(n *IpfsNode, cfg BootstrapConfig) (io.Closer, error) {

	// make a signal to wait for one bootstrap round to complete.
	doneWithRound := make(chan struct{})

	// the periodic bootstrap function -- the connection supervisor
	periodic := func(worker goprocess.Process) {
		ctx := procctx.WithProcessClosing(context.Background(), worker)
		defer log.EventBegin(ctx, "periodicBootstrap", n.Identity).Done()

		if err := bootstrapRound(ctx, n.PeerHost, cfg); err != nil {
			log.Event(ctx, "bootstrapError", n.Identity, lgbl.Error(err))
			log.Debugf("%s bootstrap error: %s", n.Identity, err)
		}

		<-doneWithRound
	}

	// kick off the node's periodic bootstrapping
	proc := periodicproc.Tick(cfg.Period, periodic)
	proc.Go(periodic) // run one right now.

	// kick off Routing.Bootstrap
	if n.Routing != nil {
		ctx := procctx.WithProcessClosing(context.Background(), proc)
		if err := n.Routing.Bootstrap(ctx); err != nil {
			proc.Close()
			return nil, err
		}
	}

	doneWithRound <- struct{}{}
	close(doneWithRound) // it no longer blocks periodic
	return proc, nil
}
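The supervision pattern here is all goprocess: periodicproc.Tick schedules the connection supervisor on cfg.Period, proc.Go runs one round immediately, and closing the returned process stops everything. A standalone sketch of that same pattern, stripped of the IPFS-specific pieces (the import aliases and the printed "round" body are illustrative, not from the source):

package main

import (
	"fmt"
	"time"

	goprocess "github.com/jbenet/goprocess"
	periodicproc "github.com/jbenet/goprocess/periodic"
)

func main() {
	supervise := func(worker goprocess.Process) {
		// this would be one "bootstrap round" in the real code
		fmt.Println("round at", time.Now())
	}

	// run the supervisor every second...
	proc := periodicproc.Tick(time.Second, supervise)
	// ...and once right now, just as Bootstrap does with proc.Go(periodic).
	proc.Go(supervise)

	time.Sleep(3 * time.Second)
	proc.Close() // tears down the ticker and any in-flight round
}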
func newQueryRunner(q *dhtQuery) *dhtQueryRunner {
	proc := process.WithParent(process.Background())
	ctx := ctxproc.WithProcessClosing(context.Background(), proc)
	return &dhtQueryRunner{
		query:          q,
		peersToQuery:   queue.NewChanQueue(ctx, queue.NewXORDistancePQ(q.key)),
		peersRemaining: todoctr.NewSyncCounter(),
		peersSeen:      pset.New(),
		rateLimit:      make(chan struct{}, q.concurrency),
		proc:           proc,
	}
}
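The struct declaration itself is not part of this excerpt, but its shape can be inferred from what the constructor fills in and from the fields queryPeer touches below. A rough sketch (not the verbatim upstream declaration):

type dhtQueryRunner struct {
	query          *dhtQuery        // the query being run
	peersToQuery   *queue.ChanQueue // peers remaining to be queried, XOR-distance ordered
	peersRemaining todoctr.Counter  // peers queued plus peers currently being processed
	peersSeen      *pset.PeerSet    // guards against querying the same peer twice

	result *dhtQueryResult // set by the first successful worker
	errs   []error         // errors accumulated by failed workers

	rateLimit chan struct{} // semaphore bounding concurrent workers
	proc      process.Process

	sync.Mutex // embedded; queryPeer calls r.Lock / r.Unlock around result and errs
}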
func (r *dhtQueryRunner) queryPeer(proc process.Process, p peer.ID) {
	// make sure we rate limit concurrency.
	select {
	case <-r.rateLimit:
	case <-proc.Closing():
		r.peersRemaining.Decrement(1)
		return
	}

	// ok let's do this!
	// create a context from our proc.
	ctx := ctxproc.WithProcessClosing(context.Background(), proc)

	// make sure we do this when we exit
	defer func() {
		// signal we're done processing peer p
		r.peersRemaining.Decrement(1)
		r.rateLimit <- struct{}{}
	}()

	// make sure we're connected to the peer.
	// FIXME abstract away into the network layer
	if conns := r.query.dht.host.Network().ConnsToPeer(p); len(conns) == 0 {
		log.Infof("not connected. dialing.")
		// while we dial, we do not take up a rate limit. this is to allow
		// forward progress during potentially very high latency dials.
		r.rateLimit <- struct{}{}

		pi := peer.PeerInfo{ID: p}

		if err := r.query.dht.host.Connect(ctx, pi); err != nil {
			log.Debugf("Error connecting: %s", err)

			notif.PublishQueryEvent(ctx, &notif.QueryEvent{
				Type:  notif.QueryError,
				Extra: err.Error(),
			})

			r.Lock()
			r.errs = append(r.errs, err)
			r.Unlock()
			<-r.rateLimit // need to grab it again, as we deferred.
			return
		}
		<-r.rateLimit // need to grab it again, as we deferred.
		log.Debugf("connected. dial success.")
	}

	// finally, run the query against this peer
	res, err := r.query.qfunc(ctx, p)

	if err != nil {
		log.Debugf("ERROR worker for: %v %v", p, err)
		r.Lock()
		r.errs = append(r.errs, err)
		r.Unlock()

	} else if res.success {
		log.Debugf("SUCCESS worker for: %v %s", p, res)
		r.Lock()
		r.result = res
		r.Unlock()
		go r.proc.Close() // signal to everyone that we're done.
		// must be async, as we're one of the children, and Close blocks.

	} else if len(res.closerPeers) > 0 {
		log.Debugf("PEERS CLOSER -- worker for: %v (%d closer peers)", p, len(res.closerPeers))
		for _, next := range res.closerPeers {
			if next.ID == r.query.dht.self { // don't add self.
				log.Debugf("PEERS CLOSER -- worker for: %v found self", p)
				continue
			}

			// add their addresses to the dialer's peerstore
			r.query.dht.peerstore.AddAddrs(next.ID, next.Addrs, peer.TempAddrTTL)
			r.addPeerToQuery(next.ID)
			log.Debugf("PEERS CLOSER -- worker for: %v added %v (%v)", p, next.ID, next.Addrs)
		}
	} else {
		log.Debugf("QUERY worker for: %v - not found, and no closer peers.", p)
	}
}
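queryPeer feeds newly discovered peers back into the query through r.addPeerToQuery, which is not included in this excerpt. Judging from the fields the constructor sets up, it presumably deduplicates against peersSeen, bumps peersRemaining, and pushes the peer onto the XOR-distance queue. A plausible sketch under those assumptions (not the verbatim upstream function):

func (r *dhtQueryRunner) addPeerToQuery(next peer.ID) {
	if next == r.query.dht.self {
		return // never query ourselves
	}

	// only consider each peer once per query
	if !r.peersSeen.TryAdd(next) {
		return
	}

	// one more peer is outstanding; workers call Decrement(1) when done with it
	r.peersRemaining.Increment(1)

	// hand the peer to the distance-ordered queue, unless we are shutting down
	select {
	case r.peersToQuery.EnqChan <- next:
	case <-r.proc.Closing():
	}
}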