// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(ctx context.Context, h host.Host, dstore ds.Datastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.datastore = dstore
	dht.self = h.ID()
	dht.peerstore = h.Peerstore()
	dht.host = h

	// register for network notifs.
	dht.host.Network().Notify((*netNotifiee)(dht))

	dht.proc = goprocess.WithTeardown(func() error {
		// remove ourselves from network notifs.
		dht.host.Network().StopNotify((*netNotifiee)(dht))
		return nil
	})

	dht.ctx = ctx

	h.SetStreamHandler(ProtocolDHT, dht.handleNewStream)
	dht.providers = NewProviderManager(dht.ctx, dht.self)
	dht.proc.AddChild(dht.providers.proc)
	goprocessctx.CloseAfterContext(dht.proc, ctx)

	dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(dht.self), time.Minute, dht.peerstore)
	dht.birth = time.Now()

	dht.Validator = make(record.Validator)
	dht.Validator["pk"] = record.PublicKeyValidator

	dht.Selector = make(record.Selector)
	dht.Selector["pk"] = record.PublicKeySelector

	return dht
}
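
// Usage sketch (added for illustration, not part of the original file): shows
// how a caller might wire the DHT onto an existing libp2p host. It assumes a
// live host.Host `h`, a ds.Datastore `dstore`, and a context `ctx` are already
// available in the caller's scope; the helper name `newNodeRouting` is
// hypothetical.
func newNodeRouting(ctx context.Context, h host.Host, dstore ds.Datastore) *IpfsDHT {
	// NewDHT hooks the DHT into the host: it registers the protocol stream
	// handler and the network notifiee, and ties shutdown to ctx via goprocess.
	return newNodeRoutingDHT(ctx, h, dstore)
}

// newNodeRoutingDHT is just an alias for clarity in this sketch.
func newNodeRoutingDHT(ctx context.Context, h host.Host, dstore ds.Datastore) *IpfsDHT {
	return NewDHT(ctx, h, dstore)
}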

func (r *dhtQueryRunner) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	r.log = log
	r.runCtx = ctx

	if len(peers) == 0 {
		log.Warning("Running query with no peers!")
		return nil, nil
	}

	// setup concurrency rate limiting
	for i := 0; i < r.query.concurrency; i++ {
		r.rateLimit <- struct{}{}
	}

	// add all the peers we got first.
	for _, p := range peers {
		r.addPeerToQuery(p)
	}

	// go do this thing.
	// do it as a child proc to make sure Run exits
	// ONLY AFTER spawn workers has exited.
	r.proc.Go(r.spawnWorkers)

	// so workers are working.

	// wait until they're done.
	err := routing.ErrNotFound

	// now, if the context finishes, close the proc.
	// we have to do it here because the logic before is setup, which
	// should run without closing the proc.
	ctxproc.CloseAfterContext(r.proc, ctx)

	select {
	case <-r.peersRemaining.Done():
		r.proc.Close()
		r.RLock()
		defer r.RUnlock()

		err = routing.ErrNotFound

		// if every query to every peer failed, something must be very wrong.
		if len(r.errs) > 0 && len(r.errs) == r.peersSeen.Size() {
			log.Debugf("query errs: %s", r.errs)
			err = r.errs[0]
		}

	case <-r.proc.Closed():
		r.RLock()
		defer r.RUnlock()
		err = context.DeadlineExceeded
	}

	if r.result != nil && r.result.success {
		return r.result, nil
	}

	return nil, err
}
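
// Usage sketch (illustrative, not from the original source): how a caller
// might drive a query runner under a deadline and interpret the outcome. It
// assumes a constructed *dhtQueryRunner `r` and a seed peer list `seeds`
// already exist, and that the `time` package is imported; the helper name
// `runWithTimeout` is hypothetical.
func runWithTimeout(parent context.Context, r *dhtQueryRunner, seeds []peer.ID) (*dhtQueryResult, error) {
	// Bound the whole query. When this context expires, CloseAfterContext
	// closes the runner's proc, and Run surfaces context.DeadlineExceeded
	// through the second select case above.
	ctx, cancel := context.WithTimeout(parent, 30*time.Second)
	defer cancel()
	return r.Run(ctx, seeds)
}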

// New initializes a BitSwap instance that communicates over the provided
// BitSwapNetwork. This function registers the returned instance as the network
// delegate.
// Runs until context is cancelled.
func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork,
	bstore blockstore.Blockstore, nice bool) exchange.Interface {

	// important to use provided parent context (since it may include important
	// loggable data). It's probably not a good idea to allow bitswap to be
	// coupled to the concerns of the IPFS daemon in this way.
	//
	// FIXME(btc) Now that bitswap manages itself using a process, it probably
	// shouldn't accept a context anymore. Clients should probably use Close()
	// exclusively. We should probably find another way to share logging data.
	ctx, cancelFunc := context.WithCancel(parent)

	notif := notifications.New()
	px := process.WithTeardown(func() error {
		notif.Shutdown()
		return nil
	})

	bs := &Bitswap{
		self:          p,
		blockstore:    bstore,
		notifications: notif,
		engine:        decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method
		network:       network,
		findKeys:      make(chan *wantlist.Entry, sizeBatchRequestChan),
		process:       px,
		newBlocks:     make(chan blocks.Block, HasBlockBufferSize),
		provideKeys:   make(chan key.Key, provideKeysBufferSize),
		wm:            NewWantManager(ctx, network),
	}
	go bs.wm.Run()
	network.SetDelegate(bs)

	// Start up bitswap's async worker routines
	bs.startWorkers(px, ctx)

	// bind the context and process.
	// do it over here to avoid closing before all setup is done.
	go func() {
		<-px.Closing() // process closes first
		cancelFunc()
	}()
	procctx.CloseAfterContext(px, ctx) // parent cancelled first

	return bs
}
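
// Usage sketch (illustrative, not part of the original file): constructing a
// Bitswap exchange and tearing it down. It assumes the caller already has a
// bsnet.BitSwapNetwork `network`, a blockstore.Blockstore `bstore`, and a
// peer.ID `p`; the function name `startExchange` is hypothetical.
func startExchange(ctx context.Context, p peer.ID, network bsnet.BitSwapNetwork,
	bstore blockstore.Blockstore) exchange.Interface {

	// Note: the `nice` parameter is accepted by New but not referenced in the
	// body above; passing true here is an assumption about the usual default.
	ex := New(ctx, p, network, bstore, true)

	// Cancelling ctx (the parent) shuts everything down: procctx.CloseAfterContext
	// closes the process, which in turn runs the teardown and cancels the
	// internal context used by the workers started in New.
	return ex
}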