// findProvidersAsyncRoutine finds up to 'count' providers for the given key,
// sending them on peerOut as they are discovered. It checks the local provider
// store first and falls back to an iterative DHT query. peerOut is closed when
// the routine returns.
func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key key.Key, count int, peerOut chan peer.PeerInfo) {
	defer log.EventBegin(ctx, "findProvidersAsync", &key).Done()
	defer close(peerOut)

	ps := pset.NewLimited(count)
	provs := dht.providers.GetProviders(ctx, key)
	for _, p := range provs {
		// NOTE: assuming that this list of peers is unique
		if ps.TryAdd(p) {
			select {
			case peerOut <- dht.peerstore.PeerInfo(p):
			case <-ctx.Done():
				return
			}
		}

		// If we have enough peers locally, don't bother with remote RPC
		if ps.Size() >= count {
			return
		}
	}

	// setup the Query
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})
		pmes, err := dht.findProvidersSingle(ctx, p, key)
		if err != nil {
			return nil, err
		}

		log.Debugf("%d provider entries", len(pmes.GetProviderPeers()))
		provs := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
		log.Debugf("%d provider entries decoded", len(provs))

		// Add unique providers from request, up to 'count'
		for _, prov := range provs {
			log.Debugf("got provider: %s", prov)
			if ps.TryAdd(prov.ID) {
				log.Debugf("using provider: %s", prov)
				select {
				case peerOut <- prov:
				case <-ctx.Done():
					log.Debug("Context timed out sending more providers")
					return nil, ctx.Err()
				}
			}
			if ps.Size() >= count {
				log.Debugf("got enough providers (%d/%d)", ps.Size(), count)
				return &dhtQueryResult{success: true}, nil
			}
		}

		// Give closer peers back to the query to be queried
		closer := pmes.GetCloserPeers()
		clpeers := pb.PBPeersToPeerInfos(closer)
		log.Debugf("got closer peers: %d %s", len(clpeers), clpeers)

		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(clpeers),
		})

		return &dhtQueryResult{closerPeers: clpeers}, nil
	})

	peers := dht.routingTable.NearestPeers(kb.ConvertKey(key), AlphaValue)
	_, err := query.Run(ctx, peers)
	if err != nil {
		log.Debugf("Query error: %s", err)
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:  notif.QueryError,
			Extra: err.Error(),
		})
	}
}
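// Illustrative usage sketch (not part of the original source): a hypothetical
// caller that drives findProvidersAsyncRoutine directly. It allocates the output
// channel, runs the routine in a goroutine, and drains the channel until the
// routine closes it (which it does on completion or context cancellation).
// The function name and the buffered channel size are assumptions for
// illustration only; they are not part of the DHT API shown above.
func exampleCollectProviders(ctx context.Context, dht *IpfsDHT, k key.Key, count int) []peer.PeerInfo {
	// Buffer up to 'count' results so the routine is not blocked by a slow reader.
	peerOut := make(chan peer.PeerInfo, count)
	go dht.findProvidersAsyncRoutine(ctx, k, count, peerOut)

	var found []peer.PeerInfo
	for pi := range peerOut {
		found = append(found, pi)
	}
	return found
}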
// Kademlia 'node lookup' operation. Returns a channel of the K closest peers
// to the given key.
func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key key.Key) (<-chan peer.ID, error) {
	e := log.EventBegin(ctx, "getClosestPeers", &key)
	tablepeers := dht.routingTable.NearestPeers(kb.ConvertKey(key), KValue)
	if len(tablepeers) == 0 {
		return nil, kb.ErrLookupFailure
	}

	out := make(chan peer.ID, KValue)
	peerset := pset.NewLimited(KValue)

	for _, p := range tablepeers {
		select {
		case out <- p:
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		peerset.Add(p)
	}

	// since the query doesn't actually pass our context down
	// we have to hack this here. whyrusleeping isn't a huge fan of goprocess
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		// For DHT query command
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})

		closer, err := dht.closerPeersSingle(ctx, key, p)
		if err != nil {
			log.Debugf("error getting closer peers: %s", err)
			return nil, err
		}

		var filtered []peer.PeerInfo
		for _, clp := range closer {
			if kb.Closer(clp, dht.self, key) && peerset.TryAdd(clp) {
				select {
				case out <- clp:
				case <-ctx.Done():
					return nil, ctx.Err()
				}
				filtered = append(filtered, dht.peerstore.PeerInfo(clp))
			}
		}

		// For DHT query command
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(filtered),
		})

		return &dhtQueryResult{closerPeers: filtered}, nil
	})

	go func() {
		defer close(out)
		defer e.Done()
		// run it!
		_, err := query.Run(ctx, tablepeers)
		if err != nil {
			log.Debugf("closestPeers query run error: %s", err)
		}
	}()

	return out, nil
}
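// Illustrative usage sketch (not part of the original source): a hypothetical
// caller that runs a node lookup and collects the resulting peer IDs.
// GetClosestPeers returns immediately; results stream on the channel, which is
// closed once the underlying query goroutine finishes. The function name below
// is an assumption for illustration only.
func exampleClosestPeers(ctx context.Context, dht *IpfsDHT, k key.Key) ([]peer.ID, error) {
	ch, err := dht.GetClosestPeers(ctx, k)
	if err != nil {
		return nil, err // e.g. kb.ErrLookupFailure when the routing table is empty
	}

	var closest []peer.ID
	for p := range ch {
		closest = append(closest, p)
	}
	return closest, nil
}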