// handleFindPeer answers a FIND_NODE request: it looks up peers closer to the
// requested peer ID and replies with those that have known addresses.
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleFindPeer", p).Done()
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.ID

	// if looking for self... special case where we send it on CloserPeers.
	if peer.ID(pmes.GetKey()) == dht.self {
		closest = []peer.ID{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	}

	if closest == nil {
		log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
		return resp, nil
	}

	// only send back peers we actually have addresses for.
	var withAddresses []peer.PeerInfo
	closestinfos := peer.PeerInfos(dht.peerstore, closest)
	for _, pi := range closestinfos {
		if len(pi.Addrs) > 0 {
			withAddresses = append(withAddresses, pi)
			log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
		}
	}

	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
	return resp, nil
}
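// newFindPeerRequest is an illustrative sketch (not part of the original file)
// of how the request served by handleFindPeer is built on the requesting side:
// the target peer's ID goes into the message key, which handleFindPeer reads
// back via pmes.GetKey(). The helper name and the ClusterLevel of 0 are
// assumptions for the example.
func newFindPeerRequest(target peer.ID) *pb.Message {
	return pb.NewMessage(pb.Message_FIND_NODE, string(target), 0)
}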
// handleGetProviders answers a GET_PROVIDERS request: it returns the known
// providers for the key (including ourselves if we hold the value locally)
// along with closer peers.
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleGetProviders", lm).Done()

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.B58String() }

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	log.Debugf("%s begin", reqDesc)
	defer log.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	has, err := dht.datastore.Has(key.DsKey())
	if err != nil && err != ds.ErrNotFound {
		log.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		log.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if len(providers) > 0 {
		infos := peer.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if closer != nil {
		infos := peer.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}
// handleGetValue answers a GET_VALUE request: it returns the record for the
// key from the local datastore (if any) plus closer peers to keep the query going.
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleGetValue", p).Done()
	log.Debugf("%s handleGetValue for key: %s", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is there even a key?
	k := key.Key(pmes.GetKey())
	if k == "" {
		return nil, errors.New("handleGetValue but no key was provided")
		// TODO: send back an error response? could be bad, but the other node's hanging.
	}

	rec, err := dht.checkLocalDatastore(k)
	if err != nil {
		return nil, err
	}
	resp.Record = rec

	// Find closest peer on given cluster to desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if len(closer) > 0 {
		closerinfos := peer.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
			log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
			if len(pi.Addrs) < 1 {
				log.Errorf(`no addresses on peer being sent! [local:%s] [sending:%s] [remote:%s]`, dht.self, pi.ID, p)
			}
		}
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}
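// The three handlers above share the signature (ctx, peer.ID, *pb.Message) ->
// (*pb.Message, error). Below is a minimal sketch of how such handlers might be
// selected by incoming message type; the dhtHandler type and handlerForMsgType
// name are illustrative assumptions, and handlers for the remaining message
// types (put value, add provider, ping, ...) are omitted here.
type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error)

func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
	switch t {
	case pb.Message_GET_VALUE:
		return dht.handleGetValue
	case pb.Message_GET_PROVIDERS:
		return dht.handleGetProviders
	case pb.Message_FIND_NODE:
		return dht.handleFindPeer
	default:
		return nil // other message types handled elsewhere
	}
}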