func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()

	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.B58String() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the address given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check the signature later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // don't add our own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, pstore.ProviderAddrTTL)
		}
		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}
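// For context, a minimal sketch of the sending side that handleAddProvider
// services: announcing ourselves as a provider of key k to peer p. The
// message construction mirrors the handler above, but announceProvider and
// the use of dht.sendMessage here are assumptions for illustration, not the
// original sender.
func (dht *IpfsDHT) announceProvider(ctx context.Context, p peer.ID, k key.Key) error {
	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, string(k), 0)
	// include our own addresses so the receiver can add them to its peerstore
	pi := pstore.PeerInfo{ID: dht.self, Addrs: dht.host.Addrs()}
	pmes.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), []pstore.PeerInfo{pi})
	return dht.sendMessage(ctx, p, pmes) // assumed fire-and-forget send helper
}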
// putValueToPeer stores the given key/value pair at the peer 'p'
func (dht *IpfsDHT) putValueToPeer(ctx context.Context, p peer.ID, key key.Key, rec *pb.Record) error {
	pmes := pb.NewMessage(pb.Message_PUT_VALUE, string(key), 0)
	pmes.Record = rec
	rpmes, err := dht.sendRequest(ctx, p, pmes)
	switch err {
	case ErrReadTimeout:
		log.Warningf("read timeout: %s %s", p.Pretty(), key)
		fallthrough
	default:
		return err
	case nil:
		// success; verify the echoed record below.
	}

	// the remote peer echoes the record back; compare it against what we
	// sent to confirm the value was stored correctly.
	if !bytes.Equal(rpmes.GetRecord().Value, pmes.GetRecord().Value) {
		return errors.New("value not put correctly")
	}
	return nil
}
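// A sketch of how putValueToPeer might be driven by a replicated put that
// writes to each of the closest peers concurrently. GetClosestPeers (assumed
// here to yield a channel of peer.IDs), putValueToClosest itself, and the
// sync import are assumptions for illustration.
func (dht *IpfsDHT) putValueToClosest(ctx context.Context, k key.Key, rec *pb.Record) error {
	peers, err := dht.GetClosestPeers(ctx, k)
	if err != nil {
		return err
	}
	var wg sync.WaitGroup
	for p := range peers {
		wg.Add(1)
		go func(p peer.ID) {
			defer wg.Done()
			// individual failures are logged rather than fatal: the put
			// succeeds if enough of the closest peers store the record.
			if err := dht.putValueToPeer(ctx, p, k, rec); err != nil {
				log.Debugf("failed putting value to peer %s: %s", p, err)
			}
		}(p)
	}
	wg.Wait()
	return nil
}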
func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.ID, key key.Key) (*pb.Message, error) {
	defer log.EventBegin(ctx, "findProvidersSingle", p, &key).Done()

	pmes := pb.NewMessage(pb.Message_GET_PROVIDERS, string(key), 0)
	resp, err := dht.sendRequest(ctx, p, pmes)
	switch err {
	case nil:
		return resp, nil
	case ErrReadTimeout:
		log.Warningf("read timeout: %s %s", p.Pretty(), key)
		fallthrough
	default:
		return nil, err
	}
}
// findPeerSingle asks peer 'p' if they know where the peer with id 'id' is
func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.ID, id peer.ID) (*pb.Message, error) {
	defer log.EventBegin(ctx, "findPeerSingle", p, id).Done()

	pmes := pb.NewMessage(pb.Message_FIND_NODE, string(id), 0)
	resp, err := dht.sendRequest(ctx, p, pmes)
	switch err {
	case nil:
		return resp, nil
	case ErrReadTimeout:
		log.Warningf("read timeout: %s %s", p.Pretty(), id)
		fallthrough
	default:
		return nil, err
	}
}
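// findProvidersSingle and findPeerSingle above share an identical
// send-request-and-classify-error pattern. A hypothetical refactor (a
// sketch, not part of the original code) could factor it out; only the
// message type and the value logged on timeout differ:
func (dht *IpfsDHT) sendRequestLogged(ctx context.Context, p peer.ID, pmes *pb.Message, desc string) (*pb.Message, error) {
	resp, err := dht.sendRequest(ctx, p, pmes)
	switch err {
	case nil:
		return resp, nil
	case ErrReadTimeout:
		// timeouts are frequent enough to log as warnings rather than
		// errors; the caller still receives the failure.
		log.Warningf("read timeout: %s %s", p.Pretty(), desc)
		fallthrough
	default:
		return nil, err
	}
}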
// Dial metadata is metadata for dial events
func Dial(sys string, lid, rid peer.ID, laddr, raddr ma.Multiaddr) DeferredMap {
	m := DeferredMap{}
	m["subsystem"] = sys
	if lid != "" {
		m["localPeer"] = func() interface{} { return lid.Pretty() }
	}
	if laddr != nil {
		m["localAddr"] = func() interface{} { return laddr.String() }
	}
	if rid != "" {
		m["remotePeer"] = func() interface{} { return rid.Pretty() }
	}
	if raddr != nil {
		m["remoteAddr"] = func() interface{} { return raddr.String() }
	}
	return m
}
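// Example of how Dial might be wired into a dial attempt (the "swarm"
// subsystem name and logDial itself are illustrative assumptions). Every
// value in the DeferredMap is a closure, so Pretty()/String() only run if
// the event is actually serialized by the logger.
func logDial(ctx context.Context, lid, rid peer.ID, laddr, raddr ma.Multiaddr) {
	lm := Dial("swarm", lid, rid, laddr, raddr)
	defer log.EventBegin(ctx, "dialAttempt", lm).Done()
	// ... perform the dial while the event is open ...
}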
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleGetProviders", lm).Done()

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.B58String() }

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	log.Debugf("%s begin", reqDesc)
	defer log.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	has, err := dht.datastore.Has(key.DsKey())
	if err != nil && err != ds.ErrNotFound {
		log.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		log.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if len(providers) > 0 {
		infos := pstore.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if closer != nil {
		infos := pstore.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}
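// On the requesting side, a sketch of unpacking the message that
// handleGetProviders returns, reusing findProvidersSingle from above.
// providersFromPeer is hypothetical, and the return type of
// pb.PBPeersToPeerInfos is assumed to match the pstore PeerInfo slices
// used elsewhere in this code.
func (dht *IpfsDHT) providersFromPeer(ctx context.Context, p peer.ID, k key.Key) (provs, closer []pstore.PeerInfo, err error) {
	resp, err := dht.findProvidersSingle(ctx, p, k)
	if err != nil {
		return nil, nil, err
	}
	return pb.PBPeersToPeerInfos(resp.GetProviderPeers()),
		pb.PBPeersToPeerInfos(resp.GetCloserPeers()), nil
}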
func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text:    fmt.Sprintf("Looking up peer %s", pid.Pretty()),
				Success: true,
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, pstore.TempAddrTTL)
		}

		outChan <- &PingResult{
			Text:    fmt.Sprintf("PING %s.", pid.Pretty()),
			Success: true,
		}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()

		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{
				Success: false,
				Text:    fmt.Sprintf("Ping error: %s", err),
			}
			return
		}

		var done bool
		var total time.Duration
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
			case t, ok := <-pings:
				if !ok {
					// break only exits the select; the done flag
					// terminates the loop.
					done = true
					break
				}

				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				time.Sleep(time.Second)
			}
		}

		// note: averaged over the requested numPings, even if the loop
		// ended early.
		averagems := total.Seconds() * 1000 / float64(numPings)
		outChan <- &PingResult{
			Success: true,
			Text:    fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}
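// A minimal sketch of a consumer draining pingPeer's channel (printPings and
// its arguments are assumptions for illustration). The goroutine above
// closes outChan, so ranging over it terminates cleanly.
func printPings(ctx context.Context, n *core.IpfsNode, pid peer.ID) {
	for out := range pingPeer(ctx, n, pid, 10) {
		res := out.(*PingResult) // pingPeer only ever sends *PingResult
		if res.Time > 0 {
			fmt.Printf("pong: time=%s\n", res.Time)
		} else {
			fmt.Println(res.Text)
		}
	}
}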