func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
		}

		outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()
		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
			return
		}

		var done bool
		var total time.Duration
		var count int
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
			case t, ok := <-pings:
				if !ok {
					done = true
					break
				}
				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				count++
				time.Sleep(time.Second)
			}
		}
		if count == 0 {
			return
		}
		// Average over the pings that actually completed, so an early
		// timeout or closed channel doesn't skew the result.
		averagems := total.Seconds() * 1000 / float64(count)
		outChan <- &PingResult{
			Text: fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}

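// Hedged usage sketch (not from the original source): consumers range over the
// returned channel until it closes, type-asserting each result. Only pingPeer
// and PingResult above are real; this caller is illustrative.
func printPings(ctx context.Context, n *core.IpfsNode, pid peer.ID) {
	for out := range pingPeer(ctx, n, pid, 4) {
		res, ok := out.(*PingResult)
		if !ok {
			continue
		}
		if res.Success {
			fmt.Printf("latency: %s\n", res.Time)
		} else {
			fmt.Println(res.Text)
		}
	}
}
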
func logRoutingErrDifferentPeers(ctx context.Context, wanted, got peer.ID, err error) {
	lm := make(lgbl.DeferredMap)
	lm["error"] = err
	lm["wantedPeer"] = func() interface{} { return wanted.Pretty() }
	lm["gotPeer"] = func() interface{} { return got.Pretty() }
	log.Event(ctx, "routingError", lm)
}

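// The closures above are a deferred-evaluation pattern: lgbl.DeferredMap calls
// them only if the event is actually serialized, so the base58 encoding done by
// Pretty() is skipped when event logging is off. A minimal sketch of the same
// pattern (this helper is illustrative, not part of the original file):
func logPeerEvent(ctx context.Context, event string, p peer.ID) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() } // evaluated lazily
	log.Event(ctx, event, lm)
}
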
func (service *OpenBazaarService) handleUnFollow(p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("Received UNFOLLOW message from %s", p.Pretty())
	err := service.datastore.Followers().Delete(p.Pretty())
	if err != nil {
		return nil, err
	}
	service.broadcast <- []byte(`{"notification": {"unfollow":"` + p.Pretty() + `"}}`)
	return nil, nil
}

func (p *PointersDB) Delete(id peer.ID) error {
	p.lock.Lock()
	defer p.lock.Unlock()
	_, err := p.db.Exec("delete from pointers where pointerID=?", id.Pretty())
	if err != nil {
		log.Error(err)
		return err
	}
	return nil
}

// Unlock releases waiters on a dial attempt. See Lock.
// If Unlock(p) is called without calling Lock(p) first, Unlock panics.
func (ds *dialsync) Unlock(dst peer.ID) {
	ds.lock.Lock()
	wait, found := ds.ongoing[dst]
	if !found {
		panic("called dialDone with no ongoing dials to peer: " + dst.Pretty())
	}

	delete(ds.ongoing, dst) // remove ongoing dial
	close(wait)             // release everyone else
	ds.lock.Unlock()
}

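// A hedged sketch of the intended pairing (Lock is referenced above but not
// shown here; its assumed semantics are that it blocks out concurrent dials to
// the same peer and registers the caller in ds.ongoing). dialPeer is
// illustrative only:
func (ds *dialsync) dialPeer(dst peer.ID, dial func(peer.ID) error) error {
	ds.Lock(dst)         // assumed: waits for any ongoing dial, then claims dst
	defer ds.Unlock(dst) // releases everyone waiting on this dial
	return dial(dst)
}
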
func (service *OpenBazaarService) handleOfflineAck(p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("Received OFFLINE_ACK message from %s", p.Pretty())
	pid, err := peer.IDB58Decode(string(pmes.Payload.Value))
	if err != nil {
		return nil, err
	}
	err = service.datastore.Pointers().Delete(pid)
	if err != nil {
		return nil, err
	}
	return nil, nil
}

func (service *OpenBazaarService) SendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("Sending %s request to %s", pmes.MessageType.String(), p.Pretty())
	s, err := service.host.NewStream(ctx, ProtocolOpenBazaar, p)
	if err != nil {
		return nil, err
	}
	defer s.Close()

	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)

	if err := w.WriteMsg(pmes); err != nil {
		return nil, err
	}

	rpmes := new(pb.Message)
	if err := r.ReadMsg(rpmes); err != nil {
		log.Debugf("No response from %s", p.Pretty())
		return nil, err
	}
	if rpmes == nil {
		log.Debugf("No response from %s", p.Pretty())
		return nil, errors.New("no response from peer")
	}

	log.Debugf("Received response from %s", p.Pretty())
	return rpmes, nil
}

func (service *OpenBazaarService) SendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error {
	log.Debugf("Sending %s message to %s", pmes.MessageType.String(), p.Pretty())
	s, err := service.host.NewStream(ctx, ProtocolOpenBazaar, p)
	if err != nil {
		return err
	}
	defer s.Close()

	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	w := ggio.NewDelimitedWriter(cw)

	if err := w.WriteMsg(pmes); err != nil {
		return err
	}
	return nil
}

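// Hedged usage sketch contrasting the two senders: SendRequest blocks for a
// reply on the same stream, while SendMessage is fire-and-forget. This caller
// and the PING round trip are illustrative (handlePing below echoes the
// message back).
func pingService(ctx context.Context, service *OpenBazaarService, p peer.ID) error {
	req := &pb.Message{MessageType: pb.Message_PING}
	if _, err := service.SendRequest(ctx, p, req); err != nil {
		return err // no reply: stream failed or peer unreachable
	}
	return nil
}
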
// Dial returns a DeferredMap of metadata for dial events.
func Dial(sys string, lid, rid peer.ID, laddr, raddr ma.Multiaddr) DeferredMap {
	m := DeferredMap{}
	m["subsystem"] = sys
	if lid != "" {
		m["localPeer"] = func() interface{} { return lid.Pretty() }
	}
	if laddr != nil {
		m["localAddr"] = func() interface{} { return laddr.String() }
	}
	if rid != "" {
		m["remotePeer"] = func() interface{} { return rid.Pretty() }
	}
	if raddr != nil {
		m["remoteAddr"] = func() interface{} { return raddr.String() }
	}
	return m
}

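// Hedged usage sketch: build the deferred dial metadata and emit it as an
// event. The logger and event name here are illustrative; only Dial itself is
// from this file.
func logDialAttempt(ctx context.Context, lid, rid peer.ID, laddr, raddr ma.Multiaddr) {
	lm := Dial("swarm", lid, rid, laddr, raddr)
	log.Event(ctx, "dialAttempt", lm)
}
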
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleGetProviders", lm).Done()

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.B58String() }

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	log.Debugf("%s begin", reqDesc)
	defer log.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	has, err := dht.datastore.Has(key.DsKey())
	if err != nil && err != ds.ErrNotFound {
		log.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		log.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if len(providers) > 0 {
		infos := peer.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if closer != nil {
		infos := peer.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}

func (n *OpenBazaarNode) SendOfflineAck(peerId string, pointerID peer.ID) error {
	p, err := peer.IDB58Decode(peerId)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	a := &any.Any{Value: []byte(pointerID.Pretty())}
	m := pb.Message{
		MessageType: pb.Message_OFFLINE_ACK,
		Payload:     a,
	}
	err = n.Service.SendMessage(ctx, p, &m)
	if err != nil { // Couldn't connect directly to peer. Likely offline.
		if err := n.SendOfflineMessage(p, &m); err != nil {
			return err
		}
	}
	return nil
}

func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()

	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.B58String() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the address given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		// We need to disable this check for OpenBazaar pointers to work
		/*if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check signatures later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}*/

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self && !isPointer(pi.ID) { // don't add own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		} else if isPointer(pi.ID) {
			// keep the address for this pointer around for a week
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, time.Hour*24*7)
		}
		dht.providers.AddProvider(ctx, key, pi.ID)
	}

	return nil, nil
}

func (n *OpenBazaarNode) SendOfflineMessage(p peer.ID, m *pb.Message) error {
	log.Debugf("Sending offline message to %s", p.Pretty())
	env := pb.Envelope{Message: m, PeerID: n.IpfsNode.Identity.Pretty()}
	messageBytes, merr := proto.Marshal(&env)
	if merr != nil {
		return merr
	}
	ciphertext, cerr := n.EncryptMessage(p, messageBytes)
	if cerr != nil {
		return cerr
	}
	addr, aerr := n.MessageStorage.Store(p, ciphertext)
	if aerr != nil {
		return aerr
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	mh, mherr := multihash.FromB58String(p.Pretty())
	if mherr != nil {
		return mherr
	}
	// TODO: We're just using a default prefix length for now. Eventually we will
	// want to customize this, but we will need some way to get the recipient's
	// desired prefix length. Likely will be in profile.
	pointer, err := ipfs.PublishPointer(n.IpfsNode, ctx, mh, 16, addr)
	if err != nil {
		return err
	}
	if m.MessageType != pb.Message_OFFLINE_ACK {
		pointer.Purpose = ipfs.MESSAGE
		err = n.Datastore.Pointers().Put(pointer)
		if err != nil {
			return err
		}
	}
	return nil
}

func (service *OpenBazaarService) handlePing(p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("Received PING message from %s", p.Pretty())
	return pmes, nil
}