Example #1
func (dht *IpfsDHT) handleFindPeer(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.Peer

	// if looking for self, special case: we send self back on CloserPeers.
	if peer.ID(pmes.GetKey()).Equal(dht.self.ID()) {
		closest = []peer.Peer{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, CloserPeerCount)
	}

	if closest == nil {
		log.Errorf("handleFindPeer: could not find anything.")
		return resp, nil
	}

	var withAddresses []peer.Peer
	for _, p := range closest {
		if len(p.Addresses()) > 0 {
			withAddresses = append(withAddresses, p)
		}
	}

	for _, p := range withAddresses {
		log.Debugf("handleFindPeer: sending back '%s'", p)
	}
	resp.CloserPeers = pb.PeersToPBPeers(withAddresses)
	return resp, nil
}
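A hedged usage sketch for the handler above. pb.Message_FIND_NODE and the generated GetCloserPeers/GetId accessors are assumptions inferred from the message types used in these examples, not confirmed API:

// findPeerSketch is a hypothetical caller of handleFindPeer.
func findPeerSketch(dht *IpfsDHT, requester peer.Peer, target peer.ID) {
	// pb.Message_FIND_NODE is assumed; NewMessage's signature matches its use above.
	req := pb.NewMessage(pb.Message_FIND_NODE, string(target), 0)
	resp, err := dht.handleFindPeer(requester, req)
	if err != nil {
		log.Errorf("handleFindPeer failed: %s", err)
		return
	}
	// only peers with known addresses come back in CloserPeers.
	for _, cp := range resp.GetCloserPeers() {
		log.Debugf("closer peer: %s", cp.GetId())
	}
}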
Example #2
func printPeer(p peer.Peer) (interface{}, error) {
	if p == nil {
		return nil, errors.New("Attempted to print nil peer!")
	}
	info := new(IdOutput)

	info.ID = p.ID().String()
	if p.PubKey() == nil {
		return nil, errors.New("peer public key not populated on offline runs; " +
			"please run the daemon to use 'ipfs id'")
	}
	pkb, err := p.PubKey().Bytes()
	if err != nil {
		return nil, err
	}
	info.PublicKey = base64.StdEncoding.EncodeToString(pkb)
	for _, a := range p.Addresses() {
		info.Addresses = append(info.Addresses, a.String())
	}

	agent, protocol := p.GetVersions()
	info.AgentVersion = agent
	info.ProtocolVersion = protocol

	return info, nil
}
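A minimal usage sketch that renders the returned info as JSON; encoding/json and fmt are assumed imports, and IdOutput's JSON tags are not shown here:

// printPeerJSON is a hypothetical wrapper around printPeer.
func printPeerJSON(p peer.Peer) error {
	out, err := printPeer(p)
	if err != nil {
		return err
	}
	marshalled, err := json.MarshalIndent(out, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(marshalled))
	return nil
}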
Example #3
func peerToPBPeer(p peer.Peer) *Message_Peer {
	pbp := new(Message_Peer)
	addrs := p.Addresses()
	if len(addrs) == 0 || addrs[0] == nil {
		pbp.Addr = proto.String("")
	} else {
		addr := addrs[0].String()
		pbp.Addr = &addr
	}
	pid := string(p.ID())
	pbp.Id = &pid
	return pbp
}
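The PeersToPBPeers helper used in examples #1 and #8 is presumably a loop over this converter; a sketch under that assumption (the real implementation is not shown here):

// peersToPBPeersSketch converts a slice of peers the same way, one by one.
func peersToPBPeersSketch(peers []peer.Peer) []*Message_Peer {
	pbpeers := make([]*Message_Peer, len(peers))
	for i, p := range peers {
		pbpeers[i] = peerToPBPeer(p)
	}
	return pbpeers
}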
Example #4
// DialAddr connects to a peer over a particular address
// Ensures raddr is part of peer.Addresses()
// Example: d.DialAddr(ctx, peer.Addresses()[0], peer)
func (d *Dialer) DialAddr(ctx context.Context, raddr ma.Multiaddr, remote peer.Peer) (Conn, error) {

	found := false
	for _, addr := range remote.Addresses() {
		if addr.Equal(raddr) {
			found = true
			break
		}
	}
	if !found {
		return nil, debugerror.Errorf("address %s is not in peer %s", raddr, remote)
	}

	network, _, err := manet.DialArgs(raddr)
	if err != nil {
		return nil, err
	}

	laddr := d.LocalPeer.NetAddress(network)
	if laddr == nil {
		return nil, debugerror.Errorf("No local address for network %s", network)
	}

	if strings.HasPrefix(raddr.String(), "/ip4/0.0.0.0") {
		return nil, debugerror.Errorf("Attempted to connect to zero address: %s", raddr)
	}

	remote.SetType(peer.Remote)
	remote, err = d.Peerstore.Add(remote)
	if err != nil {
		log.Errorf("Error putting peer into peerstore: %s", remote)
	}

	// TODO: try to get reusing addr/ports to work.
	// madialer := manet.Dialer{LocalAddr: laddr}
	madialer := manet.Dialer{}

	log.Infof("%s dialing %s %s", d.LocalPeer, remote, raddr)
	maconn, err := madialer.Dial(raddr)
	if err != nil {
		return nil, err
	}

	c, err := newSingleConn(ctx, d.LocalPeer, remote, maconn)
	if err != nil {
		return nil, err
	}

	return newSecureConn(ctx, c, d.Peerstore)
}
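A hedged caller sketch that follows the contract in DialAddr's doc comment, namely that raddr must come from peer.Addresses():

// dialFirstAddr is a hypothetical helper that dials a peer's first known address.
func dialFirstAddr(ctx context.Context, d *Dialer, remote peer.Peer) (Conn, error) {
	addrs := remote.Addresses()
	if len(addrs) == 0 {
		return nil, debugerror.Errorf("peer %s has no known addresses", remote)
	}
	// addrs[0] is in remote.Addresses(), satisfying DialAddr's requirement.
	return d.DialAddr(ctx, addrs[0], remote)
}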
Example #5
func setupDHT(ctx context.Context, t *testing.T, p peer.Peer) *IpfsDHT {
	peerstore := peer.NewPeerstore()

	dhts := netservice.NewService(ctx, nil) // nil handler for now; patched below via SetHandler
	net, err := inet.NewIpfsNetwork(ctx, p.Addresses(), p, peerstore, &mux.ProtocolMap{
		mux.ProtocolID_Routing: dhts,
	})
	if err != nil {
		t.Fatal(err)
	}

	d := NewDHT(ctx, p, peerstore, net, dhts, ds.NewMapDatastore())
	dhts.SetHandler(d)
	d.Validators["v"] = func(u.Key, []byte) error {
		return nil
	}
	return d
}
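A sketch of how a test might use this helper; peerA and peerB are hypothetical test peers, and actually connecting the two DHTs requires suite utilities not shown here:

// TestTwoDHTsSketch is a hypothetical test wiring up two DHT instances.
func TestTwoDHTsSketch(t *testing.T) {
	ctx := context.Background()
	dhtA := setupDHT(ctx, t, peerA) // peerA, peerB: hypothetical test peers
	dhtB := setupDHT(ctx, t, peerB)
	_, _ = dhtA, dhtB // connect and exercise them with the suite's helpers
}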
Example #6
// Handshake3Msg constructs a Handshake3 msg.
func Handshake3Msg(localPeer peer.Peer, remoteAddr ma.Multiaddr) *pb.Handshake3 {
	var msg pb.Handshake3
	// don't need publicKey after secure channel.
	// msg.PublicKey = localPeer.PubKey().Bytes()

	// local listen addresses
	addrs := localPeer.Addresses()
	msg.ListenAddrs = make([][]byte, len(addrs))
	for i, a := range addrs {
		msg.ListenAddrs[i] = a.Bytes()
	}

	// observed remote address
	msg.ObservedAddr = remoteAddr.Bytes()

	// services
	// srv := localPeer.Services()
	// msg.Services = make([]mux.ProtocolID, len(srv))
	// for i, pid := range srv {
	// 	msg.Services[i] = pid
	// }

	return &msg
}
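A usage sketch that marshals the handshake message for the wire; it assumes the protobuf runtime is imported as proto, and leaves the actual transport write abstract:

// sendHandshake3 is a hypothetical sender for the message built above.
func sendHandshake3(localPeer peer.Peer, remoteAddr ma.Multiaddr) ([]byte, error) {
	msg := Handshake3Msg(localPeer, remoteAddr)
	data, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}
	// the caller writes data to the secured stream.
	return data, nil
}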
Example #7
// Dial connects to a peer.
//
// The idea is that the client of Swarm does not need to know what network
// the connection will happen over. Swarm can use whichever it chooses.
// This allows us to use various transport protocols, do NAT traversal/relay,
// etc. to achieve connection.
//
// For now, Dial uses only TCP. This will be extended.
func (s *Swarm) Dial(peer peer.Peer) (conn.Conn, error) {
	if peer.ID().Equal(s.local.ID()) {
		return nil, errors.New("Attempted connection to self!")
	}

	// check if we already have an open connection first
	c := s.GetConnection(peer.ID())
	if c != nil {
		return c, nil
	}

	// check if we don't have the peer in Peerstore
	peer, err := s.peers.Add(peer)
	if err != nil {
		return nil, err
	}

	// open connection to peer
	d := &conn.Dialer{
		LocalPeer: s.local,
		Peerstore: s.peers,
	}

	// try to connect to one of the peer's known addresses.
	// for simplicity, we do this sequentially.
	// A future commit will do this asynchronously.
	if len(peer.Addresses()) == 0 {
		return nil, errors.New("peer has no addresses to dial")
	}
	for _, addr := range peer.Addresses() {
		c, err = d.DialAddr(s.Context(), addr, peer)
		if err == nil {
			break
		}
	}
	if err != nil {
		return nil, err
	}

	c, err = s.connSetup(c)
	if err != nil {
		c.Close()
		return nil, err
	}

	// TODO replace the TODO ctx with a context passed in from caller
	log.Event(context.TODO(), "dial", peer)
	return c, nil
}
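From the client's side the transport choice is invisible, as the doc comment says; a minimal sketch of a caller:

// dialAndUse is a hypothetical client of Swarm.Dial.
func dialAndUse(s *Swarm, remote peer.Peer) (conn.Conn, error) {
	c, err := s.Dial(remote)
	if err != nil {
		return nil, err
	}
	// c is either a freshly secured connection or a reused open one.
	return c, nil
}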
Example #8
func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is the key even a key?
	key := pmes.GetKey()
	if key == "" {
		return nil, errors.New("handleGetValue but no key was provided")
	}

	// let's first check if we have the value locally.
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := u.Key(pmes.GetKey()).DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// Note: changed the behavior here to return _as much_ info as possible
	// (potentially all of {value, closer peers, provider})

	// if we have the value, send it back
	if err == nil {
		log.Debugf("%s handleGetValue success!", dht.self)

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		rec := new(pb.Record)
		err := proto.Unmarshal(byts, rec)
		if err != nil {
			log.Error("Failed to unmarshal dht record from datastore")
			return nil, err
		}

		resp.Record = rec
	}

	// if we know any providers for the requested value, return those.
	provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if len(provs) > 0 {
		log.Debugf("handleGetValue returning %d provider[s]", len(provs))
		resp.ProviderPeers = pb.PeersToPBPeers(provs)
	}

	// Find the peers in the given cluster closest to the desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
	if closer != nil {
		for _, p := range closer {
			log.Debugf("handleGetValue returning closer peer: '%s'", p)
			if len(p.Addresses()) < 1 {
				log.Critical("no addresses on peer being sent!")
			}
		}
		resp.CloserPeers = pb.PeersToPBPeers(closer)
	}

	return resp, nil
}
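Because the handler now returns as much as it can (value, providers, and closer peers together), a caller should check all three payloads; the generated Get* accessors are assumed from the proto definitions:

// getValueSketch is a hypothetical caller inspecting every payload.
func getValueSketch(dht *IpfsDHT, requester peer.Peer, req *pb.Message) error {
	resp, err := dht.handleGetValue(requester, req)
	if err != nil {
		return err
	}
	if rec := resp.GetRecord(); rec != nil {
		log.Debugf("got record for key %s", req.GetKey())
	}
	if provs := resp.GetProviderPeers(); len(provs) > 0 {
		log.Debugf("got %d providers", len(provs))
	}
	if closer := resp.GetCloserPeers(); len(closer) > 0 {
		log.Debugf("got %d closer peers", len(closer))
	}
	return nil
}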