Code Example #1
func (dht *IpfsDHT) handleAddProvider(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	key := u.Key(pmes.GetKey())

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))

	// add provider should use the address given in the message
	for _, pb := range pmes.GetProviderPeers() {
		pid := peer.ID(pb.GetId())
		if pid.Equal(p.ID()) {

			addr, err := pb.Address()
			if err != nil {
				// use the generated getter: pb.Addr may be nil when Address() fails
				log.Errorf("provider %s error with address %s", p, pb.GetAddr())
				continue
			}

			log.Infof("received provider %s %s for %s", p, addr, key)
			p.AddAddress(addr)
			dht.providers.AddProvider(key, p)

		} else {
			log.Errorf("handleAddProvider received provider %s from %s", pid, p)
		}
	}

	return pmes, nil // send back same msg as confirmation.
}
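The handler only accepts provider records whose ID matches the sender. A hedged sketch of the sending side, reusing pb.NewMessage (examples #5 and #25) and pb.PeersToPBPeers (example #25); the Message_ADD_PROVIDER constant and the 0 cluster level are assumptions, not taken from this code:

// build an ADD_PROVIDER message announcing ourselves for key
pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, string(key), 0)
pmes.ProviderPeers = pb.PeersToPBPeers([]peer.Peer{dht.self})
// a peer may only announce itself; records for other IDs are rejected above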
Code Example #2
// Handshake3Update updates local knowledge with the information in the
// handshake3 msg we received from remote client.
func Handshake3Update(lpeer, rpeer peer.Peer, msg *pb.Handshake3) (*Handshake3Result, error) {
	res := &Handshake3Result{}

	// our observed address
	observedAddr, err := ma.NewMultiaddrBytes(msg.GetObservedAddr())
	if err != nil {
		return res, err
	}
	if lpeer.AddAddress(observedAddr) {
		log.Infof("(nat) added new local, remote-observed address: %s", observedAddr)
	}
	res.LocalObservedAddress = observedAddr

	// remote's reported addresses
	for _, a := range msg.GetListenAddrs() {
		addr, err := ma.NewMultiaddrBytes(a)
		if err != nil {
			err = fmt.Errorf("remote peer address not a multiaddr: %s", err)
			log.Errorf("Handshake3 error %s", err)
			return res, err
		}
		rpeer.AddAddress(addr)
		res.RemoteListenAddresses = append(res.RemoteListenAddresses, addr)
	}

	return res, nil
}
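Paired with Handshake3Msg (code example #23), the round trip looks roughly like this; a minimal sketch in which the peer and address variables are illustrative and the message is assumed to travel over an established connection:

// sender: advertise listen addresses plus the address observed for the remote
msg := Handshake3Msg(localPeer, addrObservedForRemote)
// ... serialize, send, and decode on the other side ...

// receiver: fold the sender's view into local knowledge
res, err := Handshake3Update(myPeer, senderPeer, msg)
if err != nil {
	return err
}
log.Infof("remote observed us at %s", res.LocalObservedAddress)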
Code Example #3
File: id.go  Project: carriercomm/interplanetary
func printPeer(p peer.Peer) (interface{}, error) {
	if p == nil {
		return nil, errors.New("Attempted to print nil peer!")
	}
	info := new(IdOutput)

	info.ID = p.ID().String()
	if p.PubKey() == nil {
		return nil, errors.New(`peer public key not populated on offline runs,
please run the daemon to use ipfs id!`)
	}
	pkb, err := p.PubKey().Bytes()
	if err != nil {
		return nil, err
	}
	info.PublicKey = base64.StdEncoding.EncodeToString(pkb)
	for _, a := range p.Addresses() {
		info.Addresses = append(info.Addresses, a.String())
	}

	agent, protocol := p.GetVersions()
	info.AgentVersion = agent
	info.ProtocolVersion = protocol

	return info, nil
}
Code Example #4
File: conn.go  Project: carriercomm/interplanetary
// peerMultiConn returns the MultiConn responsible for handling this peer.
// if there is none, it creates one and returns it. Note that timeouts
// and connection teardowns will remove it.
func (s *Swarm) peerMultiConn(p peer.Peer) (*conn.MultiConn, error) {

	s.connsLock.Lock()
	mc, found := s.conns[p.Key()]
	if found {
		s.connsLock.Unlock()
		return mc, nil
	}

	// multiconn doesn't exist, make a new one.
	mc, err := conn.NewMultiConn(s.Context(), s.local, p, nil)
	if err != nil {
		s.connsLock.Unlock()
		log.Errorf("error creating multiconn: %s", err)
		return nil, err
	}
	s.conns[p.Key()] = mc
	s.connsLock.Unlock()

	// kick off reader goroutine
	s.Children().Add(1)
	mc.Children().Add(1) // child of Conn as well.
	go s.fanInSingle(mc)
	return mc, nil
}
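Note that the lock is held across the whole check-then-create sequence, so two concurrent callers cannot race to create duplicate MultiConns for the same peer; the explicit Unlock on each return path stands in for a defer, which would work equally well here.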
Code Example #5
func (dht *IpfsDHT) handleFindPeer(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.Peer

	// if looking for self... special case where we send it on CloserPeers.
	if peer.ID(pmes.GetKey()).Equal(dht.self.ID()) {
		closest = []peer.Peer{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, CloserPeerCount)
	}

	if closest == nil {
		log.Errorf("handleFindPeer: could not find anything.")
		return resp, nil
	}

	var withAddresses []peer.Peer
	for _, p := range closest {
		if len(p.Addresses()) > 0 {
			withAddresses = append(withAddresses, p)
		}
	}

	for _, p := range withAddresses {
		log.Debugf("handleFindPeer: sending back '%s'", p)
	}
	resp.CloserPeers = pb.PeersToPBPeers(withAddresses)
	return resp, nil
}
Code Example #6
File: dial.go  Project: carriercomm/interplanetary
// Dial connects to a particular peer, over a given network
// Example: d.Dial(ctx, "udp", peer)
func (d *Dialer) Dial(ctx context.Context, network string, remote peer.Peer) (Conn, error) {
	raddr := remote.NetAddress(network)
	if raddr == nil {
		return nil, debugerror.Errorf("No remote address for network %s", network)
	}
	return d.DialAddr(ctx, raddr, remote)
}
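A hedged usage sketch, assuming ctx and a remote peer.Peer with a known TCP address are in scope:

c, err := d.Dial(ctx, "tcp", remote)
if err != nil {
	return err // no "tcp" address known, or the dial itself failed
}
defer c.Close()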
Code Example #7
File: network.go  Project: carriercomm/interplanetary
func (n *network) Adapter(p peer.Peer) bsnet.BitSwapNetwork {
	client := &networkClient{
		local:   p,
		network: n,
	}
	n.clients[p.Key()] = client
	return client
}
Code Example #8
// ledger lazily instantiates a ledger
func (s *strategist) ledger(p peer.Peer) *ledger {
	l, ok := s.ledgerMap[peerKey(p.Key())]
	if !ok {
		l = newLedger(p, s.strategyFunc)
		s.ledgerMap[peerKey(p.Key())] = l
	}
	return l
}
Code Example #9
func peerIsPartner(p peer.Peer, s Strategy) bool {
	for _, partner := range s.Peers() {
		if partner.Key() == p.Key() {
			return true
		}
	}
	return false
}
Code Example #10
File: util.go  Project: carriercomm/interplanetary
func (ps *peerSet) AddIfSmallerThan(p peer.Peer, maxsize int) bool {
	var success bool
	ps.lk.Lock()
	if _, ok := ps.ps[string(p.ID())]; !ok && len(ps.ps) < maxsize {
		success = true
		ps.ps[string(p.ID())] = true
	}
	ps.lk.Unlock()
	return success
}
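A hedged usage sketch of the bounded add; candidates, maxProviders, and admitted are illustrative names, not from the original code:

var admitted []peer.Peer
for _, candidate := range candidates {
	if ps.AddIfSmallerThan(candidate, maxProviders) {
		admitted = append(admitted, candidate) // got in before the set filled up
	}
}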
Code Example #11
func (pq *distancePQ) Enqueue(p peer.Peer) {
	pq.Lock()
	defer pq.Unlock()

	distance := ks.XORKeySpace.Key(p.ID()).Distance(pq.from)

	heap.Push(&pq.heap, &peerMetric{
		peer:   p,
		metric: distance,
	})
}
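The ordering metric is XOR distance in keyspace. As an assumption about what ks.XORKeySpace.Key(...).Distance(...) computes (the standard Kademlia metric; XORKeySpace also hashes raw IDs before comparing), a self-contained sketch:

// xorDistance interprets the XOR of two equal-length IDs as a big-endian
// integer; a smaller value means "closer" in Kademlia keyspace.
// import "math/big"
func xorDistance(a, b []byte) *big.Int {
	x := make([]byte, len(a))
	for i := range a {
		x[i] = a[i] ^ b[i]
	}
	return new(big.Int).SetBytes(x)
}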
Code Example #12
File: routing.go  Project: carriercomm/interplanetary
func (rs *hashTable) Announce(p peer.Peer, k u.Key) error {
	rs.lock.Lock()
	defer rs.lock.Unlock()

	_, ok := rs.providers[k]
	if !ok {
		rs.providers[k] = make(peer.Map)
	}
	rs.providers[k][p.Key()] = p
	return nil
}
Code Example #13
File: bitswap.go  Project: carriercomm/interplanetary
// TODO(brian): handle errors
func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) (
	peer.Peer, bsmsg.BitSwapMessage) {
	log.Debugf("ReceiveMessage from %v", p.Key())
	log.Debugf("Message wantlist: %v", incoming.Wantlist())

	if p == nil {
		log.Error("Received message from nil peer!")
		// TODO propagate the error upward
		return nil, nil
	}
	if incoming == nil {
		log.Error("Got nil bitswap message!")
		// TODO propagate the error upward
		return nil, nil
	}

	// Record message bytes in ledger
	// TODO: this is bad, and could be easily abused.
	// Should only track *useful* messages in ledger
	bs.strategy.MessageReceived(p, incoming) // FIRST

	for _, block := range incoming.Blocks() {
		// TODO verify blocks?
		if err := bs.blockstore.Put(&block); err != nil {
			continue // FIXME(brian): err ignored
		}
		bs.notifications.Publish(block)
		err := bs.HasBlock(ctx, block)
		if err != nil {
			log.Warningf("HasBlock errored: %s", err)
		}
	}

	message := bsmsg.New()
	for _, wanted := range bs.wantlist.Keys() {
		message.AddWanted(wanted)
	}
	for _, key := range incoming.Wantlist() {
		// TODO: might be better to check if we have the block before
		// checking if we should send it to someone
		if bs.strategy.ShouldSendBlockToPeer(key, p) {
			block, err := bs.blockstore.Get(key)
			if err != nil {
				continue // we don't have this block locally; nothing to send
			}
			message.AddBlock(*block)
		}
	}
	defer bs.strategy.MessageSent(p, message)

	log.Debug("Returning message.")
	return p, message
}
Code Example #14
File: swarm.go  Project: carriercomm/interplanetary
// CloseConnection removes a given peer from swarm + closes the connection
func (s *Swarm) CloseConnection(p peer.Peer) error {
	c := s.GetConnection(p.ID())
	if c == nil {
		return u.ErrNotFound
	}

	s.connsLock.Lock()
	delete(s.conns, u.Key(p.ID()))
	s.connsLock.Unlock()

	return c.Close()
}
Code Example #15
File: message.go  Project: carriercomm/interplanetary
func peerToPBPeer(p peer.Peer) *Message_Peer {
	pbp := new(Message_Peer)
	addrs := p.Addresses()
	if len(addrs) == 0 || addrs[0] == nil {
		pbp.Addr = proto.String("")
	} else {
		addr := addrs[0].String()
		pbp.Addr = &addr
	}
	pid := string(p.ID())
	pbp.Id = &pid
	return pbp
}
Code Example #16
File: query.go  Project: carriercomm/interplanetary
func (r *dhtQueryRunner) addPeerToQuery(next peer.Peer, benchmark peer.Peer) {
	if next == nil {
		// peers should never be nil here; guard and log loudly if one is
		log.Error("addPeerToQuery got a nil peer")
		return
	}

	// if the new peer is further away than the peer we learned it from, don't bother (avoids loops)
	if benchmark != nil && kb.Closer(benchmark.ID(), next.ID(), r.query.key) {
		return
	}

	// if already seen, no need.
	r.Lock()
	_, found := r.peersSeen[next.Key()]
	if found {
		r.Unlock()
		return
	}
	r.peersSeen[next.Key()] = next
	r.Unlock()

	log.Debugf("adding peer to query: %v\n", next)

	// do this after unlocking to prevent possible deadlocks.
	r.peersRemaining.Increment(1)
	select {
	case r.peersToQuery.EnqChan <- next:
	case <-r.ctx.Done():
	}
}
Code Example #17
File: dial.go  Project: carriercomm/interplanetary
// DialAddr connects to a peer over a particular address
// Ensures raddr is part of peer.Addresses()
// Example: d.DialAddr(ctx, peer.Addresses()[0], peer)
func (d *Dialer) DialAddr(ctx context.Context, raddr ma.Multiaddr, remote peer.Peer) (Conn, error) {

	found := false
	for _, addr := range remote.Addresses() {
		if addr.Equal(raddr) {
			found = true
		}
	}
	if !found {
		return nil, debugerror.Errorf("address %s is not in peer %s", raddr, remote)
	}

	network, _, err := manet.DialArgs(raddr)
	if err != nil {
		return nil, err
	}

	laddr := d.LocalPeer.NetAddress(network)
	if laddr == nil {
		return nil, debugerror.Errorf("No local address for network %s", network)
	}

	if strings.HasPrefix(raddr.String(), "/ip4/0.0.0.0") {
		return nil, debugerror.Errorf("Attempted to connect to zero address: %s", raddr)
	}

	remote.SetType(peer.Remote)
	remote, err = d.Peerstore.Add(remote)
	if err != nil {
		log.Errorf("Error putting peer into peerstore: %s", remote)
	}

	// TODO: try to get reusing addr/ports to work.
	// madialer := manet.Dialer{LocalAddr: laddr}
	madialer := manet.Dialer{}

	log.Infof("%s dialing %s %s", d.LocalPeer, remote, raddr)
	maconn, err := madialer.Dial(raddr)
	if err != nil {
		return nil, err
	}

	c, err := newSingleConn(ctx, d.LocalPeer, remote, maconn)
	if err != nil {
		return nil, err
	}

	return newSecureConn(ctx, c, d.Peerstore)
}
Code Example #18
File: swarm.go  Project: carriercomm/interplanetary
// Dial connects to a peer.
//
// The idea is that the client of Swarm does not need to know what network
// the connection will happen over. Swarm can use whichever it chooses.
// This allows us to use various transport protocols, do NAT traversal/relay,
// etc. to achieve connection.
//
// For now, Dial uses only TCP. This will be extended.
func (s *Swarm) Dial(peer peer.Peer) (conn.Conn, error) {
	if peer.ID().Equal(s.local.ID()) {
		return nil, errors.New("Attempted connection to self!")
	}

	// check if we already have an open connection first
	c := s.GetConnection(peer.ID())
	if c != nil {
		return c, nil
	}

	// check if we don't have the peer in Peerstore
	peer, err := s.peers.Add(peer)
	if err != nil {
		return nil, err
	}

	// open connection to peer
	d := &conn.Dialer{
		LocalPeer: s.local,
		Peerstore: s.peers,
	}

	// try to connect to one of the peer's known addresses.
	// for simplicity, we do this sequentially.
	// A future commit will do this asynchronously.
	for _, addr := range peer.Addresses() {
		c, err = d.DialAddr(s.Context(), addr, peer)
		if err == nil {
			break
		}
	}
	// if the peer had no addresses at all, or every dial failed, give up
	if c == nil {
		if err == nil {
			err = errors.New("no addresses to dial for peer")
		}
		return nil, err
	}

	c, err = s.connSetup(c)
	if err != nil {
		c.Close()
		return nil, err
	}

	// TODO replace the TODO ctx with a context passed in from caller
	log.Event(context.TODO(), "dial", peer)
	return c, nil
}
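A hedged usage sketch; target is assumed to be a peer.Peer with at least one known address:

c, err := s.Dial(target)
if err != nil {
	return err
}
defer s.CloseConnection(target) // see code example #14
// ... exchange messages over c ...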
Code Example #19
func setupDHT(ctx context.Context, t *testing.T, p peer.Peer) *IpfsDHT {
	peerstore := peer.NewPeerstore()

	dhts := netservice.NewService(ctx, nil) // nil handler for now, need to patch it
	net, err := inet.NewIpfsNetwork(ctx, p.Addresses(), p, peerstore, &mux.ProtocolMap{
		mux.ProtocolID_Routing: dhts,
	})
	if err != nil {
		t.Fatal(err)
	}

	d := NewDHT(ctx, p, peerstore, net, dhts, ds.NewMapDatastore())
	dhts.SetHandler(d)
	d.Validators["v"] = func(u.Key, []byte) error {
		return nil
	}
	return d
}
Code Example #20
File: network.go  Project: carriercomm/interplanetary
// TODO should this be completely asynchronous?
// TODO what does the network layer do with errors received from services?
func (n *network) SendMessage(
	ctx context.Context,
	from peer.Peer,
	to peer.Peer,
	message bsmsg.BitSwapMessage) error {

	receiver, ok := n.clients[to.Key()]
	if !ok {
		return errors.New("Cannot locate peer on network")
	}

	// nb: the context is deliberately dropped here; it wouldn't actually be
	// passed over the network in a real scenario

	go n.deliver(receiver, from, message)

	return nil
}
Code Example #21
File: dht.go  Project: carriercomm/interplanetary
// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, dialer inet.Dialer, sender inet.Sender, dstore ds.Datastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.dialer = dialer
	dht.sender = sender
	dht.datastore = dstore
	dht.self = p
	dht.peerstore = ps
	dht.ContextCloser = ctxc.NewContextCloser(ctx, nil)

	dht.providers = NewProviderManager(dht.Context(), p.ID())
	dht.AddCloserChild(dht.providers)

	dht.routingTables = make([]*kb.RoutingTable, 3)
	dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Millisecond*1000)
	dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Millisecond*1000)
	dht.routingTables[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Hour)
	dht.birth = time.Now()

	dht.Validators = make(map[string]ValidatorFunc)
	dht.Validators["pk"] = ValidatePublicKeyRecord

	if doPinging {
		dht.Children().Add(1)
		go dht.PingRoutine(time.Second * 10)
	}
	return dht
}
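For a concrete call site, see setupDHT in code example #19, which builds the peerstore and network, hands them to NewDHT, and registers the resulting DHT as the service handler.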
Code Example #22
File: network.go  Project: carriercomm/interplanetary
// TODO
func (n *network) SendRequest(
	ctx context.Context,
	from peer.Peer,
	to peer.Peer,
	message bsmsg.BitSwapMessage) (
	incoming bsmsg.BitSwapMessage, err error) {

	r, ok := n.clients[to.Key()]
	if !ok {
		return nil, errors.New("Cannot locate peer on network")
	}
	nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message)

	// TODO dedupe code
	if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) {
		r.ReceiveError(errors.New("Malformed client request"))
		return nil, nil
	}

	// TODO dedupe code
	if nextPeer == nil && nextMsg == nil {
		return nil, nil
	}

	// TODO test when receiver doesn't immediately respond to the initiator of the request
	if !bytes.Equal(nextPeer.ID(), from.ID()) {
		go func() {
			nextReceiver, ok := n.clients[nextPeer.Key()]
			if !ok {
				// TODO log the error?
				return // don't deliver to a nil receiver
			}
			n.deliver(nextReceiver, nextPeer, nextMsg)
		}()
		return nil, nil
	}
	return nextMsg, nil
}
Code Example #23
// Handshake3Msg constructs a Handshake3 msg.
func Handshake3Msg(localPeer peer.Peer, remoteAddr ma.Multiaddr) *pb.Handshake3 {
	var msg pb.Handshake3
	// don't need publicKey after secure channel.
	// msg.PublicKey = localPeer.PubKey().Bytes()

	// local listen addresses
	addrs := localPeer.Addresses()
	msg.ListenAddrs = make([][]byte, len(addrs))
	for i, a := range addrs {
		msg.ListenAddrs[i] = a.Bytes()
	}

	// observed remote address
	msg.ObservedAddr = remoteAddr.Bytes()

	// services
	// srv := localPeer.Services()
	// msg.Services = make([]mux.ProtocolID, len(srv))
	// for i, pid := range srv {
	// 	msg.Services[i] = pid
	// }

	return &msg
}
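The receiving side folds this message into local knowledge via Handshake3Update; a round-trip sketch follows code example #2.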
Code Example #24
File: table.go  Project: carriercomm/interplanetary
// Update adds or moves the given peer to the front of its respective bucket
// If a peer gets removed from a bucket, it is returned
func (rt *RoutingTable) Update(p peer.Peer) peer.Peer {
	rt.tabLock.Lock()
	defer rt.tabLock.Unlock()
	peerID := ConvertPeerID(p.ID())
	cpl := commonPrefixLen(peerID, rt.local)

	bucketID := cpl
	if bucketID >= len(rt.Buckets) {
		bucketID = len(rt.Buckets) - 1
	}

	bucket := rt.Buckets[bucketID]
	e := bucket.find(p.ID())
	if e == nil {
		// New peer, add to bucket
		if p.GetLatency() > rt.maxLatency {
			// Connection doesn't meet requirements, skip!
			return nil
		}
		bucket.pushFront(p)

		// Are we past the max bucket size?
		if bucket.len() > rt.bucketsize {
			// If this bucket is the rightmost bucket, and it's full,
			// we need to split it and create a new bucket
			if bucketID == len(rt.Buckets)-1 {
				return rt.nextBucket()
			} else {
				// If the bucket can't split, kick out the least active node
				return bucket.popBack()
			}
		}
		return nil
	}
	// If the peer is already in the table, move it to the front.
	// This signifies that it is "more active", and the less active
	// nodes will, as a result, tend towards the back of the list.
	bucket.moveToFront(e)
	return nil
}
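The bucket index comes from the common prefix length of the local and remote IDs under XOR. A self-contained sketch of that computation (an assumption about what commonPrefixLen does, matching the usual Kademlia definition; math/bits postdates this code, so the original presumably counts bits by hand):

// commonPrefixLenSketch counts the leading zero bits of a XOR b; peers sharing
// a longer prefix with the local ID land in deeper buckets.
// import "math/bits"
func commonPrefixLenSketch(a, b []byte) int {
	for i := range a {
		if x := a[i] ^ b[i]; x != 0 {
			return i*8 + bits.LeadingZeros8(x)
		}
	}
	return len(a) * 8
}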
Code Example #25
func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is the key even a key?
	key := pmes.GetKey()
	if key == "" {
		return nil, errors.New("handleGetValue but no key was provided")
	}

	// let's first check if we have the value locally.
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := u.Key(pmes.GetKey()).DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// Note: changed the behavior here to return _as much_ info as possible
	// (potentially all of {value, closer peers, provider})

	// if we have the value, send it back
	if err == nil {
		log.Debugf("%s handleGetValue success!", dht.self)

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		rec := new(pb.Record)
		err := proto.Unmarshal(byts, rec)
		if err != nil {
			log.Error("Failed to unmarshal dht record from datastore")
			return nil, err
		}

		resp.Record = rec
	}

	// if we know any providers for the requested value, return those.
	provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if len(provs) > 0 {
		log.Debugf("handleGetValue returning %d provider[s]", len(provs))
		resp.ProviderPeers = pb.PeersToPBPeers(provs)
	}

	// Find closest peer on given cluster to desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
	if closer != nil {
		for _, p := range closer {
			log.Debugf("handleGetValue returning closer peer: '%s'", p)
			if len(p.Addresses()) < 1 {
				log.Critical("no addresses on peer being sent!")
			}
		}
		resp.CloserPeers = pb.PeersToPBPeers(closer)
	}

	return resp, nil
}
Code Example #26
File: util.go  Project: carriercomm/interplanetary
func (ps *peerSet) Add(p peer.Peer) {
	ps.lk.Lock()
	ps.ps[string(p.ID())] = true
	ps.lk.Unlock()
}
Code Example #27
File: network.go  Project: carriercomm/interplanetary
func (n *network) HasPeer(p peer.Peer) bool {
	_, found := n.clients[p.Key()]
	return found
}
Code Example #28
File: util.go  Project: carriercomm/interplanetary
func (ps *peerSet) Contains(p peer.Peer) bool {
	ps.lk.RLock()
	_, ok := ps.ps[string(p.ID())]
	ps.lk.RUnlock()
	return ok
}
Code Example #29
File: net.go  Project: carriercomm/interplanetary
// IsConnected returns whether a connection to given peer exists.
func (n *IpfsNetwork) IsConnected(p peer.Peer) (bool, error) {
	return n.swarm.GetConnection(p.ID()) != nil, nil
}