// TODO: Im not certain on this implementation, we get a list of peers/providers // from someone what do we do with it? Connect to each of them? randomly pick // one to get the value from? Or just connect to one at a time until we get a // successful connection and request the value from it? func (dht *IpfsDHT) getFromPeerList(key u.Key, timeout time.Duration, peerlist []*PBDHTMessage_PBPeer, level int) ([]byte, error) { for _, pinfo := range peerlist { p, _ := dht.Find(peer.ID(pinfo.GetId())) if p == nil { maddr, err := ma.NewMultiaddr(pinfo.GetAddr()) if err != nil { u.PErr("getValue error: %s\n", err) continue } p, err = dht.network.GetConnection(peer.ID(pinfo.GetId()), maddr) if err != nil { u.PErr("getValue error: %s\n", err) continue } } pmes, err := dht.getValueSingle(p, key, timeout, level) if err != nil { u.DErr("getFromPeers error: %s\n", err) continue } dht.providers.AddProvider(key, p) // Make sure it was a successful get if pmes.GetSuccess() && pmes.Value != nil { return pmes.GetValue(), nil } } return nil, u.ErrNotFound }
// FindProviders searches for peers who can provide the value for given key. func (dht *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer, error) { ll := startNewRPC("FindProviders") defer func() { ll.EndLog() ll.Print() }() u.DOut("Find providers for: '%s'\n", key) p := dht.routingTables[0].NearestPeer(kb.ConvertKey(key)) if p == nil { return nil, kb.ErrLookupFailure } for level := 0; level < len(dht.routingTables); { pmes, err := dht.findProvidersSingle(p, key, level, timeout) if err != nil { return nil, err } if pmes.GetSuccess() { u.DOut("Got providers back from findProviders call!\n") provs := dht.addPeerList(key, pmes.GetPeers()) ll.Success = true return provs, nil } u.DOut("Didnt get providers, just closer peers.\n") closer := pmes.GetPeers() if len(closer) == 0 { level++ continue } if peer.ID(closer[0].GetId()).Equal(dht.self.ID) { u.DOut("Got myself back as a closer peer.") return nil, u.ErrNotFound } maddr, err := ma.NewMultiaddr(closer[0].GetAddr()) if err != nil { // ??? Move up route level??? panic("not yet implemented") } np, err := dht.network.GetConnection(peer.ID(closer[0].GetId()), maddr) if err != nil { u.PErr("[%s] Failed to connect to: %s\n", dht.self.ID.Pretty(), closer[0].GetAddr()) level++ continue } p = np } return nil, u.ErrNotFound }
func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) { resp := Message{ Type: pmes.GetType(), ID: pmes.GetId(), Response: true, } defer func() { mes := swarm.NewMessage(p, resp.ToProtobuf()) dht.netChan.Outgoing <- mes }() level := pmes.GetValue()[0] u.DOut("handleFindPeer: searching for '%s'\n", peer.ID(pmes.GetKey()).Pretty()) closest := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey()))) if closest == nil { u.PErr("handleFindPeer: could not find anything.\n") return } if len(closest.Addresses) == 0 { u.PErr("handleFindPeer: no addresses for connected peer...\n") return } // If the found peer further away than this peer... if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) { return } u.DOut("handleFindPeer: sending back '%s'\n", closest.ID.Pretty()) resp.Peers = []*peer.Peer{closest} resp.Success = true }
func (dht *IpfsDHT) peerFromInfo(pbp *PBDHTMessage_PBPeer) (*peer.Peer, error) { maddr, err := ma.NewMultiaddr(pbp.GetAddr()) if err != nil { return nil, err } return dht.network.GetConnection(peer.ID(pbp.GetId()), maddr) }
// If less than K nodes are in the entire network, it should fail when we make // a GET rpc and nobody has the value func TestLessThanKResponses(t *testing.T) { u.Debug = false fn := newFauxNet() fn.Listen() local := new(peer.Peer) local.ID = peer.ID("test_peer") d := NewDHT(local, fn, ds.NewMapDatastore()) d.Start() var ps []*peer.Peer for i := 0; i < 5; i++ { ps = append(ps, _randPeer()) d.Update(ps[i]) } other := _randPeer() // Reply with random peers to every message fn.AddHandler(func(mes *swarm.Message) *swarm.Message { pmes := new(PBDHTMessage) err := proto.Unmarshal(mes.Data, pmes) if err != nil { t.Fatal(err) } switch pmes.GetType() { case PBDHTMessage_GET_VALUE: resp := Message{ Type: pmes.GetType(), ID: pmes.GetId(), Response: true, Success: false, Peers: []*peer.Peer{other}, } return swarm.NewMessage(mes.Peer, resp.ToProtobuf()) default: panic("Shouldnt recieve this.") } }) _, err := d.GetValue(u.Key("hello"), time.Second*30) if err != nil { switch err { case u.ErrNotFound: //Success! return case u.ErrTimeout: t.Fatal("Should not have gotten timeout!") default: t.Fatalf("Got unexpected error: %s", err) } } t.Fatal("Expected to recieve an error.") }
func TestProviderManager(t *testing.T) { mid := peer.ID("testing") p := NewProviderManager(mid) a := u.Key("test") p.AddProvider(a, &peer.Peer{}) resp := p.GetProviders(a) if len(resp) != 1 { t.Fatal("Could not retrieve provider.") } p.Halt() }
// IDFromPubKey returns Nodes ID given its public key func IDFromPubKey(pk ci.PubKey) (peer.ID, error) { b, err := pk.Bytes() if err != nil { return nil, err } hash, err := u.Hash(b) if err != nil { return nil, err } return peer.ID(hash), nil }
// getValueOrPeers asks peer p for the value stored under key. Exactly one of
// the three outcomes occurs:
//   - p returned the value directly -> (value, nil, nil)
//   - p returned providers instead of the value -> the providers are queried
//     via getFromPeerList and their answer is returned as (value, nil, nil)
//   - p only knew closer peers -> (nil, closerPeers, nil) so the caller can
//     continue the search
// Any transport error from the initial query is returned as-is.
func (dht *IpfsDHT) getValueOrPeers(p *peer.Peer, key u.Key, timeout time.Duration, level int) ([]byte, []*peer.Peer, error) {
	pmes, err := dht.getValueSingle(p, key, timeout, level)
	if err != nil {
		return nil, nil, err
	}

	if pmes.GetSuccess() {
		if pmes.Value == nil { // We were given provider[s]
			// Success with no inline value means the response carries a
			// provider list; chase those providers for the actual value.
			val, err := dht.getFromPeerList(key, timeout, pmes.GetPeers(), level)
			if err != nil {
				return nil, nil, err
			}
			return val, nil, nil
		}

		// Success! We were given the value
		return pmes.GetValue(), nil, nil
	}

	// We were given a closer node: connect to each suggested peer, skipping
	// ourselves and any peer whose address cannot be parsed or dialed.
	var peers []*peer.Peer
	for _, pb := range pmes.GetPeers() {
		if peer.ID(pb.GetId()).Equal(dht.self.ID) {
			continue
		}
		addr, err := ma.NewMultiaddr(pb.GetAddr())
		if err != nil {
			u.PErr("%v\n", err.Error())
			continue
		}
		np, err := dht.network.GetConnection(peer.ID(pb.GetId()), addr)
		if err != nil {
			u.PErr("%v\n", err.Error())
			continue
		}
		peers = append(peers, np)
	}
	return nil, peers, nil
}
// TODO: Could be done async func (dht *IpfsDHT) addPeerList(key u.Key, peers []*PBDHTMessage_PBPeer) []*peer.Peer { var provArr []*peer.Peer for _, prov := range peers { // Dont add outselves to the list if peer.ID(prov.GetId()).Equal(dht.self.ID) { continue } // Dont add someone who is already on the list p := dht.network.Find(u.Key(prov.GetId())) if p == nil { u.DOut("given provider %s was not in our network already.\n", peer.ID(prov.GetId()).Pretty()) var err error p, err = dht.peerFromInfo(prov) if err != nil { u.PErr("error connecting to new peer: %s\n", err) continue } } dht.providers.AddProvider(key, p) provArr = append(provArr, p) } return provArr }
// addPeerListAsync connects to the peers in the given protobuf list, records
// each as a provider for k, and streams up to count of them onto out. The
// peerSet ps deduplicates across concurrent callers; the function returns
// early once ps already holds count peers. Intended to be run as a goroutine
// feeding out; the caller owns the channel.
//
//TODO: this function could also be done asynchronously
func (dht *IpfsDHT) addPeerListAsync(k u.Key, peers []*PBDHTMessage_PBPeer, ps *peerSet, count int, out chan *peer.Peer) {
	for _, pbp := range peers {
		// Skip ourselves.
		if peer.ID(pbp.GetId()).Equal(dht.self.ID) {
			continue
		}

		maddr, err := ma.NewMultiaddr(pbp.GetAddr())
		if err != nil {
			u.PErr("%v\n", err)
			continue
		}
		p, err := dht.network.GetConnection(peer.ID(pbp.GetId()), maddr)
		if err != nil {
			u.PErr("%v\n", err)
			continue
		}
		dht.providers.AddProvider(k, p)
		// Only forward the peer if the shared set accepted it below the
		// count limit; stop entirely once the quota has been reached.
		// NOTE(review): the send on out can block if the receiver stops
		// draining before count peers arrive — confirm against callers.
		if ps.AddIfSmallerThan(p, count) {
			out <- p
		} else if ps.Size() >= count {
			return
		}
	}
}
func setupPeer(id string, addr string) (*peer.Peer, error) { tcp, err := ma.NewMultiaddr(addr) if err != nil { return nil, err } mh, err := mh.FromHexString(id) if err != nil { return nil, err } p := &peer.Peer{ID: peer.ID(mh)} p.AddAddress(tcp) return p, nil }
// FindPeer searches for a peer with given ID. func (dht *IpfsDHT) FindPeer(id peer.ID, timeout time.Duration) (*peer.Peer, error) { // Check if were already connected to them p, _ := dht.Find(id) if p != nil { return p, nil } routeLevel := 0 p = dht.routingTables[routeLevel].NearestPeer(kb.ConvertPeerID(id)) if p == nil { return nil, kb.ErrLookupFailure } if p.ID.Equal(id) { return p, nil } for routeLevel < len(dht.routingTables) { pmes, err := dht.findPeerSingle(p, id, timeout, routeLevel) plist := pmes.GetPeers() if plist == nil || len(plist) == 0 { routeLevel++ continue } found := plist[0] addr, err := ma.NewMultiaddr(found.GetAddr()) if err != nil { return nil, err } nxtPeer, err := dht.network.GetConnection(peer.ID(found.GetId()), addr) if err != nil { return nil, err } if pmes.GetSuccess() { if !id.Equal(nxtPeer.ID) { return nil, errors.New("got back invalid peer from 'successful' response") } return nxtPeer, nil } p = nxtPeer } return nil, u.ErrNotFound }
func initIdentity(cfg *config.Config) (*peer.Peer, error) { if cfg.Identity == nil { return nil, errors.New("Identity was not set in config (was ipfs init run?)") } if len(cfg.Identity.PeerID) == 0 { return nil, errors.New("No peer ID in config! (was ipfs init run?)") } // address is optional var addresses []*ma.Multiaddr if len(cfg.Identity.Address) > 0 { maddr, err := ma.NewMultiaddr(cfg.Identity.Address) if err != nil { return nil, err } addresses = []*ma.Multiaddr{maddr} } skb, err := base64.StdEncoding.DecodeString(cfg.Identity.PrivKey) if err != nil { return nil, err } sk, err := ci.UnmarshalPrivateKey(skb) if err != nil { return nil, err } return &peer.Peer{ ID: peer.ID(b58.Decode(cfg.Identity.PeerID)), Addresses: addresses, PrivKey: sk, PubKey: sk.GetPublic(), }, nil }
// Looks for race conditions in table operations. For a more 'certain' // test, increase the loop counter from 1000 to a much higher number // and set GOMAXPROCS above 1 func TestTableMultithreaded(t *testing.T) { local := peer.ID("localPeer") tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour) var peers []*peer.Peer for i := 0; i < 500; i++ { peers = append(peers, _randPeer()) } done := make(chan struct{}) go func() { for i := 0; i < 1000; i++ { n := rand.Intn(len(peers)) tab.Update(peers[n]) } done <- struct{}{} }() go func() { for i := 0; i < 1000; i++ { n := rand.Intn(len(peers)) tab.Update(peers[n]) } done <- struct{}{} }() go func() { for i := 0; i < 1000; i++ { n := rand.Intn(len(peers)) tab.Find(peers[n].ID) } done <- struct{}{} }() <-done <-done <-done }
// Builds up list of peers by requesting random peer IDs func (dht *IpfsDHT) Bootstrap() { id := make([]byte, 16) rand.Read(id) dht.FindPeer(peer.ID(id), time.Second*10) }
func TestGetFailures(t *testing.T) { fn := newFauxNet() fn.Listen() local := new(peer.Peer) local.ID = peer.ID("test_peer") d := NewDHT(local, fn, ds.NewMapDatastore()) other := &peer.Peer{ID: peer.ID("other_peer")} d.Start() d.Update(other) // This one should time out _, err := d.GetValue(u.Key("test"), time.Millisecond*10) if err != nil { if err != u.ErrTimeout { t.Fatal("Got different error than we expected.") } } else { t.Fatal("Did not get expected error!") } // Reply with failures to every message fn.AddHandler(func(mes *swarm.Message) *swarm.Message { pmes := new(PBDHTMessage) err := proto.Unmarshal(mes.Data, pmes) if err != nil { t.Fatal(err) } resp := Message{ Type: pmes.GetType(), ID: pmes.GetId(), Response: true, Success: false, } return swarm.NewMessage(mes.Peer, resp.ToProtobuf()) }) // This one should fail with NotFound _, err = d.GetValue(u.Key("test"), time.Millisecond*1000) if err != nil { if err != u.ErrNotFound { t.Fatalf("Expected ErrNotFound, got: %s", err) } } else { t.Fatal("expected error, got none.") } success := make(chan struct{}) fn.handlers = nil fn.AddHandler(func(mes *swarm.Message) *swarm.Message { resp := new(PBDHTMessage) err := proto.Unmarshal(mes.Data, resp) if err != nil { t.Fatal(err) } if resp.GetSuccess() { t.Fatal("Get returned success when it shouldnt have.") } success <- struct{}{} return nil }) // Now we test this DHT's handleGetValue failure req := Message{ Type: PBDHTMessage_GET_VALUE, Key: "hello", ID: swarm.GenerateMessageID(), Value: []byte{0}, } fn.Chan.Incoming <- swarm.NewMessage(other, req.ToProtobuf()) <-success }
func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *PBDHTMessage) { key := u.Key(pmes.GetKey()) u.DOut("[%s] Adding [%s] as a provider for '%s'\n", dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty()) dht.providers.AddProvider(key, p) }