func (dht *IpfsDHT) handleGetProviders(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// check if we have this value, to add ourselves as provider.
	log.Debugf("handling GetProviders: '%s'", u.Key(pmes.GetKey()))
	dsk := u.Key(pmes.GetKey()).DsKey()
	has, err := dht.datastore.Has(dsk)
	if err != nil && err != ds.ErrNotFound {
		log.Errorf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if has {
		providers = append(providers, dht.self)
	}

	// if we've got providers, send those.
	if len(providers) > 0 {
		resp.ProviderPeers = pb.PeersToPBPeers(providers)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
	if closer != nil {
		resp.CloserPeers = pb.PeersToPBPeers(closer)
	}

	return resp, nil
}
func TestToNetFromNetPreservesWantList(t *testing.T) {
	original := New()
	original.AddWanted(u.Key("M"))
	original.AddWanted(u.Key("B"))
	original.AddWanted(u.Key("D"))
	original.AddWanted(u.Key("T"))
	original.AddWanted(u.Key("F"))

	p := peer.WithIDString("X")
	netmsg, err := original.ToNet(p)
	if err != nil {
		t.Fatal(err)
	}

	copied, err := FromNet(netmsg)
	if err != nil {
		t.Fatal(err)
	}

	keys := make(map[u.Key]bool)
	for _, k := range copied.Wantlist() {
		keys[k] = true
	}

	for _, k := range original.Wantlist() {
		if _, ok := keys[k]; !ok {
			t.Fatalf("Key Missing: \"%v\"", k)
		}
	}
}
func TestClientFindProviders(t *testing.T) {
	peer := peer.WithIDString("42")
	rs := VirtualRoutingServer()
	client := rs.Client(peer)

	k := u.Key("hello")
	err := client.Provide(context.Background(), k)
	if err != nil {
		t.Fatal(err)
	}

	max := 100
	providersFromHashTable := rs.Providers(k)

	isInHT := false
	for _, p := range providersFromHashTable {
		if bytes.Equal(p.ID(), peer.ID()) {
			isInHT = true
		}
	}
	if !isInHT {
		t.Fatal("Despite client providing key, peer wasn't in hash table as a provider")
	}

	providersFromClient := client.FindProvidersAsync(context.Background(), u.Key("hello"), max)
	isInClient := false
	for p := range providersFromClient {
		if bytes.Equal(p.ID(), peer.ID()) {
			isInClient = true
		}
	}
	if !isInClient {
		t.Fatal("Despite client providing key, client didn't receive peer when finding providers")
	}
}
func TestProvides(t *testing.T) {
	// t.Skip("skipping test to debug another")

	ctx := context.Background()
	u.Debug = false

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].dialer.(inet.Network).Close()
		}
	}()

	_, err := dhts[0].Connect(ctx, peers[1])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[3])
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	// the local put must be retrievable before providing
	bits, err := dhts[3].getLocal(u.Key("hello"))
	if err != nil || !bytes.Equal(bits, []byte("world")) {
		t.Fatal(err)
	}

	err = dhts[3].Provide(ctx, u.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	provchan := dhts[0].FindProvidersAsync(ctxT, u.Key("hello"), 1)

	after := time.After(time.Second)
	select {
	case prov := <-provchan:
		if prov == nil {
			t.Fatal("Got back nil provider")
		}
	case <-after:
		t.Fatal("Did not get a provider back.")
	}
}
func TestLayeredGet(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	u.Debug = false

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].dialer.(inet.Network).Close()
		}
	}()

	_, err := dhts[0].Connect(ctx, peers[1])
	if err != nil {
		t.Fatalf("Failed to connect: %s", err)
	}

	_, err = dhts[1].Connect(ctx, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[3])
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(u.Key("/v/hello"), []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].Provide(ctx, u.Key("/v/hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	val, err := dhts[0].GetValue(ctxT, u.Key("/v/hello"))
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatal("Got incorrect value.")
	}
}
// Resolve implements Resolver. Uses the IPFS routing system to resolve SFS-like
// names.
func (r *routingResolver) Resolve(name string) (string, error) {
	log.Debugf("RoutingResolve: '%s'", name)
	ctx := context.TODO()
	hash, err := mh.FromB58String(name)
	if err != nil {
		log.Warning("RoutingResolve: bad input hash: [%s]\n", name)
		return "", err
	}
	// name should be a multihash. if it isn't, error out here.

	// use the routing system to get the name.
	// /ipns/<name>
	h := []byte("/ipns/" + string(hash))

	ipnsKey := u.Key(h)
	val, err := r.routing.GetValue(ctx, ipnsKey)
	if err != nil {
		log.Warning("RoutingResolve get failed.")
		return "", err
	}

	entry := new(pb.IpnsEntry)
	err = proto.Unmarshal(val, entry)
	if err != nil {
		return "", err
	}

	// name should be a public key retrievable from ipfs
	// /ipfs/<name>
	key := u.Key("/pk/" + string(hash))
	pkval, err := r.routing.GetValue(ctx, key)
	if err != nil {
		log.Warning("RoutingResolve PubKey Get failed.")
		return "", err
	}

	// get PublicKey from node.Data
	pk, err := ci.UnmarshalPublicKey(pkval)
	if err != nil {
		return "", err
	}

	hsh, _ := pk.Hash()
	log.Debugf("pk hash = %s", u.Key(hsh))

	// check sig with pk
	if ok, err := pk.Verify(ipnsEntryDataForSig(entry), entry.GetSignature()); err != nil || !ok {
		return "", fmt.Errorf("Invalid value. Not signed by PrivateKey corresponding to %v", pk)
	}

	// ok sig checks out. this is a valid name.
	return string(entry.GetValue()), nil
}
func (dht *IpfsDHT) handleAddProvider(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	key := u.Key(pmes.GetKey())

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))

	// add provider should use the address given in the message
	for _, pb := range pmes.GetProviderPeers() {
		pid := peer.ID(pb.GetId())
		if pid.Equal(p.ID()) {

			addr, err := pb.Address()
			if err != nil {
				log.Errorf("provider %s error with address %s", p, *pb.Addr)
				continue
			}

			log.Infof("received provider %s %s for %s", p, addr, key)
			p.AddAddress(addr)
			dht.providers.AddProvider(key, p)

		} else {
			log.Errorf("handleAddProvider received provider %s from %s", pid, p)
		}
	}

	return pmes, nil // send back same msg as confirmation.
}
// betterPeersToQuery returns nearestPeersToQuery, but only peers closer than self.
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
	closer := dht.nearestPeersToQuery(pmes, count)

	// no node? nil
	if closer == nil {
		return nil
	}

	// == to self? that's bad
	for _, p := range closer {
		if p.ID().Equal(dht.self.ID()) {
			log.Error("Attempted to return self! this shouldn't happen...")
			return nil
		}
	}

	var filtered []peer.Peer
	for _, p := range closer {
		// must all be closer than self
		key := u.Key(pmes.GetKey())
		if !kb.Closer(dht.self.ID(), p.ID(), key) {
			filtered = append(filtered, p)
		}
	}

	// ok, these seem like closer nodes
	return filtered
}
// ResolvePath fetches the node for the given path. It uses the first
// path component as a hash (key) of the first node, then resolves
// all other components walking the links, with ResolveLinks.
func (s *Resolver) ResolvePath(fpath string) (*merkledag.Node, error) {
	log.Debugf("Resolve: '%s'", fpath)
	fpath = path.Clean(fpath)

	parts := strings.Split(fpath, "/")

	// skip over empty first elem
	if len(parts[0]) == 0 {
		parts = parts[1:]
	}

	// if nothing, bail.
	if len(parts) == 0 {
		return nil, fmt.Errorf("ipfs path must contain at least one component")
	}

	// first element in the path is a b58 hash (for now)
	h, err := mh.FromB58String(parts[0])
	if err != nil {
		log.Debug("given path element is not a base58 string.\n")
		return nil, err
	}

	log.Debug("Resolve dag get.\n")
	nd, err := s.DAG.Get(u.Key(h))
	if err != nil {
		return nil, err
	}

	return s.ResolveLinks(nd, parts[1:])
}
func TestClientOverMax(t *testing.T) {
	rs := VirtualRoutingServer()
	k := u.Key("hello")
	numProvidersForHelloKey := 100
	for i := 0; i < numProvidersForHelloKey; i++ {
		peer := peer.WithIDString(string(i))
		err := rs.Announce(peer, k)
		if err != nil {
			t.Fatal(err)
		}
	}

	providersFromHashTable := rs.Providers(k)
	if len(providersFromHashTable) != numProvidersForHelloKey {
		t.Log(1 == len(providersFromHashTable))
		t.Fatal("not all providers were returned")
	}

	max := 10
	peer := peer.WithIDString("TODO")
	client := rs.Client(peer)

	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	i := 0
	for _ = range providersFromClient {
		i++
	}
	if i != max {
		t.Fatal("Too many providers returned")
	}
}
// GetNode returns the MDAG Node that this link points to
func (l *Link) GetNode(serv DAGService) (*Node, error) {
	if l.Node != nil {
		return l.Node, nil
	}

	return serv.Get(u.Key(l.Hash))
}
func (p *peerstore) Delete(i ID) error {
	p.Lock()
	defer p.Unlock()
	k := u.Key(i).DsKey()
	return p.peers.Delete(k)
}
func TestKeyNotFound(t *testing.T) {
	vrs := VirtualRoutingServer()
	empty := vrs.Providers(u.Key("not there"))
	if len(empty) != 0 {
		t.Fatal("should be empty")
	}
}
// ID returns the ID of a given Conn.
func ID(c Conn) string {
	l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().ID())
	r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().ID())
	lh := u.Hash([]byte(l))
	rh := u.Hash([]byte(r))
	ch := u.XOR(lh, rh)
	return u.Key(ch).Pretty()
}
// Publish implements Publisher. Accepts a keypair and a value,
// and publishes it out to the routing system
func (p *ipnsPublisher) Publish(k ci.PrivKey, value string) error {
	log.Debugf("namesys: Publish %s", value)

	// validate `value` is a ref (multihash)
	_, err := mh.FromB58String(value)
	if err != nil {
		log.Errorf("hash cast failed: %s", value)
		return fmt.Errorf("publish value must be str multihash. %v", err)
	}

	ctx := context.TODO()
	data, err := createRoutingEntryData(k, value)
	if err != nil {
		log.Error("entry creation failed.")
		return err
	}

	pubkey := k.GetPublic()
	pkbytes, err := pubkey.Bytes()
	if err != nil {
		log.Error("pubkey getbytes failed.")
		return err
	}

	nameb := u.Hash(pkbytes)
	namekey := u.Key("/pk/" + string(nameb))
	log.Debugf("Storing pubkey at: %s", namekey)

	// Store associated public key
	timectx, _ := context.WithDeadline(ctx, time.Now().Add(time.Second*4))
	err = p.routing.PutValue(timectx, namekey, pkbytes)
	if err != nil {
		return err
	}

	ipnskey := u.Key("/ipns/" + string(nameb))
	log.Debugf("Storing ipns entry at: %s", ipnskey)

	// Store ipns entry at "/ipns/"+b58(h(pubkey))
	timectx, _ = context.WithDeadline(ctx, time.Now().Add(time.Second*4))
	err = p.routing.PutValue(timectx, ipnskey, data)
	if err != nil {
		return err
	}

	return nil
}
func TestBlockReturnsErr(t *testing.T) {
	off := NewOfflineExchange()
	_, err := off.Block(context.Background(), u.Key("foo"))
	if err != nil {
		return // as desired
	}
	t.Fail()
}
// nearestPeersToQuery returns the routing table's closest peers.
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
	level := pmes.GetClusterLevel()
	cluster := dht.routingTables[level]

	key := u.Key(pmes.GetKey())
	closer := cluster.NearestPeers(kb.ConvertKey(key), count)
	return closer
}
func TestAppendWanted(t *testing.T) {
	const str = "foo"
	m := New()
	m.AddWanted(u.Key(str))

	if !contains(m.ToProto().GetWantlist(), str) {
		t.Fail()
	}
}
func TestCopyProtoByValue(t *testing.T) {
	const str = "foo"
	m := New()
	protoBeforeAppend := m.ToProto()
	m.AddWanted(u.Key(str))
	if contains(protoBeforeAppend.GetWantlist(), str) {
		t.Fail()
	}
}
func TestGetWhenKeyNotPresent(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	_, err := bs.Get(u.Key("not present"))

	if err != nil {
		t.Log("As expected, block is not present")
		return
	}
	t.Fail()
}
// GetConnection returns the connection in the swarm to given peer.ID
func (s *Swarm) GetConnection(pid peer.ID) conn.Conn {
	s.connsLock.RLock()
	c, found := s.conns[u.Key(pid)]
	s.connsLock.RUnlock()

	if !found {
		return nil
	}
	return c
}
func newMessageFromProto(pbm pb.Message) BitSwapMessage {
	m := New()
	for _, s := range pbm.GetWantlist() {
		m.AddWanted(u.Key(s))
	}
	for _, d := range pbm.GetBlocks() {
		b := blocks.NewBlock(d)
		m.AddBlock(*b)
	}
	return m
}
// CloseConnection removes a given peer from the swarm and closes the connection
func (s *Swarm) CloseConnection(p peer.Peer) error {
	c := s.GetConnection(p.ID())
	if c == nil {
		return u.ErrNotFound
	}

	s.connsLock.Lock()
	delete(s.conns, u.Key(p.ID()))
	s.connsLock.Unlock()

	return c.Close()
}
// handlePutValue stores a value in this peer's local storage
func (dht *IpfsDHT) handlePutValue(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	dht.dslock.Lock()
	defer dht.dslock.Unlock()
	dskey := u.Key(pmes.GetKey()).DsKey()

	err := dht.verifyRecord(pmes.GetRecord())
	if err != nil {
		fmt.Println(u.Key(pmes.GetRecord().GetAuthor()))
		log.Error("Bad dht record in put request")
		return nil, err
	}

	data, err := proto.Marshal(pmes.GetRecord())
	if err != nil {
		return nil, err
	}

	err = dht.datastore.Put(dskey, data)
	log.Debugf("%s handlePutValue %v\n", dht.self, dskey)
	return pmes, err
}
func TestProviderManager(t *testing.T) {
	ctx := context.Background()
	mid := peer.ID("testing")
	p := NewProviderManager(ctx, mid)
	a := u.Key("test")
	p.AddProvider(a, peer.WithIDString("testingprovider"))
	resp := p.GetProviders(a)
	if len(resp) != 1 {
		t.Fatal("Could not retrieve provider.")
	}
	p.Close()
}
func addRef(h mh.Multihash, refs []u.Key, refsSeen map[u.Key]bool) (bool, []u.Key) {
	key := u.Key(h)
	if refsSeen != nil {
		_, found := refsSeen[key]
		if found {
			return true, refs
		}
		refsSeen[key] = true
	}

	refs = append(refs, key)
	return false, refs
}
func TestRoutingResolve(t *testing.T) {
	local := peer.WithIDString("testID")
	lds := ds.NewMapDatastore()
	d := mock.NewMockRouter(local, lds)

	resolver := NewRoutingResolver(d)
	publisher := NewRoutingPublisher(d)

	privk, pubk, err := ci.GenerateKeyPair(ci.RSA, 512)
	if err != nil {
		t.Fatal(err)
	}

	err = publisher.Publish(privk, "Hello")
	if err == nil {
		t.Fatal("should have errored out when publishing a non-multihash val")
	}

	h := u.Key(u.Hash([]byte("Hello"))).Pretty()
	err = publisher.Publish(privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(u.Key(pkhash).Pretty())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
func (dht *IpfsDHT) verifyRecord(r *pb.Record) error {
	// First, validate the signature
	p, err := dht.peerstore.Get(peer.ID(r.GetAuthor()))
	if err != nil {
		return err
	}
	k := u.Key(r.GetKey())
	blob := bytes.Join([][]byte{[]byte(k), r.GetValue(), []byte(r.GetAuthor())}, []byte{})
	ok, err := p.PubKey().Verify(blob, r.GetSignature())
	if err != nil {
		log.Error("Signature verify failed.")
		return err
	}

	if !ok {
		return ErrBadRecord
	}

	// Now, check the record against the validity func registered for its key prefix
	parts := strings.Split(r.GetKey(), "/")
	if len(parts) < 3 {
		log.Errorf("Record had bad key: %s", u.Key(r.GetKey()))
		return ErrBadRecord
	}

	fnc, ok := dht.Validators[parts[1]]
	if !ok {
		log.Errorf("Unrecognized key prefix: %s", parts[1])
		return ErrInvalidRecordType
	}

	return fnc(u.Key(r.GetKey()), r.GetValue())
}
func TestQueue(t *testing.T) {
	p1 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")
	p2 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32")
	p3 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
	p4 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34")
	p5 := newPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")
	// these are the peer.IDs' XORKeySpace Key values:
	// [228 47 151 130 156 102 222 232 218 31 132 94 170 208 80 253 120 103 55 35 91 237 48 157 81 245 57 247 66 150 9 40]
	// [26 249 85 75 54 49 25 30 21 86 117 62 85 145 48 175 155 194 210 216 58 14 241 143 28 209 129 144 122 28 163 6]
	// [78 135 26 216 178 181 224 181 234 117 2 248 152 115 255 103 244 34 4 152 193 88 9 225 8 127 216 158 226 8 236 246]
	// [125 135 124 6 226 160 101 94 192 57 39 12 18 79 121 140 190 154 147 55 44 83 101 151 63 255 94 179 51 203 241 51]

	pq := NewXORDistancePQ(u.Key("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31"))
	pq.Enqueue(p3)
	pq.Enqueue(p1)
	pq.Enqueue(p2)
	pq.Enqueue(p4)
	pq.Enqueue(p5)
	pq.Enqueue(p1)

	// should come out as: p1, p4, p3, p2
	if d := pq.Dequeue(); d != p1 && d != p5 {
		t.Error("ordering failed")
	}
	if d := pq.Dequeue(); d != p1 && d != p5 {
		t.Error("ordering failed")
	}
	if d := pq.Dequeue(); d != p1 && d != p5 {
		t.Error("ordering failed")
	}
	if pq.Dequeue() != p4 {
		t.Error("ordering failed")
	}
	if pq.Dequeue() != p3 {
		t.Error("ordering failed")
	}
	if pq.Dequeue() != p2 {
		t.Error("ordering failed")
	}
}
func TestNode(t *testing.T) {
	n1 := &Node{Data: []byte("beep")}
	n2 := &Node{Data: []byte("boop")}
	n3 := &Node{Data: []byte("beep boop")}

	if err := n3.AddNodeLink("beep-link", n1); err != nil {
		t.Error(err)
	}
	if err := n3.AddNodeLink("boop-link", n2); err != nil {
		t.Error(err)
	}

	printn := func(name string, n *Node) {
		fmt.Println(">", name)
		fmt.Println("data:", string(n.Data))

		fmt.Println("links:")
		for _, l := range n.Links {
			fmt.Println("-", l.Name, l.Size, l.Hash)
		}

		e, err := n.Encoded(false)
		if err != nil {
			t.Error(err)
		} else {
			fmt.Println("encoded:", e)
		}

		h, err := n.Multihash()
		if err != nil {
			t.Error(err)
		} else {
			fmt.Println("hash:", h)
		}

		k, err := n.Key()
		if err != nil {
			t.Error(err)
		} else if k != u.Key(h) {
			t.Error("Key is not equivalent to multihash")
		} else {
			fmt.Println("key: ", k)
		}
	}

	printn("beep", n1)
	printn("boop", n2)
	printn("beep boop", n3)
}