func TestClientFindProviders(t *testing.T) {
	pi := testutil.RandIdentityOrFatal(t)
	rs := NewServer()
	client := rs.Client(pi)

	k := key.Key("hello")
	err := client.Provide(context.Background(), k)
	if err != nil {
		t.Fatal(err)
	}

	// This is bad... but simulating networks is hard
	time.Sleep(time.Millisecond * 300)

	max := 100
	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	isInClient := false
	for p := range providersFromClient {
		if p.ID == pi.ID() {
			isInClient = true
		}
	}
	if !isInClient {
		t.Fatal("Despite client providing key, client didn't receive peer when finding providers")
	}
}
func TestToNetFromNetPreservesWantList(t *testing.T) {
	original := New(true)
	original.AddEntry(key.Key("M"), 1)
	original.AddEntry(key.Key("B"), 1)
	original.AddEntry(key.Key("D"), 1)
	original.AddEntry(key.Key("T"), 1)
	original.AddEntry(key.Key("F"), 1)

	buf := new(bytes.Buffer)
	if err := original.ToNet(buf); err != nil {
		t.Fatal(err)
	}

	copied, err := FromNet(buf)
	if err != nil {
		t.Fatal(err)
	}

	keys := make(map[key.Key]bool)
	for _, k := range copied.Wantlist() {
		keys[k.Key] = true
	}

	for _, k := range original.Wantlist() {
		if _, ok := keys[k.Key]; !ok {
			t.Fatalf("Key Missing: \"%v\"", k)
		}
	}
}
func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) []*Change {
	if len(a.Links) == 0 && len(b.Links) == 0 {
		ak, _ := a.Key()
		bk, _ := b.Key()
		return []*Change{
			&Change{
				Type:   Mod,
				Before: ak,
				After:  bk,
			},
		}
	}

	var out []*Change
	cleanA := a.Copy()
	cleanB := b.Copy()

	// strip out unchanged stuff
	for _, lnk := range a.Links {
		l, err := b.GetNodeLink(lnk.Name)
		if err == nil {
			if bytes.Equal(l.Hash, lnk.Hash) {
				// no change... ignore it
			} else {
				anode, _ := lnk.GetNode(ctx, ds)
				bnode, _ := l.GetNode(ctx, ds)
				sub := Diff(ctx, ds, anode, bnode)
				for _, subc := range sub {
					subc.Path = path.Join(lnk.Name, subc.Path)
					out = append(out, subc)
				}
			}
			cleanA.RemoveNodeLink(l.Name)
			cleanB.RemoveNodeLink(l.Name)
		}
	}

	for _, lnk := range cleanA.Links {
		out = append(out, &Change{
			Type:   Remove,
			Path:   lnk.Name,
			Before: key.Key(lnk.Hash),
		})
	}
	for _, lnk := range cleanB.Links {
		out = append(out, &Change{
			Type:  Add,
			Path:  lnk.Name,
			After: key.Key(lnk.Hash),
		})
	}

	return out
}
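// For reference, a minimal sketch of the Change type and change kinds that
// Diff above relies on, inferred from how the function builds its results.
// The actual declarations live alongside Diff and may differ (e.g. Type could
// be a named type rather than a plain int).
const (
	Add = iota
	Remove
	Mod
)

type Change struct {
	Type   int
	Path   string
	Before key.Key
	After  key.Key
}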
// resolveOnce implements resolver. Uses the IPFS routing system to
// resolve SFS-like names.
func (r *routingResolver) resolveOnce(ctx context.Context, name string) (path.Path, error) {
	log.Debugf("RoutingResolve: '%s'", name)
	hash, err := mh.FromB58String(name)
	if err != nil {
		log.Warningf("RoutingResolve: bad input hash: [%s]", name)
		return "", err
	}
	// name should be a multihash. if it isn't, error out here.

	// use the routing system to get the name.
	// /ipns/<name>
	h := []byte("/ipns/" + string(hash))

	ipnsKey := key.Key(h)
	val, err := r.routing.GetValue(ctx, ipnsKey)
	if err != nil {
		log.Warning("RoutingResolve get failed.")
		return "", err
	}

	entry := new(pb.IpnsEntry)
	err = proto.Unmarshal(val, entry)
	if err != nil {
		return "", err
	}

	// name should be a public key retrievable from ipfs
	pubkey, err := routing.GetPublicKey(r.routing, ctx, hash)
	if err != nil {
		return "", err
	}

	hsh, _ := pubkey.Hash()
	log.Debugf("pk hash = %s", key.Key(hsh))

	// check sig with pk
	if ok, err := pubkey.Verify(ipnsEntryDataForSig(entry), entry.GetSignature()); err != nil || !ok {
		return "", fmt.Errorf("Invalid value. Not signed by PrivateKey corresponding to %v", pubkey)
	}

	// ok sig checks out. this is a valid name.

	// check for old style record:
	valh, err := mh.Cast(entry.GetValue())
	if err != nil {
		// Not a multihash, probably a new record
		return path.ParsePath(string(entry.GetValue()))
	}

	// It's an old style multihash record
	log.Warning("Detected old style multihash record")
	return path.FromKey(key.Key(valh)), nil
}
func TestPushPop(t *testing.T) {
	prq := newPRQ()
	partner := testutil.RandPeerIDFatal(t)
	alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "")
	vowels := strings.Split("aeiou", "")
	consonants := func() []string {
		var out []string
		for _, letter := range alphabet {
			skip := false
			for _, vowel := range vowels {
				if letter == vowel {
					skip = true
				}
			}
			if !skip {
				out = append(out, letter)
			}
		}
		return out
	}()
	sort.Strings(alphabet)
	sort.Strings(vowels)
	sort.Strings(consonants)

	// add a bunch of blocks. cancel some. drain the queue. the queue should only have the kept entries

	for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters
		letter := alphabet[index]
		t.Log(partner.String())
		prq.Push(wantlist.Entry{Key: key.Key(letter), Priority: math.MaxInt32 - index}, partner)
	}
	for _, consonant := range consonants {
		prq.Remove(key.Key(consonant), partner)
	}

	var out []string
	for {
		received := prq.Pop()
		if received == nil {
			break
		}
		out = append(out, string(received.Entry.Key))
	}

	// Entries popped should already be in correct order
	for i, expected := range vowels {
		if out[i] != expected {
			t.Fatal("received", out[i], "expected", expected)
		}
	}
}
// This test checks that peers won't starve out other peers
func TestPeerRepeats(t *testing.T) {
	prq := newPRQ()
	a := testutil.RandPeerIDFatal(t)
	b := testutil.RandPeerIDFatal(t)
	c := testutil.RandPeerIDFatal(t)
	d := testutil.RandPeerIDFatal(t)

	// Have each push some blocks
	for i := 0; i < 5; i++ {
		prq.Push(wantlist.Entry{Key: key.Key(fmt.Sprint(i))}, a)
		prq.Push(wantlist.Entry{Key: key.Key(fmt.Sprint(i))}, b)
		prq.Push(wantlist.Entry{Key: key.Key(fmt.Sprint(i))}, c)
		prq.Push(wantlist.Entry{Key: key.Key(fmt.Sprint(i))}, d)
	}

	// now, pop off four entries, there should be one from each
	var targets []string
	var tasks []*peerRequestTask
	for i := 0; i < 4; i++ {
		t := prq.Pop()
		targets = append(targets, t.Target.Pretty())
		tasks = append(tasks, t)
	}

	expected := []string{a.Pretty(), b.Pretty(), c.Pretty(), d.Pretty()}
	sort.Strings(expected)
	sort.Strings(targets)

	t.Log(targets)
	t.Log(expected)

	for i, s := range targets {
		if expected[i] != s {
			t.Fatal("unexpected peer", s, expected[i])
		}
	}

	// Now, if one of the tasks gets finished, the next task off the queue should
	// be for the same peer
	for blockI := 0; blockI < 4; blockI++ {
		for i := 0; i < 4; i++ {
			// it's okay to mark the same task done multiple times here (JUST FOR TESTING)
			tasks[i].Done()

			ntask := prq.Pop()
			if ntask.Target != tasks[i].Target {
				t.Fatal("Expected task from peer with lowest active count")
			}
		}
	}
}
func RunSupernodePutRecordGetRecord(conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	servers, clients, err := InitializeSupernodeNetwork(ctx, 2, 2, conf)
	if err != nil {
		return err
	}
	for _, n := range append(servers, clients...) {
		defer n.Close()
	}

	putter := clients[0]
	getter := clients[1]

	k := key.Key("key")
	note := []byte("a note from putter")

	if err := putter.Routing.PutValue(ctx, k, note); err != nil {
		return fmt.Errorf("failed to put value: %s", err)
	}

	received, err := getter.Routing.GetValue(ctx, k)
	if err != nil {
		return fmt.Errorf("failed to get value: %s", err)
	}

	if !bytes.Equal(note, received) {
		return errors.New("record doesn't match")
	}
	return nil
}
func TestRoutingResolve(t *testing.T) {
	d := mockrouting.NewServer().Client(testutil.RandIdentityOrFatal(t))
	resolver := NewRoutingResolver(d)
	publisher := NewRoutingPublisher(d)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(context.Background(), key.Key(pkhash).Pretty())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
// NewFilesystem instantiates an ipns filesystem using the given parameters and locally owned keys
func NewFilesystem(ctx context.Context, ds dag.DAGService, nsys namesys.NameSystem, pins pin.Pinner, keys ...ci.PrivKey) (*Filesystem, error) {
	roots := make(map[string]*KeyRoot)
	fs := &Filesystem{
		ctx:      ctx,
		roots:    roots,
		nsys:     nsys,
		dserv:    ds,
		pins:     pins,
		resolver: &path.Resolver{DAG: ds},
	}
	for _, k := range keys {
		pkh, err := k.GetPublic().Hash()
		if err != nil {
			return nil, err
		}

		root, err := fs.newKeyRoot(ctx, k)
		if err != nil {
			return nil, err
		}
		roots[key.Key(pkh).Pretty()] = root
	}

	return fs, nil
}
// newKeyRoot creates a new KeyRoot for the given key, and starts up a republisher routine
// for it
func (fs *Filesystem) newKeyRoot(parent context.Context, k ci.PrivKey) (*KeyRoot, error) {
	hash, err := k.GetPublic().Hash()
	if err != nil {
		return nil, err
	}

	name := "/ipns/" + key.Key(hash).String()

	root := new(KeyRoot)
	root.key = k
	root.fs = fs
	root.name = name

	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	pointsTo, err := fs.nsys.Resolve(ctx, name)
	if err != nil {
		err = namesys.InitializeKeyspace(ctx, fs.dserv, fs.nsys, fs.pins, k)
		if err != nil {
			return nil, err
		}

		pointsTo, err = fs.nsys.Resolve(ctx, name)
		if err != nil {
			return nil, err
		}
	}

	mnode, err := fs.resolver.ResolvePath(ctx, pointsTo)
	if err != nil {
		log.Errorf("Failed to retrieve value '%s' for ipns entry: %s\n", pointsTo, err)
		return nil, err
	}

	root.node = mnode

	root.repub = NewRepublisher(root, time.Millisecond*300, time.Second*3)
	go root.repub.Run(parent)

	pbn, err := ft.FromBytes(mnode.Data)
	if err != nil {
		log.Error("IPNS pointer was not unixfs node")
		return nil, err
	}

	switch pbn.GetType() {
	case ft.TDirectory:
		root.val = NewDirectory(ctx, pointsTo.String(), mnode, root, fs)
	case ft.TFile, ft.TMetadata, ft.TRaw:
		fi, err := NewFile(pointsTo.String(), mnode, root, fs)
		if err != nil {
			return nil, err
		}
		root.val = fi
	default:
		panic("unrecognized! (NYI)")
	}
	return root, nil
}
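// A rough sketch of the KeyRoot fields that newKeyRoot above populates. This
// is inferred from the assignments rather than taken from the actual
// declaration; in particular the type of val, which holds either a Directory
// or a File, is only assumed here.
type KeyRoot struct {
	key   ci.PrivKey
	name  string
	fs    *Filesystem
	node  *dag.Node
	val   interface{} // assumed: *Directory or *File
	repub *Republisher
}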
// betterPeersToQuery returns nearestPeersToQuery, but only those peers that are
// closer to the key than self.
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) []peer.ID {
	closer := dht.nearestPeersToQuery(pmes, count)

	// no node? nil
	if closer == nil {
		return nil
	}

	// == to self? that's bad
	for _, p := range closer {
		if p == dht.self {
			log.Debug("Attempted to return self! this shouldn't happen...")
			return nil
		}
	}

	var filtered []peer.ID
	for _, clp := range closer {
		// Don't send a peer back to themselves
		if p == clp {
			continue
		}

		// must all be closer than self
		key := key.Key(pmes.GetKey())
		if !kb.Closer(dht.self, clp, key) {
			filtered = append(filtered, clp)
		}
	}

	// ok seems like closer nodes
	return filtered
}
// putProvider sends a message to peer 'p' saying that the local node
// can provide the value of 'key'
func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.ID, skey string) error {

	// add self as the provider
	pi := peer.PeerInfo{
		ID:    dht.self,
		Addrs: dht.host.Addrs(),
	}

	// // only share WAN-friendly addresses ??
	// pi.Addrs = addrutil.WANShareableAddrs(pi.Addrs)

	if len(pi.Addrs) < 1 {
		// log.Infof("%s putProvider: %s for %s error: no wan-friendly addresses", dht.self, p, key.Key(skey), pi.Addrs)
		return fmt.Errorf("no known addresses for self. cannot put provider.")
	}

	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, skey, 0)
	pmes.ProviderPeers = pb.RawPeerInfosToPBPeers([]peer.PeerInfo{pi})
	err := dht.sendMessage(ctx, p, pmes)
	if err != nil {
		return err
	}

	log.Debugf("%s putProvider: %s for %s (%s)", dht.self, p, key.Key(skey), pi.Addrs)
	return nil
}
// VerifyRecord checks a record and ensures it is still valid.
// It runs needed validators.
func (v Validator) VerifyRecord(r *pb.Record) error {
	// Check whether a validator is registered for this key's namespace
	parts := strings.Split(r.GetKey(), "/")
	if len(parts) < 3 {
		log.Infof("Record key does not have validator: %s", key.Key(r.GetKey()))
		return nil
	}

	val, ok := v[parts[1]]
	if !ok {
		log.Infof("Unrecognized key prefix: %s", parts[1])
		return ErrInvalidRecordType
	}

	return val.Func(key.Key(r.GetKey()), r.GetValue())
}
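// A hedged sketch of the shapes VerifyRecord assumes: a Validator maps a key
// namespace (the segment after the leading "/", e.g. "ipns" or "pk") to a
// checker whose Func validates the record value. The real declarations may
// carry additional fields; only Func is required by the code above.
type ValidatorFunc func(key.Key, []byte) error

type ValidChecker struct {
	Func ValidatorFunc
	Sign bool // whether records in this namespace must be signed
}

type Validator map[string]*ValidChecker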
func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) {
	nkey, err := n.Key()
	if err != nil {
		return 0, err
	}

	if rw.skip(nkey) {
		return 0, nil
	}

	count := 0
	for _, l := range n.Links {
		lk := key.Key(l.Hash)

		if rw.skip(lk) {
			continue
		}

		if err := rw.WriteEdge(nkey, lk, l.Name); err != nil {
			return count, err
		}
		count++
	}
	return count, nil
}
func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) {
	nkey, err := n.Key()
	if err != nil {
		return 0, err
	}

	var count int
	for i, ng := range rw.DAG.GetDAG(rw.Ctx, n) {
		lk := key.Key(n.Links[i].Hash)
		if rw.skip(lk) {
			continue
		}

		if err := rw.WriteEdge(nkey, lk, n.Links[i].Name); err != nil {
			return count, err
		}

		nd, err := ng.Get(rw.Ctx)
		if err != nil {
			return count, err
		}

		c, err := rw.writeRefsRecursive(nd)
		count += c
		if err != nil {
			return count, err
		}
	}
	return count, nil
}
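// A minimal sketch of the deduplication behaviour both writeRefs* functions
// assume from rw.skip: keys are suppressed only when unique output was
// requested, using a lazily allocated seen-set. Field names here (Unique,
// seen) are illustrative and may not match the real struct.
func (rw *RefWriter) skip(k key.Key) bool {
	if !rw.Unique {
		return false
	}
	if rw.seen == nil {
		rw.seen = make(map[key.Key]struct{})
	}
	if _, found := rw.seen[k]; found {
		return true
	}
	rw.seen[k] = struct{}{}
	return false
}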
func TestValidAfter(t *testing.T) {
	pi := testutil.RandIdentityOrFatal(t)
	k := key.Key("mock key")
	ctx := context.Background()
	conf := DelayConfig{
		ValueVisibility: delay.Fixed(1 * time.Hour),
		Query:           delay.Fixed(0),
	}

	rs := NewServerWithDelay(conf)

	rs.Client(pi).Provide(ctx, k)

	var providers []peer.PeerInfo
	providers, err := rs.Client(pi).FindProviders(ctx, k)
	if err != nil {
		t.Fatal(err)
	}
	if len(providers) > 0 {
		t.Fail()
	}

	conf.ValueVisibility.Set(0)
	providers, err = rs.Client(pi).FindProviders(ctx, k)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("providers", providers)
	if len(providers) != 1 {
		t.Fail()
	}
}
func defaultRepo(dstore ds.ThreadSafeDatastore) (repo.Repo, error) {
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

	data, err := pub.Hash()
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
	c.Identity.PeerID = key.Key(data).B58String()
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

	return &repo.Mock{
		D: dstore,
		C: c,
	}, nil
}
// GetNode returns the MDAG Node that this link points to
func (l *Link) GetNode(ctx context.Context, serv DAGService) (*Node, error) {
	if l.Node != nil {
		return l.Node, nil
	}

	return serv.Get(ctx, key.Key(l.Hash))
}
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }

	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the address given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check signatures later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // don't add own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		}
		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}
func TestClientOverMax(t *testing.T) {
	rs := NewServer()
	k := key.Key("hello")
	numProvidersForHelloKey := 100
	for i := 0; i < numProvidersForHelloKey; i++ {
		pi := testutil.RandIdentityOrFatal(t)
		err := rs.Client(pi).Provide(context.Background(), k)
		if err != nil {
			t.Fatal(err)
		}
	}

	max := 10
	pi := testutil.RandIdentityOrFatal(t)
	client := rs.Client(pi)

	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	i := 0
	for range providersFromClient {
		i++
	}
	if i != max {
		t.Fatal("Too many providers returned")
	}
}
// Loggable turns a Message into machine-readable log output
func (m *Message) Loggable() map[string]interface{} {
	return map[string]interface{}{
		"message": map[string]string{
			"type": m.Type.String(),
			"key":  key.Key(m.GetKey()).Pretty(),
		},
	}
}
// GetDAG will fill out all of the links of the given Node.
// It returns a slice of NodeGetters, one per child link of 'root' and in the
// same order as the links, which the caller can use to fetch each child node.
func (ds *dagService) GetDAG(ctx context.Context, root *Node) []NodeGetter {
	var keys []key.Key
	for _, lnk := range root.Links {
		keys = append(keys, key.Key(lnk.Hash))
	}

	return ds.GetNodes(ctx, keys)
}
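// A hedged usage sketch: GetDAG only schedules the fetches, so each child is
// actually retrieved by calling Get on its NodeGetter. The helper name
// fetchChildren is illustrative and not part of the package.
func fetchChildren(ctx context.Context, ds *dagService, root *Node) ([]*Node, error) {
	var children []*Node
	for _, ng := range ds.GetDAG(ctx, root) {
		nd, err := ng.Get(ctx)
		if err != nil {
			return nil, err
		}
		children = append(children, nd)
	}
	return children, nil
}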
// RemoveChild removes the child node at the given index
func (n *UnixfsNode) RemoveChild(index int, dbh *DagBuilderHelper) {
	k := key.Key(n.node.Links[index].Hash)
	if dbh.mp != nil {
		dbh.mp.RemovePinWithMode(k, pin.Indirect)
	}
	n.ufmt.RemoveBlockSize(index)
	n.node.Links = append(n.node.Links[:index], n.node.Links[index+1:]...)
}
func TestBlockReturnsErr(t *testing.T) {
	off := Exchange(bstore())
	_, err := off.GetBlock(context.Background(), key.Key("foo"))
	if err != nil {
		return // as desired
	}
	t.Fail()
}
func init() {
	testCaseValues["hello"] = []byte("world")
	for i := 0; i < 100; i++ {
		k := fmt.Sprintf("%d -- key", i)
		v := fmt.Sprintf("%d -- value", i)
		testCaseValues[key.Key(k)] = []byte(v)
	}
}
func TestCopyProtoByValue(t *testing.T) {
	const str = "foo"
	m := New(true)
	protoBeforeAppend := m.ToProto()
	m.AddEntry(key.Key(str), 1)
	if wantlistContains(protoBeforeAppend.GetWantlist(), str) {
		t.Fail()
	}
}
// handlePutValue stores a value in this peer's local storage
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handlePutValue", p).Done()
	dskey := key.Key(pmes.GetKey()).DsKey()

	if err := dht.verifyRecordLocally(pmes.GetRecord()); err != nil {
		log.Debugf("Bad dht record in PUT from: %s. %s", key.Key(pmes.GetRecord().GetAuthor()), err)
		return nil, err
	}

	data, err := proto.Marshal(pmes.GetRecord())
	if err != nil {
		return nil, err
	}

	err = dht.datastore.Put(dskey, data)
	log.Debugf("%s handlePutValue %v", dht.self, dskey)
	return pmes, err
}
// TODO does dht ensure won't receive self as a provider? probably not.
func TestCanceledContext(t *testing.T) {
	rs := NewServer()
	k := key.Key("hello")

	// avoid leaking goroutine, without using the context to signal
	// (we want the goroutine to keep trying to publish on a
	// cancelled context until we've tested that it doesn't do anything.)
	done := make(chan struct{})
	defer func() { done <- struct{}{} }()

	t.Log("asynchronously announce an infinite stream of providers for key")
	i := 0
	go func() { // infinite stream
		for {
			select {
			case <-done:
				t.Log("exiting async worker")
				return
			default:
			}

			pi, err := testutil.RandIdentity()
			if err != nil {
				t.Error(err)
			}
			err = rs.Client(pi).Provide(context.Background(), k)
			if err != nil {
				t.Error(err)
			}
			i++
		}
	}()

	local := testutil.RandIdentityOrFatal(t)
	client := rs.Client(local)

	t.Log("warning: max is finite so this test is non-deterministic")
	t.Log("context cancellation could simply take lower priority")
	t.Log("and result in receiving the max number of results")
	max := 1000

	t.Log("cancel the context before consuming")
	ctx, cancelFunc := context.WithCancel(context.Background())
	cancelFunc()
	providers := client.FindProvidersAsync(ctx, k, max)

	numProvidersReturned := 0
	for range providers {
		numProvidersReturned++
	}
	t.Log(numProvidersReturned)

	if numProvidersReturned == max {
		t.Fatal("Context cancel had no effect")
	}
}
func TestGetWhenKeyNotPresent(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	_, err := bs.Get(key.Key("not present"))

	if err != nil {
		t.Log("As expected, block is not present")
		return
	}
	t.Fail()
}
func TestAppendWanted(t *testing.T) {
	const str = "foo"
	m := New(true)
	m.AddEntry(key.Key(str), 1)

	if !wantlistContains(m.ToProto().GetWantlist(), str) {
		t.Fail()
	}
	m.ToProto().GetWantlist().GetEntries()
}