Example #1
func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.Node, fn walkerFunc, children keyObserver) error {
	hdr, buf, err := readHdr(n)
	if err != nil {
		return err
	}
	// readHdr guarantees fanout is a safe value
	fanout := hdr.GetFanout()
	for i, l := range n.Links[fanout:] {
		if err := fn(buf, i, l); err != nil {
			return err
		}
	}
	for _, l := range n.Links[:fanout] {
		children(key.Key(l.Hash))
		if key.Key(l.Hash) == emptyKey {
			continue
		}
		subtree, err := l.GetNode(ctx, dag)
		if err != nil {
			return err
		}
		if err := walkItems(ctx, dag, subtree, fn, children); err != nil {
			return err
		}
	}
	return nil
}
Example #2
// ResolveToKey resolves a path to a key.
//
// It first checks if the path is already in the form of just a key (<key> or
// /ipfs/<key>) and returns immediately if so. Otherwise, it falls back onto
// Resolve to perform resolution of the dagnode being referenced.
func ResolveToKey(ctx context.Context, n *IpfsNode, p path.Path) (key.Key, error) {

	// If the path is simply a key, parse and return it. Parsed paths are already
	// normalized (read: prepended with /ipfs/ if needed), so segment[1] should
	// always be the key.
	if p.IsJustAKey() {
		return key.B58KeyDecode(p.Segments()[1]), nil
	}

	// Fall back onto regular dagnode resolution. Retrieve the second-to-last
	// segment of the path and resolve its link to the last segment.
	head, tail, err := p.PopLastSegment()
	if err != nil {
		return key.Key(""), err
	}
	dagnode, err := Resolve(ctx, n, head)
	if err != nil {
		return key.Key(""), err
	}

	// Extract and return the key of the link to the target dag node.
	link, err := dagnode.GetNodeLink(tail)
	if err != nil {
		return key.Key(""), err
	}

	return key.Key(link.Hash), nil
}
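A minimal usage sketch (not from the original source): resolveAndPrint is a hypothetical helper showing one way ResolveToKey might be called, assuming n is an already-initialized *IpfsNode and ps is a raw path string.

func resolveAndPrint(ctx context.Context, n *IpfsNode, ps string) error {
	// parse and normalize the raw path first
	p, err := path.ParsePath(ps)
	if err != nil {
		return err
	}
	// resolve the path to the key of the dag node it points at
	k, err := ResolveToKey(ctx, n, p)
	if err != nil {
		return err
	}
	fmt.Println("resolved key:", k.Pretty())
	return nil
}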
Example #3
func TestClientFindProviders(t *testing.T) {
	pi := testutil.RandIdentityOrFatal(t)
	rs := NewServer()
	client := rs.Client(pi)

	k := key.Key("hello")
	err := client.Provide(context.Background(), k)
	if err != nil {
		t.Fatal(err)
	}

	// This is bad... but simulating networks is hard
	time.Sleep(time.Millisecond * 300)
	max := 100

	providersFromClient := client.FindProvidersAsync(context.Background(), key.Key("hello"), max)
	isInClient := false
	for prov := range providersFromClient {
		if prov.ID == pi.ID() {
			isInClient = true
		}
	}
	if !isInClient {
		t.Fatal("Despite client providing key, client didn't receive peer when finding providers")
	}
}
Example #4
func TestToNetFromNetPreservesWantList(t *testing.T) {
	original := New(true)
	original.AddEntry(key.Key("M"), 1)
	original.AddEntry(key.Key("B"), 1)
	original.AddEntry(key.Key("D"), 1)
	original.AddEntry(key.Key("T"), 1)
	original.AddEntry(key.Key("F"), 1)

	buf := new(bytes.Buffer)
	if err := original.ToNet(buf); err != nil {
		t.Fatal(err)
	}

	copied, err := FromNet(buf)
	if err != nil {
		t.Fatal(err)
	}

	keys := make(map[key.Key]bool)
	for _, k := range copied.Wantlist() {
		keys[k.Key] = true
	}

	for _, k := range original.Wantlist() {
		if _, ok := keys[k.Key]; !ok {
			t.Fatalf("Key Missing: \"%v\"", k)
		}
	}
}
Example #5
func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) []*Change {
	if len(a.Links) == 0 && len(b.Links) == 0 {
		ak, _ := a.Key()
		bk, _ := b.Key()
		return []*Change{
			&Change{
				Type:   Mod,
				Before: ak,
				After:  bk,
			},
		}
	}

	var out []*Change
	cleanA := a.Copy()
	cleanB := b.Copy()

	// strip out unchanged links so only additions and removals remain below
	for _, lnk := range a.Links {
		l, err := b.GetNodeLink(lnk.Name)
		if err != nil {
			continue
		}
		if !bytes.Equal(l.Hash, lnk.Hash) {
			// the link changed: recurse into both subtrees and record nested changes
			anode, _ := lnk.GetNode(ctx, ds)
			bnode, _ := l.GetNode(ctx, ds)
			sub := Diff(ctx, ds, anode, bnode)

			for _, subc := range sub {
				subc.Path = path.Join(lnk.Name, subc.Path)
				out = append(out, subc)
			}
		}
		cleanA.RemoveNodeLink(l.Name)
		cleanB.RemoveNodeLink(l.Name)
	}

	for _, lnk := range cleanA.Links {
		out = append(out, &Change{
			Type:   Remove,
			Path:   lnk.Name,
			Before: key.Key(lnk.Hash),
		})
	}
	for _, lnk := range cleanB.Links {
		out = append(out, &Change{
			Type:  Add,
			Path:  lnk.Name,
			After: key.Key(lnk.Hash),
		})
	}

	return out
}
Example #6
// resolveOnce implements resolver. Uses the IPFS routing system to
// resolve SFS-like names.
func (r *routingResolver) resolveOnce(ctx context.Context, name string) (path.Path, error) {
	log.Debugf("RoutingResolve: '%s'", name)
	// name should be a multihash; if it isn't, error out here.
	hash, err := mh.FromB58String(name)
	if err != nil {
		log.Warningf("RoutingResolve: bad input hash: [%s]", name)
		return "", err
	}

	// use the routing system to get the name.
	// /ipns/<name>
	h := []byte("/ipns/" + string(hash))

	ipnsKey := key.Key(h)
	val, err := r.routing.GetValue(ctx, ipnsKey)
	if err != nil {
		log.Warning("RoutingResolve get failed.")
		return "", err
	}

	entry := new(pb.IpnsEntry)
	err = proto.Unmarshal(val, entry)
	if err != nil {
		return "", err
	}

	// name should be a public key retrievable from ipfs
	pubkey, err := routing.GetPublicKey(r.routing, ctx, hash)
	if err != nil {
		return "", err
	}

	hsh, _ := pubkey.Hash()
	log.Debugf("pk hash = %s", key.Key(hsh))

	// check sig with pk
	if ok, err := pubkey.Verify(ipnsEntryDataForSig(entry), entry.GetSignature()); err != nil || !ok {
		return "", fmt.Errorf("Invalid value. Not signed by PrivateKey corresponding to %v", pubkey)
	}

	// ok sig checks out. this is a valid name.

	// check for an old style record:
	valh, err := mh.Cast(entry.GetValue())
	if err != nil {
		// not a multihash, probably a new style path record
		return path.ParsePath(string(entry.GetValue()))
	}

	// it's an old style multihash record
	log.Warning("Detected old style multihash record")
	return path.FromKey(key.Key(valh)), nil
}
Example #7
func TestPushPop(t *testing.T) {
	prq := newPRQ()
	partner := testutil.RandPeerIDFatal(t)
	alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "")
	vowels := strings.Split("aeiou", "")
	consonants := func() []string {
		var out []string
		for _, letter := range alphabet {
			skip := false
			for _, vowel := range vowels {
				if letter == vowel {
					skip = true
				}
			}
			if !skip {
				out = append(out, letter)
			}
		}
		return out
	}()
	sort.Strings(alphabet)
	sort.Strings(vowels)
	sort.Strings(consonants)

	// add a bunch of blocks. cancel some. drain the queue. the queue should only have the kept entries

	for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters
		letter := alphabet[index]
		t.Log(partner.String())
		prq.Push(wantlist.Entry{Key: key.Key(letter), Priority: math.MaxInt32 - index}, partner)
	}
	for _, consonant := range consonants {
		prq.Remove(key.Key(consonant), partner)
	}

	var out []string
	for {
		received := prq.Pop()
		if received == nil {
			break
		}

		out = append(out, string(received.Entry.Key))
	}

	// Entries popped should already be in correct order
	for i, expected := range vowels {
		if out[i] != expected {
			t.Fatal("received", out[i], "expected", expected)
		}
	}
}
Example #8
// This test checks that peers won't starve out other peers
func TestPeerRepeats(t *testing.T) {
	prq := newPRQ()
	a := testutil.RandPeerIDFatal(t)
	b := testutil.RandPeerIDFatal(t)
	c := testutil.RandPeerIDFatal(t)
	d := testutil.RandPeerIDFatal(t)

	// Have each push some blocks

	for i := 0; i < 5; i++ {
		k := key.Key(fmt.Sprint(i))
		prq.Push(wantlist.Entry{Key: k}, a)
		prq.Push(wantlist.Entry{Key: k}, b)
		prq.Push(wantlist.Entry{Key: k}, c)
		prq.Push(wantlist.Entry{Key: k}, d)
	}

	// now, pop off four entries, there should be one from each
	var targets []string
	var tasks []*peerRequestTask
	for i := 0; i < 4; i++ {
		t := prq.Pop()
		targets = append(targets, t.Target.Pretty())
		tasks = append(tasks, t)
	}

	expected := []string{a.Pretty(), b.Pretty(), c.Pretty(), d.Pretty()}
	sort.Strings(expected)
	sort.Strings(targets)

	t.Log(targets)
	t.Log(expected)
	for i, s := range targets {
		if expected[i] != s {
			t.Fatal("unexpected peer", s, expected[i])
		}
	}

	// Now, if one of the tasks gets finished, the next task off the queue should
	// be for the same peer
	for blockI := 0; blockI < 4; blockI++ {
		for i := 0; i < 4; i++ {
			// it's okay to mark the same task done multiple times here (JUST FOR TESTING)
			tasks[i].Done()

			ntask := prq.Pop()
			if ntask.Target != tasks[i].Target {
				t.Fatal("Expected task from peer with lowest active count")
			}
		}
	}
}
Example #9
File: peer.go Project: utamaro/core
// ReadRecent reads the most recent key of Peer p.
func (p *Peer) ReadRecent() (key.Key, error) {
	var recent NodeProto
	if err := p.readDAG(config.RecentPath, &recent); log.If(err) {
		return "", err
	}
	return key.Key(recent.Id), nil
}
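A hedged usage sketch: logRecent is a hypothetical helper that reads the peer's most recent key via ReadRecent and prints it, assuming p is an initialized *Peer from the same project.

func logRecent(p *Peer) error {
	recent, err := p.ReadRecent()
	if err != nil {
		return err
	}
	fmt.Println("most recent key:", recent)
	return nil
}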
Example #10
func TestClientOverMax(t *testing.T) {
	rs := NewServer()
	k := key.Key("hello")
	numProvidersForHelloKey := 100
	for i := 0; i < numProvidersForHelloKey; i++ {
		pi := testutil.RandIdentityOrFatal(t)
		err := rs.Client(pi).Provide(context.Background(), k)
		if err != nil {
			t.Fatal(err)
		}
	}

	max := 10
	pi := testutil.RandIdentityOrFatal(t)
	client := rs.Client(pi)

	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
	i := 0
	for range providersFromClient {
		i++
	}
	if i != max {
		t.Fatalf("expected %d providers, got %d", max, i)
	}
}
Example #11
func TestValidAfter(t *testing.T) {

	pi := testutil.RandIdentityOrFatal(t)
	k := key.Key("mock key")
	ctx := context.Background()
	conf := DelayConfig{
		ValueVisibility: delay.Fixed(1 * time.Hour),
		Query:           delay.Fixed(0),
	}

	rs := NewServerWithDelay(conf)

	rs.Client(pi).Provide(ctx, k)

	var providers []peer.PeerInfo
	providers, err := rs.Client(pi).FindProviders(ctx, k)
	if err != nil {
		t.Fatal(err)
	}
	if len(providers) > 0 {
		t.Fail()
	}

	conf.ValueVisibility.Set(0)
	providers, err = rs.Client(pi).FindProviders(ctx, k)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("providers", providers)
	if len(providers) != 1 {
		t.Fail()
	}
}
Example #12
func TestRoutingResolve(t *testing.T) {
	d := mockrouting.NewServer().Client(testutil.RandIdentityOrFatal(t))
	dstore := ds.NewMapDatastore()

	resolver := NewRoutingResolver(d, 0)
	publisher := NewRoutingPublisher(d, dstore)

	privk, pubk, err := testutil.RandTestKeyPair(512)
	if err != nil {
		t.Fatal(err)
	}

	h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN")
	err = publisher.Publish(context.Background(), privk, h)
	if err != nil {
		t.Fatal(err)
	}

	pubkb, err := pubk.Bytes()
	if err != nil {
		t.Fatal(err)
	}

	pkhash := u.Hash(pubkb)
	res, err := resolver.Resolve(context.Background(), key.Key(pkhash).Pretty())
	if err != nil {
		t.Fatal(err)
	}

	if res != h {
		t.Fatal("Got back incorrect value.")
	}
}
Example #13
func TestEnumerateChildren(t *testing.T) {
	bsi := bstest.Mocks(1)
	ds := NewDAGService(bsi[0])

	read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024)
	root, err := imp.BuildDagFromReader(ds, chunk.NewSizeSplitter(read, 512))
	if err != nil {
		t.Fatal(err)
	}

	ks := key.NewKeySet()
	err = EnumerateChildren(context.Background(), ds, root, ks, false)
	if err != nil {
		t.Fatal(err)
	}

	var traverse func(n *Node)
	traverse = func(n *Node) {
		// traverse dag and check
		for _, lnk := range n.Links {
			k := key.Key(lnk.Hash)
			if !ks.Has(k) {
				t.Fatal("missing key in set!")
			}
			child, err := ds.Get(context.Background(), k)
			if err != nil {
				t.Fatal(err)
			}
			traverse(child)
		}
	}

	traverse(root)
}
Example #14
func TestEmptyKey(t *testing.T) {
	ds := dstest.Mock()
	_, err := ds.Get(context.Background(), key.Key(""))
	if err != ErrNotFound {
		t.Error("dag service should error when key is nil", err)
	}
}
Example #15
func publish(ctx context.Context, n *core.IpfsNode, k crypto.PrivKey, ref path.Path, opts *publishOpts) (*IpnsEntry, error) {

	if opts.verifyExists {
		// verify the path exists
		_, err := core.Resolve(ctx, n, ref)
		if err != nil {
			return nil, err
		}
	}

	eol := time.Now().Add(opts.pubValidTime)
	err := n.Namesys.PublishWithEOL(ctx, k, ref, eol)
	if err != nil {
		return nil, err
	}

	hash, err := k.GetPublic().Hash()
	if err != nil {
		return nil, err
	}

	return &IpnsEntry{
		Name:  key.Key(hash).String(),
		Value: ref.String(),
	}, nil
}
Example #16
// betterPeersToQuery returns nearestPeersToQuery, filtered to exclude the requesting
// peer and to keep only peers at least as close to the key as self.
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) []peer.ID {
	closer := dht.nearestPeersToQuery(pmes, count)

	// no node? nil
	if closer == nil {
		return nil
	}

	// == to self? that's bad
	for _, p := range closer {
		if p == dht.self {
			log.Debug("Attempted to return self! this shouldn't happen...")
			return nil
		}
	}

	var filtered []peer.ID
	for _, clp := range closer {
		// Don't send a peer back to themselves
		if p == clp {
			continue
		}

		// must all be closer than self
		key := key.Key(pmes.GetKey())
		if !kb.Closer(dht.self, clp, key) {
			filtered = append(filtered, clp)
		}
	}

	// ok seems like closer nodes
	return filtered
}
Example #17
func RunSupernodePutRecordGetRecord(conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	servers, clients, err := InitializeSupernodeNetwork(ctx, 2, 2, conf)
	if err != nil {
		return err
	}
	for _, n := range append(servers, clients...) {
		defer n.Close()
	}

	putter := clients[0]
	getter := clients[1]

	k := key.Key("key")
	note := []byte("a note from putter")

	if err := putter.Routing.PutValue(ctx, k, note); err != nil {
		return fmt.Errorf("failed to put value: %s", err)
	}

	received, err := getter.Routing.GetValue(ctx, k)
	if err != nil {
		return fmt.Errorf("failed to get value: %s", err)
	}

	if !bytes.Equal(note, received) {
		return errors.New("record doesn't match")
	}
	return nil
}
Example #18
// VerifyRecord checks a record and ensures it is still valid.
// It runs needed validators
func (v Validator) VerifyRecord(r *pb.Record) error {
	// Now, check validity func
	parts := path.SplitList(r.GetKey())
	if len(parts) < 3 {
		log.Infof("Record key does not have validator: %s", key.Key(r.GetKey()))
		return nil
	}

	val, ok := v[parts[1]]
	if !ok {
		log.Infof("Unrecognized key prefix: %s", parts[1])
		return ErrInvalidRecordType
	}

	return val.Func(key.Key(r.GetKey()), r.GetValue())
}
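A hedged sketch of how VerifyRecord might gate an incoming record: acceptRecord is a hypothetical helper, assuming v is a Validator already populated with a checker for the record's namespace (e.g. "ipns") and rec is a *pb.Record received from the network.

func acceptRecord(v Validator, rec *pb.Record) error {
	// reject records whose namespaced key fails its registered validator
	if err := v.VerifyRecord(rec); err != nil {
		log.Debugf("discarding record %s: %s", key.Key(rec.GetKey()), err)
		return err
	}
	return nil
}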
Example #19
// putProvider sends a message to peer 'p' saying that the local node
// can provide the value of 'key'
func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.ID, skey string) error {

	// add self as the provider
	pi := peer.PeerInfo{
		ID:    dht.self,
		Addrs: dht.host.Addrs(),
	}

	// // only share WAN-friendly addresses ??
	// pi.Addrs = addrutil.WANShareableAddrs(pi.Addrs)
	if len(pi.Addrs) < 1 {
		// log.Infof("%s putProvider: %s for %s error: no wan-friendly addresses", dht.self, p, key.Key(key), pi.Addrs)
		return fmt.Errorf("no known addresses for self. cannot put provider.")
	}

	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, skey, 0)
	pmes.ProviderPeers = pb.RawPeerInfosToPBPeers([]peer.PeerInfo{pi})
	err := dht.sendMessage(ctx, p, pmes)
	if err != nil {
		return err
	}

	log.Debugf("%s putProvider: %s for %s (%s)", dht.self, p, key.Key(skey), pi.Addrs)
	return nil
}
Example #20
func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

	data, err := pub.Hash()
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
	c.Identity.PeerID = key.Key(data).B58String()
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

	return &repo.Mock{
		D: dstore,
		C: c,
	}, nil
}
Example #21
// GetNode returns the MDAG Node that this link points to
func (l *Link) GetNode(ctx context.Context, serv DAGService) (*Node, error) {
	if l.Node != nil {
		return l.Node, nil
	}

	return serv.Get(ctx, key.Key(l.Hash))
}
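A brief sketch (assumed setup): printChildren is a hypothetical helper that walks a node's links and fetches each child with Link.GetNode, which returns the cached l.Node when present and otherwise asks the DAGService.

func printChildren(ctx context.Context, serv DAGService, n *Node) error {
	for _, l := range n.Links {
		child, err := l.GetNode(ctx, serv)
		if err != nil {
			return err
		}
		fmt.Printf("child %s has %d links\n", key.Key(l.Hash).Pretty(), len(child.Links))
	}
	return nil
}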
Example #22
func TestGetWhenKeyIsEmptyString(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	_, err := bs.Get(key.Key(""))
	if err != ErrNotFound {
		t.Fail()
	}
}
Example #23
func TestProvidersDatastore(t *testing.T) {
	old := lruCacheSize
	lruCacheSize = 10
	defer func() { lruCacheSize = old }()

	ctx := context.Background()
	mid := peer.ID("testing")
	p := NewProviderManager(ctx, mid, ds.NewMapDatastore())
	defer p.proc.Close()

	friend := peer.ID("friend")
	var keys []key.Key
	for i := 0; i < 100; i++ {
		k := key.Key(fmt.Sprint(i))
		keys = append(keys, k)
		p.AddProvider(ctx, k, friend)
	}

	for _, k := range keys {
		resp := p.GetProviders(ctx, k)
		if len(resp) != 1 {
			t.Fatal("Could not retrieve provider.")
		}
		if resp[0] != friend {
			t.Fatal("expected provider to be 'friend'")
		}
	}
}
Example #24
func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) {
	nkey, err := n.Key()
	if err != nil {
		return 0, err
	}

	if rw.skip(nkey) {
		return 0, nil
	}

	count := 0
	for _, l := range n.Links {
		lk := key.Key(l.Hash)

		if rw.skip(lk) {
			continue
		}

		if err := rw.WriteEdge(nkey, lk, l.Name); err != nil {
			return count, err
		}
		count++
	}
	return count, nil
}
Example #25
func (n *IpfsNode) loadFilesRoot() error {
	dsk := ds.NewKey("/local/filesroot")
	pf := func(ctx context.Context, k key.Key) error {
		return n.Repo.Datastore().Put(dsk, []byte(k))
	}

	var nd *merkledag.Node
	val, err := n.Repo.Datastore().Get(dsk)

	switch {
	case err == ds.ErrNotFound || val == nil:
		nd = uio.NewEmptyDirectory()
		_, err := n.DAG.Add(nd)
		if err != nil {
			return fmt.Errorf("failure writing to dagstore: %s", err)
		}
	case err == nil:
		k := key.Key(val.([]byte))
		nd, err = n.DAG.Get(n.Context(), k)
		if err != nil {
			return fmt.Errorf("error loading filesroot from DAG: %s", err)
		}
	default:
		return err
	}

	mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf)
	if err != nil {
		return err
	}

	n.FilesRoot = mr
	return nil
}
Example #26
func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) {
	nkey, err := n.Key()
	if err != nil {
		return 0, err
	}

	var count int
	for i, ng := range rw.DAG.GetDAG(rw.Ctx, n) {
		lk := key.Key(n.Links[i].Hash)
		if rw.skip(lk) {
			continue
		}

		if err := rw.WriteEdge(nkey, lk, n.Links[i].Name); err != nil {
			return count, err
		}

		nd, err := ng.Get(rw.Ctx)
		if err != nil {
			return count, err
		}

		c, err := rw.writeRefsRecursive(nd)
		count += c
		if err != nil {
			return count, err
		}
	}
	return count, nil
}
Example #27
// NewFilesystem instantiates an ipns filesystem using the given parameters and locally owned keys
func NewFilesystem(ctx context.Context, ds dag.DAGService, nsys namesys.NameSystem, pins pin.Pinner, keys ...ci.PrivKey) (*Filesystem, error) {
	roots := make(map[string]*KeyRoot)
	fs := &Filesystem{
		ctx:      ctx,
		roots:    roots,
		nsys:     nsys,
		dserv:    ds,
		pins:     pins,
		resolver: &path.Resolver{DAG: ds},
	}
	for _, k := range keys {
		pkh, err := k.GetPublic().Hash()
		if err != nil {
			return nil, err
		}

		root, err := fs.newKeyRoot(ctx, k)
		if err != nil {
			return nil, err
		}
		roots[key.Key(pkh).Pretty()] = root
	}

	return fs, nil
}
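A hedged wiring sketch: newNodeFilesystem is a hypothetical helper showing how NewFilesystem might be constructed from an IPFS node; the nd.DAG, nd.Namesys, nd.Pinning and nd.PrivateKey field names are assumptions for illustration and are not confirmed by the snippet above.

func newNodeFilesystem(ctx context.Context, nd *core.IpfsNode) (*Filesystem, error) {
	// pass the node's DAG service, name system, pinner and signing key through
	return NewFilesystem(ctx, nd.DAG, nd.Namesys, nd.Pinning, nd.PrivateKey)
}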
Example #28
// newKeyRoot creates a new KeyRoot for the given key, and starts up a republisher routine
// for it
func (fs *Filesystem) newKeyRoot(parent context.Context, k ci.PrivKey) (*KeyRoot, error) {
	hash, err := k.GetPublic().Hash()
	if err != nil {
		return nil, err
	}

	name := "/ipns/" + key.Key(hash).String()

	root := new(KeyRoot)
	root.key = k
	root.fs = fs
	root.name = name

	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	pointsTo, err := fs.nsys.Resolve(ctx, name)
	if err != nil {
		err = namesys.InitializeKeyspace(ctx, fs.dserv, fs.nsys, fs.pins, k)
		if err != nil {
			return nil, err
		}

		pointsTo, err = fs.nsys.Resolve(ctx, name)
		if err != nil {
			return nil, err
		}
	}

	mnode, err := fs.resolver.ResolvePath(ctx, pointsTo)
	if err != nil {
		log.Errorf("Failed to retrieve value '%s' for ipns entry: %s\n", pointsTo, err)
		return nil, err
	}

	root.node = mnode

	root.repub = NewRepublisher(root, time.Millisecond*300, time.Second*3)
	go root.repub.Run(parent)

	pbn, err := ft.FromBytes(mnode.Data)
	if err != nil {
		log.Error("IPNS pointer was not unixfs node")
		return nil, err
	}

	switch pbn.GetType() {
	case ft.TDirectory:
		root.val = NewDirectory(ctx, pointsTo.String(), mnode, root, fs)
	case ft.TFile, ft.TMetadata, ft.TRaw:
		fi, err := NewFile(pointsTo.String(), mnode, root, fs)
		if err != nil {
			return nil, err
		}
		root.val = fi
	default:
		panic("unrecognized! (NYI)")
	}
	return root, nil
}
Example #29
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }

	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the address given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check signatures later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // dont add own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		}
		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}
Example #30
func TestBlockReturnsErr(t *testing.T) {
	off := Exchange(bstore())
	_, err := off.GetBlock(context.Background(), key.Key("foo"))
	if err != nil {
		return // as desired
	}
	t.Fail()
}