Example #1
func TestBasicBitswap(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	t.Log("Test a one node trying to get one block from another")

	instances := sg.Instances(2)
	blocks := bg.Blocks(1)
	err := instances[0].Exchange.HasBlock(context.TODO(), blocks[0])
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
	defer cancel()
	blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key())
	if err != nil {
		t.Fatal(err)
	}

	t.Log(blk)
	for _, inst := range instances {
		err := inst.Exchange.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
Example #2
func TestAddrBlocking(t *testing.T) {
	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 2)

	swarms[0].SetConnHandler(func(conn *Conn) {
		t.Fatal("no connections should happen!")
	})

	_, block, err := net.ParseCIDR("127.0.0.1/8")
	if err != nil {
		t.Fatal(err)
	}

	swarms[1].Filters.AddDialFilter(block)

	swarms[1].peers.AddAddr(swarms[0].LocalPeer(), swarms[0].ListenAddresses()[0], peer.PermanentAddrTTL)
	_, err = swarms[1].Dial(context.TODO(), swarms[0].LocalPeer())
	if err == nil {
		t.Fatal("dial should have failed")
	}

	swarms[0].peers.AddAddr(swarms[1].LocalPeer(), swarms[1].ListenAddresses()[0], peer.PermanentAddrTTL)
	_, err = swarms[0].Dial(context.TODO(), swarms[1].LocalPeer())
	if err == nil {
		t.Fatal("dial should have failed")
	}
}
Example #3
// TODO simplify this test. get to the _essence_!
func TestSendToWantingPeer(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	prev := rebroadcastDelay.Set(time.Second / 2)
	defer func() { rebroadcastDelay.Set(prev) }()

	peers := sg.Instances(2)
	peerA := peers[0]
	peerB := peers[1]

	t.Logf("Session %v\n", peerA.Peer)
	t.Logf("Session %v\n", peerB.Peer)

	timeout := time.Second
	waitTime := time.Second * 5

	alpha := bg.Next()
	// peerA requests and waits for block alpha
	ctx, cancel := context.WithTimeout(context.TODO(), waitTime)
	defer cancel()
	alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()})
	if err != nil {
		t.Fatal(err)
	}

	// peerB announces to the network that it has block alpha
	ctx, cancel = context.WithTimeout(context.TODO(), timeout)
	defer cancel()
	err = peerB.Exchange.HasBlock(ctx, alpha)
	if err != nil {
		t.Fatal(err)
	}

	// At some point, peerA should get alpha (or timeout)
	blkrecvd, ok := <-alphaPromise
	if !ok {
		t.Fatal("context timed out and broke promise channel!")
	}

	if blkrecvd.Key() != alpha.Key() {
		t.Fatal("Wrong block!")
	}

}
Example #4
func (d *tcpDialer) reuseDial(raddr ma.Multiaddr) (manet.Conn, error) {
	logdial := lgbl.Dial("conn", "", "", d.laddr, raddr)
	rpev := log.EventBegin(context.TODO(), "tptDialReusePort", logdial)

	network, netraddr, err := manet.DialArgs(raddr)
	if err != nil {
		return nil, err
	}

	con, err := d.rd.Dial(network, netraddr)
	if err == nil {
		logdial["reuseport"] = "success"
		rpev.Done()
		return manet.WrapNetConn(con)
	}

	if !ReuseErrShouldRetry(err) {
		logdial["reuseport"] = "failure"
		logdial["error"] = err
		rpev.Done()
		return nil, err
	}

	logdial["reuseport"] = "retry"
	logdial["error"] = err
	rpev.Done()

	return d.madialer.Dial(raddr)
}
Example #5
func TestBalancedDag(t *testing.T) {
	ds := mdtest.Mock()
	buf := make([]byte, 10000)
	u.NewTimeSeededRand().Read(buf)
	r := bytes.NewReader(buf)

	nd, err := BuildDagFromReader(ds, chunk.DefaultSplitter(r), nil)
	if err != nil {
		t.Fatal(err)
	}

	dr, err := uio.NewDagReader(context.TODO(), nd, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(dr)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(out, buf) {
		t.Fatal("bad read")
	}
}
Example #6
func testInsert(t *testing.T, e *Editor, path, data string, create bool, experr string) {
	child := &dag.Node{Data: []byte(data)}
	ck, err := e.ds.Add(child)
	if err != nil {
		t.Fatal(err)
	}

	var c func() *dag.Node
	if create {
		c = func() *dag.Node {
			return &dag.Node{}
		}
	}

	err = e.InsertNodeAtPath(context.TODO(), path, ck, c)
	if experr != "" {
		var got string
		if err != nil {
			got = err.Error()
		}
		if got != experr {
			t.Fatalf("expected '%s' but got '%s'", experr, got)
		}
		return
	}

	if err != nil {
		t.Fatal(err)
	}

	assertNodeAtPath(t, e.ds, e.root, path, ck)
}
Example #7
func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.Mount) {
	maybeSkipFuseTests(t)

	var err error
	if node == nil {
		node, err = coremock.NewMockNode()
		if err != nil {
			t.Fatal(err)
		}

		ipnsfs, err := nsfs.NewFilesystem(context.TODO(), node.DAG, node.Namesys, node.Pinning, node.PrivateKey)
		if err != nil {
			t.Fatal(err)
		}

		node.IpnsFs = ipnsfs
	}

	fs, err := NewFileSystem(node, node.PrivateKey, "", "")
	if err != nil {
		t.Fatal(err)
	}
	mnt, err := fstest.MountedT(t, fs)
	if err != nil {
		t.Fatal(err)
	}

	return node, mnt
}
Example #8
// InitializeKeyspace sets the ipns record for the given key to
// point to an empty directory.
func InitializeKeyspace(n *core.IpfsNode, key ci.PrivKey) error {
	emptyDir := &mdag.Node{Data: ft.FolderPBData()}
	nodek, err := n.DAG.Add(emptyDir)
	if err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
	defer cancel()

	err = n.Pinning.Pin(ctx, emptyDir, false)
	if err != nil {
		return err
	}

	err = n.Pinning.Flush()
	if err != nil {
		return err
	}

	pub := nsys.NewRoutingPublisher(n.Routing)
	err = pub.Publish(n.Context(), key, path.FromKey(nodek))
	if err != nil {
		return err
	}

	return nil
}
Example #9
// Recursive call for verifying the structure of a trickledag
func verifyTDagRec(nd *dag.Node, depth, direct, layerRepeat int, ds dag.DAGService) error {
	if depth == 0 {
		// zero depth dag is raw data block
		if len(nd.Links) > 0 {
			return errors.New("expected direct block")
		}

		pbn, err := ft.FromBytes(nd.Data)
		if err != nil {
			return err
		}

		if pbn.GetType() != ft.TRaw {
			return errors.New("Expected raw block")
		}
		return nil
	}

	// Verify this is a branch node
	pbn, err := ft.FromBytes(nd.Data)
	if err != nil {
		return err
	}

	if pbn.GetType() != ft.TFile {
		return errors.New("expected file as branch node")
	}

	if len(pbn.Data) > 0 {
		return errors.New("branch node should not have data")
	}

	for i := 0; i < len(nd.Links); i++ {
		ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
		defer cancel()
		child, err := nd.Links[i].GetNode(ctx, ds)
		if err != nil {
			return err
		}

		if i < direct {
			// Direct blocks
			err := verifyTDagRec(child, 0, direct, layerRepeat, ds)
			if err != nil {
				return err
			}
		} else {
			// Recursive trickle dags
			rdepth := ((i - direct) / layerRepeat) + 1
			if rdepth >= depth && depth > 0 {
				return errors.New("Child dag was too deep!")
			}
			err := verifyTDagRec(child, rdepth, direct, layerRepeat, ds)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
Example #10
func TestGetBlocksSequential(t *testing.T) {
	var servs = Mocks(t, 4)
	for _, s := range servs {
		defer s.Close()
	}
	bg := blocksutil.NewBlockGenerator()
	blks := bg.Blocks(50)

	var keys []key.Key
	for _, blk := range blks {
		keys = append(keys, blk.Key())
		servs[0].AddBlock(blk)
	}

	t.Log("one instance at a time, get blocks concurrently")

	for i := 1; i < len(servs); i++ {
		ctx, cancel := context.WithTimeout(context.TODO(), time.Second*50)
		defer cancel()
		out := servs[i].GetBlocks(ctx, keys)
		gotten := make(map[key.Key]*blocks.Block)
		for blk := range out {
			if _, ok := gotten[blk.Key()]; ok {
				t.Fatal("Got duplicate block!")
			}
			gotten[blk.Key()] = blk
		}
		if len(gotten) != len(blks) {
			t.Fatalf("Didnt get enough blocks back: %d/%d", len(gotten), len(blks))
		}
	}
}
Example #11
func TestFetchGraph(t *testing.T) {
	var dservs []DAGService
	bsis := bstest.Mocks(2)
	for _, bsi := range bsis {
		dservs = append(dservs, NewDAGService(bsi))
	}

	read := io.LimitReader(u.NewTimeSeededRand(), 1024*32)
	root, err := imp.BuildDagFromReader(dservs[0], chunk.NewSizeSplitter(read, 512))
	if err != nil {
		t.Fatal(err)
	}

	err = FetchGraph(context.TODO(), root, dservs[1])
	if err != nil {
		t.Fatal(err)
	}

	// create an offline dagstore and ensure all blocks were fetched
	bs := bserv.New(bsis[1].Blockstore, offline.Exchange(bsis[1].Blockstore))

	offlineDS := NewDAGService(bs)
	ks := key.NewKeySet()

	err = EnumerateChildren(context.Background(), offlineDS, root, ks)
	if err != nil {
		t.Fatal(err)
	}
}
Example #12
func connect(args args) error {
	p, ps, err := setupPeer(args)
	if err != nil {
		return err
	}

	var conn net.Conn
	if args.listen {
		conn, err = Listen(args.localAddr)
	} else {
		conn, err = Dial(args.localAddr, args.remoteAddr)
	}
	if err != nil {
		return err
	}

	// log everything that goes through conn
	rwc := &logRW{n: "conn", rw: conn}

	// OK, let's set up the channel.
	sk := ps.PrivKey(p)
	sg := secio.SessionGenerator{LocalID: p, PrivateKey: sk}
	sess, err := sg.NewSession(context.TODO(), rwc)
	if err != nil {
		return err
	}
	out("remote peer id: %s", sess.RemotePeer())
	netcat(sess.ReadWriter().(io.ReadWriteCloser))
	return nil
}
Example #13
// getNode returns the node for link. If it returns an error,
// stop processing. If it returns a nil node, just skip it.
//
// The error handling is a little complicated.
func (t *traversal) getNode(link *mdag.Link) (*mdag.Node, error) {

	getNode := func(l *mdag.Link) (*mdag.Node, error) {
		ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
		defer cancel()

		next, err := l.GetNode(ctx, t.opts.DAG)
		if err != nil {
			return nil, err
		}

		skip, err := t.shouldSkip(next)
		if skip {
			next = nil
		}
		return next, err
	}

	next, err := getNode(link)
	if err != nil && t.opts.ErrFunc != nil { // attempt recovery.
		err = t.opts.ErrFunc(err)
		next = nil // skip regardless
	}
	return next, err
}
Example #14
// GetDiagnostic runs a diagnostics request across the entire network
func (d *Diagnostics) GetDiagnostic(timeout time.Duration) ([]*DiagInfo, error) {
	log.Debug("Getting diagnostic.")
	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
	defer cancel()

	diagID := newID()
	d.diagLock.Lock()
	d.diagMap[diagID] = time.Now()
	d.diagLock.Unlock()

	log.Debug("Begin Diagnostic")

	peers := d.getPeers()
	log.Debugf("Sending diagnostic request to %d peers.", len(peers))

	pmes := newMessage(diagID)

	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement) // decrease timeout per hop
	dpeers, err := d.getDiagnosticFromPeers(ctx, peers, pmes)
	if err != nil {
		return nil, fmt.Errorf("diagnostic from peers err: %s", err)
	}

	di := d.getDiagInfo()
	out := []*DiagInfo{di}
	for dpi := range dpeers {
		out = append(out, dpi)
	}
	return out, nil
}
Example #15
func (pn *peernet) Adapter(p testutil.Identity) bsnet.BitSwapNetwork {
	client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address())
	if err != nil {
		panic(err.Error())
	}
	routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore())
	return bsnet.NewFromIpfsHost(client, routing)
}
Example #16
func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) {
	n := New()
	defer n.Shutdown()
	ch := n.Subscribe(context.TODO()) // no keys provided
	if _, ok := <-ch; ok {
		t.Fatal("should be closed if no keys provided")
	}
}
Example #17
func (n *IpfsNode) HandlePeerFound(p peer.PeerInfo) {
	log.Warning("trying peer info: ", p)
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*10)
	defer cancel()
	err := n.PeerHost.Connect(ctx, p)
	if err != nil {
		log.Warning("Failed to connect to peer found by discovery: ", err)
	}
}
Example #18
func logProtocolMismatchDisconnect(c inet.Conn, protocol, agent string) {
	lm := make(lgbl.DeferredMap)
	lm["remotePeer"] = func() interface{} { return c.RemotePeer().Pretty() }
	lm["remoteAddr"] = func() interface{} { return c.RemoteMultiaddr().String() }
	lm["protocolVersion"] = protocol
	lm["agentVersion"] = agent
	log.Event(context.TODO(), "IdentifyProtocolMismatch", lm)
	log.Debug("IdentifyProtocolMismatch %s %s %s (disconnected)", c.RemotePeer(), protocol, agent)
}
Example #19
func catNode(ds dag.DAGService, nd *dag.Node) ([]byte, error) {
	r, err := uio.NewDagReader(context.TODO(), nd, ds)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	return ioutil.ReadAll(r)
}
Example #20
func (ids *IDService) IdentifyConn(c inet.Conn) {
	ids.currmu.Lock()
	if wait, found := ids.currid[c]; found {
		ids.currmu.Unlock()
		log.Debugf("IdentifyConn called twice on: %s", c)
		<-wait // already identifying it. wait for it.
		return
	}
	ids.currid[c] = make(chan struct{})
	ids.currmu.Unlock()

	s, err := c.NewStream()
	if err != nil {
		log.Debugf("error opening initial stream for %s", ID)
		log.Event(context.TODO(), "IdentifyOpenFailed", c.RemotePeer())
		c.Close()
		return
	}

	bwc := ids.Host.GetBandwidthReporter()
	s = mstream.WrapStream(s, ID, bwc)

	// ok give the response to our handler.
	if err := msmux.SelectProtoOrFail(ID, s); err != nil {
		log.Debugf("error writing stream header for %s", ID)
		log.Event(context.TODO(), "IdentifyOpenFailed", c.RemotePeer())
		s.Close()
		return
	}

	ids.ResponseHandler(s)

	ids.currmu.Lock()
	ch, found := ids.currid[c]
	delete(ids.currid, c)
	ids.currmu.Unlock()

	if !found {
		log.Debugf("IdentifyConn failed to find channel (programmer error) for %s", c)
		return
	}

	close(ch) // release everyone waiting.
}
Example #21
func TestInitialization(t *testing.T) {
	ctx := context.TODO()
	id := testIdentity

	good := []*config.Config{
		{
			Identity: id,
			Datastore: config.Datastore{
				Type: "memory",
			},
			Addresses: config.Addresses{
				Swarm: []string{"/ip4/0.0.0.0/tcp/4001"},
				API:   "/ip4/127.0.0.1/tcp/8000",
			},
		},

		{
			Identity: id,
			Datastore: config.Datastore{
				Type: "leveldb",
				Path: ".testdb",
			},
			Addresses: config.Addresses{
				Swarm: []string{"/ip4/0.0.0.0/tcp/4001"},
				API:   "/ip4/127.0.0.1/tcp/8000",
			},
		},
	}

	bad := []*config.Config{
		{},
		{Datastore: config.Datastore{Type: "memory"}},
	}

	for i, c := range good {
		r := &repo.Mock{
			C: *c,
			D: testutil.ThreadSafeCloserMapDatastore(),
		}
		n, err := NewIPFSNode(ctx, Standard(r, false))
		if n == nil || err != nil {
			t.Error("Should have constructed.", i, err)
		}
	}

	for i, c := range bad {
		r := &repo.Mock{
			C: *c,
			D: testutil.ThreadSafeCloserMapDatastore(),
		}
		n, err := NewIPFSNode(ctx, Standard(r, false))
		if n != nil || err == nil {
			t.Error("Should have failed to construct.", i)
		}
	}
}
Example #22
// WARNING: this uses RandTestBogusIdentity. DO NOT USE outside of tests!
func NewTestSessionGenerator(
	net tn.Network) SessionGenerator {
	ctx, cancel := context.WithCancel(context.TODO())
	return SessionGenerator{
		net:    net,
		seq:    0,
		ctx:    ctx, // TODO take ctx as param to Next, Instances
		cancel: cancel,
	}
}
Example #23
func TestMetadata(t *testing.T) {
	// Make some random node
	ds := getDagserv(t)
	data := make([]byte, 1000)
	u.NewTimeSeededRand().Read(data)
	r := bytes.NewReader(data)
	nd, err := importer.BuildDagFromReader(ds, chunk.DefaultSplitter(r), nil)
	if err != nil {
		t.Fatal(err)
	}

	k, err := nd.Key()
	if err != nil {
		t.Fatal(err)
	}

	m := new(ft.Metadata)
	m.MimeType = "THIS IS A TEST"

	// Such effort, many compromise
	ipfsnode := &core.IpfsNode{DAG: ds}

	mdk, err := AddMetadataTo(ipfsnode, k.B58String(), m)
	if err != nil {
		t.Fatal(err)
	}

	rec, err := Metadata(ipfsnode, mdk)
	if err != nil {
		t.Fatal(err)
	}
	if rec.MimeType != m.MimeType {
		t.Fatalf("something went wrong in conversion: '%s' != '%s'", rec.MimeType, m.MimeType)
	}

	retnode, err := ds.Get(context.Background(), key.B58KeyDecode(mdk))
	if err != nil {
		t.Fatal(err)
	}

	ndr, err := uio.NewDagReader(context.TODO(), retnode, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(ndr)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(out, data) {
		t.Fatal("read incorrect data")
	}
}
Example #24
func (n *UnixfsNode) GetChild(i int, ds dag.DAGService) (*UnixfsNode, error) {
	ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
	defer cancel()

	nd, err := n.node.Links[i].GetNode(ctx, ds)
	if err != nil {
		return nil, err
	}

	return NewUnixfsNodeFromDag(nd)
}
Example #25
func assertCanGet(t *testing.T, ds DAGService, n *Node) {
	k, err := n.Key()
	if err != nil {
		t.Fatal(err)
	}

	_, err = ds.Get(context.TODO(), k)
	if err != nil {
		t.Fatal(err)
	}
}
Example #26
func (n *network) deliver(
	r bsnet.Receiver, from peer.ID, message bsmsg.BitSwapMessage) error {
	if message == nil || from == "" {
		return errors.New("Invalid input")
	}

	n.delay.Wait()

	r.ReceiveMessage(context.TODO(), from, message)
	return nil
}
Example #27
// LoadPinner loads a pinner and its keysets from the given datastore
func LoadPinner(d ds.Datastore, dserv mdag.DAGService) (Pinner, error) {
	p := new(pinner)

	rootKeyI, err := d.Get(pinDatastoreKey)
	if err != nil {
		return nil, fmt.Errorf("cannot load pin state: %v", err)
	}
	rootKeyBytes, ok := rootKeyI.([]byte)
	if !ok {
		return nil, fmt.Errorf("cannot load pin state: %s was not bytes", pinDatastoreKey)
	}

	rootKey := key.Key(rootKeyBytes)

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
	defer cancel()

	root, err := dserv.Get(ctx, rootKey)
	if err != nil {
		return nil, fmt.Errorf("cannot find pinning root object: %v", err)
	}

	internalPin := map[key.Key]struct{}{
		rootKey: struct{}{},
	}
	recordInternal := func(k key.Key) {
		internalPin[k] = struct{}{}
	}

	{ // load recursive set
		recurseKeys, err := loadSet(ctx, dserv, root, linkRecursive, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load recursive pins: %v", err)
		}
		p.recursePin = set.SimpleSetFromKeys(recurseKeys)
	}

	{ // load direct set
		directKeys, err := loadSet(ctx, dserv, root, linkDirect, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load direct pins: %v", err)
		}
		p.directPin = set.SimpleSetFromKeys(directKeys)
	}

	p.internalPin = internalPin

	// assign services
	p.dserv = dserv
	p.dstore = d

	return p, nil
}
Example #28
// childFromDag searches through this directory's dag node for a child link
// with the given name
func (d *Directory) childFromDag(name string) (*dag.Node, error) {
	for _, lnk := range d.node.Links {
		if lnk.Name == name {
			ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
			defer cancel()

			return lnk.GetNode(ctx, d.fs.dserv)
		}
	}

	return nil, os.ErrNotExist
}
Example #29
func addNode(n *core.IpfsNode, node *merkledag.Node) error {
	err := n.DAG.AddRecursive(node) // add the file to the graph + local storage
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
	defer cancel()
	err = n.Pinning.Pin(ctx, node, true) // ensure we keep it
	if err != nil {
		return err
	}
	return nil
}
Example #30
// Flush encodes and writes pinner keysets to the datastore
func (p *pinner) Flush() error {
	p.lock.Lock()
	defer p.lock.Unlock()

	ctx := context.TODO()

	internalPin := make(map[key.Key]struct{})
	recordInternal := func(k key.Key) {
		internalPin[k] = struct{}{}
	}

	root := &mdag.Node{}
	{
		n, err := storeSet(ctx, p.dserv, p.directPin.GetKeys(), recordInternal)
		if err != nil {
			return err
		}
		if err := root.AddNodeLink(linkDirect, n); err != nil {
			return err
		}
	}

	{
		n, err := storeSet(ctx, p.dserv, p.recursePin.GetKeys(), recordInternal)
		if err != nil {
			return err
		}
		if err := root.AddNodeLink(linkRecursive, n); err != nil {
			return err
		}
	}

	// add the empty node; it's referenced by the pin sets but never created
	_, err := p.dserv.Add(new(mdag.Node))
	if err != nil {
		return err
	}

	k, err := p.dserv.Add(root)
	if err != nil {
		return err
	}

	internalPin[k] = struct{}{}
	if err := p.dstore.Put(pinDatastoreKey, []byte(k)); err != nil {
		return fmt.Errorf("cannot store pin state: %v", err)
	}
	p.internalPin = internalPin
	return nil
}
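A closing note on the common thread in the examples above: each one passes context.TODO() where a real context is expected. As a rough, illustrative sketch (the function name fetch, the timeout, and the fake work are hypothetical and not taken from any of the examples), the usual replacement is to accept a caller-supplied context and derive a deadline-bound child from it, always releasing the timer with cancel():

package main

import (
	"context"
	"fmt"
	"time"
)

// fetch stands in for any call site above that currently uses context.TODO():
// it derives a bounded child context from the caller's context and releases
// the associated timer via cancel() on every return path.
func fetch(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	select {
	case <-time.After(100 * time.Millisecond): // placeholder for real work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	if err := fetch(context.Background()); err != nil {
		fmt.Println("fetch failed:", err)
	}
}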