// InitializeKeyspace sets the ipns record for the given key to
// point to an empty directory.
// TODO: this doesn't feel like it belongs here
func InitializeKeyspace(ctx context.Context, ds dag.DAGService, pub Publisher, pins pin.Pinner, key ci.PrivKey) error {
	emptyDir := &dag.Node{Data: ft.FolderPBData()}
	nodek, err := ds.Add(emptyDir)
	if err != nil {
		return err
	}

	// pin recursively because this might already be pinned
	// and doing a direct pin would throw an error in that case
	err = pins.Pin(ctx, emptyDir, true)
	if err != nil {
		return err
	}

	err = pins.Flush()
	if err != nil {
		return err
	}

	err = pub.Publish(ctx, key, path.FromKey(nodek))
	if err != nil {
		return err
	}

	return nil
}
func insertNodeAtPath(ctx context.Context, ds dag.DAGService, root *dag.Node, path []string, toinsert key.Key, create func() *dag.Node) (*dag.Node, error) {
	if len(path) == 1 {
		return addLink(ctx, ds, root, path[0], toinsert)
	}

	nd, err := root.GetLinkedNode(ctx, ds, path[0])
	if err != nil {
		// if a 'create' callback is provided, create directories on the way down as needed
		if err == dag.ErrNotFound && create != nil {
			nd = create()
		} else {
			return nil, err
		}
	}

	ndprime, err := insertNodeAtPath(ctx, ds, nd, path[1:], toinsert, create)
	if err != nil {
		return nil, err
	}

	_ = root.RemoveNodeLink(path[0])
	err = root.AddNodeLinkClean(path[0], ndprime)
	if err != nil {
		return nil, err
	}

	_, err = ds.Add(root)
	if err != nil {
		return nil, err
	}

	return root, nil
}
func copyDag(nd *dag.ProtoNode, from, to dag.DAGService) error {
	_, err := to.Add(nd)
	if err != nil {
		return err
	}

	for _, lnk := range nd.Links() {
		child, err := lnk.GetNode(context.Background(), from)
		if err != nil {
			if err == dag.ErrNotFound {
				// not found means we didn't modify it, and it should
				// already be in the target datastore
				continue
			}
			return err
		}

		childpb, ok := child.(*dag.ProtoNode)
		if !ok {
			return dag.ErrNotProtobuf
		}

		err = copyDag(childpb, from, to)
		if err != nil {
			return err
		}
	}
	return nil
}
func statNode(ds dag.DAGService, fsn mfs.FSNode) (*Object, error) {
	nd, err := fsn.GetNode()
	if err != nil {
		return nil, err
	}

	// add to dagserv to ensure it's available
	k, err := ds.Add(nd)
	if err != nil {
		return nil, err
	}

	d, err := ft.FromBytes(nd.Data)
	if err != nil {
		return nil, err
	}

	cumulsize, err := nd.Size()
	if err != nil {
		return nil, err
	}

	return &Object{
		Hash:           k.B58String(),
		Blocks:         len(nd.Links),
		Size:           d.GetFilesize(),
		CumulativeSize: cumulsize,
	}, nil
}
func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname string, childnd *dag.Node) (*dag.Node, error) {
	if childname == "" {
		return nil, errors.New("cannot create link with no name!")
	}

	// ensure that the node we are adding is in the dagservice
	_, err := ds.Add(childnd)
	if err != nil {
		return nil, err
	}

	_ = ds.Remove(root)

	// ensure no link with that name already exists
	_ = root.RemoveNodeLink(childname) // ignore error, only option is ErrNotFound

	if err := root.AddNodeLinkClean(childname, childnd); err != nil {
		return nil, err
	}

	if _, err := ds.Add(root); err != nil {
		return nil, err
	}
	return root, nil
}
func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
	rall, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}

	r = bytes.NewReader(rall)

	tr := tar.NewReader(r)

	root := new(dag.Node)
	root.Data = []byte("ipfs/tar")

	e := dagutil.NewDagEditor(root, ds)

	for {
		h, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}

		header := new(dag.Node)

		headerBytes, err := marshalHeader(h)
		if err != nil {
			return nil, err
		}

		header.Data = headerBytes

		if h.Size > 0 {
			spl := chunk.NewRabin(tr, uint64(chunk.DefaultBlockSize))
			nd, err := importer.BuildDagFromReader(ds, spl)
			if err != nil {
				return nil, err
			}

			err = header.AddNodeLinkClean("data", nd)
			if err != nil {
				return nil, err
			}
		}

		_, err = ds.Add(header)
		if err != nil {
			return nil, err
		}

		path := escapePath(h.Name)
		err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.Node { return new(dag.Node) })
		if err != nil {
			return nil, err
		}
	}

	return e.Finalize(ds)
}
// LoadPinner loads a pinner and its keysets from the given datastore
func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error) {
	p := new(pinner)

	rootKeyI, err := d.Get(pinDatastoreKey)
	if err != nil {
		return nil, fmt.Errorf("cannot load pin state: %v", err)
	}
	rootKeyBytes, ok := rootKeyI.([]byte)
	if !ok {
		return nil, fmt.Errorf("cannot load pin state: %s was not bytes", pinDatastoreKey)
	}

	rootCid, err := cid.Cast(rootKeyBytes)
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
	defer cancel()

	root, err := internal.Get(ctx, rootCid)
	if err != nil {
		return nil, fmt.Errorf("cannot find pinning root object: %v", err)
	}

	rootpb, ok := root.(*mdag.ProtoNode)
	if !ok {
		return nil, mdag.ErrNotProtobuf
	}

	internalset := cid.NewSet()
	internalset.Add(rootCid)
	recordInternal := internalset.Add

	{ // load recursive set
		recurseKeys, err := loadSet(ctx, internal, rootpb, linkRecursive, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load recursive pins: %v", err)
		}
		p.recursePin = cidSetWithValues(recurseKeys)
	}

	{ // load direct set
		directKeys, err := loadSet(ctx, internal, rootpb, linkDirect, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load direct pins: %v", err)
		}
		p.directPin = cidSetWithValues(directKeys)
	}

	p.internalPin = internalset

	// assign services
	p.dserv = dserv
	p.dstore = d
	p.internal = internal

	return p, nil
}
func addLink(t *testing.T, ds mdag.DAGService, a, b node.Node) {
	to := string(a.(*mdag.ProtoNode).Data()) + "2" + string(b.(*mdag.ProtoNode).Data())
	if _, err := ds.Add(b); err != nil {
		t.Error(err)
	}
	if err := a.(*mdag.ProtoNode).AddNodeLink(to, b.(*mdag.ProtoNode)); err != nil {
		t.Error(err)
	}
}
// LoadPinner loads a pinner and its keysets from the given datastore
func LoadPinner(d ds.Datastore, dserv mdag.DAGService) (Pinner, error) {
	p := new(pinner)

	rootKeyI, err := d.Get(pinDatastoreKey)
	if err != nil {
		return nil, fmt.Errorf("cannot load pin state: %v", err)
	}
	rootKeyBytes, ok := rootKeyI.([]byte)
	if !ok {
		return nil, fmt.Errorf("cannot load pin state: %s was not bytes", pinDatastoreKey)
	}

	rootKey := key.Key(rootKeyBytes)

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
	defer cancel()

	root, err := dserv.Get(ctx, rootKey)
	if err != nil {
		return nil, fmt.Errorf("cannot find pinning root object: %v", err)
	}

	internalPin := map[key.Key]struct{}{
		rootKey: struct{}{},
	}
	recordInternal := func(k key.Key) {
		internalPin[k] = struct{}{}
	}

	{ // load recursive set
		recurseKeys, err := loadSet(ctx, dserv, root, linkRecursive, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load recursive pins: %v", err)
		}
		p.recursePin = set.SimpleSetFromKeys(recurseKeys)
	}

	{ // load direct set
		directKeys, err := loadSet(ctx, dserv, root, linkDirect, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load direct pins: %v", err)
		}
		p.directPin = set.SimpleSetFromKeys(directKeys)
	}

	p.internalPin = internalPin

	// assign services
	p.dserv = dserv
	p.dstore = d

	return p, nil
}
func NewDataFileReader(ctx context.Context, n *mdag.Node, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
	fctx, cancel := context.WithCancel(ctx)
	promises := serv.GetDAG(fctx, n)
	return &DagReader{
		node:     n,
		serv:     serv,
		buf:      NewRSNCFromBytes(pb.GetData()),
		promises: promises,
		ctx:      fctx,
		cancel:   cancel,
		pbdata:   pb,
	}
}
func storeSet(ctx context.Context, dag merkledag.DAGService, cids []*cid.Cid, internalKeys keyObserver) (*merkledag.ProtoNode, error) {
	iter := getCidListIterator(cids)

	n, err := storeItems(ctx, dag, uint64(len(cids)), iter, internalKeys)
	if err != nil {
		return nil, err
	}

	c, err := dag.Add(n)
	if err != nil {
		return nil, err
	}

	internalKeys(c)

	return n, nil
}
func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.ProtoNode, cs []*Change) (*dag.ProtoNode, error) {
	e := NewDagEditor(nd, ds)
	for _, c := range cs {
		switch c.Type {
		case Add:
			child, err := ds.Get(ctx, c.After)
			if err != nil {
				return nil, err
			}

			childpb, ok := child.(*dag.ProtoNode)
			if !ok {
				return nil, dag.ErrNotProtobuf
			}

			err = e.InsertNodeAtPath(ctx, c.Path, childpb, nil)
			if err != nil {
				return nil, err
			}

		case Remove:
			err := e.RmLink(ctx, c.Path)
			if err != nil {
				return nil, err
			}

		case Mod:
			err := e.RmLink(ctx, c.Path)
			if err != nil {
				return nil, err
			}
			child, err := ds.Get(ctx, c.After)
			if err != nil {
				return nil, err
			}

			childpb, ok := child.(*dag.ProtoNode)
			if !ok {
				return nil, dag.ErrNotProtobuf
			}

			err = e.InsertNodeAtPath(ctx, c.Path, childpb, nil)
			if err != nil {
				return nil, err
			}
		}
	}

	return e.Finalize(ds)
}
func Descendants(ds dag.DAGService, set key.KeySet, roots []key.Key) error {
	for _, k := range roots {
		set.Add(k)
		nd, err := ds.Get(context.Background(), k)
		if err != nil {
			return err
		}

		// EnumerateChildren recursively walks the dag and adds the keys to the given set
		err = dag.EnumerateChildren(context.Background(), ds, nd, set)
		if err != nil {
			return err
		}
	}

	return nil
}
func Descendants(ctx context.Context, ds dag.DAGService, set key.KeySet, roots []key.Key, bestEffort bool) error {
	for _, k := range roots {
		set.Add(k)
		nd, err := ds.Get(ctx, k)
		if err != nil {
			return err
		}

		// EnumerateChildren recursively walks the dag and adds the keys to the given set
		err = dag.EnumerateChildren(ctx, ds, nd, set, bestEffort)
		if err != nil {
			return err
		}
	}

	return nil
}
func storeMultiset(ctx context.Context, dag merkledag.DAGService, refcounts map[key.Key]uint64, internalKeys keyObserver) (*merkledag.Node, error) {
	// make a working copy of the refcounts
	refcounts = copyRefcounts(refcounts)

	iter := func() (k key.Key, data []byte, ok bool) {
		// Every call of this function returns the next refcount item.
		//
		// This function splits out the uint64 reference counts as
		// smaller increments, as fits in type refcount. Most of the
		// time the refcount will fit inside just one, so this saves
		// space.
		//
		// We use range here to pick an arbitrary item in the map, but
		// not really iterate the map.
		for k, refs := range refcounts {
			// Max value a single multiset item can store
			num := ^refcount(0)
			if refs <= uint64(num) {
				// Remaining count fits in a single item; remove the
				// key from the map.
				num = refcount(refs)
				delete(refcounts, k)
			} else {
				// Count is too large to fit in one item, the key will
				// repeat in some later call.
				refcounts[k] -= uint64(num)
			}
			return k, num.Bytes(), true
		}
		return "", nil, false
	}

	n, err := storeItems(ctx, dag, uint64(len(refcounts)), iter, internalKeys)
	if err != nil {
		return nil, err
	}
	k, err := dag.Add(n)
	if err != nil {
		return nil, err
	}
	internalKeys(k)
	return n, nil
}
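// Illustrative sketch only (not part of the pinner code; splitRefcount is a
// hypothetical name): it shows the splitting idea used by the iterator inside
// storeMultiset above, where a uint64 reference count is emitted as a series
// of increments that each fit in a small fixed-width counter, so the common
// small counts take a single item. Assumes only the Go standard library.
func splitRefcount(refs uint64) []uint8 {
	const max = ^uint8(0) // largest value a single increment can hold (255)
	var out []uint8
	for refs > uint64(max) {
		out = append(out, max)
		refs -= uint64(max)
	}
	return append(out, uint8(refs))
}

// For example, splitRefcount(3) yields [3], while splitRefcount(600) yields
// [255 255 90]; summing the increments recovers the original count.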
func storeSet(ctx context.Context, dag merkledag.DAGService, keys []key.Key, internalKeys keyObserver) (*merkledag.Node, error) {
	iter := func() (k key.Key, data []byte, ok bool) {
		if len(keys) == 0 {
			return "", nil, false
		}
		first := keys[0]
		keys = keys[1:]
		return first, nil, true
	}

	n, err := storeItems(ctx, dag, uint64(len(keys)), iter, internalKeys)
	if err != nil {
		return nil, err
	}
	k, err := dag.Add(n)
	if err != nil {
		return nil, err
	}
	internalKeys(k)
	return n, nil
}
func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname string, childk key.Key) (*dag.Node, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	childnd, err := ds.Get(ctx, childk)
	if err != nil {
		cancel()
		return nil, err
	}
	cancel()

	err = root.AddNodeLinkClean(childname, childnd)
	if err != nil {
		return nil, err
	}

	_, err = ds.Add(root)
	if err != nil {
		return nil, err
	}
	return root, nil
}
func statNode(ds dag.DAGService, fsn mfs.FSNode) (*Object, error) {
	nd, err := fsn.GetNode()
	if err != nil {
		return nil, err
	}

	// add to dagserv to ensure it's available
	k, err := ds.Add(nd)
	if err != nil {
		return nil, err
	}

	d, err := ft.FromBytes(nd.Data)
	if err != nil {
		return nil, err
	}

	cumulsize, err := nd.Size()
	if err != nil {
		return nil, err
	}

	var ndtype string
	switch fsn.Type() {
	case mfs.TDir:
		ndtype = "directory"
	case mfs.TFile:
		ndtype = "file"
	default:
		return nil, fmt.Errorf("unrecognized node type: %s", fsn.Type())
	}

	return &Object{
		Hash:           k.B58String(),
		Blocks:         len(nd.Links),
		Size:           d.GetFilesize(),
		CumulativeSize: cumulsize,
		Type:           ndtype,
	}, nil
}
func rmLink(ctx context.Context, ds dag.DAGService, root *dag.Node, path []string) (*dag.Node, error) {
	if len(path) == 1 {
		// base case, remove node in question
		err := root.RemoveNodeLink(path[0])
		if err != nil {
			return nil, err
		}

		_, err = ds.Add(root)
		if err != nil {
			return nil, err
		}

		return root, nil
	}

	nd, err := root.GetLinkedNode(ctx, ds, path[0])
	if err != nil {
		return nil, err
	}

	nnode, err := rmLink(ctx, ds, nd, path[1:])
	if err != nil {
		return nil, err
	}

	_ = root.RemoveNodeLink(path[0])
	err = root.AddNodeLinkClean(path[0], nnode)
	if err != nil {
		return nil, err
	}

	_, err = ds.Add(root)
	if err != nil {
		return nil, err
	}

	return root, nil
}
func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Change) (*dag.Node, error) {
	e := NewDagEditor(ds, nd)
	for _, c := range cs {
		switch c.Type {
		case Add:
			child, err := ds.Get(ctx, c.After)
			if err != nil {
				return nil, err
			}
			err = e.InsertNodeAtPath(ctx, c.Path, child, nil)
			if err != nil {
				return nil, err
			}

		case Remove:
			err := e.RmLink(ctx, c.Path)
			if err != nil {
				return nil, err
			}

		case Mod:
			err := e.RmLink(ctx, c.Path)
			if err != nil {
				return nil, err
			}
			child, err := ds.Get(ctx, c.After)
			if err != nil {
				return nil, err
			}
			err = e.InsertNodeAtPath(ctx, c.Path, child, nil)
			if err != nil {
				return nil, err
			}
		}
	}

	return e.GetNode(), nil
}
func insertNodeAtPath(ctx context.Context, ds dag.DAGService, root *dag.Node, path []string, toinsert key.Key) (*dag.Node, error) {
	if len(path) == 1 {
		return addLink(ctx, ds, root, path[0], toinsert)
	}

	child, err := root.GetNodeLink(path[0])
	if err != nil {
		return nil, err
	}

	nd, err := child.GetNode(ctx, ds)
	if err != nil {
		return nil, err
	}

	ndprime, err := insertNodeAtPath(ctx, ds, nd, path[1:], toinsert)
	if err != nil {
		return nil, err
	}

	err = root.RemoveNodeLink(path[0])
	if err != nil {
		return nil, err
	}

	err = root.AddNodeLinkClean(path[0], ndprime)
	if err != nil {
		return nil, err
	}

	_, err = ds.Add(root)
	if err != nil {
		return nil, err
	}

	return root, nil
}
func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname string, childk key.Key) (*dag.Node, error) {
	if childname == "" {
		return nil, errors.New("cannot create link with no name!")
	}

	childnd, err := ds.Get(ctx, childk)
	if err != nil {
		return nil, err
	}

	// ensure no link with that name already exists
	_ = root.RemoveNodeLink(childname) // ignore error, only option is ErrNotFound

	err = root.AddNodeLinkClean(childname, childnd)
	if err != nil {
		return nil, err
	}

	_, err = ds.Add(root)
	if err != nil {
		return nil, err
	}
	return root, nil
}
func hasChild(ds mdag.DAGService, root *mdag.Node, child key.Key) (bool, error) {
	for _, lnk := range root.Links {
		k := key.Key(lnk.Hash)
		if k == child {
			return true, nil
		}

		nd, err := ds.Get(context.Background(), k)
		if err != nil {
			return false, err
		}

		has, err := hasChild(ds, nd, child)
		if err != nil {
			return false, err
		}

		if has {
			return has, nil
		}
	}
	return false, nil
}
// dagTruncate truncates the given node to 'size' and returns the modified Node
func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGService) (*mdag.Node, error) {
	if len(nd.Links) == 0 {
		// TODO: this can likely be done without marshaling and remarshaling
		pbn, err := ft.FromBytes(nd.Data)
		if err != nil {
			return nil, err
		}

		nd.Data = ft.WrapData(pbn.Data[:size])
		return nd, nil
	}

	var cur uint64
	end := 0
	var modified *mdag.Node
	ndata := new(ft.FSNode)
	for i, lnk := range nd.Links {
		child, err := lnk.GetNode(ctx, ds)
		if err != nil {
			return nil, err
		}

		childsize, err := ft.DataSize(child.Data)
		if err != nil {
			return nil, err
		}

		// found the child we want to cut
		if size < cur+childsize {
			nchild, err := dagTruncate(ctx, child, size-cur, ds)
			if err != nil {
				return nil, err
			}

			ndata.AddBlockSize(size - cur)

			modified = nchild
			end = i
			break
		}
		cur += childsize
		ndata.AddBlockSize(childsize)
	}

	_, err := ds.Add(modified)
	if err != nil {
		return nil, err
	}

	nd.Links = nd.Links[:end]
	err = nd.AddNodeLinkClean("", modified)
	if err != nil {
		return nil, err
	}

	d, err := ndata.GetBytes()
	if err != nil {
		return nil, err
	}

	nd.Data = d

	// invalidate cache and recompute serialized data
	_, err = nd.Encoded(true)
	if err != nil {
		return nil, err
	}

	return nd, nil
}
func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint64, iter itemIterator, internalKeys keyObserver) (*merkledag.ProtoNode, error) {
	seed, err := randomSeed()
	if err != nil {
		return nil, err
	}

	links := make([]*node.Link, 0, defaultFanout+maxItems)
	for i := 0; i < defaultFanout; i++ {
		links = append(links, &node.Link{Cid: emptyKey})
	}

	// add emptyKey to our set of internal pinset objects
	n := &merkledag.ProtoNode{}
	n.SetLinks(links)
	internalKeys(emptyKey)

	hdr := &pb.Set{
		Version: proto.Uint32(1),
		Fanout:  proto.Uint32(defaultFanout),
		Seed:    proto.Uint32(seed),
	}
	if err := writeHdr(n, hdr); err != nil {
		return nil, err
	}

	if estimatedLen < maxItems {
		// it'll probably fit
		links := n.Links()
		for i := 0; i < maxItems; i++ {
			k, ok := iter()
			if !ok {
				// all done
				break
			}

			links = append(links, &node.Link{Cid: k})
		}

		n.SetLinks(links)

		// sort by hash, also swap item Data
		s := sortByHash{
			links: n.Links()[defaultFanout:],
		}
		sort.Stable(s)
	}

	hashed := make([][]*cid.Cid, defaultFanout)
	for {
		// This loop essentially enumerates every single item in the set
		// and maps them all into a set of buckets. Each bucket will be recursively
		// turned into its own sub-set, and so on down the chain. Each sub-set
		// gets added to the dagservice, and put into its place in a set node's
		// links array.
		//
		// Previously, the bucket was selected by taking an int32 from the hash of
		// the input key + seed. This was erroneous as we would later be assigning
		// the created sub-sets into an array of length 256 by the modulus of the
		// int32 hash value with 256. This resulted in overwriting existing sub-sets
		// and losing pins. The fix (a few lines down from this comment) is to
		// map the hash value down to the 8-bit keyspace here while creating the
		// buckets. This way, we avoid any overlapping later on.
		k, ok := iter()
		if !ok {
			break
		}
		h := hash(seed, k) % defaultFanout
		hashed[h] = append(hashed[h], k)
	}

	for h, items := range hashed {
		if len(items) == 0 {
			// recursion base case
			continue
		}

		childIter := getCidListIterator(items)

		// recursively create a pinset from the items for this bucket index
		child, err := storeItems(ctx, dag, uint64(len(items)), childIter, internalKeys)
		if err != nil {
			return nil, err
		}

		size, err := child.Size()
		if err != nil {
			return nil, err
		}

		childKey, err := dag.Add(child)
		if err != nil {
			return nil, err
		}

		internalKeys(childKey)

		// overwrite the 'empty key' in the existing links array
		n.Links()[h] = &node.Link{
			Cid:  childKey,
			Size: size,
		}
	}

	return n, nil
}
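// Illustrative sketch only (not part of the pin-set code; fanoutDemo, bucketFor,
// and demoBuckets are hypothetical names): it demonstrates the bucketing fix
// described in the comment inside storeItems above. Reducing the 32-bit hash
// into the fanout keyspace at bucket-creation time means every bucket index is
// already a valid, distinct slot in the fixed-size links array, so later
// assignments cannot overwrite each other. Assumes only the Go standard
// library ("hash/fnv").
const fanoutDemo = 256 // mirrors defaultFanout in the code above

func bucketFor(seed uint32, key string) uint32 {
	h := fnv.New32a()
	h.Write([]byte{byte(seed), byte(seed >> 8), byte(seed >> 16), byte(seed >> 24)})
	h.Write([]byte(key))
	return h.Sum32() % fanoutDemo // reduce to the bucket keyspace up front
}

func demoBuckets(keys []string) [][]string {
	buckets := make([][]string, fanoutDemo)
	for _, k := range keys {
		i := bucketFor(42, k)
		buckets[i] = append(buckets[i], k)
	}
	// Each non-empty bucket would become its own recursive sub-set stored at
	// links[i]; because i < fanoutDemo by construction, two distinct buckets
	// can never collide on the same slot.
	return buckets
}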
func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint64, iter itemIterator, internalKeys keyObserver) (*merkledag.Node, error) {
	seed, err := randomSeed()
	if err != nil {
		return nil, err
	}
	n := &merkledag.Node{
		Links: make([]*merkledag.Link, 0, defaultFanout+maxItems),
	}
	for i := 0; i < defaultFanout; i++ {
		n.Links = append(n.Links, &merkledag.Link{Hash: emptyKey.ToMultihash()})
	}
	internalKeys(emptyKey)
	hdr := &pb.Set{
		Version: proto.Uint32(1),
		Fanout:  proto.Uint32(defaultFanout),
		Seed:    proto.Uint32(seed),
	}
	if err := writeHdr(n, hdr); err != nil {
		return nil, err
	}
	hdrLen := len(n.Data())

	if estimatedLen < maxItems {
		// it'll probably fit
		for i := 0; i < maxItems; i++ {
			k, data, ok := iter()
			if !ok {
				// all done
				break
			}
			n.Links = append(n.Links, &merkledag.Link{Hash: k.ToMultihash()})
			n.SetData(append(n.Data(), data...))
		}
		// sort by hash, also swap item Data
		s := sortByHash{
			links: n.Links[defaultFanout:],
			data:  n.Data()[hdrLen:],
		}
		sort.Stable(s)
	}

	// wasteful but simple
	type item struct {
		k    key.Key
		data []byte
	}
	hashed := make(map[uint32][]item)
	for {
		k, data, ok := iter()
		if !ok {
			break
		}
		h := hash(seed, k)
		hashed[h] = append(hashed[h], item{k, data})
	}
	for h, items := range hashed {
		childIter := func() (k key.Key, data []byte, ok bool) {
			if len(items) == 0 {
				return "", nil, false
			}
			first := items[0]
			items = items[1:]
			return first.k, first.data, true
		}
		child, err := storeItems(ctx, dag, uint64(len(items)), childIter, internalKeys)
		if err != nil {
			return nil, err
		}
		size, err := child.Size()
		if err != nil {
			return nil, err
		}
		childKey, err := dag.Add(child)
		if err != nil {
			return nil, err
		}
		internalKeys(childKey)
		l := &merkledag.Link{
			Name: "",
			Hash: childKey.ToMultihash(),
			Size: size,
		}
		n.Links[int(h%defaultFanout)] = l
	}
	return n, nil
}