// Recursive call for verifying the structure of a trickledag
func verifyTDagRec(nd *dag.Node, depth, direct, layerRepeat int, ds dag.DAGService) error {
	if depth == 0 {
		// zero depth dag is raw data block
		if len(nd.Links) > 0 {
			return errors.New("expected direct block")
		}

		pbn, err := ft.FromBytes(nd.Data)
		if err != nil {
			return err
		}

		if pbn.GetType() != ft.TRaw {
			return errors.New("expected raw block")
		}
		return nil
	}

	// Verify this is a branch node
	pbn, err := ft.FromBytes(nd.Data)
	if err != nil {
		return err
	}

	if pbn.GetType() != ft.TFile {
		return errors.New("expected file as branch node")
	}

	if len(pbn.Data) > 0 {
		return errors.New("branch node should not have data")
	}

	for i := 0; i < len(nd.Links); i++ {
		child, err := nd.Links[i].GetNode(context.TODO(), ds)
		if err != nil {
			return err
		}

		if i < direct {
			// Direct blocks
			err := verifyTDagRec(child, 0, direct, layerRepeat, ds)
			if err != nil {
				return err
			}
		} else {
			// Recursive trickle dags
			rdepth := ((i - direct) / layerRepeat) + 1
			if rdepth >= depth && depth > 0 {
				return errors.New("child dag was too deep")
			}
			err := verifyTDagRec(child, rdepth, direct, layerRepeat, ds)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
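// Illustrative sketch (not in the original source): a top-level wrapper for
// verifyTDagRec. The direct/layerRepeat values and the maxDepth argument are
// assumptions; they must match the parameters the trickle importer used when
// the dag was built.
func verifyTDagSketch(root *dag.Node, maxDepth int, ds dag.DAGService) error {
	// The root of a trickledag is a branch node, so verify it with the
	// maximum depth the layout is expected to reach. direct=3 and
	// layerRepeat=4 here are example values, not importer defaults.
	return verifyTDagRec(root, maxDepth, 3, 4, ds)
}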
// childFile returns a file under this directory by the given name if it exists
func (d *Directory) childFile(name string) (*File, error) {
	fi, ok := d.files[name]
	if ok {
		return fi, nil
	}

	nd, err := d.childFromDag(name)
	if err != nil {
		return nil, err
	}
	i, err := ft.FromBytes(nd.Data)
	if err != nil {
		return nil, err
	}

	switch i.GetType() {
	case ufspb.Data_Directory:
		return nil, ErrIsDirectory
	case ufspb.Data_File:
		nfi, err := NewFile(name, nd, d, d.fs)
		if err != nil {
			return nil, err
		}
		d.files[name] = nfi
		return nfi, nil
	case ufspb.Data_Metadata:
		return nil, ErrNotYetImplemented
	default:
		return nil, ErrInvalidChild
	}
}
// AddChild adds the node 'nd' under this directory giving it the name 'name'
func (d *Directory) AddChild(name string, nd *dag.Node) error {
	d.Lock()
	defer d.Unlock()

	pbn, err := ft.FromBytes(nd.Data)
	if err != nil {
		return err
	}

	_, err = d.childUnsync(name)
	if err == nil {
		return errors.New("directory already has entry by that name")
	}

	err = d.node.AddNodeLinkClean(name, nd)
	if err != nil {
		return err
	}

	switch pbn.GetType() {
	case ft.TDirectory:
		d.childDirs[name] = NewDirectory(d.ctx, name, nd, d, d.fs)
	case ft.TFile, ft.TMetadata, ft.TRaw:
		nfi, err := NewFile(name, nd, d, d.fs)
		if err != nil {
			return err
		}
		d.files[name] = nfi
	default:
		return ErrInvalidChild
	}

	return d.parent.closeChild(d.name, d.node)
}
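// Illustrative sketch (not in the original source): creating an empty unixfs
// directory node and attaching it under an existing Directory. ft.FolderPBData
// is the unixfs helper for an empty directory payload; the function name here
// is hypothetical.
func addEmptySubdirSketch(parent *Directory, name string) error {
	nd := &dag.Node{Data: ft.FolderPBData()}

	// AddChild links nd into the parent's dag node, caches the typed child,
	// and propagates the change upward via closeChild.
	return parent.AddChild(name, nd)
}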
// childDir returns a directory under this directory by the given name if it
// exists.
func (d *Directory) childDir(name string) (*Directory, error) {
	dir, ok := d.childDirs[name]
	if ok {
		return dir, nil
	}

	nd, err := d.childFromDag(name)
	if err != nil {
		return nil, err
	}
	i, err := ft.FromBytes(nd.Data)
	if err != nil {
		return nil, err
	}

	switch i.GetType() {
	case ufspb.Data_Directory:
		ndir := NewDirectory(d.ctx, name, nd, d, d.fs)
		d.childDirs[name] = ndir
		return ndir, nil
	case ufspb.Data_File:
		return nil, fmt.Errorf("%s is not a directory", name)
	case ufspb.Data_Metadata:
		return nil, ErrNotYetImplemented
	default:
		return nil, ErrInvalidChild
	}
}
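// Illustrative sketch (not in the original source): looking up a child whose
// type is unknown in advance by trying the directory lookup first and falling
// back to the file lookup. Like childDir and childFile themselves, this
// assumes the caller already holds the directory's lock.
func childSketch(d *Directory, name string) (interface{}, error) {
	if dir, err := d.childDir(name); err == nil {
		return dir, nil
	}
	return d.childFile(name)
}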
// newKeyRoot creates a new KeyRoot for the given key, and starts up a republisher routine
// for it
func (fs *Filesystem) newKeyRoot(parent context.Context, k ci.PrivKey) (*KeyRoot, error) {
	hash, err := k.GetPublic().Hash()
	if err != nil {
		return nil, err
	}

	name := "/ipns/" + key.Key(hash).String()

	root := new(KeyRoot)
	root.key = k
	root.fs = fs
	root.name = name

	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	pointsTo, err := fs.nsys.Resolve(ctx, name)
	if err != nil {
		err = namesys.InitializeKeyspace(ctx, fs.dserv, fs.nsys, fs.pins, k)
		if err != nil {
			return nil, err
		}

		pointsTo, err = fs.nsys.Resolve(ctx, name)
		if err != nil {
			return nil, err
		}
	}

	mnode, err := fs.resolver.ResolvePath(ctx, pointsTo)
	if err != nil {
		log.Errorf("Failed to retrieve value '%s' for ipns entry: %s\n", pointsTo, err)
		return nil, err
	}

	root.node = mnode

	root.repub = NewRepublisher(root, time.Millisecond*300, time.Second*3)
	go root.repub.Run(parent)

	pbn, err := ft.FromBytes(mnode.Data)
	if err != nil {
		log.Error("IPNS pointer was not unixfs node")
		return nil, err
	}

	switch pbn.GetType() {
	case ft.TDirectory:
		root.val = NewDirectory(ctx, pointsTo.String(), mnode, root, fs)
	case ft.TFile, ft.TMetadata, ft.TRaw:
		fi, err := NewFile(pointsTo.String(), mnode, root, fs)
		if err != nil {
			return nil, err
		}
		root.val = fi
	default:
		panic("unrecognized! (NYI)")
	}

	return root, nil
}
// Size returns the size of the file represented by this DagModifier,
// accounting for any buffered writes that have not yet been synced.
func (dm *DagModifier) Size() (int64, error) {
	pbn, err := ft.FromBytes(dm.curNode.Data)
	if err != nil {
		return 0, err
	}

	if dm.wrBuf != nil {
		if uint64(dm.wrBuf.Len())+dm.writeStart > pbn.GetFilesize() {
			return int64(dm.wrBuf.Len()) + int64(dm.writeStart), nil
		}
	}

	return int64(pbn.GetFilesize()), nil
}
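// Illustrative sketch (not in the original source): Size accounts for pending,
// unsynced writes, so a write extending past the current end grows the
// reported size before any dag node is rewritten. This assumes DagModifier
// exposes the io.Writer-style Write method used elsewhere in this package.
func sizeAfterWriteSketch(dm *DagModifier, b []byte) (int64, error) {
	if _, err := dm.Write(b); err != nil {
		return 0, err
	}
	return dm.Size()
}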
		dagnodes = append(dagnodes, dagnode)
	}

	output := make([]LsObject, len(req.Arguments()))
	for i, dagnode := range dagnodes {
		output[i] = LsObject{
			Hash:  paths[i],
			Links: make([]LsLink, len(dagnode.Links)),
		}
		for j, link := range dagnode.Links {
			link.Node, err = link.GetNode(req.Context(), node.DAG)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
			d, err := unixfs.FromBytes(link.Node.Data)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
			output[i].Links[j] = LsLink{
				Name: link.Name,
				Hash: link.Hash.B58String(),
				Size: link.Size,
				Type: d.GetType(),
			}
		}
	}

	res.SetOutput(&LsOutput{output})
},
// dagTruncate truncates the given node to 'size' and returns the modified Node
func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGService) (*mdag.Node, error) {
	if len(nd.Links) == 0 {
		// TODO: this can likely be done without marshaling and remarshaling
		pbn, err := ft.FromBytes(nd.Data)
		if err != nil {
			return nil, err
		}

		nd.Data = ft.WrapData(pbn.Data[:size])
		return nd, nil
	}

	var cur uint64
	end := 0
	var modified *mdag.Node
	ndata := new(ft.FSNode)
	for i, lnk := range nd.Links {
		child, err := lnk.GetNode(ctx, ds)
		if err != nil {
			return nil, err
		}

		childsize, err := ft.DataSize(child.Data)
		if err != nil {
			return nil, err
		}

		// found the child we want to cut
		if size < cur+childsize {
			nchild, err := dagTruncate(ctx, child, size-cur, ds)
			if err != nil {
				return nil, err
			}

			ndata.AddBlockSize(size - cur)

			modified = nchild
			end = i
			break
		}
		cur += childsize
		ndata.AddBlockSize(childsize)
	}

	_, err := ds.Add(modified)
	if err != nil {
		return nil, err
	}

	nd.Links = nd.Links[:end]
	err = nd.AddNodeLinkClean("", modified)
	if err != nil {
		return nil, err
	}

	d, err := ndata.GetBytes()
	if err != nil {
		return nil, err
	}

	nd.Data = d

	// invalidate cache and recompute serialized data
	_, err = nd.Encoded(true)
	if err != nil {
		return nil, err
	}

	return nd, nil
}
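// Illustrative sketch (not in the original source): dagTruncate modifies nd in
// place but only adds the truncated child to the dagservice, so this sketch
// assumes the caller must persist the new root itself.
func truncateAndPersistSketch(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGService) (*mdag.Node, error) {
	nnode, err := dagTruncate(ctx, nd, size, ds)
	if err != nil {
		return nil, err
	}
	if _, err := ds.Add(nnode); err != nil {
		return nil, err
	}
	return nnode, nil
}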
// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'
// returns the new key of the passed in node and whether or not all the data in the reader
// has been consumed.
func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (key.Key, bool, error) {
	f, err := ft.FromBytes(node.Data)
	if err != nil {
		return "", false, err
	}

	// If we've reached a leaf node.
	if len(node.Links) == 0 {
		n, err := data.Read(f.Data[offset:])
		if err != nil && err != io.EOF {
			return "", false, err
		}

		// Update newly written node.
		b, err := proto.Marshal(f)
		if err != nil {
			return "", false, err
		}

		nd := &mdag.Node{Data: b}
		k, err := dm.dagserv.Add(nd)
		if err != nil {
			return "", false, err
		}

		// Hey look! we're done!
		var done bool
		if n < len(f.Data[offset:]) {
			done = true
		}

		return k, done, nil
	}

	var cur uint64
	var done bool
	for i, bs := range f.GetBlocksizes() {
		// We found the correct child to write into
		if cur+bs > offset {
			// Unpin block
			ckey := key.Key(node.Links[i].Hash)
			dm.mp.RemovePinWithMode(ckey, pin.Indirect)

			child, err := node.Links[i].GetNode(dm.ctx, dm.dagserv)
			if err != nil {
				return "", false, err
			}
			k, sdone, err := dm.modifyDag(child, offset-cur, data)
			if err != nil {
				return "", false, err
			}

			// pin the new node
			dm.mp.PinWithMode(k, pin.Indirect)

			offset += bs
			node.Links[i].Hash = mh.Multihash(k)

			// Recache serialized node
			_, err = node.Encoded(true)
			if err != nil {
				return "", false, err
			}

			if sdone {
				// No more bytes to write!
				done = true
				break
			}
			offset = cur + bs
		}
		cur += bs
	}

	k, err := dm.dagserv.Add(node)
	return k, done, err
}
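// Illustrative sketch (not in the original source): overwriting a byte range
// of a file dag from an in-memory buffer. Assumes the standard library
// "bytes" package is imported. The returned bool follows modifyDag's contract
// above: it reports whether the buffer was fully consumed, and false means the
// write ran past the end of the dag and the remainder must be appended
// separately.
func (dm *DagModifier) overwriteSketch(root *mdag.Node, offset uint64, data []byte) (key.Key, bool, error) {
	return dm.modifyDag(root, offset, bytes.NewReader(data))
}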
key, err := merkleNode.Key()
if err != nil {
	res.SetError(err, cmds.ErrNormal)
	return
}

hash := key.B58String()
output.Arguments[fpath] = hash

if _, ok := output.Objects[hash]; ok {
	// duplicate argument for an already-listed node
	continue
}

unixFSNode, err := unixfs.FromBytes(merkleNode.Data)
if err != nil {
	res.SetError(err, cmds.ErrNormal)
	return
}

t := unixFSNode.GetType()
output.Objects[hash] = &LsObject{
	Hash: key.String(),
	Type: t.String(),
	Size: unixFSNode.GetFilesize(),
}

switch t {
case unixfspb.Data_File: