func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.ProtoNode, fn walkerFunc, children keyObserver) error { hdr, err := readHdr(n) if err != nil { return err } // readHdr guarantees fanout is a safe value fanout := hdr.GetFanout() for i, l := range n.Links()[fanout:] { if err := fn(i, l); err != nil { return err } } for _, l := range n.Links()[:fanout] { c := l.Cid children(c) if c.Equals(emptyKey) { continue } subtree, err := l.GetNode(ctx, dag) if err != nil { return err } stpb, ok := subtree.(*merkledag.ProtoNode) if !ok { return merkledag.ErrNotProtobuf } if err := walkItems(ctx, dag, stpb, fn, children); err != nil { return err } } return nil }
func randNode() (*merkledag.ProtoNode, key.Key) { node := new(merkledag.ProtoNode) node.SetData(make([]byte, 32)) util.NewTimeSeededRand().Read(node.Data()) k := node.Key() return node, k }
func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.ProtoNode, name string, internalKeys keyObserver) ([]*cid.Cid, error) { l, err := root.GetNodeLink(name) if err != nil { return nil, err } lnkc := l.Cid internalKeys(lnkc) n, err := l.GetNode(ctx, dag) if err != nil { return nil, err } pbn, ok := n.(*merkledag.ProtoNode) if !ok { return nil, merkledag.ErrNotProtobuf } var res []*cid.Cid walk := func(idx int, link *node.Link) error { res = append(res, link.Cid) return nil } if err := walkItems(ctx, dag, pbn, walk, internalKeys); err != nil { return nil, err } return res, nil }
func (d *Directory) Mkdir(name string) (*Directory, error) { d.lock.Lock() defer d.lock.Unlock() fsn, err := d.childUnsync(name) if err == nil { switch fsn := fsn.(type) { case *Directory: return fsn, os.ErrExist case *File: return nil, os.ErrExist default: return nil, fmt.Errorf("unrecognized type: %#v", fsn) } } ndir := new(dag.ProtoNode) ndir.SetData(ft.FolderPBData()) _, err = d.dserv.Add(ndir) if err != nil { return nil, err } err = d.node.AddNodeLinkClean(name, ndir) if err != nil { return nil, err } dirobj := NewDirectory(d.ctx, name, ndir, d, d.dserv) d.childDirs[name] = dirobj return dirobj, nil }
// newRoot creates a new Root and starts up a republisher routine for it func NewRoot(parent context.Context, ds dag.DAGService, node *dag.ProtoNode, pf PubFunc) (*Root, error) { var repub *Republisher if pf != nil { repub = NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3) repub.setVal(node.Cid()) go repub.Run() } root := &Root{ node: node, repub: repub, dserv: ds, } pbn, err := ft.FromBytes(node.Data()) if err != nil { log.Error("IPNS pointer was not unixfs node") return nil, err } switch pbn.GetType() { case ft.TDirectory: root.val = NewDirectory(parent, node.String(), node, root, ds) case ft.TFile, ft.TMetadata, ft.TRaw: fi, err := NewFile(node.String(), node, root, ds) if err != nil { return nil, err } root.val = fi default: panic("unrecognized! (NYI)") } return root, nil }
func addLink(ctx context.Context, ds dag.DAGService, root *dag.ProtoNode, childname string, childnd node.Node) (*dag.ProtoNode, error) { if childname == "" { return nil, errors.New("cannot create link with no name!") } // ensure that the node we are adding is in the dagservice _, err := ds.Add(childnd) if err != nil { return nil, err } _ = ds.Remove(root) // ensure no link with that name already exists _ = root.RemoveNodeLink(childname) // ignore error, only option is ErrNotFound if err := root.AddNodeLinkClean(childname, childnd); err != nil { return nil, err } if _, err := ds.Add(root); err != nil { return nil, err } return root, nil }
func copyDag(nd *dag.ProtoNode, from, to dag.DAGService) error { _, err := to.Add(nd) if err != nil { return err } for _, lnk := range nd.Links() { child, err := lnk.GetNode(context.Background(), from) if err != nil { if err == dag.ErrNotFound { // not found means we didnt modify it, and it should // already be in the target datastore continue } return err } childpb, ok := child.(*dag.ProtoNode) if !ok { return dag.ErrNotProtobuf } err = copyDag(childpb, from, to) if err != nil { return err } } return nil }
func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error) { c, err := cid.Decode(skey) if err != nil { return "", err } nd, err := n.DAG.Get(n.Context(), c) if err != nil { return "", err } mdnode := new(dag.ProtoNode) mdata, err := ft.BytesForMetadata(m) if err != nil { return "", err } mdnode.SetData(mdata) if err := mdnode.AddNodeLinkClean("file", nd); err != nil { return "", err } nk, err := n.DAG.Add(mdnode) if err != nil { return "", err } return nk.String(), nil }
func ExportTar(ctx context.Context, root *dag.ProtoNode, ds dag.DAGService) (io.Reader, error) { if string(root.Data()) != "ipfs/tar" { return nil, errors.New("not an IPFS tarchive") } return &tarReader{ links: root.Links(), ds: ds, ctx: ctx, }, nil }
// converts the Node object into a real dag.ProtoNode func deserializeNode(nd *Node, dataFieldEncoding string) (*dag.ProtoNode, error) { dagnode := new(dag.ProtoNode) switch dataFieldEncoding { case "text": dagnode.SetData([]byte(nd.Data)) case "base64": data, _ := base64.StdEncoding.DecodeString(nd.Data) dagnode.SetData(data) default: return nil, fmt.Errorf("Unkown data field encoding") } dagnode.SetLinks(make([]*node.Link, len(nd.Links))) for i, link := range nd.Links { c, err := cid.Decode(link.Hash) if err != nil { return nil, err } dagnode.Links()[i] = &node.Link{ Name: link.Name, Size: link.Size, Cid: c, } } return dagnode, nil }
// NewUnixfsNodeFromDag reconstructs a Unixfs node from a given dag node func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) { mb, err := ft.FSNodeFromBytes(nd.Data()) if err != nil { return nil, err } return &UnixfsNode{ node: nd, ufmt: mb, }, nil }
func PrintDag(nd *mdag.ProtoNode, ds mdag.DAGService, indent int) { pbd, err := ft.FromBytes(nd.Data()) if err != nil { panic(err) } for i := 0; i < indent; i++ { fmt.Print(" ") } fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes())) if len(nd.Links()) > 0 { fmt.Println() } for _, lnk := range nd.Links() { child, err := lnk.GetNode(context.Background(), ds) if err != nil { panic(err) } PrintDag(child.(*mdag.ProtoNode), ds, indent+1) } if len(nd.Links()) > 0 { for i := 0; i < indent; i++ { fmt.Print(" ") } } fmt.Println("}") }
func (e *Editor) rmLink(ctx context.Context, root *dag.ProtoNode, path []string) (*dag.ProtoNode, error) { if len(path) == 1 { // base case, remove node in question err := root.RemoveNodeLink(path[0]) if err != nil { return nil, err } _, err = e.tmp.Add(root) if err != nil { return nil, err } return root, nil } // search for node in both tmp dagstore and source dagstore nd, err := root.GetLinkedProtoNode(ctx, e.tmp, path[0]) if err == dag.ErrNotFound { nd, err = root.GetLinkedProtoNode(ctx, e.src, path[0]) } if err != nil { return nil, err } nnode, err := e.rmLink(ctx, nd, path[1:]) if err != nil { return nil, err } _ = e.tmp.Remove(root) _ = root.RemoveNodeLink(path[0]) err = root.AddNodeLinkClean(path[0], nnode) if err != nil { return nil, err } _, err = e.tmp.Add(root) if err != nil { return nil, err } return root, nil }
func randNode() (*mdag.ProtoNode, *cid.Cid) { nd := new(mdag.ProtoNode) nd.SetData(make([]byte, 32)) util.NewTimeSeededRand().Read(nd.Data()) k := nd.Cid() return nd, k }
func writeHdr(n *merkledag.ProtoNode, hdr *pb.Set) error { hdrData, err := proto.Marshal(hdr) if err != nil { return err } // make enough space for the length prefix and the marshalled header data data := make([]byte, binary.MaxVarintLen64, binary.MaxVarintLen64+len(hdrData)) // write the uvarint length of the header data uvarlen := binary.PutUvarint(data, uint64(len(hdrData))) // append the actual protobuf data *after* the length value we wrote data = append(data[:uvarlen], hdrData...) n.SetData(data) return nil }
// ImportTar reads a tar stream from r and builds an IPFS "tarchive" dag in
// ds: one node per tar entry holding the marshalled header, with the entry's
// file contents (if any) chunked into a sub-dag linked under "data". Each
// header node is inserted into the tree at the entry's escaped path, and the
// finalized root node is returned.
func ImportTar(r io.Reader, ds dag.DAGService) (*dag.ProtoNode, error) {
	tr := tar.NewReader(r)

	// the root carries a marker so readers can recognize a tarchive
	root := new(dag.ProtoNode)
	root.SetData([]byte("ipfs/tar"))

	e := dagutil.NewDagEditor(root, ds)

	for {
		h, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				// end of archive: stop reading and finalize
				break
			}
			return nil, err
		}

		header := new(dag.ProtoNode)

		headerBytes, err := marshalHeader(h)
		if err != nil {
			return nil, err
		}

		header.SetData(headerBytes)

		if h.Size > 0 {
			// entry has file contents: chunk them (Rabin fingerprinting)
			// into a dag and link it under "data"
			spl := chunk.NewRabin(tr, uint64(chunk.DefaultBlockSize))
			nd, err := importer.BuildDagFromReader(ds, spl)
			if err != nil {
				return nil, err
			}

			err = header.AddNodeLinkClean("data", nd)
			if err != nil {
				return nil, err
			}
		}

		_, err = ds.Add(header)
		if err != nil {
			return nil, err
		}

		// place the header node in the tree at the entry's path, creating
		// empty intermediate nodes as needed
		path := escapePath(h.Name)
		err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.ProtoNode { return new(dag.ProtoNode) })
		if err != nil {
			return nil, err
		}
	}

	return e.Finalize(ds)
}
func TestAppendSingleBytesToEmpty(t *testing.T) { ds := mdtest.Mock() data := []byte("AB") nd := new(merkledag.ProtoNode) nd.SetData(ft.FilePBData(nil, 0)) dbp := &h.DagBuilderParams{ Dagserv: ds, Maxlinks: 4, } spl := chunk.SizeSplitterGen(500) ctx := context.Background() nnode, err := TrickleAppend(ctx, nd, dbp.New(spl(bytes.NewReader(data[:1])))) if err != nil { t.Fatal(err) } nnode, err = TrickleAppend(ctx, nnode, dbp.New(spl(bytes.NewReader(data[1:])))) if err != nil { t.Fatal(err) } fread, err := uio.NewDagReader(ctx, nnode, ds) if err != nil { t.Fatal(err) } out, err := ioutil.ReadAll(fread) if err != nil { t.Fatal(err) } fmt.Println(out, data) err = arrComp(out, data) if err != nil { t.Fatal(err) } }
func (w *Writer) WriteNode(nd *mdag.ProtoNode, fpath string) error { pb := new(upb.Data) if err := proto.Unmarshal(nd.Data(), pb); err != nil { return err } switch pb.GetType() { case upb.Data_Metadata: fallthrough case upb.Data_Directory: return w.writeDir(nd, fpath) case upb.Data_Raw: fallthrough case upb.Data_File: return w.writeFile(nd, pb, fpath) case upb.Data_Symlink: return writeSymlinkHeader(w.TarW, string(pb.GetData()), fpath) default: return ft.ErrUnrecognizedType } }
func getPaths(t *testing.T, ipfs *core.IpfsNode, name string, n *dag.ProtoNode) []string { if len(n.Links()) == 0 { return []string{name} } var out []string for _, lnk := range n.Links() { child, err := lnk.GetNode(ipfs.Context(), ipfs.DAG) if err != nil { t.Fatal(err) } childpb, ok := child.(*dag.ProtoNode) if !ok { t.Fatal(dag.ErrNotProtobuf) } sub := getPaths(t, ipfs, path.Join(name, lnk.Name), childpb) out = append(out, sub...) } return out }
func readHdr(n *merkledag.ProtoNode) (*pb.Set, error) { hdrLenRaw, consumed := binary.Uvarint(n.Data()) if consumed <= 0 { return nil, errors.New("invalid Set header length") } pbdata := n.Data()[consumed:] if hdrLenRaw > uint64(len(pbdata)) { return nil, errors.New("impossibly large Set header length") } // as hdrLenRaw was <= an int, we now know it fits in an int hdrLen := int(hdrLenRaw) var hdr pb.Set if err := proto.Unmarshal(pbdata[:hdrLen], &hdr); err != nil { return nil, err } if v := hdr.GetVersion(); v != 1 { return nil, fmt.Errorf("unsupported Set version: %d", v) } if uint64(hdr.GetFanout()) > uint64(len(n.Links())) { return nil, errors.New("impossibly large Fanout") } return &hdr, nil }
func (w *Writer) writeDir(nd *mdag.ProtoNode, fpath string) error { if err := writeDirHeader(w.TarW, fpath); err != nil { return err } for i, ng := range mdag.GetDAG(w.ctx, w.Dag, nd) { child, err := ng.Get(w.ctx) if err != nil { return err } childpb, ok := child.(*mdag.ProtoNode) if !ok { return mdag.ErrNotProtobuf } npath := path.Join(fpath, nd.Links()[i].Name) if err := w.WriteNode(childpb, npath); err != nil { return err } } return nil }
func (e *Editor) insertNodeAtPath(ctx context.Context, root *dag.ProtoNode, path []string, toinsert node.Node, create func() *dag.ProtoNode) (*dag.ProtoNode, error) { if len(path) == 1 { return addLink(ctx, e.tmp, root, path[0], toinsert) } nd, err := root.GetLinkedProtoNode(ctx, e.tmp, path[0]) if err != nil { // if 'create' is true, we create directories on the way down as needed if err == dag.ErrLinkNotFound && create != nil { nd = create() err = nil // no longer an error case } else if err == dag.ErrNotFound { // try finding it in our source dagstore nd, err = root.GetLinkedProtoNode(ctx, e.src, path[0]) } // if we receive an ErrNotFound, then our second 'GetLinkedNode' call // also fails, we want to error out if err != nil { return nil, err } } ndprime, err := e.insertNodeAtPath(ctx, nd, path[1:], toinsert, create) if err != nil { return nil, err } _ = e.tmp.Remove(root) _ = root.RemoveNodeLink(path[0]) err = root.AddNodeLinkClean(path[0], ndprime) if err != nil { return nil, err } _, err = e.tmp.Add(root) if err != nil { return nil, err } return root, nil }
// dagTruncate truncates the given node to 'size' and returns the modified Node func dagTruncate(ctx context.Context, nd *mdag.ProtoNode, size uint64, ds mdag.DAGService) (*mdag.ProtoNode, error) { if len(nd.Links()) == 0 { // TODO: this can likely be done without marshaling and remarshaling pbn, err := ft.FromBytes(nd.Data()) if err != nil { return nil, err } nd.SetData(ft.WrapData(pbn.Data[:size])) return nd, nil } var cur uint64 end := 0 var modified *mdag.ProtoNode ndata := new(ft.FSNode) for i, lnk := range nd.Links() { child, err := lnk.GetNode(ctx, ds) if err != nil { return nil, err } childpb, ok := child.(*mdag.ProtoNode) if !ok { return nil, err } childsize, err := ft.DataSize(childpb.Data()) if err != nil { return nil, err } // found the child we want to cut if size < cur+childsize { nchild, err := dagTruncate(ctx, childpb, size-cur, ds) if err != nil { return nil, err } ndata.AddBlockSize(size - cur) modified = nchild end = i break } cur += childsize ndata.AddBlockSize(childsize) } _, err := ds.Add(modified) if err != nil { return nil, err } nd.SetLinks(nd.Links()[:end]) err = nd.AddNodeLinkClean("", modified) if err != nil { return nil, err } d, err := ndata.GetBytes() if err != nil { return nil, err } nd.SetData(d) // invalidate cache and recompute serialized data _, err = nd.EncodeProtobuf(true) if err != nil { return nil, err } return nd, nil }
// deleteHandler handles HTTP DELETE on the writable gateway: it removes the
// final path component's link from its parent directory node, then rebuilds
// and re-adds every ancestor node up to the root, finally redirecting the
// client to the path under the new root hash.
func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) {
	urlPath := r.URL.Path
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	p, err := path.ParsePath(urlPath)
	if err != nil {
		webError(w, "failed to parse path", err, http.StatusBadRequest)
		return
	}

	c, components, err := path.SplitAbsPath(p)
	if err != nil {
		webError(w, "Could not split path", err, http.StatusInternalServerError)
		return
	}

	// resolution gets a bounded amount of time
	tctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	rootnd, err := i.node.Resolver.DAG.Get(tctx, c)
	if err != nil {
		webError(w, "Could not resolve root object", err, http.StatusBadRequest)
		return
	}

	// resolve every component except the last; the last names the entry
	// to delete from its parent
	pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
	if err != nil {
		webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
		return
	}

	pbnd, ok := pathNodes[len(pathNodes)-1].(*dag.ProtoNode)
	if !ok {
		webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
		return
	}

	// TODO(cyrptix): assumes len(pathNodes) > 1 - not found is an error above?
	err = pbnd.RemoveNodeLink(components[len(components)-1])
	if err != nil {
		webError(w, "Could not delete link", err, http.StatusBadRequest)
		return
	}

	// walk back up the path, persisting each updated node and relinking it
	// into its parent so the deletion propagates to a new root
	var newnode *dag.ProtoNode = pbnd
	for j := len(pathNodes) - 2; j >= 0; j-- {
		if _, err := i.node.DAG.Add(newnode); err != nil {
			webError(w, "Could not add node", err, http.StatusInternalServerError)
			return
		}

		pathpb, ok := pathNodes[j].(*dag.ProtoNode)
		if !ok {
			webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
			return
		}

		newnode, err = pathpb.UpdateNodeLink(components[j], newnode)
		if err != nil {
			webError(w, "Could not update node links", err, http.StatusInternalServerError)
			return
		}
	}

	if _, err := i.node.DAG.Add(newnode); err != nil {
		webError(w, "Could not add root node", err, http.StatusInternalServerError)
		return
	}

	// Redirect to new path
	ncid := newnode.Cid()

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", ncid.String())
	http.Redirect(w, r, gopath.Join(ipfsPathPrefix+ncid.String(), path.Join(components[:len(components)-1])), http.StatusCreated)
}
func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.ProtoNode) ([]*Change, error) { if len(a.Links()) == 0 && len(b.Links()) == 0 { return []*Change{ &Change{ Type: Mod, Before: a.Cid(), After: b.Cid(), }, }, nil } var out []*Change clean_a := a.Copy().(*dag.ProtoNode) clean_b := b.Copy().(*dag.ProtoNode) // strip out unchanged stuff for _, lnk := range a.Links() { l, err := b.GetNodeLink(lnk.Name) if err == nil { if l.Cid.Equals(lnk.Cid) { // no change... ignore it } else { anode, err := lnk.GetNode(ctx, ds) if err != nil { return nil, err } bnode, err := l.GetNode(ctx, ds) if err != nil { return nil, err } anodepb, ok := anode.(*dag.ProtoNode) if !ok { return nil, dag.ErrNotProtobuf } bnodepb, ok := bnode.(*dag.ProtoNode) if !ok { return nil, dag.ErrNotProtobuf } sub, err := Diff(ctx, ds, anodepb, bnodepb) if err != nil { return nil, err } for _, subc := range sub { subc.Path = path.Join(lnk.Name, subc.Path) out = append(out, subc) } } clean_a.RemoveNodeLink(l.Name) clean_b.RemoveNodeLink(l.Name) } } for _, lnk := range clean_a.Links() { out = append(out, &Change{ Type: Remove, Path: lnk.Name, Before: lnk.Cid, }) } for _, lnk := range clean_b.Links() { out = append(out, &Change{ Type: Add, Path: lnk.Name, After: lnk.Cid, }) } return out, nil }
// NewEmptyDirectory returns an empty merkledag Node with a folder Data chunk func NewEmptyDirectory() *mdag.ProtoNode { nd := new(mdag.ProtoNode) nd.SetData(format.FolderPBData()) return nd }
// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'
// returns the new key of the passed in node and whether or not all the data in the reader
// has been consumed.
func (dm *DagModifier) modifyDag(node *mdag.ProtoNode, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
	f, err := ft.FromBytes(node.Data())
	if err != nil {
		return nil, false, err
	}

	// If we've reached a leaf node.
	if len(node.Links()) == 0 {
		// overwrite the leaf's bytes in place starting at offset
		n, err := data.Read(f.Data[offset:])
		if err != nil && err != io.EOF {
			return nil, false, err
		}

		// Update newly written node..
		b, err := proto.Marshal(f)
		if err != nil {
			return nil, false, err
		}

		nd := new(mdag.ProtoNode)
		nd.SetData(b)
		k, err := dm.dagserv.Add(nd)
		if err != nil {
			return nil, false, err
		}

		// Hey look! we're done!
		// NOTE(review): a single short Read does not necessarily mean the
		// reader is exhausted (io.Reader may return fewer bytes without
		// EOF), so 'done' may be reported early here — confirm against
		// callers.
		var done bool
		if n < len(f.Data[offset:]) {
			done = true
		}

		return k, done, nil
	}

	// branch node: find the child block containing 'offset' and descend
	var cur uint64
	var done bool
	for i, bs := range f.GetBlocksizes() {
		// We found the correct child to write into
		if cur+bs > offset {
			child, err := node.Links()[i].GetNode(dm.ctx, dm.dagserv)
			if err != nil {
				return nil, false, err
			}

			childpb, ok := child.(*mdag.ProtoNode)
			if !ok {
				return nil, false, mdag.ErrNotProtobuf
			}

			k, sdone, err := dm.modifyDag(childpb, offset-cur, data)
			if err != nil {
				return nil, false, err
			}

			// NOTE(review): this increment appears to be overwritten by
			// 'offset = cur + bs' below before any intervening use —
			// verify whether it is intentional.
			offset += bs
			node.Links()[i].Cid = k

			// Recache serialized node
			_, err = node.EncodeProtobuf(true)
			if err != nil {
				return nil, false, err
			}

			if sdone {
				// No more bytes to write!
				done = true
				break
			}
			offset = cur + bs
		}
		cur += bs
	}

	// persist the updated branch node and return its new key
	k, err := dm.dagserv.Add(node)
	return k, done, err
}
// Recursive call for verifying the structure of a trickledag func verifyTDagRec(nd *dag.ProtoNode, depth, direct, layerRepeat int, ds dag.DAGService) error { if depth == 0 { // zero depth dag is raw data block if len(nd.Links()) > 0 { return errors.New("expected direct block") } pbn, err := ft.FromBytes(nd.Data()) if err != nil { return err } if pbn.GetType() != ft.TRaw { return errors.New("Expected raw block") } return nil } // Verify this is a branch node pbn, err := ft.FromBytes(nd.Data()) if err != nil { return err } if pbn.GetType() != ft.TFile { return fmt.Errorf("expected file as branch node, got: %s", pbn.GetType()) } if len(pbn.Data) > 0 { return errors.New("branch node should not have data") } for i := 0; i < len(nd.Links()); i++ { childi, err := nd.Links()[i].GetNode(context.TODO(), ds) if err != nil { return err } childpb, ok := childi.(*dag.ProtoNode) if !ok { return fmt.Errorf("cannot operate on non-protobuf nodes") } if i < direct { // Direct blocks err := verifyTDagRec(childpb, 0, direct, layerRepeat, ds) if err != nil { return err } } else { // Recursive trickle dags rdepth := ((i - direct) / layerRepeat) + 1 if rdepth >= depth && depth > 0 { return errors.New("Child dag was too deep!") } err := verifyTDagRec(childpb, rdepth, direct, layerRepeat, ds) if err != nil { return err } } } return nil }
func randNode() *merkledag.ProtoNode { node := new(merkledag.ProtoNode) node.SetData(make([]byte, 32)) util.NewTimeSeededRand().Read(node.Data()) return node }
dagnode, err := core.Resolve(req.Context(), nd.Namesys, r, p) if err != nil { res.SetError(err, cmds.ErrNormal) return } dagnodes = append(dagnodes, dagnode) } output := make([]LsObject, len(req.Arguments())) for i, dagnode := range dagnodes { output[i] = LsObject{ Hash: paths[i], Links: make([]LsLink, len(dagnode.Links())), } for j, link := range dagnode.Links() { var linkNode *merkledag.ProtoNode t := unixfspb.Data_DataType(-1) linkKey := link.Cid if ok, err := nd.Blockstore.Has(linkKey); ok && err == nil { b, err := nd.Blockstore.Get(linkKey) if err != nil { res.SetError(err, cmds.ErrNormal) return } linkNode, err = merkledag.DecodeProtobuf(b.RawData()) if err != nil { res.SetError(err, cmds.ErrNormal) return } }