func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) {
	nkey, err := n.Key()
	if err != nil {
		return 0, err
	}

	var count int
	for i, ng := range rw.DAG.GetDAG(rw.Ctx, n) {
		lk := key.Key(n.Links[i].Hash)
		if rw.skip(lk) {
			continue
		}

		if err := rw.WriteEdge(nkey, lk, n.Links[i].Name); err != nil {
			return count, err
		}

		nd, err := ng.Get(rw.Ctx)
		if err != nil {
			return count, err
		}

		c, err := rw.writeRefsRecursive(nd)
		count += c
		if err != nil {
			return count, err
		}
	}
	return count, nil
}
func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) {
	nkey, err := n.Key()
	if err != nil {
		return 0, err
	}

	if rw.skip(nkey) {
		return 0, nil
	}

	count := 0
	for _, l := range n.Links {
		lk := key.Key(l.Hash)
		if rw.skip(lk) {
			continue
		}

		if err := rw.WriteEdge(nkey, lk, l.Name); err != nil {
			return count, err
		}
		count++
	}
	return count, nil
}
func randNode() (*merkledag.Node, key.Key) {
	node := new(merkledag.Node)
	node.Data = make([]byte, 32)
	util.NewTimeSeededRand().Read(node.Data)
	k, _ := node.Key()
	return node, k
}
func randNode() (*mdag.Node, key.Key) {
	nd := new(mdag.Node)
	nd.Data = make([]byte, 32)
	util.NewTimeSeededRand().Read(nd.Data)
	k, _ := nd.Key()
	return nd, k
}
func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error) {
	ukey := key.B58KeyDecode(skey)

	nd, err := n.DAG.Get(n.Context(), ukey)
	if err != nil {
		return "", err
	}

	mdnode := new(dag.Node)
	mdata, err := ft.BytesForMetadata(m)
	if err != nil {
		return "", err
	}

	mdnode.Data = mdata
	if err := mdnode.AddNodeLinkClean("file", nd); err != nil {
		return "", err
	}

	nk, err := n.DAG.Add(mdnode)
	if err != nil {
		return "", err
	}

	return nk.B58String(), nil
}
func (p *pinner) pinIndirectRecurse(ctx context.Context, node *mdag.Node) error {
	k, err := node.Key()
	if err != nil {
		return err
	}

	p.indirPin.Increment(k)
	return p.pinLinks(ctx, node)
}
func NewDagModifier(ctx context.Context, from *mdag.Node, serv mdag.DAGService, mp pin.ManualPinner, spl chunk.SplitterGen) (*DagModifier, error) {
	return &DagModifier{
		curNode:  from.Copy(),
		dagserv:  serv,
		splitter: spl,
		ctx:      ctx,
		mp:       mp,
	}, nil
}
func nodeFromTemplate(template string) (*dag.Node, error) {
	switch template {
	case "unixfs-dir":
		nd := new(dag.Node)
		nd.Data = ft.FolderPBData()
		return nd, nil
	default:
		return nil, fmt.Errorf("template '%s' not found", template)
	}
}
// Diff returns the set of changes that transform node 'a' into node 'b'.
func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) []*Change {
	if len(a.Links) == 0 && len(b.Links) == 0 {
		ak, _ := a.Key()
		bk, _ := b.Key()
		return []*Change{
			&Change{
				Type:   Mod,
				Before: ak,
				After:  bk,
			},
		}
	}

	var out []*Change
	clean_a := a.Copy()
	clean_b := b.Copy()

	// strip out unchanged stuff
	for _, lnk := range a.Links {
		l, err := b.GetNodeLink(lnk.Name)
		if err == nil {
			if bytes.Equal(l.Hash, lnk.Hash) {
				// no change... ignore it
			} else {
				anode, _ := lnk.GetNode(ctx, ds)
				bnode, _ := l.GetNode(ctx, ds)
				sub := Diff(ctx, ds, anode, bnode)
				for _, subc := range sub {
					subc.Path = path.Join(lnk.Name, subc.Path)
					out = append(out, subc)
				}
			}
			clean_a.RemoveNodeLink(l.Name)
			clean_b.RemoveNodeLink(l.Name)
		}
	}

	for _, lnk := range clean_a.Links {
		out = append(out, &Change{
			Type:   Remove,
			Path:   lnk.Name,
			Before: key.Key(lnk.Hash),
		})
	}
	for _, lnk := range clean_b.Links {
		out = append(out, &Change{
			Type:  Add,
			Path:  lnk.Name,
			After: key.Key(lnk.Hash),
		})
	}

	return out
}
func (t *traversal) shouldSkip(n *mdag.Node) (bool, error) {
	if t.opts.SkipDuplicates {
		k, err := n.Key()
		if err != nil {
			return true, err
		}
		if _, found := t.seen[string(k)]; found {
			return true, nil
		}
		t.seen[string(k)] = struct{}{}
	}

	return false, nil
}
func (params *adder) addNode(node *dag.Node, path string) error {
	// patch it into the root
	if path == "" {
		key, err := node.Key()
		if err != nil {
			return err
		}

		path = key.Pretty()
	}

	if err := params.editor.InsertNodeAtPath(params.ctx, path, node, newDirNode); err != nil {
		return err
	}

	return outputDagnode(params.out, path, node)
}
func TestAppendSingleBytesToEmpty(t *testing.T) {
	ds := mdtest.Mock()

	data := []byte("AB")

	nd := new(merkledag.Node)
	nd.Data = ft.FilePBData(nil, 0)

	dbp := &h.DagBuilderParams{
		Dagserv:  ds,
		Maxlinks: 4,
	}

	spl := chunk.SizeSplitterGen(500)

	blks, errs := chunk.Chan(spl(bytes.NewReader(data[:1])))

	ctx := context.Background()
	nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs))
	if err != nil {
		t.Fatal(err)
	}

	blks, errs = chunk.Chan(spl(bytes.NewReader(data[1:])))

	nnode, err = TrickleAppend(ctx, nnode, dbp.New(blks, errs))
	if err != nil {
		t.Fatal(err)
	}

	fread, err := uio.NewDagReader(ctx, nnode, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(fread)
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println(out, data)
	err = arrComp(out, data)
	if err != nil {
		t.Fatal(err)
	}
}
// converts the Node object into a real dag.Node
func deserializeNode(node *Node) (*dag.Node, error) {
	dagnode := new(dag.Node)
	dagnode.Data = []byte(node.Data)
	dagnode.Links = make([]*dag.Link, len(node.Links))
	for i, link := range node.Links {
		hash, err := mh.FromB58String(link.Hash)
		if err != nil {
			return nil, err
		}
		dagnode.Links[i] = &dag.Link{
			Name: link.Name,
			Size: link.Size,
			Hash: hash,
		}
	}

	return dagnode, nil
}
func getOutput(dagnode *dag.Node) (*Object, error) {
	key, err := dagnode.Key()
	if err != nil {
		return nil, err
	}

	output := &Object{
		Hash:  key.Pretty(),
		Links: make([]Link, len(dagnode.Links)),
	}

	for i, link := range dagnode.Links {
		output.Links[i] = Link{
			Name: link.Name,
			Hash: link.Hash.B58String(),
			Size: link.Size,
		}
	}

	return output, nil
}
func (p *pinner) unpinLinks(ctx context.Context, node *mdag.Node) error {
	for _, l := range node.Links {
		node, err := l.GetNode(ctx, p.dserv)
		if err != nil {
			return err
		}

		k, err := node.Key()
		if err != nil {
			return err
		}

		p.indirPin.Decrement(k)

		err = p.unpinLinks(ctx, node)
		if err != nil {
			return err
		}
	}
	return nil
}
// insertNodeAtPath inserts 'toinsert' into the tree rooted at 'root' at the
// given path, creating intermediate nodes with 'create' where needed, and
// returns the updated root.
func insertNodeAtPath(ctx context.Context, ds dag.DAGService, root *dag.Node, path []string, toinsert *dag.Node, create func() *dag.Node) (*dag.Node, error) {
	if len(path) == 1 {
		return addLink(ctx, ds, root, path[0], toinsert)
	}

	nd, err := root.GetLinkedNode(ctx, ds, path[0])
	if err != nil {
		// if a 'create' callback is given, create directories on the way down as needed
		if err == dag.ErrNotFound && create != nil {
			nd = create()
		} else {
			return nil, err
		}
	}

	ndprime, err := insertNodeAtPath(ctx, ds, nd, path[1:], toinsert, create)
	if err != nil {
		return nil, err
	}

	_ = root.RemoveNodeLink(path[0])
	err = root.AddNodeLinkClean(path[0], ndprime)
	if err != nil {
		return nil, err
	}

	_, err = ds.Add(root)
	if err != nil {
		return nil, err
	}

	return root, nil
}
func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname string, childnd *dag.Node) (*dag.Node, error) {
	if childname == "" {
		return nil, errors.New("cannot create link with no name")
	}

	// ensure that the node we are adding is in the dagservice
	_, err := ds.Add(childnd)
	if err != nil {
		return nil, err
	}

	// ensure no link with that name already exists
	_ = root.RemoveNodeLink(childname) // ignore error, only option is ErrNotFound

	if err := root.AddNodeLinkClean(childname, childnd); err != nil {
		return nil, err
	}

	if _, err := ds.Add(root); err != nil {
		return nil, err
	}
	return root, nil
}
// Pin the given node, optionally recursively
func (p *pinner) Pin(ctx context.Context, node *mdag.Node, recurse bool) error {
	p.lock.Lock()
	defer p.lock.Unlock()
	k, err := node.Key()
	if err != nil {
		return err
	}

	if recurse {
		if p.recursePin.HasKey(k) {
			return nil
		}

		if p.directPin.HasKey(k) {
			p.directPin.RemoveBlock(k)
		}

		err := p.pinLinks(ctx, node)
		if err != nil {
			return err
		}

		p.recursePin.AddBlock(k)
	} else {
		if _, err := p.dserv.Get(ctx, k); err != nil {
			return err
		}

		if p.recursePin.HasKey(k) {
			return fmt.Errorf("%s already pinned recursively", k.B58String())
		}

		p.directPin.AddBlock(k)
	}
	return nil
}
func setDataCaller(req cmds.Request, root *dag.Node) (key.Key, error) {
	if len(req.Arguments()) < 3 {
		return "", fmt.Errorf("not enough arguments for set-data")
	}

	nd, err := req.InvocContext().GetNode()
	if err != nil {
		return "", err
	}

	root.Data = []byte(req.Arguments()[2])

	newkey, err := nd.DAG.Add(root)
	if err != nil {
		return "", err
	}

	return newkey, nil
}
func rmLink(ctx context.Context, ds dag.DAGService, root *dag.Node, path []string) (*dag.Node, error) {
	if len(path) == 1 {
		// base case, remove node in question
		err := root.RemoveNodeLink(path[0])
		if err != nil {
			return nil, err
		}

		_, err = ds.Add(root)
		if err != nil {
			return nil, err
		}

		return root, nil
	}

	nd, err := root.GetLinkedNode(ctx, ds, path[0])
	if err != nil {
		return nil, err
	}

	nnode, err := rmLink(ctx, ds, nd, path[1:])
	if err != nil {
		return nil, err
	}

	_ = root.RemoveNodeLink(path[0])
	err = root.AddNodeLinkClean(path[0], nnode)
	if err != nil {
		return nil, err
	}

	_, err = ds.Add(root)
	if err != nil {
		return nil, err
	}

	return root, nil
}
func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(cryptix): either ask mildred about the flow of this or rewrite it
	webErrorWithCode(w, "Sorry, PUT is bugged right now, closing request", errors.New("handler disabled"), http.StatusInternalServerError)
	return

	urlPath := r.URL.Path
	pathext := urlPath[5:]
	var err error
	if urlPath == ipfsPathPrefix+"QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn/" {
		i.putEmptyDirHandler(w, r)
		return
	}

	var newnode *dag.Node
	if pathext[len(pathext)-1] == '/' {
		newnode = uio.NewEmptyDirectory()
	} else {
		newnode, err = i.newDagFromReader(r.Body)
		if err != nil {
			webError(w, "Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}
	}

	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		// FIXME HTTP error code
		webError(w, "Could not resolve name", err, http.StatusInternalServerError)
		return
	}

	k, err := ipfsNode.Key()
	if err != nil {
		webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError)
		return
	}

	h, components, err := path.SplitAbsPath(path.FromKey(k))
	if err != nil {
		webError(w, "Could not split path", err, http.StatusInternalServerError)
		return
	}

	if len(components) < 1 {
		err = fmt.Errorf("Cannot override existing object")
		webError(w, "http gateway", err, http.StatusBadRequest)
		return
	}

	tctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	// TODO(cryptix): could this be core.Resolve() too?
	rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h))
	if err != nil {
		webError(w, "Could not resolve root object", err, http.StatusBadRequest)
		return
	}

	// resolving path components into merkledag nodes. if a component does not
	// resolve, create empty directories (which will be linked and populated below.)
	pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
	if _, ok := err.(path.ErrNoLink); ok {
		// Create empty directories, links will be made further down the code
		for len(pathNodes) < len(components) {
			pathNodes = append(pathNodes, uio.NewDirectory(i.node.DAG).GetNode())
		}
	} else if err != nil {
		webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
		return
	}

	for i := len(pathNodes) - 1; i >= 0; i-- {
		newnode, err = pathNodes[i].UpdateNodeLink(components[i], newnode)
		if err != nil {
			webError(w, "Could not update node links", err, http.StatusInternalServerError)
			return
		}
	}

	if err := i.node.DAG.AddRecursive(newnode); err != nil {
		webError(w, "Could not add recursively new node", err, http.StatusInternalServerError)
		return
	}

	// Redirect to new path
	key, err := newnode.Key()
	if err != nil {
		webError(w, "Could not get key of new node", err, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", key.String())
	http.Redirect(w, r, ipfsPathPrefix+key.String()+"/"+strings.Join(components, "/"), http.StatusCreated)
}
// dagTruncate truncates the given node to 'size' and returns the modified Node
func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGService) (*mdag.Node, error) {
	if len(nd.Links) == 0 {
		// TODO: this can likely be done without marshaling and remarshaling
		pbn, err := ft.FromBytes(nd.Data)
		if err != nil {
			return nil, err
		}

		nd.Data = ft.WrapData(pbn.Data[:size])
		return nd, nil
	}

	var cur uint64
	end := 0
	var modified *mdag.Node
	ndata := new(ft.FSNode)
	for i, lnk := range nd.Links {
		child, err := lnk.GetNode(ctx, ds)
		if err != nil {
			return nil, err
		}

		childsize, err := ft.DataSize(child.Data)
		if err != nil {
			return nil, err
		}

		// found the child we want to cut
		if size < cur+childsize {
			nchild, err := dagTruncate(ctx, child, size-cur, ds)
			if err != nil {
				return nil, err
			}

			ndata.AddBlockSize(size - cur)

			modified = nchild
			end = i
			break
		}
		cur += childsize
		ndata.AddBlockSize(childsize)
	}

	_, err := ds.Add(modified)
	if err != nil {
		return nil, err
	}

	nd.Links = nd.Links[:end]
	err = nd.AddNodeLinkClean("", modified)
	if err != nil {
		return nil, err
	}

	d, err := ndata.GetBytes()
	if err != nil {
		return nil, err
	}

	nd.Data = d

	// invalidate cache and recompute serialized data
	_, err = nd.Encoded(true)
	if err != nil {
		return nil, err
	}

	return nd, nil
}
// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'.
// It returns the new key of the passed in node and whether or not all the data in
// the reader has been consumed.
func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (key.Key, bool, error) {
	f, err := ft.FromBytes(node.Data)
	if err != nil {
		return "", false, err
	}

	// If we've reached a leaf node.
	if len(node.Links) == 0 {
		n, err := data.Read(f.Data[offset:])
		if err != nil && err != io.EOF {
			return "", false, err
		}

		// Update newly written node..
		b, err := proto.Marshal(f)
		if err != nil {
			return "", false, err
		}

		nd := &mdag.Node{Data: b}
		k, err := dm.dagserv.Add(nd)
		if err != nil {
			return "", false, err
		}

		// Hey look! we're done!
		var done bool
		if n < len(f.Data[offset:]) {
			done = true
		}

		return k, done, nil
	}

	var cur uint64
	var done bool
	for i, bs := range f.GetBlocksizes() {
		// We found the correct child to write into
		if cur+bs > offset {
			// Unpin block
			ckey := key.Key(node.Links[i].Hash)
			dm.mp.RemovePinWithMode(ckey, pin.Indirect)

			child, err := node.Links[i].GetNode(dm.ctx, dm.dagserv)
			if err != nil {
				return "", false, err
			}
			k, sdone, err := dm.modifyDag(child, offset-cur, data)
			if err != nil {
				return "", false, err
			}

			// pin the new node
			dm.mp.PinWithMode(k, pin.Indirect)

			offset += bs
			node.Links[i].Hash = mh.Multihash(k)

			// Recache serialized node
			_, err = node.Encoded(true)
			if err != nil {
				return "", false, err
			}

			if sdone {
				// No more bytes to write!
				done = true
				break
			}
			offset = cur + bs
		}
		cur += bs
	}

	k, err := dm.dagserv.Add(node)
	return k, done, err
}
func addLink(t *testing.T, a, b *mdag.Node) {
	to := string(a.Data) + "2" + string(b.Data)
	if err := a.AddNodeLink(to, b); err != nil {
		t.Error(err)
	}
}
func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
	rall, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}

	r = bytes.NewReader(rall)

	tr := tar.NewReader(r)

	root := new(dag.Node)
	root.Data = []byte("ipfs/tar")

	e := dagutil.NewDagEditor(ds, root)

	for {
		h, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}

		header := new(dag.Node)

		headerBytes, err := marshalHeader(h)
		if err != nil {
			return nil, err
		}

		header.Data = headerBytes

		if h.Size > 0 {
			spl := chunk.NewRabin(tr, uint64(chunk.DefaultBlockSize))
			nd, err := importer.BuildDagFromReader(ds, spl, nil)
			if err != nil {
				return nil, err
			}

			err = header.AddNodeLinkClean("data", nd)
			if err != nil {
				return nil, err
			}
		}

		_, err = ds.Add(header)
		if err != nil {
			return nil, err
		}

		path := escapePath(h.Name)
		err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.Node { return new(dag.Node) })
		if err != nil {
			return nil, err
		}
	}

	root = e.GetNode()
	_, err = ds.Add(root)
	if err != nil {
		return nil, err
	}

	return root, nil
}