// deserializeNode converts the Node object into a real dag.Node.
func deserializeNode(node *Node, dataFieldEncoding string) (*dag.Node, error) {
	dagnode := new(dag.Node)
	switch dataFieldEncoding {
	case "text":
		dagnode.SetData([]byte(node.Data))
	case "base64":
		data, err := base64.StdEncoding.DecodeString(node.Data)
		if err != nil {
			return nil, err
		}
		dagnode.SetData(data)
	default:
		return nil, fmt.Errorf("unknown data field encoding")
	}

	dagnode.Links = make([]*dag.Link, len(node.Links))
	for i, link := range node.Links {
		hash, err := mh.FromB58String(link.Hash)
		if err != nil {
			return nil, err
		}
		dagnode.Links[i] = &dag.Link{
			Name: link.Name,
			Size: link.Size,
			Hash: hash,
		}
	}

	return dagnode, nil
}
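// Illustrative only: a minimal sketch of calling deserializeNode with a
// base64-encoded data field. The Node literal below assumes the field names
// implied by the function above; it is not the package's actual wire format.
func deserializeExample() (*dag.Node, error) {
	n := &Node{
		Data: base64.StdEncoding.EncodeToString([]byte("hello world")),
	}
	return deserializeNode(n, "base64")
}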
func randNode() (*merkledag.Node, key.Key) {
	node := new(merkledag.Node)
	node.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(node.Data())
	k, _ := node.Key()
	return node, k
}
func (d *Directory) Mkdir(name string) (*Directory, error) {
	d.lock.Lock()
	defer d.lock.Unlock()

	fsn, err := d.childUnsync(name)
	if err == nil {
		switch fsn := fsn.(type) {
		case *Directory:
			return fsn, os.ErrExist
		case *File:
			return nil, os.ErrExist
		default:
			return nil, fmt.Errorf("unrecognized type: %#v", fsn)
		}
	}

	ndir := new(dag.Node)
	ndir.SetData(ft.FolderPBData())

	_, err = d.dserv.Add(ndir)
	if err != nil {
		return nil, err
	}

	err = d.node.AddNodeLinkClean(name, ndir)
	if err != nil {
		return nil, err
	}

	dirobj := NewDirectory(d.ctx, name, ndir, d, d.dserv)
	d.childDirs[name] = dirobj
	return dirobj, nil
}
func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
	rall, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}

	r = bytes.NewReader(rall)
	tr := tar.NewReader(r)

	root := new(dag.Node)
	root.SetData([]byte("ipfs/tar"))

	e := dagutil.NewDagEditor(root, ds)

	for {
		h, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}

		header := new(dag.Node)

		headerBytes, err := marshalHeader(h)
		if err != nil {
			return nil, err
		}

		header.SetData(headerBytes)

		if h.Size > 0 {
			spl := chunk.NewRabin(tr, uint64(chunk.DefaultBlockSize))
			nd, err := importer.BuildDagFromReader(ds, spl)
			if err != nil {
				return nil, err
			}

			err = header.AddNodeLinkClean("data", nd)
			if err != nil {
				return nil, err
			}
		}

		_, err = ds.Add(header)
		if err != nil {
			return nil, err
		}

		path := escapePath(h.Name)
		err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.Node { return new(dag.Node) })
		if err != nil {
			return nil, err
		}
	}

	return e.Finalize(ds)
}
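// Illustrative only: a minimal sketch of feeding a tar archive from disk into
// ImportTar. The DAGService value is assumed to come from whatever node setup
// the surrounding package provides; "archive.tar" is a placeholder path.
func importTarExample(ds dag.DAGService) (*dag.Node, error) {
	f, err := os.Open("archive.tar")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return ImportTar(f, ds)
}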
func randNode() (*mdag.Node, key.Key) {
	nd := new(mdag.Node)
	nd.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(nd.Data())
	k, _ := nd.Key()
	return nd, k
}
func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error) {
	ukey := key.B58KeyDecode(skey)

	nd, err := n.DAG.Get(n.Context(), ukey)
	if err != nil {
		return "", err
	}

	mdnode := new(dag.Node)
	mdata, err := ft.BytesForMetadata(m)
	if err != nil {
		return "", err
	}

	mdnode.SetData(mdata)
	if err := mdnode.AddNodeLinkClean("file", nd); err != nil {
		return "", err
	}

	nk, err := n.DAG.Add(mdnode)
	if err != nil {
		return "", err
	}

	return nk.B58String(), nil
}
func (s *Shell) NewObject(template string) (string, error) {
	node := new(dag.Node)
	switch template {
	case "":
		break
	case "unixfs-dir":
		node.SetData(ft.FolderPBData())
	default:
		return "", fmt.Errorf("unknown template %s", template)
	}

	c, err := s.node.DAG.Add(node)
	if err != nil {
		return "", err
	}

	return c.String(), nil
}
func TestAppendSingleBytesToEmpty(t *testing.T) {
	ds := mdtest.Mock()

	data := []byte("AB")

	nd := new(merkledag.Node)
	nd.SetData(ft.FilePBData(nil, 0))

	dbp := &h.DagBuilderParams{
		Dagserv:  ds,
		Maxlinks: 4,
	}

	spl := chunk.SizeSplitterGen(500)

	ctx := context.Background()
	nnode, err := TrickleAppend(ctx, nd, dbp.New(spl(bytes.NewReader(data[:1]))))
	if err != nil {
		t.Fatal(err)
	}

	nnode, err = TrickleAppend(ctx, nnode, dbp.New(spl(bytes.NewReader(data[1:]))))
	if err != nil {
		t.Fatal(err)
	}

	fread, err := uio.NewDagReader(ctx, nnode, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(fread)
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println(out, data)
	err = arrComp(out, data)
	if err != nil {
		t.Fatal(err)
	}
}
func writeHdr(n *merkledag.Node, hdr *pb.Set) error {
	hdrData, err := proto.Marshal(hdr)
	if err != nil {
		return err
	}

	// reserve room for the uvarint length prefix plus the marshaled header,
	// then trim the prefix to its written size and append the header bytes
	n.SetData(make([]byte, binary.MaxVarintLen64, binary.MaxVarintLen64+len(hdrData)))
	written := binary.PutUvarint(n.Data(), uint64(len(hdrData)))
	n.SetData(n.Data()[:written])
	n.SetData(append(n.Data(), hdrData...))

	return nil
}
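// For context, a minimal sketch (illustrative only, not the package's actual
// reader) of parsing the layout writeHdr produces: a uvarint length prefix
// followed by the marshaled pb.Set bytes.
func readHdrSketch(data []byte) (*pb.Set, error) {
	hdrLen, n := binary.Uvarint(data)
	if n <= 0 {
		return nil, fmt.Errorf("invalid uvarint length prefix")
	}
	if hdrLen > uint64(len(data)-n) {
		return nil, fmt.Errorf("header length %d exceeds available data", hdrLen)
	}

	hdr := new(pb.Set)
	if err := proto.Unmarshal(data[n:n+int(hdrLen)], hdr); err != nil {
		return nil, err
	}
	return hdr, nil
}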
// dagTruncate truncates the given node to 'size' and returns the modified Node.
func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGService) (*mdag.Node, error) {
	if len(nd.Links) == 0 {
		// TODO: this can likely be done without marshaling and remarshaling
		pbn, err := ft.FromBytes(nd.Data())
		if err != nil {
			return nil, err
		}

		nd.SetData(ft.WrapData(pbn.Data[:size]))
		return nd, nil
	}

	var cur uint64
	end := 0
	var modified *mdag.Node
	ndata := new(ft.FSNode)
	for i, lnk := range nd.Links {
		child, err := lnk.GetNode(ctx, ds)
		if err != nil {
			return nil, err
		}

		childsize, err := ft.DataSize(child.Data())
		if err != nil {
			return nil, err
		}

		// found the child we want to cut
		if size < cur+childsize {
			nchild, err := dagTruncate(ctx, child, size-cur, ds)
			if err != nil {
				return nil, err
			}

			ndata.AddBlockSize(size - cur)

			modified = nchild
			end = i
			break
		}
		cur += childsize
		ndata.AddBlockSize(childsize)
	}

	_, err := ds.Add(modified)
	if err != nil {
		return nil, err
	}

	nd.Links = nd.Links[:end]
	err = nd.AddNodeLinkClean("", modified)
	if err != nil {
		return nil, err
	}

	d, err := ndata.GetBytes()
	if err != nil {
		return nil, err
	}

	nd.SetData(d)

	// invalidate cache and recompute serialized data
	_, err = nd.EncodeProtobuf(true)
	if err != nil {
		return nil, err
	}

	return nd, nil
}
// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'.
// It returns the new key of the passed in node and whether or not all the data in
// the reader has been consumed.
func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (key.Key, bool, error) {
	f, err := ft.FromBytes(node.Data())
	if err != nil {
		return "", false, err
	}

	// If we've reached a leaf node.
	if len(node.Links) == 0 {
		n, err := data.Read(f.Data[offset:])
		if err != nil && err != io.EOF {
			return "", false, err
		}

		// Update newly written node..
		b, err := proto.Marshal(f)
		if err != nil {
			return "", false, err
		}

		nd := new(mdag.Node)
		nd.SetData(b)
		k, err := dm.dagserv.Add(nd)
		if err != nil {
			return "", false, err
		}

		// Hey look! we're done!
		var done bool
		if n < len(f.Data[offset:]) {
			done = true
		}

		return k, done, nil
	}

	var cur uint64
	var done bool
	for i, bs := range f.GetBlocksizes() {
		// We found the correct child to write into
		if cur+bs > offset {
			child, err := node.Links[i].GetNode(dm.ctx, dm.dagserv)
			if err != nil {
				return "", false, err
			}

			k, sdone, err := dm.modifyDag(child, offset-cur, data)
			if err != nil {
				return "", false, err
			}

			offset += bs
			node.Links[i].Hash = mh.Multihash(k)

			// Recache serialized node
			_, err = node.EncodeProtobuf(true)
			if err != nil {
				return "", false, err
			}

			if sdone {
				// No more bytes to write!
				done = true
				break
			}
			offset = cur + bs
		}
		cur += bs
	}

	k, err := dm.dagserv.Add(node)
	return k, done, err
}
// NewEmptyDirectory returns an empty merkledag Node with a folder Data chunk.
func NewEmptyDirectory() *mdag.Node {
	nd := new(mdag.Node)
	nd.SetData(format.FolderPBData())
	return nd
}
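// Illustrative only: a minimal sketch of adding the empty directory node to a
// DAGService and printing the resulting key. The 'ds' parameter is an assumption
// about the caller's environment, not something defined in this package.
func emptyDirExample(ds mdag.DAGService) error {
	nd := NewEmptyDirectory()
	k, err := ds.Add(nd)
	if err != nil {
		return err
	}
	fmt.Println("empty directory key:", k)
	return nil
}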