// converts the Node object into a real dag.ProtoNode
func deserializeNode(nd *Node, dataFieldEncoding string) (*dag.ProtoNode, error) {
	dagnode := new(dag.ProtoNode)
	switch dataFieldEncoding {
	case "text":
		dagnode.SetData([]byte(nd.Data))
	case "base64":
		data, err := base64.StdEncoding.DecodeString(nd.Data)
		if err != nil {
			return nil, err
		}
		dagnode.SetData(data)
	default:
		return nil, fmt.Errorf("unknown data field encoding")
	}

	dagnode.SetLinks(make([]*node.Link, len(nd.Links)))
	for i, link := range nd.Links {
		c, err := cid.Decode(link.Hash)
		if err != nil {
			return nil, err
		}

		dagnode.Links()[i] = &node.Link{
			Name: link.Name,
			Size: link.Size,
			Cid:  c,
		}
	}

	return dagnode, nil
}
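// A minimal usage sketch (not part of the original file): deserializeExample
// and its payload are hypothetical and error handling is elided; it only
// illustrates that the "base64" encoding selects the decoded form of Data.
func deserializeExample() (*dag.ProtoNode, error) {
	raw := []byte("hello merkledag")
	n := &Node{Data: base64.StdEncoding.EncodeToString(raw)}
	return deserializeNode(n, "base64")
}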
// randNode returns a ProtoNode filled with 32 bytes of random data, along with its cid.
func randNode() (*mdag.ProtoNode, *cid.Cid) {
	nd := new(mdag.ProtoNode)
	nd.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(nd.Data())
	k := nd.Cid()
	return nd, k
}
// AddMetadataTo wraps the node referenced by skey in a new metadata node
// and returns the key of that wrapper node.
func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error) {
	c, err := cid.Decode(skey)
	if err != nil {
		return "", err
	}

	nd, err := n.DAG.Get(n.Context(), c)
	if err != nil {
		return "", err
	}

	mdnode := new(dag.ProtoNode)
	mdata, err := ft.BytesForMetadata(m)
	if err != nil {
		return "", err
	}

	mdnode.SetData(mdata)
	if err := mdnode.AddNodeLinkClean("file", nd); err != nil {
		return "", err
	}

	nk, err := n.DAG.Add(mdnode)
	if err != nil {
		return "", err
	}

	return nk.String(), nil
}
// Mkdir creates a child directory called name under this directory,
// returning os.ErrExist if a child with that name is already present.
func (d *Directory) Mkdir(name string) (*Directory, error) {
	d.lock.Lock()
	defer d.lock.Unlock()

	fsn, err := d.childUnsync(name)
	if err == nil {
		switch fsn := fsn.(type) {
		case *Directory:
			return fsn, os.ErrExist
		case *File:
			return nil, os.ErrExist
		default:
			return nil, fmt.Errorf("unrecognized type: %#v", fsn)
		}
	}

	ndir := new(dag.ProtoNode)
	ndir.SetData(ft.FolderPBData())

	_, err = d.dserv.Add(ndir)
	if err != nil {
		return nil, err
	}

	err = d.node.AddNodeLinkClean(name, ndir)
	if err != nil {
		return nil, err
	}

	dirobj := NewDirectory(d.ctx, name, ndir, d, d.dserv)
	d.childDirs[name] = dirobj
	return dirobj, nil
}
// ImportTar reads a tar stream and builds a DAG rooted at an "ipfs/tar" node:
// each tar header becomes a node, with the entry's file data (if any) chunked
// and linked under "data", and the tree laid out according to the entry path.
func ImportTar(r io.Reader, ds dag.DAGService) (*dag.ProtoNode, error) {
	tr := tar.NewReader(r)

	root := new(dag.ProtoNode)
	root.SetData([]byte("ipfs/tar"))

	e := dagutil.NewDagEditor(root, ds)

	for {
		h, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}

		header := new(dag.ProtoNode)

		headerBytes, err := marshalHeader(h)
		if err != nil {
			return nil, err
		}

		header.SetData(headerBytes)

		if h.Size > 0 {
			spl := chunk.NewRabin(tr, uint64(chunk.DefaultBlockSize))
			nd, err := importer.BuildDagFromReader(ds, spl)
			if err != nil {
				return nil, err
			}

			err = header.AddNodeLinkClean("data", nd)
			if err != nil {
				return nil, err
			}
		}

		_, err = ds.Add(header)
		if err != nil {
			return nil, err
		}

		path := escapePath(h.Name)
		err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.ProtoNode { return new(dag.ProtoNode) })
		if err != nil {
			return nil, err
		}
	}

	return e.Finalize(ds)
}
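// A minimal usage sketch (not part of the original file): importTarFile and
// its use of mdtest.Mock() as a throwaway DAGService are assumptions made for
// illustration only; a real caller would supply its own DAGService.
func importTarFile(path string) (*dag.ProtoNode, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return ImportTar(f, mdtest.Mock())
}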
// randNode returns a ProtoNode filled with 32 bytes of random data, along with its key.
func randNode() (*merkledag.ProtoNode, key.Key) {
	node := new(merkledag.ProtoNode)
	node.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(node.Data())
	k := node.Key()
	return node, k
}
func writeHdr(n *merkledag.ProtoNode, hdr *pb.Set) error {
	hdrData, err := proto.Marshal(hdr)
	if err != nil {
		return err
	}

	// make enough space for the length prefix and the marshalled header data
	data := make([]byte, binary.MaxVarintLen64, binary.MaxVarintLen64+len(hdrData))

	// write the uvarint length of the header data
	uvarlen := binary.PutUvarint(data, uint64(len(hdrData)))

	// append the actual protobuf data *after* the length value we wrote
	data = append(data[:uvarlen], hdrData...)

	n.SetData(data)
	return nil
}
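// A minimal sketch of the inverse operation (not part of the original file):
// readHdr is a hypothetical helper showing how the uvarint length prefix
// written by writeHdr would be consumed when decoding the node's data.
func readHdr(n *merkledag.ProtoNode) (*pb.Set, error) {
	data := n.Data()

	// read the uvarint length prefix, then unmarshal exactly that many bytes
	hdrLen, uvarlen := binary.Uvarint(data)
	if uvarlen <= 0 {
		return nil, fmt.Errorf("invalid uvarint length prefix")
	}

	hdr := new(pb.Set)
	if err := proto.Unmarshal(data[uvarlen:uvarlen+int(hdrLen)], hdr); err != nil {
		return nil, err
	}
	return hdr, nil
}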
func TestAppendSingleBytesToEmpty(t *testing.T) {
	ds := mdtest.Mock()

	data := []byte("AB")

	nd := new(merkledag.ProtoNode)
	nd.SetData(ft.FilePBData(nil, 0))

	dbp := &h.DagBuilderParams{
		Dagserv:  ds,
		Maxlinks: 4,
	}

	spl := chunk.SizeSplitterGen(500)

	ctx := context.Background()

	nnode, err := TrickleAppend(ctx, nd, dbp.New(spl(bytes.NewReader(data[:1]))))
	if err != nil {
		t.Fatal(err)
	}

	nnode, err = TrickleAppend(ctx, nnode, dbp.New(spl(bytes.NewReader(data[1:]))))
	if err != nil {
		t.Fatal(err)
	}

	fread, err := uio.NewDagReader(ctx, nnode, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(fread)
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println(out, data)
	err = arrComp(out, data)
	if err != nil {
		t.Fatal(err)
	}
}
// NewEmptyDirectory returns an empty merkledag Node with a folder Data chunk
func NewEmptyDirectory() *mdag.ProtoNode {
	nd := new(mdag.ProtoNode)
	nd.SetData(format.FolderPBData())
	return nd
}
// randNode returns a ProtoNode filled with 32 bytes of random data.
func randNode() *merkledag.ProtoNode {
	node := new(merkledag.ProtoNode)
	node.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(node.Data())
	return node
}
// dagTruncate truncates the given node to 'size' and returns the modified Node
func dagTruncate(ctx context.Context, nd *mdag.ProtoNode, size uint64, ds mdag.DAGService) (*mdag.ProtoNode, error) {
	if len(nd.Links()) == 0 {
		// TODO: this can likely be done without marshaling and remarshaling
		pbn, err := ft.FromBytes(nd.Data())
		if err != nil {
			return nil, err
		}

		nd.SetData(ft.WrapData(pbn.Data[:size]))
		return nd, nil
	}

	var cur uint64
	end := 0
	var modified *mdag.ProtoNode
	ndata := new(ft.FSNode)
	for i, lnk := range nd.Links() {
		child, err := lnk.GetNode(ctx, ds)
		if err != nil {
			return nil, err
		}

		childpb, ok := child.(*mdag.ProtoNode)
		if !ok {
			return nil, mdag.ErrNotProtobuf
		}

		childsize, err := ft.DataSize(childpb.Data())
		if err != nil {
			return nil, err
		}

		// found the child we want to cut
		if size < cur+childsize {
			nchild, err := dagTruncate(ctx, childpb, size-cur, ds)
			if err != nil {
				return nil, err
			}

			ndata.AddBlockSize(size - cur)

			modified = nchild
			end = i
			break
		}

		cur += childsize
		ndata.AddBlockSize(childsize)
	}

	_, err := ds.Add(modified)
	if err != nil {
		return nil, err
	}

	nd.SetLinks(nd.Links()[:end])
	err = nd.AddNodeLinkClean("", modified)
	if err != nil {
		return nil, err
	}

	d, err := ndata.GetBytes()
	if err != nil {
		return nil, err
	}

	nd.SetData(d)

	// invalidate cache and recompute serialized data
	_, err = nd.EncodeProtobuf(true)
	if err != nil {
		return nil, err
	}

	return nd, nil
}
// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'
// returns the new key of the passed in node and whether or not all the data in the reader
// has been consumed.
func (dm *DagModifier) modifyDag(node *mdag.ProtoNode, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
	f, err := ft.FromBytes(node.Data())
	if err != nil {
		return nil, false, err
	}

	// If we've reached a leaf node.
	if len(node.Links()) == 0 {
		n, err := data.Read(f.Data[offset:])
		if err != nil && err != io.EOF {
			return nil, false, err
		}

		// Update newly written node..
		b, err := proto.Marshal(f)
		if err != nil {
			return nil, false, err
		}

		nd := new(mdag.ProtoNode)
		nd.SetData(b)
		k, err := dm.dagserv.Add(nd)
		if err != nil {
			return nil, false, err
		}

		// Hey look! we're done!
		var done bool
		if n < len(f.Data[offset:]) {
			done = true
		}

		return k, done, nil
	}

	var cur uint64
	var done bool
	for i, bs := range f.GetBlocksizes() {
		// We found the correct child to write into
		if cur+bs > offset {
			child, err := node.Links()[i].GetNode(dm.ctx, dm.dagserv)
			if err != nil {
				return nil, false, err
			}

			childpb, ok := child.(*mdag.ProtoNode)
			if !ok {
				return nil, false, mdag.ErrNotProtobuf
			}

			k, sdone, err := dm.modifyDag(childpb, offset-cur, data)
			if err != nil {
				return nil, false, err
			}

			offset += bs
			node.Links()[i].Cid = k

			// Recache serialized node
			_, err = node.EncodeProtobuf(true)
			if err != nil {
				return nil, false, err
			}

			if sdone {
				// No more bytes to write!
				done = true
				break
			}
			offset = cur + bs
		}
		cur += bs
	}

	k, err := dm.dagserv.Add(node)
	return k, done, err
}
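// A minimal sketch (not part of the original file) of the blocksize walk that
// modifyDag and dagTruncate both rely on: childForOffset is a hypothetical
// helper that locates which child covers a given byte offset, using the
// cumulative Blocksizes recorded in a unixfs node.
func childForOffset(blocksizes []uint64, offset uint64) (childIndex int, offsetInChild uint64) {
	var cur uint64
	for i, bs := range blocksizes {
		// the child starting at 'cur' spans the byte range [cur, cur+bs)
		if cur+bs > offset {
			return i, offset - cur
		}
		cur += bs
	}
	return -1, 0 // offset lies past the end of the data described by this node
}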