func readHdr(n *merkledag.Node) (*pb.Set, []byte, error) {
	hdrLenRaw, consumed := binary.Uvarint(n.Data())
	if consumed <= 0 {
		return nil, nil, errors.New("invalid Set header length")
	}

	buf := n.Data()[consumed:]
	if hdrLenRaw > uint64(len(buf)) {
		return nil, nil, errors.New("impossibly large Set header length")
	}
	// hdrLenRaw is <= len(buf), which is an int, so it fits in an int.
	hdrLen := int(hdrLenRaw)

	var hdr pb.Set
	if err := proto.Unmarshal(buf[:hdrLen], &hdr); err != nil {
		return nil, nil, err
	}
	buf = buf[hdrLen:]

	if v := hdr.GetVersion(); v != 1 {
		return nil, nil, fmt.Errorf("unsupported Set version: %d", v)
	}
	if uint64(hdr.GetFanout()) > uint64(len(n.Links)) {
		return nil, nil, errors.New("impossibly large Fanout")
	}
	return &hdr, buf, nil
}
// NewDagReader creates a new reader object that reads the data represented by
// the given node, using the passed in DAGService for data retrieval
func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*DagReader, error) {
	pb := new(ftpb.Data)
	if err := proto.Unmarshal(n.Data(), pb); err != nil {
		return nil, err
	}

	switch pb.GetType() {
	case ftpb.Data_Directory:
		// Don't allow reading directories
		return nil, ErrIsDir
	case ftpb.Data_Raw:
		fallthrough
	case ftpb.Data_File:
		return NewDataFileReader(ctx, n, pb, serv), nil
	case ftpb.Data_Metadata:
		if len(n.Links) == 0 {
			return nil, errors.New("incorrectly formatted metadata object")
		}
		child, err := n.Links[0].GetNode(ctx, serv)
		if err != nil {
			return nil, err
		}
		return NewDagReader(ctx, child, serv)
	case ftpb.Data_Symlink:
		return nil, ErrCantReadSymlinks
	default:
		return nil, ft.ErrUnrecognizedType
	}
}
func printDag(nd *mdag.Node, ds mdag.DAGService, indent int) {
	pbd, err := ft.FromBytes(nd.Data())
	if err != nil {
		panic(err)
	}

	for i := 0; i < indent; i++ {
		fmt.Print(" ")
	}
	fmt.Printf("{size = %d, type = %s, children = %d",
		pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
	if len(nd.Links) > 0 {
		fmt.Println()
	}
	for _, lnk := range nd.Links {
		child, err := lnk.GetNode(context.Background(), ds)
		if err != nil {
			panic(err)
		}
		printDag(child, ds, indent+1)
	}
	if len(nd.Links) > 0 {
		for i := 0; i < indent; i++ {
			fmt.Print(" ")
		}
	}
	fmt.Println("}")
}
func randNode() (*mdag.Node, key.Key) {
	nd := new(mdag.Node)
	nd.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(nd.Data())
	k, _ := nd.Key()
	return nd, k
}
func randNode() (*merkledag.Node, key.Key) {
	node := new(merkledag.Node)
	node.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(node.Data())
	k, _ := node.Key()
	return node, k
}
func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
	rall, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	r = bytes.NewReader(rall)

	tr := tar.NewReader(r)

	root := new(dag.Node)
	root.Data = []byte("ipfs/tar")
	e := dagutil.NewDagEditor(root, ds)

	for {
		h, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}

		header := new(dag.Node)

		headerBytes, err := marshalHeader(h)
		if err != nil {
			return nil, err
		}
		header.Data = headerBytes

		if h.Size > 0 {
			spl := chunk.NewRabin(tr, uint64(chunk.DefaultBlockSize))
			nd, err := importer.BuildDagFromReader(ds, spl)
			if err != nil {
				return nil, err
			}

			err = header.AddNodeLinkClean("data", nd)
			if err != nil {
				return nil, err
			}
		}

		_, err = ds.Add(header)
		if err != nil {
			return nil, err
		}

		path := escapePath(h.Name)
		err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.Node { return new(dag.Node) })
		if err != nil {
			return nil, err
		}
	}

	return e.Finalize(ds)
}
// Recursive call for verifying the structure of a trickledag
func verifyTDagRec(nd *dag.Node, depth, direct, layerRepeat int, ds dag.DAGService) error {
	if depth == 0 {
		// zero depth dag is raw data block
		if len(nd.Links) > 0 {
			return errors.New("expected direct block")
		}

		pbn, err := ft.FromBytes(nd.Data())
		if err != nil {
			return err
		}

		if pbn.GetType() != ft.TRaw {
			return errors.New("expected raw block")
		}
		return nil
	}

	// Verify this is a branch node
	pbn, err := ft.FromBytes(nd.Data())
	if err != nil {
		return err
	}

	if pbn.GetType() != ft.TFile {
		return errors.New("expected file as branch node")
	}

	if len(pbn.Data) > 0 {
		return errors.New("branch node should not have data")
	}

	for i := 0; i < len(nd.Links); i++ {
		child, err := nd.Links[i].GetNode(context.TODO(), ds)
		if err != nil {
			return err
		}

		if i < direct {
			// Direct blocks
			err := verifyTDagRec(child, 0, direct, layerRepeat, ds)
			if err != nil {
				return err
			}
		} else {
			// Recursive trickle dags
			rdepth := ((i - direct) / layerRepeat) + 1
			if rdepth >= depth && depth > 0 {
				return errors.New("child dag was too deep")
			}
			err := verifyTDagRec(child, rdepth, direct, layerRepeat, ds)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
func addLink(t *testing.T, ds mdag.DAGService, a, b *mdag.Node) {
	to := string(a.Data()) + "2" + string(b.Data())
	if _, err := ds.Add(b); err != nil {
		t.Error(err)
	}
	if err := a.AddNodeLink(to, b); err != nil {
		t.Error(err)
	}
}
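// A hedged test sketch showing how the randNode and addLink helpers above might
// be exercised together. It assumes mdtest.Mock() returns an mdag.DAGService (as
// in the TrickleAppend tests elsewhere in this section) and that both helpers
// are in scope; this is an illustrative addition, not code from the repository.
func TestAddLinkSketch(t *testing.T) {
	ds := mdtest.Mock()

	parent, _ := randNode()
	child, _ := randNode()

	// addLink stores 'child' in the DAGService and links it under a name
	// derived from both nodes' data.
	addLink(t, ds, parent, child)

	if len(parent.Links) != 1 {
		t.Fatalf("expected 1 link on parent, got %d", len(parent.Links))
	}
}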
func ExportTar(ctx context.Context, root *dag.Node, ds dag.DAGService) (io.Reader, error) {
	if string(root.Data()) != "ipfs/tar" {
		return nil, errors.New("not an ipfs tarchive")
	}

	return &tarReader{
		links: root.Links,
		ds:    ds,
		ctx:   ctx,
	}, nil
}
// NewUnixfsNodeFromDag reconstructs a Unixfs node from a given dag node
func NewUnixfsNodeFromDag(nd *dag.Node) (*UnixfsNode, error) {
	mb, err := ft.FSNodeFromBytes(nd.Data())
	if err != nil {
		return nil, err
	}

	return &UnixfsNode{
		node: nd,
		ufmt: mb,
	}, nil
}
func writeHdr(n *merkledag.Node, hdr *pb.Set) error {
	hdrData, err := proto.Marshal(hdr)
	if err != nil {
		return err
	}

	n.Data = make([]byte, binary.MaxVarintLen64, binary.MaxVarintLen64+len(hdrData))
	written := binary.PutUvarint(n.Data, uint64(len(hdrData)))
	n.Data = n.Data[:written]
	n.Data = append(n.Data, hdrData...)

	return nil
}
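// The sketch below is a minimal, self-contained illustration of the framing that
// writeHdr produces and readHdr (above) parses: a uvarint length prefix, then the
// marshaled pb.Set header, then the remaining payload. A plain byte slice stands
// in for the marshaled header so the example runs without the protobuf types; it
// is an assumption-level demo, not part of the pin-set code itself.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	hdr := []byte("marshaled-header")   // stand-in for proto.Marshal(&pb.Set{...})
	rest := []byte("remaining payload") // data that follows the header

	// Encode: uvarint(len(hdr)) || hdr || rest
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(len(hdr)))
	framed := append(append(buf[:n], hdr...), rest...)

	// Decode: read the length prefix, then slice the header out of the body.
	hdrLen, consumed := binary.Uvarint(framed)
	body := framed[consumed:]
	fmt.Printf("header=%q rest=%q\n", body[:hdrLen], body[hdrLen:])
}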
// converts the Node object into a real dag.Node
func deserializeNode(node *Node, dataFieldEncoding string) (*dag.Node, error) {
	dagnode := new(dag.Node)
	switch dataFieldEncoding {
	case "text":
		dagnode.Data = []byte(node.Data)
	case "base64":
		data, err := base64.StdEncoding.DecodeString(node.Data)
		if err != nil {
			return nil, err
		}
		dagnode.Data = data
	default:
		return nil, fmt.Errorf("unknown data field encoding")
	}

	dagnode.Links = make([]*dag.Link, len(node.Links))
	for i, link := range node.Links {
		hash, err := mh.FromB58String(link.Hash)
		if err != nil {
			return nil, err
		}
		dagnode.Links[i] = &dag.Link{
			Name: link.Name,
			Size: link.Size,
			Hash: hash,
		}
	}

	return dagnode, nil
}
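// A hypothetical usage sketch for deserializeNode above. The Node/Link field
// names mirror those the function reads (Data, Links, Hash, Name, Size), but the
// concrete struct definitions, the Link type name, and the example hash value
// are assumptions made purely for illustration.
func exampleDeserialize() (*dag.Node, error) {
	n := &Node{
		Data: "hello world",
		Links: []*Link{
			{
				Name: "child",
				Hash: "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn",
				Size: 4,
			},
		},
	}
	// "text" keeps the Data field as raw bytes; "base64" would decode it first.
	return deserializeNode(n, "text")
}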
func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error) {
	ukey := key.B58KeyDecode(skey)

	nd, err := n.DAG.Get(n.Context(), ukey)
	if err != nil {
		return "", err
	}

	mdnode := new(dag.Node)
	mdata, err := ft.BytesForMetadata(m)
	if err != nil {
		return "", err
	}

	mdnode.Data = mdata
	if err := mdnode.AddNodeLinkClean("file", nd); err != nil {
		return "", err
	}

	nk, err := n.DAG.Add(mdnode)
	if err != nil {
		return "", err
	}

	return nk.B58String(), nil
}
func nodeFromTemplate(template string) (*dag.Node, error) {
	switch template {
	case "unixfs-dir":
		nd := new(dag.Node)
		nd.Data = ft.FolderPBData()
		return nd, nil
	default:
		return nil, fmt.Errorf("template '%s' not found", template)
	}
}
func (w *Writer) WriteNode(nd *mdag.Node, fpath string) error {
	pb := new(upb.Data)
	if err := proto.Unmarshal(nd.Data(), pb); err != nil {
		return err
	}

	switch pb.GetType() {
	case upb.Data_Metadata:
		fallthrough
	case upb.Data_Directory:
		return w.writeDir(nd, fpath)
	case upb.Data_Raw:
		fallthrough
	case upb.Data_File:
		return w.writeFile(nd, pb, fpath)
	case upb.Data_Symlink:
		return writeSymlinkHeader(w.TarW, string(pb.GetData()), fpath)
	default:
		return ft.ErrUnrecognizedType
	}
}
// NewRoot creates a new Root and starts up a republisher routine for it
func NewRoot(parent context.Context, ds dag.DAGService, node *dag.Node, pf PubFunc) (*Root, error) {
	ndk, err := node.Key()
	if err != nil {
		return nil, err
	}

	var repub *Republisher
	if pf != nil {
		repub = NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3)
		repub.setVal(ndk)
		go repub.Run()
	}

	root := &Root{
		node:  node,
		repub: repub,
		dserv: ds,
	}

	pbn, err := ft.FromBytes(node.Data())
	if err != nil {
		log.Error("IPNS pointer was not unixfs node")
		return nil, err
	}

	switch pbn.GetType() {
	case ft.TDirectory:
		root.val = NewDirectory(parent, ndk.String(), node, root, ds)
	case ft.TFile, ft.TMetadata, ft.TRaw:
		fi, err := NewFile(ndk.String(), node, root, ds)
		if err != nil {
			return nil, err
		}
		root.val = fi
	default:
		panic("unrecognized! (NYI)")
	}

	return root, nil
}
// cacheNode caches a node into d.childDirs or d.files and returns the FSNode.
func (d *Directory) cacheNode(name string, nd *dag.Node) (FSNode, error) {
	i, err := ft.FromBytes(nd.Data())
	if err != nil {
		return nil, err
	}

	switch i.GetType() {
	case ufspb.Data_Directory:
		ndir := NewDirectory(d.ctx, name, nd, d, d.dserv)
		d.childDirs[name] = ndir
		return ndir, nil
	case ufspb.Data_File, ufspb.Data_Raw, ufspb.Data_Symlink:
		nfi, err := NewFile(name, nd, d, d.dserv)
		if err != nil {
			return nil, err
		}
		d.files[name] = nfi
		return nfi, nil
	case ufspb.Data_Metadata:
		return nil, ErrNotYetImplemented
	default:
		return nil, ErrInvalidChild
	}
}
func (s *Shell) NewObject(template string) (string, error) {
	node := new(dag.Node)
	switch template {
	case "":
		break
	case "unixfs-dir":
		node.Data = ft.FolderPBData()
	default:
		return "", fmt.Errorf("unknown template %s", template)
	}

	k, err := s.node.DAG.Add(node)
	if err != nil {
		return "", err
	}

	return k.B58String(), nil
}
func TestAppendSingleBytesToEmpty(t *testing.T) {
	ds := mdtest.Mock()

	data := []byte("AB")

	nd := new(merkledag.Node)
	nd.Data = ft.FilePBData(nil, 0)

	dbp := &h.DagBuilderParams{
		Dagserv:  ds,
		Maxlinks: 4,
	}

	spl := chunk.SizeSplitterGen(500)

	blks, errs := chunk.Chan(spl(bytes.NewReader(data[:1])))

	ctx := context.Background()
	nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs))
	if err != nil {
		t.Fatal(err)
	}

	blks, errs = chunk.Chan(spl(bytes.NewReader(data[1:])))

	nnode, err = TrickleAppend(ctx, nnode, dbp.New(blks, errs))
	if err != nil {
		t.Fatal(err)
	}

	fread, err := uio.NewDagReader(ctx, nnode, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(fread)
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println(out, data)
	err = arrComp(out, data)
	if err != nil {
		t.Fatal(err)
	}
}
func TestAppendSingleBytesToEmpty(t *testing.T) {
	ds := mdtest.Mock(t)

	data := []byte("AB")

	nd := new(merkledag.Node)
	nd.Data = ft.FilePBData(nil, 0)

	dbp := &h.DagBuilderParams{
		Dagserv:  ds,
		Maxlinks: 4,
	}

	spl := &chunk.SizeSplitter{500}

	blks := spl.Split(bytes.NewReader(data[:1]))

	nnode, err := TrickleAppend(nd, dbp.New(blks))
	if err != nil {
		t.Fatal(err)
	}

	blks = spl.Split(bytes.NewReader(data[1:]))

	nnode, err = TrickleAppend(nnode, dbp.New(blks))
	if err != nil {
		t.Fatal(err)
	}

	fread, err := uio.NewDagReader(context.TODO(), nnode, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(fread)
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println(out, data)
	err = arrComp(out, data)
	if err != nil {
		t.Fatal(err)
	}
}
// converts the Node object into a real dag.Node
func deserializeNode(node *Node) (*dag.Node, error) {
	dagnode := new(dag.Node)
	dagnode.Data = []byte(node.Data)
	dagnode.Links = make([]*dag.Link, len(node.Links))
	for i, link := range node.Links {
		hash, err := mh.FromB58String(link.Hash)
		if err != nil {
			return nil, err
		}
		dagnode.Links[i] = &dag.Link{
			Name: link.Name,
			Size: link.Size,
			Hash: hash,
		}
	}

	return dagnode, nil
}
func setDataCaller(req cmds.Request, root *dag.Node) (key.Key, error) {
	if len(req.Arguments()) < 3 {
		return "", fmt.Errorf("not enough arguments for set-data")
	}

	nd, err := req.InvocContext().GetNode()
	if err != nil {
		return "", err
	}

	root.Data = []byte(req.Arguments()[2])

	newkey, err := nd.DAG.Add(root)
	if err != nil {
		return "", err
	}

	return newkey, nil
}
// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'
// returns the new key of the passed in node and whether or not all the data in the reader
// has been consumed.
func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (key.Key, bool, error) {
	f, err := ft.FromBytes(node.Data())
	if err != nil {
		return "", false, err
	}

	// If we've reached a leaf node.
	if len(node.Links) == 0 {
		n, err := data.Read(f.Data[offset:])
		if err != nil && err != io.EOF {
			return "", false, err
		}

		// Update newly written node..
		b, err := proto.Marshal(f)
		if err != nil {
			return "", false, err
		}

		nd := new(mdag.Node)
		nd.SetData(b)
		k, err := dm.dagserv.Add(nd)
		if err != nil {
			return "", false, err
		}

		// Hey look! we're done!
		var done bool
		if n < len(f.Data[offset:]) {
			done = true
		}

		return k, done, nil
	}

	var cur uint64
	var done bool
	for i, bs := range f.GetBlocksizes() {
		// We found the correct child to write into
		if cur+bs > offset {
			child, err := node.Links[i].GetNode(dm.ctx, dm.dagserv)
			if err != nil {
				return "", false, err
			}

			k, sdone, err := dm.modifyDag(child, offset-cur, data)
			if err != nil {
				return "", false, err
			}

			offset += bs
			node.Links[i].Hash = mh.Multihash(k)

			// Recache serialized node
			_, err = node.EncodeProtobuf(true)
			if err != nil {
				return "", false, err
			}

			if sdone {
				// No more bytes to write!
				done = true
				break
			}
			offset = cur + bs
		}
		cur += bs
	}

	k, err := dm.dagserv.Add(node)
	return k, done, err
}
func child(t *testing.T, ds mdag.DAGService, a *mdag.Node, name string) *mdag.Node {
	return mdag.NodeWithData([]byte(string(a.Data()) + "/" + name))
}
			linkNode, err = merkledag.DecodeProtobuf(b.Data())
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}

		if linkNode == nil && resolve {
			linkNode, err = link.GetNode(req.Context(), node.DAG)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
		}

		if linkNode != nil {
			d, err := unixfs.FromBytes(linkNode.Data())
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}

			t = d.GetType()
		}

		output[i].Links[j] = LsLink{
			Name: link.Name,
			Hash: link.Hash.B58String(),
			Size: link.Size,
			Type: t,
		}
	}
}
// dagTruncate truncates the given node to 'size' and returns the modified Node
func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGService) (*mdag.Node, error) {
	if len(nd.Links) == 0 {
		// TODO: this can likely be done without marshaling and remarshaling
		pbn, err := ft.FromBytes(nd.Data)
		if err != nil {
			return nil, err
		}

		nd.Data = ft.WrapData(pbn.Data[:size])
		return nd, nil
	}

	var cur uint64
	end := 0
	var modified *mdag.Node
	ndata := new(ft.FSNode)
	for i, lnk := range nd.Links {
		child, err := lnk.GetNode(ctx, ds)
		if err != nil {
			return nil, err
		}

		childsize, err := ft.DataSize(child.Data)
		if err != nil {
			return nil, err
		}

		// found the child we want to cut
		if size < cur+childsize {
			nchild, err := dagTruncate(ctx, child, size-cur, ds)
			if err != nil {
				return nil, err
			}

			ndata.AddBlockSize(size - cur)

			modified = nchild
			end = i
			break
		}
		cur += childsize
		ndata.AddBlockSize(childsize)
	}

	_, err := ds.Add(modified)
	if err != nil {
		return nil, err
	}

	nd.Links = nd.Links[:end]
	err = nd.AddNodeLinkClean("", modified)
	if err != nil {
		return nil, err
	}

	d, err := ndata.GetBytes()
	if err != nil {
		return nil, err
	}

	nd.Data = d

	// invalidate cache and recompute serialized data
	_, err = nd.Encoded(true)
	if err != nil {
		return nil, err
	}

	return nd, nil
}
func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(cryptix): move me to ServeHTTP and pass into all handlers
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	rootPath, err := path.ParsePath(r.URL.Path)
	if err != nil {
		webError(w, "putHandler: ipfs path not valid", err, http.StatusBadRequest)
		return
	}

	rsegs := rootPath.Segments()
	if rsegs[0] == ipnsPathPrefix {
		webError(w, "putHandler: updating named entries not supported", errors.New("WritableGateway: ipns put not supported"), http.StatusBadRequest)
		return
	}

	var newnode *dag.Node
	if rsegs[len(rsegs)-1] == "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn" {
		newnode = uio.NewEmptyDirectory()
	} else {
		putNode, err := i.newDagFromReader(r.Body)
		if err != nil {
			webError(w, "putHandler: Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}
		newnode = putNode
	}

	var newPath string
	if len(rsegs) > 1 {
		newPath = path.Join(rsegs[2:])
	}

	var newkey key.Key
	rnode, err := core.Resolve(ctx, i.node, rootPath)
	switch ev := err.(type) {
	case path.ErrNoLink:
		// ev.Node < node where resolve failed
		// ev.Name < new link
		// but we need to patch from the root
		rnode, err := i.node.DAG.Get(ctx, key.B58KeyDecode(rsegs[1]))
		if err != nil {
			webError(w, "putHandler: Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}

		e := dagutils.NewDagEditor(rnode, i.node.DAG)
		err = e.InsertNodeAtPath(ctx, newPath, newnode, uio.NewEmptyDirectory)
		if err != nil {
			webError(w, "putHandler: InsertNodeAtPath failed", err, http.StatusInternalServerError)
			return
		}

		nnode, err := e.Finalize(i.node.DAG)
		if err != nil {
			webError(w, "putHandler: could not get node", err, http.StatusInternalServerError)
			return
		}

		newkey, err = nnode.Key()
		if err != nil {
			webError(w, "putHandler: could not get key of edited node", err, http.StatusInternalServerError)
			return
		}

	case nil:
		// object set-data case
		rnode.SetData(newnode.Data())

		newkey, err = i.node.DAG.Add(rnode)
		if err != nil {
			nnk, _ := newnode.Key()
			rk, _ := rnode.Key()
			webError(w, fmt.Sprintf("putHandler: Could not add newnode(%q) to root(%q)", nnk.B58String(), rk.B58String()), err, http.StatusInternalServerError)
			return
		}
	default:
		log.Warningf("putHandler: unhandled resolve error %T", ev)
		webError(w, "could not resolve root DAG", ev, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", newkey.String())
	http.Redirect(w, r, gopath.Join(ipfsPathPrefix, newkey.String(), newPath), http.StatusCreated)
}