Example #1
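// randNode creates a ProtoNode holding 32 bytes of random data and returns it
// together with its CID.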
func randNode() (*mdag.ProtoNode, *cid.Cid) {
	nd := new(mdag.ProtoNode)
	nd.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(nd.Data())
	k := nd.Cid()
	return nd, k
}
Example #2
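// readHdr decodes the pb.Set header stored at the front of the node's data,
// validating the header length, version, and fanout.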
func readHdr(n *merkledag.ProtoNode) (*pb.Set, error) {
	hdrLenRaw, consumed := binary.Uvarint(n.Data())
	if consumed <= 0 {
		return nil, errors.New("invalid Set header length")
	}

	pbdata := n.Data()[consumed:]
	if hdrLenRaw > uint64(len(pbdata)) {
		return nil, errors.New("impossibly large Set header length")
	}
	// hdrLenRaw is <= len(pbdata), which is an int, so it fits in an int
	hdrLen := int(hdrLenRaw)
	var hdr pb.Set
	if err := proto.Unmarshal(pbdata[:hdrLen], &hdr); err != nil {
		return nil, err
	}

	if v := hdr.GetVersion(); v != 1 {
		return nil, fmt.Errorf("unsupported Set version: %d", v)
	}
	if uint64(hdr.GetFanout()) > uint64(len(n.Links())) {
		return nil, errors.New("impossibly large Fanout")
	}
	return &hdr, nil
}
Example #3
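// PrintDag recursively pretty-prints the unixfs DAG rooted at nd, indenting
// each level by one extra space.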
func PrintDag(nd *mdag.ProtoNode, ds mdag.DAGService, indent int) {
	pbd, err := ft.FromBytes(nd.Data())
	if err != nil {
		panic(err)
	}

	for i := 0; i < indent; i++ {
		fmt.Print(" ")
	}
	fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
	if len(nd.Links()) > 0 {
		fmt.Println()
	}
	for _, lnk := range nd.Links() {
		child, err := lnk.GetNode(context.Background(), ds)
		if err != nil {
			panic(err)
		}
		PrintDag(child.(*mdag.ProtoNode), ds, indent+1)
	}
	if len(nd.Links()) > 0 {
		for i := 0; i < indent; i++ {
			fmt.Print(" ")
		}
	}
	fmt.Println("}")
}
Example #4
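// ExportTar returns a reader that streams the contents of the tar archive
// rooted at an "ipfs/tar" node.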
func ExportTar(ctx context.Context, root *dag.ProtoNode, ds dag.DAGService) (io.Reader, error) {
	if string(root.Data()) != "ipfs/tar" {
		return nil, errors.New("not an IPFS tarchive")
	}
	return &tarReader{
		links: root.Links(),
		ds:    ds,
		ctx:   ctx,
	}, nil
}
Example #5
// NewUnixfsNodeFromDag reconstructs a Unixfs node from a given dag node
func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) {
	mb, err := ft.FSNodeFromBytes(nd.Data())
	if err != nil {
		return nil, err
	}

	return &UnixfsNode{
		node: nd,
		ufmt: mb,
	}, nil
}
Example #6
// NewRoot creates a new Root and starts up a republisher routine for it
func NewRoot(parent context.Context, ds dag.DAGService, node *dag.ProtoNode, pf PubFunc) (*Root, error) {

	var repub *Republisher
	if pf != nil {
		repub = NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3)
		repub.setVal(node.Cid())
		go repub.Run()
	}

	root := &Root{
		node:  node,
		repub: repub,
		dserv: ds,
	}

	pbn, err := ft.FromBytes(node.Data())
	if err != nil {
		log.Error("IPNS pointer was not unixfs node")
		return nil, err
	}

	switch pbn.GetType() {
	case ft.TDirectory:
		root.val = NewDirectory(parent, node.String(), node, root, ds)
	case ft.TFile, ft.TMetadata, ft.TRaw:
		fi, err := NewFile(node.String(), node, root, ds)
		if err != nil {
			return nil, err
		}
		root.val = fi
	default:
		panic("unrecognized! (NYI)")
	}
	return root, nil
}
Example #7
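// randNode creates a ProtoNode holding 32 bytes of random data and returns it
// together with its key.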
func randNode() (*merkledag.ProtoNode, key.Key) {
	node := new(merkledag.ProtoNode)
	node.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(node.Data())
	k := node.Key()
	return node, k
}
Example #8
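// WriteNode writes the unixfs node nd into the tar stream at path fpath,
// dispatching on the node's unixfs type.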
func (w *Writer) WriteNode(nd *mdag.ProtoNode, fpath string) error {
	pb := new(upb.Data)
	if err := proto.Unmarshal(nd.Data(), pb); err != nil {
		return err
	}

	switch pb.GetType() {
	case upb.Data_Metadata:
		fallthrough
	case upb.Data_Directory:
		return w.writeDir(nd, fpath)
	case upb.Data_Raw:
		fallthrough
	case upb.Data_File:
		return w.writeFile(nd, pb, fpath)
	case upb.Data_Symlink:
		return writeSymlinkHeader(w.TarW, string(pb.GetData()), fpath)
	default:
		return ft.ErrUnrecognizedType
	}
}
Example #9
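// randNode creates a ProtoNode holding 32 bytes of random data.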
func randNode() *merkledag.ProtoNode {
	node := new(merkledag.ProtoNode)
	node.SetData(make([]byte, 32))
	util.NewTimeSeededRand().Read(node.Data())
	return node
}
Example #10
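// Fragment of a command handler: each link is resolved to a ProtoNode, its
// unixfs type is decoded, and the result is recorded as an LsLink entry.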
					nd, err := link.GetNode(req.Context(), nd.DAG)
					if err != nil {
						res.SetError(err, cmds.ErrNormal)
						return
					}

					pbnd, ok := nd.(*merkledag.ProtoNode)
					if !ok {
						res.SetError(merkledag.ErrNotProtobuf, cmds.ErrNormal)
						return
					}

					linkNode = pbnd
				}
				if linkNode != nil {
					d, err := unixfs.FromBytes(linkNode.Data())
					if err != nil {
						res.SetError(err, cmds.ErrNormal)
						return
					}

					t = d.GetType()
				}
				output[i].Links[j] = LsLink{
					Name: link.Name,
					Hash: link.Cid.String(),
					Size: link.Size,
					Type: t,
				}
			}
		}
Example #11
// dagTruncate truncates the given node to 'size' and returns the modified Node
func dagTruncate(ctx context.Context, nd *mdag.ProtoNode, size uint64, ds mdag.DAGService) (*mdag.ProtoNode, error) {
	if len(nd.Links()) == 0 {
		// TODO: this can likely be done without marshaling and remarshaling
		pbn, err := ft.FromBytes(nd.Data())
		if err != nil {
			return nil, err
		}

		nd.SetData(ft.WrapData(pbn.Data[:size]))
		return nd, nil
	}

	var cur uint64
	end := 0
	var modified *mdag.ProtoNode
	ndata := new(ft.FSNode)
	for i, lnk := range nd.Links() {
		child, err := lnk.GetNode(ctx, ds)
		if err != nil {
			return nil, err
		}

		childpb, ok := child.(*mdag.ProtoNode)
		if !ok {
			// err is nil here; report the type mismatch explicitly
			return nil, mdag.ErrNotProtobuf
		}

		childsize, err := ft.DataSize(childpb.Data())
		if err != nil {
			return nil, err
		}

		// found the child we want to cut
		if size < cur+childsize {
			nchild, err := dagTruncate(ctx, childpb, size-cur, ds)
			if err != nil {
				return nil, err
			}

			ndata.AddBlockSize(size - cur)

			modified = nchild
			end = i
			break
		}
		cur += childsize
		ndata.AddBlockSize(childsize)
	}

	_, err := ds.Add(modified)
	if err != nil {
		return nil, err
	}

	nd.SetLinks(nd.Links()[:end])
	err = nd.AddNodeLinkClean("", modified)
	if err != nil {
		return nil, err
	}

	d, err := ndata.GetBytes()
	if err != nil {
		return nil, err
	}

	nd.SetData(d)

	// invalidate cache and recompute serialized data
	_, err = nd.EncodeProtobuf(true)
	if err != nil {
		return nil, err
	}

	return nd, nil
}
Example #12
// modifyDag writes the data from 'data' over the data in 'node', starting at 'offset'.
// It returns the new key of the passed-in node and whether all of the data in the
// reader has been consumed.
func (dm *DagModifier) modifyDag(node *mdag.ProtoNode, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
	f, err := ft.FromBytes(node.Data())
	if err != nil {
		return nil, false, err
	}

	// If we've reached a leaf node.
	if len(node.Links()) == 0 {
		n, err := data.Read(f.Data[offset:])
		if err != nil && err != io.EOF {
			return nil, false, err
		}

		// Update the newly written node.
		b, err := proto.Marshal(f)
		if err != nil {
			return nil, false, err
		}

		nd := new(mdag.ProtoNode)
		nd.SetData(b)
		k, err := dm.dagserv.Add(nd)
		if err != nil {
			return nil, false, err
		}

		// If the read did not fill the remaining space, treat the reader as
		// fully consumed.
		var done bool
		if n < len(f.Data[offset:]) {
			done = true
		}

		return k, done, nil
	}

	var cur uint64
	var done bool
	for i, bs := range f.GetBlocksizes() {
		// We found the correct child to write into
		if cur+bs > offset {
			child, err := node.Links()[i].GetNode(dm.ctx, dm.dagserv)
			if err != nil {
				return nil, false, err
			}

			childpb, ok := child.(*mdag.ProtoNode)
			if !ok {
				return nil, false, mdag.ErrNotProtobuf
			}

			k, sdone, err := dm.modifyDag(childpb, offset-cur, data)
			if err != nil {
				return nil, false, err
			}

			offset += bs
			node.Links()[i].Cid = k

			// Recache serialized node
			_, err = node.EncodeProtobuf(true)
			if err != nil {
				return nil, false, err
			}

			if sdone {
				// No more bytes to write!
				done = true
				break
			}
			offset = cur + bs
		}
		cur += bs
	}

	k, err := dm.dagserv.Add(node)
	return k, done, err
}
Example #13
// verifyTDagRec recursively verifies the structure of a trickle dag
func verifyTDagRec(nd *dag.ProtoNode, depth, direct, layerRepeat int, ds dag.DAGService) error {
	if depth == 0 {
		// zero depth dag is raw data block
		if len(nd.Links()) > 0 {
			return errors.New("expected direct block")
		}

		pbn, err := ft.FromBytes(nd.Data())
		if err != nil {
			return err
		}

		if pbn.GetType() != ft.TRaw {
			return errors.New("Expected raw block")
		}
		return nil
	}

	// Verify this is a branch node
	pbn, err := ft.FromBytes(nd.Data())
	if err != nil {
		return err
	}

	if pbn.GetType() != ft.TFile {
		return fmt.Errorf("expected file as branch node, got: %s", pbn.GetType())
	}

	if len(pbn.Data) > 0 {
		return errors.New("branch node should not have data")
	}

	for i := 0; i < len(nd.Links()); i++ {
		childi, err := nd.Links()[i].GetNode(context.TODO(), ds)
		if err != nil {
			return err
		}

		childpb, ok := childi.(*dag.ProtoNode)
		if !ok {
			return fmt.Errorf("cannot operate on non-protobuf nodes")
		}

		if i < direct {
			// Direct blocks
			err := verifyTDagRec(childpb, 0, direct, layerRepeat, ds)
			if err != nil {
				return err
			}
		} else {
			// Recursive trickle dags
			rdepth := ((i - direct) / layerRepeat) + 1
			if rdepth >= depth && depth > 0 {
				return errors.New("Child dag was too deep!")
			}
			err := verifyTDagRec(childpb, rdepth, direct, layerRepeat, ds)
			if err != nil {
				return err
			}
		}
	}
	return nil
}