func testFileConsistency(t *testing.T, bs chunk.BlockSplitter, nbytes int) { should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock(t) nd, err := buildTestDag(read, ds, bs) if err != nil { t.Fatal(err) } r, err := uio.NewDagReader(context.Background(), nd, ds) if err != nil { t.Fatal(err) } out, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } err = arrComp(out, should) if err != nil { t.Fatal(err) } }
func TestSeekEndSingleBlockFile(t *testing.T) { nbytes := int64(100) should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock(t) nd, err := buildTestDag(read, ds, &chunk.SizeSplitter{5000}) if err != nil { t.Fatal(err) } rs, err := uio.NewDagReader(context.Background(), nd, ds) if err != nil { t.Fatal(err) } seeked, err := rs.Seek(0, os.SEEK_END) if err != nil { t.Fatal(err) } if seeked != nbytes { t.Fatal("Failed to seek to end") } }
func TestBalancedDag(t *testing.T) { ds := mdtest.Mock() buf := make([]byte, 10000) u.NewTimeSeededRand().Read(buf) r := bytes.NewReader(buf) nd, err := BuildDagFromReader(ds, chunk.DefaultSplitter(r), nil) if err != nil { t.Fatal(err) } dr, err := uio.NewDagReader(context.TODO(), nd, ds) if err != nil { t.Fatal(err) } out, err := ioutil.ReadAll(dr) if err != nil { t.Fatal(err) } if !bytes.Equal(out, buf) { t.Fatal("bad read") } }
func TestBuilderConsistency(t *testing.T) { nbytes := 100000 buf := new(bytes.Buffer) io.CopyN(buf, u.NewTimeSeededRand(), int64(nbytes)) should := dup(buf.Bytes()) dagserv := mdtest.Mock(t) nd, err := buildTestDag(buf, dagserv, chunk.DefaultSplitter) if err != nil { t.Fatal(err) } r, err := uio.NewDagReader(context.Background(), nd, dagserv) if err != nil { t.Fatal(err) } out, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } err = arrComp(out, should) if err != nil { t.Fatal(err) } }
func TestSeekingBasic(t *testing.T) { nbytes := int64(10 * 1024) ds := mdtest.Mock() nd, should := getTestDag(t, ds, nbytes, 500) rs, err := uio.NewDagReader(context.Background(), nd, ds) if err != nil { t.Fatal(err) } start := int64(4000) n, err := rs.Seek(start, os.SEEK_SET) if err != nil { t.Fatal(err) } if n != start { t.Fatal("Failed to seek to correct offset") } out, err := ioutil.ReadAll(rs) if err != nil { t.Fatal(err) } err = arrComp(out, should[start:]) if err != nil { t.Fatal(err) } }
func TestSeekToBegin(t *testing.T) { ds := mdtest.Mock() nd, should := getTestDag(t, ds, 10*1024, 500) rs, err := uio.NewDagReader(context.Background(), nd, ds) if err != nil { t.Fatal(err) } n, err := io.CopyN(ioutil.Discard, rs, 1024*4) if err != nil { t.Fatal(err) } if n != 4096 { t.Fatal("Copy didnt copy enough bytes") } seeked, err := rs.Seek(0, os.SEEK_SET) if err != nil { t.Fatal(err) } if seeked != 0 { t.Fatal("Failed to seek to beginning") } out, err := ioutil.ReadAll(rs) if err != nil { t.Fatal(err) } err = arrComp(out, should) if err != nil { t.Fatal(err) } }
// getZip is equivalent to `ipfs getdag $hash | gzip` func getZip(ctx context.Context, node *core.IpfsNode, p path.Path, compression int) (io.Reader, error) { dagnode, err := core.Resolve(ctx, node, p) if err != nil { return nil, err } reader, err := uio.NewDagReader(ctx, dagnode, node.DAG) if err != nil { return nil, err } pr, pw := io.Pipe() gw, err := gzip.NewWriterLevel(pw, compression) if err != nil { return nil, err } bufin := bufio.NewReader(reader) go func() { _, err := bufin.WriteTo(gw) if err != nil { log.Error("Fail to compress the stream") } gw.Close() pw.Close() }() return pr, nil }
func TestSeekToAlmostBegin(t *testing.T) { ds := mdtest.Mock() nd, should := getTestDag(t, ds, 10*1024, 500) rs, err := uio.NewDagReader(context.Background(), nd, ds) if err != nil { t.Fatal(err) } n, err := io.CopyN(ioutil.Discard, rs, 1024*4) if err != nil { t.Fatal(err) } if n != 4096 { t.Fatal("Copy didnt copy enough bytes") } seeked, err := rs.Seek(1, os.SEEK_SET) if err != nil { t.Fatal(err) } if seeked != 1 { t.Fatal("Failed to seek to almost beginning") } dagrArrComp(t, rs, should[1:]) }
func TestSeekingConsistency(t *testing.T) { nbytes := int64(128 * 1024) ds := mdtest.Mock() nd, should := getTestDag(t, ds, nbytes, 500) rs, err := uio.NewDagReader(context.Background(), nd, ds) if err != nil { t.Fatal(err) } out := make([]byte, nbytes) for coff := nbytes - 4096; coff >= 0; coff -= 4096 { t.Log(coff) n, err := rs.Seek(coff, os.SEEK_SET) if err != nil { t.Fatal(err) } if n != coff { t.Fatal("wasnt able to seek to the right position") } nread, err := rs.Read(out[coff : coff+4096]) if err != nil { t.Fatal(err) } if nread != 4096 { t.Fatal("didnt read the correct number of bytes") } } err = arrComp(out, should) if err != nil { t.Fatal(err) } }
func Cat(ctx context.Context, n *core.IpfsNode, pstr string) (*uio.DagReader, error) { dagNode, err := core.Resolve(ctx, n, path.Path(pstr)) if err != nil { return nil, err } return uio.NewDagReader(ctx, dagNode, n.DAG) }
func (dm *DagModifier) readPrep() error { err := dm.Sync() if err != nil { return err } if dm.read == nil { ctx, cancel := context.WithCancel(dm.ctx) dr, err := uio.NewDagReader(ctx, dm.curNode, dm.dagserv) if err != nil { return err } i, err := dr.Seek(int64(dm.curWrOff), os.SEEK_SET) if err != nil { return err } if i != int64(dm.curWrOff) { return ErrSeekFail } dm.readCancel = cancel dm.read = dr } return nil }
func (s *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { k, err := s.Nd.Key() if err != nil { return err } // setup our logging event lm := make(lgbl.DeferredMap) lm["fs"] = "ipfs" lm["key"] = func() interface{} { return k.B58String() } lm["req_offset"] = req.Offset lm["req_size"] = req.Size defer log.EventBegin(ctx, "fuseRead", lm).Done() r, err := uio.NewDagReader(ctx, s.Nd, s.Ipfs.DAG) if err != nil { return err } o, err := r.Seek(req.Offset, os.SEEK_SET) lm["res_offset"] = o if err != nil { return err } buf := resp.Data[:min(req.Size, int(int64(r.Size())-req.Offset))] n, err := io.ReadFull(r, buf) if err != nil && err != io.EOF { return err } resp.Data = resp.Data[:n] lm["res_size"] = n return nil // may be non-nil / not succeeded }
func TestIndirectBlocks(t *testing.T) { splitter := &chunk.SizeSplitter{512} nbytes := 1024 * 1024 buf := make([]byte, nbytes) u.NewTimeSeededRand().Read(buf) read := bytes.NewReader(buf) ds := mdtest.Mock(t) dag, err := buildTestDag(read, ds, splitter) if err != nil { t.Fatal(err) } reader, err := uio.NewDagReader(context.Background(), dag, ds) if err != nil { t.Fatal(err) } out, err := ioutil.ReadAll(reader) if err != nil { t.Fatal(err) } if !bytes.Equal(out, buf) { t.Fatal("Not equal!") } }
func Cat(ctx context.Context, n *core.IpfsNode, pstr string) (*uio.DagReader, error) { p := path.FromString(pstr) dagNode, err := n.Resolver.ResolvePath(ctx, p) if err != nil { return nil, err } return uio.NewDagReader(ctx, dagNode, n.DAG) }
func Cat(n *core.IpfsNode, pstr string) (io.Reader, error) { p := path.FromString(pstr) dagNode, err := n.Resolver.ResolvePath(n.Context(), p) if err != nil { return nil, err } return uio.NewDagReader(n.Context(), dagNode, n.DAG) }
func catNode(ds dag.DAGService, nd *dag.Node) ([]byte, error) { r, err := uio.NewDagReader(context.TODO(), nd, ds) if err != nil { return nil, err } defer r.Close() return ioutil.ReadAll(r) }
// TestMetadata round-trips a metadata node: it builds a DAG from random
// data, wraps it with a Metadata object (mime type), reads the metadata
// back, and verifies the underlying file data is still intact.
func TestMetadata(t *testing.T) {
	ctx := context.Background()
	// Make some random node
	ds := getDagserv(t)
	data := make([]byte, 1000)
	u.NewTimeSeededRand().Read(data)
	r := bytes.NewReader(data)
	nd, err := importer.BuildDagFromReader(ds, chunk.DefaultSplitter(r))
	if err != nil {
		t.Fatal(err)
	}
	k, err := nd.Key()
	if err != nil {
		t.Fatal(err)
	}

	m := new(ft.Metadata)
	m.MimeType = "THIS IS A TEST"

	// Such effort, many compromise
	// (a minimal IpfsNode wrapper is enough for AddMetadataTo/Metadata)
	ipfsnode := &core.IpfsNode{DAG: ds}

	// Attach the metadata to the file node; mdk is the new wrapper key.
	mdk, err := AddMetadataTo(ipfsnode, k.B58String(), m)
	if err != nil {
		t.Fatal(err)
	}

	// The metadata must survive the round trip unchanged.
	rec, err := Metadata(ipfsnode, mdk)
	if err != nil {
		t.Fatal(err)
	}
	if rec.MimeType != m.MimeType {
		t.Fatalf("something went wrong in conversion: '%s' != '%s'", rec.MimeType, m.MimeType)
	}

	// The wrapper node must still resolve to the original file content.
	retnode, err := ds.Get(ctx, key.B58KeyDecode(mdk))
	if err != nil {
		t.Fatal(err)
	}
	ndr, err := uio.NewDagReader(ctx, retnode, ds)
	if err != nil {
		t.Fatal(err)
	}
	out, err := ioutil.ReadAll(ndr)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(out, data) {
		t.Fatal("read incorrect data")
	}
}
func TestTwoChunks(t *testing.T) { ds := mdtest.Mock() nd, should := getTestDag(t, ds, 2000, 1000) r, err := uio.NewDagReader(context.Background(), nd, ds) if err != nil { t.Fatal(err) } dagrArrComp(t, r, should) }
func testFileConsistency(t *testing.T, nbytes int64, blksize int64) { ds := mdtest.Mock() nd, should := getTestDag(t, ds, nbytes, blksize) r, err := uio.NewDagReader(context.Background(), nd, ds) if err != nil { t.Fatal(err) } dagrArrComp(t, r, should) }
// TestMultiWrite writes a 4000-byte buffer through a DagModifier one
// byte at a time, checking the reported size after every write, then
// reads the whole DAG back and verifies it matches the written data.
func TestMultiWrite(t *testing.T) {
	dserv := getMockDagServ(t)
	_, n := getNode(t, dserv, 0)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	data := make([]byte, 4000)
	u.NewTimeSeededRand().Read(data)

	for i := 0; i < len(data); i++ {
		// Write exactly one byte at offset i (effectively appending).
		n, err := dagmod.WriteAt(data[i:i+1], int64(i))
		if err != nil {
			t.Fatal(err)
		}
		if n != 1 {
			t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)")
		}

		// The modifier's size must track every appended byte.
		size, err := dagmod.Size()
		if err != nil {
			t.Fatal(err)
		}
		if size != int64(i+1) {
			t.Fatal("Size was reported incorrectly")
		}
	}

	// Finalize the node and verify a full read-back of the DAG.
	nd, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}
	read, err := uio.NewDagReader(context.Background(), nd, dserv)
	if err != nil {
		t.Fatal(err)
	}
	rbuf, err := ioutil.ReadAll(read)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(rbuf, data)
	if err != nil {
		t.Fatal(err)
	}
}
// This test appends one byte at a time to an empty file
func TestMultipleAppends(t *testing.T) {
	ds := mdtest.Mock()

	// TODO: fix small size appends and make this number bigger
	nbytes := int64(1000)
	should := make([]byte, nbytes)
	u.NewTimeSeededRand().Read(should)

	// Build the starting DAG from an intentionally empty reader.
	read := bytes.NewReader(nil)
	nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500))
	if err != nil {
		t.Fatal(err)
	}

	dbp := &h.DagBuilderParams{
		Dagserv:  ds,
		Maxlinks: 4,
	}

	spl := chunk.SizeSplitterGen(500)

	ctx := context.Background()
	for i := 0; i < len(should); i++ {
		// Append one byte, then verify the trickle-dag structure and the
		// complete file contents after every single append.
		// NOTE(review): nd is never reassigned to nnode here, yet the
		// comparison expects should[:i+1] to accumulate — this relies on
		// TrickleAppend mutating nd in place; confirm against its docs.
		blks, errs := chunk.Chan(spl(bytes.NewReader(should[i : i+1])))

		nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs))
		if err != nil {
			t.Fatal(err)
		}

		err = VerifyTrickleDagStructure(nnode, ds, dbp.Maxlinks, layerRepeat)
		if err != nil {
			t.Fatal(err)
		}

		fread, err := uio.NewDagReader(ctx, nnode, ds)
		if err != nil {
			t.Fatal(err)
		}

		out, err := ioutil.ReadAll(fread)
		if err != nil {
			t.Fatal(err)
		}

		err = arrComp(out, should[:i+1])
		if err != nil {
			t.Fatal(err)
		}
	}
}
func (api *UnixfsAPI) Cat(ctx context.Context, p string) (coreiface.Reader, error) { dagnode, err := resolve(ctx, api.node, p) if err != nil { return nil, err } r, err := uio.NewDagReader(ctx, dagnode, api.node.DAG) if err == uio.ErrIsDir { return nil, coreiface.ErrIsDir } else if err != nil { return nil, err } return r, nil }
func cat(ctx context.Context, node *core.IpfsNode, fpath string) (io.Reader, uint64, error) { dagnode, err := core.Resolve(ctx, node, path.Path(fpath)) if err != nil { return nil, 0, err } reader, err := uio.NewDagReader(ctx, dagnode, node.DAG) if err != nil { return nil, 0, err } length := uint64(reader.Size()) return reader, length, nil }
// Cat resolves the ipfs path p and returns a reader for that data, if it exists and is availalbe func (s *Shell) Cat(p string) (io.ReadCloser, error) { ipfsPath, err := path.ParsePath(p) if err != nil { return nil, errgo.Notef(err, "cat: could not parse %q", p) } nd, err := core.Resolve(s.ctx, s.node, ipfsPath) if err != nil { return nil, errgo.Notef(err, "cat: could not resolve %s", ipfsPath) } dr, err := unixfsio.NewDagReader(s.ctx, nd, s.node.DAG) if err != nil { return nil, errgo.Notef(err, "cat: failed to construct DAG reader") } return dr, nil }
func runReadBench(b *testing.B, nd *dag.Node, ds dag.DAGService) { for i := 0; i < b.N; i++ { ctx, cancel := context.WithCancel(context.TODO()) read, err := uio.NewDagReader(ctx, nd, ds) if err != nil { b.Fatal(err) } _, err = read.WriteTo(ioutil.Discard) if err != nil && err != io.EOF { b.Fatal(err) } cancel() } }
// TestMultiWriteAndFlush writes 20 bytes one at a time, syncing the
// DagModifier after every single write, then reads the DAG back and
// verifies the contents.
func TestMultiWriteAndFlush(t *testing.T) {
	dserv := testu.GetDAGServ()
	n := testu.GetEmptyNode(t, dserv)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	data := make([]byte, 20)
	u.NewTimeSeededRand().Read(data)

	for i := 0; i < len(data); i++ {
		// Write one byte at offset i.
		n, err := dagmod.WriteAt(data[i:i+1], int64(i))
		if err != nil {
			t.Fatal(err)
		}
		if n != 1 {
			t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)")
		}
		// Force a flush between consecutive writes to exercise the sync
		// path on every append.
		err = dagmod.Sync()
		if err != nil {
			t.Fatal(err)
		}
	}
	nd, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}
	read, err := uio.NewDagReader(context.Background(), nd, dserv)
	if err != nil {
		t.Fatal(err)
	}
	rbuf, err := ioutil.ReadAll(read)
	if err != nil {
		t.Fatal(err)
	}

	err = testu.ArrComp(rbuf, data)
	if err != nil {
		t.Fatal(err)
	}
}
// TestAppend builds a DAG from the first half of a random buffer,
// appends the second half with TrickleAppend, then verifies both the
// trickle-dag structure and the full reassembled contents.
func TestAppend(t *testing.T) {
	nbytes := int64(128 * 1024)
	should := make([]byte, nbytes)
	u.NewTimeSeededRand().Read(should)

	// Reader for half the bytes
	read := bytes.NewReader(should[:nbytes/2])
	ds := mdtest.Mock()
	nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500))
	if err != nil {
		t.Fatal(err)
	}

	dbp := &h.DagBuilderParams{
		Dagserv:  ds,
		Maxlinks: h.DefaultLinksPerBlock,
	}

	// Append the remaining half in 500-byte chunks.
	r := bytes.NewReader(should[nbytes/2:])
	blks, errs := chunk.Chan(chunk.NewSizeSplitter(r, 500))

	ctx := context.Background()
	nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs))
	if err != nil {
		t.Fatal(err)
	}

	err = VerifyTrickleDagStructure(nnode, ds, dbp.Maxlinks, layerRepeat)
	if err != nil {
		t.Fatal(err)
	}

	fread, err := uio.NewDagReader(ctx, nnode, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(fread)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(out, should)
	if err != nil {
		t.Fatal(err)
	}
}
func (w *Writer) writeFile(ctx cxt.Context, nd *mdag.Node, pb *upb.Data, fpath string) error { if err := writeFileHeader(w.TarW, fpath, pb.GetFilesize()); err != nil { return err } dagr, err := uio.NewDagReader(ctx, nd, w.Dag) if err != nil { return err } _, err = io.Copy(w.TarW, dagr) if err != nil && err != io.EOF { return err } return nil }
// TestAppend builds a DAG from the first half of a random buffer,
// appends the second half with TrickleAppend, then verifies both the
// trickle-dag structure and the full reassembled contents.
func TestAppend(t *testing.T) {
	nbytes := int64(128 * 1024)
	should := make([]byte, nbytes)
	u.NewTimeSeededRand().Read(should)

	// Reader for half the bytes
	read := bytes.NewReader(should[:nbytes/2])
	ds := mdtest.Mock(t)
	nd, err := buildTestDag(read, ds, &chunk.SizeSplitter{500})
	if err != nil {
		t.Fatal(err)
	}

	dbp := &h.DagBuilderParams{
		Dagserv:  ds,
		Maxlinks: h.DefaultLinksPerBlock,
	}

	// Split the remaining half into 500-byte blocks and append them.
	spl := &chunk.SizeSplitter{500}
	blks := spl.Split(bytes.NewReader(should[nbytes/2:]))

	nnode, err := TrickleAppend(nd, dbp.New(blks))
	if err != nil {
		t.Fatal(err)
	}

	err = VerifyTrickleDagStructure(nnode, ds, dbp.Maxlinks, layerRepeat)
	if err != nil {
		t.Fatal(err)
	}

	fread, err := uio.NewDagReader(context.TODO(), nnode, ds)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(fread)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(out, should)
	if err != nil {
		t.Fatal(err)
	}
}
func TestAppendSingleBytesToEmpty(t *testing.T) { ds := mdtest.Mock() data := []byte("AB") nd := new(merkledag.Node) nd.Data = ft.FilePBData(nil, 0) dbp := &h.DagBuilderParams{ Dagserv: ds, Maxlinks: 4, } spl := chunk.SizeSplitterGen(500) blks, errs := chunk.Chan(spl(bytes.NewReader(data[:1]))) ctx := context.Background() nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs)) if err != nil { t.Fatal(err) } blks, errs = chunk.Chan(spl(bytes.NewReader(data[1:]))) nnode, err = TrickleAppend(ctx, nnode, dbp.New(blks, errs)) if err != nil { t.Fatal(err) } fread, err := uio.NewDagReader(ctx, nnode, ds) if err != nil { t.Fatal(err) } out, err := ioutil.ReadAll(fread) if err != nil { t.Fatal(err) } fmt.Println(out, data) err = arrComp(out, data) if err != nil { t.Fatal(err) } }