func (s *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	k, err := s.Nd.Key()
	if err != nil {
		return err
	}

	// setup our logging event
	lm := make(lgbl.DeferredMap)
	lm["fs"] = "ipfs"
	lm["key"] = func() interface{} { return k.Pretty() }
	lm["req_offset"] = req.Offset
	lm["req_size"] = req.Size
	defer log.EventBegin(ctx, "fuseRead", lm).Done()

	r, err := uio.NewDagReader(ctx, s.Nd, s.Ipfs.DAG)
	if err != nil {
		return err
	}
	o, err := r.Seek(req.Offset, os.SEEK_SET)
	lm["res_offset"] = o
	if err != nil {
		return err
	}

	buf := resp.Data[:min(req.Size, int(int64(r.Size())-req.Offset))]
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.EOF {
		return err
	}
	resp.Data = resp.Data[:n]
	lm["res_size"] = n
	return nil // err may still be io.EOF here; a short read is not reported as a failure
}
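// Both this handler and File.Read further below call a small min helper that
// is not shown in this excerpt. A minimal sketch, assuming it only needs to
// compare two ints (Go 1.21+ also ships a built-in min that behaves the same):
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}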
// HandleRead handles a read request assuming that data is the entire file content.
// It adjusts the amount returned in resp according to req.Offset and req.Size.
func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) {
	if req.Offset >= int64(len(data)) {
		data = nil
	} else {
		data = data[req.Offset:]
	}
	if len(data) > req.Size {
		data = data[:req.Size]
	}
	n := copy(resp.Data[:req.Size], data)
	resp.Data = resp.Data[:n]
}
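// Sketch of how a Read handler might delegate to HandleRead when the whole
// file already lives in memory (in bazil.org/fuse this helper is exposed as
// fuseutil.HandleRead). The memFile type and its content field are
// hypothetical and only illustrate the call site, not code from this repo.
type memFile struct {
	content []byte
}

func (f *memFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	// HandleRead clamps to req.Offset/req.Size and trims resp.Data for us.
	HandleRead(req, resp, f.content)
	return nil
}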
func (fi *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	_, err := fi.fi.Seek(req.Offset, os.SEEK_SET)
	if err != nil {
		return err
	}

	fisize, err := fi.fi.Size()
	if err != nil {
		return err
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	readsize := min(req.Size, int(fisize-req.Offset))
	n, err := fi.fi.CtxReadFull(ctx, resp.Data[:readsize])
	resp.Data = resp.Data[:n]
	return err
}
func (benchFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	// Hand back the buffer at full capacity without reading any real data;
	// this handler exists only to benchmark the overhead of the read path.
	resp.Data = resp.Data[:cap(resp.Data)]
	return nil
}