// ReadDirAll lists the directory contents: a single synthetic file
// named "bench" (inode 2) that the benchmark reads from and writes to.
func (benchDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	l := []fuse.Dirent{
		{Inode: 2, Name: "bench", Type: fuse.DT_File},
	}
	return l, nil
}

// benchFile is the synthetic file served under the benchmark
// directory. Its behavior (e.g. direct I/O) is driven by conf.
type benchFile struct {
	conf *benchConfig
}

// Compile-time interface satisfaction checks: benchFile is both a
// node (Attr/Open/Fsync) and its own handle (Read/Write).
var _ = fs.Node(benchFile{})
var _ = fs.NodeOpener(benchFile{})
var _ = fs.NodeFsyncer(benchFile{})
var _ = fs.Handle(benchFile{})
var _ = fs.HandleReader(benchFile{})
var _ = fs.HandleWriter(benchFile{})

// Attr reports fixed attributes for the benchmark file. The very
// large Size is presumably so reads at any benchmark offset stay
// within the apparent file bounds — TODO confirm against the read
// loop driving this.
func (benchFile) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 2
	a.Mode = 0644
	a.Size = 9999999999999999
	return nil
}

// Open prepares a handle for the benchmark file, optionally enabling
// kernel direct I/O per the configuration.
// NOTE(review): this function is truncated in the visible chunk; the
// return statement lies beyond this view.
func (f benchFile) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
	if f.conf.directIO {
		resp.Flags |= fuse.OpenDirectIO
	}
	// TODO configurable?
	resp.Flags |= fuse.OpenKeepCache
// FileHandle serves FUSE read requests from a vault.ReadSeeker.
// The mutex presumably guards the seek-then-read sequence on r
// against concurrent Read calls — confirm with the rest of Read
// (not fully visible in this chunk).
type FileHandle struct {
	r  *vault.ReadSeeker
	mu sync.Mutex
}

// Compile-time interface satisfaction checks.
var _ fs.Handle = (*FileHandle)(nil)
var _ fs.HandleReleaser = (*FileHandle)(nil)

// Release closes the underlying reader when the kernel releases the
// handle; the reader's close error is propagated to the caller.
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
	return fh.r.Close()
}

var _ = fs.HandleReader(&FileHandle{})

// Read serves a FUSE read by seeking the underlying reader to the
// requested offset.
// NOTE(review): this function is truncated in the visible chunk; the
// actual read into resp happens beyond this view.
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	// We don't actually enforce Offset to match where previous read
	// ended. Maybe we should, but that would mean we'd need to track
	// it. The kernel *should* do it for us, based on the
	// fuse.OpenNonSeekable flag.
	//
	// One exception to the above is if we fail to fully populate a
	// page cache page; a read into page cache is always page aligned.
	// Make sure we never serve a partial read, to avoid that.
	_, e := fh.r.Seek(req.Offset, 0)
	if e != nil {
		return e
	}
	//buf := bytes.NewBuffer(nil)