func (fh *FileHandle) ReadAt(p []byte, offset int64) (int, error) {
	if !fl.IsReadAllowed(fh.flags) {
		return 0, EBADF
	}
	return fh.of.ReadAt(p, offset)
}
func (bh *CachedBlobHandle) PRead(p []byte, offset int64) error {
	if !fl.IsReadAllowed(bh.flags) {
		return EPERM
	}
	return bh.be.PRead(p, offset)
}
func (fh *FileHandle) PRead(offset int64, p []byte) error {
	if !fl.IsReadAllowed(fh.flags) {
		return EBADF
	}
	return fh.of.PRead(offset, p)
}
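// The three handle methods above gate I/O on the fl flag helpers. Below is a
// minimal sketch of how such helpers could be implemented, assuming
// POSIX-style open flags from the os package; the actual fl package in the
// source may define its own constants and logic.
package fl

import "os"

// O_RDONLY mirrors the read-only open mode used by callers such as
// ChunkedFileIO (assumed to alias the os constant here).
const O_RDONLY = os.O_RDONLY

// IsReadAllowed reports whether the access mode permits reads
// (O_RDONLY or O_RDWR, i.e. anything but write-only).
func IsReadAllowed(flags int) bool {
	return flags&os.O_WRONLY == 0
}

// IsWriteAllowed reports whether the access mode permits writes
// (O_WRONLY or O_RDWR).
func IsWriteAllowed(flags int) bool {
	return flags&(os.O_WRONLY|os.O_RDWR) != 0
}

// IsWriteTruncate reports whether the file should be truncated on open.
func IsWriteTruncate(flags int) bool {
	return flags&os.O_TRUNC != 0
}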
func (fs *FileSystem) OpenFile(id inodedb.ID, flags int) (*FileHandle, error) {
	logger.Infof(fslog, "OpenFile(id: %v, flags rok: %t wok: %t)", id, fl.IsReadAllowed(flags), fl.IsWriteAllowed(flags))

	tryLock := fl.IsWriteAllowed(flags)
	if tryLock && !fl.IsWriteAllowed(fs.bs.Flags()) {
		return nil, EACCES
	}

	of := fs.getOrCreateOpenFile(id)

	of.mu.Lock()
	defer of.mu.Unlock()

	ofIsInitialized := of.nlock.ID != 0
	if ofIsInitialized && (of.nlock.HasTicket() || !tryLock) {
		// No need to upgrade lock. Just use cached filehandle.
		logger.Infof(fslog, "Using cached of for inode id: %v", id)
		return of.OpenHandleWithoutLock(flags), nil
	}

	// upgrade lock or acquire new lock...
	v, nlock, err := fs.idb.QueryNode(id, tryLock)
	if err != nil {
		return nil, err
	}
	if v.GetType() != inodedb.FileNodeT {
		if err := fs.idb.UnlockNode(nlock); err != nil {
			logger.Warningf(fslog, "Unlock node failed for non-file node: %v", err)
		}

		if v.GetType() == inodedb.DirNodeT {
			return nil, EISDIR
		}
		return nil, fmt.Errorf("Specified node not file but has type %v", v.GetType())
	}

	of.nlock = nlock
	caio := NewINodeDBChunksArrayIO(fs.idb, nlock)
	of.cfio = chunkstore.NewChunkedFileIO(fs.bs, fs.c, caio)
	of.cfio.SetOrigFilename(fs.tryGetOrigPath(nlock.ID))

	if fl.IsWriteTruncate(flags) {
		if err := of.truncateWithLock(0); err != nil {
			return nil, fmt.Errorf("Failed to truncate file: %v", err)
		}
	}

	fh := of.OpenHandleWithoutLock(flags)
	return fh, nil
}
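// A hypothetical read path through OpenFile and FileHandle.ReadAt, to show how
// the pieces above fit together. The helper name, the buffer size, and the
// assumption that FileHandle exposes a Close method are illustrative, not
// taken from the source.
func readHead(fs *FileSystem, id inodedb.ID) ([]byte, error) {
	fh, err := fs.OpenFile(id, fl.O_RDONLY)
	if err != nil {
		return nil, err
	}
	defer fh.Close() // assumed to release the cached open file / handle

	buf := make([]byte, 4096)
	n, err := fh.ReadAt(buf, 0)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}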
func (f *FileBlobStore) OpenReader(blobpath string) (io.ReadCloser, error) {
	if !fl.IsReadAllowed(f.flags) {
		return nil, EPERM
	}

	realpath := path.Join(f.base, blobpath)

	rc, err := os.Open(realpath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, ENOENT
		}
		return nil, err
	}
	return rc, nil
}
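// A hypothetical helper that drains a blob through OpenReader; the function
// name is illustrative, and io.ReadAll requires Go 1.16+.
func readBlob(bs *FileBlobStore, blobpath string) ([]byte, error) {
	rc, err := bs.OpenReader(blobpath)
	if err != nil {
		return nil, err // ENOENT if the backing blob file does not exist
	}
	defer rc.Close()
	return io.ReadAll(rc)
}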
func (s *CacheUsageStats) ObserveOpen(blobpath string, flags int) {
	s.mu.Lock()
	defer s.mu.Unlock()

	e := s.entries[blobpath]
	if fl.IsReadAllowed(flags) {
		e.readCount++
	}
	if fl.IsWriteAllowed(flags) {
		e.writeCount++
	}
	e.lastUsed = time.Now()
	s.entries[blobpath] = e
}
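// ObserveOpen implies per-blob counters guarded by a mutex. A minimal sketch
// of the state it manipulates is shown below (uses sync.Mutex and time.Time);
// the field names follow the method body, but the entry type name, the
// constructor, and the exact layout in the source are assumptions.
type cacheEntry struct {
	readCount  int
	writeCount int
	lastUsed   time.Time
}

type CacheUsageStats struct {
	mu      sync.Mutex
	entries map[string]cacheEntry
}

func NewCacheUsageStats() *CacheUsageStats {
	// Value-typed map entries match the read-modify-write pattern in ObserveOpen.
	return &CacheUsageStats{entries: make(map[string]cacheEntry)}
}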
func (cfio *ChunkedFileIO) ReadAt(p []byte, offset int64) (int, error) {
	remo := offset
	remp := p
	if offset < 0 {
		return 0, fmt.Errorf("negative offset %d given", offset)
	}

	cs, err := cfio.caio.Read()
	if err != nil {
		return 0, fmt.Errorf("Failed to read cs array: %v", err)
	}
	if !fl.IsReadAllowed(cfio.bs.Flags()) {
		return 0, EPERM
	}

	for i := 0; i < len(cs) && len(remp) > 0; i++ {
		c := cs[i]
		if c.Left() > remo+int64(len(remp)) {
			break
		}
		if c.Right() <= remo {
			continue
		}

		coff := remo - c.Left()
		if coff < 0 {
			// Fill the gap before this chunk with zero bytes.
			n := util.Int64Min(int64(len(remp)), -coff)
			for j := int64(0); j < n; j++ {
				remp[j] = 0
			}
			remo += n
			remp = remp[n:]
			coff = 0
			if len(remp) == 0 {
				return int(remo - offset), nil
			}
		}

		bh, err := cfio.bs.Open(c.BlobPath, fl.O_RDONLY)
		if err != nil {
			return int(remo - offset), fmt.Errorf("Failed to open path \"%s\" for reading: %v", c.BlobPath, err)
		}
		defer func() {
			if err := bh.Close(); err != nil {
				logger.Criticalf(mylog, "blobhandle Close failed: %v", err)
			}
		}()

		cio := cfio.newChunkIO(bh, cfio.c, c.Offset)
		defer func() {
			if err := cio.Close(); err != nil {
				logger.Criticalf(mylog, "cio Close failed: %v", err)
			}
		}()

		// Read the part of this chunk that overlaps the remaining buffer.
		n := util.Int64Min(int64(len(remp)), c.Length-coff)
		if err := cio.PRead(remp[:n], coff); err != nil {
			return int(remo - offset), err
		}

		remo += n
		remp = remp[n:]
		if len(remp) == 0 {
			return int(remo - offset), nil
		}
	}

	return int(remo - offset), nil
}
func (cfio *ChunkedFileIO) PRead(offset int64, p []byte) error {
	remo := offset
	remp := p
	if offset < 0 {
		return fmt.Errorf("negative offset %d given", offset)
	}

	cs, err := cfio.caio.Read()
	if err != nil {
		return fmt.Errorf("Failed to read cs array: %v", err)
	}

	for i := 0; i < len(cs) && len(remp) > 0; i++ {
		c := cs[i]
		if c.Left() > remo+int64(len(remp)) {
			break
		}
		if c.Right() <= remo {
			continue
		}

		coff := remo - c.Left()
		if coff < 0 {
			// Fill the gap before this chunk with zero bytes.
			n := Int64Min(int64(len(remp)), -coff)
			for j := int64(0); j < n; j++ {
				remp[j] = 0
			}
			remo += n
			remp = remp[n:]
			coff = 0
			if len(remp) == 0 {
				return nil
			}
		}

		if !fl.IsReadAllowed(cfio.bs.Flags()) {
			return EPERM
		}

		bh, err := cfio.bs.Open(c.BlobPath, fl.O_RDONLY)
		if err != nil {
			return fmt.Errorf("Failed to open path \"%s\" for reading: %v", c.BlobPath, err)
		}
		defer func() {
			if err := bh.Close(); err != nil {
				log.Printf("blobhandle Close failed: %v", err)
			}
		}()

		cio := cfio.newChunkIO(bh, cfio.c, c.Offset)
		defer func() {
			if err := cio.Close(); err != nil {
				log.Printf("cio Close failed: %v", err)
			}
		}()

		// Read the part of this chunk that overlaps the remaining buffer.
		n := Int64Min(int64(len(remp)), c.Length-coff)
		if err := cio.PRead(coff, remp[:n]); err != nil {
			return err
		}

		remo += n
		remp = remp[n:]
		if len(remp) == 0 {
			return nil
		}
	}

	log.Printf("cs: %+v", cs)
	return fmt.Errorf("Attempt to read over file size by %d", len(remp))
}
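// Both ReadAt and the older PRead variant walk the sorted chunk list the same
// way: bytes covered by a chunk come from that chunk's blob, and bytes that
// fall in a gap before the next chunk are zero-filled. The standalone sketch
// below mirrors that interval walk on in-memory data to make the offset
// arithmetic concrete; the chunk type and readAt helper here are illustrative
// and are not part of the source.
package main

import "fmt"

// chunk models just the geometry the walk relies on: Left() is the chunk's
// start offset within the file, Right() is one past its last byte.
type chunk struct {
	left int64
	data []byte
}

func (c chunk) Left() int64  { return c.left }
func (c chunk) Right() int64 { return c.left + int64(len(c.data)) }

// readAt skips chunks that end before the read window, zero-fills the gap
// before a chunk, then copies the overlapping chunk bytes, until the buffer
// is full or the chunks run out. It returns the number of bytes written.
func readAt(cs []chunk, p []byte, offset int64) int {
	remo, remp := offset, p
	for i := 0; i < len(cs) && len(remp) > 0; i++ {
		c := cs[i]
		if c.Left() > remo+int64(len(remp)) {
			break
		}
		if c.Right() <= remo {
			continue
		}
		if gap := c.Left() - remo; gap > 0 {
			n := gap
			if int64(len(remp)) < n {
				n = int64(len(remp))
			}
			for j := int64(0); j < n; j++ {
				remp[j] = 0
			}
			remo, remp = remo+n, remp[n:]
			if len(remp) == 0 {
				break
			}
		}
		coff := remo - c.Left()
		n := int64(copy(remp, c.data[coff:]))
		remo, remp = remo+n, remp[n:]
	}
	return int(remo - offset)
}

func main() {
	// Chunks cover [0,10) and [20,30); the bytes in between are a hole.
	cs := []chunk{
		{left: 0, data: []byte("0123456789")},
		{left: 20, data: []byte("abcdefghij")},
	}
	p := make([]byte, 20)
	n := readAt(cs, p, 5)
	fmt.Printf("n=%d p=%q\n", n, p[:n]) // n=20 p="56789\x00...\x00abcde"
}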