func (of *OpenFile) CloseHandle(tgt *FileHandle) {
	if tgt.of == nil {
		logger.Warningf(fslog, "Detected FileHandle double close!")
		return
	}
	if tgt.of != of {
		logger.Criticalf(fslog, "Attempt to close handle for other OpenFile. tgt fh: %+v, of: %+v", tgt, of)
		return
	}

	wasWriteHandle := fl.IsWriteAllowed(tgt.flags)
	ofHasOtherWriteHandle := false

	tgt.of = nil

	of.mu.Lock()
	defer of.mu.Unlock()

	// remove tgt from of.handles slice
	newHandles := make([]*FileHandle, 0, len(of.handles)-1)
	for _, h := range of.handles {
		if h != tgt {
			if fl.IsWriteAllowed(h.flags) {
				ofHasOtherWriteHandle = true
			}
			newHandles = append(newHandles, h)
		}
	}
	of.handles = newHandles

	if wasWriteHandle && !ofHasOtherWriteHandle {
		of.downgradeToReadLock()
	}
}
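// The functions in this section all gate writes through the fl flag-helper
// package. The sketch below is an assumption about what fl.IsWriteAllowed /
// fl.IsReadAllowed presumably look like for an os.OpenFile-style bitmask
// (the constants themselves appear in NewFileBlobStore further down); the
// real fl package may use different values and a different read check.
// The *Sketch names are placeholders, not the actual API.
func isWriteAllowedSketch(flags int) bool {
	// Writable if the handle was opened write-only or read-write.
	return flags&(fl.O_WRONLY|fl.O_RDWR) != 0
}

func isReadAllowedSketch(flags int) bool {
	// Guess: readable unless the handle was opened write-only.
	return flags&fl.O_WRONLY == 0
}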
func New(backendbs blobstore.BlobStore, cachebs blobstore.RandomAccessBlobStore, s *scheduler.Scheduler, flags int, queryVersion version.QueryFunc) (*CachedBlobStore, error) {
	if fl.IsWriteAllowed(flags) {
		if fr, ok := backendbs.(fl.FlagsReader); ok {
			if !fl.IsWriteAllowed(fr.Flags()) {
				return nil, fmt.Errorf("Writable CachedBlobStore requested, but backendbs doesn't allow writes")
			}
		}
	}
	if !fl.IsWriteAllowed(cachebs.Flags()) {
		return nil, fmt.Errorf("CachedBlobStore requested, but cachebs doesn't allow writes")
	}

	cbs := &CachedBlobStore{
		backendbs:    backendbs,
		cachebs:      cachebs,
		s:            s,
		flags:        flags,
		queryVersion: queryVersion,
		bever:        NewCachedBackendVersion(backendbs, queryVersion),
		entriesmgr:   NewCachedBlobEntriesManager(),
		usagestats:   NewCacheUsageStats(),
	}

	if lister, ok := cachebs.(blobstore.BlobLister); ok {
		bps, err := lister.ListBlobs()
		if err != nil {
			return nil, fmt.Errorf("Failed to list blobs to init CacheUsageStats: %v", err)
		}
		cbs.usagestats.ImportBlobList(bps)
	}

	go cbs.entriesmgr.Run()
	return cbs, nil
}
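// Hypothetical usage sketch, not from the source: wiring a writable
// CachedBlobStore from two FileBlobStores. The directory paths are made up,
// the scheduler and queryVersion are assumed to be supplied by the caller,
// package qualifiers are omitted, and FileBlobStore is assumed to satisfy
// both blobstore.BlobStore and blobstore.RandomAccessBlobStore.
func newExampleCachedBlobStore(s *scheduler.Scheduler, queryVersion version.QueryFunc) (*CachedBlobStore, error) {
	// Both stores must permit writes, or New above rejects the combination.
	backendbs, err := NewFileBlobStore("/var/otaru/backend", fl.O_RDWR|fl.O_CREATE)
	if err != nil {
		return nil, err
	}
	cachebs, err := NewFileBlobStore("/var/otaru/cache", fl.O_RDWR|fl.O_CREATE)
	if err != nil {
		return nil, err
	}
	return New(backendbs, cachebs, s, fl.O_RDWR|fl.O_CREATE, queryVersion)
}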
func (cbs *CachedBlobStore) Open(blobpath string, flags int) (blobstore.BlobHandle, error) {
	if !fl.IsWriteAllowed(cbs.flags) && fl.IsWriteAllowed(flags) {
		return nil, EPERM
	}

	be, err := cbs.entriesmgr.OpenEntry(blobpath)
	if err != nil {
		return nil, err
	}
	return be.OpenHandle(cbs, flags)
}
func (fs *FileSystem) OpenFile(id inodedb.ID, flags int) (*FileHandle, error) {
	logger.Infof(fslog, "OpenFile(id: %v, flags rok: %t wok: %t)", id, fl.IsReadAllowed(flags), fl.IsWriteAllowed(flags))

	tryLock := fl.IsWriteAllowed(flags)
	if tryLock && !fl.IsWriteAllowed(fs.bs.Flags()) {
		return nil, EACCES
	}

	of := fs.getOrCreateOpenFile(id)

	of.mu.Lock()
	defer of.mu.Unlock()

	ofIsInitialized := of.nlock.ID != 0
	if ofIsInitialized && (of.nlock.HasTicket() || !tryLock) {
		// No need to upgrade lock. Just use cached filehandle.
		logger.Infof(fslog, "Using cached of for inode id: %v", id)
		return of.OpenHandleWithoutLock(flags), nil
	}

	// upgrade lock or acquire new lock...
	v, nlock, err := fs.idb.QueryNode(id, tryLock)
	if err != nil {
		return nil, err
	}
	if v.GetType() != inodedb.FileNodeT {
		if err := fs.idb.UnlockNode(nlock); err != nil {
			logger.Warningf(fslog, "Unlock node failed for non-file node: %v", err)
		}

		if v.GetType() == inodedb.DirNodeT {
			return nil, EISDIR
		}
		return nil, fmt.Errorf("Specified node not file but has type %v", v.GetType())
	}

	of.nlock = nlock
	caio := NewINodeDBChunksArrayIO(fs.idb, nlock)
	of.cfio = chunkstore.NewChunkedFileIO(fs.bs, fs.c, caio)
	of.cfio.SetOrigFilename(fs.tryGetOrigPath(nlock.ID))

	if fl.IsWriteTruncate(flags) {
		if err := of.truncateWithLock(0); err != nil {
			return nil, fmt.Errorf("Failed to truncate file: %v", err)
		}
	}

	fh := of.OpenHandleWithoutLock(flags)
	return fh, nil
}
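// Hypothetical sketch, not from the source, of what OpenHandleWithoutLock
// presumably does, inferred from the fields CloseHandle above manipulates
// (tgt.of, tgt.flags, of.handles): allocate a FileHandle bound to this
// OpenFile and register it. The caller is expected to already hold of.mu.
// The real FileHandle struct may carry additional fields.
func (of *OpenFile) openHandleWithoutLockSketch(flags int) *FileHandle {
	fh := &FileHandle{of: of, flags: flags}
	of.handles = append(of.handles, fh)
	return fh
}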
func (bh *CachedBlobHandle) PWrite(p []byte, offset int64) error {
	if !fl.IsWriteAllowed(bh.flags) {
		return EPERM
	}

	return bh.be.PWrite(p, offset)
}
func (fh *FileHandle) PWrite(offset int64, p []byte) error {
	if !fl.IsWriteAllowed(fh.flags) {
		return EBADF
	}
	return fh.of.PWrite(offset, p)
}
func (bh *CachedBlobHandle) Truncate(newsize int64) error {
	if !fl.IsWriteAllowed(bh.flags) {
		return EPERM
	}

	return bh.be.Truncate(newsize)
}
func (bh *CachedBlobHandle) Sync() error {
	if !fl.IsWriteAllowed(bh.flags) {
		return nil
	}

	return bh.be.Sync()
}
func (fh *FileHandle) Sync() error {
	if !fl.IsWriteAllowed(fh.flags) {
		return nil
	}
	return fh.of.Sync()
}
func (fh *FileHandle) Truncate(newsize int64) error {
	if !fl.IsWriteAllowed(fh.flags) {
		return EBADF
	}
	return fh.of.Truncate(newsize)
}
func (f *FileBlobStore) OpenWriter(blobpath string) (io.WriteCloser, error) {
	if !fl.IsWriteAllowed(f.flags) {
		return nil, EPERM
	}

	realpath := path.Join(f.base, blobpath)
	return os.Create(realpath)
}
func (bs *GCSBlobStore) OpenWriter(blobpath string) (io.WriteCloser, error) {
	if !oflags.IsWriteAllowed(bs.flags) {
		return nil, otaru.EPERM
	}

	ctx := bs.newAuthedContext(context.TODO())
	gcsw := storage.NewWriter(ctx, bs.bucketName, blobpath)
	gcsw.ContentType = "application/octet-stream"
	return &Writer{gcsw}, nil
}
func (fh *FileHandle) PWrite(p []byte, offset int64) error {
	if !fl.IsWriteAllowed(fh.flags) {
		return EBADF
	}
	if fl.IsWriteAppend(fh.flags) {
		return fh.of.Append(p)
	}
	return fh.of.PWrite(p, offset)
}
func New(backendbs blobstore.BlobStore, cachebs blobstore.RandomAccessBlobStore, flags int, queryVersion QueryVersionFunc) (*CachedBlobStore, error) {
	if fl.IsWriteAllowed(flags) {
		if fr, ok := backendbs.(fl.FlagsReader); ok {
			if !fl.IsWriteAllowed(fr.Flags()) {
				return nil, fmt.Errorf("Writable CachedBlobStore requested, but backendbs doesn't allow writes")
			}
		}
	}
	if !fl.IsWriteAllowed(cachebs.Flags()) {
		return nil, fmt.Errorf("CachedBlobStore requested, but cachebs doesn't allow writes")
	}

	cbs := &CachedBlobStore{
		backendbs:    backendbs,
		cachebs:      cachebs,
		flags:        flags,
		queryVersion: queryVersion,
		bever:        NewCachedBackendVersion(backendbs, queryVersion),
		entriesmgr:   NewCachedBlobEntriesManager(),
	}

	go cbs.entriesmgr.Run()
	return cbs, nil
}
func (s *CacheUsageStats) ObserveOpen(blobpath string, flags int) {
	s.mu.Lock()
	defer s.mu.Unlock()

	e := s.entries[blobpath]
	if fl.IsReadAllowed(flags) {
		e.readCount++
	}
	if fl.IsWriteAllowed(flags) {
		e.writeCount++
	}
	e.lastUsed = time.Now()
	s.entries[blobpath] = e
}
func NewFileBlobStore(base string, flags int) (*FileBlobStore, error) {
	base = path.Clean(base)

	fi, err := os.Stat(base)
	if err != nil {
		return nil, fmt.Errorf("Fstat base \"%s\" failed: %v", base, err)
	}
	if !fi.Mode().IsDir() {
		return nil, fmt.Errorf("Specified base \"%s\" is not a directory", base)
	}

	fmask := fl.O_RDONLY
	if fl.IsWriteAllowed(flags) {
		fmask = fl.O_RDONLY | fl.O_WRONLY | fl.O_RDWR | fl.O_CREATE | fl.O_EXCL
	}

	return &FileBlobStore{base, flags, fmask}, nil
}
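// Hypothetical sketch, not from the source: how the fmask computed above is
// presumably applied when a blob is later opened, so that a read-only store
// never grants write/create flags even if a caller requests them. The real
// FileBlobStore open path and its signature may differ.
func (f *FileBlobStore) maskedFlagsSketch(requested int) int {
	return requested & f.fmask
}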
func (be *CachedBlobEntry) infoWithLock() *CachedBlobEntryInfo {
	numWriters := 0
	for h := range be.handles {
		if fl.IsWriteAllowed(h.Flags()) {
			numWriters++
		}
	}

	return &CachedBlobEntryInfo{
		BlobPath:              be.blobpath,
		State:                 be.state.String(),
		BlobLen:               be.bloblen,
		ValidLen:              be.validlen,
		SyncCount:             be.syncCount,
		LastUsed:              be.lastUsed,
		LastWrite:             be.lastWrite,
		LastSync:              be.lastSync,
		NumberOfWriterHandles: numWriters,
		NumberOfHandles:       len(be.handles),
	}
}