func TestChunkedFileIO_SingleChunk(t *testing.T) { caio := NewSimpleDBChunksArrayIO() bs := blobstore.NewMockBlobStore() cfio := chunkstore.NewChunkedFileIO(bs, TestCipher(), caio) // Disable Chunk framing for testing cfio.OverrideNewChunkIOForTesting(func(bh blobstore.BlobHandle, c btncrypt.Cipher, offset int64) blobstore.BlobHandle { return bh }) if err := cfio.PWrite(123, HelloWorld); err != nil { t.Errorf("PWrite failed: %v", err) return } if err := cfio.PWrite(456, HelloWorld); err != nil { t.Errorf("PWrite failed: %v", err) return } if len(caio.cs) != 1 { t.Errorf("len(caio.cs) %d", len(caio.cs)) return } if caio.cs[0].Offset != 0 { t.Errorf("Chunk at invalid offset: %d", caio.cs[1].Offset) } bh := bs.Paths[caio.cs[0].BlobPath] if bh.Log[0].Offset != 123 { t.Errorf("Chunk write at invalid offset: %d", bh.Log[0].Offset) } if bh.Log[1].Offset != 456 { t.Errorf("Chunk write at invalid offset: %d", bh.Log[0].Offset) } }
func TestChunkedFileIO_FileBlobStore(t *testing.T) { caio := NewSimpleDBChunksArrayIO() fbs := TestFileBlobStore() cfio := chunkstore.NewChunkedFileIO(fbs, TestCipher(), caio) if err := cfio.PWrite(HelloWorld, 0); err != nil { t.Errorf("PWrite failed: %v", err) return } readtgt := make([]byte, len(HelloWorld)) n, err := cfio.ReadAt(readtgt, 0) if err != nil { t.Errorf("ReadAt failed: %v", err) return } if n != len(HelloWorld) { t.Errorf("Unexpected partial read. n=%d", n) } if !bytes.Equal(HelloWorld, readtgt) { t.Errorf("read content invalid: %v", readtgt) } if int64(len(HelloWorld)) != cfio.Size() { t.Errorf("len invalid: %v", cfio.Size()) } }
// OpenFile opens the inode `id` with the given open flags and returns a
// FileHandle. OpenFile instances are cached per inode; a cached entry is
// reused directly when it already holds a sufficient lock, otherwise the
// node lock is (re)acquired and the chunked file IO is rebuilt.
func (fs *FileSystem) OpenFile(id inodedb.ID, flags int) (*FileHandle, error) {
	logger.Infof(fslog, "OpenFile(id: %v, flags rok: %t wok: %t)", id, fl.IsReadAllowed(flags), fl.IsWriteAllowed(flags))

	// Writable opens require an exclusive node lock (a "ticket").
	tryLock := fl.IsWriteAllowed(flags)
	if tryLock && !fl.IsWriteAllowed(fs.bs.Flags()) {
		// The backing blobstore itself is read-only; refuse writable open.
		return nil, EACCES
	}

	of := fs.getOrCreateOpenFile(id)
	of.mu.Lock()
	defer of.mu.Unlock()

	// A zero nlock.ID means this OpenFile was freshly created and has not
	// been bound to a node yet.
	ofIsInitialized := of.nlock.ID != 0
	if ofIsInitialized && (of.nlock.HasTicket() || !tryLock) {
		// No need to upgrade lock. Just use cached filehandle.
		logger.Infof(fslog, "Using cached of for inode id: %v", id)
		return of.OpenHandleWithoutLock(flags), nil
	}

	// upgrade lock or acquire new lock...
	v, nlock, err := fs.idb.QueryNode(id, tryLock)
	if err != nil {
		return nil, err
	}
	if v.GetType() != inodedb.FileNodeT {
		// Not a regular file: release the lock we just took before bailing.
		if err := fs.idb.UnlockNode(nlock); err != nil {
			logger.Warningf(fslog, "Unlock node failed for non-file node: %v", err)
		}

		if v.GetType() == inodedb.DirNodeT {
			return nil, EISDIR
		}
		return nil, fmt.Errorf("Specified node not file but has type %v", v.GetType())
	}

	// Bind the (possibly upgraded) lock and rebuild the chunk IO against it.
	of.nlock = nlock
	caio := NewINodeDBChunksArrayIO(fs.idb, nlock)
	of.cfio = chunkstore.NewChunkedFileIO(fs.bs, fs.c, caio)
	of.cfio.SetOrigFilename(fs.tryGetOrigPath(nlock.ID))

	if fl.IsWriteTruncate(flags) {
		// O_TRUNC-style open: empty the file before handing out the handle.
		if err := of.truncateWithLock(0); err != nil {
			return nil, fmt.Errorf("Failed to truncate file: %v", err)
		}
	}

	fh := of.OpenHandleWithoutLock(flags)
	return fh, nil
}
func (of *OpenFile) downgradeToReadLock() { logger.Infof(fslog, "Downgrade %v to read lock.", of) // Note: assumes of.mu is Lock()-ed if !of.nlock.HasTicket() { logger.Warningf(fslog, "Attempt to downgrade node lock, but no excl lock found. of: %v", of) return } if err := of.fs.idb.UnlockNode(of.nlock); err != nil { logger.Warningf(fslog, "Unlocking node to downgrade to read lock failed: %v", err) } of.nlock.Ticket = inodedb.NoTicket caio := NewINodeDBChunksArrayIO(of.fs.idb, of.nlock) of.cfio = chunkstore.NewChunkedFileIO(of.fs.bs, of.fs.c, caio) }
func NewFileSystem(idb inodedb.DBHandler, bs blobstore.RandomAccessBlobStore, c btncrypt.Cipher) *FileSystem { fs := &FileSystem{ idb: idb, bs: bs, c: c, newChunkedFileIO: func(bs blobstore.RandomAccessBlobStore, c btncrypt.Cipher, caio chunkstore.ChunksArrayIO) blobstore.BlobHandle { return chunkstore.NewChunkedFileIO(bs, c, caio) }, openFiles: make(map[inodedb.ID]*OpenFile), origpath: make(map[inodedb.ID]string), } fs.setOrigPathForId(inodedb.RootDirID, "/") return fs }
func TestChunkedFileIO_MultiChunk(t *testing.T) { caio := NewSimpleDBChunksArrayIO() bs := blobstore.NewMockBlobStore() cfio := chunkstore.NewChunkedFileIO(bs, TestCipher(), caio) // Disable Chunk framing for testing cfio.OverrideNewChunkIOForTesting(func(bh blobstore.BlobHandle, c btncrypt.Cipher, offset int64) blobstore.BlobHandle { return bh }) if err := cfio.PWrite(chunkstore.ChunkSplitSize+12345, HelloWorld); err != nil { t.Errorf("PWrite failed: %v", err) return } if err := cfio.PWrite(123, HelloWorld); err != nil { t.Errorf("PWrite failed: %v", err) return } if len(caio.cs) != 2 { t.Errorf("len(caio.cs) %d", len(caio.cs)) return } if caio.cs[0].Offset != 0 { t.Errorf("Chunk at invalid offset: %d", caio.cs[1].Offset) } bh := bs.Paths[caio.cs[0].BlobPath] if bh.Log[0].Offset != 123 { t.Errorf("Chunk write at invalid offset: %d", bh.Log[0].Offset) } if caio.cs[1].Offset != chunkstore.ChunkSplitSize { t.Errorf("Split chunk at invalid offset: %d", caio.cs[1].Offset) } bh = bs.Paths[caio.cs[1].BlobPath] if bh.Log[0].Offset != 12345 { t.Errorf("Split chunk write at invalid offset: %d", bh.Log[0].Offset) } if err := cfio.PWrite(chunkstore.ChunkSplitSize-5, HelloWorld); err != nil { t.Errorf("PWrite failed: %v", err) return } bh = bs.Paths[caio.cs[1].BlobPath] if !reflect.DeepEqual(bh.Log[1], blobstore.MockBlobStoreOperation{'W', 0, 7, HelloWorld[5]}) { fmt.Printf("? %+v\n", bh.Log[1]) } }
func TestChunkedFileIO_FileBlobStore(t *testing.T) { caio := NewSimpleDBChunksArrayIO() fbs := TestFileBlobStore() cfio := chunkstore.NewChunkedFileIO(fbs, TestCipher(), caio) if err := cfio.PWrite(0, HelloWorld); err != nil { t.Errorf("PWrite failed: %v", err) return } readtgt := make([]byte, len(HelloWorld)) if err := cfio.PRead(0, readtgt); err != nil { t.Errorf("PRead failed: %v", err) return } if !bytes.Equal(HelloWorld, readtgt) { t.Errorf("read content invalid: %v", readtgt) } if int64(len(HelloWorld)) != cfio.Size() { t.Errorf("len invalid: %v", cfio.Size()) } }