Example #1
// invalidateCacheBlob fetches new version of the blob from backendbs.
// This func should be only called from be.invalidateCache()
func (be *CachedBlobEntry) invalidateCacheBlob(cbs *CachedBlobStore) error {
	blobpath := be.blobpath

	backendr, err := cbs.backendbs.OpenReader(blobpath)
	if err != nil {
		return fmt.Errorf("Failed to open backend blob for cache invalidation: %v", err)
	}
	defer func() {
		if err := backendr.Close(); err != nil {
			logger.Criticalf(mylog, "Failed to close backend blob reader for cache invalidation: %v", err)
		}
	}()

	bs, ok := cbs.cachebs.(blobstore.BlobStore)
	if !ok {
		return fmt.Errorf("FIXME: only cachebs supporting OpenWriter is currently supported")
	}

	cachew, err := bs.OpenWriter(blobpath)
	if err != nil {
		return fmt.Errorf("Failed to open cache blob writer for cache invalidation: %v", err)
	}
	defer func() {
		if err := cachew.Close(); err != nil {
			logger.Criticalf(mylog, "Failed to close cache blob writer for cache invalidation: %v", err)
		}
	}()

	buf := make([]byte, invalidateBlockSize)
	for {
		nr, er := backendr.Read(buf)
		if nr > 0 {
			nw, ew := cachew.Write(buf[:nr])
			if nw > 0 {
				be.mu.Lock()
				be.validlen += int64(nw)
				be.validlenExtended.Broadcast()
				be.mu.Unlock()
			}
			if ew != nil {
				return fmt.Errorf("Failed to write backend blob content to cache: %v", err)
			}
			if nw != nr {
				return fmt.Errorf("Failed to write backend blob content to cache: %v", io.ErrShortWrite)
			}
		}
		if er != nil {
			if er == io.EOF {
				break
			}
			return fmt.Errorf("Failed to read backend blob content: %v", err)
		}
	}

	// FIXME: check integrity here?

	return nil
}
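The manual block-copy loop above (instead of a single io.Copy) exists so that goroutines waiting on be.validlen can be woken as each block lands in the cache. A minimal, self-contained sketch of that pattern, using the standard io and sync packages; all identifiers here are illustrative and not part of otaru:

// progressCopy copies src to dst in fixed-size blocks and publishes the byte
// count written so far, broadcasting on cond so blocked readers can proceed.
func progressCopy(dst io.Writer, src io.Reader, blockSize int, mu *sync.Mutex, validLen *int64, cond *sync.Cond) error {
	buf := make([]byte, blockSize)
	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			nw, ew := dst.Write(buf[:nr])
			if nw > 0 {
				mu.Lock()
				*validLen += int64(nw) // progress visible to waiters
				cond.Broadcast()
				mu.Unlock()
			}
			if ew != nil {
				return ew
			}
			if nw != nr {
				return io.ErrShortWrite
			}
		}
		if er == io.EOF {
			return nil
		}
		if er != nil {
			return er
		}
	}
}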
Example #2
func (of *OpenFile) CloseHandle(tgt *FileHandle) {
	if tgt.of == nil {
		logger.Warningf(fslog, "Detected FileHandle double close!")
		return
	}
	if tgt.of != of {
		logger.Criticalf(fslog, "Attempt to close handle for other OpenFile. tgt fh: %+v, of: %+v", tgt, of)
		return
	}

	wasWriteHandle := fl.IsWriteAllowed(tgt.flags)
	ofHasOtherWriteHandle := false

	tgt.of = nil

	of.mu.Lock()
	defer of.mu.Unlock()

	// remove tgt from of.handles slice
	newHandles := make([]*FileHandle, 0, len(of.handles)-1)
	for _, h := range of.handles {
		if h != tgt {
			if fl.IsWriteAllowed(h.flags) {
				ofHasOtherWriteHandle = true
			}
			newHandles = append(newHandles, h)
		}
	}
	of.handles = newHandles

	if wasWriteHandle && !ofHasOtherWriteHandle {
		of.downgradeToReadLock()
	}
}
Example #3
func (cbv *CachedBackendVersion) Query(blobpath string) (version.Version, error) {
	cbv.mu.Lock()
	defer cbv.mu.Unlock() // FIXME: unlock earlier?

	if ver, ok := cbv.cache[blobpath]; ok {
		logger.Debugf(mylog, "return cached ver for \"%s\" -> %d", blobpath, ver)
		return ver, nil
	}

	r, err := cbv.backendbs.OpenReader(blobpath)
	if err != nil {
		if err == ENOENT {
			cbv.cache[blobpath] = 0
			return 0, nil
		}
		return -1, fmt.Errorf("Failed to open backend blob for ver query: %v", err)
	}
	defer func() {
		if err := r.Close(); err != nil {
			logger.Criticalf(mylog, "Failed to close backend blob handle for querying version: %v", err)
		}
	}()
	ver, err := cbv.queryVersion(r)
	if err != nil {
		return -1, fmt.Errorf("Failed to query backend blob ver: %v", err)
	}

	cbv.cache[blobpath] = ver
	return ver, nil
}
Example #4
func Bazil2OtaruFlags(bf bfuse.OpenFlags) int {
	ret := 0
	if bf.IsReadOnly() {
		ret = oflags.O_RDONLY
	} else if bf.IsWriteOnly() {
		ret = oflags.O_WRONLY
	} else if bf.IsReadWrite() {
		ret = oflags.O_RDWR
	}

	if bf&bfuse.OpenAppend != 0 {
		ret |= oflags.O_APPEND
	}
	if bf&bfuse.OpenCreate != 0 {
		ret |= oflags.O_CREATE
	}
	if bf&bfuse.OpenExclusive != 0 {
		ret |= oflags.O_EXCL
	}
	if bf&bfuse.OpenSync != 0 {
		logger.Criticalf(mylog, "FIXME: OpenSync not supported yet !!!!!!!!!!!")
	}
	if bf&bfuse.OpenTruncate != 0 {
		ret |= oflags.O_TRUNCATE
	}

	return ret
}
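A hedged usage sketch for the mapping above; the test function and expected values are inferred from this snippet, not taken from otaru's test suite:

func TestBazil2OtaruFlags(t *testing.T) {
	// bfuse.OpenReadWrite and bfuse.OpenAppend are bazil.org/fuse open flags.
	got := Bazil2OtaruFlags(bfuse.OpenReadWrite | bfuse.OpenAppend)
	want := oflags.O_RDWR | oflags.O_APPEND
	if got != want {
		t.Errorf("Bazil2OtaruFlags = %x, want %x", got, want)
	}
}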
Example #5
func (mgr *CachedBlobEntriesManager) tryCloseEntry(be *CachedBlobEntry) {
	if err := be.Close(writebackAndClose); err != nil {
		logger.Criticalf(mylog, "Failed to close cache entry \"%s\": %v", be.blobpath, err)
		return
	}

	delete(mgr.entries, be.blobpath)
}
Example #6
func TestHandleCritical(t *testing.T) {
	called := false
	h := logger.HandleCritical(func() { called = true })

	logger.Debugf(h, "debug")
	if called {
		t.Errorf("Shouldn't be triggered from debug msg")
	}
	logger.Criticalf(h, "critical")
	if !called {
		t.Errorf("Should be triggered from debug msg")
	}

	called = false
	logger.Criticalf(h, "critical2")
	if !called {
		t.Errorf("Should be triggered only once")
	}
}
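The test expects HandleCritical to return a log sink whose callback fires whenever a critical-level message passes through it. otaru's actual implementation isn't shown here; a self-contained sketch of the idea, with hypothetical Level and criticalHook types:

type Level int

const (
	Debug Level = iota
	Info
	Warning
	Critical
)

// criticalHook invokes cb for every critical-level record it receives.
// Purely illustrative; not otaru's logger package.
type criticalHook struct {
	cb func()
}

func (h *criticalHook) Log(lv Level, msg string) {
	if lv >= Critical {
		h.cb()
	}
}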
Example #7
func (cfio *ChunkedFileIO) Size() int64 {
	cs, err := cfio.caio.Read()
	if err != nil {
		logger.Criticalf(mylog, "Failed to read cs array: %v", err)
		return 0
	}
	if len(cs) == 0 {
		return 0
	}
	return cs[len(cs)-1].Right()
}
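Size() here is the right edge of the final chunk rather than the sum of chunk lengths, so sparse gaps count toward the reported size. Assuming Right() is Offset+Length (an inference from the field names in Example #10, not confirmed by this snippet):

func exampleChunkedSize() int64 {
	// Illustrative only: a file whose last chunk starts at 8 MiB and holds
	// 1 MiB of payload reports a size of 9 MiB, regardless of gaps before it.
	last := inodedb.FileChunk{Offset: 8 << 20, Length: 1 << 20}
	return last.Offset + last.Length // assumed to equal last.Right()
}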
Example #8
func (ch *ChunkIO) writeContentFrame(i int, f *decryptedContentFrame) error {
	// the offset of the start of the frame in blob
	blobOffset := ch.encryptedFrameOffset(i)

	wr := &blobstore.OffsetWriter{ch.bh, int64(blobOffset)}
	bew, err := btncrypt.NewWriteCloser(wr, ch.c, len(f.P))
	if err != nil {
		return fmt.Errorf("Failed to create BtnEncryptWriteCloser: %v", err)
	}
	defer func() {
		if err := bew.Close(); err != nil {
			logger.Criticalf(mylog, "Failed to Close BtnEncryptWriteCloser: %v", err)
		}
	}()
	if _, err := bew.Write(f.P); err != nil {
		return fmt.Errorf("Failed to encrypt frame: %v", err)
	}
	ch.header.PayloadVersion++
	ch.needsHeaderUpdate = true

	logger.Debugf(mylog, "ChunkIO: Wrote content frame idx: %d", i)
	return nil
}
Example #9
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	logger.Registry().AddOutput(logger.WriterLogger{os.Stderr})
	flag.Usage = Usage
	flag.Parse()

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		logger.Criticalf(mylog, "%v", err)
		Usage()
		os.Exit(2)
	}
	if flag.NArg() != 1 {
		Usage()
		os.Exit(2)
	}
	mountpoint := flag.Arg(0)

	if err := facade.SetupFluentLogger(cfg); err != nil {
		logger.Criticalf(mylog, "Failed to setup fluentd logger: %v", err)
		os.Exit(1)
	}

	o, err := facade.NewOtaru(cfg, &facade.OneshotConfig{Mkfs: *flagMkfs})
	if err != nil {
		logger.Criticalf(mylog, "NewOtaru failed: %v", err)
		os.Exit(1)
	}
	var muClose sync.Mutex
	closeOtaruAndExit := func(exitCode int) {
		muClose.Lock()
		defer muClose.Unlock()

		if err := bfuse.Unmount(mountpoint); err != nil {
			logger.Warningf(mylog, "umount err: %v", err)
		}
		if o != nil {
			if err := o.Close(); err != nil {
				logger.Warningf(mylog, "Otaru.Close() returned errs: %v", err)
			}
			o = nil
		}
		os.Exit(exitCode)
	}
	defer closeOtaruAndExit(0)

	sigC := make(chan os.Signal, 1)
	signal.Notify(sigC, os.Interrupt)
	signal.Notify(sigC, syscall.SIGTERM)
	go func() {
		for s := range sigC {
			logger.Warningf(mylog, "Received signal: %v", s)
			closeOtaruAndExit(1)
		}
	}()
	logger.Registry().AddOutput(logger.HandleCritical(func() {
		logger.Warningf(mylog, "Starting shutdown due to critical event.")
		closeOtaruAndExit(1)
	}))

	bfuseLogger := logger.Registry().Category("bfuse")
	bfuse.Debug = func(msg interface{}) { logger.Debugf(bfuseLogger, "%v", msg) }
	if err := fuse.ServeFUSE(cfg.BucketName, mountpoint, o.FS, nil); err != nil {
		logger.Warningf(mylog, "ServeFUSE failed: %v", err)
		closeOtaruAndExit(1)
	}
	logger.Infof(mylog, "ServeFUSE end!")
}
Example #10
func (cfio *ChunkedFileIO) PWrite(p []byte, offset int64) error {
	logger.Debugf(mylog, "PWrite: offset=%d, len=%d", offset, len(p))
	// logger.Debugf(mylog, "PWrite: p=%v", p)
	remo := offset
	remp := p
	if len(remp) == 0 {
		return nil
	}

	cs, err := cfio.caio.Read()
	if err != nil {
		return fmt.Errorf("Failed to read cs array: %v", err)
	}

	writeToChunk := func(c *inodedb.FileChunk, isNewChunk bool, maxChunkLen int64) error {
		if !fl.IsReadWriteAllowed(cfio.bs.Flags()) {
			return EPERM
		}

		flags := fl.O_RDWR
		if isNewChunk {
			flags |= fl.O_CREATE | fl.O_EXCL
		}
		bh, err := cfio.bs.Open(c.BlobPath, flags)
		if err != nil {
			return fmt.Errorf("Failed to open path \"%s\" for writing (isNewChunk: %t): %v", c.BlobPath, isNewChunk, err)
		}
		defer func() {
			if err := bh.Close(); err != nil {
				logger.Criticalf(mylog, "blobhandle Close failed: %v", err)
			}
		}()

		cio := cfio.newChunkIO(bh, cfio.c, c.Offset)
		defer func() {
			if err := cio.Close(); err != nil {
				logger.Criticalf(mylog, "cio Close failed: %v", err)
			}
		}()

		coff := remo - c.Offset
		n := util.IntMin(len(remp), int(maxChunkLen-coff))
		if n < 0 {
			return nil
		}
		if err := cio.PWrite(remp[:n], coff); err != nil {
			return err
		}
		oldLength := c.Length
		c.Length = int64(cio.Size())
		if oldLength != c.Length {
			if err := cfio.caio.Write(cs); err != nil {
				return fmt.Errorf("Failed to write updated cs array: %v", err)
			}
		}

		remo += int64(n)
		remp = remp[n:]
		return nil
	}

	for i := 0; i < len(cs); i++ {
		c := &cs[i]
		if c.Left() > remo {
			// Insert a new chunk @ i

			// try best to align offset at ChunkSplitSize
			newo := remo / ChunkSplitSize * ChunkSplitSize
			maxlen := int64(ChunkSplitSize)
			if i > 0 {
				prev := cs[i-1]
				pright := prev.Right()
				if newo < pright {
					maxlen -= pright - newo
					newo = pright
				}
			}
			if i < len(cs)-1 {
				next := cs[i+1]
				if newo+maxlen > next.Left() {
					maxlen = next.Left() - newo
				}
			}

			newc, err := cfio.newFileChunk(newo)
			if err != nil {
				return err
			}
			cs = append(cs, inodedb.FileChunk{})
			copy(cs[i+1:], cs[i:])
			cs[i] = newc
			if err := cfio.caio.Write(cs); err != nil {
				return fmt.Errorf("Failed to write updated cs array: %v", err)
			}

			if err := writeToChunk(&newc, NewChunk, maxlen); err != nil {
				return err
			}
			if len(remp) == 0 {
				break
			}

			continue
		}

		// Write to the chunk
		maxlen := int64(ChunkSplitSize)
		if i < len(cs)-1 {
			next := cs[i+1]
			if c.Left()+maxlen > next.Left() {
				maxlen = next.Left() - c.Left()
			}
		}
		if err := writeToChunk(c, ExistingChunk, maxlen); err != nil {
			return err
		}
		if len(remp) == 0 {
			break
		}
	}

	for len(remp) > 0 {
		// Append a new chunk at the end
		newo := remo / ChunkSplitSize * ChunkSplitSize
		maxlen := int64(ChunkSplitSize)

		if len(cs) > 0 {
			last := cs[len(cs)-1]
			lastRight := last.Right()
			if newo < lastRight {
				maxlen -= lastRight - newo
				newo = lastRight
			}
		}

		newc, err := cfio.newFileChunk(newo)
		if err != nil {
			return err
		}
		if err := writeToChunk(&newc, NewChunk, maxlen); err != nil {
			return err
		}

		cs = append(cs, newc)
		if err := cfio.caio.Write(cs); err != nil {
			return fmt.Errorf("Failed to write updated cs array: %v", err)
		}
	}

	return nil
}
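The expression remo / ChunkSplitSize * ChunkSplitSize rounds the write offset down to a chunk boundary before maxlen is trimmed against the neighbouring chunks. A quick sketch with an assumed 4 MiB split size; the real ChunkSplitSize value isn't shown in this snippet:

const exampleChunkSplitSize = 4 << 20 // assumed value, for illustration only

// alignDown rounds offset down to the nearest chunk boundary, mirroring the
// remo / ChunkSplitSize * ChunkSplitSize expression in PWrite above.
func alignDown(offset int64) int64 {
	return offset / exampleChunkSplitSize * exampleChunkSplitSize
}

// alignDown(5<<20) == 4<<20: a write at 5 MiB opens a chunk at the 4 MiB
// boundary, and maxlen is then clipped so it cannot overlap its neighbours.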
Example #11
func (cfio *ChunkedFileIO) ReadAt(p []byte, offset int64) (int, error) {
	remo := offset
	remp := p

	if offset < 0 {
		return 0, fmt.Errorf("negative offset %d given", offset)
	}

	cs, err := cfio.caio.Read()
	if err != nil {
		return 0, fmt.Errorf("Failed to read cs array: %v", err)
	}

	if !fl.IsReadAllowed(cfio.bs.Flags()) {
		return 0, EPERM
	}

	// fmt.Printf("cs: %v\n", cs)
	for i := 0; i < len(cs) && len(remp) > 0; i++ {
		c := cs[i]
		if c.Left() > remo+int64(len(remp)) {
			break
		}
		if c.Right() <= remo {
			continue
		}

		coff := remo - c.Left()
		if coff < 0 {
			// Fill gap with zero
			n := util.Int64Min(int64(len(remp)), -coff)
			for j := int64(0); j < n; j++ {
				remp[j] = 0
			}
			remo += n
			remp = remp[n:] // advance past the zero-filled gap
			coff = 0
			if len(remp) == 0 {
				return int(remo - offset), nil
			}
		}

		bh, err := cfio.bs.Open(c.BlobPath, fl.O_RDONLY)
		if err != nil {
			return int(remo - offset), fmt.Errorf("Failed to open path \"%s\" for reading: %v", c.BlobPath, err)
		}
		defer func() {
			if err := bh.Close(); err != nil {
				logger.Criticalf(mylog, "blobhandle Close failed: %v", err)
			}
		}()

		cio := cfio.newChunkIO(bh, cfio.c, c.Offset)
		defer func() {
			if err := cio.Close(); err != nil {
				logger.Criticalf(mylog, "cio Close failed: %v", err)
			}
		}()

		n := util.Int64Min(int64(len(remp)), c.Length-coff)
		if err := cio.PRead(remp[:n], coff); err != nil {
			return int(remo - offset), err
		}

		remo += n
		remp = remp[n:]

		if len(remp) == 0 {
			return int(remo - offset), nil
		}
	}

	// logger.Debugf(mylog, "cs: %+v", cs)
	return int(remo - offset), nil
}
Example #12
func (ch *ChunkIO) Header() ChunkHeader {
	if err := ch.ensureHeader(); err != nil {
		logger.Criticalf(mylog, "Failed to ensureHeader(): %v", err)
	}
	return ch.header
}