Example #1
func (fs *FileSystem) tryGetOrigPath(id inodedb.ID) string {
	fs.muOrigPath.Lock()
	defer fs.muOrigPath.Unlock()

	origpath, ok := fs.origpath[id]
	if !ok {
		logger.Warningf(fslog, "Failed to lookup orig path for ID %d", id)
		return "<unknown>"
	}
	logger.Warningf(fslog, "Orig path for ID %d is \"%s\"", id, origpath)
	return origpath
}
Example #2
func (of *OpenFile) downgradeToReadLock() {
	logger.Infof(fslog, "Downgrade %v to read lock.", of)
	// Note: assumes of.mu is Lock()-ed

	if !of.nlock.HasTicket() {
		logger.Warningf(fslog, "Attempt to downgrade node lock, but no excl lock found. of: %v", of)
		return
	}

	if err := of.fs.idb.UnlockNode(of.nlock); err != nil {
		logger.Warningf(fslog, "Unlocking node to downgrade to read lock failed: %v", err)
	}
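	// Drop the exclusive-lock ticket and rebuild the chunked file IO against
	// the now ticket-less node lock.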
	of.nlock.Ticket = inodedb.NoTicket
	caio := NewINodeDBChunksArrayIO(of.fs.idb, of.nlock)
	of.cfio = chunkstore.NewChunkedFileIO(of.fs.bs, of.fs.c, caio)
}
Example #3
func (of *OpenFile) CloseHandle(tgt *FileHandle) {
	if tgt.of == nil {
		logger.Warningf(fslog, "Detected FileHandle double close!")
		return
	}
	if tgt.of != of {
		logger.Criticalf(fslog, "Attempt to close handle for other OpenFile. tgt fh: %+v, of: %+v", tgt, of)
		return
	}

	wasWriteHandle := fl.IsWriteAllowed(tgt.flags)
	ofHasOtherWriteHandle := false

	tgt.of = nil

	of.mu.Lock()
	defer of.mu.Unlock()

	// remove tgt from of.handles slice
	newHandles := make([]*FileHandle, 0, len(of.handles)-1)
	for _, h := range of.handles {
		if h != tgt {
			if fl.IsWriteAllowed(h.flags) {
				ofHasOtherWriteHandle = true
			}
			newHandles = append(newHandles, h)
		}
	}
	of.handles = newHandles

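	// If the handle being closed was the last one with write access,
	// the exclusive node lock is no longer needed; downgrade to a read lock.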
	if wasWriteHandle && !ofHasOtherWriteHandle {
		of.downgradeToReadLock()
	}
}
Example #4
func (fs *FileSystem) createNode(dirID inodedb.ID, name string, typ inodedb.Type, permmode uint16, uid, gid uint32, modifiedT time.Time) (inodedb.ID, error) {
	nlock, err := fs.idb.LockNode(inodedb.AllocateNewNodeID)
	if err != nil {
		return 0, err
	}
	defer func() {
		if err := fs.idb.UnlockNode(nlock); err != nil {
			logger.Warningf(fslog, "Failed to unlock node when creating file: %v", err)
		}
	}()

	dirorigpath := fs.tryGetOrigPath(dirID)
	origpath := fmt.Sprintf("%s/%s", dirorigpath, name)

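	// Create the node and hard-link it into the parent directory in a single transaction.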
	tx := inodedb.DBTransaction{Ops: []inodedb.DBOperation{
		&inodedb.CreateNodeOp{NodeLock: nlock, OrigPath: origpath, ParentID: dirID, Type: typ, PermMode: permmode, Uid: uid, Gid: gid, ModifiedT: modifiedT},
		&inodedb.HardLinkOp{NodeLock: inodedb.NodeLock{dirID, inodedb.NoTicket}, Name: name, TargetID: nlock.ID},
	}}
	if _, err := fs.idb.ApplyTransaction(tx); err != nil {
		return 0, err
	}

	fs.setOrigPathForId(nlock.ID, origpath)

	return nlock.ID, nil
}
Example #5
func (be *CachedBlobEntry) closeWithLock(abandon bool) error {
	if len(be.handles) > 0 {
		return fmt.Errorf("Entry has %d handles", len(be.handles))
	}

	logger.Infof(mylog, "Close entry: %+v", be.infoWithLock())

	if !abandon {
		for be.state == cacheEntryInvalidating {
			logger.Warningf(mylog, "Waiting for cache to be fully invalidated before close. (shouldn't come here, as PWrite should block)")
			be.validlenExtended.Wait()
		}

		if err := be.writeBackWithLock(); err != nil {
			return fmt.Errorf("Failed to writeback dirty: %v", err)
		}
		be.syncCount++
		be.lastSync = time.Now()
	}

	if err := be.cachebh.Close(); err != nil {
		return fmt.Errorf("Failed to close cache bh: %v", err)
	}

	be.state = cacheEntryClosed
	return nil
}
Example #6
func (d DirNode) ReadDirAll(ctx context.Context) ([]bfuse.Dirent, error) {
	parentID, err := d.fs.ParentID(d.id)
	if err != nil {
		return nil, err
	}

	entries, err := d.fs.DirEntries(d.id)
	if err != nil {
		return nil, err
	}

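	// "." and ".." entries come first, followed by one entry per child.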
	fentries := make([]bfuse.Dirent, 0, len(entries)+2)
	fentries = append(fentries, bfuse.Dirent{Inode: uint64(d.id), Name: ".", Type: bfuse.DT_Dir})
	fentries = append(fentries, bfuse.Dirent{Inode: uint64(parentID), Name: "..", Type: bfuse.DT_Dir})
	for name, id := range entries {
		isdir, err := d.fs.IsDir(id)
		if err != nil {
			logger.Warningf(mylog, "Error while querying IsDir for id %d: %v", id, err)
		}

		var t bfuse.DirentType
		if isdir {
			t = bfuse.DT_Dir
		} else {
			t = bfuse.DT_File
		}

		fentries = append(fentries, bfuse.Dirent{
			Inode: uint64(id),
			Name:  name,
			Type:  t,
		})
	}
	return fentries, nil
}
Example #7
func (ch *ChunkIO) PayloadLen() int {
	if err := ch.ensureHeader(); err != nil {
		logger.Warningf(mylog, "Failed to read header for payload len: %v", err)
		return 0
	}

	return int(ch.header.PayloadLen)
}
Example #8
func NewSyncScheduler(s Syncer, wait time.Duration) *PeriodicRunner {
	return NewPeriodicRunner(func() {
		err := s.Sync()
		if err != nil {
			logger.Warningf(synclog, "Sync err: %v", err)
		}
	}, wait)
}
Example #9
func NewCacheSyncScheduler(cbs *CachedBlobStore) *util.PeriodicRunner {
	return util.NewPeriodicRunner(func() {
		err := cbs.SyncOneEntry()
		if err != nil && err != ENOENT {
			logger.Warningf(mylog, "SyncOneEntry err: %v", err)
		}
	}, schedulerWaitDuration)
}
Example #10
func (db *DB) UnlockNode(nlock NodeLock) error {
	if err := db.state.checkLock(nlock, true); err != nil {
		logger.Warningf(mylog, "Unlock node failed: %v", err)
		return err
	}

	delete(db.state.nodeLocks, nlock.ID)
	return nil
}
Example #11
func (be *CachedBlobEntry) Close(abandon bool) error {
	be.mu.Lock()
	defer be.mu.Unlock()

	if !be.state.IsActive() {
		logger.Warningf(mylog, "Attempted to close uninitialized/already closed entry: %+v", be.infoWithLock())
		return nil
	}

	return be.closeWithLock(abandon)
}
Example #12
func (be *CachedBlobEntry) Sync() error {
	be.mu.Lock()
	defer be.mu.Unlock()

	// Wait for invalidation to complete
	for be.state == cacheEntryInvalidating {
		logger.Infof(mylog, "Waiting for cache to be fully invalidated before sync.")
		be.validlenExtended.Wait()
	}

	if !be.state.IsActive() {
		logger.Warningf(mylog, "Attempted to sync already uninitialized/closed entry: %+v", be.infoWithLock())
		return nil
	}
	if be.state == cacheEntryClean {
		return nil
	}

	logger.Infof(mylog, "Sync entry: %+v", be.infoWithLock())

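	// Write back dirty data and sync the cache blob in parallel;
	// both goroutines report their result over errC.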
	errC := make(chan error)

	go func() {
		if err := be.writeBackWithLock(); err != nil {
			errC <- fmt.Errorf("Failed to writeback dirty: %v", err)
		} else {
			errC <- nil
		}
	}()

	go func() {
		if cs, ok := be.cachebh.(util.Syncer); ok {
			if err := cs.Sync(); err != nil {
				errC <- fmt.Errorf("Failed to sync cache blob: %v", err)
			} else {
				errC <- nil
			}
		} else {
			errC <- nil
		}
	}()

	errs := []error{}
	for i := 0; i < 2; i++ {
		if err := <-errC; err != nil {
			errs = append(errs, err)
		}
	}

	be.syncCount++
	be.lastSync = time.Now()
	return util.ToErrors(errs)
}
Example #13
func (fs *FileSystem) OpenFile(id inodedb.ID, flags int) (*FileHandle, error) {
	logger.Infof(fslog, "OpenFile(id: %v, flags rok: %t wok: %t)", id, fl.IsReadAllowed(flags), fl.IsWriteAllowed(flags))

	tryLock := fl.IsWriteAllowed(flags)
	if tryLock && !fl.IsWriteAllowed(fs.bs.Flags()) {
		return nil, EACCES
	}

	of := fs.getOrCreateOpenFile(id)

	of.mu.Lock()
	defer of.mu.Unlock()

	ofIsInitialized := of.nlock.ID != 0
	if ofIsInitialized && (of.nlock.HasTicket() || !tryLock) {
		// No need to upgrade lock. Just use cached filehandle.
		logger.Infof(fslog, "Using cached of for inode id: %v", id)
		return of.OpenHandleWithoutLock(flags), nil
	}

	// upgrade lock or acquire new lock...
	v, nlock, err := fs.idb.QueryNode(id, tryLock)
	if err != nil {
		return nil, err
	}
	if v.GetType() != inodedb.FileNodeT {
		if err := fs.idb.UnlockNode(nlock); err != nil {
			logger.Warningf(fslog, "Unlock node failed for non-file node: %v", err)
		}

		if v.GetType() == inodedb.DirNodeT {
			return nil, EISDIR
		}
		return nil, fmt.Errorf("Specified node not file but has type %v", v.GetType())
	}

	of.nlock = nlock
	caio := NewINodeDBChunksArrayIO(fs.idb, nlock)
	of.cfio = chunkstore.NewChunkedFileIO(fs.bs, fs.c, caio)
	of.cfio.SetOrigFilename(fs.tryGetOrigPath(nlock.ID))

	if fl.IsWriteTruncate(flags) {
		if err := of.truncateWithLock(0); err != nil {
			return nil, fmt.Errorf("Failed to truncate file: %v", err)
		}
	}

	fh := of.OpenHandleWithoutLock(flags)
	return fh, nil
}
Example #14
func (be *CachedBlobEntry) initializeWithLock(cbs *CachedBlobStore) error {
	cachebh, err := cbs.cachebs.Open(be.blobpath, fl.O_RDWRCREATE)
	if err != nil {
		be.closeWithLock(abandonAndClose)
		return fmt.Errorf("Failed to open cache blob: %v", err)
	}
	cachever, err := cbs.queryVersion(&blobstore.OffsetReader{cachebh, 0})
	if err != nil {
		be.closeWithLock(abandonAndClose)
		return fmt.Errorf("Failed to query cached blob ver: %v", err)
	}
	backendver, err := cbs.bever.Query(be.blobpath)
	if err != nil {
		be.closeWithLock(abandonAndClose)
		return err
	}

	be.cbs = cbs
	be.cachebh = cachebh
	be.handles = make(map[*CachedBlobHandle]struct{})

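	// Compare the cached blob version against the backend version to decide the initial cache state.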
	if cachever > backendver {
		logger.Warningf(mylog, "FIXME: cache is newer than backend when open")
		be.state = cacheEntryDirty
		be.bloblen = cachebh.Size()
		be.validlen = be.bloblen
	} else if cachever == backendver {
		be.state = cacheEntryClean
		be.bloblen = cachebh.Size()
		be.validlen = be.bloblen
	} else {
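		// Cache is stale: mark the entry as invalidating and schedule InvalidateCacheTask right away.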
		blobsizer := cbs.backendbs.(blobstore.BlobSizer)
		be.bloblen, err = blobsizer.BlobSize(be.blobpath)
		if err != nil {
			be.closeWithLock(abandonAndClose)
			return fmt.Errorf("Failed to query backend blobsize: %v", err)
		}
		be.state = cacheEntryInvalidating
		be.validlen = 0

		cbs.s.RunImmediately(&InvalidateCacheTask{cbs, be}, nil)
	}
	if be.state == cacheEntryUninitialized {
		panic("be.state should be set above")
	}

	return nil
}
Example #15
func (d DirNode) Lookup(ctx context.Context, name string) (bfs.Node, error) {
	entries, err := d.fs.DirEntries(d.id)
	if err != nil {
		return nil, err
	}

	if id, ok := entries[name]; ok {
		isdir, err := d.fs.IsDir(id)
		if err != nil {
			logger.Warningf(mylog, "Stale inode in dir? Failed IsDir: %v", err)
			return nil, err
		}
		if isdir {
			return DirNode{d.fs, id}, nil
		} else {
			return FileNode{d.fs, id}, nil
		}
	}

	return nil, bfuse.ENOENT
}
Example #16
func NewConfig(configdir string) (*Config, error) {
	tomlpath := path.Join(configdir, "config.toml")

	buf, err := ioutil.ReadFile(tomlpath)
	if err != nil {
		return nil, fmt.Errorf("Failed to read config file: %v", err)
	}

	cfg := &Config{
		PasswordFile:                 path.Join(configdir, "password.txt"),
		UseSeparateBucketForMetadata: false,
		CacheDir:                     "/var/cache/otaru",
		CredentialsFilePath:          path.Join(configdir, "credentials.json"),
		TokenCacheFilePath:           path.Join(configdir, "tokencache.json"),
	}

	if err := toml.Unmarshal(buf, &cfg); err != nil {
		return nil, fmt.Errorf("Failed to parse config file: %v", err)
	}

	if cfg.Password != "" {
		logger.Warningf(mylog, "Storing password directly on config file is not recommended.")
	} else {
		fi, err := os.Stat(cfg.PasswordFile)
		if err != nil {
			return nil, fmt.Errorf("Failed to stat password file \"%s\": %v", cfg.PasswordFile, err)
		}
		if fi.Mode()&os.ModePerm != 0400 {
			logger.Warningf(mylog, "Warning: Password file \"%s\" permission is not 0400", cfg.PasswordFile)
		}

		cfg.Password, err = util.StringFromFile(cfg.PasswordFile)
		if err != nil {
			return nil, fmt.Errorf("Failed to read password file \"%s\": %v", cfg.PasswordFile, err)
		}
	}

	if cfg.ProjectName == "" {
		return nil, fmt.Errorf("Config Error: ProjectName must be given.")
	}
	if cfg.BucketName == "" {
		return nil, fmt.Errorf("Config Error: BucketName must be given.")
	}

	if _, err := os.Stat(cfg.CredentialsFilePath); err != nil {
		if os.IsNotExist(err) {
			return nil, fmt.Errorf("Credentials not found at %s", cfg.CredentialsFilePath)
		} else {
			return nil, fmt.Errorf("Failed to stat credentials file \"%s\" from unknown err: %v", cfg.CredentialsFilePath, err)
		}
	}

	if _, err := os.Stat(cfg.TokenCacheFilePath); err != nil {
		if os.IsNotExist(err) {
			logger.Warningf(mylog, "Warning: Token cache file found not at %s", cfg.TokenCacheFilePath)
		} else {
			return nil, fmt.Errorf("Failed to stat token cache file \"%s\" from unknown err: %v", cfg.TokenCacheFilePath, err)
		}
	}

	if cfg.Fluent.TagPrefix == "" {
		cfg.Fluent.TagPrefix = "otaru"
	}

	return cfg, nil
}
Example #17
func NewOtaru(cfg *Config, oneshotcfg *OneshotConfig) (*Otaru, error) {
	o := &Otaru{}

	var err error

	key := btncrypt.KeyFromPassword(cfg.Password)
	o.C, err = btncrypt.NewCipher(key)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init Cipher: %v", err)
	}

	o.S = scheduler.NewScheduler()

	if !cfg.LocalDebug {
		o.Tsrc, err = auth.GetGCloudTokenSource(context.TODO(), cfg.CredentialsFilePath, cfg.TokenCacheFilePath, false)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init GCloudClientSource: %v", err)
		}
		o.DSCfg = datastore.NewConfig(cfg.ProjectName, cfg.BucketName, o.C, o.Tsrc)
		o.GL = datastore.NewGlobalLocker(o.DSCfg, GenHostName(), "FIXME: fill info")
		if err := o.GL.Lock(); err != nil {
			return nil, err
		}
	}

	o.CacheTgtBS, err = blobstore.NewFileBlobStore(cfg.CacheDir, oflags.O_RDWRCREATE)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init FileBlobStore: %v", err)
	}

	if !cfg.LocalDebug {
		o.DefaultBS, err = gcs.NewGCSBlobStore(cfg.ProjectName, cfg.BucketName, o.Tsrc, oflags.O_RDWRCREATE)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init GCSBlobStore: %v", err)
		}
		if !cfg.UseSeparateBucketForMetadata {
			o.BackendBS = o.DefaultBS
		} else {
			metabucketname := fmt.Sprintf("%s-meta", cfg.BucketName)
			o.MetadataBS, err = gcs.NewGCSBlobStore(cfg.ProjectName, metabucketname, o.Tsrc, oflags.O_RDWRCREATE)
			if err != nil {
				o.Close()
				return nil, fmt.Errorf("Failed to init GCSBlobStore (metadata): %v", err)
			}

			o.BackendBS = blobstore.Mux{
				blobstore.MuxEntry{metadata.IsMetadataBlobpath, o.MetadataBS},
				blobstore.MuxEntry{nil, o.DefaultBS},
			}
		}
	} else {
		o.BackendBS, err = blobstore.NewFileBlobStore(path.Join(os.Getenv("HOME"), ".otaru", "bbs"), oflags.O_RDWRCREATE)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("Failed to init FileBlobStore (backend): %v", err)
		}
	}

	queryFn := chunkstore.NewQueryChunkVersion(o.C)
	o.CBS, err = cachedblobstore.New(o.BackendBS, o.CacheTgtBS, o.S, oflags.O_RDWRCREATE /* FIXME */, queryFn)
	if err != nil {
		o.Close()
		return nil, fmt.Errorf("Failed to init CachedBlobStore: %v", err)
	}
	if err := o.CBS.RestoreState(o.C); err != nil {
		logger.Warningf(mylog, "Attempted to restore cachedblobstore state but failed: %v", err)
	}
	o.CSS = cachedblobstore.NewCacheSyncScheduler(o.CBS)

	if !cfg.LocalDebug {
		o.SSLoc = datastore.NewINodeDBSSLocator(o.DSCfg)
	} else {
		logger.Panicf(mylog, "Implement mock sslocator that doesn't depend on gcloud/datastore")
	}
	o.SIO = blobstoredbstatesnapshotio.New(o.CBS, o.C, o.SSLoc)

	if !cfg.LocalDebug {
		txio := datastore.NewDBTransactionLogIO(o.DSCfg)
		o.TxIO = txio
		o.TxIOSS = util.NewSyncScheduler(txio, 300*time.Millisecond)
	} else {
		o.TxIO = inodedb.NewSimpleDBTransactionLogIO()
	}
	o.CTxIO = inodedb.NewCachedDBTransactionLogIO(o.TxIO)

	if oneshotcfg.Mkfs {
		o.IDBBE, err = inodedb.NewEmptyDB(o.SIO, o.CTxIO)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("NewEmptyDB failed: %v", err)
		}
	} else {
		o.IDBBE, err = inodedb.NewDB(o.SIO, o.CTxIO)
		if err != nil {
			o.Close()
			return nil, fmt.Errorf("NewDB failed: %v", err)
		}
	}

	o.IDBS = inodedb.NewDBService(o.IDBBE)
	o.IDBSS = util.NewSyncScheduler(o.IDBS, 30*time.Second)

	o.FS = otaru.NewFileSystem(o.IDBS, o.CBS, o.C)
	o.MGMT = mgmt.NewServer()
	if err := o.runMgmtServer(); err != nil {
		o.Close()
		return nil, fmt.Errorf("Mgmt server run failed: %v", err)
	}

	return o, nil
}
Example #18
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	logger.Registry().AddOutput(logger.WriterLogger{os.Stderr})
	flag.Usage = Usage
	flag.Parse()

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		logger.Criticalf(mylog, "%v", err)
		Usage()
		os.Exit(2)
	}
	if flag.NArg() != 1 {
		Usage()
		os.Exit(2)
	}
	mountpoint := flag.Arg(0)

	if err := facade.SetupFluentLogger(cfg); err != nil {
		logger.Criticalf(mylog, "Failed to setup fluentd logger: %v", err)
		os.Exit(1)
	}

	o, err := facade.NewOtaru(cfg, &facade.OneshotConfig{Mkfs: *flagMkfs})
	if err != nil {
		logger.Criticalf(mylog, "NewOtaru failed: %v", err)
		os.Exit(1)
	}
	var muClose sync.Mutex
	closeOtaruAndExit := func(exitCode int) {
		muClose.Lock()
		defer muClose.Unlock()

		if err := bfuse.Unmount(mountpoint); err != nil {
			logger.Warningf(mylog, "umount err: %v", err)
		}
		if o != nil {
			if err := o.Close(); err != nil {
				logger.Warningf(mylog, "Otaru.Close() returned errs: %v", err)
			}
			o = nil
		}
		os.Exit(exitCode)
	}
	defer closeOtaruAndExit(0)

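	// Shut down cleanly on SIGINT/SIGTERM or on a critical log event.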
	sigC := make(chan os.Signal, 1)
	signal.Notify(sigC, os.Interrupt)
	signal.Notify(sigC, syscall.SIGTERM)
	go func() {
		for s := range sigC {
			logger.Warningf(mylog, "Received signal: %v", s)
			closeOtaruAndExit(1)
		}
	}()
	logger.Registry().AddOutput(logger.HandleCritical(func() {
		logger.Warningf(mylog, "Starting shutdown due to critical event.")
		closeOtaruAndExit(1)
	}))

	bfuseLogger := logger.Registry().Category("bfuse")
	bfuse.Debug = func(msg interface{}) { logger.Debugf(bfuseLogger, "%v", msg) }
	if err := fuse.ServeFUSE(cfg.BucketName, mountpoint, o.FS, nil); err != nil {
		logger.Warningf(mylog, "ServeFUSE failed: %v", err)
		closeOtaruAndExit(1)
	}
	logger.Infof(mylog, "ServeFUSE end!")
}