// mountOptions configures the options from the command line flags func mountOptions(device string) (options []fuse.MountOption) { options = []fuse.MountOption{ fuse.MaxReadahead(uint32(maxReadAhead)), fuse.Subtype("rclone"), fuse.FSName(device), fuse.VolumeName(device), fuse.NoAppleDouble(), fuse.NoAppleXattr(), // Options from benchmarking in the fuse module //fuse.MaxReadahead(64 * 1024 * 1024), //fuse.AsyncRead(), - FIXME this causes // ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor // which is probably related to errors people are having //fuse.WritebackCache(), } if allowNonEmpty { options = append(options, fuse.AllowNonEmptyMount()) } if allowOther { options = append(options, fuse.AllowOther()) } if allowRoot { options = append(options, fuse.AllowRoot()) } if defaultPermissions { options = append(options, fuse.DefaultPermissions()) } if readOnly { options = append(options, fuse.ReadOnly()) } if writebackCache { options = append(options, fuse.WritebackCache()) } return options }
func (fs *ClueFS) MountAndServe(mountpoint string, readonly bool) error { // Mount the file system fs.mountDir = mountpoint if IsDebugActive() { fuse.Debug = FuseDebug } mountOpts := []fuse.MountOption{ fuse.FSName(programName), fuse.Subtype(programName), fuse.VolumeName(programName), fuse.LocalVolume(), } if readonly { mountOpts = append(mountOpts, fuse.ReadOnly()) } conn, err := fuse.Mount(mountpoint, mountOpts...) if err != nil { return err } defer conn.Close() // Start serving requests if err = fusefs.Serve(conn, fs); err != nil { return err } // Check for errors when mounting the file system <-conn.Ready if err = conn.MountError; err != nil { return err } return nil }
func TestMountOptionReadOnly(t *testing.T) { t.Parallel() mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{createrDir{}}, fuse.ReadOnly(), ) if err != nil { t.Fatal(err) } defer mnt.Close() // This will be prevented by kernel-level access checking when // ReadOnly is used. f, err := os.Create(mnt.Dir + "/child") if err == nil { f.Close() t.Fatal("expected an error") } perr, ok := err.(*os.PathError) if !ok { t.Fatalf("expected PathError, got %T: %v", err, err) } if perr.Err != syscall.EROFS { t.Fatalf("expected EROFS, got %T: %v", err, err) } }
func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error { debug.Log("start mount") defer debug.Log("finish mount") repo, err := OpenRepository(gopts) if err != nil { return err } err = repo.LoadIndex() if err != nil { return err } if _, err := resticfs.Stat(mountpoint); os.IsNotExist(errors.Cause(err)) { Verbosef("Mountpoint %s doesn't exist, creating it\n", mountpoint) err = resticfs.Mkdir(mountpoint, os.ModeDir|0700) if err != nil { return err } } c, err := systemFuse.Mount( mountpoint, systemFuse.ReadOnly(), systemFuse.FSName("restic"), ) if err != nil { return err } Printf("Now serving the repository at %s\n", mountpoint) Printf("Don't forget to umount after quitting!\n") root := fs.Tree{} root.Add("snapshots", fuse.NewSnapshotsDir(repo, opts.OwnerRoot)) debug.Log("serving mount at %v", mountpoint) err = fs.Serve(c, &root) if err != nil { return err } <-c.Ready return c.MountError }
func mountOptions(device string) (options []fuse.MountOption) { if svfs.AllowOther { options = append(options, fuse.AllowOther()) } if svfs.AllowRoot { options = append(options, fuse.AllowRoot()) } if svfs.DefaultPermissions { options = append(options, fuse.DefaultPermissions()) } if svfs.ReadOnly { options = append(options, fuse.ReadOnly()) } options = append(options, fuse.MaxReadahead(uint32(svfs.ReadAheadSize))) options = append(options, fuse.Subtype("svfs")) options = append(options, fuse.FSName(device)) return options }
func (cmd CmdMount) Execute(args []string) error { if len(args) == 0 { return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() if err != nil { return err } err = repo.LoadIndex() if err != nil { return err } mountpoint := args[0] if _, err := os.Stat(mountpoint); os.IsNotExist(err) { cmd.global.Verbosef("Mountpoint %s doesn't exist, creating it\n", mountpoint) err = os.Mkdir(mountpoint, os.ModeDir|0700) if err != nil { return err } } c, err := systemFuse.Mount( mountpoint, systemFuse.ReadOnly(), systemFuse.FSName("restic"), ) if err != nil { return err } root := fs.Tree{} root.Add("snapshots", fuse.NewSnapshotsDir(repo, cmd.Root)) cmd.global.Printf("Now serving %s at %s\n", repo.Backend().Location(), mountpoint) cmd.global.Printf("Don't forget to umount after quitting!\n") AddCleanupHandler(func() error { return systemFuse.Unmount(mountpoint) }) cmd.ready <- struct{}{} errServe := make(chan error) go func() { err = fs.Serve(c, &root) if err != nil { errServe <- err } <-c.Ready errServe <- c.MountError }() select { case err := <-errServe: return err case <-cmd.done: err := c.Close() if err != nil { cmd.global.Printf("Error closing fuse connection: %s\n", err) } return systemFuse.Unmount(mountpoint) } }
func main() { flag.Usage = Usage flag.Parse() logging.SetFormatter(format) if *debug { logging.SetLevel(logging.DEBUG, "gocfs") } else { logging.SetLevel(logging.ERROR, "gocfs") } if flag.NArg() != 1 { Usage() os.Exit(2) } mountpoint := flag.Arg(0) if *daemon { var args = make([]string, 0) if *ro { args = append(args, "-ro") } args = append(args, "-db", *cassandra) if *debug { args = append(args, "-v") } args = append(args, flag.Args()...) cmd := exec.Command(os.Args[0], args...) cmd.Start() return } var mountOptions = make([]fuse.MountOption, 0) mountOptions = append(mountOptions, fuse.FSName("gocfs")) mountOptions = append(mountOptions, fuse.Subtype("gocfs")) mountOptions = append(mountOptions, fuse.VolumeName("cassandra")) //mountOptions = append(mountOptions, fuse.UseIno()) //mountOptions = append(mountOptions, fuse.WritebackCache()) //mountOptions = append(mountOptions, fuse.MaxReadahead(0)) if *ro { mountOptions = append(mountOptions, fuse.ReadOnly()) } c, err := fuse.Mount( mountpoint, mountOptions..., ) if err != nil { log.Fatal(err) } defer c.Close() var consistencyLevel gocql.Consistency if *consistency == "quorum" { consistencyLevel = gocql.Quorum } else if *consistency == "one" { consistencyLevel = gocql.One } else if *consistency == "all" { consistencyLevel = gocql.All } else { log.Fatal("Unsupported consistency level: %s", *consistency) } cluster := gocql.NewCluster(*cassandra) cluster.DiscoverHosts = true cluster.Timeout = 2 * time.Second cluster.Consistency = consistencyLevel session, _ := cluster.CreateSession() defer session.Close() if err := initStorage(session); err != nil { log.Fatal(err) } server := fs.New(c, &fs.Config{ Debug: debugFs, }) err = server.Serve(FS{ Session: session, Server: server, Conn: c, blockSize: 65536, id2path: make(map[uint64]string), path2id: make(map[string]uint64), nextId: 0, }) if err != nil { log.Fatal(err) } // check if the mount process has an error to report <-c.Ready if err := c.MountError; err != nil { 
log.Fatal(err) } }
func (this *Mount) Run(args []string) (exitCode int) { cmdFlags := flag.NewFlagSet("mount", flag.ContinueOnError) cmdFlags.Usage = func() { this.Ui.Output(this.Help()) } cmdFlags.StringVar(&this.zone, "z", ctx.ZkDefaultZone(), "") cmdFlags.StringVar(&this.cluster, "c", "", "") cmdFlags.StringVar(&this.logLevel, "l", "info", "") if err := cmdFlags.Parse(args); err != nil { return 1 } if validateArgs(this, this.Ui). on("-z", "-c"). invalid(args) { return 2 } this.mountPoint = args[len(args)-1] if !strings.HasPrefix(this.mountPoint, "/") { this.Ui.Error("mount point must start with /") return 1 } setupLogging("stdout", this.logLevel, "") c, err := fuse.Mount( this.mountPoint, fuse.FSName("kfs"), fuse.Subtype("kfs"), fuse.VolumeName("Kafka FS"), fuse.ReadOnly(), fuse.AllowOther(), ) if err != nil { log.Critical(err) } signal.RegisterHandler(func(sig os.Signal) { var err error for i := 0; i < 5; i++ { err = fuse.Unmount(this.mountPoint) if err == nil { break } log.Warn(err) time.Sleep(time.Second * 5) } if err == nil { log.Info("Kafka FS unmounted") } else { log.Error("Kafka FS unable to umount") } c.Close() os.Exit(0) }, syscall.SIGINT, syscall.SIGTERM) srv := fs.New(c, &fs.Config{}) fs := kfs.New(this.zone, this.cluster) if err := srv.Serve(fs); err != nil { log.Error(err) } <-c.Ready if err := c.MountError; err != nil { log.Error(err) } return }
func main() { allowOther := flag.Bool("allow-other", false, "allow all users access to the filesystem") allowRoot := flag.Bool("allow-root", false, "allow root to access the filesystem") debug := flag.Bool("debug", false, "enable debug output") gid := flag.Int("gid", os.Getgid(), "set the GID that should own all files") perm := flag.Int("perm", 0, "set the file permission flags for all files") ro := flag.Bool("ro", false, "mount the filesystem read-only") root := flag.String("root", "", "path in Consul to the root of the filesystem") timeout := flag.String("timeout", defaultTimeout, "timeout for Consul requests") uid := flag.Int("uid", os.Getuid(), "set the UID that should own all files") flag.Parse() logger := logrus.New() if *debug { logger.Level = logrus.DebugLevel } consulConfig := &consul.Config{} var mountPoint string switch flag.NArg() { case 1: mountPoint = flag.Arg(0) case 2: consulConfig.Address = flag.Arg(0) mountPoint = flag.Arg(1) default: flag.Usage() } // Initialize a Consul client. TODO: connection parameters client, err := consul.NewClient(consulConfig) if err != nil { logrus.NewEntry(logger).WithError(err).Error("could not initialize consul") os.Exit(1) } // Configure some mount options timeoutDuration, err := time.ParseDuration(*timeout) if err != nil { logrus.NewEntry(logger).WithError(err).Fatal("invalid -timeout value") } mountOptions := []fuse.MountOption{ fuse.DefaultPermissions(), fuse.DaemonTimeout(fmt.Sprint(int64(timeoutDuration.Seconds() + 1))), fuse.NoAppleDouble(), fuse.NoAppleXattr(), } if *allowOther { mountOptions = append(mountOptions, fuse.AllowOther()) } if *allowRoot { mountOptions = append(mountOptions, fuse.AllowRoot()) } if *ro { mountOptions = append(mountOptions, fuse.ReadOnly()) } // Mount the file system to start receiving FS events at the mount point. logger.WithField("location", mountPoint).Info("mounting kvfs") conn, err := fuse.Mount(mountPoint, mountOptions...) 
if err != nil { logrus.NewEntry(logger).WithError(err).Fatal("error mounting kvfs") } defer conn.Close() // Try to cleanly unmount the FS if SIGINT or SIGTERM is received sigs := make(chan os.Signal, 10) signal.Notify(sigs, os.Interrupt, syscall.SIGTERM) go func() { for sig := range sigs { logger.WithField("signal", sig).Info("attempting to unmount") err := fuse.Unmount(mountPoint) if err != nil { logrus.NewEntry(logger).WithError(err).Error("cannot unmount") } } }() // Create a file system object and start handing its requests server := fs.New(conn, &fs.Config{ Debug: func(m interface{}) { logger.Debug(m) }, WithContext: func(ctx context.Context, req fuse.Request) context.Context { // The returned cancel function doesn't matter: the request handler will // cancel the parent context at the end of the request. newCtx, _ := context.WithTimeout(ctx, timeoutDuration) return newCtx }, }) f := &consulfs.ConsulFS{ Consul: &consulfs.CancelConsulKV{ Client: client, Logger: logger, }, Logger: logger, UID: uint32(*uid), GID: uint32(*gid), Perms: os.FileMode(*perm), RootPath: *root, } err = server.Serve(f) if err != nil { // Not sure what would cause Serve() to exit with an error logrus.NewEntry(logger).WithError(err).Error("error serving filesystem") } // Wait for the FUSE connection to end <-conn.Ready if conn.MountError != nil { logrus.NewEntry(logger).WithError(conn.MountError).Error("unmount error") os.Exit(1) } else { logger.Info("file system exiting normally") } }
func main() { runtime.GOMAXPROCS(runtime.NumCPU()) runtime.SetBlockProfileRate(1) flag.Usage = Usage flag.Parse() if flag.NArg() != 1 { Usage() os.Exit(2) } mountpoint := flag.Arg(0) if *debugGdrive { debug = true } userCurrent, err := user.Current() if err != nil { log.Fatalf("unable to get UID/GID of current user: %v", err) } uidInt, err := strconv.Atoi(userCurrent.Uid) if err != nil { log.Fatalf("unable to get UID/GID of current user: %v", err) } uid := uint32(uidInt) gidInt, err := strconv.Atoi(userCurrent.Gid) if err != nil { log.Fatalf("unable to get UID/GID of current user: %v", err) } gid := uint32(gidInt) if err = sanityCheck(mountpoint); err != nil { log.Fatalf("sanityCheck failed: %s\n", err) } http.HandleFunc("/", RootHandler) go http.ListenAndServe(fmt.Sprintf("localhost:%s", *port), nil) var client *http.Client if *readOnly { client = getOAuthClient(drive.DriveReadonlyScope) } else { client = getOAuthClient(drive.DriveScope) } driveCache := cache.NewCache("/tmp", client) // TODO: move into drivedb, so we don't create a service twice service, _ := drive.New(client) about, err := service.About.Get().Do() if err != nil { log.Fatalf("drive.service.About.Get().Do: %v\n", err) } // fileId of the root of the FS (aka "My Drive") rootId := about.RootFolderId // email address of the mounted google drive account account := about.User.EmailAddress // Ensure the token's always fresh // TODO: Remove this once goauth2 changes are accepted upstream // https://code.google.com/p/goauth2/issues/detail?id=47 go tokenKicker(client, 59*time.Minute) // Create and start the drive metadata syncer. 
db, err := drive_db.NewDriveDB(client, *dbDir, *cacheDir, *driveMetadataLatency, rootId) if err != nil { log.Fatalf("could not open leveldb: %v", err) } defer db.Close() db.WaitUntilSynced() log.Printf("synced!") options := []fuse.MountOption{ fuse.FSName("GoogleDrive"), fuse.Subtype("gdrive"), fuse.VolumeName(account), } if *allowOther { options = append(options, fuse.AllowOther()) } if *readOnly { options = append(options, fuse.ReadOnly()) } c, err := fuse.Mount(mountpoint, options...) if err != nil { log.Fatal(err) } defer c.Close() // Trap control-c (sig INT) and unmount sig := make(chan os.Signal, 1) signal.Notify(sig, os.Interrupt) go func() { for _ = range sig { if err := fuse.Unmount(mountpoint); err != nil { log.Printf("fuse.Unmount failed: %v", err) } } }() sc := serveConn{db: db, driveCache: driveCache, service: service, uid: uid, gid: gid, writers: make(map[int]io.PipeWriter), conn: c, } err = sc.Serve() if err != nil { log.Fatalln("fuse server failed: ", err) } // check if the mount process has an error to report <-c.Ready if err := c.MountError; err != nil { log.Fatal(err) } }
func run() error { flag.Usage = usage flag.Parse() if *debug { fuse.Debug = func(v interface{}) { log.Println("[fuse]", v) } } var subdir, mountpoint string switch flag.NArg() { case 1: subdir = "/" mountpoint = flag.Arg(0) case 2: subdir = path.Join("/", flag.Arg(0)) mountpoint = flag.Arg(1) default: usage() os.Exit(1) } var endpoints []string if ep := os.Getenv("ETCD_ENDPOINTS"); ep != "" { endpoints = strings.Split(ep, ",") } else { endpoints = []string{"localhost:4001"} } log.Printf("Using endpoints %v", endpoints) cfg := client.Config{ Endpoints: endpoints, } etcd, err := client.New(cfg) if err != nil { return err } var mountOpts []fuse.MountOption if *allowOther { mountOpts = append(mountOpts, fuse.AllowOther()) } if *allowRoot { mountOpts = append(mountOpts, fuse.AllowRoot()) } mountOpts = append(mountOpts, fuse.DefaultPermissions()) mountOpts = append(mountOpts, fuse.FSName("etcd:"+subdir)) mountOpts = append(mountOpts, fuse.ReadOnly()) mountOpts = append(mountOpts, fuse.Subtype("etcdFS")) log.Printf("Mounting etcd:%s to %s", subdir, mountpoint) c, err := fuse.Mount( mountpoint, mountOpts..., ) if err != nil { return err } defer c.Close() srv := fs.New(c, nil) filesys := &etcdFS{ etcd: client.NewKeysAPI(etcd), base: subdir, } errch := make(chan error) log.Printf("Start serving") go func() { errch <- srv.Serve(filesys) }() <-c.Ready if c.MountError != nil { return c.MountError } sigs := make(chan os.Signal) signal.Notify(sigs, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGTERM) select { case err := <-errch: return err case s := <-sigs: log.Printf("Caught signal: %v", s) err := c.Close() log.Printf("Error: %v", err) return err } }
	// NOTE(review): this is the interior of a mount routine whose function
	// header lies outside this chunk; kept byte-for-byte, comments only.

	// Build a driver for the local system — presumably used to read file
	// contents referenced by the manifest; verify against the
	// continuityfs package docs.
	driver, err := continuity.NewSystemDriver()
	if err != nil {
		logrus.Fatal(err)
	}

	// Content provider backed by `source` paths, resolved through driver.
	provider := continuityfs.NewFSFileContentProvider(source, driver)

	// Construct the FUSE file system from the parsed manifest `m`.
	contfs, err := continuityfs.NewFSFromManifest(m, mountpoint, provider)
	if err != nil {
		logrus.Fatal(err)
	}

	c, err := fuse.Mount(
		mountpoint,
		fuse.ReadOnly(),
		fuse.FSName(manifestName),
		fuse.Subtype("continuity"),
		// OSX Only options
		fuse.LocalVolume(),
		fuse.VolumeName("Continuity FileSystem"),
	)
	if err != nil {
		logrus.Fatal(err)
	}

	// Wait for the kernel mount to complete and surface any mount error.
	// NOTE(review): serving (fs.Serve with contfs) presumably happens
	// after this chunk — confirm in the enclosing function.
	<-c.Ready
	if err := c.MountError; err != nil {
		c.Close()
		logrus.Fatal(err)
	}