func setup(t *testing.T) (workdir string, cleanup func()) { wd, _ := ioutil.TempDir("", "") err := os.Mkdir(wd+"/mnt", 0700) fuse.CheckSuccess(err) err = os.Mkdir(wd+"/store", 0700) fuse.CheckSuccess(err) os.Mkdir(wd+"/ro", 0700) fuse.CheckSuccess(err) WriteFile(wd+"/ro/file1", "file1") WriteFile(wd+"/ro/file2", "file2") fs := NewAutoUnionFs(wd+"/store", testAOpts) nfs := fuse.NewPathNodeFs(fs, nil) state, conn, err := fuse.MountNodeFileSystem(wd+"/mnt", nfs, &testAOpts.FileSystemOptions) CheckSuccess(err) state.Debug = fuse.VerboseTest() conn.Debug = fuse.VerboseTest() go state.Loop() return wd, func() { state.Unmount() os.RemoveAll(wd) } }
func setupFs(fs fuse.FileSystem) (string, func()) { opts := &fuse.FileSystemOptions{ EntryTimeout: 0.0, AttrTimeout: 0.0, NegativeTimeout: 0.0, } mountPoint, _ := ioutil.TempDir("", "stat_test") nfs := fuse.NewPathNodeFs(fs, nil) state, _, err := fuse.MountNodeFileSystem(mountPoint, nfs, opts) if err != nil { panic(fmt.Sprintf("cannot mount %v", err)) // ugh - benchmark has no error methods. } state.SetRecordStatistics(true) // state.Debug = true go state.Loop() return mountPoint, func() { lc, lns := state.Latencies().Get("LOOKUP") gc, gns := state.Latencies().Get("GETATTR") fmt.Printf("GETATTR %dus/call n=%d, LOOKUP %dus/call n=%d\n", gns/int64(1000*lc), gc, lns/int64(1000*lc), lc) err := state.Unmount() if err != nil { log.Println("error during unmount", err) } else { os.RemoveAll(mountPoint) } } }
// main mounts an AutoUnionFs that manages union filesystems under
// BASEDIR and serves them at MOUNTPOINT. Flags control debug output,
// hardlink support (client inodes), cache TTLs, and the name of the
// per-workspace deletion directory.
func main() {
	version := flag.Bool("version", false, "print version number")
	debug := flag.Bool("debug", false, "debug on")
	hardlinks := flag.Bool("hardlinks", false, "support hardlinks")
	delcache_ttl := flag.Float64("deletion_cache_ttl", 5.0, "Deletion cache TTL in seconds.")
	branchcache_ttl := flag.Float64("branchcache_ttl", 5.0, "Branch cache TTL in seconds.")
	deldirname := flag.String(
		"deletion_dirname", "GOUNIONFS_DELETIONS", "Directory name to use for deletions.")
	flag.Parse()
	if *version {
		fmt.Println(fuse.Version())
		os.Exit(0)
	}
	if len(flag.Args()) < 2 {
		fmt.Println("Usage:\n main MOUNTPOINT BASEDIR")
		os.Exit(2)
	}
	// Convert the float "seconds" flags into time.Duration values.
	ufsOptions := unionfs.UnionFsOptions{
		DeletionCacheTTL: time.Duration(*delcache_ttl * float64(time.Second)),
		BranchCacheTTL:   time.Duration(*branchcache_ttl * float64(time.Second)),
		DeletionDirName:  *deldirname,
	}
	options := unionfs.AutoUnionFsOptions{
		UnionFsOptions: ufsOptions,
		FileSystemOptions: fuse.FileSystemOptions{
			EntryTimeout:    time.Second,
			AttrTimeout:     time.Second,
			NegativeTimeout: time.Second,
			Owner:           fuse.CurrentOwner(),
		},
		UpdateOnMount: true,
		// ClientInodes is required for hardlink support.
		PathNodeFsOptions: fuse.PathNodeFsOptions{
			ClientInodes: *hardlinks,
		},
	}
	fmt.Printf("AutoUnionFs - Go-FUSE Version %v.\n", fuse.Version())
	gofs := unionfs.NewAutoUnionFs(flag.Arg(1), options)
	pathfs := fuse.NewPathNodeFs(gofs, nil)
	state, conn, err := fuse.MountNodeFileSystem(flag.Arg(0), pathfs, nil)
	if err != nil {
		fmt.Printf("Mount fail: %v\n", err)
		os.Exit(1)
	}
	pathfs.Debug = *debug
	conn.Debug = *debug
	state.Debug = *debug
	// Let the filesystem trigger unmount/status reporting on itself.
	gofs.SetMountState(state)
	// Serve requests until the filesystem is unmounted.
	state.Loop()
}
func newRpcFsTestCase(t *testing.T) (me *rpcFsTestCase) { me = &rpcFsTestCase{tester: t} me.tmp, _ = ioutil.TempDir("", "term-fss") me.mnt = me.tmp + "/mnt" me.orig = me.tmp + "/orig" srvCache := me.tmp + "/server-cache" os.Mkdir(me.mnt, 0700) os.Mkdir(me.orig, 0700) copts := cba.StoreOptions{ Dir: srvCache, } me.serverStore = cba.NewStore(&copts) me.attr = attr.NewAttributeCache( func(n string) *attr.FileAttr { return me.getattr(n) }, func(n string) *fuse.Attr { return StatForTest(t, filepath.Join(me.orig, n)) }) me.attr.Paranoia = true me.server = NewFsServer(me.attr, me.serverStore) var err error me.sockL, me.sockR, err = unixSocketpair() if err != nil { t.Fatal(err) } me.contentL, me.contentR, err = unixSocketpair() if err != nil { t.Fatal(err) } rpcServer := rpc.NewServer() rpcServer.Register(me.server) go rpcServer.ServeConn(me.sockL) go me.serverStore.ServeConn(me.contentL) rpcClient := rpc.NewClient(me.sockR) cOpts := cba.StoreOptions{ Dir: me.tmp + "/client-cache", } me.clientStore = cba.NewStore(&cOpts) me.rpcFs = NewRpcFs(rpcClient, me.clientStore, me.contentR) me.rpcFs.id = "rpcfs_test" nfs := fuse.NewPathNodeFs(me.rpcFs, nil) me.state, _, err = fuse.MountNodeFileSystem(me.mnt, nfs, nil) me.state.Debug = fuse.VerboseTest() if err != nil { t.Fatal("Mount", err) } go me.state.Loop() return me }
func main() { flag.Parse() if len(flag.Args()) < 1 { log.Fatal("Usage:\n hello MOUNTPOINT") } nfs := fuse.NewPathNodeFs(&HelloFs{}, nil) state, _, err := fuse.MountNodeFileSystem(flag.Arg(0), nfs, nil) if err != nil { log.Fatal("Mount fail: %v\n", err) } state.Loop() }
// Creates 3 directories on a temporary dir: /mnt with the overlayed // (unionfs) mount, rw with modifiable data, and ro on the bottom. func setupUfs(t *testing.T) (workdir string, cleanup func()) { // Make sure system setting does not affect test. syscall.Umask(0) wd, _ := ioutil.TempDir("", "unionfs") err := os.Mkdir(wd+"/mnt", 0700) if err != nil { t.Fatalf("Mkdir failed: %v", err) } err = os.Mkdir(wd+"/rw", 0700) if err != nil { t.Fatalf("Mkdir failed: %v", err) } os.Mkdir(wd+"/ro", 0700) if err != nil { t.Fatalf("Mkdir failed: %v", err) } var fses []fuse.FileSystem fses = append(fses, fuse.NewLoopbackFileSystem(wd+"/rw")) fses = append(fses, NewCachingFileSystem(fuse.NewLoopbackFileSystem(wd+"/ro"), 0)) ufs := NewUnionFs(fses, testOpts) // We configure timeouts are smaller, so we can check for // UnionFs's cache consistency. opts := &fuse.FileSystemOptions{ EntryTimeout: entryTtl / 2, AttrTimeout: entryTtl / 2, NegativeTimeout: entryTtl / 2, } pathfs := fuse.NewPathNodeFs(ufs, &fuse.PathNodeFsOptions{ClientInodes: true}) state, conn, err := fuse.MountNodeFileSystem(wd+"/mnt", pathfs, opts) if err != nil { t.Fatalf("MountNodeFileSystem failed: %v", err) } conn.Debug = fuse.VerboseTest() state.Debug = fuse.VerboseTest() go state.Loop() return wd, func() { err := state.Unmount() if err != nil { return } setRecursiveWritable(t, wd, true) os.RemoveAll(wd) } }
// main is the entry point of the (older go-fuse API) auto-unionfs
// daemon: it parses flags, builds an AutoUnionFs over BASEDIR, and
// serves it on MOUNTPOINT until unmounted.
func main() {
	version := flag.Bool("version", false, "print version number")
	debug := flag.Bool("debug", false, "debug on")
	threaded := flag.Bool("threaded", true, "threading on")
	delcache_ttl := flag.Float64("deletion_cache_ttl", 5.0, "Deletion cache TTL in seconds.")
	branchcache_ttl := flag.Float64("branchcache_ttl", 5.0, "Branch cache TTL in seconds.")
	deldirname := flag.String(
		"deletion_dirname", "GOUNIONFS_DELETIONS", "Directory name to use for deletions.")
	flag.Parse()
	if *version {
		fmt.Println(fuse.Version())
		os.Exit(0)
	}
	if len(flag.Args()) < 2 {
		fmt.Println("Usage:\n main MOUNTPOINT BASEDIR")
		os.Exit(2)
	}
	// NOTE: TTLs here are plain float seconds (older API convention),
	// not time.Duration values.
	ufsOptions := unionfs.UnionFsOptions{
		DeletionCacheTTLSecs: *delcache_ttl,
		BranchCacheTTLSecs:   *branchcache_ttl,
		DeletionDirName:      *deldirname,
	}
	options := unionfs.AutoUnionFsOptions{
		UnionFsOptions: ufsOptions,
		FileSystemOptions: fuse.FileSystemOptions{
			EntryTimeout:    1.0,
			AttrTimeout:     1.0,
			NegativeTimeout: 1.0,
			Owner:           fuse.CurrentOwner(),
		},
		UpdateOnMount: true,
	}
	gofs := unionfs.NewAutoUnionFs(flag.Arg(1), options)
	pathfs := fuse.NewPathNodeFs(gofs)
	state, conn, err := fuse.MountNodeFileSystem(flag.Arg(0), pathfs, nil)
	if err != nil {
		fmt.Printf("Mount fail: %v\n", err)
		os.Exit(1)
	}
	pathfs.Debug = *debug
	conn.Debug = *debug
	state.Debug = *debug
	// Serve requests; the flag selects single- vs multi-threaded mode.
	state.Loop(*threaded)
}
func setupMzfs() (mountPoint string, cleanup func()) { fs := NewMultiZipFs() mountPoint, _ = ioutil.TempDir("", "") nfs := fuse.NewPathNodeFs(fs, nil) state, _, err := fuse.MountNodeFileSystem(mountPoint, nfs, &fuse.FileSystemOptions{ EntryTimeout: testTtl, AttrTimeout: testTtl, NegativeTimeout: 0.0, }) CheckSuccess(err) state.Debug = fuse.VerboseTest() go state.Loop() return mountPoint, func() { state.Unmount() os.RemoveAll(mountPoint) } }
func main() { debug := flag.Bool("debug", false, "debug on") portable := flag.Bool("portable", false, "use 32 bit inodes") entry_ttl := flag.Float64("entry_ttl", 1.0, "fuse entry cache TTL.") negative_ttl := flag.Float64("negative_ttl", 1.0, "fuse negative entry cache TTL.") delcache_ttl := flag.Float64("deletion_cache_ttl", 5.0, "Deletion cache TTL in seconds.") branchcache_ttl := flag.Float64("branchcache_ttl", 5.0, "Branch cache TTL in seconds.") deldirname := flag.String( "deletion_dirname", "GOUNIONFS_DELETIONS", "Directory name to use for deletions.") flag.Parse() if len(flag.Args()) < 2 { fmt.Println("Usage:\n unionfs MOUNTPOINT RW-DIRECTORY RO-DIRECTORY ...") os.Exit(2) } ufsOptions := unionfs.UnionFsOptions{ DeletionCacheTTL: time.Duration(*delcache_ttl * float64(time.Second)), BranchCacheTTL: time.Duration(*branchcache_ttl * float64(time.Second)), DeletionDirName: *deldirname, } ufs, err := unionfs.NewUnionFsFromRoots(flag.Args()[1:], &ufsOptions, true) if err != nil { log.Fatal("Cannot create UnionFs", err) os.Exit(1) } nodeFs := fuse.NewPathNodeFs(ufs, &fuse.PathNodeFsOptions{ClientInodes: true}) mOpts := fuse.FileSystemOptions{ EntryTimeout: time.Duration(*entry_ttl * float64(time.Second)), AttrTimeout: time.Duration(*entry_ttl * float64(time.Second)), NegativeTimeout: time.Duration(*negative_ttl * float64(time.Second)), PortableInodes: *portable, } mountState, _, err := fuse.MountNodeFileSystem(flag.Arg(0), nodeFs, &mOpts) if err != nil { log.Fatal("Mount fail:", err) } mountState.Debug = *debug mountState.Loop() }
func setupFs(fs fuse.FileSystem, opts *fuse.FileSystemOptions) (string, func()) { mountPoint, _ := ioutil.TempDir("", "stat_test") nfs := fuse.NewPathNodeFs(fs, nil) state, _, err := fuse.MountNodeFileSystem(mountPoint, nfs, opts) if err != nil { panic(fmt.Sprintf("cannot mount %v", err)) // ugh - benchmark has no error methods. } // state.Debug = true go state.Loop() return mountPoint, func() { err := state.Unmount() if err != nil { log.Println("error during unmount", err) } else { os.RemoveAll(mountPoint) } } }
func main() { // Scans the arg list and sets up flags debug := flag.Bool("debug", false, "print debugging messages.") other := flag.Bool("allow-other", false, "mount with -o allowother.") flag.Parse() if flag.NArg() < 2 { // TODO - where to get program name? fmt.Println("usage: main MOUNTPOINT ORIGINAL") os.Exit(2) } var finalFs fuse.FileSystem orig := flag.Arg(1) loopbackfs := fuse.NewLoopbackFileSystem(orig) finalFs = loopbackfs opts := &fuse.FileSystemOptions{ // These options are to be compatible with libfuse defaults, // making benchmarking easier. NegativeTimeout: time.Second, AttrTimeout: time.Second, EntryTimeout: time.Second, } pathFs := fuse.NewPathNodeFs(finalFs, nil) conn := fuse.NewFileSystemConnector(pathFs, opts) state := fuse.NewMountState(conn) state.Debug = *debug mountPoint := flag.Arg(0) fmt.Println("Mounting") mOpts := &fuse.MountOptions{ AllowOther: *other, } err := state.Mount(mountPoint, mOpts) if err != nil { fmt.Printf("Mount fail: %v\n", err) os.Exit(1) } fmt.Println("Mounted!") state.Loop() }
func setupUfs(t *testing.T) (workdir string, cleanup func()) { // Make sure system setting does not affect test. syscall.Umask(0) wd, _ := ioutil.TempDir("", "") err := os.Mkdir(wd+"/mnt", 0700) fuse.CheckSuccess(err) err = os.Mkdir(wd+"/rw", 0700) fuse.CheckSuccess(err) os.Mkdir(wd+"/ro", 0700) fuse.CheckSuccess(err) var fses []fuse.FileSystem fses = append(fses, fuse.NewLoopbackFileSystem(wd+"/rw")) fses = append(fses, NewCachingFileSystem(fuse.NewLoopbackFileSystem(wd+"/ro"), 0)) ufs := NewUnionFs(fses, testOpts) // We configure timeouts are smaller, so we can check for // UnionFs's cache consistency. opts := &fuse.FileSystemOptions{ EntryTimeout: .5 * entryTtl, AttrTimeout: .5 * entryTtl, NegativeTimeout: .5 * entryTtl, } pathfs := fuse.NewPathNodeFs(ufs, &fuse.PathNodeFsOptions{ClientInodes: true}) state, conn, err := fuse.MountNodeFileSystem(wd+"/mnt", pathfs, opts) CheckSuccess(err) conn.Debug = fuse.VerboseTest() state.Debug = fuse.VerboseTest() go state.Loop() return wd, func() { state.Unmount() os.RemoveAll(wd) } }
func main() { // Scans the arg list and sets up flags debug := flag.Bool("debug", false, "debug on") flag.Parse() if flag.NArg() < 1 { _, prog := filepath.Split(os.Args[0]) fmt.Printf("usage: %s MOUNTPOINT\n", prog) os.Exit(2) } fs := zipfs.NewMultiZipFs() nfs := fuse.NewPathNodeFs(fs, nil) state, _, err := fuse.MountNodeFileSystem(flag.Arg(0), nfs, nil) if err != nil { fmt.Printf("Mount fail: %v\n", err) os.Exit(1) } state.Debug = *debug state.Loop() }
func main() { flag.Parse() if len(flag.Args()) != 2 { log.Fatal("Usage:\n fuse mountpoint backup-folder") } var blobStorage blob.BlobStorage var objectStorage object.ObjectStorage var err error storageFile := flag.Arg(1) + "/.objects" if blobStorage, err = blob.NewFileBasedBlobStorage(storageFile); err != nil { log.Fatalf("Error loading object database. Are you running from the correct directory? (%s)\n", storageFile) } objectStorage = object.NewJSONStorage(blobStorage) backupFs := &BackupFs{blobStorage: blobStorage, objectStorage: objectStorage} backupFs.init("HEAD") nfs := fuse.NewPathNodeFs(backupFs, nil) state, _, err := fuse.MountNodeFileSystem(flag.Arg(0), nfs, nil) if err != nil { log.Fatal("Mount fail: %v\n", err) } state.Loop() }
func main() { cachedir := flag.String("cachedir", "/tmp/termite-cache", "content cache") server := flag.String("server", "localhost:1234", "file server") secretFile := flag.String("secret", "/tmp/secret.txt", "file containing password.") flag.Parse() if flag.NArg() < 1 { fmt.Fprintf(os.Stderr, "usage: %s MOUNTPOINT\n", os.Args[0]) os.Exit(2) } secret, err := ioutil.ReadFile(*secretFile) if err != nil { log.Fatal("ReadFile", err) } rpcConn, err := termite.DialTypedConnection(*server, termite.RPC_CHANNEL, secret) if err != nil { log.Fatal("dialing:", err) } cache := termite.NewContentCache(*cachedir) fs := termite.NewRpcFs(rpc.NewClient(rpcConn), cache) conn := fuse.NewFileSystemConnector(fuse.NewPathNodeFs(fs), nil) state := fuse.NewMountState(conn) opts := fuse.MountOptions{} if os.Geteuid() == 0 { opts.AllowOther = true } state.Mount(flag.Arg(0), &opts) if err != nil { fmt.Printf("Mount fail: %v\n", err) os.Exit(1) } state.Debug = true state.Loop(false) }
func MountVfs(databasePath string, mountPath string, allowOther bool) (*FuseVfs, error) { fuseVfs := FuseVfs{} pathNodeFs := fuse.NewPathNodeFs(&fuseVfs, nil) conn := fuse.NewFileSystemConnector(pathNodeFs, nil) state := fuse.NewMountState(conn) mountOptions := fuse.MountOptions{AllowOther: allowOther} err := state.Mount(mountPath, &mountOptions) if err != nil { return nil, fmt.Errorf("could not mount virtual filesystem at '%v': %v", mountPath, err) } store, err := storage.OpenAt(databasePath) if err != nil { return nil, fmt.Errorf("could not open database at '%v': %v", databasePath, err) } fuseVfs.store = store fuseVfs.mountPath = mountPath fuseVfs.state = state return &fuseVfs, nil }
func main() { flag.Usage = func() { fmt.Fprint(os.Stderr, "Usage:\n icasefs [options] MOUNTPOINT ORIGDIR\n") flag.PrintDefaults() } flag.Parse() if flag.NArg() != 2 { flag.Usage() os.Exit(1) } if logFile, err := configureLogging(); err != nil { log.Fatalf("Error configuring logging: %v", err) } else if logFile != nil { defer logFile.Close() } origDir, err := filepath.Abs(flag.Arg(1)) if err != nil { log.Fatalf("Error resolving ORIGDIR: %v", err) } fs := NewFS(origDir, *reportFilename) nfs := fuse.NewPathNodeFs(fs, nil) state, _, err := fuse.MountNodeFileSystem(flag.Arg(0), nfs, nil) if err != nil { log.Fatalf("Mount fail: %v", err) } state.Loop() err = fs.WriteReport() if err != nil { log.Printf("Failed to write matches: %v", err) } }
func (me *AutoUnionFs) createFs(name string, roots []string) fuse.Status { me.lock.Lock() defer me.lock.Unlock() for workspace, root := range me.nameRootMap { if root == roots[0] && workspace != name { log.Printf("Already have a union FS for directory %s in workspace %s", roots[0], workspace) return fuse.EBUSY } } known := me.knownFileSystems[name] if known.UnionFs != nil { log.Println("Already have a workspace:", name) return fuse.EBUSY } ufs, err := NewUnionFsFromRoots(roots, &me.options.UnionFsOptions, true) if err != nil { log.Println("Could not create UnionFs:", err) return fuse.EPERM } log.Printf("Adding workspace %v for roots %v", name, ufs.Name()) nfs := fuse.NewPathNodeFs(ufs) code := me.nodeFs.Mount(name, nfs, &me.options.FileSystemOptions) if code.Ok() { me.knownFileSystems[name] = knownFs{ ufs, nfs, } me.nameRootMap[name] = roots[0] } return code }
// TestUnionFsDisappearing verifies that the union mount fails cleanly
// while its writable branch is unavailable, and recovers once the
// branch comes back and the caches expire.
func TestUnionFsDisappearing(t *testing.T) {
	// This init is like setupUfs, but we want access to the
	// writable Fs.
	wd, _ := ioutil.TempDir("", "")
	defer os.RemoveAll(wd)
	err := os.Mkdir(wd+"/mnt", 0700)
	fuse.CheckSuccess(err)
	err = os.Mkdir(wd+"/rw", 0700)
	fuse.CheckSuccess(err)
	// NOTE(review): this Mkdir's error is discarded; the CheckSuccess
	// below re-checks the stale err from the previous call.
	os.Mkdir(wd+"/ro", 0700)
	fuse.CheckSuccess(err)

	wrFs := fuse.NewLoopbackFileSystem(wd + "/rw")
	var fses []fuse.FileSystem
	fses = append(fses, wrFs)
	fses = append(fses, fuse.NewLoopbackFileSystem(wd+"/ro"))
	ufs := NewUnionFs(fses, testOpts)

	opts := &fuse.FileSystemOptions{
		EntryTimeout:    entryTtl,
		AttrTimeout:     entryTtl,
		NegativeTimeout: entryTtl,
	}

	nfs := fuse.NewPathNodeFs(ufs, nil)
	state, _, err := fuse.MountNodeFileSystem(wd+"/mnt", nfs, opts)
	CheckSuccess(err)
	defer state.Unmount()
	state.Debug = fuse.VerboseTest()
	go state.Loop()

	log.Println("TestUnionFsDisappearing2")
	err = ioutil.WriteFile(wd+"/ro/file", []byte("blabla"), 0644)
	CheckSuccess(err)
	freezeRo(wd + "/ro")

	err = os.Remove(wd + "/mnt/file")
	CheckSuccess(err)

	// Simulate the writable branch vanishing by pointing it somewhere
	// that is not a directory.
	oldRoot := wrFs.Root
	wrFs.Root = "/dev/null"
	// Sleep past the entry TTL so cached lookups expire before probing.
	time.Sleep(1.5 * entryTtl * 1e9)

	_, err = ioutil.ReadDir(wd + "/mnt")
	if err == nil {
		t.Fatal("Readdir should have failed")
	}
	log.Println("expected readdir failure:", err)

	err = ioutil.WriteFile(wd+"/mnt/file2", []byte("blabla"), 0644)
	if err == nil {
		t.Fatal("write should have failed")
	}
	log.Println("expected write failure:", err)

	// Restore, and wait for caches to catch up.
	wrFs.Root = oldRoot
	time.Sleep(1.5 * entryTtl * 1e9)

	_, err = ioutil.ReadDir(wd + "/mnt")
	if err != nil {
		t.Fatal("Readdir should succeed", err)
	}
	err = ioutil.WriteFile(wd+"/mnt/file2", []byte("blabla"), 0644)
	if err != nil {
		t.Fatal("write should succeed", err)
	}
}
// main mounts a loopback view of ORIGINAL at MOUNTPOINT, optionally
// wrapping the filesystem in timing/debug layers (at both the
// path-level and raw protocol level) to record operation latencies.
func main() {
	// Scans the arg list and sets up flags
	debug := flag.Bool("debug", false, "print debugging messages.")
	latencies := flag.Bool("latencies", false, "record latencies.")
	threaded := flag.Bool("threaded", true, "switch off threading; print debugging messages.")
	flag.Parse()
	if flag.NArg() < 2 {
		// TODO - where to get program name?
		fmt.Println("usage: main MOUNTPOINT ORIGINAL")
		os.Exit(2)
	}

	var finalFs fuse.FileSystem
	orig := flag.Arg(1)
	loopbackfs := fuse.NewLoopbackFileSystem(orig)
	finalFs = loopbackfs

	debugFs := fuse.NewFileSystemDebug()
	if *latencies {
		// Wrap the loopback FS in a timing layer and register it with
		// the debug FS so per-operation latencies can be inspected.
		timing := fuse.NewTimingFileSystem(finalFs)
		debugFs.AddTimingFileSystem(timing)
		finalFs = timing
	}

	opts := &fuse.FileSystemOptions{
		// These options are to be compatible with libfuse defaults,
		// making benchmarking easier.
		NegativeTimeout: 1.0,
		AttrTimeout:     1.0,
		EntryTimeout:    1.0,
	}
	if *latencies {
		// Make the debug FS the outermost path-level wrapper.
		debugFs.FileSystem = finalFs
		finalFs = debugFs
	}
	conn := fuse.NewFileSystemConnector(fuse.NewPathNodeFs(finalFs), opts)
	var finalRawFs fuse.RawFileSystem = conn
	if *latencies {
		// Additionally time the raw (protocol-level) operations.
		rawTiming := fuse.NewTimingRawFileSystem(conn)
		debugFs.AddRawTimingFileSystem(rawTiming)
		finalRawFs = rawTiming
	}

	state := fuse.NewMountState(finalRawFs)
	state.Debug = *debug
	if *latencies {
		state.SetRecordStatistics(true)
		debugFs.AddMountState(state)
	}
	mountPoint := flag.Arg(0)

	fmt.Println("Mounting")
	err := state.Mount(mountPoint, nil)
	if err != nil {
		fmt.Printf("Mount fail: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("Mounted!")
	// Serve requests; the flag selects single- vs multi-threaded mode.
	state.Loop(*threaded)
}
func main() { // Scans the arg list and sets up flags debug := flag.Bool("debug", false, "print debugging messages.") latencies := flag.Bool("latencies", false, "record operation latencies.") profile := flag.String("profile", "", "record cpu profile.") mem_profile := flag.String("mem-profile", "", "record memory profile.") command := flag.String("run", "", "run this command after mounting.") ttl := flag.Float64("ttl", 1.0, "attribute/entry cache TTL.") flag.Parse() if flag.NArg() < 2 { fmt.Fprintf(os.Stderr, "usage: %s MOUNTPOINT FILENAMES-FILE\n", os.Args[0]) os.Exit(2) } var profFile, memProfFile io.Writer var err error if *profile != "" { profFile, err = os.Create(*profile) if err != nil { log.Fatalf("os.Create: %v", err) } } if *mem_profile != "" { memProfFile, err = os.Create(*mem_profile) if err != nil { log.Fatalf("os.Create: %v", err) } } fs := benchmark.NewStatFs() lines := benchmark.ReadLines(flag.Arg(1)) for _, l := range lines { fs.AddFile(l) } nfs := fuse.NewPathNodeFs(fs, nil) opts := &fuse.FileSystemOptions{ AttrTimeout: time.Duration(*ttl * float64(time.Second)), EntryTimeout: time.Duration(*ttl * float64(time.Second)), } state, _, err := fuse.MountNodeFileSystem(flag.Arg(0), nfs, opts) if err != nil { fmt.Printf("Mount fail: %v\n", err) os.Exit(1) } state.SetRecordStatistics(*latencies) state.SetDebug(*debug) runtime.GC() if profFile != nil { pprof.StartCPUProfile(profFile) defer pprof.StopCPUProfile() } if *command != "" { args := strings.Split(*command, " ") cmd := exec.Command(args[0], args[1:]...) cmd.Stdout = os.Stdout cmd.Start() } state.Loop() if memProfFile != nil { pprof.WriteHeapProfile(memProfFile) } }
// newWorkerFuseFs builds the per-task FUSE sandbox for a worker: a
// fresh temporary tree with rw (writable layer), mnt (the FUSE mount)
// and tmp-backing directories, the rpcFs mounted at the root, and
// submounts for proc, sys, tmp, dev, var/tmp and the writable root
// (a memory union FS over rpcFs). On any submount failure the whole
// mount is torn down and an error returned.
func newWorkerFuseFs(tmpDir string, rpcFs fuse.FileSystem, writableRoot string, nobody *User) (*workerFuseFs, error) {
	tmpDir, err := ioutil.TempDir(tmpDir, "termite-task")
	if err != nil {
		return nil, err
	}
	me := &workerFuseFs{
		tmpDir:       tmpDir,
		writableRoot: strings.TrimLeft(writableRoot, "/"),
		tasks:        map[*WorkerTask]bool{},
	}

	// Create the three scratch directories, storing each path into the
	// corresponding struct field (or the local tmpBacking variable).
	type dirInit struct {
		dst *string
		val string
	}
	tmpBacking := ""
	for _, v := range []dirInit{
		{&me.rwDir, "rw"},
		{&me.mount, "mnt"},
		{&tmpBacking, "tmp-backing"},
	} {
		*v.dst = filepath.Join(me.tmpDir, v.val)
		err = os.Mkdir(*v.dst, 0700)
		if err != nil {
			return nil, err
		}
	}

	fuseOpts := fuse.MountOptions{}
	if os.Geteuid() == 0 {
		// Running as root: let other users access the mount.
		fuseOpts.AllowOther = true
	}

	me.rpcNodeFs = fuse.NewPathNodeFs(rpcFs, nil)
	ttl := 30 * time.Second
	mOpts := fuse.FileSystemOptions{
		EntryTimeout:    ttl,
		AttrTimeout:     ttl,
		NegativeTimeout: ttl,

		// 32-bit programs have trouble with 64-bit inode
		// numbers.
		PortableInodes: true,
	}
	me.fsConnector = fuse.NewFileSystemConnector(me.rpcNodeFs, &mOpts)
	me.MountState = fuse.NewMountState(me.fsConnector)
	err = me.MountState.Mount(me.mount, &fuseOpts)
	if err != nil {
		return nil, err
	}
	go me.MountState.Loop()

	// Writable layer: an in-memory union FS over the (read-only) rpcFs
	// view of the writable root.
	me.unionFs = fs.NewMemUnionFs(
		me.rwDir, &fuse.PrefixFileSystem{rpcFs, me.writableRoot})

	me.procFs = fs.NewProcFs()
	me.procFs.StripPrefix = me.mount
	if nobody != nil {
		me.procFs.Uid = nobody.Uid
	}

	type submount struct {
		mountpoint string
		fs         fuse.NodeFileSystem
	}
	mounts := []submount{
		{"proc", fuse.NewPathNodeFs(me.procFs, nil)},
		{"sys", fuse.NewPathNodeFs(&fuse.ReadonlyFileSystem{fuse.NewLoopbackFileSystem("/sys")}, nil)},
		{"tmp", fuse.NewMemNodeFs(tmpBacking + "/tmp")},
		{"dev", fs.NewDevNullFs()},
		{"var/tmp", fuse.NewMemNodeFs(tmpBacking + "/vartmp")},
	}
	for _, s := range mounts {
		subOpts := &mOpts
		if s.mountpoint == "proc" {
			// proc gets default (nil) options rather than the long TTLs.
			subOpts = nil
		}

		code := me.rpcNodeFs.Mount(s.mountpoint, s.fs, subOpts)
		if !code.Ok() {
			if err := me.MountState.Unmount(); err != nil {
				log.Fatal("FUSE unmount error during cleanup:", err)
			}
			return nil, errors.New(fmt.Sprintf("submount error for %s: %v", s.mountpoint, code))
		}
	}
	if strings.HasPrefix(me.writableRoot, "tmp/") {
		parent, _ := filepath.Split(me.writableRoot)
		err := os.MkdirAll(filepath.Join(me.mount, parent), 0755)
		if err != nil {
			if err := me.MountState.Unmount(); err != nil {
				log.Fatal("FUSE unmount error during cleanup:", err)
			}

			return nil, errors.New(fmt.Sprintf("Mkdir of %q in /tmp fail: %v", parent, err))
		}
		// This is hackish, but we don't want rpcfs/fsserver
		// getting confused by asking for tmp/foo/bar
		// directly.
		rpcFs.GetAttr("tmp", nil)
		rpcFs.GetAttr(me.writableRoot, nil)
	}

	code := me.rpcNodeFs.Mount(me.writableRoot, me.unionFs, &mOpts)
	if !code.Ok() {
		if err := me.MountState.Unmount(); err != nil {
			log.Fatal("FUSE unmount error during cleanup:", err)
		}
		return nil, errors.New(fmt.Sprintf("submount error for %s: %v", me.writableRoot, code))
	}

	return me, nil
}