Example #1
func setUpBucket(
	ctx context.Context,
	flags *flagStorage,
	conn gcs.Conn,
	name string) (b gcs.Bucket, err error) {
	// Open the appropriate bucket.
	b, err = conn.OpenBucket(ctx, name)
	if err != nil {
		err = fmt.Errorf("OpenBucket: %v", err)
		return
	}

	// Enable rate limiting, if requested.
	b, err = setUpRateLimiting(
		b,
		flags.OpRateLimitHz,
		flags.EgressBandwidthLimitBytesPerSecond)

	if err != nil {
		err = fmt.Errorf("setUpRateLimiting: %v", err)
		return
	}

	// Enable cached StatObject results, if appropriate.
	if flags.StatCacheTTL != 0 {
		const cacheCapacity = 4096
		b = gcscaching.NewFastStatBucket(
			flags.StatCacheTTL,
			gcscaching.NewStatCache(cacheCapacity),
			timeutil.RealClock(),
			b)
	}

	return
}
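The function stacks decorators onto the raw bucket: open it, then add rate limiting, then stat caching. Below is a minimal caller sketch; the flag values and bucket name are made up, and error handling is abbreviated to log.Fatalf:

func exampleSetUpBucket(ctx context.Context, conn gcs.Conn) {
	flags := &flagStorage{
		OpRateLimitHz:                      10,
		EgressBandwidthLimitBytesPerSecond: 1 << 20, // 1 MiB/s
		StatCacheTTL:                       time.Minute,
	}

	bucket, err := setUpBucket(ctx, flags, conn, "some-bucket")
	if err != nil {
		log.Fatalf("setUpBucket: %v", err)
	}

	// Stat calls now pass through the rate limiter and a 4096-entry cache.
	_, err = bucket.StatObject(ctx, &gcs.StatObjectRequest{Name: "foo"})
	if err != nil {
		log.Fatalf("StatObject: %v", err)
	}
}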
Example #2
// Create a fake bucket with canned contents as described in the comments for
// FakeBucketName.
func MakeFakeBucket(ctx context.Context) (b gcs.Bucket) {
	b = gcsfake.NewFakeBucket(timeutil.RealClock(), FakeBucketName)

	// Set up contents.
	contents := map[string]string{
		TopLevelFile:    TopLevelFile_Contents,
		TopLevelDir:     TopLevelDir_Contents,
		ImplicitDirFile: ImplicitDirFile_Contents,
	}

	for k, v := range contents {
		_, err := b.CreateObject(
			ctx,
			&gcs.CreateObjectRequest{
				Name:     k,
				Contents: strings.NewReader(v),
			})

		if err != nil {
			log.Panicf("CreateObject: %v", err)
		}
	}

	return
}
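A sketch of how a test might consume the canned bucket, reading back one of the fixed objects. It assumes gcsutil.ReadObject from the same utility package referenced elsewhere in these examples:

func exampleCannedBucket(ctx context.Context) {
	b := canned.MakeFakeBucket(ctx)

	// Read back one of the canned objects and check its contents.
	contents, err := gcsutil.ReadObject(ctx, b, canned.TopLevelFile)
	if err != nil {
		log.Panicf("ReadObject: %v", err)
	}

	if string(contents) != canned.TopLevelFile_Contents {
		log.Panicf("Unexpected contents: %q", contents)
	}
}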
Example #3
// Read all blobs necessary for verifying the directory structure rooted at a
// set of backup root scores, ensuring that the entire directory structure is
// intact in GCS.
//
// Optionally, all file content is also read and verified. This is less
// important than verifying directory connectedness if we trust that GCS does
// not corrupt object metadata (where we store expected CRC32C and MD5) and
// does correctly report the object's CRC32C and MD5 sums in listings,
// verifying them periodically.
//
// If work is to be preserved across runs, knownStructure should be filled in
// with parenthood information from previously-generated records (for both
// files and directories). Nodes that exist as keys in this map will not be
// re-verified, except to confirm that their content still exists in allScores.
//
// It is expected that the blob store's Load method does score verification for
// us.
func Verify(
	ctx context.Context,
	readFiles bool,
	rootScores []blob.Score,
	allScores []blob.Score,
	knownStructure map[Node][]Node,
	records chan<- Record,
	blobStore blob.Store) (err error) {
	clock := timeutil.RealClock()

	// Set up a dependency resolver that reads directory listings. It also takes
	// care of confirming that all scores (for files and directories) exist.
	dr := newDependencyResolver(
		allScores,
		knownStructure,
		records,
		blobStore,
		clock)

	// Do we need to do anything for file nodes?
	var visitor dag.Visitor
	if readFiles {
		visitor = newVisitor(records, blobStore, clock)
	} else {
		visitor = &doNothingVisitor{}
	}

	// Traverse the graph.
	var rootNodes []dag.Node
	for _, s := range rootScores {
		n := Node{
			Score: s,
			Dir:   true,
		}

		rootNodes = append(rootNodes, n)
	}

	const resolverParallelism = 128
	const visitorParallelism = 128

	err = dag.Visit(
		ctx,
		rootNodes,
		dr,
		visitor,
		resolverParallelism,
		visitorParallelism)

	if err != nil {
		err = fmt.Errorf("dag.Visit: %v", err)
		return
	}

	return
}
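A minimal caller sketch: verify directory connectedness only (readFiles false) and discard the emitted records. The score slices and blob store are assumed to come from elsewhere:

func exampleVerify(
	ctx context.Context,
	rootScores []blob.Score,
	allScores []blob.Score,
	blobStore blob.Store) (err error) {
	records := make(chan Record, 128)
	done := make(chan struct{})

	// Drain the records channel. A real caller would persist these records
	// in order to supply knownStructure on the next run.
	go func() {
		defer close(done)
		for range records {
		}
	}()

	err = Verify(
		ctx,
		false, // readFiles
		rootScores,
		allScores,
		map[Node][]Node{}, // nothing known; verify everything
		records,
		blobStore)

	close(records)
	<-done
	return
}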
Example #4
func (t *PrefixBucketTest) SetUp(ti *TestInfo) {
	var err error

	t.ctx = ti.Ctx
	t.prefix = "foo_"
	t.wrapped = gcsfake.NewFakeBucket(timeutil.RealClock(), "some_bucket")

	t.bucket, err = gcsx.NewPrefixBucket(t.prefix, t.wrapped)
	AssertEq(nil, err)
}
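gcsx.NewPrefixBucket wraps the bucket so that object names are transparently namespaced under t.prefix ("foo_" plus the object name), giving the test an isolated view of the shared fake bucket.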
Example #5
func init() {
	makeDeps := func(ctx context.Context) (deps gcstesting.BucketTestDeps) {
		var err error

		// Set up the token source.
		const scope = gcs.Scope_FullControl
		tokenSrc, err := google.DefaultTokenSource(context.Background(), scope)
		AssertEq(nil, err)

		// Use that to create a GCS connection, enabling retry if requested.
		cfg := &gcs.ConnConfig{
			TokenSource: tokenSrc,
		}

		if *fUseRetry {
			cfg.MaxBackoffSleep = 5 * time.Minute
			deps.BuffersEntireContentsForCreate = true
		}

		if *fDebugGCS {
			cfg.GCSDebugLogger = log.New(os.Stderr, "gcs: ", 0)
		}

		if *fDebugHTTP {
			cfg.HTTPDebugLogger = log.New(os.Stderr, "http: ", 0)
		}

		conn, err := gcs.NewConn(cfg)
		AssertEq(nil, err)

		// Open the bucket.
		deps.Bucket, err = conn.OpenBucket(ctx, *fBucket)
		AssertEq(nil, err)

		// Clear the bucket.
		err = gcsutil.DeleteAllObjects(ctx, deps.Bucket)
		if err != nil {
			panic("DeleteAllObjects: " + err.Error())
		}

		// Set up other information.
		deps.Clock = timeutil.RealClock()
		deps.SupportsCancellation = true

		return
	}

	gcstesting.RegisterBucketTests(makeDeps)
}
Example #6
// Configure a bucket based on the supplied flags.
//
// Special case: if the bucket name is canned.FakeBucketName, set up a fake
// bucket as described in that package.
func setUpBucket(
	ctx context.Context,
	flags *flagStorage,
	conn gcs.Conn,
	name string) (b gcs.Bucket, err error) {
	// Set up the appropriate backing bucket.
	if name == canned.FakeBucketName {
		b = canned.MakeFakeBucket(ctx)
	} else {
		b, err = conn.OpenBucket(ctx, name)
		if err != nil {
			err = fmt.Errorf("OpenBucket: %v", err)
			return
		}
	}

	// Limit to a requested prefix of the bucket, if any.
	if flags.OnlyDir != "" {
		b, err = gcsx.NewPrefixBucket(path.Clean(flags.OnlyDir)+"/", b)
		if err != nil {
			err = fmt.Errorf("NewPrefixBucket: %v", err)
			return
		}
	}

	// Enable rate limiting, if requested.
	b, err = setUpRateLimiting(
		b,
		flags.OpRateLimitHz,
		flags.EgressBandwidthLimitBytesPerSecond)

	if err != nil {
		err = fmt.Errorf("setUpRateLimiting: %v", err)
		return
	}

	// Enable cached StatObject results, if appropriate.
	if flags.StatCacheTTL != 0 {
		const cacheCapacity = 4096
		b = gcscaching.NewFastStatBucket(
			flags.StatCacheTTL,
			gcscaching.NewStatCache(cacheCapacity),
			timeutil.RealClock(),
			b)
	}

	return
}
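Note the decorator order: the prefix wrapper is applied directly on top of the backing (real or fake) bucket, so the rate limiter and stat cache added afterward see only the already-prefixed view.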
Example #7
func (t *fsTest) SetUp(ti *TestInfo) {
	var err error
	t.ctx = ti.Ctx

	// Set up the clocks.
	t.mtimeClock = timeutil.RealClock()
	t.cacheClock.SetTime(time.Date(2015, 4, 5, 2, 15, 0, 0, time.Local))
	t.serverCfg.CacheClock = &t.cacheClock

	// And the bucket.
	if t.bucket == nil {
		t.bucket = gcsfake.NewFakeBucket(t.mtimeClock, "some_bucket")
	}

	t.serverCfg.Bucket = t.bucket

	// Set up ownership.
	t.serverCfg.Uid, t.serverCfg.Gid, err = perms.MyUserAndGroup()
	AssertEq(nil, err)

	// Set up permissions.
	t.serverCfg.FilePerms = filePerms
	t.serverCfg.DirPerms = dirPerms

	// Set up the append optimization.
	t.serverCfg.AppendThreshold = 0
	t.serverCfg.TmpObjectPrefix = ".gcsfuse_tmp/"

	// Set up a temporary directory for mounting.
	t.Dir, err = ioutil.TempDir("", "fs_test")
	AssertEq(nil, err)

	// Create a file system server.
	server, err := fs.NewServer(&t.serverCfg)
	AssertEq(nil, err)

	// Mount the file system.
	mountCfg := t.mountCfg
	mountCfg.OpContext = t.ctx

	if *fDebug {
		mountCfg.DebugLogger = log.New(os.Stderr, "fuse: ", 0)
	}

	t.mfs, err = fuse.Mount(t.Dir, server, &mountCfg)
	AssertEq(nil, err)
}
Example #8
func (t *cachingTestCommon) SetUp(ti *TestInfo) {
	// Wrap the bucket in a stat caching layer for the purposes of the file
	// system.
	t.uncachedBucket = gcsfake.NewFakeBucket(timeutil.RealClock(), "some_bucket")

	const statCacheCapacity = 1000
	statCache := gcscaching.NewStatCache(statCacheCapacity)
	t.bucket = gcscaching.NewFastStatBucket(
		ttl,
		statCache,
		&t.cacheClock,
		t.uncachedBucket)

	// Enable directory type caching.
	t.serverCfg.DirTypeCacheTTL = ttl

	// Call through.
	t.fsTest.SetUp(ti)
}
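Passing the test's simulated cacheClock here, rather than timeutil.RealClock, lets the test advance time deterministically (see fsTest.SetUp above) and observe stat cache entries expiring after ttl.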
Example #9
func newFakeBlobStore(ctx context.Context) (blobStore blob.Store, err error) {
	// Create a bucket.
	bucket := gcsfake.NewFakeBucket(timeutil.RealClock(), "some_bucket")

	// And a crypter.
	_, crypter, err := wiring.MakeRegistryAndCrypter(ctx, "password", bucket)
	if err != nil {
		err = fmt.Errorf("MakeRegistryAndCrypter: %v", err)
		return
	}

	// And the blob store.
	blobStore, err = wiring.MakeBlobStore(bucket, crypter, util.NewStringSet())
	if err != nil {
		err = fmt.Errorf("MakeBlobStore: %v", err)
		return
	}

	return
}
Example #10
func runSave(ctx context.Context, args []string) (err error) {
	cfg := getConfig()

	// Extract arguments.
	if len(args) != 1 {
		err = fmt.Errorf("Usage: %s save job_name", os.Args[0])
		return
	}

	jobName := args[0]

	// Look for the specified job.
	job, ok := cfg.Jobs[jobName]
	if !ok {
		err = fmt.Errorf("Unknown job: %q", jobName)
		return
	}

	// Resolve any symlinks in the job's base path. This saves us from a race in
	// the particular case of copying from a Time Machine volume, where the
	// 'Latest' symlink may be updated while we work.
	job.BasePath, err = filepath.EvalSymlinks(job.BasePath)
	if err != nil {
		err = fmt.Errorf("EvalSymlinks(%q): %v", job.BasePath, err)
		return
	}

	log.Printf("Using base path: %s", job.BasePath)

	// Special case: visit the file system only if --list_only is set.
	if *fListOnly {
		err = doList(ctx, job)
		if err != nil {
			err = fmt.Errorf("doList: %v", err)
			return
		}

		return
	}

	// Grab dependencies. Make sure to get the registry first, because otherwise
	// the user will have to wait for bucket keys to be listed before being
	// prompted for a crypto password.
	//
	// Make sure to do this before setting up state saving below, because these
	// calls may modify the state struct.
	reg := getRegistry(ctx)
	blobStore := getBlobStore(ctx)
	state := getState(ctx)
	clock := timeutil.RealClock()

	// Periodically save state.
	const saveStatePeriod = 15 * time.Second
	saveStateTicker := time.NewTicker(saveStatePeriod)
	go saveStatePeriodically(ctx, saveStateTicker.C)

	// Choose a start time for the job.
	startTime := clock.Now()

	// Call the saving pipeline.
	score, err := save.Save(
		ctx,
		job.BasePath,
		job.Excludes,
		state.ScoresForFiles,
		blobStore,
		log.New(os.Stderr, "Save progress: ", 0),
		clock)

	if err != nil {
		err = fmt.Errorf("save.Save: %v", err)
		return
	}

	// Register the successful backup.
	completedJob := registry.CompletedJob{
		StartTime: startTime,
		Name:      jobName,
		Score:     score,
	}

	err = reg.RecordBackup(ctx, completedJob)
	if err != nil {
		err = fmt.Errorf("RecordBackup: %v", err)
		return
	}

	log.Printf(
		"Successfully backed up with score %v. Start time: %v\n",
		score.Hex(),
		startTime.UTC())

	// Store state for next time.
	saveStateTicker.Stop()
	log.Println("Writing out final state file...")
	saveState(ctx)

	return
}
Example #11
// Mount the file system based on the supplied arguments, returning a
// fuse.MountedFileSystem that can be joined to wait for unmounting.
//
// In main, set flagSet to flag.CommandLine and pass in os.Args[1:]. In a test,
// pass in a virgin flag set and test arguments.
//
// Promises to pass on flag.ErrHelp from FlagSet.Parse.
func mount(
	ctx context.Context,
	args []string,
	flagSet *flag.FlagSet,
	conn gcs.Conn) (mfs *fuse.MountedFileSystem, err error) {
	// Populate and parse flags.
	flags := populateFlagSet(flagSet)

	err = flagSet.Parse(args)
	if err != nil {
		// Special case: don't mangle ErrHelp.
		if err != flag.ErrHelp {
			err = fmt.Errorf("Parsing flags: %v", err)
		}

		return
	}

	// Extract positional arguments.
	if flagSet.NArg() != 2 {
		flagSet.Usage()
		err = errors.New("Incorrect usage")
		return
	}

	bucketName := flagSet.Arg(0)
	mountPoint := flagSet.Arg(1)

	// Sanity check: make sure the temporary directory exists and is writable.
	// This gives a better user experience than harder-to-debug EIO errors when
	// reading files later.
	if flags.TempDir != "" {
		var f *os.File
		f, err = fsutil.AnonymousFile(flags.TempDir)
		if err != nil {
			err = fmt.Errorf(
				"Error writing to temporary directory (%q); are you sure it exists "+
					"with the correct permissions?",
				err.Error())
			return
		}

		f.Close()
	}

	// The file leaser used by the file system sizes its limit on number of
	// temporary files based on the process's rlimit. If this is too low, we'll
	// throw away cached content unnecessarily often. This is particularly a
	// problem on OS X, which has a crazy low default limit (256 as of OS X
	// 10.10.3). So print a warning if the limit is low.
	var rlimit unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit); err == nil {
		const reasonableLimit = 4096

		if rlimit.Cur < reasonableLimit {
			log.Printf(
				"Warning: low file rlimit of %d will cause cached content to be "+
					"frequently evicted. Consider raising with `ulimit -n`.",
				rlimit.Cur)
		}
	}

	// Choose UID and GID.
	uid, gid, err := perms.MyUserAndGroup()
	if err != nil {
		err = fmt.Errorf("MyUserAndGroup: %v", err)
		return
	}

	if flags.Uid >= 0 {
		uid = uint32(flags.Uid)
	}

	if flags.Gid >= 0 {
		gid = uint32(flags.Gid)
	}

	// Set up the bucket.
	bucket, err := setUpBucket(
		ctx,
		flags,
		conn,
		bucketName)

	if err != nil {
		err = fmt.Errorf("setUpBucket: %v", err)
		return
	}

	// Create a file system server.
	serverCfg := &fs.ServerConfig{
		Clock:                timeutil.RealClock(),
		Bucket:               bucket,
		TempDir:              flags.TempDir,
		TempDirLimitNumFiles: fs.ChooseTempDirLimitNumFiles(),
		TempDirLimitBytes:    flags.TempDirLimit,
		GCSChunkSize:         flags.GCSChunkSize,
		ImplicitDirectories:  flags.ImplicitDirs,
		DirTypeCacheTTL:      flags.TypeCacheTTL,
		Uid:                  uid,
		Gid:                  gid,
		FilePerms:            os.FileMode(flags.FileMode),
		DirPerms:             os.FileMode(flags.DirMode),

		AppendThreshold: 1 << 21, // 2 MiB, a total guess.
		TmpObjectPrefix: ".gcsfuse_tmp/",
	}

	server, err := fs.NewServer(serverCfg)
	if err != nil {
		err = fmt.Errorf("fs.NewServer: %v", err)
		return
	}

	// Mount the file system.
	mountCfg := &fuse.MountConfig{
		FSName:      bucket.Name(),
		Options:     flags.MountOptions,
		ErrorLogger: log.New(os.Stderr, "fuse: ", log.Flags()),
	}

	mfs, err = fuse.Mount(mountPoint, server, mountCfg)
	if err != nil {
		err = fmt.Errorf("Mount: %v", err)
		return
	}

	return
}
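Per the doc comment, a sketch of the call from main; the gcs.Conn is assumed to have been created beforehand, and flag.ErrHelp is passed through untouched:

func exampleMain(conn gcs.Conn) {
	ctx := context.Background()

	mfs, err := mount(ctx, os.Args[1:], flag.CommandLine, conn)
	if err == flag.ErrHelp {
		return
	}

	if err != nil {
		log.Fatalf("mount: %v", err)
	}

	// Block until the file system is unmounted.
	if err := mfs.Join(ctx); err != nil {
		log.Fatalf("MountedFileSystem.Join: %v", err)
	}
}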
Example #12
// Mount the file system based on the supplied arguments, returning a
// fuse.MountedFileSystem that can be joined to wait for unmounting.
func mount(
	ctx context.Context,
	bucketName string,
	mountPoint string,
	flags *flagStorage,
	conn gcs.Conn) (mfs *fuse.MountedFileSystem, err error) {
	// Sanity check: make sure the temporary directory exists and is writable.
	// This gives a better user experience than harder-to-debug EIO errors when
	// reading files later.
	if flags.TempDir != "" {
		var f *os.File
		f, err = fsutil.AnonymousFile(flags.TempDir)
		if err != nil {
			err = fmt.Errorf(
				"Error writing to temporary directory (%q); are you sure it exists "+
					"with the correct permissions?",
				err.Error())
			return
		}

		f.Close()
	}

	// Choose UID and GID.
	uid, gid, err := perms.MyUserAndGroup()
	if err != nil {
		err = fmt.Errorf("MyUserAndGroup: %v", err)
		return
	}

	if flags.Uid >= 0 {
		uid = uint32(flags.Uid)
	}

	if flags.Gid >= 0 {
		gid = uint32(flags.Gid)
	}

	// Set up the bucket.
	bucket, err := setUpBucket(
		ctx,
		flags,
		conn,
		bucketName)

	if err != nil {
		err = fmt.Errorf("setUpBucket: %v", err)
		return
	}

	// Create a file system server.
	serverCfg := &fs.ServerConfig{
		CacheClock:             timeutil.RealClock(),
		Bucket:                 bucket,
		TempDir:                flags.TempDir,
		ImplicitDirectories:    flags.ImplicitDirs,
		InodeAttributeCacheTTL: flags.StatCacheTTL,
		DirTypeCacheTTL:        flags.TypeCacheTTL,
		Uid:                    uid,
		Gid:                    gid,
		FilePerms:              os.FileMode(flags.FileMode),
		DirPerms:               os.FileMode(flags.DirMode),

		AppendThreshold: 1 << 21, // 2 MiB, a total guess.
		TmpObjectPrefix: ".gcsfuse_tmp/",
	}

	server, err := fs.NewServer(serverCfg)
	if err != nil {
		err = fmt.Errorf("fs.NewServer: %v", err)
		return
	}

	// Mount the file system.
	mountCfg := &fuse.MountConfig{
		FSName:      bucket.Name(),
		Options:     flags.MountOptions,
		ErrorLogger: log.New(os.Stderr, "fuse: ", log.Flags()),
	}

	if flags.DebugFuse {
		mountCfg.DebugLogger = log.New(os.Stderr, "fuse_debug: ", 0)
	}

	mfs, err = fuse.Mount(mountPoint, server, mountCfg)
	if err != nil {
		err = fmt.Errorf("Mount: %v", err)
		return
	}

	return
}
Example #13
File: fs.go Project: kahing/gcsfuse
// Create a fuse file system server according to the supplied configuration.
func NewServer(cfg *ServerConfig) (server fuse.Server, err error) {
	// Check permissions bits.
	if cfg.FilePerms&^os.ModePerm != 0 {
		err = fmt.Errorf("Illegal file perms: %v", cfg.FilePerms)
		return
	}

	if cfg.DirPerms&^os.ModePerm != 0 {
		err = fmt.Errorf("Illegal dir perms: %v", cfg.FilePerms)
		return
	}

	// Create the object syncer.
	if cfg.TmpObjectPrefix == "" {
		err = errors.New("You must set TmpObjectPrefix.")
		return
	}

	syncer := gcsx.NewSyncer(
		cfg.AppendThreshold,
		cfg.TmpObjectPrefix,
		cfg.Bucket)

	// Set up the basic struct.
	fs := &fileSystem{
		mtimeClock:             timeutil.RealClock(),
		cacheClock:             cfg.CacheClock,
		bucket:                 cfg.Bucket,
		syncer:                 syncer,
		tempDir:                cfg.TempDir,
		implicitDirs:           cfg.ImplicitDirectories,
		inodeAttributeCacheTTL: cfg.InodeAttributeCacheTTL,
		dirTypeCacheTTL:        cfg.DirTypeCacheTTL,
		uid:                    cfg.Uid,
		gid:                    cfg.Gid,
		fileMode:               cfg.FilePerms,
		dirMode:                cfg.DirPerms | os.ModeDir,
		inodes:                 make(map[fuseops.InodeID]inode.Inode),
		nextInodeID:            fuseops.RootInodeID + 1,
		generationBackedInodes: make(map[string]inode.GenerationBackedInode),
		implicitDirInodes:      make(map[string]inode.DirInode),
		handles:                make(map[fuseops.HandleID]interface{}),
	}

	// Set up the root inode.
	root := inode.NewDirInode(
		fuseops.RootInodeID,
		"", // name
		fuseops.InodeAttributes{
			Uid:  fs.uid,
			Gid:  fs.gid,
			Mode: fs.dirMode,
		},
		fs.implicitDirs,
		fs.dirTypeCacheTTL,
		cfg.Bucket,
		fs.mtimeClock,
		fs.cacheClock)

	root.Lock()
	root.IncrementLookupCount()
	fs.inodes[fuseops.RootInodeID] = root
	fs.implicitDirInodes[root.Name()] = root
	root.Unlock()

	// Set up invariant checking.
	fs.mu = syncutil.NewInvariantMutex(fs.checkInvariants)

	// Periodically garbage collect temporary objects.
	var gcCtx context.Context
	gcCtx, fs.stopGarbageCollecting = context.WithCancel(context.Background())
	go garbageCollect(gcCtx, cfg.TmpObjectPrefix, fs.bucket)

	server = fuseutil.NewFileSystemServer(fs)
	return
}
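The garbage collector runs on a context derived from context.Background rather than from any request, and the cancel function is stashed in fs.stopGarbageCollecting, presumably so the loop can be stopped when the file system is torn down.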
Example #14
File: mount.go Project: zfo/gcsfuse
// Mount the file system based on the supplied arguments, returning a
// fuse.MountedFileSystem that can be joined to wait for unmounting.
func mountWithConn(
	ctx context.Context,
	bucketName string,
	mountPoint string,
	flags *flagStorage,
	conn gcs.Conn,
	status *log.Logger) (mfs *fuse.MountedFileSystem, err error) {
	// Sanity check: make sure the temporary directory exists and is writable.
	// This gives a better user experience than harder-to-debug EIO errors when
	// reading files later.
	if flags.TempDir != "" {
		var f *os.File
		f, err = fsutil.AnonymousFile(flags.TempDir)
		if err != nil {
			err = fmt.Errorf(
				"Error writing to temporary directory (%q); are you sure it exists "+
					"with the correct permissions?",
				err.Error())
			return
		}

		f.Close()
	}

	// Find the current process's UID and GID. If it was invoked as root and the
	// user hasn't explicitly overridden --uid, everything is going to be owned
	// by root. This is probably not what the user wants, so print a warning.
	uid, gid, err := perms.MyUserAndGroup()
	if err != nil {
		err = fmt.Errorf("MyUserAndGroup: %v", err)
		return
	}

	if uid == 0 && flags.Uid < 0 {
		fmt.Fprintln(os.Stderr, `
WARNING: gcsfuse invoked as root. This will cause all files to be owned by
root. If this is not what you intended, invoke gcsfuse as the user that will
be interacting with the file system.
`)
	}

	// Choose UID and GID.
	if flags.Uid >= 0 {
		uid = uint32(flags.Uid)
	}

	if flags.Gid >= 0 {
		gid = uint32(flags.Gid)
	}

	// Set up the bucket.
	status.Println("Opening bucket...")

	bucket, err := setUpBucket(
		ctx,
		flags,
		conn,
		bucketName)

	if err != nil {
		err = fmt.Errorf("setUpBucket: %v", err)
		return
	}

	// Create a file system server.
	serverCfg := &fs.ServerConfig{
		CacheClock:             timeutil.RealClock(),
		Bucket:                 bucket,
		TempDir:                flags.TempDir,
		ImplicitDirectories:    flags.ImplicitDirs,
		InodeAttributeCacheTTL: flags.StatCacheTTL,
		DirTypeCacheTTL:        flags.TypeCacheTTL,
		Uid:                    uid,
		Gid:                    gid,
		FilePerms:              os.FileMode(flags.FileMode),
		DirPerms:               os.FileMode(flags.DirMode),

		AppendThreshold: 1 << 21, // 2 MiB, a total guess.
		TmpObjectPrefix: ".gcsfuse_tmp/",
	}

	server, err := fs.NewServer(serverCfg)
	if err != nil {
		err = fmt.Errorf("fs.NewServer: %v", err)
		return
	}

	// Mount the file system.
	status.Println("Mounting file system...")

	mountCfg := &fuse.MountConfig{
		FSName:      bucket.Name(),
		VolumeName:  bucket.Name(),
		Options:     flags.MountOptions,
		ErrorLogger: log.New(os.Stderr, "fuse: ", log.Flags()),
	}

	if flags.DebugFuse {
		mountCfg.DebugLogger = log.New(os.Stderr, "fuse_debug: ", 0)
	}

	mfs, err = fuse.Mount(mountPoint, server, mountCfg)
	if err != nil {
		err = fmt.Errorf("Mount: %v", err)
		return
	}

	return
}