Example #1
func (t *flushFSTest) setUp(
	ti *TestInfo,
	flushErr syscall.Errno,
	fsyncErr syscall.Errno,
	readOnly bool) {
	var err error

	// Set up files to receive flush and fsync reports.
	t.flushes, err = fsutil.AnonymousFile("")
	AssertEq(nil, err)

	t.fsyncs, err = fsutil.AnonymousFile("")
	AssertEq(nil, err)

	// Set up test config.
	t.MountType = "flushfs"
	t.MountFlags = []string{
		"--flushfs.flush_error",
		fmt.Sprintf("%d", int(flushErr)),

		"--flushfs.fsync_error",
		fmt.Sprintf("%d", int(fsyncErr)),
	}

	if readOnly {
		t.MountFlags = append(t.MountFlags, "--read_only")
	}

	t.MountFiles = map[string]*os.File{
		"flushfs.flushes_file": t.flushes,
		"flushfs.fsyncs_file":  t.fsyncs,
	}

	t.SubprocessTest.SetUp(ti)
}
Example #2
func (t *NoErrorsTest) Dup2() {
	var n int
	var err error

	// Open the file.
	t.f1, err = os.OpenFile(path.Join(t.Dir, "foo"), os.O_WRONLY, 0)
	AssertEq(nil, err)

	// Write some contents to the file.
	n, err = t.f1.Write([]byte("taco"))
	AssertEq(nil, err)
	AssertEq(4, n)

	// Create an anonymous temporary file.
	t.f2, err = fsutil.AnonymousFile("")
	AssertEq(nil, err)

	// Duplicate the temporary file descriptor on top of the file from our file
	// system. We should see a flush.
	err = dup2(int(t.f2.Fd()), int(t.f1.Fd()))
	ExpectEq(nil, err)

	ExpectThat(t.getFlushes(), ElementsAre("taco"))
	ExpectThat(t.getFsyncs(), ElementsAre())
}
Example #3
// Create a temp file whose initial contents are given by the supplied reader.
// dir is a directory on whose file system the inode will live, or the system
// default temporary location if empty.
func NewTempFile(
	content io.Reader,
	dir string,
	clock timeutil.Clock) (tf TempFile, err error) {
	// Create an anonymous file to wrap. When we close it, its resources will be
	// magically cleaned up.
	f, err := fsutil.AnonymousFile(dir)
	if err != nil {
		err = fmt.Errorf("AnonymousFile: %v", err)
		return
	}

	// Copy into the file.
	size, err := io.Copy(f, content)
	if err != nil {
		err = fmt.Errorf("copy: %v", err)
		return
	}

	tf = &tempFile{
		clock:          clock,
		f:              f,
		dirtyThreshold: size,
	}

	return
}
Example #4
// LOCKS_REQUIRED(fh)
func (fh *fileHandle) ensureFile(ctx context.Context) (err error) {
	// Is the file already present?
	if fh.file != nil {
		return
	}

	// Create a file.
	f, err := fsutil.AnonymousFile("")
	if err != nil {
		err = fmt.Errorf("AnonymousFile: %v", err)
		return
	}

	defer func() {
		if err != nil {
			f.Close()
		}
	}()

	// Copy in the contents.
	for _, s := range fh.scores {
		var p []byte

		// Load a chunk.
		p, err = fh.blobStore.Load(ctx, s)
		if err != nil {
			err = fmt.Errorf("Load(%s): %v", s.Hex(), err)
			return
		}

		// Unmarshal it.
		p, err = repr.UnmarshalFile(p)
		if err != nil {
			err = fmt.Errorf("UnmarshalFile: %v", err)
			return
		}

		// Write it out.
		_, err = f.Write(p)
		if err != nil {
			err = fmt.Errorf("Write: %v", err)
			return
		}
	}

	// Update state.
	fh.file = f

	return
}
Example #5
func (t *FlushErrorTest) Dup2() {
	var err error

	// Open the file.
	t.f1, err = os.OpenFile(path.Join(t.Dir, "foo"), os.O_WRONLY, 0)
	AssertEq(nil, err)

	// Create an anonymous temporary file.
	t.f2, err = fsutil.AnonymousFile("")
	AssertEq(nil, err)

	// Duplicate the temporary file descriptor on top of the file from our file
	// system. We shouldn't see the flush error.
	err = dup2(int(t.f2.Fd()), int(t.f1.Fd()))
	ExpectEq(nil, err)
}
Example #6
// LOCKS_EXCLUDED(fl.mu)
func (fl *fileLeaser) NewFile() (rwl ReadWriteLease, err error) {
	// Create an anonymous file.
	f, err := fsutil.AnonymousFile(fl.dir)
	if err != nil {
		err = fmt.Errorf("AnonymousFile: %v", err)
		return
	}

	// Wrap a lease around it.
	rwl = newReadWriteLease(fl, 0, f)

	// Update state.
	fl.mu.Lock()
	fl.readWriteCount++
	fl.evict(fl.limitNumFiles, fl.limitBytes)
	fl.mu.Unlock()

	return
}
Example #7
func getFile() (f *os.File, err error) {
	// Is there a pre-set file?
	if *fFile != "" {
		f, err = os.OpenFile(*fFile, os.O_RDONLY, 0)
		if err != nil {
			err = fmt.Errorf("OpenFile: %v", err)
			return
		}

		return
	}

	// Create a temporary file to hold random contents.
	f, err = fsutil.AnonymousFile("")
	if err != nil {
		err = fmt.Errorf("AnonymousFile: %v", err)
		return
	}

	// Copy a bunch of random data into the file.
	log.Println("Reading random data.")
	_, err = io.Copy(f, io.LimitReader(rand.Reader, *fSize))
	if err != nil {
		err = fmt.Errorf("Copy: %v", err)
		return
	}

	// Seek back to the start for consumption.
	_, err = f.Seek(0, 0)
	if err != nil {
		err = fmt.Errorf("Seek: %v", err)
		return
	}

	return
}
Example #8
// Mount the file system based on the supplied arguments, returning a
// fuse.MountedFileSystem that can be joined to wait for unmounting.
//
// In main, set flagSet to flag.CommandLine and pass in os.Args[1:]. In a test,
// pass in a virgin flag set and test arguments.
//
// Promises to pass on flag.ErrHelp from FlagSet.Parse.
func mount(
	ctx context.Context,
	args []string,
	flagSet *flag.FlagSet,
	conn gcs.Conn) (mfs *fuse.MountedFileSystem, err error) {
	// Populate and parse flags.
	flags := populateFlagSet(flagSet)

	err = flagSet.Parse(args)
	if err != nil {
		// Special case: don't mangle ErrHelp.
		if err != flag.ErrHelp {
			err = fmt.Errorf("Parsing flags: %v", err)
		}

		return
	}

	// Extract positional arguments.
	if flagSet.NArg() != 2 {
		flagSet.Usage()
		err = errors.New("Incorrect usage")
		return
	}

	bucketName := flagSet.Arg(0)
	mountPoint := flagSet.Arg(1)

	// Sanity check: make sure the temporary directory exists and is writable
	// currently. This gives a better user experience than harder to debug EIO
	// errors when reading files in the future.
	if flags.TempDir != "" {
		var f *os.File
		f, err = fsutil.AnonymousFile(flags.TempDir)
		f.Close()

		if err != nil {
			err = fmt.Errorf(
				"Error writing to temporary directory (%q); are you sure it exists "+
					"with the correct permissions?",
				err.Error())
			return
		}
	}

	// The file leaser used by the file system sizes its limit on number of
	// temporary files based on the process's rlimit. If this is too low, we'll
	// throw away cached content unnecessarily often. This is particularly a
	// problem on OS X, which has a crazy low default limit (256 as of OS X
	// 10.10.3). So print a warning if the limit is low.
	var rlimit unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit); err == nil {
		const reasonableLimit = 4096

		if rlimit.Cur < reasonableLimit {
			log.Printf(
				"Warning: low file rlimit of %d will cause cached content to be "+
					"frequently evicted. Consider raising with `ulimit -n`.",
				rlimit.Cur)
		}
	}

	// Choose UID and GID.
	uid, gid, err := perms.MyUserAndGroup()
	if err != nil {
		err = fmt.Errorf("MyUserAndGroup: %v", err)
		return
	}

	if flags.Uid >= 0 {
		uid = uint32(flags.Uid)
	}

	if flags.Gid >= 0 {
		gid = uint32(flags.Gid)
	}

	// Set up the bucket.
	bucket, err := setUpBucket(
		ctx,
		flags,
		conn,
		bucketName)

	if err != nil {
		err = fmt.Errorf("setUpBucket: %v", err)
		return
	}

	// Create a file system server.
	serverCfg := &fs.ServerConfig{
		Clock:                timeutil.RealClock(),
		Bucket:               bucket,
		TempDir:              flags.TempDir,
		TempDirLimitNumFiles: fs.ChooseTempDirLimitNumFiles(),
		TempDirLimitBytes:    flags.TempDirLimit,
		GCSChunkSize:         flags.GCSChunkSize,
		ImplicitDirectories:  flags.ImplicitDirs,
		DirTypeCacheTTL:      flags.TypeCacheTTL,
		Uid:                  uid,
		Gid:                  gid,
		FilePerms:            os.FileMode(flags.FileMode),
		DirPerms:             os.FileMode(flags.DirMode),

		AppendThreshold: 1 << 21, // 2 MiB, a total guess.
		TmpObjectPrefix: ".gcsfuse_tmp/",
	}

	server, err := fs.NewServer(serverCfg)
	if err != nil {
		err = fmt.Errorf("fs.NewServer: %v", err)
		return
	}

	// Mount the file system.
	mountCfg := &fuse.MountConfig{
		FSName:      bucket.Name(),
		Options:     flags.MountOptions,
		ErrorLogger: log.New(os.Stderr, "fuse: ", log.Flags()),
	}

	mfs, err = fuse.Mount(mountPoint, server, mountCfg)
	if err != nil {
		err = fmt.Errorf("Mount: %v", err)
		return
	}

	return
}
Example #9
// Mount the file system based on the supplied arguments, returning a
// fuse.MountedFileSystem that can be joined to wait for unmounting.
func mount(
	ctx context.Context,
	bucketName string,
	mountPoint string,
	flags *flagStorage,
	conn gcs.Conn) (mfs *fuse.MountedFileSystem, err error) {
	// Sanity check: make sure the temporary directory exists and is writable
	// currently. This gives a better user experience than harder to debug EIO
	// errors when reading files in the future.
	if flags.TempDir != "" {
		var f *os.File
		f, err = fsutil.AnonymousFile(flags.TempDir)
		f.Close()

		if err != nil {
			err = fmt.Errorf(
				"Error writing to temporary directory (%q); are you sure it exists "+
					"with the correct permissions?",
				err.Error())
			return
		}
	}

	// Choose UID and GID.
	uid, gid, err := perms.MyUserAndGroup()
	if err != nil {
		err = fmt.Errorf("MyUserAndGroup: %v", err)
		return
	}

	if flags.Uid >= 0 {
		uid = uint32(flags.Uid)
	}

	if flags.Gid >= 0 {
		gid = uint32(flags.Gid)
	}

	// Set up the bucket.
	bucket, err := setUpBucket(
		ctx,
		flags,
		conn,
		bucketName)

	if err != nil {
		err = fmt.Errorf("setUpBucket: %v", err)
		return
	}

	// Create a file system server.
	serverCfg := &fs.ServerConfig{
		CacheClock:             timeutil.RealClock(),
		Bucket:                 bucket,
		TempDir:                flags.TempDir,
		ImplicitDirectories:    flags.ImplicitDirs,
		InodeAttributeCacheTTL: flags.StatCacheTTL,
		DirTypeCacheTTL:        flags.TypeCacheTTL,
		Uid:                    uid,
		Gid:                    gid,
		FilePerms:              os.FileMode(flags.FileMode),
		DirPerms:               os.FileMode(flags.DirMode),

		AppendThreshold: 1 << 21, // 2 MiB, a total guess.
		TmpObjectPrefix: ".gcsfuse_tmp/",
	}

	server, err := fs.NewServer(serverCfg)
	if err != nil {
		err = fmt.Errorf("fs.NewServer: %v", err)
		return
	}

	// Mount the file system.
	mountCfg := &fuse.MountConfig{
		FSName:      bucket.Name(),
		Options:     flags.MountOptions,
		ErrorLogger: log.New(os.Stderr, "fuse: ", log.Flags()),
	}

	if flags.DebugFuse {
		mountCfg.DebugLogger = log.New(os.Stderr, "fuse_debug: ", 0)
	}

	mfs, err = fuse.Mount(mountPoint, server, mountCfg)
	if err != nil {
		err = fmt.Errorf("Mount: %v", err)
		return
	}

	return
}
Example #10
// Mount the file system based on the supplied arguments, returning a
// fuse.MountedFileSystem that can be joined to wait for unmounting.
func mountWithConn(
	ctx context.Context,
	bucketName string,
	mountPoint string,
	flags *flagStorage,
	conn gcs.Conn,
	status *log.Logger) (mfs *fuse.MountedFileSystem, err error) {
	// Sanity check: make sure the temporary directory exists and is writable
	// currently. This gives a better user experience than harder to debug EIO
	// errors when reading files in the future.
	if flags.TempDir != "" {
		var f *os.File
		f, err = fsutil.AnonymousFile(flags.TempDir)
		f.Close()

		if err != nil {
			err = fmt.Errorf(
				"Error writing to temporary directory (%q); are you sure it exists "+
					"with the correct permissions?",
				err.Error())
			return
		}
	}

	// Find the current process's UID and GID. If it was invoked as root and the
	// user hasn't explicitly overridden --uid, everything is going to be owned
	// by root. This is probably not what the user wants, so print a warning.
	uid, gid, err := perms.MyUserAndGroup()
	if err != nil {
		err = fmt.Errorf("MyUserAndGroup: %v", err)
		return
	}

	if uid == 0 && flags.Uid < 0 {
		fmt.Fprintln(os.Stderr, `
WARNING: gcsfuse invoked as root. This will cause all files to be owned by
root. If this is not what you intended, invoke gcsfuse as the user that will
be interacting with the file system.
`)
	}

	// Choose UID and GID.
	if flags.Uid >= 0 {
		uid = uint32(flags.Uid)
	}

	if flags.Gid >= 0 {
		gid = uint32(flags.Gid)
	}

	// Set up the bucket.
	status.Println("Opening bucket...")

	bucket, err := setUpBucket(
		ctx,
		flags,
		conn,
		bucketName)

	if err != nil {
		err = fmt.Errorf("setUpBucket: %v", err)
		return
	}

	// Create a file system server.
	serverCfg := &fs.ServerConfig{
		CacheClock:             timeutil.RealClock(),
		Bucket:                 bucket,
		TempDir:                flags.TempDir,
		ImplicitDirectories:    flags.ImplicitDirs,
		InodeAttributeCacheTTL: flags.StatCacheTTL,
		DirTypeCacheTTL:        flags.TypeCacheTTL,
		Uid:                    uid,
		Gid:                    gid,
		FilePerms:              os.FileMode(flags.FileMode),
		DirPerms:               os.FileMode(flags.DirMode),

		AppendThreshold: 1 << 21, // 2 MiB, a total guess.
		TmpObjectPrefix: ".gcsfuse_tmp/",
	}

	server, err := fs.NewServer(serverCfg)
	if err != nil {
		err = fmt.Errorf("fs.NewServer: %v", err)
		return
	}

	// Mount the file system.
	status.Println("Mounting file system...")

	mountCfg := &fuse.MountConfig{
		FSName:      bucket.Name(),
		VolumeName:  bucket.Name(),
		Options:     flags.MountOptions,
		ErrorLogger: log.New(os.Stderr, "fuse: ", log.Flags()),
	}

	if flags.DebugFuse {
		mountCfg.DebugLogger = log.New(os.Stderr, "fuse_debug: ", 0)
	}

	mfs, err = fuse.Mount(mountPoint, server, mountCfg)
	if err != nil {
		err = fmt.Errorf("Mount: %v", err)
		return
	}

	return
}
Example #11
func createFiles(
	dir string,
	numFiles int) (files []*os.File, err error) {
	b := syncutil.NewBundle(context.Background())

	// Create files in parallel, and write them to a channel.
	const parallelism = 128

	var counter uint64
	fileChan := make(chan *os.File)
	var wg sync.WaitGroup

	for i := 0; i < parallelism; i++ {
		wg.Add(1)
		b.Add(func(ctx context.Context) (err error) {
			defer wg.Done()
			for {
				// Should we create another?
				count := atomic.AddUint64(&counter, 1)
				if count > uint64(numFiles) {
					return
				}

				// Create it.
				var f *os.File
				f, err = fsutil.AnonymousFile(dir)
				if err != nil {
					err = fmt.Errorf("AnonymousFile: %v", err)
					return
				}

				// Write it to the channel.
				select {
				case fileChan <- f:
				case <-ctx.Done():
					err = ctx.Err()
					return
				}
			}
		})
	}

	go func() {
		wg.Wait()
		close(fileChan)
	}()

	// Accumulate into the slice.
	b.Add(func(ctx context.Context) (err error) {
		for f := range fileChan {
			files = append(files, f)
		}

		return
	})

	err = b.Join()
	if err != nil {
		closeAll(files)
		files = nil
	}

	return
}
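
The examples above share one pattern: create the anonymous file, copy contents into it, seek back to the start if it will be read, and rely on Close to release its resources (the file has no name on disk). Below is a minimal, self-contained sketch of that pattern; the import path github.com/jacobsa/fsutil is an assumption and should be adjusted to wherever the fsutil package actually lives in your project.

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/jacobsa/fsutil" // assumed import path; adjust to your project
)

func main() {
	// Create an anonymous temporary file. Passing "" uses the system default
	// temporary location; closing the file is enough to clean it up.
	f, err := fsutil.AnonymousFile("")
	if err != nil {
		log.Fatalf("AnonymousFile: %v", err)
	}
	defer f.Close()

	// Copy some contents into the file.
	if _, err := io.Copy(f, strings.NewReader("taco")); err != nil {
		log.Fatalf("Copy: %v", err)
	}

	// Seek back to the start before reading the contents back out.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		log.Fatalf("Seek: %v", err)
	}

	contents, err := io.ReadAll(f)
	if err != nil {
		log.Fatalf("ReadAll: %v", err)
	}

	fmt.Printf("Read back: %q\n", contents)
}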