Example #1
func (f *FS) Mount(volumeName string) error {
	log.Debugf("setting up fuse: volume=%s", volumeName)
	c, err := fuse.Mount(
		f.mountpoint,
		fuse.FSName("libsecret"),
		fuse.Subtype("libsecretfs"),
		fuse.LocalVolume(),
		fuse.VolumeName(volumeName),
	)
	if err != nil {
		return err
	}

	srv := fs.New(c, nil)

	f.server = srv
	f.volumeName = volumeName
	f.conn = c

	go func() {
		// Use a goroutine-local error; reusing the outer err would
		// race with the rest of Mount.
		if err := f.server.Serve(f); err != nil {
			f.errChan <- err
		}
	}()

	// check if the mount process has an error to report
	log.Debug("waiting for mount")
	<-c.Ready
	if err := c.MountError; err != nil {
		return err
	}

	return nil
}
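A caller typically pairs this Mount with an unmount path that drains errChan. A minimal sketch under assumptions: the Shutdown helper below is hypothetical (not part of the snippet's API), it uses the FS fields shown above, and it assumes imports of "time" plus bazil.org/fuse.

// Shutdown is a hypothetical counterpart to Mount: it unmounts the
// volume and surfaces any error the serve goroutine reported.
func (f *FS) Shutdown() error {
	if err := fuse.Unmount(f.mountpoint); err != nil {
		return err
	}
	select {
	case err := <-f.errChan:
		return err
	case <-time.After(5 * time.Second):
		// Serve only reports non-nil errors on errChan, so a quiet
		// channel after unmount means a clean shutdown.
		return nil
	}
}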
Example #2
// Mount makes the contents of the volume visible at the given
// mountpoint. If Mount returns with a nil error, the mount has
// occurred.
func (ref *VolumeRef) Mount(mountpoint string) error {
	ref.app.volumes.Lock()
	defer ref.app.volumes.Unlock()

	// TODO obey `bazil -debug server run`

	if ref.mounted {
		return errors.New("volume already mounted")
	}

	conn, err := fuse.Mount(mountpoint,
		fuse.MaxReadahead(32*1024*1024),
		fuse.AsyncRead(),
	)
	if err != nil {
		return fmt.Errorf("mount fail: %v", err)
	}

	srv := fusefs.New(conn, nil)
	serveErr := make(chan error, 1)
	go func() {
		defer func() {
			// remove map entry on unmount or failed mount
			ref.app.volumes.Lock()
			ref.mounted = false
			ref.conn = nil
			ref.app.volumes.Unlock()
			ref.app.volumes.Broadcast()
			ref.Close()
		}()
		defer conn.Close()
		ref.fs.SetFUSE(srv)
		defer func() {
			ref.fs.SetFUSE(nil)
		}()
		serveErr <- srv.Serve(ref.fs)
	}()

	select {
	case <-conn.Ready:
		if err := conn.MountError; err != nil {
			return fmt.Errorf("mount fail (delayed): %v", err)
		}
		ref.refs++
		ref.mounted = true
		ref.conn = conn
		ref.app.volumes.Broadcast()
		return nil
	case err := <-serveErr:
		// Serve quit early
		if err != nil {
			return fmt.Errorf("filesystem failure: %v", err)
		}
		return errors.New("Serve exited early")
	}
}
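Callers that need to block until the volume is gone can reuse the lock and broadcast that the serve goroutine drives. A minimal sketch; WaitUnmounted is hypothetical and assumes app.volumes also exposes a condition-variable Wait to pair with the Broadcast calls above.

// WaitUnmounted blocks until the serve goroutine has cleared
// ref.mounted and broadcast the change.
func (ref *VolumeRef) WaitUnmounted() {
	ref.app.volumes.Lock()
	defer ref.app.volumes.Unlock()
	for ref.mounted {
		ref.app.volumes.Wait()
	}
}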
Example #3
func (m *mounter) Mount(
	mountPoint string,
	shard *pfsclient.Shard,
	commitMounts []*CommitMount,
	ready chan bool,
	debug bool,
) (retErr error) {
	var once sync.Once
	defer once.Do(func() {
		if ready != nil {
			close(ready)
		}
	})
	name := namePrefix + m.address
	conn, err := fuse.Mount(
		mountPoint,
		fuse.FSName(name),
		fuse.VolumeName(name),
		fuse.Subtype(subtype),
		fuse.AllowOther(),
		fuse.WritebackCache(),
		fuse.MaxReadahead(1<<32-1),
	)
	if err != nil {
		return err
	}
	defer func() {
		if err := conn.Close(); err != nil && retErr == nil {
			retErr = err
		}
	}()

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt)
	go func() {
		<-sigChan
		m.Unmount(mountPoint)
	}()

	once.Do(func() {
		if ready != nil {
			close(ready)
		}
	})
	config := &fs.Config{}
	if debug {
		config.Debug = func(msg interface{}) { lion.Printf("%+v", msg) }
	}
	if err := fs.New(conn, config).Serve(newFilesystem(m.apiClient, shard, commitMounts)); err != nil {
		return err
	}
	<-conn.Ready
	return conn.MountError
}
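Because Mount closes ready exactly once (guarded by the sync.Once) and then blocks serving, a caller can treat it as a synchronization point. A minimal sketch; mountAndWait is illustrative, not part of the snippet's API.

// mountAndWait launches Mount in the background and blocks until the
// ready channel closes. Mount's result (early or late) arrives on the
// returned channel, which the caller should keep monitoring.
func mountAndWait(m *mounter, mountPoint string, shard *pfsclient.Shard) <-chan error {
	ready := make(chan bool)
	errCh := make(chan error, 1)
	go func() {
		errCh <- m.Mount(mountPoint, shard, nil, ready, false)
	}()
	<-ready // closed exactly once, even when Mount fails early
	return errCh
}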
Example #4
// NewConnSrc returns a source of new connections based on Lookups in the
// provided mount directory. If there isn't a directory located at tmpdir one
// is created. The second return parameter can be used to shutdown and release
// any resources. As a result of this shutdown, or during any other fatal
// error, the returned chan will be closed.
//
// The connset parameter is optional.
func NewConnSrc(mountdir, tmpdir string, connset *proxy.ConnSet) (<-chan proxy.Conn, io.Closer, error) {
	if err := os.MkdirAll(tmpdir, 0777); err != nil {
		return nil, nil, err
	}

	// Unmount any stale mount first; the error is too verbose to be
	// useful to print out, so it is deliberately ignored.
	_ = fuse.Unmount(mountdir)
	log.Printf("Mounting %v...", mountdir)
	c, err := fuse.Mount(mountdir, fuse.AllowOther())
	if err != nil {
		return nil, nil, fmt.Errorf("cannot mount %q: %v", mountdir, err)
	}
	log.Printf("Mounted %v", mountdir)

	if connset == nil {
		// Make a dummy one.
		connset = proxy.NewConnSet()
	}
	conns := make(chan proxy.Conn, 1)
	root := &fsRoot{
		tmpDir:  tmpdir,
		linkDir: mountdir,
		dst:     conns,
		links:   make(map[string]symlink),
		closers: []io.Closer{c},
		connset: connset,
	}

	server := fs.New(c, &fs.Config{
		Debug: func(msg interface{}) {
			if false {
				log.Print(msg)
			}
		},
	})

	go func() {
		if err := server.Serve(root); err != nil {
			log.Printf("serve %q exited due to error: %v", mountdir, err)
		}
		// The server exited but we don't know whether this is because of a
		// graceful reason (via root.Close) or via an external force unmounting.
		// Closing the root will ensure the 'dst' chan is closed correctly to
		// signify that no new connections are possible.
		if err := root.Close(); err != nil {
			log.Printf("root.Close() error: %v", err)
		}
		log.Printf("FUSE exited")
	}()

	return conns, root, nil
}
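Consuming the returned channel is straightforward: range over it until it closes, then release resources via the Closer. A minimal sketch; proxyLoop and handleConn are illustrative names, not part of the package.

// proxyLoop drains the connection source until it closes. handleConn
// is assumed to exist and would hand each Conn to the proxy backend.
func proxyLoop(mountdir, tmpdir string) error {
	conns, closer, err := NewConnSrc(mountdir, tmpdir, nil)
	if err != nil {
		return err
	}
	defer closer.Close()
	// The channel closes on shutdown or on any fatal FUSE error.
	for conn := range conns {
		go handleConn(conn)
	}
	return nil
}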
Example #5
func Serve(mountPoint, cgroupDir string) error {
	c, err := fuse.Mount(
		mountPoint,
		fuse.FSName("cgroupfs"),
		fuse.Subtype("cgroupfs"),
		fuse.LocalVolume(),
		fuse.VolumeName("cgroup volume"),
		fuse.AllowOther(),
	)
	if err != nil {
		return err
	}
	defer c.Close()
	go handleStopSignals(mountPoint)

	var srv *fusefs.Server
	if os.Getenv("FUSE_DEBUG") != "" {
		srv = fusefs.New(c, &fusefs.Config{
			Debug: func(msg interface{}) {
				fmt.Printf("%s\n", msg)
			},
		})
	} else {
		srv = fusefs.New(c, nil)
	}

	err = srv.Serve(fs.FS{cgroupDir})
	if err != nil {
		return err
	}

	// check if the mount process has an error to report
	<-c.Ready
	if err := c.MountError; err != nil {
		return err
	}

	return nil
}
Example #6
// Serve FS. Will block.
func (f *FS) Serve(ctx context.Context) error {
	srv := fs.New(f.conn, &fs.Config{
		WithContext: func(ctx context.Context, _ fuse.Request) context.Context {
			return f.WithContext(ctx)
		},
	})
	f.fuse = srv

	f.LaunchNotificationProcessor(ctx)
	f.remoteStatus.Init(ctx, f.log, f.config)
	// Blocks forever, unless an interrupt signal is received
	// (handled by libkbfs.Init).
	return srv.Serve(f)
}
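Since Serve blocks, a caller that wants the mount to follow a context's lifetime can unmount on cancellation, which tears down the kernel connection and makes Serve return. A minimal sketch; serveUntilCancel and its mountpoint argument are illustrative.

// serveUntilCancel runs Serve in the background and unmounts when ctx
// is canceled.
func serveUntilCancel(ctx context.Context, f *FS, mountpoint string) error {
	errCh := make(chan error, 1)
	go func() { errCh <- f.Serve(ctx) }()
	select {
	case <-ctx.Done():
		if err := fuse.Unmount(mountpoint); err != nil {
			return err
		}
		return <-errCh
	case err := <-errCh:
		return err
	}
}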
Example #7
// Serve FS. Will block.
func (f *FS) Serve(ctx context.Context) error {
	srv := fs.New(f.conn, &fs.Config{
		GetContext: func() context.Context {
			return ctx
		},
	})
	f.fuse = srv

	f.launchNotificationProcessor(ctx)

	// Blocks forever, unless an interrupt signal is received
	// (handled by libkbfs.Init).
	return srv.Serve(f)
}
Example #8
// NewMount mounts a fuse endpoint at `mountpoint` retrieving data from `store`.
func NewMount(store *store.Store, mountpoint string) (*Mount, error) {
	conn, err := fuse.Mount(
		mountpoint,
		fuse.FSName("brigfs"),
		fuse.Subtype("brig"),
		fuse.AllowNonEmptyMount(),
	)

	if err != nil {
		return nil, err
	}

	filesys := &Filesystem{Store: store}

	mnt := &Mount{
		Conn:   conn,
		Server: fs.New(conn, nil),
		FS:     filesys,
		Dir:    mountpoint,
		Store:  store,
		done:   make(chan util.Empty),
		errors: make(chan error),
	}

	go func() {
		defer close(mnt.done)
		log.Debugf("Serving FUSE at %v", mountpoint)
		mnt.errors <- mnt.Server.Serve(filesys)
		mnt.done <- util.Empty{}
		log.Debugf("Stopped serving FUSE at %v", mountpoint)
	}()

	select {
	case <-mnt.Conn.Ready:
		if err := mnt.Conn.MountError; err != nil {
			return nil, err
		}
	case err = <-mnt.errors:
		// Serve quit early
		if err != nil {
			return nil, err
		}
		return nil, errors.New("Serve exited early")
	}

	return mnt, nil
}
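Unmounting mirrors the setup: detach the kernel mount, then drain the channels the serve goroutine writes to. A minimal sketch; this Close helper is hypothetical, not part of the snippet's Mount API.

// Close unmounts and waits for the serve goroutine to finish.
func (m *Mount) Close() error {
	if err := fuse.Unmount(m.Dir); err != nil {
		return err
	}
	// Serve's result arrives on m.errors; the goroutine then signals
	// m.done before closing it.
	err := <-m.errors
	<-m.done
	if cerr := m.Conn.Close(); err == nil {
		err = cerr
	}
	return err
}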
Example #9
func main() {
	flag.Usage = usage
	flag.Parse()

	if flag.NArg() != 1 {
		usage()
		os.Exit(2)
	}
	mountpoint := flag.Arg(0)

	c, err := fuse.Mount(
		mountpoint,
		fuse.FSName("clock"),
		fuse.Subtype("clockfsfs"),
		fuse.LocalVolume(),
		fuse.VolumeName("Clock filesystem"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	srv := fs.New(c, nil)
	filesys := &FS{
		// We pre-create the clock node so that it's always the same
		// object returned from all the Lookups. You could carefully
		// track its lifetime between Lookup&Forget, and have the
		// ticking & invalidation happen only when active, but let's
		// keep this example simple.
		clockFile: &File{
			fuse: srv,
		},
	}
	filesys.clockFile.tick()
	// This goroutine never exits. That's fine for this example.
	go filesys.clockFile.update()
	if err := srv.Serve(filesys); err != nil {
		log.Fatal(err)
	}

	// Check if the mount process has an error to report.
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}
}
Example #10
func main() {
	flag.Usage = usage
	flag.Parse()

	if flag.NArg() < 2 {
		usage()
		os.Exit(2)
	}
	origin := "http://" + flag.Arg(0) + "/"
	url := "ws://" + flag.Arg(0) + "/fs"
	mountpoint := flag.Arg(1)

	c, err := fuse.Mount(
		mountpoint,
		fuse.FSName(url),
		fuse.Subtype("simplewebfs"),
		fuse.LocalVolume(),
		fuse.VolumeName("Hello world!"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	con := simplewebfs.New(url, origin)

	srv := fs.New(c, nil)

	filesys := &FS{
		c: &con,
	}
	filesys.cache = make(map[string]fileserver.RPCDirlistingReply)
	err = srv.Serve(filesys)
	if err != nil {
		log.Fatal(err)
	}

	// check if the mount process has an error to report
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}
}
Example #11
func run(mountpoint string) error {
	c, err := fuse.Mount(
		mountpoint,
		fuse.FSName("clock"),
		fuse.Subtype("clockfsfs"),
		fuse.LocalVolume(),
		fuse.VolumeName("Clock filesystem"),
	)
	if err != nil {
		return err
	}
	defer c.Close()

	if p := c.Protocol(); !p.HasInvalidate() {
		return fmt.Errorf("kernel FUSE support is too old to have invalidations: version %v", p)
	}

	srv := fs.New(c, nil)
	filesys := &FS{
		// We pre-create the clock node so that it's always the same
		// object returned from all the Lookups. You could carefully
		// track its lifetime between Lookup&Forget, and have the
		// ticking & invalidation happen only when active, but let's
		// keep this example simple.
		clockFile: &File{
			fuse: srv,
		},
	}
	filesys.clockFile.tick()
	// This goroutine never exits. That's fine for this example.
	go filesys.clockFile.update()
	if err := srv.Serve(filesys); err != nil {
		return err
	}

	// Check if the mount process has an error to report.
	<-c.Ready
	if err := c.MountError; err != nil {
		return err
	}
	return nil
}
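The invalidation requirement exists because the clock file rewrites its content every second and pushes the change to the kernel rather than waiting for a re-read. A sketch of what tick and update could look like, assuming File implements fs.Node and stores its content in an atomic.Value next to the fuse server handle shown above (the bodies are illustrative):

// tick refreshes the content and invalidates the kernel's cached copy.
func (f *File) tick() {
	f.content.Store(time.Now().Format(time.RFC3339) + "\n")
	if err := f.fuse.InvalidateNodeData(f); err != nil && err != fuse.ErrNotCached {
		log.Printf("invalidate error: %v", err)
	}
}

// update ticks once per second, forever.
func (f *File) update() {
	t := time.NewTicker(time.Second)
	defer t.Stop()
	for range t.C {
		f.tick()
	}
}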
Example #12
func main() {
	flag.Usage = usage
	flag.Parse()

	if *mount == "" || *mirror == "" {
		usage()
		os.Exit(2)
	}

	c, err := fuse.Mount(
		*mount,
		fuse.FSName("mirrorfs"),
		fuse.Subtype("mirrorfs"),
		fuse.VolumeName("Mirror FS"),
		// fuse.LocalVolume(),
		fuse.AllowOther(),
	)

	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	cfg := &fs.Config{}
	if *debug {
		cfg.Debug = debugLog
	}
	srv := fs.New(c, cfg)
	filesys := mirrorfs.NewMirrorFS(*mirror)

	if err := srv.Serve(filesys); err != nil {
		log.Fatal(err)
	}

	// Check if the mount process has an error to report.
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}
}
Example #13
// MountedFunc mounts a filesystem at a temporary directory. The
// filesystem used is constructed by calling a function, to allow
// storing fuse.Conn and fs.Server in the FS.
//
// It also waits until the filesystem is known to be visible (OS X
// workaround).
//
// After successful return, caller must clean up by calling Close.
func MountedFunc(fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
	dir, err := ioutil.TempDir("", "fusetest")
	if err != nil {
		return nil, err
	}
	c, err := fuse.Mount(dir, options...)
	if err != nil {
		return nil, err
	}
	server := fs.New(c, conf)
	done := make(chan struct{})
	serveErr := make(chan error, 1)
	mnt := &Mount{
		Dir:    dir,
		Conn:   c,
		Server: server,
		Error:  serveErr,
		done:   done,
	}
	filesys := fn(mnt)
	go func() {
		defer close(done)
		serveErr <- server.Serve(filesys)
	}()

	select {
	case <-mnt.Conn.Ready:
		if err := mnt.Conn.MountError; err != nil {
			return nil, err
		}
		return mnt, nil
	case err = <-mnt.Error:
		// Serve quit early
		if err != nil {
			return nil, err
		}
		return nil, errors.New("Serve exited early")
	}
}
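In a test this collapses the mount boilerplate to one call. A minimal sketch, assuming a myFS type that implements fs.FS; per the doc comment above, the caller cleans up with Close.

func TestWithMountedFunc(t *testing.T) {
	mnt, err := MountedFunc(func(m *Mount) fs.FS {
		// The callback sees the Mount before Serve starts, so the
		// filesystem can keep a handle on the server for later use
		// (e.g. invalidations).
		return &myFS{server: m.Server}
	}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()
	// Exercise the filesystem through mnt.Dir here.
}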
Example #14
func main() {
	flag.Usage = usage
	flag.Parse()

	if *mount == "" || *folder == "" {
		usage()
		os.Exit(2)
	}

	c, err := fuse.Mount(
		*mount,
		fuse.FSName("logfs"),
		fuse.Subtype("logfs"),
		fuse.VolumeName("Log FS"),
		//fuse.AllowOther(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	cfg := &fs.Config{}
	srv := fs.New(c, cfg)
	filesys := logfs.NewLogFS(*folder)

	if err := srv.Serve(filesys); err != nil {
		log.Fatal(err)
	}

	// Verify mount error
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}

	log.Println("Log FS unmounted")
}
Example #15
func main() {
	allowOther := flag.Bool("allow-other", false, "allow all users access to the filesystem")
	allowRoot := flag.Bool("allow-root", false, "allow root to access the filesystem")
	debug := flag.Bool("debug", false, "enable debug output")
	gid := flag.Int("gid", os.Getgid(), "set the GID that should own all files")
	perm := flag.Int("perm", 0, "set the file permission flags for all files")
	ro := flag.Bool("ro", false, "mount the filesystem read-only")
	root := flag.String("root", "", "path in Consul to the root of the filesystem")
	timeout := flag.String("timeout", defaultTimeout, "timeout for Consul requests")
	uid := flag.Int("uid", os.Getuid(), "set the UID that should own all files")
	flag.Parse()

	logger := logrus.New()
	if *debug {
		logger.Level = logrus.DebugLevel
	}

	consulConfig := &consul.Config{}
	var mountPoint string
	switch flag.NArg() {
	case 1:
		mountPoint = flag.Arg(0)
	case 2:
		consulConfig.Address = flag.Arg(0)
		mountPoint = flag.Arg(1)
	default:
		flag.Usage()
		os.Exit(2)
	}

	// Initialize a Consul client. TODO: connection parameters
	client, err := consul.NewClient(consulConfig)
	if err != nil {
		logrus.NewEntry(logger).WithError(err).Error("could not initialize consul")
		os.Exit(1)
	}

	// Configure some mount options
	timeoutDuration, err := time.ParseDuration(*timeout)
	if err != nil {
		logrus.NewEntry(logger).WithError(err).Fatal("invalid -timeout value")
	}
	mountOptions := []fuse.MountOption{
		fuse.DefaultPermissions(),
		fuse.DaemonTimeout(fmt.Sprint(int64(timeoutDuration.Seconds() + 1))),
		fuse.NoAppleDouble(),
		fuse.NoAppleXattr(),
	}
	if *allowOther {
		mountOptions = append(mountOptions, fuse.AllowOther())
	}
	if *allowRoot {
		mountOptions = append(mountOptions, fuse.AllowRoot())
	}
	if *ro {
		mountOptions = append(mountOptions, fuse.ReadOnly())
	}

	// Mount the file system to start receiving FS events at the mount point.
	logger.WithField("location", mountPoint).Info("mounting kvfs")
	conn, err := fuse.Mount(mountPoint, mountOptions...)
	if err != nil {
		logrus.NewEntry(logger).WithError(err).Fatal("error mounting kvfs")
	}
	defer conn.Close()

	// Try to cleanly unmount the FS if SIGINT or SIGTERM is received
	sigs := make(chan os.Signal, 10)
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
	go func() {
		for sig := range sigs {
			logger.WithField("signal", sig).Info("attempting to unmount")
			err := fuse.Unmount(mountPoint)
			if err != nil {
				logrus.NewEntry(logger).WithError(err).Error("cannot unmount")
			}
		}
	}()

	// Create a file system object and start handing its requests
	server := fs.New(conn, &fs.Config{
		Debug: func(m interface{}) { logger.Debug(m) },
		WithContext: func(ctx context.Context, req fuse.Request) context.Context {
			// The returned cancel function doesn't matter: the request handler will
			// cancel the parent context at the end of the request.
			newCtx, _ := context.WithTimeout(ctx, timeoutDuration)
			return newCtx
		},
	})
	f := &consulfs.ConsulFS{
		Consul: &consulfs.CancelConsulKV{
			Client: client,
			Logger: logger,
		},
		Logger:   logger,
		UID:      uint32(*uid),
		GID:      uint32(*gid),
		Perms:    os.FileMode(*perm),
		RootPath: *root,
	}
	err = server.Serve(f)
	if err != nil {
		// Not sure what would cause Serve() to exit with an error
		logrus.NewEntry(logger).WithError(err).Error("error serving filesystem")
	}

	// Wait for the FUSE connection to end
	<-conn.Ready
	if conn.MountError != nil {
		logrus.NewEntry(logger).WithError(conn.MountError).Error("unmount error")
		os.Exit(1)
	} else {
		logger.Info("file system exiting normally")
	}
}
Example #16
func (this *Mount) Run(args []string) (exitCode int) {
	cmdFlags := flag.NewFlagSet("mount", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&this.zone, "z", ctx.ZkDefaultZone(), "")
	cmdFlags.StringVar(&this.cluster, "c", "", "")
	cmdFlags.StringVar(&this.logLevel, "l", "info", "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if validateArgs(this, this.Ui).
		on("-z", "-c").
		invalid(args) {
		return 2
	}

	this.mountPoint = args[len(args)-1]
	if !strings.HasPrefix(this.mountPoint, "/") {
		this.Ui.Error("mount point must start with /")
		return 1
	}

	setupLogging("stdout", this.logLevel, "")

	c, err := fuse.Mount(
		this.mountPoint,
		fuse.FSName("kfs"),
		fuse.Subtype("kfs"),
		fuse.VolumeName("Kafka FS"),
		fuse.ReadOnly(),
		fuse.AllowOther(),
	)
	if err != nil {
		log.Critical(err)
		return 1
	}

	signal.RegisterHandler(func(sig os.Signal) {
		var err error
		for i := 0; i < 5; i++ {
			err = fuse.Unmount(this.mountPoint)
			if err == nil {
				break
			}

			log.Warn(err)
			time.Sleep(time.Second * 5)
		}

		if err == nil {
			log.Info("Kafka FS unmounted")
		} else {
			log.Error("Kafka FS unable to umount")
		}

		c.Close()
		os.Exit(0)
	}, syscall.SIGINT, syscall.SIGTERM)

	srv := fs.New(c, &fs.Config{})
	// Named kafkaFS to avoid shadowing the imported fs package.
	kafkaFS := kfs.New(this.zone, this.cluster)

	if err := srv.Serve(kafkaFS); err != nil {
		log.Error(err)
	}

	<-c.Ready
	if err := c.MountError; err != nil {
		log.Error(err)
	}

	return
}
Example #17
func main() {
	flag.Usage = Usage
	flag.Parse()

	logging.SetFormatter(format)
	if *debug {
		logging.SetLevel(logging.DEBUG, "gocfs")
	} else {
		logging.SetLevel(logging.ERROR, "gocfs")
	}

	if flag.NArg() != 1 {
		Usage()
		os.Exit(2)
	}
	mountpoint := flag.Arg(0)

	if *daemon {
		var args = make([]string, 0)
		if *ro {
			args = append(args, "-ro")
		}
		args = append(args, "-db", *cassandra)
		if *debug {
			args = append(args, "-v")
		}
		args = append(args, flag.Args()...)
		cmd := exec.Command(os.Args[0], args...)
		if err := cmd.Start(); err != nil {
			log.Fatal(err)
		}
		return
	}

	var mountOptions = make([]fuse.MountOption, 0)

	mountOptions = append(mountOptions, fuse.FSName("gocfs"))
	mountOptions = append(mountOptions, fuse.Subtype("gocfs"))
	mountOptions = append(mountOptions, fuse.VolumeName("cassandra"))
	//mountOptions = append(mountOptions, fuse.UseIno())
	//mountOptions = append(mountOptions, fuse.WritebackCache())
	//mountOptions = append(mountOptions, fuse.MaxReadahead(0))
	if *ro {
		mountOptions = append(mountOptions, fuse.ReadOnly())
	}

	c, err := fuse.Mount(
		mountpoint,
		mountOptions...,
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	var consistencyLevel gocql.Consistency
	switch *consistency {
	case "quorum":
		consistencyLevel = gocql.Quorum
	case "one":
		consistencyLevel = gocql.One
	case "all":
		consistencyLevel = gocql.All
	default:
		log.Fatalf("Unsupported consistency level: %s", *consistency)
	}

	cluster := gocql.NewCluster(*cassandra)
	cluster.DiscoverHosts = true
	cluster.Timeout = 2 * time.Second
	cluster.Consistency = consistencyLevel

	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	if err := initStorage(session); err != nil {
		log.Fatal(err)
	}

	server := fs.New(c, &fs.Config{
		Debug: debugFs,
	})

	err = server.Serve(FS{
		Session:   session,
		Server:    server,
		Conn:      c,
		blockSize: 65536,
		id2path:   make(map[uint64]string),
		path2id:   make(map[string]uint64),
		nextId:    0,
	})
	if err != nil {
		log.Fatal(err)
	}

	// check if the mount process has an error to report
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}
}
Example #18
File: mount.go Project: ovh/svfs
		// Mount SVFS
		c, err := fuse.Mount(mountpoint, mountOptions(device)...)
		if err != nil {
			goto Err
		}

		defer c.Close()

		// Initialize SVFS
		if err = fs.Init(); err != nil {
			goto Err
		}

		// Serve SVFS
		srv = fusefs.New(c, nil)
		if err = srv.Serve(&fs); err != nil {
			goto Err
		}

		// Check for mount errors
		<-c.Ready

		// Memory profiling
		if memProf != "" {
			createMemProf(memProf)
		}

		if err = c.MountError; err != nil {
			goto Err
		}
Example #19
func run() error {
	flag.Usage = usage
	flag.Parse()

	if *debug {
		fuse.Debug = func(v interface{}) { log.Println("[fuse]", v) }
	}

	var subdir, mountpoint string
	switch flag.NArg() {
	case 1:
		subdir = "/"
		mountpoint = flag.Arg(0)
	case 2:
		subdir = path.Join("/", flag.Arg(0))
		mountpoint = flag.Arg(1)
	default:
		usage()
		os.Exit(1)
	}

	var endpoints []string
	if ep := os.Getenv("ETCD_ENDPOINTS"); ep != "" {
		endpoints = strings.Split(ep, ",")
	} else {
		endpoints = []string{"localhost:4001"}
	}
	log.Printf("Using endpoints %v", endpoints)

	cfg := client.Config{
		Endpoints: endpoints,
	}

	etcd, err := client.New(cfg)
	if err != nil {
		return err
	}

	var mountOpts []fuse.MountOption

	if *allowOther {
		mountOpts = append(mountOpts, fuse.AllowOther())
	}
	if *allowRoot {
		mountOpts = append(mountOpts, fuse.AllowRoot())
	}
	mountOpts = append(mountOpts, fuse.DefaultPermissions())
	mountOpts = append(mountOpts, fuse.FSName("etcd:"+subdir))
	mountOpts = append(mountOpts, fuse.ReadOnly())
	mountOpts = append(mountOpts, fuse.Subtype("etcdFS"))

	log.Printf("Mounting etcd:%s to %s", subdir, mountpoint)
	c, err := fuse.Mount(
		mountpoint,
		mountOpts...,
	)
	if err != nil {
		return err
	}
	defer c.Close()

	srv := fs.New(c, nil)
	filesys := &etcdFS{
		etcd: client.NewKeysAPI(etcd),
		base: subdir,
	}

	// Buffered so the serve goroutine never blocks on send.
	errch := make(chan error, 1)

	log.Printf("Start serving")
	go func() {
		errch <- srv.Serve(filesys)
	}()

	<-c.Ready
	if c.MountError != nil {
		return c.MountError
	}

	// signal.Notify requires a buffered channel; SIGKILL cannot be caught.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

	select {
	case err := <-errch:
		return err
	case s := <-sigs:
		log.Printf("Caught signal: %v", s)
		err := c.Close()
		log.Printf("Error: %v", err)
		return err
	}
}
Example #20
// Unmount without first killing the FUSE connection while there are FUSE
// operations blocked inside the filesystem code.
func TestUnmountWedged(t *testing.T) {
	layout, err := newGreetingLayout()
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := layout.Destroy()
		if err != nil {
			t.Log(err)
		}
	}()
	client, err := torrent.NewClient(&torrent.Config{
		DataDir:         filepath.Join(layout.BaseDir, "incomplete"),
		DisableTrackers: true,
		NoDHT:           true,
		ListenAddr:      "redonk",
		DisableTCP:      true,
		DisableUTP:      true,

		NoDefaultBlocklist: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
	client.AddTorrent(layout.Metainfo)
	fs := New(client)
	fuseConn, err := fuse.Mount(layout.MountDir)
	if err != nil {
		msg := fmt.Sprintf("error mounting: %s", err)
		if strings.Contains(err.Error(), "fuse") || err.Error() == "exit status 71" {
			t.Skip(msg)
		}
		t.Fatal(msg)
	}
	go func() {
		server := fusefs.New(fuseConn, &fusefs.Config{
			Debug: func(msg interface{}) {
				t.Log(msg)
			},
		})
		server.Serve(fs)
	}()
	<-fuseConn.Ready
	if err := fuseConn.MountError; err != nil {
		t.Fatalf("mount error: %s", err)
	}
	// Read the greeting file, though it will never be available. This should
	// "wedge" FUSE, requiring the fs object to be forcibly destroyed. The
	// read call will return with a FS error.
	go func() {
		_, err := ioutil.ReadFile(filepath.Join(layout.MountDir, layout.Metainfo.Info.Name))
		if err == nil {
			t.Fatal("expected error reading greeting")
		}
	}()

	// Wait until the read has blocked inside the filesystem code.
	fs.mu.Lock()
	for fs.blockedReads != 1 {
		fs.event.Wait()
	}
	fs.mu.Unlock()

	fs.Destroy()

	for {
		err = fuse.Unmount(layout.MountDir)
		if err != nil {
			t.Logf("error unmounting: %s", err)
			time.Sleep(time.Millisecond)
		} else {
			break
		}
	}

	err = fuseConn.Close()
	if err != nil {
		t.Fatalf("error closing fuse conn: %s", err)
	}
}