Example #1
func main() {
	// Set up channel on which to send signal notifications.
	// We must use a buffered channel or risk missing the signal
	// if we're not ready to receive when the signal is sent.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt, os.Kill)

	// var Usage = func() {
	// 	fmt.Fprintf(os.Stderr, "Usage %s [options]\n", os.Args[0])
	// 	flag.PrintDefaults()
	// }

	flag.Parse()

	_, err := os.Lstat(*ipfsMountPoint)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n%s does not exist, can't start.\nPlease use the ipfs command line to mount it\n", err, *ipfsMountPoint)
		os.Exit(1)
	}

	d := newIPFSDriver(*ipfsMountPoint)
	h := volume.NewHandler(d)
	go func() {
		if err := h.ServeUnix("root", "ipfs"); err != nil {
			fmt.Println(err)
		}
	}()
	cmd := startIPFSDaemon()
	cmd.Wait()
}
Example #2
func main() {
	var Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [options] url\n", os.Args[0])
		flag.PrintDefaults()
	}

	flag.Parse()
	if flag.NArg() != 1 {
		Usage()
		os.Exit(1)
	}

	config := keywhizConfig{
		ServerURL:      flag.Args()[0],
		CertFile:       *certFile,
		KeyFile:        *keyFile,
		CaFile:         *caFile,
		User:           *user,
		Group:          *group,
		Ping:           *ping,
		Debug:          *debug,
		TimeoutSeconds: time.Duration(*timeoutSeconds) * time.Second,
	}

	lockMemory(config.Debug)

	d := newKeywhizDriver(*root, config)
	h := volume.NewHandler(d)
	fmt.Printf("Listening on %s\n", socketAddress)
	fmt.Println(h.ServeUnix("root", socketAddress))
}
Example #3
func main() {
	flag.Var(&flAddrs, "addr", "List of addresses to the KV store")
	flag.Parse()

	if len(flAddrs) == 0 {
		logrus.Fatal("need at least one addr to connect to kv store")
	}

	if *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}

	if _, err := os.Stat(*flHome); err != nil {
		if !os.IsNotExist(err) {
			logrus.Fatal(err)
		}
		logrus.Debugf("created home dir at %s", *flHome)
		if err := os.MkdirAll(*flHome, 0700); err != nil {
			logrus.Fatal(err)
		}
	}

	kvfs := newDriver(*flStore, *flHome, flAddrs.GetAll())

	signal.Trap(func() {
		kvfs.cleanup()
	})

	h := volume.NewHandler(kvfs)
	if err := h.ServeUnix("root", *flListen); err != nil {
		logrus.Fatal(err)
	}
}
Example #4
func main() {
	quobyteMountPath := flag.String("path", "/run/docker/quobyte/mnt", "Path where Quobyte is mounted on the host")
	quobyteMountOptions := flag.String("options", "-o user_xattr", "Fuse options to be used when Quobyte is mounted")

	quobyteUser := flag.String("user", "root", "User to connect to the Quobyte API server")
	quobytePassword := flag.String("password", "quobyte", "Password for the user to connect to the Quobyte API server")
	quobyteAPIURL := flag.String("api", "localhost:7860", "URL to the API server(s) in the form host[:port][,host:port] or SRV record name")
	quobyteRegistry := flag.String("registry", "localhost:7861", "URL to the registry server(s) in the form of host[:port][,host:port] or SRV record name")

	group := flag.String("group", "root", "Group to create the unix socket")
	showVersion := flag.Bool("version", false, "Shows version string")
	flag.Parse()

	if *showVersion {
		log.Printf("Version: %s - Revision: %s\n", version, revision)
		return
	}

	if err := os.MkdirAll(*quobyteMountPath, 0555); err != nil {
		log.Println(err.Error())
	}

	if !isMounted(*quobyteMountPath) {
		log.Printf("Mounting Quobyte namespace in %s", *quobyteMountPath)
		mountAll(*quobyteMountOptions, *quobyteRegistry, *quobyteMountPath)
	}

	qDriver := newQuobyteDriver(*quobyteAPIURL, *quobyteUser, *quobytePassword, *quobyteMountPath)
	handler := volume.NewHandler(qDriver)
	log.Println(handler.ServeUnix(*group, quobyteID))
}
Example #5
func main() {
	flag.Parse()

	d := newSshfsDriver(*root)
	h := volume.NewHandler(d)
	fmt.Printf("listening on %s\n", socketAddress)
	fmt.Println(h.ServeUnix("root", socketAddress))
}
Example #6
func Start(cfgFile string, debug bool) {
	if debug {
		log.SetLevel(log.DebugLevel)
	} else {
		log.SetLevel(log.InfoLevel)
	}
	d := New(cfgFile)
	h := volume.NewHandler(d)
	log.Info(h.ServeUnix("root", "solidfire"))
}
Example #7
func main() {
	debug := os.Getenv("DEBUG")
	if ok, _ := strconv.ParseBool(debug); ok {
		logrus.SetLevel(logrus.DebugLevel)
	}

	d := newSshfsDriver(filepath.Join("/mnt", sshfsID))
	h := volume.NewHandler(d)
	logrus.Infof("listening on %s", socketAddress)
	logrus.Error(h.ServeUnix("", socketAddress))
}
Example #8
func main() {

	// Request handler with a driver implementation
	log.Printf("[Init] INFO volume root is %s\n", *volRoot)
	d := initDriver(*volRoot, *defPool, *defFsType, *defSize)
	h := dkvolume.NewHandler(&d)

	// Listen for requests on a unix socket:
	log.Printf("[Init] INFO listening on %s\n", socket)
	fmt.Println(h.ServeUnix("", socket))
}
Example #9
func start(dt drivers.DriverType, driver volume.Driver) {
	h := volume.NewHandler(driver)
	if isTCPEnabled() {
		addr := os.Getenv(EnvTCPAddr)
		if addr == "" {
			addr, _ = rootCmd.PersistentFlags().GetString(PortFlag)
		}
		fmt.Println(h.ServeTCP(dt.String(), addr, nil))
	} else {
		fmt.Println(h.ServeUnix("", dt.String()))
	}
}
Example #10
func docker(cmd *cobra.Command, args []string) {
	config := driver.DriverConfig{
		Consistency: viper.GetString("consistency"),
		Keyspace:    viper.GetString("keyspace"),
		Server:      viper.GetString("server"),
		StateDir:    viper.GetString("statedir"),
		VolumeDir:   viper.GetString("voldir"),
	}
	driver := driver.NewCassFsDriver(&config)
	if driver == nil {
		panic("Got nil back for driver")
	}
	handler := volume.NewHandler(driver)
	fmt.Println(handler.ServeUnix("root", "cassfs"))
}
Example #11
func main() {
	flag.Parse()

	if *help {
		usage(0)
	}

	// Fill in the default volume options
	var opts volumeOptions

	if err := opts.setSize(*size); err != nil {
		logrus.Fatalf(err.Error())
	}
	if err := opts.setMode(*mode); err != nil {
		logrus.Fatalf(err.Error())
	}
	if err := opts.setCLog(*clog); err != nil {
		logrus.Fatalf(err.Error())
	}
	if err := opts.setTier(*tier); err != nil {
		logrus.Fatalf(err.Error())
	}

	// Set log level
	if *debug {
		if *quiet {
			logrus.Fatalf("Flags 'debug' and 'quiet' are mutually exclusive")
		}
		logrus.SetLevel(logrus.DebugLevel)
		ploop.SetVerboseLevel(ploop.Timestamps)
		logrus.Debugf("Debug logging enabled")
	}
	if *quiet {
		logrus.SetOutput(os.Stderr)
		logrus.SetLevel(logrus.ErrorLevel)
		ploop.SetVerboseLevel(ploop.NoStdout)
	}

	// Let's run!
	d := newPloopDriver(*home, &opts)
	h := volume.NewHandler(d)
	e := h.ServeUnix("root", "ploop")
	if e != nil {
		logrus.Fatalf("Failed to initialize: %s", e)
	}
}
Example #12
func main() {
	var Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0])
		flag.PrintDefaults()
	}

	flag.Parse()
	if len(*serversList) == 0 {
		Usage()
		os.Exit(1)
	}

	servers := strings.Split(*serversList, ":")

	d := newGlusterfsDriver(*root, *restAddress, *gfsBase, servers)
	h := volume.NewHandler(d)
	fmt.Println(h.ServeUnix("root", "glusterfs"))
}
Example #13
func main() {
	var Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0])
		flag.PrintDefaults()
	}

	flag.Parse()
	if *url == "" || *token == "" {
		Usage()
		os.Exit(1)
	}

	lockMemory()

	vault.DefaultConfig = &api.Config{Address: *url, HttpClient: http.DefaultClient}
	d := newDriver(*root, *token)
	h := volume.NewHandler(d)
	fmt.Println(h.ServeUnix("root", "vault"))
}
Example #14
func TestMain(m *testing.M) {
	flag.Parse()
	cephConf := os.Getenv("CEPH_CONF")

	testDriver = newCephRBDVolumeDriver(
		"test",
		"",
		"admin",
		"rbd",
		dkvolume.DefaultDockerRootDirectory,
		cephConf,
		false,
	)
	handler := dkvolume.NewHandler(testDriver)
	// Serve won't return so spin off routine
	go handler.ServeUnix("", TEST_SOCKET_PATH)

	// Run the tests, then shut the driver down explicitly before exiting;
	// os.Exit would skip a deferred shutdown.
	code := m.Run()
	testDriver.shutdown()
	os.Exit(code)
}
Example #15
func main() {
	showVersion := flag.Bool("version", false, "Display version number of plugin and exit")
	cfgFile := flag.String("config", "/var/lib/cinder/dockerdriver/config.json", "path to config file")
	debug := flag.Bool("debug", true, "enable debug logging")
	// Define all flags before the single call to flag.Parse(); flags registered
	// after Parse would be rejected as unknown on the command line.
	flag.Parse()

	if *showVersion {
		fmt.Println("Version: ", VERSION)
		os.Exit(0)
	}

	if *debug {
		log.SetLevel(log.DebugLevel)
	} else {
		log.SetLevel(log.InfoLevel)
	}
	log.Info("Starting cinder-docker-driver version: ", VERSION)
	d := New(*cfgFile)
	h := volume.NewHandler(d)
	log.Info(h.ServeUnix("root", "cinder"))
}
Example #16
func main() {

	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)

	// Request handler with a driver implementation
	log.Printf("[Init] INFO volume root is %s\n", *volRoot)
	d := initDriver(*volRoot, *defPool, *defFsType, *defSize)
	h := volume.NewHandler(&d)

	// Listen for requests on a unix socket. Serve in a goroutine so the
	// termination signal below can actually be handled (ServeUnix blocks,
	// and signal.Notify suppresses the default signal behavior).
	log.Printf("[Init] INFO listening on %s\n", socket)
	go func() {
		fmt.Println(h.ServeUnix("", socket))
	}()

	// Wait for termination
	<-sigc
	os.Exit(0)
}
Example #17
// main for docker-volume-vsphere
// Parses flags, initializes and mounts refcounters and finally services Docker requests
func main() {
	// connect to this socket
	port := flag.Int("port", 1019, "Default port for vmci")
	useMockEsx := flag.Bool("mock_esx", false, "Mock the ESX server")
	logLevel := flag.String("log_level", "info", "Logging Level")
	configFile := flag.String("config", config.DefaultConfigPath, "Configuration file path")
	flag.Parse()

	vmdkops.EsxPort = *port

	logInit(logLevel, nil, configFile)

	log.WithFields(log.Fields{
		"version":   version,
		"port":      vmdkops.EsxPort,
		"mock_esx":  *useMockEsx,
		"log_level": *logLevel,
		"config":    *configFile,
	}).Info("Docker VMDK plugin started ")

	sigChannel := make(chan os.Signal, 1)
	signal.Notify(sigChannel, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		sig := <-sigChannel
		log.WithFields(log.Fields{"signal": sig}).Warning("Received signal ")
		os.Remove(fullSocketAddress(vmdkPluginID))
		os.Exit(0)
	}()

	driver := newVmdkDriver(*useMockEsx)
	handler := volume.NewHandler(driver)

	log.WithFields(log.Fields{
		"address": fullSocketAddress(vmdkPluginID),
	}).Info("Going into ServeUnix - Listening on Unix socket ")

	log.Info(handler.ServeUnix("root", fullSocketAddress(vmdkPluginID)))
}
Example #18
func main() {

	flag.Parse()

	if *flVersion {
		fmt.Fprint(os.Stdout, "docker lvm plugin version: 1.0\n")
		return
	}

	if *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}

	if _, err := os.Stat(lvmHome); err != nil {
		if !os.IsNotExist(err) {
			logrus.Fatal(err)
		}
		logrus.Debugf("Created home dir at %s", lvmHome)
		if err := os.MkdirAll(lvmHome, 0700); err != nil {
			logrus.Fatal(err)
		}
	}

	lvm := newDriver(lvmHome, vgConfigPath)

	// Call loadFromDisk only if config file exists.
	if _, err := os.Stat(lvmVolumesConfigPath); err == nil {
		if err := loadFromDisk(lvm); err != nil {
			logrus.Fatal(err)
		}
	}

	h := volume.NewHandler(lvm)
	if err := h.ServeUnix("root", lvmPluginSocketPath); err != nil {
		logrus.Fatal(err)
	}
}
Example #19
		})

		logrus.WithFields(logrus.Fields{
			"root":     args[0],
			"address":  viper.GetString("address"),
			"insecure": viper.GetBool("insecure"),
			"socket":   viper.GetString("socket"),
		}).Info("starting plugin server")

		defer func() {
			for _, err := range driver.Stop() {
				logrus.WithError(err).Error("error stopping driver")
			}
		}()

		handler := volume.NewHandler(driver)
		logrus.WithField("socket", viper.GetString("socket")).Info("serving unix socket")
		err := handler.ServeUnix("root", viper.GetString("socket"))
		if err != nil {
			logrus.WithError(err).Fatal("failed serving")
		}
	},
}

func init() {
	RootCmd.AddCommand(dockerCmd)

	dockerCmd.Flags().StringP("address", "a", "https://localhost:8200", "vault address")
	dockerCmd.Flags().BoolP("insecure", "i", false, "skip SSL certificate verification")
	dockerCmd.Flags().StringP("token", "t", "", "vault token")
	dockerCmd.Flags().StringP("socket", "s", "/run/docker/plugins/vault.sock", "socket address to communicate with docker")
Example #20
func main() {
	if *versionFlag {
		fmt.Printf("%s\n", VERSION)
		return
	}

	logFile, err := setupLogging()
	if err != nil {
		log.Fatalf("FATAL: Unable to setup logging: %s", err)
	}
	defer shutdownLogging(logFile)

	log.Printf("INFO: starting rbd-docker-plugin version %s", VERSION)
	log.Printf("INFO: canCreateVolumes=%q, removeAction=%q", *canCreateVolumes, removeActionFlag)
	log.Printf(
		"INFO: Setting up Ceph Driver for PluginID=%s, cluster=%s, user=%s, pool=%s, mount=%s, config=%s, go-ceph=%s",
		*pluginName,
		*cephCluster,
		*cephUser,
		*defaultCephPool,
		*rootMountDir,
		*cephConfigFile,
		*useGoCeph,
	)

	// double check for config file - required especially for non-standard configs
	if *cephConfigFile == "" {
		log.Fatal("FATAL: Unable to use ceph rbd tool without config file")
	}
	if _, err = os.Stat(*cephConfigFile); os.IsNotExist(err) {
		log.Fatalf("FATAL: Unable to find ceph config needed for ceph rbd tool: %s", err)
	}

	// build driver struct -- but don't create connection yet
	d := newCephRBDVolumeDriver(
		*pluginName,
		*cephCluster,
		*cephUser,
		*defaultCephPool,
		*rootMountDir,
		*cephConfigFile,
		*useGoCeph,
	)
	if *useGoCeph {
		defer d.shutdown()
	}

	log.Println("INFO: Creating Docker VolumeDriver Handler")
	h := dkvolume.NewHandler(d)

	socket := socketPath()
	log.Printf("INFO: Opening Socket for Docker to connect: %s", socket)
	// ensure directory exists
	err = os.MkdirAll(filepath.Dir(socket), os.ModeDir)
	if err != nil {
		log.Fatalf("FATAL: Error creating socket directory: %s", err)
	}

	// setup signal handling after logging setup and creating driver, in order to signal the logfile and ceph connection
	// NOTE: systemd will send SIGTERM followed by SIGKILL after a timeout to stop a service daemon
	signalChannel := make(chan os.Signal, 2) // chan with buffer size 2
	signal.Notify(signalChannel, syscall.SIGTERM, syscall.SIGKILL)
	go func() {
		for sig := range signalChannel {
			//sig := <-signalChannel
			switch sig {
			case syscall.SIGTERM, syscall.SIGKILL:
				log.Printf("INFO: received TERM or KILL signal: %s", sig)
				// close up conn and logs
				if *useGoCeph {
					d.shutdown()
				}
				shutdownLogging(logFile)
				os.Exit(0)
			}
		}
	}()

	// NOTE: pass empty string for group to skip broken chgrp in dkvolume lib
	err = h.ServeUnix("", socket)

	if err != nil {
		log.Printf("ERROR: Unable to create UNIX socket: %v", err)
	}
}
Example #21
// NewHandlerFromVolumeDriver creates a plugin handler from an existing volume
// driver. This could be used, for instance, by the `local` volume driver built-in
// to Docker Engine and it would create a plugin from it that maps plugin API calls
// directly to any volume driver that satisfies the volume.Driver interface from
// Docker Engine.
func NewHandlerFromVolumeDriver(d volume.Driver) *volumeplugin.Handler {
	return volumeplugin.NewHandler(&shimDriver{d})
}
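A brief usage sketch (not part of the original example): assuming a driver that satisfies Docker Engine's volume.Driver interface and the ServeUnix(group, address) form used throughout these examples, the shim handler could be served like so. The names serveLocalShim, localDriver and the "localshim" socket are hypothetical.
func serveLocalShim(localDriver volume.Driver) {
	// Wrap the engine-level volume.Driver in the plugin shim defined above.
	h := NewHandlerFromVolumeDriver(localDriver)
	// Serve the plugin API on a unix socket, as the other examples do.
	fmt.Println(h.ServeUnix("root", "localshim"))
}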
Example #22
func main() {
	cmd := cli.NewApp()
	cmd.Name = "azurefile-dockervolumedriver"
	cmd.Version = "0.2"
	cmd.Usage = "Docker Volume Driver for Azure File Service"
	cli.AppHelpTemplate = usageTemplate

	cmd.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "account-name",
			Usage:  "Azure storage account name",
			EnvVar: "AZURE_STORAGE_ACCOUNT",
		},
		cli.StringFlag{
			Name:   "account-key",
			Usage:  "Azure storage account key",
			EnvVar: "AZURE_STORAGE_ACCOUNT_KEY",
		},
		cli.BoolFlag{
			Name:  "remove-shares",
			Usage: "remove associated Azure File Share when volume is removed",
		},
		cli.BoolFlag{
			Name:   "debug",
			Usage:  "Enable verbose logging",
			EnvVar: "DEBUG",
		},
		cli.StringFlag{
			Name:  "mountpoint",
			Usage: "Host path where volumes are mounted at",
			Value: mountpoint,
		},
		cli.StringFlag{
			Name:  "metadata",
			Usage: "Path where volume metadata are stored",
			Value: metadataRoot,
		},
	}
	cmd.Action = func(c *cli.Context) {
		if c.Bool("debug") {
			log.SetLevel(log.DebugLevel)
		}

		accountName := c.String("account-name")
		accountKey := c.String("account-key")
		mountpoint := c.String("mountpoint")
		metaDir := c.String("metadata")
		removeShares := c.Bool("remove-shares")
		if accountName == "" || accountKey == "" {
			log.Fatal("azure storage account name and key must be provided.")
		}

		log.WithFields(log.Fields{
			"accountName":  accountName,
			"metadata":     metaDir,
			"mountpoint":   mountpoint,
			"removeShares": removeShares,
		}).Debug("Starting server.")

		driver, err := newVolumeDriver(accountName, accountKey, mountpoint, metaDir, removeShares)
		if err != nil {
			log.Fatal(err)
		}
		h := volume.NewHandler(driver)
		log.Fatal(h.ServeUnix("docker", volumeDriverName))
	}
	cmd.Run(os.Args)
}
Example #23
func main() {
	if *versionFlag {
		fmt.Printf("%s\n", VERSION)
		return
	}

	logFile, err := setupLogging()
	if err != nil {
		log.Panicf("Unable to setup logging: %s", err)
	}
	defer shutdownLogging(logFile)

	log.Printf(
		"INFO: Setting up Ceph Driver for PluginID=%s, cluster=%s, user=%s, pool=%s, mount=%s, config=%s",
		*pluginName,
		*cephCluster,
		*cephUser,
		*defaultCephPool,
		*rootMountDir,
		*cephConfigFile,
	)
	// build driver struct
	d := newCephRBDVolumeDriver(
		*pluginName,
		*cephCluster,
		*cephUser,
		*defaultCephPool,
		*rootMountDir,
		*cephConfigFile,
	)
	defer d.shutdown()

	log.Println("INFO: Creating Docker VolumeDriver Handler")
	h := dkvolume.NewHandler(d)

	socket := socketPath()
	log.Printf("INFO: Opening Socket for Docker to connect: %s", socket)
	// ensure directory exists
	err = os.MkdirAll(filepath.Dir(socket), os.ModeDir)
	if err != nil {
		log.Panicf("Error creating socket directory: %s", err)
	}

	// setup signal handling after logging setup and creating driver, in order to signal the logfile and ceph connection
	// NOTE: systemd will send SIGTERM followed by SIGKILL after a timeout to stop a service daemon
	// NOTE: we chose to use SIGHUP to reload logfile and ceph connection
	signalChannel := make(chan os.Signal, 2) // chan with buffer size 2
	signal.Notify(signalChannel, syscall.SIGTERM, syscall.SIGKILL, syscall.SIGHUP)
	go func() {
		for sig := range signalChannel {
			//sig := <-signalChannel
			switch sig {
			case syscall.SIGTERM, syscall.SIGKILL:
				log.Printf("INFO: received TERM or KILL signal: %s", sig)
				// close up conn and logs
				d.shutdown()
				shutdownLogging(logFile)
				os.Exit(0)
			case syscall.SIGHUP:
				// reload logs and conn
				log.Printf("INFO: received HUP signal: %s", sig)
				logFile, err = reloadLogging(logFile)
				if err != nil {
					log.Printf("Unable to reload log: %s", err)
				}
				d.reload()
			}
		}
	}()

	// NOTE: pass empty string for group to skip broken chgrp in dkvolume lib
	err = h.ServeUnix("", socket)

	if err != nil {
		log.Printf("ERROR: Unable to create UNIX socket: %v", err)
	}
}
Example #24
func main() {
	driver := newLocalPersistDriver()

	handler := volume.NewHandler(driver)
	fmt.Println(handler.ServeUnix("root", driver.name))
}
Example #25
func main() {
	d := ebsDriver{*root}
	h := volume.NewHandler(d)
	fmt.Printf("Starting listening on unix socket with name %s (usually in /run/docker/plugins/%s)\n", pluginName, pluginName)
	h.ServeUnix("root", pluginName)
}