// SetUpTest wipes every Ceph RBD image and the /volplugin etcd keyspace,
// then serves a fresh API handler via an httptest server.
func (s *dockerSuite) SetUpTest(c *C) {
	c.Assert(exec.Command("sh", "-c", "set -e; for i in $(rbd ls); do rbd snap purge $i; rbd rm $i; done").Run(), IsNil)
	// The error is deliberately ignored: the keyspace may not exist yet.
	exec.Command("/bin/sh", "-c", "etcdctl rm --recursive /volplugin").Run()

	client, err := config.NewClient("/volplugin", []string{"http://127.0.0.1:2379"})
	if err != nil {
		c.Fatal(err)
	}

	s.client = client
	global := config.NewGlobalConfig()
	s.api = api.NewAPI(NewVolplugin(), "mon0", client, &global)
	s.server = httptest.NewServer(s.api.Router(s.api))
}
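// A minimal teardown sketch to pair with SetUpTest above; it is not part of
// the original source. It assumes gocheck's TearDownTest hook and touches
// only the s.server field initialized above: closing the httptest server
// releases its listener between tests.
func (s *dockerSuite) TearDownTest(c *C) {
	if s.server != nil {
		s.server.Close()
	}
}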
// Daemon is the top-level entrypoint for the volsupervisor from the CLI.
func Daemon(ctx *cli.Context) {
	cfg, err := config.NewClient(ctx.String("prefix"), ctx.StringSlice("etcd"))
	if err != nil {
		logrus.Fatal(err)
	}

	// Block until the global configuration is available; volsupervisor
	// cannot run without it.
retry:
	global, err := cfg.GetGlobal()
	if err != nil {
		logrus.Errorf("Could not retrieve global configuration: %v. Retrying in 1 second", err)
		time.Sleep(time.Second)
		goto retry
	}

	dc := &DaemonConfig{Config: cfg, Global: global, Hostname: ctx.String("host-label")}
	dc.setDebug()

	// Track changes to the global configuration for the life of the daemon.
	globalChan := make(chan *watch.Watch)
	dc.Config.WatchGlobal(globalChan)
	go dc.watchAndSetGlobal(globalChan)
	go info.HandleDebugSignal()

	// Only one volsupervisor may run per cluster; the lock is refreshed on a
	// TTL so it clears on its own if this process dies.
	stopChan, err := lock.NewDriver(dc.Config).AcquireWithTTLRefresh(&config.UseVolsupervisor{Hostname: dc.Hostname}, dc.Global.TTL, dc.Global.Timeout)
	if err != nil {
		logrus.Fatalf("Could not start volsupervisor (lock may already be held): %v", err)
	}

	sigChan := make(chan os.Signal, 1)

	go func() {
		<-sigChan
		logrus.Infof("Removing volsupervisor global lock; waiting %v for lock to clear", dc.Global.TTL)
		stopChan <- struct{}{}
		time.Sleep(wait.Jitter(dc.Global.TTL+time.Second, 0)) // give us enough time to try to clear the lock
		os.Exit(0)
	}()

	signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)

	dc.signalSnapshot()
	dc.updateVolumes()

	// Polling here ensures the goroutine is only created after the first
	// updateVolumes pass completes.
	go func() {
		for {
			time.Sleep(wait.Jitter(time.Second, 0))
			dc.updateVolumes()
		}
	}()

	dc.loop()
}
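// A hedged sketch of what setDebug (called above) plausibly does: raise the
// logrus level when the global configuration asks for debugging. The
// Global.Debug field is assumed for illustration; logrus.SetLevel is the
// only call here known to exist.
func (dc *DaemonConfig) setDebug() {
	if dc.Global.Debug {
		logrus.SetLevel(logrus.DebugLevel)
	}
}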
// start wires the CLI flags into an apiserver.DaemonConfig and runs the API
// server on the given listen address.
func start(ctx *cli.Context) {
	cfg, err := config.NewClient(ctx.String("prefix"), ctx.StringSlice("etcd"))
	if err != nil {
		logrus.Fatal(err)
	}

	d := &apiserver.DaemonConfig{
		Config:   cfg,
		MountTTL: ctx.Int("ttl"),
		Timeout:  time.Duration(ctx.Int("timeout")) * time.Minute,
	}

	d.Daemon(ctx.String("listen"))
}
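// A hedged sketch of how start might be wired into a codegangsta/cli app.
// The flag set mirrors exactly what start reads above (prefix, etcd, ttl,
// timeout, listen); the defaults and usage strings are illustrative, not the
// project's actual CLI definition.
func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "prefix", Value: "/volplugin", Usage: "etcd keyspace prefix"},
		cli.StringSliceFlag{Name: "etcd", Value: &cli.StringSlice{"http://127.0.0.1:2379"}, Usage: "etcd endpoints"},
		cli.IntFlag{Name: "ttl", Value: 30, Usage: "mount TTL in seconds"},
		cli.IntFlag{Name: "timeout", Value: 5, Usage: "lock timeout in minutes"},
		cli.StringFlag{Name: "listen", Value: ":9005", Usage: "listen address for the API server"},
	}
	app.Action = start
	app.Run(os.Args)
}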
// SetUpTest clears the volplugin etcd keyspace, connects a fresh config
// client, and publishes the test policy from policy.json.
func (s *lockSuite) SetUpTest(c *C) {
	// The error is deliberately ignored: the keyspace may not exist yet.
	exec.Command("/bin/sh", "-c", "etcdctl rm --recursive /volplugin").Run()

	tlc, err := config.NewClient("/volplugin", []string{"http://127.0.0.1:2379"})
	if err != nil {
		c.Fatal(err)
	}

	s.tlc = tlc

	content, err := ioutil.ReadFile("policy.json")
	c.Assert(err, IsNil)

	policy := &config.Policy{}
	c.Assert(json.Unmarshal(content, policy), IsNil)
	s.tlc.PublishPolicy("policy", policy)
}
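// A hedged sketch of a test built on this setup; it is not in the original
// source. The AcquireWithTTLRefresh call and the UseVolsupervisor locker are
// borrowed from Daemon above, while the TTL and timeout values are
// illustrative. Sending on the returned channel stops the TTL refresher so
// the lock can expire.
func (s *lockSuite) TestAcquireAndRelease(c *C) {
	stopChan, err := lock.NewDriver(s.tlc).AcquireWithTTLRefresh(&config.UseVolsupervisor{Hostname: "test"}, 10*time.Second, time.Second)
	c.Assert(err, IsNil)
	stopChan <- struct{}{}
}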
// NewDaemonConfig creates a DaemonConfig from the CLI flags: the etcd prefix
// and endpoints, the host label, and the plugin (socket) name.
func NewDaemonConfig(ctx *cli.Context) *DaemonConfig {
	// Retry until the etcd cluster is reachable; the plugin cannot operate
	// without it.
retry:
	client, err := config.NewClient(ctx.String("prefix"), ctx.StringSlice("etcd"))
	if err != nil {
		logrus.Warnf("Could not establish client to etcd cluster: %v. Retrying.", err)
		time.Sleep(wait.Jitter(time.Second, 0))
		goto retry
	}

	dc := &DaemonConfig{
		Hostname:   ctx.String("host-label"),
		Client:     client,
		PluginName: ctx.String("plugin-name"),
	}

	if dc.PluginName == "" || strings.Contains(dc.PluginName, "/") {
		logrus.Fatal("Cannot continue; socket name is empty or contains invalid characters")
	}

	return dc
}
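// A hedged illustration of why "/" is rejected above: under Docker's plugin
// discovery convention the plugin name becomes a socket filename, so a path
// separator would let the name escape the plugin directory. The directory
// and suffix below follow that convention and are not taken from this file.
func socketPath(pluginName string) string {
	return filepath.Join("/run/docker/plugins", pluginName+".sock")
}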