Example No. 1
// PopulateFlags reads the flag values from the supplied CLI context,
// returning the storage populated with them.
func PopulateFlags(c *cli.Context) (flags *FlagStorage) {
	flags = &FlagStorage{
		// File system
		MountOptions: make(map[string]string),
		DirMode:      os.FileMode(c.Int("dir-mode")),
		FileMode:     os.FileMode(c.Int("file-mode")),
		Uid:          uint32(c.Int("uid")),
		Gid:          uint32(c.Int("gid")),

		// Tuning,
		StatCacheTTL: c.Duration("stat-cache-ttl"),
		TypeCacheTTL: c.Duration("type-cache-ttl"),

		// OSS
		Endpoint:       c.String("endpoint"),
		UsePathRequest: c.Bool("use-path-request"),
		Internal:       c.Bool("internal"),

		// Debugging,
		DebugFuse: c.Bool("debug_fuse"),
		DebugOSS:  c.Bool("debug_oss"),
	}

	// Handle the repeated "-o" flag.
	for _, o := range c.StringSlice("o") {
		parseOptions(flags.MountOptions, o)
	}

	// Get the region/AccessKeyId/AccessKeySecret
	flags.AccessKeyId = os.Getenv("ACCESS_KEY_ID")
	flags.AccessKeySecret = os.Getenv("ACCESS_KEY_SECRET")
	flags.Region = oss.Region(os.Getenv("OSS_REGION"))

	return
}
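The getters used in these snippets (c.Int, c.Duration, c.Bool, c.StringSlice) only return meaningful values for flags declared on the application up front. A minimal sketch of that other half of the wiring, assuming the urfave/cli v1 API (formerly github.com/codegangsta/cli) and illustrative flag names and defaults rather than goofys' real definitions:

package main

import (
	"os"
	"time"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	// Flag names must match the keys passed to c.Int, c.Duration, etc.
	app.Flags = []cli.Flag{
		cli.IntFlag{Name: "dir-mode", Value: 0755, Usage: "permission bits for directories"},
		cli.DurationFlag{Name: "stat-cache-ttl", Value: time.Minute, Usage: "how long to cache stat results"},
		cli.StringSliceFlag{Name: "o", Usage: "additional mount options (repeatable)"},
		cli.BoolFlag{Name: "debug_fuse", Usage: "enable FUSE debug output"},
	}
	app.Action = func(c *cli.Context) {
		// Assumes this sketch lives in the same package as PopulateFlags above.
		_ = PopulateFlags(c)
	}
	app.Run(os.Args)
}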
Example No. 2
File: flags.go Project: x5u/goofys
// PopulateFlags reads the flag values from the supplied CLI context,
// returning the storage populated with them.
func PopulateFlags(c *cli.Context) (flags *FlagStorage) {
	flags = &FlagStorage{
		// File system
		MountOptions: make(map[string]string),
		DirMode:      os.FileMode(c.Int("dir-mode")),
		FileMode:     os.FileMode(c.Int("file-mode")),
		Uid:          uint32(c.Int("uid")),
		Gid:          uint32(c.Int("gid")),

		// Tuning,
		StatCacheTTL: c.Duration("stat-cache-ttl"),
		TypeCacheTTL: c.Duration("type-cache-ttl"),

		// S3
		Endpoint:       c.String("endpoint"),
		StorageClass:   c.String("storage-class"),
		UsePathRequest: c.Bool("use-path-request"),

		// Debugging,
		DebugFuse:  c.Bool("debug_fuse"),
		DebugS3:    c.Bool("debug_s3"),
		Foreground: c.Bool("f"),
	}

	// Handle the repeated "-o" flag.
	for _, o := range c.StringSlice("o") {
		parseOptions(flags.MountOptions, o)
	}
	return
}
Example No. 3
func newDockerStressor(context *cli.Context) (ds *dockerStressor, err error) {
	ds = &dockerStressor{}

	client, err := dcli.NewDockerClientTimeout(
		"unix:///var/run/docker.sock", nil, time.Second*5)
	if err != nil {
		return
	}
	ds.dockerClient = client

	scfg, err := loadStressCfg(context.String("config"))
	if err != nil {
		return
	}
	ds.stressConfig = scfg

	if context.Int("count") <= 0 {
		return nil, errors.New("flag count must be > 0")
	}
	ds.containerNum = context.Int("count")

	if context.Int("concurrent") <= 0 {
		return nil, errors.New("flag concurrent must be > 0")
	}
	ds.containerConCurrent = context.Int("concurrent")

	ds.containerRunTime = context.Duration("runtime")
	return
}
Example No. 4
// populateFlags reads the flag values from the supplied CLI context,
// returning the storage populated with them.
func populateFlags(c *cli.Context) (flags *flagStorage) {
	flags = &flagStorage{
		// File system
		MountOptions: make(map[string]string),
		DirMode:      os.FileMode(c.Int("dir-mode")),
		FileMode:     os.FileMode(c.Int("file-mode")),
		Uid:          int64(c.Int("uid")),
		Gid:          int64(c.Int("gid")),

		// GCS,
		KeyFile: c.String("key-file"),
		EgressBandwidthLimitBytesPerSecond: c.Float64("limit-bytes-per-sec"),
		OpRateLimitHz:                      c.Float64("limit-ops-per-sec"),

		// Tuning,
		StatCacheTTL: c.Duration("stat-cache-ttl"),
		TypeCacheTTL: c.Duration("type-cache-ttl"),
		TempDir:      c.String("temp-dir"),
		ImplicitDirs: c.Bool("implicit-dirs"),

		// Debugging,
		DebugFuse:       c.Bool("debug_fuse"),
		DebugGCS:        c.Bool("debug_gcs"),
		DebugHTTP:       c.Bool("debug_http"),
		DebugInvariants: c.Bool("debug_invariants"),
	}

	// Handle the repeated "-o" flag.
	for _, o := range c.StringSlice("o") {
		mountpkg.ParseOptions(flags.MountOptions, o)
	}

	return
}
Example No. 5
// FromCLIContext creates a Config using a cli.Context by pulling configuration
// from the flags in the context.
func FromCLIContext(c *cli.Context) *Config {
	cfg := &Config{}
	cfgVal := reflect.ValueOf(cfg).Elem()

	for _, def := range defs {
		if !def.HasField {
			continue
		}

		field := cfgVal.FieldByName(def.FieldName)

		if _, ok := def.Flag.(*cli.BoolFlag); ok {
			field.SetBool(c.Bool(def.Name))
		} else if _, ok := def.Flag.(*cli.DurationFlag); ok {
			field.Set(reflect.ValueOf(c.Duration(def.Name)))
		} else if _, ok := def.Flag.(*cli.IntFlag); ok {
			field.SetInt(int64(c.Int(def.Name)))
		} else if _, ok := def.Flag.(*cli.StringFlag); ok {
			field.SetString(c.String(def.Name))
		}
	}

	cfg.ProviderConfig = ProviderConfigFromEnviron(cfg.ProviderName)

	return cfg
}
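FromCLIContext drives reflection from a shared table of flag definitions, but defs itself is not shown. A hedged sketch of what its entries could look like, with the field names taken from the loop above and everything else (entry names, flag choices) assumed for illustration:

package config

import "github.com/urfave/cli"

// Hypothetical shape of the entries in defs, inferred from FromCLIContext.
type flagDef struct {
	Name      string   // flag name passed to c.Bool / c.Duration / ...
	FieldName string   // name of the corresponding Config struct field
	HasField  bool     // false when a flag has no Config counterpart
	Flag      cli.Flag // the concrete flag type selects which getter runs
}

var defs = []flagDef{
	{Name: "pool-size", FieldName: "PoolSize", HasField: true,
		Flag: &cli.IntFlag{Name: "pool-size"}},
	{Name: "hard-timeout", FieldName: "HardTimeout", HasField: true,
		Flag: &cli.DurationFlag{Name: "hard-timeout"}},
}

Note that the loop asserts on pointer types (*cli.BoolFlag and friends), so the Flag field must hold pointers, as above.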
Example No. 6
func (cmd *Command) upsertHostAction(c *cli.Context) {
	host, err := engine.NewHost(c.String("name"), engine.HostSettings{})
	if err != nil {
		cmd.printError(err)
		return
	}
	if c.String("cert") != "" || c.String("privateKey") != "" {
		keyPair, err := readKeyPair(c.String("cert"), c.String("privateKey"))
		if err != nil {
			cmd.printError(fmt.Errorf("failed to read key pair: %s", err))
			return
		}
		host.Settings.KeyPair = keyPair
	}
	host.Settings.OCSP = engine.OCSPSettings{
		Enabled:            c.Bool("ocsp"),
		SkipSignatureCheck: c.Bool("ocspSkipCheck"),
		Period:             c.Duration("ocspPeriod").String(),
		Responders:         c.StringSlice("ocspResponder"),
	}
	if err := cmd.client.UpsertHost(*host); err != nil {
		cmd.printError(err)
		return
	}
	cmd.printOk("host added")
}
Example No. 7
func buildAction(context *cli.Context) {
	signals := make(chan os.Signal, 128)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	config := nsq.NewConfig()
	config.MsgTimeout = context.Duration("timeout")
	config.MaxInFlight = context.Int("c")
	consumer, err := nsq.NewConsumer(context.String("topic"), context.String("channel"), config)
	if err != nil {
		log.Fatal(err)
	}
	producer, err := nsq.NewProducer(context.String("nsqd"), config)
	if err != nil {
		log.Fatal(err)
	}
	consumer.AddConcurrentHandlers(&handler{
		producer: producer,
		topic:    context.String("error-topic"),
		channel:  context.String("channel"),
	}, context.Int("c"))
	if err := consumer.ConnectToNSQD(context.String("nsqd")); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case <-consumer.StopChan:
			return
		case <-signals:
			consumer.Stop()
		}
	}
}
Example No. 8
func (factory *AppExaminerCommandFactory) appStatus(context *cli.Context) {

	summaryFlag := context.Bool("summary")
	rateFlag := context.Duration("rate")

	if len(context.Args()) < 1 {
		factory.ui.SayIncorrectUsage("App Name required")
		factory.exitHandler.Exit(exit_codes.InvalidSyntax)
		return
	}

	appName := context.Args()[0]

	appInfo, err := factory.appExaminer.AppStatus(appName)
	if err != nil {
		factory.ui.SayLine(err.Error())
		factory.exitHandler.Exit(exit_codes.CommandFailed)
		return
	}

	factory.printAppInfo(appInfo)

	if summaryFlag || rateFlag != 0 {
		factory.printInstanceSummary(appInfo.ActualInstances)
	} else {
		factory.printInstanceInfo(appInfo.ActualInstances)
	}

	if rateFlag == 0 {
		return
	}

	linesWritten := appStatusLinesWritten(appInfo)
	closeChan := make(chan struct{})
	defer factory.ui.Say(cursor.Show())
	factory.ui.Say(cursor.Hide())

	factory.exitHandler.OnExit(func() {
		closeChan <- struct{}{}
		factory.ui.Say(cursor.Show())
	})

	for {
		select {
		case <-closeChan:
			return
		case <-factory.clock.NewTimer(rateFlag).C():
			appInfo, err = factory.appExaminer.AppStatus(appName)
			if err != nil {
				factory.ui.SayLine("Error getting status: " + err.Error())
				return
			}
			factory.ui.Say(cursor.Up(linesWritten))
			factory.printAppInfo(appInfo)
			factory.printInstanceSummary(appInfo.ActualInstances)
			linesWritten = appStatusLinesWritten(appInfo)
		}
	}
}
Example No. 9
func appBefore(c *cli.Context) error {
	desc := "cli.app.Before"

	switch {
	case c.Bool("debug"):
		log.Level = logrus.DebugLevel
	case c.Bool("quiet"):
		log.Level = logrus.ErrorLevel
	default:
		log.Level = logrus.InfoLevel
	}

	getConfig()
	cfg.Mission = airstrike.NewMission(log)

	client = sleepwalker.GetClient(&sleepwalker.Config{
		Credentials: &sleepwalker.Credentials{
			APIKey:    c.String("key"),
			APISecret: c.String("secret"),
			Username:  c.String("username"),
			Password:  c.String("password"),
		},
		OAuthEndpoint: espsdk.OAuthEndpoint,
		APIRoot:       espsdk.SandboxAPI,
		Logger:        log,
	})

	cfg.Mission.Enabled = true

	cliInterval := float64(c.Duration("attack-interval") / time.Millisecond)
	if cliInterval > 0 {
		cfg.Mission.Interval = cliInterval
	}

	if c.Duration("warning-threshold") == 0 {
		warningThreshold = time.Duration(cfg.Mission.Interval) * time.Millisecond
	}

	// set up the reporter for logging and console output
	cfg.Mission.Reporter.URLInvariant = espsdk.APIInvariant
	cfg.Mission.Reporter.WarningThreshold = warningThreshold

	token = sleepwalker.Token(c.String("token"))

	if viper.GetString("format") == "json" {
		log.Formatter = &logrus.JSONFormatter{}
	}

	config = loadConfig(c.String("config"))
	cfgJSON, err := json.Marshal(config)
	if err != nil {
		log.WithFields(logrus.Fields{
			"error": "unable to marshal config",
		}).Error(desc)
	}
	log.WithFields(logrus.Fields{"config": string(cfgJSON)}).Debug(desc)
	return nil
}
Example No. 10
File: dgc.go Project: hatchery/dgc
func collectAPIImages(images []docker.APIImages, client *docker.Client, ctx *cli.Context, excludes []string) {
	var imageSync sync.WaitGroup
	grace := ctx.Duration("grace")
	quiet := ctx.Bool("quiet")
	options := docker.RemoveImageOptions{
		Force:   ctx.Bool("force"),
		NoPrune: ctx.Bool("no-prune"),
	}

	for _, image := range images {
		imageSync.Add(1)
		go func(image docker.APIImages) {
			defer imageSync.Done()

			// Check if the image id or tag is on excludes list
			for _, excludeName := range excludes {
				if image.ID == excludeName {
					return
				}
				for _, tag := range image.RepoTags {
					if tag == excludeName {
						return
					}
				}
			}

			// End if the image is still in the grace period
			log.Printf("Inspecting image: %s\n", image.ID)

			imageDetail, err := client.InspectImage(image.ID)
			if err != nil {
				log.Printf("Error. Failed to inspect image: %s\n", image.ID)
				return
			}
			now := time.Now()
			if now.Sub(imageDetail.Created) < grace {
				return
			}

			// Delete image
			log.Printf("Deleting image: %s\n", imageDetail.ID)

			if err := client.RemoveImageExtended(imageDetail.ID, options); err == nil {
				log.Printf("Deleted image: %s\n", imageDetail.ID)
				if !quiet {
					fmt.Printf("Deleted image: %s\n", imageDetail.ID)
				}
			} else {
				log.Printf("Error. Failed to delete image: %s\n", imageDetail.ID)
				return
			}
		}(image)
	}

	imageSync.Wait()
}
Example No. 11
func (cmd *Command) upsertServerAction(c *cli.Context) error {
	s, err := engine.NewServer(c.String("id"), c.String("url"))
	if err != nil {
		return err
	}
	if err := cmd.client.UpsertServer(engine.BackendKey{Id: c.String("backend")}, *s, c.Duration("ttl")); err != nil {
		return err
	}
	cmd.printOk("server upserted")
	return nil
}
Example No. 12
File: main.go Project: pdf/crononag
func doSplay(c *cli.Context) {
	splay := c.Duration(`splay`)
	if splay > 0 {
		rand.Seed(time.Now().UnixNano())
		rsplayf := splay.Seconds() * rand.Float64()
		rsplay, err := time.ParseDuration(fmt.Sprintf("%fs", rsplayf))
		if err == nil {
			time.Sleep(rsplay)
		}
	}
}
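The Sprintf/ParseDuration round-trip above only converts a float of seconds back into a time.Duration. A sketch of a more direct equivalent (not pdf/crononag's actual code; assumes the same math/rand and time imports):

// doSplayDirect scales the splay by a random factor in [0, 1),
// avoiding the string formatting and re-parsing.
func doSplayDirect(c *cli.Context) {
	splay := c.Duration(`splay`)
	if splay > 0 {
		rand.Seed(time.Now().UnixNano())
		time.Sleep(time.Duration(rand.Float64() * float64(splay)))
	}
}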
Example No. 13
// getDuration calculates the interval value, since the interval can be set
// with the --hour and --day boolean flags
func getDuration(c *cli.Context) (duration time.Duration, err error) {
	if err = validateDuration(c); err != nil {
		return
	}
	if c.Bool("hour") {
		return IntervalHour, nil
	}
	if c.Bool("day") {
		return IntervalDay, nil
	}
	return c.Duration("duration") * -1, nil
}
Example No. 14
func validateDuration(c *cli.Context) error {
	hour := c.Bool("hour")
	day := c.Bool("day")
	duration := c.Duration("duration")
	if hour && day {
		return ErrMutuallyExclusiveFlags
	}
	if duration != intervalFlagDuration.Value && (hour || day) {
		return ErrMutuallyExclusiveFlags
	}
	return nil
}
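Examples No. 13 and 14 reference IntervalHour, IntervalDay, intervalFlagDuration, and ErrMutuallyExclusiveFlags without showing them. A minimal sketch of declarations under which both functions compile; the concrete values are assumptions:

package main

import (
	"errors"
	"time"

	"github.com/urfave/cli"
)

var ErrMutuallyExclusiveFlags = errors.New("--hour, --day and --duration are mutually exclusive")

const (
	// Negative, mirroring the `* -1` in getDuration (look-back intervals).
	IntervalHour = -1 * time.Hour
	IntervalDay  = -24 * time.Hour
)

var intervalFlagDuration = cli.DurationFlag{
	Name:  "duration",
	Value: time.Hour, // the default that validateDuration compares against
}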
Example No. 15
func consulConfFromFlags(c *cli.Context) func(*dagger.ConsulConfig) {
	return func(conf *dagger.ConsulConfig) {
		if c.IsSet("consul") {
			conf.Address = c.String("consul")
		}
		if c.IsSet("consul-ttl") {
			conf.TTL = c.String("consul-ttl")
		}
		if c.IsSet("consul-lockdelay") {
			conf.LockDelay = c.Duration("consul-lockdelay")
		}
	}
}
Example No. 16
func daemon(context *cli.Context) error {
	// setup a standard reaper so that we don't leave any zombies if we are still alive
	// this is just good practice because we are spawning new processes
	s := make(chan os.Signal, 2048)
	signal.Notify(s, syscall.SIGCHLD, syscall.SIGTERM, syscall.SIGINT)
	if err := osutils.SetSubreaper(1); err != nil {
		logrus.WithField("error", err).Error("containerd: set subreaper")
	}
	sv, err := supervisor.New(
		context.String("state-dir"),
		context.String("runtime"),
		context.String("shim"),
		context.StringSlice("runtime-args"),
		context.Duration("start-timeout"),
		context.Int("retain-count"))
	if err != nil {
		return err
	}
	wg := &sync.WaitGroup{}
	for i := 0; i < 10; i++ {
		wg.Add(1)
		w := supervisor.NewWorker(sv, wg)
		go w.Start()
	}
	if err := sv.Start(); err != nil {
		return err
	}
	// Split the listen string of the form proto://addr
	listenSpec := context.String("listen")
	listenParts := strings.SplitN(listenSpec, "://", 2)
	if len(listenParts) != 2 {
		return fmt.Errorf("bad listen address format %s, expected proto://address", listenSpec)
	}
	server, err := startServer(listenParts[0], listenParts[1], sv)
	if err != nil {
		return err
	}
	for ss := range s {
		switch ss {
		case syscall.SIGCHLD:
			if _, err := osutils.Reap(); err != nil {
				logrus.WithField("error", err).Warn("containerd: reap child processes")
			}
		default:
			logrus.Infof("stopping containerd after receiving %s", ss)
			server.Stop()
			os.Exit(0)
		}
	}
	return nil
}
Example No. 17
func pollOAuthConfirmation(context *cli.Context, deviceCode string, interval int) (*http.Client, string) {
	config := oauth2.Config{
		ClientID:     lib.DefaultConfig.GCPOAuthClientID,
		ClientSecret: lib.DefaultConfig.GCPOAuthClientSecret,
		Endpoint: oauth2.Endpoint{
			AuthURL:  lib.DefaultConfig.GCPOAuthAuthURL,
			TokenURL: lib.DefaultConfig.GCPOAuthTokenURL,
		},
		RedirectURL: gcp.RedirectURL,
		Scopes:      []string{gcp.ScopeCloudPrint},
	}

	for {
		time.Sleep(time.Duration(interval) * time.Second)

		form := url.Values{
			"client_id":     {lib.DefaultConfig.GCPOAuthClientID},
			"client_secret": {lib.DefaultConfig.GCPOAuthClientSecret},
			"code":          {deviceCode},
			"grant_type":    {gcpOAuthGrantTypeDevice},
		}
		response, err := http.PostForm(gcpOAuthTokenPollURL, form)
		if err != nil {
			log.Fatalln(err)
		}

		var r struct {
			Error        string `json:"error"`
			AccessToken  string `json:"access_token"`
			ExpiresIn    int    `json:"expires_in"`
			RefreshToken string `json:"refresh_token"`
		}
		if err := json.NewDecoder(response.Body).Decode(&r); err != nil {
			log.Fatalln(err)
		}
		response.Body.Close()

		switch r.Error {
		case "":
			token := &oauth2.Token{RefreshToken: r.RefreshToken}
			client := config.Client(oauth2.NoContext, token)
			client.Timeout = context.Duration("gcp-api-timeout")
			return client, r.RefreshToken
		case "authorization_pending":
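			// no token yet; poll again after the same interval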
		case "slow_down":
			interval *= 2
		default:
			log.Fatalln(r.Error)
		}
	}

	panic("unreachable")
}
Example No. 18
func runCommand(ctx *cli.Context) {
	ansibleResp := initAnsubleResp(ctx)

	// TODO: here we duplicate fatalf in both run(), pull() and clean()
	// maybe refactor to make it cleaner
	fatalf := func(err error) {
		if ansibleResp != nil {
			ansibleResp.Error(err).WriteTo(os.Stdout)
		}
		log.Fatal(err)
	}

	initLogs(ctx)

	dockerCli := initDockerClient(ctx)
	config := initComposeConfig(ctx, dockerCli)
	auth := initAuthConfig(ctx)

	compose, err := compose.New(&compose.Config{
		Manifest: config,
		Docker:   dockerCli,
		Force:    ctx.Bool("force"),
		DryRun:   ctx.Bool("dry"),
		Attach:   ctx.Bool("attach"),
		Wait:     ctx.Duration("wait"),
		Pull:     ctx.Bool("pull"),
		Auth:     auth,
	})

	if err != nil {
		fatalf(err)
	}

	// in case of --force given, first remove all existing containers
	if ctx.Bool("force") {
		if err := doRemove(ctx, config, dockerCli, auth); err != nil {
			fatalf(err)
		}
	}

	if err := compose.RunAction(); err != nil {
		fatalf(err)
	}

	if ansibleResp != nil {
		// ansibleResp.Success("done hehe").WriteTo(os.Stdout)
		compose.WritePlan(ansibleResp).WriteTo(os.Stdout)
	}
}
Example No. 19
func start(c *cli.Context) {

	// debug level if requested by user
	if c.Bool("debug") {
		logrus.SetLevel(logrus.DebugLevel)
	} else {
		logrus.SetLevel(logrus.WarnLevel)
	}

	client := client.NewClientToken(
		c.String("drone-server"),
		c.String("drone-token"),
	)

	tls, err := dockerclient.TLSConfigFromCertPath(c.String("docker-cert-path"))
	if err == nil {
		tls.InsecureSkipVerify = c.Bool("docker-tls-verify")
	}
	docker, err := dockerclient.NewDockerClient(c.String("docker-host"), tls)
	if err != nil {
		logrus.Fatal(err)
	}

	var wg sync.WaitGroup
	for i := 0; i < c.Int("docker-max-procs"); i++ {
		wg.Add(1)
		go func() {
			r := pipeline{
				drone:  client,
				docker: docker,
				config: config{
					whitelist:  c.StringSlice("whitelist"),
					namespace:  c.String("namespace"),
					privileged: c.StringSlice("privileged"),
					netrc:      c.StringSlice("netrc-plugin"),
					pull:       c.Bool("pull"),
				},
			}
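			// Each worker loops forever and never calls wg.Done, so
			// wg.Wait() below blocks indefinitely; this keeps the agent
			// alive, reconnecting with backoff on error.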
			for {
				if err := r.run(); err != nil {
					dur := c.Duration("backoff")
					logrus.Warnf("reconnect in %v. %s", dur, err.Error())
					time.Sleep(dur)
				}
			}
		}()
	}
	wg.Wait()
}
Example No. 20
func (cmd *Command) upsertFrontendAction(c *cli.Context) error {
	settings, err := getFrontendSettings(c)
	if err != nil {
		return err
	}
	f, err := engine.NewHTTPFrontend(route.NewMux(), c.String("id"), c.String("b"), c.String("route"), settings)
	if err != nil {
		return err
	}
	if err := cmd.client.UpsertFrontend(*f, c.Duration("ttl")); err != nil {
		return err
	}
	cmd.printOk("frontend upserted")
	return nil
}
Example No. 21
func cmdLogsStream(c *cli.Context) {
	_, app, err := stdcli.DirApp(c, ".")

	if err != nil {
		stdcli.Error(err)
		return
	}

	err = rackClient(c).StreamAppLogs(app, c.String("filter"), c.BoolT("follow"), c.Duration("since"), os.Stdout)

	if err != nil {
		stdcli.Error(err)
		return
	}
}
Example No. 22
func queryOptions(c *cli.Context) *consulapi.QueryOptions {
	consistent := c.Bool(consistentFlag.Name)
	stale := c.Bool(staleFlag.Name)
	if consistent && stale {
		cli.ShowSubcommandHelp(c)
		log.Fatalf("only --stale or --consistent may be set, not both")
	}
	return &consulapi.QueryOptions{
		Datacenter:        c.String(dcFlag.Name),
		AllowStale:        stale,
		RequireConsistent: consistent,
		WaitTime:          c.Duration(waitFlag.Name),
		WaitIndex:         uint64(c.Int(waitIndexFlag.Name)),
	}
}
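The *Flag variables above follow the declare-once, reference-by-Name-everywhere pattern; a hedged sketch of plausible declarations (names and usage strings are guesses):

package main

import "github.com/urfave/cli"

var (
	dcFlag         = cli.StringFlag{Name: "dc", Usage: "Consul datacenter to query"}
	staleFlag      = cli.BoolFlag{Name: "stale", Usage: "allow stale reads from any server"}
	consistentFlag = cli.BoolFlag{Name: "consistent", Usage: "require fully consistent reads"}
	waitFlag       = cli.DurationFlag{Name: "wait", Usage: "maximum blocking-query wait time"}
	waitIndexFlag  = cli.IntFlag{Name: "wait-index", Usage: "index to block on for changes"}
)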
Example No. 23
// getUserClientFromToken creates a user client with just a refresh token.
func getUserClientFromToken(context *cli.Context) *http.Client {
	config := &oauth2.Config{
		ClientID:     lib.DefaultConfig.GCPOAuthClientID,
		ClientSecret: lib.DefaultConfig.GCPOAuthClientSecret,
		Endpoint: oauth2.Endpoint{
			AuthURL:  lib.DefaultConfig.GCPOAuthAuthURL,
			TokenURL: lib.DefaultConfig.GCPOAuthTokenURL,
		},
		RedirectURL: gcp.RedirectURL,
		Scopes:      []string{gcp.ScopeCloudPrint},
	}

	token := &oauth2.Token{RefreshToken: context.String("gcp-user-refresh-token")}
	client := config.Client(oauth2.NoContext, token)
	client.Timeout = context.Duration("gcp-api-timeout")

	return client
}
Example No. 24
func (factory *AppRunnerCommandFactory) scaleApp(c *cli.Context) {
	appName := c.Args().First()
	instancesArg := c.Args().Get(1)
	timeoutFlag := c.Duration("timeout")
	if appName == "" || instancesArg == "" {
		factory.UI.SayIncorrectUsage("Please enter 'ltc scale APP_NAME NUMBER_OF_INSTANCES'")
		factory.ExitHandler.Exit(exit_codes.InvalidSyntax)
		return
	}

	instances, err := strconv.Atoi(instancesArg)
	if err != nil {
		factory.UI.SayIncorrectUsage("Number of Instances must be an integer")
		factory.ExitHandler.Exit(exit_codes.InvalidSyntax)
		return
	}

	factory.setAppInstances(timeoutFlag, appName, instances)
}
Example No. 25
// PopulateFlags reads the flag values from the supplied CLI context,
// returning the storage populated with them.
func PopulateFlags(c *cli.Context) (flags *FlagStorage) {
	flags = &FlagStorage{
		// File system
		MountOptions: make(map[string]string),
		DirMode:      os.FileMode(c.Int("dir-mode")),
		FileMode:     os.FileMode(c.Int("file-mode")),
		Uid:          uint32(c.Int("uid")),
		Gid:          uint32(c.Int("gid")),

		// Tuning,
		StatCacheTTL: c.Duration("stat-cache-ttl"),
		TypeCacheTTL: c.Duration("type-cache-ttl"),

		// S3
		Endpoint:       c.String("endpoint"),
		Region:         c.String("region"),
		RegionSet:      c.IsSet("region"),
		StorageClass:   c.String("storage-class"),
		UsePathRequest: c.Bool("use-path-request"),
		Profile:        c.String("profile"),
		UseContentType: c.Bool("use-content-type"),
		UseSSE:         c.Bool("sse"),
		UseKMS:         c.IsSet("sse-kms"),
		KMSKeyID:       c.String("sse-kms"),
		ACL:            c.String("acl"),

		// Debugging,
		DebugFuse:  c.Bool("debug_fuse"),
		DebugS3:    c.Bool("debug_s3"),
		Foreground: c.Bool("f"),
	}

	// KMS implies SSE
	if flags.UseKMS {
		flags.UseSSE = true
	}

	// Handle the repeated "-o" flag.
	for _, o := range c.StringSlice("o") {
		parseOptions(flags.MountOptions, o)
	}
	return
}
Example No. 26
func daemon(context *cli.Context) error {
	s := make(chan os.Signal, 2048)
	signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)
	osutils.SetSubreaper(1)
	sv, err := supervisor.New(
		context.String("state-dir"),
		context.String("runtime"),
		context.String("shim"),
		context.StringSlice("runtime-args"),
		context.Duration("start-timeout"),
		context.Int("retain-count"))
	if err != nil {
		return err
	}
	wg := &sync.WaitGroup{}
	for i := 0; i < 10; i++ {
		wg.Add(1)
		w := supervisor.NewWorker(sv, wg)
		go w.Start()
	}
	if err := sv.Start(); err != nil {
		return err
	}
	// Split the listen string of the form proto://addr
	listenSpec := context.String("listen")
	listenParts := strings.SplitN(listenSpec, "://", 2)
	if len(listenParts) != 2 {
		return fmt.Errorf("bad listen address format %s, expected proto://address", listenSpec)
	}
	server, err := startServer(listenParts[0], listenParts[1], sv)
	if err != nil {
		return err
	}
	for ss := range s {
		switch ss {
		default:
			logrus.Infof("stopping containerd after receiving %s", ss)
			server.Stop()
			os.Exit(0)
		}
	}
	return nil
}
Example No. 27
func recoverCommand(ctx *cli.Context) {
	initLogs(ctx)

	dockerCli := initDockerClient(ctx)
	auth := initAuthConfig(ctx)

	compose, err := compose.New(&compose.Config{
		Docker:  dockerCli,
		DryRun:  ctx.Bool("dry"),
		Wait:    ctx.Duration("wait"),
		Recover: true,
		Auth:    auth,
	})

	if err != nil {
		log.Fatal(err)
	}

	if err := compose.RecoverAction(); err != nil {
		log.Fatal(err)
	}
}
Example No. 28
func monitorConnector(context *cli.Context) {
	config, filename, err := lib.GetConfig(context)
	if err != nil {
		log.Fatalf("Failed to read config file: %s\n", err)
	}
	if filename == "" {
		fmt.Println("No config file was found, so using defaults")
	}

	if _, err := os.Stat(config.MonitorSocketFilename); err != nil {
		if !os.IsNotExist(err) {
			log.Fatalln(err)
		}
		log.Fatalf(
			"No connector is running, or the monitoring socket %s is mis-configured\n",
			config.MonitorSocketFilename)
	}

	timer := time.AfterFunc(context.Duration("monitor-timeout"), func() {
		log.Fatalf("Timeout after %s\n", context.Duration("monitor-timeout").String())
	})

	conn, err := net.DialTimeout("unix", config.MonitorSocketFilename, time.Second)
	if err != nil {
		log.Fatalf(
			"No connector is running, or it is not listening to socket %s\n",
			config.MonitorSocketFilename)
	}
	defer conn.Close()

	buf, err := ioutil.ReadAll(conn)
	if err != nil {
		log.Fatalln(err)
	}

	timer.Stop()

	fmt.Print(string(buf))
}
Example No. 29
func (factory *AppExaminerCommandFactory) visualizeCells(context *cli.Context) {
	rate := context.Duration("rate")
	graphicalFlag := context.Bool("graphical")

	if graphicalFlag {
		err := factory.graphicalVisualizer.PrintDistributionChart(rate)
		if err != nil {
			factory.ui.SayLine("Error Visualization: " + err.Error())
			factory.exitHandler.Exit(exit_codes.CommandFailed)
		}
		return
	}

	factory.ui.SayLine(colors.Bold("Distribution"))
	linesWritten := factory.printDistribution()

	if rate == 0 {
		return
	}

	closeChan := make(chan struct{})
	factory.ui.Say(cursor.Hide())

	factory.exitHandler.OnExit(func() {
		closeChan <- struct{}{}
		factory.ui.Say(cursor.Show())

	})

	for {
		select {
		case <-closeChan:
			return
		case <-factory.clock.NewTimer(rate).C():
			factory.ui.Say(cursor.Up(linesWritten))
			linesWritten = factory.printDistribution()
		}
	}
}
Example No. 30
// New builds a Config from the flags on the supplied CLI context.
func New(context *cli.Context) *Config {
	// Fall back to debug-level logging when the flag value cannot be parsed.
	loggingLevel, err := logrus.ParseLevel(context.String(logLevel))
	if err != nil {
		loggingLevel = logrus.DebugLevel
	}

	return &Config{
		ServiceName:    context.String(serviceName),
		ServiceVerion:  context.String(serviceVersion),
		EndpointHost:   context.String(endpointHost),
		EndpointPort:   context.Int(endpointPort),
		LogstashServer: context.String(logstashServer),
		Register:       context.BoolT(register),
		Proxy:          context.BoolT(proxy),
		Log:            context.BoolT(log),
		Supervise:      context.Bool(supervise),
		Controller: Controller{
			URL:  context.String(controllerURL),
			Poll: context.Duration(controllerPoll),
		},
		Tenant: Tenant{
			ID:        context.String(tenantID),
			Token:     context.String(tenantToken),
			TTL:       context.Duration(tenantTTL),
			Heartbeat: context.Duration(tenantHeartbeat),
		},
		Registry: Registry{
			URL:   context.String(registryURL),
			Token: context.String(registryToken),
		},
		Kafka: Kafka{
			Username: context.String(kafkaUsername),
			Password: context.String(kafkaPassword),
			APIKey:   context.String(kafkaToken),
			RestURL:  context.String(kafkaRestURL),
			Brokers:  context.StringSlice(kafkaBrokers),
			SASL:     context.Bool(kafkaSASL),
		},
		Nginx: Nginx{
			Port: context.Int(nginxPort),
		},
		LogLevel: loggingLevel,
		AppArgs:  context.Args(),
	}
}
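A closing note on all the Duration getters above: cli v1 parses duration flags with time.ParseDuration, so compound command-line values such as --stat-cache-ttl=1m30s or --grace=72h work directly. A standalone check of that syntax:

package main

import (
	"fmt"
	"log"
	"time"
)

func main() {
	d, err := time.ParseDuration("1m30s")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // prints "1m30s"
}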