func (cmd *editDescriptionCmd) Flags(fs *flag.FlagSet) *flag.FlagSet { cmd.ById = fs.Bool(drive.CLIOptionId, false, "open by id instead of path") cmd.Description = fs.String(drive.CLIOptionDescription, "", drive.DescDescription) cmd.Piped = fs.Bool(drive.CLIOptionPiped, false, drive.DescPiped) return fs }
func commandVtGateSplitQuery(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { server := subFlags.String("server", "", "VtGate server to connect to") bindVariables := newBindvars(subFlags) connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vtgate client") splitCount := subFlags.Int("split_count", 16, "number of splits to generate") keyspace := subFlags.String("keyspace", "", "keyspace to send query to") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { return fmt.Errorf("the <sql> argument is required for the VtGateSplitQuery command") } vtgateConn, err := vtgateconn.Dial(ctx, *server, *connectTimeout) if err != nil { return fmt.Errorf("error connecting to vtgate '%v': %v", *server, err) } defer vtgateConn.Close() r, err := vtgateConn.SplitQuery(ctx, *keyspace, tproto.BoundQuery{ Sql: subFlags.Arg(0), BindVariables: *bindVariables, }, *splitCount) if err != nil { return fmt.Errorf("SplitQuery failed: %v", err) } wr.Logger().Printf("%v\n", jscfg.ToJSON(r)) return nil }
func commandLegacySplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (Worker, error) { excludeTables := subFlags.String("exclude_tables", "", "comma separated list of tables to exclude") strategy := subFlags.String("strategy", "", "which strategy to use for restore, use 'vtworker LegacySplitClone --strategy=-help k/s' for more info") sourceReaderCount := subFlags.Int("source_reader_count", defaultSourceReaderCount, "number of concurrent streaming queries to use on the source") destinationPackCount := subFlags.Int("destination_pack_count", defaultDestinationPackCount, "number of packets to pack in one destination insert") destinationWriterCount := subFlags.Int("destination_writer_count", defaultDestinationWriterCount, "number of concurrent RPCs to execute on the destination") minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets before taking out one") maxTPS := subFlags.Int64("max_tps", defaultMaxTPS, "if non-zero, limit copy to maximum number of (write) transactions/second on the destination (unlimited by default)") if err := subFlags.Parse(args); err != nil { return nil, err } if subFlags.NArg() != 1 { subFlags.Usage() return nil, fmt.Errorf("command LegacySplitClone requires <keyspace/shard>") } keyspace, shard, err := topoproto.ParseKeyspaceShard(subFlags.Arg(0)) if err != nil { return nil, err } var excludeTableArray []string if *excludeTables != "" { excludeTableArray = strings.Split(*excludeTables, ",") } worker, err := NewLegacySplitCloneWorker(wr, wi.cell, keyspace, shard, excludeTableArray, *strategy, *sourceReaderCount, *destinationPackCount, *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS) if err != nil { return nil, fmt.Errorf("cannot create split clone worker: %v", err) } return worker, nil }
// newCommandSub creates and returns a sub command. func newCommandSub(args []string, cli *client.Client) (command, error) { // Create a flag set. var flg flag.FlagSet // Define the flags. topicFilter := flg.String("t", "", "Topic Filter") qos := flg.Uint("q", uint(mqtt.QoS0), "QoS") // Parse the flag. if err := flg.Parse(args); err != nil { return nil, errCmdArgsParse } // Create a sub command. cmd := &commandSub{ cli: cli, subscribeOpts: &client.SubscribeOptions{ SubReqs: []*client.SubReq{ &client.SubReq{ TopicFilter: []byte(*topicFilter), QoS: byte(*qos), Handler: messageHandler, }, }, }, } // Return the command. return cmd, nil }
func commandVerticalSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (Worker, error) { tables := subFlags.String("tables", "", "comma separated list of tables to replicate (used for vertical split)") strategy := subFlags.String("strategy", "", "which strategy to use for restore, use 'mysqlctl multirestore -strategy=-help' for more info") sourceReaderCount := subFlags.Int("source_reader_count", defaultSourceReaderCount, "number of concurrent streaming queries to use on the source") destinationPackCount := subFlags.Int("destination_pack_count", defaultDestinationPackCount, "number of packets to pack in one destination insert") minTableSizeForSplit := subFlags.Int("min_table_size_for_split", defaultMinTableSizeForSplit, "tables bigger than this size on disk in bytes will be split into source_reader_count chunks if possible") destinationWriterCount := subFlags.Int("destination_writer_count", defaultDestinationWriterCount, "number of concurrent RPCs to execute on the destination") if err := subFlags.Parse(args); err != nil { return nil, err } if subFlags.NArg() != 1 { subFlags.Usage() return nil, fmt.Errorf("command VerticalSplitClone requires <destination keyspace/shard>") } keyspace, shard, err := topo.ParseKeyspaceShardString(subFlags.Arg(0)) if err != nil { return nil, err } var tableArray []string if *tables != "" { tableArray = strings.Split(*tables, ",") } worker, err := NewVerticalSplitCloneWorker(wr, wi.cell, keyspace, shard, tableArray, *strategy, *sourceReaderCount, *destinationPackCount, uint64(*minTableSizeForSplit), *destinationWriterCount) if err != nil { return nil, fmt.Errorf("cannot create worker: %v", err) } return worker, nil }
func doInitialize(fs *flag.FlagSet, argv []string) error { var ( conffile = fs.String("conf", config.DefaultConfig.Conffile, "Config file path") apikey = fs.String("apikey", "", "API key from mackerel.io web site (Required)") ) fs.Parse(argv) if *apikey == "" { // Setting apikey via environment variable should be supported or not? return fmt.Errorf("-apikey option is required") } _, err := os.Stat(*conffile) confExists := err == nil if confExists { conf, err := config.LoadConfig(*conffile) if err != nil { return fmt.Errorf("Failed to load the config file: %s", err) } if conf.Apikey != "" { return apikeyAlreadySetError(*conffile) } } contents := []byte(fmt.Sprintf("apikey = %q\n", *apikey)) if confExists { cBytes, err := ioutil.ReadFile(*conffile) if err != nil { return err } contents = append(contents, cBytes...) } return ioutil.WriteFile(*conffile, contents, 0644) }
// Setup the parameters with the command line flags in args. func (job *Job) Setup(fs *flag.FlagSet, args []string) error { taskfile := fs.String("task", "", "path to the task description (mandatory)") inputfile := fs.String("input", "", "path to the input file (mandatory)") fs.StringVar(&job.UmlPath, "uml", job.UmlPath, "path to the UML executable") fs.StringVar(&job.EnvDir, "envdir", job.EnvDir, "environments directory") fs.StringVar(&job.TasksDir, "tasksdir", job.TasksDir, "tasks directory") if err := fs.Parse(args); err != nil { return err } if len(*taskfile) == 0 || len(*inputfile) == 0 { return errors.New("Missing task or input file") } taskcontent, err := ioutil.ReadFile(*taskfile) if err != nil { return err } if json.Unmarshal(taskcontent, &job.Task) != nil { return err } inputcontent, err := ioutil.ReadFile(*inputfile) if err != nil { return err } job.Input = string(inputcontent) return nil }
func (gc *GetCommand) Flags(fs *flag.FlagSet) *flag.FlagSet { gc.header = fs.Bool("H", false, "Display record header") gc.test = fs.Bool("t", false, "Test that the record can be retrieved") gc.separator = fs.String("s", "====", "Record separator") return fs }
func mainReqs(args []string, flags *flag.FlagSet) { flags.Usage = func() { fmt.Fprintf(os.Stderr, "Usage: %s %s <package-name>\n", os.Args[0], args[0]) flags.PrintDefaults() } file := flags.String("graphfile", "", fmt.Sprintf("Path to PyPI dependency graph file. Defaults to $GOPATH/src/github.com/beyang/cheerio/data/pypi_graph")) flags.Parse(args[1:]) if flags.NArg() < 1 { flags.Usage() os.Exit(1) } pkg := cheerio.NormalizedPkgName(flags.Arg(0)) var pypiG *cheerio.PyPIGraph if *file == "" { pypiG = cheerio.DefaultPyPIGraph } else { var err error pypiG, err = cheerio.NewPyPIGraph(*file) if err != nil { fmt.Printf("Error creating PyPI graph: %s\n", err) os.Exit(1) } } pkgReq := pypiG.Requires(pkg) pkgReqBy := pypiG.RequiredBy(pkg) fmt.Printf("pkg %s uses (%d):\n %s\nand is used by (%d):\n %s\n", pkg, len(pkgReq), strings.Join(pkgReq, " "), len(pkgReqBy), strings.Join(pkgReqBy, " ")) }
func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { waitSlaveTimeout := subFlags.Duration("wait_slave_timeout", 30*time.Second, "time to wait for slaves to catch up in reparenting") keyspaceShard := subFlags.String("keyspace_shard", "", "keyspace/shard of the shard that needs to be reparented") newMaster := subFlags.String("new_master", "", "alias of a tablet that should be the new master") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() == 2 { // Legacy syntax: "<keyspace/shard> <tablet alias>". if *newMaster != "" { return fmt.Errorf("cannot use legacy syntax and flag -new_master for action EmergencyReparentShard at the same time") } *keyspaceShard = subFlags.Arg(0) *newMaster = subFlags.Arg(1) } else if subFlags.NArg() != 0 { return fmt.Errorf("action EmergencyReparentShard requires -keyspace_shard=<keyspace/shard> -new_master=<tablet alias>") } keyspace, shard, err := topoproto.ParseKeyspaceShard(*keyspaceShard) if err != nil { return err } tabletAlias, err := topoproto.ParseTabletAlias(*newMaster) if err != nil { return err } return wr.EmergencyReparentShard(ctx, keyspace, shard, tabletAlias, *waitSlaveTimeout) }
func commandSplitClone(wi *Instance, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) (Worker, error) { online := subFlags.Bool("online", defaultOnline, "do online copy (optional approximate copy, source and destination tablets will not be put out of serving, minimizes downtime during offline copy)") offline := subFlags.Bool("offline", defaultOffline, "do offline copy (exact copy at a specific GTID, required before shard migration, source and destination tablets will be put out of serving during copy)") excludeTables := subFlags.String("exclude_tables", "", "comma separated list of tables to exclude") strategy := subFlags.String("strategy", "", "which strategy to use for restore, use 'vtworker SplitClone --strategy=-help k/s' for more info") sourceReaderCount := subFlags.Int("source_reader_count", defaultSourceReaderCount, "number of concurrent streaming queries to use on the source") writeQueryMaxRows := subFlags.Int("write_query_max_rows", defaultWriteQueryMaxRows, "maximum number of rows per write query") writeQueryMaxSize := subFlags.Int("write_query_max_size", defaultWriteQueryMaxSize, "maximum size (in bytes) per write query") writeQueryMaxRowsDelete := subFlags.Int("write_query_max_rows_delete", defaultWriteQueryMaxRows, "maximum number of rows per DELETE FROM write query") minTableSizeForSplit := subFlags.Int("min_table_size_for_split", defaultMinTableSizeForSplit, "tables bigger than this size on disk in bytes will be split into source_reader_count chunks if possible") destinationWriterCount := subFlags.Int("destination_writer_count", defaultDestinationWriterCount, "number of concurrent RPCs to execute on the destination") minHealthyRdonlyTablets := subFlags.Int("min_healthy_rdonly_tablets", defaultMinHealthyRdonlyTablets, "minimum number of healthy RDONLY tablets in the source and destination shard at start") maxTPS := subFlags.Int64("max_tps", defaultMaxTPS, "if non-zero, limit copy to maximum number of (write) transactions/second on the 
destination (unlimited by default)") if err := subFlags.Parse(args); err != nil { return nil, err } if subFlags.NArg() != 1 { subFlags.Usage() return nil, fmt.Errorf("command SplitClone requires <keyspace/shard>") } keyspace, shard, err := topoproto.ParseKeyspaceShard(subFlags.Arg(0)) if err != nil { return nil, err } var excludeTableArray []string if *excludeTables != "" { excludeTableArray = strings.Split(*excludeTables, ",") } worker, err := NewSplitCloneWorker(wr, wi.cell, keyspace, shard, *online, *offline, excludeTableArray, *strategy, *sourceReaderCount, *writeQueryMaxRows, *writeQueryMaxSize, *writeQueryMaxRowsDelete, uint64(*minTableSizeForSplit), *destinationWriterCount, *minHealthyRdonlyTablets, *maxTPS) if err != nil { return nil, fmt.Errorf("cannot create split clone worker: %v", err) } return worker, nil }
func commandVtGateExecute(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { server := subFlags.String("server", "", "VtGate server to connect to") bindVariables := newBindvars(subFlags) connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vtgate client") tabletType := subFlags.String("tablet_type", "master", "tablet type to query") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { return fmt.Errorf("the <sql> argument is required for the VtGateExecute command") } t, err := parseTabletType(*tabletType, []pb.TabletType{pb.TabletType_MASTER, pb.TabletType_REPLICA, pb.TabletType_RDONLY}) if err != nil { return err } vtgateConn, err := vtgateconn.Dial(ctx, *server, *connectTimeout) if err != nil { return fmt.Errorf("error connecting to vtgate '%v': %v", *server, err) } defer vtgateConn.Close() qr, err := vtgateConn.Execute(ctx, subFlags.Arg(0), *bindVariables, t) if err != nil { return fmt.Errorf("Execute failed: %v", err) } return printJSON(wr, qr) }
// newCommandUnsub creates and returns an unsub command. func newCommandUnsub(args []string, cli *client.Client) (command, error) { // Create a flag set. var flg flag.FlagSet // Define the flags. topicFilter := flg.String("t", "", "Topic Filter") // Parse the flag. if err := flg.Parse(args); err != nil { return nil, errCmdArgsParse } // Create an unsub command. cmd := &commandUnsub{ cli: cli, unsubscribeOpts: &client.UnsubscribeOptions{ TopicFilters: [][]byte{ []byte(*topicFilter), }, }, } // Return the command. return cmd, nil }
// newCommandPub creates and returns a pub command. func newCommandPub(args []string, cli *client.Client) (command, error) { // Create a flag set. var flg flag.FlagSet // Define the flags. qos := flg.Uint("q", uint(mqtt.QoS0), "QoS") retain := flg.Bool("r", false, "Retain") topicName := flg.String("t", "", "Topic Name") message := flg.String("m", "", "Application Message") // Parse the flag. if err := flg.Parse(args); err != nil { return nil, errCmdArgsParse } // Create a pub command. cmd := &commandPub{ cli: cli, publishOpts: &client.PublishOptions{ QoS: byte(*qos), Retain: *retain, TopicName: []byte(*topicName), Message: []byte(*message), }, } // Return the command. return cmd, nil }
// AddFlags registers the pprof debugging flag on the given flag set.
func AddFlags(flags *flag.FlagSet) {
	flags.String(DebugFlag, "", "host:port for serving pprof debugging info")
}
func (c *DaemonCmd) Flags(fs *flag.FlagSet) { c.dbfile = fs.String("o", "ci.db", "override the default local file name") c.port = fs.Int("p", 2020, "override the default local port") c.hookport = fs.Int("hp", 2121, "override the default hook port ") c.hook = fs.Bool("hook", false, "also start an http Hook server (Get returns a status, Post fire a build)") }
func (cmd *unshareCmd) Flags(fs *flag.FlagSet) *flag.FlagSet { cmd.accountType = fs.String(drive.TypeKey, "", "scope of account to revoke access to") cmd.noPrompt = fs.Bool(drive.NoPromptKey, false, "disables the prompt") cmd.quiet = fs.Bool(drive.QuietKey, false, "if set, do not log anything but errors") cmd.byId = fs.Bool(drive.CLIOptionId, false, "unshare by id instead of path") return fs }
func TestLoadConfig(t *testing.T) { var flagSet flag.FlagSet a := flagSet.Int("a", -1, "for test") b := flagSet.Bool("b", true, "for test") c := flagSet.String("c", "-1", "for test") da := flagSet.Int("d.a", -1, "for test") dd := flagSet.Bool("d.d", false, "for test") dc := flagSet.String("d.c", "-1", "for test") e := loadConfig("config_test.txt", &flagSet, false) if nil != e { t.Error(e) return } if *a != 1 { t.Error("a != 1") } if !*b { t.Error("b != true") } if "abc" != *c { t.Error("c != \"abc\"") } if *da != 1323 { t.Error("d.a != 1323") } if !*dd { t.Error("db != true") } if "67" != *dc { t.Errorf("dc != \"67\", actual is %s", *dc) } }
func commandVtGateExecute(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { server := subFlags.String("server", "", "VtGate server to connect to") bindVariables := newBindvars(subFlags) connectTimeout := subFlags.Duration("connect_timeout", 30*time.Second, "Connection timeout for vtgate client") tabletType := subFlags.String("tablet_type", "master", "tablet type to query") json := subFlags.Bool("json", false, "Output JSON instead of human-readable table") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() != 1 { return fmt.Errorf("the <sql> argument is required for the VtGateExecute command") } t, err := parseTabletType(*tabletType, []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY}) if err != nil { return err } vtgateConn, err := vtgateconn.Dial(ctx, *server, *connectTimeout) if err != nil { return fmt.Errorf("error connecting to vtgate '%v': %v", *server, err) } defer vtgateConn.Close() qr, err := vtgateConn.Execute(ctx, subFlags.Arg(0), *bindVariables, t) if err != nil { return fmt.Errorf("Execute failed: %v", err) } if *json { return printJSON(wr.Logger(), qr) } printQueryResult(loggerWriter{wr.Logger()}, qr) return nil }
func multisnapshotCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) { concurrency := subFlags.Int("concurrency", 8, "how many compression jobs to run simultaneously") spec := subFlags.String("spec", "-", "shard specification") tablesString := subFlags.String("tables", "", "dump only this comma separated list of tables") skipSlaveRestart := subFlags.Bool("skip-slave-restart", false, "after the snapshot is done, do not restart slave replication") maximumFilesize := subFlags.Uint64("maximum-file-size", 128*1024*1024, "the maximum size for an uncompressed data file") subFlags.Parse(args) if subFlags.NArg() != 2 { relog.Fatal("action multisnapshot requires <db name> <key name>") } shards, err := key.ParseShardingSpec(*spec) if err != nil { relog.Fatal("multisnapshot failed: %v", err) } var tables []string if *tablesString != "" { tables = strings.Split(*tablesString, ",") } filenames, err := mysqld.CreateMultiSnapshot(shards, subFlags.Arg(0), subFlags.Arg(1), tabletAddr, false, *concurrency, tables, *skipSlaveRestart, *maximumFilesize, nil) if err != nil { relog.Fatal("multisnapshot failed: %v", err) } else { relog.Info("manifest locations: %v", filenames) } }
func ParseServiceFlags(scfg *ServiceConfig, flagset *flag.FlagSet, argv []string) (config *ServiceConfig, args []string) { rpcAddr := flagset.String("l", GetDefaultBindAddr(), "host:port to listen on for RPC") adminAddr := flagset.String("admin", GetDefaultBindAddr(), "host:port to listen on for admin") err := flagset.Parse(argv) args = flagset.Args() if err == flag.ErrHelp { // -help was given, pass it on to caller who // may decide to quit instead of continuing args = append(args, "-help") } rpcBA, err := BindAddrFromString(*rpcAddr) if err != nil { panic(err) } adminBA, err := BindAddrFromString(*adminAddr) if err != nil { panic(err) } scfg.ServiceAddr = rpcBA scfg.AdminAddr = adminBA return scfg, args }
// HTTPAddrFlag returns a pointer to a string that will be populated // when the given flagset is parsed with the HTTP address of the Consul. func HTTPAddrFlag(f *flag.FlagSet) *string { defaultHTTPAddr := os.Getenv(HTTPAddrEnvName) if defaultHTTPAddr == "" { defaultHTTPAddr = "127.0.0.1:8500" } return f.String("http-addr", defaultHTTPAddr, "HTTP address of the Consul agent") }
// RPCAddrFlag returns a pointer to a string that will be populated // when the given flagset is parsed with the RPC address of the Consul. func RPCAddrFlag(f *flag.FlagSet) *string { defaultRPCAddr := os.Getenv(RPCAddrEnvName) if defaultRPCAddr == "" { defaultRPCAddr = "127.0.0.1:8400" } return f.String("rpc-addr", defaultRPCAddr, "RPC address of the Consul agent") }
// HTTPAddrFlag returns a pointer to a string that will be populated // when the given flagset is parsed with the HTTP address of the Consul. func HTTPAddrFlag(f *flag.FlagSet) *string { defaultHTTPAddr := os.Getenv(HTTPAddrEnvName) if defaultHTTPAddr == "" { defaultHTTPAddr = "127.0.0.1:8091" } return f.String("http-addr", defaultHTTPAddr, "HTTP address of the Couchbase server") }
// RPCAddrFlag returns a pointer to a string that will be populated
// when the given flagset is parsed with the RPC address of the Serf.
func RPCAddrFlag(f *flag.FlagSet) *string {
	// SERF_RPC_ADDR in the environment takes precedence over the default.
	addr := os.Getenv("SERF_RPC_ADDR")
	if addr == "" {
		addr = "127.0.0.1:7373"
	}
	return f.String("rpc-addr", addr, "RPC address of the Serf agent")
}
func initCmd(mysqld *mysqlctl.Mysqld, subFlags *flag.FlagSet, args []string) { waitTime := subFlags.Duration("wait_time", mysqlctl.MysqlWaitTime, "how long to wait for startup") bootstrapArchive := subFlags.String("bootstrap_archive", "mysql-db-dir.tbz", "name of bootstrap archive within vitess/data/bootstrap directory") skipSchema := subFlags.Bool("skip_schema", false, "don't apply initial schema") subFlags.Parse(args) if err := mysqld.Init(*waitTime, *bootstrapArchive, *skipSchema); err != nil { log.Fatalf("failed init mysql: %v", err) } }
func (c *flags) addSet(s *flag.FlagSet) { cassandraAddr := s.String("cassandra-addr", "", "Address to a single Cassandra node") c.cassandraAddr = cassandraAddr cassandraRepl := s.Int("cassandra-repl", 1, "Replication factor to use for the oinker keyspace in Cassandra") c.cassandraRepl = cassandraRepl address := s.String("address", "0.0.0.0:8080", "host:port on which to listen") c.address = address }
/* * This function must be implemented by any plugin because it is part of the * plugin interface defined by the core CLI. * * Run(....) is the entry point when the core CLI is invoking a command defined * by a plugin. The first parameter, plugin.CliConnection, is a struct that can * be used to invoke cli commands. The second parameter, args, is a slice of * strings. args[0] will be the name of the command, and will be followed by * any additional arguments a cli user typed in. * * Any error handling should be handled with the plugin itself (this means printing * user facing errors). The CLI will exit 0 if the plugin exits 0 and will exit * 1 should the plugin exits nonzero. */ func (c *JanitorPlugin) Run(cliConnection plugin.CliConnection, args []string) { c.cliConnection = cliConnection if args[0] != "CLI-MESSAGE-UNINSTALL" { fs := new(flag.FlagSet) c.before = fs.String("before", "", "") c.ignore = fs.String("ignore", "", "") fs.Parse(args[1:]) c.execute() } }
func (f StringFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { if envVal := os.Getenv(f.EnvVar); envVal != "" { f.Value = envVal } } eachName(f.Name, func(name string) { set.String(name, f.Value, f.Usage) }) }
func addGlobalOpts(set *flag.FlagSet) { for _, flg := range GlobalOptions { lName := longName(flg) switch f := flg.(type) { case cli.BoolFlag: set.Bool(lName, false, f.Usage) case cli.StringFlag: set.String(lName, f.Value, f.Usage) } } }