// loadFactory returns the configured factory instance for execing containers.
func loadFactory(context *cli.Context) (libcontainer.Factory, error) {
	var (
		debug = "false"
		root  = context.GlobalString("root")
	)
	if context.GlobalBool("debug") {
		debug = "true"
	}
	abs, err := filepath.Abs(root)
	if err != nil {
		return nil, err
	}
	logAbs, err := filepath.Abs(context.GlobalString("log"))
	if err != nil {
		return nil, err
	}
	return libcontainer.New(abs, libcontainer.Cgroupfs,
		func(l *libcontainer.LinuxFactory) error {
			l.CriuPath = context.GlobalString("criu")
			return nil
		},
		libcontainer.InitArgs(os.Args[0],
			"--log", logAbs,
			"--log-format", context.GlobalString("log-format"),
			fmt.Sprintf("--debug=%s", debug),
			"init"),
	)
}
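// A minimal sketch of how loadFactory is typically consumed, assuming the
// libcontainer.Factory interface of this era exposes Load(id) (Container, error).
// The getContainer helper below is an illustration, not part of the snippet above.
func getContainer(context *cli.Context) (libcontainer.Container, error) {
	factory, err := loadFactory(context)
	if err != nil {
		return nil, err
	}
	// Load an existing container's state from the factory's root directory.
	return factory.Load(context.Args().First())
}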
func createImageCommandFunc(c *cli.Context) {
	var image string
	if len(c.Args()) == 0 {
		log.Fatal("You need to specify an image")
	} else {
		image = c.Args()[0]
	}

	clnt := client.New(c.GlobalString("server"))
	if c.GlobalBool("debug") {
		clnt.SetDebug()
	}

	s := client.Image{
		Image:     image,
		Type:      prompt.String("Type", prompt.Prompt{Default: "docker", FuncPtr: prompt.Enum, FuncInp: "file,docker"}),
		BootTagID: *chooseTag(clnt, ""),
	}

	// Is this correct?
	fmt.Println(string(s.JSON()))
	if !prompt.Bool("Is this correct", true) {
		os.Exit(1)
	}

	// Create image
	clnt.Image.Create(&s)
}
// MakeChain creates a chain manager from set command line flags.
func MakeChain(ctx *cli.Context) (chain *core.ChainManager, chainDb common.Database) {
	datadir := ctx.GlobalString(DataDirFlag.Name)
	cache := ctx.GlobalInt(CacheFlag.Name)

	var err error
	if chainDb, err = ethdb.NewLDBDatabase(filepath.Join(datadir, "chaindata"), cache); err != nil {
		Fatalf("Could not open database: %v", err)
	}
	if ctx.GlobalBool(OlympicFlag.Name) {
		InitOlympic()
		_, err := core.WriteTestNetGenesisBlock(chainDb, 42)
		if err != nil {
			glog.Fatalln(err)
		}
	}

	eventMux := new(event.TypeMux)
	pow := ethash.New()
	//genesis := core.GenesisBlock(uint64(ctx.GlobalInt(GenesisNonceFlag.Name)), blockDB)
	chain, err = core.NewChainManager(chainDb, nil, pow, eventMux)
	if err != nil {
		Fatalf("Could not start chainmanager: %v", err)
	}

	proc := core.NewBlockProcessor(chainDb, pow, chain, eventMux)
	chain.SetProcessor(proc)

	return chain, chainDb
}
func BeforeApp(c *cli.Context) error {
	if c.GlobalBool("verbose") {
		logrus.SetLevel(logrus.DebugLevel)
	}
	logrus.Warning("Note: This is an experimental alternate implementation of the Compose CLI (https://github.com/docker/compose)")
	return nil
}
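// A minimal sketch of how a Before hook such as BeforeApp is wired into a
// urfave/cli (v1) application. The app name and flag set here are assumptions
// for illustration only and are not taken from the original project.
func newExampleApp() *cli.App {
	app := cli.NewApp()
	app.Name = "example" // hypothetical binary name
	app.Flags = []cli.Flag{
		cli.BoolFlag{Name: "verbose", Usage: "enable debug logging"},
	}
	// The Before hook runs after flag parsing and before any command's Action,
	// which is why these snippets read global flags via c.GlobalBool/GlobalString.
	app.Before = BeforeApp
	return app
}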
func preload(context *cli.Context) error {
	if context.GlobalBool("debug") {
		logger.Level = logrus.DebugLevel
	}
	config = loadConfig(context.GlobalString("config"))
	return nil
}
func updateSiteCommandFunc(c *cli.Context) {
	if len(c.Args()) == 0 {
		log.Fatal("You need to specify a site")
	}
	site := c.Args()[0]

	clnt := client.New(c.GlobalString("server"))
	if c.GlobalBool("debug") {
		clnt.SetDebug()
	}

	v, err := clnt.Site.Get(site)
	if err != nil {
		log.Fatal(err.Error())
	}

	s := client.Site{
		ID:                 v.ID,
		Site:               prompt.String("Site", prompt.Prompt{Default: v.Site, FuncPtr: prompt.Regex, FuncInp: ""}),
		Domain:             prompt.String("Domain", prompt.Prompt{Default: v.Domain, FuncPtr: prompt.Regex, FuncInp: ""}),
		DNS:                strings.Split(prompt.String("DNS", prompt.Prompt{Default: strings.Join(v.DNS, ","), FuncPtr: validateIPv4List, FuncInp: ""}), ","),
		DockerRegistry:     prompt.String("Docker Registry", prompt.Prompt{Default: v.DockerRegistry, FuncPtr: prompt.Regex, FuncInp: ""}),
		ArtifactRepository: prompt.String("Artifact Repository", prompt.Prompt{Default: v.ArtifactRepository, FuncPtr: prompt.Regex, FuncInp: ""}),
		NamingScheme:       prompt.String("Naming Scheme", prompt.Prompt{Default: v.NamingScheme, FuncPtr: prompt.Enum, FuncInp: "serial-number,hardware-address,external"}),
		PXETheme:           prompt.String("PXE Theme", prompt.Prompt{Default: v.PXETheme, FuncPtr: prompt.Regex, FuncInp: ""}),
	}

	// Update site
	clnt.Site.Update(site, &s)
}
func newTransmitter(c *cli.Context) *smpp.Transmitter {
	tx := &smpp.Transmitter{
		Addr:   c.GlobalString("addr"),
		User:   os.Getenv("SMPP_USER"),
		Passwd: os.Getenv("SMPP_PASSWD"),
	}
	if s := c.GlobalString("user"); s != "" {
		tx.User = s
	}
	if s := c.GlobalString("passwd"); s != "" {
		tx.Passwd = s
	}
	if c.GlobalBool("tls") {
		host, _, _ := net.SplitHostPort(tx.Addr)
		tx.TLS = &tls.Config{
			ServerName: host,
		}
		if c.GlobalBool("precaire") {
			tx.TLS.InsecureSkipVerify = true
		}
	}
	conn := <-tx.Bind()
	switch conn.Status() {
	case smpp.Connected:
	default:
		log.Fatalln("Connection failed:", conn.Error())
	}
	return tx
}
func createSubnetCommandFunc(c *cli.Context) {
	var subnet string
	if len(c.Args()) == 0 {
		log.Fatal("You need to specify a subnet")
	} else {
		subnet = c.Args()[0]
	}

	clnt := client.New(c.GlobalString("server"))
	if c.GlobalBool("debug") {
		clnt.SetDebug()
	}

	if c.Bool("prompt") {
		s := client.Subnet{
			Subnet: subnet,
			// Calculate automatically based on subnet/prefix
			Mask: prompt.String("Mask", prompt.Prompt{NoDefault: true, FuncPtr: validateIPv4, FuncInp: ""}),
			// Default to .254 for subnet
			Gw:     prompt.String("Gateway", prompt.Prompt{NoDefault: true, FuncPtr: validateIPv4, FuncInp: ""}),
			SiteID: *chooseSite(clnt, ""),
		}

		// Create subnet
		clnt.Subnet.Create(&s)
		return
	}
}
func cmdGet(c *cli.Context) {
	verbose = c.GlobalBool("verbose")

	path := c.Args().Get(0)
	key := c.String("key")
	files := strings.Split(c.String("file"), ",")
	envs := strings.Split(c.String("env"), ",")

	if len(path) < 1 {
		path = "." // default to current directory
	}
	if len(key) < 1 {
		key = strings.ToLower(filepath.Base(files[0]))
	}

	hash := checksum(files, envs)
	fullPath := filepath.Join(CachePath, strings.Join([]string{key, hash}, "-")) + ".tar.gz"

	// get cache if exists
	if _, err := os.Stat(fullPath); err == nil {
		printInfo("Fetching cache '%s'. Please, wait...", key)
		args := []string{
			"-xzf", fullPath,
			"-C", path,
		}
		err := exec.Command("tar", args...).Run()
		checkError(err)
	}
}
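// A hypothetical companion to cmdGet, sketched here to show the other half of
// the key+checksum cache layout. cmdSave is not part of the original snippet;
// it reuses only the helpers already visible above (checksum, printInfo,
// checkError, CachePath).
func cmdSave(c *cli.Context) {
	path := c.Args().Get(0)
	key := c.String("key")
	files := strings.Split(c.String("file"), ",")
	envs := strings.Split(c.String("env"), ",")

	if len(path) < 1 {
		path = "."
	}
	if len(key) < 1 {
		key = strings.ToLower(filepath.Base(files[0]))
	}

	// Same naming scheme as cmdGet: <key>-<checksum>.tar.gz under CachePath.
	hash := checksum(files, envs)
	fullPath := filepath.Join(CachePath, strings.Join([]string{key, hash}, "-")) + ".tar.gz"

	printInfo("Saving cache '%s'. Please, wait...", key)
	err := exec.Command("tar", "-czf", fullPath, "-C", path, ".").Run()
	checkError(err)
}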
func revelIndex(ctx *cli.Context) {
	template_dir, err := default_template_dir(ctx)
	if err != nil {
		fmt.Println(err.Error())
		return
	}

	force := ctx.GlobalBool("force")
	theme := ctx.String("theme")
	if theme == "" {
		fmt.Println("no template theme specified, please provide one with -t")
		return
	}

	args := ctx.Args()
	if len(args) == 0 {
		fmt.Println("Usage: scaffold revel index <project>")
		return
	}

	if err := revel_index(args[0], template_dir, theme, force); err != nil {
		fmt.Println("scaffold revel index <"+args[0]+"> failed:", err)
		return
	}
}
func updateTagCommandFunc(c *cli.Context) {
	if len(c.Args()) == 0 {
		log.Fatal("You need to specify a tag")
	}
	tag := c.Args()[0]

	clnt := client.New(c.GlobalString("server"))
	if c.GlobalBool("debug") {
		clnt.SetDebug()
	}

	v, err := clnt.Tag.Get(tag)
	if err != nil {
		log.Fatal(err.Error())
	}

	if c.Bool("prompt") {
		s := client.Tag{
			ID:      v.ID,
			Tag:     prompt.String("Tag", prompt.Prompt{Default: v.Tag, FuncPtr: prompt.Regex, FuncInp: ""}),
			Created: prompt.String("Created", prompt.Prompt{Default: v.Created, FuncPtr: prompt.Regex, FuncInp: ""}),
			SHA256:  prompt.String("SHA256", prompt.Prompt{Default: v.SHA256, FuncPtr: prompt.Regex, FuncInp: "^[0-9a-f]+$"}),
			ImageID: *chooseImage(clnt, v.ImageID),
		}

		// Update tag
		clnt.Tag.Update(tag, &s)
		return
	}
}
// MakeChain creates a chain manager from set command line flags.
func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database) {
	datadir := MustMakeDataDir(ctx)
	cache := ctx.GlobalInt(CacheFlag.Name)
	handles := MakeDatabaseHandles()

	var err error
	if chainDb, err = ethdb.NewLDBDatabase(filepath.Join(datadir, "chaindata"), cache, handles); err != nil {
		Fatalf("Could not open database: %v", err)
	}
	if ctx.GlobalBool(OlympicFlag.Name) {
		_, err := core.WriteTestNetGenesisBlock(chainDb)
		if err != nil {
			glog.Fatalln(err)
		}
	}

	eventMux := new(event.TypeMux)
	pow := ethash.New()
	//genesis := core.GenesisBlock(uint64(ctx.GlobalInt(GenesisNonceFlag.Name)), blockDB)
	chain, err = core.NewBlockChain(chainDb, pow, eventMux)
	if err != nil {
		Fatalf("Could not start chainmanager: %v", err)
	}

	return chain, chainDb
}
func runPing(c *cli.Context) {
	if c.GlobalBool("verbose") {
		fmt.Println("Connecting to mikrotik...")
	}
	conf, err := getConfig(c)
	assert(err)
	// fmt.Printf("config: %#v", conf.Secret)

	m := Routerboard{Config: &conf, Verbose: c.GlobalBool("verbose")}
	// fmt.Printf("m.Config.Hosts: %v\n", m.Config.Hosts)
	// debug(conf)

	for i, h := range m.Config.Hosts {
		err, code, res := m.RouterboardResource(h.Ip, h.Username, h.Passwd)
		if err != nil {
			// Unexpected result on login
			// fmt.Printf("Error: %v\n", err.Error())
			if code == 1 {
				fmt.Printf("%v) Error no ping: %s [ %s ] %s (# %s)\n", i+1, h.Name, h.Ip, h.Adress, h.Dogovor)
			} else if code == 2 {
				fmt.Printf("%v) Error no connect: %s [ %s ] %s (# %s)\n", i+1, h.Name, h.Ip, h.Adress, h.Dogovor)
			}
		}
		if m.Verbose && code == 200 {
			fmt.Printf("%v) %s [ %s ] %s | cpu load %s\n", i+1, h.Name, h.Ip, h.Adress, res.CpuLoad)
		}
	}
	// h := Host{Ip: '178.151.111.129', name: 'kievhleb032', adress: 'Харківське шосе, 144-б 2254709', ping: true, changefreq: 'weekly', priority: 1.0, username: '******', passwd: '1AzRss53' },
}
func runShowFireWall(c *cli.Context) {
	conf, err := getConfig(c)
	assert(err)

	m := Routerboard{Config: &conf, Verbose: c.GlobalBool("verbose")}

	err, _ = m.RouterboardInterface("178.151.111.129", "admin", "1AzRss53")
	assert(err)
}
func setupLogging(c *cli.Context) error {
	if c.GlobalBool("debug") {
		log.SetLevel(log.DebugLevel)
		log.Debug("Set loglevel to debug")
	}
	return nil
}
func actionStatus(c *cli.Context) {
	err, profile := loadProfile(c, true)
	if err != nil {
		panic(err)
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Id", "Name", "Cert", "Type", "URL"})

	maxCacheAge := profile.MaxCacheAge
	if c.GlobalBool("flushCache") {
		maxCacheAge = 0
	}

	err, instances := getInstances(profile.Region, maxCacheAge, profile)
	if err != nil {
		panic(err)
	}

	for _, instance := range instances {
		table.Append([]string{
			instance.Id,
			instance.Name,
			instance.CertName,
			instance.InstanceType,
			instance.PublicDnsName,
		})
	}

	table.Render()
}
func NewLogrusLogger(ctx *cli.Context) Logger {
	logFile := ctx.GlobalString("log-file")
	if logFile != "" {
		if err := os.MkdirAll(path.Dir(logFile), 0755); err != nil {
			logrus.Errorf("Failed to create path %s: %s", path.Dir(logFile), err.Error())
		} else {
			file, err := os.OpenFile(logFile, syscall.O_CREAT|syscall.O_APPEND|syscall.O_WRONLY, 0644)
			if err != nil {
				logrus.Errorf("Failed to open log file %s: %s", logFile, err.Error())
			} else {
				logrus.SetOutput(file)
			}
		}
	}

	switch ctx.GlobalString("log-format") {
	case "json":
		logrus.SetFormatter(&logrus.JSONFormatter{})
	case "logstash":
		logrus.SetFormatter(&logstash.LogstashFormatter{})
	default:
		logrus.SetFormatter(&logrus.TextFormatter{})
	}

	if ctx.GlobalBool("debug") {
		logrus.SetLevel(logrus.DebugLevel)
	}

	return &logrusLogger{
		logger: logrus.StandardLogger(),
	}
}
func actionAlias(c *cli.Context) {
	err, profile := loadProfile(c, true)
	if err != nil {
		panic(err)
	}

	sshCertificateLocation := ""
	if profile.CertLocation != "" {
		if _, err := os.Stat(profile.CertLocation); err != nil {
			exit("Cannot find certificate")
		}
		sshCertificateLocation = fmt.Sprintf(" -i %s", profile.CertLocation)
	}

	maxCacheAge := profile.MaxCacheAge
	if c.GlobalBool("flushCache") {
		maxCacheAge = 0
	}

	err, instances := getInstances(profile.Region, maxCacheAge, profile)
	if err != nil {
		panic(err)
	}

	for _, instance := range instances {
		name := fmt.Sprintf("%s_%s", instance.Name, instance.Id)
		name = regexWhiteChars.ReplaceAllString(name, "_")
		fmt.Println(fmt.Sprintf(
			"alias %s%s=\"ssh%s %s@%s\"",
			profile.AliasPrefix,
			strings.ToLower(name),
			sshCertificateLocation,
			profile.User,
			instance.Addr,
		))
	}
}
func initEnv(ctx *cli.Context) error {
	if ctx.GlobalBool("debug") {
		common.SetupLOG(log, "DEBUG")
	} else {
		common.SetupLOG(log, "INFO")
	}

	if noDaemon {
		client = NewLBClient()
	} else {
		var (
			c   *cnc.Client
			err error
		)
		if host := ctx.GlobalString("host"); host == "" {
			c, err = cnc.NewDefaultClient()
		} else {
			c, err = cnc.NewClient(host, nil)
		}
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error while creating cilium-client: %s\n", err)
			return fmt.Errorf("Error while creating cilium-client: %s", err)
		}
		client = c
	}
	return nil
}
// Setup initializes profiling and logging based on the CLI flags.
// It should be called as early as possible in the program.
func Setup(ctx *cli.Context) error {
	// logging
	glog.CopyStandardLogTo("INFO")
	glog.SetToStderr(true)

	// profiling, tracing
	runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
	Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name))
	if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" {
		if err := Handler.StartTrace(traceFile); err != nil {
			return err
		}
	}
	if cpuFile := ctx.GlobalString(cpuprofileFlag.Name); cpuFile != "" {
		if err := Handler.StartCPUProfile(cpuFile); err != nil {
			return err
		}
	}

	// pprof server
	if ctx.GlobalBool(pprofFlag.Name) {
		address := fmt.Sprintf("127.0.0.1:%d", ctx.GlobalInt(pprofPortFlag.Name))
		go func() {
			glog.V(logger.Info).Infof("starting pprof server at http://%s/debug/pprof", address)
			glog.Errorln(http.ListenAndServe(address, nil))
		}()
	}
	return nil
}
func doVolumeCreate(c *cli.Context) error {
	var err error

	name := c.Args().First()
	size, err := getSize(c, err)
	driverName, err := util.GetFlag(c, "driver", false, err)
	backupURL, err := util.GetFlag(c, "backup", false, err)
	if err != nil {
		return err
	}

	driverVolumeID := c.String("id")
	volumeType := c.String("type")
	iops := c.Int("iops")
	prepareForVM := c.Bool("vm")

	request := &api.VolumeCreateRequest{
		Name:           name,
		DriverName:     driverName,
		Size:           size,
		BackupURL:      backupURL,
		DriverVolumeID: driverVolumeID,
		Type:           volumeType,
		IOPS:           int64(iops),
		PrepareForVM:   prepareForVM,
		Verbose:        c.GlobalBool(verboseFlag),
	}

	url := "/volumes/create"
	return sendRequestAndPrint("POST", url, request)
}
// preload initializes any global options and configuration
// before the main or sub commands are run
func preload(c *cli.Context) (err error) {
	if c.GlobalBool("debug") {
		logrus.SetLevel(logrus.DebugLevel)
	}

	defaultGPGKey = c.GlobalString("keyid")

	home := homedir.Get()
	homeShort := homedir.GetShortcutString()

	// set the filestore variable
	filestore = strings.Replace(c.GlobalString("file"), homeShort, home, 1)

	// set gpg path variables
	gpgPath = strings.Replace(c.GlobalString("gpgpath"), homeShort, home, 1)
	publicKeyring = filepath.Join(gpgPath, "pubring.gpg")
	secretKeyring = filepath.Join(gpgPath, "secring.gpg")

	// if they passed an argument, run the prechecks
	// TODO(jfrazelle): this will run even if the command they issue
	// does not exist, which is kinda shitty
	if len(c.Args()) > 0 {
		preChecks()
	}

	// we need to read the secrets file for all commands
	// might as well be dry about it
	s, err = readSecretsFile(filestore)
	if err != nil {
		logrus.Fatal(err)
	}

	return nil
}
func storagepoolAgent(c *cli.Context) {
	healthCheckInterval := c.GlobalInt("healthcheck-interval")
	healthCheckBaseDir := c.GlobalString("healthcheck-basedir")
	healthCheckType := c.String("storagepool-healthcheck-type")

	cattleUrl := c.GlobalString("url")
	cattleAccessKey := c.GlobalString("access-key")
	cattleSecretKey := c.GlobalString("secret-key")

	if c.GlobalBool("debug") {
		log.SetLevel(log.DebugLevel)
	}

	storagepoolRootDir := c.GlobalString("storagepool-rootdir")
	driver := c.GlobalString("storagepool-driver")
	if driver == "" {
		log.Fatal("required field storagepool-driver has not been set")
	}

	cattleClient, err := cattle.NewCattleClient(cattleUrl, cattleAccessKey, cattleSecretKey)
	if err != nil {
		log.Fatal(err)
	}

	storagepoolAgent := NewStoragepoolAgent(healthCheckInterval, storagepoolRootDir, driver, healthCheckBaseDir, healthCheckType, cattleClient)

	metadataUrl := c.String("storagepool-metadata-url")
	if err := storagepoolAgent.Run(metadataUrl); err != nil {
		log.Fatal(err)
	}
}
func processGlobalFlags(c *cli.Context) time.Duration {
	var network string
	if c.GlobalBool("tcp") {
		network = "tcp"
	} else {
		network = "udp"
	}

	client, err := raidman.Dial(network, fmt.Sprintf("%s:%d", c.GlobalString("host"), c.GlobalInt("port")))
	if c.GlobalString("event-host") == "nil" {
		log.Panic("Failed to automatically get the hostname. Please specify it with --host")
	}
	if err != nil {
		log.Panicf("Failed to connect to the riemann host because %s", err)
	}

	attribute, err := processAttributes(c.GlobalStringSlice("attribute"))
	if err != nil {
		log.Panic(err)
	}

	eventTemplate := raidman.Event{
		Ttl:        float32(c.GlobalDuration("ttl").Seconds()),
		Tags:       c.GlobalStringSlice("tags"),
		Host:       c.GlobalString("event-host"),
		Attributes: attribute,
	}

	riemannSend = func(url, method string, duration float64) {
		event := eventTemplate
		event.Service = fmt.Sprintf("%s %s", url, method)
		event.Time = time.Now().Unix()
		event.Metric = duration
		client.Send(&event)
	}

	return c.GlobalDuration("interval")
}
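// A minimal usage sketch, assuming processGlobalFlags is called once at startup
// and that riemannSend is the package-level hook it assigns above. The probe
// loop, the urls argument, and the use of http.Get are hypothetical
// illustrations, not part of the original program.
func runProbeLoop(c *cli.Context, urls []string) {
	interval := processGlobalFlags(c)
	for range time.Tick(interval) {
		for _, url := range urls {
			start := time.Now()
			if resp, err := http.Get(url); err == nil {
				resp.Body.Close()
			}
			// Report the request duration (seconds) to Riemann via the closure
			// built by processGlobalFlags.
			riemannSend(url, "GET", time.Since(start).Seconds())
		}
	}
}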
// fetch data for subreddits based on configuration provided
func load(configuration *Configuration, context *cli.Context) {
	timeout := time.Duration(context.GlobalInt("timeout"))
	jsonOut := context.GlobalBool("json")
	browserOut := context.GlobalBool("browser")
	port := context.GlobalString("port")

	if len(configuration.Subreddits) == 0 {
		log.Fatalln("No subreddits found")
	}

	results := make(chan *Subreddit, len(configuration.Subreddits))
	for _, subreddit := range configuration.Subreddits {
		go fetch(subreddit, results)
	}
	collect(results, configuration, timeout)

	if browserOut {
		browserOutput(configuration.Subreddits, port)
	} else if jsonOut {
		jsonOutput(configuration.Subreddits)
	} else {
		prettyOutput(configuration.Subreddits)
	}
}
func fmtOutput(c *cli.Context, format *Format) {
	jsonOut := c.GlobalBool("json")

	outFd := os.Stdout
	if format.Err != "" {
		outFd = os.Stderr
	}

	if jsonOut {
		b, _ := json.MarshalIndent(format, "", " ")
		fmt.Fprintf(outFd, "%+v\n", string(b))
		return
	}

	if format.Err == "" {
		if format.Result == nil {
			for _, v := range format.UUID {
				fmt.Fprintln(outFd, v)
			}
			return
		}
		b, _ := json.MarshalIndent(format.Result, "", " ")
		fmt.Fprintf(outFd, "%+v\n", string(b))
		return
	}

	if format.Desc != "" {
		fmt.Fprintf(outFd, "%s: %v - %s\n", format.Cmd, format.Err, format.Desc)
		return
	}
	fmt.Fprintf(outFd, "%s: %v\n", format.Cmd, format.Err)
}
func (c *clusterClient) enumerate(context *cli.Context) {
	c.clusterOptions(context)
	jsonOut := context.GlobalBool("json")
	outFd := os.Stdout
	fn := "enumerate"

	cluster, err := c.manager.Enumerate()
	if err != nil {
		cmdError(context, fn, err)
		return
	}

	if jsonOut {
		fmtOutput(context, &Format{Cluster: &cluster})
	} else {
		w := new(tabwriter.Writer)
		w.Init(outFd, 12, 12, 1, ' ', 0)

		fmt.Fprintln(w, "ID\t IMAGE\t STATUS\t NAMES\t NODE")
		for _, n := range cluster.Nodes {
			for _, c := range n.Containers {
				fmt.Fprintln(w, c.ID, "\t", c.Image, "\t", c.Status, "\t", c.Names, "\t", n.Ip)
			}
		}
		fmt.Fprintln(w)
		w.Flush()
	}
}
// multiplexAction uses the arguments passed via the command line and
// multiplexes them across multiple SSH connections
func multiplexAction(context *cli.Context) {
	c, err := newCommand(context)
	if err != nil {
		log.Fatal(err)
	}
	log.Debug(c)

	hosts, err := loadHosts(context)
	if err != nil {
		log.Fatal(err)
	}

	sections, err := parseSshConfigFile(filepath.Join(os.Getenv("HOME"), ".ssh", "config"))
	if err != nil {
		log.Fatal(err)
	}

	if len(hosts) == 0 {
		log.Fatal("no host specified for command to run")
	}
	log.Debugf("hosts %v", hosts)

	group := &sync.WaitGroup{}
	for _, h := range hosts {
		group.Add(1)
		go executeCommand(c, h, sections[h], context.GlobalBool("A"), context.GlobalBool("quiet"), group)
	}
	group.Wait()

	log.Debugf("finished executing %s on all hosts", c)
}
func (cli *CLI) init(ctx *cli.Context) error {
	if ctx.GlobalBool("prtg") {
		cli.Err, cli.Out = prtg.Err, prtg.Out
	}

	var err error
	if cli.cred, err = cli.Store.Load(); err == nil {
		cli.c, err = cli.Client(cli.cred.URL, cli.cred.User, cli.cred.Pass)
	}
	if err != nil {
		cli.cred = &Creds{ctx.GlobalString("addr"), ctx.GlobalString("user"), ctx.GlobalString("pass")}
		if cli.c, err = cli.Client(cli.cred.URL, cli.cred.User, cli.cred.Pass); err != nil {
			return err
		}
	}

	a, p, s, o := ctx.GlobalString("agent"), ctx.GlobalString("project"), ctx.GlobalString("stage"), ctx.GlobalString("output")
	if cli.a, err = regexp.Compile(a); err != nil {
		return err
	}
	if cli.p, err = regexp.Compile(p); err != nil {
		return err
	}
	if cli.s, err = regexp.Compile(s); err != nil {
		return err
	}
	if cli.o, err = regexp.Compile(o); err != nil {
		return err
	}
	if cli.d, err = time.ParseDuration(ctx.GlobalString("timeout")); err != nil {
		return err
	}

	cli.n, cli.rev = int64(ctx.GlobalInt("build")), ctx.GlobalString("revision")
	cli.c.SetTimeout(cli.d)
	return nil
}
// startNode boots up the system node and all registered protocols, after which
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
// miner.
func startNode(ctx *cli.Context, stack *node.Node) {
	// Start up the node itself
	utils.StartNode(stack)

	// Unlock any account specifically requested
	var ethereum *eth.Ethereum
	if err := stack.Service(&ethereum); err != nil {
		utils.Fatalf("ethereum service not running: %v", err)
	}
	accman := ethereum.AccountManager()
	passwords := utils.MakePasswordList(ctx)

	accounts := strings.Split(ctx.GlobalString(utils.UnlockedAccountFlag.Name), ",")
	for i, account := range accounts {
		if trimmed := strings.TrimSpace(account); trimmed != "" {
			unlockAccount(ctx, accman, trimmed, i, passwords)
		}
	}

	// Start auxiliary services if enabled
	if ctx.GlobalBool(utils.MiningEnabledFlag.Name) {
		if err := ethereum.StartMining(ctx.GlobalInt(utils.MinerThreadsFlag.Name), ctx.GlobalString(utils.MiningGPUFlag.Name)); err != nil {
			utils.Fatalf("Failed to start mining: %v", err)
		}
	}
}