func runAction(c *cli.Context) {
	cfg, err := loadConfig()
	if err != nil {
		logger.Fatal(err)
	}
	m := client.NewManager(cfg)
	if c.String("name") == "" {
		logger.Fatal("you must specify an image name")
	}
	env := parseEnvironmentVariables(c.StringSlice("env"))
	ports := parsePorts(c.StringSlice("port"))
	image := &citadel.Image{
		Name:        c.String("name"),
		Cpus:        c.Float64("cpus"),
		Memory:      c.Float64("memory"),
		Hostname:    c.String("hostname"),
		Domainname:  c.String("domain"),
		Labels:      c.StringSlice("label"),
		Args:        c.StringSlice("arg"),
		Environment: env,
		BindPorts:   ports,
		Type:        c.String("type"),
	}
	containers, err := m.Run(image, c.Int("count"), c.Bool("pull"))
	if err != nil {
		logger.Fatalf("error running container: %s\n", err)
	}
	// Use a distinct loop variable so the cli.Context parameter is not shadowed.
	for _, cnt := range containers {
		fmt.Printf("started %s on %s\n", cnt.ID[:12], cnt.Engine.ID)
	}
}
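// parseEnvironmentVariables and parsePorts are not shown above. A minimal
// sketch of the environment parser, assuming "KEY=VALUE" input pairs and a
// map-shaped Environment field on citadel.Image (both assumptions, not
// confirmed by the code above):
func parseEnvironmentVariables(vars []string) map[string]string {
	env := make(map[string]string)
	for _, v := range vars {
		// Split only on the first '=' so values may themselves contain '='.
		if parts := strings.SplitN(v, "=", 2); len(parts) == 2 {
			env[parts[0]] = parts[1]
		}
	}
	return env
}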
// populateFlags reads the values of the flags accepted by run out of the
// cli context and returns them in a freshly allocated flagStorage.
func populateFlags(c *cli.Context) (flags *flagStorage) {
	flags = &flagStorage{
		// File system
		MountOptions: make(map[string]string),
		DirMode:      os.FileMode(c.Int("dir-mode")),
		FileMode:     os.FileMode(c.Int("file-mode")),
		Uid:          int64(c.Int("uid")),
		Gid:          int64(c.Int("gid")),

		// GCS
		KeyFile:                            c.String("key-file"),
		EgressBandwidthLimitBytesPerSecond: c.Float64("limit-bytes-per-sec"),
		OpRateLimitHz:                      c.Float64("limit-ops-per-sec"),

		// Tuning
		StatCacheTTL: c.Duration("stat-cache-ttl"),
		TypeCacheTTL: c.Duration("type-cache-ttl"),
		TempDir:      c.String("temp-dir"),
		ImplicitDirs: c.Bool("implicit-dirs"),

		// Debugging
		DebugFuse:       c.Bool("debug_fuse"),
		DebugGCS:        c.Bool("debug_gcs"),
		DebugHTTP:       c.Bool("debug_http"),
		DebugInvariants: c.Bool("debug_invariants"),
	}

	// Handle the repeated "-o" flag.
	for _, o := range c.StringSlice("o") {
		mountpkg.ParseOptions(flags.MountOptions, o)
	}
	return
}
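// flagStorage itself is defined elsewhere in the repository. A sketch of
// the struct, reconstructed from the fields populateFlags fills in above
// (field names are taken verbatim; only these fields are reproduced):
type flagStorage struct {
	// File system
	MountOptions map[string]string
	DirMode      os.FileMode
	FileMode     os.FileMode
	Uid          int64
	Gid          int64

	// GCS
	KeyFile                            string
	EgressBandwidthLimitBytesPerSecond float64
	OpRateLimitHz                      float64

	// Tuning
	StatCacheTTL time.Duration
	TypeCacheTTL time.Duration
	TempDir      string
	ImplicitDirs bool

	// Debugging
	DebugFuse       bool
	DebugGCS        bool
	DebugHTTP       bool
	DebugInvariants bool
}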
func followAction(ctx *cli.Context) {
	addr := ctx.Args().First()
	if addr == "" {
		log.Fatal("need url")
	}
	addrP, err := url.Parse(addr)
	if err != nil {
		log.Fatalf("parse url err: %s", err)
	}
	limit := ctx.Int("limit")
	timeout := ctx.Float64("timeout")
	if timeout < 0 {
		log.Fatalf("timeout must not be negative")
	}
	// Abort the whole process if the deadline passes before we are done.
	go func() {
		time.Sleep(time.Duration(timeout*1000) * time.Millisecond)
		log.Fatalf("timeout %.2f sec. reached", timeout)
	}()
	if flDebug {
		log.Printf("redirects limit: %d", limit)
	}
	jar, err := cookiejar.New(nil)
	if err != nil {
		log.Fatalf("jar new err: %s", err)
	}
	if flDebug {
		log.Printf("target addr: %s", addrP.String())
	}
	addrP, err = getFinalURL(jar, addrP, limit)
	if err != nil {
		log.Fatalf("%s", err)
	}
	fmt.Println(addrP.String())
}
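// getFinalURL is not shown above. A minimal sketch, assuming it follows
// redirects by hand (up to `limit` hops) so the hop count can be enforced;
// the signature is inferred from the call site in followAction:
func getFinalURL(jar http.CookieJar, u *url.URL, limit int) (*url.URL, error) {
	client := &http.Client{
		Jar: jar,
		// Stop the client from following redirects on its own so that
		// each hop can be counted.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	for i := 0; i < limit; i++ {
		resp, err := client.Get(u.String())
		if err != nil {
			return nil, err
		}
		resp.Body.Close()
		loc := resp.Header.Get("Location")
		if resp.StatusCode < 300 || resp.StatusCode >= 400 || loc == "" {
			return u, nil // not a redirect: this is the final URL
		}
		next, err := u.Parse(loc)
		if err != nil {
			return nil, err
		}
		u = next
	}
	return nil, fmt.Errorf("redirect limit %d exceeded", limit)
}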
func nodeAction(c *cli.Context) {
	if c.Bool("debug") {
		log.SetLevel(log.DebugLevel)
	}
	nodeIp := c.String("ip")
	// If no host IP is specified, attempt to detect it.
	if nodeIp == "" {
		// Get the listening IP and check if we are running in "bridged" mode;
		// if so, exit and tell the user to run in "host" mode. This is so we
		// can properly report the IP of the host in port listings.
		addrs, err := net.InterfaceAddrs()
		if err != nil {
			log.Fatalf("unable to get network interfaces: %s", err)
		}
		log.Debugf("detecting machine ip...")
	loop:
		for _, addr := range addrs {
			i, _, err := net.ParseCIDR(addr.String())
			if err != nil {
				continue
			}
			ip := i.String()
			if i.To4() == nil {
				log.Debugf("skipping ipv6 address: %s", ip)
				continue
			}
			switch {
			case ip == "127.0.0.1":
				continue
			case strings.Contains(ip, "172.17"):
				continue
			case strings.Contains(ip, "fe80"):
				continue
			case ip == "::1":
				continue
			default:
				nodeIp = ip
				// A bare break here would only leave the switch; break out of
				// the loop via its label so the first usable address wins.
				break loop
			}
		}
		if nodeIp == "" {
			log.Fatalf("unable to run node: unable to detect machine IP -- use --net=host if you are running in a container")
		}
		log.Debugf("detected machine ip: %s", nodeIp)
	}
	node, err := node.NewNode(c.String("controller"), c.String("docker"), nil,
		c.Float64("cpus"), c.Float64("memory"), c.Int("heartbeat"), nodeIp,
		c.Bool("grid-containers"), c.Bool("debug"))
	if err != nil {
		log.Fatalf("error connecting to docker: %s", err)
	}
	node.Run()
	waitForInterrupt()
}
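// waitForInterrupt is not shown above. A plausible sketch, assuming it
// simply blocks until the process receives SIGINT or SIGTERM:
func waitForInterrupt() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	<-sigs // block until a signal arrives
}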
func runAction(c *cli.Context) {
	cfg, err := loadConfig(c)
	if err != nil {
		logger.Fatal(err)
	}
	m := client.NewManager(cfg)
	if c.String("name") == "" {
		logger.Fatal("you must specify an image name")
	}
	vols := c.StringSlice("vol")
	env := parseEnvironmentVariables(c.StringSlice("env"))
	ports := parsePorts(c.StringSlice("port"))
	links := parseContainerLinks(c.StringSlice("link"))
	policy, maxRetries, err := parseRestartPolicy(c.String("restart"))
	if err != nil {
		logger.Fatalf("error parsing restart policy: %s", err)
	}
	rp := citadel.RestartPolicy{
		Name:              policy,
		MaximumRetryCount: maxRetries,
	}
	image := &citadel.Image{
		Name:          c.String("name"),
		ContainerName: c.String("container-name"),
		Cpus:          c.Float64("cpus"),
		Cpuset:        c.String("cpuset"),
		Memory:        c.Float64("memory"),
		Hostname:      c.String("hostname"),
		Domainname:    c.String("domain"),
		NetworkMode:   c.String("network"),
		Labels:        c.StringSlice("label"),
		Args:          c.StringSlice("arg"),
		Environment:   env,
		Links:         links,
		Publish:       c.Bool("publish"),
		Volumes:       vols,
		BindPorts:     ports,
		RestartPolicy: rp,
		Type:          c.String("type"),
	}
	containers, err := m.Run(image, c.Int("count"), c.Bool("pull"))
	if err != nil {
		logger.Fatalf("error running container: %s\n", err)
	}
	// Use a distinct loop variable so the cli.Context parameter is not shadowed.
	for _, cnt := range containers {
		fmt.Printf("started %s on %s\n", cnt.ID[:12], cnt.Engine.ID)
	}
}
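// parseRestartPolicy is not shown above. A plausible sketch, assuming the
// Docker-style "name:max-retries" syntax (e.g. "on-failure:5"); the int64
// return type for the retry count is an assumption based on the field it
// feeds above:
func parseRestartPolicy(s string) (string, int64, error) {
	parts := strings.SplitN(s, ":", 2)
	if len(parts) == 1 {
		return parts[0], 0, nil
	}
	retries, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return "", 0, err
	}
	return parts[0], retries, nil
}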
// cmdEditMeasurement edits the value or date of the measurement with the
// given ID.
func cmdEditMeasurement(c *cli.Context) {
	// Check obligatory flags (id, file).
	if c.Int("id") < 0 {
		fmt.Fprintf(os.Stderr, "weightWatcher: missing ID parameter. Specify it with --id or -i flag.\n")
		return
	}
	if c.String("file") == "" {
		fmt.Fprintf(os.Stderr, "weightWatcher: missing file parameter. Specify it with --file or -f flag.\n")
		return
	}

	// Open the data file.
	db, err := getDataFile(c.String("file"))
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		return
	}
	defer db.Close()

	// Check that a measurement with the given ID exists.
	if !measurementExist(c.Int("id"), db) {
		fmt.Fprintf(os.Stderr, "weightWatcher: measurement with id=%d does not exist.\n", c.Int("id"))
		return
	}

	// Edit the data inside one transaction, using placeholders instead of
	// string interpolation so the inputs cannot break the SQL.
	tx, err := db.Begin()
	if err != nil {
		fmt.Fprintf(os.Stderr, "weightWatcher: %s\n", err)
		return
	}
	if c.String("date") != "" {
		if _, err = tx.Exec("UPDATE measurements SET day=? WHERE measurement_id=?;", c.String("date"), c.Int("id")); err != nil {
			tx.Rollback()
			fmt.Fprintf(os.Stderr, "weightWatcher: %s\n", err)
			return
		}
	}
	if c.Float64("weight") != 0 {
		if _, err = tx.Exec("UPDATE measurements SET measurement=? WHERE measurement_id=?;", c.Float64("weight"), c.Int("id")); err != nil {
			tx.Rollback()
			fmt.Fprintf(os.Stderr, "weightWatcher: %s\n", err)
			return
		}
	}
	if err = tx.Commit(); err != nil {
		fmt.Fprintf(os.Stderr, "weightWatcher: %s\n", err)
		return
	}

	// Show a summary if verbose.
	if c.Bool("verbose") {
		fmt.Fprintf(os.Stdout, "weightWatcher: edited measurement %3.2f in file %s with date %s.\n",
			c.Float64("weight"), c.String("file"), c.String("date"))
	}
}
func NewGoPow(c *cli.Context) (*GoPow, error) {
	config := &RunConfig{
		InputFile:   c.String("input"),
		OutputFile:  c.String("output"),
		Format:      c.String("format"),
		Annotations: !c.Bool("no-annotations"),
		MaxPower:    c.Float64("max-power"),
		MinPower:    c.Float64("min-power"),
	}
	if !c.IsSet("max-power") {
		config.MaxPower = PowerConfigAuto
	}
	if !c.IsSet("min-power") {
		config.MinPower = PowerConfigAuto
	}
	if config.InputFile == "" {
		return nil, fmt.Errorf("missing input file")
	}
	if config.Format == "" {
		config.Format = "png"
	}
	if config.OutputFile == "" {
		config.OutputFile = config.InputFile + "." + config.Format
	}
	log.WithFields(log.Fields{
		"input": config.InputFile,
	}).Info("GoPow init")
	log.WithFields(log.Fields{
		"output": config.OutputFile,
	}).Info("GoPow init")
	log.WithFields(log.Fields{
		"format": config.Format,
	}).Info("GoPow init")
	g := &GoPow{
		config: config,
	}
	return g, nil
}
func deployCmd(c *cli.Context) error {
	sb := core.ServiceConfigurationBuilder{
		ServiceId:             c.String("service-id"),
		RegistryDomain:        c.String("registry"),
		Namespace:             c.String("namespace"),
		Tag:                   c.String("tag"),
		Cpu:                   c.Float64("cpu"),
		Memory:                c.Float64("memory"),
		MinimumHealthCapacity: c.Float64("minimumHealthCapacity"),
		MaximumOverCapacity:   c.Float64("maximumOverCapacity"),
		SyslogTag:             c.String("syslog-tag"),
		Instances:             c.Int("instances"),
		JobUrl:                c.String("deploy-url"),
		ContactEmail:          c.String("contact-email"),
	}
	envs, err := util.ParseMultiFileLinesToArray(c.StringSlice("env-file"))
	if err != nil {
		logger.Instance().Fatalln("Error parsing environment files", err)
	}
	sb.AddEnvironmentVars(envs)
	sb.AddEnvironmentVars(c.StringSlice("env"))
	sb.AddConstraints(c.StringSlice("constraint"))
	sb.AddPorts(c.StringSlice("port"))
	sb.AddLabels(c.StringSlice("label"))

	handleDeploySigTerm(clusterManager)
	if clusterManager.DeployWithRollbackOnError(sb, c.Bool("force")) {
		logger.Instance().Infoln("Deployment READY")
		var resume []callbackResume
		for clusterKey, cluster := range clusterManager.Clusters() {
			logger.Instance().Infof("Services in Cluster %s :", clusterKey)
			for _, service := range cluster.Services() {
				for _, instance := range service.Instances {
					for _, val := range instance.Ports {
						logger.Instance().Infof("Deployed %s at host %s and address %+v", instance.ID, instance.Host, val)
						instanceInfo := callbackResume{
							Id:         instance.ID,
							Address:    instance.Host + ":" + strconv.FormatInt(val.Internal, 10),
							ClusterKey: clusterKey,
						}
						resume = append(resume, instanceInfo)
					}
				}
			}
		}
		//jsonResume, _ := json.Marshal(resume)
		//fmt.Println(string(jsonResume))
		return nil
	}
	return fmt.Errorf("Deployment-Process terminated with errors")
}
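// callbackResume is defined elsewhere; a sketch reconstructed from the
// composite literal above (the JSON tags are assumptions, suggested by the
// commented-out json.Marshal call):
type callbackResume struct {
	Id         string `json:"id"`
	Address    string `json:"address"`
	ClusterKey string `json:"clusterKey"`
}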
// cmdAddMeasurement adds a measurement to the data file.
func cmdAddMeasurement(c *cli.Context) {
	// Check obligatory flags (file, date, measurement).
	if c.String("file") == "" {
		fmt.Fprintf(os.Stderr, "weightWatcher: missing file parameter. Specify it with --file or -f flag.\n")
		return
	}
	if c.String("date") == "" {
		fmt.Fprintf(os.Stderr, "weightWatcher: missing date parameter. Specify it with --date or -d flag.\n")
		return
	}
	if c.Float64("weight") == 0 {
		fmt.Fprintf(os.Stderr, "weightWatcher: missing weight parameter. Specify it with --weight or -w flag.\n")
		return
	}

	// Open the data file.
	db, err := getDataFile(c.String("file"))
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		return
	}
	defer db.Close()

	// Add the data to the file, using placeholders instead of string
	// interpolation so the inputs cannot break the SQL.
	_, err = db.Exec("INSERT INTO measurements VALUES (NULL, ?, ?);", c.String("date"), c.Float64("weight"))
	if err != nil {
		fmt.Fprintf(os.Stderr, "weightWatcher: %s\n", err)
		return
	}

	// Show a summary if verbose.
	if c.Bool("verbose") {
		fmt.Fprintf(os.Stdout, "weightWatcher: added measurement %3.2f to file %s with date %s.\n",
			c.Float64("weight"), c.String("file"), c.String("date"))
	}
}
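// The statements in cmdAddMeasurement and cmdEditMeasurement imply a
// three-column table whose first column is an auto-assigned id. A
// hypothetical schema consistent with both functions (the real one lives
// wherever getDataFile creates the database):
const createMeasurementsTable = `
CREATE TABLE IF NOT EXISTS measurements (
    measurement_id INTEGER PRIMARY KEY AUTOINCREMENT,
    day            TEXT NOT NULL,
    measurement    REAL NOT NULL
);`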
func trust(c *cli.Context) {
	// Validate and parse required fields.
	if c.String("amount") == "" || key == nil {
		fmt.Println("Amount and seed are required")
		os.Exit(1)
	}
	amount := parseAmount(c.String("amount"))

	// Create the tx and sign it.
	tx := &data.TrustSet{
		LimitAmount: *amount,
	}
	tx.TransactionType = data.TRUST_SET
	tx.QualityOut = new(uint32)
	*tx.QualityOut = uint32(c.Float64("quality-out") * 1000000000)
	tx.QualityIn = new(uint32)
	*tx.QualityIn = uint32(c.Float64("quality-in") * 1000000000)
	tx.Flags = new(data.TransactionFlag)
	if c.Bool("auth") {
		*tx.Flags |= data.TxSetAuth
	}
	if c.Bool("noripple") {
		*tx.Flags |= data.TxSetNoRipple
	}
	if c.Bool("clear-noripple") {
		*tx.Flags |= data.TxClearNoRipple
	}
	if c.Bool("freeze") {
		*tx.Flags |= data.TxSetFreeze
	}
	if c.Bool("clear-freeze") {
		*tx.Flags |= data.TxClearFreeze
	}
	sign(c, tx)
	outputTx(c, tx)
}
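// A sketch of the flag set the trust command above presumably registers.
// The names come from the c.* lookups in trust; the usage strings are
// assumptions:
var trustFlags = []cli.Flag{
	cli.StringFlag{Name: "amount", Usage: "limit of the trust line"},
	cli.Float64Flag{Name: "quality-in", Usage: "inbound quality, scaled by 1e9"},
	cli.Float64Flag{Name: "quality-out", Usage: "outbound quality, scaled by 1e9"},
	cli.BoolFlag{Name: "auth", Usage: "set the auth flag"},
	cli.BoolFlag{Name: "noripple", Usage: "set the no-ripple flag"},
	cli.BoolFlag{Name: "clear-noripple", Usage: "clear the no-ripple flag"},
	cli.BoolFlag{Name: "freeze", Usage: "freeze the trust line"},
	cli.BoolFlag{Name: "clear-freeze", Usage: "unfreeze the trust line"},
}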
// Run is a facade method for the query command.
func Run(c *cli.Context) {
	if len(c.Args()) != 1 {
		fmt.Println("expected exactly one statement argument")
		return
	}
	statement := c.Args()[0]
	isVerbose = c.Bool("verbose")
	isDryRun = c.Bool("dryRun")
	onlyStatement = c.Bool("onlyStatement")
	args := Args{
		beforeHour: c.Float64("beforeHour"),
		startDate:  c.String("startDate"),
		endDate:    c.String("endDate"),
		tz:         c.Float64("tz"),
		buffer:     c.Float64("buffer"),
	}
	q := newQuery(statement, args)
	output, err := q.query()
	if err != nil {
		fmt.Printf("Failed to run the command:\nerror=%v\n", err)
		return
	}
	if output != "" {
		// Use Print, not Printf: output is data, not a format string.
		fmt.Print(output)
	}
}
func deployBefore(c *cli.Context) error {
	if c.String("service-id") == "" {
		return errors.New("service-id is empty")
	}
	if c.String("tag") == "" {
		return errors.New("the tag of the image is empty")
	}
	if c.String("namespace") == "" {
		return errors.New("namespace is empty")
	}
	if c.Int64("memory") < 0 {
		return errors.New("memory flag value should not be negative")
	}
	if c.Float64("cpu") < 0 {
		return errors.New("cpu flag value should not be negative")
	}
	for _, file := range c.StringSlice("env-file") {
		if err := util.FileExists(file); err != nil {
			return fmt.Errorf("the environment variables file %s does not exist", file)
		}
	}
	if c.Float64("minimumHealthCapacity") < 0.0 || c.Float64("minimumHealthCapacity") > 1.0 {
		return errors.New("minimumHealthCapacity flag value should be between 0.0 and 1.0")
	}
	if c.Float64("maximumOverCapacity") < 0.0 || c.Float64("maximumOverCapacity") > 1.0 {
		return errors.New("maximumOverCapacity flag value should be between 0.0 and 1.0")
	}
	return nil
}
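// deployBefore returns an error rather than exiting, which suggests it is
// wired up as a Before hook of the deploy command. A sketch of that wiring
// (the command name and the pairing with deployCmd are assumptions):
var deployCommand = cli.Command{
	Name:   "deploy",
	Before: deployBefore, // validation runs before the action
	Action: deployCmd,
}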
func run_check(c *cli.Context) {
	prot := c.String("protocol")
	host := c.String("hostname")
	port := c.Int("port")
	path := c.String("urlpath")
	warn := c.Float64("warning")
	crit := c.Float64("critical")
	tmout := c.Float64("timeout")
	chkstr := c.String("checkstr")

	log.Debugf("Protocol     : %s", prot)
	log.Debugf("Host         : %s", host)
	log.Debugf("Port         : %d", port)
	log.Debugf("UPath        : %s", path)
	log.Debugf("Warning      : %f", warn)
	log.Debugf("Critical     : %f", crit)
	log.Debugf("Timeout      : %f", tmout)
	log.Debugf("Check string : %q", chkstr)

	chResults := make(chan DPResponse)
	chCtrl := make(chan bool)
	defer close(chResults)
	defer close(chCtrl)

	dpurl := fmt.Sprintf("%s://%s:%d%s", prot, host, port, path)
	log.Debugf("DP URL: %s", dpurl)

	// Run the scrape in a parallel goroutine.
	go scrape(dpurl, chResults, chCtrl)

	select {
	case res := <-chResults:
		log.Debugf("Status Value: %s", res.StatusVal)
		log.Debugf("Description: %s", res.Description)
		log.Debugf("Response time: %f", res.RTime.Seconds())
		if strings.EqualFold(res.StatusVal, chkstr) {
			if res.RTime.Seconds() >= warn && res.RTime.Seconds() < crit {
				msg := fmt.Sprintf("Too long response time (>= %ds), (desc: %s)", int(warn), res.Description)
				nagios_result(E_WARNING, S_WARNING, msg, path, res.RTime.Seconds(), warn, crit)
			} else if res.RTime.Seconds() >= crit {
				msg := fmt.Sprintf("Too long response time (>= %ds), (desc: %s)", int(crit), res.Description)
				nagios_result(E_CRITICAL, S_CRITICAL, msg, path, res.RTime.Seconds(), warn, crit)
			} else {
				nagios_result(E_OK, S_OK, res.Description, path, res.RTime.Seconds(), warn, crit)
			}
		} else {
			nagios_result(E_CRITICAL, S_CRITICAL, res.Description, path, res.RTime.Seconds(), warn, crit)
		}
	case <-chCtrl:
		log.Debug("Got done signal. Bye.")
	case <-time.After(time.Second * time.Duration(tmout)):
		fmt.Printf("%s: DP %q timed out after %d seconds.\n", S_CRITICAL, dpurl, int(tmout))
		os.Exit(E_CRITICAL)
	}
}
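// nagios_result is not shown above. A minimal sketch consistent with the
// calls in run_check, assuming the usual Nagios plugin conventions: print a
// status line with performance data, then exit with the matching code. The
// E_*/S_* constants are assumed to be the standard Nagios exit codes and
// their labels:
func nagios_result(code int, status, msg, label string, rtime, warn, crit float64) {
	// "STATUS: message | 'label'=value;warn;crit" is the conventional
	// plugin output format with perfdata.
	fmt.Printf("%s: %s | '%s'=%fs;%f;%f\n", status, msg, label, rtime, warn, crit)
	os.Exit(code)
}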
func manage(c *cli.Context) {
	var (
		tlsConfig *tls.Config
		err       error
	)

	// If either --tls or --tlsverify is specified, load the certificates.
	if c.Bool("tls") || c.Bool("tlsverify") {
		if !c.IsSet("tlscert") || !c.IsSet("tlskey") {
			log.Fatal("--tlscert and --tlskey must be provided when using --tls")
		}
		if c.Bool("tlsverify") && !c.IsSet("tlscacert") {
			log.Fatal("--tlscacert must be provided when using --tlsverify")
		}
		tlsConfig, err = loadTlsConfig(
			c.String("tlscacert"),
			c.String("tlscert"),
			c.String("tlskey"),
			c.Bool("tlsverify"))
		if err != nil {
			log.Fatal(err)
		}
	} else {
		// Otherwise, if neither --tls nor --tlsverify is specified, abort if
		// the other flags are passed, as they would be silently ignored.
		if c.IsSet("tlscert") || c.IsSet("tlskey") || c.IsSet("tlscacert") {
			log.Fatal("--tlscert, --tlskey and --tlscacert require the use of either --tls or --tlsverify")
		}
	}

	store := state.NewStore(path.Join(c.String("rootdir"), "state"))
	if err := store.Initialize(); err != nil {
		log.Fatal(err)
	}

	cluster := cluster.NewCluster(store, tlsConfig, c.Float64("overcommit"))
	cluster.Events(&logHandler{})

	dflag := getDiscovery(c)
	if dflag == "" {
		log.Fatalf("discovery required to manage a cluster. See '%s manage --help'.", c.App.Name)
	}
	s, err := strategy.New(c.String("strategy"))
	if err != nil {
		log.Fatal(err)
	}

	// see https://github.com/codegangsta/cli/issues/160
	names := c.StringSlice("filter")
	if c.IsSet("filter") || c.IsSet("f") {
		names = names[DEFAULT_FILTER_NUMBER:]
	}
	fs, err := filter.New(names)
	if err != nil {
		log.Fatal(err)
	}

	// Get the list of nodes from the discovery service.
	go func() {
		d, err := discovery.New(dflag, c.Int("heartbeat"))
		if err != nil {
			log.Fatal(err)
		}
		nodes, err := d.Fetch()
		if err != nil {
			log.Fatal(err)
		}
		cluster.UpdateNodes(nodes)
		go d.Watch(cluster.UpdateNodes)
	}()

	sched := scheduler.NewScheduler(
		cluster,
		s,
		fs,
	)

	// see https://github.com/codegangsta/cli/issues/160
	hosts := c.StringSlice("host")
	if c.IsSet("host") || c.IsSet("H") {
		hosts = hosts[1:]
	}

	log.Fatal(api.ListenAndServe(cluster, sched, hosts, c.Bool("cors"), tlsConfig))
}
func engineAddAction(c *cli.Context) {
	cfg, err := loadConfig(c)
	if err != nil {
		logger.Fatal(err)
	}
	m := client.NewManager(cfg)
	id := c.String("id")
	addr := c.String("addr")
	if id == "" || addr == "" {
		logger.Fatalf("you must specify an id and address")
	}
	engine := &citadel.Engine{
		ID:     id,
		Addr:   addr,
		Cpus:   c.Float64("cpus"),
		Memory: c.Float64("memory"),
		Labels: c.StringSlice("label"),
	}
	sslCertPath := c.String("ssl-cert")
	sslKeyPath := c.String("ssl-key")
	caCertPath := c.String("ca-cert")
	var (
		sslCertData = []byte{}
		sslKeyData  = []byte{}
		caCertData  = []byte{}
		sslErr      error
	)
	if sslCertPath != "" && sslKeyPath != "" && caCertPath != "" {
		// ioutil.ReadFile opens, reads, and closes each file in one call,
		// so no file handles can leak.
		sslCertData, sslErr = ioutil.ReadFile(sslCertPath)
		if sslErr != nil {
			logger.Fatalf("unable to read ssl certificate: %s", sslErr)
		}
		sslKeyData, sslErr = ioutil.ReadFile(sslKeyPath)
		if sslErr != nil {
			logger.Fatalf("unable to read ssl key: %s", sslErr)
		}
		caCertData, sslErr = ioutil.ReadFile(caCertPath)
		if sslErr != nil {
			logger.Fatalf("unable to read ca certificate: %s", sslErr)
		}
	}
	shipyardEngine := &shipyard.Engine{
		SSLCertificate: string(sslCertData),
		SSLKey:         string(sslKeyData),
		CACertificate:  string(caCertData),
		Engine:         engine,
	}
	if err := m.AddEngine(shipyardEngine); err != nil {
		logger.Fatalf("error adding engine: %s", err)
	}
}
func manage(c *cli.Context) {
	var (
		tlsConfig *tls.Config
		err       error
	)

	// If either --tls or --tlsverify is specified, load the certificates.
	if c.Bool("tls") || c.Bool("tlsverify") {
		if !c.IsSet("tlscert") || !c.IsSet("tlskey") {
			log.Fatal("--tlscert and --tlskey must be provided when using --tls")
		}
		if c.Bool("tlsverify") && !c.IsSet("tlscacert") {
			log.Fatal("--tlscacert must be provided when using --tlsverify")
		}
		tlsConfig, err = loadTlsConfig(
			c.String("tlscacert"),
			c.String("tlscert"),
			c.String("tlskey"),
			c.Bool("tlsverify"))
		if err != nil {
			log.Fatal(err)
		}
	} else {
		// Otherwise, if neither --tls nor --tlsverify is specified, abort if
		// the other flags are passed, as they would be silently ignored.
		if c.IsSet("tlscert") || c.IsSet("tlskey") || c.IsSet("tlscacert") {
			log.Fatal("--tlscert, --tlskey and --tlscacert require the use of either --tls or --tlsverify")
		}
	}

	store := state.NewStore(path.Join(c.String("rootdir"), "state"))
	if err := store.Initialize(); err != nil {
		log.Fatal(err)
	}

	dflag := getDiscovery(c)
	if dflag == "" {
		log.Fatalf("discovery required to manage a cluster. See '%s manage --help'.", c.App.Name)
	}
	s, err := strategy.New(c.String("strategy"))
	if err != nil {
		log.Fatal(err)
	}

	// see https://github.com/codegangsta/cli/issues/160
	names := c.StringSlice("filter")
	if c.IsSet("filter") || c.IsSet("f") {
		names = names[DEFAULT_FILTER_NUMBER:]
	}
	fs, err := filter.New(names)
	if err != nil {
		log.Fatal(err)
	}

	sched := scheduler.New(s, fs)
	eventsHandler := api.NewEventsHandler()
	options := &cluster.Options{
		TLSConfig:       tlsConfig,
		OvercommitRatio: c.Float64("overcommit"),
		Discovery:       dflag,
		Heartbeat:       c.Int("heartbeat"),
	}
	cluster := swarm.NewCluster(sched, store, eventsHandler, options)

	// see https://github.com/codegangsta/cli/issues/160
	hosts := c.StringSlice("host")
	if c.IsSet("host") || c.IsSet("H") {
		hosts = hosts[1:]
	}

	log.Fatal(api.ListenAndServe(cluster, hosts, c.Bool("cors"), tlsConfig, eventsHandler))
}
func deployCmd(c *cli.Context) {
	envs, err := util.ParseMultiFileLinesToArray(c.StringSlice("env-file"))
	if err != nil {
		util.Log.Fatalln("Could not process the environment variables file", err)
	}
	envs = append(envs, c.StringSlice("env")...)

	serviceConfig := service.ServiceConfig{
		ServiceId: c.String("service-id"),
		CpuShares: c.Int("cpu"),
		Envs:      envs,
		ImageName: c.String("image"),
		Tag:       c.String("tag"),
	}
	if c.String("memory") != "" {
		megabytes, _ := bytefmt.ToMegabytes(c.String("memory"))
		memory := megabytes * 1024 * 1024
		serviceConfig.Memory = int64(memory)
	}

	smokeConfig := monitor.MonitorConfig{
		Retries:  c.Int("smoke-retries"),
		Type:     monitor.GetMonitor(c.String("smoke-type")),
		Request:  c.String("smoke-request"),
		Expected: c.String("smoke-expected"),
	}
	warmUpConfig := monitor.MonitorConfig{
		Retries:  1,
		Type:     monitor.HTTP,
		Request:  c.String("warmup-request"),
		Expected: c.String("warmup-expected"),
	}

	util.Log.Debugf("The service configuration is: %#v", serviceConfig.String())

	handleDeploySigTerm(stackManager)
	if stackManager.Deploy(serviceConfig, smokeConfig, warmUpConfig, c.Int("instances"), c.Float64("tolerance")) {
		services := stackManager.DeployedContainers()
		var resume []callbackResume
		for k := range services {
			if addr, err := services[k].AddressAndPort(8080); err != nil {
				util.Log.Errorln(err)
			} else {
				util.Log.Infof("Deployed %s with registrator tag %s and address %s",
					services[k].GetId(), services[k].RegistratorId(), addr)
				containerInfo := callbackResume{
					RegisterId: services[k].RegistratorId(),
					Address:    addr,
				}
				resume = append(resume, containerInfo)
			}
		}
		jsonResume, _ := json.Marshal(resume)
		fmt.Println(string(jsonResume))
	} else {
		util.Log.Fatalln("Deployment process finished with errors")
	}
}
func deployCmd(c *cli.Context) {
	envs, err := util.ParseMultiFileLinesToArray(c.StringSlice("env-file"))
	if err != nil {
		util.Log.Fatalln("Could not process the environment variables file", err)
	}
	envs = append(envs, c.StringSlice("env")...)

	serviceConfig := framework.ServiceConfig{
		ServiceID:             c.String("service-id"),
		CPUShares:             c.Float64("cpu"),
		Envs:                  envs,
		ImageName:             c.String("image"),
		Tag:                   c.String("tag"),
		MinimumHealthCapacity: c.Float64("minimumHealthCapacity"),
		MaximumOverCapacity:   c.Float64("maximumOverCapacity"),
		HealthCheckConfig:     &framework.HealthCheck{Path: c.String("health-check-path")},
	}
	applyPorts(c.StringSlice("port"), &serviceConfig)

	if c.String("memory") != "" {
		n, _ := strconv.ParseInt(c.String("memory"), 10, 64)
		serviceConfig.Memory = n
	}

	err = applyKeyValSliceFlag(c.StringSlice("constraint"), func(configMap map[string]string) {
		if len(configMap) != 0 {
			serviceConfig.Constraints = configMap
		}
	})
	if err != nil {
		util.Log.Fatalln("Error reading constraints", err)
	}
	err = applyKeyValSliceFlag(c.StringSlice("label"), func(configMap map[string]string) {
		if len(configMap) != 0 {
			serviceConfig.Labels = configMap
		}
	})
	if err != nil {
		util.Log.Fatalln("Error reading labels", err)
	}

	if c.String("beta") != "" {
		if serviceConfig.Labels == nil {
			serviceConfig.Labels = make(map[string]string)
		}
		if serviceConfig.Constraints == nil {
			serviceConfig.Constraints = make(map[string]string)
		}
		serviceConfig.Labels["slave_name"] = c.String("beta")
		serviceConfig.Constraints["slave_name"] = c.String("beta")
	}

	handleDeploySigTerm(stackManager)
	if stackManager.Deploy(serviceConfig, c.Int("instances"), c.Float64("tolerance")) {
		services := stackManager.DeployedContainers()
		var resume []callbackResume
		for _, service := range services {
			for _, instance := range service.Instances {
				for _, val := range instance.Ports {
					util.Log.Infof("Deployed %s on host %s at address %+v", instance.ID, instance.Host, val)
					instanceInfo := callbackResume{
						Id:      instance.ID,
						Address: instance.Host + ":" + strconv.FormatInt(val.Internal, 10),
					}
					resume = append(resume, instanceInfo)
				}
			}
		}
		jsonResume, _ := json.Marshal(resume)
		fmt.Println(string(jsonResume))
	} else {
		util.Log.Fatalln("Deployment-Process terminated with errors")
	}
}
// createProject sends a create-project task to the client based on the
// cli.Context. Returns an error if one occurred.
func createProject(c *cli.Context, w io.Writer) error {
	err := checkArgCount(c, 0)
	if err != nil {
		return err
	}
	tenantName := c.String("tenant")
	rtName := c.String("resource-ticket")
	name := c.String("name")
	limits := c.String("limits")
	percent := c.Float64("percent")
	securityGroups := c.String("security-groups")

	client.Esxclient, err = client.GetClient(c)
	if err != nil {
		return err
	}
	tenant, err := verifyTenant(tenantName)
	if err != nil {
		return err
	}

	var limitsList []photon.QuotaLineItem
	if c.IsSet("limits") && c.IsSet("percent") {
		return fmt.Errorf("Error: Can only specify one of '--limits' or '--percent'")
	}
	if c.IsSet("limits") {
		limitsList, err = parseLimitsListFromFlag(limits)
		if err != nil {
			return err
		}
	}
	if c.IsSet("percent") {
		limitsList = []photon.QuotaLineItem{
			{Key: "subdivide.percent", Value: percent, Unit: "COUNT"}}
	}

	if !c.GlobalIsSet("non-interactive") {
		name, err = askForInput("Project name: ", name)
		if err != nil {
			return err
		}
		rtName, err = askForInput("Resource-ticket name: ", rtName)
		if err != nil {
			return err
		}
		limitsList, err = askForLimitList(limitsList)
		if err != nil {
			return err
		}
	}

	projectSpec := photon.ProjectCreateSpec{}
	projectSpec.Name = name
	projectSpec.ResourceTicket = photon.ResourceTicketReservation{Name: rtName, Limits: limitsList}

	if !c.GlobalIsSet("non-interactive") {
		fmt.Printf("\nTenant name: %s\n", tenant.Name)
		fmt.Printf("Resource ticket name: %s\n", rtName)
		fmt.Printf("Creating project name: %s\n\n", name)
		fmt.Println("Please make sure limits below are correct:")
		for i, l := range limitsList {
			fmt.Printf("%d: %s, %g, %s\n", i+1, l.Key, l.Value, l.Unit)
		}
	}

	if confirmed(c) {
		if len(securityGroups) > 0 {
			projectSpec.SecurityGroups = regexp.MustCompile(`\s*,\s*`).Split(securityGroups, -1)
		}
		createTask, err := client.Esxclient.Tenants.CreateProject(tenant.ID, &projectSpec)
		if err != nil {
			return err
		}
		id, err := waitOnTaskOperation(createTask.ID, c)
		if err != nil {
			return err
		}
		if utils.NeedsFormatting(c) {
			project, err := client.Esxclient.Projects.Get(id)
			if err != nil {
				return err
			}
			utils.FormatObject(project, w, c)
		}
	} else {
		fmt.Println("OK. Canceled")
	}
	return nil
}
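// checkArgCount is not shown above. A minimal sketch, assuming it just
// validates the number of positional arguments:
func checkArgCount(c *cli.Context, num int) error {
	if len(c.Args()) != num {
		return fmt.Errorf("expected %d positional arguments, got %d", num, len(c.Args()))
	}
	return nil
}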
func run_check(c *cli.Context) {
	furl := c.String("url") // f for full-url
	prot := c.String("protocol")
	host := c.String("hostname")
	port := c.Int("port")
	path := c.String("urlpath")
	warn := c.Float64("warning")
	crit := c.Float64("critical")
	tmout := c.Float64("timeout")

	var dpurl string
	if furl != "" {
		dpurl = furl
		tmpurl, err := url.Parse(furl)
		if err == nil {
			path = tmpurl.EscapedPath()
			//log.Debugf("URL Path : %s", path)
		}
	} else {
		dpurl = fmt.Sprintf("%s://%s:%d%s", prot, host, port, path)
	}

	_debug(func() {
		log.Debugf("URL      : %q", furl)
		log.Debugf("Protocol : %s", prot)
		log.Debugf("Host     : %s", host)
		log.Debugf("Port     : %d", port)
		log.Debugf("UPath    : %s", path)
		log.Debugf("Warning  : %f", warn)
		log.Debugf("Critical : %f", crit)
		log.Debugf("Timeout  : %f", tmout)
		log.Debugf("DP URL   : %s", dpurl)
	})

	chPRes := make(chan PingResponse)
	defer close(chPRes)

	// Run the scrape in a parallel goroutine.
	go scrape(dpurl, chPRes)

	select {
	case res := <-chPRes:
		// nagios_result presumably exits the process, so only the first
		// matching branch below reports.
		if res.HTTPCode != http.StatusOK {
			msg := fmt.Sprintf("Unexpected HTTP response code: %d", res.HTTPCode)
			nagios_result(E_CRITICAL, S_CRITICAL, msg, path, res.ResponseTime, warn, crit, &res)
		}
		if !res.Ok() {
			_debug(func() {
				var buf bytes.Buffer
				written, err := res.DumpJSON(&buf, true)
				if err != nil {
					log.Error(err)
				}
				log.Debugf("XML as JSON (%d bytes):\n%s", written, buf.String())
			})
			msg := "Response tagged as unsuccessful, see long output for details"
			nagios_result(E_CRITICAL, S_CRITICAL, msg, path, res.ResponseTime, warn, crit, &res)
		}
		if res.ResponseTime >= crit {
			msg := fmt.Sprintf("Response time above critical [ %ds ] limit", int(crit))
			nagios_result(E_CRITICAL, S_CRITICAL, msg, path, res.ResponseTime, warn, crit, &res)
		}
		if res.ResponseTime >= warn {
			msg := fmt.Sprintf("Response time above warning [ %ds ] limit", int(warn))
			nagios_result(E_WARNING, S_WARNING, msg, path, res.ResponseTime, warn, crit, &res)
		}
		// Got here, all good.
		nagios_result(E_OK, S_OK, "Looking good", path, res.ResponseTime, warn, crit, &res)
	case <-time.After(time.Second * time.Duration(tmout)):
		fmt.Printf("%s: DP %q timed out after %d seconds.\n", S_CRITICAL, dpurl, int(tmout))
		os.Exit(E_CRITICAL)
	}
}
func deployBefore(c *cli.Context) error {
	if c.String("service-id") == "" {
		return errors.New("service-id is empty")
	}
	if c.String("image") == "" {
		return errors.New("the name of the image is empty")
	}
	if c.String("tag") == "" {
		return errors.New("the tag of the image is empty")
	}
	if c.String("memory") != "" {
		if _, err := strconv.ParseInt(c.String("memory"), 10, 64); err != nil {
			return errors.New("invalid value of parameter memory")
		}
	}
	if c.Float64("cpu") < 0 {
		return errors.New("cpu flag value should not be negative")
	}
	if c.String("framework") == "marathon" && c.Float64("cpu") > 1.0 { // Fix this: framework flag does not exist anymore
		return errors.New("cpu flag value should not be > 1.0 for marathon")
	} else if c.String("framework") == "swarm" && c.Float64("cpu") > 1024 { // Fix this: framework flag does not exist anymore
		return errors.New("cpu flag value should not be > 1024.0 for swarm")
	}
	for _, file := range c.StringSlice("env-file") {
		if err := util.FileExists(file); err != nil {
			return fmt.Errorf("the environment variables file %s does not exist", file)
		}
	}
	if c.Float64("minimumHealthCapacity") < 0.0 || c.Float64("minimumHealthCapacity") > 1.0 {
		return errors.New("minimumHealthCapacity flag value should be between 0.0 and 1.0")
	}
	if c.Float64("maximumOverCapacity") < 0.0 || c.Float64("maximumOverCapacity") > 1.0 {
		return errors.New("maximumOverCapacity flag value should be between 0.0 and 1.0")
	}
	return nil
}