func appList(c *cli.Context) { initStore(c) err := commander.AppList(configStore, utils.GalaxyEnv(c)) if err != nil { log.Fatalf("ERROR: %s", err) } }
func poolCreate(c *cli.Context) { ensureEnvArg(c) ensurePoolArg(c) initStore(c) created, err := configStore.CreatePool(utils.GalaxyPool(c), utils.GalaxyEnv(c)) if err != nil { log.Fatalf("ERROR: Could not create pool: %s", err) return } if created { log.Printf("Pool %s created\n", utils.GalaxyPool(c)) } else { log.Printf("Pool %s already exists\n", utils.GalaxyPool(c)) } ec2host, err := runtime.EC2PublicHostname() if err != nil || ec2host == "" { log.Debug("not running from AWS, skipping pool creation") return } // now create the cloudformation stack // is this fails, the stack can be created separately with // stack:create_pool stackCreatePool(c) }
func poolList(c *cli.Context) { initStore(c) envs := []string{utils.GalaxyEnv(c)} if utils.GalaxyEnv(c) == "" { var err error envs, err = configStore.ListEnvs() if err != nil { log.Fatalf("ERROR: %s", err) } } columns := []string{"ENV | POOL | APPS "} for _, env := range envs { pools, err := configStore.ListPools(env) if err != nil { log.Fatalf("ERROR: cannot list pools: %s", err) return } if len(pools) == 0 { columns = append(columns, strings.Join([]string{ env, "", ""}, " | ")) continue } for _, pool := range pools { assigments, err := configStore.ListAssignments(env, pool) if err != nil { log.Fatalf("ERROR: cannot list pool assignments: %s", err) } columns = append(columns, strings.Join([]string{ env, pool, strings.Join(assigments, ",")}, " | ")) } } output := columnize.SimpleFormat(columns) log.Println(output) }
// Backup app config to a file or STDOUT
//
// appBackup serializes the config of the apps named as CLI args (or every
// app in the env when no args are given) to JSON. Per-app failures are
// logged and counted instead of aborting, so a partial backup is still
// produced; the process then exits non-zero.
func appBackup(c *cli.Context) {
	initStore(c)

	env := utils.GalaxyEnv(c)
	if env == "" {
		log.Fatal("ERROR: env is required. Pass --env or set GALAXY_ENV")
	}

	backup := &backupData{
		Time: time.Now(),
	}

	// No explicit app args: back up everything in the environment.
	toBackup := c.Args()
	if len(toBackup) == 0 {
		appList, err := configStore.ListApps(env)
		if err != nil {
			log.Fatalf("ERROR: %s\n", err)
		}
		for _, app := range appList {
			toBackup = append(toBackup, app.Name())
		}
	}

	errCount := 0
	for _, app := range toBackup {
		data, err := getAppBackup(app, env)
		if err != nil {
			// log errors and continue
			log.Errorf("ERROR: %s [%s]", err, app)
			errCount++
			continue
		}
		backup.Apps = append(backup.Apps, data)
	}

	if errCount > 0 {
		fmt.Printf("WARNING: backup completed with %d errors\n", errCount)
		// Deferred so the (partial) backup below is still written before the
		// process exits with a non-zero status.
		// NOTE(review): exit statuses are truncated to 8 bits on most
		// platforms, so errCount > 255 may wrap — confirm acceptable.
		defer os.Exit(errCount)
	}

	j, err := json.MarshalIndent(backup, "", " ")
	if err != nil {
		log.Fatal(err)
	}

	// -file writes to disk; otherwise the JSON goes to STDOUT.
	fileName := c.String("file")
	if fileName != "" {
		if err := ioutil.WriteFile(fileName, j, 0666); err != nil {
			log.Fatal(err)
		}
		return
	}

	os.Stdout.Write(j)
}
func pgPsql(c *cli.Context) { ensureEnvArg(c) initStore(c) app := ensureAppParam(c, "pg:psql") appCfg, err := configStore.GetApp(app, utils.GalaxyEnv(c)) if err != nil { log.Fatalf("ERROR: Unable to run command: %s.", err) return } database_url := appCfg.Env()["DATABASE_URL"] if database_url == "" { log.Printf("No DATABASE_URL configured. Set one with config:set first.") return } if !strings.HasPrefix(database_url, "postgres://") { log.Printf("DATABASE_URL is not a postgres database.") return } cmd := exec.Command("psql", database_url) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr // Ignore SIGINT while the process is running ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt) defer func() { signal.Stop(ch) close(ch) }() go func() { for { _, ok := <-ch if !ok { break } } }() err = cmd.Start() if err != nil { log.Fatal(err) } err = cmd.Wait() if err != nil { fmt.Printf("Command finished with error: %v\n", err) } }
func appRestart(c *cli.Context) { initStore(c) app := ensureAppParam(c, "app:restart") err := commander.AppRestart(configStore, app, utils.GalaxyEnv(c)) if err != nil { log.Fatalf("ERROR: %s", err) } }
func appDelete(c *cli.Context) { ensureEnvArg(c) initStore(c) app := ensureAppParam(c, "app:delete") err := commander.AppDelete(configStore, app, utils.GalaxyEnv(c)) if err != nil { log.Fatalf("ERROR: %s", err) } }
func configList(c *cli.Context) { ensureEnvArg(c) initStore(c) app := ensureAppParam(c, "config") err := commander.ConfigList(configStore, app, utils.GalaxyEnv(c)) if err != nil { log.Fatalf("ERROR: Unable to list config: %s.", err) return } }
func configUnset(c *cli.Context) { ensureEnvArg(c) initStore(c) app := ensureAppParam(c, "config:unset") err := commander.ConfigUnset(configStore, app, utils.GalaxyEnv(c), c.Args().Tail()) if err != nil { log.Fatalf("ERROR: Unable to unset config: %s.", err) return } }
func poolAssign(c *cli.Context) { ensureEnvArg(c) ensurePoolArg(c) initStore(c) app := ensureAppParam(c, "pool:assign") err := commander.AppAssign(configStore, app, utils.GalaxyEnv(c), utils.GalaxyPool(c)) if err != nil { log.Fatalf("ERROR: %s", err) } }
func appShell(c *cli.Context) { ensureEnvArg(c) initStore(c) initRuntime(c) app := ensureAppParam(c, "app:shell") err := commander.AppShell(configStore, serviceRuntime, app, utils.GalaxyEnv(c), utils.GalaxyPool(c)) if err != nil { log.Fatalf("ERROR: %s", err) } }
func appCreate(c *cli.Context) { ensureEnvArg(c) initStore(c) app := c.Args().First() if app == "" { cli.ShowCommandHelp(c, "app:create") log.Fatal("ERROR: app name missing") } err := commander.AppCreate(configStore, app, utils.GalaxyEnv(c)) if err != nil { log.Fatalf("ERROR: %s", err) } }
func stackDeletePool(c *cli.Context) { ensureEnvArg(c) ensurePoolArg(c) if c.String("region") != "" { stack.Region = c.String("region") } baseStack := getBase(c) stackName := fmt.Sprintf("%s-%s-%s", baseStack, utils.GalaxyEnv(c), utils.GalaxyPool(c)) waitAndDelete(stackName) }
// poolCreate registers a new pool for the current env in the config store.
//
// NOTE(review): this duplicates an earlier poolCreate definition in this
// file (minus the CloudFormation stack-creation step). Two functions with
// the same name in one package will not compile — confirm which version is
// current and remove the other.
func poolCreate(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)
	initStore(c)
	created, err := configStore.CreatePool(utils.GalaxyPool(c), utils.GalaxyEnv(c))
	if err != nil {
		// log.Fatalf exits, so the `return` below is unreachable.
		log.Fatalf("ERROR: Could not create pool: %s", err)
		return
	}
	if created {
		log.Printf("Pool %s created\n", utils.GalaxyPool(c))
	} else {
		log.Printf("Pool %s already exists\n", utils.GalaxyPool(c))
	}
}
func poolUnassign(c *cli.Context) { ensureEnvArg(c) ensurePoolArg(c) initStore(c) app := c.Args().First() if app == "" { cli.ShowCommandHelp(c, "pool:assign") log.Fatal("ERROR: app name missing") } err := commander.AppUnassign(configStore, app, utils.GalaxyEnv(c), utils.GalaxyPool(c)) if err != nil { log.Fatalf("ERROR: %s", err) } }
func poolDelete(c *cli.Context) { ensureEnvArg(c) ensurePoolArg(c) initStore(c) empty, err := configStore.DeletePool(utils.GalaxyPool(c), utils.GalaxyEnv(c)) if err != nil { log.Fatalf("ERROR: Could not delete pool: %s", err) return } if empty { log.Printf("Pool %s deleted\n", utils.GalaxyPool(c)) } else { log.Printf("Pool %s has apps assigned. Unassign them first.\n", utils.GalaxyPool(c)) } }
func appRun(c *cli.Context) { ensureEnvArg(c) initStore(c) initRuntime(c) app := ensureAppParam(c, "app:run") if len(c.Args()) < 2 { log.Fatalf("ERROR: Missing command to run.") return } err := commander.AppRun(configStore, serviceRuntime, app, utils.GalaxyEnv(c), c.Args()[1:]) if err != nil { log.Fatalf("ERROR: %s", err) } }
func ensureAppParam(c *cli.Context, command string) string { app := c.Args().First() if app == "" { cli.ShowCommandHelp(c, command) log.Fatal("ERROR: app name missing") } exists, err := appExists(app, utils.GalaxyEnv(c)) if err != nil { log.Fatalf("ERROR: can't deteremine if %s exists: %s", app, err) } if !exists { log.Fatalf("ERROR: %s does not exist. Create it first.", app) } return app }
func appDeploy(c *cli.Context) { ensureEnvArg(c) initStore(c) initRuntime(c) app := ensureAppParam(c, "app:deploy") version := "" if len(c.Args().Tail()) == 1 { version = c.Args().Tail()[0] } if version == "" { log.Println("ERROR: version missing") cli.ShowCommandHelp(c, "app:deploy") return } err := commander.AppDeploy(configStore, serviceRuntime, app, utils.GalaxyEnv(c), version) if err != nil { log.Fatalf("ERROR: %s", err) } }
// Update an existing Pool Stack func stackUpdatePool(c *cli.Context) { ensureEnvArg(c) ensurePoolArg(c) if c.String("region") != "" { stack.Region = c.String("region") } poolName := utils.GalaxyPool(c) baseStack := getBase(c) poolEnv := utils.GalaxyEnv(c) stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName) pool, err := stack.GetPool(stackName) if err != nil { log.Fatal(err) } options := make(map[string]string) if policy := c.String("policy"); policy != "" { policyJSON, err := jsonFromArg(policy) if err != nil { log.Fatal("policy error:", err) } options["StackPolicyDuringUpdateBody"] = string(policyJSON) } resources := sharedResources(c, baseStack) asg := pool.ASG() if asg == nil { log.Fatal("missing ASG") } if c.Int("desired-size") > 0 { asg.Properties.DesiredCapacity = c.Int("desired-size") } if c.Int("min-size") > 0 { asg.Properties.MinSize = c.Int("min-size") } if c.Int("max-size") > 0 { asg.Properties.MaxSize = c.Int("max-size") } if c.Bool("auto-update") { // note that the max pause is only PT5M30S asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause")) } numZones := c.Int("availability-zones") if numZones == 0 { numZones = len(asg.Properties.VPCZoneIdentifier) } // start with the current settings subnetIDs := []string{} azIDs := []string{} // only update the subnets/AZs if we changed the count if len(asg.Properties.VPCZoneIdentifier) != numZones { subnets := resources.Subnets if numZones <= len(subnets) { subnets = subnets[:numZones] } else { log.Fatal("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets)) } for _, sn := range subnets { subnetIDs = append(subnetIDs, sn.ID) azIDs = append(azIDs, sn.AvailabilityZone) } asg.Properties.VPCZoneIdentifier = subnetIDs asg.Properties.AvailabilityZones = azIDs } elb := pool.ELB() sslCert := "" if cert := c.String("ssl-cert"); cert != "" { sslCert = resources.ServerCerts[cert] if sslCert == "" { log.Fatalf("Could not find certificate '%s'", 
cert) } } httpPort := c.Int("http-port") if (sslCert != "" || httpPort > 0) && elb == nil { log.Fatal("ERROR: Pool does not have an ELB") } // we can set the default now that we've verified that elb can be nil if httpPort == 0 { httpPort = 80 } if elb != nil { certAdded := false for _, l := range elb.Properties.Listeners { if sslCert != "" && l.Protocol == "HTTPS" { l.SSLCertificateId = sslCert certAdded = true } if httpPort > 0 { l.InstancePort = httpPort } } // the elb needs a cert, but doesn't have an https listener if sslCert != "" && !certAdded { elb.AddListener(443, "HTTPS", httpPort, "HTTP", sslCert, nil) } healthCheck := c.String("http-health-check") if healthCheck != "" && healthCheck != elb.Properties.HealthCheck.Target { elb.Properties.HealthCheck.Target = healthCheck } // always make sure the ELB is in the same subnets as the ASG elb.Properties.Subnets = asg.Properties.VPCZoneIdentifier } lc := pool.LC() if amiID := c.String("ami"); amiID != "" { lc.Properties.ImageId = amiID } if insType := c.String("instance-type"); insType != "" { lc.Properties.InstanceType = insType } // add autoscaling if it's required setCPUAutoScale(c, pool) poolTmpl, err := json.MarshalIndent(pool, "", " ") if err != nil { log.Fatal(err) } if c.Bool("print") { fmt.Println(string(poolTmpl)) return } log.Println("Updating stack:", stackName) if _, err := stack.Update(stackName, poolTmpl, options); err != nil { log.Fatal(err) } // do we want to wait on this by default? if err := stack.Wait(stackName, 5*time.Minute); err != nil { log.Fatal(err) } log.Println("UpdateStack complete") }
// ensureEnvArg aborts the command when no environment has been provided
// via --env or the GALAXY_ENV environment variable.
func ensureEnvArg(c *cli.Context) {
	if env := utils.GalaxyEnv(c); env == "" {
		log.Fatal("ERROR: env is required. Pass --env or set GALAXY_ENV")
	}
}
// restore an app's config from backup func appRestore(c *cli.Context) { initStore(c) var err error var rawBackup []byte fileName := c.String("file") if fileName != "" { rawBackup, err = ioutil.ReadFile(fileName) if err != nil { log.Fatal(err) } } else { log.Println("Reading backup from STDIN") rawBackup, err = ioutil.ReadAll(os.Stdin) if err != nil { log.Fatal(err) } } backup := &backupData{} if err := json.Unmarshal(rawBackup, backup); err != nil { log.Fatal(err) } fmt.Println("Found backup from ", backup.Time) var toRestore []*appCfg if apps := c.Args(); len(apps) > 0 { for _, app := range apps { found := false for _, bkup := range backup.Apps { if bkup.Name == app { toRestore = append(toRestore, bkup) found = true break } } if !found { log.Fatalf("no backup found for '%s'\n", app) } } } else { toRestore = backup.Apps } // check for conflicts // NOTE: there is still a race here if an app is created after this check if !c.Bool("force") { needForce := false for _, bkup := range toRestore { exists, err := configStore.AppExists(bkup.Name, utils.GalaxyEnv(c)) if err != nil { log.Fatal(err) } if exists { log.Warnf("Cannot restore over existing app '%s'", bkup.Name) needForce = true } } if needForce { log.Fatal("Use -force to overwrite") } } loggedErr := false for _, bkup := range toRestore { if err := restoreApp(bkup, utils.GalaxyEnv(c)); err != nil { log.Errorf("%s", err) loggedErr = true } } if loggedErr { // This is mostly to give a non-zero exit status log.Fatal("Error occured during restore") } }
func stackCreatePool(c *cli.Context) { var err error ensureEnvArg(c) ensurePoolArg(c) if c.String("region") != "" { stack.Region = c.String("region") } poolName := utils.GalaxyPool(c) baseStack := getBase(c) poolEnv := utils.GalaxyEnv(c) stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName) pool := stack.NewPool() // get the resources we need from the base stack // TODO: this may search for the base stack a second time resources := sharedResources(c, baseStack) desiredCap := c.Int("desired-size") if desiredCap == 0 { desiredCap = 1 } numZones := c.Int("availability-zones") if numZones == 0 { // default to running one host per zone numZones = desiredCap } minSize := c.Int("min-size") maxSize := c.Int("max-size") httpPort := c.Int("http-port") if httpPort == 0 { httpPort = 80 } sslCert := "" if cert := c.String("ssl-cert"); cert != "" { sslCert = resources.ServerCerts[cert] if sslCert == "" { log.Fatalf("Could not find certificate '%s'", cert) } } // Create our Launch Config lc := pool.LCTemplate lcName := "lc" + poolEnv + poolName if amiID := c.String("ami"); amiID != "" { lc.Properties.ImageId = amiID } else { lc.Properties.ImageId = resources.Parameters["PoolImageId"] } if insType := c.String("instance-type"); insType != "" { lc.Properties.InstanceType = insType } else { lc.Properties.InstanceType = resources.Parameters["PoolInstanceType"] } if keyName := c.String("keyname"); keyName != "" { lc.Properties.KeyName = keyName } else { lc.Properties.KeyName = resources.Parameters["KeyName"] } lc.Properties.IamInstanceProfile = resources.Roles["galaxyInstanceProfile"] lc.Properties.SecurityGroups = []string{ resources.SecurityGroups["sshSG"], resources.SecurityGroups["defaultSG"], } lc.SetVolumeSize(c.Int("volume-size")) pool.Resources[lcName] = lc // Create the Auto Scaling Group asg := pool.ASGTemplate asgName := "asg" + poolEnv + poolName asg.AddTag("Name", fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName), true) asg.AddTag("env", poolEnv, true) 
asg.AddTag("pool", poolName, true) asg.AddTag("galaxy", "pool", true) asg.Properties.DesiredCapacity = desiredCap // Don't always run in all zones subnets := resources.Subnets if numZones <= len(subnets) { subnets = subnets[:numZones] } else { log.Fatal("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets)) } // break the subnets info into separate subnet and AZ slices for the template subnetIDs := []string{} azIDs := []string{} for _, sn := range subnets { subnetIDs = append(subnetIDs, sn.ID) azIDs = append(azIDs, sn.AvailabilityZone) } asg.SetLaunchConfiguration(lcName) asg.Properties.AvailabilityZones = azIDs asg.Properties.VPCZoneIdentifier = subnetIDs if maxSize > 0 { asg.Properties.MaxSize = maxSize } if minSize > 0 { asg.Properties.MinSize = minSize } if c.Bool("auto-update") { asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause")) } pool.Resources[asgName] = asg // Optionally create the Elastic Load Balancer if c.Bool("elb") { elb := pool.ELBTemplate elbName := "elb" + poolEnv + poolName // make sure to add this to the ASG asg.AddLoadBalancer(elbName) elb.Properties.Subnets = subnetIDs elb.Properties.SecurityGroups = []string{ resources.SecurityGroups["webSG"], resources.SecurityGroups["defaultSG"], } elb.Properties.HealthCheck.Target = c.String("http-health-check") elb.AddListener(80, "HTTP", httpPort, "HTTP", "", nil) if sslCert != "" { elb.AddListener(443, "HTTPS", httpPort, "HTTP", sslCert, nil) } pool.Resources[elbName] = elb } // add autoscaling if it's required setCPUAutoScale(c, pool) poolTmpl, err := json.MarshalIndent(pool, "", " ") if err != nil { log.Fatal(err) } if c.Bool("print") { fmt.Println(string(poolTmpl)) return } opts := make(map[string]string) opts["tag.env"] = poolEnv opts["tag.pool"] = poolName opts["tag.galaxy"] = "pool" _, err = stack.Create(stackName, poolTmpl, opts) if err != nil { log.Fatal(err) } log.Println("Creating stack:", stackName) // do we want to wait on this 
by default? if err := stack.Wait(stackName, 5*time.Minute); err != nil { log.Error(err) log.Error("CreateStack Failed, attempting to delete") waitAndDelete(stackName) return } log.Println("CreateStack complete") }