Пример #1
0
// Print a Cloudformation template to stdout.
// With no stack name argument, the default galaxy template is printed;
// otherwise the named stack's current template is fetched and printed.
func stackTemplate(c *cli.Context) {
	stackName := c.Args().First()

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	// No stack name: emit the built-in default template.
	if stackName == "" {
		os.Stdout.Write(stack.DefaultGalaxyTemplate())
		return
	}

	stackTmpl, err := stack.GetTemplate(stackName)
	if err != nil {
		// Give a friendlier message when the stack simply doesn't exist.
		if awsErr, ok := err.(*aws.Error); ok {
			if awsErr.Code == "ValidationError" && strings.Contains(awsErr.Message, "does not exist") {
				log.Fatalf("ERROR: Stack '%s' does not exist", stackName)
			}
		}
		log.Fatal(err)
	}

	if _, err := os.Stdout.Write(stackTmpl); err != nil {
		log.Fatal(err)
	}
}
Пример #2
0
// wait until a stack is in a final state, then delete it
func waitAndDelete(name string) {
	log.Println("Attempting to delete stack:", name)

	// The StackID is needed so DELETE events can be looked up later.
	desc, err := stack.DescribeStacks(name)
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}
	if len(desc.Stacks) == 0 {
		log.Fatal("ERROR: could not describe stack:", name)
	}
	stackId := desc.Stacks[0].Id

	// Let any in-flight operation reach a final state first.
	if err := stack.WaitForComplete(stackId, 5*time.Minute); err != nil {
		log.Fatal(err)
	}

	if _, err := stack.Delete(name); err != nil {
		log.Fatal(err)
	}

	// Now block until the deletion itself completes.
	if err := stack.WaitForComplete(stackId, 5*time.Minute); err != nil {
		log.Fatal(err)
	}
	log.Println("Deleted stack:", name)
}
Пример #3
0
// Backup app config to a file or STDOUT
//
// With no positional arguments every app in the environment is backed up;
// otherwise only the named apps. The backup is written as indented JSON to
// --file when given, else to stdout. Per-app failures are logged and
// counted rather than aborting the whole backup.
func appBackup(c *cli.Context) {
	initStore(c)

	env := utils.GalaxyEnv(c)
	if env == "" {
		log.Fatal("ERROR: env is required.  Pass --env or set GALAXY_ENV")
	}

	// Timestamp the backup so a later restore can report when it was taken.
	backup := &backupData{
		Time: time.Now(),
	}

	toBackup := c.Args()

	// No apps specified: back up everything in this env.
	if len(toBackup) == 0 {
		appList, err := configStore.ListApps(env)
		if err != nil {
			log.Fatalf("ERROR: %s\n", err)
		}

		for _, app := range appList {
			toBackup = append(toBackup, app.Name())
		}
	}

	// Collect each app's config; one bad app doesn't abort the backup.
	errCount := 0
	for _, app := range toBackup {
		data, err := getAppBackup(app, env)
		if err != nil {
			// log errors and continue
			log.Errorf("ERROR: %s [%s]", err, app)
			errCount++
			continue
		}
		backup.Apps = append(backup.Apps, data)
	}

	// Deferring os.Exit still lets the (partial) backup be written below
	// while making the process exit non-zero at return.
	// NOTE(review): os.Exit inside a defer skips any remaining deferred
	// calls, and exit codes above 255 are truncated — confirm intended.
	if errCount > 0 {
		fmt.Printf("WARNING: backup completed with %d errors\n", errCount)
		defer os.Exit(errCount)
	}

	j, err := json.MarshalIndent(backup, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	// Write to the named file if provided, otherwise stdout.
	fileName := c.String("file")
	if fileName != "" {
		if err := ioutil.WriteFile(fileName, j, 0666); err != nil {
			log.Fatal(err)
		}
		return
	}

	os.Stdout.Write(j)
}
Пример #4
0
// Return the path for the config directory, and create it if it doesn't exist
func cfgDir() string {
	home := utils.HomeDir()
	if home == "" {
		log.Fatal("ERROR: Unable to determine current home dir. Set $HOME.")
	}

	dir := filepath.Join(home, ".galaxy")
	// Create the directory only when it is missing; any other Stat error
	// is ignored here, matching the prior behavior.
	if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) {
		if mkErr := os.Mkdir(dir, 0700); mkErr != nil {
			log.Fatal("ERROR: cannot create config directory:", mkErr)
		}
	}
	return dir
}
Пример #5
0
// Return a default template to create our base stack.
// Returns nil when the availability zones cannot be determined.
func DefaultGalaxyTemplate() []byte {
	azResp, err := DescribeAvailabilityZones("")
	if err != nil {
		log.Warn(err)
		return nil
	}

	p := &GalaxyTmplParams{
		Name: "galaxy",
		// Use the network address of the block. "10.24.0.1/16" had a host
		// bit set and disagreed with the "10.24.0.0/16" default used by
		// the interactive prompt in getInitOpts.
		VPCCIDR: "10.24.0.0/16",
	}

	// One /24 subnet per availability zone, carved from the VPC block.
	for i, az := range azResp.AvailabilityZones {
		s := &SubnetTmplParams{
			Name:   fmt.Sprintf("galaxySubnet%d", i+1),
			Subnet: fmt.Sprintf("10.24.%d.0/24", i+1),
			AZ:     az.Name,
		}

		p.Subnets = append(p.Subnets, s)
	}

	tmpl, err := GalaxyTemplate(p)
	if err != nil {
		// TODO: propagate the error instead of exiting
		log.Fatal(err)
	}
	return tmpl
}
Пример #6
0
// pgPsql opens an interactive psql session against the app's configured
// DATABASE_URL, forwarding stdin/stdout/stderr and ignoring SIGINT in this
// process while psql runs.
func pgPsql(c *cli.Context) {
	ensureEnvArg(c)
	initStore(c)
	app := ensureAppParam(c, "pg:psql")

	appCfg, err := configStore.GetApp(app, utils.GalaxyEnv(c))
	if err != nil {
		// log.Fatalf exits; the unreachable `return` that followed is gone.
		log.Fatalf("ERROR: Unable to run command: %s.", err)
	}

	databaseURL := appCfg.Env()["DATABASE_URL"]
	if databaseURL == "" {
		log.Printf("No DATABASE_URL configured.  Set one with config:set first.")
		return
	}

	if !strings.HasPrefix(databaseURL, "postgres://") {
		log.Printf("DATABASE_URL is not a postgres database.")
		return
	}

	cmd := exec.Command("psql", databaseURL)

	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	// Ignore SIGINT while the process is running
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)

	defer func() {
		signal.Stop(ch)
		close(ch)
	}()

	// Drain the signal channel until it is closed by the defer above.
	go func() {
		for range ch {
		}
	}()

	err = cmd.Start()
	if err != nil {
		log.Fatal(err)
	}

	err = cmd.Wait()
	if err != nil {
		fmt.Printf("Command finished with error: %v\n", err)
	}
}
Пример #7
0
// return --base, or try to find a base cloudformation stack
//
// Resolution order:
//  1. the --base flag, if given
//  2. a stack tagged galaxy=base
//  3. a stack whose name ends in "-base"
//  4. as a last resort, a single-word stack name (logged as a guess)
// If more than one candidate is found, the user must pass --base.
func getBase(c *cli.Context) string {
	errNoBase := fmt.Errorf("could not identify a unique base stack")

	base := c.String("base")
	if base != "" {
		return base
	}

	descResp, err := stack.DescribeStacks("")
	if err != nil {
		log.Fatal(err)
	}

	// err is nil at this point; inside the loop it is reused to record an
	// ambiguity (a second candidate found) and is checked after the loop.
	for _, stack := range descResp.Stacks {
		// first check for galaxy:base tag
		baseTag := false
		for _, t := range stack.Tags {
			if t.Key == "galaxy" && t.Value == "base" {
				baseTag = true
			}
		}
		if baseTag {
			if base != "" {
				err = errNoBase
			}
			base = stack.Name
			continue
		}
		parts := strings.Split(stack.Name, "-")

		// check for "-base" in the name
		if parts[len(parts)-1] == "base" {
			if base != "" {
				err = errNoBase
			}
			base = stack.Name
			continue
		}

		// the best we can do for now is look for a stack with a single word
		if len(parts) == 1 {
			if base != "" {
				err = errNoBase
			}
			base = stack.Name

			log.Printf("Warning: guessing base stack: %s", base)
		}
	}

	if err != nil {
		log.Fatalf("%s: %s", err, "use --base")
	}

	return base
}
Пример #8
0
// create our base stack
//
// Validates the stack name, refuses to clobber an existing stack, renders
// the galaxy template from interactive/flag options, and creates the stack
// tagged galaxy=base (so getBase can find it). With -print the template is
// printed instead of created.
func stackInit(c *cli.Context) {
	stackName := c.Args().First()
	if stackName == "" {
		log.Fatal("ERROR: stack name required")
	}

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	exists, err := stack.Exists(stackName)
	if exists {
		log.Fatalf("ERROR: stack %s already exists", stackName)
	} else if err != nil {
		// (removed a stray debug print of "EXISTS ERROR" here)
		log.Fatal(err)
	}

	params := getInitOpts(c)
	stackTmpl, err := stack.GalaxyTemplate(params)
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}

	// With -print, just show the generated template and stop.
	if c.Bool("print") {
		fmt.Println(string(stackTmpl))
		return
	}

	// Tag the stack so it can later be identified as the base stack.
	opts := make(map[string]string)
	opts["tag.galaxy"] = "base"

	_, err = stack.Create(stackName, stackTmpl, opts)
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}
	log.Println("Initializing stack", stackName)
}
Пример #9
0
// List recent events for a stack
// Shows up to 20 events, or 24 hours of events.
func stackListEvents(c *cli.Context) {
	stackName := c.Args().First()
	if stackName == "" {
		log.Fatal("ERROR: stack name required")
	}

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	resp, err := stack.DescribeStackEvents(stackName)
	if err != nil {
		log.Fatal(err)
	}

	if len(resp.Events) == 0 {
		log.Println("no events for", stackName)
		return
	}

	// Events older than 24h before the most recent event are skipped.
	firstTS := resp.Events[0].Timestamp.Add(-24 * time.Hour)
	lines := []string{"TIMESTAMP | Logical ID | STATUS | REASON"}
	format := "%s | %s | %s | %s"

	for i, e := range resp.Events {
		// Was `i > 20`, which printed 21 events despite the documented
		// limit of 20.
		if i >= 20 || e.Timestamp.Before(firstTS) {
			break
		}

		displayTime := e.Timestamp.Format(time.Stamp)

		line := fmt.Sprintf(format, displayTime, e.LogicalResourceId, e.ResourceStatus, e.ResourceStatusReason)
		lines = append(lines, line)
	}

	output, _ := columnize.SimpleFormat(lines)
	log.Println(output)
}
Пример #10
0
// delete a pool
func stackDelete(c *cli.Context) {
	stackName := c.Args().First()
	if stackName == "" {
		log.Fatal("ERROR: stack name required")
	}

	// Prompt for confirmation unless -y was passed.
	confirmed := c.Bool("y")
	if !confirmed {
		answer := promptValue(fmt.Sprintf("\nDelete Stack '%s'?", stackName), "n")
		switch strings.ToLower(answer) {
		case "y", "yes":
			confirmed = true
		}
	}
	if !confirmed {
		log.Fatal("aborted")
	}

	if region := c.String("region"); region != "" {
		stack.Region = region
	}

	waitAndDelete(stackName)
}
Пример #11
0
// appCreate creates the app named by the first positional argument in the
// current galaxy environment.
func appCreate(c *cli.Context) {
	ensureEnvArg(c)
	initStore(c)

	app := c.Args().First()
	if app == "" {
		cli.ShowCommandHelp(c, "app:create")
		log.Fatal("ERROR: app name missing")
	}

	if err := commander.AppCreate(configStore, app, utils.GalaxyEnv(c)); err != nil {
		log.Fatalf("ERROR: %s", err)
	}
}
Пример #12
0
// poolUnassign removes the app named by the first positional argument from
// the current env/pool assignment.
func poolUnassign(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)
	initStore(c)

	app := c.Args().First()
	if app == "" {
		// Was "pool:assign" — show help for the command actually being run.
		cli.ShowCommandHelp(c, "pool:unassign")
		log.Fatal("ERROR: app name missing")
	}

	err := commander.AppUnassign(configStore, app, utils.GalaxyEnv(c), utils.GalaxyPool(c))
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}
}
Пример #13
0
// ensureAppParam returns the app name from the first positional argument,
// exiting with help/command-specific errors when the argument is missing
// or the app does not exist in the current environment.
func ensureAppParam(c *cli.Context, command string) string {
	app := c.Args().First()
	if app == "" {
		cli.ShowCommandHelp(c, command)
		log.Fatal("ERROR: app name missing")
	}

	exists, err := appExists(app, utils.GalaxyEnv(c))
	if err != nil {
		// fixed typo: "deteremine" -> "determine"
		log.Fatalf("ERROR: can't determine if %s exists: %s", app, err)
	}

	if !exists {
		log.Fatalf("ERROR: %s does not exist. Create it first.", app)
	}

	return app
}
Пример #14
0
// set autoscaling options for a pool
func setCPUAutoScale(c *cli.Context, pool *stack.Pool) {
	adj := c.Int("scale-adj")
	upDelay := c.Int("scale-up-delay")
	downDelay := c.Int("scale-down-delay")
	upCPU := c.Int("scale-up-cpu")
	downCPU := c.Int("scale-down-cpu")

	asgName := pool.ASG().Name
	if asgName == "" {
		log.Fatal("Error: missing ASG Name")
	}

	// Any options set to 0 will use template defaults.
	// Don't autoscale if no options are set.
	anySet := adj != 0 || upDelay != 0 || downDelay != 0 || upCPU != 0 || downCPU != 0
	if anySet {
		pool.SetCPUAutoScaling(asgName, adj, upCPU, upDelay, downCPU, downDelay)
	}
}
Пример #15
0
// SSHCmd runs command on host over ssh with a requested TTY, wiring stdin
// and stdout to this process. Stderr is buffered and only logged when the
// command fails, and the remote exit status is propagated via os.Exit.
//
// NOTE(review): the background and debug parameters are not used by this
// implementation — confirm whether they are still needed by callers.
func SSHCmd(host string, command string, background bool, debug bool) {
	// Split an optional ":port" suffix off the host; default to port 22.
	port := "22"
	hostPort := strings.SplitN(host, ":", 2)
	if len(hostPort) > 1 {
		host, port = hostPort[0], hostPort[1]
	}

	cmd := exec.Command("/usr/bin/ssh",
		"-o", "RequestTTY=yes",
		host,
		"-p", port,
		"-C", "/bin/sh", "-i", "-l", "-c", "'"+command+"'")

	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout

	// Capture stderr so it is only shown on failure. The zero-value
	// bytes.Buffer is ready to use; no need for the old
	// `var b []byte; bytes.NewBuffer(b)` dance.
	var buf bytes.Buffer
	cmd.Stderr = &buf

	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}

	if err := cmd.Wait(); err != nil {
		log.Error(buf.String())
		if exiterr, ok := err.(*exec.ExitError); ok {
			// The program has exited with an exit code != 0

			// This works on both Unix and Windows. Although package
			// syscall is generally platform dependent, WaitStatus is
			// defined for both Unix and Windows and in both cases has
			// an ExitStatus() method with the same signature.
			if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
				fmt.Fprintf(os.Stderr, "Command finished with error: %v\n", err)
				os.Exit(status.ExitStatus())
			}
		} else {
			fmt.Fprintf(os.Stderr, "Command finished with error: %v\n", err)
			os.Exit(1)
		}
	}
}
Пример #16
0
// stackList prints a columnized table of all CloudFormation stacks with
// their status and status reason.
func stackList(c *cli.Context) {
	if region := c.String("region"); region != "" {
		stack.Region = region
	}

	// An empty name asks the API for every stack.
	descResp, err := stack.DescribeStacks("")
	if err != nil {
		log.Fatal(err)
	}

	rows := []string{"STACK | STATUS | "}
	for _, st := range descResp.Stacks {
		rows = append(rows, fmt.Sprintf("%s | %s | %s", st.Name, st.Status, st.StatusReason))
	}

	output, _ := columnize.SimpleFormat(rows)
	log.Println(output)
}
Пример #17
0
// sharedResources loads the shared resources from the base stack and
// overlays any overrides supplied on the command line.
func sharedResources(c *cli.Context, baseStack string) stack.SharedResources {
	// get the resources we need from the base stack
	resources, err := stack.GetSharedResources(baseStack)
	if err != nil {
		log.Fatal(err)
	}

	// Command-line flags override the parameters from the base stack.
	overrides := map[string]string{
		"KeyName":          c.String("keyname"),
		"PoolImageId":      c.String("ami"),
		"PoolInstanceType": c.String("instance-type"),
	}
	for param, val := range overrides {
		if val != "" {
			resources.Parameters[param] = val
		}
	}

	return resources
}
Пример #18
0
// ensurePoolArg exits the process if no pool has been specified.
func ensurePoolArg(c *cli.Context) {
	pool := utils.GalaxyPool(c)
	if pool == "" {
		log.Fatal("ERROR: pool is required.  Pass --pool or set GALAXY_POOL")
	}
}
Пример #19
0
// ensureEnvArg exits the process if no environment has been specified.
func ensureEnvArg(c *cli.Context) {
	env := utils.GalaxyEnv(c)
	if env == "" {
		log.Fatal("ERROR: env is required.  Pass --env or set GALAXY_ENV")
	}
}
Пример #20
0
// update the base stack
//
// --parameters, --template, and --policy may each be supplied (via
// jsonFromArg) as a file path or inline JSON. When no template is given,
// the stack's currently running template is reused. Parameters not set
// explicitly are filled from the stack's shared resources, and the final
// parameter set is shown for confirmation before the update is issued.
func stackUpdate(c *cli.Context) {
	var stackTmpl []byte
	var err error

	stackName := c.Args().First()
	if stackName == "" {
		log.Fatal("ERROR: stack name required")
	}

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	// --parameters is a JSON object of parameter name -> value.
	params := make(map[string]string)
	if p := c.String("parameters"); p != "" {
		paramJSON, err := jsonFromArg(p)
		if err != nil {
			log.Fatal("ERROR: decoding parameters:", err)
		}

		err = json.Unmarshal(paramJSON, &params)
		if err != nil {
			log.Fatal(err)
		}
	}

	template := c.String("template")
	if template != "" {
		stackTmpl, err = jsonFromArg(template)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
	}

	// A temporary stack policy may apply for the duration of the update.
	if policy := c.String("policy"); policy != "" {
		policyJSON, err := jsonFromArg(policy)
		if err != nil {
			log.Fatal("policy error:", err)
		}

		params["StackPolicyDuringUpdateBody"] = string(policyJSON)
	}

	if len(stackTmpl) == 0 {
		// get the current running template
		stackTmpl, err = stack.GetTemplate(stackName)
		if err != nil {
			log.Fatal(err)
		}
	}

	// this reads the Parameters supplied for our current stack for us
	shared := sharedResources(c, stackName)

	// add any missing parameters to our update set
	for key, val := range shared.Parameters {
		if params[key] == "" {
			params[key] = val
		}
	}

	// Show the final parameter set and require explicit confirmation.
	p, _ := json.MarshalIndent(params, "", "  ")
	ok := promptValue(fmt.Sprintf("\nUpdate the [%s] stack with:\n%s\nAccept?", stackName, string(p)), "n")
	switch strings.ToLower(ok) {
	case "y", "yes":
		_, err = stack.Update(stackName, stackTmpl, params)
		if err != nil {
			log.Fatal(err)
		}
		log.Println("Updating stack:", stackName)
	default:
		log.Fatal("aborted")
	}
}
Пример #21
0
// Update an existing Pool Stack
//
// Applies pool-related flags (desired/min/max size, availability-zone
// count, AMI, instance type, ELB cert/ports/health check, autoscaling,
// update policy) to the pool's CloudFormation stack and waits for the
// update to complete. With -print, the generated template is printed
// instead of applied.
func stackUpdatePool(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	poolName := utils.GalaxyPool(c)
	baseStack := getBase(c)
	poolEnv := utils.GalaxyEnv(c)

	// Pool stacks are named <base>-<env>-<pool>.
	stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName)

	pool, err := stack.GetPool(stackName)
	if err != nil {
		log.Fatal(err)
	}

	options := make(map[string]string)
	if policy := c.String("policy"); policy != "" {
		policyJSON, err := jsonFromArg(policy)
		if err != nil {
			log.Fatal("policy error:", err)
		}

		options["StackPolicyDuringUpdateBody"] = string(policyJSON)
	}

	resources := sharedResources(c, baseStack)

	asg := pool.ASG()
	if asg == nil {
		log.Fatal("missing ASG")
	}

	// Only sizes explicitly set (>0) override the current template values.
	if c.Int("desired-size") > 0 {
		asg.Properties.DesiredCapacity = c.Int("desired-size")
	}

	if c.Int("min-size") > 0 {
		asg.Properties.MinSize = c.Int("min-size")
	}

	if c.Int("max-size") > 0 {
		asg.Properties.MaxSize = c.Int("max-size")
	}

	if c.Bool("auto-update") {
		// note that the max pause is only PT5M30S
		asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause"))
	}

	numZones := c.Int("availability-zones")
	if numZones == 0 {
		numZones = len(asg.Properties.VPCZoneIdentifier)
	}

	// start with the current settings
	subnetIDs := []string{}
	azIDs := []string{}

	// only update the subnets/AZs if we changed the count
	if len(asg.Properties.VPCZoneIdentifier) != numZones {
		subnets := resources.Subnets
		if numZones <= len(subnets) {
			subnets = subnets[:numZones]
		} else {
			// Was log.Fatal with printf verbs, which would have printed
			// the verbs literally instead of the values.
			log.Fatalf("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets))
		}

		for _, sn := range subnets {
			subnetIDs = append(subnetIDs, sn.ID)
			azIDs = append(azIDs, sn.AvailabilityZone)
		}
		asg.Properties.VPCZoneIdentifier = subnetIDs
		asg.Properties.AvailabilityZones = azIDs

	}

	elb := pool.ELB()

	sslCert := ""
	if cert := c.String("ssl-cert"); cert != "" {
		sslCert = resources.ServerCerts[cert]
		if sslCert == "" {
			log.Fatalf("Could not find certificate '%s'", cert)
		}
	}

	httpPort := c.Int("http-port")

	if (sslCert != "" || httpPort > 0) && elb == nil {
		log.Fatal("ERROR: Pool does not have an ELB")
	}

	// we can set the default now that we've verified that elb can be nil
	if httpPort == 0 {
		httpPort = 80
	}

	if elb != nil {
		certAdded := false
		// NOTE(review): this loop mutates `l`; it only sticks if Listeners
		// elements are pointers — confirm against the stack package.
		for _, l := range elb.Properties.Listeners {
			if sslCert != "" && l.Protocol == "HTTPS" {
				l.SSLCertificateId = sslCert
				certAdded = true
			}

			if httpPort > 0 {
				l.InstancePort = httpPort
			}
		}

		// the elb needs a cert, but doesn't have an https listener
		if sslCert != "" && !certAdded {
			elb.AddListener(443, "HTTPS", httpPort, "HTTP", sslCert, nil)
		}

		healthCheck := c.String("http-health-check")
		if healthCheck != "" && healthCheck != elb.Properties.HealthCheck.Target {
			elb.Properties.HealthCheck.Target = healthCheck
		}

		// always make sure the ELB is in the same subnets as the ASG
		elb.Properties.Subnets = asg.Properties.VPCZoneIdentifier
	}

	lc := pool.LC()
	if amiID := c.String("ami"); amiID != "" {
		lc.Properties.ImageId = amiID
	}

	if insType := c.String("instance-type"); insType != "" {
		lc.Properties.InstanceType = insType
	}

	// add autoscaling if it's required
	setCPUAutoScale(c, pool)

	poolTmpl, err := json.MarshalIndent(pool, "", "    ")
	if err != nil {
		log.Fatal(err)
	}

	if c.Bool("print") {
		fmt.Println(string(poolTmpl))
		return
	}

	log.Println("Updating stack:", stackName)
	if _, err := stack.Update(stackName, poolTmpl, options); err != nil {
		log.Fatal(err)
	}

	// do we want to wait on this by default?
	if err := stack.Wait(stackName, 5*time.Minute); err != nil {
		log.Fatal(err)
	}

	log.Println("UpdateStack complete")
}
Пример #22
0
// NewConsulBackend connects to the local consul agent and returns a
// backend bound to a "galaxy" consul session.
//
// An existing "galaxy" session on this node is reused when it can be
// renewed; otherwise a fresh 15s-TTL session with "delete" behavior is
// created. A background goroutine renews the session every 10s until the
// returned backend's done channel is closed.
func NewConsulBackend() *ConsulBackend {
	client, err := consul.NewClient(consul.DefaultConfig())
	if err != nil {
		// this shouldn't ever error with the default config
		panic(err)
	}

	node, err := client.Agent().NodeName()
	if err != nil {
		log.Fatal(err)
	}

	// find an existing galaxy session if one exists, or create a new one
	sessions, _, err := client.Session().Node(node, nil)
	if err != nil {
		log.Fatal(err)
	}

	var session *consul.SessionEntry
	for _, s := range sessions {
		if s.Name == "galaxy" {
			session = s
			break
		}
	}

	// we have a session, now make sure we can renew it so it doesn't expire
	// before we start running
	if session != nil {
		// NOTE(review): this relies on Renew yielding a nil session on
		// failure so the creation path below kicks in — confirm against
		// the consul API docs.
		session, _, err = client.Session().Renew(session.ID, nil)
		if err != nil {
			log.Debug("error renewing galaxy session:", err)
		}
	}

	// no existing session, so create a new one
	if session == nil {
		// "delete" behavior means keys locked to this session are removed
		// when the session expires or is destroyed.
		session = &consul.SessionEntry{
			Name:     "galaxy",
			Behavior: "delete",
			TTL:      "15s",
		}

		session.ID, _, err = client.Session().Create(session, nil)
		if err != nil {
			// we can't continue without a session for key TTLs
			log.Fatal(err)
		}
	}

	// keep our session alive in the background
	done := make(chan struct{})
	go client.Session().RenewPeriodic("10s", session.ID, nil, done)

	return &ConsulBackend{
		client:    client,
		sessionID: session.ID,
		done:      done,
		seen: &eventCache{
			seen: make(map[string]uint64),
		},
	}
}
Пример #23
0
// restore an app's config from backup
//
// Reads a JSON backup (from --file or STDIN), optionally restricts it to
// the apps named on the command line, refuses to overwrite existing apps
// unless -force is given, then restores each app's config. Per-app restore
// failures are logged and the process exits non-zero at the end.
func appRestore(c *cli.Context) {
	initStore(c)

	var err error
	var rawBackup []byte

	fileName := c.String("file")
	if fileName != "" {
		rawBackup, err = ioutil.ReadFile(fileName)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		log.Println("Reading backup from STDIN")
		rawBackup, err = ioutil.ReadAll(os.Stdin)
		if err != nil {
			log.Fatal(err)
		}
	}

	backup := &backupData{}
	if err := json.Unmarshal(rawBackup, backup); err != nil {
		log.Fatal(err)
	}

	// (dropped the trailing space in "from " — Println already inserts
	// a separator, so the old form printed a double space)
	fmt.Println("Found backup from", backup.Time)

	var toRestore []*appCfg

	// If apps were named, restore only those; each must be in the backup.
	if apps := c.Args(); len(apps) > 0 {
		for _, app := range apps {
			found := false
			for _, bkup := range backup.Apps {
				if bkup.Name == app {
					toRestore = append(toRestore, bkup)
					found = true
					break
				}
			}
			if !found {
				log.Fatalf("no backup found for '%s'\n", app)
			}

		}
	} else {
		toRestore = backup.Apps
	}

	// check for conflicts
	// NOTE: there is still a race here if an app is created after this check
	if !c.Bool("force") {
		needForce := false
		for _, bkup := range toRestore {
			exists, err := configStore.AppExists(bkup.Name, utils.GalaxyEnv(c))
			if err != nil {
				log.Fatal(err)
			}
			if exists {
				log.Warnf("Cannot restore over existing app '%s'", bkup.Name)
				needForce = true
			}
		}
		if needForce {
			log.Fatal("Use -force to overwrite")
		}
	}

	// Restore everything we can before reporting failure.
	loggedErr := false
	for _, bkup := range toRestore {
		if err := restoreApp(bkup, utils.GalaxyEnv(c)); err != nil {
			log.Errorf("%s", err)
			loggedErr = true
		}
	}

	if loggedErr {
		// This is mostly to give a non-zero exit status
		// (fixed typo: "occured" -> "occurred")
		log.Fatal("Error occurred during restore")
	}
}
Пример #24
0
// Prompt user for required arguments
// TODO: parse CIDR and generate appropriate subnets
// TODO: check for subnet collision
//
// Collects everything needed to render the base-stack template: stack
// name, EC2 keypair, controller/pool AMIs and instance types, VPC CIDR,
// region, and one subnet per availability zone.
func getInitOpts(c *cli.Context) *stack.GalaxyTmplParams {
	name := c.Args().First()
	if name == "" {
		name = promptValue("Base Stack Name", "galaxy-base")
	}

	keyName := c.String("keyname")
	if keyName == "" {
		// The default "required" doubles as a sentinel: if the user just
		// hits enter, we know no keypair was supplied.
		keyName = promptValue("EC2 Keypair Name", "required")
		if keyName == "required" {
			log.Fatal("keyname required")
		}
	}

	controllerAMI := promptValue("Controller AMI", "ami-9a562df2")
	controllerInstance := promptValue("Controller Instance Type", "t2.medium")
	poolAMI := promptValue("Default Pool AMI", "ami-9a562df2")
	poolInstance := promptValue("Default Pool Instance Type", "t2.medium")

	vpcSubnet := promptValue("VPC CIDR Block", "10.24.0.0/16")
	// some *very* basic input verification
	if !strings.Contains(vpcSubnet, "/") || strings.Count(vpcSubnet, ".") != 3 {
		log.Fatal("VPC Subnet must be in CIDR notation")
	}

	// Region precedence: --region flag > AWS_DEFAULT_REGION > prompt
	// (with us-east-1 offered as the prompt default).
	region := c.String("region")
	if region == "" {
		region = os.Getenv("AWS_DEFAULT_REGION")
		if region == "" {
			region = "us-east-1"
		}
		region = promptValue("EC2 Region", region)
	}

	azResp, err := stack.DescribeAvailabilityZones(region)
	if err != nil {
		log.Fatal(err)
	}

	subnets := []*stack.SubnetTmplParams{}

	// NOTE(review): subnet defaults are hard-coded to 10.24.x.0/24 and do
	// not track a custom VPC CIDR entered above (see TODO); the prompts
	// below let the user correct them by hand.
	for i, az := range azResp.AvailabilityZones {
		s := &stack.SubnetTmplParams{
			Name:   fmt.Sprintf("%sSubnet%d", name, i+1),
			Subnet: fmt.Sprintf("10.24.%d.0/24", i+1),
			AZ:     az.Name,
		}

		subnets = append(subnets, s)
	}

	// replace default subnets with user values
	for i, s := range subnets {
		s.Subnet = promptValue(fmt.Sprintf("Subnet %d", i+1), s.Subnet)
	}

	opts := &stack.GalaxyTmplParams{
		Name:                   name,
		KeyName:                keyName,
		ControllerImageId:      controllerAMI,
		ControllerInstanceType: controllerInstance,
		PoolImageId:            poolAMI,
		PoolInstanceType:       poolInstance,
		VPCCIDR:                vpcSubnet,
		Subnets:                subnets,
	}

	return opts
}
Пример #25
0
func main() {
	flag.Int64Var(&stopCutoff, "cutoff", 10, "Seconds to wait before stopping old containers")
	flag.StringVar(&registryURL, "registry", utils.GetEnv("GALAXY_REGISTRY_URL", "redis://127.0.0.1:6379"), "registry URL")
	flag.StringVar(&env, "env", utils.GetEnv("GALAXY_ENV", ""), "Environment namespace")
	flag.StringVar(&pool, "pool", utils.GetEnv("GALAXY_POOL", ""), "Pool namespace")
	flag.StringVar(&hostIP, "host-ip", "127.0.0.1", "Host IP")
	flag.StringVar(&shuttleAddr, "shuttle-addr", "", "Shuttle API addr (127.0.0.1:9090)")
	flag.StringVar(&dns, "dns", "", "DNS addr to use for containers")
	flag.BoolVar(&debug, "debug", false, "verbose logging")
	flag.BoolVar(&version, "v", false, "display version info")

	flag.Usage = func() {
		println("Usage: commander [options] <command> [<args>]\n")
		println("Available commands are:")
		println("   agent           Runs commander agent")
		println("   app             List all apps")
		println("   app:assign      Assign an app to a pool")
		println("   app:create      Create an app")
		println("   app:deploy      Deploy an app")
		println("   app:delete      Delete an app")
		println("   app:restart     Restart an app")
		println("   app:run         Run a command within an app on this host")
		println("   app:shell       Run a shell within an app on this host")
		println("   app:start       Starts one or more apps")
		println("   app:stop        Stops one or more apps")
		println("   app:unassign    Unassign an app from a pool")
		println("   config          List config for an app")
		println("   config:get      Get config values for an app")
		println("   config:set      Set config values for an app")
		println("   config:unset    Unset config values for an app")
		println("   runtime         List container runtime policies")
		println("   runtime:set     Set container runtime policies")
		println("   hosts           List hosts in an env and pool")
		println("\nOptions:\n")
		flag.PrintDefaults()
	}

	flag.Parse()

	if version {
		fmt.Println(buildVersion)
		return
	}

	log.DefaultLogger = log.New(os.Stdout, "", log.INFO)
	log.DefaultLogger.SetFlags(0)

	if debug {
		log.DefaultLogger.Level = log.DEBUG
	}

	if flag.NArg() < 1 {
		fmt.Println("Need a command")
		flag.Usage()
		os.Exit(1)
	}

	initOrDie()

	switch flag.Args()[0] {
	case "dump":
		if flag.NArg() < 2 {
			fmt.Println("Usage: commander dump ENV")
			os.Exit(1)
		}
		dump(flag.Arg(1))
		return

	case "restore":
		if flag.NArg() < 2 {
			fmt.Println("Usage: commander dump ENV FILE")
			os.Exit(1)
		}
		restore(flag.Arg(1))
		return

	case "agent":
		log.DefaultLogger.SetFlags(golog.LstdFlags)
		loop = true
		agentFs := flag.NewFlagSet("agent", flag.ExitOnError)
		agentFs.Usage = func() {
			println("Usage: commander agent [options]\n")
			println("    Runs commander continuously\n\n")
			println("Options:\n\n")
			agentFs.PrintDefaults()
		}
		agentFs.Parse(flag.Args()[1:])

		ensureEnv()
		ensurePool()

	case "app":
		appFs := flag.NewFlagSet("app", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander app\n")
			println("    List all apps or apps in an environment\n")
			println("Options:\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])
		err := commander.AppList(configStore, env)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "app:assign":
		appFs := flag.NewFlagSet("app:assign", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander app:assign <app>\n")
			println("    Assign an app to a pool\n")
			println("Options:\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()
		ensurePool()

		if appFs.NArg() != 1 {
			appFs.Usage()
			os.Exit(1)
		}

		err := commander.AppAssign(configStore, appFs.Args()[0], env, pool)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return
	case "app:create":
		appFs := flag.NewFlagSet("app:create", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander app:create <app>\n")
			println("    Create an app in an environment\n")
			println("Options:\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()

		if appFs.NArg() == 0 {
			appFs.Usage()
			os.Exit(1)
		}

		err := commander.AppCreate(configStore, appFs.Args()[0], env)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "app:delete":
		appFs := flag.NewFlagSet("app:delete", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander app:delete <app>\n")
			println("    Delete an app in an environment\n")
			println("Options:\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()

		if appFs.NArg() == 0 {
			appFs.Usage()
			os.Exit(1)
		}

		err := commander.AppDelete(configStore, appFs.Args()[0], env)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "app:deploy":
		appFs := flag.NewFlagSet("app:delete", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander app:deploy [-force] <app> <version>\n")
			println("    Deploy an app in an environment\n")
			println("Options:\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()

		if appFs.NArg() != 2 {
			appFs.Usage()
			os.Exit(1)
		}

		err := commander.AppDeploy(configStore, serviceRuntime, appFs.Args()[0], env, appFs.Args()[1])
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "app:restart":
		appFs := flag.NewFlagSet("app:restart", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander app:restart <app>\n")
			println("    Restart an app in an environment\n")
			println("Options:\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()

		if appFs.NArg() == 0 {
			appFs.Usage()
			os.Exit(1)
		}

		err := commander.AppRestart(configStore, appFs.Args()[0], env)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "app:run":
		appFs := flag.NewFlagSet("app:run", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander app:run <app> <cmd>\n")
			println("    Restart an app in an environment\n")
			println("Options:\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()

		if appFs.NArg() < 2 {
			appFs.Usage()
			os.Exit(1)
		}

		err := commander.AppRun(configStore, serviceRuntime, appFs.Args()[0], env, appFs.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "app:shell":
		appFs := flag.NewFlagSet("app:shell", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander app:shell <app>\n")
			println("    Run a shell for an app\n")
			println("Options:\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()
		ensurePool()

		if appFs.NArg() != 1 {
			appFs.Usage()
			os.Exit(1)
		}

		err := commander.AppShell(configStore, serviceRuntime, appFs.Args()[0], env, pool)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "app:start":

		startFs := flag.NewFlagSet("app:start", flag.ExitOnError)
		startFs.Usage = func() {
			println("Usage: commander app:start [options] [<app>]*\n")
			println("    Starts one or more apps. If no apps are specified, starts all apps.\n")
			println("Options:\n")
			startFs.PrintDefaults()
		}
		startFs.Parse(flag.Args()[1:])

		apps = startFs.Args()

		if len(apps) == 0 {
			acs, err := configStore.ListApps(env)
			if err != nil {
				log.Fatalf("ERROR: Unable to list apps: %s", err)
			}
			for _, ac := range acs {
				apps = append(apps, ac.Name())
			}
		}
		break
	case "app:status":
		// FIXME: undocumented

		statusFs := flag.NewFlagSet("app:status", flag.ExitOnError)
		statusFs.Usage = func() {
			println("Usage: commander app:status [options] [<app>]*\n")
			println("    Lists status of running apps.\n")
			println("Options:\n")
			statusFs.PrintDefaults()
		}
		statusFs.Parse(flag.Args()[1:])

		ensureEnv()
		ensurePool()

		err := discovery.Status(serviceRuntime, configStore, env, pool, hostIP)
		if err != nil {
			log.Fatalf("ERROR: Unable to list app status: %s", err)
		}
		return

	case "app:stop":
		stopFs := flag.NewFlagSet("app:stop", flag.ExitOnError)
		stopFs.Usage = func() {
			println("Usage: commander app:stop [options] [<app>]*\n")
			println("    Stops one or more apps. If no apps are specified, stops all apps.\n")
			println("Options:\n")
			stopFs.PrintDefaults()
		}
		stopFs.Parse(flag.Args()[1:])

		apps = stopFs.Args()

		for _, app := range apps {
			err := serviceRuntime.StopAllMatching(app)
			if err != nil {
				log.Fatalf("ERROR: Unable able to stop all containers: %s", err)
			}
		}
		if len(apps) > 0 {
			return
		}

		err := serviceRuntime.StopAll(env)
		if err != nil {
			log.Fatalf("ERROR: Unable able to stop all containers: %s", err)
		}
		return
	case "app:unassign":
		// Remove an app's assignment from the current pool.
		appFs := flag.NewFlagSet("app:unassign", flag.ExitOnError)
		appFs.Usage = func() {
			// Usage text fixed: unassign removes an app *from* a pool.
			println("Usage: commander app:unassign <app>\n")
			println("    Unassign an app from a pool\n")
			println("Options:\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()
		ensurePool()

		if appFs.NArg() != 1 {
			appFs.Usage()
			os.Exit(1)
		}

		err := commander.AppUnassign(configStore, appFs.Args()[0], env, pool)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "hosts":
		// List the hosts registered in the current env and pool.
		hostFs := flag.NewFlagSet("hosts", flag.ExitOnError)
		hostFs.Usage = func() {
			println("Usage: commander hosts\n")
			println("    List hosts in an env and pool\n")
			println("Options:\n")
			hostFs.PrintDefaults()
		}
		err := hostFs.Parse(flag.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: Bad command line options: %s", err)
		}

		ensureEnv()
		ensurePool()

		err = commander.HostsList(configStore, env, pool)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return
	case "config":
		// List all config values for a single app in the current env.
		configFs := flag.NewFlagSet("config", flag.ExitOnError)
		usage := "Usage: commander config <app>"
		configFs.Usage = func() {
			println(usage)
			println("    List config values for an app\n")
			println("Options:\n")
			configFs.PrintDefaults()
		}
		err := configFs.Parse(flag.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: Bad command line options: %s", err)
		}

		ensureEnv()

		// Exactly one positional argument (the app name) is required.
		if configFs.NArg() != 1 {
			log.Error("ERROR: Missing app name argument")
			log.Printf("Usage: %s", usage)
			os.Exit(1)
		}
		app := configFs.Args()[0]

		err = commander.ConfigList(configStore, app, env)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return
	case "config:get":
		// Print selected config values for an app.
		configFs := flag.NewFlagSet("config:get", flag.ExitOnError)
		configFs.Usage = func() {
			// Usage fixed: this sub-command is config:get, not config.
			println("Usage: commander config:get <app> KEY [KEY]*\n")
			println("    Get config values for an app\n")
			println("Options:\n")
			configFs.PrintDefaults()
		}
		err := configFs.Parse(flag.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: Bad command line options: %s", err)
		}

		ensureEnv()

		if configFs.NArg() == 0 {
			log.Errorf("ERROR: Missing app name")
			configFs.Usage()
			os.Exit(1)
		}
		app := configFs.Args()[0]

		// Remaining positional args are the keys to fetch.
		err = commander.ConfigGet(configStore, app, env, configFs.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return
	case "config:set":
		// Set one or more KEY=VALUE config entries for an app.
		configFs := flag.NewFlagSet("config:set", flag.ExitOnError)
		configFs.Usage = func() {
			// Usage fixed: this sub-command is config:set, not config.
			println("Usage: commander config:set <app> KEY=VALUE [KEY=VALUE]*\n")
			println("    Set config values for an app\n")
			println("Options:\n")
			configFs.PrintDefaults()
		}
		err := configFs.Parse(flag.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: Bad command line options: %s", err)
		}

		ensureEnv()

		if configFs.NArg() == 0 {
			log.Errorf("ERROR: Missing app name")
			configFs.Usage()
			os.Exit(1)
		}
		app := configFs.Args()[0]

		// Remaining positional args are the KEY=VALUE pairs to store.
		err = commander.ConfigSet(configStore, app, env, configFs.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return
	case "config:unset":
		// Remove one or more config keys from an app.
		configFs := flag.NewFlagSet("config:unset", flag.ExitOnError)
		configFs.Usage = func() {
			// Usage fixed: this sub-command is config:unset, not config.
			println("Usage: commander config:unset <app> KEY [KEY]*\n")
			println("    Unset config values for an app\n")
			println("Options:\n")
			configFs.PrintDefaults()
		}
		err := configFs.Parse(flag.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: Bad command line options: %s", err)
		}

		ensureEnv()

		if configFs.NArg() == 0 {
			log.Errorf("ERROR: Missing app name")
			configFs.Usage()
			os.Exit(1)
		}
		app := configFs.Args()[0]

		// Remaining positional args are the keys to delete.
		err = commander.ConfigUnset(configStore, app, env, configFs.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "runtime":
		// List container runtime policies, optionally filtered to one app.
		// NOTE(review): unlike most sub-commands this does not call
		// ensureEnv/ensurePool — presumably RuntimeList accepts empty
		// env/pool as "all"; confirm.
		runtimeFs := flag.NewFlagSet("runtime", flag.ExitOnError)
		runtimeFs.Usage = func() {
			println("Usage: commander runtime\n")
			println("    List container runtime policies\n")
			println("Options:\n")
			runtimeFs.PrintDefaults()
		}
		err := runtimeFs.Parse(flag.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: Bad command line options: %s", err)
		}

		// Optional positional argument narrows the listing to a single app.
		app := ""
		if runtimeFs.NArg() > 0 {
			app = runtimeFs.Args()[0]
		}

		err = commander.RuntimeList(configStore, app, env, pool)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
		return

	case "runtime:set":
		// Set container runtime policies (instance count, memory, CPU shares,
		// vhost, service port, maintenance mode) for a single app.
		var ps int
		var m string
		var c string
		var vhost string
		var port string
		var maint string
		runtimeFs := flag.NewFlagSet("runtime:set", flag.ExitOnError)
		runtimeFs.IntVar(&ps, "ps", 0, "Number of instances to run across all hosts")
		runtimeFs.StringVar(&m, "m", "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
		runtimeFs.StringVar(&c, "c", "", "CPU shares (relative weight)")
		runtimeFs.StringVar(&vhost, "vhost", "", "Virtual host for HTTP routing")
		runtimeFs.StringVar(&port, "port", "", "Service port for service discovery")
		runtimeFs.StringVar(&maint, "maint", "", "Enable or disable maintenance mode")

		runtimeFs.Usage = func() {
			println("Usage: commander runtime:set [-ps 1] [-m 100m] [-c 512] [-vhost x.y.z] [-port 8000] [-maint false] <app>\n")
			println("    Set container runtime policies\n")
			println("Options:\n")
			runtimeFs.PrintDefaults()
		}

		err := runtimeFs.Parse(flag.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: Bad command line options: %s", err)
		}

		ensureEnv()

		// ps/memory/cpu/maintenance are pool-scoped settings, so setting any
		// of them requires a pool; vhost and port alone do not.
		if ps != 0 || m != "" || c != "" || maint != "" {
			ensurePool()
		}

		// Exactly one positional argument (the app name) is required.
		if runtimeFs.NArg() != 1 {
			runtimeFs.Usage()
			os.Exit(1)
		}

		app := runtimeFs.Args()[0]

		// Validate the memory flag before applying any changes.
		// NOTE(review): ParseMemory runs even when -m was not supplied;
		// presumably it accepts "" — confirm.
		_, err = utils.ParseMemory(m)
		if err != nil {
			log.Fatalf("ERROR: Bad memory option %s: %s", m, err)
		}

		updated, err := commander.RuntimeSet(configStore, app, env, pool, commander.RuntimeOptions{
			Ps:              ps,
			Memory:          m,
			CPUShares:       c,
			VirtualHost:     vhost,
			Port:            port,
			MaintenanceMode: maint,
		})
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}

		if !updated {
			log.Fatalf("ERROR: Failed to set runtime options.")
		}

		if pool != "" {
			log.Printf("Runtime options updated for %s in %s running on %s", app, env, pool)
		} else {
			log.Printf("Runtime options updated for %s in %s", app, env)
		}
		return

	case "runtime:unset":
		// Reset container runtime policies (instance count, memory, CPU
		// shares, vhost, service port) back to defaults for a single app.
		var ps, m, c, port bool
		var vhost string
		runtimeFs := flag.NewFlagSet("runtime:unset", flag.ExitOnError)
		runtimeFs.BoolVar(&ps, "ps", false, "Number of instances to run across all hosts")
		runtimeFs.BoolVar(&m, "m", false, "Memory limit")
		runtimeFs.BoolVar(&c, "c", false, "CPU shares (relative weight)")
		runtimeFs.StringVar(&vhost, "vhost", "", "Virtual host for HTTP routing")
		runtimeFs.BoolVar(&port, "port", false, "Service port for service discovery")

		runtimeFs.Usage = func() {
			println("Usage: commander runtime:unset [-ps] [-m] [-c] [-vhost x.y.z] [-port] <app>\n")
			println("    Reset and removes container runtime policies to defaults\n")
			println("Options:\n")
			runtimeFs.PrintDefaults()
		}

		err := runtimeFs.Parse(flag.Args()[1:])
		if err != nil {
			log.Fatalf("ERROR: Bad command line options: %s", err)
		}

		ensureEnv()

		// ps/memory/cpu are pool-scoped settings, so a pool is required to
		// unset them.
		if ps || m || c {
			ensurePool()
		}

		// Exactly one positional argument (the app name) is required.
		if runtimeFs.NArg() != 1 {
			runtimeFs.Usage()
			os.Exit(1)
		}

		app := runtimeFs.Args()[0]

		// Sentinel values (-1 for Ps, "-" for the string fields) mark the
		// fields RuntimeUnset should clear.
		options := commander.RuntimeOptions{
			VirtualHost: vhost,
		}
		if ps {
			options.Ps = -1
		}

		if m {
			options.Memory = "-"
		}

		if c {
			options.CPUShares = "-"
		}

		if port {
			options.Port = "-"
		}

		updated, err := commander.RuntimeUnset(configStore, app, env, pool, options)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}

		if !updated {
			// Message fixed: this is the unset path, the old text said "set".
			log.Fatalf("ERROR: Failed to unset runtime options.")
		}

		if pool != "" {
			log.Printf("Runtime options updated for %s in %s running on %s", app, env, pool)
		} else {
			log.Printf("Runtime options updated for %s in %s", app, env)
		}
		return

	case "pool":

		// List the pools configured in the current env.
		err := commander.ListPools(configStore, env)
		if err != nil {
			log.Fatal(err)
		}
		return

	case "pool:create":
		// Create a pool in the current env.
		appFs := flag.NewFlagSet("pool:create", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander -env <env> pool:create <pool>\n")
			println("    Create a pool in <env>\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()

		// The pool may come from the global -pool flag or from the first
		// positional argument of this sub-command.
		if pool == "" && appFs.NArg() > 0 {
			pool = appFs.Arg(0)
		} else {
			ensurePool()
		}

		err := commander.PoolCreate(configStore, env, pool)
		if err != nil {
			log.Fatalf("ERROR: Could not create pool: %s", err)
		}
		fmt.Println("created pool:", pool)
		return

	case "pool:delete":
		// Delete a pool from the current env.
		appFs := flag.NewFlagSet("pool:delete", flag.ExitOnError)
		appFs.Usage = func() {
			println("Usage: commander -env <env> pool:delete <pool>\n")
			println("    Delete a pool from <env>\n")
			appFs.PrintDefaults()
		}
		appFs.Parse(flag.Args()[1:])

		ensureEnv()

		// Read the pool from the parsed sub-command args, matching
		// pool:create. The previous code read the raw global flag.Arg(1),
		// which ignores this FlagSet's parsing.
		if pool == "" && appFs.NArg() > 0 {
			pool = appFs.Arg(0)
		} else {
			ensurePool()
		}

		err := commander.PoolDelete(configStore, env, pool)
		if err != nil {
			// log.Fatalf exits; the old `return` after it was unreachable.
			log.Fatalf("ERROR: Could not delete pool: %s", err)
		}

		fmt.Println("deleted pool:", pool)
		return

	default:
		// Unrecognized sub-command: print usage and exit non-zero.
		fmt.Println("Unknown command")
		flag.Usage()
		os.Exit(1)
	}

	// Agent path: reached only by cases that fall through the switch
	// (e.g. app:start). Registers this host, kicks off worker deploys and,
	// when looping, heartbeats and watches for config changes.
	ensureEnv()
	ensurePool()

	log.Printf("Starting commander %s", buildVersion)
	log.Printf("env=%s pool=%s host-ip=%s registry=%s shuttle-addr=%s dns=%s cutoff=%ds",
		env, pool, hostIP, registryURL, shuttleAddr, dns, stopCutoff)

	// Deregister this host from the config store on shutdown.
	defer func() {
		configStore.DeleteHost(env, pool, config.HostInfo{
			HostIP: hostIP,
		})
	}()

	// Trigger a deploy for every managed app (or only those requested).
	for app, ch := range workerChans {
		if len(apps) == 0 || utils.StringInSlice(app, apps) {
			wg.Add(1)
			go restartContainers(app, ch)
			ch <- "deploy"
		}
	}

	if loop {
		wg.Add(1)
		go heartbeatHost()

		go discovery.Register(serviceRuntime, configStore, env, pool, hostIP, shuttleAddr)
		cancelChan := make(chan struct{})
		// do we need to cancel ever?

		// Block on config-change notifications and restart services as needed.
		restartChan := configStore.Watch(env, cancelChan)
		monitorService(restartChan)
	}

	// TODO: do we still need a WaitGroup?
	wg.Wait()
}
Пример #26
0
// StartInteractive pulls the app's image and runs an interactive /bin/bash
// in a fresh container by shelling out to `docker run`, wiring the current
// process's stdin/stdout/stderr through so signals and the terminal work.
// It blocks until the shell exits and returns any error from the command.
func (s *ServiceRuntime) StartInteractive(env, pool string, appCfg config.App) error {

	// Make sure we have the image locally before running it.
	fmt.Fprintf(os.Stderr, "Pulling latest image for %s\n", appCfg.Version())
	if _, err := s.PullImage(appCfg.Version(), appCfg.VersionID()); err != nil {
		return err
	}

	args := []string{"run", "--rm", "-i", "-e", "ENV=" + env}

	// Export the app's configured environment (uppercased, with host
	// variable substitution). ENV was set above and must not be overridden.
	for key, value := range appCfg.Env() {
		if key == "ENV" {
			continue
		}
		args = append(args, "-e", strings.ToUpper(key)+"="+s.replaceVarEnv(value, s.hostIP))
	}

	args = append(args, "-e", fmt.Sprintf("HOST_IP=%s", s.hostIP))
	if s.dns != "" {
		args = append(args, "--dns", s.dns)
	}
	args = append(args, "-e", fmt.Sprintf("GALAXY_APP=%s", appCfg.Name()))
	args = append(args, "-e", fmt.Sprintf("GALAXY_VERSION=%s", strconv.FormatInt(appCfg.ID(), 10)))

	instanceId, err := s.NextInstanceSlot(appCfg.Name(), strconv.FormatInt(appCfg.ID(), 10))
	if err != nil {
		return err
	}
	args = append(args, "-e", fmt.Sprintf("GALAXY_INSTANCE=%s", strconv.FormatInt(int64(instanceId), 10)))

	// Best-effort lookup; outside AWS fall back to localhost.
	publicDns, err := EC2PublicHostname()
	if err != nil {
		log.Warnf("Unable to determine public hostname. Not on AWS? %s", err)
		publicDns = "127.0.0.1"
	}
	args = append(args, "-e", fmt.Sprintf("PUBLIC_HOSTNAME=%s", publicDns))

	// Apply pool-specific resource limits when configured.
	if mem := appCfg.GetMemory(pool); mem != "" {
		args = append(args, "-m", mem)
	}
	if cpu := appCfg.GetCPUShares(pool); cpu != "" {
		args = append(args, "-c", cpu)
	}

	args = append(args, "-t", appCfg.Version(), "/bin/bash")

	// Shell out to the docker CLI so signals are forwarded and the terminal
	// is set up correctly.
	cmd := exec.Command("docker", args...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	// Return the error rather than log.Fatal: this method already returns
	// an error and must not terminate the whole process on behalf of the
	// caller.
	if err := cmd.Start(); err != nil {
		return err
	}

	err = cmd.Wait()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Command finished with error: %v\n", err)
	}

	return err
}
Пример #27
0
// stackCreatePool builds and launches a CloudFormation stack for a new
// galaxy pool: a LaunchConfiguration plus AutoScalingGroup (and optionally
// an ELB), derived from the base stack's shared resources. With -print it
// only writes the generated template to stdout.
func stackCreatePool(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	poolName := utils.GalaxyPool(c)
	baseStack := getBase(c)
	poolEnv := utils.GalaxyEnv(c)

	stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName)

	pool := stack.NewPool()

	// get the resources we need from the base stack
	// TODO: this may search for the base stack a second time
	resources := sharedResources(c, baseStack)

	desiredCap := c.Int("desired-size")
	if desiredCap == 0 {
		desiredCap = 1
	}

	numZones := c.Int("availability-zones")
	if numZones == 0 {
		// default to running one host per zone
		numZones = desiredCap
	}

	minSize := c.Int("min-size")
	maxSize := c.Int("max-size")
	httpPort := c.Int("http-port")
	if httpPort == 0 {
		httpPort = 80
	}

	// Resolve an optional SSL certificate by name from the base stack.
	sslCert := ""
	if cert := c.String("ssl-cert"); cert != "" {
		sslCert = resources.ServerCerts[cert]
		if sslCert == "" {
			log.Fatalf("Could not find certificate '%s'", cert)
		}
	}

	// Create our Launch Config. Explicit flags win over base-stack defaults.
	lc := pool.LCTemplate
	lcName := "lc" + poolEnv + poolName

	if amiID := c.String("ami"); amiID != "" {
		lc.Properties.ImageId = amiID
	} else {
		lc.Properties.ImageId = resources.Parameters["PoolImageId"]
	}

	if insType := c.String("instance-type"); insType != "" {
		lc.Properties.InstanceType = insType
	} else {
		lc.Properties.InstanceType = resources.Parameters["PoolInstanceType"]
	}

	if keyName := c.String("keyname"); keyName != "" {
		lc.Properties.KeyName = keyName
	} else {
		lc.Properties.KeyName = resources.Parameters["KeyName"]
	}

	lc.Properties.IamInstanceProfile = resources.Roles["galaxyInstanceProfile"]

	lc.Properties.SecurityGroups = []string{
		resources.SecurityGroups["sshSG"],
		resources.SecurityGroups["defaultSG"],
	}

	lc.SetVolumeSize(c.Int("volume-size"))

	pool.Resources[lcName] = lc

	// Create the Auto Scaling Group
	asg := pool.ASGTemplate
	asgName := "asg" + poolEnv + poolName

	asg.AddTag("Name", fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName), true)
	asg.AddTag("env", poolEnv, true)
	asg.AddTag("pool", poolName, true)
	asg.AddTag("galaxy", "pool", true)

	asg.Properties.DesiredCapacity = desiredCap

	// Don't always run in all zones
	subnets := resources.Subnets
	if numZones <= len(subnets) {
		subnets = subnets[:numZones]
	} else {
		// Fixed: log.Fatal does not format; Fatalf is required for the verbs.
		log.Fatalf("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets))
	}

	// break the subnets info into separate subnet and AZ slices for the template
	subnetIDs := []string{}
	azIDs := []string{}
	for _, sn := range subnets {
		subnetIDs = append(subnetIDs, sn.ID)
		azIDs = append(azIDs, sn.AvailabilityZone)
	}

	asg.SetLaunchConfiguration(lcName)
	asg.Properties.AvailabilityZones = azIDs
	asg.Properties.VPCZoneIdentifier = subnetIDs
	if maxSize > 0 {
		asg.Properties.MaxSize = maxSize
	}
	if minSize > 0 {
		asg.Properties.MinSize = minSize
	}

	if c.Bool("auto-update") {
		asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause"))
	}

	pool.Resources[asgName] = asg

	// Optionally create the Elastic Load Balancer
	if c.Bool("elb") {
		elb := pool.ELBTemplate
		elbName := "elb" + poolEnv + poolName

		// make sure to add this to the ASG
		asg.AddLoadBalancer(elbName)

		elb.Properties.Subnets = subnetIDs

		elb.Properties.SecurityGroups = []string{
			resources.SecurityGroups["webSG"],
			resources.SecurityGroups["defaultSG"],
		}

		elb.Properties.HealthCheck.Target = c.String("http-health-check")

		elb.AddListener(80, "HTTP", httpPort, "HTTP", "", nil)

		// Terminate TLS at the ELB when a certificate was supplied.
		if sslCert != "" {
			elb.AddListener(443, "HTTPS", httpPort, "HTTP", sslCert, nil)
		}

		pool.Resources[elbName] = elb
	}

	// add autoscaling if it's required
	setCPUAutoScale(c, pool)

	poolTmpl, err := json.MarshalIndent(pool, "", "    ")
	if err != nil {
		log.Fatal(err)
	}

	if c.Bool("print") {
		fmt.Println(string(poolTmpl))
		return
	}

	opts := make(map[string]string)
	opts["tag.env"] = poolEnv
	opts["tag.pool"] = poolName
	opts["tag.galaxy"] = "pool"

	_, err = stack.Create(stackName, poolTmpl, opts)
	if err != nil {
		log.Fatal(err)
	}

	log.Println("Creating stack:", stackName)

	// do we want to wait on this by default?
	if err := stack.Wait(stackName, 5*time.Minute); err != nil {
		log.Error(err)
		log.Error("CreateStack Failed, attempting to delete")

		// Clean up the partially-created stack once it reaches a final state.
		waitAndDelete(stackName)
		return
	}

	log.Println("CreateStack complete")
}