Example #1
// wait until a stack is in a final state, then delete it
func waitAndDelete(name string) {
	log.Println("Attempting to delete stack:", name)
	// we need to get the StackID in order to lookup DELETE events
	desc, err := stack.DescribeStacks(name)
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	} else if len(desc.Stacks) == 0 {
		log.Fatal("ERROR: could not describe stack:", name)
	}

	stackId := desc.Stacks[0].Id

	err = stack.WaitForComplete(stackId, 5*time.Minute)
	if err != nil {
		log.Fatal(err)
	}

	_, err = stack.Delete(name)
	if err != nil {
		log.Fatal(err)
	}

	// wait for the delete to reach a final state
	err = stack.WaitForComplete(stackId, 5*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("Deleted stack:", name)
}
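This helper is the cleanup path used by Example #20 below; a minimal sketch of that call site, with stackName assumed to be in scope:

	// after a failed create, wait for the stack to settle, then delete it
	if err := stack.Wait(stackName, 5*time.Minute); err != nil {
		log.Error(err)
		log.Error("CreateStack Failed, attempting to delete")
		waitAndDelete(stackName)
	}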
Example #2
func AppList(configStore *config.Store, env string) error {

	envs := []string{env}

	if env == "" {
		var err error
		envs, err = configStore.ListEnvs()
		if err != nil {
			return err
		}
	}

	columns := []string{"NAME | ENV | VERSION | IMAGE ID | CONFIG | POOLS "}

	for _, env := range envs {

		appList, err := configStore.ListApps(env)
		if err != nil {
			return err
		}

		pools, err := configStore.ListPools(env)
		if err != nil {
			return err
		}

		for _, app := range appList {
			name := app.Name()
			versionDeployed := app.Version()
			versionID := app.VersionID()
			if len(versionID) > 12 {
				versionID = versionID[:12]
			}

			assignments := []string{}
			for _, pool := range pools {
				aa, err := configStore.ListAssignments(env, pool)
				if err != nil {
					return err
				}
				if utils.StringInSlice(app.Name(), aa) {
					assignments = append(assignments, pool)
				}
			}

			columns = append(columns, strings.Join([]string{
				name,
				env,
				versionDeployed,
				versionID,
				strconv.FormatInt(app.ID(), 10),
				strings.Join(assignments, ","),
			}, " | "))
		}
	}
	output, _ := columnize.SimpleFormat(columns)
	log.Println(output)
	return nil
}
Example #3
func RuntimeList(configStore *config.Store, app, env, pool string) error {

	envs := []string{env}

	if env == "" {
		var err error
		envs, err = configStore.ListEnvs()
		if err != nil {
			return err
		}
	}

	columns := []string{"ENV | NAME | POOL | PS | MEM | VHOSTS | PORT"}

	for _, env := range envs {

		appList, err := configStore.ListApps(env)
		if err != nil {
			return err
		}

		for _, appCfg := range appList {

			if app != "" && appCfg.Name() != app {
				continue
			}

			for _, p := range appCfg.RuntimePools() {

				if pool != "" && p != pool {
					continue
				}

				name := appCfg.Name()
				ps := appCfg.GetProcesses(p)
				mem := appCfg.GetMemory(p)

				columns = append(columns, strings.Join([]string{
					env,
					name,
					p,
					strconv.FormatInt(int64(ps), 10),
					mem,
					appCfg.Env()["VIRTUAL_HOST"],
					appCfg.Env()["GALAXY_PORT"],
				}, " | "))
			}
		}
	}
	output, _ := columnize.SimpleFormat(columns)
	log.Println(output)
	return nil

}
Example #4
// inspectImage checks that the running image matches the config.
// We only use this to print warnings, since we likely need to deploy a new
// config version to fix the inconsistency.
func inspectImage(appCfg config.App) {
	image, err := serviceRuntime.InspectImage(appCfg.Version())
	if err != nil {
		log.Println("error inspecting image", appCfg.Version())
		return
	}

	if utils.StripSHA(image.ID) != appCfg.VersionID() {
		log.Printf("warning: %s image ID does not match config", appCfg.Name())
	}
}
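A possible driver for inspectImage, following the store/listing pattern of Example #2 (configStore and env are assumed to be in scope):

	// hypothetical sketch: warn about image/config drift for every app in an env
	apps, err := configStore.ListApps(env)
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}
	for _, appCfg := range apps {
		inspectImage(appCfg)
	}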
Example #5
func HostsList(configStore *config.Store, env, pool string) error {

	envs := []string{env}

	if env == "" {
		var err error
		envs, err = configStore.ListEnvs()
		if err != nil {
			return err
		}
	}

	columns := []string{"ENV | POOL | HOST IP "}

	for _, env := range envs {

		var err error
		pools := []string{pool}
		if pool == "" {
			pools, err = configStore.ListPools(env)
			if err != nil {
				return err
			}
		}

		for _, pool := range pools {

			hosts, err := configStore.ListHosts(env, pool)
			if err != nil {
				return err
			}

			if len(hosts) == 0 {
				columns = append(columns, strings.Join([]string{
					env,
					pool,
					"",
				}, " | "))
				continue
			}
		for _, host := range hosts {
			columns = append(columns, strings.Join([]string{
				env,
				pool,
				host.HostIP,
			}, " | "))
			}
		}
	}
	output, _ := columnize.SimpleFormat(columns)
	log.Println(output)
	return nil

}
Example #6
func Status(serviceRuntime *runtime.ServiceRuntime, configStore *config.Store, env, pool, hostIP string) error {

	containers, err := serviceRuntime.ManagedContainers()
	if err != nil {
		return err
	}

	//FIXME: addresses, port, and expires missing in output
	columns := []string{
		"APP | CONTAINER ID | IMAGE | EXTERNAL | INTERNAL | PORT | CREATED | EXPIRES"}

	for _, container := range containers {
		name := serviceRuntime.EnvFor(container)["GALAXY_APP"]
		registered, err := configStore.GetServiceRegistration(
			env, pool, hostIP, container)
		if err != nil {
			return err
		}

		if registered != nil {
			columns = append(columns,
				strings.Join([]string{
					registered.Name,
					registered.ContainerID[0:12],
					registered.Image,
					registered.ExternalAddr(),
					registered.InternalAddr(),
					registered.Port,
					utils.HumanDuration(time.Now().UTC().Sub(registered.StartedAt)) + " ago",
					"In " + utils.HumanDuration(registered.Expires.Sub(time.Now().UTC())),
				}, " | "))

		} else {
			columns = append(columns,
				strings.Join([]string{
					name,
					container.ID[0:12],
					container.Image,
					"",
					"",
					"",
					utils.HumanDuration(time.Now().Sub(container.Created)) + " ago",
					"",
				}, " | "))
		}

	}

	result, _ := columnize.SimpleFormat(columns)
	log.Println(result)
	return nil
}
Example #7
// List recent events for a stack
// Shows up to 20 events, or 24 hours of events.
func stackListEvents(c *cli.Context) {
	stackName := c.Args().First()
	if stackName == "" {
		log.Fatal("ERROR: stack name required")
	}

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	resp, err := stack.DescribeStackEvents(stackName)
	if err != nil {
		log.Fatal(err)
	}

	if len(resp.Events) == 0 {
		log.Println("no events for", stackName)
		return
	}

	firstTS := resp.Events[0].Timestamp.Add(-24 * time.Hour)
	lines := []string{"TIMESTAMP | Logical ID | STATUS | REASON"}
	format := "%s | %s | %s | %s"

	for i, e := range resp.Events {
		if i >= 20 || e.Timestamp.Before(firstTS) {
			break
		}

		displayTime := e.Timestamp.Format(time.Stamp)

		line := fmt.Sprintf(format, displayTime, e.LogicalResourceId, e.ResourceStatus, e.ResourceStatusReason)
		lines = append(lines, line)
	}

	output, _ := columnize.SimpleFormat(lines)
	log.Println(output)
}
Example #8
func poolList(c *cli.Context) {
	initStore(c)

	envs := []string{utils.GalaxyEnv(c)}
	if utils.GalaxyEnv(c) == "" {
		var err error
		envs, err = configStore.ListEnvs()
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
	}

	columns := []string{"ENV | POOL | APPS "}

	for _, env := range envs {
		pools, err := configStore.ListPools(env)
		if err != nil {
			log.Fatalf("ERROR: cannot list pools: %s", err)
		}

		if len(pools) == 0 {
			columns = append(columns, strings.Join([]string{
				env,
				"",
				""}, " | "))
			continue
		}

		for _, pool := range pools {

			assignments, err := configStore.ListAssignments(env, pool)
			if err != nil {
				log.Fatalf("ERROR: cannot list pool assignments: %s", err)
			}

			columns = append(columns, strings.Join([]string{
				env,
				pool,
				strings.Join(assignments, ",")}, " | "))
		}

	}
	output, _ := columnize.SimpleFormat(columns)
	log.Println(output)
}
Example #9
// FIXME: This can't be shut down. Make sure that's not a problem
func (c *ConsulBackend) sub(key string, msgs chan string) {
	var events []*consul.UserEvent
	var meta *consul.QueryMeta
	var err error
	for {
		// No way to handle failure here, just keep trying to get our first set of events.
		// We need a successful query to get the last index to search from.
		events, meta, err = c.client.Event().List(key, nil)
		if err != nil {
			log.Println("Subscribe error:", err)
			time.Sleep(5 * time.Second)
			continue
		}
		// cache all old events
		c.seen.Filter(events)
		break
	}

	lastIndex := meta.LastIndex
	for {
		opts := &consul.QueryOptions{
			WaitIndex: lastIndex,
			WaitTime:  30 * time.Second,
		}
		events, meta, err = c.client.Event().List(key, opts)
		if err != nil {
			log.Printf("Subscribe(%s): %s\n", key, err.Error())
			// back off so a persistent error doesn't spin in a tight loop
			time.Sleep(5 * time.Second)
			continue
		}

		if meta.LastIndex == lastIndex {
			// no new events
			continue
		}

		for _, event := range c.seen.Filter(events) {
			msgs <- string(event.Payload)
		}

		lastIndex = meta.LastIndex
	}
}
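Because sub never returns, a caller would run it in its own goroutine and drain the channel; a minimal sketch (the backend value and the event key are illustrative assumptions):

	// hypothetical consumer: event payloads arrive as strings on msgs
	msgs := make(chan string)
	go backend.sub("galaxy-events", msgs) // key name is illustrative
	for m := range msgs {
		log.Println("event payload:", m)
	}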
Example #10
func stackList(c *cli.Context) {
	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	descResp, err := stack.DescribeStacks("")
	if err != nil {
		log.Fatal(err)
	}

	stacks := []string{"STACK | STATUS | "}

	for _, stack := range descResp.Stacks {
		s := fmt.Sprintf("%s | %s | %s", stack.Name, stack.Status, stack.StatusReason)
		stacks = append(stacks, s)
	}

	output, _ := columnize.SimpleFormat(stacks)
	log.Println(output)
}
Example #11
// List all apps in an environment
func (c *ConsulBackend) ListApps(env string) ([]App, error) {
	key := path.Join("galaxy", "apps", env)
	kvPairs, _, err := c.client.KV().List(key, nil)
	if err != nil {
		return nil, err
	}

	apps := make([]App, 0, len(kvPairs))
	for _, kvp := range kvPairs {
		ad := &AppDefinition{}
		err := json.Unmarshal(kvp.Value, ad)
		if err != nil {
			// skip entries that fail to decode rather than leaving nil slots
			log.Printf("error decoding AppDefinition for %s: %s", kvp.Key, err.Error())
			continue
		}
		ad.ConfigIndex = int64(kvp.ModifyIndex)
		apps = append(apps, App(ad))
	}
	return apps, nil
}
Example #12
// create our base stack
func stackInit(c *cli.Context) {
	stackName := c.Args().First()
	if stackName == "" {
		log.Fatal("ERROR: stack name required")
	}

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	exists, err := stack.Exists(stackName)
	if exists {
		log.Fatalf("ERROR: stack %s already exists", stackName)
	} else if err != nil {
		log.Fatalf("ERROR: could not check stack existence: %s", err)
	}

	params := getInitOpts(c)
	stackTmpl, err := stack.GalaxyTemplate(params)
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}

	if c.Bool("print") {
		fmt.Println(string(stackTmpl))
		return
	}

	opts := make(map[string]string)
	opts["tag.galaxy"] = "base"

	_, err = stack.Create(stackName, stackTmpl, opts)
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}
	log.Println("Initializing stack", stackName)
}
Example #13
func appDeploy(c *cli.Context) {
	ensureEnvArg(c)
	initStore(c)
	initRuntime(c)

	app := ensureAppParam(c, "app:deploy")

	version := ""
	if len(c.Args().Tail()) == 1 {
		version = c.Args().Tail()[0]
	}

	if version == "" {
		log.Println("ERROR: version missing")
		cli.ShowCommandHelp(c, "app:deploy")
		return
	}

	err := commander.AppDeploy(configStore, serviceRuntime, app, utils.GalaxyEnv(c), version)
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}
}
Example #14
func promptValue(prompt, dflt string) string {
	term := os.Getenv("TERM")
	if term == "" || term == "dumb" {
		return dflt
	}

	fmt.Printf("%s [%s]: ", prompt, dflt)

	val, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		log.Println(err)
		return dflt
	}

	val = strings.TrimSpace(val)

	// return the default if the input was empty
	if len(val) == 0 {
		return dflt
	}

	return val
}
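Because promptValue falls back to the default on non-interactive terminals, it is safe for destructive-action confirmations; Example #18 below uses it that way. A minimal sketch:

	// hypothetical confirmation prompt defaulting to "no"
	switch strings.ToLower(promptValue("Proceed?", "n")) {
	case "y", "yes":
		// proceed
	default:
		log.Fatal("aborted")
	}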
Example #15
func (s *ServiceRuntime) RunCommand(env string, appCfg config.App, cmd []string) (*docker.Container, error) {

	// make sure we have the correct image locally, pulling it if necessary
	fmt.Fprintf(os.Stderr, "Pulling latest image for %s\n", appCfg.Version())
	_, err := s.PullImage(appCfg.Version(), appCfg.VersionID())
	if err != nil {
		return nil, err
	}

	instanceId, err := s.NextInstanceSlot(appCfg.Name(), strconv.FormatInt(appCfg.ID(), 10))
	if err != nil {
		return nil, err
	}

	envVars := []string{"ENV=" + env}

	for key, value := range appCfg.Env() {
		if key == "ENV" {
			continue
		}
		envVars = append(envVars, strings.ToUpper(key)+"="+s.replaceVarEnv(value, s.hostIP))
	}
	envVars = append(envVars, "GALAXY_APP="+appCfg.Name())
	envVars = append(envVars, "GALAXY_VERSION="+strconv.FormatInt(appCfg.ID(), 10))
	envVars = append(envVars, fmt.Sprintf("GALAXY_INSTANCE=%s", strconv.FormatInt(int64(instanceId), 10)))

	runCmd := []string{"/bin/bash", "-c", strings.Join(cmd, " ")}

	container, err := s.dockerClient.CreateContainer(docker.CreateContainerOptions{
		Config: &docker.Config{
			Image:        appCfg.Version(),
			Env:          envVars,
			AttachStdout: true,
			AttachStderr: true,
			Cmd:          runCmd,
			OpenStdin:    false,
		},
	})

	if err != nil {
		return nil, err
	}

	c := make(chan os.Signal, 1)
	// SIGKILL can't be trapped, so only an interrupt gives us a chance to clean up
	signal.Notify(c, os.Interrupt)
	go func(s *ServiceRuntime, containerId string) {
		<-c
		log.Println("Stopping container...")
		err := s.dockerClient.StopContainer(containerId, 3)
		if err != nil {
			log.Printf("ERROR: Unable to stop container: %s", err)
		}
		err = s.dockerClient.RemoveContainer(docker.RemoveContainerOptions{
			ID: containerId,
		})
		if err != nil {
			log.Printf("ERROR: Unable to stop container: %s", err)
		}

	}(s, container.ID)

	defer s.dockerClient.RemoveContainer(docker.RemoveContainerOptions{
		ID: container.ID,
	})
	config := &docker.HostConfig{}
	if s.dns != "" {
		config.DNS = []string{s.dns}
	}
	err = s.dockerClient.StartContainer(container.ID, config)

	if err != nil {
		return container, err
	}

	// FIXME: Hack to work around the race of attaching to a container before it's
	// actually running.  Tried polling the container and then attaching but the
	// output gets lost sometimes if the command executes very quickly. Not sure
	// what's going on.
	time.Sleep(1 * time.Second)

	err = s.dockerClient.AttachToContainer(docker.AttachToContainerOptions{
		Container:    container.ID,
		OutputStream: os.Stdout,
		ErrorStream:  os.Stderr,
		Logs:         true,
		Stream:       false,
		Stdout:       true,
		Stderr:       true,
	})

	if err != nil {
		log.Printf("ERROR: Unable to attach to running container: %s", err.Error())
	}

	s.dockerClient.WaitContainer(container.ID)

	return container, err
}
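A hedged sketch of calling RunCommand for a one-off task; the environment name and command are illustrative, and serviceRuntime and appCfg are assumed to be in scope:

	// hypothetical one-off command; RunCommand streams the container's
	// output and blocks until it exits
	if _, err := serviceRuntime.RunCommand("prod", appCfg, []string{"./migrate.sh"}); err != nil {
		log.Fatalf("ERROR: %s", err)
	}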
Example #16
// RegisterEvents monitors the docker daemon for events, and returns those
// that require registration action over the listener chan.
func (s *ServiceRuntime) RegisterEvents(env, pool, hostIP string, listener chan ContainerEvent) error {
	go func() {
		c := make(chan *docker.APIEvents)

		watching := false
		for {

			err := s.Ping()
			if err != nil {
				log.Errorf("ERROR: Unable to ping docker daemaon: %s", err)
				if watching {
					s.dockerClient.RemoveEventListener(c)
					watching = false
				}
				time.Sleep(10 * time.Second)
				continue

			}

			if !watching {
				err = s.dockerClient.AddEventListener(c)
				if err != nil && err != docker.ErrListenerAlreadyExists {
					log.Printf("ERROR: Error registering docker event listener: %s", err)
					time.Sleep(10 * time.Second)
					continue
				}
				watching = true
			}

			select {

			case e := <-c:
				if e.Status == "start" || e.Status == "stop" || e.Status == "die" {
					container, err := s.InspectContainer(e.ID)
					if err != nil {
						log.Printf("ERROR: Error inspecting container: %s", err)
						continue
					}

					if container == nil {
						log.Printf("WARN: Nil container returned for %s", e.ID[:12])
						continue
					}

					name := s.EnvFor(container)["GALAXY_APP"]
					if name != "" {
						registration, err := s.configStore.GetServiceRegistration(env, pool, hostIP, container)
						if err != nil {
							log.Printf("WARN: Could not find service registration for %s/%s: %s", name, container.ID[:12], err)
							continue
						}

						if registration == nil && e.Status != "start" {
							continue
						}

						// if a container is restarting, don't continue re-registering the app
						if container.State.Restarting {
							if e.Status == "die" {
								log.Println("WARN: restarting", container.Name)
							}
							continue
						}

						listener <- ContainerEvent{
							Status:              e.Status,
							Container:           container,
							ServiceRegistration: registration,
						}
					}

				}
			case <-time.After(10 * time.Second):
				// check for docker liveness
			}

		}
	}()
	return nil
}
Example #17
// restore an app's config from backup
func appRestore(c *cli.Context) {
	initStore(c)

	var err error
	var rawBackup []byte

	fileName := c.String("file")
	if fileName != "" {
		rawBackup, err = ioutil.ReadFile(fileName)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		log.Println("Reading backup from STDIN")
		rawBackup, err = ioutil.ReadAll(os.Stdin)
		if err != nil {
			log.Fatal(err)
		}
	}

	backup := &backupData{}
	if err := json.Unmarshal(rawBackup, backup); err != nil {
		log.Fatal(err)
	}

	fmt.Println("Found backup from ", backup.Time)

	var toRestore []*appCfg

	if apps := c.Args(); len(apps) > 0 {
		for _, app := range apps {
			found := false
			for _, bkup := range backup.Apps {
				if bkup.Name == app {
					toRestore = append(toRestore, bkup)
					found = true
					break
				}
			}
			if !found {
				log.Fatalf("no backup found for '%s'\n", app)
			}

		}
	} else {
		toRestore = backup.Apps
	}

	// check for conflicts
	// NOTE: there is still a race here if an app is created after this check
	if !c.Bool("force") {
		needForce := false
		for _, bkup := range toRestore {
			exists, err := configStore.AppExists(bkup.Name, utils.GalaxyEnv(c))
			if err != nil {
				log.Fatal(err)
			}
			if exists {
				log.Warnf("Cannot restore over existing app '%s'", bkup.Name)
				needForce = true
			}
		}
		if needForce {
			log.Fatal("Use -force to overwrite")
		}
	}

	loggedErr := false
	for _, bkup := range toRestore {
		if err := restoreApp(bkup, utils.GalaxyEnv(c)); err != nil {
			log.Errorf("%s", err)
			loggedErr = true
		}
	}

	if loggedErr {
		// This is mostly to give a non-zero exit status
		log.Fatal("Error occured during restore")
	}
}
Example #18
// update the base stack
func stackUpdate(c *cli.Context) {
	var stackTmpl []byte
	var err error

	stackName := c.Args().First()
	if stackName == "" {
		log.Fatal("ERROR: stack name required")
	}

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	params := make(map[string]string)
	if p := c.String("parameters"); p != "" {
		paramJSON, err := jsonFromArg(p)
		if err != nil {
			log.Fatal("ERROR: decoding parameters:", err)
		}

		err = json.Unmarshal(paramJSON, &params)
		if err != nil {
			log.Fatal(err)
		}
	}

	template := c.String("template")
	if template != "" {
		stackTmpl, err = jsonFromArg(template)
		if err != nil {
			log.Fatalf("ERROR: %s", err)
		}
	}

	if policy := c.String("policy"); policy != "" {
		policyJSON, err := jsonFromArg(policy)
		if err != nil {
			log.Fatal("policy error:", err)
		}

		params["StackPolicyDuringUpdateBody"] = string(policyJSON)
	}

	if len(stackTmpl) == 0 {
		// get the current running template
		stackTmpl, err = stack.GetTemplate(stackName)
		if err != nil {
			log.Fatal(err)
		}
	}

	// this reads the Parameters supplied for our current stack for us
	shared := sharedResources(c, stackName)

	// add any missing shared parameters to our update set
	for key, val := range shared.Parameters {
		if params[key] == "" {
			params[key] = val
		}
	}

	p, _ := json.MarshalIndent(params, "", "  ")
	ok := promptValue(fmt.Sprintf("\nUpdate the [%s] stack with:\n%s\nAccept?", stackName, string(p)), "n")
	switch strings.ToLower(ok) {
	case "y", "yes":
		_, err = stack.Update(stackName, stackTmpl, params)
		if err != nil {
			log.Fatal(err)
		}
		log.Println("Updating stack:", stackName)
	default:
		log.Fatal("aborted")
	}
}
Example #19
// Update an existing Pool Stack
func stackUpdatePool(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	poolName := utils.GalaxyPool(c)
	baseStack := getBase(c)
	poolEnv := utils.GalaxyEnv(c)

	stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName)

	pool, err := stack.GetPool(stackName)
	if err != nil {
		log.Fatal(err)
	}

	options := make(map[string]string)
	if policy := c.String("policy"); policy != "" {
		policyJSON, err := jsonFromArg(policy)
		if err != nil {
			log.Fatal("policy error:", err)
		}

		options["StackPolicyDuringUpdateBody"] = string(policyJSON)
	}

	resources := sharedResources(c, baseStack)

	asg := pool.ASG()
	if asg == nil {
		log.Fatal("missing ASG")
	}

	if c.Int("desired-size") > 0 {
		asg.Properties.DesiredCapacity = c.Int("desired-size")
	}

	if c.Int("min-size") > 0 {
		asg.Properties.MinSize = c.Int("min-size")
	}

	if c.Int("max-size") > 0 {
		asg.Properties.MaxSize = c.Int("max-size")
	}

	if c.Bool("auto-update") {
		// note that the max pause is only PT5M30S
		asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause"))
	}

	numZones := c.Int("availability-zones")
	if numZones == 0 {
		numZones = len(asg.Properties.VPCZoneIdentifier)
	}

	// start with the current settings
	subnetIDs := []string{}
	azIDs := []string{}

	// only update the subnets/AZs if we changed the count
	if len(asg.Properties.VPCZoneIdentifier) != numZones {
		subnets := resources.Subnets
		if numZones <= len(subnets) {
			subnets = subnets[:numZones]
		} else {
			log.Fatal("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets))
		}

		for _, sn := range subnets {
			subnetIDs = append(subnetIDs, sn.ID)
			azIDs = append(azIDs, sn.AvailabilityZone)
		}
		asg.Properties.VPCZoneIdentifier = subnetIDs
		asg.Properties.AvailabilityZones = azIDs

	}

	elb := pool.ELB()

	sslCert := ""
	if cert := c.String("ssl-cert"); cert != "" {
		sslCert = resources.ServerCerts[cert]
		if sslCert == "" {
			log.Fatalf("Could not find certificate '%s'", cert)
		}
	}

	httpPort := c.Int("http-port")

	if (sslCert != "" || httpPort > 0) && elb == nil {
		log.Fatal("ERROR: Pool does not have an ELB")
	}

	// default the port only after the check above; otherwise a defaulted port would always require an ELB
	if httpPort == 0 {
		httpPort = 80
	}

	if elb != nil {
		certAdded := false
		for _, l := range elb.Properties.Listeners {
			if sslCert != "" && l.Protocol == "HTTPS" {
				l.SSLCertificateId = sslCert
				certAdded = true
			}

			if httpPort > 0 {
				l.InstancePort = httpPort
			}
		}

		// the elb needs a cert, but doesn't have an https listener
		if sslCert != "" && !certAdded {
			elb.AddListener(443, "HTTPS", httpPort, "HTTP", sslCert, nil)
		}

		healthCheck := c.String("http-health-check")
		if healthCheck != "" && healthCheck != elb.Properties.HealthCheck.Target {
			elb.Properties.HealthCheck.Target = healthCheck
		}

		// always make sure the ELB is in the same subnets as the ASG
		elb.Properties.Subnets = asg.Properties.VPCZoneIdentifier
	}

	lc := pool.LC()
	if amiID := c.String("ami"); amiID != "" {
		lc.Properties.ImageId = amiID
	}

	if insType := c.String("instance-type"); insType != "" {
		lc.Properties.InstanceType = insType
	}

	// add autoscaling if it's required
	setCPUAutoScale(c, pool)

	poolTmpl, err := json.MarshalIndent(pool, "", "    ")
	if err != nil {
		log.Fatal(err)
	}

	if c.Bool("print") {
		fmt.Println(string(poolTmpl))
		return
	}

	log.Println("Updating stack:", stackName)
	if _, err := stack.Update(stackName, poolTmpl, options); err != nil {
		log.Fatal(err)
	}

	// do we want to wait on this by default?
	if err := stack.Wait(stackName, 5*time.Minute); err != nil {
		log.Fatal(err)
	}

	log.Println("UpdateStack complete")
}
Example #20
func stackCreatePool(c *cli.Context) {
	var err error
	ensureEnvArg(c)
	ensurePoolArg(c)

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	poolName := utils.GalaxyPool(c)
	baseStack := getBase(c)
	poolEnv := utils.GalaxyEnv(c)

	stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName)

	pool := stack.NewPool()

	// get the resources we need from the base stack
	// TODO: this may search for the base stack a second time
	resources := sharedResources(c, baseStack)

	desiredCap := c.Int("desired-size")
	if desiredCap == 0 {
		desiredCap = 1
	}

	numZones := c.Int("availability-zones")
	if numZones == 0 {
		// default to running one host per zone
		numZones = desiredCap
	}

	minSize := c.Int("min-size")
	maxSize := c.Int("max-size")
	httpPort := c.Int("http-port")
	if httpPort == 0 {
		httpPort = 80
	}

	sslCert := ""
	if cert := c.String("ssl-cert"); cert != "" {
		sslCert = resources.ServerCerts[cert]
		if sslCert == "" {
			log.Fatalf("Could not find certificate '%s'", cert)
		}
	}

	// Create our Launch Config
	lc := pool.LCTemplate
	lcName := "lc" + poolEnv + poolName

	if amiID := c.String("ami"); amiID != "" {
		lc.Properties.ImageId = amiID
	} else {
		lc.Properties.ImageId = resources.Parameters["PoolImageId"]
	}

	if insType := c.String("instance-type"); insType != "" {
		lc.Properties.InstanceType = insType
	} else {
		lc.Properties.InstanceType = resources.Parameters["PoolInstanceType"]
	}

	if keyName := c.String("keyname"); keyName != "" {
		lc.Properties.KeyName = keyName
	} else {
		lc.Properties.KeyName = resources.Parameters["KeyName"]
	}

	lc.Properties.IamInstanceProfile = resources.Roles["galaxyInstanceProfile"]

	lc.Properties.SecurityGroups = []string{
		resources.SecurityGroups["sshSG"],
		resources.SecurityGroups["defaultSG"],
	}

	lc.SetVolumeSize(c.Int("volume-size"))

	pool.Resources[lcName] = lc

	// Create the Auto Scaling Group
	asg := pool.ASGTemplate
	asgName := "asg" + poolEnv + poolName

	asg.AddTag("Name", fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName), true)
	asg.AddTag("env", poolEnv, true)
	asg.AddTag("pool", poolName, true)
	asg.AddTag("galaxy", "pool", true)

	asg.Properties.DesiredCapacity = desiredCap

	// Don't always run in all zones
	subnets := resources.Subnets
	if numZones <= len(subnets) {
		subnets = subnets[:numZones]
	} else {
		log.Fatal("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets))
	}

	// break the subnets info into separate subnet and AZ slices for the template
	subnetIDs := []string{}
	azIDs := []string{}
	for _, sn := range subnets {
		subnetIDs = append(subnetIDs, sn.ID)
		azIDs = append(azIDs, sn.AvailabilityZone)
	}

	asg.SetLaunchConfiguration(lcName)
	asg.Properties.AvailabilityZones = azIDs
	asg.Properties.VPCZoneIdentifier = subnetIDs
	if maxSize > 0 {
		asg.Properties.MaxSize = maxSize
	}
	if minSize > 0 {
		asg.Properties.MinSize = minSize
	}

	if c.Bool("auto-update") {
		asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause"))
	}

	pool.Resources[asgName] = asg

	// Optionally create the Elastic Load Balancer
	if c.Bool("elb") {
		elb := pool.ELBTemplate
		elbName := "elb" + poolEnv + poolName

		// make sure to add this to the ASG
		asg.AddLoadBalancer(elbName)

		elb.Properties.Subnets = subnetIDs

		elb.Properties.SecurityGroups = []string{
			resources.SecurityGroups["webSG"],
			resources.SecurityGroups["defaultSG"],
		}

		elb.Properties.HealthCheck.Target = c.String("http-health-check")

		elb.AddListener(80, "HTTP", httpPort, "HTTP", "", nil)

		if sslCert != "" {
			elb.AddListener(443, "HTTPS", httpPort, "HTTP", sslCert, nil)
		}

		pool.Resources[elbName] = elb
	}

	// add autoscaling if it's required
	setCPUAutoScale(c, pool)

	poolTmpl, err := json.MarshalIndent(pool, "", "    ")
	if err != nil {
		log.Fatal(err)
	}

	if c.Bool("print") {
		fmt.Println(string(poolTmpl))
		return
	}

	opts := make(map[string]string)
	opts["tag.env"] = poolEnv
	opts["tag.pool"] = poolName
	opts["tag.galaxy"] = "pool"

	_, err = stack.Create(stackName, poolTmpl, opts)
	if err != nil {
		log.Fatal(err)
	}

	log.Println("Creating stack:", stackName)

	// do we want to wait on this by default?
	if err := stack.Wait(stackName, 5*time.Minute); err != nil {
		log.Error(err)
		log.Error("CreateStack Failed, attempting to delete")

		waitAndDelete(stackName)
		return
	}

	log.Println("CreateStack complete")
}