Example #1
func poolCreate(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)
	initStore(c)
	created, err := configStore.CreatePool(utils.GalaxyPool(c), utils.GalaxyEnv(c))
	if err != nil {
		log.Fatalf("ERROR: Could not create pool: %s", err)
	}

	if created {
		log.Printf("Pool %s created\n", utils.GalaxyPool(c))
	} else {
		log.Printf("Pool %s already exists\n", utils.GalaxyPool(c))
	}

	ec2host, err := runtime.EC2PublicHostname()
	if err != nil || ec2host == "" {
		log.Debug("not running on AWS, skipping stack creation")
		return
	}

	// now create the CloudFormation stack
	// if this fails, the stack can be created separately with
	// stack:create_pool
	stackCreatePool(c)
}
Example #2
func poolCreate(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)
	initStore(c)
	created, err := configStore.CreatePool(utils.GalaxyPool(c), utils.GalaxyEnv(c))
	if err != nil {
		log.Fatalf("ERROR: Could not create pool: %s", err)
	}

	if created {
		log.Printf("Pool %s created\n", utils.GalaxyPool(c))
	} else {
		log.Printf("Pool %s already exists\n", utils.GalaxyPool(c))
	}
}
Example #3
func poolDelete(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)
	initStore(c)
	empty, err := configStore.DeletePool(utils.GalaxyPool(c), utils.GalaxyEnv(c))
	if err != nil {
		log.Fatalf("ERROR: Could not delete pool: %s", err)
	}

	if empty {
		log.Printf("Pool %s deleted\n", utils.GalaxyPool(c))

	} else {
		log.Printf("Pool %s has apps assigned. Unassign them first.\n", utils.GalaxyPool(c))
	}
}
Example #4
func poolAssign(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)
	initStore(c)

	app := ensureAppParam(c, "pool:assign")

	err := commander.AppAssign(configStore, app, utils.GalaxyEnv(c), utils.GalaxyPool(c))
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}
}
Example #5
func appShell(c *cli.Context) {
	ensureEnvArg(c)
	initStore(c)
	initRuntime(c)

	app := ensureAppParam(c, "app:shell")

	err := commander.AppShell(configStore, serviceRuntime, app,
		utils.GalaxyEnv(c), utils.GalaxyPool(c))
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}
}
Example #6
func poolUnassign(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)
	initStore(c)

	app := c.Args().First()
	if app == "" {
		cli.ShowCommandHelp(c, "pool:unassign")
		log.Fatal("ERROR: app name missing")
	}

	err := commander.AppUnassign(configStore, app, utils.GalaxyEnv(c), utils.GalaxyPool(c))
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}
}
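The ensureAppParam helper called in Examples #4 and #5 is not included in this listing. The inline check in Example #6 suggests its shape; the following is a minimal sketch reconstructed from that check, not the verified implementation:

func ensureAppParam(c *cli.Context, command string) string {
	app := c.Args().First()
	if app == "" {
		// print the named command's help text, then exit
		cli.ShowCommandHelp(c, command)
		log.Fatal("ERROR: app name missing")
	}
	return app
}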
Example #7
func stackDeletePool(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	baseStack := getBase(c)

	stackName := fmt.Sprintf("%s-%s-%s", baseStack,
		utils.GalaxyEnv(c),
		utils.GalaxyPool(c))

	waitAndDelete(stackName)
}
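waitAndDelete is called here and again in Example #10 but is not shown in this listing. A minimal sketch of what it plausibly does, reusing the stack.Wait call seen in Examples #9 and #10; the stack.Delete helper and its signature are assumptions:

func waitAndDelete(name string) {
	log.Println("Deleting stack:", name)

	// assumed: let any in-progress stack operation settle before deleting
	if err := stack.Wait(name, 5*time.Minute); err != nil {
		log.Fatal(err)
	}

	// assumed helper; the real delete call may differ
	if err := stack.Delete(name); err != nil {
		log.Fatal(err)
	}
}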
Example #8
func ensurePoolArg(c *cli.Context) {
	if utils.GalaxyPool(c) == "" {
		log.Fatal("ERROR: pool is required.  Pass --pool or set GALAXY_POOL")
	}
}
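ensureEnvArg, called at the top of nearly every command above, is presumably the environment counterpart of ensurePoolArg. A sketch by analogy; the --env flag and GALAXY_ENV variable are assumptions inferred from the --pool/GALAXY_POOL pattern:

func ensureEnvArg(c *cli.Context) {
	if utils.GalaxyEnv(c) == "" {
		log.Fatal("ERROR: env is required. Pass --env or set GALAXY_ENV")
	}
}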
Example #9
// Update an existing Pool Stack
func stackUpdatePool(c *cli.Context) {
	ensureEnvArg(c)
	ensurePoolArg(c)

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	poolName := utils.GalaxyPool(c)
	baseStack := getBase(c)
	poolEnv := utils.GalaxyEnv(c)

	stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName)

	pool, err := stack.GetPool(stackName)
	if err != nil {
		log.Fatal(err)
	}

	options := make(map[string]string)
	if policy := c.String("policy"); policy != "" {
		policyJSON, err := jsonFromArg(policy)
		if err != nil {
			log.Fatal("policy error:", err)
		}

		options["StackPolicyDuringUpdateBody"] = string(policyJSON)
	}

	resources := sharedResources(c, baseStack)

	asg := pool.ASG()
	if asg == nil {
		log.Fatal("missing ASG")
	}

	if c.Int("desired-size") > 0 {
		asg.Properties.DesiredCapacity = c.Int("desired-size")
	}

	if c.Int("min-size") > 0 {
		asg.Properties.MinSize = c.Int("min-size")
	}

	if c.Int("max-size") > 0 {
		asg.Properties.MaxSize = c.Int("max-size")
	}

	if c.Bool("auto-update") {
		// note that the max pause is only PT5M30S
		asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause"))
	}

	numZones := c.Int("availability-zones")
	if numZones == 0 {
		numZones = len(asg.Properties.VPCZoneIdentifier)
	}

	// replacement subnet and AZ lists
	subnetIDs := []string{}
	azIDs := []string{}

	// only update the subnets/AZs if we changed the count
	if len(asg.Properties.VPCZoneIdentifier) != numZones {
		subnets := resources.Subnets
		if numZones <= len(subnets) {
			subnets = subnets[:numZones]
		} else {
			log.Fatalf("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets))
		}

		for _, sn := range subnets {
			subnetIDs = append(subnetIDs, sn.ID)
			azIDs = append(azIDs, sn.AvailabilityZone)
		}
		asg.Properties.VPCZoneIdentifier = subnetIDs
		asg.Properties.AvailabilityZones = azIDs
	}

	elb := pool.ELB()

	sslCert := ""
	if cert := c.String("ssl-cert"); cert != "" {
		sslCert = resources.ServerCerts[cert]
		if sslCert == "" {
			log.Fatalf("Could not find certificate '%s'", cert)
		}
	}

	httpPort := c.Int("http-port")

	if (sslCert != "" || httpPort > 0) && elb == nil {
		log.Fatal("ERROR: Pool does not have an ELB")
	}

	// set the default only after the nil-ELB check above, so a pool
	// without an ELB isn't rejected over the default port
	if httpPort == 0 {
		httpPort = 80
	}

	if elb != nil {
		certAdded := false
		for _, l := range elb.Properties.Listeners {
			if sslCert != "" && l.Protocol == "HTTPS" {
				l.SSLCertificateId = sslCert
				certAdded = true
			}

			if httpPort > 0 {
				l.InstancePort = httpPort
			}
		}

		// the elb needs a cert, but doesn't have an https listener
		if sslCert != "" && !certAdded {
			elb.AddListener(443, "HTTPS", httpPort, "HTTP", sslCert, nil)
		}

		healthCheck := c.String("http-health-check")
		if healthCheck != "" && healthCheck != elb.Properties.HealthCheck.Target {
			elb.Properties.HealthCheck.Target = healthCheck
		}

		// always make sure the ELB is in the same subnets as the ASG
		elb.Properties.Subnets = asg.Properties.VPCZoneIdentifier
	}

	lc := pool.LC()
	if amiID := c.String("ami"); amiID != "" {
		lc.Properties.ImageId = amiID
	}

	if insType := c.String("instance-type"); insType != "" {
		lc.Properties.InstanceType = insType
	}

	// add CPU-based autoscaling if requested
	setCPUAutoScale(c, pool)

	poolTmpl, err := json.MarshalIndent(pool, "", "    ")
	if err != nil {
		log.Fatal(err)
	}

	if c.Bool("print") {
		fmt.Println(string(poolTmpl))
		return
	}

	log.Println("Updating stack:", stackName)
	if _, err := stack.Update(stackName, poolTmpl, options); err != nil {
		log.Fatal(err)
	}

	// do we want to wait on this by default?
	if err := stack.Wait(stackName, 5*time.Minute); err != nil {
		log.Fatal(err)
	}

	log.Println("UpdateStack complete")
}
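For context, a hypothetical invocation of this command. The galaxy binary name and the stack:update_pool registration are assumptions (extrapolated from the stack:create_pool reference in Example #1); the flags come from the c.Int/c.String/c.Bool lookups above:

galaxy stack:update_pool --env prod --pool web --max-size 6 --instance-type m3.large --auto-update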
Example #10
func stackCreatePool(c *cli.Context) {
	var err error
	ensureEnvArg(c)
	ensurePoolArg(c)

	if c.String("region") != "" {
		stack.Region = c.String("region")
	}

	poolName := utils.GalaxyPool(c)
	baseStack := getBase(c)
	poolEnv := utils.GalaxyEnv(c)

	stackName := fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName)

	pool := stack.NewPool()

	// get the resources we need from the base stack
	// TODO: this may search for the base stack a second time
	resources := sharedResources(c, baseStack)

	desiredCap := c.Int("desired-size")
	if desiredCap == 0 {
		desiredCap = 1
	}

	numZones := c.Int("availability-zones")
	if numZones == 0 {
		// default to running one host per zone
		numZones = desiredCap
	}

	minSize := c.Int("min-size")
	maxSize := c.Int("max-size")
	httpPort := c.Int("http-port")
	if httpPort == 0 {
		httpPort = 80
	}

	sslCert := ""
	if cert := c.String("ssl-cert"); cert != "" {
		sslCert = resources.ServerCerts[cert]
		if sslCert == "" {
			log.Fatalf("Could not find certificate '%s'", cert)
		}
	}

	// Create our Launch Config
	lc := pool.LCTemplate
	lcName := "lc" + poolEnv + poolName

	if amiID := c.String("ami"); amiID != "" {
		lc.Properties.ImageId = amiID
	} else {
		lc.Properties.ImageId = resources.Parameters["PoolImageId"]
	}

	if insType := c.String("instance-type"); insType != "" {
		lc.Properties.InstanceType = insType
	} else {
		lc.Properties.InstanceType = resources.Parameters["PoolInstanceType"]
	}

	if keyName := c.String("keyname"); keyName != "" {
		lc.Properties.KeyName = keyName
	} else {
		lc.Properties.KeyName = resources.Parameters["KeyName"]
	}

	lc.Properties.IamInstanceProfile = resources.Roles["galaxyInstanceProfile"]

	lc.Properties.SecurityGroups = []string{
		resources.SecurityGroups["sshSG"],
		resources.SecurityGroups["defaultSG"],
	}

	lc.SetVolumeSize(c.Int("volume-size"))

	pool.Resources[lcName] = lc

	// Create the Auto Scaling Group
	asg := pool.ASGTemplate
	asgName := "asg" + poolEnv + poolName

	asg.AddTag("Name", fmt.Sprintf("%s-%s-%s", baseStack, poolEnv, poolName), true)
	asg.AddTag("env", poolEnv, true)
	asg.AddTag("pool", poolName, true)
	asg.AddTag("galaxy", "pool", true)

	asg.Properties.DesiredCapacity = desiredCap

	// Don't always run in all zones
	subnets := resources.Subnets
	if numZones <= len(subnets) {
		subnets = subnets[:numZones]
	} else {
		log.Fatalf("ERROR: cannot run in %d zones, only %d available.", numZones, len(subnets))
	}

	// break the subnets info into separate subnet and AZ slices for the template
	subnetIDs := []string{}
	azIDs := []string{}
	for _, sn := range subnets {
		subnetIDs = append(subnetIDs, sn.ID)
		azIDs = append(azIDs, sn.AvailabilityZone)
	}

	asg.SetLaunchConfiguration(lcName)
	asg.Properties.AvailabilityZones = azIDs
	asg.Properties.VPCZoneIdentifier = subnetIDs
	if maxSize > 0 {
		asg.Properties.MaxSize = maxSize
	}
	if minSize > 0 {
		asg.Properties.MinSize = minSize
	}

	if c.Bool("auto-update") {
		asg.SetASGUpdatePolicy(c.Int("update-min"), c.Int("update-batch"), c.Duration("update-pause"))
	}

	pool.Resources[asgName] = asg

	// Optionally create the Elastic Load Balancer
	if c.Bool("elb") {
		elb := pool.ELBTemplate
		elbName := "elb" + poolEnv + poolName

		// make sure to add this to the ASG
		asg.AddLoadBalancer(elbName)

		elb.Properties.Subnets = subnetIDs

		elb.Properties.SecurityGroups = []string{
			resources.SecurityGroups["webSG"],
			resources.SecurityGroups["defaultSG"],
		}

		elb.Properties.HealthCheck.Target = c.String("http-health-check")

		elb.AddListener(80, "HTTP", httpPort, "HTTP", "", nil)

		if sslCert != "" {
			elb.AddListener(443, "HTTPS", httpPort, "HTTP", sslCert, nil)
		}

		pool.Resources[elbName] = elb
	}

	// add CPU-based autoscaling if requested
	setCPUAutoScale(c, pool)

	poolTmpl, err := json.MarshalIndent(pool, "", "    ")
	if err != nil {
		log.Fatal(err)
	}

	if c.Bool("print") {
		fmt.Println(string(poolTmpl))
		return
	}

	opts := make(map[string]string)
	opts["tag.env"] = poolEnv
	opts["tag.pool"] = poolName
	opts["tag.galaxy"] = "pool"

	log.Println("Creating stack:", stackName)

	_, err = stack.Create(stackName, poolTmpl, opts)
	if err != nil {
		log.Fatal(err)
	}

	// do we want to wait on this by default?
	if err := stack.Wait(stackName, 5*time.Minute); err != nil {
		log.Error(err)
		log.Error("CreateStack Failed, attempting to delete")

		waitAndDelete(stackName)
		return
	}

	log.Println("CreateStack complete")
}
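A hypothetical invocation to round out the example. The galaxy binary name is an assumption; the stack:create_pool command name comes from the comment in Example #1, and the flags and the illustrative cert name from the lookups above:

galaxy stack:create_pool --env prod --pool web --desired-size 2 --elb --http-port 8080 --ssl-cert mycert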