Example #1
// ImportAWSCluster reverse-engineers a kops cluster spec and instance groups
// from an existing kube-up style AWS deployment, using EC2 tags and the
// master's user-data, then writes the result to the state store.
func (x *ImportCluster) ImportAWSCluster() error {
	awsCloud := x.Cloud.(*awsup.AWSCloud)
	clusterName := x.ClusterName

	if clusterName == "" {
		return fmt.Errorf("ClusterName must be specified")
	}

	var instanceGroups []*api.InstanceGroup

	cluster := &api.Cluster{}
	cluster.Spec.CloudProvider = "aws"
	cluster.Name = clusterName

	cluster.Spec.KubeControllerManager = &api.KubeControllerManagerConfig{}

	masterGroup := &api.InstanceGroup{}
	masterGroup.Spec.Role = api.InstanceGroupRoleMaster
	masterGroup.Name = "masters"
	masterGroup.Spec.MinSize = fi.Int(1)
	masterGroup.Spec.MaxSize = fi.Int(1)
	instanceGroups = append(instanceGroups, masterGroup)

	instances, err := findInstances(awsCloud)
	if err != nil {
		return fmt.Errorf("error finding instances: %v", err)
	}

	var masterInstance *ec2.Instance
	for _, instance := range instances {
		role, _ := awsup.FindEC2Tag(instance.Tags, "Role")
		if role == clusterName+"-master" {
			if masterInstance != nil {
				masterState := aws.StringValue(masterInstance.State.Name)
				thisState := aws.StringValue(instance.State.Name)

				glog.Infof("Found multiple masters: %s and %s", masterState, thisState)

				if masterState == "terminated" && thisState != "terminated" {
					// OK
				} else if thisState == "terminated" && masterState != "terminated" {
					// Ignore this one
					continue
				} else {
					return fmt.Errorf("found multiple masters")
				}
			}
			masterInstance = instance
		}
	}
	if masterInstance == nil {
		return fmt.Errorf("could not find master node")
	}
	masterInstanceID := aws.StringValue(masterInstance.InstanceId)
	glog.Infof("Found master: %q", masterInstanceID)

	masterGroup.Spec.MachineType = aws.StringValue(masterInstance.InstanceType)

	masterSubnetID := aws.StringValue(masterInstance.SubnetId)

	subnets, err := DescribeSubnets(x.Cloud)
	if err != nil {
		return fmt.Errorf("error finding subnets: %v", err)
	}
	var masterSubnet *ec2.Subnet
	for _, s := range subnets {
		if masterSubnetID == aws.StringValue(s.SubnetId) {
			if masterSubnet != nil {
				return fmt.Errorf("found duplicate subnet")
			}
			masterSubnet = s
		}
	}
	if masterSubnet == nil {
		return fmt.Errorf("cannot find subnet %q", masterSubnetID)
	}

	vpcID := aws.StringValue(masterInstance.VpcId)
	var vpc *ec2.Vpc
	{
		vpc, err = awsCloud.DescribeVPC(vpcID)
		if err != nil {
			return err
		}
		if vpc == nil {
			return fmt.Errorf("cannot find vpc %q", vpcID)
		}
	}

	cluster.Spec.NetworkID = vpcID
	cluster.Spec.NetworkCIDR = aws.StringValue(vpc.CidrBlock)

	az := aws.StringValue(masterSubnet.AvailabilityZone)
	masterGroup.Spec.Zones = []string{az}
	cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
		Name: az,

		// We will allocate a new CIDR
		//CIDR: aws.StringValue(masterSubnet.CidrBlock),
	})

	userData, err := GetInstanceUserData(awsCloud, masterInstanceID)
	if err != nil {
		return fmt.Errorf("error getting master user-data: %v", err)
	}

	conf, err := ParseUserDataConfiguration(userData)
	if err != nil {
		return fmt.Errorf("error parsing master user-data: %v", err)
	}

	//master := &NodeSSH{
	//	Hostname: c.Master,
	//}
	//err := master.AddSSHIdentity(c.SSHIdentity)
	//if err != nil {
	//	return err
	//}
	//
	//
	//fmt.Printf("Connecting to node on %s\n", c.Node)
	//
	//node := &NodeSSH{
	//	Hostname: c.Node,
	//}
	//err = node.AddSSHIdentity(c.SSHIdentity)
	//if err != nil {
	//	return err
	//}

	instancePrefix := conf.Settings["INSTANCE_PREFIX"]
	if instancePrefix == "" {
		return fmt.Errorf("cannot determine INSTANCE_PREFIX")
	}
	if instancePrefix != clusterName {
		return fmt.Errorf("INSTANCE_PREFIX %q did not match cluster name %q", instancePrefix, clusterName)
	}

	//k8s.NodeMachineType, err = InstanceType(node)
	//if err != nil {
	//	return fmt.Errorf("cannot determine node instance type: %v", err)
	//}

	// We want to upgrade!
	// k8s.ImageId = ""

	//clusterConfig.ClusterIPRange = conf.Settings["CLUSTER_IP_RANGE"]
	cluster.Spec.KubeControllerManager.AllocateNodeCIDRs = conf.ParseBool("ALLOCATE_NODE_CIDRS")
	//clusterConfig.KubeUser = conf.Settings["KUBE_USER"]
	cluster.Spec.ServiceClusterIPRange = conf.Settings["SERVICE_CLUSTER_IP_RANGE"]
	cluster.Spec.NonMasqueradeCIDR = conf.Settings["NON_MASQUERADE_CIDR"]
	//clusterConfig.EnableClusterMonitoring = conf.Settings["ENABLE_CLUSTER_MONITORING"]
	//clusterConfig.EnableClusterLogging = conf.ParseBool("ENABLE_CLUSTER_LOGGING")
	//clusterConfig.EnableNodeLogging = conf.ParseBool("ENABLE_NODE_LOGGING")
	//clusterConfig.LoggingDestination = conf.Settings["LOGGING_DESTINATION"]
	//clusterConfig.ElasticsearchLoggingReplicas, err = parseInt(conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"])
	//if err != nil {
	//	return fmt.Errorf("cannot parse ELASTICSEARCH_LOGGING_REPLICAS=%q: %v", conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"], err)
	//}
	//clusterConfig.EnableClusterDNS = conf.ParseBool("ENABLE_CLUSTER_DNS")
	//clusterConfig.EnableClusterUI = conf.ParseBool("ENABLE_CLUSTER_UI")
	//clusterConfig.DNSReplicas, err = parseInt(conf.Settings["DNS_REPLICAS"])
	//if err != nil {
	//	return fmt.Errorf("cannot parse DNS_REPLICAS=%q: %v", conf.Settings["DNS_REPLICAS"], err)
	//}
	//clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
	cluster.Spec.ClusterDNSDomain = conf.Settings["DNS_DOMAIN"]
	//clusterConfig.AdmissionControl = conf.Settings["ADMISSION_CONTROL"]
	//clusterConfig.MasterIPRange = conf.Settings["MASTER_IP_RANGE"]
	//clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
	//clusterConfig.DockerStorage = conf.Settings["DOCKER_STORAGE"]
	//k8s.MasterExtraSans = conf.Settings["MASTER_EXTRA_SANS"] // Not user set

	primaryNodeSet := &api.InstanceGroup{}
	primaryNodeSet.Spec.Role = api.InstanceGroupRoleNode
	primaryNodeSet.Name = "nodes"
	instanceGroups = append(instanceGroups, primaryNodeSet)

	//primaryNodeSet.Spec.MinSize, err = conf.ParseInt("NUM_MINIONS")
	//if err != nil {
	//	return fmt.Errorf("cannot parse NUM_MINIONS=%q: %v", conf.Settings["NUM_MINIONS"], err)
	//}

	{
		groups, err := findAutoscalingGroups(awsCloud, awsCloud.Tags())
		if err != nil {
			return fmt.Errorf("error listing autoscaling groups: %v", err)
		}
		if len(groups) == 0 {
			glog.Warningf("No Autoscaling group found")
		}
		if len(groups) > 1 {
			glog.Warningf("Multiple Autoscaling groups found")
		}
		minSize := 0
		maxSize := 0
		for _, group := range groups {
			minSize += int(aws.Int64Value(group.MinSize))
			maxSize += int(aws.Int64Value(group.MaxSize))
		}
		if minSize != 0 {
			primaryNodeSet.Spec.MinSize = fi.Int(minSize)
		}
		if maxSize != 0 {
			primaryNodeSet.Spec.MaxSize = fi.Int(maxSize)
		}

		// TODO: machine types
		//primaryNodeSet.NodeMachineType = k8s.MasterMachineType
	}

	if conf.Version == "1.1" {
		// If users went with defaults on some things, clear them out so they get the new defaults
		//if clusterConfig.AdmissionControl == "NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" {
		//	// More admission controllers in 1.2
		//	clusterConfig.AdmissionControl = ""
		//}
		if masterGroup.Spec.MachineType == "t2.micro" {
			// Different defaults in 1.2
			masterGroup.Spec.MachineType = ""
		}
		if primaryNodeSet.Spec.MachineType == "t2.micro" {
			// Encourage users to pick something better...
			primaryNodeSet.Spec.MachineType = ""
		}
	}
	if conf.Version == "1.2" {
		// If users went with defaults on some things, clear them out so they get the new defaults
		//if clusterConfig.AdmissionControl == "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,ResourceQuota" {
		//	// More admission controllers in 1.2
		//	clusterConfig.AdmissionControl = ""
		//}
	}

	for _, etcdClusterName := range []string{"main", "events"} {
		etcdCluster := &api.EtcdClusterSpec{
			Name: etcdClusterName,
		}
		for _, az := range masterGroup.Spec.Zones {
			etcdCluster.Members = append(etcdCluster.Members, &api.EtcdMemberSpec{
				Name: az,
				Zone: az,
			})
		}
		cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcdCluster)
	}

	//if masterInstance.PublicIpAddress != nil {
	//	eip, err := findElasticIP(cloud, *masterInstance.PublicIpAddress)
	//	if err != nil {
	//		return err
	//	}
	//	if eip != nil {
	//		k8s.MasterElasticIP = masterInstance.PublicIpAddress
	//	}
	//}
	//
	//vpc, err := cloud.DescribeVPC(*k8s.VPCID)
	//if err != nil {
	//	return err
	//}
	//k8s.DHCPOptionsID = vpc.DhcpOptionsId
	//
	//igw, err := findInternetGateway(cloud, *k8s.VPCID)
	//if err != nil {
	//	return err
	//}
	//if igw == nil {
	//	return fmt.Errorf("unable to find internet gateway for VPC %q", k8s.VPCID)
	//}
	//k8s.InternetGatewayID = igw.InternetGatewayId
	//
	//rt, err := findRouteTable(cloud, *k8s.SubnetID)
	//if err != nil {
	//	return err
	//}
	//if rt == nil {
	//	return fmt.Errorf("unable to find route table for Subnet %q", k8s.SubnetID)
	//}
	//k8s.RouteTableID = rt.RouteTableId

	//b.Context = "aws_" + instancePrefix

	newKeyStore := x.StateStore.CA()

	//caCert, err := masterSSH.Join("ca.crt").ReadFile()
	caCert, err := conf.ParseCert("CA_CERT")
	if err != nil {
		return err
	}
	err = newKeyStore.AddCert(fi.CertificateId_CA, caCert)
	if err != nil {
		return err
	}

	////masterKey, err := masterSSH.Join("server.key").ReadFile()
	//masterKey, err := conf.ParseKey("MASTER_KEY")
	//if err != nil {
	//	return err
	//}
	////masterCert, err := masterSSH.Join("server.cert").ReadFile()
	//masterCert, err := conf.ParseCert("MASTER_CERT")
	//if err != nil {
	//	return err
	//}
	//err = keyStore.ImportKeypair("master", masterKey, masterCert)
	//if err != nil {
	//	return err
	//}
	//
	////kubeletKey, err := kubeletSSH.Join("kubelet.key").ReadFile()
	//kubeletKey, err := conf.ParseKey("KUBELET_KEY")
	//if err != nil {
	//	return err
	//}
	////kubeletCert, err := kubeletSSH.Join("kubelet.cert").ReadFile()
	//kubeletCert, err := conf.ParseCert("KUBELET_CERT")
	//if err != nil {
	//	return err
	//}
	//err = keyStore.ImportKeypair("kubelet", kubeletKey, kubeletCert)
	//if err != nil {
	//	return err
	//}

	// We don't store the kubecfg key
	//kubecfgKey, err := masterSSH.Join("kubecfg.key").ReadFile()
	//if err != nil {
	//	return err
	//}
	//kubecfgCert, err := masterSSH.Join("kubecfg.cert").ReadFile()
	//if err != nil {
	//	return err
	//}
	//err = keyStore.ImportKeypair("kubecfg", kubecfgKey, kubecfgCert)
	//if err != nil {
	//	return err
	//}

	//// We will generate new tokens, but some of these are in existing API objects
	//secretStore := x.StateStore.Secrets()
	//kubePassword := conf.Settings["KUBE_PASSWORD"]
	//kubeletToken = conf.Settings["KUBELET_TOKEN"]
	//kubeProxyToken = conf.Settings["KUBE_PROXY_TOKEN"]

	err = api.WriteConfig(x.StateStore, cluster, instanceGroups)
	if err != nil {
		return err
	}

	return nil
}
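
For context, a minimal sketch of how ImportAWSCluster might be invoked. The ImportCluster field names (Cloud, ClusterName, StateStore) come from the method above; the surrounding setup (how awsCloud and stateStore are constructed) is assumed:

// Hypothetical wiring; the constructors are assumptions, the field names are
// taken from ImportAWSCluster above.
x := &ImportCluster{
	ClusterName: "k8s.example.com", // must be non-empty
	Cloud:       awsCloud,          // an *awsup.AWSCloud built elsewhere
	StateStore:  stateStore,        // supplies CA() and receives the written config
}
if err := x.ImportAWSCluster(); err != nil {
	glog.Fatalf("error importing cluster: %v", err)
}
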
Example #2
// Upgrade moves a cluster from OldClusterName to NewClusterName: it stops the
// old masters and autoscaling groups, detaches the master volumes, re-tags
// shared AWS resources, and writes the updated configuration and CA certs to
// the new state store.
func (x *UpgradeCluster) Upgrade() error {
	awsCloud := x.Cloud.(*awsup.AWSCloud)

	cluster := x.ClusterConfig

	newClusterName := x.NewClusterName
	if newClusterName == "" {
		return fmt.Errorf("NewClusterName must be specified")
	}
	oldClusterName := x.OldClusterName
	if oldClusterName == "" {
		return fmt.Errorf("OldClusterName must be specified")
	}

	oldTags := awsCloud.Tags()

	newTags := awsCloud.Tags()
	newTags["KubernetesCluster"] = newClusterName

	// Try to pre-query as much as possible before doing anything destructive
	instances, err := findInstances(awsCloud)
	if err != nil {
		return fmt.Errorf("error finding instances: %v", err)
	}

	volumes, err := DescribeVolumes(x.Cloud)
	if err != nil {
		return err
	}

	dhcpOptions, err := DescribeDhcpOptions(x.Cloud)
	if err != nil {
		return err
	}

	autoscalingGroups, err := findAutoscalingGroups(awsCloud, oldTags)
	if err != nil {
		return err
	}

	elbs, _, err := DescribeELBs(x.Cloud)
	if err != nil {
		return err
	}

	// Find masters
	var masters []*ec2.Instance
	for _, instance := range instances {
		role, _ := awsup.FindEC2Tag(instance.Tags, "Role")
		if role == oldClusterName+"-master" {
			masters = append(masters, instance)
		}
	}
	if len(masters) == 0 {
		return fmt.Errorf("could not find masters")
	}

	// Stop autoscalingGroups
	for _, group := range autoscalingGroups {
		name := aws.StringValue(group.AutoScalingGroupName)
		glog.Infof("Stopping instances in autoscaling group %q", name)

		request := &autoscaling.UpdateAutoScalingGroupInput{
			AutoScalingGroupName: group.AutoScalingGroupName,
			DesiredCapacity:      aws.Int64(0),
			MinSize:              aws.Int64(0),
			MaxSize:              aws.Int64(0),
		}

		_, err := awsCloud.Autoscaling.UpdateAutoScalingGroup(request)
		if err != nil {
			return fmt.Errorf("error updating autoscaling group %q: %v", name, err)
		}
	}

	// Stop masters
	for _, master := range masters {
		masterInstanceID := aws.StringValue(master.InstanceId)

		masterState := aws.StringValue(master.State.Name)
		if masterState == "terminated" {
			glog.Infof("master already terminated: %q", masterInstanceID)
			continue
		}

		glog.Infof("Stopping master: %q", masterInstanceID)

		request := &ec2.StopInstancesInput{
			InstanceIds: []*string{master.InstanceId},
		}

		_, err := awsCloud.EC2.StopInstances(request)
		if err != nil {
			return fmt.Errorf("error stopping master instance: %v", err)
		}
	}

	// Detach volumes from masters
	for _, master := range masters {
		for _, bdm := range master.BlockDeviceMappings {
			if bdm.Ebs == nil || bdm.Ebs.VolumeId == nil {
				continue
			}
			volumeID := aws.StringValue(bdm.Ebs.VolumeId)
			masterInstanceID := aws.StringValue(master.InstanceId)
			glog.Infof("Detaching volume %q from instance %q", volumeID, masterInstanceID)

			request := &ec2.DetachVolumeInput{
				VolumeId:   bdm.Ebs.VolumeId,
				InstanceId: master.InstanceId,
			}

			for {
				_, err := awsCloud.EC2.DetachVolume(request)
				if err == nil {
					break
				}
				if AWSErrorCode(err) == "IncorrectState" {
					glog.Infof("retrying volume detach (master has probably not stopped yet): %v", err)
					time.Sleep(5 * time.Second)
					continue
				}
				return fmt.Errorf("error detaching volume %q from master instance %q: %v", volumeID, masterInstanceID, err)
			}
		}
	}

	//subnets, err := DescribeSubnets(x.Cloud)
	//if err != nil {
	//	return fmt.Errorf("error finding subnets: %v", err)
	//}
	//for _, s := range subnets {
	//	id := aws.StringValue(s.SubnetId)
	//	err := awsCloud.AddAWSTags(id, newTags)
	//	if err != nil {
	//		return fmt.Errorf("error re-tagging subnet %q: %v", id, err)
	//	}
	//}

	// Retag VPC
	// We have to be careful because VPCs can be shared
	{
		vpcID := cluster.Spec.NetworkID
		retagGateway := false

		if vpcID != "" {
			tags, err := awsCloud.GetTags(vpcID)
			if err != nil {
				return fmt.Errorf("error getting VPC tags: %v", err)
			}

			clusterTag := tags[awsup.TagClusterName]
			if clusterTag != "" {
				if clusterTag != oldClusterName {
					return fmt.Errorf("VPC is tagged with a different cluster: %v", clusterTag)
				}
				replaceTags := make(map[string]string)
				replaceTags[awsup.TagClusterName] = newClusterName

				glog.Infof("Retagging VPC %q", vpcID)

				err := awsCloud.CreateTags(vpcID, replaceTags)
				if err != nil {
					return fmt.Errorf("error re-tagging VPC: %v", err)
				}

				// The VPC was tagged as ours, so make sure the gateway is consistently retagged
				retagGateway = true
			}
		}

		if retagGateway {
			gateways, err := DescribeInternetGatewaysIgnoreTags(x.Cloud)
			if err != nil {
				return fmt.Errorf("error listing gateways: %v", err)
			}
			for _, igw := range gateways {
				match := false
				for _, a := range igw.Attachments {
					if vpcID == aws.StringValue(a.VpcId) {
						match = true
					}
				}
				if !match {
					continue
				}

				id := aws.StringValue(igw.InternetGatewayId)

				clusterTag, _ := awsup.FindEC2Tag(igw.Tags, awsup.TagClusterName)
				if clusterTag == "" || clusterTag == oldClusterName {
					replaceTags := make(map[string]string)
					replaceTags[awsup.TagClusterName] = newClusterName

					glog.Infof("Retagging InternetGateway %q", id)

					err := awsCloud.CreateTags(id, replaceTags)
					if err != nil {
						return fmt.Errorf("error re-tagging InternetGateway: %v", err)
					}
				}
			}
		}
	}

	// Retag DHCP options
	// We have to be careful because DHCP options can be shared
	for _, dhcpOption := range dhcpOptions {
		id := aws.StringValue(dhcpOption.DhcpOptionsId)

		clusterTag, _ := awsup.FindEC2Tag(dhcpOption.Tags, awsup.TagClusterName)
		if clusterTag != "" {
			if clusterTag != oldClusterName {
				return fmt.Errorf("DHCP options are tagged with a different cluster: %v", clusterTag)
			}
			replaceTags := make(map[string]string)
			replaceTags[awsup.TagClusterName] = newClusterName

			glog.Infof("Retagging DHCPOptions %q", id)

			err := awsCloud.CreateTags(id, replaceTags)
			if err != nil {
				return fmt.Errorf("error re-tagging DHCP options: %v", err)
			}
		}

	}

	// Adopt LoadBalancers & LoadBalancer Security Groups
	for _, elb := range elbs {
		id := aws.StringValue(elb.LoadBalancerName)

		// TODO: Batch re-tag?
		replaceTags := make(map[string]string)
		replaceTags[awsup.TagClusterName] = newClusterName

		glog.Infof("Retagging ELB %q", id)
		err := awsCloud.CreateELBTags(id, replaceTags)
		if err != nil {
			return fmt.Errorf("error re-tagging ELB %q: %v", id, err)
		}

	}

	for _, elb := range elbs {
		for _, sg := range elb.SecurityGroups {
			id := aws.StringValue(sg)

			// TODO: Batch re-tag?
			replaceTags := make(map[string]string)
			replaceTags[awsup.TagClusterName] = newClusterName

			glog.Infof("Retagging ELB security group %q", id)
			err := awsCloud.CreateTags(id, replaceTags)
			if err != nil {
				return fmt.Errorf("error re-tagging ELB security group %q: %v", id, err)
			}
		}

	}

	// Adopt Volumes
	for _, volume := range volumes {
		id := aws.StringValue(volume.VolumeId)

		// TODO: Batch re-tag?
		replaceTags := make(map[string]string)
		replaceTags[awsup.TagClusterName] = newClusterName

		name, _ := awsup.FindEC2Tag(volume.Tags, "Name")
		if name == oldClusterName+"-master-pd" {
			glog.Infof("Found master volume %q: %s", id, name)

			az := aws.StringValue(volume.AvailabilityZone)
			replaceTags["Name"] = az + ".etcd-main." + newClusterName
		}
		glog.Infof("Retagging volume %q", id)
		err := awsCloud.CreateTags(id, replaceTags)
		if err != nil {
			return fmt.Errorf("error re-tagging volume %q: %v", id, err)
		}
	}

	cluster.Name = newClusterName
	err = api.WriteConfig(x.NewStateStore, cluster, x.InstanceGroups)
	if err != nil {
		return fmt.Errorf("error writing updated configuration: %v", err)
	}

	oldCACertPool, err := x.OldStateStore.CA().CertificatePool(fi.CertificateId_CA)
	if err != nil {
		return fmt.Errorf("error reading old CA certs: %v", err)
	}
	for _, ca := range oldCACertPool.Secondary {
		err := x.NewStateStore.CA().AddCert(fi.CertificateId_CA, ca)
		if err != nil {
			return fmt.Errorf("error importing old CA certs: %v", err)
		}
	}
	if oldCACertPool.Primary != nil {
		err := x.NewStateStore.CA().AddCert(fi.CertificateId_CA, oldCACertPool.Primary)
		if err != nil {
			return fmt.Errorf("error importing old CA certs: %v", err)
		}
	}

	return nil
}
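
The volume-detach loop above retries indefinitely while AWS reports IncorrectState. A bounded variant is sketched below; the helper name, signature, and maxAttempts are assumptions for illustration, not the project's code:

// detachVolumeWithRetry is a hypothetical bounded version of the retry loop
// in Upgrade: it keeps calling DetachVolume while the instance is still
// stopping, but gives up after maxAttempts rather than looping forever.
func detachVolumeWithRetry(awsCloud *awsup.AWSCloud, request *ec2.DetachVolumeInput, maxAttempts int) error {
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		_, err := awsCloud.EC2.DetachVolume(request)
		if err == nil {
			return nil
		}
		if AWSErrorCode(err) != "IncorrectState" {
			return fmt.Errorf("error detaching volume: %v", err)
		}
		glog.Infof("volume not detachable yet (attempt %d/%d): %v", attempt, maxAttempts, err)
		time.Sleep(5 * time.Second)
	}
	return fmt.Errorf("volume still attached after %d attempts", maxAttempts)
}
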
Example #3
// Run assembles the cluster configuration from CLI flags and the state store,
// fills in defaulted master/node instance groups and etcd clusters, persists
// the configuration, and hands off to cloudup.CreateClusterCmd.
func (c *CreateClusterCmd) Run() error {
	isDryrun := false
	if c.DryRun {
		isDryrun = true
		c.Target = "dryrun"
	}

	stateStoreLocation := rootCommand.stateLocation
	if stateStoreLocation == "" {
		return fmt.Errorf("--state is required")
	}

	clusterName := rootCommand.clusterName
	if clusterName == "" {
		return fmt.Errorf("--name is required")
	}

	// TODO: Reuse rootCommand stateStore logic?

	statePath, err := vfs.Context.BuildVfsPath(stateStoreLocation)
	if err != nil {
		return fmt.Errorf("error building state location: %v", err)
	}

	if c.OutDir == "" {
		c.OutDir = "out"
	}

	stateStore, err := fi.NewVFSStateStore(statePath, clusterName, isDryrun)
	if err != nil {
		return fmt.Errorf("error building state store: %v", err)
	}

	cluster, instanceGroups, err := api.ReadConfig(stateStore)
	if err != nil {
		return fmt.Errorf("error loading configuration: %v", err)
	}

	if c.Zones != "" {
		existingZones := make(map[string]*api.ClusterZoneSpec)
		for _, zone := range cluster.Spec.Zones {
			existingZones[zone.Name] = zone
		}

		for _, zone := range parseZoneList(c.Zones) {
			if existingZones[zone] == nil {
				cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
					Name: zone,
				})
			}
		}
	}

	if len(cluster.Spec.Zones) == 0 {
		return fmt.Errorf("must specify at least one zone for the cluster (use --zones)")
	}

	var masters []*api.InstanceGroup
	var nodes []*api.InstanceGroup

	for _, group := range instanceGroups {
		if group.IsMaster() {
			masters = append(masters, group)
		} else {
			nodes = append(nodes, group)
		}
	}

	if c.MasterZones == "" {
		if len(masters) == 0 {
			// Default to putting into every zone
			// TODO: just the first 1 or 3 zones; or should we force users to declare?
			for _, zone := range cluster.Spec.Zones {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone.Name}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone.Name // Subsequent masters (if we support that) could be <zone>-1, <zone>-2
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		}
	} else {
		if len(masters) == 0 {
			for _, zone := range parseZoneList(c.MasterZones) {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		} else {
			// This is hard, because of the etcd cluster
			glog.Errorf("Cannot change master-zones from the CLI")
			os.Exit(1)
		}
	}

	if len(cluster.Spec.EtcdClusters) == 0 {
		zones := sets.NewString()
		for _, group := range instanceGroups {
			for _, zone := range group.Spec.Zones {
				zones.Insert(zone)
			}
		}
		etcdZones := zones.List()
		if (len(etcdZones) % 2) == 0 {
			// Not technically a requirement, but doesn't really make sense to allow
			glog.Errorf("There should be an odd number of master-zones, for etcd's quorum.  Hint: Use --zones and --master-zones to declare node zones and master zones separately.")
			os.Exit(1)
		}

		for _, etcdCluster := range EtcdClusters {
			etcd := &api.EtcdClusterSpec{}
			etcd.Name = etcdCluster
			for _, zone := range etcdZones {
				m := &api.EtcdMemberSpec{}
				m.Name = zone
				m.Zone = zone
				etcd.Members = append(etcd.Members, m)
			}
			cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcd)
		}
	}

	if len(nodes) == 0 {
		g := &api.InstanceGroup{}
		g.Spec.Role = api.InstanceGroupRoleNode
		g.Name = "nodes"
		instanceGroups = append(instanceGroups, g)
		nodes = append(nodes, g)
	}

	if c.NodeSize != "" {
		for _, group := range nodes {
			group.Spec.MachineType = c.NodeSize
		}
	}

	if c.Image != "" {
		for _, group := range instanceGroups {
			group.Spec.Image = c.Image
		}
	}

	if c.NodeCount != 0 {
		for _, group := range nodes {
			group.Spec.MinSize = fi.Int(c.NodeCount)
			group.Spec.MaxSize = fi.Int(c.NodeCount)
		}
	}

	if c.MasterSize != "" {
		for _, group := range masters {
			group.Spec.MachineType = c.MasterSize
		}
	}

	if c.DNSZone != "" {
		cluster.Spec.DNSZone = c.DNSZone
	}

	if c.Cloud != "" {
		cluster.Spec.CloudProvider = c.Cloud
	}

	if c.Project != "" {
		cluster.Spec.Project = c.Project
	}

	if clusterName != "" {
		cluster.Name = clusterName
	}

	if c.KubernetesVersion != "" {
		cluster.Spec.KubernetesVersion = c.KubernetesVersion
	}

	if c.VPCID != "" {
		cluster.Spec.NetworkID = c.VPCID
	}

	if c.NetworkCIDR != "" {
		cluster.Spec.NetworkCIDR = c.NetworkCIDR
	}

	if cluster.SharedVPC() && cluster.Spec.NetworkCIDR == "" {
		glog.Errorf("Must specify NetworkCIDR when VPC is set")
		os.Exit(1)
	}

	if cluster.Spec.CloudProvider == "" {
		for _, zone := range cluster.Spec.Zones {
			cloud, known := fi.GuessCloudForZone(zone.Name)
			if known {
				glog.Infof("Inferred --cloud=%s from zone %q", cloud, zone.Name)
				cluster.Spec.CloudProvider = string(cloud)
				break
			}
		}
	}

	if c.SSHPublicKey != "" {
		c.SSHPublicKey = utils.ExpandPath(c.SSHPublicKey)
	}

	err = cluster.PerformAssignments()
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}
	err = api.PerformAssignmentsInstanceGroups(instanceGroups)
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}

	err = api.WriteConfig(stateStore, cluster, instanceGroups)
	if err != nil {
		return fmt.Errorf("error writing updated configuration: %v", err)
	}

	cmd := &cloudup.CreateClusterCmd{
		Cluster:        cluster,
		InstanceGroups: instanceGroups,
		ModelStore:     c.ModelsBaseDir,
		Models:         strings.Split(c.Models, ","),
		StateStore:     stateStore,
		Target:         c.Target,
		NodeModel:      c.NodeModel,
		SSHPublicKey:   c.SSHPublicKey,
		OutDir:         c.OutDir,
	}
	//if *configFile != "" {
	//	//confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
	//	err := cmd.LoadConfig(configFile)
	//	if err != nil {
	//		glog.Errorf("error loading config: %v", err)
	//		os.Exit(1)
	//	}
	//}

	return cmd.Run()
}
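
parseZoneList is called above but not shown. A minimal sketch of the behavior the callers need (assuming zones arrive as a comma-separated flag value; the real implementation may differ):

func parseZoneList(s string) []string {
	var zones []string
	for _, z := range strings.Split(s, ",") {
		z = strings.TrimSpace(z)
		if z != "" {
			zones = append(zones, z) // keep non-empty, trimmed zone names
		}
	}
	return zones
}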