Example #1
func (u *UserDataConfiguration) ParseInt(key string) (*int, error) {
	s := u.Settings[key]
	if s == "" {
		return nil, nil
	}

	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("error parsing key %q=%q: %v", key, s, err)
	}

	return fi.Int(int(n)), nil
}
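
A minimal usage sketch for the helper above (a hypothetical caller; the DNS_REPLICAS key is borrowed from the settings referenced in Example #2):

// Illustrative only: how ParseInt might be called when reading user-data settings.
func exampleParseInt(conf *UserDataConfiguration) error {
	replicas, err := conf.ParseInt("DNS_REPLICAS")
	if err != nil {
		return err
	}
	if replicas == nil {
		// Setting not present; callers would fall back to a default.
		return nil
	}
	fmt.Printf("DNS_REPLICAS=%d\n", *replicas)
	return nil
}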
Example #2
func (x *ImportCluster) ImportAWSCluster() error {
	awsCloud := x.Cloud.(*awsup.AWSCloud)
	clusterName := x.ClusterName

	if clusterName == "" {
		return fmt.Errorf("ClusterName must be specified")
	}

	var instanceGroups []*api.InstanceGroup

	cluster := &api.Cluster{}
	cluster.Spec.CloudProvider = "aws"
	cluster.Name = clusterName

	cluster.Spec.KubeControllerManager = &api.KubeControllerManagerConfig{}

	masterGroup := &api.InstanceGroup{}
	masterGroup.Spec.Role = api.InstanceGroupRoleMaster
	masterGroup.Name = "masters"
	masterGroup.Spec.MinSize = fi.Int(1)
	masterGroup.Spec.MaxSize = fi.Int(1)
	instanceGroups = append(instanceGroups, masterGroup)

	instances, err := findInstances(awsCloud)
	if err != nil {
		return fmt.Errorf("error finding instances: %v", err)
	}

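	// Locate the master instance by its "Role" tag, tolerating terminated
	// duplicates but failing if more than one live master is found.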
	var masterInstance *ec2.Instance
	for _, instance := range instances {
		role, _ := awsup.FindEC2Tag(instance.Tags, "Role")
		if role == clusterName+"-master" {
			if masterInstance != nil {
				masterState := aws.StringValue(masterInstance.State.Name)
				thisState := aws.StringValue(instance.State.Name)

				glog.Infof("Found multiple masters: %s and %s", masterState, thisState)

				if masterState == "terminated" && thisState != "terminated" {
					// OK
				} else if thisState == "terminated" && masterState != "terminated" {
					// Ignore this one
					continue
				} else {
					return fmt.Errorf("found multiple masters")
				}
			}
			masterInstance = instance
		}
	}
	if masterInstance == nil {
		return fmt.Errorf("could not find master node")
	}
	masterInstanceID := aws.StringValue(masterInstance.InstanceId)
	glog.Infof("Found master: %q", masterInstanceID)

	masterGroup.Spec.MachineType = aws.StringValue(masterInstance.InstanceType)

	masterSubnetID := aws.StringValue(masterInstance.SubnetId)

	subnets, err := DescribeSubnets(x.Cloud)
	if err != nil {
		return fmt.Errorf("error finding subnets: %v", err)
	}
	var masterSubnet *ec2.Subnet
	for _, s := range subnets {
		if masterSubnetID == aws.StringValue(s.SubnetId) {
			if masterSubnet != nil {
				return fmt.Errorf("found duplicate subnet")
			}
			masterSubnet = s
		}
	}
	if masterSubnet == nil {
		return fmt.Errorf("cannot find subnet %q", masterSubnetID)
	}

	vpcID := aws.StringValue(masterInstance.VpcId)
	var vpc *ec2.Vpc
	{
		vpc, err = awsCloud.DescribeVPC(vpcID)
		if err != nil {
			return err
		}
		if vpc == nil {
			return fmt.Errorf("cannot find vpc %q", vpcID)
		}
	}

	cluster.Spec.NetworkID = vpcID
	cluster.Spec.NetworkCIDR = aws.StringValue(vpc.CidrBlock)

	az := aws.StringValue(masterSubnet.AvailabilityZone)
	masterGroup.Spec.Zones = []string{az}
	cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
		Name: az,

		// We will allocate a new CIDR
		//CIDR: aws.StringValue(masterSubnet.CidrBlock),
	})

	userData, err := GetInstanceUserData(awsCloud, masterInstanceID)
	if err != nil {
		return fmt.Errorf("error getting master user-data: %v", err)
	}

	conf, err := ParseUserDataConfiguration(userData)
	if err != nil {
		return fmt.Errorf("error parsing master user-data: %v", err)
	}

	//master := &NodeSSH{
	//	Hostname: c.Master,
	//}
	//err := master.AddSSHIdentity(c.SSHIdentity)
	//if err != nil {
	//	return err
	//}
	//
	//
	//fmt.Printf("Connecting to node on %s\n", c.Node)
	//
	//node := &NodeSSH{
	//	Hostname: c.Node,
	//}
	//err = node.AddSSHIdentity(c.SSHIdentity)
	//if err != nil {
	//	return err
	//}

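	// Sanity-check that the instance's INSTANCE_PREFIX matches the requested cluster name.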
	instancePrefix := conf.Settings["INSTANCE_PREFIX"]
	if instancePrefix == "" {
		return fmt.Errorf("cannot determine INSTANCE_PREFIX")
	}
	if instancePrefix != clusterName {
		return fmt.Errorf("INSTANCE_PREFIX %q did not match cluster name %q", instancePrefix, clusterName)
	}

	//k8s.NodeMachineType, err = InstanceType(node)
	//if err != nil {
	//	return fmt.Errorf("cannot determine node instance type: %v", err)
	//}

	// We want to upgrade!
	// k8s.ImageId = ""

	//clusterConfig.ClusterIPRange = conf.Settings["CLUSTER_IP_RANGE"]
	cluster.Spec.KubeControllerManager.AllocateNodeCIDRs = conf.ParseBool("ALLOCATE_NODE_CIDRS")
	//clusterConfig.KubeUser = conf.Settings["KUBE_USER"]
	cluster.Spec.ServiceClusterIPRange = conf.Settings["SERVICE_CLUSTER_IP_RANGE"]
	cluster.Spec.NonMasqueradeCIDR = conf.Settings["NON_MASQUERADE_CIDR"]
	//clusterConfig.EnableClusterMonitoring = conf.Settings["ENABLE_CLUSTER_MONITORING"]
	//clusterConfig.EnableClusterLogging = conf.ParseBool("ENABLE_CLUSTER_LOGGING")
	//clusterConfig.EnableNodeLogging = conf.ParseBool("ENABLE_NODE_LOGGING")
	//clusterConfig.LoggingDestination = conf.Settings["LOGGING_DESTINATION"]
	//clusterConfig.ElasticsearchLoggingReplicas, err = parseInt(conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"])
	//if err != nil {
	//	return fmt.Errorf("cannot parse ELASTICSEARCH_LOGGING_REPLICAS=%q: %v", conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"], err)
	//}
	//clusterConfig.EnableClusterDNS = conf.ParseBool("ENABLE_CLUSTER_DNS")
	//clusterConfig.EnableClusterUI = conf.ParseBool("ENABLE_CLUSTER_UI")
	//clusterConfig.DNSReplicas, err = parseInt(conf.Settings["DNS_REPLICAS"])
	//if err != nil {
	//	return fmt.Errorf("cannot parse DNS_REPLICAS=%q: %v", conf.Settings["DNS_REPLICAS"], err)
	//}
	//clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
	cluster.Spec.ClusterDNSDomain = conf.Settings["DNS_DOMAIN"]
	//clusterConfig.AdmissionControl = conf.Settings["ADMISSION_CONTROL"]
	//clusterConfig.MasterIPRange = conf.Settings["MASTER_IP_RANGE"]
	//clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
	//clusterConfig.DockerStorage = conf.Settings["DOCKER_STORAGE"]
	//k8s.MasterExtraSans = conf.Settings["MASTER_EXTRA_SANS"] // Not user set

	primaryNodeSet := &api.InstanceGroup{}
	primaryNodeSet.Spec.Role = api.InstanceGroupRoleNode
	primaryNodeSet.Name = "nodes"
	instanceGroups = append(instanceGroups, primaryNodeSet)

	//primaryNodeSet.Spec.MinSize, err = conf.ParseInt("NUM_MINIONS")
	//if err != nil {
	//	return fmt.Errorf("cannot parse NUM_MINIONS=%q: %v", conf.Settings["NUM_MINIONS"], err)
	//}

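	// Derive the node group size by summing min/max across the existing autoscaling groups.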
	{
		groups, err := findAutoscalingGroups(awsCloud, awsCloud.Tags())
		if err != nil {
			return fmt.Errorf("error listing autoscaling groups: %v", err)
		}
		if len(groups) == 0 {
			glog.Warningf("No Autoscaling group found")
		}
		if len(groups) > 1 {
			glog.Warningf("Multiple Autoscaling groups found")
		}
		minSize := 0
		maxSize := 0
		for _, group := range groups {
			minSize += int(aws.Int64Value(group.MinSize))
			maxSize += int(aws.Int64Value(group.MaxSize))
		}
		if minSize != 0 {
			primaryNodeSet.Spec.MinSize = fi.Int(minSize)
		}
		if maxSize != 0 {
			primaryNodeSet.Spec.MaxSize = fi.Int(maxSize)
		}

		// TODO: machine types
		//primaryNodeSet.NodeMachineType = k8s.MasterMachineType
	}

	if conf.Version == "1.1" {
		// If users went with defaults on some things, clear them out so they get the new defaults
		//if clusterConfig.AdmissionControl == "NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" {
		//	// More admission controllers in 1.2
		//	clusterConfig.AdmissionControl = ""
		//}
		if masterGroup.Spec.MachineType == "t2.micro" {
			// Different defaults in 1.2
			masterGroup.Spec.MachineType = ""
		}
		if primaryNodeSet.Spec.MachineType == "t2.micro" {
			// Encourage users to pick something better...
			primaryNodeSet.Spec.MachineType = ""
		}
	}
	if conf.Version == "1.2" {
		// If users went with defaults on some things, clear them out so they get the new defaults
		//if clusterConfig.AdmissionControl == "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,ResourceQuota" {
		//	// More admission controllers in 1.2
		//	clusterConfig.AdmissionControl = ""
		//}
	}

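	// Build the "main" and "events" etcd clusters, with one member per master zone.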
	for _, etcdClusterName := range []string{"main", "events"} {
		etcdCluster := &api.EtcdClusterSpec{
			Name: etcdClusterName,
		}
		for _, az := range masterGroup.Spec.Zones {
			etcdCluster.Members = append(etcdCluster.Members, &api.EtcdMemberSpec{
				Name: az,
				Zone: az,
			})
		}
		cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcdCluster)
	}

	//if masterInstance.PublicIpAddress != nil {
	//	eip, err := findElasticIP(cloud, *masterInstance.PublicIpAddress)
	//	if err != nil {
	//		return err
	//	}
	//	if eip != nil {
	//		k8s.MasterElasticIP = masterInstance.PublicIpAddress
	//	}
	//}
	//
	//vpc, err := cloud.DescribeVPC(*k8s.VPCID)
	//if err != nil {
	//	return err
	//}
	//k8s.DHCPOptionsID = vpc.DhcpOptionsId
	//
	//igw, err := findInternetGateway(cloud, *k8s.VPCID)
	//if err != nil {
	//	return err
	//}
	//if igw == nil {
	//	return fmt.Errorf("unable to find internet gateway for VPC %q", k8s.VPCID)
	//}
	//k8s.InternetGatewayID = igw.InternetGatewayId
	//
	//rt, err := findRouteTable(cloud, *k8s.SubnetID)
	//if err != nil {
	//	return err
	//}
	//if rt == nil {
	//	return fmt.Errorf("unable to find route table for Subnet %q", k8s.SubnetID)
	//}
	//k8s.RouteTableID = rt.RouteTableId

	//b.Context = "aws_" + instancePrefix

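	// Import the existing CA certificate from the master user-data into the new key store.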
	newKeyStore := x.StateStore.CA()

	//caCert, err := masterSSH.Join("ca.crt").ReadFile()
	caCert, err := conf.ParseCert("CA_CERT")
	if err != nil {
		return err
	}
	err = newKeyStore.AddCert(fi.CertificateId_CA, caCert)
	if err != nil {
		return err
	}

	////masterKey, err := masterSSH.Join("server.key").ReadFile()
	//masterKey, err := conf.ParseKey("MASTER_KEY")
	//if err != nil {
	//	return err
	//}
	////masterCert, err := masterSSH.Join("server.cert").ReadFile()
	//masterCert, err := conf.ParseCert("MASTER_CERT")
	//if err != nil {
	//	return err
	//}
	//err = keyStore.ImportKeypair("master", masterKey, masterCert)
	//if err != nil {
	//	return err
	//}
	//
	////kubeletKey, err := kubeletSSH.Join("kubelet.key").ReadFile()
	//kubeletKey, err := conf.ParseKey("KUBELET_KEY")
	//if err != nil {
	//	return err
	//}
	////kubeletCert, err := kubeletSSH.Join("kubelet.cert").ReadFile()
	//kubeletCert, err := conf.ParseCert("KUBELET_CERT")
	//if err != nil {
	//	return err
	//}
	//err = keyStore.ImportKeypair("kubelet", kubeletKey, kubeletCert)
	//if err != nil {
	//	return err
	//}

	// We don't store the kubecfg key
	//kubecfgKey, err := masterSSH.Join("kubecfg.key").ReadFile()
	//if err != nil {
	//	return err
	//}
	//kubecfgCert, err := masterSSH.Join("kubecfg.cert").ReadFile()
	//if err != nil {
	//	return err
	//}
	//err = keyStore.ImportKeypair("kubecfg", kubecfgKey, kubecfgCert)
	//if err != nil {
	//	return err
	//}

	//// We will generate new tokens, but some of these are in existing API objects
	//secretStore := x.StateStore.Secrets()
	//kubePassword := conf.Settings["KUBE_PASSWORD"]
	//kubeletToken = conf.Settings["KUBELET_TOKEN"]
	//kubeProxyToken = conf.Settings["KUBE_PROXY_TOKEN"]

	err = api.WriteConfig(x.StateStore, cluster, instanceGroups)
	if err != nil {
		return err
	}

	return nil
}
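
A hedged sketch of how the import entry point above might be invoked; the cluster name is a placeholder, and the cloud and state store are assumed to be constructed elsewhere:

// Illustrative only: wire up an ImportCluster and run the AWS import.
func runImport(awsCloud *awsup.AWSCloud, stateStore fi.StateStore) error {
	x := &ImportCluster{
		ClusterName: "mycluster.example.com", // hypothetical
		Cloud:       awsCloud,
		StateStore:  stateStore, // assumed to satisfy the StateStore field's interface
	}
	return x.ImportAWSCluster()
}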
Example #3
func (c *CreateClusterCmd) Run() error {
	isDryrun := false
	if c.DryRun {
		isDryrun = true
		c.Target = "dryrun"
	}

	stateStoreLocation := rootCommand.stateLocation
	if stateStoreLocation == "" {
		return fmt.Errorf("--state is required")
	}

	clusterName := rootCommand.clusterName
	if clusterName == "" {
		return fmt.Errorf("--name is required")
	}

	// TODO: Reuse rootCommand stateStore logic?

	statePath, err := vfs.Context.BuildVfsPath(stateStoreLocation)
	if err != nil {
		return fmt.Errorf("error building state location: %v", err)
	}

	if c.OutDir == "" {
		c.OutDir = "out"
	}

	stateStore, err := fi.NewVFSStateStore(statePath, clusterName, isDryrun)
	if err != nil {
		return fmt.Errorf("error building state store: %v", err)
	}

	cluster, instanceGroups, err := api.ReadConfig(stateStore)
	if err != nil {
		return fmt.Errorf("error loading configuration: %v", err)
	}

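	// Merge any zones passed via --zones into the cluster spec, skipping zones already present.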
	if c.Zones != "" {
		existingZones := make(map[string]*api.ClusterZoneSpec)
		for _, zone := range cluster.Spec.Zones {
			existingZones[zone.Name] = zone
		}

		for _, zone := range parseZoneList(c.Zones) {
			if existingZones[zone] == nil {
				cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
					Name: zone,
				})
			}
		}
	}

	if len(cluster.Spec.Zones) == 0 {
		return fmt.Errorf("must specify at least one zone for the cluster (use --zones)")
	}

	var masters []*api.InstanceGroup
	var nodes []*api.InstanceGroup

	for _, group := range instanceGroups {
		if group.IsMaster() {
			masters = append(masters, group)
		} else {
			nodes = append(nodes, group)
		}
	}

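	// Create default master instance groups when none exist: one per cluster zone,
	// or one per --master-zones entry if that flag was given.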
	if c.MasterZones == "" {
		if len(masters) == 0 {
			// Default to putting into every zone
			// TODO: just the first 1 or 3 zones; or should we force users to declare?
			for _, zone := range cluster.Spec.Zones {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone.Name}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone.Name // Subsequent masters (if we support that) could be <zone>-1, <zone>-2
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		}
	} else {
		if len(masters) == 0 {
			for _, zone := range parseZoneList(c.MasterZones) {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		} else {
			// This is hard, because of the etcd cluster
			glog.Errorf("Cannot change master-zones from the CLI")
			os.Exit(1)
		}
	}

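	// Default the etcd clusters when none are configured: one member per zone,
	// with an odd number of zones required for quorum.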
	if len(cluster.Spec.EtcdClusters) == 0 {
		zones := sets.NewString()
		for _, group := range instanceGroups {
			for _, zone := range group.Spec.Zones {
				zones.Insert(zone)
			}
		}
		etcdZones := zones.List()
		if (len(etcdZones) % 2) == 0 {
			// Not technically a requirement, but doesn't really make sense to allow
			glog.Errorf("There should be an odd number of master-zones, for etcd's quorum.  Hint: Use --zones and --master-zones to declare node zones and master zones separately.")
			os.Exit(1)
		}

		for _, etcdCluster := range EtcdClusters {
			etcd := &api.EtcdClusterSpec{}
			etcd.Name = etcdCluster
			for _, zone := range etcdZones {
				m := &api.EtcdMemberSpec{}
				m.Name = zone
				m.Zone = zone
				etcd.Members = append(etcd.Members, m)
			}
			cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcd)
		}
	}

	if len(nodes) == 0 {
		g := &api.InstanceGroup{}
		g.Spec.Role = api.InstanceGroupRoleNode
		g.Name = "nodes"
		instanceGroups = append(instanceGroups, g)
		nodes = append(nodes, g)
	}

	if c.NodeSize != "" {
		for _, group := range nodes {
			group.Spec.MachineType = c.NodeSize
		}
	}

	if c.Image != "" {
		for _, group := range instanceGroups {
			group.Spec.Image = c.Image
		}
	}

	if c.NodeCount != 0 {
		for _, group := range nodes {
			group.Spec.MinSize = fi.Int(c.NodeCount)
			group.Spec.MaxSize = fi.Int(c.NodeCount)
		}
	}

	if c.MasterSize != "" {
		for _, group := range masters {
			group.Spec.MachineType = c.MasterSize
		}
	}

	if c.DNSZone != "" {
		cluster.Spec.DNSZone = c.DNSZone
	}

	if c.Cloud != "" {
		cluster.Spec.CloudProvider = c.Cloud
	}

	if c.Project != "" {
		cluster.Spec.Project = c.Project
	}

	if clusterName != "" {
		cluster.Name = clusterName
	}

	if c.KubernetesVersion != "" {
		cluster.Spec.KubernetesVersion = c.KubernetesVersion
	}

	if c.VPCID != "" {
		cluster.Spec.NetworkID = c.VPCID
	}

	if c.NetworkCIDR != "" {
		cluster.Spec.NetworkCIDR = c.NetworkCIDR
	}

	if cluster.SharedVPC() && cluster.Spec.NetworkCIDR == "" {
		glog.Errorf("Must specify NetworkCIDR when VPC is set")
		os.Exit(1)
	}

	if cluster.Spec.CloudProvider == "" {
		for _, zone := range cluster.Spec.Zones {
			cloud, known := fi.GuessCloudForZone(zone.Name)
			if known {
				glog.Infof("Inferred --cloud=%s from zone %q", cloud, zone.Name)
				cluster.Spec.CloudProvider = string(cloud)
				break
			}
		}
	}

	if c.SSHPublicKey != "" {
		c.SSHPublicKey = utils.ExpandPath(c.SSHPublicKey)
	}

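	// Fill in remaining derived defaults before persisting the updated configuration.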
	err = cluster.PerformAssignments()
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}
	err = api.PerformAssignmentsInstanceGroups(instanceGroups)
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}

	err = api.WriteConfig(stateStore, cluster, instanceGroups)
	if err != nil {
		return fmt.Errorf("error writing updated configuration: %v", err)
	}

	cmd := &cloudup.CreateClusterCmd{
		Cluster:        cluster,
		InstanceGroups: instanceGroups,
		ModelStore:     c.ModelsBaseDir,
		Models:         strings.Split(c.Models, ","),
		StateStore:     stateStore,
		Target:         c.Target,
		NodeModel:      c.NodeModel,
		SSHPublicKey:   c.SSHPublicKey,
		OutDir:         c.OutDir,
	}
	//if *configFile != "" {
	//	//confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
	//	err := cmd.LoadConfig(configFile)
	//	if err != nil {
	//		glog.Errorf("error loading config: %v", err)
	//		os.Exit(1)
	//	}
	//}

	return cmd.Run()
}
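
A hedged sketch of driving the command struct above directly; in practice these fields are populated from CLI flags, and the --state/--name values read from rootCommand are assumed to be configured separately:

// Illustrative only: populate the command and run it. The zone names and sizes are placeholders.
func runCreateCluster() error {
	c := &CreateClusterCmd{
		Zones:     "us-east-1a,us-east-1b,us-east-1c",
		NodeCount: 3,
		NodeSize:  "t2.medium",
		DryRun:    true,
	}
	return c.Run()
}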