Example #1
func (c *AddonsCmd) buildClusterAddons() (*kutil.ClusterAddons, error) {
	//if c.ClusterName == "" {
	//	return nil, fmt.Errorf("--name is required")
	//}

	kubectl := &kutil.Kubectl{}
	//context, err := kubectl.GetCurrentContext()
	//if err != nil {
	//	return nil, fmt.Errorf("error getting current context from kubectl: %v", err)
	//}
	//glog.V(4).Infof("context = %q", context)

	configString, err := kubectl.GetConfig(true, "json")
	if err != nil {
		return nil, fmt.Errorf("error getting current config from kubectl: %v", err)
	}
	glog.V(8).Infof("config = %q", configString)

	config := &kubectlConfig{}
	err = json.Unmarshal([]byte(configString), config)
	if err != nil {
		return nil, fmt.Errorf("cannot parse current config from kubectl: %v", err)
	}

	if len(config.Clusters) != 1 {
		return nil, fmt.Errorf("expected exactly one cluster in kubectl config, found %d", len(config.Clusters))
	}

	namedCluster := config.Clusters[0]
	glog.V(4).Infof("using cluster name %q", namedCluster.Name)
	server := namedCluster.Cluster.Server
	server = strings.TrimSpace(server)
	if server == "" {
		return nil, fmt.Errorf("server was not set in kubectl config")
	}

	k := &kutil.ClusterAddons{
		APIEndpoint: server,
	}

	privateKeyFile := utils.ExpandPath("~/.ssh/id_rsa")
	err = kutil.AddSSHIdentity(&k.SSHConfig, privateKeyFile)
	if err != nil {
		return nil, fmt.Errorf("error adding SSH private key %q: %v", err)
	}

	return k, nil
}
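
The kubectlConfig type that json.Unmarshal targets above is defined elsewhere in the package. A minimal sketch of what it would need to look like, given the fields accessed (Clusters, Name, Cluster.Server) and the shape of kubectl's JSON config output, follows; the helper type names (kubectlClusterWithName, kubectlCluster) are illustrative, not confirmed by this example:

// kubectlConfig mirrors the top level of `kubectl config view -o json`.
type kubectlConfig struct {
	Kind       string                    `json:"kind"`
	ApiVersion string                    `json:"apiVersion"`
	Clusters   []*kubectlClusterWithName `json:"clusters"`
}

// kubectlClusterWithName pairs a cluster entry with its name, matching
// kubectl's "clusters" list layout.
type kubectlClusterWithName struct {
	Name    string         `json:"name"`
	Cluster kubectlCluster `json:"cluster"`
}

// kubectlCluster holds the per-cluster connection details; only the API
// server endpoint is needed here.
type kubectlCluster struct {
	Server string `json:"server"`
}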
Example #2
func (c *CreateClusterCmd) Run() error {
	isDryrun := false
	if c.DryRun {
		isDryrun = true
		c.Target = "dryrun"
	}

	stateStoreLocation := rootCommand.stateLocation
	if stateStoreLocation == "" {
		return fmt.Errorf("--state is required")
	}

	clusterName := rootCommand.clusterName
	if clusterName == "" {
		return fmt.Errorf("--name is required")
	}

	// TODO: Reuse rootCommand stateStore logic?

	statePath, err := vfs.Context.BuildVfsPath(stateStoreLocation)
	if err != nil {
		return fmt.Errorf("error building state location: %v", err)
	}

	if c.OutDir == "" {
		c.OutDir = "out"
	}

	stateStore, err := fi.NewVFSStateStore(statePath, clusterName, isDryrun)
	if err != nil {
		return fmt.Errorf("error building state store: %v", err)
	}

	cluster, instanceGroups, err := api.ReadConfig(stateStore)
	if err != nil {
		return fmt.Errorf("error loading configuration: %v", err)
	}

	if c.Zones != "" {
		existingZones := make(map[string]*api.ClusterZoneSpec)
		for _, zone := range cluster.Spec.Zones {
			existingZones[zone.Name] = zone
		}

		for _, zone := range parseZoneList(c.Zones) {
			if existingZones[zone] == nil {
				cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
					Name: zone,
				})
			}
		}
	}

	if len(cluster.Spec.Zones) == 0 {
		return fmt.Errorf("must specify at least one zone for the cluster (use --zones)")
	}

	var masters []*api.InstanceGroup
	var nodes []*api.InstanceGroup

	for _, group := range instanceGroups {
		if group.IsMaster() {
			masters = append(masters, group)
		} else {
			nodes = append(nodes, group)
		}
	}

	if c.MasterZones == "" {
		if len(masters) == 0 {
			// Default to putting into every zone
			// TODO: just the first 1 or 3 zones; or should we force users to declare?
			for _, zone := range cluster.Spec.Zones {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone.Name}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone.Name // Subsequent masters (if we support that) could be <zone>-1, <zone>-2
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		}
	} else {
		if len(masters) == 0 {
			for _, zone := range parseZoneList(c.MasterZones) {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		} else {
			// This is hard, because of the etcd cluster
			glog.Errorf("Cannot change master-zones from the CLI")
			os.Exit(1)
		}
	}

	if len(cluster.Spec.EtcdClusters) == 0 {
		// etcd members run on the masters, so derive the candidate zones from
		// the master instance groups rather than from every instance group.
		zones := sets.NewString()
		for _, group := range masters {
			for _, zone := range group.Spec.Zones {
				zones.Insert(zone)
			}
		}
		etcdZones := zones.List()
		if (len(etcdZones) % 2) == 0 {
			// Not technically a requirement, but doesn't really make sense to allow
			glog.Errorf("There should be an odd number of master-zones, for etcd's quorum.  Hint: Use --zones and --master-zones to declare node zones and master zones separately.")
			os.Exit(1)
		}

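		// EtcdClusters is assumed to be a package-level list of the etcd
		// clusters to create (in kops, "main" and "events").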
		for _, etcdCluster := range EtcdClusters {
			etcd := &api.EtcdClusterSpec{}
			etcd.Name = etcdCluster
			for _, zone := range etcdZones {
				m := &api.EtcdMemberSpec{}
				m.Name = zone
				m.Zone = zone
				etcd.Members = append(etcd.Members, m)
			}
			cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcd)
		}
	}

	if len(nodes) == 0 {
		g := &api.InstanceGroup{}
		g.Spec.Role = api.InstanceGroupRoleNode
		g.Name = "nodes"
		instanceGroups = append(instanceGroups, g)
		nodes = append(nodes, g)
	}

	if c.NodeSize != "" {
		for _, group := range nodes {
			group.Spec.MachineType = c.NodeSize
		}
	}

	if c.Image != "" {
		for _, group := range instanceGroups {
			group.Spec.Image = c.Image
		}
	}

	if c.NodeCount != 0 {
		for _, group := range nodes {
			group.Spec.MinSize = fi.Int(c.NodeCount)
			group.Spec.MaxSize = fi.Int(c.NodeCount)
		}
	}

	if c.MasterSize != "" {
		for _, group := range masters {
			group.Spec.MachineType = c.MasterSize
		}
	}

	if c.DNSZone != "" {
		cluster.Spec.DNSZone = c.DNSZone
	}

	if c.Cloud != "" {
		cluster.Spec.CloudProvider = c.Cloud
	}

	if c.Project != "" {
		cluster.Spec.Project = c.Project
	}

	if clusterName != "" {
		cluster.Name = clusterName
	}

	if c.KubernetesVersion != "" {
		cluster.Spec.KubernetesVersion = c.KubernetesVersion
	}

	if c.VPCID != "" {
		cluster.Spec.NetworkID = c.VPCID
	}

	if c.NetworkCIDR != "" {
		cluster.Spec.NetworkCIDR = c.NetworkCIDR
	}

	if cluster.SharedVPC() && cluster.Spec.NetworkCIDR == "" {
		glog.Errorf("Must specify NetworkCIDR when VPC is set")
		os.Exit(1)
	}

	if cluster.Spec.CloudProvider == "" {
		for _, zone := range cluster.Spec.Zones {
			cloud, known := fi.GuessCloudForZone(zone.Name)
			if known {
				glog.Infof("Inferred --cloud=%s from zone %q", cloud, zone.Name)
				cluster.Spec.CloudProvider = string(cloud)
				break
			}
		}
	}

	if c.SSHPublicKey != "" {
		c.SSHPublicKey = utils.ExpandPath(c.SSHPublicKey)
	}

	err = cluster.PerformAssignments()
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}
	err = api.PerformAssignmentsInstanceGroups(instanceGroups)
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}

	err = api.WriteConfig(stateStore, cluster, instanceGroups)
	if err != nil {
		return fmt.Errorf("error writing updated configuration: %v", err)
	}

	cmd := &cloudup.CreateClusterCmd{
		Cluster:        cluster,
		InstanceGroups: instanceGroups,
		ModelStore:     c.ModelsBaseDir,
		Models:         strings.Split(c.Models, ","),
		StateStore:     stateStore,
		Target:         c.Target,
		NodeModel:      c.NodeModel,
		SSHPublicKey:   c.SSHPublicKey,
		OutDir:         c.OutDir,
	}
	//if *configFile != "" {
	//	//confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
	//	err := cmd.LoadConfig(configFile)
	//	if err != nil {
	//		glog.Errorf("error loading config: %v", err)
	//		os.Exit(1)
	//	}
	//}

	return cmd.Run()
}
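
parseZoneList is a helper defined elsewhere. A plausible sketch, assuming it only needs to split a comma-separated --zones/--master-zones value, trim whitespace, and drop empty entries (the lowercasing is a guess at normalization, not confirmed by this example):

func parseZoneList(s string) []string {
	var zones []string
	for _, zone := range strings.Split(s, ",") {
		zone = strings.TrimSpace(zone)
		if zone == "" {
			continue
		}
		// Zone names are lowercase on the supported clouds, so normalize.
		zones = append(zones, strings.ToLower(zone))
	}
	return zones
}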