Example 1
func (c *GetClustersCmd) Run() error {
	clusterNames, err := rootCommand.ListClusters()
	if err != nil {
		return err
	}

	columns := []string{}
	fields := []func(*api.Cluster) string{}

	columns = append(columns, "NAME")
	fields = append(fields, func(c *api.Cluster) string {
		return c.Name
	})

	var clusters []*api.Cluster

	for _, clusterName := range clusterNames {
		stateStore, err := rootCommand.StateStoreForCluster(clusterName)
		if err != nil {
			return err
		}

		// TODO: Faster if we don't read groups...
		// We probably can just have a command which directly reads all cluster config files
		cluster, _, err := api.ReadConfig(stateStore)
		if err != nil {
			return err
		}
		clusters = append(clusters, cluster)
	}
	if len(clusters) == 0 {
		return nil
	}
	return WriteTable(clusters, columns, fields)
}
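
WriteTable is a helper that the excerpt calls but does not define. A minimal sketch of what such a helper could look like, built on the standard library's text/tabwriter; the lower-case name writeTable and its exact signature are assumptions for illustration, not the actual kops implementation:

// writeTable is a hypothetical sketch: it prints the header row, then one
// row per cluster, computing each column by calling the matching field
// function. Assumed imports: fmt, os, strings, text/tabwriter.
func writeTable(clusters []*api.Cluster, columns []string, fields []func(*api.Cluster) string) error {
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprintln(w, strings.Join(columns, "\t"))
	for _, cluster := range clusters {
		values := make([]string, len(fields))
		for i, f := range fields {
			values[i] = f(cluster)
		}
		fmt.Fprintln(w, strings.Join(values, "\t"))
	}
	return w.Flush()
}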
Example 2
func (c *ExportKubecfgCommand) Run() error {
	stateStore, err := rootCommand.StateStore()
	if err != nil {
		return err
	}

	cluster, _, err := api.ReadConfig(stateStore)
	if err != nil {
		return fmt.Errorf("error reading configuration: %v", err)
	}

	clusterName := cluster.Name
	if clusterName == "" {
		return fmt.Errorf("ClusterName must be set in config")
	}

	master := cluster.Spec.MasterPublicName
	if master == "" {
		master = "api." + clusterName
	}

	//cloudProvider := config.CloudProvider
	//if cloudProvider == "" {
	//	return fmt.Errorf("cloud must be specified")
	//}

	c.tmpdir, err = ioutil.TempDir("", "k8s")
	if err != nil {
		return fmt.Errorf("error creating temporary directory: %v", err)
	}
	defer os.RemoveAll(c.tmpdir)

	b := &kubecfg.KubeconfigBuilder{}
	b.Init()

	b.Context = clusterName
	//switch cloudProvider {
	//case "aws":
	//	b.Context = "aws_" + clusterName
	//
	//case "gce":
	//	if config.Project == "" {
	//		return fmt.Errorf("Project must be configured (for GCE)")
	//	}
	//	b.Context = config.Project + "_" + clusterName
	//
	//default:
	//	return fmt.Errorf("Unknown cloud provider %q", cloudProvider)
	//}

	c.caStore, err = rootCommand.CA()
	if err != nil {
		return err
	}

	if b.CACert, err = c.copyCertificate(fi.CertificateId_CA); err != nil {
		return err
	}

	if b.KubecfgCert, err = c.copyCertificate("kubecfg"); err != nil {
		return err
	}

	if b.KubecfgKey, err = c.copyPrivateKey("kubecfg"); err != nil {
		return err
	}

	b.KubeMasterIP = master

	err = b.CreateKubeconfig()
	if err != nil {
		return err
	}

	return nil
}
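
The copyCertificate and copyPrivateKey helpers referenced above are not part of the excerpt. A sketch of how copyCertificate might work, under the assumption that the CA store exposes a Cert(id) lookup with PEM serialization and that the builder fields hold file paths (assumptions, not confirmed by the excerpt):

// copyCertificate is a hypothetical sketch: it looks up a certificate in
// the CA store, writes it into the command's temporary directory, and
// returns the path for the kubeconfig builder to reference.
// Assumed imports: fmt, io/ioutil, path.
func (c *ExportKubecfgCommand) copyCertificate(id string) (string, error) {
	cert, err := c.caStore.Cert(id) // assumed lookup method
	if err != nil {
		return "", fmt.Errorf("error fetching certificate %q: %v", id, err)
	}
	data, err := cert.AsBytes() // assumed PEM serialization
	if err != nil {
		return "", err
	}
	p := path.Join(c.tmpdir, id+".crt")
	if err := ioutil.WriteFile(p, data, 0600); err != nil {
		return "", fmt.Errorf("error writing %q: %v", p, err)
	}
	return p, nil
}

copyPrivateKey would presumably follow the same pattern against the store's private-key lookup.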
Example 3
func (c *CreateClusterCmd) Run() error {
	isDryrun := false
	if c.DryRun {
		isDryrun = true
		c.Target = "dryrun"
	}

	stateStoreLocation := rootCommand.stateLocation
	if stateStoreLocation == "" {
		return fmt.Errorf("--state is required")
	}

	clusterName := rootCommand.clusterName
	if clusterName == "" {
		return fmt.Errorf("--name is required")
	}

	// TODO: Reuse rootCommand stateStore logic?

	statePath, err := vfs.Context.BuildVfsPath(stateStoreLocation)
	if err != nil {
		return fmt.Errorf("error building state location: %v", err)
	}

	if c.OutDir == "" {
		c.OutDir = "out"
	}

	stateStore, err := fi.NewVFSStateStore(statePath, clusterName, isDryrun)
	if err != nil {
		return fmt.Errorf("error building state store: %v", err)
	}

	cluster, instanceGroups, err := api.ReadConfig(stateStore)
	if err != nil {
		return fmt.Errorf("error loading configuration: %v", err)
	}

	if c.Zones != "" {
		existingZones := make(map[string]*api.ClusterZoneSpec)
		for _, zone := range cluster.Spec.Zones {
			existingZones[zone.Name] = zone
		}

		for _, zone := range parseZoneList(c.Zones) {
			if existingZones[zone] == nil {
				cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
					Name: zone,
				})
			}
		}
	}

	if len(cluster.Spec.Zones) == 0 {
		return fmt.Errorf("must specify at least one zone for the cluster (use --zones)")
	}

	var masters []*api.InstanceGroup
	var nodes []*api.InstanceGroup

	for _, group := range instanceGroups {
		if group.IsMaster() {
			masters = append(masters, group)
		} else {
			nodes = append(nodes, group)
		}
	}

	if c.MasterZones == "" {
		if len(masters) == 0 {
			// Default to putting into every zone
			// TODO: just the first 1 or 3 zones; or should we force users to declare?
			for _, zone := range cluster.Spec.Zones {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone.Name}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone.Name // Subsequent masters (if we support that) could be <zone>-1, <zone>-2
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		}
	} else {
		if len(masters) == 0 {
			for _, zone := range parseZoneList(c.MasterZones) {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		} else {
			// This is hard, because of the etcd cluster
			glog.Errorf("Cannot change master-zones from the CLI")
			os.Exit(1)
		}
	}

	if len(cluster.Spec.EtcdClusters) == 0 {
		zones := sets.NewString()
		for _, group := range instanceGroups {
			for _, zone := range group.Spec.Zones {
				zones.Insert(zone)
			}
		}
		etcdZones := zones.List()
		if (len(etcdZones) % 2) == 0 {
			// Not technically a requirement, but doesn't really make sense to allow
			glog.Errorf("There should be an odd number of master-zones, for etcd's quorum.  Hint: Use --zones and --master-zones to declare node zones and master zones separately.")
			os.Exit(1)
		}

		for _, etcdCluster := range EtcdClusters {
			etcd := &api.EtcdClusterSpec{}
			etcd.Name = etcdCluster
			for _, zone := range etcdZones {
				m := &api.EtcdMemberSpec{}
				m.Name = zone
				m.Zone = zone
				etcd.Members = append(etcd.Members, m)
			}
			cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcd)
		}
	}

	if len(nodes) == 0 {
		g := &api.InstanceGroup{}
		g.Spec.Role = api.InstanceGroupRoleNode
		g.Name = "nodes"
		instanceGroups = append(instanceGroups, g)
		nodes = append(nodes, g)
	}

	if c.NodeSize != "" {
		for _, group := range nodes {
			group.Spec.MachineType = c.NodeSize
		}
	}

	if c.Image != "" {
		for _, group := range instanceGroups {
			group.Spec.Image = c.Image
		}
	}

	if c.NodeCount != 0 {
		for _, group := range nodes {
			group.Spec.MinSize = fi.Int(c.NodeCount)
			group.Spec.MaxSize = fi.Int(c.NodeCount)
		}
	}

	if c.MasterSize != "" {
		for _, group := range masters {
			group.Spec.MachineType = c.MasterSize
		}
	}

	if c.DNSZone != "" {
		cluster.Spec.DNSZone = c.DNSZone
	}

	if c.Cloud != "" {
		cluster.Spec.CloudProvider = c.Cloud
	}

	if c.Project != "" {
		cluster.Spec.Project = c.Project
	}

	if clusterName != "" {
		cluster.Name = clusterName
	}

	if c.KubernetesVersion != "" {
		cluster.Spec.KubernetesVersion = c.KubernetesVersion
	}

	if c.VPCID != "" {
		cluster.Spec.NetworkID = c.VPCID
	}

	if c.NetworkCIDR != "" {
		cluster.Spec.NetworkCIDR = c.NetworkCIDR
	}

	if cluster.SharedVPC() && cluster.Spec.NetworkCIDR == "" {
		glog.Errorf("Must specify NetworkCIDR when VPC is set")
		os.Exit(1)
	}

	if cluster.Spec.CloudProvider == "" {
		for _, zone := range cluster.Spec.Zones {
			cloud, known := fi.GuessCloudForZone(zone.Name)
			if known {
				glog.Infof("Inferred --cloud=%s from zone %q", cloud, zone.Name)
				cluster.Spec.CloudProvider = string(cloud)
				break
			}
		}
	}

	if c.SSHPublicKey != "" {
		c.SSHPublicKey = utils.ExpandPath(c.SSHPublicKey)
	}

	err = cluster.PerformAssignments()
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}
	err = api.PerformAssignmentsInstanceGroups(instanceGroups)
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}

	err = api.WriteConfig(stateStore, cluster, instanceGroups)
	if err != nil {
		return fmt.Errorf("error writing updated configuration: %v", err)
	}

	cmd := &cloudup.CreateClusterCmd{
		Cluster:        cluster,
		InstanceGroups: instanceGroups,
		ModelStore:     c.ModelsBaseDir,
		Models:         strings.Split(c.Models, ","),
		StateStore:     stateStore,
		Target:         c.Target,
		NodeModel:      c.NodeModel,
		SSHPublicKey:   c.SSHPublicKey,
		OutDir:         c.OutDir,
	}
	//if *configFile != "" {
	//	//confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
	//	err := cmd.LoadConfig(configFile)
	//	if err != nil {
	//		glog.Errorf("error loading config: %v", err)
	//		os.Exit(1)
	//	}
	//}

	return cmd.Run()
}
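
parseZoneList is called several times above but not defined in the excerpt. A minimal sketch, assuming --zones and --master-zones take a comma-separated list of zone names:

// parseZoneList is a hypothetical sketch: it splits a comma-separated zone
// flag into trimmed, lower-cased names, skipping empty entries.
// Assumed import: strings.
func parseZoneList(s string) []string {
	var zones []string
	for _, zone := range strings.Split(s, ",") {
		zone = strings.ToLower(strings.TrimSpace(zone))
		if zone != "" {
			zones = append(zones, zone)
		}
	}
	return zones
}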
Example 4
func (c *UpgradeClusterCmd) Run() error {
	if c.NewClusterName == "" {
		return fmt.Errorf("--newname is required")
	}

	oldStateStore, err := rootCommand.StateStore()
	if err != nil {
		return err
	}

	newStateStore, err := rootCommand.StateStoreForCluster(c.NewClusterName)
	if err != nil {
		return err
	}

	cluster, instanceGroups, err := api.ReadConfig(oldStateStore)
	if err != nil {
		return fmt.Errorf("error reading configuration: %v", err)
	}

	oldClusterName := cluster.Name
	if oldClusterName == "" {
		return fmt.Errorf("(Old) ClusterName must be set in configuration")
	}

	if len(cluster.Spec.Zones) == 0 {
		return fmt.Errorf("Configuration must include Zones")
	}

	region := ""
	for _, zone := range cluster.Spec.Zones {
		if len(zone.Name) <= 2 {
			return fmt.Errorf("Invalid AWS zone: %q", zone.Name)
		}

		zoneRegion := zone.Name[:len(zone.Name)-1]
		if region != "" && zoneRegion != region {
			return fmt.Errorf("Clusters cannot span multiple regions")
		}

		region = zoneRegion
	}

	tags := map[string]string{"KubernetesCluster": oldClusterName}
	cloud, err := awsup.NewAWSCloud(region, tags)
	if err != nil {
		return fmt.Errorf("error initializing AWS client: %v", err)
	}

	d := &kutil.UpgradeCluster{}
	d.NewClusterName = c.NewClusterName
	d.OldClusterName = oldClusterName
	d.Cloud = cloud
	d.ClusterConfig = cluster
	d.InstanceGroups = instanceGroups
	d.OldStateStore = oldStateStore
	d.NewStateStore = newStateStore

	err = d.Upgrade()
	if err != nil {
		return err
	}

	return nil
}
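
The region loop above depends on the AWS naming convention that an availability zone is its region name plus a single-letter suffix (for example, us-east-1a is in us-east-1). A standalone sketch of that derivation, with a hypothetical helper name:

// regionFromZone is a hypothetical helper isolating the convention the loop
// relies on: strip the trailing zone letter to obtain the region, e.g.
// regionFromZone("us-east-1a") returns "us-east-1". Assumed import: fmt.
func regionFromZone(zone string) (string, error) {
	if len(zone) <= 2 {
		return "", fmt.Errorf("invalid AWS zone: %q", zone)
	}
	return zone[:len(zone)-1], nil
}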
Example 5
func (c *DeleteClusterCmd) Run() error {
	var stateStore fi.StateStore
	var err error

	var cloud fi.Cloud
	clusterName := ""
	region := ""
	if c.External {
		region = c.Region
		if region == "" {
			return fmt.Errorf("--region is required")
		}
		// Assign to the outer clusterName; ":=" here would shadow it and
		// leave the outer variable empty when building DeleteCluster below.
		clusterName = rootCommand.clusterName
		if clusterName == "" {
			return fmt.Errorf("--name is required (when --external)")
		}

		tags := map[string]string{"KubernetesCluster": clusterName}
		cloud, err = awsup.NewAWSCloud(c.Region, tags)
		if err != nil {
			return fmt.Errorf("error initializing AWS client: %v", err)
		}
	} else {
		stateStore, err = rootCommand.StateStore()
		if err != nil {
			return err
		}

		cluster, _, err := api.ReadConfig(stateStore)
		if err != nil {
			return err
		}

		if rootCommand.clusterName != cluster.Name {
			return fmt.Errorf("sanity check failed: cluster name mismatch")
		}
		clusterName = cluster.Name

		cloud, err = cloudup.BuildCloud(cluster)
		if err != nil {
			return err
		}
	}

	d := &kutil.DeleteCluster{}
	d.ClusterName = clusterName
	d.Region = region
	d.Cloud = cloud

	resources, err := d.ListResources()
	if err != nil {
		return err
	}

	if len(resources) == 0 {
		fmt.Printf("Nothing to delete\n")
	} else {
		columns := []string{"TYPE", "ID", "NAME"}
		fields := []string{"Type", "ID", "Name"}

		var b bytes.Buffer
		w := new(tabwriter.Writer)

		// Format in tab-separated columns with a tab stop of 8.
		w.Init(os.Stdout, 0, 8, 0, '\t', tabwriter.StripEscape)

		// Write the header row.
		for i, col := range columns {
			if i != 0 {
				b.WriteByte('\t')
			}
			b.WriteByte(tabwriter.Escape)
			b.WriteString(col)
			b.WriteByte(tabwriter.Escape)
		}
		b.WriteByte('\n')

		_, err := w.Write(b.Bytes())
		if err != nil {
			return fmt.Errorf("error writing to output: %v", err)
		}
		b.Reset()

		for _, t := range resources {
			for i := range columns {
				if i != 0 {
					b.WriteByte('\t')
				}

				v := reflect.ValueOf(t)
				if v.Kind() == reflect.Ptr {
					v = v.Elem()
				}
				fv := v.FieldByName(fields[i])

				s := fi.ValueAsString(fv)

				b.WriteByte(tabwriter.Escape)
				b.WriteString(s)
				b.WriteByte(tabwriter.Escape)
			}
			b.WriteByte('\n')

			_, err := w.Write(b.Bytes())
			if err != nil {
				return fmt.Errorf("error writing to output: %v", err)
			}
			b.Reset()
		}
		w.Flush()

		if !c.Yes {
			return fmt.Errorf("Must specify --yes to delete")
		}

		err := d.DeleteResources(resources)
		if err != nil {
			return err
		}
	}

	if stateStore != nil {
		if !c.Yes {
			return fmt.Errorf("Must specify --yes to delete")
		}
		err := api.DeleteConfig(stateStore)
		if err != nil {
			return fmt.Errorf("error removing cluster from state store: %v", err)
		}
	}

	fmt.Printf("\nCluster deleted\n")

	return nil
}
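
The row loop in this example pulls each column out of a resource by field name using reflection, then formats it with fi.ValueAsString. The same pattern, reduced to a self-contained sketch that substitutes fmt.Sprintf for the formatting step (the helper name fieldAsString is hypothetical):

// fieldAsString is a hypothetical sketch of the reflection pattern above:
// dereference a pointer if necessary, then read an exported field by name.
// Assumed imports: fmt, reflect.
func fieldAsString(obj interface{}, field string) string {
	v := reflect.ValueOf(obj)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	fv := v.FieldByName(field)
	if !fv.IsValid() {
		return ""
	}
	return fmt.Sprintf("%v", fv.Interface())
}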