// StateStoreForCluster builds a VFS-backed state store for the named cluster,
// rooted at the --state location.
func (c *RootCmd) StateStoreForCluster(clusterName string) (fi.StateStore, error) {
	if c.stateLocation == "" {
		return nil, fmt.Errorf("--state is required")
	}
	if clusterName == "" {
		return nil, fmt.Errorf("clusterName is required")
	}

	statePath, err := vfs.Context.BuildVfsPath(c.stateLocation)
	if err != nil {
		return nil, fmt.Errorf("error building state store path: %v", err)
	}

	isDryrun := false
	stateStore, err := fi.NewVFSStateStore(statePath, clusterName, isDryrun)
	if err != nil {
		return nil, fmt.Errorf("error building state store: %v", err)
	}
	return stateStore, nil
}
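// A minimal usage sketch (not part of the original file): "rootCommand" is the
// package-level RootCmd instance referenced elsewhere in this package, and the
// cluster name would normally come from the --name flag.
//
//	stateStore, err := rootCommand.StateStoreForCluster("testcluster.mydomain.com")
//	if err != nil {
//		glog.Fatalf("error building state store: %v", err)
//	}
//	cluster, instanceGroups, err := api.ReadConfig(stateStore)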
// buildDefaultCreateCluster constructs a CreateClusterCmd populated with test
// defaults: an AWS cluster spanning three us-east-1 zones, backed by a local
// "test-state" directory.
func buildDefaultCreateCluster() *CreateClusterCmd {
	var err error

	c := &CreateClusterCmd{}
	c.ClusterConfig = &cloudup.CloudConfig{}
	c.ClusterConfig.ClusterName = "testcluster.mydomain.com"
	c.ClusterConfig.NodeZones = []string{"us-east-1a", "us-east-1b", "us-east-1c"}
	c.ClusterConfig.MasterZones = c.ClusterConfig.NodeZones
	c.SSHPublicKey = "~/.ssh/id_rsa.pub"
	c.ClusterConfig.CloudProvider = "aws"

	dryrun := false
	// NewVFSStateStore takes (path, clusterName, isDryrun), matching the other
	// call sites in this file; the cluster name was missing here.
	c.StateStore, err = fi.NewVFSStateStore(vfs.NewFSPath("test-state"), c.ClusterConfig.ClusterName, dryrun)
	if err != nil {
		glog.Fatalf("error building state store: %v", err)
	}
	return c
}
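// A hedged sketch of how this helper might be driven from a test; the test
// name and the use of DryRun here are illustrative assumptions, not code from
// this package.
//
//	func TestCreateClusterDefaults(t *testing.T) {
//		c := buildDefaultCreateCluster()
//		c.DryRun = true // render a dryrun plan instead of touching the cloud
//		if err := c.Run(); err != nil {
//			t.Fatalf("Run failed: %v", err)
//		}
//	}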
// Run builds the cluster and instance group configuration from the CLI flags,
// persists it to the state store, and invokes the cloudup create command.
func (c *CreateClusterCmd) Run() error {
	isDryrun := false
	if c.DryRun {
		isDryrun = true
		c.Target = "dryrun"
	}

	stateStoreLocation := rootCommand.stateLocation
	if stateStoreLocation == "" {
		return fmt.Errorf("--state is required")
	}

	clusterName := rootCommand.clusterName
	if clusterName == "" {
		return fmt.Errorf("--name is required")
	}

	// TODO: Reuse rootCommand stateStore logic?
	statePath, err := vfs.Context.BuildVfsPath(stateStoreLocation)
	if err != nil {
		return fmt.Errorf("error building state location: %v", err)
	}

	if c.OutDir == "" {
		c.OutDir = "out"
	}

	stateStore, err := fi.NewVFSStateStore(statePath, clusterName, isDryrun)
	if err != nil {
		return fmt.Errorf("error building state store: %v", err)
	}

	cluster, instanceGroups, err := api.ReadConfig(stateStore)
	if err != nil {
		return fmt.Errorf("error loading configuration: %v", err)
	}

	if c.Zones != "" {
		existingZones := make(map[string]*api.ClusterZoneSpec)
		for _, zone := range cluster.Spec.Zones {
			existingZones[zone.Name] = zone
		}

		for _, zone := range parseZoneList(c.Zones) {
			if existingZones[zone] == nil {
				cluster.Spec.Zones = append(cluster.Spec.Zones, &api.ClusterZoneSpec{
					Name: zone,
				})
			}
		}
	}

	if len(cluster.Spec.Zones) == 0 {
		return fmt.Errorf("must specify at least one zone for the cluster (use --zones)")
	}

	var masters []*api.InstanceGroup
	var nodes []*api.InstanceGroup

	for _, group := range instanceGroups {
		if group.IsMaster() {
			masters = append(masters, group)
		} else {
			nodes = append(nodes, group)
		}
	}

	if c.MasterZones == "" {
		if len(masters) == 0 {
			// Default to putting a master into every zone
			// TODO: just the first 1 or 3 zones; or should we force users to declare?
			for _, zone := range cluster.Spec.Zones {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone.Name}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone.Name // Subsequent masters (if we support that) could be <zone>-1, <zone>-2
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		}
	} else {
		if len(masters) == 0 {
			for _, zone := range parseZoneList(c.MasterZones) {
				g := &api.InstanceGroup{}
				g.Spec.Role = api.InstanceGroupRoleMaster
				g.Spec.Zones = []string{zone}
				g.Spec.MinSize = fi.Int(1)
				g.Spec.MaxSize = fi.Int(1)
				g.Name = "master-" + zone
				instanceGroups = append(instanceGroups, g)
				masters = append(masters, g)
			}
		} else {
			// This is hard, because of the etcd cluster
			glog.Errorf("Cannot change master-zones from the CLI")
			os.Exit(1)
		}
	}

	if len(cluster.Spec.EtcdClusters) == 0 {
		zones := sets.NewString()
		for _, group := range instanceGroups {
			for _, zone := range group.Spec.Zones {
				zones.Insert(zone)
			}
		}
		etcdZones := zones.List()

		if (len(etcdZones) % 2) == 0 {
			// Not technically a requirement, but doesn't really make sense to allow
			glog.Errorf("There should be an odd number of master-zones, for etcd's quorum. Hint: Use --zones and --master-zones to declare node zones and master zones separately.")
			os.Exit(1)
		}

		for _, etcdCluster := range EtcdClusters {
			etcd := &api.EtcdClusterSpec{}
			etcd.Name = etcdCluster
			for _, zone := range etcdZones {
				m := &api.EtcdMemberSpec{}
				m.Name = zone
				m.Zone = zone
				etcd.Members = append(etcd.Members, m)
			}
			cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcd)
		}
	}

	if len(nodes) == 0 {
		g := &api.InstanceGroup{}
		g.Spec.Role = api.InstanceGroupRoleNode
		g.Name = "nodes"
		instanceGroups = append(instanceGroups, g)
		nodes = append(nodes, g)
	}

	if c.NodeSize != "" {
		for _, group := range nodes {
			group.Spec.MachineType = c.NodeSize
		}
	}

	if c.Image != "" {
		for _, group := range instanceGroups {
			group.Spec.Image = c.Image
		}
	}

	if c.NodeCount != 0 {
		for _, group := range nodes {
			group.Spec.MinSize = fi.Int(c.NodeCount)
			group.Spec.MaxSize = fi.Int(c.NodeCount)
		}
	}

	if c.MasterSize != "" {
		for _, group := range masters {
			group.Spec.MachineType = c.MasterSize
		}
	}

	if c.DNSZone != "" {
		cluster.Spec.DNSZone = c.DNSZone
	}

	if c.Cloud != "" {
		cluster.Spec.CloudProvider = c.Cloud
	}

	if c.Project != "" {
		cluster.Spec.Project = c.Project
	}

	if clusterName != "" {
		cluster.Name = clusterName
	}

	if c.KubernetesVersion != "" {
		cluster.Spec.KubernetesVersion = c.KubernetesVersion
	}

	if c.VPCID != "" {
		cluster.Spec.NetworkID = c.VPCID
	}

	if c.NetworkCIDR != "" {
		cluster.Spec.NetworkCIDR = c.NetworkCIDR
	}

	if cluster.SharedVPC() && cluster.Spec.NetworkCIDR == "" {
		glog.Errorf("Must specify NetworkCIDR when VPC is set")
		os.Exit(1)
	}

	if cluster.Spec.CloudProvider == "" {
		for _, zone := range cluster.Spec.Zones {
			cloud, known := fi.GuessCloudForZone(zone.Name)
			if known {
				glog.Infof("Inferred --cloud=%s from zone %q", cloud, zone.Name)
				cluster.Spec.CloudProvider = string(cloud)
				break
			}
		}
	}

	if c.SSHPublicKey != "" {
		c.SSHPublicKey = utils.ExpandPath(c.SSHPublicKey)
	}

	err = cluster.PerformAssignments()
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}
	err = api.PerformAssignmentsInstanceGroups(instanceGroups)
	if err != nil {
		return fmt.Errorf("error populating configuration: %v", err)
	}

	err = api.WriteConfig(stateStore, cluster, instanceGroups)
	if err != nil {
		return fmt.Errorf("error writing updated configuration: %v", err)
	}

	cmd := &cloudup.CreateClusterCmd{
		Cluster:        cluster,
		InstanceGroups: instanceGroups,
		ModelStore:     c.ModelsBaseDir,
		Models:         strings.Split(c.Models, ","),
		StateStore:     stateStore,
		Target:         c.Target,
		NodeModel:      c.NodeModel,
		SSHPublicKey:   c.SSHPublicKey,
		OutDir:         c.OutDir,
	}

	//if *configFile != "" {
	//	//confFile := path.Join(cmd.StateDir, "kubernetes.yaml")
	//	err := cmd.LoadConfig(configFile)
	//	if err != nil {
	//		glog.Errorf("error loading config: %v", err)
	//		os.Exit(1)
	//	}
	//}

	return cmd.Run()
}
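// parseZoneList is called above but not defined in this section. A minimal
// sketch of the assumed behavior (split the comma-separated --zones value,
// trim whitespace, drop empty entries, normalize case) might look like:
//
//	func parseZoneList(s string) []string {
//		var zones []string
//		for _, v := range strings.Split(s, ",") {
//			v = strings.TrimSpace(v)
//			if v == "" {
//				continue
//			}
//			zones = append(zones, strings.ToLower(v))
//		}
//		return zones
//	}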