Example #1
File: iamrole.go Project: crohling/kops
func (e *IAMRole) Find(c *fi.Context) (*IAMRole, error) {
	cloud := c.Cloud.(*awsup.AWSCloud)

	request := &iam.GetRoleInput{RoleName: e.Name}

	response, err := cloud.IAM.GetRole(request)
	if awsErr, ok := err.(awserr.Error); ok {
		if awsErr.Code() == "NoSuchEntity" {
			return nil, nil
		}
	}
	if err != nil {
		return nil, fmt.Errorf("error getting role: %v", err)
	}

	r := response.Role
	actual := &IAMRole{}
	actual.ID = r.RoleId
	actual.Name = r.RoleName
	if r.AssumeRolePolicyDocument != nil {
		// The AssumeRolePolicyDocument is URI encoded (?)
		actualPolicy := *r.AssumeRolePolicyDocument
		actualPolicy, err = url.QueryUnescape(actualPolicy)
		if err != nil {
			return nil, fmt.Errorf("error parsing AssumeRolePolicyDocument for IAMRole %q: %v", e.Name, err)
		}

		// The RolePolicyDocument is reformatted by AWS
		// We parse both as JSON; if the json forms are equal we pretend the actual value is the expected value
		if e.RolePolicyDocument != nil {
			expectedPolicy, err := e.RolePolicyDocument.AsString()
			if err != nil {
				return nil, fmt.Errorf("error reading expected RolePolicyDocument for IAMRole %q: %v", e.Name, err)
			}
			expectedJson := make(map[string]interface{})
			err = json.Unmarshal([]byte(expectedPolicy), &expectedJson)
			if err != nil {
				return nil, fmt.Errorf("error parsing expected RolePolicyDocument for IAMRole %q: %v", e.Name, err)
			}
			actualJson := make(map[string]interface{})
			err = json.Unmarshal([]byte(actualPolicy), &actualJson)
			if err != nil {
				return nil, fmt.Errorf("error parsing actual RolePolicyDocument for IAMRole %q: %v", e.Name, err)
			}

			if reflect.DeepEqual(actualJson, expectedJson) {
				glog.V(2).Infof("actual RolePolicyDocument was json-equal to expected; returning expected value")
				actualPolicy = expectedPolicy
			}
		}

		actual.RolePolicyDocument = fi.WrapResource(fi.NewStringResource(actualPolicy))
	}

	glog.V(2).Infof("found matching IAMRole %q", *actual.ID)
	e.ID = actual.ID

	return actual, nil
}
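Note: the JSON comparison above is what suppresses spurious diffs when AWS reformats a policy document. A minimal, self-contained sketch of the same technique, using a hypothetical policy string rather than a real AWS response:

package main

import (
	"encoding/json"
	"fmt"
	"net/url"
	"reflect"
)

// jsonEqual reports whether two JSON documents are semantically equal,
// ignoring key order and whitespace.
func jsonEqual(a, b string) (bool, error) {
	var ja, jb map[string]interface{}
	if err := json.Unmarshal([]byte(a), &ja); err != nil {
		return false, err
	}
	if err := json.Unmarshal([]byte(b), &jb); err != nil {
		return false, err
	}
	return reflect.DeepEqual(ja, jb), nil
}

func main() {
	// AWS returns AssumeRolePolicyDocument URL-encoded; decode it first.
	encoded := "%7B%22Version%22%3A%20%222012-10-17%22%7D"
	actual, err := url.QueryUnescape(encoded)
	if err != nil {
		panic(err)
	}
	expected := `{"Version":"2012-10-17"}`

	eq, err := jsonEqual(actual, expected)
	if err != nil {
		panic(err)
	}
	fmt.Println(eq) // true, despite formatting differences
}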
Example #2
File: service.go Project: crohling/kops
func (_ *Service) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *Service) error {
	serviceName := e.Name

	servicePath := path.Join(systemdSystemPath, serviceName)
	err := t.WriteFile(servicePath, fi.NewStringResource(*e.Definition), 0644, 0755)
	if err != nil {
		return err
	}

	if fi.BoolValue(e.ManageState) {
		t.AddCommand(cloudinit.Once, "systemctl", "daemon-reload")
		t.AddCommand(cloudinit.Once, "systemctl", "start", "--no-block", serviceName)
	}

	return nil
}
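fi.BoolValue above is a nil-safe dereference for the *bool fields used throughout these API types. A plausible equivalent, assuming the obvious semantics rather than quoting the kops source:

package main

import "fmt"

// BoolValue returns the value of a *bool, treating nil as false.
func BoolValue(b *bool) bool {
	return b != nil && *b
}

func main() {
	var unset *bool
	t := true
	fmt.Println(BoolValue(unset), BoolValue(&t)) // false true
}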
Example #3
func (e *IAMRolePolicy) Find(c *fi.Context) (*IAMRolePolicy, error) {
	cloud := c.Cloud.(*awsup.AWSCloud)

	request := &iam.GetRolePolicyInput{
		RoleName:   e.Role.Name,
		PolicyName: e.Name,
	}

	response, err := cloud.IAM.GetRolePolicy(request)
	if awsErr, ok := err.(awserr.Error); ok {
		if awsErr.Code() == "NoSuchEntity" {
			return nil, nil
		}
	}
	if err != nil {
		return nil, fmt.Errorf("error getting role: %v", err)
	}

	p := response
	actual := &IAMRolePolicy{}
	actual.Role = &IAMRole{Name: p.RoleName}
	if aws.StringValue(e.Role.Name) == aws.StringValue(p.RoleName) {
		actual.Role.ID = e.Role.ID
	}
	if p.PolicyDocument != nil {
		// The PolicyDocument is URI encoded (?)
		policy := *p.PolicyDocument
		policy, err = url.QueryUnescape(policy)
		if err != nil {
			return nil, fmt.Errorf("error parsing PolicyDocument for IAMRolePolicy %q: %v", e.Name, err)
		}
		actual.PolicyDocument = fi.WrapResource(fi.NewStringResource(policy))
	}
	actual.Name = p.PolicyName

	e.ID = actual.ID

	return actual, nil
}
Example #4
File: loader.go Project: crohling/kops
func (r *Loader) handleFile(i *loader.TreeWalkItem) error {
	var task *nodetasks.File
	defaultFileType := nodetasks.FileType_File

	if strings.HasSuffix(i.RelativePath, ".template") {
		contents, err := i.ReadString()
		if err != nil {
			return err
		}

		// TODO: Use template resource here to defer execution?
		destPath := "/" + strings.TrimSuffix(i.RelativePath, ".template")
		name := strings.TrimSuffix(i.Name, ".template")
		expanded, err := r.executeTemplate(name, contents)
		if err != nil {
			return fmt.Errorf("error executing template %q: %v", i.RelativePath, err)
		}

		task, err = nodetasks.NewFileTask(name, fi.NewStringResource(expanded), destPath, i.Meta)
		if err != nil {
			return fmt.Errorf("error building task %q: %v", i.RelativePath, err)
		}
	} else if strings.HasSuffix(i.RelativePath, ".asset") {
		contents, err := i.ReadBytes()
		if err != nil {
			return err
		}

		destPath := "/" + strings.TrimSuffix(i.RelativePath, ".asset")
		name := strings.TrimSuffix(i.Name, ".asset")

		def := &nodetasks.AssetDefinition{}
		err = json.Unmarshal(contents, def)
		if err != nil {
			return fmt.Errorf("error parsing json for asset %q: %v", name, err)
		}

		asset, err := r.assets.Find(name, def.AssetPath)
		if err != nil {
			return fmt.Errorf("error trying to locate asset %q: %v", name, err)
		}
		if asset == nil {
			return fmt.Errorf("unable to locate asset %q", name)
		}

		task, err = nodetasks.NewFileTask(i.Name, asset, destPath, i.Meta)
		if err != nil {
			return fmt.Errorf("error building task %q: %v", i.RelativePath, err)
		}
	} else {
		stat, err := os.Stat(i.Path)
		if err != nil {
			return fmt.Errorf("error doing stat on %q: %v", i.Path, err)
		}
		var contents fi.Resource
		if stat.IsDir() {
			defaultFileType = nodetasks.FileType_Directory
		} else {
			contents = fi.NewFileResource(i.Path)
		}
		task, err = nodetasks.NewFileTask(i.Name, contents, "/"+i.RelativePath, i.Meta)
		if err != nil {
			return fmt.Errorf("error building task %q: %v", i.RelativePath, err)
		}
	}

	// Every branch above either returned an error or assigned task,
	// so it is safe to dereference here.
	if task.Type == "" {
		task.Type = defaultFileType
	}

	glog.V(2).Infof("path %q -> task %v", i.Path, task)

	key := "file/" + i.RelativePath
	r.tasks[key] = task
	return nil
}
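A subtlety worth calling out in functions shaped like handleFile: := inside a branch declares a new err that shadows any outer err, so a single deferred error check after the branches can silently see nil - which is why the error from NewFileTask is checked inside each branch above. A minimal illustration of the pitfall:

package main

import (
	"errors"
	"fmt"
)

func mayFail() (string, error) {
	return "", errors.New("boom")
}

func main() {
	var err error
	if true {
		// ":=" declares a NEW err scoped to this block, shadowing the
		// outer err declared above.
		v, err := mayFail()
		_, _ = v, err
	}
	// The outer err was never assigned, so this prints <nil> even
	// though mayFail failed.
	fmt.Println(err)
}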
Example #5
func (e *LaunchConfiguration) Find(c *fi.Context) (*LaunchConfiguration, error) {
	cloud := c.Cloud.(*awsup.AWSCloud)

	request := &autoscaling.DescribeLaunchConfigurationsInput{}

	prefix := *e.Name + "-"

	configurations := map[string]*autoscaling.LaunchConfiguration{}
	err := cloud.Autoscaling.DescribeLaunchConfigurationsPages(request, func(page *autoscaling.DescribeLaunchConfigurationsOutput, lastPage bool) bool {
		for _, l := range page.LaunchConfigurations {
			name := aws.StringValue(l.LaunchConfigurationName)
			if strings.HasPrefix(name, prefix) {
				suffix := name[len(prefix):]
				configurations[suffix] = l
			}
		}
		return true
	})
	if err != nil {
		return nil, fmt.Errorf("error listing LaunchConfigurations: %v", err)
	}

	if len(configurations) == 0 {
		return nil, nil
	}

	var newest *autoscaling.LaunchConfiguration
	var newestTime int64
	for _, lc := range configurations {
		t := lc.CreatedTime.UnixNano()
		if t > newestTime {
			newestTime = t
			newest = lc
		}
	}

	lc := newest

	glog.V(2).Infof("found existing AutoscalingLaunchConfiguration: %q", *lc.LaunchConfigurationName)

	actual := &LaunchConfiguration{
		Name:               e.Name,
		ID:                 lc.LaunchConfigurationName,
		ImageID:            lc.ImageId,
		InstanceType:       lc.InstanceType,
		SSHKey:             &SSHKey{Name: lc.KeyName},
		AssociatePublicIP:  lc.AssociatePublicIpAddress,
		IAMInstanceProfile: &IAMInstanceProfile{Name: lc.IamInstanceProfile},
	}

	securityGroups := []*SecurityGroup{}
	for _, sgID := range lc.SecurityGroups {
		securityGroups = append(securityGroups, &SecurityGroup{ID: sgID})
	}
	actual.SecurityGroups = securityGroups

	actual.BlockDeviceMappings = make(map[string]*BlockDeviceMapping)
	for _, b := range lc.BlockDeviceMappings {
		deviceName, bdm := BlockDeviceMappingFromAutoscaling(b)
		actual.BlockDeviceMappings[deviceName] = bdm
	}
	userData, err := base64.StdEncoding.DecodeString(*lc.UserData)
	if err != nil {
		return nil, fmt.Errorf("error decoding UserData: %v", err)
	}
	actual.UserData = fi.WrapResource(fi.NewStringResource(string(userData)))

	// Avoid spurious changes on ImageId
	if e.ImageID != nil && actual.ImageID != nil && *actual.ImageID != *e.ImageID {
		image, err := cloud.ResolveImage(*e.ImageID)
		if err != nil {
			glog.Warningf("unable to resolve image: %q: %v", *e.ImageID, err)
		} else if image == nil {
			glog.Warningf("unable to resolve image: %q: not found", *e.ImageID)
		} else if aws.StringValue(image.ImageId) == *actual.ImageID {
			glog.V(4).Infof("Returning matching ImageId as expected name: %q -> %q", *actual.ImageID, *e.ImageID)
			actual.ImageID = e.ImageID
		}
	}

	if e.ID == nil {
		e.ID = actual.ID
	}

	return actual, nil
}
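The function versions launch configurations by sharing a name prefix and keeping the most recently created entry. A toy version of that selection step, with hypothetical names and times in place of the AWS SDK types:

package main

import (
	"fmt"
	"time"
)

type launchConfig struct {
	Name        string
	CreatedTime time.Time
}

// newestConfig returns the most recently created configuration,
// or nil for an empty slice.
func newestConfig(configs []launchConfig) *launchConfig {
	var newest *launchConfig
	for i := range configs {
		if newest == nil || configs[i].CreatedTime.After(newest.CreatedTime) {
			newest = &configs[i]
		}
	}
	return newest
}

func main() {
	configs := []launchConfig{
		{Name: "nodes-20160601", CreatedTime: time.Date(2016, 6, 1, 0, 0, 0, 0, time.UTC)},
		{Name: "nodes-20160715", CreatedTime: time.Date(2016, 7, 15, 0, 0, 0, 0, time.UTC)},
	}
	fmt.Println(newestConfig(configs).Name) // nodes-20160715
}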
Example #6
func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) {
	cloud := c.Cloud.(*gce.GCECloud)

	r, err := cloud.Compute.InstanceTemplates.Get(cloud.Project, *e.Name).Do()
	if err != nil {
		if gce.IsNotFound(err) {
			return nil, nil
		}
		return nil, fmt.Errorf("error listing InstanceTemplates: %v", err)
	}

	actual := &InstanceTemplate{}
	actual.Name = &r.Name

	p := r.Properties

	for _, tag := range p.Tags.Items {
		actual.Tags = append(actual.Tags, tag)
	}
	actual.MachineType = fi.String(lastComponent(p.MachineType))
	actual.CanIPForward = &p.CanIpForward

	bootDiskImage, err := ShortenImageURL(cloud.Project, p.Disks[0].InitializeParams.SourceImage)
	if err != nil {
		return nil, fmt.Errorf("error parsing source image URL: %v", err)
	}
	actual.BootDiskImage = fi.String(bootDiskImage)
	actual.BootDiskType = &p.Disks[0].InitializeParams.DiskType
	actual.BootDiskSizeGB = &p.Disks[0].InitializeParams.DiskSizeGb

	if p.Scheduling != nil {
		actual.Preemptible = &p.Scheduling.Preemptible
	}
	if len(p.NetworkInterfaces) != 0 {
		ni := p.NetworkInterfaces[0]
		actual.Network = &Network{Name: fi.String(lastComponent(ni.Network))}
	}

	for _, serviceAccount := range p.ServiceAccounts {
		for _, scope := range serviceAccount.Scopes {
			actual.Scopes = append(actual.Scopes, scopeToShortForm(scope))
		}
	}

	//for i, disk := range p.Disks {
	//	if i == 0 {
	//		source := disk.Source
	//
	//		// TODO: Parse source URL instead of assuming same project/zone?
	//		name := lastComponent(source)
	//		d, err := cloud.Compute.Disks.Get(cloud.Project, *e.Zone, name).Do()
	//		if err != nil {
	//			if gce.IsNotFound(err) {
	//				return nil, fmt.Errorf("disk not found %q: %v", source, err)
	//			}
	//			return nil, fmt.Errorf("error querying for disk %q: %v", source, err)
	//		} else {
	//			imageURL, err := gce.ParseGoogleCloudURL(d.SourceImage)
	//			if err != nil {
	//				return nil, fmt.Errorf("unable to parse image URL: %q", d.SourceImage)
	//			}
	//			actual.Image = fi.String(imageURL.Project + "/" + imageURL.Name)
	//		}
	//	}
	//}

	if p.Metadata != nil {
		actual.Metadata = make(map[string]fi.Resource)
		for _, meta := range p.Metadata.Items {
			actual.Metadata[meta.Key] = fi.NewStringResource(*meta.Value)
		}
	}

	return actual, nil
}
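lastComponent and scopeToShortForm are helpers not shown in this listing; lastComponent presumably reduces a fully qualified GCE resource URL to its short name. A sketch of that assumed behavior:

package main

import (
	"fmt"
	"strings"
)

// lastComponent returns the substring after the final "/", so a fully
// qualified resource URL collapses to its trailing name.
func lastComponent(s string) string {
	if i := strings.LastIndex(s, "/"); i >= 0 {
		return s[i+1:]
	}
	return s
}

func main() {
	url := "https://www.googleapis.com/compute/v1/projects/p/zones/us-central1-a/machineTypes/n1-standard-1"
	fmt.Println(lastComponent(url)) // n1-standard-1
}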
Example #7
File: instance.go Project: crohling/kops
func (e *Instance) Find(c *fi.Context) (*Instance, error) {
	cloud := c.Cloud.(*gce.GCECloud)

	r, err := cloud.Compute.Instances.Get(cloud.Project, *e.Zone, *e.Name).Do()
	if err != nil {
		if gce.IsNotFound(err) {
			return nil, nil
		}
		return nil, fmt.Errorf("error listing Instances: %v", err)
	}

	actual := &Instance{}
	actual.Name = &r.Name
	for _, tag := range r.Tags.Items {
		actual.Tags = append(actual.Tags, tag)
	}
	actual.Zone = fi.String(lastComponent(r.Zone))
	actual.MachineType = fi.String(lastComponent(r.MachineType))
	actual.CanIPForward = &r.CanIpForward

	if r.Scheduling != nil {
		actual.Preemptible = &r.Scheduling.Preemptible
	}
	if len(r.NetworkInterfaces) != 0 {
		ni := r.NetworkInterfaces[0]
		actual.Network = &Network{Name: fi.String(lastComponent(ni.Network))}
		if len(ni.AccessConfigs) != 0 {
			ac := ni.AccessConfigs[0]
			if ac.NatIP != "" {
				addr, err := cloud.Compute.Addresses.List(cloud.Project, cloud.Region).Filter("address eq " + ac.NatIP).Do()
				if err != nil {
					return nil, fmt.Errorf("error querying for address %q: %v", ac.NatIP, err)
				} else if len(addr.Items) != 0 {
					actual.IPAddress = &IPAddress{Name: &addr.Items[0].Name}
				} else {
					return nil, fmt.Errorf("address not found %q: %v", ac.NatIP, err)
				}
			}
		}
	}

	for _, serviceAccount := range r.ServiceAccounts {
		for _, scope := range serviceAccount.Scopes {
			actual.Scopes = append(actual.Scopes, scopeToShortForm(scope))
		}
	}

	actual.Disks = make(map[string]*PersistentDisk)
	for i, disk := range r.Disks {
		if i == 0 {
			source := disk.Source

			// TODO: Parse source URL instead of assuming same project/zone?
			name := lastComponent(source)
			d, err := cloud.Compute.Disks.Get(cloud.Project, *e.Zone, name).Do()
			if err != nil {
				if gce.IsNotFound(err) {
					return nil, fmt.Errorf("disk not found %q: %v", source, err)
				}
				return nil, fmt.Errorf("error querying for disk %q: %v", source, err)
			}

			image, err := ShortenImageURL(cloud.Project, d.SourceImage)
			if err != nil {
				return nil, fmt.Errorf("error parsing source image URL: %v", err)
			}
			actual.Image = fi.String(image)
		} else {
			url, err := gce.ParseGoogleCloudURL(disk.Source)
			if err != nil {
				return nil, fmt.Errorf("unable to parse disk source URL: %q", disk.Source)
			}

			actual.Disks[disk.DeviceName] = &PersistentDisk{Name: &url.Name}
		}
	}

	if r.Metadata != nil {
		actual.Metadata = make(map[string]fi.Resource)
		for _, i := range r.Metadata.Items {
			if i.Value == nil {
				glog.Warningf("ignoring GCE instance metadata entry with nil-value: %q", i.Key)
				continue
			}
			actual.Metadata[i.Key] = fi.NewStringResource(*i.Value)
		}
		actual.metadataFingerprint = r.Metadata.Fingerprint
	}

	return actual, nil
}
Example #8
File: service.go Project: crohling/kops
func (_ *Service) RenderLocal(t *local.LocalTarget, a, e, changes *Service) error {
	serviceName := e.Name

	action := ""

	if changes.Running != nil && fi.BoolValue(e.ManageState) {
		if fi.BoolValue(e.Running) {
			action = "restart"
		} else {
			action = "stop"
		}
	}

	if changes.Definition != nil {
		servicePath := path.Join(systemdSystemPath, serviceName)
		err := fi.WriteFile(servicePath, fi.NewStringResource(*e.Definition), 0644, 0755)
		if err != nil {
			return fmt.Errorf("error writing systemd service file: %v", err)
		}

		glog.Infof("Reloading systemd configuration")
		cmd := exec.Command("systemctl", "daemon-reload")
		output, err := cmd.CombinedOutput()
		if err != nil {
			return fmt.Errorf("error doing systemd daemon-reload: %v\nOutput: %s", err, output)
		}
	}

	// "SmartRestart" - look at the obvious dependencies in the systemd service, restart if start time older
	if fi.BoolValue(e.ManageState) && fi.BoolValue(e.SmartRestart) {
		definition := fi.StringValue(e.Definition)
		if definition == "" && a != nil {
			definition = fi.StringValue(a.Definition)
		}

		if action == "" && fi.BoolValue(e.Running) && definition != "" {
			dependencies, err := getSystemdDependencies(serviceName, definition)
			if err != nil {
				return err
			}

			var newest time.Time
			for _, dependency := range dependencies {
				stat, err := os.Stat(dependency)
				if err != nil {
					glog.Infof("Ignoring error checking service dependency %q: %v", dependency, err)
					continue
				}
				modTime := stat.ModTime()
				if newest.IsZero() || newest.Before(modTime) {
					newest = modTime
				}
			}

			if !newest.IsZero() {
				properties, err := getSystemdStatus(e.Name)
				if err != nil {
					return err
				}

				startedAt := properties["ExecMainStartTimestamp"]
				if startedAt == "" {
					glog.Warningf("service was running, but did not have ExecMainStartTimestamp: %q", serviceName)
				} else {
					startedAtTime, err := time.Parse("Mon 2006-01-02 15:04:05 MST", startedAt)
					if err != nil {
						return fmt.Errorf("unable to parse service ExecMainStartTimestamp: %q", startedAt)
					}
					if startedAtTime.Before(newest) {
						glog.V(2).Infof("will restart service %q because dependency changed after service start", serviceName)
						action = "restart"
					} else {
						glog.V(2).Infof("will not restart service %q - started after dependencies", serviceName)
					}
				}
			}
		}
	}

	if action != "" && fi.BoolValue(e.ManageState) {
		glog.Infof("Restarting service %q", serviceName)
		cmd := exec.Command("systemctl", action, serviceName)
		output, err := cmd.CombinedOutput()
		if err != nil {
			return fmt.Errorf("error doing systemd %s %s: %v\nOutput: %s", action, serviceName, err, output)
		}
	}

	return nil
}
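The SmartRestart logic hinges on parsing systemd's ExecMainStartTimestamp, which systemctl prints in a layout matching Go's reference time "Mon 2006-01-02 15:04:05 MST". A standalone sketch of that comparison, with an illustrative timestamp:

package main

import (
	"fmt"
	"time"
)

func main() {
	startedAt := "Tue 2016-07-12 09:30:00 UTC"
	startedAtTime, err := time.Parse("Mon 2006-01-02 15:04:05 MST", startedAt)
	if err != nil {
		panic(err)
	}

	// Pretend a dependency file was modified after the service started.
	depModTime := startedAtTime.Add(5 * time.Minute)
	if startedAtTime.Before(depModTime) {
		fmt.Println("dependency changed after service start; restart needed")
	}
}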
Example #9
func (c *CreateClusterCmd) Run() error {
	// TODO: Make these configurable?
	useMasterASG := true
	useMasterLB := false

	//// We (currently) have to use protokube with ASGs
	//useProtokube := useMasterASG

	//if c.NodeUpConfig == nil {
	//	c.NodeUpConfig = &nodeup.NodeConfig{}
	//}

	clusterName := c.Cluster.Name
	if clusterName == "" {
		return fmt.Errorf("ClusterName is required (e.g. --name=mycluster.myzone.com)")
	}

	if c.Cluster.Spec.MasterPublicName == "" {
		c.Cluster.Spec.MasterPublicName = "api." + c.Cluster.Name
	}
	if c.Cluster.Spec.DNSZone == "" {
		tokens := strings.Split(c.Cluster.Spec.MasterPublicName, ".")
		c.Cluster.Spec.DNSZone = strings.Join(tokens[len(tokens)-2:], ".")
		glog.Infof("Defaulting DNS zone to: %s", c.Cluster.Spec.DNSZone)
	}

	if len(c.Cluster.Spec.Zones) == 0 {
		// TODO: Auto choose zones from region?
		return fmt.Errorf("must configuration at least one Zone (use --zones)")
	}

	if len(c.InstanceGroups) == 0 {
		return fmt.Errorf("must configure at least one InstanceGroup")
	}

	for i, g := range c.InstanceGroups {
		if g.Name == "" {
			return fmt.Errorf("InstanceGroup #%d Name not set", i)
		}
		if g.Spec.Role == "" {
			return fmt.Errorf("InstanceGroup %q Role not set", g.Name)
		}
	}

	masters, err := c.populateMasters()
	if err != nil {
		return err
	}
	c.masters = masters
	if len(c.masters) == 0 {
		return fmt.Errorf("must configure at least one Master InstanceGroup")
	}

	nodes, err := c.populateNodeSets()
	if err != nil {
		return err
	}
	c.nodes = nodes
	if len(c.nodes) == 0 {
		return fmt.Errorf("must configure at least one Node InstanceGroup")
	}

	err = c.assignSubnets()
	if err != nil {
		return err
	}

	// Check that instance groups are defined in valid zones
	{
		clusterZones := make(map[string]*api.ClusterZoneSpec)
		for _, z := range c.Cluster.Spec.Zones {
			if clusterZones[z.Name] != nil {
				return fmt.Errorf("Zones contained a duplicate value: %v", z.Name)
			}
			clusterZones[z.Name] = z
		}

		for _, group := range c.InstanceGroups {
			for _, z := range group.Spec.Zones {
				if clusterZones[z] == nil {
					return fmt.Errorf("InstanceGroup %q is configured in %q, but this is not configured as a Zone in the cluster", group.Name, z)
				}
			}
		}

		// Check etcd configuration
		{
			for i, etcd := range c.Cluster.Spec.EtcdClusters {
				if etcd.Name == "" {
					return fmt.Errorf("EtcdClusters #%d did not specify a Name", i)
				}

				for i, m := range etcd.Members {
					if m.Name == "" {
						return fmt.Errorf("EtcdMember #%d of etcd-cluster %s did not specify a Name", i, etcd.Name)
					}

					z := m.Zone
					if z == "" {
						return fmt.Errorf("EtcdMember %s:%s did not specify a Zone", etcd.Name, m.Name)
					}
				}

				etcdZones := make(map[string]*api.EtcdMemberSpec)
				etcdNames := make(map[string]*api.EtcdMemberSpec)

				for _, m := range etcd.Members {
					if etcdNames[m.Name] != nil {
						return fmt.Errorf("EtcdMembers found with same name %q in etcd-cluster %q", m.Name, etcd.Name)
					}

					if etcdZones[m.Zone] != nil {
						// Maybe this should just be a warning
						return fmt.Errorf("EtcdMembers are in the same zone %q in etcd-cluster %q", m.Zone, etcd.Name)
					}

					if clusterZones[m.Zone] == nil {
						return fmt.Errorf("EtcdMembers for %q is configured in zone %q, but that is not configured at the k8s-cluster level", etcd.Name, m.Zone)
					}
					etcdZones[m.Zone] = m
				}

				if (len(etcdZones) % 2) == 0 {
					// Not technically a requirement, but doesn't really make sense to allow
					return fmt.Errorf("There should be an odd number of master-zones, for etcd's quorum.  Hint: Use --zone and --master-zone to declare node zones and master zones separately.")
				}
			}
		}
	}

	if c.StateStore == nil {
		return fmt.Errorf("StateStore is required")
	}

	if c.Cluster.Spec.CloudProvider == "" {
		return fmt.Errorf("--cloud is required (e.g. aws, gce)")
	}

	tags := make(map[string]struct{})

	l := &Loader{}
	l.Init()

	keyStore := c.StateStore.CA()
	secretStore := c.StateStore.Secrets()

	if vfs.IsClusterReadable(secretStore.VFSPath()) {
		vfsPath := secretStore.VFSPath()
		c.Cluster.Spec.SecretStore = vfsPath.Path()
		if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
			if c.Cluster.Spec.MasterPermissions == nil {
				c.Cluster.Spec.MasterPermissions = &api.CloudPermissions{}
			}
			c.Cluster.Spec.MasterPermissions.AddS3Bucket(s3Path.Bucket())
			if c.Cluster.Spec.NodePermissions == nil {
				c.Cluster.Spec.NodePermissions = &api.CloudPermissions{}
			}
			c.Cluster.Spec.NodePermissions.AddS3Bucket(s3Path.Bucket())
		}
	} else {
		// We could implement this approach, but it seems better to get all clouds using cluster-readable storage
		return fmt.Errorf("secrets path is not cluster readable: %v", secretStore.VFSPath())
	}

	if vfs.IsClusterReadable(keyStore.VFSPath()) {
		vfsPath := keyStore.VFSPath()
		c.Cluster.Spec.KeyStore = vfsPath.Path()
		if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
			if c.Cluster.Spec.MasterPermissions == nil {
				c.Cluster.Spec.MasterPermissions = &api.CloudPermissions{}
			}
			c.Cluster.Spec.MasterPermissions.AddS3Bucket(s3Path.Bucket())
			if c.Cluster.Spec.NodePermissions == nil {
				c.Cluster.Spec.NodePermissions = &api.CloudPermissions{}
			}
			c.Cluster.Spec.NodePermissions.AddS3Bucket(s3Path.Bucket())
		}
	} else {
		// We could implement this approach, but it seems better to get all clouds using cluster-readable storage
		return fmt.Errorf("keyStore path is not cluster readable: %v", keyStore.VFSPath())
	}

	if vfs.IsClusterReadable(c.StateStore.VFSPath()) {
		c.Cluster.Spec.ConfigStore = c.StateStore.VFSPath().Path()
	} else {
		// We do support this...
	}

	if c.Cluster.Spec.KubernetesVersion == "" {
		stableURL := "https://storage.googleapis.com/kubernetes-release/release/stable.txt"
		b, err := vfs.Context.ReadFile(stableURL)
		if err != nil {
			return fmt.Errorf("--kubernetes-version not specified, and unable to download latest version from %q: %v", stableURL, err)
		}
		latestVersion := strings.TrimSpace(string(b))
		glog.Infof("Using kubernetes latest stable version: %s", latestVersion)

		c.Cluster.Spec.KubernetesVersion = latestVersion
		//return fmt.Errorf("Must either specify a KubernetesVersion (-kubernetes-version) or provide an asset with the release bundle")
	}

	// Normalize k8s version
	versionWithoutV := strings.TrimSpace(c.Cluster.Spec.KubernetesVersion)
	if strings.HasPrefix(versionWithoutV, "v") {
		versionWithoutV = versionWithoutV[1:]
	}
	if c.Cluster.Spec.KubernetesVersion != versionWithoutV {
		glog.Warningf("Normalizing kubernetes version: %q -> %q", c.Cluster.Spec.KubernetesVersion, versionWithoutV)
		c.Cluster.Spec.KubernetesVersion = versionWithoutV
	}

	if len(c.Assets) == 0 {
		//defaultReleaseAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/kubernetes-server-linux-amd64.tar.gz", c.Config.KubernetesVersion)
		//glog.Infof("Adding default kubernetes release asset: %s", defaultReleaseAsset)

		defaultKubeletAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/bin/linux/amd64/kubelet", c.Cluster.Spec.KubernetesVersion)
		glog.Infof("Adding default kubelet release asset: %s", defaultKubeletAsset)

		defaultKubectlAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/bin/linux/amd64/kubectl", c.Cluster.Spec.KubernetesVersion)
		glog.Infof("Adding default kubelet release asset: %s", defaultKubectlAsset)

		// TODO: Verify assets exist, get the hash (that will check that KubernetesVersion is valid)

		c.Assets = append(c.Assets, defaultKubeletAsset, defaultKubectlAsset)
	}

	if c.NodeUpSource == "" {
		location := "https://kubeupv2.s3.amazonaws.com/nodeup/nodeup-1.3.tar.gz"
		glog.Infof("Using default nodeup location: %q", location)
		c.NodeUpSource = location
	}

	checkExisting := true

	//c.NodeUpConfig.Tags = append(c.NodeUpConfig.Tags, "_jessie", "_debian_family", "_systemd")
	//
	//if useProtokube {
	//	tags["_protokube"] = struct{}{}
	//	c.NodeUpConfig.Tags = append(c.NodeUpConfig.Tags, "_protokube")
	//} else {
	//	tags["_not_protokube"] = struct{}{}
	//	c.NodeUpConfig.Tags = append(c.NodeUpConfig.Tags, "_not_protokube")
	//}

	c.NodeUpTags = append(c.NodeUpTags, "_protokube")

	if useMasterASG {
		tags["_master_asg"] = struct{}{}
	} else {
		tags["_master_single"] = struct{}{}
	}

	if useMasterLB {
		tags["_master_lb"] = struct{}{}
	} else {
		tags["_not_master_lb"] = struct{}{}
	}

	if c.Cluster.Spec.MasterPublicName != "" {
		tags["_master_dns"] = struct{}{}
	}

	l.AddTypes(map[string]interface{}{
		"keypair": &fitasks.Keypair{},
		"secret":  &fitasks.Secret{},
	})

	cloud, err := BuildCloud(c.Cluster)
	if err != nil {
		return err
	}

	region := ""
	project := ""

	switch c.Cluster.Spec.CloudProvider {
	case "gce":
		{
			gceCloud := cloud.(*gce.GCECloud)
			region = gceCloud.Region
			project = gceCloud.Project

			glog.Fatalf("GCE is (probably) not working currently - please ping @justinsb for cleanup")
			tags["_gce"] = struct{}{}
			c.NodeUpTags = append(c.NodeUpTags, "_gce")

			l.AddTypes(map[string]interface{}{
				"persistentDisk":       &gcetasks.PersistentDisk{},
				"instance":             &gcetasks.Instance{},
				"instanceTemplate":     &gcetasks.InstanceTemplate{},
				"network":              &gcetasks.Network{},
				"managedInstanceGroup": &gcetasks.ManagedInstanceGroup{},
				"firewallRule":         &gcetasks.FirewallRule{},
				"ipAddress":            &gcetasks.IPAddress{},
			})
		}

	case "aws":
		{
			awsCloud := cloud.(*awsup.AWSCloud)
			region = awsCloud.Region

			tags["_aws"] = struct{}{}
			c.NodeUpTags = append(c.NodeUpTags, "_aws")

			l.AddTypes(map[string]interface{}{
				// EC2
				"elasticIP":                   &awstasks.ElasticIP{},
				"instance":                    &awstasks.Instance{},
				"instanceElasticIPAttachment": &awstasks.InstanceElasticIPAttachment{},
				"instanceVolumeAttachment":    &awstasks.InstanceVolumeAttachment{},
				"ebsVolume":                   &awstasks.EBSVolume{},
				"sshKey":                      &awstasks.SSHKey{},

				// IAM
				"iamInstanceProfile":     &awstasks.IAMInstanceProfile{},
				"iamInstanceProfileRole": &awstasks.IAMInstanceProfileRole{},
				"iamRole":                &awstasks.IAMRole{},
				"iamRolePolicy":          &awstasks.IAMRolePolicy{},

				// VPC / Networking
				"dhcpOptions":           &awstasks.DHCPOptions{},
				"internetGateway":       &awstasks.InternetGateway{},
				"route":                 &awstasks.Route{},
				"routeTable":            &awstasks.RouteTable{},
				"routeTableAssociation": &awstasks.RouteTableAssociation{},
				"securityGroup":         &awstasks.SecurityGroup{},
				"securityGroupRule":     &awstasks.SecurityGroupRule{},
				"subnet":                &awstasks.Subnet{},
				"vpc":                   &awstasks.VPC{},
				"vpcDHDCPOptionsAssociation": &awstasks.VPCDHCPOptionsAssociation{},

				// ELB
				"loadBalancer":             &awstasks.LoadBalancer{},
				"loadBalancerAttachment":   &awstasks.LoadBalancerAttachment{},
				"loadBalancerHealthChecks": &awstasks.LoadBalancerHealthChecks{},

				// Autoscaling
				"autoscalingGroup":    &awstasks.AutoscalingGroup{},
				"launchConfiguration": &awstasks.LaunchConfiguration{},

				// Route53
				"dnsName": &awstasks.DNSName{},
				"dnsZone": &awstasks.DNSZone{},
			})

			if c.SSHPublicKey == "" {
				return fmt.Errorf("SSH public key must be specified when running with AWS")
			}

			l.TemplateFunctions["MachineTypeInfo"] = awsup.GetMachineTypeInfo
		}

	default:
		return fmt.Errorf("unknown CloudProvider %q", c.Cluster.Spec.CloudProvider)
	}

	tf := &TemplateFunctions{
		cluster: c.Cluster,
	}

	l.Tags = tags
	l.WorkDir = c.OutDir
	l.ModelStore = c.ModelStore
	l.NodeModel = c.NodeModel

	l.TemplateFunctions["HasTag"] = func(tag string) bool {
		_, found := l.Tags[tag]
		return found
	}

	l.TemplateFunctions["CA"] = func() fi.CAStore {
		return keyStore
	}
	l.TemplateFunctions["Secrets"] = func() fi.SecretStore {
		return secretStore
	}

	l.TemplateFunctions["NodeUpTags"] = func() []string {
		return c.NodeUpTags
	}

	// TotalNodeCount computes the total count of nodes
	l.TemplateFunctions["TotalNodeCount"] = func() (int, error) {
		count := 0
		for _, group := range c.nodes {
			if group.Spec.MaxSize != nil {
				count += *group.Spec.MaxSize
			} else if group.Spec.MinSize != nil {
				count += *group.Spec.MinSize
			} else {
				// Guestimate
				count += 5
			}
		}
		return count, nil
	}
	l.TemplateFunctions["Region"] = func() string {
		return region
	}
	l.TemplateFunctions["NodeSets"] = c.populateNodeSets
	l.TemplateFunctions["Masters"] = c.populateMasters
	//l.TemplateFunctions["NodeUp"] = c.populateNodeUpConfig
	l.TemplateFunctions["NodeUpSource"] = func() string {
		return c.NodeUpSource
	}
	l.TemplateFunctions["NodeUpSourceHash"] = func() string {
		return ""
	}
	l.TemplateFunctions["ClusterLocation"] = func() string {
		return c.StateStore.VFSPath().Join(PathClusterCompleted).Path()
	}
	l.TemplateFunctions["Assets"] = func() []string {
		return c.Assets
	}

	l.TemplateFunctions["Base64Encode"] = func(s string) string {
		return base64.StdEncoding.EncodeToString([]byte(s))
	}
	l.TemplateFunctions["ClusterName"] = func() string {
		return clusterName
	}
	l.TemplateFunctions["replace"] = func(s, find, replace string) string {
		return strings.Replace(s, find, replace, -1)
	}
	l.TemplateFunctions["join"] = func(a []string, sep string) string {
		return strings.Join(a, sep)
	}

	tf.AddTo(l.TemplateFunctions)

	l.OptionsLoader = loader.NewOptionsLoader(l.TemplateFunctions)

	if c.SSHPublicKey != "" {
		authorized, err := ioutil.ReadFile(c.SSHPublicKey)
		if err != nil {
			return fmt.Errorf("error reading SSH key file %q: %v", c.SSHPublicKey, err)
		}

		l.Resources["ssh-public-key"] = fi.NewStringResource(string(authorized))
	}

	completed, err := l.BuildCompleteSpec(&c.Cluster.Spec, c.ModelStore, c.Models)
	if err != nil {
		return fmt.Errorf("error building complete spec: %v", err)
	}
	l.cluster = &api.Cluster{}
	*l.cluster = *c.Cluster
	l.cluster.Spec = *completed
	tf.cluster = l.cluster

	err = l.cluster.Validate()
	if err != nil {
		return fmt.Errorf("Completed cluster failed validation: %v", err)
	}

	taskMap, err := l.BuildTasks(c.ModelStore, c.Models)
	if err != nil {
		return fmt.Errorf("error building tasks: %v", err)
	}

	err = c.StateStore.WriteConfig(PathClusterCompleted, l.cluster)
	if err != nil {
		return fmt.Errorf("error writing completed cluster spec: %v", err)
	}

	var target fi.Target

	switch c.Target {
	case "direct":
		switch c.Cluster.Spec.CloudProvider {
		case "gce":
			target = gce.NewGCEAPITarget(cloud.(*gce.GCECloud))
		case "aws":
			target = awsup.NewAWSAPITarget(cloud.(*awsup.AWSCloud))
		default:
			return fmt.Errorf("direct configuration not supported with CloudProvider:%q", c.Cluster.Spec.CloudProvider)
		}

	case "terraform":
		checkExisting = false
		outDir := path.Join(c.OutDir, "terraform")
		target = terraform.NewTerraformTarget(cloud, region, project, outDir)

	case "dryrun":
		target = fi.NewDryRunTarget(os.Stdout)
	default:
		return fmt.Errorf("unsupported target type %q", c.Target)
	}

	context, err := fi.NewContext(target, cloud, keyStore, secretStore, checkExisting)
	if err != nil {
		return fmt.Errorf("error building context: %v", err)
	}
	defer context.Close()

	err = context.RunTasks(taskMap)
	if err != nil {
		return fmt.Errorf("error running tasks: %v", err)
	}

	err = target.Finish(taskMap)
	if err != nil {
		return fmt.Errorf("error closing target: %v", err)
	}

	return nil
}
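The DNS-zone defaulting near the top of Run keeps just the last two labels of MasterPublicName. A minimal sketch of that derivation, with a hypothetical hostname:

package main

import (
	"fmt"
	"strings"
)

func main() {
	masterPublicName := "api.mycluster.example.com"

	// Default the zone to the last two DNS labels, as Run does.
	tokens := strings.Split(masterPublicName, ".")
	dnsZone := strings.Join(tokens[len(tokens)-2:], ".")

	fmt.Println(dnsZone) // example.com
}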