func resourceAwsKmsAliasCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).kmsconn

	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.PrefixedUniqueId("alias/")
	}

	targetKeyId := d.Get("target_key_id").(string)

	log.Printf("[DEBUG] KMS alias create name: %s, target_key: %s", name, targetKeyId)

	req := &kms.CreateAliasInput{
		AliasName:   aws.String(name),
		TargetKeyId: aws.String(targetKeyId),
	}
	_, err := conn.CreateAlias(req)
	if err != nil {
		return err
	}
	d.SetId(name)
	return resourceAwsKmsAliasRead(d, meta)
}
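// The name/name_prefix fallback above recurs throughout this section. A
// minimal sketch of the shared pattern, assuming Terraform's
// helper/resource package: PrefixedUniqueId appends a timestamp-ordered
// unique suffix to the given prefix, and resource.UniqueId() is the same
// call with a default "terraform-" prefix. The helper name below is
// hypothetical, not part of the provider.
func resolveUniqueName(d *schema.ResourceData, fallbackPrefix string) string {
	if v, ok := d.GetOk("name"); ok {
		return v.(string)
	}
	if v, ok := d.GetOk("name_prefix"); ok {
		return resource.PrefixedUniqueId(v.(string))
	}
	return resource.PrefixedUniqueId(fallbackPrefix)
}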
func resourceAwsIamRoleCreate(d *schema.ResourceData, meta interface{}) error {
	iamconn := meta.(*AWSClient).iamconn

	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.UniqueId()
	}

	request := &iam.CreateRoleInput{
		Path:                     aws.String(d.Get("path").(string)),
		RoleName:                 aws.String(name),
		AssumeRolePolicyDocument: aws.String(d.Get("assume_role_policy").(string)),
	}

	var createResp *iam.CreateRoleOutput
	err := resource.Retry(10*time.Second, func() *resource.RetryError {
		var err error
		createResp, err = iamconn.CreateRole(request)
		// IAM roles can take ~10 seconds to propagate in AWS:
		// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
		if isAWSErr(err, "MalformedPolicyDocument", "Invalid principal in policy") {
			return resource.RetryableError(err)
		}
		return resource.NonRetryableError(err)
	})
	if err != nil {
		return fmt.Errorf("Error creating IAM Role %s: %s", name, err)
	}
	return resourceAwsIamRoleReadResult(d, createResp.Role)
}
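// A hedged sketch of the isAWSErr helper used in the retry above, assuming
// the provider's usual (code, message-substring) matching convention:
func isAWSErr(err error, code string, message string) bool {
	if awsErr, ok := err.(awserr.Error); ok {
		return awsErr.Code() == code && strings.Contains(awsErr.Message(), message)
	}
	return false
}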
func resourceAwsAlbAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
	elbconn := meta.(*AWSClient).elbv2conn

	params := &elbv2.RegisterTargetsInput{
		TargetGroupArn: aws.String(d.Get("target_group_arn").(string)),
		Targets: []*elbv2.TargetDescription{
			{
				Id:   aws.String(d.Get("target_id").(string)),
				Port: aws.Int64(int64(d.Get("port").(int))),
			},
		},
	}

	log.Printf("[INFO] Registering Target %s (%d) with Target Group %s", d.Get("target_id").(string),
		d.Get("port").(int), d.Get("target_group_arn").(string))

	_, err := elbconn.RegisterTargets(params)
	if err != nil {
		return errwrap.Wrapf("Error registering targets with target group: {{err}}", err)
	}

	d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", d.Get("target_group_arn"))))

	return nil
}
func resourceAwsIamInstanceProfileCreate(d *schema.ResourceData, meta interface{}) error {
	iamconn := meta.(*AWSClient).iamconn

	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.UniqueId()
	}

	request := &iam.CreateInstanceProfileInput{
		InstanceProfileName: aws.String(name),
		Path:                aws.String(d.Get("path").(string)),
	}

	response, err := iamconn.CreateInstanceProfile(request)
	if err == nil {
		err = instanceProfileReadResult(d, response.InstanceProfile)
	}
	if err != nil {
		return fmt.Errorf("Error creating IAM instance profile %s: %s", name, err)
	}

	return instanceProfileSetRoles(d, iamconn)
}
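// instanceProfileSetRoles is not shown here; a hedged sketch of its
// add-side, assuming it ultimately calls iam.AddRoleToInstanceProfile for
// each entry in the "roles" set:
func instanceProfileAddRole(iamconn *iam.IAM, profileName, roleName string) error {
	request := &iam.AddRoleToInstanceProfileInput{
		InstanceProfileName: aws.String(profileName),
		RoleName:            aws.String(roleName),
	}
	_, err := iamconn.AddRoleToInstanceProfile(request)
	return err
}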
func resourceAwsElasticTranscoderPresetCreate(d *schema.ResourceData, meta interface{}) error {
	elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn

	req := &elastictranscoder.CreatePresetInput{
		Audio:       expandETAudioParams(d),
		Container:   aws.String(d.Get("container").(string)),
		Description: getStringPtr(d, "description"),
		Thumbnails:  expandETThumbnails(d),
		Video:       exapandETVideoParams(d),
	}

	if name, ok := d.GetOk("name"); ok {
		req.Name = aws.String(name.(string))
	} else {
		name := resource.PrefixedUniqueId("tf-et-preset-")
		d.Set("name", name)
		req.Name = aws.String(name)
	}

	log.Printf("[DEBUG] Elastic Transcoder Preset create opts: %s", req)
	resp, err := elastictranscoderconn.CreatePreset(req)
	if err != nil {
		return fmt.Errorf("Error creating Elastic Transcoder Preset: %s", err)
	}

	if resp.Warning != nil && *resp.Warning != "" {
		log.Printf("[WARN] Elastic Transcoder Preset: %s", *resp.Warning)
	}

	d.SetId(*resp.Preset.Id)
	d.Set("arn", *resp.Preset.Arn)

	return nil
}
func resourceAwsIamRoleCreate(d *schema.ResourceData, meta interface{}) error {
	iamconn := meta.(*AWSClient).iamconn

	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.UniqueId()
	}

	request := &iam.CreateRoleInput{
		Path:                     aws.String(d.Get("path").(string)),
		RoleName:                 aws.String(name),
		AssumeRolePolicyDocument: aws.String(d.Get("assume_role_policy").(string)),
	}

	var createResp *iam.CreateRoleOutput
	err := resource.Retry(30*time.Second, func() *resource.RetryError {
		var err error
		createResp, err = iamconn.CreateRole(request)
		// IAM users (referenced in Principal field of assume policy)
		// can take ~30 seconds to propagate in AWS
		if isAWSErr(err, "MalformedPolicyDocument", "Invalid principal in policy") {
			return resource.RetryableError(err)
		}
		return resource.NonRetryableError(err)
	})
	if err != nil {
		return fmt.Errorf("Error creating IAM Role %s: %s", name, err)
	}
	return resourceAwsIamRoleReadResult(d, createResp.Role)
}
func resourceAwsIamPolicyCreate(d *schema.ResourceData, meta interface{}) error {
	iamconn := meta.(*AWSClient).iamconn

	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.UniqueId()
	}

	request := &iam.CreatePolicyInput{
		Description:    aws.String(d.Get("description").(string)),
		Path:           aws.String(d.Get("path").(string)),
		PolicyDocument: aws.String(d.Get("policy").(string)),
		PolicyName:     aws.String(name),
	}

	response, err := iamconn.CreatePolicy(request)
	if err != nil {
		return fmt.Errorf("Error creating IAM policy %s: %s", name, err)
	}

	return readIamPolicy(d, response.Policy)
}
func resourceAwsKeyPairCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	var keyName string
	if v, ok := d.GetOk("key_name"); ok {
		keyName = v.(string)
	} else if v, ok := d.GetOk("key_name_prefix"); ok {
		keyName = resource.PrefixedUniqueId(v.(string))
	} else {
		keyName = resource.UniqueId()
	}

	publicKey := d.Get("public_key").(string)
	req := &ec2.ImportKeyPairInput{
		KeyName:           aws.String(keyName),
		PublicKeyMaterial: []byte(publicKey),
	}
	resp, err := conn.ImportKeyPair(req)
	if err != nil {
		return fmt.Errorf("Error import KeyPair: %s", err)
	}

	d.SetId(*resp.KeyName)
	return nil
}
func resourceAwsAlbCreate(d *schema.ResourceData, meta interface{}) error {
	elbconn := meta.(*AWSClient).elbv2conn

	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.PrefixedUniqueId("tf-lb-")
	}
	d.Set("name", name)

	elbOpts := &elbv2.CreateLoadBalancerInput{
		Name: aws.String(name),
		Tags: tagsFromMapELBv2(d.Get("tags").(map[string]interface{})),
	}

	if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) {
		elbOpts.Scheme = aws.String("internal")
	}

	if v, ok := d.GetOk("security_groups"); ok {
		elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("subnets"); ok {
		elbOpts.Subnets = expandStringList(v.(*schema.Set).List())
	}

	log.Printf("[DEBUG] ALB create configuration: %#v", elbOpts)

	resp, err := elbconn.CreateLoadBalancer(elbOpts)
	if err != nil {
		return errwrap.Wrapf("Error creating Application Load Balancer: {{err}}", err)
	}

	if len(resp.LoadBalancers) != 1 {
		return fmt.Errorf("No load balancers returned following creation of %s", d.Get("name").(string))
	}

	d.SetId(*resp.LoadBalancers[0].LoadBalancerArn)
	log.Printf("[INFO] ALB ID: %s", d.Id())

	return resourceAwsAlbUpdate(d, meta)
}
func validateReferenceName(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	creationToken := resource.PrefixedUniqueId(fmt.Sprintf("%s-", value))
	if len(creationToken) > 64 {
		errors = append(errors, fmt.Errorf(
			"%q would push the generated Creation Token over the 64 character limit: %q", k, value))
	}
	return
}
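// A quick test sketch for validateReferenceName: PrefixedUniqueId appends a
// fixed-width suffix, so a long enough reference_name must push the
// generated Creation Token past 64 characters.
func TestValidateReferenceName(t *testing.T) {
	if _, errs := validateReferenceName(strings.Repeat("x", 128), "reference_name"); len(errs) == 0 {
		t.Fatal("expected an error for an oversized reference_name")
	}
	if _, errs := validateReferenceName("short", "reference_name"); len(errs) != 0 {
		t.Fatalf("unexpected errors: %v", errs)
	}
}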
func resourceAwsIamRolePolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).iamconn

	role := d.Get("role").(string)
	arn := d.Get("policy_arn").(string)

	err := attachPolicyToRole(conn, role, arn)
	if err != nil {
		return fmt.Errorf("[WARN] Error attaching policy %s to IAM Role %s: %v", arn, role, err)
	}

	d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", role)))
	return resourceAwsIamRolePolicyAttachmentRead(d, meta)
}
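// A hedged sketch of attachPolicyToRole, assuming it is a thin wrapper
// around iam.AttachRolePolicy:
func attachPolicyToRole(conn *iam.IAM, role string, arn string) error {
	_, err := conn.AttachRolePolicy(&iam.AttachRolePolicyInput{
		RoleName:  aws.String(role),
		PolicyArn: aws.String(arn),
	})
	return err
}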
func resourceAwsAutoscalingAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
	asgconn := meta.(*AWSClient).autoscalingconn
	asgName := d.Get("autoscaling_group_name").(string)
	elbName := d.Get("elb").(string)

	attachElbInput := &autoscaling.AttachLoadBalancersInput{
		AutoScalingGroupName: aws.String(asgName),
		LoadBalancerNames:    []*string{aws.String(elbName)},
	}

	log.Printf("[INFO] registering asg %s with ELBs %s", asgName, elbName)

	if _, err := asgconn.AttachLoadBalancers(attachElbInput); err != nil {
		return errwrap.Wrapf(fmt.Sprintf("Failure attaching AutoScaling Group %s with Elastic Load Balancer: %s: {{err}}", asgName, elbName), err)
	}

	d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", asgName)))

	return resourceAwsAutoscalingAttachmentRead(d, meta)
}
func resourceAwsIAMServerCertificateCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).iamconn

	var sslCertName string
	if v, ok := d.GetOk("name"); ok {
		sslCertName = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		sslCertName = resource.PrefixedUniqueId(v.(string))
	} else {
		sslCertName = resource.UniqueId()
	}

	createOpts := &iam.UploadServerCertificateInput{
		CertificateBody:       aws.String(d.Get("certificate_body").(string)),
		PrivateKey:            aws.String(d.Get("private_key").(string)),
		ServerCertificateName: aws.String(sslCertName),
	}

	if v, ok := d.GetOk("certificate_chain"); ok {
		createOpts.CertificateChain = aws.String(v.(string))
	}

	if v, ok := d.GetOk("path"); ok {
		createOpts.Path = aws.String(v.(string))
	}

	log.Printf("[DEBUG] Creating IAM Server Certificate with opts: %s", createOpts)
	resp, err := conn.UploadServerCertificate(createOpts)
	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			return fmt.Errorf("Error uploading server certificate: %s: %s", awsErr.Code(), awsErr.Message())
		}
		return fmt.Errorf("Error uploading server certificate: %s", err)
	}

	d.SetId(*resp.ServerCertificateMetadata.ServerCertificateId)
	d.Set("name", sslCertName)

	return resourceAwsIAMServerCertificateRead(d, meta)
}
func resourceAwsElasticTranscoderPipelineCreate(d *schema.ResourceData, meta interface{}) error {
	elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn

	req := &elastictranscoder.CreatePipelineInput{
		AwsKmsKeyArn:    getStringPtr(d, "aws_kms_key_arn"),
		ContentConfig:   expandETPiplineOutputConfig(d, "content_config"),
		InputBucket:     aws.String(d.Get("input_bucket").(string)),
		Notifications:   expandETNotifications(d),
		OutputBucket:    getStringPtr(d, "output_bucket"),
		Role:            getStringPtr(d, "role"),
		ThumbnailConfig: expandETPiplineOutputConfig(d, "thumbnail_config"),
	}

	if name, ok := d.GetOk("name"); ok {
		req.Name = aws.String(name.(string))
	} else {
		name := resource.PrefixedUniqueId("tf-et-")
		d.Set("name", name)
		req.Name = aws.String(name)
	}

	if (req.OutputBucket == nil && (req.ContentConfig == nil || req.ContentConfig.Bucket == nil)) ||
		(req.OutputBucket != nil && req.ContentConfig != nil && req.ContentConfig.Bucket != nil) {
		return fmt.Errorf("[ERROR] you must specify only one of output_bucket or content_config.bucket")
	}

	log.Printf("[DEBUG] Elastic Transcoder Pipeline create opts: %s", req)
	resp, err := elastictranscoderconn.CreatePipeline(req)
	if err != nil {
		return fmt.Errorf("Error creating Elastic Transcoder Pipeline: %s", err)
	}

	d.SetId(*resp.Pipeline.Id)

	for _, w := range resp.Warnings {
		log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", *w.Code, *w.Message)
	}

	return resourceAwsElasticTranscoderPipelineRead(d, meta)
}
func resourceAwsElbAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
	elbconn := meta.(*AWSClient).elbconn
	elbName := d.Get("elb").(string)

	instance := d.Get("instance").(string)

	registerInstancesOpts := elb.RegisterInstancesWithLoadBalancerInput{
		LoadBalancerName: aws.String(elbName),
		Instances:        []*elb.Instance{{InstanceId: aws.String(instance)}},
	}

	log.Printf("[INFO] registering instance %s with ELB %s", instance, elbName)

	_, err := elbconn.RegisterInstancesWithLoadBalancer(&registerInstancesOpts)
	if err != nil {
		return fmt.Errorf("Failure registering instances with ELB: %s", err)
	}

	d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", elbName)))

	return nil
}
func resourceAwsIamRoleCreate(d *schema.ResourceData, meta interface{}) error {
	iamconn := meta.(*AWSClient).iamconn

	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.UniqueId()
	}

	request := &iam.CreateRoleInput{
		Path:                     aws.String(d.Get("path").(string)),
		RoleName:                 aws.String(name),
		AssumeRolePolicyDocument: aws.String(d.Get("assume_role_policy").(string)),
	}

	createResp, err := iamconn.CreateRole(request)
	if err != nil {
		return fmt.Errorf("Error creating IAM Role %s: %s", name, err)
	}
	return resourceAwsIamRoleReadResult(d, createResp.Role)
}
func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))

	identifier := d.Get("identifier").(string)
	// Generate a unique identifier if none was supplied
	if identifier == "" {
		identifier = resource.PrefixedUniqueId("tf-")
		// SQL Server identifier size is max 15 chars, so truncate
		if engine := d.Get("engine").(string); engine != "" {
			if strings.Contains(strings.ToLower(engine), "sqlserver") {
				identifier = identifier[:15]
			}
		}
		d.Set("identifier", identifier)
	}

	if v, ok := d.GetOk("replicate_source_db"); ok {
		opts := rds.CreateDBInstanceReadReplicaInput{
			SourceDBInstanceIdentifier: aws.String(v.(string)),
			CopyTagsToSnapshot:         aws.Bool(d.Get("copy_tags_to_snapshot").(bool)),
			DBInstanceClass:            aws.String(d.Get("instance_class").(string)),
			DBInstanceIdentifier:       aws.String(identifier),
			Tags:                       tags,
		}
		if attr, ok := d.GetOk("iops"); ok {
			opts.Iops = aws.Int64(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("port"); ok {
			opts.Port = aws.Int64(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("availability_zone"); ok {
			opts.AvailabilityZone = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("storage_type"); ok {
			opts.StorageType = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("publicly_accessible"); ok {
			opts.PubliclyAccessible = aws.Bool(attr.(bool))
		}

		if attr, ok := d.GetOk("db_subnet_group_name"); ok {
			opts.DBSubnetGroupName = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("monitoring_role_arn"); ok {
			opts.MonitoringRoleArn = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("monitoring_interval"); ok {
			opts.MonitoringInterval = aws.Int64(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("option_group_name"); ok {
			opts.OptionGroupName = aws.String(attr.(string))
		}

		log.Printf("[DEBUG] DB Instance Replica create configuration: %#v", opts)
		_, err := conn.CreateDBInstanceReadReplica(&opts)
		if err != nil {
			return fmt.Errorf("Error creating DB Instance: %s", err)
		}
	} else if _, ok := d.GetOk("snapshot_identifier"); ok {
		opts := rds.RestoreDBInstanceFromDBSnapshotInput{
			DBInstanceClass:         aws.String(d.Get("instance_class").(string)),
			DBInstanceIdentifier:    aws.String(d.Get("identifier").(string)),
			DBSnapshotIdentifier:    aws.String(d.Get("snapshot_identifier").(string)),
			AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)),
			Tags:               tags,
			CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)),
		}

		if attr, ok := d.GetOk("availability_zone"); ok {
			opts.AvailabilityZone = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("db_subnet_group_name"); ok {
			opts.DBSubnetGroupName = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("engine"); ok {
			opts.Engine = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("iops"); ok {
			opts.Iops = aws.Int64(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("license_model"); ok {
			opts.LicenseModel = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("multi_az"); ok {
			opts.MultiAZ = aws.Bool(attr.(bool))
		}

		if attr, ok := d.GetOk("option_group_name"); ok {
			opts.OptionGroupName = aws.String(attr.(string))

		}

		if attr, ok := d.GetOk("port"); ok {
			opts.Port = aws.Int64(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("publicly_accessible"); ok {
			opts.PubliclyAccessible = aws.Bool(attr.(bool))
		}

		if attr, ok := d.GetOk("tde_credential_arn"); ok {
			opts.TdeCredentialArn = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("storage_type"); ok {
			opts.StorageType = aws.String(attr.(string))
		}

		log.Printf("[DEBUG] DB Instance restore from snapshot configuration: %s", opts)
		_, err := conn.RestoreDBInstanceFromDBSnapshot(&opts)
		if err != nil {
			return fmt.Errorf("Error creating DB Instance: %s", err)
		}

		var sgUpdate bool
		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			sgUpdate = true
		}
		if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
			sgUpdate = true
		}
		if sgUpdate {
			log.Printf("[INFO] DB is restoring from snapshot with default security, but custom security should be set, will now update after snapshot is restored!")

			// wait for instance to get up and then modify security
			d.SetId(d.Get("identifier").(string))

			log.Printf("[INFO] DB Instance ID: %s", d.Id())

			log.Println("[INFO] Waiting for DB Instance to be available")

			stateConf := &resource.StateChangeConf{
				Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials",
					"maintenance", "renaming", "rebooting", "upgrading"},
				Target:     []string{"available"},
				Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
				Timeout:    40 * time.Minute,
				MinTimeout: 10 * time.Second,
				Delay:      30 * time.Second, // Wait 30 secs before starting
			}

			// Wait, catching any errors
			_, err := stateConf.WaitForState()
			if err != nil {
				return err
			}

			err = resourceAwsDbInstanceUpdate(d, meta)
			if err != nil {
				return err
			}

		}
	} else {
		if _, ok := d.GetOk("allocated_storage"); !ok {
			return fmt.Errorf(`provider.aws: aws_db_instance: %s: "allocated_storage": required field is not set`, d.Get("name").(string))
		}
		if _, ok := d.GetOk("engine"); !ok {
			return fmt.Errorf(`provider.aws: aws_db_instance: %s: "engine": required field is not set`, d.Get("name").(string))
		}
		if _, ok := d.GetOk("password"); !ok {
			return fmt.Errorf(`provider.aws: aws_db_instance: %s: "password": required field is not set`, d.Get("name").(string))
		}
		if _, ok := d.GetOk("username"); !ok {
			return fmt.Errorf(`provider.aws: aws_db_instance: %s: "username": required field is not set`, d.Get("name").(string))
		}
		opts := rds.CreateDBInstanceInput{
			AllocatedStorage:        aws.Int64(int64(d.Get("allocated_storage").(int))),
			DBName:                  aws.String(d.Get("name").(string)),
			DBInstanceClass:         aws.String(d.Get("instance_class").(string)),
			DBInstanceIdentifier:    aws.String(d.Get("identifier").(string)),
			MasterUsername:          aws.String(d.Get("username").(string)),
			MasterUserPassword:      aws.String(d.Get("password").(string)),
			Engine:                  aws.String(d.Get("engine").(string)),
			EngineVersion:           aws.String(d.Get("engine_version").(string)),
			StorageEncrypted:        aws.Bool(d.Get("storage_encrypted").(bool)),
			AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)),
			Tags:               tags,
			CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)),
		}

		attr := d.Get("backup_retention_period")
		opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int)))
		if attr, ok := d.GetOk("multi_az"); ok {
			opts.MultiAZ = aws.Bool(attr.(bool))

		}

		if attr, ok := d.GetOk("maintenance_window"); ok {
			opts.PreferredMaintenanceWindow = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("backup_window"); ok {
			opts.PreferredBackupWindow = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("license_model"); ok {
			opts.LicenseModel = aws.String(attr.(string))
		}
		if attr, ok := d.GetOk("parameter_group_name"); ok {
			opts.DBParameterGroupName = aws.String(attr.(string))
		}

		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			opts.VpcSecurityGroupIds = s
		}

		if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
			var s []*string
			for _, v := range attr.List() {
				s = append(s, aws.String(v.(string)))
			}
			opts.DBSecurityGroups = s
		}
		if attr, ok := d.GetOk("storage_type"); ok {
			opts.StorageType = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("db_subnet_group_name"); ok {
			opts.DBSubnetGroupName = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("iops"); ok {
			opts.Iops = aws.Int64(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("port"); ok {
			opts.Port = aws.Int64(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("availability_zone"); ok {
			opts.AvailabilityZone = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("publicly_accessible"); ok {
			opts.PubliclyAccessible = aws.Bool(attr.(bool))
		}

		if attr, ok := d.GetOk("monitoring_role_arn"); ok {
			opts.MonitoringRoleArn = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("monitoring_interval"); ok {
			opts.MonitoringInterval = aws.Int64(int64(attr.(int)))
		}

		if attr, ok := d.GetOk("option_group_name"); ok {
			opts.OptionGroupName = aws.String(attr.(string))
		}

		if attr, ok := d.GetOk("kms_key_id"); ok {
			opts.KmsKeyId = aws.String(attr.(string))
		}

		log.Printf("[DEBUG] DB Instance create configuration: %#v", opts)
		var err error
		err = resource.Retry(5*time.Minute, func() *resource.RetryError {
			_, err = conn.CreateDBInstance(&opts)
			if err != nil {
				if awsErr, ok := err.(awserr.Error); ok {
					if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "ENHANCED_MONITORING") {
						return resource.RetryableError(awsErr)
					}
				}
				return resource.NonRetryableError(err)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("Error creating DB Instance: %s", err)
		}
	}

	d.SetId(d.Get("identifier").(string))

	log.Printf("[INFO] DB Instance ID: %s", d.Id())

	log.Println("[INFO] Waiting for DB Instance to be available")

	stateConf := &resource.StateChangeConf{
		Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials",
			"maintenance", "renaming", "rebooting", "upgrading"},
		Target:     []string{"available"},
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second, // Wait 30 secs before starting
	}

	// Wait, catching any errors
	_, err := stateConf.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsDbInstanceRead(d, meta)
}
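// resourceAwsDbInstanceStateRefreshFunc backs both StateChangeConf blocks
// above; a hedged sketch, assuming it polls DescribeDBInstances and reports
// the instance status string as the state:
func resourceAwsDbInstanceStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		conn := meta.(*AWSClient).rdsconn
		resp, err := conn.DescribeDBInstances(&rds.DescribeDBInstancesInput{
			DBInstanceIdentifier: aws.String(d.Id()),
		})
		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "DBInstanceNotFound" {
				// Not visible yet; report no state so the poll continues.
				return nil, "", nil
			}
			return nil, "", err
		}
		if len(resp.DBInstances) == 0 {
			return nil, "", nil
		}
		v := resp.DBInstances[0]
		return v, *v.DBInstanceStatus, nil
	}
}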
func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface{}) error {
	autoscalingconn := meta.(*AWSClient).autoscalingconn
	ec2conn := meta.(*AWSClient).ec2conn

	createLaunchConfigurationOpts := autoscaling.CreateLaunchConfigurationInput{
		LaunchConfigurationName: aws.String(d.Get("name").(string)),
		ImageId:                 aws.String(d.Get("image_id").(string)),
		InstanceType:            aws.String(d.Get("instance_type").(string)),
		EbsOptimized:            aws.Bool(d.Get("ebs_optimized").(bool)),
	}

	if v, ok := d.GetOk("user_data"); ok {
		userData := base64.StdEncoding.EncodeToString([]byte(v.(string)))
		createLaunchConfigurationOpts.UserData = aws.String(userData)
	}

	createLaunchConfigurationOpts.InstanceMonitoring = &autoscaling.InstanceMonitoring{
		Enabled: aws.Bool(d.Get("enable_monitoring").(bool)),
	}

	if v, ok := d.GetOk("iam_instance_profile"); ok {
		createLaunchConfigurationOpts.IamInstanceProfile = aws.String(v.(string))
	}

	if v, ok := d.GetOk("placement_tenancy"); ok {
		createLaunchConfigurationOpts.PlacementTenancy = aws.String(v.(string))
	}

	if v, ok := d.GetOk("associate_public_ip_address"); ok {
		createLaunchConfigurationOpts.AssociatePublicIpAddress = aws.Bool(v.(bool))
	}

	if v, ok := d.GetOk("key_name"); ok {
		createLaunchConfigurationOpts.KeyName = aws.String(v.(string))
	}
	if v, ok := d.GetOk("spot_price"); ok {
		createLaunchConfigurationOpts.SpotPrice = aws.String(v.(string))
	}

	if v, ok := d.GetOk("security_groups"); ok {
		createLaunchConfigurationOpts.SecurityGroups = expandStringList(
			v.(*schema.Set).List(),
		)
	}

	if v, ok := d.GetOk("vpc_classic_link_id"); ok {
		createLaunchConfigurationOpts.ClassicLinkVPCId = aws.String(v.(string))
	}

	if v, ok := d.GetOk("vpc_classic_link_security_groups"); ok {
		createLaunchConfigurationOpts.ClassicLinkVPCSecurityGroups = expandStringList(
			v.(*schema.Set).List(),
		)
	}

	var blockDevices []*autoscaling.BlockDeviceMapping

	// We'll use this to detect if we're declaring it incorrectly as an ebs_block_device.
	rootDeviceName, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn)
	if err != nil {
		return err
	}
	if rootDeviceName == nil {
		// Default to an empty string so we can skip nil checks later
		var blank string
		rootDeviceName = &blank
	}

	if v, ok := d.GetOk("ebs_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &autoscaling.Ebs{
				DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
			}

			if v, ok := bd["snapshot_id"].(string); ok && v != "" {
				ebs.SnapshotId = aws.String(v)
			}

			if v, ok := bd["encrypted"].(bool); ok && v {
				ebs.Encrypted = aws.Bool(v)
			}

			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Int64(int64(v))
			}

			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}

			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.Iops = aws.Int64(int64(v))
			}

			if *aws.String(bd["device_name"].(string)) == *rootDeviceName {
				return fmt.Errorf("Root device (%s) declared as an 'ebs_block_device'.  Use 'root_block_device' keyword.", *rootDeviceName)
			}

			blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{
				DeviceName: aws.String(bd["device_name"].(string)),
				Ebs:        ebs,
			})
		}
	}

	if v, ok := d.GetOk("ephemeral_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{
				DeviceName:  aws.String(bd["device_name"].(string)),
				VirtualName: aws.String(bd["virtual_name"].(string)),
			})
		}
	}

	if v, ok := d.GetOk("root_block_device"); ok {
		vL := v.(*schema.Set).List()
		if len(vL) > 1 {
			return fmt.Errorf("Cannot specify more than one root_block_device.")
		}
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &autoscaling.Ebs{
				DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
			}

			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Int64(int64(v))
			}

			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}

			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.Iops = aws.Int64(int64(v))
			}

			if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil {
				if dn == nil {
					return fmt.Errorf(
						"Expected to find a Root Device name for AMI (%s), but got none",
						d.Get("image_id").(string))
				}
				blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{
					DeviceName: dn,
					Ebs:        ebs,
				})
			} else {
				return err
			}
		}
	}

	if len(blockDevices) > 0 {
		createLaunchConfigurationOpts.BlockDeviceMappings = blockDevices
	}

	var lcName string
	if v, ok := d.GetOk("name"); ok {
		lcName = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		lcName = resource.PrefixedUniqueId(v.(string))
	} else {
		lcName = resource.UniqueId()
	}
	createLaunchConfigurationOpts.LaunchConfigurationName = aws.String(lcName)

	log.Printf(
		"[DEBUG] autoscaling create launch configuration: %s", createLaunchConfigurationOpts)

	// IAM profiles can take ~10 seconds to propagate in AWS:
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
	err = resource.Retry(30*time.Second, func() *resource.RetryError {
		_, err := autoscalingconn.CreateLaunchConfiguration(&createLaunchConfigurationOpts)
		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if strings.Contains(awsErr.Message(), "Invalid IamInstanceProfile") {
					return resource.RetryableError(err)
				}
				if strings.Contains(awsErr.Message(), "You are not authorized to perform this operation") {
					return resource.RetryableError(err)
				}
			}
			return resource.NonRetryableError(err)
		}
		return nil
	})

	if err != nil {
		return fmt.Errorf("Error creating launch configuration: %s", err)
	}

	d.SetId(lcName)
	log.Printf("[INFO] launch configuration ID: %s", d.Id())

	// We put a Retry here since sometimes eventual consistency bites
	// us and we need to retry a few times to get the LC to load properly
	return resource.Retry(30*time.Second, func() *resource.RetryError {
		err := resourceAwsLaunchConfigurationRead(d, meta)
		if err != nil {
			return resource.RetryableError(err)
		}
		return nil
	})
}
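// fetchRootDeviceName is used twice above to guard against declaring the
// root volume as an ebs_block_device; a hedged sketch, assuming it resolves
// the AMI's root device via DescribeImages (instance-store AMIs yield nil):
func fetchRootDeviceName(ami string, conn *ec2.EC2) (*string, error) {
	if ami == "" {
		return nil, fmt.Errorf("Cannot fetch root device name for blank AMI ID")
	}
	res, err := conn.DescribeImages(&ec2.DescribeImagesInput{
		ImageIds: []*string{aws.String(ami)},
	})
	if err != nil {
		return nil, err
	}
	if len(res.Images) == 0 {
		return nil, fmt.Errorf("AMI %q not found", ami)
	}
	return res.Images[0].RootDeviceName, nil
}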
func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error {
	elbconn := meta.(*AWSClient).elbconn

	// Expand the "listener" set to aws-sdk-go compat []*elb.Listener
	listeners, err := expandListeners(d.Get("listener").(*schema.Set).List())
	if err != nil {
		return err
	}

	var elbName string
	if v, ok := d.GetOk("name"); ok {
		elbName = v.(string)
	} else {
		elbName = resource.PrefixedUniqueId("tf-lb-")
		d.Set("name", elbName)
	}

	tags := tagsFromMapELB(d.Get("tags").(map[string]interface{}))
	// Provision the elb
	elbOpts := &elb.CreateLoadBalancerInput{
		LoadBalancerName: aws.String(elbName),
		Listeners:        listeners,
		Tags:             tags,
	}

	if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) {
		elbOpts.Scheme = aws.String("internal")
	}

	if v, ok := d.GetOk("availability_zones"); ok {
		elbOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("security_groups"); ok {
		elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("subnets"); ok {
		elbOpts.Subnets = expandStringList(v.(*schema.Set).List())
	}

	log.Printf("[DEBUG] ELB create configuration: %#v", elbOpts)
	if _, err := elbconn.CreateLoadBalancer(elbOpts); err != nil {
		return fmt.Errorf("Error creating ELB: %s", err)
	}

	// Assign the elb's unique identifier for use later
	d.SetId(elbName)
	log.Printf("[INFO] ELB ID: %s", d.Id())

	// Enable partial mode and record what we set
	d.Partial(true)
	d.SetPartial("name")
	d.SetPartial("internal")
	d.SetPartial("availability_zones")
	d.SetPartial("listener")
	d.SetPartial("security_groups")
	d.SetPartial("subnets")

	d.Set("tags", tagsToMapELB(tags))

	return resourceAwsElbUpdate(d, meta)
}
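// expandStringList appears in most of these create functions; a hedged
// sketch, assuming it converts the []interface{} returned by
// schema.Set.List() into the []*string slice the AWS SDK expects:
func expandStringList(configured []interface{}) []*string {
	vs := make([]*string, 0, len(configured))
	for _, v := range configured {
		vs = append(vs, aws.String(v.(string)))
	}
	return vs
}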
func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn

	var autoScalingGroupOpts autoscaling.CreateAutoScalingGroupInput

	var asgName string
	if v, ok := d.GetOk("name"); ok {
		asgName = v.(string)
	} else {
		asgName = resource.PrefixedUniqueId("tf-asg-")
		d.Set("name", asgName)
	}

	autoScalingGroupOpts.AutoScalingGroupName = aws.String(asgName)
	autoScalingGroupOpts.LaunchConfigurationName = aws.String(d.Get("launch_configuration").(string))
	autoScalingGroupOpts.MinSize = aws.Int64(int64(d.Get("min_size").(int)))
	autoScalingGroupOpts.MaxSize = aws.Int64(int64(d.Get("max_size").(int)))

	// Availability Zones are optional if VPC Zone Identifier(s) are specified
	if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 {
		autoScalingGroupOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("tag"); ok {
		autoScalingGroupOpts.Tags = autoscalingTagsFromMap(
			setToMapByKey(v.(*schema.Set), "key"), d.Get("name").(string))
	}

	if v, ok := d.GetOk("default_cooldown"); ok {
		autoScalingGroupOpts.DefaultCooldown = aws.Int64(int64(v.(int)))
	}

	if v, ok := d.GetOk("health_check_type"); ok && v.(string) != "" {
		autoScalingGroupOpts.HealthCheckType = aws.String(v.(string))
	}

	if v, ok := d.GetOk("desired_capacity"); ok {
		autoScalingGroupOpts.DesiredCapacity = aws.Int64(int64(v.(int)))
	}

	if v, ok := d.GetOk("health_check_grace_period"); ok {
		autoScalingGroupOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int)))
	}

	if v, ok := d.GetOk("placement_group"); ok {
		autoScalingGroupOpts.PlacementGroup = aws.String(v.(string))
	}

	if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 {
		autoScalingGroupOpts.LoadBalancerNames = expandStringList(
			v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("vpc_zone_identifier"); ok && v.(*schema.Set).Len() > 0 {
		autoScalingGroupOpts.VPCZoneIdentifier = expandVpcZoneIdentifiers(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 {
		autoScalingGroupOpts.TerminationPolicies = expandStringList(v.([]interface{}))
	}

	log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", autoScalingGroupOpts)
	_, err := conn.CreateAutoScalingGroup(&autoScalingGroupOpts)
	if err != nil {
		return fmt.Errorf("Error creating Autoscaling Group: %s", err)
	}

	d.SetId(d.Get("name").(string))
	log.Printf("[INFO] AutoScaling Group ID: %s", d.Id())

	if err := waitForASGCapacity(d, meta, capacitySatifiedCreate); err != nil {
		return err
	}

	if _, ok := d.GetOk("enabled_metrics"); ok {
		metricsErr := enableASGMetricsCollection(d, conn)
		if metricsErr != nil {
			return metricsErr
		}
	}

	return resourceAwsAutoscalingGroupRead(d, meta)
}
func resourceAwsS3BucketNotificationPut(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn
	bucket := d.Get("bucket").(string)

	// TopicNotifications
	topicNotifications := d.Get("topic").([]interface{})
	topicConfigs := make([]*s3.TopicConfiguration, 0, len(topicNotifications))
	for i, c := range topicNotifications {
		tc := &s3.TopicConfiguration{}

		c := c.(map[string]interface{})

		// Id
		if val, ok := c["id"].(string); ok && val != "" {
			tc.Id = aws.String(val)
		} else {
			tc.Id = aws.String(resource.PrefixedUniqueId("tf-s3-topic-"))
		}

		// TopicArn
		if val, ok := c["topic_arn"].(string); ok {
			tc.TopicArn = aws.String(val)
		}

		// Events
		events := d.Get(fmt.Sprintf("topic.%d.events", i)).(*schema.Set).List()
		tc.Events = make([]*string, 0, len(events))
		for _, e := range events {
			tc.Events = append(tc.Events, aws.String(e.(string)))
		}

		// Filter
		filterRules := make([]*s3.FilterRule, 0, 2)
		if val, ok := c["filter_prefix"].(string); ok && val != "" {
			filterRule := &s3.FilterRule{
				Name:  aws.String("prefix"),
				Value: aws.String(val),
			}
			filterRules = append(filterRules, filterRule)
		}
		if val, ok := c["filter_suffix"].(string); ok && val != "" {
			filterRule := &s3.FilterRule{
				Name:  aws.String("suffix"),
				Value: aws.String(val),
			}
			filterRules = append(filterRules, filterRule)
		}
		if len(filterRules) > 0 {
			tc.Filter = &s3.NotificationConfigurationFilter{
				Key: &s3.KeyFilter{
					FilterRules: filterRules,
				},
			}
		}
		topicConfigs = append(topicConfigs, tc)
	}

	// SQS
	queueNotifications := d.Get("queue").([]interface{})
	queueConfigs := make([]*s3.QueueConfiguration, 0, len(queueNotifications))
	for i, c := range queueNotifications {
		qc := &s3.QueueConfiguration{}

		c := c.(map[string]interface{})

		// Id
		if val, ok := c["id"].(string); ok && val != "" {
			qc.Id = aws.String(val)
		} else {
			qc.Id = aws.String(resource.PrefixedUniqueId("tf-s3-queue-"))
		}

		// QueueArn
		if val, ok := c["queue_arn"].(string); ok {
			qc.QueueArn = aws.String(val)
		}

		// Events
		events := d.Get(fmt.Sprintf("queue.%d.events", i)).(*schema.Set).List()
		qc.Events = make([]*string, 0, len(events))
		for _, e := range events {
			qc.Events = append(qc.Events, aws.String(e.(string)))
		}

		// Filter
		filterRules := make([]*s3.FilterRule, 0, 2)
		if val, ok := c["filter_prefix"].(string); ok && val != "" {
			filterRule := &s3.FilterRule{
				Name:  aws.String("prefix"),
				Value: aws.String(val),
			}
			filterRules = append(filterRules, filterRule)
		}
		if val, ok := c["filter_suffix"].(string); ok && val != "" {
			filterRule := &s3.FilterRule{
				Name:  aws.String("suffix"),
				Value: aws.String(val),
			}
			filterRules = append(filterRules, filterRule)
		}
		if len(filterRules) > 0 {
			qc.Filter = &s3.NotificationConfigurationFilter{
				Key: &s3.KeyFilter{
					FilterRules: filterRules,
				},
			}
		}
		queueConfigs = append(queueConfigs, qc)
	}

	// Lambda
	lambdaFunctionNotifications := d.Get("lambda_function").([]interface{})
	lambdaConfigs := make([]*s3.LambdaFunctionConfiguration, 0, len(lambdaFunctionNotifications))
	for i, c := range lambdaFunctionNotifications {
		lc := &s3.LambdaFunctionConfiguration{}

		c := c.(map[string]interface{})

		// Id
		if val, ok := c["id"].(string); ok && val != "" {
			lc.Id = aws.String(val)
		} else {
			lc.Id = aws.String(resource.PrefixedUniqueId("tf-s3-lambda-"))
		}

		// LambdaFunctionArn
		if val, ok := c["lambda_function_arn"].(string); ok {
			lc.LambdaFunctionArn = aws.String(val)
		}

		// Events
		events := d.Get(fmt.Sprintf("lambda_function.%d.events", i)).(*schema.Set).List()
		lc.Events = make([]*string, 0, len(events))
		for _, e := range events {
			lc.Events = append(lc.Events, aws.String(e.(string)))
		}

		// Filter
		filterRules := make([]*s3.FilterRule, 0, 2)
		if val, ok := c["filter_prefix"].(string); ok && val != "" {
			filterRule := &s3.FilterRule{
				Name:  aws.String("prefix"),
				Value: aws.String(val),
			}
			filterRules = append(filterRules, filterRule)
		}
		if val, ok := c["filter_suffix"].(string); ok && val != "" {
			filterRule := &s3.FilterRule{
				Name:  aws.String("suffix"),
				Value: aws.String(val),
			}
			filterRules = append(filterRules, filterRule)
		}
		if len(filterRules) > 0 {
			lc.Filter = &s3.NotificationConfigurationFilter{
				Key: &s3.KeyFilter{
					FilterRules: filterRules,
				},
			}
		}
		lambdaConfigs = append(lambdaConfigs, lc)
	}

	notificationConfiguration := &s3.NotificationConfiguration{}
	if len(lambdaConfigs) > 0 {
		notificationConfiguration.LambdaFunctionConfigurations = lambdaConfigs
	}
	if len(queueConfigs) > 0 {
		notificationConfiguration.QueueConfigurations = queueConfigs
	}
	if len(topicConfigs) > 0 {
		notificationConfiguration.TopicConfigurations = topicConfigs
	}
	i := &s3.PutBucketNotificationConfigurationInput{
		Bucket:                    aws.String(bucket),
		NotificationConfiguration: notificationConfiguration,
	}

	log.Printf("[DEBUG] S3 bucket: %s, Putting notification: %v", bucket, i)
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if _, err := s3conn.PutBucketNotificationConfiguration(i); err != nil {
			if awserr, ok := err.(awserr.Error); ok {
				switch awserr.Message() {
				case "Unable to validate the following destination configurations":
					return resource.RetryableError(awserr)
				}
			}
			// Didn't recognize the error, so shouldn't retry.
			return resource.NonRetryableError(err)
		}
		// Successful put configuration
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 notification configuration: %s", err)
	}

	d.SetId(bucket)

	return resourceAwsS3BucketNotificationRead(d, meta)
}
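// The topic, queue, and lambda loops above build identical prefix/suffix
// filters; a refactoring sketch of that shared piece (the helper name is
// hypothetical, not part of the original provider):
func expandS3KeyFilter(c map[string]interface{}) *s3.NotificationConfigurationFilter {
	filterRules := make([]*s3.FilterRule, 0, 2)
	if val, ok := c["filter_prefix"].(string); ok && val != "" {
		filterRules = append(filterRules, &s3.FilterRule{
			Name:  aws.String("prefix"),
			Value: aws.String(val),
		})
	}
	if val, ok := c["filter_suffix"].(string); ok && val != "" {
		filterRules = append(filterRules, &s3.FilterRule{
			Name:  aws.String("suffix"),
			Value: aws.String(val),
		})
	}
	if len(filterRules) == 0 {
		return nil
	}
	return &s3.NotificationConfigurationFilter{
		Key: &s3.KeyFilter{FilterRules: filterRules},
	}
}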
func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error {
	elbconn := meta.(*AWSClient).elbconn

	// Expand the "listener" set to aws-sdk-go compat []*elb.Listener
	listeners, err := expandListeners(d.Get("listener").(*schema.Set).List())
	if err != nil {
		return err
	}

	var elbName string
	if v, ok := d.GetOk("name"); ok {
		elbName = v.(string)
	} else {
		elbName = resource.PrefixedUniqueId("tf-lb-")
		d.Set("name", elbName)
	}

	tags := tagsFromMapELB(d.Get("tags").(map[string]interface{}))
	// Provision the elb
	elbOpts := &elb.CreateLoadBalancerInput{
		LoadBalancerName: aws.String(elbName),
		Listeners:        listeners,
		Tags:             tags,
	}

	if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) {
		elbOpts.Scheme = aws.String("internal")
	}

	if v, ok := d.GetOk("availability_zones"); ok {
		elbOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("security_groups"); ok {
		elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("subnets"); ok {
		elbOpts.Subnets = expandStringList(v.(*schema.Set).List())
	}

	log.Printf("[DEBUG] ELB create configuration: %#v", elbOpts)
	err = resource.Retry(1*time.Minute, func() *resource.RetryError {
		_, err := elbconn.CreateLoadBalancer(elbOpts)

		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				// Check for IAM SSL Cert error, an eventual consistency issue
				if awsErr.Code() == "CertificateNotFound" {
					return resource.RetryableError(
						fmt.Errorf("Error creating ELB Listener with SSL Cert, retrying: %s", err))
				}
			}
			return resource.NonRetryableError(err)
		}
		return nil
	})

	if err != nil {
		return err
	}

	// Assign the elb's unique identifier for use later
	d.SetId(elbName)
	log.Printf("[INFO] ELB ID: %s", d.Id())

	// Enable partial mode and record what we set
	d.Partial(true)
	d.SetPartial("name")
	d.SetPartial("internal")
	d.SetPartial("availability_zones")
	d.SetPartial("listener")
	d.SetPartial("security_groups")
	d.SetPartial("subnets")

	d.Set("tags", tagsToMapELB(tags))

	return resourceAwsElbUpdate(d, meta)
}
func resourceAwsEfsFileSystemCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn

	creationToken := ""
	if v, ok := d.GetOk("creation_token"); ok {
		creationToken = v.(string)
	} else {
		if v, ok := d.GetOk("reference_name"); ok {
			creationToken = resource.PrefixedUniqueId(fmt.Sprintf("%s-", v.(string)))
			log.Printf("[WARN] Using deprecated `reference_name' attribute.")
		} else {
			creationToken = resource.UniqueId()
		}
	}

	createOpts := &efs.CreateFileSystemInput{
		CreationToken: aws.String(creationToken),
	}

	if v, ok := d.GetOk("performance_mode"); ok {
		createOpts.PerformanceMode = aws.String(v.(string))
	}

	log.Printf("[DEBUG] EFS file system create options: %#v", *createOpts)
	fs, err := conn.CreateFileSystem(createOpts)
	if err != nil {
		return fmt.Errorf("Error creating EFS file system: %s", err)
	}

	d.SetId(*fs.FileSystemId)
	log.Printf("[INFO] EFS file system ID: %s", d.Id())

	stateConf := &resource.StateChangeConf{
		Pending: []string{"creating"},
		Target:  []string{"available"},
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{
				FileSystemId: aws.String(d.Id()),
			})
			if err != nil {
				return nil, "error", err
			}

			if len(resp.FileSystems) < 1 {
				return nil, "not-found", fmt.Errorf("EFS file system %q not found", d.Id())
			}

			fs := resp.FileSystems[0]
			log.Printf("[DEBUG] current status of %q: %q", *fs.FileSystemId, *fs.LifeCycleState)
			return fs, *fs.LifeCycleState, nil
		},
		Timeout:    10 * time.Minute,
		Delay:      2 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("Error waiting for EFS file system (%q) to create: %q",
			d.Id(), err.Error())
	}
	log.Printf("[DEBUG] EFS file system %q created.", d.Id())

	return resourceAwsEfsFileSystemUpdate(d, meta)
}
func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	instanceProperties := &compute.InstanceProperties{}

	instanceProperties.CanIpForward = d.Get("can_ip_forward").(bool)
	instanceProperties.Description = d.Get("instance_description").(string)
	instanceProperties.MachineType = d.Get("machine_type").(string)
	disks, err := buildDisks(d, meta)
	if err != nil {
		return err
	}
	instanceProperties.Disks = disks
	metadata, err := resourceInstanceMetadata(d)
	if err != nil {
		return err
	}
	instanceProperties.Metadata = metadata
	networks, err := buildNetworks(d, meta)
	if err != nil {
		return err
	}
	instanceProperties.NetworkInterfaces = networks

	instanceProperties.Scheduling = &compute.Scheduling{}
	instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE"

	if v, ok := d.GetOk("automatic_restart"); ok {
		instanceProperties.Scheduling.AutomaticRestart = v.(bool)
	}

	if v, ok := d.GetOk("on_host_maintenance"); ok {
		instanceProperties.Scheduling.OnHostMaintenance = v.(string)
	}

	forceSendFieldsScheduling := make([]string, 0, 3)
	hasSendMaintenance := false
	if v, ok := d.GetOk("scheduling"); ok {
		_schedulings := v.([]interface{})
		if len(_schedulings) > 1 {
			return fmt.Errorf("Error, at most one `scheduling` block can be defined")
		}
		_scheduling := _schedulings[0].(map[string]interface{})

		if vp, okp := _scheduling["automatic_restart"]; okp {
			instanceProperties.Scheduling.AutomaticRestart = vp.(bool)
			forceSendFieldsScheduling = append(forceSendFieldsScheduling, "AutomaticRestart")
		}

		if vp, okp := _scheduling["on_host_maintenance"]; okp {
			instanceProperties.Scheduling.OnHostMaintenance = vp.(string)
			forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance")
			hasSendMaintenance = true
		}

		if vp, okp := _scheduling["preemptible"]; okp {
			instanceProperties.Scheduling.Preemptible = vp.(bool)
			forceSendFieldsScheduling = append(forceSendFieldsScheduling, "Preemptible")
			if vp.(bool) && !hasSendMaintenance {
				instanceProperties.Scheduling.OnHostMaintenance = "TERMINATE"
				forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance")
			}
		}
	}
	instanceProperties.Scheduling.ForceSendFields = forceSendFieldsScheduling

	serviceAccountsCount := d.Get("service_account.#").(int)
	serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount)
	for i := 0; i < serviceAccountsCount; i++ {
		prefix := fmt.Sprintf("service_account.%d", i)

		scopesCount := d.Get(prefix + ".scopes.#").(int)
		scopes := make([]string, 0, scopesCount)
		for j := 0; j < scopesCount; j++ {
			scope := d.Get(fmt.Sprintf(prefix+".scopes.%d", j)).(string)
			scopes = append(scopes, canonicalizeServiceScope(scope))
		}

		serviceAccount := &compute.ServiceAccount{
			Email:  "default",
			Scopes: scopes,
		}

		serviceAccounts = append(serviceAccounts, serviceAccount)
	}
	instanceProperties.ServiceAccounts = serviceAccounts

	instanceProperties.Tags = resourceInstanceTags(d)

	var itName string
	if v, ok := d.GetOk("name"); ok {
		itName = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		itName = resource.PrefixedUniqueId(v.(string))
	} else {
		itName = resource.UniqueId()
	}
	instanceTemplate := compute.InstanceTemplate{
		Description: d.Get("description").(string),
		Properties:  instanceProperties,
		Name:        itName,
	}

	op, err := config.clientCompute.InstanceTemplates.Insert(
		project, &instanceTemplate).Do()
	if err != nil {
		return fmt.Errorf("Error creating instance: %s", err)
	}

	// Store the ID now
	d.SetId(instanceTemplate.Name)

	err = computeOperationWaitGlobal(config, op, project, "Creating Instance Template")
	if err != nil {
		return err
	}

	return resourceComputeInstanceTemplateRead(d, meta)
}
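// A hedged sketch of canonicalizeServiceScope: short aliases expand to full
// googleapis.com/auth URLs and anything unrecognized passes through (the
// real alias table is longer than shown here):
func canonicalizeServiceScope(scope string) string {
	scopeMap := map[string]string{
		"compute-ro": "https://www.googleapis.com/auth/compute.readonly",
		"compute-rw": "https://www.googleapis.com/auth/compute",
		"storage-ro": "https://www.googleapis.com/auth/devstorage.read_only",
		"storage-rw": "https://www.googleapis.com/auth/devstorage.read_write",
	}
	if matchedURL, ok := scopeMap[scope]; ok {
		return matchedURL
	}
	return scope
}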
func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)

	lifecycleRules := d.Get("lifecycle_rule").([]interface{})

	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))

	for i, lifecycleRule := range lifecycleRules {
		r := lifecycleRule.(map[string]interface{})

		rule := &s3.LifecycleRule{
			Prefix: aws.String(r["prefix"].(string)),
		}

		// ID
		if val, ok := r["id"].(string); ok && val != "" {
			rule.ID = aws.String(val)
		} else {
			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
		}

		// Enabled
		if val, ok := r["enabled"].(bool); ok && val {
			rule.Status = aws.String(s3.ExpirationStatusEnabled)
		} else {
			rule.Status = aws.String(s3.ExpirationStatusDisabled)
		}

		// AbortIncompleteMultipartUpload
		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
				DaysAfterInitiation: aws.Int64(int64(val)),
			}
		}

		// Expiration
		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
		if len(expiration) > 0 {
			e := expiration[0].(map[string]interface{})
			i := &s3.LifecycleExpiration{}

			if val, ok := e["date"].(string); ok && val != "" {
				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
				if err != nil {
					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
				}
				i.Date = aws.Time(t)
			} else if val, ok := e["days"].(int); ok && val > 0 {
				i.Days = aws.Int64(int64(val))
			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
				i.ExpiredObjectDeleteMarker = aws.Bool(val)
			}
			rule.Expiration = i
		}

		// NoncurrentVersionExpiration
		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
		if len(nc_expiration) > 0 {
			e := nc_expiration[0].(map[string]interface{})

			if val, ok := e["days"].(int); ok && val > 0 {
				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(int64(val)),
				}
			}
		}

		// Transitions
		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
		if len(transitions) > 0 {
			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
			for _, transition := range transitions {
				transition := transition.(map[string]interface{})
				i := &s3.Transition{}
				if val, ok := transition["date"].(string); ok && val != "" {
					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
					if err != nil {
						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
					}
					i.Date = aws.Time(t)
				} else if val, ok := transition["days"].(int); ok && val > 0 {
					i.Days = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.Transitions = append(rule.Transitions, i)
			}
		}
		// NoncurrentVersionTransitions
		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
		if len(nc_transitions) > 0 {
			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
			for _, transition := range nc_transitions {
				transition := transition.(map[string]interface{})
				i := &s3.NoncurrentVersionTransition{}
				if val, ok := transition["days"].(int); ok && val > 0 {
					i.NoncurrentDays = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
			}
		}

		rules = append(rules, rule)
	}

	i := &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: rules,
		},
	}

	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
	}

	return nil
}
func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	securityGroupOpts := &ec2.CreateSecurityGroupInput{}

	if v, ok := d.GetOk("vpc_id"); ok {
		securityGroupOpts.VpcId = aws.String(v.(string))
	}

	if v := d.Get("description"); v != nil {
		securityGroupOpts.Description = aws.String(v.(string))
	}

	var groupName string
	if v, ok := d.GetOk("name"); ok {
		groupName = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		groupName = resource.PrefixedUniqueId(v.(string))
	} else {
		groupName = resource.UniqueId()
	}
	securityGroupOpts.GroupName = aws.String(groupName)

	log.Printf(
		"[DEBUG] Security Group create configuration: %#v", securityGroupOpts)
	createResp, err := conn.CreateSecurityGroup(securityGroupOpts)
	if err != nil {
		return fmt.Errorf("Error creating Security Group: %s", err)
	}

	d.SetId(*createResp.GroupId)

	log.Printf("[INFO] Security Group ID: %s", d.Id())

	// Wait for the security group to truly exist
	log.Printf(
		"[DEBUG] Waiting for Security Group (%s) to exist",
		d.Id())
	stateConf := &resource.StateChangeConf{
		Pending: []string{""},
		Target:  []string{"exists"},
		Refresh: SGStateRefreshFunc(conn, d.Id()),
		Timeout: 1 * time.Minute,
	}

	resp, err := stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for Security Group (%s) to become available: %s",
			d.Id(), err)
	}

	// AWS defaults all Security Groups to have an ALLOW ALL egress rule. Here we
	// revoke that rule, so users don't unknowingly have/use it.
	group := resp.(*ec2.SecurityGroup)
	if group.VpcId != nil && *group.VpcId != "" {
		log.Printf("[DEBUG] Revoking default egress rule for Security Group for %s", d.Id())

		req := &ec2.RevokeSecurityGroupEgressInput{
			GroupId: createResp.GroupId,
			IpPermissions: []*ec2.IpPermission{
				{
					FromPort: aws.Int64(0),
					ToPort:   aws.Int64(0),
					IpRanges: []*ec2.IpRange{
						{
							CidrIp: aws.String("0.0.0.0/0"),
						},
					},
					IpProtocol: aws.String("-1"),
				},
			},
		}

		if _, err = conn.RevokeSecurityGroupEgress(req); err != nil {
			return fmt.Errorf(
				"Error revoking default egress rule for Security Group (%s): %s",
				d.Id(), err)
		}

	}

	return resourceAwsSecurityGroupUpdate(d, meta)
}
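// A hedged sketch of SGStateRefreshFunc, the refresh function behind the
// StateChangeConf above: it reports no state until DescribeSecurityGroups
// can see the new group, then reports "exists".
func SGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		req := &ec2.DescribeSecurityGroupsInput{
			GroupIds: []*string{aws.String(id)},
		}
		resp, err := conn.DescribeSecurityGroups(req)
		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "InvalidGroup.NotFound" {
				// Not visible yet; keep polling.
				return nil, "", nil
			}
			return nil, "", err
		}
		if resp == nil || len(resp.SecurityGroups) == 0 {
			return nil, "", nil
		}
		return resp.SecurityGroups[0], "exists", nil
	}
}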
func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn

	var asgName string
	if v, ok := d.GetOk("name"); ok {
		asgName = v.(string)
	} else {
		asgName = resource.PrefixedUniqueId("tf-asg-")
		d.Set("name", asgName)
	}

	createOpts := autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName:             aws.String(asgName),
		LaunchConfigurationName:          aws.String(d.Get("launch_configuration").(string)),
		NewInstancesProtectedFromScaleIn: aws.Bool(d.Get("protect_from_scale_in").(bool)),
	}
	updateOpts := autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String(asgName),
	}

	initialLifecycleHooks := d.Get("initial_lifecycle_hook").(*schema.Set).List()
	twoPhases := len(initialLifecycleHooks) > 0

	minSize := aws.Int64(int64(d.Get("min_size").(int)))
	maxSize := aws.Int64(int64(d.Get("max_size").(int)))

	if twoPhases {
		createOpts.MinSize = aws.Int64(0)
		createOpts.MaxSize = aws.Int64(0)

		updateOpts.MinSize = minSize
		updateOpts.MaxSize = maxSize

		if v, ok := d.GetOk("desired_capacity"); ok {
			updateOpts.DesiredCapacity = aws.Int64(int64(v.(int)))
		}
	} else {
		createOpts.MinSize = minSize
		createOpts.MaxSize = maxSize

		if v, ok := d.GetOk("desired_capacity"); ok {
			createOpts.DesiredCapacity = aws.Int64(int64(v.(int)))
		}
	}

	// Availability Zones are optional if VPC Zone Identifier(s) are specified
	if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 {
		createOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("tag"); ok {
		createOpts.Tags = autoscalingTagsFromMap(
			setToMapByKey(v.(*schema.Set), "key"), d.Get("name").(string))
	}

	if v, ok := d.GetOk("default_cooldown"); ok {
		createOpts.DefaultCooldown = aws.Int64(int64(v.(int)))
	}

	if v, ok := d.GetOk("health_check_type"); ok && v.(string) != "" {
		createOpts.HealthCheckType = aws.String(v.(string))
	}

	if v, ok := d.GetOk("health_check_grace_period"); ok {
		createOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int)))
	}

	if v, ok := d.GetOk("placement_group"); ok {
		createOpts.PlacementGroup = aws.String(v.(string))
	}

	if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 {
		createOpts.LoadBalancerNames = expandStringList(
			v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("vpc_zone_identifier"); ok && v.(*schema.Set).Len() > 0 {
		createOpts.VPCZoneIdentifier = expandVpcZoneIdentifiers(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 {
		createOpts.TerminationPolicies = expandStringList(v.([]interface{}))
	}

	if v, ok := d.GetOk("target_group_arns"); ok && len(v.(*schema.Set).List()) > 0 {
		createOpts.TargetGroupARNs = expandStringList(v.(*schema.Set).List())
	}

	log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", createOpts)
	_, err := conn.CreateAutoScalingGroup(&createOpts)
	if err != nil {
		return fmt.Errorf("Error creating AutoScaling Group: %s", err)
	}

	d.SetId(d.Get("name").(string))
	log.Printf("[INFO] AutoScaling Group ID: %s", d.Id())

	if twoPhases {
		for _, hook := range generatePutLifecycleHookInputs(asgName, initialLifecycleHooks) {
			if err = resourceAwsAutoscalingLifecycleHookPutOp(conn, &hook); err != nil {
				return fmt.Errorf("Error creating initial lifecycle hooks: %s", err)
			}
		}

		_, err = conn.UpdateAutoScalingGroup(&updateOpts)
		if err != nil {
			return fmt.Errorf("Error setting AutoScaling Group initial capacity: %s", err)
		}
	}

	if err := waitForASGCapacity(d, meta, capacitySatisfiedCreate); err != nil {
		return err
	}

	if _, ok := d.GetOk("enabled_metrics"); ok {
		metricsErr := enableASGMetricsCollection(d, conn)
		if metricsErr != nil {
			return metricsErr
		}
	}

	return resourceAwsAutoscalingGroupRead(d, meta)
}