func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn

	// Read the autoscaling group first. If it doesn't exist, we're done.
	// We need the group in order to check if there are instances attached.
	// If so, we need to remove those first.
	g, err := getAwsAutoscalingGroup(d, meta)
	if err != nil {
		return err
	}
	if g == nil {
		return nil
	}
	if len(g.Instances) > 0 || *g.DesiredCapacity > 0 {
		if err := resourceAwsAutoscalingGroupDrain(d, meta); err != nil {
			return err
		}
	}

	log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id())
	deleteopts := autoscaling.DeleteAutoScalingGroupInput{
		AutoScalingGroupName: aws.String(d.Id()),
		ForceDelete:          aws.Bool(d.Get("force_delete").(bool)),
	}

	// We retry the delete operation to handle InUse/InProgress errors coming
	// from scaling operations. We should be able to sneak in a delete in between
	// scaling operations within 5m.
	err = resource.Retry(5*time.Minute, func() error {
		if _, err := conn.DeleteAutoScalingGroup(&deleteopts); err != nil {
			if awserr, ok := err.(awserr.Error); ok {
				switch awserr.Code() {
				case "InvalidGroup.NotFound":
					// Already gone? Sure!
					return nil
				case "ResourceInUse", "ScalingActivityInProgress":
					// These are retryable
					return awserr
				}
			}
			// Didn't recognize the error, so shouldn't retry.
			return resource.RetryError{Err: err}
		}
		// Successful delete
		return nil
	})
	if err != nil {
		return err
	}

	return resource.Retry(5*time.Minute, func() error {
		if g, _ = getAwsAutoscalingGroup(d, meta); g != nil {
			return fmt.Errorf("Auto Scaling Group still exists")
		}
		return nil
	})
}
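// The examples in this file all rely on the same retry contract from the
// helper/resource package of this era: returning nil from the callback stops
// the loop successfully, returning a plain error retries until the timeout
// elapses, and returning a resource.RetryError aborts immediately with the
// wrapped error. The function below is a minimal illustrative sketch of that
// contract (the name retryContractSketch is not part of the original source),
// distilled from the delete function above.
func retryContractSketch(conn *autoscaling.AutoScaling, name string) error {
	return resource.Retry(5*time.Minute, func() error {
		_, err := conn.DeleteAutoScalingGroup(&autoscaling.DeleteAutoScalingGroupInput{
			AutoScalingGroupName: aws.String(name),
		})
		if err == nil {
			// Success: stop retrying.
			return nil
		}
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ScalingActivityInProgress" {
			// Transient: retried until the timeout elapses.
			return err
		}
		// Unrecognized: abort the retry loop immediately.
		return resource.RetryError{Err: err}
	})
}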
func resourceAwsVpcDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn
	vpcID := d.Id()
	DeleteVpcOpts := &ec2.DeleteVpcInput{
		VpcId: &vpcID,
	}
	log.Printf("[INFO] Deleting VPC: %s", d.Id())

	return resource.Retry(5*time.Minute, func() error {
		_, err := conn.DeleteVpc(DeleteVpcOpts)
		if err == nil {
			return nil
		}

		ec2err, ok := err.(awserr.Error)
		if !ok {
			return &resource.RetryError{Err: err}
		}

		switch ec2err.Code() {
		case "InvalidVpcID.NotFound":
			return nil
		case "DependencyViolation":
			return err
		}

		return &resource.RetryError{
			Err: fmt.Errorf("Error deleting VPC: %s", err),
		}
	})
}
func resourceAwsIAMServerCertificateDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).iamconn
	log.Printf("[INFO] Deleting IAM Server Certificate: %s", d.Id())
	err := resource.Retry(1*time.Minute, func() error {
		_, err := conn.DeleteServerCertificate(&iam.DeleteServerCertificateInput{
			ServerCertificateName: aws.String(d.Get("name").(string)),
		})

		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if awsErr.Code() == "DeleteConflict" && strings.Contains(awsErr.Message(), "currently in use by arn") {
					return fmt.Errorf("[WARN] Conflict deleting server certificate: %s, retrying", awsErr.Message())
				}
			}
			return resource.RetryError{Err: err}
		}
		return nil
	})

	if err != nil {
		return err
	}

	d.SetId("")
	return nil
}
// resourceAwsLambdaEventSourceMappingUpdate maps to:
// UpdateEventSourceMapping in the API / SDK
func resourceAwsLambdaEventSourceMappingUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[DEBUG] Updating Lambda event source mapping: %s", d.Id())

	params := &lambda.UpdateEventSourceMappingInput{
		UUID:         aws.String(d.Id()),
		BatchSize:    aws.Int64(int64(d.Get("batch_size").(int))),
		FunctionName: aws.String(d.Get("function_name").(string)),
		Enabled:      aws.Bool(d.Get("enabled").(bool)),
	}

	err := resource.Retry(1*time.Minute, func() error {
		_, err := conn.UpdateEventSourceMapping(params)
		if err != nil {
			if awserr, ok := err.(awserr.Error); ok {
				if awserr.Code() == "InvalidParameterValueException" {
					// Retryable
					return awserr
				}
			}
			// Not retryable
			return resource.RetryError{Err: err}
		}
		// No error
		return nil
	})

	if err != nil {
		return fmt.Errorf("Error updating Lambda event source mapping: %s", err)
	}

	return resourceAwsLambdaEventSourceMappingRead(d, meta)
}
func resourceAwsElasticacheSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn

	log.Printf("[DEBUG] Cache subnet group delete: %s", d.Id())

	return resource.Retry(5*time.Minute, func() error {
		_, err := conn.DeleteCacheSubnetGroup(&elasticache.DeleteCacheSubnetGroupInput{
			CacheSubnetGroupName: aws.String(d.Id()),
		})
		if err != nil {
			apierr, ok := err.(awserr.Error)
			if !ok {
				return err
			}
			log.Printf("[DEBUG] APIError.Code: %v", apierr.Code)
			switch apierr.Code() {
			case "DependencyViolation":
				// If it is a dependency violation, we want to retry
				return err
			default:
				return resource.RetryError{Err: err}
			}
		}
		return nil
	})
}
func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	log.Printf("[DEBUG] Security Group destroy: %v", d.Id())

	return resource.Retry(5*time.Minute, func() error {
		_, err := conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{
			GroupId: aws.String(d.Id()),
		})
		if err != nil {
			ec2err, ok := err.(awserr.Error)
			if !ok {
				return err
			}

			switch ec2err.Code() {
			case "InvalidGroup.NotFound":
				return nil
			case "DependencyViolation":
				// If it is a dependency violation, we want to retry
				return err
			default:
				// Any other error, we want to quit the retry loop immediately
				return resource.RetryError{Err: err}
			}
		}

		return nil
	})
}
func resourceAwsAutoscalingLifecycleHookPut(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn
	params := getAwsAutoscalingPutLifecycleHookInput(d)

	log.Printf("[DEBUG] AutoScaling PutLifecyleHook: %s", params)
	err := resource.Retry(5*time.Minute, func() error {
		_, err := conn.PutLifecycleHook(&params)

		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if strings.Contains(awsErr.Message(), "Unable to publish test message to notification target") {
					return fmt.Errorf("[DEBUG] Retrying AWS AutoScaling Lifecycle Hook: %s", params)
				}
			}
			return resource.RetryError{Err: fmt.Errorf("Error putting lifecycle hook: %s", err)}
		}
		return nil
	})

	if err != nil {
		return err
	}

	d.SetId(d.Get("name").(string))

	return resourceAwsAutoscalingLifecycleHookRead(d, meta)
}
func resourceAwsVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	// Detach if it is attached
	if err := resourceAwsVpnGatewayDetach(d, meta); err != nil {
		return err
	}

	log.Printf("[INFO] Deleting VPN gateway: %s", d.Id())

	return resource.Retry(5*time.Minute, func() error {
		_, err := conn.DeleteVpnGateway(&ec2.DeleteVpnGatewayInput{
			VpnGatewayId: aws.String(d.Id()),
		})
		if err == nil {
			return nil
		}

		ec2err, ok := err.(awserr.Error)
		if !ok {
			return err
		}

		switch ec2err.Code() {
		case "InvalidVpnGatewayID.NotFound":
			return nil
		case "IncorrectState":
			return err // retry
		}

		return resource.RetryError{Err: err}
	})
}
func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecsconn

	input := ecs.CreateServiceInput{
		ServiceName:    aws.String(d.Get("name").(string)),
		TaskDefinition: aws.String(d.Get("task_definition").(string)),
		DesiredCount:   aws.Int64(int64(d.Get("desired_count").(int))),
		ClientToken:    aws.String(resource.UniqueId()),
	}

	if v, ok := d.GetOk("cluster"); ok {
		input.Cluster = aws.String(v.(string))
	}

	loadBalancers := expandEcsLoadBalancers(d.Get("load_balancer").(*schema.Set).List())
	if len(loadBalancers) > 0 {
		log.Printf("[DEBUG] Adding ECS load balancers: %s", loadBalancers)
		input.LoadBalancers = loadBalancers
	}
	if v, ok := d.GetOk("iam_role"); ok {
		input.Role = aws.String(v.(string))
	}

	log.Printf("[DEBUG] Creating ECS service: %s", input)

	// Retry due to AWS IAM policy eventual consistency
	// See https://github.com/hashicorp/terraform/issues/2869
	var out *ecs.CreateServiceOutput
	var err error
	err = resource.Retry(2*time.Minute, func() error {
		out, err = conn.CreateService(&input)

		if err != nil {
			ec2err, ok := err.(awserr.Error)
			if !ok {
				return &resource.RetryError{Err: err}
			}
			if ec2err.Code() == "InvalidParameterException" {
				log.Printf("[DEBUG] Trying to create ECS service again: %q",
					ec2err.Message())
				return err
			}

			return &resource.RetryError{Err: err}
		}

		return nil
	})
	if err != nil {
		return err
	}

	service := *out.Service

	log.Printf("[DEBUG] ECS service created: %s", *service.ServiceArn)
	d.SetId(*service.ServiceArn)

	return resourceAwsEcsServiceUpdate(d, meta)
}
func resourceAwsNetworkAclDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	log.Printf("[INFO] Deleting Network Acl: %s", d.Id())
	return resource.Retry(5*time.Minute, func() error {
		_, err := conn.DeleteNetworkAcl(&ec2.DeleteNetworkAclInput{
			NetworkAclId: aws.String(d.Id()),
		})
		if err != nil {
			ec2err, ok := err.(awserr.Error)
			if !ok {
				// Not an AWS error; don't retry.
				return resource.RetryError{Err: err}
			}
			switch ec2err.Code() {
			case "InvalidNetworkAclID.NotFound":
				return nil
			case "DependencyViolation":
				// In case of a dependency violation, we remove the association between the subnet and the network ACL.
				// The subnet then falls back to the VPC's default ACL.
				var associations []*ec2.NetworkAclAssociation
				if v, ok := d.GetOk("subnet_id"); ok {

					a, err := findNetworkAclAssociation(v.(string), conn)
					if err != nil {
						return resource.RetryError{Err: fmt.Errorf("Dependency violation: Cannot find ACL %s: %s", d.Id(), err)}
					}
					associations = append(associations, a)
				} else if v, ok := d.GetOk("subnet_ids"); ok {
					ids := v.(*schema.Set).List()
					for _, i := range ids {
						a, err := findNetworkAclAssociation(i.(string), conn)
						if err != nil {
							return resource.RetryError{Err: fmt.Errorf("Dependency violation: Cannot delete acl %s: %s", d.Id(), err)}
						}
						associations = append(associations, a)
					}
				}
				defaultAcl, err := getDefaultNetworkAcl(d.Get("vpc_id").(string), conn)
				if err != nil {
					return resource.RetryError{Err: fmt.Errorf("Dependency violation: Cannot delete acl %s: %s", d.Id(), err)}
				}

				for _, a := range associations {
					_, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{
						AssociationId: a.NetworkAclAssociationId,
						NetworkAclId:  defaultAcl.NetworkAclId,
					})
				}
				return resource.RetryError{Err: err}
			default:
				// Any other error, we want to quit the retry loop immediately
				return resource.RetryError{Err: err}
			}
		}
		log.Printf("[Info] Deleted network ACL %s successfully", d.Id())
		return nil
	})
}
func resourceAwsVpnGatewayAttach(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	if d.Get("vpc_id").(string) == "" {
		log.Printf(
			"[DEBUG] Not attaching VPN Gateway '%s' as no VPC ID is set",
			d.Id())
		return nil
	}

	log.Printf(
		"[INFO] Attaching VPN Gateway '%s' to VPC '%s'",
		d.Id(),
		d.Get("vpc_id").(string))

	req := &ec2.AttachVpnGatewayInput{
		VpnGatewayId: aws.String(d.Id()),
		VpcId:        aws.String(d.Get("vpc_id").(string)),
	}

	err := resource.Retry(30*time.Second, func() error {
		_, err := conn.AttachVpnGateway(req)
		if err != nil {
			if ec2err, ok := err.(awserr.Error); ok {
				if "InvalidVpnGatewayID.NotFound" == ec2err.Code() {
					//retry
					return fmt.Errorf("Gateway not found, retry for eventual consistancy")
				}
			}
			return resource.RetryError{Err: err}
		}
		return nil
	})

	if err != nil {
		return err
	}

	// Wait for it to be fully attached before continuing
	log.Printf("[DEBUG] Waiting for VPN gateway (%s) to attach", d.Id())
	stateConf := &resource.StateChangeConf{
		Pending: []string{"detached", "attaching"},
		Target:  "attached",
		Refresh: vpnGatewayAttachStateRefreshFunc(conn, d.Id(), "available"),
		Timeout: 1 * time.Minute,
	}
	if _, err := stateConf.WaitForState(); err != nil {
		return fmt.Errorf(
			"Error waiting for VPN gateway (%s) to attach: %s",
			d.Id(), err)
	}

	return nil
}
func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).codedeployconn

	application := d.Get("app_name").(string)
	deploymentGroup := d.Get("deployment_group_name").(string)

	input := codedeploy.CreateDeploymentGroupInput{
		ApplicationName:     aws.String(application),
		DeploymentGroupName: aws.String(deploymentGroup),
		ServiceRoleArn:      aws.String(d.Get("service_role_arn").(string)),
	}
	if attr, ok := d.GetOk("deployment_config_name"); ok {
		input.DeploymentConfigName = aws.String(attr.(string))
	}
	if attr, ok := d.GetOk("autoscaling_groups"); ok {
		input.AutoScalingGroups = expandStringList(attr.(*schema.Set).List())
	}
	if attr, ok := d.GetOk("on_premises_instance_tag_filters"); ok {
		onPremFilters := buildOnPremTagFilters(attr.(*schema.Set).List())
		input.OnPremisesInstanceTagFilters = onPremFilters
	}
	if attr, ok := d.GetOk("ec2_tag_filter"); ok {
		ec2TagFilters := buildEC2TagFilters(attr.(*schema.Set).List())
		input.Ec2TagFilters = ec2TagFilters
	}

	// Retry to handle IAM role eventual consistency.
	var resp *codedeploy.CreateDeploymentGroupOutput
	var err error
	err = resource.Retry(2*time.Minute, func() error {
		resp, err = conn.CreateDeploymentGroup(&input)
		if err != nil {
			codedeployErr, ok := err.(awserr.Error)
			if !ok {
				return &resource.RetryError{Err: err}
			}
			if codedeployErr.Code() == "InvalidRoleException" {
				log.Printf("[DEBUG] Trying to create deployment group again: %q",
					codedeployErr.Message())
				return err
			}

			return &resource.RetryError{Err: err}
		}
		return nil
	})
	if err != nil {
		return err
	}

	d.SetId(*resp.DeploymentGroupId)

	return resourceAwsCodeDeployDeploymentGroupRead(d, meta)
}
func resourceAwsVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	return resource.Retry(3*time.Minute, func() error {
		log.Printf("[INFO] Deleting DHCP Options ID %s...", d.Id())
		_, err := conn.DeleteDhcpOptions(&ec2.DeleteDhcpOptionsInput{
			DhcpOptionsId: aws.String(d.Id()),
		})

		if err == nil {
			return nil
		}

		log.Printf("[WARN] %s", err)

		ec2err, ok := err.(awserr.Error)
		if !ok {
			return err
		}

		switch ec2err.Code() {
		case "InvalidDhcpOptionsID.NotFound":
			return nil
		case "DependencyViolation":
			// If it is a dependency violation, we want to disassociate
			// all VPCs using the given DHCP Options ID, and retry deleting.
			vpcs, err2 := findVPCsByDHCPOptionsID(conn, d.Id())
			if err2 != nil {
				log.Printf("[ERROR] %s", err2)
				return err2
			}

			for _, vpc := range vpcs {
				log.Printf("[INFO] Disassociating DHCP Options Set %s from VPC %s...", d.Id(), *vpc.VpcId)
				if _, err := conn.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{
					DhcpOptionsId: aws.String("default"),
					VpcId:         vpc.VpcId,
				}); err != nil {
					return err
				}
			}
			return err //retry
		default:
			// Any other error, we want to quit the retry loop immediately
			return resource.RetryError{Err: err}
		}
	})
}
// resourceAwsLambdaEventSourceMappingCreate maps to:
// CreateEventSourceMapping in the API / SDK
func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	functionName := d.Get("function_name").(string)
	eventSourceArn := d.Get("event_source_arn").(string)

	log.Printf("[DEBUG] Creating Lambda event source mapping: source %s to function %s", eventSourceArn, functionName)

	params := &lambda.CreateEventSourceMappingInput{
		EventSourceArn:   aws.String(eventSourceArn),
		FunctionName:     aws.String(functionName),
		StartingPosition: aws.String(d.Get("starting_position").(string)),
		BatchSize:        aws.Int64(int64(d.Get("batch_size").(int))),
		Enabled:          aws.Bool(d.Get("enabled").(bool)),
	}

	// IAM profiles and roles can take some time to propagate in AWS:
	//  http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
	// Error creating Lambda function: InvalidParameterValueException: The
	// function defined for the task cannot be assumed by Lambda.
	//
	// The role may exist, but the permissions may not have propagated, so we
	// retry
	err := resource.Retry(1*time.Minute, func() error {
		eventSourceMappingConfiguration, err := conn.CreateEventSourceMapping(params)
		if err != nil {
			if awserr, ok := err.(awserr.Error); ok {
				if awserr.Code() == "InvalidParameterValueException" {
					// Retryable
					return awserr
				}
			}
			// Not retryable
			return resource.RetryError{Err: err}
		}
		// No error
		d.Set("uuid", eventSourceMappingConfiguration.UUID)
		d.SetId(*eventSourceMappingConfiguration.UUID)
		return nil
	})

	if err != nil {
		return fmt.Errorf("Error creating Lambda event source mapping: %s", err)
	}

	return resourceAwsLambdaEventSourceMappingRead(d, meta)
}
func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) error {
	dynamodbconn := meta.(*AWSClient).dynamodbconn

	waitForTableToBeActive(d.Id(), meta)

	log.Printf("[DEBUG] DynamoDB delete table: %s", d.Id())

	_, err := dynamodbconn.DeleteTable(&dynamodb.DeleteTableInput{
		TableName: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	params := &dynamodb.DescribeTableInput{
		TableName: aws.String(d.Id()),
	}

	err = resource.Retry(10*time.Minute, func() error {
		t, err := dynamodbconn.DescribeTable(params)
		if err != nil {
			if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
				return nil
			}
			// Didn't recognize the error, so shouldn't retry.
			return resource.RetryError{Err: err}
		}

		if t != nil {
			if t.Table.TableStatus != nil && strings.ToLower(*t.Table.TableStatus) == "deleting" {
				log.Printf("[DEBUG] AWS Dynamo DB table (%s) is still deleting", d.Id())
				return fmt.Errorf("still deleting")
			}
		}

		// we should be not found or deleting, so error here
		return resource.RetryError{Err: fmt.Errorf("[ERR] Error deleting Dynamo DB table, unexpected state: %s", t)}
	})

	// check error from retry
	if err != nil {
		return err
	}

	return nil
}
func testAccAzureDatabaseServerFirewallRuleExists(name string, servers []string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		res, ok := s.RootModule().Resources[name]
		if !ok {
			return fmt.Errorf("Azure Database Server Firewall Rule %q doesn't exist.", name)
		}

		if res.Primary.ID == "" {
			return fmt.Errorf("Azure Database Server Firewall Rule %q res ID not set.", name)
		}

		sqlClient := testAccProvider.Meta().(*Client).sqlClient

		for _, server := range servers {
			var rules sql.ListFirewallRulesResponse

			err := resource.Retry(15*time.Minute, func() error {
				var erri error
				rules, erri = sqlClient.ListFirewallRules(server)
				if erri != nil {
					return fmt.Errorf("Error listing Azure Database Server Firewall Rules for Server %q: %s", server, erri)
				}

				return nil
			})
			if err != nil {
				return err
			}

			var found bool
			for _, rule := range rules.FirewallRules {
				if rule.Name == res.Primary.ID {
					found = true
					break
				}
			}
			if !found {
				return fmt.Errorf("Azure Database Server Firewall Rule %q doesn't exists on server %q.", res.Primary.ID, server)
			}
		}

		return nil
	}
}
func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	policy := d.Get("policy").(string)

	if policy != "" {
		log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)

		params := &s3.PutBucketPolicyInput{
			Bucket: aws.String(bucket),
			Policy: aws.String(policy),
		}

		err := resource.Retry(1*time.Minute, func() error {
			if _, err := s3conn.PutBucketPolicy(params); err != nil {
				if awserr, ok := err.(awserr.Error); ok {
					if awserr.Code() == "MalformedPolicy" {
						// Retryable
						return awserr
					}
				}
				// Not retryable
				return resource.RetryError{Err: err}
			}
			// No error
			return nil
		})

		if err != nil {
			return fmt.Errorf("Error putting S3 policy: %s", err)
		}
	} else {
		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy)
		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
			Bucket: aws.String(bucket),
		})

		if err != nil {
			return fmt.Errorf("Error deleting S3 policy: %s", err)
		}
	}

	return nil
}
func resourceAwsElasticSearchDomainDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).esconn

	log.Printf("[DEBUG] Deleting ElasticSearch domain: %q", d.Get("domain_name").(string))
	_, err := conn.DeleteElasticsearchDomain(&elasticsearch.DeleteElasticsearchDomainInput{
		DomainName: aws.String(d.Get("domain_name").(string)),
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be deleted", d.Get("domain_name").(string))
	err = resource.Retry(15*time.Minute, func() error {
		out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{
			DomainName: aws.String(d.Get("domain_name").(string)),
		})

		if err != nil {
			awsErr, ok := err.(awserr.Error)
			if !ok {
				return resource.RetryError{Err: err}
			}

			if awsErr.Code() == "ResourceNotFoundException" {
				return nil
			}

			return resource.RetryError{Err: awsErr}
		}

		if !*out.DomainStatus.Processing {
			return nil
		}

		return fmt.Errorf("%q: Timeout while waiting for the domain to be deleted", d.Id())
	})

	d.SetId("")

	return err
}
func resourceMailgunDomainDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*mailgun.Client)

	log.Printf("[INFO] Deleting Domain: %s", d.Id())

	// Destroy the domain
	err := client.DestroyDomain(d.Id())
	if err != nil {
		return fmt.Errorf("Error deleting domain: %s", err)
	}

	// Give the destroy a chance to take effect
	return resource.Retry(1*time.Minute, func() error {
		_, err = client.RetrieveDomain(d.Id())
		if err == nil {
			log.Printf("[INFO] Retrying until domain disappears...")
			return fmt.Errorf("Domain seems to still exist; will check again.")
		}
		log.Printf("[INFO] Got error looking for domain, seems gone: %s", err)
		return nil
	})
}
func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn

	if d.Get("force_delete").(bool) {
		log.Printf("[DEBUG] Skipping ASG drain, force_delete was set.")
		return nil
	}

	// First, set the capacity to zero so the group will drain
	log.Printf("[DEBUG] Reducing autoscaling group capacity to zero")
	opts := autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String(d.Id()),
		DesiredCapacity:      aws.Int64(0),
		MinSize:              aws.Int64(0),
		MaxSize:              aws.Int64(0),
	}
	if _, err := conn.UpdateAutoScalingGroup(&opts); err != nil {
		return fmt.Errorf("Error setting capacity to zero to drain: %s", err)
	}

	// Next, wait for the autoscale group to drain
	log.Printf("[DEBUG] Waiting for group to have zero instances")
	return resource.Retry(10*time.Minute, func() error {
		g, err := getAwsAutoscalingGroup(d, meta)
		if err != nil {
			return resource.RetryError{Err: err}
		}
		if g == nil {
			return nil
		}

		if len(g.Instances) == 0 {
			return nil
		}

		return fmt.Errorf("group still has %d instances", len(g.Instances))
	})
}
func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).esconn

	input := elasticsearch.CreateElasticsearchDomainInput{
		DomainName: aws.String(d.Get("domain_name").(string)),
	}

	if v, ok := d.GetOk("access_policies"); ok {
		input.AccessPolicies = aws.String(v.(string))
	}

	if v, ok := d.GetOk("advanced_options"); ok {
		input.AdvancedOptions = stringMapToPointers(v.(map[string]interface{}))
	}

	if v, ok := d.GetOk("ebs_options"); ok {
		options := v.([]interface{})

		if len(options) > 1 {
			return fmt.Errorf("Only a single ebs_options block is expected")
		} else if len(options) == 1 {
			if options[0] == nil {
				return fmt.Errorf("At least one field is expected inside ebs_options")
			}

			s := options[0].(map[string]interface{})
			input.EBSOptions = expandESEBSOptions(s)
		}
	}

	if v, ok := d.GetOk("cluster_config"); ok {
		config := v.([]interface{})

		if len(config) > 1 {
			return fmt.Errorf("Only a single cluster_config block is expected")
		} else if len(config) == 1 {
			if config[0] == nil {
				return fmt.Errorf("At least one field is expected inside cluster_config")
			}
			m := config[0].(map[string]interface{})
			input.ElasticsearchClusterConfig = expandESClusterConfig(m)
		}
	}

	if v, ok := d.GetOk("snapshot_options"); ok {
		options := v.([]interface{})

		if len(options) > 1 {
			return fmt.Errorf("Only a single snapshot_options block is expected")
		} else if len(options) == 1 {
			if options[0] == nil {
				return fmt.Errorf("At least one field is expected inside snapshot_options")
			}

			o := options[0].(map[string]interface{})

			snapshotOptions := elasticsearch.SnapshotOptions{
				AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))),
			}

			input.SnapshotOptions = &snapshotOptions
		}
	}

	log.Printf("[DEBUG] Creating ElasticSearch domain: %s", input)
	out, err := conn.CreateElasticsearchDomain(&input)
	if err != nil {
		return err
	}

	d.SetId(*out.DomainStatus.ARN)

	log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be created", d.Id())
	err = resource.Retry(15*time.Minute, func() error {
		out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{
			DomainName: aws.String(d.Get("domain_name").(string)),
		})
		if err != nil {
			return resource.RetryError{Err: err}
		}

		if !*out.DomainStatus.Processing && out.DomainStatus.Endpoint != nil {
			return nil
		}

		return fmt.Errorf("%q: Timeout while waiting for the domain to be created", d.Id())
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] ElasticSearch domain %q created", d.Id())

	return resourceAwsElasticSearchDomainRead(d, meta)
}
func resourceAwsElasticSearchDomainUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).esconn

	input := elasticsearch.UpdateElasticsearchDomainConfigInput{
		DomainName: aws.String(d.Get("domain_name").(string)),
	}

	if d.HasChange("access_policies") {
		input.AccessPolicies = aws.String(d.Get("access_policies").(string))
	}

	if d.HasChange("advanced_options") {
		input.AdvancedOptions = stringMapToPointers(d.Get("advanced_options").(map[string]interface{}))
	}

	if d.HasChange("ebs_options") {
		options := d.Get("ebs_options").([]interface{})

		if len(options) > 1 {
			return fmt.Errorf("Only a single ebs_options block is expected")
		} else if len(options) == 1 {
			s := options[0].(map[string]interface{})
			input.EBSOptions = expandESEBSOptions(s)
		}
	}

	if d.HasChange("cluster_config") {
		config := d.Get("cluster_config").([]interface{})

		if len(config) > 1 {
			return fmt.Errorf("Only a single cluster_config block is expected")
		} else if len(config) == 1 {
			m := config[0].(map[string]interface{})
			input.ElasticsearchClusterConfig = expandESClusterConfig(m)
		}
	}

	if d.HasChange("snapshot_options") {
		options := d.Get("snapshot_options").([]interface{})

		if len(options) > 1 {
			return fmt.Errorf("Only a single snapshot_options block is expected")
		} else if len(options) == 1 {
			o := options[0].(map[string]interface{})

			snapshotOptions := elasticsearch.SnapshotOptions{
				AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))),
			}

			input.SnapshotOptions = &snapshotOptions
		}
	}

	_, err := conn.UpdateElasticsearchDomainConfig(&input)
	if err != nil {
		return err
	}

	err = resource.Retry(25*time.Minute, func() error {
		out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{
			DomainName: aws.String(d.Get("domain_name").(string)),
		})
		if err != nil {
			return resource.RetryError{Err: err}
		}

		if !*out.DomainStatus.Processing {
			return nil
		}

		return fmt.Errorf("%q: Timeout while waiting for changes to be processed", d.Id())
	})
	if err != nil {
		return err
	}

	return resourceAwsElasticSearchDomainRead(d, meta)
}
func resourceAzureInstanceDelete(d *schema.ResourceData, meta interface{}) error {
	azureClient := meta.(*Client)
	mc := azureClient.mgmtClient
	vmClient := azureClient.vmClient

	name := d.Get("name").(string)
	hostedServiceName := d.Get("hosted_service_name").(string)

	log.Printf("[DEBUG] Deleting instance: %s", name)

	// check if the instance had a hosted service created especially for it:
	if d.Get("has_dedicated_service").(bool) {
		// if so; we must delete the associated hosted service as well:
		hostedServiceClient := azureClient.hostedServiceClient
		req, err := hostedServiceClient.DeleteHostedService(name, true)
		if err != nil {
			return fmt.Errorf("Error deleting instance and hosted service %s: %s", name, err)
		}

		// Wait until the hosted service and the instance it contains is deleted:
		if err := mc.WaitForOperation(req, nil); err != nil {
			return fmt.Errorf(
				"Error waiting for instance %s to be deleted: %s", name, err)
		}
	} else {
		// else; just delete the instance:
		reqID, err := vmClient.DeleteDeployment(hostedServiceName, name)
		if err != nil {
			return fmt.Errorf("Error deleting instance %s off hosted service %s: %s", name, hostedServiceName, err)
		}

		// and wait for the deletion:
		if err := mc.WaitForOperation(reqID, nil); err != nil {
			return fmt.Errorf("Error waiting for intance %s to be deleted off the hosted service %s: %s",
				name, hostedServiceName, err)
		}
	}

	log.Printf("[INFO] Waiting for the deletion of instance '%s''s disk blob.", name)

	// in order to avoid `terraform taint`-like scenarios in which the instance
	// is deleted and re-created so fast the previous storage blob which held
	// the image doesn't manage to get deleted (despite it being in a
	// 'deleting' state) and a lease conflict occurs over it, we must ensure
	// the blob got completely deleted as well:
	storName := d.Get("storage_service_name").(string)
	blobClient, err := azureClient.getStorageServiceBlobClient(storName)
	if err != nil {
		return err
	}

	err = resource.Retry(15*time.Minute, func() error {
		exists, err := blobClient.BlobExists(
			storageContainterName, fmt.Sprintf(osDiskBlobNameFormat, name),
		)
		if err != nil {
			return resource.RetryError{Err: err}
		}

		if exists {
			return fmt.Errorf("Instance '%s''s disk storage blob still exists.", name)
		}

		return nil
	})

	return err
}
func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecsconn

	// Check if it's not already gone
	resp, err := conn.DescribeServices(&ecs.DescribeServicesInput{
		Services: []*string{aws.String(d.Id())},
		Cluster:  aws.String(d.Get("cluster").(string)),
	})
	if err != nil {
		return err
	}

	if len(resp.Services) == 0 {
		log.Printf("[DEBUG] ECS Service %q is already gone", d.Id())
		return nil
	}

	log.Printf("[DEBUG] ECS service %s is currently %s", d.Id(), *resp.Services[0].Status)

	if *resp.Services[0].Status == "INACTIVE" {
		return nil
	}

	// Drain the ECS service
	if *resp.Services[0].Status != "DRAINING" {
		log.Printf("[DEBUG] Draining ECS service %s", d.Id())
		_, err = conn.UpdateService(&ecs.UpdateServiceInput{
			Service:      aws.String(d.Id()),
			Cluster:      aws.String(d.Get("cluster").(string)),
			DesiredCount: aws.Int64(int64(0)),
		})
		if err != nil {
			return err
		}
	}

	// Delete the service, retrying while deployments are still winding down
	err = resource.Retry(5*time.Minute, func() error {
		input := ecs.DeleteServiceInput{
			Service: aws.String(d.Id()),
			Cluster: aws.String(d.Get("cluster").(string)),
		}

		log.Printf("[DEBUG] Trying to delete ECS service %s", input)
		_, err := conn.DeleteService(&input)
		if err == nil {
			return nil
		}

		ec2err, ok := err.(awserr.Error)
		if !ok {
			return &resource.RetryError{Err: err}
		}
		if ec2err.Code() == "InvalidParameterException" {
			// Prevent "The service cannot be stopped while deployments are active."
			log.Printf("[DEBUG] Trying to delete ECS service again: %q",
				ec2err.Message())
			return err
		}

		return &resource.RetryError{Err: err}

	})
	if err != nil {
		return err
	}

	// Wait until it's deleted
	wait := resource.StateChangeConf{
		Pending:    []string{"DRAINING"},
		Target:     "INACTIVE",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			log.Printf("[DEBUG] Checking if ECS service %s is INACTIVE", d.Id())
			resp, err := conn.DescribeServices(&ecs.DescribeServicesInput{
				Services: []*string{aws.String(d.Id())},
				Cluster:  aws.String(d.Get("cluster").(string)),
			})
			if err != nil {
				return resp, "FAILED", err
			}

			log.Printf("[DEBUG] ECS service (%s) is currently %q", d.Id(), *resp.Services[0].Status)
			return resp, *resp.Services[0].Status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] ECS service %s deleted.", d.Id())
	return nil
}
func retryCall(seconds int, f resource.RetryFunc) error {
	return resource.Retry(time.Duration(seconds)*time.Second, f)
}
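// A hypothetical caller for the retryCall helper above, assuming the Mailgun
// client shown elsewhere in this file: it retries a flaky domain lookup for up
// to 30 seconds using the same contract as resource.Retry (nil stops, a plain
// error is retried). The name exampleRetryCallUsage is illustrative only.
func exampleRetryCallUsage(client *mailgun.Client, id string) error {
	return retryCall(30, func() error {
		// RetrieveDomain errors are treated as transient and retried.
		_, err := client.RetrieveDomain(id)
		return err
	})
}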
func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AWSClient).opsworksconn

	err := resourceAwsOpsworksStackValidate(d)
	if err != nil {
		return err
	}

	req := &opsworks.CreateStackInput{
		DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)),
		Name:                      aws.String(d.Get("name").(string)),
		Region:                    aws.String(d.Get("region").(string)),
		ServiceRoleArn:            aws.String(d.Get("service_role_arn").(string)),
		UseOpsworksSecurityGroups: aws.Bool(d.Get("use_opsworks_security_groups").(bool)),
	}
	inVpc := false
	if vpcId, ok := d.GetOk("vpc_id"); ok {
		req.VpcId = aws.String(vpcId.(string))
		inVpc = true
	}
	if defaultSubnetId, ok := d.GetOk("default_subnet_id"); ok {
		req.DefaultSubnetId = aws.String(defaultSubnetId.(string))
	}
	if defaultAvailabilityZone, ok := d.GetOk("default_availability_zone"); ok {
		req.DefaultAvailabilityZone = aws.String(defaultAvailabilityZone.(string))
	}

	log.Printf("[DEBUG] Creating OpsWorks stack: %s", req)

	var resp *opsworks.CreateStackOutput
	err = resource.Retry(20*time.Minute, func() error {
		var cerr error
		resp, cerr = client.CreateStack(req)
		if cerr != nil {
			if opserr, ok := cerr.(awserr.Error); ok {
				// If Terraform is also managing the service IAM role,
				// it may have just been created and not yet be
				// propagated.
				// AWS doesn't provide a machine-readable code for this
				// specific error, so we're forced to do fragile message
				// matching.
				// The full error we're looking for looks something like
				// the following:
				// Service Role Arn: [...] is not yet propagated, please try again in a couple of minutes
				propErr := "not yet propagated"
				trustErr := "not the necessary trust relationship"
				if opserr.Code() == "ValidationException" && (strings.Contains(opserr.Message(), trustErr) || strings.Contains(opserr.Message(), propErr)) {
					log.Printf("[INFO] Waiting for service IAM role to propagate")
					return cerr
				}
			}
			return resource.RetryError{Err: cerr}
		}
		return nil
	})
	if err != nil {
		return err
	}

	stackId := *resp.StackId
	d.SetId(stackId)
	d.Set("id", stackId)

	if inVpc && *req.UseOpsworksSecurityGroups {
		// For VPC-based stacks, OpsWorks asynchronously creates some default
		// security groups which must exist before layers can be created.
		// Unfortunately it doesn't tell us what the ids of these are, so
		// we can't actually check for them. Instead, we just wait a nominal
		// amount of time for their creation to complete.
		log.Print("[INFO] Waiting for OpsWorks built-in security groups to be created")
		time.Sleep(30 * time.Second)
	}

	return resourceAwsOpsworksStackUpdate(d, meta)
}
// Waits for a minimum number of instances to show up as healthy in the
// ASG before continuing. Waits up to `waitForASGCapacityTimeout` for
// "desired_capacity", or "min_size" if desired capacity is not specified.
//
// If "wait_for_elb_capacity" is specified, will also wait for that number of
// instances to show up InService in all attached ELBs. See "Waiting for
// Capacity" in docs for more discussion of the feature.
func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
	wantASG := d.Get("min_size").(int)
	if v := d.Get("desired_capacity").(int); v > 0 {
		wantASG = v
	}
	wantELB := d.Get("wait_for_elb_capacity").(int)

	// Covers deprecated field support
	wantELB += d.Get("min_elb_capacity").(int)

	wait, err := time.ParseDuration(d.Get("wait_for_capacity_timeout").(string))
	if err != nil {
		return err
	}

	if wait == 0 {
		log.Printf("[DEBUG] Capacity timeout set to 0, skipping capacity waiting.")
		return nil
	}

	log.Printf("[DEBUG] Waiting %s for capacity: %d ASG, %d ELB",
		wait, wantASG, wantELB)

	return resource.Retry(wait, func() error {
		g, err := getAwsAutoscalingGroup(d, meta)
		if err != nil {
			return resource.RetryError{Err: err}
		}
		if g == nil {
			return nil
		}
		lbis, err := getLBInstanceStates(g, meta)
		if err != nil {
			return resource.RetryError{Err: err}
		}

		haveASG := 0
		haveELB := 0

		for _, i := range g.Instances {
			if i.HealthStatus == nil || i.InstanceId == nil || i.LifecycleState == nil {
				continue
			}

			if !strings.EqualFold(*i.HealthStatus, "Healthy") {
				continue
			}

			if !strings.EqualFold(*i.LifecycleState, "InService") {
				continue
			}

			haveASG++

			if wantELB > 0 {
				inAllLbs := true
				for _, states := range lbis {
					state, ok := states[*i.InstanceId]
					if !ok || !strings.EqualFold(state, "InService") {
						inAllLbs = false
					}
				}
				if inAllLbs {
					haveELB++
				}
			}
		}

		log.Printf("[DEBUG] %q Capacity: %d/%d ASG, %d/%d ELB",
			d.Id(), haveASG, wantASG, haveELB, wantELB)

		if haveASG == wantASG && haveELB == wantELB {
			return nil
		}

		return fmt.Errorf(
			"Still waiting for %q instances. Current/Desired: %d/%d ASG, %d/%d ELB",
			d.Id(), haveASG, wantASG, haveELB, wantELB)
	})
}
func resourceAwsEcsClusterDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecsconn

	log.Printf("[DEBUG] Deleting ECS cluster %s", d.Id())

	err := resource.Retry(10*time.Minute, func() error {
		out, err := conn.DeleteCluster(&ecs.DeleteClusterInput{
			Cluster: aws.String(d.Id()),
		})

		if err == nil {
			log.Printf("[DEBUG] ECS cluster %s deleted: %s", d.Id(), out)
			return nil
		}

		awsErr, ok := err.(awserr.Error)
		if !ok {
			return resource.RetryError{Err: err}
		}

		if awsErr.Code() == "ClusterContainsContainerInstancesException" {
			log.Printf("[TRACE] Retrying ECS cluster %q deletion after %q", d.Id(), awsErr.Code())
			return err
		}

		if awsErr.Code() == "ClusterContainsServicesException" {
			log.Printf("[TRACE] Retrying ECS cluster %q deletion after %q", d.Id(), awsErr.Code())
			return err
		}

		return resource.RetryError{Err: err}
	})
	if err != nil {
		return err
	}

	clusterName := d.Get("name").(string)
	err = resource.Retry(5*time.Minute, func() error {
		log.Printf("[DEBUG] Checking if ECS Cluster %q is INACTIVE", d.Id())
		out, err := conn.DescribeClusters(&ecs.DescribeClustersInput{
			Clusters: []*string{aws.String(clusterName)},
		})
		if err != nil {
			return resource.RetryError{Err: err}
		}

		for _, c := range out.Clusters {
			if *c.ClusterName == clusterName {
				if *c.Status == "INACTIVE" {
					return nil
				}

				return fmt.Errorf("ECS Cluster %q is still %q", clusterName, *c.Status)
			}
		}

		return nil
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] ECS cluster %q deleted", d.Id())
	return nil
}
func resourceAwsEipDelete(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	if err := resourceAwsEipRead(d, meta); err != nil {
		return err
	}
	if d.Id() == "" {
		// This might happen from the read
		return nil
	}

	// If we are attached to an instance or interface, detach first.
	if d.Get("instance").(string) != "" || d.Get("association_id").(string) != "" {
		log.Printf("[DEBUG] Disassociating EIP: %s", d.Id())
		var err error
		switch resourceAwsEipDomain(d) {
		case "vpc":
			_, err = ec2conn.DisassociateAddress(&ec2.DisassociateAddressInput{
				AssociationId: aws.String(d.Get("association_id").(string)),
			})
		case "standard":
			_, err = ec2conn.DisassociateAddress(&ec2.DisassociateAddressInput{
				PublicIp: aws.String(d.Get("public_ip").(string)),
			})
		}

		if err != nil {
			// First check if the association ID is not found. If this
			// is the case, then it was already disassociated somehow,
			// and that is okay. The most common reason for this is that
			// the instance or ENI it was attached to was destroyed.
			if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAssociationID.NotFound" {
				err = nil
			}
		}

		if err != nil {
			return err
		}
	}

	domain := resourceAwsEipDomain(d)
	return resource.Retry(3*time.Minute, func() error {
		var err error
		switch domain {
		case "vpc":
			log.Printf(
				"[DEBUG] EIP release (destroy) address allocation: %v",
				d.Id())
			_, err = ec2conn.ReleaseAddress(&ec2.ReleaseAddressInput{
				AllocationId: aws.String(d.Id()),
			})
		case "standard":
			log.Printf("[DEBUG] EIP release (destroy) address: %v", d.Id())
			_, err = ec2conn.ReleaseAddress(&ec2.ReleaseAddressInput{
				PublicIp: aws.String(d.Id()),
			})
		}

		if err == nil {
			return nil
		}
		if _, ok := err.(awserr.Error); !ok {
			return resource.RetryError{Err: err}
		}

		return err
	})
}
// resourceAwsLambdaFunctionCreate maps to:
// CreateFunction in the API / SDK
func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	functionName := d.Get("function_name").(string)
	iamRole := d.Get("role").(string)

	log.Printf("[DEBUG] Creating Lambda Function %s with role %s", functionName, iamRole)

	var functionCode *lambda.FunctionCode
	if v, ok := d.GetOk("filename"); ok {
		filename, err := homedir.Expand(v.(string))
		if err != nil {
			return err
		}
		zipfile, err := ioutil.ReadFile(filename)
		if err != nil {
			return err
		}
		d.Set("source_code_hash", sha256.Sum256(zipfile))
		functionCode = &lambda.FunctionCode{
			ZipFile: zipfile,
		}
	} else {
		s3Bucket, bucketOk := d.GetOk("s3_bucket")
		s3Key, keyOk := d.GetOk("s3_key")
		s3ObjectVersion, versionOk := d.GetOk("s3_object_version")
		if !bucketOk || !keyOk || !versionOk {
			return errors.New("s3_bucket, s3_key and s3_object_version must all be set while using S3 code source")
		}
		functionCode = &lambda.FunctionCode{
			S3Bucket:        aws.String(s3Bucket.(string)),
			S3Key:           aws.String(s3Key.(string)),
			S3ObjectVersion: aws.String(s3ObjectVersion.(string)),
		}
	}

	params := &lambda.CreateFunctionInput{
		Code:         functionCode,
		Description:  aws.String(d.Get("description").(string)),
		FunctionName: aws.String(functionName),
		Handler:      aws.String(d.Get("handler").(string)),
		MemorySize:   aws.Int64(int64(d.Get("memory_size").(int))),
		Role:         aws.String(iamRole),
		Runtime:      aws.String(d.Get("runtime").(string)),
		Timeout:      aws.Int64(int64(d.Get("timeout").(int))),
	}

	// IAM profiles can take ~10 seconds to propagate in AWS:
	//  http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
	// Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda.
	err := resource.Retry(1*time.Minute, func() error {
		_, err := conn.CreateFunction(params)
		if err != nil {
			if awserr, ok := err.(awserr.Error); ok {
				if awserr.Code() == "InvalidParameterValueException" {
					// Retryable
					return awserr
				}
			}
			// Not retryable
			return resource.RetryError{Err: err}
		}
		// No error
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error creating Lambda function: %s", err)
	}

	d.SetId(d.Get("function_name").(string))

	return resourceAwsLambdaFunctionRead(d, meta)
}