func resourceAwsRoute53ZoneCreate(d *schema.ResourceData, meta interface{}) error {
	r53 := meta.(*AWSClient).r53conn

	req := &route53.CreateHostedZoneInput{
		Name:             aws.String(d.Get("name").(string)),
		HostedZoneConfig: &route53.HostedZoneConfig{Comment: aws.String(d.Get("comment").(string))},
		CallerReference:  aws.String(time.Now().Format(time.RFC3339Nano)),
	}
	if v := d.Get("vpc_id"); v != "" {
		req.VPC = &route53.VPC{
			VPCId:     aws.String(v.(string)),
			VPCRegion: aws.String(meta.(*AWSClient).region),
		}
		if w := d.Get("vpc_region"); w != "" {
			req.VPC.VPCRegion = aws.String(w.(string))
		}
		d.Set("vpc_region", req.VPC.VPCRegion)
	}

	if v, ok := d.GetOk("delegation_set_id"); ok {
		req.DelegationSetId = aws.String(v.(string))
	}

	log.Printf("[DEBUG] Creating Route53 hosted zone: %s", *req.Name)
	resp, err := r53.CreateHostedZone(req)
	if err != nil {
		return err
	}

	// Store the zone_id
	zone := cleanZoneID(*resp.HostedZone.Id)
	d.Set("zone_id", zone)
	d.SetId(zone)

	// Wait until we are done initializing
	wait := resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     "INSYNC",
		Timeout:    10 * time.Minute,
		MinTimeout: 2 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			changeRequest := &route53.GetChangeInput{
				Id: aws.String(cleanChangeID(*resp.ChangeInfo.Id)),
			}
			return resourceAwsGoRoute53Wait(r53, changeRequest)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return err
	}
	return resourceAwsRoute53ZoneUpdate(d, meta)
}
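
// cleanZoneID, cleanChangeID, and resourceAwsGoRoute53Wait are used by the
// Route 53 functions in this file but are not part of this excerpt. Minimal
// sketches, assuming Route 53 returns IDs prefixed with "/hostedzone/" and
// "/change/" and that GetChange reports "PENDING" or "INSYNC":

// cleanZoneID strips the "/hostedzone/" prefix from a hosted zone ID.
func cleanZoneID(id string) string {
	return strings.TrimPrefix(id, "/hostedzone/")
}

// cleanChangeID strips the "/change/" prefix from a change ID.
func cleanChangeID(id string) string {
	return strings.TrimPrefix(id, "/change/")
}

// resourceAwsGoRoute53Wait polls a pending change and returns its current
// status as the state string for a StateChangeConf refresh function.
func resourceAwsGoRoute53Wait(r53 *route53.Route53, ref *route53.GetChangeInput) (result interface{}, state string, err error) {
	status, err := r53.GetChange(ref)
	if err != nil {
		return nil, "UNKNOWN", err
	}
	return true, *status.ChangeInfo.Status, nil
}
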
func resourceAwsRoute53ZoneAssociationCreate(d *schema.ResourceData, meta interface{}) error {
	r53 := meta.(*AWSClient).r53conn

	req := &route53.AssociateVPCWithHostedZoneInput{
		HostedZoneId: aws.String(d.Get("zone_id").(string)),
		VPC: &route53.VPC{
			VPCId:     aws.String(d.Get("vpc_id").(string)),
			VPCRegion: aws.String(meta.(*AWSClient).region),
		},
		Comment: aws.String("Managed by Terraform"),
	}
	if w := d.Get("vpc_region"); w != "" {
		req.VPC.VPCRegion = aws.String(w.(string))
	}

	log.Printf("[DEBUG] Associating Route53 Private Zone %s with VPC %s with region %s", *req.HostedZoneId, *req.VPC.VPCId, *req.VPC.VPCRegion)
	resp, err := r53.AssociateVPCWithHostedZone(req)
	if err != nil {
		return err
	}

	// Store association id
	d.SetId(fmt.Sprintf("%s:%s", *req.HostedZoneId, *req.VPC.VPCId))
	d.Set("vpc_region", req.VPC.VPCRegion)

	// Wait until we are done initializing
	wait := resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     "INSYNC",
		Timeout:    10 * time.Minute,
		MinTimeout: 2 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			changeRequest := &route53.GetChangeInput{
				Id: aws.String(cleanChangeID(*resp.ChangeInfo.Id)),
			}
			return resourceAwsGoRoute53Wait(r53, changeRequest)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsRoute53ZoneAssociationUpdate(d, meta)
}
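
// The association ID stored above is a composite "zoneID:vpcID". A minimal
// sketch of how the corresponding read/delete functions might split it back
// apart; the helper name is hypothetical, not part of this excerpt:
func resourceAwsRoute53ZoneAssociationParseId(id string) (zoneID, vpcID string, err error) {
	parts := strings.SplitN(id, ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unexpected zone association ID format: %q", id)
	}
	return parts[0], parts[1], nil
}
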
func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	zoneName := d.Get("zone").(string)
	clusterName := d.Get("name").(string)
	desiredNodeVersion := d.Get("node_version").(string)

	req := &container.UpdateClusterRequest{
		Update: &container.ClusterUpdate{
			DesiredNodeVersion: desiredNodeVersion,
		},
	}
	op, err := config.clientContainer.Projects.Zones.Clusters.Update(
		config.Project, zoneName, clusterName, req).Do()
	if err != nil {
		return err
	}

	// Wait until it's updated
	wait := resource.StateChangeConf{
		Pending:    []string{"PENDING", "RUNNING"},
		Target:     "DONE",
		Timeout:    10 * time.Minute,
		MinTimeout: 2 * time.Second,
		Refresh: func() (interface{}, string, error) {
			log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName)
			resp, err := config.clientContainer.Projects.Zones.Operations.Get(
				config.Project, zoneName, op.Name).Do()
			if err != nil {
				return nil, "", err
			}
			log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s",
				clusterName, resp.Status)
			return resp, resp.Status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
		desiredNodeVersion)

	return resourceContainerClusterRead(d, meta)
}
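
// The identical operation-polling block appears in the GKE update function
// above and in the delete/create functions below. A minimal sketch of how it
// could be factored into a shared helper; the name and signature here are
// hypothetical, not part of the original code:
func containerOperationWait(config *Config, zoneName, opName, activity string, timeout time.Duration) error {
	wait := resource.StateChangeConf{
		Pending:    []string{"PENDING", "RUNNING"},
		Target:     "DONE",
		Timeout:    timeout,
		MinTimeout: 2 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := config.clientContainer.Projects.Zones.Operations.Get(
				config.Project, zoneName, opName).Do()
			if err != nil {
				return nil, "", err
			}
			log.Printf("[DEBUG] Progress of %s: %s", activity, resp.Status)
			return resp, resp.Status, nil
		},
	}

	_, err := wait.WaitForState()
	return err
}
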
func resourceAwsSubnetDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	log.Printf("[INFO] Deleting subnet: %s", d.Id())
	req := &ec2.DeleteSubnetInput{
		SubnetId: aws.String(d.Id()),
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     "destroyed",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
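			// NOTE: the result value from this refresh function is discarded
			// by the WaitForState caller below, so the placeholder 42 merely
			// satisfies the signature; only the state string matters.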
			_, err := conn.DeleteSubnet(req)
			if err != nil {
				if apiErr, ok := err.(awserr.Error); ok {
					if apiErr.Code() == "DependencyViolation" {
						// There is some pending operation, so just retry
						// in a bit.
						return 42, "pending", nil
					}

					if apiErr.Code() == "InvalidSubnetID.NotFound" {
						return 42, "destroyed", nil
					}
				}

				return 42, "failure", err
			}

			return 42, "destroyed", nil
		},
	}

	if _, err := wait.WaitForState(); err != nil {
		return fmt.Errorf("Error deleting subnet: %s", err)
	}

	return nil
}

func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	zoneName := d.Get("zone").(string)
	clusterName := d.Get("name").(string)

	log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string))
	op, err := config.clientContainer.Projects.Zones.Clusters.Delete(
		config.Project, zoneName, clusterName).Do()
	if err != nil {
		return err
	}

	// Wait until it's deleted
	wait := resource.StateChangeConf{
		Pending:    []string{"PENDING", "RUNNING"},
		Target:     "DONE",
		Timeout:    10 * time.Minute,
		MinTimeout: 3 * time.Second,
		Refresh: func() (interface{}, string, error) {
			log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName)
			resp, err := config.clientContainer.Projects.Zones.Operations.Get(
				config.Project, zoneName, op.Name).Do()
			if err != nil {
				return nil, "", err
			}
			log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s",
				clusterName, resp.Status)
			return resp, resp.Status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())

	d.SetId("")

	return nil
}

func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	zoneName := d.Get("zone").(string)
	clusterName := d.Get("name").(string)

	masterAuths := d.Get("master_auth").([]interface{})
	if len(masterAuths) != 1 {
		return fmt.Errorf("Exactly one master_auth block must be specified.")
	}
	masterAuth := masterAuths[0].(map[string]interface{})

	cluster := &container.Cluster{
		MasterAuth: &container.MasterAuth{
			Password: masterAuth["password"].(string),
			Username: masterAuth["username"].(string),
		},
		Name:             clusterName,
		InitialNodeCount: int64(d.Get("initial_node_count").(int)),
	}

	if v, ok := d.GetOk("cluster_ipv4_cidr"); ok {
		cluster.ClusterIpv4Cidr = v.(string)
	}

	if v, ok := d.GetOk("description"); ok {
		cluster.Description = v.(string)
	}

	if v, ok := d.GetOk("logging_service"); ok {
		cluster.LoggingService = v.(string)
	}

	if v, ok := d.GetOk("monitoring_service"); ok {
		cluster.MonitoringService = v.(string)
	}

	if v, ok := d.GetOk("network"); ok {
		cluster.Network = v.(string)
	}

	if v, ok := d.GetOk("node_config"); ok {
		nodeConfigs := v.([]interface{})
		if len(nodeConfigs) > 1 {
			return fmt.Errorf("Cannot specify more than one node_config.")
		}
		nodeConfig := nodeConfigs[0].(map[string]interface{})

		cluster.NodeConfig = &container.NodeConfig{}

		if v, ok = nodeConfig["machine_type"]; ok {
			cluster.NodeConfig.MachineType = v.(string)
		}

		if v, ok = nodeConfig["disk_size_gb"]; ok {
			cluster.NodeConfig.DiskSizeGb = int64(v.(int))
		}

		if v, ok := nodeConfig["oauth_scopes"]; ok {
			scopesList := v.([]interface{})
			scopes := []string{}
			for _, v := range scopesList {
				scopes = append(scopes, v.(string))
			}

			cluster.NodeConfig.OauthScopes = scopes
		}
	}

	req := &container.CreateClusterRequest{
		Cluster: cluster,
	}

	op, err := config.clientContainer.Projects.Zones.Clusters.Create(
		config.Project, zoneName, req).Do()
	if err != nil {
		return err
	}

	// Wait until it's created
	wait := resource.StateChangeConf{
		Pending:    []string{"PENDING", "RUNNING"},
		Target:     "DONE",
		Timeout:    30 * time.Minute,
		MinTimeout: 3 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := config.clientContainer.Projects.Zones.Operations.Get(
				config.Project, zoneName, op.Name).Do()
			if err != nil {
				return nil, "", err
			}
			log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s",
				clusterName, resp.Status)
			return resp, resp.Status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[INFO] GKE cluster %s has been created", clusterName)

	d.SetId(clusterName)

	return resourceContainerClusterRead(d, meta)
}

func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).r53conn

	zone := cleanZoneID(d.Get("zone_id").(string))
	log.Printf("[DEBUG] Deleting resource records for zone: %s, name: %s",
		zone, d.Get("name").(string))
	zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})
	if err != nil {
		if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" {
			log.Printf("[DEBUG] No matching Route 53 Record found for: %s, removing from state file", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}
	// Build the record set to delete
	rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name)
	if err != nil {
		return err
	}

	// Build the change batch that deletes the record
	changeBatch := &route53.ChangeBatch{
		Comment: aws.String("Deleted by Terraform"),
		Changes: []*route53.Change{
			&route53.Change{
				Action:            aws.String("DELETE"),
				ResourceRecordSet: rec,
			},
		},
	}

	req := &route53.ChangeResourceRecordSetsInput{
		HostedZoneId: aws.String(zone),
		ChangeBatch:  changeBatch,
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"rejected"},
		Target:     "accepted",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			_, err := conn.ChangeResourceRecordSets(req)
			if err != nil {
				if r53err, ok := err.(awserr.Error); ok {
					if r53err.Code() == "PriorRequestNotComplete" {
						// There is some pending operation, so just retry
						// in a bit.
						return 42, "rejected", nil
					}

					if r53err.Code() == "InvalidChangeBatch" {
						// This means that the record is already gone.
						return 42, "accepted", nil
					}
				}

				return 42, "failure", err
			}

			return 42, "accepted", nil
		},
	}

	if _, err := wait.WaitForState(); err != nil {
		return err
	}

	return nil
}

func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).r53conn
	zone := cleanZoneID(d.Get("zone_id").(string))

	zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})
	if err != nil {
		return err
	}
	if zoneRecord.HostedZone == nil {
		return fmt.Errorf("[WARN] No Route53 Zone found for id (%s)", zone)
	}

	// Get the record
	rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name)
	if err != nil {
		return err
	}

	// Create the new records. We abuse StateChangeConf for this to
	// retry for us since Route53 sometimes returns errors about another
	// operation happening at the same time.
	changeBatch := &route53.ChangeBatch{
		Comment: aws.String("Managed by Terraform"),
		Changes: []*route53.Change{
			&route53.Change{
				Action:            aws.String("UPSERT"),
				ResourceRecordSet: rec,
			},
		},
	}

	req := &route53.ChangeResourceRecordSetsInput{
		HostedZoneId: aws.String(cleanZoneID(*zoneRecord.HostedZone.Id)),
		ChangeBatch:  changeBatch,
	}

	log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s\n\n%s",
		zone, *rec.Name, req)

	wait := resource.StateChangeConf{
		Pending:    []string{"rejected"},
		Target:     "accepted",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.ChangeResourceRecordSets(req)
			if err != nil {
				if r53err, ok := err.(awserr.Error); ok {
					if r53err.Code() == "PriorRequestNotComplete" {
						// There is some pending operation, so just retry
						// in a bit.
						return nil, "rejected", nil
					}
				}

				return nil, "failure", err
			}

			return resp, "accepted", nil
		},
	}

	respRaw, err := wait.WaitForState()
	if err != nil {
		return err
	}
	changeInfo := respRaw.(*route53.ChangeResourceRecordSetsOutput).ChangeInfo

	// Generate an ID
	vars := []string{
		zone,
		strings.ToLower(d.Get("name").(string)),
		d.Get("type").(string),
	}
	if v, ok := d.GetOk("set_identifier"); ok {
		vars = append(vars, v.(string))
	}

	d.SetId(strings.Join(vars, "_"))

	// Wait until we are done
	wait = resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     "INSYNC",
		Timeout:    30 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			changeRequest := &route53.GetChangeInput{
				Id: aws.String(cleanChangeID(*changeInfo.Id)),
			}
			return resourceAwsGoRoute53Wait(conn, changeRequest)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsRoute53RecordRead(d, meta)
}

func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecsconn

	// Check if it's not already gone
	resp, err := conn.DescribeServices(&ecs.DescribeServicesInput{
		Services: []*string{aws.String(d.Id())},
		Cluster:  aws.String(d.Get("cluster").(string)),
	})
	if err != nil {
		return err
	}

	if len(resp.Services) == 0 {
		log.Printf("[DEBUG] ECS Service %q is already gone", d.Id())
		return nil
	}

	log.Printf("[DEBUG] ECS service %s is currently %s", d.Id(), *resp.Services[0].Status)

	if *resp.Services[0].Status == "INACTIVE" {
		return nil
	}

	// Drain the ECS service
	if *resp.Services[0].Status != "DRAINING" {
		log.Printf("[DEBUG] Draining ECS service %s", d.Id())
		_, err = conn.UpdateService(&ecs.UpdateServiceInput{
			Service:      aws.String(d.Id()),
			Cluster:      aws.String(d.Get("cluster").(string)),
			DesiredCount: aws.Int64(int64(0)),
		})
		if err != nil {
			return err
		}
	}

	// Delete the service, retrying while deployments are still active
	err = resource.Retry(5*time.Minute, func() error {
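		// NOTE: with this version of the retry helper, returning a plain
		// error from the retry function causes another attempt, while
		// wrapping it in *resource.RetryError aborts immediately.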
		input := ecs.DeleteServiceInput{
			Service: aws.String(d.Id()),
			Cluster: aws.String(d.Get("cluster").(string)),
		}

		log.Printf("[DEBUG] Trying to delete ECS service %s", input)
		_, err := conn.DeleteService(&input)
		if err == nil {
			return nil
		}

		ec2err, ok := err.(awserr.Error)
		if !ok {
			return &resource.RetryError{Err: err}
		}
		if ec2err.Code() == "InvalidParameterException" {
			// Prevent "The service cannot be stopped while deployments are active."
			log.Printf("[DEBUG] Trying to delete ECS service again: %q",
				ec2err.Message())
			return err
		}

		return &resource.RetryError{Err: err}

	})
	if err != nil {
		return err
	}

	// Wait until it's deleted
	wait := resource.StateChangeConf{
		Pending:    []string{"DRAINING"},
		Target:     "INACTIVE",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			log.Printf("[DEBUG] Checking if ECS service %s is INACTIVE", d.Id())
			resp, err := conn.DescribeServices(&ecs.DescribeServicesInput{
				Services: []*string{aws.String(d.Id())},
				Cluster:  aws.String(d.Get("cluster").(string)),
			})
			if err != nil {
				return resp, "FAILED", err
			}

			if len(resp.Services) == 0 {
				// The service has disappeared entirely; treat it as inactive.
				return resp, "INACTIVE", nil
			}

			log.Printf("[DEBUG] ECS service (%s) is currently %q", d.Id(), *resp.Services[0].Status)
			return resp, *resp.Services[0].Status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] ECS service %s deleted.", d.Id())
	return nil
}

func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).cfconn

	input := cloudformation.CreateStackInput{
		StackName: aws.String(d.Get("name").(string)),
	}
	if v, ok := d.GetOk("template_body"); ok {
		input.TemplateBody = aws.String(normalizeJson(v.(string)))
	}
	if v, ok := d.GetOk("template_url"); ok {
		input.TemplateURL = aws.String(v.(string))
	}
	if v, ok := d.GetOk("capabilities"); ok {
		input.Capabilities = expandStringList(v.(*schema.Set).List())
	}
	if v, ok := d.GetOk("disable_rollback"); ok {
		input.DisableRollback = aws.Bool(v.(bool))
	}
	if v, ok := d.GetOk("notification_arns"); ok {
		input.NotificationARNs = expandStringList(v.(*schema.Set).List())
	}
	if v, ok := d.GetOk("on_failure"); ok {
		input.OnFailure = aws.String(v.(string))
	}
	if v, ok := d.GetOk("parameters"); ok {
		input.Parameters = expandCloudFormationParameters(v.(map[string]interface{}))
	}
	if v, ok := d.GetOk("policy_body"); ok {
		input.StackPolicyBody = aws.String(normalizeJson(v.(string)))
	}
	if v, ok := d.GetOk("policy_url"); ok {
		input.StackPolicyURL = aws.String(v.(string))
	}
	if v, ok := d.GetOk("tags"); ok {
		input.Tags = expandCloudFormationTags(v.(map[string]interface{}))
	}
	if v, ok := d.GetOk("timeout_in_minutes"); ok {
		input.TimeoutInMinutes = aws.Int64(int64(v.(int)))
	}

	log.Printf("[DEBUG] Creating CloudFormation Stack: %s", input)
	resp, err := conn.CreateStack(&input)
	if err != nil {
		return fmt.Errorf("Creating CloudFormation stack failed: %s", err.Error())
	}

	d.SetId(*resp.StackId)

	wait := resource.StateChangeConf{
		Pending:    []string{"CREATE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS", "ROLLBACK_COMPLETE"},
		Target:     "CREATE_COMPLETE",
		Timeout:    30 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
				StackName: aws.String(d.Get("name").(string)),
			})
			if err != nil {
				return nil, "", err
			}
			status := *resp.Stacks[0].StackStatus
			log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)

			if status == "ROLLBACK_COMPLETE" {
				stack := resp.Stacks[0]
				failures, err := getCloudFormationFailures(stack.StackName, *stack.CreationTime, conn)
				if err != nil {
					return resp, "", fmt.Errorf(
						"Failed getting details about rollback: %q", err.Error())
				}

				return resp, "", fmt.Errorf("ROLLBACK_COMPLETE:\n%q", failures)
			}
			return resp, status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[INFO] CloudFormation Stack %q created", d.Get("name").(string))

	return resourceAwsCloudFormationStackRead(d, meta)
}
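
// getCloudFormationFailures is called above but not shown in this excerpt.
// A minimal sketch, assuming it collects the reasons for "_FAILED" resource
// events newer than the given timestamp (the signature is an assumption):
func getCloudFormationFailures(stackName *string, afterTime time.Time, conn *cloudformation.CloudFormation) ([]string, error) {
	resp, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
		StackName: stackName,
	})
	if err != nil {
		return nil, err
	}

	var failures []string
	for _, e := range resp.StackEvents {
		if e.Timestamp.After(afterTime) && e.ResourceStatus != nil &&
			strings.HasSuffix(*e.ResourceStatus, "_FAILED") &&
			e.ResourceStatusReason != nil {
			failures = append(failures, *e.ResourceStatusReason)
		}
	}
	return failures, nil
}
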
func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).cfconn

	input := &cloudformation.DeleteStackInput{
		StackName: aws.String(d.Get("name").(string)),
	}
	log.Printf("[DEBUG] Deleting CloudFormation stack %s", input)
	_, err := conn.DeleteStack(input)
	if err != nil {
		awsErr, ok := err.(awserr.Error)
		if !ok {
			return err
		}

		if awsErr.Code() == "ValidationError" {
			// Ignore stack which has been already deleted
			return nil
		}
		return err
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"DELETE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS"},
		Target:     "DELETE_COMPLETE",
		Timeout:    30 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
				StackName: aws.String(d.Get("name").(string)),
			})

			if err != nil {
				awsErr, ok := err.(awserr.Error)
				if !ok {
					return resp, "DELETE_FAILED", err
				}

				log.Printf("[DEBUG] Error when deleting CloudFormation stack: %s: %s",
					awsErr.Code(), awsErr.Message())

				if awsErr.Code() == "ValidationError" {
					return resp, "DELETE_COMPLETE", nil
				}
			}

			if len(resp.Stacks) == 0 {
				log.Printf("[DEBUG] CloudFormation stack %q is already gone", d.Get("name"))
				return resp, "DELETE_COMPLETE", nil
			}

			status := *resp.Stacks[0].StackStatus
			log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)

			return resp, status, err
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] CloudFormation stack %q has been deleted", d.Id())

	d.SetId("")

	return nil
}
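
// expandCloudFormationParameters and expandCloudFormationTags are used by
// the create and update functions but are not part of this excerpt. Minimal
// sketches, assuming flat maps of string values:
func expandCloudFormationParameters(params map[string]interface{}) []*cloudformation.Parameter {
	var cfParams []*cloudformation.Parameter
	for k, v := range params {
		cfParams = append(cfParams, &cloudformation.Parameter{
			ParameterKey:   aws.String(k),
			ParameterValue: aws.String(v.(string)),
		})
	}
	return cfParams
}

func expandCloudFormationTags(tags map[string]interface{}) []*cloudformation.Tag {
	var cfTags []*cloudformation.Tag
	for k, v := range tags {
		cfTags = append(cfTags, &cloudformation.Tag{
			Key:   aws.String(k),
			Value: aws.String(v.(string)),
		})
	}
	return cfTags
}
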
func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).cfconn

	input := &cloudformation.UpdateStackInput{
		StackName: aws.String(d.Get("name").(string)),
	}

	// Either TemplateBody or TemplateURL is required for each change
	if v, ok := d.GetOk("template_url"); ok {
		input.TemplateURL = aws.String(v.(string))
	}
	if v, ok := d.GetOk("template_body"); ok && input.TemplateURL == nil {
		input.TemplateBody = aws.String(normalizeJson(v.(string)))
	}

	if d.HasChange("capabilities") {
		input.Capabilities = expandStringList(d.Get("capabilities").(*schema.Set).List())
	}
	if d.HasChange("notification_arns") {
		input.NotificationARNs = expandStringList(d.Get("notification_arns").(*schema.Set).List())
	}
	if d.HasChange("parameters") {
		input.Parameters = expandCloudFormationParameters(d.Get("parameters").(map[string]interface{}))
	}
	if d.HasChange("policy_body") {
		input.StackPolicyBody = aws.String(normalizeJson(d.Get("policy_body").(string)))
	}
	if d.HasChange("policy_url") {
		input.StackPolicyURL = aws.String(d.Get("policy_url").(string))
	}

	log.Printf("[DEBUG] Updating CloudFormation stack: %s", input)
	stack, err := conn.UpdateStack(input)
	if err != nil {
		return err
	}

	lastUpdatedTime, err := getLastCfEventTimestamp(d.Get("name").(string), conn)
	if err != nil {
		return err
	}

	wait := resource.StateChangeConf{
		Pending: []string{
			"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
			"UPDATE_IN_PROGRESS",
			"UPDATE_ROLLBACK_IN_PROGRESS",
			"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
			"UPDATE_ROLLBACK_COMPLETE",
		},
		Target:     "UPDATE_COMPLETE",
		Timeout:    15 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
				StackName: aws.String(d.Get("name").(string)),
			})
			if err != nil {
				return nil, "", err
			}
			stack := resp.Stacks[0]
			status := *stack.StackStatus
			log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)

			if status == "UPDATE_ROLLBACK_COMPLETE" {
				failures, err := getCloudFormationFailures(stack.StackName, *lastUpdatedTime, conn)
				if err != nil {
					return resp, "", fmt.Errorf(
						"Failed getting details about rollback: %q", err.Error())
				}

				return resp, "", fmt.Errorf(
					"UPDATE_ROLLBACK_COMPLETE:\n%q", failures)
			}

			return resp, status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] CloudFormation stack %q has been updated", *stack.StackId)

	return resourceAwsCloudFormationStackRead(d, meta)
}
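
// getLastCfEventTimestamp and normalizeJson are referenced above but not
// shown in this excerpt. Minimal sketches under assumed signatures:

// getLastCfEventTimestamp returns the timestamp of the most recent stack
// event, assuming DescribeStackEvents lists events newest first.
func getLastCfEventTimestamp(stackName string, conn *cloudformation.CloudFormation) (*time.Time, error) {
	output, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
		StackName: aws.String(stackName),
	})
	if err != nil {
		return nil, err
	}
	if len(output.StackEvents) == 0 {
		return nil, fmt.Errorf("no stack events found for %q", stackName)
	}
	return output.StackEvents[0].Timestamp, nil
}

// normalizeJson re-marshals a JSON document into a canonical form so that
// semantically identical templates compare equal as strings.
func normalizeJson(jsonString string) string {
	var j interface{}
	if err := json.Unmarshal([]byte(jsonString), &j); err != nil {
		return jsonString
	}
	b, _ := json.Marshal(j)
	return string(b)
}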