func deleteRoute53RecordSet(conn *route53.Route53, input *route53.ChangeResourceRecordSetsInput) (interface{}, error) {
	wait := resource.StateChangeConf{
		Pending:    []string{"rejected"},
		Target:     []string{"accepted"},
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.ChangeResourceRecordSets(input)
			if err != nil {
				if r53err, ok := err.(awserr.Error); ok {
					if r53err.Code() == "PriorRequestNotComplete" {
						// There is some pending operation, so just retry
						// in a bit.
						return 42, "rejected", nil
					}

					if r53err.Code() == "InvalidChangeBatch" {
						// This means that the record is already gone.
						return resp, "accepted", nil
					}
				}

				return 42, "failure", err
			}

			return resp, "accepted", nil
		},
	}

	return wait.WaitForState()
}
func resourceAwsRoute53ZoneCreate(d *schema.ResourceData, meta interface{}) error {
	r53 := meta.(*AWSClient).route53

	req := &route53.CreateHostedZoneRequest{
		Name:    d.Get("name").(string),
		Comment: "Managed by Terraform",
	}
	log.Printf("[DEBUG] Creating Route53 hosted zone: %s", req.Name)
	resp, err := r53.CreateHostedZone(req)
	if err != nil {
		return err
	}

	// Store the zone_id
	zone := route53.CleanZoneID(resp.HostedZone.ID)
	d.Set("zone_id", zone)
	d.SetId(zone)

	// Wait until we are done initializing
	wait := resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     "INSYNC",
		Timeout:    10 * time.Minute,
		MinTimeout: 2 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			return resourceAwsRoute53Wait(r53, resp.ChangeInfo.ID)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	return nil
}
func resourceBrightboxServerDelete(
	d *schema.ResourceData,
	meta interface{},
) error {
	client := meta.(*CompositeClient).ApiClient

	log.Printf("[DEBUG] Server delete called for %s", d.Id())
	err := client.DestroyServer(d.Id())
	if err != nil {
		return fmt.Errorf("Error deleting server: %s", err)
	}

	stateConf := resource.StateChangeConf{
		Pending:    []string{"deleting", "active", "inactive"},
		Target:     []string{"deleted"},
		Refresh:    serverStateRefresh(client, d.Id()),
		Timeout:    5 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	_, err = stateConf.WaitForState()
	if err != nil {
		return err
	}

	return nil
}
func resourceAwsSubnetDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	log.Printf("[INFO] Deleting subnet: %s", d.Id())
	req := &ec2.DeleteSubnetInput{
		SubnetId: aws.String(d.Id()),
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     []string{"destroyed"},
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			_, err := conn.DeleteSubnet(req)
			if err != nil {
				if apiErr, ok := err.(awserr.Error); ok {
					if apiErr.Code() == "InvalidSubnetID.NotFound" {
						return 42, "destroyed", nil
					}
				}

				return 42, "failure", err
			}

			return 42, "destroyed", nil
		},
	}

	if _, err := wait.WaitForState(); err != nil {
		return fmt.Errorf("Error deleting subnet: %s", err)
	}

	return nil
}
func resource_aws_r53_record_create(
	s *terraform.ResourceState,
	d *terraform.ResourceDiff,
	meta interface{}) (*terraform.ResourceState, error) {
	p := meta.(*ResourceProvider)
	conn := p.route53

	// Merge the diff into the state so that we have all the attributes
	// properly.
	rs := s.MergeDiff(d)

	// Get the record
	rec, err := resource_aws_r53_build_record_set(rs)
	if err != nil {
		return rs, err
	}

	// Create the new records
	req := &route53.ChangeResourceRecordSetsRequest{
		Comment: "Managed by Terraform",
		Changes: []route53.Change{
			route53.Change{
				Action: "UPSERT",
				Record: *rec,
			},
		},
	}
	zone := rs.Attributes["zone_id"]
	log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s",
		zone, rs.Attributes["name"])
	resp, err := conn.ChangeResourceRecordSets(zone, req)
	if err != nil {
		return rs, err
	}

	// Generate an ID
	rs.ID = fmt.Sprintf("%s_%s_%s", zone, rs.Attributes["name"], rs.Attributes["type"])
	rs.Dependencies = []terraform.ResourceDependency{
		terraform.ResourceDependency{ID: zone},
	}

	// Wait until we are done
	wait := resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     "INSYNC",
		Timeout:    10 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			return resource_aws_r53_wait(conn, resp.ChangeInfo.ID)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return rs, err
	}

	return rs, nil
}
func resourceAwsRoute53ZoneCreate(d *schema.ResourceData, meta interface{}) error {
	r53 := meta.(*AWSClient).r53conn

	req := &route53.CreateHostedZoneInput{
		Name:             aws.String(d.Get("name").(string)),
		HostedZoneConfig: &route53.HostedZoneConfig{Comment: aws.String(d.Get("comment").(string))},
		CallerReference:  aws.String(time.Now().Format(time.RFC3339Nano)),
	}
	if v := d.Get("vpc_id"); v != "" {
		req.VPC = &route53.VPC{
			VPCId:     aws.String(v.(string)),
			VPCRegion: aws.String(meta.(*AWSClient).region),
		}
		if w := d.Get("vpc_region"); w != "" {
			req.VPC.VPCRegion = aws.String(w.(string))
		}
		d.Set("vpc_region", req.VPC.VPCRegion)
	}

	if v, ok := d.GetOk("delegation_set_id"); ok {
		req.DelegationSetId = aws.String(v.(string))
	}

	log.Printf("[DEBUG] Creating Route53 hosted zone: %s", *req.Name)
	var err error
	resp, err := r53.CreateHostedZone(req)
	if err != nil {
		return err
	}

	// Store the zone_id
	zone := cleanZoneID(*resp.HostedZone.Id)
	d.Set("zone_id", zone)
	d.SetId(zone)

	// Wait until we are done initializing
	wait := resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     []string{"INSYNC"},
		Timeout:    10 * time.Minute,
		MinTimeout: 2 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			changeRequest := &route53.GetChangeInput{
				Id: aws.String(cleanChangeID(*resp.ChangeInfo.Id)),
			}
			return resourceAwsGoRoute53Wait(r53, changeRequest)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsRoute53ZoneUpdate(d, meta)
}
func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).route53

	// Get the records
	rec, err := resourceAwsRoute53RecordBuildSet(d)
	if err != nil {
		return err
	}

	// Create the new records
	req := &route53.ChangeResourceRecordSetsRequest{
		Comment: "Deleted by Terraform",
		Changes: []route53.Change{
			route53.Change{
				Action: "DELETE",
				Record: *rec,
			},
		},
	}
	zone := d.Get("zone_id").(string)
	log.Printf("[DEBUG] Deleting resource records for zone: %s, name: %s",
		zone, d.Get("name").(string))

	wait := resource.StateChangeConf{
		Pending:    []string{"rejected"},
		Target:     "accepted",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			_, err := conn.ChangeResourceRecordSets(zone, req)
			if err != nil {
				if strings.Contains(err.Error(), "PriorRequestNotComplete") {
					// There is some pending operation, so just retry
					// in a bit.
					return 42, "rejected", nil
				}

				if strings.Contains(err.Error(), "InvalidChangeBatch") {
					// This means that the record is already gone.
					return 42, "accepted", nil
				}

				return 42, "failure", err
			}

			return 42, "accepted", nil
		},
	}

	if _, err := wait.WaitForState(); err != nil {
		return err
	}

	return nil
}
func resource_aws_r53_record_destroy(
	s *terraform.ResourceState,
	meta interface{}) error {
	p := meta.(*ResourceProvider)
	conn := p.route53

	// Get the record
	rec, err := resource_aws_r53_build_record_set(s)
	if err != nil {
		return err
	}

	// Create the new records
	req := &route53.ChangeResourceRecordSetsRequest{
		Comment: "Deleted by Terraform",
		Changes: []route53.Change{
			route53.Change{
				Action: "DELETE",
				Record: *rec,
			},
		},
	}
	zone := s.Attributes["zone_id"]
	log.Printf("[DEBUG] Deleting resource records for zone: %s, name: %s",
		zone, s.Attributes["name"])
	wait := resource.StateChangeConf{
		Pending:    []string{"rejected"},
		Target:     "accepted",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			_, err := conn.ChangeResourceRecordSets(zone, req)
			if err != nil {
				if strings.Contains(err.Error(), "PriorRequestNotComplete") {
					// There is some pending operation, so just retry
					// in a bit.
					return nil, "rejected", nil
				}

				return nil, "failure", err
			}

			return nil, "accepted", nil
		},
	}

	if _, err := wait.WaitForState(); err != nil {
		return err
	}

	return nil
}
func updateKmsKeyRotationStatus(conn *kms.KMS, d *schema.ResourceData) error {
	var err error
	shouldEnableRotation := d.Get("enable_key_rotation").(bool)
	if shouldEnableRotation {
		log.Printf("[DEBUG] Enabling key rotation for KMS key %q", d.Id())
		_, err = conn.EnableKeyRotation(&kms.EnableKeyRotationInput{
			KeyId: aws.String(d.Id()),
		})
	} else {
		log.Printf("[DEBUG] Disabling key rotation for KMS key %q", d.Id())
		_, err = conn.DisableKeyRotation(&kms.DisableKeyRotationInput{
			KeyId: aws.String(d.Id()),
		})
	}

	if err != nil {
		return fmt.Errorf("Failed to set key rotation for %q to %t: %q",
			d.Id(), shouldEnableRotation, err.Error())
	}

	// Wait for propagation since KMS is eventually consistent
	wait := resource.StateChangeConf{
		Pending:                   []string{fmt.Sprintf("%t", !shouldEnableRotation)},
		Target:                    []string{fmt.Sprintf("%t", shouldEnableRotation)},
		Timeout:                   5 * time.Minute,
		MinTimeout:                1 * time.Second,
		ContinuousTargetOccurence: 5,
		Refresh: func() (interface{}, string, error) {
			log.Printf("[DEBUG] Checking if KMS key %s rotation status is %t",
				d.Id(), shouldEnableRotation)
			resp, err := conn.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{
				KeyId: aws.String(d.Id()),
			})
			if err != nil {
				return resp, "FAILED", err
			}
			status := fmt.Sprintf("%t", *resp.KeyRotationEnabled)
			log.Printf("[DEBUG] KMS key %s rotation status received: %s, retrying", d.Id(), status)

			return resp, status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return fmt.Errorf("Failed setting KMS key rotation status to %t: %s",
			shouldEnableRotation, err)
	}

	return nil
}
func updateKmsKeyStatus(conn *kms.KMS, id string, shouldBeEnabled bool) error {
	var err error

	if shouldBeEnabled {
		log.Printf("[DEBUG] Enabling KMS key %q", id)
		_, err = conn.EnableKey(&kms.EnableKeyInput{
			KeyId: aws.String(id),
		})
	} else {
		log.Printf("[DEBUG] Disabling KMS key %q", id)
		_, err = conn.DisableKey(&kms.DisableKeyInput{
			KeyId: aws.String(id),
		})
	}

	if err != nil {
		return fmt.Errorf("Failed to set KMS key %q status to %t: %q",
			id, shouldBeEnabled, err.Error())
	}

	// Wait for propagation since KMS is eventually consistent
	wait := resource.StateChangeConf{
		Pending:                   []string{fmt.Sprintf("%t", !shouldBeEnabled)},
		Target:                    []string{fmt.Sprintf("%t", shouldBeEnabled)},
		Timeout:                   20 * time.Minute,
		MinTimeout:                2 * time.Second,
		ContinuousTargetOccurence: 10,
		Refresh: func() (interface{}, string, error) {
			log.Printf("[DEBUG] Checking if KMS key %s enabled status is %t",
				id, shouldBeEnabled)
			resp, err := conn.DescribeKey(&kms.DescribeKeyInput{
				KeyId: aws.String(id),
			})
			if err != nil {
				return resp, "FAILED", err
			}
			status := fmt.Sprintf("%t", *resp.KeyMetadata.Enabled)
			log.Printf("[DEBUG] KMS key %s status received: %s, retrying", id, status)

			return resp, status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return fmt.Errorf("Failed setting KMS key status to %t: %s",
			shouldBeEnabled, err)
	}

	return nil
}
func resourceBrightboxServerCreate(
	d *schema.ResourceData,
	meta interface{},
) error {
	client := meta.(*CompositeClient).ApiClient

	log.Printf("[DEBUG] Server create called")
	server_opts := &brightbox.ServerOptions{
		Image: d.Get("image").(string),
	}

	err := addUpdateableServerOptions(d, server_opts)
	if err != nil {
		return err
	}

	server_type := &server_opts.ServerType
	assign_string(d, &server_type, "type")
	zone := &server_opts.Zone
	assign_string(d, &zone, "zone")

	log.Printf("[DEBUG] Server create configuration: %#v", server_opts)

	server, err := client.CreateServer(server_opts)
	if err != nil {
		return fmt.Errorf("Error creating server: %s", err)
	}

	d.SetId(server.Id)

	log.Printf("[INFO] Waiting for Server (%s) to become available", d.Id())

	stateConf := resource.StateChangeConf{
		Pending:    []string{"creating"},
		Target:     []string{"active", "inactive"},
		Refresh:    serverStateRefresh(client, server.Id),
		Timeout:    5 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	active_server, err := stateConf.WaitForState()
	if err != nil {
		return err
	}

	setServerAttributes(d, active_server.(*brightbox.Server))

	return nil
}
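// NOTE: serverStateRefresh, used by the Brightbox create and delete functions
// above, is not shown in this collection. The following is a minimal sketch of
// what such a helper could look like, assuming the gobrightbox client exposes a
// Server(id) lookup and a Status string field; treat the exact names as an
// assumption rather than the provider's actual implementation.
func serverStateRefresh(client *brightbox.Client, serverID string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		server, err := client.Server(serverID)
		if err != nil {
			log.Printf("[ERROR] Error retrieving server details for %s: %s", serverID, err)
			return nil, "", err
		}
		// The server's status string ("creating", "active", "deleted", ...)
		// drives the Pending/Target matching in StateChangeConf.
		return server, server.Status, nil
	}
}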
func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zoneName := d.Get("zone").(string)
	clusterName := d.Get("name").(string)
	desiredNodeVersion := d.Get("node_version").(string)

	req := &container.UpdateClusterRequest{
		Update: &container.ClusterUpdate{
			DesiredNodeVersion: desiredNodeVersion,
		},
	}
	op, err := config.clientContainer.Projects.Zones.Clusters.Update(
		project, zoneName, clusterName, req).Do()
	if err != nil {
		return err
	}

	// Wait until it's updated
	wait := resource.StateChangeConf{
		Pending:    []string{"PENDING", "RUNNING"},
		Target:     []string{"DONE"},
		Timeout:    10 * time.Minute,
		MinTimeout: 2 * time.Second,
		Refresh: func() (interface{}, string, error) {
			log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName)
			resp, err := config.clientContainer.Projects.Zones.Operations.Get(
				project, zoneName, op.Name).Do()
			if err != nil {
				// Return early so a failed poll cannot dereference a nil response.
				return nil, "", err
			}
			log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s",
				clusterName, resp.Status)
			return resp, resp.Status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
		desiredNodeVersion)

	return resourceContainerClusterRead(d, meta)
}
func resourceAwsPlacementGroupCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	name := d.Get("name").(string)
	input := ec2.CreatePlacementGroupInput{
		GroupName: aws.String(name),
		Strategy:  aws.String(d.Get("strategy").(string)),
	}
	log.Printf("[DEBUG] Creating EC2 Placement group: %s", input)
	_, err := conn.CreatePlacementGroup(&input)
	if err != nil {
		return err
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     []string{"available"},
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			out, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{
				GroupNames: []*string{aws.String(name)},
			})

			if err != nil {
				return out, "", err
			}

			if len(out.PlacementGroups) == 0 {
				return out, "", fmt.Errorf("Placement group not found (%q)", name)
			}
			pg := out.PlacementGroups[0]

			return out, *pg.State, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] EC2 Placement group created: %q", name)

	d.SetId(name)

	return resourceAwsPlacementGroupRead(d, meta)
}
func resourceAwsPlacementGroupDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	log.Printf("[DEBUG] Deleting EC2 Placement Group %q", d.Id())
	_, err := conn.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{
		GroupName: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"deleting"},
		Target:     []string{"deleted"},
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			out, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{
				GroupNames: []*string{aws.String(d.Id())},
			})

			if err != nil {
				// Use a checked type assertion so a non-AWS error cannot panic.
				if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "InvalidPlacementGroup.Unknown" {
					return out, "deleted", nil
				}
				return out, "", err
			}

			if len(out.PlacementGroups) == 0 {
				return out, "deleted", nil
			}

			pg := out.PlacementGroups[0]

			return out, *pg.State, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	d.SetId("")

	return nil
}
func createDatabaseServer(d *schema.ResourceData, client *brightbox.Client) error {
	log.Printf("[DEBUG] Database Server create called")
	database_server_opts := getBlankDatabaseServerOpts()
	err := addUpdateableDatabaseServerOptions(d, database_server_opts)
	if err != nil {
		return err
	}
	engine := &database_server_opts.Engine
	assign_string(d, &engine, "database_engine")
	version := &database_server_opts.Version
	assign_string(d, &version, "database_version")
	snapshot := &database_server_opts.Snapshot
	assign_string(d, &snapshot, "snapshot")
	zone := &database_server_opts.Zone
	assign_string(d, &zone, "zone")
	database_server_opts.AllowAccess = &allow_any
	log.Printf("[DEBUG] Database Server create configuration %#v", database_server_opts)
	output_database_server_options(database_server_opts)
	database_server, err := client.CreateDatabaseServer(database_server_opts)
	if err != nil {
		return fmt.Errorf("Error creating database server: %s", err)
	}
	log.Printf("[DEBUG] Setting Partial")
	d.Partial(true)
	d.SetId(database_server.Id)
	if database_server.AdminPassword == "" {
		log.Printf("[WARN] No password returned for Cloud SQL server %s", database_server.Id)
	} else {
		d.Set("admin_password", database_server.AdminPassword)
	}
	log.Printf("[INFO] Waiting for Database Server (%s) to become available", d.Id())
	stateConf := resource.StateChangeConf{
		Pending:    []string{"creating"},
		Target:     []string{"active"},
		Refresh:    databaseServerStateRefresh(client, database_server.Id),
		Timeout:    5 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	active_database_server, err := stateConf.WaitForState()
	if err != nil {
		return err
	}
	d.SetPartial("admin_password")
	setDatabaseServerAttributes(d, active_database_server.(*brightbox.DatabaseServer))
	return nil
}
func resourceAwsKmsKeyDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).kmsconn
	keyId := d.Get("key_id").(string)

	req := &kms.ScheduleKeyDeletionInput{
		KeyId: aws.String(keyId),
	}
	if v, exists := d.GetOk("deletion_window_in_days"); exists {
		req.PendingWindowInDays = aws.Int64(int64(v.(int)))
	}
	_, err := conn.ScheduleKeyDeletion(req)
	if err != nil {
		return err
	}

	// Wait for propagation since KMS is eventually consistent
	wait := resource.StateChangeConf{
		Pending:                   []string{"Enabled", "Disabled"},
		Target:                    []string{"PendingDeletion"},
		Timeout:                   20 * time.Minute,
		MinTimeout:                2 * time.Second,
		ContinuousTargetOccurence: 10,
		Refresh: func() (interface{}, string, error) {
			log.Printf("[DEBUG] Checking if KMS key %s state is PendingDeletion", keyId)
			resp, err := conn.DescribeKey(&kms.DescribeKeyInput{
				KeyId: aws.String(keyId),
			})
			if err != nil {
				return resp, "Failed", err
			}

			metadata := *resp.KeyMetadata
			log.Printf("[DEBUG] KMS key %s state is %s, retrying", keyId, *metadata.KeyState)

			return resp, *metadata.KeyState, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return fmt.Errorf("Failed deactivating KMS key %s: %s", keyId, err)
	}

	log.Printf("[DEBUG] KMS Key %s deactivated.", keyId)
	d.SetId("")
	return nil
}
func resourceAwsRoute53ZoneAssociationCreate(d *schema.ResourceData, meta interface{}) error {
	r53 := meta.(*AWSClient).r53conn

	req := &route53.AssociateVPCWithHostedZoneInput{
		HostedZoneId: aws.String(d.Get("zone_id").(string)),
		VPC: &route53.VPC{
			VPCId:     aws.String(d.Get("vpc_id").(string)),
			VPCRegion: aws.String(meta.(*AWSClient).region),
		},
		Comment: aws.String("Managed by Terraform"),
	}
	if w := d.Get("vpc_region"); w != "" {
		req.VPC.VPCRegion = aws.String(w.(string))
	}

	log.Printf("[DEBUG] Associating Route53 Private Zone %s with VPC %s with region %s",
		*req.HostedZoneId, *req.VPC.VPCId, *req.VPC.VPCRegion)
	var err error
	resp, err := r53.AssociateVPCWithHostedZone(req)
	if err != nil {
		return err
	}

	// Store association id
	d.SetId(fmt.Sprintf("%s:%s", *req.HostedZoneId, *req.VPC.VPCId))
	d.Set("vpc_region", req.VPC.VPCRegion)

	// Wait until we are done initializing
	wait := resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     []string{"INSYNC"},
		Timeout:    10 * time.Minute,
		MinTimeout: 2 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			changeRequest := &route53.GetChangeInput{
				Id: aws.String(cleanChangeID(*resp.ChangeInfo.Id)),
			}
			return resourceAwsGoRoute53Wait(r53, changeRequest)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsRoute53ZoneAssociationUpdate(d, meta)
}
func waitForRoute53RecordSetToSync(conn *route53.Route53, requestId string) error {
	wait := resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     []string{"INSYNC"},
		Timeout:    30 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			changeRequest := &route53.GetChangeInput{
				Id: aws.String(requestId),
			}
			return resourceAwsGoRoute53Wait(conn, changeRequest)
		},
	}
	_, err := wait.WaitForState()
	return err
}
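// NOTE: several of the Route53 waiters above delegate to resourceAwsGoRoute53Wait,
// which is not included in this collection. A hedged sketch follows, assuming it
// simply wraps route53.GetChange and reports the change status as the state string;
// the exact body is an assumption, not the provider's verbatim implementation.
func resourceAwsGoRoute53Wait(r53 *route53.Route53, ref *route53.GetChangeInput) (result interface{}, state string, err error) {
	status, err := r53.GetChange(ref)
	if err != nil {
		return nil, "UNKNOWN", err
	}
	// Route53 reports a change as either "PENDING" or "INSYNC", matching the
	// Pending/Target values used by the callers above.
	return true, *status.ChangeInfo.Status, nil
}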
func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zoneName := d.Get("zone").(string)
	clusterName := d.Get("name").(string)

	log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string))
	op, err := config.clientContainer.Projects.Zones.Clusters.Delete(
		project, zoneName, clusterName).Do()
	if err != nil {
		return err
	}

	// Wait until it's deleted
	wait := resource.StateChangeConf{
		Pending:    []string{"PENDING", "RUNNING"},
		Target:     []string{"DONE"},
		Timeout:    10 * time.Minute,
		MinTimeout: 3 * time.Second,
		Refresh: func() (interface{}, string, error) {
			log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName)
			resp, err := config.clientContainer.Projects.Zones.Operations.Get(
				project, zoneName, op.Name).Do()
			if err != nil {
				// Return early so a failed poll cannot dereference a nil response.
				return nil, "", err
			}
			log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s",
				clusterName, resp.Status)
			return resp, resp.Status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())

	d.SetId("")

	return nil
}
func resource_aws_r53_zone_create(
	s *terraform.ResourceState,
	d *terraform.ResourceDiff,
	meta interface{}) (*terraform.ResourceState, error) {
	p := meta.(*ResourceProvider)
	r53 := p.route53

	// Merge the diff into the state so that we have all the attributes
	// properly.
	rs := s.MergeDiff(d)

	req := &route53.CreateHostedZoneRequest{
		Name:    rs.Attributes["name"],
		Comment: "Managed by Terraform",
	}
	log.Printf("[DEBUG] Creating Route53 hosted zone: %s", req.Name)
	resp, err := r53.CreateHostedZone(req)
	if err != nil {
		return rs, err
	}

	// Store the zone_id
	zone := route53.CleanZoneID(resp.HostedZone.ID)
	rs.ID = zone
	rs.Attributes["zone_id"] = zone

	// Wait until we are done initializing
	wait := resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     "INSYNC",
		Timeout:    10 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			return resource_aws_r53_wait(r53, resp.ChangeInfo.ID)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return rs, err
	}

	return rs, nil
}
func resourceBrightboxLoadBalancerCreate(
	d *schema.ResourceData,
	meta interface{},
) error {
	client := meta.(*CompositeClient).ApiClient

	log.Printf("[DEBUG] Load Balancer create called")
	load_balancer_opts := &brightbox.LoadBalancerOptions{}

	err := addUpdateableLoadBalancerOptions(d, load_balancer_opts)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Load Balancer create configuration %#v", load_balancer_opts)
	output_load_balancer_options(load_balancer_opts)

	load_balancer, err := client.CreateLoadBalancer(load_balancer_opts)
	if err != nil {
		return fmt.Errorf("Error creating load balancer: %s", err)
	}

	d.SetId(load_balancer.Id)

	log.Printf("[INFO] Waiting for Load Balancer (%s) to become available", d.Id())

	stateConf := resource.StateChangeConf{
		Pending:    []string{"creating"},
		Target:     []string{"active"},
		Refresh:    loadBalancerStateRefresh(client, load_balancer.Id),
		Timeout:    5 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	active_load_balancer, err := stateConf.WaitForState()
	if err != nil {
		return err
	}

	setLoadBalancerAttributes(d, active_load_balancer.(*brightbox.LoadBalancer))

	return nil
}
func resourceAwsSubnetDelete(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	log.Printf("[INFO] Deleting subnet: %s", d.Id())
	req := &ec2.DeleteSubnetRequest{
		SubnetID: aws.String(d.Id()),
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     "destroyed",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			err := ec2conn.DeleteSubnet(req)
			if err != nil {
				if apiErr, ok := err.(aws.APIError); ok {
					if apiErr.Code == "DependencyViolation" {
						// There is some pending operation, so just retry
						// in a bit.
						return 42, "pending", nil
					}

					if apiErr.Code == "InvalidSubnetID.NotFound" {
						return 42, "destroyed", nil
					}
				}

				return 42, "failure", err
			}

			return 42, "destroyed", nil
		},
	}

	if _, err := wait.WaitForState(); err != nil {
		return fmt.Errorf("Error deleting subnet: %s", err)
	}

	return nil
}
func resourceAwsRoute53ZoneCreate(d *schema.ResourceData, meta interface{}) error {
	r53 := meta.(*AWSClient).r53conn

	comment := &route53.HostedZoneConfig{Comment: aws.String("Managed by Terraform")}

	req := &route53.CreateHostedZoneRequest{
		Name:             aws.String(d.Get("name").(string)),
		HostedZoneConfig: comment,
		CallerReference:  aws.String(time.Now().Format(time.RFC3339Nano)),
	}

	log.Printf("[DEBUG] Creating Route53 hosted zone: %s", *req.Name)
	resp, err := r53.CreateHostedZone(req)
	if err != nil {
		return err
	}

	// Store the zone_id
	zone := cleanZoneID(*resp.HostedZone.ID)
	d.Set("zone_id", zone)
	d.SetId(zone)

	// Wait until we are done initializing
	wait := resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     "INSYNC",
		Timeout:    10 * time.Minute,
		MinTimeout: 2 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			changeRequest := &route53.GetChangeRequest{
				ID: aws.String(cleanChangeID(*resp.ChangeInfo.ID)),
			}
			return resourceAwsGoRoute53Wait(r53, changeRequest)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsRoute53ZoneUpdate(d, meta)
}
func waitForCloudip(
	client *brightbox.Client,
	cloudip_id string,
	pending string,
	target string,
) (*brightbox.CloudIP, error) {
	stateConf := resource.StateChangeConf{
		Pending:    []string{pending},
		Target:     []string{target},
		Refresh:    cloudipStateRefresh(client, cloudip_id),
		Timeout:    5 * time.Minute,
		MinTimeout: 3 * time.Second,
	}
	active_cloudip, err := stateConf.WaitForState()
	if err != nil {
		return nil, err
	}
	return active_cloudip.(*brightbox.CloudIP), err
}
func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).r53conn

	zone := cleanZoneID(d.Get("zone_id").(string))
	log.Printf("[DEBUG] Deleting resource records for zone: %s, name: %s",
		zone, d.Get("name").(string))
	var err error
	zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})
	if err != nil {
		if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" {
			log.Printf("[DEBUG] No matching Route 53 Record found for: %s, removing from state file", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}

	// Get the records
	rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name)
	if err != nil {
		return err
	}

	// Create the new records
	changeBatch := &route53.ChangeBatch{
		Comment: aws.String("Deleted by Terraform"),
		Changes: []*route53.Change{
			&route53.Change{
				Action:            aws.String("DELETE"),
				ResourceRecordSet: rec,
			},
		},
	}

	req := &route53.ChangeResourceRecordSetsInput{
		HostedZoneId: aws.String(cleanZoneID(zone)),
		ChangeBatch:  changeBatch,
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"rejected"},
		Target:     "accepted",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			_, err := conn.ChangeResourceRecordSets(req)
			if err != nil {
				if r53err, ok := err.(awserr.Error); ok {
					if r53err.Code() == "PriorRequestNotComplete" {
						// There is some pending operation, so just retry
						// in a bit.
						return 42, "rejected", nil
					}

					if r53err.Code() == "InvalidChangeBatch" {
						// This means that the record is already gone.
						return 42, "accepted", nil
					}
				}

				return 42, "failure", err
			}

			return 42, "accepted", nil
		},
	}

	if _, err := wait.WaitForState(); err != nil {
		return err
	}

	return nil
}
func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).r53conn
	zone := cleanZoneID(d.Get("zone_id").(string))

	var err error
	zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})
	if err != nil {
		return err
	}
	if zoneRecord.HostedZone == nil {
		return fmt.Errorf("[WARN] No Route53 Zone found for id (%s)", zone)
	}

	// Get the record
	rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name)
	if err != nil {
		return err
	}

	// Create the new records. We abuse StateChangeConf for this to
	// retry for us since Route53 sometimes returns errors about another
	// operation happening at the same time.
	changeBatch := &route53.ChangeBatch{
		Comment: aws.String("Managed by Terraform"),
		Changes: []*route53.Change{
			&route53.Change{
				Action:            aws.String("UPSERT"),
				ResourceRecordSet: rec,
			},
		},
	}

	req := &route53.ChangeResourceRecordSetsInput{
		HostedZoneId: aws.String(cleanZoneID(*zoneRecord.HostedZone.Id)),
		ChangeBatch:  changeBatch,
	}

	log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s\n\n%s",
		zone, *rec.Name, req)

	wait := resource.StateChangeConf{
		Pending:    []string{"rejected"},
		Target:     "accepted",
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.ChangeResourceRecordSets(req)
			if err != nil {
				if r53err, ok := err.(awserr.Error); ok {
					if r53err.Code() == "PriorRequestNotComplete" {
						// There is some pending operation, so just retry
						// in a bit.
						return nil, "rejected", nil
					}
				}

				return nil, "failure", err
			}

			return resp, "accepted", nil
		},
	}

	respRaw, err := wait.WaitForState()
	if err != nil {
		return err
	}
	changeInfo := respRaw.(*route53.ChangeResourceRecordSetsOutput).ChangeInfo

	// Generate an ID
	vars := []string{
		zone,
		strings.ToLower(d.Get("name").(string)),
		d.Get("type").(string),
	}
	if v, ok := d.GetOk("set_identifier"); ok {
		vars = append(vars, v.(string))
	}
	d.SetId(strings.Join(vars, "_"))

	// Wait until we are done
	wait = resource.StateChangeConf{
		Delay:      30 * time.Second,
		Pending:    []string{"PENDING"},
		Target:     "INSYNC",
		Timeout:    30 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (result interface{}, state string, err error) {
			changeRequest := &route53.GetChangeInput{
				Id: aws.String(cleanChangeID(*changeInfo.Id)),
			}
			return resourceAwsGoRoute53Wait(conn, changeRequest)
		},
	}
	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsRoute53RecordRead(d, meta)
}
func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface{}) error {
	retryTimeout := int64(30)
	conn := meta.(*AWSClient).cfconn

	input := cloudformation.CreateStackInput{
		StackName: aws.String(d.Get("name").(string)),
	}
	if v, ok := d.GetOk("template_body"); ok {
		input.TemplateBody = aws.String(normalizeJson(v.(string)))
	}
	if v, ok := d.GetOk("template_url"); ok {
		input.TemplateURL = aws.String(v.(string))
	}
	if v, ok := d.GetOk("capabilities"); ok {
		input.Capabilities = expandStringList(v.(*schema.Set).List())
	}
	if v, ok := d.GetOk("disable_rollback"); ok {
		input.DisableRollback = aws.Bool(v.(bool))
	}
	if v, ok := d.GetOk("notification_arns"); ok {
		input.NotificationARNs = expandStringList(v.(*schema.Set).List())
	}
	if v, ok := d.GetOk("on_failure"); ok {
		input.OnFailure = aws.String(v.(string))
	}
	if v, ok := d.GetOk("parameters"); ok {
		input.Parameters = expandCloudFormationParameters(v.(map[string]interface{}))
	}
	if v, ok := d.GetOk("policy_body"); ok {
		input.StackPolicyBody = aws.String(normalizeJson(v.(string)))
	}
	if v, ok := d.GetOk("policy_url"); ok {
		input.StackPolicyURL = aws.String(v.(string))
	}
	if v, ok := d.GetOk("tags"); ok {
		input.Tags = expandCloudFormationTags(v.(map[string]interface{}))
	}
	if v, ok := d.GetOk("timeout_in_minutes"); ok {
		m := int64(v.(int))
		input.TimeoutInMinutes = aws.Int64(m)
		if m > retryTimeout {
			retryTimeout = m + 5
			log.Printf("[DEBUG] CloudFormation timeout: %d", retryTimeout)
		}
	}

	log.Printf("[DEBUG] Creating CloudFormation Stack: %s", input)
	resp, err := conn.CreateStack(&input)
	if err != nil {
		return fmt.Errorf("Creating CloudFormation stack failed: %s", err.Error())
	}

	d.SetId(*resp.StackId)

	wait := resource.StateChangeConf{
		Pending:    []string{"CREATE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS", "ROLLBACK_COMPLETE"},
		Target:     []string{"CREATE_COMPLETE"},
		Timeout:    time.Duration(retryTimeout) * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
				StackName: aws.String(d.Get("name").(string)),
			})
			if err != nil {
				// Bail out before dereferencing a possibly empty response.
				return nil, "", err
			}

			status := *resp.Stacks[0].StackStatus
			log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)

			if status == "ROLLBACK_COMPLETE" {
				stack := resp.Stacks[0]
				failures, err := getCloudFormationFailures(stack.StackName, *stack.CreationTime, conn)
				if err != nil {
					return resp, "", fmt.Errorf(
						"Failed getting details about rollback: %q", err.Error())
				}

				return resp, "", fmt.Errorf("ROLLBACK_COMPLETE:\n%q", failures)
			}
			return resp, status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[INFO] CloudFormation Stack %q created", d.Get("name").(string))

	return resourceAwsCloudFormationStackRead(d, meta)
}
func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).cfconn

	input := &cloudformation.DeleteStackInput{
		StackName: aws.String(d.Get("name").(string)),
	}
	log.Printf("[DEBUG] Deleting CloudFormation stack %s", input)
	_, err := conn.DeleteStack(input)
	if err != nil {
		awsErr, ok := err.(awserr.Error)
		if !ok {
			return err
		}

		if awsErr.Code() == "ValidationError" {
			// Ignore stack which has been already deleted
			return nil
		}
		return err
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"DELETE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS"},
		Target:     []string{"DELETE_COMPLETE"},
		Timeout:    30 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
				StackName: aws.String(d.Get("name").(string)),
			})
			if err != nil {
				awsErr, ok := err.(awserr.Error)
				if !ok {
					return resp, "DELETE_FAILED", err
				}

				log.Printf("[DEBUG] Error when deleting CloudFormation stack: %s: %s",
					awsErr.Code(), awsErr.Message())

				if awsErr.Code() == "ValidationError" {
					return resp, "DELETE_COMPLETE", nil
				}

				// Surface any other AWS error instead of reading a possibly nil response.
				return resp, "DELETE_FAILED", err
			}

			if len(resp.Stacks) == 0 {
				log.Printf("[DEBUG] CloudFormation stack %q is already gone", d.Get("name"))
				return resp, "DELETE_COMPLETE", nil
			}

			status := *resp.Stacks[0].StackStatus
			log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)

			return resp, status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] CloudFormation stack %q has been deleted", d.Id())

	d.SetId("")

	return nil
}
func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).cfconn

	input := &cloudformation.UpdateStackInput{
		StackName: aws.String(d.Get("name").(string)),
	}

	// Either TemplateBody, TemplateURL or UsePreviousTemplate are required
	if v, ok := d.GetOk("template_url"); ok {
		input.TemplateURL = aws.String(v.(string))
	}
	if v, ok := d.GetOk("template_body"); ok && input.TemplateURL == nil {
		input.TemplateBody = aws.String(normalizeJson(v.(string)))
	}

	// Capabilities must be present whether they are changed or not
	if v, ok := d.GetOk("capabilities"); ok {
		input.Capabilities = expandStringList(v.(*schema.Set).List())
	}

	if d.HasChange("notification_arns") {
		input.NotificationARNs = expandStringList(d.Get("notification_arns").(*schema.Set).List())
	}

	// Parameters must be present whether they are changed or not
	if v, ok := d.GetOk("parameters"); ok {
		input.Parameters = expandCloudFormationParameters(v.(map[string]interface{}))
	}

	if d.HasChange("policy_body") {
		input.StackPolicyBody = aws.String(normalizeJson(d.Get("policy_body").(string)))
	}
	if d.HasChange("policy_url") {
		input.StackPolicyURL = aws.String(d.Get("policy_url").(string))
	}

	log.Printf("[DEBUG] Updating CloudFormation stack: %s", input)
	stack, err := conn.UpdateStack(input)
	if err != nil {
		return err
	}

	lastUpdatedTime, err := getLastCfEventTimestamp(d.Get("name").(string), conn)
	if err != nil {
		return err
	}

	wait := resource.StateChangeConf{
		Pending: []string{
			"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
			"UPDATE_IN_PROGRESS",
			"UPDATE_ROLLBACK_IN_PROGRESS",
			"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
			"UPDATE_ROLLBACK_COMPLETE",
		},
		Target:     []string{"UPDATE_COMPLETE"},
		Timeout:    15 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
				StackName: aws.String(d.Get("name").(string)),
			})
			if err != nil {
				// Bail out before dereferencing a possibly empty response.
				return nil, "", err
			}

			stack := resp.Stacks[0]
			status := *stack.StackStatus
			log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)

			if status == "UPDATE_ROLLBACK_COMPLETE" {
				failures, err := getCloudFormationFailures(stack.StackName, *lastUpdatedTime, conn)
				if err != nil {
					return resp, "", fmt.Errorf(
						"Failed getting details about rollback: %q", err.Error())
				}

				return resp, "", fmt.Errorf(
					"UPDATE_ROLLBACK_COMPLETE:\n%q", failures)
			}

			return resp, status, nil
		},
	}

	_, err = wait.WaitForState()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] CloudFormation stack %q has been updated", *stack.StackId)

	return resourceAwsCloudFormationStackRead(d, meta)
}
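// NOTE: getLastCfEventTimestamp is referenced above but not included here. A
// hypothetical sketch follows, assuming it returns the timestamp of the most
// recent stack event so that rollback failures can later be filtered by time;
// the name and exact behaviour are assumptions, not the provider's verbatim code.
func getLastCfEventTimestamp(stackName string, conn *cloudformation.CloudFormation) (*time.Time, error) {
	output, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
		StackName: aws.String(stackName),
	})
	if err != nil {
		return nil, err
	}
	if len(output.StackEvents) == 0 {
		return nil, fmt.Errorf("no events found for CloudFormation stack %q", stackName)
	}
	// DescribeStackEvents returns events in reverse chronological order,
	// so the first entry is the most recent one.
	return output.StackEvents[0].Timestamp, nil
}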
func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).r53conn

	// Get the records
	rec, err := findRecord(d, meta)
	if err != nil {
		switch err {
		case r53NoHostedZoneFound, r53NoRecordsFound:
			log.Printf("[DEBUG] %s for: %s, removing from state file", err, d.Id())
			d.SetId("")
			return nil
		default:
			return err
		}
	}

	// Change batch for deleting
	changeBatch := &route53.ChangeBatch{
		Comment: aws.String("Deleted by Terraform"),
		Changes: []*route53.Change{
			&route53.Change{
				Action:            aws.String("DELETE"),
				ResourceRecordSet: rec,
			},
		},
	}

	zone := cleanZoneID(d.Get("zone_id").(string))

	req := &route53.ChangeResourceRecordSetsInput{
		HostedZoneId: aws.String(cleanZoneID(zone)),
		ChangeBatch:  changeBatch,
	}

	wait := resource.StateChangeConf{
		Pending:    []string{"rejected"},
		Target:     []string{"accepted"},
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			_, err := conn.ChangeResourceRecordSets(req)
			if err != nil {
				if r53err, ok := err.(awserr.Error); ok {
					if r53err.Code() == "PriorRequestNotComplete" {
						// There is some pending operation, so just retry
						// in a bit.
						return 42, "rejected", nil
					}

					if r53err.Code() == "InvalidChangeBatch" {
						// This means that the record is already gone.
						return 42, "accepted", nil
					}
				}

				return 42, "failure", err
			}

			return 42, "accepted", nil
		},
	}

	if _, err := wait.WaitForState(); err != nil {
		return err
	}

	return nil
}