// resourceAwsAmiFromInstanceCreate registers a new AMI by imaging an existing
// EC2 instance, records the ID immediately, waits for the image to become
// available, and then delegates to the shared AMI update path for tags and
// description.
func resourceAwsAmiFromInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*AWSClient).ec2conn
	req := &ec2.CreateImageInput{
		Name:        aws.String(d.Get("name").(string)),
		Description: aws.String(d.Get("description").(string)),
		InstanceId:  aws.String(d.Get("source_instance_id").(string)),
		NoReboot:    aws.Bool(d.Get("snapshot_without_reboot").(bool)),
	}
	res, err := client.CreateImage(req)
	if err != nil {
		return err
	}
	id := *res.ImageId
	d.SetId(id)
	d.Partial(true) // make sure we record the id even if the rest of this gets interrupted
	d.Set("id", id)
	d.Set("manage_ebs_snapshots", true)
	d.SetPartial("id")
	d.SetPartial("manage_ebs_snapshots")
	d.Partial(false)
	// Block until the AMI leaves "pending"; the update chained below would
	// fail against an image that is not yet available.
	_, err = resourceAwsAmiWaitForAvailable(id, client)
	if err != nil {
		return err
	}
	return resourceAwsAmiUpdate(d, meta)
}
func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn d.Partial(true) if err := setTags(conn, d); err != nil { return err } else { d.SetPartial("tags") } if d.HasChange("map_public_ip_on_launch") { modifyOpts := &ec2.ModifySubnetAttributeInput{ SubnetId: aws.String(d.Id()), MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ Value: aws.Bool(d.Get("map_public_ip_on_launch").(bool)), }, } log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts) _, err := conn.ModifySubnetAttribute(modifyOpts) if err != nil { return err } else { d.SetPartial("map_public_ip_on_launch") } } d.Partial(false) return resourceAwsSubnetRead(d, meta) }
func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) d.Partial(true) if d.HasChange("url_map") { url_map := d.Get("url_map").(string) url_map_ref := &compute.UrlMapReference{UrlMap: url_map} op, err := config.clientCompute.TargetHttpProxies.SetUrlMap( config.Project, d.Id(), url_map_ref).Do() if err != nil { return fmt.Errorf("Error updating target: %s", err) } err = computeOperationWaitGlobal(config, op, "Updating Target Http Proxy") if err != nil { return err } d.SetPartial("url_map") } d.Partial(false) return resourceComputeTargetHttpProxyRead(d, meta) }
// instanceProfileSetRoles reconciles the IAM instance profile's role list with
// the configured "roles" set, removing stale roles and adding new ones one at
// a time. Partial state is recorded after every successful API call so an
// interruption leaves the recorded role list accurate.
func instanceProfileSetRoles(d *schema.ResourceData, iamconn *iam.IAM) error {
	oldInterface, newInterface := d.GetChange("roles")
	oldRoles := oldInterface.(*schema.Set)
	newRoles := newInterface.(*schema.Set)
	// Start from the old set and mutate it as each remove/add succeeds.
	currentRoles := schema.CopySet(oldRoles)
	d.Partial(true)
	// Remove roles present before but no longer configured.
	for _, role := range oldRoles.Difference(newRoles).List() {
		err := instanceProfileRemoveRole(iamconn, d.Id(), role.(string))
		if err != nil {
			return fmt.Errorf("Error removing role %s from IAM instance profile %s: %s", role, d.Id(), err)
		}
		currentRoles.Remove(role)
		d.Set("roles", currentRoles)
		d.SetPartial("roles")
	}
	// Add newly configured roles that were not present before.
	for _, role := range newRoles.Difference(oldRoles).List() {
		err := instanceProfileAddRole(iamconn, d.Id(), role.(string))
		if err != nil {
			return fmt.Errorf("Error adding role %s to IAM instance profile %s: %s", role, d.Id(), err)
		}
		currentRoles.Add(role)
		d.Set("roles", currentRoles)
		d.SetPartial("roles")
	}
	d.Partial(false)
	return nil
}
func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) region := getOptionalRegion(d, config) d.Partial(true) if d.HasChange("target") { target_name := d.Get("target").(string) target_ref := &compute.TargetReference{Target: target_name} op, err := config.clientCompute.ForwardingRules.SetTarget( config.Project, region, d.Id(), target_ref).Do() if err != nil { return fmt.Errorf("Error updating target: %s", err) } err = computeOperationWaitRegion(config, op, region, "Updating Forwarding Rule") if err != nil { return err } d.SetPartial("target") } d.Partial(false) return resourceComputeForwardingRuleRead(d, meta) }
// resourceAwsProxyProtocolPolicyCreate creates the fixed
// "TFEnableProxyProtocol" policy (type ProxyProtocolPolicyType) on the named
// ELB. Attaching the policy to backend ports happens in the Update step this
// chains into.
func resourceAwsProxyProtocolPolicyCreate(d *schema.ResourceData, meta interface{}) error {
	elbconn := meta.(*AWSClient).elbconn
	elbname := aws.String(d.Get("load_balancer").(string))
	input := &elb.CreateLoadBalancerPolicyInput{
		LoadBalancerName: elbname,
		PolicyAttributes: []*elb.PolicyAttribute{
			&elb.PolicyAttribute{
				AttributeName:  aws.String("ProxyProtocol"),
				AttributeValue: aws.String("True"),
			},
		},
		PolicyName:     aws.String("TFEnableProxyProtocol"),
		PolicyTypeName: aws.String("ProxyProtocolPolicyType"),
	}
	// Create a policy
	log.Printf("[DEBUG] ELB create a policy %s from policy type %s", *input.PolicyName, *input.PolicyTypeName)
	if _, err := elbconn.CreateLoadBalancerPolicy(input); err != nil {
		return fmt.Errorf("Error creating a policy %s: %s", *input.PolicyName, err)
	}
	// Assign the policy name for use later
	d.Partial(true)
	// ID format is "<elb name>:<policy name>"; parsed later by
	// resourceAwsProxyProtocolPolicyParseId.
	d.SetId(fmt.Sprintf("%s:%s", *elbname, *input.PolicyName))
	d.SetPartial("load_balancer")
	log.Printf("[INFO] ELB PolicyName: %s", *input.PolicyName)
	return resourceAwsProxyProtocolPolicyUpdate(d, meta)
}
// resourceAwsDbSubnetGroupUpdate replaces the group's subnet membership when
// "subnet_ids" changes and syncs tags, then re-reads the subnet group.
func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	if d.HasChange("subnet_ids") {
		// Only the new value matters; ModifyDBSubnetGroup takes the full
		// replacement list of subnet IDs.
		_, n := d.GetChange("subnet_ids")
		if n == nil {
			n = new(schema.Set)
		}
		ns := n.(*schema.Set)
		var sIds []*string
		for _, s := range ns.List() {
			sIds = append(sIds, aws.String(s.(string)))
		}
		_, err := conn.ModifyDBSubnetGroup(&rds.ModifyDBSubnetGroupInput{
			DBSubnetGroupName: aws.String(d.Id()),
			SubnetIds:         sIds,
		})
		if err != nil {
			return err
		}
	}
	// NOTE(review): a failure to build the ARN silently skips the tag sync;
	// confirm this is intentional rather than a swallowed error.
	if arn, err := buildRDSsubgrpARN(d, meta); err == nil {
		if err := setTagsRDS(conn, d, arn); err != nil {
			return err
		} else {
			d.SetPartial("tags")
		}
	}
	return resourceAwsDbSubnetGroupRead(d, meta)
}
// resourceAwsSecurityGroupUpdate refreshes the group from AWS, reconciles
// ingress (and, for VPC groups, egress) rules, and syncs tags before
// re-reading the group.
func resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn
	sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())()
	if err != nil {
		return err
	}
	if sgRaw == nil {
		// Group no longer exists; drop it from state so it is recreated.
		d.SetId("")
		return nil
	}
	group := sgRaw.(*ec2.SecurityGroup)
	err = resourceAwsSecurityGroupUpdateRules(d, "ingress", meta, group)
	if err != nil {
		return err
	}
	// Egress rules only exist on VPC security groups, not in EC2 Classic.
	if d.Get("vpc_id") != nil {
		err = resourceAwsSecurityGroupUpdateRules(d, "egress", meta, group)
		if err != nil {
			return err
		}
	}
	if err := setTags(conn, d); err != nil {
		return err
	}
	d.SetPartial("tags")
	return resourceAwsSecurityGroupRead(d, meta)
}
func resourceAwsAmiUpdate(d *schema.ResourceData, meta interface{}) error { client := meta.(*AWSClient).ec2conn d.Partial(true) if err := setTags(client, d); err != nil { return err } else { d.SetPartial("tags") } if d.Get("description").(string) != "" { _, err := client.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{ ImageId: aws.String(d.Id()), Description: &ec2.AttributeValue{ Value: aws.String(d.Get("description").(string)), }, }) if err != nil { return err } d.SetPartial("description") } d.Partial(false) return resourceAwsAmiRead(d, meta) }
// resourceAwsDirectoryServiceDirectoryUpdate toggles single sign-on for the
// directory when "enable_sso" changes, then re-reads the directory.
func resourceAwsDirectoryServiceDirectoryUpdate(d *schema.ResourceData, meta interface{}) error {
	dsconn := meta.(*AWSClient).dsconn
	if d.HasChange("enable_sso") {
		// NOTE(review): SetPartial is recorded before the API call succeeds
		// and without d.Partial(true) being enabled; confirm this ordering
		// is intended.
		d.SetPartial("enable_sso")
		var err error
		if v, ok := d.GetOk("enable_sso"); ok && v.(bool) {
			log.Printf("[DEBUG] Enabling SSO for DS directory %q", d.Id())
			_, err = dsconn.EnableSso(&directoryservice.EnableSsoInput{
				DirectoryId: aws.String(d.Id()),
			})
		} else {
			log.Printf("[DEBUG] Disabling SSO for DS directory %q", d.Id())
			_, err = dsconn.DisableSso(&directoryservice.DisableSsoInput{
				DirectoryId: aws.String(d.Id()),
			})
		}
		if err != nil {
			return err
		}
	}
	return resourceAwsDirectoryServiceDirectoryRead(d, meta)
}
// resourceAwsProxyProtocolPolicyUpdate applies the proxy protocol policy to
// ELB backend ports added in "instance_ports" and removes it from ports no
// longer listed, then re-reads the policy state.
func resourceAwsProxyProtocolPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
	elbconn := meta.(*AWSClient).elbconn
	elbname := aws.String(d.Get("load_balancer").(string))
	// Retrieve the current ELB policies for updating the state
	req := &elb.DescribeLoadBalancersInput{
		LoadBalancerNames: []*string{elbname},
	}
	resp, err := elbconn.DescribeLoadBalancers(req)
	if err != nil {
		if isLoadBalancerNotFound(err) {
			// The ELB is gone now, so just remove it from the state
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error retrieving ELB attributes: %s", err)
	}
	backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions)
	_, policyName := resourceAwsProxyProtocolPolicyParseId(d.Id())
	d.Partial(true)
	if d.HasChange("instance_ports") {
		o, n := d.GetChange("instance_ports")
		os := o.(*schema.Set)
		ns := n.(*schema.Set)
		remove := os.Difference(ns).List()
		add := ns.Difference(os).List()
		// Build the full list of per-port policy changes (removals first,
		// then additions) and apply them in a single pass below.
		inputs := []*elb.SetLoadBalancerPoliciesForBackendServerInput{}
		i, err := resourceAwsProxyProtocolPolicyRemove(policyName, remove, backends)
		if err != nil {
			return err
		}
		inputs = append(inputs, i...)
		i, err = resourceAwsProxyProtocolPolicyAdd(policyName, add, backends)
		if err != nil {
			return err
		}
		inputs = append(inputs, i...)
		for _, input := range inputs {
			input.LoadBalancerName = elbname
			if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(input); err != nil {
				return fmt.Errorf("Error setting policy for backend: %s", err)
			}
		}
		d.SetPartial("instance_ports")
	}
	return resourceAwsProxyProtocolPolicyRead(d, meta)
}
func resourceAwsRoute53ZoneUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).r53conn if err := setTagsR53(conn, d, "hostedzone"); err != nil { return err } else { d.SetPartial("tags") } return resourceAwsRoute53ZoneRead(d, meta) }
func resourceAwsCustomerGatewayUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn // Update tags if required. if err := setTags(conn, d); err != nil { return err } d.SetPartial("tags") return resourceAwsCustomerGatewayRead(d, meta) }
// resourceArmStorageAccountUpdate is unusual in the ARM API where most resources have a combined
// and idempotent operation for CreateOrUpdate. In particular updating all of the parameters
// available requires a call to Update per parameter...
//
// Each changed attribute (account_type, tags) therefore gets its own Update
// call plus a poll until the service reports success, with partial state
// recorded after each one.
func resourceArmStorageAccountUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*ArmClient).storageServiceClient
	id, err := parseAzureResourceID(d.Id())
	if err != nil {
		return err
	}
	storageAccountName := id.Path["storageAccounts"]
	resourceGroupName := id.ResourceGroup
	d.Partial(true)
	if d.HasChange("account_type") {
		accountType := d.Get("account_type").(string)
		opts := storage.AccountUpdateParameters{
			Properties: &storage.AccountPropertiesUpdateParameters{
				AccountType: storage.AccountType(accountType),
			},
		}
		accResp, err := client.Update(resourceGroupName, storageAccountName, opts)
		if err != nil {
			return fmt.Errorf("Error updating Azure Storage Account type %q: %s", storageAccountName, err)
		}
		// The update is asynchronous; poll until the account reports OK.
		_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response.Response, http.StatusOK)
		if err != nil {
			return fmt.Errorf("Error updating Azure Storage Account type %q: %s", storageAccountName, err)
		}
		d.SetPartial("account_type")
	}
	if d.HasChange("tags") {
		tags := d.Get("tags").(map[string]interface{})
		opts := storage.AccountUpdateParameters{
			Tags: expandTags(tags),
		}
		accResp, err := client.Update(resourceGroupName, storageAccountName, opts)
		if err != nil {
			return fmt.Errorf("Error updating Azure Storage Account tags %q: %s", storageAccountName, err)
		}
		_, err = pollIndefinitelyAsNeeded(client.Client, accResp.Response.Response, http.StatusOK)
		if err != nil {
			return fmt.Errorf("Error updating Azure Storage Account tags %q: %s", storageAccountName, err)
		}
		d.SetPartial("tags")
	}
	d.Partial(false)
	return nil
}
func resourceAwsKinesisStreamUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).kinesisconn d.Partial(true) if err := setTagsKinesis(conn, d); err != nil { return err } d.SetPartial("tags") d.Partial(false) return resourceAwsKinesisStreamRead(d, meta) }
// resourceAwsDbParameterGroupUpdate pushes only the parameters that were
// added or modified (new-set minus old-set) to the RDS parameter group and
// syncs tags, then re-reads the group.
func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error {
	rdsconn := meta.(*AWSClient).rdsconn
	d.Partial(true)
	if d.HasChange("parameter") {
		o, n := d.GetChange("parameter")
		if o == nil {
			o = new(schema.Set)
		}
		if n == nil {
			n = new(schema.Set)
		}
		os := o.(*schema.Set)
		ns := n.(*schema.Set)
		// Expand the "parameter" set to aws-sdk-go compat []rds.Parameter
		parameters, err := expandParameters(ns.Difference(os).List())
		if err != nil {
			return err
		}
		if len(parameters) > 0 {
			modifyOpts := rds.ModifyDBParameterGroupInput{
				DBParameterGroupName: aws.String(d.Get("name").(string)),
				Parameters:           parameters,
			}
			log.Printf("[DEBUG] Modify DB Parameter Group: %s", modifyOpts)
			_, err = rdsconn.ModifyDBParameterGroup(&modifyOpts)
			if err != nil {
				return fmt.Errorf("Error modifying DB Parameter Group: %s", err)
			}
		}
		d.SetPartial("parameter")
	}
	// NOTE(review): a failure to build the ARN silently skips the tag sync;
	// confirm this is intentional.
	if arn, err := buildRDSPGARN(d, meta); err == nil {
		if err := setTagsRDS(rdsconn, d, arn); err != nil {
			return err
		} else {
			d.SetPartial("tags")
		}
	}
	d.Partial(false)
	return resourceAwsDbParameterGroupRead(d, meta)
}
func resourceAwsSpotInstanceRequestUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn d.Partial(true) if err := setTags(conn, d); err != nil { return err } else { d.SetPartial("tags") } d.Partial(false) return resourceAwsSpotInstanceRequestRead(d, meta) }
func resourceAwsDbSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn d.Partial(true) if arn, err := buildRDSSecurityGroupARN(d, meta); err == nil { if err := setTagsRDS(conn, d, arn); err != nil { return err } else { d.SetPartial("tags") } } d.Partial(false) return resourceAwsDbSecurityGroupRead(d, meta) }
// resourceAwsVpcCreate provisions a new VPC with the configured CIDR block
// and tenancy, waits for it to become available, and then runs the update
// path to apply the remaining attributes.
func resourceAwsVpcCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn
	instance_tenancy := "default"
	if v, ok := d.GetOk("instance_tenancy"); ok {
		instance_tenancy = v.(string)
	}
	// Create the VPC
	createOpts := &ec2.CreateVpcInput{
		CidrBlock:       aws.String(d.Get("cidr_block").(string)),
		InstanceTenancy: aws.String(instance_tenancy),
	}
	log.Printf("[DEBUG] VPC create config: %#v", *createOpts)
	vpcResp, err := conn.CreateVpc(createOpts)
	if err != nil {
		return fmt.Errorf("Error creating VPC: %s", err)
	}
	// Get the ID and store it
	vpc := vpcResp.Vpc
	d.SetId(*vpc.VpcId)
	log.Printf("[INFO] VPC ID: %s", d.Id())
	// Set partial mode and say that we setup the cidr block
	d.Partial(true)
	d.SetPartial("cidr_block")
	// Wait for the VPC to become available
	log.Printf(
		"[DEBUG] Waiting for VPC (%s) to become available",
		d.Id())
	stateConf := &resource.StateChangeConf{
		Pending: []string{"pending"},
		Target:  "available",
		Refresh: VPCStateRefreshFunc(conn, d.Id()),
		Timeout: 10 * time.Minute,
	}
	if _, err := stateConf.WaitForState(); err != nil {
		return fmt.Errorf(
			"Error waiting for VPC (%s) to become available: %s",
			d.Id(), err)
	}
	// Update our attributes and return
	return resourceAwsVpcUpdate(d, meta)
}
// resourceAwsElasticacheParameterGroupUpdate pushes only added/changed
// parameters (new-set minus old-set) to the cache parameter group, then
// re-reads it.
func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn
	d.Partial(true)
	if d.HasChange("parameter") {
		o, n := d.GetChange("parameter")
		if o == nil {
			o = new(schema.Set)
		}
		if n == nil {
			n = new(schema.Set)
		}
		os := o.(*schema.Set)
		ns := n.(*schema.Set)
		// Expand the "parameter" set to aws-sdk-go compat []elasticacheconn.Parameter
		parameters, err := expandElastiCacheParameters(ns.Difference(os).List())
		if err != nil {
			return err
		}
		if len(parameters) > 0 {
			modifyOpts := elasticache.ModifyCacheParameterGroupInput{
				CacheParameterGroupName: aws.String(d.Get("name").(string)),
				ParameterNameValues:     parameters,
			}
			log.Printf("[DEBUG] Modify Cache Parameter Group: %#v", modifyOpts)
			_, err = conn.ModifyCacheParameterGroup(&modifyOpts)
			if err != nil {
				return fmt.Errorf("Error modifying Cache Parameter Group: %s", err)
			}
		}
		d.SetPartial("parameter")
	}
	d.Partial(false)
	return resourceAwsElasticacheParameterGroupRead(d, meta)
}
// resourceAwsDbParameterGroupCreate creates the RDS parameter group with its
// name/family/description and tags, then delegates to Update to apply the
// individual parameter values.
func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{}) error {
	rdsconn := meta.(*AWSClient).rdsconn
	tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
	createOpts := rds.CreateDBParameterGroupInput{
		DBParameterGroupName:   aws.String(d.Get("name").(string)),
		DBParameterGroupFamily: aws.String(d.Get("family").(string)),
		Description:            aws.String(d.Get("description").(string)),
		Tags:                   tags,
	}
	log.Printf("[DEBUG] Create DB Parameter Group: %#v", createOpts)
	_, err := rdsconn.CreateDBParameterGroup(&createOpts)
	if err != nil {
		return fmt.Errorf("Error creating DB Parameter Group: %s", err)
	}
	// Record the creation-time attributes as applied.
	d.Partial(true)
	d.SetPartial("name")
	d.SetPartial("family")
	d.SetPartial("description")
	d.Partial(false)
	d.SetId(*createOpts.DBParameterGroupName)
	log.Printf("[INFO] DB Parameter Group ID: %s", d.Id())
	return resourceAwsDbParameterGroupUpdate(d, meta)
}
// resourceAwsElasticacheParameterGroupCreate creates the cache parameter
// group shell, then delegates to Update to apply the parameter values.
func resourceAwsElasticacheParameterGroupCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn
	createOpts := elasticache.CreateCacheParameterGroupInput{
		CacheParameterGroupName:   aws.String(d.Get("name").(string)),
		CacheParameterGroupFamily: aws.String(d.Get("family").(string)),
		Description:               aws.String(d.Get("description").(string)),
	}
	log.Printf("[DEBUG] Create Cache Parameter Group: %#v", createOpts)
	_, err := conn.CreateCacheParameterGroup(&createOpts)
	if err != nil {
		return fmt.Errorf("Error creating DB Parameter Group: %s", err)
	}
	// Record the creation-time attributes as applied.
	d.Partial(true)
	d.SetPartial("name")
	d.SetPartial("family")
	d.SetPartial("description")
	d.Partial(false)
	d.SetId(*createOpts.CacheParameterGroupName)
	log.Printf("[INFO] Cache Parameter Group ID: %s", d.Id())
	return resourceAwsElasticacheParameterGroupUpdate(d, meta)
}
// resourceAwsVpnGatewayUpdate moves the gateway to a new VPC (detach, then
// re-attach) when "vpc_id" changes, syncs tags, and re-reads the gateway.
func resourceAwsVpnGatewayUpdate(d *schema.ResourceData, meta interface{}) error {
	if d.HasChange("vpc_id") {
		// If we're already attached, detach it first
		if err := resourceAwsVpnGatewayDetach(d, meta); err != nil {
			return err
		}
		// Attach the VPN gateway to the new vpc
		if err := resourceAwsVpnGatewayAttach(d, meta); err != nil {
			return err
		}
	}
	conn := meta.(*AWSClient).ec2conn
	if err := setTags(conn, d); err != nil {
		return err
	}
	d.SetPartial("tags")
	return resourceAwsVpnGatewayRead(d, meta)
}
// resourcePostgresqlRoleUpdate issues ALTER ROLE statements for changed
// login, password, and encrypted attributes, then re-reads the role.
func resourcePostgresqlRoleUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*Client)
	conn, err := client.Connect()
	if err != nil {
		return err
	}
	defer conn.Close()
	d.Partial(true)
	roleName := d.Get("name").(string)
	if d.HasChange("login") {
		loginAttr := getLoginStr(d.Get("login").(bool))
		// NOTE(review): QuoteIdentifier around the LOGIN/NOLOGIN keyword
		// looks suspicious -- quoting turns the keyword into an identifier;
		// verify the generated SQL is actually valid.
		query := fmt.Sprintf("ALTER ROLE %s %s", pq.QuoteIdentifier(roleName), pq.QuoteIdentifier(loginAttr))
		_, err := conn.Query(query)
		if err != nil {
			return fmt.Errorf("Error updating login attribute for role: %s", err)
		}
		d.SetPartial("login")
	}
	password := d.Get("password").(string)
	if d.HasChange("password") {
		encryptedCfg := getEncryptedStr(d.Get("encrypted").(bool))
		// NOTE(review): the password is interpolated into the SQL string
		// unescaped -- a password containing a single quote breaks the
		// statement (potential SQL injection); consider proper quoting.
		query := fmt.Sprintf("ALTER ROLE %s %s PASSWORD '%s'", pq.QuoteIdentifier(roleName), encryptedCfg, password)
		_, err := conn.Query(query)
		if err != nil {
			return fmt.Errorf("Error updating password attribute for role: %s", err)
		}
		d.SetPartial("password")
	}
	if d.HasChange("encrypted") {
		// Changing "encrypted" re-sends the password so it is stored in the
		// newly requested form.
		encryptedCfg := getEncryptedStr(d.Get("encrypted").(bool))
		query := fmt.Sprintf("ALTER ROLE %s %s PASSWORD '%s'", pq.QuoteIdentifier(roleName), encryptedCfg, password)
		_, err := conn.Query(query)
		if err != nil {
			return fmt.Errorf("Error updating encrypted attribute for role: %s", err)
		}
		d.SetPartial("encrypted")
	}
	d.Partial(false)
	return resourcePostgresqlRoleRead(d, meta)
}
func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn d.Partial(true) req := &rds.ModifyDBInstanceInput{ ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), DBInstanceIdentifier: aws.String(d.Id()), } d.SetPartial("apply_immediately") requestUpdate := false if d.HasChange("allocated_storage") { d.SetPartial("allocated_storage") req.AllocatedStorage = aws.Int64(int64(d.Get("allocated_storage").(int))) requestUpdate = true } if d.HasChange("allow_major_version_upgrade") { d.SetPartial("allow_major_version_upgrade") req.AllowMajorVersionUpgrade = aws.Bool(d.Get("allow_major_version_upgrade").(bool)) requestUpdate = true } if d.HasChange("backup_retention_period") { d.SetPartial("backup_retention_period") req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int))) requestUpdate = true } if d.HasChange("copy_tags_to_snapshot") { d.SetPartial("copy_tags_to_snapshot") req.CopyTagsToSnapshot = aws.Bool(d.Get("copy_tags_to_snapshot").(bool)) requestUpdate = true } if d.HasChange("instance_class") { d.SetPartial("instance_class") req.DBInstanceClass = aws.String(d.Get("instance_class").(string)) requestUpdate = true } if d.HasChange("parameter_group_name") { d.SetPartial("parameter_group_name") req.DBParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) requestUpdate = true } if d.HasChange("engine_version") { d.SetPartial("engine_version") req.EngineVersion = aws.String(d.Get("engine_version").(string)) requestUpdate = true } if d.HasChange("iops") { d.SetPartial("iops") req.Iops = aws.Int64(int64(d.Get("iops").(int))) requestUpdate = true } if d.HasChange("backup_window") { d.SetPartial("backup_window") req.PreferredBackupWindow = aws.String(d.Get("backup_window").(string)) requestUpdate = true } if d.HasChange("maintenance_window") { d.SetPartial("maintenance_window") req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) 
requestUpdate = true } if d.HasChange("password") { d.SetPartial("password") req.MasterUserPassword = aws.String(d.Get("password").(string)) requestUpdate = true } if d.HasChange("multi_az") { d.SetPartial("multi_az") req.MultiAZ = aws.Bool(d.Get("multi_az").(bool)) requestUpdate = true } if d.HasChange("publicly_accessible") { d.SetPartial("publicly_accessible") req.PubliclyAccessible = aws.Bool(d.Get("publicly_accessible").(bool)) requestUpdate = true } if d.HasChange("storage_type") { d.SetPartial("storage_type") req.StorageType = aws.String(d.Get("storage_type").(string)) requestUpdate = true } if d.HasChange("auto_minor_version_upgrade") { d.SetPartial("auto_minor_version_upgrade") req.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool)) requestUpdate = true } if d.HasChange("vpc_security_group_ids") { if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { var s []*string for _, v := range attr.List() { s = append(s, aws.String(v.(string))) } req.VpcSecurityGroupIds = s } requestUpdate = true } if d.HasChange("vpc_security_group_ids") { if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { var s []*string for _, v := range attr.List() { s = append(s, aws.String(v.(string))) } req.DBSecurityGroups = s } requestUpdate = true } log.Printf("[DEBUG] Send DB Instance Modification request: %#v", requestUpdate) if requestUpdate { log.Printf("[DEBUG] DB Instance Modification request: %#v", req) _, err := conn.ModifyDBInstance(req) if err != nil { return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err) } } // separate request to promote a database if d.HasChange("replicate_source_db") { if d.Get("replicate_source_db").(string) == "" { // promote opts := rds.PromoteReadReplicaInput{ DBInstanceIdentifier: aws.String(d.Id()), } attr := d.Get("backup_retention_period") opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int))) if attr, ok := d.GetOk("backup_window"); ok { opts.PreferredBackupWindow 
= aws.String(attr.(string)) } _, err := conn.PromoteReadReplica(&opts) if err != nil { return fmt.Errorf("Error promoting database: %#v", err) } d.Set("replicate_source_db", "") } else { return fmt.Errorf("cannot elect new source database for replication") } } if arn, err := buildRDSARN(d, meta); err == nil { if err := setTagsRDS(conn, d, arn); err != nil { return err } else { d.SetPartial("tags") } } d.Partial(false) return resourceAwsDbInstanceRead(d, meta) }
func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) d.Partial(true) if d.HasChange("url_map") { url_map := d.Get("url_map").(string) url_map_ref := &compute.UrlMapReference{UrlMap: url_map} op, err := config.clientCompute.TargetHttpsProxies.SetUrlMap( config.Project, d.Id(), url_map_ref).Do() if err != nil { return fmt.Errorf("Error updating Target HTTPS proxy URL map: %s", err) } err = computeOperationWaitGlobal(config, op, "Updating Target Https Proxy URL Map") if err != nil { return err } d.SetPartial("url_map") } if d.HasChange("ssl_certificates") { proxy, err := config.clientCompute.TargetHttpsProxies.Get( config.Project, d.Id()).Do() _old, _new := d.GetChange("ssl_certificates") _oldCerts := _old.([]interface{}) _newCerts := _new.([]interface{}) current := proxy.SslCertificates _oldMap := make(map[string]bool) _newMap := make(map[string]bool) for _, v := range _oldCerts { _oldMap[v.(string)] = true } for _, v := range _newCerts { _newMap[v.(string)] = true } sslCertificates := make([]string, 0) // Only modify certificates in one of our old or new states for _, v := range current { _, okOld := _oldMap[v] _, okNew := _newMap[v] // we deleted the certificate if okOld && !okNew { continue } sslCertificates = append(sslCertificates, v) // Keep track of the fact that we have added this certificate if okNew { delete(_newMap, v) } } // Add fresh certificates for k, _ := range _newMap { sslCertificates = append(sslCertificates, k) } cert_ref := &compute.TargetHttpsProxiesSetSslCertificatesRequest{ SslCertificates: sslCertificates, } op, err := config.clientCompute.TargetHttpsProxies.SetSslCertificates( config.Project, d.Id(), cert_ref).Do() if err != nil { return fmt.Errorf("Error updating Target Https Proxy SSL Certificates: %s", err) } err = computeOperationWaitGlobal(config, op, "Updating Target Https Proxy SSL certificates") if err != nil { return err } d.SetPartial("ssl_certificate") } 
d.Partial(false) return resourceComputeTargetHttpsProxyRead(d, meta) }
// resourceComputeTargetPoolUpdate reconciles the target pool's health checks,
// member instances, and backup pool with the configuration, waiting on each
// regional operation before recording partial state.
func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	region := getOptionalRegion(d, config)
	d.Partial(true)
	if d.HasChange("health_checks") {
		from_, to_ := d.GetChange("health_checks")
		from := convertStringArr(from_.([]interface{}))
		to := convertStringArr(to_.([]interface{}))
		fromUrls, err := convertHealthChecks(config, from)
		if err != nil {
			return err
		}
		toUrls, err := convertHealthChecks(config, to)
		if err != nil {
			return err
		}
		add, remove := calcAddRemove(fromUrls, toUrls)
		// Health checks: remove stale entries first, then add new ones.
		removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{
			HealthChecks: make([]*compute.HealthCheckReference, len(remove)),
		}
		for i, v := range remove {
			removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v}
		}
		op, err := config.clientCompute.TargetPools.RemoveHealthCheck(
			config.Project, region, d.Id(), removeReq).Do()
		if err != nil {
			return fmt.Errorf("Error updating health_check: %s", err)
		}
		err = computeOperationWaitRegion(config, op, region, "Updating Target Pool")
		if err != nil {
			return err
		}
		addReq := &compute.TargetPoolsAddHealthCheckRequest{
			HealthChecks: make([]*compute.HealthCheckReference, len(add)),
		}
		for i, v := range add {
			addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v}
		}
		op, err = config.clientCompute.TargetPools.AddHealthCheck(
			config.Project, region, d.Id(), addReq).Do()
		if err != nil {
			return fmt.Errorf("Error updating health_check: %s", err)
		}
		err = computeOperationWaitRegion(config, op, region, "Updating Target Pool")
		if err != nil {
			return err
		}
		d.SetPartial("health_checks")
	}
	if d.HasChange("instances") {
		from_, to_ := d.GetChange("instances")
		from := convertStringArr(from_.([]interface{}))
		to := convertStringArr(to_.([]interface{}))
		fromUrls, err := convertInstances(config, from)
		if err != nil {
			return err
		}
		toUrls, err := convertInstances(config, to)
		if err != nil {
			return err
		}
		add, remove := calcAddRemove(fromUrls, toUrls)
		// Instances: unlike health checks, additions happen first, then
		// removals.
		addReq := &compute.TargetPoolsAddInstanceRequest{
			Instances: make([]*compute.InstanceReference, len(add)),
		}
		for i, v := range add {
			addReq.Instances[i] = &compute.InstanceReference{Instance: v}
		}
		op, err := config.clientCompute.TargetPools.AddInstance(
			config.Project, region, d.Id(), addReq).Do()
		if err != nil {
			return fmt.Errorf("Error updating instances: %s", err)
		}
		err = computeOperationWaitRegion(config, op, region, "Updating Target Pool")
		if err != nil {
			return err
		}
		removeReq := &compute.TargetPoolsRemoveInstanceRequest{
			Instances: make([]*compute.InstanceReference, len(remove)),
		}
		for i, v := range remove {
			removeReq.Instances[i] = &compute.InstanceReference{Instance: v}
		}
		op, err = config.clientCompute.TargetPools.RemoveInstance(
			config.Project, region, d.Id(), removeReq).Do()
		if err != nil {
			return fmt.Errorf("Error updating instances: %s", err)
		}
		err = computeOperationWaitRegion(config, op, region, "Updating Target Pool")
		if err != nil {
			return err
		}
		d.SetPartial("instances")
	}
	if d.HasChange("backup_pool") {
		bpool_name := d.Get("backup_pool").(string)
		tref := &compute.TargetReference{
			Target: bpool_name,
		}
		op, err := config.clientCompute.TargetPools.SetBackup(
			config.Project, region, d.Id(), tref).Do()
		if err != nil {
			return fmt.Errorf("Error updating backup_pool: %s", err)
		}
		err = computeOperationWaitRegion(config, op, region, "Updating Target Pool")
		if err != nil {
			return err
		}
		d.SetPartial("backup_pool")
	}
	d.Partial(false)
	return resourceComputeTargetPoolRead(d, meta)
}
// resourceAwsInstanceUpdate applies tag, source/dest check, VPC security
// group, termination protection, shutdown behavior, and monitoring changes
// to the instance, then re-reads it.
func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn
	d.Partial(true)
	if err := setTags(conn, d); err != nil {
		return err
	} else {
		d.SetPartial("tags")
	}
	// SourceDestCheck can only be set on VPC instances
	// AWS will return an error of InvalidParameterCombination if we attempt
	// to modify the source_dest_check of an instance in EC2 Classic
	log.Printf("[INFO] Modifying instance %s", d.Id())
	_, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
		InstanceId: aws.String(d.Id()),
		SourceDestCheck: &ec2.AttributeBooleanValue{
			Value: aws.Bool(d.Get("source_dest_check").(bool)),
		},
	})
	if err != nil {
		if ec2err, ok := err.(awserr.Error); ok {
			// Tolerate InvalidParameterCombination error in Classic, otherwise
			// return the error
			if "InvalidParameterCombination" != ec2err.Code() {
				return err
			}
			log.Printf("[WARN] Attempted to modify SourceDestCheck on non VPC instance: %s", ec2err.Message())
		}
	}
	if d.HasChange("vpc_security_group_ids") {
		var groups []*string
		if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
			for _, v := range v.List() {
				groups = append(groups, aws.String(v.(string)))
			}
		}
		_, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
			InstanceId: aws.String(d.Id()),
			Groups:     groups,
		})
		if err != nil {
			return err
		}
	}
	if d.HasChange("disable_api_termination") {
		_, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
			InstanceId: aws.String(d.Id()),
			DisableApiTermination: &ec2.AttributeBooleanValue{
				Value: aws.Bool(d.Get("disable_api_termination").(bool)),
			},
		})
		if err != nil {
			return err
		}
	}
	if d.HasChange("instance_initiated_shutdown_behavior") {
		log.Printf("[INFO] Modifying instance %s", d.Id())
		_, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
			InstanceId: aws.String(d.Id()),
			InstanceInitiatedShutdownBehavior: &ec2.AttributeValue{
				Value: aws.String(d.Get("instance_initiated_shutdown_behavior").(string)),
			},
		})
		if err != nil {
			return err
		}
	}
	if d.HasChange("monitoring") {
		var mErr error
		if d.Get("monitoring").(bool) {
			log.Printf("[DEBUG] Enabling monitoring for Instance (%s)", d.Id())
			_, mErr = conn.MonitorInstances(&ec2.MonitorInstancesInput{
				InstanceIds: []*string{aws.String(d.Id())},
			})
		} else {
			log.Printf("[DEBUG] Disabling monitoring for Instance (%s)", d.Id())
			_, mErr = conn.UnmonitorInstances(&ec2.UnmonitorInstancesInput{
				InstanceIds: []*string{aws.String(d.Id())},
			})
		}
		if mErr != nil {
			return fmt.Errorf("[WARN] Error updating Instance monitoring: %s", mErr)
		}
	}
	// TODO(mitchellh): wait for the attributes we modified to
	// persist the change...
	d.Partial(false)
	return resourceAwsInstanceRead(d, meta)
}
// resourceCloudStackDiskUpdate resizes or re-offers the disk (which requires
// detaching it first), detaches on device/VM moves, and finally re-attaches
// or detaches the volume according to the "attach" flag.
func resourceCloudStackDiskUpdate(d *schema.ResourceData, meta interface{}) error {
	cs := meta.(*cloudstack.CloudStackClient)
	d.Partial(true)
	name := d.Get("name").(string)
	if d.HasChange("disk_offering") || d.HasChange("size") {
		// Detach the volume (re-attach is done at the end of this function)
		if err := resourceCloudStackDiskDetach(d, meta); err != nil {
			return fmt.Errorf("Error detaching disk %s from virtual machine: %s", name, err)
		}
		// Create a new parameter struct
		p := cs.Volume.NewResizeVolumeParams(d.Id())
		// Retrieve the disk_offering ID
		diskofferingid, e := retrieveID(cs, "disk_offering", d.Get("disk_offering").(string))
		if e != nil {
			return e.Error()
		}
		// Set the disk_offering ID
		p.SetDiskofferingid(diskofferingid)
		if d.Get("size").(int) != 0 {
			// Set the size
			p.SetSize(int64(d.Get("size").(int)))
		}
		// Set the shrink bit
		p.SetShrinkok(d.Get("shrink_ok").(bool))
		// Change the disk_offering
		r, err := cs.Volume.ResizeVolume(p)
		if err != nil {
			return fmt.Errorf("Error changing disk offering/size for disk %s: %s", name, err)
		}
		// Update the volume ID and set partials
		// (resizing can return a new volume ID)
		d.SetId(r.Id)
		d.SetPartial("disk_offering")
		d.SetPartial("size")
	}
	// If the device changed, just detach here so we can re-attach the
	// volume at the end of this function
	if d.HasChange("device") || d.HasChange("virtual_machine") {
		// Detach the volume
		if err := resourceCloudStackDiskDetach(d, meta); err != nil {
			return fmt.Errorf("Error detaching disk %s from virtual machine: %s", name, err)
		}
	}
	if d.Get("attach").(bool) {
		// Attach the volume
		err := resourceCloudStackDiskAttach(d, meta)
		if err != nil {
			return fmt.Errorf("Error attaching disk %s to virtual machine: %s", name, err)
		}
		// Set the additional partials
		d.SetPartial("attach")
		d.SetPartial("device")
		d.SetPartial("virtual_machine")
	} else {
		// Detach the volume
		if err := resourceCloudStackDiskDetach(d, meta); err != nil {
			return fmt.Errorf("Error detaching disk %s from virtual machine: %s", name, err)
		}
	}
	d.Partial(false)
	return resourceCloudStackDiskRead(d, meta)
}
// resourceCloudStackDiskCreate provisions a new CloudStack volume from the
// configured disk offering (optionally within a project), attaches it to a
// virtual machine when "attach" is set, and then reads the result back.
func resourceCloudStackDiskCreate(d *schema.ResourceData, meta interface{}) error {
	cs := meta.(*cloudstack.CloudStackClient)
	d.Partial(true)
	name := d.Get("name").(string)
	// Create a new parameter struct
	p := cs.Volume.NewCreateVolumeParams(name)
	// Retrieve the disk_offering ID
	diskofferingid, e := retrieveID(cs, "disk_offering", d.Get("disk_offering").(string))
	if e != nil {
		return e.Error()
	}
	// Set the disk_offering ID
	p.SetDiskofferingid(diskofferingid)
	if d.Get("size").(int) != 0 {
		// Set the volume size
		p.SetSize(int64(d.Get("size").(int)))
	}
	// If there is a project supplied, we retrieve and set the project id
	if project, ok := d.GetOk("project"); ok {
		// Retrieve the project ID
		projectid, e := retrieveID(cs, "project", project.(string))
		if e != nil {
			return e.Error()
		}
		// Set the default project ID
		p.SetProjectid(projectid)
	}
	// Retrieve the zone ID
	zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string))
	if e != nil {
		return e.Error()
	}
	// Set the zone ID
	p.SetZoneid(zoneid)
	// Create the new volume
	r, err := cs.Volume.CreateVolume(p)
	if err != nil {
		return fmt.Errorf("Error creating the new disk %s: %s", name, err)
	}
	// Set the volume ID and partials
	d.SetId(r.Id)
	d.SetPartial("name")
	d.SetPartial("device")
	d.SetPartial("disk_offering")
	d.SetPartial("size")
	d.SetPartial("virtual_machine")
	d.SetPartial("project")
	d.SetPartial("zone")
	if d.Get("attach").(bool) {
		err := resourceCloudStackDiskAttach(d, meta)
		if err != nil {
			return fmt.Errorf("Error attaching the new disk %s to virtual machine: %s", name, err)
		}
		// Set the additional partial
		d.SetPartial("attach")
	}
	d.Partial(false)
	return resourceCloudStackDiskRead(d, meta)
}