func (lb *LoadBalancer) DeleteTCPLoadBalancer(name, region string) error { glog.V(2).Infof("DeleteTCPLoadBalancer(%v, %v)", name, region) vip, err := getVipByName(lb.network, name) if err != nil { return err } pool, err := pools.Get(lb.network, vip.PoolID).Extract() if err != nil { return err } // Have to delete VIP before pool can be deleted err = vips.Delete(lb.network, vip.ID).ExtractErr() if err != nil { return err } // Ignore errors for everything following here for _, monId := range pool.MonitorIDs { pools.DisassociateMonitor(lb.network, pool.ID, monId) } pools.Delete(lb.network, pool.ID) return nil }
func disassociateMonitor(t *testing.T, poolID, monitorID string) { res := pools.DisassociateMonitor(base.Client, poolID, monitorID) th.AssertNoErr(t, res.Err) t.Logf("Disassociated pool %s with monitor %s", poolID, monitorID) }
// EnsureTCPLoadBalancerDeleted deletes the named load balancer's VIP,
// pool, and monitors. The call is idempotent: resources that are already
// gone (ErrNotFound / 404 via isNotFound) are tolerated rather than
// treated as failures.
func (lb *LoadBalancer) EnsureTCPLoadBalancerDeleted(name, region string) error {
	glog.V(4).Infof("EnsureTCPLoadBalancerDeleted(%v, %v)", name, region)

	// A missing VIP is fine; we may still have a pool to clean up below.
	vip, err := getVipByName(lb.network, name)
	if err != nil && err != ErrNotFound {
		return err
	}

	// We have to delete the VIP before the pool can be deleted,
	// so no point continuing if this fails.
	if vip != nil {
		err := vips.Delete(lb.network, vip.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	var pool *pools.Pool
	if vip != nil {
		// Resolve the pool through the VIP while we still have its PoolID.
		pool, err = pools.Get(lb.network, vip.PoolID).Extract()
		if err != nil && !isNotFound(err) {
			return err
		}
	} else {
		// The VIP is gone, but it is conceivable that a Pool
		// still exists that we failed to delete on some
		// previous occasion. Make a best effort attempt to
		// cleanup any pools with the same name as the VIP.
		pool, err = getPoolByName(lb.network, name)
		if err != nil && err != ErrNotFound {
			return err
		}
	}

	if pool != nil {
		for _, monId := range pool.MonitorIDs {
			// A monitor must be detached from the pool before it can be
			// deleted, hence the disassociate-then-delete sequence.
			_, err = pools.DisassociateMonitor(lb.network, pool.ID, monId).Extract()
			if err != nil {
				return err
			}
			err = monitors.Delete(lb.network, monId).ExtractErr()
			if err != nil && !isNotFound(err) {
				return err
			}
		}
		err = pools.Delete(lb.network, pool.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	return nil
}
func (lb *LoadBalancer) EnsureTCPLoadBalancerDeleted(name, region string) error { glog.V(4).Infof("EnsureTCPLoadBalancerDeleted(%v, %v)", name, region) // TODO(#8352): Because we look up the pool using the VIP object, if the VIP // is already gone we can't attempt to delete the pool. We should instead // continue even if the VIP doesn't exist and attempt to delete the pool by // name. vip, vipErr := getVipByName(lb.network, name) if vipErr == ErrNotFound { return nil } else if vipErr != nil { return vipErr } // It's ok if the pool doesn't exist, as we may still need to delete the vip // (although I don't believe the system should ever be in that state). pool, poolErr := pools.Get(lb.network, vip.PoolID).Extract() if poolErr != nil { detailedErr, ok := poolErr.(*gophercloud.UnexpectedResponseCodeError) if !ok || detailedErr.Actual != http.StatusNotFound { return poolErr } } poolExists := (poolErr == nil) // We have to delete the VIP before the pool can be deleted, so we can't // continue on if this fails. // TODO(#8352): Only do this if the VIP exists once we can delete pools by // name rather than by ID. err := vips.Delete(lb.network, vip.ID).ExtractErr() if err != nil && err != ErrNotFound { return err } // Ignore errors for everything following here if poolExists { for _, monId := range pool.MonitorIDs { // TODO(#8352): Delete the monitor, don't just disassociate it. pools.DisassociateMonitor(lb.network, pool.ID, monId) } pools.Delete(lb.network, pool.ID) } return nil }
func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) networkingClient, err := config.networkingV2Client(d.Get("region").(string)) if err != nil { return fmt.Errorf("Error creating OpenStack networking client: %s", err) } // Make sure all monitors are disassociated first if v, ok := d.GetOk("monitor_ids"); ok { if monitorIDList, ok := v.([]interface{}); ok { for _, monitorID := range monitorIDList { mID := monitorID.(string) log.Printf("[DEBUG] Attempting to disassociate monitor %s from pool %s", mID, d.Id()) if res := pools.DisassociateMonitor(networkingClient, d.Id(), mID); res.Err != nil { return fmt.Errorf("Error disassociating monitor %s from pool %s: %s", mID, d.Id(), err) } } } } stateConf := &resource.StateChangeConf{ Pending: []string{"ACTIVE", "PENDING_DELETE"}, Target: []string{"DELETED"}, Refresh: waitForLBPoolDelete(networkingClient, d.Id()), Timeout: 2 * time.Minute, Delay: 5 * time.Second, MinTimeout: 3 * time.Second, } _, err = stateConf.WaitForState() if err != nil { return fmt.Errorf("Error deleting OpenStack LB Pool: %s", err) } d.SetId("") return nil }
func resourceLBPoolV1Update(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) networkingClient, err := config.networkingV2Client(d.Get("region").(string)) if err != nil { return fmt.Errorf("Error creating OpenStack networking client: %s", err) } var updateOpts pools.UpdateOpts // If either option changed, update both. // Gophercloud complains if one is empty. if d.HasChange("name") || d.HasChange("lb_method") { updateOpts.Name = d.Get("name").(string) updateOpts.LBMethod = d.Get("lb_method").(string) } log.Printf("[DEBUG] Updating OpenStack LB Pool %s with options: %+v", d.Id(), updateOpts) _, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract() if err != nil { return fmt.Errorf("Error updating OpenStack LB Pool: %s", err) } if d.HasChange("monitor_ids") { oldMIDsRaw, newMIDsRaw := d.GetChange("security_groups") oldMIDsSet, newMIDsSet := oldMIDsRaw.(*schema.Set), newMIDsRaw.(*schema.Set) monitorsToAdd := newMIDsSet.Difference(oldMIDsSet) monitorsToRemove := oldMIDsSet.Difference(newMIDsSet) log.Printf("[DEBUG] Monitors to add: %v", monitorsToAdd) log.Printf("[DEBUG] Monitors to remove: %v", monitorsToRemove) for _, m := range monitorsToAdd.List() { _, err := pools.AssociateMonitor(networkingClient, d.Id(), m.(string)).Extract() if err != nil { return fmt.Errorf("Error associating monitor (%s) with OpenStack server (%s): %s", m.(string), d.Id(), err) } log.Printf("[DEBUG] Associated monitor (%s) with pool (%s)", m.(string), d.Id()) } for _, m := range monitorsToRemove.List() { _, err := pools.DisassociateMonitor(networkingClient, d.Id(), m.(string)).Extract() if err != nil { return fmt.Errorf("Error disassociating monitor (%s) from OpenStack server (%s): %s", m.(string), d.Id(), err) } log.Printf("[DEBUG] Disassociated monitor (%s) from pool (%s)", m.(string), d.Id()) } } if d.HasChange("member") { oldMembersRaw, newMembersRaw := d.GetChange("member") oldMembersSet, newMembersSet := oldMembersRaw.(*schema.Set), 
newMembersRaw.(*schema.Set) membersToAdd := newMembersSet.Difference(oldMembersSet) membersToRemove := oldMembersSet.Difference(newMembersSet) log.Printf("[DEBUG] Members to add: %v", membersToAdd) log.Printf("[DEBUG] Members to remove: %v", membersToRemove) for _, m := range membersToRemove.List() { oldMember := resourcePoolMemberV1(d, m) listOpts := members.ListOpts{ PoolID: d.Id(), Address: oldMember.Address, ProtocolPort: oldMember.ProtocolPort, } err = members.List(networkingClient, listOpts).EachPage(func(page pagination.Page) (bool, error) { extractedMembers, err := members.ExtractMembers(page) if err != nil { return false, err } for _, member := range extractedMembers { err := members.Delete(networkingClient, member.ID).ExtractErr() if err != nil { return false, fmt.Errorf("Error deleting member (%s) from OpenStack LB pool (%s): %s", member.ID, d.Id(), err) } log.Printf("[DEBUG] Deleted member (%s) from pool (%s)", member.ID, d.Id()) } return true, nil }) } for _, m := range membersToAdd.List() { createOpts := resourcePoolMemberV1(d, m) newMember, err := members.Create(networkingClient, createOpts).Extract() if err != nil { return fmt.Errorf("Error creating LB member: %s", err) } log.Printf("[DEBUG] Created member (%s) in OpenStack LB pool (%s)", newMember.ID, d.Id()) } } return resourceLBPoolV1Read(d, meta) }
// EnsureLoadBalancerDeleted removes every resource backing the service's
// load balancer: the floating IP (if a floating network is configured),
// then the VIP, then the pool and its monitors. The call is idempotent —
// resources that are already gone are tolerated via ErrNotFound /
// isNotFound checks.
func (lb *LoadBalancer) EnsureLoadBalancerDeleted(service *api.Service) error {
	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
	glog.V(4).Infof("EnsureLoadBalancerDeleted(%v)", loadBalancerName)

	// A missing VIP is fine; there may still be a pool to clean up below.
	vip, err := getVipByName(lb.network, loadBalancerName)
	if err != nil && err != ErrNotFound {
		return err
	}

	// Release the floating IP attached to the VIP's port, if any. This has
	// to happen while we still have the VIP (and hence its PortID).
	if lb.opts.FloatingNetworkId != "" && vip != nil {
		floatingIP, err := getFloatingIPByPortID(lb.network, vip.PortID)
		if err != nil && !isNotFound(err) {
			return err
		}
		if floatingIP != nil {
			err = floatingips.Delete(lb.network, floatingIP.ID).ExtractErr()
			if err != nil && !isNotFound(err) {
				return err
			}
		}
	}

	// We have to delete the VIP before the pool can be deleted,
	// so no point continuing if this fails.
	if vip != nil {
		err := vips.Delete(lb.network, vip.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	var pool *pools.Pool
	if vip != nil {
		// Resolve the pool through the VIP while we still have its PoolID.
		pool, err = pools.Get(lb.network, vip.PoolID).Extract()
		if err != nil && !isNotFound(err) {
			return err
		}
	} else {
		// The VIP is gone, but it is conceivable that a Pool
		// still exists that we failed to delete on some
		// previous occasion. Make a best effort attempt to
		// cleanup any pools with the same name as the VIP.
		pool, err = getPoolByName(lb.network, service.Name)
		if err != nil && err != ErrNotFound {
			return err
		}
	}

	if pool != nil {
		for _, monId := range pool.MonitorIDs {
			// A monitor must be detached from the pool before it can be
			// deleted, hence the disassociate-then-delete sequence.
			_, err = pools.DisassociateMonitor(lb.network, pool.ID, monId).Extract()
			if err != nil {
				return err
			}
			err = monitors.Delete(lb.network, monId).ExtractErr()
			if err != nil && !isNotFound(err) {
				return err
			}
		}
		err = pools.Delete(lb.network, pool.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	return nil
}