func deleteMonitor(t *testing.T, monitorID string) { res := monitors.Delete(base.Client, monitorID) th.AssertNoErr(t, res.Err) t.Logf("Deleted monitor %s", monitorID) }
// EnsureTCPLoadBalancerDeleted tears down the Neutron LBaaS v1 resources
// (VIP, associated health monitors, and pool) backing the named load
// balancer. The teardown is idempotent: a resource that is already gone
// (ErrNotFound / 404) is treated as success, so the method is safe to
// retry after a partial failure.
func (lb *LoadBalancer) EnsureTCPLoadBalancerDeleted(name, region string) error {
	glog.V(4).Infof("EnsureTCPLoadBalancerDeleted(%v, %v)", name, region)

	vip, err := getVipByName(lb.network, name)
	if err != nil && err != ErrNotFound {
		return err
	}

	// We have to delete the VIP before the pool can be deleted,
	// so no point continuing if this fails.
	if vip != nil {
		err := vips.Delete(lb.network, vip.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	var pool *pools.Pool
	if vip != nil {
		// The VIP record tells us exactly which pool backs it.
		pool, err = pools.Get(lb.network, vip.PoolID).Extract()
		if err != nil && !isNotFound(err) {
			return err
		}
	} else {
		// The VIP is gone, but it is conceivable that a Pool
		// still exists that we failed to delete on some
		// previous occasion. Make a best effort attempt to
		// cleanup any pools with the same name as the VIP.
		pool, err = getPoolByName(lb.network, name)
		if err != nil && err != ErrNotFound {
			return err
		}
	}

	if pool != nil {
		// Monitors must be disassociated from the pool before the
		// monitor (and then the pool) can be deleted.
		for _, monId := range pool.MonitorIDs {
			_, err = pools.DisassociateMonitor(lb.network, pool.ID, monId).Extract()
			if err != nil {
				return err
			}
			err = monitors.Delete(lb.network, monId).ExtractErr()
			if err != nil && !isNotFound(err) {
				return err
			}
		}
		err = pools.Delete(lb.network, pool.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	return nil
}
func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) networkingClient, err := config.networkingV2Client(d.Get("region").(string)) if err != nil { return fmt.Errorf("Error creating OpenStack networking client: %s", err) } err = monitors.Delete(networkingClient, d.Id()).ExtractErr() if err != nil { return fmt.Errorf("Error deleting OpenStack LB Monitor: %s", err) } d.SetId("") return nil }
func waitForLBMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc { return func() (interface{}, string, error) { log.Printf("[DEBUG] Attempting to delete OpenStack LB Monitor %s", monitorId) m, err := monitors.Get(networkingClient, monitorId).Extract() if err != nil { errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) if !ok { return m, "ACTIVE", err } if errCode.Actual == 404 { log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId) return m, "DELETED", nil } if errCode.Actual == 409 { log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) return m, "PENDING", nil } } log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m) err = monitors.Delete(networkingClient, monitorId).ExtractErr() if err != nil { errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) if !ok { return m, "ACTIVE", err } if errCode.Actual == 404 { log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId) return m, "DELETED", nil } if errCode.Actual == 409 { log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) return m, "PENDING", nil } } log.Printf("[DEBUG] OpenStack LB Monitor %s still active.", monitorId) return m, "ACTIVE", nil } }
// CreateTCPLoadBalancer provisions a Neutron LBaaS v1 load balancer for a
// Kubernetes service: a pool, one member per host, an optional TCP health
// monitor, and finally the VIP. On any failure after the pool exists, the
// already-created resources are deleted best-effort before returning the
// error. Only a single port and TCP protocol are supported.
func (lb *LoadBalancer) CreateTCPLoadBalancer(name, region string, externalIP net.IP, ports []*api.ServicePort, hosts []string, affinity api.ServiceAffinity) (*api.LoadBalancerStatus, error) {
	glog.V(4).Infof("CreateTCPLoadBalancer(%v, %v, %v, %v, %v, %v)", name, region, externalIP, ports, hosts, affinity)

	if len(ports) > 1 {
		return nil, fmt.Errorf("multiple ports are not yet supported in openstack load balancers")
	}

	// Map Kubernetes session affinity onto LBaaS session persistence.
	var persistence *vips.SessionPersistence
	switch affinity {
	case api.ServiceAffinityNone:
		persistence = nil
	case api.ServiceAffinityClientIP:
		persistence = &vips.SessionPersistence{Type: "SOURCE_IP"}
	default:
		return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity)
	}

	// Fall back to round-robin when no LB method is configured.
	lbmethod := lb.opts.LBMethod
	if lbmethod == "" {
		lbmethod = pools.LBMethodRoundRobin
	}
	pool, err := pools.Create(lb.network, pools.CreateOpts{
		Name:     name,
		Protocol: pools.ProtocolTCP,
		SubnetID: lb.opts.SubnetId,
		LBMethod: lbmethod,
	}).Extract()
	if err != nil {
		return nil, err
	}

	// Add one pool member per host; roll the pool back if any member fails.
	for _, host := range hosts {
		addr, err := getAddressByName(lb.compute, host)
		if err != nil {
			return nil, err
		}

		_, err = members.Create(lb.network, members.CreateOpts{
			PoolID:       pool.ID,
			ProtocolPort: ports[0].Port, //TODO: need to handle multi-port
			Address:      addr,
		}).Extract()
		if err != nil {
			// Best-effort cleanup; the member create error is what we report.
			pools.Delete(lb.network, pool.ID)
			return nil, err
		}
	}

	// Optionally attach a TCP health monitor, with the configured timings.
	var mon *monitors.Monitor
	if lb.opts.CreateMonitor {
		mon, err = monitors.Create(lb.network, monitors.CreateOpts{
			Type:       monitors.TypeTCP,
			Delay:      int(lb.opts.MonitorDelay.Duration.Seconds()),
			Timeout:    int(lb.opts.MonitorTimeout.Duration.Seconds()),
			MaxRetries: int(lb.opts.MonitorMaxRetries),
		}).Extract()
		if err != nil {
			pools.Delete(lb.network, pool.ID)
			return nil, err
		}

		_, err = pools.AssociateMonitor(lb.network, pool.ID, mon.ID).Extract()
		if err != nil {
			// Undo both the monitor and the pool before bailing out.
			monitors.Delete(lb.network, mon.ID)
			pools.Delete(lb.network, pool.ID)
			return nil, err
		}
	}

	createOpts := vips.CreateOpts{
		Name:         name,
		Description:  fmt.Sprintf("Kubernetes external service %s", name),
		Protocol:     "TCP",
		ProtocolPort: ports[0].Port, //TODO: need to handle multi-port
		PoolID:       pool.ID,
		Persistence:  persistence,
	}
	// Only request a specific VIP address when the caller supplied one;
	// an unspecified IP lets Neutron pick the address.
	if !externalIP.IsUnspecified() {
		createOpts.Address = externalIP.String()
	}

	vip, err := vips.Create(lb.network, createOpts).Extract()
	if err != nil {
		// Best-effort teardown of everything created above.
		if mon != nil {
			monitors.Delete(lb.network, mon.ID)
		}
		pools.Delete(lb.network, pool.ID)
		return nil, err
	}

	status := &api.LoadBalancerStatus{}
	status.Ingress = []api.LoadBalancerIngress{{IP: vip.Address}}

	return status, nil
}
// EnsureLoadBalancer creates (or recreates) a Neutron LBaaS v1 load balancer
// for the given service: pool, members, optional TCP health monitor, VIP,
// and optionally a floating IP on the configured floating network. If a load
// balancer with this name already exists it is deleted and rebuilt rather
// than updated in place. Only a single TCP port is supported, and source
// range restrictions are rejected.
func (lb *LoadBalancer) EnsureLoadBalancer(name, region string, loadBalancerIP net.IP, ports []*api.ServicePort, hosts []string, serviceName types.NamespacedName, affinity api.ServiceAffinity, annotations map[string]string) (*api.LoadBalancerStatus, error) {
	glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", name, region, loadBalancerIP, ports, hosts, serviceName, annotations)

	if len(ports) > 1 {
		return nil, fmt.Errorf("multiple ports are not yet supported in openstack load balancers")
	} else if len(ports) == 0 {
		return nil, fmt.Errorf("no ports provided to openstack load balancer")
	}

	// The service controller verified all the protocols match on the ports, just check and use the first one
	// TODO: Convert all error messages to use an event recorder
	if ports[0].Protocol != api.ProtocolTCP {
		return nil, fmt.Errorf("Only TCP LoadBalancer is supported for openstack load balancers")
	}

	// Map Kubernetes session affinity onto LBaaS session persistence.
	var persistence *vips.SessionPersistence
	switch affinity {
	case api.ServiceAffinityNone:
		persistence = nil
	case api.ServiceAffinityClientIP:
		persistence = &vips.SessionPersistence{Type: "SOURCE_IP"}
	default:
		return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity)
	}

	// This provider cannot enforce firewall-style source ranges; only the
	// allow-all default is accepted.
	sourceRanges, err := service.GetLoadBalancerSourceRanges(annotations)
	if err != nil {
		return nil, err
	}
	if !service.IsAllowAll(sourceRanges) {
		return nil, fmt.Errorf("Source range restrictions are not supported for openstack load balancers")
	}

	glog.V(2).Infof("Checking if openstack load balancer already exists: %s", name)
	_, exists, err := lb.GetLoadBalancer(name, region)
	if err != nil {
		return nil, fmt.Errorf("error checking if openstack load balancer already exists: %v", err)
	}

	// TODO: Implement a more efficient update strategy for common changes than delete & create
	// In particular, if we implement hosts update, we can get rid of UpdateHosts
	if exists {
		err := lb.EnsureLoadBalancerDeleted(name, region)
		if err != nil {
			return nil, fmt.Errorf("error deleting existing openstack load balancer: %v", err)
		}
	}

	// Fall back to round-robin when no LB method is configured.
	lbmethod := lb.opts.LBMethod
	if lbmethod == "" {
		lbmethod = pools.LBMethodRoundRobin
	}
	pool, err := pools.Create(lb.network, pools.CreateOpts{
		Name:     name,
		Protocol: pools.ProtocolTCP,
		SubnetID: lb.opts.SubnetId,
		LBMethod: lbmethod,
	}).Extract()
	if err != nil {
		return nil, err
	}

	// Add one pool member per host, targeting the service's NodePort;
	// roll the pool back if any member fails.
	for _, host := range hosts {
		addr, err := getAddressByName(lb.compute, host)
		if err != nil {
			return nil, err
		}

		_, err = members.Create(lb.network, members.CreateOpts{
			PoolID:       pool.ID,
			ProtocolPort: ports[0].NodePort, //TODO: need to handle multi-port
			Address:      addr,
		}).Extract()
		if err != nil {
			// Best-effort cleanup; the member create error is what we report.
			pools.Delete(lb.network, pool.ID)
			return nil, err
		}
	}

	// Optionally attach a TCP health monitor, with the configured timings.
	var mon *monitors.Monitor
	if lb.opts.CreateMonitor {
		mon, err = monitors.Create(lb.network, monitors.CreateOpts{
			Type:       monitors.TypeTCP,
			Delay:      int(lb.opts.MonitorDelay.Duration.Seconds()),
			Timeout:    int(lb.opts.MonitorTimeout.Duration.Seconds()),
			MaxRetries: int(lb.opts.MonitorMaxRetries),
		}).Extract()
		if err != nil {
			pools.Delete(lb.network, pool.ID)
			return nil, err
		}

		_, err = pools.AssociateMonitor(lb.network, pool.ID, mon.ID).Extract()
		if err != nil {
			// Undo both the monitor and the pool before bailing out.
			monitors.Delete(lb.network, mon.ID)
			pools.Delete(lb.network, pool.ID)
			return nil, err
		}
	}

	// The VIP listens on the service's external Port (members above use the
	// NodePort).
	createOpts := vips.CreateOpts{
		Name:         name,
		Description:  fmt.Sprintf("Kubernetes external service %s", name),
		Protocol:     "TCP",
		ProtocolPort: ports[0].Port, //TODO: need to handle multi-port
		PoolID:       pool.ID,
		SubnetID:     lb.opts.SubnetId,
		Persistence:  persistence,
	}
	// Only request a specific VIP address when the caller supplied one.
	if loadBalancerIP != nil {
		createOpts.Address = loadBalancerIP.String()
	}

	vip, err := vips.Create(lb.network, createOpts).Extract()
	if err != nil {
		// Best-effort teardown of everything created above.
		if mon != nil {
			monitors.Delete(lb.network, mon.ID)
		}
		pools.Delete(lb.network, pool.ID)
		return nil, err
	}

	status := &api.LoadBalancerStatus{}
	status.Ingress = []api.LoadBalancerIngress{{IP: vip.Address}}

	// Optionally expose the VIP via a floating IP on the configured external
	// network; both the VIP and floating addresses are reported as ingress.
	if lb.opts.FloatingNetworkId != "" {
		floatIPOpts := floatingips.CreateOpts{
			FloatingNetworkID: lb.opts.FloatingNetworkId,
			PortID:            vip.PortID,
		}
		floatIP, err := floatingips.Create(lb.network, floatIPOpts).Extract()
		if err != nil {
			// NOTE(review): the pool/monitor/VIP created above are NOT
			// cleaned up on this path — confirm whether that is intended.
			return nil, err
		}

		status.Ingress = append(status.Ingress, api.LoadBalancerIngress{IP: floatIP.FloatingIP})
	}

	return status, nil
}
// EnsureLoadBalancerDeleted tears down all Neutron LBaaS v1 resources backing
// the given service: the floating IP (if a floating network is configured),
// the VIP, any associated health monitors, and the pool — in that order,
// since each depends on the next. The teardown is idempotent: resources that
// are already gone are treated as success, so it is safe to retry after a
// partial failure.
func (lb *LoadBalancer) EnsureLoadBalancerDeleted(service *api.Service) error {
	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
	glog.V(4).Infof("EnsureLoadBalancerDeleted(%v)", loadBalancerName)

	vip, err := getVipByName(lb.network, loadBalancerName)
	if err != nil && err != ErrNotFound {
		return err
	}

	// Release the floating IP attached to the VIP's port, if any.
	if lb.opts.FloatingNetworkId != "" && vip != nil {
		floatingIP, err := getFloatingIPByPortID(lb.network, vip.PortID)
		if err != nil && !isNotFound(err) {
			return err
		}
		if floatingIP != nil {
			err = floatingips.Delete(lb.network, floatingIP.ID).ExtractErr()
			if err != nil && !isNotFound(err) {
				return err
			}
		}
	}

	// We have to delete the VIP before the pool can be deleted,
	// so no point continuing if this fails.
	if vip != nil {
		err := vips.Delete(lb.network, vip.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	var pool *pools.Pool
	if vip != nil {
		// The VIP record tells us exactly which pool backs it.
		pool, err = pools.Get(lb.network, vip.PoolID).Extract()
		if err != nil && !isNotFound(err) {
			return err
		}
	} else {
		// The VIP is gone, but it is conceivable that a Pool
		// still exists that we failed to delete on some
		// previous occasion. Make a best effort attempt to
		// cleanup any pools with the same name as the VIP.
		pool, err = getPoolByName(lb.network, service.Name)
		if err != nil && err != ErrNotFound {
			return err
		}
	}

	if pool != nil {
		// Monitors must be disassociated from the pool before the
		// monitor (and then the pool) can be deleted.
		for _, monId := range pool.MonitorIDs {
			_, err = pools.DisassociateMonitor(lb.network, pool.ID, monId).Extract()
			if err != nil {
				return err
			}
			err = monitors.Delete(lb.network, monId).ExtractErr()
			if err != nil && !isNotFound(err) {
				return err
			}
		}
		err = pools.Delete(lb.network, pool.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	return nil
}
func (lb *LoadBalancer) CreateTCPLoadBalancer(name, region string, externalIP net.IP, port int, hosts []string, affinity api.AffinityType) (net.IP, error) { glog.V(2).Infof("CreateTCPLoadBalancer(%v, %v, %v, %v, %v)", name, region, externalIP, port, hosts) if affinity != api.AffinityTypeNone { return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity) } pool, err := pools.Create(lb.network, pools.CreateOpts{ Name: name, Protocol: pools.ProtocolTCP, SubnetID: lb.opts.SubnetId, }).Extract() if err != nil { return nil, err } for _, host := range hosts { addr, err := getAddressByName(lb.compute, host) if err != nil { return nil, err } _, err = members.Create(lb.network, members.CreateOpts{ PoolID: pool.ID, ProtocolPort: port, Address: addr, }).Extract() if err != nil { pools.Delete(lb.network, pool.ID) return nil, err } } var mon *monitors.Monitor if lb.opts.CreateMonitor { mon, err = monitors.Create(lb.network, monitors.CreateOpts{ Type: monitors.TypeTCP, Delay: int(lb.opts.MonitorDelay.Duration.Seconds()), Timeout: int(lb.opts.MonitorTimeout.Duration.Seconds()), MaxRetries: int(lb.opts.MonitorMaxRetries), }).Extract() if err != nil { pools.Delete(lb.network, pool.ID) return nil, err } _, err = pools.AssociateMonitor(lb.network, pool.ID, mon.ID).Extract() if err != nil { monitors.Delete(lb.network, mon.ID) pools.Delete(lb.network, pool.ID) return nil, err } } vip, err := vips.Create(lb.network, vips.CreateOpts{ Name: name, Description: fmt.Sprintf("Kubernetes external service %s", name), Address: externalIP.String(), Protocol: "TCP", ProtocolPort: port, PoolID: pool.ID, }).Extract() if err != nil { if mon != nil { monitors.Delete(lb.network, mon.ID) } pools.Delete(lb.network, pool.ID) return nil, err } return net.ParseIP(vip.Address), nil }