Example #1
// DeleteMonitor will delete a specified monitor. A fatal error will occur if
// the monitor could not be deleted. This works best when used as a deferred
// function.
func DeleteMonitor(t *testing.T, client *gophercloud.ServiceClient, monitorID string) {
	t.Logf("Attempting to delete monitor %s", monitorID)

	if err := monitors.Delete(client, monitorID).ExtractErr(); err != nil {
		t.Fatalf("Unable to delete monitor: %v", err)
	}

	t.Logf("Successfully deleted monitor %s", monitorID)
}
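The doc comment notes that DeleteMonitor works best as a deferred function. A minimal sketch of that pattern, assuming client setup and monitor creation happen elsewhere (the wrapper function below is hypothetical, not part of the example):

// testMonitorLifecycle is a hypothetical wrapper: DeleteMonitor is deferred
// immediately so the monitor is cleaned up even if later assertions fail.
// "client" and "monitorID" are assumed to come from setup code not shown here.
func testMonitorLifecycle(t *testing.T, client *gophercloud.ServiceClient, monitorID string) {
	defer DeleteMonitor(t, client, monitorID)

	// ... exercise the monitor here; the deferred cleanup runs last ...
}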
Example #2
// DeleteLoadBalancer deletes the named load balancer: the VIP first, then any
// associated health monitors, and finally the pool.
func (os *OpenStack) DeleteLoadBalancer(name string) error {
	vip, err := os.getVipByName(name)
	if err != nil && err != ErrNotFound {
		return err
	}

	// We have to delete the VIP before the pool can be deleted,
	// so no point continuing if this fails.
	if vip != nil {
		err = vips.Delete(os.network, vip.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	var pool *pools.Pool
	if vip != nil {
		pool, err = pools.Get(os.network, vip.PoolID).Extract()
		if err != nil && !isNotFound(err) {
			return err
		}
	} else {
		// The VIP is gone, but it is conceivable that a Pool
		// still exists that we failed to delete on some
		// previous occasion. Make a best-effort attempt to
		// clean up any pools with the same name as the VIP.
		pool, err = os.getPoolByName(name)
		if err != nil && err != ErrNotFound {
			return err
		}
	}

	if pool != nil {
		for _, monId := range pool.MonitorIDs {
			_, err = pools.DisassociateMonitor(os.network, pool.ID, monId).Extract()
			if err != nil {
				return err
			}

			err = monitors.Delete(os.network, monId).ExtractErr()
			if err != nil && !isNotFound(err) {
				return err
			}
		}
		err = pools.Delete(os.network, pool.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return err
		}
	}

	return nil
}
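DeleteLoadBalancer relies on an isNotFound helper that is not shown here. A minimal sketch of what such a helper could look like, based on the gophercloud error types used elsewhere on this page (this is an assumption, not the original helper):

// isNotFound (sketch) treats a 404 from Neutron as "already deleted" so the
// delete path stays idempotent. The real helper may differ.
func isNotFound(err error) bool {
	if _, ok := err.(gophercloud.ErrDefault404); ok {
		return true
	}
	if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok {
		return errCode.Actual == 404
	}
	return false
}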
Example #3
func TestDelete(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()

	th.Mux.HandleFunc("/v2.0/lb/health_monitors/b05e44b5-81f9-4551-b474-711a722698f7", func(w http.ResponseWriter, r *http.Request) {
		th.TestMethod(t, r, "DELETE")
		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
		w.WriteHeader(http.StatusNoContent)
	})

	res := monitors.Delete(fake.ServiceClient(), "b05e44b5-81f9-4551-b474-711a722698f7")
	th.AssertNoErr(t, res.Err)
}
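The same testhelper fixture can also cover the error path. A hedged sketch (the ID below is made up for illustration; it assumes gophercloud surfaces the 404 as a non-nil res.Err):

func TestDeleteNotFound(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()

	// Hypothetical handler: respond with 404 so Delete should report an error.
	th.Mux.HandleFunc("/v2.0/lb/health_monitors/unknown-id", func(w http.ResponseWriter, r *http.Request) {
		th.TestMethod(t, r, "DELETE")
		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
		w.WriteHeader(http.StatusNotFound)
	})

	res := monitors.Delete(fake.ServiceClient(), "unknown-id")
	if res.Err == nil {
		t.Fatal("expected an error for the 404 response, got nil")
	}
}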
Example #4
func waitForLBMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		log.Printf("[DEBUG] Attempting to delete OpenStack LB Monitor %s", monitorId)

		m, err := monitors.Get(networkingClient, monitorId).Extract()
		if err != nil {
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId)
				return m, "DELETED", nil
			}

			if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok {
				if errCode.Actual == 409 {
					log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId)
					return m, "PENDING", nil
				}
			}

			return m, "ACTIVE", err
		}

		log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m)
		err = monitors.Delete(networkingClient, monitorId).ExtractErr()
		if err != nil {
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId)
				return m, "DELETED", nil
			}

			if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok {
				if errCode.Actual == 409 {
					log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId)
					return m, "PENDING", nil
				}
			}

			return m, "ACTIVE", err
		}

		log.Printf("[DEBUG] OpenStack LB Monitor %s still active.", monitorId)
		return m, "ACTIVE", nil
	}
}
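waitForLBMonitorDelete returns a resource.StateRefreshFunc, which is intended to be plugged into Terraform's resource.StateChangeConf poller. A minimal sketch of that wiring, inside the resource delete function (the timeout values are assumptions; fmt and time imports are implied):

// Hypothetical caller: poll until the refresh function reports "DELETED".
stateConf := &resource.StateChangeConf{
	Pending:    []string{"ACTIVE", "PENDING"},
	Target:     []string{"DELETED"},
	Refresh:    waitForLBMonitorDelete(networkingClient, monitorId),
	Timeout:    10 * time.Minute,
	Delay:      5 * time.Second,
	MinTimeout: 3 * time.Second,
}
if _, err := stateConf.WaitForState(); err != nil {
	return fmt.Errorf("error waiting for OpenStack LB Monitor %s to delete: %s", monitorId, err)
}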
Example #5
// CreateLoadBalancer creates a pool, its members, an optional health monitor,
// and a VIP for the given load balancer, and returns the VIP address.
func (os *OpenStack) CreateLoadBalancer(loadBalancer *provider.LoadBalancer, affinity string) (string, error) {
	if len(loadBalancer.ExternalIPs) > 1 {
		return "", fmt.Errorf("multiple floatingips are not yet supported by openstack")
	}

	servicePort := 0
	for _, p := range loadBalancer.Hosts {
		if servicePort == 0 {
			servicePort = int(p.ServicePort)
		} else if int(p.ServicePort) != servicePort {
			return "", fmt.Errorf("multiple ports are not yet supported in openstack load balancers")
		}
	}

	var persistence *vips.SessionPersistence
	switch affinity {
	case ServiceAffinityNone:
		persistence = nil
	case ServiceAffinityClientIP:
		persistence = &vips.SessionPersistence{Type: "SOURCE_IP"}
	default:
		return "", fmt.Errorf("unsupported load balancer affinity: %v", affinity)
	}

	glog.V(2).Info("Checking if openstack load balancer already exists: ", loadBalancer.Name)
	_, err := os.getPoolByName(loadBalancer.Name)
	if err != nil && err != ErrNotFound {
		return "", fmt.Errorf("error checking if openstack load balancer already exists: %v", err)
	}

	if err == nil {
		err := os.DeleteLoadBalancer(loadBalancer.Name)
		if err != nil {
			return "", fmt.Errorf("error deleting existing openstack load balancer: %v", err)
		}
	}

	lbmethod := pools.LBMethodRoundRobin
	if os.lbOpts.LBMethod != "" {
		lbmethod = pools.LBMethod(os.lbOpts.LBMethod)
	}
	pool, err := pools.Create(os.network, pools.CreateOpts{
		Name:     loadBalancer.Name,
		Protocol: pools.ProtocolTCP,
		SubnetID: loadBalancer.Subnets[0].Uid,
		LBMethod: lbmethod,
		TenantID: loadBalancer.TenantID,
	}).Extract()
	if err != nil {
		return "", err
	}

	for _, host := range loadBalancer.Hosts {
		_, err = members.Create(os.network, members.CreateOpts{
			PoolID:       pool.ID,
			ProtocolPort: int(host.TargetPort),
			Address:      host.Ipaddress,
			TenantID:     loadBalancer.TenantID,
		}).Extract()
		if err != nil {
			pools.Delete(os.network, pool.ID)
			return "", err
		}
	}

	var mon *monitors.Monitor
	if os.lbOpts.CreateMonitor {
		mon, err = monitors.Create(os.network, monitors.CreateOpts{
			Type:       monitors.TypeTCP,
			TenantID:   loadBalancer.TenantID,
			Delay:      int(os.lbOpts.MonitorDelay.Duration.Seconds()),
			Timeout:    int(os.lbOpts.MonitorTimeout.Duration.Seconds()),
			MaxRetries: int(os.lbOpts.MonitorMaxRetries),
		}).Extract()
		if err != nil {
			pools.Delete(os.network, pool.ID)
			return "", err
		}

		_, err = pools.AssociateMonitor(os.network, pool.ID, mon.ID).Extract()
		if err != nil {
			monitors.Delete(os.network, mon.ID)
			pools.Delete(os.network, pool.ID)
			return "", err
		}
	}

	createOpts := vips.CreateOpts{
		Name:         loadBalancer.Name,
		Description:  fmt.Sprintf("Kubernetes service %s", loadBalancer.Name),
		Protocol:     "TCP",
		ProtocolPort: servicePort,
		PoolID:       pool.ID,
		SubnetID:     loadBalancer.Subnets[0].Uid,
		Persistence:  persistence,
		TenantID:     loadBalancer.TenantID,
	}
	//if loadBalancer.Vip != "" {
	//	createOpts.Address = loadBalancer.Vip
	//}

	vip, err := vips.Create(os.network, createOpts).Extract()
	if err != nil {
		if mon != nil {
			monitors.Delete(os.network, mon.ID)
		}
		pools.Delete(os.network, pool.ID)
		return "", err
	}

	// Bind the external IP to the VIP port
	if len(loadBalancer.ExternalIPs) > 0 {
		err := os.BindPortToFloatingip(vip.PortID, loadBalancer.ExternalIPs[0], vip.TenantID)
		if err != nil {
			vips.Delete(os.network, vip.ID)
			if mon != nil {
				monitors.Delete(os.network, mon.ID)
			}
			pools.Delete(os.network, pool.ID)
			return "", err
		}
	}

	return vip.Address, nil
}
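CreateLoadBalancer rolls back partially created resources by repeating the delete calls at every failure point. A hedged alternative sketch using a deferred rollback that is disarmed on success (the helper name is hypothetical; the gophercloud calls mirror the ones above):

// createPoolWithRollback (sketch): delete the pool automatically if any
// later step fails, instead of repeating pools.Delete at each error return.
func (os *OpenStack) createPoolWithRollback(name, subnetID, tenantID string) (string, error) {
	pool, err := pools.Create(os.network, pools.CreateOpts{
		Name:     name,
		Protocol: pools.ProtocolTCP,
		SubnetID: subnetID,
		LBMethod: pools.LBMethodRoundRobin,
		TenantID: tenantID,
	}).Extract()
	if err != nil {
		return "", err
	}

	rollback := true
	defer func() {
		if rollback {
			pools.Delete(os.network, pool.ID)
		}
	}()

	// ... create members, monitor, and VIP here, returning early on error ...

	rollback = false
	return pool.ID, nil
}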