func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodeNames []string) (*v1.LoadBalancerStatus, error) {
	glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodeNames, apiService.Annotations)

	ports := apiService.Spec.Ports
	if len(ports) == 0 {
		return nil, fmt.Errorf("no ports provided to openstack load balancer")
	}

	// Check for TCP protocol on each port
	// TODO: Convert all error messages to use an event recorder
	for _, port := range ports {
		if port.Protocol != v1.ProtocolTCP {
			return nil, fmt.Errorf("Only TCP LoadBalancer is supported for openstack load balancers")
		}
	}

	sourceRanges, err := service.GetLoadBalancerSourceRanges(apiService)
	if err != nil {
		return nil, err
	}

	if !service.IsAllowAll(sourceRanges) && !lbaas.opts.ManageSecurityGroups {
		return nil, fmt.Errorf("Source range restrictions are not supported for openstack load balancers without managing security groups")
	}

	affinity := v1.ServiceAffinityNone
	var persistence *v2pools.SessionPersistence
	switch affinity {
	case v1.ServiceAffinityNone:
		persistence = nil
	case v1.ServiceAffinityClientIP:
		persistence = &v2pools.SessionPersistence{Type: "SOURCE_IP"}
	default:
		return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity)
	}

	name := cloudprovider.GetLoadBalancerName(apiService)
	loadbalancer, err := getLoadbalancerByName(lbaas.network, name)
	if err != nil {
		if err != ErrNotFound {
			return nil, fmt.Errorf("Error getting loadbalancer %s: %v", name, err)
		}
		glog.V(2).Infof("Creating loadbalancer %s", name)
		loadbalancer, err = lbaas.createLoadBalancer(apiService, name)
		if err != nil {
			// Unknown error, retry later
			return nil, fmt.Errorf("Error creating loadbalancer %s: %v", name, err)
		}
	} else {
		glog.V(2).Infof("LoadBalancer %s already exists", name)
	}

	waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)

	lbmethod := v2pools.LBMethod(lbaas.opts.LBMethod)
	if lbmethod == "" {
		lbmethod = v2pools.LBMethodRoundRobin
	}

	oldListeners, err := getListenersByLoadBalancerID(lbaas.network, loadbalancer.ID)
	if err != nil {
		return nil, fmt.Errorf("Error getting LB %s listeners: %v", name, err)
	}

	for portIndex, port := range ports {
		listener := getListenerForPort(oldListeners, port)
		if listener == nil {
			glog.V(4).Infof("Creating listener for port %d", int(port.Port))
			listener, err = listeners.Create(lbaas.network, listeners.CreateOpts{
				Name:           fmt.Sprintf("listener_%s_%d", name, portIndex),
				Protocol:       listeners.Protocol(port.Protocol),
				ProtocolPort:   int(port.Port),
				LoadbalancerID: loadbalancer.ID,
			}).Extract()
			if err != nil {
				// Unknown error, retry later
				return nil, fmt.Errorf("Error creating LB listener: %v", err)
			}
			waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
		}

		glog.V(4).Infof("Listener for %s port %d: %s", string(port.Protocol), int(port.Port), listener.ID)

		// After all ports have been processed, remaining listeners are removed as obsolete.
		// Pop valid listeners.
		oldListeners = popListener(oldListeners, listener.ID)

		pool, err := getPoolByListenerID(lbaas.network, loadbalancer.ID, listener.ID)
		if err != nil && err != ErrNotFound {
			// Unknown error, retry later
			return nil, fmt.Errorf("Error getting pool for listener %s: %v", listener.ID, err)
		}
		if pool == nil {
			glog.V(4).Infof("Creating pool for listener %s", listener.ID)
			pool, err = v2pools.Create(lbaas.network, v2pools.CreateOpts{
				Name:        fmt.Sprintf("pool_%s_%d", name, portIndex),
				Protocol:    v2pools.Protocol(port.Protocol),
				LBMethod:    lbmethod,
				ListenerID:  listener.ID,
				Persistence: persistence,
			}).Extract()
			if err != nil {
				// Unknown error, retry later
				return nil, fmt.Errorf("Error creating pool for listener %s: %v", listener.ID, err)
			}
			waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
		}

		glog.V(4).Infof("Pool for listener %s: %s", listener.ID, pool.ID)

		members, err := getMembersByPoolID(lbaas.network, pool.ID)
		if err != nil && !isNotFound(err) {
			return nil, fmt.Errorf("Error getting pool members %s: %v", pool.ID, err)
		}
		for _, nodeName := range nodeNames {
			addr, err := getAddressByName(lbaas.compute, types.NodeName(nodeName))
			if err != nil {
				if err == ErrNotFound {
					// Node failure, do not create member
					glog.Warningf("Failed to create LB pool member for node %s: %v", nodeName, err)
					continue
				} else {
					return nil, fmt.Errorf("Error getting address for node %s: %v", nodeName, err)
				}
			}

			if !memberExists(members, addr, int(port.NodePort)) {
				glog.V(4).Infof("Creating member for pool %s", pool.ID)
				_, err := v2pools.CreateAssociateMember(lbaas.network, pool.ID, v2pools.MemberCreateOpts{
					ProtocolPort: int(port.NodePort),
					Address:      addr,
					SubnetID:     lbaas.opts.SubnetId,
				}).Extract()
				if err != nil {
					return nil, fmt.Errorf("Error creating LB pool member for node: %s, %v", nodeName, err)
				}

				waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
			} else {
				// After all members have been processed, remaining members are deleted as obsolete.
				members = popMember(members, addr, int(port.NodePort))
			}

			glog.V(4).Infof("Ensured pool %s has member for %s at %s", pool.ID, nodeName, addr)
		}

		// Delete obsolete members for this pool
		for _, member := range members {
			glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address)
			err := v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
			if err != nil && !isNotFound(err) {
				return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err)
			}
			waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
		}

		monitorID := pool.MonitorID
		if monitorID == "" && lbaas.opts.CreateMonitor {
			glog.V(4).Infof("Creating monitor for pool %s", pool.ID)
			monitor, err := v2monitors.Create(lbaas.network, v2monitors.CreateOpts{
				PoolID:     pool.ID,
				Type:       string(port.Protocol),
				Delay:      int(lbaas.opts.MonitorDelay.Duration.Seconds()),
				Timeout:    int(lbaas.opts.MonitorTimeout.Duration.Seconds()),
				MaxRetries: int(lbaas.opts.MonitorMaxRetries),
			}).Extract()
			if err != nil {
				return nil, fmt.Errorf("Error creating LB pool healthmonitor: %v", err)
			}
			waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
			monitorID = monitor.ID
		}

		glog.V(4).Infof("Monitor for pool %s: %s", pool.ID, monitorID)
	}

	// All remaining listeners are obsolete, delete
	for _, listener := range oldListeners {
		glog.V(4).Infof("Deleting obsolete listener %s:", listener.ID)
		// get pool for listener
		pool, err := getPoolByListenerID(lbaas.network, loadbalancer.ID, listener.ID)
		if err != nil && err != ErrNotFound {
			return nil, fmt.Errorf("Error getting pool for obsolete listener %s: %v", listener.ID, err)
		}
		if pool != nil {
			// get and delete monitor
			monitorID := pool.MonitorID
			if monitorID != "" {
				glog.V(4).Infof("Deleting obsolete monitor %s for pool %s", monitorID, pool.ID)
				err = v2monitors.Delete(lbaas.network, monitorID).ExtractErr()
				if err != nil && !isNotFound(err) {
					return nil, fmt.Errorf("Error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err)
				}
				waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
			}
			// get and delete pool members
			members, err := getMembersByPoolID(lbaas.network, pool.ID)
			if err != nil && !isNotFound(err) {
				return nil, fmt.Errorf("Error getting members for pool %s: %v", pool.ID, err)
			}
			if members != nil {
				for _, member := range members {
					glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address)
					err := v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
					if err != nil && !isNotFound(err) {
						return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err)
					}
					waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
				}
			}
			glog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID)
			// delete pool
			err = v2pools.Delete(lbaas.network, pool.ID).ExtractErr()
			if err != nil && !isNotFound(err) {
				return nil, fmt.Errorf("Error deleting obsolete pool %s for listener %s: %v", pool.ID, listener.ID, err)
			}
			waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
		}
		// delete listener
		err = listeners.Delete(lbaas.network, listener.ID).ExtractErr()
		if err != nil && !isNotFound(err) {
			return nil, fmt.Errorf("Error deleting obsolete listener: %v", err)
		}
		waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
		glog.V(2).Infof("Deleted obsolete listener: %s", listener.ID)
	}

	status := &v1.LoadBalancerStatus{}

	status.Ingress = []v1.LoadBalancerIngress{{IP: loadbalancer.VipAddress}}

	port, err := getPortByIP(lbaas.network, loadbalancer.VipAddress)
	if err != nil {
		return nil, fmt.Errorf("Error getting port for LB vip %s: %v", loadbalancer.VipAddress, err)
	}
	floatIP, err := getFloatingIPByPortID(lbaas.network, port.ID)
	if err != nil && err != ErrNotFound {
		return nil, fmt.Errorf("Error getting floating ip for port %s: %v", port.ID, err)
	}
	if floatIP == nil && lbaas.opts.FloatingNetworkId != "" {
		glog.V(4).Infof("Creating floating ip for loadbalancer %s port %s", loadbalancer.ID, port.ID)
		floatIPOpts := floatingips.CreateOpts{
			FloatingNetworkID: lbaas.opts.FloatingNetworkId,
			PortID:            port.ID,
		}
		floatIP, err = floatingips.Create(lbaas.network, floatIPOpts).Extract()
		if err != nil {
			return nil, fmt.Errorf("Error creating LB floatingip %+v: %v", floatIPOpts, err)
		}
	}
	if floatIP != nil {
		status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: floatIP.FloatingIP})
	}

	if lbaas.opts.ManageSecurityGroups {
		lbSecGroupCreateOpts := groups.CreateOpts{
			Name:        getSecurityGroupName(clusterName, apiService),
			Description: fmt.Sprintf("Security Group for %v Service LoadBalancer", apiService.Name),
		}

		lbSecGroup, err := groups.Create(lbaas.network, lbSecGroupCreateOpts).Extract()
		if err != nil {
			// cleanup what was created so far
			_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
			return nil, err
		}

		for _, port := range ports {
			for _, sourceRange := range sourceRanges.StringSlice() {
				ethertype := "IPv4"
				network, _, err := net.ParseCIDR(sourceRange)
				if err != nil {
					// cleanup what was created so far
					glog.Errorf("Error parsing source range %s as a CIDR", sourceRange)
					_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
					return nil, err
				}

				if network.To4() == nil {
					ethertype = "IPv6"
				}

				lbSecGroupRuleCreateOpts := rules.CreateOpts{
					Direction:      "ingress",
					PortRangeMax:   int(port.Port),
					PortRangeMin:   int(port.Port),
					Protocol:       strings.ToLower(string(port.Protocol)),
					RemoteIPPrefix: sourceRange,
					SecGroupID:     lbSecGroup.ID,
					EtherType:      ethertype,
				}

				_, err = rules.Create(lbaas.network, lbSecGroupRuleCreateOpts).Extract()
				if err != nil {
					// cleanup what was created so far
					_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
					return nil, err
				}
			}

			err := createNodeSecurityGroup(lbaas.network, lbaas.opts.NodeSecurityGroupID, int(port.NodePort), string(port.Protocol), lbSecGroup.ID)
			if err != nil {
				glog.Errorf("Error occurred creating security group for loadbalancer %s:", loadbalancer.ID)
				_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
				return nil, err
			}
		}

		lbSecGroupRuleCreateOpts := rules.CreateOpts{
			Direction:      "ingress",
			PortRangeMax:   4, // ICMP: Code - Values for ICMP "Destination Unreachable: Fragmentation Needed and Don't Fragment was Set"
			PortRangeMin:   3, // ICMP: Type
			Protocol:       "icmp",
			RemoteIPPrefix: "0.0.0.0/0", // The Fragmentation packet can come from anywhere along the path back to the sourceRange - we need to allow this from all
			SecGroupID:     lbSecGroup.ID,
			EtherType:      "IPv4",
		}

		_, err = rules.Create(lbaas.network, lbSecGroupRuleCreateOpts).Extract()
		if err != nil {
			// cleanup what was created so far
			_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
			return nil, err
		}

		lbSecGroupRuleCreateOpts = rules.CreateOpts{
			Direction:      "ingress",
			PortRangeMax:   0, // ICMP: Code - Values for ICMP "Packet Too Big"
			PortRangeMin:   2, // ICMP: Type
			Protocol:       "icmp",
			RemoteIPPrefix: "::/0", // The Fragmentation packet can come from anywhere along the path back to the sourceRange - we need to allow this from all
			SecGroupID:     lbSecGroup.ID,
			EtherType:      "IPv6",
		}

		_, err = rules.Create(lbaas.network, lbSecGroupRuleCreateOpts).Extract()
		if err != nil {
			// cleanup what was created so far
			_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
			return nil, err
		}

		// Get the port ID
		port, err := getPortByIP(lbaas.network, loadbalancer.VipAddress)
		if err != nil {
			// cleanup what was created so far
			_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
			return nil, err
		}

		updateOpts := neutronports.UpdateOpts{SecurityGroups: []string{lbSecGroup.ID}}
		res := neutronports.Update(lbaas.network, port.ID, updateOpts)
		if res.Err != nil {
			glog.Errorf("Error occurred updating port: %s", port.ID)
			// cleanup what was created so far
			_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
			return nil, res.Err
		}
	}

	return status, nil
}
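// The v2 reconciliation above relies on two helpers that are not shown in this excerpt:
// popListener and popMember remove the still-wanted entries from the "old" slices, so that
// whatever remains afterwards is deleted as obsolete. A minimal sketch of what they
// presumably look like, assuming the gophercloud lbaas_v2 listener and pool member types
// (github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/{listeners,pools}):

// popListener returns existingListeners with the listener whose ID equals id removed.
func popListener(existingListeners []listeners.Listener, id string) []listeners.Listener {
	for i, existing := range existingListeners {
		if existing.ID == id {
			// Swap with the last element and truncate; order is not significant here.
			existingListeners[i] = existingListeners[len(existingListeners)-1]
			return existingListeners[:len(existingListeners)-1]
		}
	}
	return existingListeners
}

// popMember returns members with the member matching addr and port removed.
func popMember(members []v2pools.Member, addr string, port int) []v2pools.Member {
	for i, member := range members {
		if member.Address == addr && member.ProtocolPort == port {
			members[i] = members[len(members)-1]
			return members[:len(members)-1]
		}
	}
	return members
}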
// This reconciles the Network Security Group in the same way that the LB is reconciled.
// This entails adding required, missing SecurityRules and removing stale rules.
func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName string, service *v1.Service) (network.SecurityGroup, bool, error) {
	serviceName := getServiceName(service)
	wantLb := len(service.Spec.Ports) > 0

	sourceRanges, err := serviceapi.GetLoadBalancerSourceRanges(service)
	if err != nil {
		return sg, false, err
	}
	var sourceAddressPrefixes []string
	if sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges) {
		sourceAddressPrefixes = []string{"Internet"}
	} else {
		for _, ip := range sourceRanges {
			sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String())
		}
	}

	expectedSecurityRules := make([]network.SecurityRule, len(service.Spec.Ports)*len(sourceAddressPrefixes))

	for i, port := range service.Spec.Ports {
		securityRuleName := getRuleName(service, port)
		_, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol)
		if err != nil {
			return sg, false, err
		}

		for j := range sourceAddressPrefixes {
			ix := i*len(sourceAddressPrefixes) + j
			expectedSecurityRules[ix] = network.SecurityRule{
				Name: to.StringPtr(securityRuleName),
				Properties: &network.SecurityRulePropertiesFormat{
					Protocol:                 securityProto,
					SourcePortRange:          to.StringPtr("*"),
					DestinationPortRange:     to.StringPtr(strconv.Itoa(int(port.Port))),
					SourceAddressPrefix:      to.StringPtr(sourceAddressPrefixes[j]),
					DestinationAddressPrefix: to.StringPtr("*"),
					Access:                   network.Allow,
					Direction:                network.Inbound,
				},
			}
		}
	}

	// update security rules
	dirtySg := false
	var updatedRules []network.SecurityRule
	if sg.Properties.SecurityRules != nil {
		updatedRules = *sg.Properties.SecurityRules
	}
	// update security rules: remove unwanted
	for i := len(updatedRules) - 1; i >= 0; i-- {
		existingRule := updatedRules[i]
		if serviceOwnsRule(service, *existingRule.Name) {
			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
			keepRule := false
			if findSecurityRule(expectedSecurityRules, existingRule) {
				glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
				keepRule = true
			}
			if !keepRule {
				glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
				dirtySg = true
			}
		}
	}
	// update security rules: add needed
	for _, expectedRule := range expectedSecurityRules {
		foundRule := false
		if findSecurityRule(updatedRules, expectedRule) {
			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
			foundRule = true
		}
		if !foundRule {
			glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name)
			nextAvailablePriority, err := getNextAvailablePriority(updatedRules)
			if err != nil {
				return sg, false, err
			}

			expectedRule.Properties.Priority = to.Int32Ptr(nextAvailablePriority)
			updatedRules = append(updatedRules, expectedRule)
			dirtySg = true
		}
	}
	if dirtySg {
		sg.Properties.SecurityRules = &updatedRules
	}
	return sg, dirtySg, nil
}
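// reconcileSecurityGroup assigns each newly added rule the next free priority via
// getNextAvailablePriority, which is not part of this excerpt. A minimal sketch of such a
// helper, assuming the azure-sdk-for-go network.SecurityRule type used above; the priority
// window constants are an assumption (Azure NSG rule priorities must fall in 100-4096):

const (
	// Assumed bounds for the priorities this provider manages.
	loadBalancerMinimumPriority = 500
	loadBalancerMaximumPriority = 4096
)

// getNextAvailablePriority returns the lowest priority in the managed window that is not
// already taken by an existing rule.
func getNextAvailablePriority(rules []network.SecurityRule) (int32, error) {
	used := make(map[int32]bool)
	for _, rule := range rules {
		if rule.Properties != nil && rule.Properties.Priority != nil {
			used[*rule.Properties.Priority] = true
		}
	}
	for p := int32(loadBalancerMinimumPriority); p < loadBalancerMaximumPriority; p++ {
		if !used[p] {
			return p, nil
		}
	}
	return 0, fmt.Errorf("security rule priorities are exhausted")
}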
func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodeNames []string) (*v1.LoadBalancerStatus, error) {
	glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodeNames, apiService.Annotations)

	ports := apiService.Spec.Ports
	if len(ports) > 1 {
		return nil, fmt.Errorf("multiple ports are not supported in openstack v1 load balancers")
	} else if len(ports) == 0 {
		return nil, fmt.Errorf("no ports provided to openstack load balancer")
	}

	// The service controller verified all the protocols match on the ports, just check and use the first one
	// TODO: Convert all error messages to use an event recorder
	if ports[0].Protocol != v1.ProtocolTCP {
		return nil, fmt.Errorf("Only TCP LoadBalancer is supported for openstack load balancers")
	}

	affinity := apiService.Spec.SessionAffinity
	var persistence *vips.SessionPersistence
	switch affinity {
	case v1.ServiceAffinityNone:
		persistence = nil
	case v1.ServiceAffinityClientIP:
		persistence = &vips.SessionPersistence{Type: "SOURCE_IP"}
	default:
		return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity)
	}

	sourceRanges, err := service.GetLoadBalancerSourceRanges(apiService)
	if err != nil {
		return nil, err
	}

	if !service.IsAllowAll(sourceRanges) {
		return nil, fmt.Errorf("Source range restrictions are not supported for openstack load balancers")
	}

	glog.V(2).Infof("Checking if openstack load balancer already exists: %s", cloudprovider.GetLoadBalancerName(apiService))
	_, exists, err := lb.GetLoadBalancer(clusterName, apiService)
	if err != nil {
		return nil, fmt.Errorf("error checking if openstack load balancer already exists: %v", err)
	}

	// TODO: Implement a more efficient update strategy for common changes than delete & create
	// In particular, if we implement hosts update, we can get rid of UpdateHosts
	if exists {
		err := lb.EnsureLoadBalancerDeleted(clusterName, apiService)
		if err != nil {
			return nil, fmt.Errorf("error deleting existing openstack load balancer: %v", err)
		}
	}

	lbmethod := lb.opts.LBMethod
	if lbmethod == "" {
		lbmethod = pools.LBMethodRoundRobin
	}
	name := cloudprovider.GetLoadBalancerName(apiService)
	pool, err := pools.Create(lb.network, pools.CreateOpts{
		Name:     name,
		Protocol: pools.ProtocolTCP,
		SubnetID: lb.opts.SubnetId,
		LBMethod: lbmethod,
	}).Extract()
	if err != nil {
		return nil, err
	}

	for _, nodeName := range nodeNames {
		addr, err := getAddressByName(lb.compute, types.NodeName(nodeName))
		if err != nil {
			return nil, err
		}

		_, err = members.Create(lb.network, members.CreateOpts{
			PoolID:       pool.ID,
			ProtocolPort: int(ports[0].NodePort), // Note: only handles single port
			Address:      addr,
		}).Extract()
		if err != nil {
			pools.Delete(lb.network, pool.ID)
			return nil, err
		}
	}

	var mon *monitors.Monitor
	if lb.opts.CreateMonitor {
		mon, err = monitors.Create(lb.network, monitors.CreateOpts{
			Type:       monitors.TypeTCP,
			Delay:      int(lb.opts.MonitorDelay.Duration.Seconds()),
			Timeout:    int(lb.opts.MonitorTimeout.Duration.Seconds()),
			MaxRetries: int(lb.opts.MonitorMaxRetries),
		}).Extract()
		if err != nil {
			pools.Delete(lb.network, pool.ID)
			return nil, err
		}

		_, err = pools.AssociateMonitor(lb.network, pool.ID, mon.ID).Extract()
		if err != nil {
			monitors.Delete(lb.network, mon.ID)
			pools.Delete(lb.network, pool.ID)
			return nil, err
		}
	}

	createOpts := vips.CreateOpts{
		Name:         name,
		Description:  fmt.Sprintf("Kubernetes external service %s", name),
		Protocol:     "TCP",
		ProtocolPort: int(ports[0].Port), // TODO: need to handle multi-port
		PoolID:       pool.ID,
		SubnetID:     lb.opts.SubnetId,
		Persistence:  persistence,
	}

	loadBalancerIP := apiService.Spec.LoadBalancerIP
	if loadBalancerIP != "" {
		createOpts.Address = loadBalancerIP
	}

	vip, err := vips.Create(lb.network, createOpts).Extract()
	if err != nil {
		if mon != nil {
			monitors.Delete(lb.network, mon.ID)
		}
		pools.Delete(lb.network, pool.ID)
		return nil, err
	}

	status := &v1.LoadBalancerStatus{}

	status.Ingress = []v1.LoadBalancerIngress{{IP: vip.Address}}

	if lb.opts.FloatingNetworkId != "" {
		floatIPOpts := floatingips.CreateOpts{
			FloatingNetworkID: lb.opts.FloatingNetworkId,
			PortID:            vip.PortID,
		}
		floatIP, err := floatingips.Create(lb.network, floatIPOpts).Extract()
		if err != nil {
			return nil, err
		}

		status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: floatIP.FloatingIP})
	}

	return status, nil
}