Example No. 1
// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure.
func (configmapcontroller *ConfigMapController) deliverConfigMap(configmap types.NamespacedName, delay time.Duration, failed bool) {
	key := configmap.String()
	if failed {
		configmapcontroller.configmapBackoff.Next(key, time.Now())
		delay = delay + configmapcontroller.configmapBackoff.Get(key)
	} else {
		configmapcontroller.configmapBackoff.Reset(key)
	}
	configmapcontroller.configmapDeliverer.DeliverAfter(key, &configmap, delay)
}
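The backoff/deliverer pair used in these controllers grows a per-key delay on failure and clears it on success. Below is a minimal, self-contained sketch of the same pattern, assuming the flowcontrol.Backoff API from client-go (Next/Get/Reset); time.AfterFunc stands in for the controllers' delaying deliverer, which is not shown here.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Per-key exponential backoff: 5s initial step, capped at one minute.
	backoff := flowcontrol.NewBackOff(5*time.Second, time.Minute)

	deliver := func(key string, delay time.Duration, failed bool) {
		if failed {
			// Grow this key's backoff and add it to the requested delay.
			backoff.Next(key, time.Now())
			delay += backoff.Get(key)
		} else {
			// A failure-free delivery resets the key's backoff.
			backoff.Reset(key)
		}
		time.AfterFunc(delay, func() { fmt.Println("reconcile", key) })
	}

	deliver("default/my-configmap", 0, true)  // requeued with backoff
	deliver("default/my-configmap", 0, false) // requeued immediately
	time.Sleep(10 * time.Second)
}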
Example No. 2
// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure.
func (secretcontroller *SecretController) deliverSecret(secret types.NamespacedName, delay time.Duration, failed bool) {
	key := secret.String()
	if failed {
		secretcontroller.secretBackoff.Next(key, time.Now())
		delay = delay + secretcontroller.secretBackoff.Get(key)
	} else {
		secretcontroller.secretBackoff.Reset(key)
	}
	secretcontroller.secretDeliverer.DeliverAfter(key, &secret, delay)
}
Example No. 3
func (ic *IngressController) deliverIngress(ingress types.NamespacedName, delay time.Duration, failed bool) {
	glog.V(4).Infof("Delivering ingress: %s with delay: %v error: %v", ingress, delay, failed)
	key := ingress.String()
	if failed {
		ic.ingressBackoff.Next(key, time.Now())
		delay = delay + ic.ingressBackoff.Get(key)
	} else {
		ic.ingressBackoff.Reset(key)
	}
	ic.ingressDeliverer.DeliverAfter(key, ingress, delay)
}
Example No. 4
func (secretcontroller *SecretController) reconcileSecret(secret types.NamespacedName) {
	if !secretcontroller.isSynced() {
		secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
		return
	}

	key := secret.String()
	baseSecretObjFromStore, exist, err := secretcontroller.secretInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main secret store for %v: %v", key, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}

	if !exist {
		// Not federated secret, ignoring.
		return
	}

	// Create a copy before modifying the obj to prevent race condition with
	// other readers of obj from store.
	baseSecretObj, err := api.Scheme.DeepCopy(baseSecretObjFromStore)
	baseSecret, ok := baseSecretObj.(*apiv1.Secret)
	if err != nil || !ok {
		glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}
	if baseSecret.DeletionTimestamp != nil {
		if err := secretcontroller.delete(baseSecret); err != nil {
			glog.Errorf("Failed to delete %s: %v", secret, err)
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "DeleteFailed",
				"Secret delete failed: %v", err)
			secretcontroller.deliverSecret(secret, 0, true)
		}
		return
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for secret: %s",
		baseSecret.Name)
	// Add the required finalizers before creating a secret in underlying clusters.
	updatedSecretObj, err := secretcontroller.deletionHelper.EnsureFinalizers(baseSecret)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in secret %s: %v",
			baseSecret.Name, err)
		secretcontroller.deliverSecret(secret, 0, false)
		return
	}
	baseSecret = updatedSecretObj.(*apiv1.Secret)

	glog.V(3).Infof("Syncing secret %s in underlying clusters", baseSecret.Name)

	clusters, err := secretcontroller.secretFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterSecretObj, found, err := secretcontroller.secretFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
			secretcontroller.deliverSecret(secret, 0, true)
			return
		}

		// The data should not be modified.
		desiredSecret := &apiv1.Secret{
			ObjectMeta: util.DeepCopyRelevantObjectMeta(baseSecret.ObjectMeta),
			Data:       baseSecret.Data,
			Type:       baseSecret.Type,
		}

		if !found {
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "CreateInCluster",
				"Creating secret in cluster %s", cluster.Name)

			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredSecret,
				ClusterName: cluster.Name,
			})
		} else {
			clusterSecret := clusterSecretObj.(*apiv1.Secret)

			// Update existing secret, if needed.
			if !util.SecretEquivalent(*desiredSecret, *clusterSecret) {

				secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInCluster",
					"Updating secret in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredSecret,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return
	}
	err = secretcontroller.federatedUpdater.UpdateWithOnError(operations, secretcontroller.updateTimeout,
		func(op util.FederatedOperation, operror error) {
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInClusterFailed",
				"Secret update in cluster %s failed: %v", op.ClusterName, operror)
		})

	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", key, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}

	// Everything is in order, but let's be doubly sure.
	secretcontroller.deliverSecret(secret, secretcontroller.secretReviewDelay, false)
}
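util.SecretEquivalent is referenced above but not shown. A plausible sketch of such a check, an assumption rather than the federation's actual implementation, compares only the fields the controller propagates:

import (
	"reflect"

	apiv1 "k8s.io/kubernetes/pkg/api/v1"
)

// secretEquivalent is a hypothetical stand-in for util.SecretEquivalent:
// two secrets count as equivalent when the propagated fields match.
func secretEquivalent(a, b apiv1.Secret) bool {
	return a.Name == b.Name &&
		a.Namespace == b.Namespace &&
		reflect.DeepEqual(a.Labels, b.Labels) &&
		reflect.DeepEqual(a.Annotations, b.Annotations) &&
		reflect.DeepEqual(a.Data, b.Data) &&
		a.Type == b.Type
}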
Example No. 5
func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB, proxyProtocol bool, loadBalancerAttributes *elb.LoadBalancerAttributes) (*elb.LoadBalancerDescription, error) {
	loadBalancer, err := c.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return nil, err
	}

	dirty := false

	if loadBalancer == nil {
		createRequest := &elb.CreateLoadBalancerInput{}
		createRequest.LoadBalancerName = aws.String(loadBalancerName)

		createRequest.Listeners = listeners

		if internalELB {
			createRequest.Scheme = aws.String("internal")
		}

		// We are supposed to specify one subnet per AZ.
		// TODO: What happens if we have more than one subnet per AZ?
		createRequest.Subnets = stringPointerArray(subnetIDs)

		createRequest.SecurityGroups = stringPointerArray(securityGroupIDs)

		createRequest.Tags = []*elb.Tag{
			{Key: aws.String(TagNameKubernetesCluster), Value: aws.String(c.getClusterName())},
			{Key: aws.String(TagNameKubernetesService), Value: aws.String(namespacedName.String())},
		}

		glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName)
		_, err := c.elb.CreateLoadBalancer(createRequest)
		if err != nil {
			return nil, err
		}

		if proxyProtocol {
			err = c.createProxyProtocolPolicy(loadBalancerName)
			if err != nil {
				return nil, err
			}

			for _, listener := range listeners {
				glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort)
				err := c.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)})
				if err != nil {
					return nil, err
				}
			}
		}

		dirty = true
	} else {
		// TODO: Sync internal vs non-internal

		{
			// Sync subnets
			expected := sets.NewString(subnetIDs...)
			actual := stringSetFromPointers(loadBalancer.Subnets)

			additions := expected.Difference(actual)
			removals := actual.Difference(expected)

			if removals.Len() != 0 {
				request := &elb.DetachLoadBalancerFromSubnetsInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.Subnets = stringSetToPointers(removals)
				glog.V(2).Info("Detaching load balancer from removed subnets")
				_, err := c.elb.DetachLoadBalancerFromSubnets(request)
				if err != nil {
					return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %v", err)
				}
				dirty = true
			}

			if additions.Len() != 0 {
				request := &elb.AttachLoadBalancerToSubnetsInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.Subnets = stringSetToPointers(additions)
				glog.V(2).Info("Attaching load balancer to added subnets")
				_, err := c.elb.AttachLoadBalancerToSubnets(request)
				if err != nil {
					return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %v", err)
				}
				dirty = true
			}
		}

		{
			// Sync security groups
			expected := sets.NewString(securityGroupIDs...)
			actual := stringSetFromPointers(loadBalancer.SecurityGroups)

			if !expected.Equal(actual) {
				// This call just replaces the security groups, unlike e.g. subnets (!)
				request := &elb.ApplySecurityGroupsToLoadBalancerInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.SecurityGroups = stringPointerArray(securityGroupIDs)
				glog.V(2).Info("Applying updated security groups to load balancer")
				_, err := c.elb.ApplySecurityGroupsToLoadBalancer(request)
				if err != nil {
					return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %v", err)
				}
				dirty = true
			}
		}

		{
			// Sync listeners
			listenerDescriptions := loadBalancer.ListenerDescriptions

			foundSet := make(map[int]bool)
			removals := []*int64{}
			for _, listenerDescription := range listenerDescriptions {
				actual := listenerDescription.Listener
				if actual == nil {
					glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName)
					continue
				}

				found := -1
				for i, expected := range listeners {
					if orEmpty(actual.Protocol) != orEmpty(expected.Protocol) {
						continue
					}
					if orEmpty(actual.InstanceProtocol) != orEmpty(expected.InstanceProtocol) {
						continue
					}
					if orZero(actual.InstancePort) != orZero(expected.InstancePort) {
						continue
					}
					if orZero(actual.LoadBalancerPort) != orZero(expected.LoadBalancerPort) {
						continue
					}
					if orEmpty(actual.SSLCertificateId) != orEmpty(expected.SSLCertificateId) {
						continue
					}
					found = i
				}
				if found != -1 {
					foundSet[found] = true
				} else {
					removals = append(removals, actual.LoadBalancerPort)
				}
			}

			additions := []*elb.Listener{}
			for i := range listeners {
				if foundSet[i] {
					continue
				}
				additions = append(additions, listeners[i])
			}

			if len(removals) != 0 {
				request := &elb.DeleteLoadBalancerListenersInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.LoadBalancerPorts = removals
				glog.V(2).Info("Deleting removed load balancer listeners")
				_, err := c.elb.DeleteLoadBalancerListeners(request)
				if err != nil {
					return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %v", err)
				}
				dirty = true
			}

			if len(additions) != 0 {
				request := &elb.CreateLoadBalancerListenersInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.Listeners = additions
				glog.V(2).Info("Creating added load balancer listeners")
				_, err := c.elb.CreateLoadBalancerListeners(request)
				if err != nil {
					return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %v", err)
				}
				dirty = true
			}
		}

		{
			// Sync proxy protocol state for new and existing listeners

			proxyPolicies := make([]*string, 0)
			if proxyProtocol {
				// Ensure the backend policy exists

				// NOTE The documentation for the AWS API indicates we could get an HTTP 400
				// back if a policy of the same name already exists. However, the aws-sdk does not
				// seem to return an error to us in these cases. Therefore, this will issue an API
				// request every time.
				err := c.createProxyProtocolPolicy(loadBalancerName)
				if err != nil {
					return nil, err
				}

				proxyPolicies = append(proxyPolicies, aws.String(ProxyProtocolPolicyName))
			}

			foundBackends := make(map[int64]bool)
			proxyProtocolBackends := make(map[int64]bool)
			for _, backendListener := range loadBalancer.BackendServerDescriptions {
				foundBackends[*backendListener.InstancePort] = false
				proxyProtocolBackends[*backendListener.InstancePort] = proxyProtocolEnabled(backendListener)
			}

			for _, listener := range listeners {
				setPolicy := false
				instancePort := *listener.InstancePort

				if currentState, ok := proxyProtocolBackends[instancePort]; !ok {
					// This is a new ELB backend so we only need to worry about
					// potentially adding a policy and not removing an
					// existing one
					setPolicy = proxyProtocol
				} else {
					foundBackends[instancePort] = true
					// This is an existing ELB backend so we need to determine
					// if the state changed
					setPolicy = (currentState != proxyProtocol)
				}

				if setPolicy {
					glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol)
					err := c.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies)
					if err != nil {
						return nil, err
					}
					dirty = true
				}
			}

			// We now need to figure out if any backend policies need to be
			// removed, because these old policies will stick around even if
			// there is no corresponding listener anymore
			for instancePort, found := range foundBackends {
				if !found {
					glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort)
					err := c.setBackendPolicies(loadBalancerName, instancePort, []*string{})
					if err != nil {
						return nil, err
					}
					dirty = true
				}
			}
		}
	}

	// Whether the ELB was new or existing, sync attributes regardless. This accounts for things
	// that cannot be specified at the time of creation and can only be modified after the fact,
	// e.g. idle connection timeout.
	{
		describeAttributesRequest := &elb.DescribeLoadBalancerAttributesInput{}
		describeAttributesRequest.LoadBalancerName = aws.String(loadBalancerName)
		describeAttributesOutput, err := c.elb.DescribeLoadBalancerAttributes(describeAttributesRequest)
		if err != nil {
			glog.Warning("Unable to retrieve load balancer attributes during attribute sync")
			return nil, err
		}

		foundAttributes := &describeAttributesOutput.LoadBalancerAttributes

		// Update attributes if they're dirty
		if !reflect.DeepEqual(loadBalancerAttributes, foundAttributes) {
			glog.V(2).Info("Updating load-balancer attributes for %q", loadBalancerName)

			modifyAttributesRequest := &elb.ModifyLoadBalancerAttributesInput{}
			modifyAttributesRequest.LoadBalancerName = aws.String(loadBalancerName)
			modifyAttributesRequest.LoadBalancerAttributes = loadBalancerAttributes
			_, err = c.elb.ModifyLoadBalancerAttributes(modifyAttributesRequest)
			if err != nil {
				return nil, fmt.Errorf("Unable to update load balancer attributes during attribute sync: %v", err)
			}
			dirty = true
		}
	}

	if dirty {
		loadBalancer, err = c.describeLoadBalancer(loadBalancerName)
		if err != nil {
			glog.Warning("Unable to retrieve load balancer after creation/update")
			return nil, err
		}
	}

	return loadBalancer, nil
}
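Several small helpers used above (stringPointerArray, stringSetFromPointers, stringSetToPointers, orEmpty, orZero) are not shown. The following are assumed implementations, consistent with how the function uses them:

import (
	"github.com/aws/aws-sdk-go/aws"
	"k8s.io/apimachinery/pkg/util/sets"
)

// stringPointerArray converts []string to the []*string the AWS SDK expects.
func stringPointerArray(in []string) []*string {
	if in == nil {
		return nil
	}
	return aws.StringSlice(in)
}

// stringSetFromPointers builds a sets.String from []*string, skipping nils.
func stringSetFromPointers(in []*string) sets.String {
	out := sets.NewString()
	for _, p := range in {
		if p != nil {
			out.Insert(*p)
		}
	}
	return out
}

// stringSetToPointers converts a sets.String back to []*string.
func stringSetToPointers(in sets.String) []*string {
	return aws.StringSlice(in.List())
}

// orEmpty and orZero are nil-safe dereferences used in listener matching.
func orEmpty(s *string) string {
	if s == nil {
		return ""
	}
	return *s
}

func orZero(n *int64) int64 {
	if n == nil {
		return 0
	}
	return *n
}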
Example No. 6
func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap types.NamespacedName) {

	if !configmapcontroller.isSynced() {
		glog.V(4).Infof("Configmap controller not synced")
		configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false)
		return
	}

	key := configmap.String()
	baseConfigMapObj, exist, err := configmapcontroller.configmapInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main configmap store for %v: %v", key, err)
		configmapcontroller.deliverConfigMap(configmap, 0, true)
		return
	}

	if !exist {
		// Not federated configmap, ignoring.
		glog.V(8).Infof("Skipping not federated config map: %s", key)
		return
	}
	baseConfigMap := baseConfigMapObj.(*apiv1.ConfigMap)

	clusters, err := configmapcontroller.configmapFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v, retrying shortly", err)
		configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterConfigMapObj, found, err := configmapcontroller.configmapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v, retrying shortly", key, cluster.Name, err)
			configmapcontroller.deliverConfigMap(configmap, 0, true)
			return
		}

		// Do not modify data.
		desiredConfigMap := &apiv1.ConfigMap{
			ObjectMeta: util.DeepCopyRelevantObjectMeta(baseConfigMap.ObjectMeta),
			Data:       baseConfigMap.Data,
		}

		if !found {
			configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "CreateInCluster",
				"Creating configmap in cluster %s", cluster.Name)

			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredConfigMap,
				ClusterName: cluster.Name,
			})
		} else {
			clusterConfigMap := clusterConfigMapObj.(*apiv1.ConfigMap)

			// Update existing configmap, if needed.
			if !util.ConfigMapEquivalent(desiredConfigMap, clusterConfigMap) {
				configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInCluster",
					"Updating configmap in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredConfigMap,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		glog.V(8).Infof("No operations needed for %s", key)
		return
	}
	err = configmapcontroller.federatedUpdater.UpdateWithOnError(operations, configmapcontroller.updateTimeout,
		func(op util.FederatedOperation, operror error) {
			configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInClusterFailed",
				"ConfigMap update in cluster %s failed: %v", op.ClusterName, operror)
		})

	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v, retrying shortly", key, err)
		configmapcontroller.deliverConfigMap(configmap, 0, true)
		return
	}
}
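util.FederatedOperation and federatedUpdater.UpdateWithOnError appear in Examples 4, 6, and 7 without definitions. Their approximate shape, inferred from the call sites (a sketch, not the actual federation source):

import "time"

type FederatedOperationType string

const (
	OperationTypeAdd    FederatedOperationType = "add"
	OperationTypeUpdate FederatedOperationType = "update"
	OperationTypeDelete FederatedOperationType = "delete"
)

// FederatedOperation describes one create/update/delete to perform in one
// member cluster. Obj is the desired object (a runtime.Object in the real code).
type FederatedOperation struct {
	Type        FederatedOperationType
	Obj         interface{}
	ClusterName string
}

// FederatedUpdater applies a batch of operations within a shared timeout,
// invokes onError once per failed operation, and returns an overall error.
type FederatedUpdater interface {
	UpdateWithOnError(ops []FederatedOperation, timeout time.Duration,
		onError func(op FederatedOperation, err error)) error
}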
Example No. 7
func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
	glog.V(4).Infof("Reconciling ingress %q for all clusters", ingress)
	if !ic.isSynced() {
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	}

	key := ingress.String()
	baseIngressObjFromStore, exist, err := ic.ingressInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main ingress store for %v: %v", ingress, err)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	if !exist {
		// Not federated ingress, ignoring.
		glog.V(4).Infof("Ingress %q is not federated.  Ignoring.", ingress)
		return
	}
	baseIngressObj, err := api.Scheme.DeepCopy(baseIngressObjFromStore)
	baseIngress, ok := baseIngressObj.(*extensionsv1beta1.Ingress)
	if err != nil || !ok {
		glog.Errorf("Internal error %v: object retrieved from ingressInformerStore with key %q is not of correct type *extensionsv1beta1.Ingress: %v", err, key, baseIngressObj)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	glog.V(4).Infof("Base (federated) ingress: %v", baseIngress)

	if baseIngress.DeletionTimestamp != nil {
		if err := ic.delete(baseIngress); err != nil {
			glog.Errorf("Failed to delete %s: %v", ingress, err)
			ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "DeleteFailed",
				"Ingress delete failed: %v", err)
			ic.deliverIngress(ingress, 0, true)
		}
		return
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for ingress: %s",
		baseIngress.Name)
	// Add the required finalizers before creating a ingress in underlying clusters.
	updatedIngressObj, err := ic.deletionHelper.EnsureFinalizers(baseIngress)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in ingress %s: %v",
			baseIngress.Name, err)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	baseIngress = updatedIngressObj.(*extensionsv1beta1.Ingress)

	glog.V(3).Infof("Syncing ingress %s in underlying clusters", baseIngress.Name)

	clusters, err := ic.ingressFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	} else {
		glog.V(4).Infof("Found %d ready clusters across which to reconcile ingress %q", len(clusters), ingress)
	}

	operations := make([]util.FederatedOperation, 0)

	for _, cluster := range clusters {
		baseIPName, baseIPAnnotationExists := baseIngress.ObjectMeta.Annotations[staticIPNameKeyWritable]
		firstClusterName, firstClusterExists := baseIngress.ObjectMeta.Annotations[firstClusterAnnotation]
		clusterIngressObj, clusterIngressFound, err := ic.ingressFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get cached ingress %s for cluster %s, will retry: %v", ingress, cluster.Name, err)
			ic.deliverIngress(ingress, 0, true)
			return
		}
		desiredIngress := &extensionsv1beta1.Ingress{}
		objMeta, err := api.Scheme.DeepCopy(&baseIngress.ObjectMeta)
		if err != nil {
			glog.Errorf("Error deep copying ObjectMeta: %v", err)
		}
		objSpec, err := api.Scheme.DeepCopy(&baseIngress.Spec)
		if err != nil {
			glog.Errorf("Error deep copying Spec: %v", err)
		}
		objMetaCopy, ok := objMeta.(*metav1.ObjectMeta)
		if !ok {
			glog.Errorf("Internal error: Failed to cast to *metav1.ObjectMeta: %v", objMeta)
		}
		desiredIngress.ObjectMeta = *objMetaCopy
		objSpecCopy, ok := objSpec.(*extensionsv1beta1.IngressSpec)
		if !ok {
			glog.Errorf("Internal error: Failed to cast to extensionsv1beta1.Ingressespec: %v", objSpec)
		}
		desiredIngress.Spec = *objSpecCopy
		glog.V(4).Infof("Desired Ingress: %v", desiredIngress)

		if !clusterIngressFound {
			glog.V(4).Infof("No existing Ingress %s in cluster %s - checking if appropriate to queue a create operation", ingress, cluster.Name)
			// We can't supply server-created fields when creating a new object.
			desiredIngress.ObjectMeta = util.DeepCopyRelevantObjectMeta(baseIngress.ObjectMeta)
			ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "CreateInCluster",
				"Creating ingress in cluster %s", cluster.Name)

			// We always first create an ingress in the first available cluster. Once that ingress
			// has been created and allocated a global IP (visible via an annotation),
			// we record that annotation on the federated ingress, and create all other cluster
			// ingresses with that same global IP.
			// Note: If the first cluster becomes (e.g. temporarily) unavailable, the
			// second cluster will become the first cluster, but eventually all ingresses
			// will share the single global IP recorded in the annotation of the
			// federated ingress.
			haveFirstCluster := firstClusterExists && firstClusterName != "" && ic.isClusterReady(firstClusterName)
			if !haveFirstCluster {
				glog.V(4).Infof("No cluster has been chosen as the first cluster. Electing cluster %s as the first cluster to create ingress in", cluster.Name)
				ic.updateAnnotationOnIngress(baseIngress, firstClusterAnnotation, cluster.Name)
				return
			}
			if baseIPAnnotationExists || firstClusterName == cluster.Name {
				if baseIPAnnotationExists {
					glog.V(4).Infof("No existing Ingress %s in cluster %s and static IP annotation (%q) exists on base ingress - queuing a create operation", ingress, cluster.Name, staticIPNameKeyWritable)
				} else {
					glog.V(4).Infof("No existing Ingress %s in cluster %s and no static IP annotation (%q) on base ingress - queuing a create operation in first cluster", ingress, cluster.Name, staticIPNameKeyWritable)
				}
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeAdd,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
			} else {
				glog.V(4).Infof("No annotation %q exists on ingress %q in federation and waiting for ingress in cluster %s. Not queueing create operation for ingress until annotation exists", staticIPNameKeyWritable, ingress, firstClusterName)
			}
		} else {
			clusterIngress := clusterIngressObj.(*extensionsv1beta1.Ingress)
			glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required (in either direction)", ingress, cluster.Name)
			clusterIPName, clusterIPNameExists := clusterIngress.ObjectMeta.Annotations[staticIPNameKeyReadonly]
			baseLBStatusExists := len(baseIngress.Status.LoadBalancer.Ingress) > 0
			clusterLBStatusExists := len(clusterIngress.Status.LoadBalancer.Ingress) > 0
			logStr := fmt.Sprintf("Cluster ingress %q has annotation %q=%q, loadbalancer status exists? [%v], federated ingress has annotation %q=%q, loadbalancer status exists? [%v].  %%s annotation and/or loadbalancer status from cluster ingress to federated ingress.", ingress, staticIPNameKeyReadonly, clusterIPName, clusterLBStatusExists, staticIPNameKeyWritable, baseIPName, baseLBStatusExists)
			if (!baseIPAnnotationExists && clusterIPNameExists) || (!baseLBStatusExists && clusterLBStatusExists) { // copy the IP name from the readonly annotation on the cluster ingress, to the writable annotation on the federated ingress
				glog.V(4).Infof(logStr, "Transferring")
				if !baseIPAnnotationExists && clusterIPNameExists {
					ic.updateAnnotationOnIngress(baseIngress, staticIPNameKeyWritable, clusterIPName)
					return
				}
				if !baseLBStatusExists && clusterLBStatusExists {
					lbstatusObj, lbErr := api.Scheme.DeepCopy(&clusterIngress.Status.LoadBalancer)
					lbstatus, ok := lbstatusObj.(*v1.LoadBalancerStatus)
					if lbErr != nil || !ok {
						glog.Errorf("Internal error: Failed to clone LoadBalancerStatus of %q in cluster %q while attempting to update master loadbalancer ingress status, will try again later. error: %v, Object to be cloned: %v", ingress, cluster.Name, lbErr, lbstatusObj)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					}
					baseIngress.Status.LoadBalancer = *lbstatus
					glog.V(4).Infof("Attempting to update base federated ingress status: %v", baseIngress)
					if updatedFedIngress, err := ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).UpdateStatus(baseIngress); err != nil {
						glog.Errorf("Failed to update federated ingress status of %q (loadbalancer status), will try again later: %v", ingress, err)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					} else {
						glog.V(4).Infof("Successfully updated federated ingress status of %q (added loadbalancer status), after update: %q", ingress, updatedFedIngress)
						ic.deliverIngress(ingress, ic.smallDelay, false)
						return
					}
				}
			} else {
				glog.V(4).Infof(logStr, "Not transferring")
			}
			// Update existing cluster ingress, if needed.
			if util.ObjectMetaAndSpecEquivalent(baseIngress, clusterIngress) {
				glog.V(4).Infof("Ingress %q in cluster %q does not need an update: cluster ingress is equivalent to federated ingress", ingress, cluster.Name)
			} else {
				glog.V(4).Infof("Ingress %s in cluster %s needs an update: cluster ingress %v is not equivalent to federated ingress %v", ingress, cluster.Name, clusterIngress, desiredIngress)
				objMeta, err := api.Scheme.DeepCopy(&clusterIngress.ObjectMeta)
				if err != nil {
					glog.Errorf("Error deep copying ObjectMeta: %v", err)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
				}
				objMetaCopy, ok := objMeta.(*metav1.ObjectMeta)
				if !ok {
					glog.Errorf("Internal error: Failed to cast to metav1.ObjectMeta: %v", objMeta)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
				}
				desiredIngress.ObjectMeta = *objMetaCopy
				// Merge any annotations and labels on the federated ingress onto the underlying cluster ingress,
				// overwriting duplicates.
				if desiredIngress.ObjectMeta.Annotations == nil {
					desiredIngress.ObjectMeta.Annotations = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Annotations {
					desiredIngress.ObjectMeta.Annotations[key] = val
				}
				if desiredIngress.ObjectMeta.Labels == nil {
					desiredIngress.ObjectMeta.Labels = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Labels {
					desiredIngress.ObjectMeta.Labels[key] = val
				}
				ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "UpdateInCluster",
					"Updating ingress in cluster %s", cluster.Name)

				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
				// TODO: Transfer any readonly (target-proxy, url-map etc) annotations from the master cluster to the federation, if this is the master cluster.
				// This is only for consistency, so that the federation ingress metadata matches the underlying clusters. It's not actually required.
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		glog.V(4).Infof("Ingress %q is up-to-date in all clusters - no propagation to clusters required.", ingress)
		return
	}
	glog.V(4).Infof("Calling federatedUpdater.Update() - operations: %v", operations)
	err = ic.federatedIngressUpdater.UpdateWithOnError(operations, ic.updateTimeout, func(op util.FederatedOperation, operror error) {
		ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "FailedClusterUpdate",
			"Ingress update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", ingress, err)
		ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
		return
	}
	// Schedule another periodic reconciliation, only to account for possible bugs in watch processing.
	ic.deliverIngress(ingress, ic.ingressReviewDelay, false)
}
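updateAnnotationOnIngress, called when electing the first cluster and when transferring the static-IP name, is not shown. A plausible sketch, assuming it simply sets the annotation and writes the federated ingress back through the federation API client:

// Hypothetical sketch of updateAnnotationOnIngress: record one annotation on
// the federated ingress and persist it; the resulting watch event re-queues
// reconciliation.
func (ic *IngressController) updateAnnotationOnIngress(ingress *extensionsv1beta1.Ingress, key, value string) {
	if ingress.ObjectMeta.Annotations == nil {
		ingress.ObjectMeta.Annotations = make(map[string]string)
	}
	ingress.ObjectMeta.Annotations[key] = value
	glog.V(4).Infof("Attempting to update annotation %s=%s on ingress %q", key, value, ingress.Name)
	if _, err := ic.federatedApiClient.Extensions().Ingresses(ingress.Namespace).Update(ingress); err != nil {
		glog.Errorf("Failed to update annotation %s=%s on ingress %q: %v", key, value, ingress.Name, err)
	}
}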
Example No. 8
/*
  reconcileConfigMapForCluster ensures that the ingress controller's configmap in the cluster has a data UID
  consistent with all the other clusters in the federation. If clusterName == allClustersKey, then the configmaps
  of all available clusters are reconciled.
*/
func (ic *IngressController) reconcileConfigMapForCluster(clusterName string) {
	glog.V(4).Infof("Reconciling ConfigMap for cluster(s) %q", clusterName)

	if !ic.isSynced() {
		ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.clusterAvailableDelay)
		return
	}

	ingressList := ic.ingressInformerStore.List()
	if len(ingressList) <= 0 {
		glog.V(4).Infof("No federated ingresses, ignore reconcile config map.")
		return
	}

	if clusterName == allClustersKey {
		clusters, err := ic.configMapFederatedInformer.GetReadyClusters()
		if err != nil {
			glog.Errorf("Failed to get ready clusters.  redelivering %q: %v", clusterName, err)
			ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.clusterAvailableDelay)
			return
		}
		for _, cluster := range clusters {
			glog.V(4).Infof("Delivering ConfigMap for cluster(s) %q", clusterName)
			ic.configMapDeliverer.DeliverAt(cluster.Name, nil, time.Now())
		}
		return
	} else {
		cluster, found, err := ic.configMapFederatedInformer.GetReadyCluster(clusterName)
		if err != nil || !found {
			glog.Errorf("Internal error: Cluster %q queued for configmap reconciliation, but not found.  Will try again later: error = %v", clusterName, err)
			ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.clusterAvailableDelay)
			return
		}
		uidConfigMapNamespacedName := types.NamespacedName{Name: uidConfigMapName, Namespace: uidConfigMapNamespace}
		configMapObj, found, err := ic.configMapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, uidConfigMapNamespacedName.String())
		if !found || err != nil {
			glog.Errorf("Failed to get ConfigMap %q for cluster %q.  Will try again later: %v", uidConfigMapNamespacedName, cluster.Name, err)
			ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.configMapReviewDelay)
			return
		}
		glog.V(4).Infof("Successfully got ConfigMap %q for cluster %q.", uidConfigMapNamespacedName, clusterName)
		configMap, ok := configMapObj.(*v1.ConfigMap)
		if !ok {
			glog.Errorf("Internal error: The object in the ConfigMap cache for cluster %q configmap %q is not a *ConfigMap", cluster.Name, uidConfigMapNamespacedName)
			return
		}
		ic.reconcileConfigMap(cluster, configMap)
		return
	}
}
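The per-cluster reconcileConfigMap(cluster, configMap) that this dispatches to is not shown. In outline (a sketch under assumed names, not the actual implementation), it compares the UID entry in the cluster configmap's data against the federation-wide value and updates the cluster copy if they differ:

// Sketch of the per-cluster step. uidKey and getMasterUID are hypothetical
// names for the well-known data key and the accessor for the federation-wide
// UID; the real controller also arbitrates which cluster's UID wins.
func (ic *IngressController) reconcileConfigMap(cluster *federationapi.Cluster, configMap *v1.ConfigMap) {
	wantUID := ic.getMasterUID() // hypothetical accessor
	if configMap.Data[uidKey] == wantUID {
		return // already consistent across the federation
	}
	configMap.Data[uidKey] = wantUID
	client, err := ic.configMapFederatedInformer.GetClientsetForCluster(cluster.Name)
	if err != nil {
		glog.Errorf("Failed to get client for cluster %q: %v", cluster.Name, err)
		return
	}
	if _, err := client.Core().ConfigMaps(configMap.Namespace).Update(configMap); err != nil {
		glog.Errorf("Failed to update ConfigMap %q in cluster %q: %v", configMap.Name, cluster.Name, err)
	}
}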