Code Example #1
File: factory_test.go Project: alex-mohr/kubernetes
func TestBackoff(t *testing.T) {
	clock := fakeClock{}
	backoff := podBackoff{
		perPodBackoff:   map[types.NamespacedName]*backoffEntry{},
		clock:           &clock,
		defaultDuration: 1 * time.Second,
		maxDuration:     60 * time.Second,
	}

	tests := []struct {
		podID            types.NamespacedName
		expectedDuration time.Duration
		advanceClock     time.Duration
	}{
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "foo"},
			expectedDuration: 1 * time.Second,
		},
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "foo"},
			expectedDuration: 2 * time.Second,
		},
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "foo"},
			expectedDuration: 4 * time.Second,
		},
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "bar"},
			expectedDuration: 1 * time.Second,
			advanceClock:     120 * time.Second,
		},
		// 'foo' should have been gc'd here.
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "foo"},
			expectedDuration: 1 * time.Second,
		},
	}

	for _, test := range tests {
		duration := backoff.getEntry(test.podID).getBackoff(backoff.maxDuration)
		if duration != test.expectedDuration {
			t.Errorf("expected: %s, got %s for %s", test.expectedDuration.String(), duration.String(), test.podID)
		}
		clock.t = clock.t.Add(test.advanceClock)
		backoff.gc()
	}
	fooID := types.NamespacedName{Namespace: "default", Name: "foo"}
	backoff.perPodBackoff[fooID].backoff = 60 * time.Second
	duration := backoff.getEntry(fooID).getBackoff(backoff.maxDuration)
	if duration != 60*time.Second {
		t.Errorf("expected: 60, got %s", duration.String())
	}
	// Verify that we split on namespaces correctly, same name, different namespace
	fooID.Namespace = "other"
	duration = backoff.getEntry(fooID).getBackoff(backoff.maxDuration)
	if duration != 1*time.Second {
		t.Errorf("expected: 1, got %s", duration.String())
	}
}
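
Every example on this page keys its bookkeeping by types.NamespacedName, either directly (as the map key in the test above) or via its String() form. The minimal sketch below, which is not part of the project above, illustrates those two properties; the k8s.io/apimachinery/pkg/types import path is an assumption, since older Kubernetes trees exposed the same type as k8s.io/kubernetes/pkg/types.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// NamespacedName is a comparable struct of two strings, so it can be
	// used directly as a map key, as perPodBackoff does above.
	seen := map[types.NamespacedName]int{}
	foo := types.NamespacedName{Namespace: "default", Name: "foo"}
	other := types.NamespacedName{Namespace: "other", Name: "foo"}

	seen[foo]++
	seen[other]++ // same name, different namespace: a distinct key

	// String() renders the conventional "namespace/name" key used by caches.
	fmt.Println(foo.String()) // default/foo
	fmt.Println(len(seen))    // 2
}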
Code Example #2
// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure.
func (configmapcontroller *ConfigMapController) deliverConfigMap(configmap types.NamespacedName, delay time.Duration, failed bool) {
	key := configmap.String()
	if failed {
		configmapcontroller.configmapBackoff.Next(key, time.Now())
		delay = delay + configmapcontroller.configmapBackoff.Get(key)
	} else {
		configmapcontroller.configmapBackoff.Reset(key)
	}
	configmapcontroller.configmapDeliverer.DeliverAfter(key, &configmap, delay)
}
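
Code Examples #2 through #4 all follow the same deliver-with-backoff pattern: grow a per-key backoff when the previous attempt failed, reset it on success, and schedule redelivery after the combined delay. Below is a minimal, self-contained sketch of that pattern under a few assumptions: the backoff fields above are flowcontrol.Backoff values, the k8s.io/client-go/util/flowcontrol import path is assumed (older trees used k8s.io/kubernetes/pkg/util/flowcontrol), and deliverAfter is a hypothetical stand-in for the controllers' delayed deliverer.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/flowcontrol"
)

// deliverAfter is a hypothetical stand-in for configmapDeliverer.DeliverAfter.
func deliverAfter(key string, delay time.Duration) {
	fmt.Printf("would redeliver %s after %v\n", key, delay)
}

func deliver(backoff *flowcontrol.Backoff, nn types.NamespacedName, delay time.Duration, failed bool) {
	key := nn.String()
	if failed {
		backoff.Next(key, time.Now()) // grow this key's backoff window
		delay = delay + backoff.Get(key)
	} else {
		backoff.Reset(key) // a success clears the accumulated backoff
	}
	deliverAfter(key, delay)
}

func main() {
	backoff := flowcontrol.NewBackOff(5*time.Second, 1*time.Minute)
	cm := types.NamespacedName{Namespace: "default", Name: "site-config"}
	deliver(backoff, cm, 0, true)  // first failure: ~5s extra delay
	deliver(backoff, cm, 0, true)  // repeated failure: backoff grows
	deliver(backoff, cm, 0, false) // success: backoff reset
}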
Code Example #3
File: secret_controller.go Project: nak3/kubernetes
// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure.
func (secretcontroller *SecretController) deliverSecret(secret types.NamespacedName, delay time.Duration, failed bool) {
	key := secret.String()
	if failed {
		secretcontroller.secretBackoff.Next(key, time.Now())
		delay = delay + secretcontroller.secretBackoff.Get(key)
	} else {
		secretcontroller.secretBackoff.Reset(key)
	}
	secretcontroller.secretDeliverer.DeliverAfter(key, &secret, delay)
}
Code Example #4
func (ic *IngressController) deliverIngress(ingress types.NamespacedName, delay time.Duration, failed bool) {
	glog.V(4).Infof("Delivering ingress: %s with delay: %v error: %v", ingress, delay, failed)
	key := ingress.String()
	if failed {
		ic.ingressBackoff.Next(key, time.Now())
		delay = delay + ic.ingressBackoff.Get(key)
	} else {
		ic.ingressBackoff.Reset(key)
	}
	ic.ingressDeliverer.DeliverAfter(key, ingress, delay)
}
Code Example #5
/*
  reconcileConfigMapForCluster ensures that the configmap for the ingress controller in the cluster has objectmeta.data.UID
  consistent with all the other clusters in the federation. If clusterName == allClustersKey, then all available clusters'
  configmaps are reconciled.
*/
func (ic *IngressController) reconcileConfigMapForCluster(clusterName string) {
	glog.V(4).Infof("Reconciling ConfigMap for cluster(s) %q", clusterName)

	if !ic.isSynced() {
		ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.clusterAvailableDelay)
		return
	}

	ingressList := ic.ingressInformerStore.List()
	if len(ingressList) <= 0 {
		glog.V(4).Infof("No federated ingresses, ignore reconcile config map.")
		return
	}

	if clusterName == allClustersKey {
		clusters, err := ic.configMapFederatedInformer.GetReadyClusters()
		if err != nil {
			glog.Errorf("Failed to get ready clusters.  redelivering %q: %v", clusterName, err)
			ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.clusterAvailableDelay)
			return
		}
		for _, cluster := range clusters {
			glog.V(4).Infof("Delivering ConfigMap for cluster(s) %q", clusterName)
			ic.configMapDeliverer.DeliverAt(cluster.Name, nil, time.Now())
		}
		return
	} else {
		cluster, found, err := ic.configMapFederatedInformer.GetReadyCluster(clusterName)
		if err != nil || !found {
			glog.Errorf("Internal error: Cluster %q queued for configmap reconciliation, but not found.  Will try again later: error = %v", clusterName, err)
			ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.clusterAvailableDelay)
			return
		}
		uidConfigMapNamespacedName := types.NamespacedName{Name: uidConfigMapName, Namespace: uidConfigMapNamespace}
		configMapObj, found, err := ic.configMapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, uidConfigMapNamespacedName.String())
		if !found || err != nil {
			glog.Errorf("Failed to get ConfigMap %q for cluster %q.  Will try again later: %v", uidConfigMapNamespacedName, cluster.Name, err)
			ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.configMapReviewDelay)
			return
		}
		glog.V(4).Infof("Successfully got ConfigMap %q for cluster %q.", uidConfigMapNamespacedName, clusterName)
		configMap, ok := configMapObj.(*v1.ConfigMap)
		if !ok {
			glog.Errorf("Internal error: The object in the ConfigMap cache for cluster %q configmap %q is not a *ConfigMap", cluster.Name, uidConfigMapNamespacedName)
			return
		}
		ic.reconcileConfigMap(cluster, configMap)
		return
	}
}
Code Example #6
// Returns an error if processing the delta failed, along with a time.Duration
// indicating whether processing should be retried; zero means no-retry; otherwise
// we should retry in that Duration.
func (s *ServiceController) processDelta(delta *cache.Delta) (error, time.Duration) {
	var (
		namespacedName types.NamespacedName
		cachedService  *cachedService
	)

	deltaService, ok := delta.Object.(*api.Service)
	if ok {
		namespacedName.Namespace = deltaService.Namespace
		namespacedName.Name = deltaService.Name
		cachedService = s.cache.getOrCreate(namespacedName.String())
	} else {
		// If the DeltaFIFO saw a key in our cache that it didn't know about, it
		// can send a deletion with an unknown state. Grab the service from our
		// cache for deleting.
		key, ok := delta.Object.(cache.DeletedFinalStateUnknown)
		if !ok {
			return fmt.Errorf("delta contained object that wasn't a service or a deleted key: %+v", delta), doNotRetry
		}
		cachedService, ok = s.cache.get(key.Key)
		if !ok {
			return fmt.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), doNotRetry
		}
		deltaService = cachedService.lastState
		delta.Object = deltaService
		namespacedName = types.NamespacedName{Namespace: deltaService.Namespace, Name: deltaService.Name}
	}
	glog.V(2).Infof("Got new %s delta for service: %v", delta.Type, namespacedName)

	// Ensure that no other goroutine will interfere with our processing of the
	// service.
	cachedService.mu.Lock()
	defer cachedService.mu.Unlock()

	// Get the most recent state of the service from the API directly rather than
	// trusting the body of the delta. This avoids update re-ordering problems.
	// TODO: Handle sync delta types differently rather than doing a get on every
	// service every time we sync?
	service, err := s.kubeClient.Core().Services(namespacedName.Namespace).Get(namespacedName.Name)
	if err != nil && !errors.IsNotFound(err) {
		glog.Warningf("Failed to get most recent state of service %v from API (will retry): %v", namespacedName, err)
		return err, cachedService.nextRetryDelay()
	}
	if errors.IsNotFound(err) {
		glog.V(2).Infof("Service %v not found, ensuring load balancer is deleted", namespacedName)
		s.eventRecorder.Event(deltaService, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
		err := s.balancer.EnsureLoadBalancerDeleted(deltaService)
		if err != nil {
			message := "Error deleting load balancer (will retry): " + err.Error()
			s.eventRecorder.Event(deltaService, api.EventTypeWarning, "DeletingLoadBalancerFailed", message)
			return err, cachedService.nextRetryDelay()
		}
		s.eventRecorder.Event(deltaService, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
		s.cache.delete(namespacedName.String())

		cachedService.resetRetryDelay()
		return nil, doNotRetry
	}

	// Update the cached service (used above for populating synthetic deletes)
	cachedService.lastState = service

	err, retry := s.createLoadBalancerIfNeeded(namespacedName, service, cachedService.appliedState)
	if err != nil {
		message := "Error creating load balancer"
		if retry {
			message += " (will retry): "
		} else {
			message += " (will not retry): "
		}
		message += err.Error()
		s.eventRecorder.Event(service, api.EventTypeWarning, "CreatingLoadBalancerFailed", message)

		return err, cachedService.nextRetryDelay()
	}
	// Always update the cache upon success.
	// NOTE: Since we update the cached service if and only if we successfully
	// processed it, a cached service being nil implies that it hasn't yet
	// been successfully processed.
	cachedService.appliedState = service
	s.cache.set(namespacedName.String(), cachedService)

	cachedService.resetRetryDelay()
	return nil, doNotRetry
}
Code Example #7
File: aws_loadbalancer.go Project: 40a/bootkube
func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB, proxyProtocol bool) (*elb.LoadBalancerDescription, error) {
	loadBalancer, err := s.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return nil, err
	}

	dirty := false

	if loadBalancer == nil {
		createRequest := &elb.CreateLoadBalancerInput{}
		createRequest.LoadBalancerName = aws.String(loadBalancerName)

		createRequest.Listeners = listeners

		if internalELB {
			createRequest.Scheme = aws.String("internal")
		}

		// We are supposed to specify one subnet per AZ.
		// TODO: What happens if we have more than one subnet per AZ?
		createRequest.Subnets = stringPointerArray(subnetIDs)

		createRequest.SecurityGroups = stringPointerArray(securityGroupIDs)

		createRequest.Tags = []*elb.Tag{
			{Key: aws.String(TagNameKubernetesCluster), Value: aws.String(s.getClusterName())},
			{Key: aws.String(TagNameKubernetesService), Value: aws.String(namespacedName.String())},
		}

		glog.Infof("Creating load balancer for %v with name: ", namespacedName, loadBalancerName)
		_, err := s.elb.CreateLoadBalancer(createRequest)
		if err != nil {
			return nil, err
		}

		if proxyProtocol {
			err = s.createProxyProtocolPolicy(loadBalancerName)
			if err != nil {
				return nil, err
			}

			for _, listener := range listeners {
				glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort)
				err := s.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)})
				if err != nil {
					return nil, err
				}
			}
		}

		dirty = true
	} else {
		// TODO: Sync internal vs non-internal

		{
			// Sync subnets
			expected := sets.NewString(subnetIDs...)
			actual := stringSetFromPointers(loadBalancer.Subnets)

			additions := expected.Difference(actual)
			removals := actual.Difference(expected)

			if removals.Len() != 0 {
				request := &elb.DetachLoadBalancerFromSubnetsInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.Subnets = stringSetToPointers(removals)
				glog.V(2).Info("Detaching load balancer from removed subnets")
				_, err := s.elb.DetachLoadBalancerFromSubnets(request)
				if err != nil {
					return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %v", err)
				}
				dirty = true
			}

			if additions.Len() != 0 {
				request := &elb.AttachLoadBalancerToSubnetsInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.Subnets = stringSetToPointers(additions)
				glog.V(2).Info("Attaching load balancer to added subnets")
				_, err := s.elb.AttachLoadBalancerToSubnets(request)
				if err != nil {
					return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %v", err)
				}
				dirty = true
			}
		}

		{
			// Sync security groups
			expected := sets.NewString(securityGroupIDs...)
			actual := stringSetFromPointers(loadBalancer.SecurityGroups)

			if !expected.Equal(actual) {
				// This call just replaces the security groups, unlike e.g. subnets (!)
				request := &elb.ApplySecurityGroupsToLoadBalancerInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.SecurityGroups = stringPointerArray(securityGroupIDs)
				glog.V(2).Info("Applying updated security groups to load balancer")
				_, err := s.elb.ApplySecurityGroupsToLoadBalancer(request)
				if err != nil {
					return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %v", err)
				}
				dirty = true
			}
		}

		{
			// Sync listeners
			listenerDescriptions := loadBalancer.ListenerDescriptions

			foundSet := make(map[int]bool)
			removals := []*int64{}
			for _, listenerDescription := range listenerDescriptions {
				actual := listenerDescription.Listener
				if actual == nil {
					glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName)
					continue
				}

				found := -1
				for i, expected := range listeners {
					if orEmpty(actual.Protocol) != orEmpty(expected.Protocol) {
						continue
					}
					if orEmpty(actual.InstanceProtocol) != orEmpty(expected.InstanceProtocol) {
						continue
					}
					if orZero(actual.InstancePort) != orZero(expected.InstancePort) {
						continue
					}
					if orZero(actual.LoadBalancerPort) != orZero(expected.LoadBalancerPort) {
						continue
					}
					if orEmpty(actual.SSLCertificateId) != orEmpty(expected.SSLCertificateId) {
						continue
					}
					found = i
				}
				if found != -1 {
					foundSet[found] = true
				} else {
					removals = append(removals, actual.LoadBalancerPort)
				}
			}

			additions := []*elb.Listener{}
			for i := range listeners {
				if foundSet[i] {
					continue
				}
				additions = append(additions, listeners[i])
			}

			if len(removals) != 0 {
				request := &elb.DeleteLoadBalancerListenersInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.LoadBalancerPorts = removals
				glog.V(2).Info("Deleting removed load balancer listeners")
				_, err := s.elb.DeleteLoadBalancerListeners(request)
				if err != nil {
					return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %v", err)
				}
				dirty = true
			}

			if len(additions) != 0 {
				request := &elb.CreateLoadBalancerListenersInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.Listeners = additions
				glog.V(2).Info("Creating added load balancer listeners")
				_, err := s.elb.CreateLoadBalancerListeners(request)
				if err != nil {
					return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %v", err)
				}
				dirty = true
			}
		}

		{
			// Sync proxy protocol state for new and existing listeners

			proxyPolicies := make([]*string, 0)
			if proxyProtocol {
				// Ensure the backend policy exists

				// NOTE The documentation for the AWS API indicates we could get an HTTP 400
				// back if a policy of the same name already exists. However, the aws-sdk does not
				// seem to return an error to us in these cases. Therefore this will issue an API
				// request every time.
				err := s.createProxyProtocolPolicy(loadBalancerName)
				if err != nil {
					return nil, err
				}

				proxyPolicies = append(proxyPolicies, aws.String(ProxyProtocolPolicyName))
			}

			foundBackends := make(map[int64]bool)
			proxyProtocolBackends := make(map[int64]bool)
			for _, backendListener := range loadBalancer.BackendServerDescriptions {
				foundBackends[*backendListener.InstancePort] = false
				proxyProtocolBackends[*backendListener.InstancePort] = proxyProtocolEnabled(backendListener)
			}

			for _, listener := range listeners {
				setPolicy := false
				instancePort := *listener.InstancePort

				if currentState, ok := proxyProtocolBackends[instancePort]; !ok {
					// This is a new ELB backend so we only need to worry about
					// potentially adding a policy and not removing an
					// existing one
					setPolicy = proxyProtocol
				} else {
					foundBackends[instancePort] = true
					// This is an existing ELB backend so we need to determine
					// if the state changed
					setPolicy = (currentState != proxyProtocol)
				}

				if setPolicy {
					glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol)
					err := s.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies)
					if err != nil {
						return nil, err
					}
					dirty = true
				}
			}

			// We now need to figure out if any backend policies need to be removed
			// because these old policies will stick around even if there is no
			// corresponding listener anymore
			for instancePort, found := range foundBackends {
				if !found {
					glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort)
					err := s.setBackendPolicies(loadBalancerName, instancePort, []*string{})
					if err != nil {
						return nil, err
					}
					dirty = true
				}
			}
		}
	}

	if dirty {
		loadBalancer, err = s.describeLoadBalancer(loadBalancerName)
		if err != nil {
			glog.Warning("Unable to retrieve load balancer after creation/update")
			return nil, err
		}
	}

	return loadBalancer, nil
}
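
The subnet and security-group sections of ensureLoadBalancer both reconcile an expected set against the set actually attached to the ELB, and only issue AWS calls for the difference. Isolated from the AWS calls, that diffing step looks like the sketch below; the k8s.io/apimachinery/pkg/util/sets import path is an assumption (older trees used k8s.io/kubernetes/pkg/util/sets), and the subnet IDs are made up for illustration.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	expected := sets.NewString("subnet-a", "subnet-b") // what the Service wants
	actual := sets.NewString("subnet-b", "subnet-c")   // what the ELB currently has

	additions := expected.Difference(actual) // attach these
	removals := actual.Difference(expected)  // detach these

	fmt.Println(additions.List()) // [subnet-a]
	fmt.Println(removals.List())  // [subnet-c]
}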
Code Example #8
// Returns an error if processing the delta failed, along with a boolean
// indicator of whether the processing should be retried.
func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
	service, ok := delta.Object.(*api.Service)
	var namespacedName types.NamespacedName
	var cachedService *cachedService
	if !ok {
		// If the DeltaFIFO saw a key in our cache that it didn't know about, it
		// can send a deletion with an unknown state. Grab the service from our
		// cache for deleting.
		key, ok := delta.Object.(cache.DeletedFinalStateUnknown)
		if !ok {
			return fmt.Errorf("Delta contained object that wasn't a service or a deleted key: %+v", delta), notRetryable
		}
		cachedService, ok = s.cache.get(key.Key)
		if !ok {
			return fmt.Errorf("Service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), notRetryable
		}
		service = cachedService.lastState
		delta.Object = cachedService.lastState
		namespacedName = types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	} else {
		namespacedName.Namespace = service.Namespace
		namespacedName.Name = service.Name
		cachedService = s.cache.getOrCreate(namespacedName.String())
	}
	glog.V(2).Infof("Got new %s delta for service: %+v", delta.Type, service)

	// Ensure that no other goroutine will interfere with our processing of the
	// service.
	cachedService.mu.Lock()
	defer cachedService.mu.Unlock()

	// Update the cached service (used above for populating synthetic deletes)
	cachedService.lastState = service

	// TODO: Handle added, updated, and sync differently?
	switch delta.Type {
	case cache.Added:
		fallthrough
	case cache.Updated:
		fallthrough
	case cache.Sync:
		err, retry := s.createLoadBalancerIfNeeded(namespacedName, service, cachedService.appliedState)
		if err != nil {
			message := "Error creating load balancer"
			if retry {
				message += " (will retry): "
			} else {
				message += " (will not retry): "
			}
			message += err.Error()
			s.eventRecorder.Event(service, "CreatingLoadBalancerFailed", message)
			return err, retry
		}
		// Always update the cache upon success.
		// NOTE: Since we update the cached service if and only if we successfully
		// processed it, a cached service being nil implies that it hasn't yet
		// been successfully processed.
		cachedService.appliedState = service
		s.cache.set(namespacedName.String(), cachedService)
	case cache.Deleted:
		s.eventRecorder.Event(service, "DeletingLoadBalancer", "Deleting load balancer")
		err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region)
		if err != nil {
			message := "Error deleting load balancer (will retry): " + err.Error()
			s.eventRecorder.Event(service, "DeletingLoadBalancerFailed", message)
			return err, retryable
		}
		s.eventRecorder.Event(service, "DeletedLoadBalancer", "Deleted load balancer")
		s.cache.delete(namespacedName.String())
	default:
		glog.Errorf("Unexpected delta type: %v", delta.Type)
	}
	return nil, notRetryable
}
Code Example #9
func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
	glog.V(4).Infof("Reconciling ingress %q for all clusters", ingress)
	if !ic.isSynced() {
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	}

	key := ingress.String()
	baseIngressObjFromStore, exist, err := ic.ingressInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main ingress store for %v: %v", ingress, err)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	if !exist {
		// Not federated ingress, ignoring.
		glog.V(4).Infof("Ingress %q is not federated.  Ignoring.", ingress)
		return
	}
	baseIngressObj, err := conversion.NewCloner().DeepCopy(baseIngressObjFromStore)
	baseIngress, ok := baseIngressObj.(*extensionsv1beta1.Ingress)
	if err != nil || !ok {
		glog.Errorf("Internal Error %v : Object retrieved from ingressInformerStore with key %q is not of correct type *extensionsv1beta1.Ingress: %v", err, key, baseIngressObj)
	} else {
		glog.V(4).Infof("Base (federated) ingress: %v", baseIngress)
	}

	if baseIngress.DeletionTimestamp != nil {
		if err := ic.delete(baseIngress); err != nil {
			glog.Errorf("Failed to delete %s: %v", ingress, err)
			ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "DeleteFailed",
				"Ingress delete failed: %v", err)
			ic.deliverIngress(ingress, 0, true)
		}
		return
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for ingress: %s",
		baseIngress.Name)
	// Add the required finalizers before creating a ingress in underlying clusters.
	updatedIngressObj, err := ic.deletionHelper.EnsureFinalizers(baseIngress)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in ingress %s: %v",
			baseIngress.Name, err)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	baseIngress = updatedIngressObj.(*extensionsv1beta1.Ingress)

	glog.V(3).Infof("Syncing ingress %s in underlying clusters", baseIngress.Name)

	clusters, err := ic.ingressFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	} else {
		glog.V(4).Infof("Found %d ready clusters across which to reconcile ingress %q", len(clusters), ingress)
	}

	operations := make([]util.FederatedOperation, 0)

	for _, cluster := range clusters {
		baseIPName, baseIPAnnotationExists := baseIngress.ObjectMeta.Annotations[staticIPNameKeyWritable]
		firstClusterName, firstClusterExists := baseIngress.ObjectMeta.Annotations[firstClusterAnnotation]
		clusterIngressObj, clusterIngressFound, err := ic.ingressFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get cached ingress %s for cluster %s, will retry: %v", ingress, cluster.Name, err)
			ic.deliverIngress(ingress, 0, true)
			return
		}
		desiredIngress := &extensionsv1beta1.Ingress{}
		objMeta, err := conversion.NewCloner().DeepCopy(baseIngress.ObjectMeta)
		if err != nil {
			glog.Errorf("Error deep copying ObjectMeta: %v", err)
		}
		objSpec, err := conversion.NewCloner().DeepCopy(baseIngress.Spec)
		if err != nil {
			glog.Errorf("Error deep copying Spec: %v", err)
		}
		desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta)
		if !ok {
			glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
		}
		desiredIngress.Spec, ok = objSpec.(extensionsv1beta1.IngressSpec)
		if !ok {
			glog.Errorf("Internal error: Failed to cast to extensionsv1beta1.IngressSpec: %v", objSpec)
		}
		glog.V(4).Infof("Desired Ingress: %v", desiredIngress)

		if !clusterIngressFound {
			glog.V(4).Infof("No existing Ingress %s in cluster %s - checking if appropriate to queue a create operation", ingress, cluster.Name)
			// We can't supply server-created fields when creating a new object.
			desiredIngress.ObjectMeta = util.DeepCopyRelevantObjectMeta(baseIngress.ObjectMeta)
			ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "CreateInCluster",
				"Creating ingress in cluster %s", cluster.Name)

			// We always first create an ingress in the first available cluster. Once that ingress
			// has been created and allocated a global IP (visible via an annotation),
			// we record that annotation on the federated ingress, and create all other cluster
			// ingresses with that same global IP.
			// Note: If the first cluster becomes (e.g. temporarily) unavailable, the
			// second cluster will become the first cluster, but eventually all ingresses
			// will share the single global IP recorded in the annotation of the
			// federated ingress.
			haveFirstCluster := firstClusterExists && firstClusterName != "" && ic.isClusterReady(firstClusterName)
			if !haveFirstCluster {
				glog.V(4).Infof("No cluster has been chosen as the first cluster. Electing cluster %s as the first cluster to create ingress in", cluster.Name)
				ic.updateAnnotationOnIngress(baseIngress, firstClusterAnnotation, cluster.Name)
				return
			}
			if baseIPAnnotationExists || firstClusterName == cluster.Name {
				if baseIPAnnotationExists {
					glog.V(4).Infof("No existing Ingress %s in cluster %s and static IP annotation (%q) exists on base ingress - queuing a create operation", ingress, cluster.Name, staticIPNameKeyWritable)
				} else {
					glog.V(4).Infof("No existing Ingress %s in cluster %s and no static IP annotation (%q) on base ingress - queuing a create operation in first cluster", ingress, cluster.Name, staticIPNameKeyWritable)
				}
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeAdd,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
			} else {
				glog.V(4).Infof("No annotation %q exists on ingress %q in federation and waiting for ingress in cluster %s. Not queueing create operation for ingress until annotation exists", staticIPNameKeyWritable, ingress, firstClusterName)
			}
		} else {
			clusterIngress := clusterIngressObj.(*extensionsv1beta1.Ingress)
			glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required (in either direction)", ingress, cluster.Name)
			clusterIPName, clusterIPNameExists := clusterIngress.ObjectMeta.Annotations[staticIPNameKeyReadonly]
			baseLBStatusExists := len(baseIngress.Status.LoadBalancer.Ingress) > 0
			clusterLBStatusExists := len(clusterIngress.Status.LoadBalancer.Ingress) > 0
			logStr := fmt.Sprintf("Cluster ingress %q has annotation %q=%q, loadbalancer status exists? [%v], federated ingress has annotation %q=%q, loadbalancer status exists? [%v].  %%s annotation and/or loadbalancer status from cluster ingress to federated ingress.", ingress, staticIPNameKeyReadonly, clusterIPName, clusterLBStatusExists, staticIPNameKeyWritable, baseIPName, baseLBStatusExists)
			if (!baseIPAnnotationExists && clusterIPNameExists) || (!baseLBStatusExists && clusterLBStatusExists) { // copy the IP name from the readonly annotation on the cluster ingress, to the writable annotation on the federated ingress
				glog.V(4).Infof(logStr, "Transferring")
				if !baseIPAnnotationExists && clusterIPNameExists {
					ic.updateAnnotationOnIngress(baseIngress, staticIPNameKeyWritable, clusterIPName)
					return
				}
				if !baseLBStatusExists && clusterLBStatusExists {
					lbstatusObj, lbErr := conversion.NewCloner().DeepCopy(&clusterIngress.Status.LoadBalancer)
					lbstatus, ok := lbstatusObj.(*v1.LoadBalancerStatus)
					if lbErr != nil || !ok {
						glog.Errorf("Internal error: Failed to clone LoadBalancerStatus of %q in cluster %q while attempting to update master loadbalancer ingress status, will try again later. error: %v, Object to be cloned: %v", ingress, cluster.Name, lbErr, lbstatusObj)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					}
					baseIngress.Status.LoadBalancer = *lbstatus
					glog.V(4).Infof("Attempting to update base federated ingress status: %v", baseIngress)
					if updatedFedIngress, err := ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).UpdateStatus(baseIngress); err != nil {
						glog.Errorf("Failed to update federated ingress status of %q (loadbalancer status), will try again later: %v", ingress, err)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					} else {
						glog.V(4).Infof("Successfully updated federated ingress status of %q (added loadbalancer status), after update: %q", ingress, updatedFedIngress)
						ic.deliverIngress(ingress, ic.smallDelay, false)
						return
					}
				}
			} else {
				glog.V(4).Infof(logStr, "Not transferring")
			}
			// Update existing cluster ingress, if needed.
			if util.ObjectMetaAndSpecEquivalent(baseIngress, clusterIngress) {
				glog.V(4).Infof("Ingress %q in cluster %q does not need an update: cluster ingress is equivalent to federated ingress", ingress, cluster.Name)
			} else {
				glog.V(4).Infof("Ingress %s in cluster %s needs an update: cluster ingress %v is not equivalent to federated ingress %v", ingress, cluster.Name, clusterIngress, desiredIngress)
				objMeta, err := conversion.NewCloner().DeepCopy(clusterIngress.ObjectMeta)
				if err != nil {
					glog.Errorf("Error deep copying ObjectMeta: %v", err)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)

				}
				desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta)
				if !ok {
					glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
				}
				// Merge any annotations and labels on the federated ingress onto the underlying cluster ingress,
				// overwriting duplicates.
				if desiredIngress.ObjectMeta.Annotations == nil {
					desiredIngress.ObjectMeta.Annotations = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Annotations {
					desiredIngress.ObjectMeta.Annotations[key] = val
				}
				if desiredIngress.ObjectMeta.Labels == nil {
					desiredIngress.ObjectMeta.Labels = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Labels {
					desiredIngress.ObjectMeta.Labels[key] = val
				}
				ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "UpdateInCluster",
					"Updating ingress in cluster %s", cluster.Name)

				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
				// TODO: Transfer any readonly (target-proxy, url-map etc) annotations from the master cluster to the federation, if this is the master cluster.
				// This is only for consistency, so that the federation ingress metadata matches the underlying clusters.  It's not actually required.
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		glog.V(4).Infof("Ingress %q is up-to-date in all clusters - no propagation to clusters required.", ingress)
		return
	}
	glog.V(4).Infof("Calling federatedUpdater.Update() - operations: %v", operations)
	err = ic.federatedIngressUpdater.UpdateWithOnError(operations, ic.updateTimeout, func(op util.FederatedOperation, operror error) {
		ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "FailedClusterUpdate",
			"Ingress update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", ingress, err)
		ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
		return
	}
	// Schedule another periodic reconciliation, only to account for possible bugs in watch processing.
	ic.deliverIngress(ingress, ic.ingressReviewDelay, false)
}
Code Example #10
func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap types.NamespacedName) {

	if !configmapcontroller.isSynced() {
		glog.V(4).Infof("Configmap controller not synced")
		configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false)
		return
	}

	key := configmap.String()
	baseConfigMapObj, exist, err := configmapcontroller.configmapInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main configmap store for %v: %v", key, err)
		configmapcontroller.deliverConfigMap(configmap, 0, true)
		return
	}

	if !exist {
		// Not federated configmap, ignoring.
		glog.V(8).Infof("Skipping not federated config map: %s", key)
		return
	}
	baseConfigMap := baseConfigMapObj.(*apiv1.ConfigMap)

	clusters, err := configmapcontroller.configmapFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v, retrying shortly", err)
		configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterConfigMapObj, found, err := configmapcontroller.configmapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v, retrying shortly", key, cluster.Name, err)
			configmapcontroller.deliverConfigMap(configmap, 0, true)
			return
		}

		// Do not modify data.
		desiredConfigMap := &apiv1.ConfigMap{
			ObjectMeta: util.DeepCopyRelevantObjectMeta(baseConfigMap.ObjectMeta),
			Data:       baseConfigMap.Data,
		}

		if !found {
			configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "CreateInCluster",
				"Creating configmap in cluster %s", cluster.Name)

			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredConfigMap,
				ClusterName: cluster.Name,
			})
		} else {
			clusterConfigMap := clusterConfigMapObj.(*apiv1.ConfigMap)

			// Update existing configmap, if needed.
			if !util.ConfigMapEquivalent(desiredConfigMap, clusterConfigMap) {
				configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInCluster",
					"Updating configmap in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredConfigMap,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		glog.V(8).Infof("No operations needed for %s", key)
		return
	}
	err = configmapcontroller.federatedUpdater.UpdateWithOnError(operations, configmapcontroller.updateTimeout,
		func(op util.FederatedOperation, operror error) {
			configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInClusterFailed",
				"ConfigMap update in cluster %s failed: %v", op.ClusterName, operror)
		})

	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v, retrying shortly", key, err)
		configmapcontroller.deliverConfigMap(configmap, 0, true)
		return
	}
}
Code Example #11
File: idle.go Project: dcbw/origin
// pairScalesWithScaleRefs takes some subresource references, a map of new scales for those subresource references,
// and annotations from an existing object.  It merges the scales and references found in the existing annotations
// with the new data (using the new scale in case of conflict if present and not 0, and the old scale otherwise),
// and returns a slice of RecordedScaleReferences suitable for using as the new annotation value.
func pairScalesWithScaleRefs(serviceName types.NamespacedName, annotations map[string]string, rawScaleRefs map[unidlingapi.CrossGroupObjectReference]struct{}, scales map[unidlingapi.CrossGroupObjectReference]int32) ([]unidlingapi.RecordedScaleReference, error) {
	oldTargetsRaw, hasOldTargets := annotations[unidlingapi.UnidleTargetAnnotation]

	scaleRefs := make([]unidlingapi.RecordedScaleReference, 0, len(rawScaleRefs))

	// initialize the list of new annotations
	for rawScaleRef := range rawScaleRefs {
		scaleRefs = append(scaleRefs, unidlingapi.RecordedScaleReference{
			CrossGroupObjectReference: rawScaleRef,
			Replicas:                  0,
		})
	}

	// if the new preserved scale would be 0, see if we have an old scale that we can use instead
	if hasOldTargets {
		var oldTargets []unidlingapi.RecordedScaleReference
		oldTargetsSet := make(map[unidlingapi.CrossGroupObjectReference]int)
		if err := json.Unmarshal([]byte(oldTargetsRaw), &oldTargets); err != nil {
			return nil, fmt.Errorf("unable to extract existing scale information from endpoints %s: %v", serviceName.String(), err)
		}

		for i, target := range oldTargets {
			oldTargetsSet[target.CrossGroupObjectReference] = i
		}

		// figure out which new targets were already present...
		for _, newScaleRef := range scaleRefs {
			if oldTargetInd, ok := oldTargetsSet[newScaleRef.CrossGroupObjectReference]; ok {
				if newScale, ok := scales[newScaleRef.CrossGroupObjectReference]; !ok || newScale == 0 {
					scales[newScaleRef.CrossGroupObjectReference] = oldTargets[oldTargetInd].Replicas
				}
				delete(oldTargetsSet, newScaleRef.CrossGroupObjectReference)
			}
		}

		// ...and add in any existing targets not already on the new list to the new list
		for _, ind := range oldTargetsSet {
			scaleRefs = append(scaleRefs, oldTargets[ind])
		}
	}

	for i := range scaleRefs {
		scaleRef := &scaleRefs[i]
		newScale, ok := scales[scaleRef.CrossGroupObjectReference]
		if !ok || newScale == 0 {
			newScale = 1
			if scaleRef.Replicas != 0 {
				newScale = scaleRef.Replicas
			}
		}

		scaleRef.Replicas = newScale
	}

	return scaleRefs, nil
}
Code Example #12
func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
	glog.V(4).Infof("Reconciling ingress %q", ingress)
	if !ic.isSynced() {
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	}

	key := ingress.String()
	baseIngressObj, exist, err := ic.ingressInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main ingress store for %v: %v", ingress, err)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	if !exist {
		// Not federated ingress, ignoring.
		glog.V(4).Infof("Ingress %q is not federated.  Ignoring.", ingress)
		return
	}
	baseIngress := baseIngressObj.(*extensions_v1beta1.Ingress)

	clusters, err := ic.ingressFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)

	for clusterIndex, cluster := range clusters {
		_, baseIPExists := baseIngress.ObjectMeta.Annotations[staticIPAnnotationKey]
		clusterIngressObj, found, err := ic.ingressFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", ingress, cluster.Name, err)
			ic.deliverIngress(ingress, 0, true)
			return
		}
		desiredIngress := &extensions_v1beta1.Ingress{
			ObjectMeta: baseIngress.ObjectMeta,
			Spec:       baseIngress.Spec,
		}

		if !found {
			// We can't supply server-created fields when creating a new object.
			desiredIngress.ObjectMeta.ResourceVersion = ""
			desiredIngress.ObjectMeta.UID = ""
			ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "CreateInCluster",
				"Creating ingress in cluster %s", cluster.Name)

			// We always first create an ingress in the first available cluster.  Once that ingress
			// has been created and allocated a global IP (visible via an annotation),
			// we record that annotation on the federated ingress, and create all other cluster
			// ingresses with that same global IP.
			// Note: If the first cluster becomes (e.g. temporarily) unavailable, the second cluster will be allocated
			// index 0, but eventually all ingresses will share the single global IP recorded in the annotation
			// of the federated ingress.
			if baseIPExists || (clusterIndex == 0) {
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeAdd,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
			}
		} else {
			clusterIngress := clusterIngressObj.(*extensions_v1beta1.Ingress)
			glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required", ingress, cluster.Name)
			clusterIPName, clusterIPExists := clusterIngress.ObjectMeta.Annotations[staticIPAnnotationKey]
			if !baseIPExists && clusterIPExists {
				// Add annotation to federated ingress via API.
				original, err := ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).Get(baseIngress.Name)
				if err == nil {
					original.ObjectMeta.Annotations[staticIPAnnotationKey] = clusterIPName
					if _, err = ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).Update(original); err != nil {
						glog.Errorf("Failed to add static IP annotation to federated ingress %q: %v", ingress, err)
					}
				} else {
					glog.Errorf("Failed to get federated ingress %q: %v", ingress, err)
				}
			}
			// Update existing ingress, if needed.
			if !util.ObjectMetaIsEquivalent(desiredIngress.ObjectMeta, clusterIngress.ObjectMeta) ||
				!reflect.DeepEqual(desiredIngress.Spec, clusterIngress.Spec) {
				// TODO: In some cases Ingress controllers in the clusters add annotations, so we ideally need to exclude those from
				// the equivalence comparison to cut down on unnecessary updates.
				glog.V(4).Infof("Ingress %s in cluster %s needs an update: cluster ingress %v is not equivalent to federated ingress %v", ingress, cluster.Name, clusterIngress, desiredIngress)
				// We need to use server-created fields from the cluster, not the desired object when updating.
				desiredIngress.ObjectMeta.ResourceVersion = clusterIngress.ObjectMeta.ResourceVersion
				desiredIngress.ObjectMeta.UID = clusterIngress.ObjectMeta.UID
				// Merge any annotations on the federated ingress onto the underlying cluster ingress,
				// overwriting duplicates.
				// TODO: We should probably use a PATCH operation for this instead.
				for key, val := range baseIngress.ObjectMeta.Annotations {
					desiredIngress.ObjectMeta.Annotations[key] = val
				}
				ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "UpdateInCluster",
					"Updating ingress in cluster %s", cluster.Name)

				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return
	}
	glog.V(4).Infof("Calling federatedUpdater.Update() - operations: %v", operations)
	err = ic.federatedUpdater.UpdateWithOnError(operations, ic.updateTimeout, func(op util.FederatedOperation, operror error) {
		ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "FailedUpdateInCluster",
			"Ingress update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", ingress, err)
		ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
		return
	}

	// Everything is in order, but let's be doubly sure - TODO: quinton: Why? This seems like a hack.
	ic.deliverIngress(ingress, ic.ingressReviewDelay, false)
}
Code Example #13
File: secret_controller.go Project: nak3/kubernetes
func (secretcontroller *SecretController) reconcileSecret(secret types.NamespacedName) {
	if !secretcontroller.isSynced() {
		secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
		return
	}

	key := secret.String()
	baseSecretObjFromStore, exist, err := secretcontroller.secretInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main secret store for %v: %v", key, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}

	if !exist {
		// Not federated secret, ignoring.
		return
	}

	// Create a copy before modifying the obj to prevent race condition with
	// other readers of obj from store.
	baseSecretObj, err := api.Scheme.DeepCopy(baseSecretObjFromStore)
	baseSecret, ok := baseSecretObj.(*apiv1.Secret)
	if err != nil || !ok {
		glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}
	if baseSecret.DeletionTimestamp != nil {
		if err := secretcontroller.delete(baseSecret); err != nil {
			glog.Errorf("Failed to delete %s: %v", secret, err)
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "DeleteFailed",
				"Secret delete failed: %v", err)
			secretcontroller.deliverSecret(secret, 0, true)
		}
		return
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for secret: %s",
		baseSecret.Name)
	// Add the required finalizers before creating a secret in underlying clusters.
	updatedSecretObj, err := secretcontroller.deletionHelper.EnsureFinalizers(baseSecret)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in secret %s: %v",
			baseSecret.Name, err)
		secretcontroller.deliverSecret(secret, 0, false)
		return
	}
	baseSecret = updatedSecretObj.(*apiv1.Secret)

	glog.V(3).Infof("Syncing secret %s in underlying clusters", baseSecret.Name)

	clusters, err := secretcontroller.secretFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterSecretObj, found, err := secretcontroller.secretFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
			secretcontroller.deliverSecret(secret, 0, true)
			return
		}

		// The data should not be modified.
		desiredSecret := &apiv1.Secret{
			ObjectMeta: util.DeepCopyRelevantObjectMeta(baseSecret.ObjectMeta),
			Data:       baseSecret.Data,
			Type:       baseSecret.Type,
		}

		if !found {
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "CreateInCluster",
				"Creating secret in cluster %s", cluster.Name)

			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredSecret,
				ClusterName: cluster.Name,
			})
		} else {
			clusterSecret := clusterSecretObj.(*apiv1.Secret)

			// Update existing secret, if needed.
			if !util.SecretEquivalent(*desiredSecret, *clusterSecret) {

				secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInCluster",
					"Updating secret in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredSecret,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return
	}
	err = secretcontroller.federatedUpdater.UpdateWithOnError(operations, secretcontroller.updateTimeout,
		func(op util.FederatedOperation, operror error) {
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInClusterFailed",
				"Secret update in cluster %s failed: %v", op.ClusterName, operror)
		})

	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", key, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}

	// Everything is in order, but let's be doubly sure
	secretcontroller.deliverSecret(secret, secretcontroller.secretReviewDelay, false)
}
Code Example #14
func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB bool) (*elb.LoadBalancerDescription, error) {
	loadBalancer, err := s.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return nil, err
	}

	dirty := false

	if loadBalancer == nil {
		createRequest := &elb.CreateLoadBalancerInput{}
		createRequest.LoadBalancerName = aws.String(loadBalancerName)

		createRequest.Listeners = listeners

		if internalELB {
			createRequest.Scheme = aws.String("internal")
		}

		// We are supposed to specify one subnet per AZ.
		// TODO: What happens if we have more than one subnet per AZ?
		createRequest.Subnets = stringPointerArray(subnetIDs)

		createRequest.SecurityGroups = stringPointerArray(securityGroupIDs)

		createRequest.Tags = []*elb.Tag{
			{Key: aws.String(TagNameKubernetesCluster), Value: aws.String(s.getClusterName())},
			{Key: aws.String(TagNameKubernetesService), Value: aws.String(namespacedName.String())},
		}

		glog.Infof("Creating load balancer for %v with name: ", namespacedName, loadBalancerName)
		_, err := s.elb.CreateLoadBalancer(createRequest)
		if err != nil {
			return nil, err
		}
		dirty = true
	} else {
		// TODO: Sync internal vs non-internal

		{
			// Sync subnets
			expected := sets.NewString(subnetIDs...)
			actual := stringSetFromPointers(loadBalancer.Subnets)

			additions := expected.Difference(actual)
			removals := actual.Difference(expected)

			if removals.Len() != 0 {
				request := &elb.DetachLoadBalancerFromSubnetsInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.Subnets = stringSetToPointers(removals)
				glog.V(2).Info("Detaching load balancer from removed subnets")
				_, err := s.elb.DetachLoadBalancerFromSubnets(request)
				if err != nil {
					return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %v", err)
				}
				dirty = true
			}

			if additions.Len() != 0 {
				request := &elb.AttachLoadBalancerToSubnetsInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.Subnets = stringSetToPointers(additions)
				glog.V(2).Info("Attaching load balancer to added subnets")
				_, err := s.elb.AttachLoadBalancerToSubnets(request)
				if err != nil {
					return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %v", err)
				}
				dirty = true
			}
		}

		{
			// Sync security groups
			expected := sets.NewString(securityGroupIDs...)
			actual := stringSetFromPointers(loadBalancer.SecurityGroups)

			if !expected.Equal(actual) {
				// This call just replaces the security groups, unlike e.g. subnets (!)
				request := &elb.ApplySecurityGroupsToLoadBalancerInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.SecurityGroups = stringPointerArray(securityGroupIDs)
				glog.V(2).Info("Applying updated security groups to load balancer")
				_, err := s.elb.ApplySecurityGroupsToLoadBalancer(request)
				if err != nil {
					return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %v", err)
				}
				dirty = true
			}
		}

		{
			// Sync listeners
			listenerDescriptions := loadBalancer.ListenerDescriptions

			foundSet := make(map[int]bool)
			removals := []*int64{}
			for _, listenerDescription := range listenerDescriptions {
				actual := listenerDescription.Listener
				if actual == nil {
					glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName)
					continue
				}

				found := -1
				for i, expected := range listeners {
					if orEmpty(actual.Protocol) != orEmpty(expected.Protocol) {
						continue
					}
					if orEmpty(actual.InstanceProtocol) != orEmpty(expected.InstanceProtocol) {
						continue
					}
					if orZero(actual.InstancePort) != orZero(expected.InstancePort) {
						continue
					}
					if orZero(actual.LoadBalancerPort) != orZero(expected.LoadBalancerPort) {
						continue
					}
					if orEmpty(actual.SSLCertificateId) != orEmpty(expected.SSLCertificateId) {
						continue
					}
					found = i
				}
				if found != -1 {
					foundSet[found] = true
				} else {
					removals = append(removals, actual.LoadBalancerPort)
				}
			}

			additions := []*elb.Listener{}
			for i := range listeners {
				if foundSet[i] {
					continue
				}
				additions = append(additions, listeners[i])
			}

			if len(removals) != 0 {
				request := &elb.DeleteLoadBalancerListenersInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.LoadBalancerPorts = removals
				glog.V(2).Info("Deleting removed load balancer listeners")
				_, err := s.elb.DeleteLoadBalancerListeners(request)
				if err != nil {
					return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %v", err)
				}
				dirty = true
			}

			if len(additions) != 0 {
				request := &elb.CreateLoadBalancerListenersInput{}
				request.LoadBalancerName = aws.String(loadBalancerName)
				request.Listeners = additions
				glog.V(2).Info("Creating added load balancer listeners")
				_, err := s.elb.CreateLoadBalancerListeners(request)
				if err != nil {
					return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %v", err)
				}
				dirty = true
			}
		}
	}

	if dirty {
		loadBalancer, err = s.describeLoadBalancer(loadBalancerName)
		if err != nil {
			glog.Warning("Unable to retrieve load balancer after creation/update")
			return nil, err
		}
	}

	return loadBalancer, nil
}
Code Example #15
func chooseServiceName(tc int, hint int) types.NamespacedName {
	var svc types.NamespacedName
	svc.Namespace = fmt.Sprintf("ns_%d", tc)
	svc.Name = fmt.Sprintf("name_%d", hint)
	return svc
}
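
A hypothetical call site for the helper above (not taken from the project): two hints within the same test case produce keys that differ only in the name component.

// Hypothetical usage of chooseServiceName from the same test file.
func exampleChooseServiceName() {
	a := chooseServiceName(1, 1)
	b := chooseServiceName(1, 2)
	fmt.Println(a.String()) // ns_1/name_1
	fmt.Println(b.String()) // ns_1/name_2
}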
Code Example #16
File: secret_controller.go Project: oszi/kubernetes
func (secretcontroller *SecretController) reconcileSecret(secret types.NamespacedName) {

	if !secretcontroller.isSynced() {
		secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
		return
	}

	key := secret.String()
	baseSecretObj, exist, err := secretcontroller.secretInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main secret store for %v: %v", key, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}

	if !exist {
		// Not federated secret, ignoring.
		return
	}
	baseSecret := baseSecretObj.(*api_v1.Secret)

	clusters, err := secretcontroller.secretFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterSecretObj, found, err := secretcontroller.secretFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
			secretcontroller.deliverSecret(secret, 0, true)
			return
		}

		desiredSecret := &api_v1.Secret{
			ObjectMeta: util.CopyObjectMeta(baseSecret.ObjectMeta),
			Data:       baseSecret.Data,
			Type:       baseSecret.Type,
		}

		if !found {
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "CreateInCluster",
				"Creating secret in cluster %s", cluster.Name)

			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredSecret,
				ClusterName: cluster.Name,
			})
		} else {
			clusterSecret := clusterSecretObj.(*api_v1.Secret)

			// Update existing secret, if needed.
			if !util.SecretEquivalent(*desiredSecret, *clusterSecret) {

				secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInCluster",
					"Updating secret in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredSecret,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return
	}
	err = secretcontroller.federatedUpdater.UpdateWithOnError(operations, secretcontroller.updateTimeout,
		func(op util.FederatedOperation, operror error) {
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInClusterFailed",
				"Secret update in cluster %s failed: %v", op.ClusterName, operror)
		})

	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", key, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}

	// Everything is in order, but let's be doubly sure
	secretcontroller.deliverSecret(secret, secretcontroller.secretReviewDelay, false)
}