Example #1
func (nc *NamespaceController) reconcileNamespace(namespace string) {
	if !nc.isSynced() {
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}

	baseNamespaceObj, exist, err := nc.namespaceInformerStore.GetByKey(namespace)
	if err != nil {
		glog.Errorf("Failed to query main namespace store for %v: %v", namespace, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}

	if !exist {
		// Not federated namespace, ignoring.
		return
	}
	baseNamespace := baseNamespaceObj.(*api_v1.Namespace)
	if baseNamespace.DeletionTimestamp != nil {
		if err := nc.delete(baseNamespace); err != nil {
			glog.Errorf("Failed to delete %s: %v", namespace, err)
			nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "DeleteFailed",
				"Namespace delete failed: %v", err)
			nc.deliverNamespace(namespace, 0, true)
		}
		return
	}

	clusters, err := nc.namespaceFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterNamespaceObj, found, err := nc.namespaceFederatedInformer.GetTargetStore().GetByKey(cluster.Name, namespace)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", namespace, cluster.Name, err)
			nc.deliverNamespace(namespace, 0, true)
			return
		}
		desiredNamespace := &api_v1.Namespace{
			ObjectMeta: util.CopyObjectMeta(baseNamespace.ObjectMeta),
			Spec:       baseNamespace.Spec,
		}

		if !found {
			nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "CreateInCluster",
				"Creating namespace in cluster %s", cluster.Name)

			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredNamespace,
				ClusterName: cluster.Name,
			})
		} else {
			clusterNamespace := clusterNamespaceObj.(*api_v1.Namespace)

			// Update existing namespace, if needed.
			if !util.ObjectMetaEquivalent(desiredNamespace.ObjectMeta, clusterNamespace.ObjectMeta) ||
				!reflect.DeepEqual(desiredNamespace.Spec, clusterNamespace.Spec) {
				nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "UpdateInCluster",
					"Updating namespace in cluster %s", cluster.Name)

				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredNamespace,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return
	}
	err = nc.federatedUpdater.UpdateWithOnError(operations, nc.updateTimeout, func(op util.FederatedOperation, operror error) {
		nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "UpdateInClusterFailed",
			"Namespace update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", namespace, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}

	// Everything is in order, but let's be doubly sure.
	nc.deliverNamespace(namespace, nc.namespaceReviewDelay, false)
}
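All of the reconcilers in this section funnel their work through the same two primitives: a slice of per-cluster operations and a federated updater that applies them. Below is a minimal sketch of the shapes implied by the call sites above; it is inferred from usage, not copied from the real federation util package, and the concrete types (OperationType as a string, Obj as runtime.Object) are assumptions.

// Sketch only: names come from the call sites above; concrete types are assumed.
type OperationType string

const (
	OperationTypeAdd    OperationType = "ADD"
	OperationTypeUpdate OperationType = "UPDATE"
)

type FederatedOperation struct {
	Type        OperationType  // what to do in the target cluster
	Obj         runtime.Object // desired object to create or update there (assumed type)
	ClusterName string         // which underlying cluster to apply it to
}

// UpdateWithOnError applies each operation against its cluster within the
// given timeout and invokes onError for every operation that fails.
type FederatedUpdater interface {
	UpdateWithOnError(ops []FederatedOperation, timeout time.Duration,
		onError func(FederatedOperation, error)) error
}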
func equivalentReplicaSet(fedReplicaSet, localReplicaSet *v1beta1.ReplicaSet) bool {
	localReplicaSetSpec := localReplicaSet.Spec
	localReplicaSetSpec.Replicas = fedReplicaSet.Spec.Replicas
	return fedutil.ObjectMetaEquivalent(fedReplicaSet.ObjectMeta, localReplicaSet.ObjectMeta) &&
		reflect.DeepEqual(fedReplicaSet.Spec, localReplicaSetSpec)
}
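Replicas is deliberately excluded from this comparison: the federation scheduler assigns each cluster its own share of the total, so per-cluster replica counts are expected to differ from the federated object's. A hypothetical example of the effect (assumes two ReplicaSets with empty, hence equivalent, ObjectMeta):

func ExampleEquivalentReplicaSet() {
	ten, three := int32(10), int32(3)
	fed := &v1beta1.ReplicaSet{Spec: v1beta1.ReplicaSetSpec{Replicas: &ten}}
	local := &v1beta1.ReplicaSet{Spec: v1beta1.ReplicaSetSpec{Replicas: &three}}
	// The specs differ only in Replicas, which is overridden before the
	// DeepEqual, so the two are reported as equivalent.
	fmt.Println(equivalentReplicaSet(fed, local))
	// Output: true
}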
func TestIngressController(t *testing.T) {
	fakeClusterList := federation_api.ClusterList{Items: []federation_api.Cluster{}}
	fakeConfigMapList1 := api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}}
	fakeConfigMapList2 := api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}}
	cluster1 := NewCluster("cluster1", api_v1.ConditionTrue)
	cluster2 := NewCluster("cluster2", api_v1.ConditionTrue)
	cfg1 := NewConfigMap("foo")
	cfg2 := NewConfigMap("bar") // Different UID from cfg1, so that we can check that they get reconciled.

	t.Log("Creating fake infrastructure")
	fedClient := &fake_federation_release_1_4.Clientset{}
	RegisterFakeList("clusters", &fedClient.Fake, &fakeClusterList)
	RegisterFakeList("ingresses", &fedClient.Fake, &extensions_v1beta1.IngressList{Items: []extensions_v1beta1.Ingress{}})
	fedIngressWatch := RegisterFakeWatch("ingresses", &fedClient.Fake)
	clusterWatch := RegisterFakeWatch("clusters", &fedClient.Fake)
	fedClusterUpdateChan := RegisterFakeCopyOnUpdate("clusters", &fedClient.Fake, clusterWatch)
	fedIngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &fedClient.Fake, fedIngressWatch)

	cluster1Client := &fake_kube_release_1_4.Clientset{}
	RegisterFakeList("ingresses", &cluster1Client.Fake, &extensions_v1beta1.IngressList{Items: []extensions_v1beta1.Ingress{}})
	RegisterFakeList("configmaps", &cluster1Client.Fake, &fakeConfigMapList1)
	cluster1IngressWatch := RegisterFakeWatch("ingresses", &cluster1Client.Fake)
	cluster1ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster1Client.Fake)
	cluster1IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)
	cluster1IngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)

	cluster2Client := &fake_kube_release_1_4.Clientset{}
	RegisterFakeList("ingresses", &cluster2Client.Fake, &extensions_v1beta1.IngressList{Items: []extensions_v1beta1.Ingress{}})
	RegisterFakeList("configmaps", &cluster2Client.Fake, &fakeConfigMapList2)
	cluster2IngressWatch := RegisterFakeWatch("ingresses", &cluster2Client.Fake)
	cluster2ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster2Client.Fake)
	cluster2IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster2Client.Fake, cluster2IngressWatch)
	cluster2ConfigMapUpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster2Client.Fake, cluster2ConfigMapWatch)

	clientFactoryFunc := func(cluster *federation_api.Cluster) (kube_release_1_4.Interface, error) {
		switch cluster.Name {
		case cluster1.Name:
			return cluster1Client, nil
		case cluster2.Name:
			return cluster2Client, nil
		default:
			return nil, fmt.Errorf("unknown cluster %q", cluster.Name)
		}
	}
	ingressController := NewIngressController(fedClient)
	ingressInformer := ToFederatedInformerForTestOnly(ingressController.ingressFederatedInformer)
	ingressInformer.SetClientFactory(clientFactoryFunc)
	configMapInformer := ToFederatedInformerForTestOnly(ingressController.configMapFederatedInformer)
	configMapInformer.SetClientFactory(clientFactoryFunc)
	ingressController.clusterAvailableDelay = time.Second
	ingressController.ingressReviewDelay = 50 * time.Millisecond
	ingressController.configMapReviewDelay = 50 * time.Millisecond
	ingressController.smallDelay = 20 * time.Millisecond
	ingressController.updateTimeout = 5 * time.Second

	stop := make(chan struct{})
	t.Log("Running Ingress Controller")
	ingressController.Run(stop)

	ing1 := extensions_v1beta1.Ingress{
		ObjectMeta: api_v1.ObjectMeta{
			Name:      "test-ingress",
			Namespace: "mynamespace",
			SelfLink:  "/api/v1/namespaces/mynamespace/ingress/test-ingress",
			Annotations: map[string]string{}, // needed: the test writes ing1.Annotations below
		},
		Status: extensions_v1beta1.IngressStatus{
			LoadBalancer: api_v1.LoadBalancerStatus{
				Ingress: make([]api_v1.LoadBalancerIngress, 0, 0),
			},
		},
	}

	t.Log("Adding cluster 1")
	clusterWatch.Add(cluster1)

	t.Log("Adding Ingress UID ConfigMap to cluster 1")
	cluster1ConfigMapWatch.Add(cfg1)

	t.Log("Checking that UID annotation on Cluster 1 annotation was correctly updated")
	cluster := GetClusterFromChan(fedClusterUpdateChan)
	assert.NotNil(t, cluster)
	assert.Equal(t, cluster.ObjectMeta.Annotations[uidAnnotationKey], cfg1.Data[uidKey])

	// Test add federated ingress.
	t.Log("Adding Federated Ingress")
	fedIngressWatch.Add(&ing1)
	t.Log("Checking that Ingress was correctly created in cluster 1")
	createdIngress := GetIngressFromChan(t, cluster1IngressCreateChan)
	assert.NotNil(t, createdIngress)
	assert.True(t, reflect.DeepEqual(ing1.Spec, createdIngress.Spec), "Spec of created ingress is not equal")
	assert.True(t, util.ObjectMetaEquivalent(ing1.ObjectMeta, createdIngress.ObjectMeta), "Metadata of created object is not equivalent")

	// Test that IP address gets transferred from cluster ingress to federated ingress.
	t.Log("Checking that IP address gets transferred from cluster ingress to federated ingress")
	createdIngress.Status.LoadBalancer.Ingress = append(createdIngress.Status.LoadBalancer.Ingress, api_v1.LoadBalancerIngress{IP: "1.2.3.4"})
	cluster1IngressWatch.Modify(createdIngress)
	updatedIngress := GetIngressFromChan(t, fedIngressUpdateChan)
	assert.NotNil(t, updatedIngress, "Cluster's ingress load balancer status was not correctly transferred to the federated ingress")
	if updatedIngress != nil {
		assert.True(t, reflect.DeepEqual(createdIngress.Status.LoadBalancer.Ingress, updatedIngress.Status.LoadBalancer.Ingress), fmt.Sprintf("Ingress IP was not transferred from cluster ingress to federated ingress.  %v is not equal to %v", createdIngress.Status.LoadBalancer.Ingress, updatedIngress.Status.LoadBalancer.Ingress))
	}

	// Test update federated ingress.
	if updatedIngress.ObjectMeta.Annotations == nil {
		updatedIngress.ObjectMeta.Annotations = make(map[string]string)
	}
	updatedIngress.ObjectMeta.Annotations["A"] = "B"
	t.Log("Modifying Federated Ingress")
	fedIngressWatch.Modify(updatedIngress)
	t.Log("Checking that Ingress was correctly updated in cluster 1")
	updatedIngress2 := GetIngressFromChan(t, cluster1IngressUpdateChan)
	assert.NotNil(t, updatedIngress2)
	assert.True(t, reflect.DeepEqual(updatedIngress2.Spec, updatedIngress.Spec), "Spec of updated ingress is not equal")
	assert.Equal(t, updatedIngress2.ObjectMeta.Annotations["A"], updatedIngress.ObjectMeta.Annotations["A"], "Updated annotation not transferred from federated to cluster ingress.")
	// Test add cluster
	t.Log("Adding a second cluster")
	ing1.Annotations[staticIPNameKeyWritable] = "foo" // Make sure that the base object has a static IP name first.
	fedIngressWatch.Modify(&ing1)
	clusterWatch.Add(cluster2)
	// First check that the original values are not equal - see above comment
	assert.NotEqual(t, cfg1.Data[uidKey], cfg2.Data[uidKey], "ConfigMap in cluster 2 must initially not equal that in cluster 1 for this test - please fix test")
	cluster2ConfigMapWatch.Add(cfg2)
	t.Log("Checking that the ingress got created in cluster 2")
	createdIngress2 := GetIngressFromChan(t, cluster2IngressCreateChan)
	assert.NotNil(t, createdIngress2)
	assert.True(t, reflect.DeepEqual(ing1.Spec, createdIngress2.Spec), "Spec of created ingress is not equal")
	assert.True(t, util.ObjectMetaEquivalent(ing1.ObjectMeta, createdIngress2.ObjectMeta), "Metadata of created object is not equivalent")

	t.Log("Checking that the configmap in cluster 2 got updated.")
	updatedConfigMap2 := GetConfigMapFromChan(cluster2ConfigMapUpdateChan)
	assert.NotNil(t, updatedConfigMap2, "ConfigMap in cluster 2 was not updated (or more likely the test is broken and the API type written is wrong)")
	if updatedConfigMap2 != nil {
		assert.Equal(t, cfg1.Data[uidKey], updatedConfigMap2.Data[uidKey],
			fmt.Sprintf("UIDs in ConfigMaps in clusters 1 and 2 are not equal (%q != %q)", cfg1.Data[uidKey], updatedConfigMap2.Data[uidKey]))
	}

	close(stop)
}
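The test above uses one fake-infrastructure pattern throughout: inject an event on a registered fake watch, let the controller reconcile, then read what the controller wrote back from the corresponding recorded channel. In miniature, using the same helpers (a sketch of the pattern, not additional coverage):

func exampleFakeWatchPattern(t *testing.T, clusterClient *fake_kube_release_1_4.Clientset, ing *extensions_v1beta1.Ingress) {
	watch := RegisterFakeWatch("ingresses", &clusterClient.Fake)
	createChan := RegisterFakeCopyOnCreate("ingresses", &clusterClient.Fake, watch)

	watch.Add(ing)                               // simulate an API-server event
	created := GetIngressFromChan(t, createChan) // observe the controller's write
	assert.NotNil(t, created)
}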
func TestIngressController(t *testing.T) {
	fakeClusterList := federationapi.ClusterList{Items: []federationapi.Cluster{}}
	fakeConfigMapList1 := apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}}
	fakeConfigMapList2 := apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}}
	cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
	cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
	cfg1 := NewConfigMap("foo")
	cfg2 := NewConfigMap("bar") // Different UID from cfg1, so that we can check that they get reconciled.

	t.Log("Creating fake infrastructure")
	fedClient := &fakefedclientset.Clientset{}
	RegisterFakeList("clusters", &fedClient.Fake, &fakeClusterList)
	RegisterFakeList("ingresses", &fedClient.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
	fedIngressWatch := RegisterFakeWatch("ingresses", &fedClient.Fake)
	clusterWatch := RegisterFakeWatch("clusters", &fedClient.Fake)
	fedClusterUpdateChan := RegisterFakeCopyOnUpdate("clusters", &fedClient.Fake, clusterWatch)
	fedIngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &fedClient.Fake, fedIngressWatch)

	cluster1Client := &fakekubeclientset.Clientset{}
	RegisterFakeList("ingresses", &cluster1Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
	RegisterFakeList("configmaps", &cluster1Client.Fake, &fakeConfigMapList1)
	cluster1IngressWatch := RegisterFakeWatch("ingresses", &cluster1Client.Fake)
	cluster1ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster1Client.Fake)
	cluster1IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)
	cluster1IngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)

	cluster2Client := &fakekubeclientset.Clientset{}
	RegisterFakeList("ingresses", &cluster2Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
	RegisterFakeList("configmaps", &cluster2Client.Fake, &fakeConfigMapList2)
	cluster2IngressWatch := RegisterFakeWatch("ingresses", &cluster2Client.Fake)
	cluster2ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster2Client.Fake)
	cluster2IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster2Client.Fake, cluster2IngressWatch)
	cluster2ConfigMapUpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster2Client.Fake, cluster2ConfigMapWatch)

	clientFactoryFunc := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
		switch cluster.Name {
		case cluster1.Name:
			return cluster1Client, nil
		case cluster2.Name:
			return cluster2Client, nil
		default:
			return nil, fmt.Errorf("unknown cluster %q", cluster.Name)
		}
	}
	ingressController := NewIngressController(fedClient)
	ingressInformer := ToFederatedInformerForTestOnly(ingressController.ingressFederatedInformer)
	ingressInformer.SetClientFactory(clientFactoryFunc)
	configMapInformer := ToFederatedInformerForTestOnly(ingressController.configMapFederatedInformer)
	configMapInformer.SetClientFactory(clientFactoryFunc)
	ingressController.clusterAvailableDelay = time.Second
	ingressController.ingressReviewDelay = 100 * time.Millisecond
	ingressController.configMapReviewDelay = 100 * time.Millisecond
	ingressController.smallDelay = 100 * time.Millisecond
	ingressController.updateTimeout = 5 * time.Second

	stop := make(chan struct{})
	t.Log("Running Ingress Controller")
	ingressController.Run(stop)

	// TODO: Here we are creating the ingress with first cluster annotation.
	// Add another test without that annotation when
	// https://github.com/kubernetes/kubernetes/issues/36540 is fixed.
	fedIngress := extensionsv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-ingress",
			Namespace: "mynamespace",
			SelfLink:  "/api/v1/namespaces/mynamespace/ingress/test-ingress",
			Annotations: map[string]string{
				firstClusterAnnotation: cluster1.Name,
			},
		},
		Status: extensionsv1beta1.IngressStatus{
			LoadBalancer: apiv1.LoadBalancerStatus{
				Ingress: make([]apiv1.LoadBalancerIngress, 0, 0),
			},
		},
	}

	t.Log("Adding cluster 1")
	clusterWatch.Add(cluster1)

	t.Log("Adding Ingress UID ConfigMap to cluster 1")
	cluster1ConfigMapWatch.Add(cfg1)

	// Test add federated ingress.
	t.Log("Adding Federated Ingress")
	fedIngressWatch.Add(&fedIngress)

	t.Log("Checking that UID annotation on Cluster 1 annotation was correctly updated after adding Federated Ingress")
	cluster := GetClusterFromChan(fedClusterUpdateChan)
	assert.NotNil(t, cluster)
	assert.Equal(t, cluster.ObjectMeta.Annotations[uidAnnotationKey], cfg1.Data[uidKey])

	t.Logf("Checking that appropriate finalizers are added")
	// There should be 2 updates to add both the finalizers.
	updatedIngress := GetIngressFromChan(t, fedIngressUpdateChan)
	assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
	updatedIngress = GetIngressFromChan(t, fedIngressUpdateChan)
	assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, apiv1.FinalizerOrphan), fmt.Sprintf("ingress does not have the orphan finalizer: %v", updatedIngress))
	fedIngress = *updatedIngress

	t.Log("Checking that Ingress was correctly created in cluster 1")
	createdIngress := GetIngressFromChan(t, cluster1IngressCreateChan)
	assert.NotNil(t, createdIngress)
	cluster1Ingress := *createdIngress
	assert.True(t, reflect.DeepEqual(fedIngress.Spec, cluster1Ingress.Spec), "Spec of created ingress is not equal")
	assert.True(t, util.ObjectMetaEquivalent(fedIngress.ObjectMeta, cluster1Ingress.ObjectMeta),
		"Metadata of created object is not equivalent")

	// Wait for finalizers to appear in federation store.
	assert.NoError(t, WaitForFinalizersInFederationStore(ingressController, ingressController.ingressInformerStore,
		types.NamespacedName{Namespace: fedIngress.Namespace, Name: fedIngress.Name}.String()), "finalizers not found in federated ingress")

	// Wait for the cluster ingress to appear in cluster store.
	assert.NoError(t, WaitForIngressInClusterStore(ingressController.ingressFederatedInformer.GetTargetStore(), cluster1.Name,
		types.NamespacedName{Namespace: createdIngress.Namespace, Name: createdIngress.Name}.String()),
		"Created ingress not found in underlying cluster store")

	// Test that IP address gets transferred from cluster ingress to federated ingress.
	t.Log("Checking that IP address gets transferred from cluster ingress to federated ingress")
	cluster1Ingress.Status.LoadBalancer.Ingress = append(cluster1Ingress.Status.LoadBalancer.Ingress,
		apiv1.LoadBalancerIngress{IP: "1.2.3.4"})
	glog.Infof("Setting artificial IP address for cluster1 ingress")

	for trial := 0; trial < maxTrials; trial++ {
		cluster1IngressWatch.Modify(&cluster1Ingress)
		// Wait for store to see the updated cluster ingress.
		key := types.NamespacedName{Namespace: createdIngress.Namespace, Name: createdIngress.Name}.String()
		if err := WaitForStatusUpdate(t, ingressController.ingressFederatedInformer.GetTargetStore(),
			cluster1.Name, key, cluster1Ingress.Status.LoadBalancer, time.Second); err != nil {
			continue
		}
		if err := WaitForFedStatusUpdate(t, ingressController.ingressInformerStore,
			key, cluster1Ingress.Status.LoadBalancer, time.Second); err != nil {
			continue
		}
		break
	}

	for trial := 0; trial < maxTrials; trial++ {
		updatedIngress = GetIngressFromChan(t, fedIngressUpdateChan)
		assert.NotNil(t, updatedIngress, "Cluster's ingress load balancer status was not correctly transferred to the federated ingress")
		if updatedIngress == nil {
			return
		}
		if reflect.DeepEqual(cluster1Ingress.Status.LoadBalancer.Ingress, updatedIngress.Status.LoadBalancer.Ingress) {
			fedIngress.Status.LoadBalancer = updatedIngress.Status.LoadBalancer
			break
		} else {
			glog.Infof("Status check failed: expected: %v actual: %v", cluster1Ingress.Status, updatedIngress.Status)
		}
	}
	glog.Infof("Status check: expected: %v actual: %v", cluster1Ingress.Status, updatedIngress.Status)
	assert.True(t, reflect.DeepEqual(cluster1Ingress.Status.LoadBalancer.Ingress, updatedIngress.Status.LoadBalancer.Ingress),
		fmt.Sprintf("Ingress IP was not transferred from cluster ingress to federated ingress.  %v is not equal to %v",
			cluster1Ingress.Status.LoadBalancer.Ingress, updatedIngress.Status.LoadBalancer.Ingress))

	assert.NoError(t, WaitForStatusUpdate(t, ingressController.ingressFederatedInformer.GetTargetStore(),
		cluster1.Name, types.NamespacedName{Namespace: createdIngress.Namespace, Name: createdIngress.Name}.String(),
		cluster1Ingress.Status.LoadBalancer, time.Second))
	assert.NoError(t, WaitForFedStatusUpdate(t, ingressController.ingressInformerStore,
		types.NamespacedName{Namespace: createdIngress.Namespace, Name: createdIngress.Name}.String(),
		cluster1Ingress.Status.LoadBalancer, time.Second))
	t.Logf("expected: %v, actual: %v", createdIngress, updatedIngress)

	// Test update federated ingress.
	if fedIngress.ObjectMeta.Annotations == nil {
		fedIngress.ObjectMeta.Annotations = make(map[string]string)
	}
	fedIngress.ObjectMeta.Annotations["A"] = "B"
	t.Log("Modifying Federated Ingress")
	fedIngressWatch.Modify(&fedIngress)
	t.Log("Checking that Ingress was correctly updated in cluster 1")
	var updatedIngress2 *extensionsv1beta1.Ingress

	for trial := 0; trial < maxTrials; trial++ {
		updatedIngress2 = GetIngressFromChan(t, cluster1IngressUpdateChan)
		assert.NotNil(t, updatedIngress2)
		if updatedIngress2 == nil {
			return
		}
		if reflect.DeepEqual(fedIngress.Spec, updatedIngress2.Spec) &&
			updatedIngress2.ObjectMeta.Annotations["A"] == fedIngress.ObjectMeta.Annotations["A"] {
			break
		}
	}

	assert.True(t, reflect.DeepEqual(updatedIngress2.Spec, fedIngress.Spec), "Spec of updated ingress is not equal")
	assert.Equal(t, updatedIngress2.ObjectMeta.Annotations["A"], fedIngress.ObjectMeta.Annotations["A"], "Updated annotation not transferred from federated to cluster ingress.")

	// Test add cluster
	t.Log("Adding a second cluster")

	fedIngress.Annotations[staticIPNameKeyWritable] = "foo" // Make sure that the base object has a static IP name first.
	fedIngressWatch.Modify(&fedIngress)
	clusterWatch.Add(cluster2)
	// First check that the original values are not equal - see above comment
	assert.NotEqual(t, cfg1.Data[uidKey], cfg2.Data[uidKey], "ConfigMap in cluster 2 must initially not equal that in cluster 1 for this test - please fix test")
	cluster2ConfigMapWatch.Add(cfg2)
	t.Log("Checking that the ingress got created in cluster 2")
	createdIngress2 := GetIngressFromChan(t, cluster2IngressCreateChan)
	assert.NotNil(t, createdIngress2)
	assert.True(t, reflect.DeepEqual(fedIngress.Spec, createdIngress2.Spec), "Spec of created ingress is not equal")
	t.Logf("created meta: %v fed meta: %v", createdIngress2.ObjectMeta, fedIngress.ObjectMeta)
	assert.True(t, util.ObjectMetaEquivalent(fedIngress.ObjectMeta, createdIngress2.ObjectMeta), "Metadata of created object is not equivalent")

	t.Log("Checking that the configmap in cluster 2 got updated.")
	updatedConfigMap2 := GetConfigMapFromChan(cluster2ConfigMapUpdateChan)
	assert.NotNil(t, updatedConfigMap2, "ConfigMap in cluster 2 was not updated (or more likely the test is broken and the API type written is wrong)")
	if updatedConfigMap2 != nil {
		assert.Equal(t, cfg1.Data[uidKey], updatedConfigMap2.Data[uidKey],
			fmt.Sprintf("UIDs in ConfigMaps in clusters 1 and 2 are not equal (%q != %q)", cfg1.Data[uidKey], updatedConfigMap2.Data[uidKey]))
	}

	close(stop)
}
func daemonsetsEqual(a, b extensionsv1.DaemonSet) bool {
	return util.ObjectMetaEquivalent(a.ObjectMeta, b.ObjectMeta) && reflect.DeepEqual(a.Spec, b.Spec)
}
func (daemonsetcontroller *DaemonSetController) reconcileDaemonSet(namespace string, daemonsetName string) {
	glog.V(4).Infof("Reconciling daemonset %s/%s", namespace, daemonsetName)

	if !daemonsetcontroller.isSynced() {
		glog.V(4).Infof("Daemonset controller is not synced")
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, daemonsetcontroller.clusterAvailableDelay, false)
		return
	}

	key := getDaemonSetKey(namespace, daemonsetName)
	baseDaemonSetObjFromStore, exist, err := daemonsetcontroller.daemonsetInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main daemonset store for %v: %v", key, err)
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
		return
	}

	if !exist {
		glog.V(4).Infof("Skipping daemonset %s/%s - not federated", namespace, daemonsetName)
		// Not federated daemonset, ignoring.
		return
	}
	baseDaemonSetObj, err := conversion.NewCloner().DeepCopy(baseDaemonSetObjFromStore)
	baseDaemonSet, ok := baseDaemonSetObj.(*extensionsv1.DaemonSet)
	if err != nil || !ok {
		glog.Errorf("Error in retrieving obj %s from store: %v, %v", daemonsetName, ok, err)
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
		return
	}
	if baseDaemonSet.DeletionTimestamp != nil {
		if err := daemonsetcontroller.delete(baseDaemonSet); err != nil {
			glog.Errorf("Failed to delete %s: %v", daemonsetName, err)
			daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "DeleteFailed",
				"DaemonSet delete failed: %v", err)
			daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
		}
		return
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for daemonset: %s",
		baseDaemonSet.Name)
	// Add the required finalizers before creating a daemonset in underlying clusters.
	updatedDaemonSetObj, err := daemonsetcontroller.deletionHelper.EnsureFinalizers(baseDaemonSet)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in daemonset %s: %v",
			baseDaemonSet.Name, err)
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, false)
		return
	}
	baseDaemonSet = updatedDaemonSetObj.(*extensionsv1.DaemonSet)

	glog.V(3).Infof("Syncing daemonset %s in underlying clusters", baseDaemonSet.Name)

	clusters, err := daemonsetcontroller.daemonsetFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, daemonsetcontroller.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterDaemonSetObj, found, err := daemonsetcontroller.daemonsetFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
			daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
			return
		}

		// Do not modify. Otherwise make a deep copy.
		desiredDaemonSet := &extensionsv1.DaemonSet{
			ObjectMeta: util.DeepCopyRelevantObjectMeta(baseDaemonSet.ObjectMeta),
			Spec:       util.DeepCopyApiTypeOrPanic(baseDaemonSet.Spec).(extensionsv1.DaemonSetSpec),
		}

		if !found {
			glog.V(4).Infof("Creating daemonset %s/%s in cluster %s", namespace, daemonsetName, cluster.Name)

			daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "CreateInCluster",
				"Creating daemonset in cluster %s", cluster.Name)

			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredDaemonSet,
				ClusterName: cluster.Name,
			})
		} else {
			clusterDaemonSet := clusterDaemonSetObj.(*extensionsv1.DaemonSet)

			// Update existing daemonset, if needed.
			if !util.ObjectMetaEquivalent(desiredDaemonSet.ObjectMeta, clusterDaemonSet.ObjectMeta) ||
				!reflect.DeepEqual(desiredDaemonSet.Spec, clusterDaemonSet.Spec) {

				glog.V(4).Infof("Upadting daemonset %s/%s in cluster %s", namespace, daemonsetName, cluster.Name)
				daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "UpdateInCluster",
					"Updating daemonset in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredDaemonSet,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		glog.V(4).Infof("No operation needed for %s/%s", namespace, daemonsetName)
		// Everything is in order
		return
	}
	err = daemonsetcontroller.federatedUpdater.UpdateWithOnError(operations, daemonsetcontroller.updateTimeout,
		func(op util.FederatedOperation, operror error) {
			daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "UpdateInClusterFailed",
				"DaemonSet update in cluster %s failed: %v", op.ClusterName, operror)
		})

	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v, retrying shortly", key, err)
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
		return
	}
}
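Note the comment "Do not modify. Otherwise make a deep copy." above: informer stores hand back shared references, so the desired per-cluster state must be projected from copies rather than by mutating the cached object. The projection step, isolated as a hypothetical helper using the same util functions as the reconciler:

// projectDesired builds the per-cluster desired DaemonSet without touching
// the cached base object; mutating an object taken straight from an informer
// store would corrupt the cache for every other reader.
func projectDesired(base *extensionsv1.DaemonSet) *extensionsv1.DaemonSet {
	return &extensionsv1.DaemonSet{
		ObjectMeta: util.DeepCopyRelevantObjectMeta(base.ObjectMeta),
		Spec:       util.DeepCopyApiTypeOrPanic(base.Spec).(extensionsv1.DaemonSetSpec),
	}
}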
func (frsc *ReplicaSetController) reconcileReplicaSet(key string) (reconciliationStatus, error) {
	if !frsc.isSynced() {
		return statusNotSynced, nil
	}

	glog.V(4).Infof("Start reconcile replicaset %q", key)
	startTime := time.Now()
	defer glog.V(4).Infof("Finished reconcile replicaset %q (%v)", key, time.Now().Sub(startTime))

	obj, exists, err := frsc.replicaSetStore.Indexer.GetByKey(key)
	if err != nil {
		return statusError, err
	}
	if !exists {
		// Don't delete local replicasets for now. Just stop reconciling this replicaset.
		return statusAllOk, nil
	}
	frs := obj.(*extensionsv1.ReplicaSet)

	clusters, err := frsc.fedReplicaSetInformer.GetReadyClusters()
	if err != nil {
		return statusError, err
	}

	// collect current status and do schedule
	allPods, err := frsc.fedPodInformer.GetTargetStore().List()
	if err != nil {
		return statusError, err
	}
	podStatus, err := AnalysePods(frs, allPods, time.Now())
	if err != nil {
		return statusError, err
	}
	current := make(map[string]int64)
	estimatedCapacity := make(map[string]int64)
	for _, cluster := range clusters {
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			return statusError, err
		}
		if exists {
			lrs := lrsObj.(*extensionsv1.ReplicaSet)
			current[cluster.Name] = int64(podStatus[cluster.Name].RunningAndReady) // include pending as well?
			unschedulable := int64(podStatus[cluster.Name].Unschedulable)
			if unschedulable > 0 {
				estimatedCapacity[cluster.Name] = int64(*lrs.Spec.Replicas) - unschedulable
			}
		}
	}

	scheduleResult := frsc.schedule(frs, clusters, current, estimatedCapacity)

	glog.V(4).Infof("Start syncing local replicaset %s: %v", key, scheduleResult)

	fedStatus := extensionsv1.ReplicaSetStatus{ObservedGeneration: frs.Generation}
	operations := make([]fedutil.FederatedOperation, 0)
	for clusterName, replicas := range scheduleResult {

		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(clusterName, key)
		if err != nil {
			return statusError, err
		}

		lrs := &extensionsv1.ReplicaSet{
			ObjectMeta: fedutil.CopyObjectMeta(frs.ObjectMeta),
			Spec:       frs.Spec,
		}
		specReplicas := int32(replicas)
		lrs.Spec.Replicas = &specReplicas

		if !exists {
			if replicas > 0 {
				frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "CreateInCluster",
					"Creating replicaset in cluster %s", clusterName)

				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeAdd,
					Obj:         lrs,
					ClusterName: clusterName,
				})
			}
		} else {
			currentLrs := lrsObj.(*extensionsv1.ReplicaSet)
			// Update existing replica set, if needed.
			if !fedutil.ObjectMetaEquivalent(lrs.ObjectMeta, currentLrs.ObjectMeta) ||
				!reflect.DeepEqual(lrs.Spec, currentLrs.Spec) {
				frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "UpdateInCluster",
					"Updating replicaset in cluster %s", clusterName)

				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeUpdate,
					Obj:         lrs,
					ClusterName: clusterName,
				})
			}
			fedStatus.Replicas += currentLrs.Status.Replicas
			fedStatus.FullyLabeledReplicas += currentLrs.Status.FullyLabeledReplicas
			// Leave the replicaset in place even if its replicas dropped to 0.
		}
	}
	if fedStatus.Replicas != frs.Status.Replicas || fedStatus.FullyLabeledReplicas != frs.Status.FullyLabeledReplicas {
		frs.Status = fedStatus
		_, err = frsc.fedClient.Extensions().ReplicaSets(frs.Namespace).UpdateStatus(frs)
		if err != nil {
			return statusError, err
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return statusAllOk, nil
	}
	err = frsc.fedUpdater.UpdateWithOnError(operations, updateTimeout, func(op fedutil.FederatedOperation, operror error) {
		frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "FailedUpdateInCluster",
			"Replicaset update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", key, err)
		return statusError, err
	}

	// Some operations were made, reconcile after a while.
	return statusNeedRecheck, nil
}
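The capacity estimate in the loop above is plain subtraction: a cluster reporting unschedulable pods is assumed to fit only what its local ReplicaSet requested minus what would not schedule, while clusters with no unschedulable pods get no cap at all. A hedged distillation (hypothetical helper, not part of the controller):

// estimateCapacity mirrors the estimatedCapacity logic in reconcileReplicaSet.
// E.g. *lrs.Spec.Replicas == 10 with 3 unschedulable pods yields a cap of 7;
// with 0 unschedulable pods no cap is recorded for the cluster.
func estimateCapacity(specReplicas, unschedulable int64) (capacity int64, capped bool) {
	if unschedulable > 0 {
		return specReplicas - unschedulable, true
	}
	return 0, false
}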
Example #8
func (secretcontroller *SecretController) reconcileSecret(namespace string, secretName string) {

	if !secretcontroller.isSynced() {
		secretcontroller.deliverSecret(namespace, secretName, secretcontroller.clusterAvailableDelay, false)
		return
	}

	key := getSecretKey(namespace, secretName)
	baseSecretObj, exist, err := secretcontroller.secretInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main secret store for %v: %v", key, err)
		secretcontroller.deliverSecret(namespace, secretName, 0, true)
		return
	}

	if !exist {
		// Not federated secret, ignoring.
		return
	}
	baseSecret := baseSecretObj.(*api_v1.Secret)

	clusters, err := secretcontroller.secretFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		secretcontroller.deliverSecret(namespace, secretName, secretcontroller.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterSecretObj, found, err := secretcontroller.secretFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
			secretcontroller.deliverSecret(namespace, secretName, 0, true)
			return
		}

		desiredSecret := &api_v1.Secret{
			ObjectMeta: util.CopyObjectMeta(baseSecret.ObjectMeta),
			Data:       baseSecret.Data,
			Type:       baseSecret.Type,
		}

		if !found {
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "CreateInCluster",
				"Creating secret in cluster %s", cluster.Name)

			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredSecret,
				ClusterName: cluster.Name,
			})
		} else {
			clusterSecret := clusterSecretObj.(*api_v1.Secret)

			// Update existing secret, if needed.
			if !util.ObjectMetaEquivalent(desiredSecret.ObjectMeta, clusterSecret.ObjectMeta) ||
				!reflect.DeepEqual(desiredSecret.Data, clusterSecret.Data) ||
				!reflect.DeepEqual(desiredSecret.Type, clusterSecret.Type) {

				secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInCluster",
					"Updating secret in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredSecret,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return
	}
	err = secretcontroller.federatedUpdater.UpdateWithOnError(operations, secretcontroller.updateTimeout,
		func(op util.FederatedOperation, operror error) {
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "FailedUpdateInCluster",
				"Update secret in cluster %s failed: %v", op.ClusterName, operror)
		})

	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", key, err)
		secretcontroller.deliverSecret(namespace, secretName, 0, true)
		return
	}

	// Everything is in order, but let's be doubly sure.
	secretcontroller.deliverSecret(namespace, secretName, secretcontroller.secretReviewDelay, false)
}
func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
	glog.V(4).Infof("Reconciling ingress %q for all clusters", ingress)
	if !ic.isSynced() {
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	}

	key := ingress.String()
	baseIngressObj, exist, err := ic.ingressInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main ingress store for %v: %v", ingress, err)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	if !exist {
		// Not federated ingress, ignoring.
		glog.V(4).Infof("Ingress %q is not federated.  Ignoring.", ingress)
		return
	}
	baseIngress, ok := baseIngressObj.(*extensions_v1beta1.Ingress)
	if !ok {
		glog.Errorf("Internal Error: Object retrieved from ingressInformerStore with key %q is not of correct type *extensions_v1beta1.Ingress: %v", key, baseIngressObj)
	} else {
		glog.V(4).Infof("Base (federated) ingress: %v", baseIngress)
	}

	clusters, err := ic.ingressFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	} else {
		glog.V(4).Infof("Found %d ready clusters across which to reconcile ingress %q", len(clusters), ingress)
	}

	operations := make([]util.FederatedOperation, 0)

	for clusterIndex, cluster := range clusters {
		baseIPName, baseIPAnnotationExists := baseIngress.ObjectMeta.Annotations[staticIPNameKeyWritable]
		clusterIngressObj, clusterIngressFound, err := ic.ingressFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get cached ingress %s for cluster %s, will retry: %v", ingress, cluster.Name, err)
			ic.deliverIngress(ingress, 0, true)
			return
		}
		desiredIngress := &extensions_v1beta1.Ingress{}
		objMeta, err := conversion.NewCloner().DeepCopy(baseIngress.ObjectMeta)
		if err != nil {
			glog.Errorf("Error deep copying ObjectMeta: %v", err)
		}
		objSpec, err := conversion.NewCloner().DeepCopy(baseIngress.Spec)
		if err != nil {
			glog.Errorf("Error deep copying Spec: %v", err)
		}
		desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta)
		if !ok {
			glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
		}
		desiredIngress.Spec, ok = objSpec.(extensions_v1beta1.IngressSpec)
		if !ok {
			glog.Errorf("Internal error: Failed to cast to extensions_v1beta1.IngressSpec: %v", objSpec)
		}
		glog.V(4).Infof("Desired Ingress: %v", desiredIngress)

		if !clusterIngressFound {
			glog.V(4).Infof("No existing Ingress %s in cluster %s - checking if appropriate to queue a create operation", ingress, cluster.Name)
			// We can't supply server-created fields when creating a new object.
			desiredIngress.ObjectMeta = util.DeepCopyObjectMeta(baseIngress.ObjectMeta)
			ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "CreateInCluster",
				"Creating ingress in cluster %s", cluster.Name)

			// We always first create an ingress in the first available cluster.  Once that ingress
			// has been created and allocated a global IP (visible via an annotation),
			// we record that annotation on the federated ingress, and create all other cluster
			// ingresses with that same global IP.
			// Note: If the first cluster becomes (e.g. temporarily) unavailable, the second cluster will be allocated
			// index 0, but eventually all ingresses will share the single global IP recorded in the annotation
			// of the federated ingress.
			if baseIPAnnotationExists || (clusterIndex == 0) {
				glog.V(4).Infof("No existing Ingress %s in cluster %s (index %d) and static IP annotation (%q) on base ingress - queuing a create operation", ingress, cluster.Name, clusterIndex, staticIPNameKeyWritable)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeAdd,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
			} else {
				glog.V(4).Infof("No annotation %q exists on ingress %q in federation, and index of cluster %q is %d and not zero.  Not queueing create operation for ingress %q until annotation exists", staticIPNameKeyWritable, ingress, cluster.Name, clusterIndex, ingress)
			}
		} else {
			clusterIngress := clusterIngressObj.(*extensions_v1beta1.Ingress)
			glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required (in either direction)", ingress, cluster.Name)
			clusterIPName, clusterIPNameExists := clusterIngress.ObjectMeta.Annotations[staticIPNameKeyReadonly]
			baseLBStatusExists := len(baseIngress.Status.LoadBalancer.Ingress) > 0
			clusterLBStatusExists := len(clusterIngress.Status.LoadBalancer.Ingress) > 0
			logStr := fmt.Sprintf("Cluster ingress %q has annotation %q=%q, loadbalancer status exists? [%v], federated ingress has annotation %q=%q, loadbalancer status exists? [%v].  %%s annotation and/or loadbalancer status from cluster ingress to federated ingress.", ingress, staticIPNameKeyReadonly, clusterIPName, clusterLBStatusExists, staticIPNameKeyWritable, baseIPName, baseLBStatusExists)
			if (!baseIPAnnotationExists && clusterIPNameExists) || (!baseLBStatusExists && clusterLBStatusExists) { // copy the IP name from the readonly annotation on the cluster ingress, to the writable annotation on the federated ingress
				glog.V(4).Infof(logStr, "Transferring")
				if !baseIPAnnotationExists && clusterIPNameExists {
					baseIngress.ObjectMeta.Annotations[staticIPNameKeyWritable] = clusterIPName
					glog.V(4).Infof("Attempting to update base federated ingress annotations: %v", baseIngress)
					if updatedFedIngress, err := ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).Update(baseIngress); err != nil {
						glog.Errorf("Failed to add static IP annotation to federated ingress %q, will try again later: %v", ingress, err)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					} else {
						glog.V(4).Infof("Successfully updated federated ingress %q (added IP annotation), after update: %q", ingress, updatedFedIngress)
						ic.deliverIngress(ingress, ic.smallDelay, false)
						return
					}
				}
				if !baseLBStatusExists && clusterLBStatusExists {
					lbstatusObj, lbErr := conversion.NewCloner().DeepCopy(&clusterIngress.Status.LoadBalancer)
					lbstatus, ok := lbstatusObj.(*v1.LoadBalancerStatus)
					if lbErr != nil || !ok {
						glog.Errorf("Internal error: Failed to clone LoadBalancerStatus of %q in cluster %q while attempting to update master loadbalancer ingress status, will try again later. error: %v, Object to be cloned: %v", ingress, cluster.Name, lbErr, lbstatusObj)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					}
					baseIngress.Status.LoadBalancer = *lbstatus
					glog.V(4).Infof("Attempting to update base federated ingress status: %v", baseIngress)
					if updatedFedIngress, err := ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).UpdateStatus(baseIngress); err != nil {
						glog.Errorf("Failed to update federated ingress status of %q (loadbalancer status), will try again later: %v", ingress, err)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					} else {
						glog.V(4).Infof("Successfully updated federated ingress status of %q (added loadbalancer status), after update: %q", ingress, updatedFedIngress)
						ic.deliverIngress(ingress, ic.smallDelay, false)
						return
					}
				}
			} else {
				glog.V(4).Infof(logStr, "Not transferring")
			}
			// Update existing cluster ingress, if needed.
			if util.ObjectMetaEquivalent(baseIngress.ObjectMeta, clusterIngress.ObjectMeta) &&
				reflect.DeepEqual(baseIngress.Spec, clusterIngress.Spec) {
				glog.V(4).Infof("Ingress %q in cluster %q does not need an update: cluster ingress is equivalent to federated ingress", ingress, cluster.Name)
			} else {
				glog.V(4).Infof("Ingress %s in cluster %s needs an update: cluster ingress %v is not equivalent to federated ingress %v", ingress, cluster.Name, clusterIngress, desiredIngress)
				objMeta, err := conversion.NewCloner().DeepCopy(clusterIngress.ObjectMeta)
				if err != nil {
					glog.Errorf("Error deep copying ObjectMeta: %v", err)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
				}
				desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta)
				if !ok {
					glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
				}
				// Merge any annotations and labels on the federated ingress onto the underlying cluster ingress,
				// overwriting duplicates.
				if desiredIngress.ObjectMeta.Annotations == nil {
					desiredIngress.ObjectMeta.Annotations = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Annotations {
					desiredIngress.ObjectMeta.Annotations[key] = val
				}
				if desiredIngress.ObjectMeta.Labels == nil {
					desiredIngress.ObjectMeta.Labels = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Labels {
					desiredIngress.ObjectMeta.Labels[key] = val
				}
				ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "UpdateInCluster",
					"Updating ingress in cluster %s", cluster.Name)

				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
				// TODO: Transfer any readonly (target-proxy, url-map etc) annotations from the master cluster to the federation, if this is the master cluster.
				// This is only for consistency, so that the federation ingress metadata matches the underlying clusters.  It's not actually required.
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		glog.V(4).Infof("Ingress %q is up-to-date in all clusters - no propagation to clusters required.", ingress)
		ic.deliverIngress(ingress, ic.ingressReviewDelay, false)
		return
	}
	glog.V(4).Infof("Calling federatedUpdater.Update() - operations: %v", operations)
	err = ic.federatedIngressUpdater.UpdateWithOnError(operations, ic.updateTimeout, func(op util.FederatedOperation, operror error) {
		ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "FailedClusterUpdate",
			"Ingress update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", ingress, err)
		ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
		return
	}
	// Schedule another periodic reconciliation, only to account for possible bugs in watch processing.
	ic.deliverIngress(ingress, ic.ingressReviewDelay, false)
}
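The transfer branch in reconcileIngress reduces to a single condition: state flows upward, from a cluster ingress to the federated ingress, only for fields the federated side still lacks. As a pure-function sketch (hypothetical helper, not in the controller):

// shouldTransferToFederated captures the guard on the transfer branch above:
// copy the static-IP annotation and/or load-balancer status to the federated
// ingress only when it is missing there and present on the cluster ingress.
func shouldTransferToFederated(baseHasIPAnnotation, clusterHasIPAnnotation, baseHasLBStatus, clusterHasLBStatus bool) bool {
	return (!baseHasIPAnnotation && clusterHasIPAnnotation) ||
		(!baseHasLBStatus && clusterHasLBStatus)
}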
Example #10
func equivalentDeployment(fedDeployment, localDeployment *v1beta1.Deployment) bool {
	localDeploymentSpec := localDeployment.Spec
	localDeploymentSpec.Replicas = fedDeployment.Spec.Replicas
	return fedutil.ObjectMetaEquivalent(fedDeployment.ObjectMeta, localDeployment.ObjectMeta) &&
		reflect.DeepEqual(fedDeployment.Spec, localDeploymentSpec)
}
Example #11
func (nc *NamespaceController) reconcileNamespace(namespace string) {
	if !nc.isSynced() {
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}

	baseNamespaceObj, exist, err := nc.namespaceInformerStore.GetByKey(namespace)
	if err != nil {
		glog.Errorf("Failed to query main namespace store for %v: %v", namespace, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}

	if !exist {
		// Not federated namespace, ignoring.
		return
	}
	baseNamespace := baseNamespaceObj.(*api_v1.Namespace)
	if baseNamespace.Status.Phase == api_v1.NamespaceTerminating {
		// TODO: What about namespaces in subclusters ???
		err = nc.federatedApiClient.Core().Namespaces().Delete(baseNamespace.Name, &api.DeleteOptions{})
		if err != nil {
			glog.Errorf("Failed to delete namespace %s: %v", baseNamespace.Name, err)
			nc.deliverNamespace(namespace, 0, true)
		}
		return
	}

	clusters, err := nc.namespaceFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterNamespaceObj, found, err := nc.namespaceFederatedInformer.GetTargetStore().GetByKey(cluster.Name, namespace)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", namespace, cluster.Name, err)
			nc.deliverNamespace(namespace, 0, true)
			return
		}
		desiredNamespace := &api_v1.Namespace{
			ObjectMeta: util.CopyObjectMeta(baseNamespace.ObjectMeta),
			Spec:       baseNamespace.Spec,
		}

		if !found {
			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredNamespace,
				ClusterName: cluster.Name,
			})
		} else {
			clusterNamespace := clusterNamespaceObj.(*api_v1.Namespace)

			// Update existing namespace, if needed.
			if !util.ObjectMetaEquivalent(desiredNamespace.ObjectMeta, clusterNamespace.ObjectMeta) ||
				!reflect.DeepEqual(desiredNamespace.Spec, clusterNamespace.Spec) {
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredNamespace,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return
	}
	err = nc.federatedUpdater.Update(operations, nc.updateTimeout)
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", namespace, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}

	// Everything is in order, but let's be doubly sure.
	nc.deliverNamespace(namespace, nc.namespaceReviewDelay, false)
}
func (daemonsetcontroller *DaemonSetController) reconcileDaemonSet(namespace string, daemonsetName string) {
	glog.V(4).Infof("Reconciling daemonset %s/%s", namespace, daemonsetName)

	if !daemonsetcontroller.isSynced() {
		glog.V(4).Infof("Daemonset controller is not synced")
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, daemonsetcontroller.clusterAvailableDelay, false)
		return
	}

	key := getDaemonSetKey(namespace, daemonsetName)
	baseDaemonSetObj, exist, err := daemonsetcontroller.daemonsetInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main daemonset store for %v: %v", key, err)
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
		return
	}

	if !exist {
		glog.V(4).Infof("Skipping daemonset %s/%s - not federated", namespace, daemonsetName)
		// Not federated daemonset, ignoring.
		return
	}
	baseDaemonSet := baseDaemonSetObj.(*extensionsv1.DaemonSet)

	clusters, err := daemonsetcontroller.daemonsetFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, daemonsetcontroller.clusterAvailableDelay, false)
		return
	}

	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterDaemonSetObj, found, err := daemonsetcontroller.daemonsetFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
			daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
			return
		}

		desiredDaemonSet := &extensionsv1.DaemonSet{
			ObjectMeta: util.CopyObjectMeta(baseDaemonSet.ObjectMeta),
			Spec:       baseDaemonSet.Spec,
		}

		if !found {
			glog.V(4).Infof("Creating daemonset %s/%s in cluster %s", namespace, daemonsetName, cluster.Name)

			daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "CreateInCluster",
				"Creating daemonset in cluster %s", cluster.Name)

			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredDaemonSet,
				ClusterName: cluster.Name,
			})
		} else {
			clusterDaemonSet := clusterDaemonSetObj.(*extensionsv1.DaemonSet)

			// Update existing daemonset, if needed.
			if !util.ObjectMetaEquivalent(desiredDaemonSet.ObjectMeta, clusterDaemonSet.ObjectMeta) ||
				!reflect.DeepEqual(desiredDaemonSet.Spec, clusterDaemonSet.Spec) {

				glog.V(4).Infof("Upadting daemonset %s/%s in cluster %s", namespace, daemonsetName, cluster.Name)
				daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "UpdateInCluster",
					"Updating daemonset in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredDaemonSet,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		glog.V(4).Infof("No operation needed for %s/%s", namespace, daemonsetName)
		// Everything is in order
		return
	}
	err = daemonsetcontroller.federatedUpdater.UpdateWithOnError(operations, daemonsetcontroller.updateTimeout,
		func(op util.FederatedOperation, operror error) {
			daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "UpdateInClusterFailed",
				"DaemonSet update in cluster %s failed: %v", op.ClusterName, operror)
		})

	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v, retrying shortly", key, err)
		daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true)
		return
	}
}