// updateFederationService Returns whatever error occurred along with a boolean indicator of whether it // should be retried. func (s *ServiceController) updateFederationService(key string, cachedService *cachedService) (error, bool) { // Clone federation service, and create them in underlying k8s cluster desiredService := &v1.Service{ ObjectMeta: util.DeepCopyRelevantObjectMeta(cachedService.lastState.ObjectMeta), Spec: *(util.DeepCopyApiTypeOrPanic(&cachedService.lastState.Spec).(*v1.ServiceSpec)), } // handle available clusters one by one var hasErr bool for clusterName, cache := range s.clusterCache.clientMap { go func(cache *clusterCache, clusterName string) { err := s.processServiceForCluster(cachedService, clusterName, desiredService, cache.clientset) if err != nil { hasErr = true } }(cache, clusterName) } if hasErr { // detail error has been dumped inside the loop return fmt.Errorf("Service %s/%s was not successfully updated to all clusters", desiredService.Namespace, desiredService.Name), retryable } return nil, !retryable }
func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) { glog.V(4).Infof("Reconciling ingress %q for all clusters", ingress) if !ic.isSynced() { ic.deliverIngress(ingress, ic.clusterAvailableDelay, false) return } key := ingress.String() baseIngressObjFromStore, exist, err := ic.ingressInformerStore.GetByKey(key) if err != nil { glog.Errorf("Failed to query main ingress store for %v: %v", ingress, err) ic.deliverIngress(ingress, 0, true) return } if !exist { // Not federated ingress, ignoring. glog.V(4).Infof("Ingress %q is not federated. Ignoring.", ingress) return } baseIngressObj, err := conversion.NewCloner().DeepCopy(baseIngressObjFromStore) baseIngress, ok := baseIngressObj.(*extensionsv1beta1.Ingress) if err != nil || !ok { glog.Errorf("Internal Error %v : Object retrieved from ingressInformerStore with key %q is not of correct type *extensionsv1beta1.Ingress: %v", err, key, baseIngressObj) } else { glog.V(4).Infof("Base (federated) ingress: %v", baseIngress) } if baseIngress.DeletionTimestamp != nil { if err := ic.delete(baseIngress); err != nil { glog.Errorf("Failed to delete %s: %v", ingress, err) ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "DeleteFailed", "Ingress delete failed: %v", err) ic.deliverIngress(ingress, 0, true) } return } glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for ingress: %s", baseIngress.Name) // Add the required finalizers before creating a ingress in underlying clusters. 
updatedIngressObj, err := ic.deletionHelper.EnsureFinalizers(baseIngress) if err != nil { glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in ingress %s: %v", baseIngress.Name, err) ic.deliverIngress(ingress, 0, true) return } baseIngress = updatedIngressObj.(*extensionsv1beta1.Ingress) glog.V(3).Infof("Syncing ingress %s in underlying clusters", baseIngress.Name) clusters, err := ic.ingressFederatedInformer.GetReadyClusters() if err != nil { glog.Errorf("Failed to get cluster list: %v", err) ic.deliverIngress(ingress, ic.clusterAvailableDelay, false) return } else { glog.V(4).Infof("Found %d ready clusters across which to reconcile ingress %q", len(clusters), ingress) } operations := make([]util.FederatedOperation, 0) for _, cluster := range clusters { baseIPName, baseIPAnnotationExists := baseIngress.ObjectMeta.Annotations[staticIPNameKeyWritable] firstClusterName, firstClusterExists := baseIngress.ObjectMeta.Annotations[firstClusterAnnotation] clusterIngressObj, clusterIngressFound, err := ic.ingressFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key) if err != nil { glog.Errorf("Failed to get cached ingress %s for cluster %s, will retry: %v", ingress, cluster.Name, err) ic.deliverIngress(ingress, 0, true) return } desiredIngress := &extensionsv1beta1.Ingress{} objMeta, err := conversion.NewCloner().DeepCopy(baseIngress.ObjectMeta) if err != nil { glog.Errorf("Error deep copying ObjectMeta: %v", err) } objSpec, err := conversion.NewCloner().DeepCopy(baseIngress.Spec) if err != nil { glog.Errorf("Error deep copying Spec: %v", err) } desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta) if !ok { glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta) } desiredIngress.Spec = objSpec.(extensionsv1beta1.IngressSpec) if !ok { glog.Errorf("Internal error: Failed to cast to extensionsv1beta1.Ingressespec: %v", objSpec) } glog.V(4).Infof("Desired Ingress: %v", desiredIngress) if !clusterIngressFound { 
glog.V(4).Infof("No existing Ingress %s in cluster %s - checking if appropriate to queue a create operation", ingress, cluster.Name) // We can't supply server-created fields when creating a new object. desiredIngress.ObjectMeta = util.DeepCopyRelevantObjectMeta(baseIngress.ObjectMeta) ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "CreateInCluster", "Creating ingress in cluster %s", cluster.Name) // We always first create an ingress in the first available cluster. Once that ingress // has been created and allocated a global IP (visible via an annotation), // we record that annotation on the federated ingress, and create all other cluster // ingresses with that same global IP. // Note: If the first cluster becomes (e.g. temporarily) unavailable, the // second cluster will become the first cluster, but eventually all ingresses // will share the single global IP recorded in the annotation of the // federated ingress. haveFirstCluster := firstClusterExists && firstClusterName != "" && ic.isClusterReady(firstClusterName) if !haveFirstCluster { glog.V(4).Infof("No cluster has been chosen as the first cluster. 
Electing cluster %s as the first cluster to create ingress in", cluster.Name) ic.updateAnnotationOnIngress(baseIngress, firstClusterAnnotation, cluster.Name) return } if baseIPAnnotationExists || firstClusterName == cluster.Name { if baseIPAnnotationExists { glog.V(4).Infof("No existing Ingress %s in cluster %s and static IP annotation (%q) exists on base ingress - queuing a create operation", ingress, cluster.Name, staticIPNameKeyWritable) } else { glog.V(4).Infof("No existing Ingress %s in cluster %s and no static IP annotation (%q) on base ingress - queuing a create operation in first cluster", ingress, cluster.Name, staticIPNameKeyWritable) } operations = append(operations, util.FederatedOperation{ Type: util.OperationTypeAdd, Obj: desiredIngress, ClusterName: cluster.Name, }) } else { glog.V(4).Infof("No annotation %q exists on ingress %q in federation and waiting for ingress in cluster %s. Not queueing create operation for ingress until annotation exists", staticIPNameKeyWritable, ingress, firstClusterName) } } else { clusterIngress := clusterIngressObj.(*extensionsv1beta1.Ingress) glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required (in either direction)", ingress, cluster.Name) clusterIPName, clusterIPNameExists := clusterIngress.ObjectMeta.Annotations[staticIPNameKeyReadonly] baseLBStatusExists := len(baseIngress.Status.LoadBalancer.Ingress) > 0 clusterLBStatusExists := len(clusterIngress.Status.LoadBalancer.Ingress) > 0 logStr := fmt.Sprintf("Cluster ingress %q has annotation %q=%q, loadbalancer status exists? [%v], federated ingress has annotation %q=%q, loadbalancer status exists? [%v]. 
%%s annotation and/or loadbalancer status from cluster ingress to federated ingress.", ingress, staticIPNameKeyReadonly, clusterIPName, clusterLBStatusExists, staticIPNameKeyWritable, baseIPName, baseLBStatusExists) if (!baseIPAnnotationExists && clusterIPNameExists) || (!baseLBStatusExists && clusterLBStatusExists) { // copy the IP name from the readonly annotation on the cluster ingress, to the writable annotation on the federated ingress glog.V(4).Infof(logStr, "Transferring") if !baseIPAnnotationExists && clusterIPNameExists { ic.updateAnnotationOnIngress(baseIngress, staticIPNameKeyWritable, clusterIPName) return } if !baseLBStatusExists && clusterLBStatusExists { lbstatusObj, lbErr := conversion.NewCloner().DeepCopy(&clusterIngress.Status.LoadBalancer) lbstatus, ok := lbstatusObj.(*v1.LoadBalancerStatus) if lbErr != nil || !ok { glog.Errorf("Internal error: Failed to clone LoadBalancerStatus of %q in cluster %q while attempting to update master loadbalancer ingress status, will try again later. error: %v, Object to be cloned: %v", ingress, cluster.Name, lbErr, lbstatusObj) ic.deliverIngress(ingress, ic.ingressReviewDelay, true) return } baseIngress.Status.LoadBalancer = *lbstatus glog.V(4).Infof("Attempting to update base federated ingress status: %v", baseIngress) if updatedFedIngress, err := ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).UpdateStatus(baseIngress); err != nil { glog.Errorf("Failed to update federated ingress status of %q (loadbalancer status), will try again later: %v", ingress, err) ic.deliverIngress(ingress, ic.ingressReviewDelay, true) return } else { glog.V(4).Infof("Successfully updated federated ingress status of %q (added loadbalancer status), after update: %q", ingress, updatedFedIngress) ic.deliverIngress(ingress, ic.smallDelay, false) return } } } else { glog.V(4).Infof(logStr, "Not transferring") } // Update existing cluster ingress, if needed. 
if util.ObjectMetaAndSpecEquivalent(baseIngress, clusterIngress) { glog.V(4).Infof("Ingress %q in cluster %q does not need an update: cluster ingress is equivalent to federated ingress", ingress, cluster.Name) } else { glog.V(4).Infof("Ingress %s in cluster %s needs an update: cluster ingress %v is not equivalent to federated ingress %v", ingress, cluster.Name, clusterIngress, desiredIngress) objMeta, err := conversion.NewCloner().DeepCopy(clusterIngress.ObjectMeta) if err != nil { glog.Errorf("Error deep copying ObjectMeta: %v", err) ic.deliverIngress(ingress, ic.ingressReviewDelay, true) } desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta) if !ok { glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta) ic.deliverIngress(ingress, ic.ingressReviewDelay, true) } // Merge any annotations and labels on the federated ingress onto the underlying cluster ingress, // overwriting duplicates. if desiredIngress.ObjectMeta.Annotations == nil { desiredIngress.ObjectMeta.Annotations = make(map[string]string) } for key, val := range baseIngress.ObjectMeta.Annotations { desiredIngress.ObjectMeta.Annotations[key] = val } if desiredIngress.ObjectMeta.Labels == nil { desiredIngress.ObjectMeta.Labels = make(map[string]string) } for key, val := range baseIngress.ObjectMeta.Labels { desiredIngress.ObjectMeta.Labels[key] = val } ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "UpdateInCluster", "Updating ingress in cluster %s", cluster.Name) operations = append(operations, util.FederatedOperation{ Type: util.OperationTypeUpdate, Obj: desiredIngress, ClusterName: cluster.Name, }) // TODO: Transfer any readonly (target-proxy, url-map etc) annotations from the master cluster to the federation, if this is the master cluster. // This is only for consistency, so that the federation ingress metadata matches the underlying clusters. 
It's not actually required } } } } if len(operations) == 0 { // Everything is in order glog.V(4).Infof("Ingress %q is up-to-date in all clusters - no propagation to clusters required.", ingress) return } glog.V(4).Infof("Calling federatedUpdater.Update() - operations: %v", operations) err = ic.federatedIngressUpdater.UpdateWithOnError(operations, ic.updateTimeout, func(op util.FederatedOperation, operror error) { ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "FailedClusterUpdate", "Ingress update in cluster %s failed: %v", op.ClusterName, operror) }) if err != nil { glog.Errorf("Failed to execute updates for %s: %v", ingress, err) ic.deliverIngress(ingress, ic.ingressReviewDelay, true) return } // Schedule another periodic reconciliation, only to account for possible bugs in watch processing. ic.deliverIngress(ingress, ic.ingressReviewDelay, false) }
func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap types.NamespacedName) { if !configmapcontroller.isSynced() { glog.V(4).Infof("Configmap controller not synced") configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false) return } key := configmap.String() baseConfigMapObj, exist, err := configmapcontroller.configmapInformerStore.GetByKey(key) if err != nil { glog.Errorf("Failed to query main configmap store for %v: %v", key, err) configmapcontroller.deliverConfigMap(configmap, 0, true) return } if !exist { // Not federated configmap, ignoring. glog.V(8).Infof("Skipping not federated config map: %s", key) return } baseConfigMap := baseConfigMapObj.(*apiv1.ConfigMap) clusters, err := configmapcontroller.configmapFederatedInformer.GetReadyClusters() if err != nil { glog.Errorf("Failed to get cluster list: %v, retrying shortly", err) configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false) return } operations := make([]util.FederatedOperation, 0) for _, cluster := range clusters { clusterConfigMapObj, found, err := configmapcontroller.configmapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key) if err != nil { glog.Errorf("Failed to get %s from %s: %v, retrying shortly", key, cluster.Name, err) configmapcontroller.deliverConfigMap(configmap, 0, true) return } // Do not modify data. desiredConfigMap := &apiv1.ConfigMap{ ObjectMeta: util.DeepCopyRelevantObjectMeta(baseConfigMap.ObjectMeta), Data: baseConfigMap.Data, } if !found { configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "CreateInCluster", "Creating configmap in cluster %s", cluster.Name) operations = append(operations, util.FederatedOperation{ Type: util.OperationTypeAdd, Obj: desiredConfigMap, ClusterName: cluster.Name, }) } else { clusterConfigMap := clusterConfigMapObj.(*apiv1.ConfigMap) // Update existing configmap, if needed. 
if !util.ConfigMapEquivalent(desiredConfigMap, clusterConfigMap) { configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInCluster", "Updating configmap in cluster %s", cluster.Name) operations = append(operations, util.FederatedOperation{ Type: util.OperationTypeUpdate, Obj: desiredConfigMap, ClusterName: cluster.Name, }) } } } if len(operations) == 0 { // Everything is in order glog.V(8).Infof("No operations needed for %s", key) return } err = configmapcontroller.federatedUpdater.UpdateWithOnError(operations, configmapcontroller.updateTimeout, func(op util.FederatedOperation, operror error) { configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInClusterFailed", "ConfigMap update in cluster %s failed: %v", op.ClusterName, operror) }) if err != nil { glog.Errorf("Failed to execute updates for %s: %v, retrying shortly", key, err) configmapcontroller.deliverConfigMap(configmap, 0, true) return } }
// reconcileNamespace ensures that the federated namespace with the given name
// is propagated to all ready underlying clusters: it attaches the deletion
// finalizer, handles cascading deletion, and queues create/update operations
// for any cluster whose copy is missing or out of date.
func (nc *NamespaceController) reconcileNamespace(namespace string) {
	if !nc.isSynced() {
		// Informers not synced yet; retry after clusters become available.
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}

	namespaceObjFromStore, exist, err := nc.namespaceInformerStore.GetByKey(namespace)
	if err != nil {
		glog.Errorf("Failed to query main namespace store for %v: %v", namespace, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}
	if !exist {
		// Not federated namespace, ignoring.
		return
	}
	// Create a copy before modifying the namespace to prevent race condition with
	// other readers of namespace from store.
	namespaceObj, err := api.Scheme.DeepCopy(namespaceObjFromStore)
	baseNamespace, ok := namespaceObj.(*apiv1.Namespace)
	if err != nil || !ok {
		glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}
	if baseNamespace.DeletionTimestamp != nil {
		// The federated namespace is being deleted: cascade the delete into
		// the underlying clusters; requeue on failure.
		if err := nc.delete(baseNamespace); err != nil {
			glog.Errorf("Failed to delete %s: %v", namespace, err)
			nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "DeleteFailed", "Namespace delete failed: %v", err)
			nc.deliverNamespace(namespace, 0, true)
		}
		return
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for namespace: %s", baseNamespace.Name)
	// Add the required finalizers before creating a namespace in
	// underlying clusters.
	// This ensures that the dependent namespaces are deleted in underlying
	// clusters when the federated namespace is deleted.
	updatedNamespaceObj, err := nc.deletionHelper.EnsureFinalizers(baseNamespace)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in namespace %s: %v", baseNamespace.Name, err)
		nc.deliverNamespace(namespace, 0, false)
		return
	}
	baseNamespace = updatedNamespaceObj.(*apiv1.Namespace)

	glog.V(3).Infof("Syncing namespace %s in underlying clusters", baseNamespace.Name)
	// Sync the namespace in all underlying clusters.
	clusters, err := nc.namespaceFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}

	// Collect one create/update operation per cluster that is out of sync.
	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterNamespaceObj, found, err := nc.namespaceFederatedInformer.GetTargetStore().GetByKey(cluster.Name, namespace)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", namespace, cluster.Name, err)
			nc.deliverNamespace(namespace, 0, true)
			return
		}
		// The object should not be modified.
		desiredNamespace := &apiv1.Namespace{
			ObjectMeta: util.DeepCopyRelevantObjectMeta(baseNamespace.ObjectMeta),
			Spec:       *(util.DeepCopyApiTypeOrPanic(&baseNamespace.Spec).(*apiv1.NamespaceSpec)),
		}
		glog.V(5).Infof("Desired namespace in underlying clusters: %+v", desiredNamespace)

		if !found {
			// Missing in this cluster: queue a create.
			nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "CreateInCluster", "Creating namespace in cluster %s", cluster.Name)
			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredNamespace,
				ClusterName: cluster.Name,
			})
		} else {
			clusterNamespace := clusterNamespaceObj.(*apiv1.Namespace)
			// Update existing namespace, if needed.
			if !util.ObjectMetaAndSpecEquivalent(desiredNamespace, clusterNamespace) {
				nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "UpdateInCluster", "Updating namespace in cluster %s. Desired: %+v\n Actual: %+v\n", cluster.Name, desiredNamespace, clusterNamespace)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredNamespace,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return
	}
	glog.V(2).Infof("Updating namespace %s in underlying clusters. Operations: %d", baseNamespace.Name, len(operations))

	err = nc.federatedUpdater.UpdateWithOnError(operations, nc.updateTimeout, func(op util.FederatedOperation, operror error) {
		nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "UpdateInClusterFailed", "Namespace update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", namespace, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}
	// Everything is in order but lets be double sure
	nc.deliverNamespace(namespace, nc.namespaceReviewDelay, false)
}
// reconcileSecret ensures that the federated secret identified by the given
// namespaced name is propagated to all ready underlying clusters: it attaches
// the deletion finalizer, handles deletion, and queues create/update
// operations for any cluster whose copy is missing or out of date.
func (secretcontroller *SecretController) reconcileSecret(secret types.NamespacedName) {
	if !secretcontroller.isSynced() {
		// Informers not synced yet; retry after clusters become available.
		secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
		return
	}

	key := secret.String()
	baseSecretObjFromStore, exist, err := secretcontroller.secretInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main secret store for %v: %v", key, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}
	if !exist {
		// Not federated secret, ignoring.
		return
	}
	// Create a copy before modifying the obj to prevent race condition with
	// other readers of obj from store.
	baseSecretObj, err := api.Scheme.DeepCopy(baseSecretObjFromStore)
	baseSecret, ok := baseSecretObj.(*apiv1.Secret)
	if err != nil || !ok {
		glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}
	if baseSecret.DeletionTimestamp != nil {
		// The federated secret is being deleted: cascade the delete into the
		// underlying clusters; requeue on failure.
		if err := secretcontroller.delete(baseSecret); err != nil {
			glog.Errorf("Failed to delete %s: %v", secret, err)
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "DeleteFailed", "Secret delete failed: %v", err)
			secretcontroller.deliverSecret(secret, 0, true)
		}
		return
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for secret: %s", baseSecret.Name)
	// Add the required finalizers before creating a secret in underlying clusters.
	updatedSecretObj, err := secretcontroller.deletionHelper.EnsureFinalizers(baseSecret)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in secret %s: %v", baseSecret.Name, err)
		secretcontroller.deliverSecret(secret, 0, false)
		return
	}
	baseSecret = updatedSecretObj.(*apiv1.Secret)

	glog.V(3).Infof("Syncing secret %s in underlying clusters", baseSecret.Name)

	clusters, err := secretcontroller.secretFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
		return
	}

	// Collect one create/update operation per cluster that is out of sync.
	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterSecretObj, found, err := secretcontroller.secretFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
			secretcontroller.deliverSecret(secret, 0, true)
			return
		}
		// The data should not be modified.
		desiredSecret := &apiv1.Secret{
			ObjectMeta: util.DeepCopyRelevantObjectMeta(baseSecret.ObjectMeta),
			Data:       baseSecret.Data,
			Type:       baseSecret.Type,
		}

		if !found {
			// Missing in this cluster: queue a create.
			secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "CreateInCluster", "Creating secret in cluster %s", cluster.Name)
			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredSecret,
				ClusterName: cluster.Name,
			})
		} else {
			clusterSecret := clusterSecretObj.(*apiv1.Secret)
			// Update existing secret, if needed.
			if !util.SecretEquivalent(*desiredSecret, *clusterSecret) {
				secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInCluster", "Updating secret in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredSecret,
					ClusterName: cluster.Name,
				})
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		return
	}
	err = secretcontroller.federatedUpdater.UpdateWithOnError(operations, secretcontroller.updateTimeout, func(op util.FederatedOperation, operror error) {
		secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInClusterFailed", "Secret update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", key, err)
		secretcontroller.deliverSecret(secret, 0, true)
		return
	}
	// Everything is in order but lets be double sure
	secretcontroller.deliverSecret(secret, secretcontroller.secretReviewDelay, false)
}
func (daemonsetcontroller *DaemonSetController) reconcileDaemonSet(namespace string, daemonsetName string) { glog.V(4).Infof("Reconciling daemonset %s/%s", namespace, daemonsetName) if !daemonsetcontroller.isSynced() { glog.V(4).Infof("Daemonset controller is not synced") daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, daemonsetcontroller.clusterAvailableDelay, false) return } key := getDaemonSetKey(namespace, daemonsetName) baseDaemonSetObjFromStore, exist, err := daemonsetcontroller.daemonsetInformerStore.GetByKey(key) if err != nil { glog.Errorf("Failed to query main daemonset store for %v: %v", key, err) daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true) return } if !exist { glog.V(4).Infof("Skipping daemonset %s/%s - not federated", namespace, daemonsetName) // Not federated daemonset, ignoring. return } baseDaemonSetObj, err := conversion.NewCloner().DeepCopy(baseDaemonSetObjFromStore) baseDaemonSet, ok := baseDaemonSetObj.(*extensionsv1.DaemonSet) if err != nil || !ok { glog.Errorf("Error in retrieving obj %s from store: %v, %v", daemonsetName, ok, err) daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true) return } if baseDaemonSet.DeletionTimestamp != nil { if err := daemonsetcontroller.delete(baseDaemonSet); err != nil { glog.Errorf("Failed to delete %s: %v", daemonsetName, err) daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "DeleteFailed", "DaemonSet delete failed: %v", err) daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true) } return } glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for daemonset: %s", baseDaemonSet.Name) // Add the required finalizers before creating a daemonset in underlying clusters. 
updatedDaemonSetObj, err := daemonsetcontroller.deletionHelper.EnsureFinalizers(baseDaemonSet) if err != nil { glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in daemonset %s: %v", baseDaemonSet.Name, err) daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, false) return } baseDaemonSet = updatedDaemonSetObj.(*extensionsv1.DaemonSet) glog.V(3).Infof("Syncing daemonset %s in underlying clusters", baseDaemonSet.Name) clusters, err := daemonsetcontroller.daemonsetFederatedInformer.GetReadyClusters() if err != nil { glog.Errorf("Failed to get cluster list: %v", err) daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, daemonsetcontroller.clusterAvailableDelay, false) return } operations := make([]util.FederatedOperation, 0) for _, cluster := range clusters { clusterDaemonSetObj, found, err := daemonsetcontroller.daemonsetFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key) if err != nil { glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err) daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true) return } // Do not modify. Otherwise make a deep copy. desiredDaemonSet := &extensionsv1.DaemonSet{ ObjectMeta: util.DeepCopyRelevantObjectMeta(baseDaemonSet.ObjectMeta), Spec: util.DeepCopyApiTypeOrPanic(baseDaemonSet.Spec).(extensionsv1.DaemonSetSpec), } if !found { glog.V(4).Infof("Creating daemonset %s/%s in cluster %s", namespace, daemonsetName, cluster.Name) daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "CreateInCluster", "Creating daemonset in cluster %s", cluster.Name) operations = append(operations, util.FederatedOperation{ Type: util.OperationTypeAdd, Obj: desiredDaemonSet, ClusterName: cluster.Name, }) } else { clusterDaemonSet := clusterDaemonSetObj.(*extensionsv1.DaemonSet) // Update existing daemonset, if needed. 
if !util.ObjectMetaEquivalent(desiredDaemonSet.ObjectMeta, clusterDaemonSet.ObjectMeta) || !reflect.DeepEqual(desiredDaemonSet.Spec, clusterDaemonSet.Spec) { glog.V(4).Infof("Upadting daemonset %s/%s in cluster %s", namespace, daemonsetName, cluster.Name) daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "UpdateInCluster", "Updating daemonset in cluster %s", cluster.Name) operations = append(operations, util.FederatedOperation{ Type: util.OperationTypeUpdate, Obj: desiredDaemonSet, ClusterName: cluster.Name, }) } } } if len(operations) == 0 { glog.V(4).Infof("No operation needed for %s/%s", namespace, daemonsetName) // Everything is in order return } err = daemonsetcontroller.federatedUpdater.UpdateWithOnError(operations, daemonsetcontroller.updateTimeout, func(op util.FederatedOperation, operror error) { daemonsetcontroller.eventRecorder.Eventf(baseDaemonSet, api.EventTypeNormal, "UpdateInClusterFailed", "DaemonSet update in cluster %s failed: %v", op.ClusterName, operror) }) if err != nil { glog.Errorf("Failed to execute updates for %s: %v, retrying shortly", key, err) daemonsetcontroller.deliverDaemonSet(namespace, daemonsetName, 0, true) return } }
func (frsc *ReplicaSetController) reconcileReplicaSet(key string) (reconciliationStatus, error) { if !frsc.isSynced() { return statusNotSynced, nil } glog.V(4).Infof("Start reconcile replicaset %q", key) startTime := time.Now() defer glog.V(4).Infof("Finished reconcile replicaset %q (%v)", key, time.Now().Sub(startTime)) objFromStore, exists, err := frsc.replicaSetStore.Indexer.GetByKey(key) if err != nil { return statusError, err } if !exists { // don't delete local replicasets for now. Do not reconcile it anymore. return statusAllOk, nil } obj, err := conversion.NewCloner().DeepCopy(objFromStore) frs, ok := obj.(*extensionsv1.ReplicaSet) if err != nil || !ok { glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err) frsc.deliverReplicaSetByKey(key, 0, true) return statusError, err } if frs.DeletionTimestamp != nil { if err := frsc.delete(frs); err != nil { glog.Errorf("Failed to delete %s: %v", frs, err) frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "DeleteFailed", "ReplicaSet delete failed: %v", err) frsc.deliverReplicaSetByKey(key, 0, true) return statusError, err } return statusAllOk, nil } glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for replicaset: %s", frs.Name) // Add the required finalizers before creating a replicaset in underlying clusters. 
updatedRsObj, err := frsc.deletionHelper.EnsureFinalizers(frs) if err != nil { glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in replicaset %s: %v", frs.Name, err) frsc.deliverReplicaSetByKey(key, 0, false) return statusError, err } frs = updatedRsObj.(*extensionsv1.ReplicaSet) glog.V(3).Infof("Syncing replicaset %s in underlying clusters", frs.Name) clusters, err := frsc.fedReplicaSetInformer.GetReadyClusters() if err != nil { return statusError, err } // collect current status and do schedule allPods, err := frsc.fedPodInformer.GetTargetStore().List() if err != nil { return statusError, err } podStatus, err := podanalyzer.AnalysePods(frs.Spec.Selector, allPods, time.Now()) current := make(map[string]int64) estimatedCapacity := make(map[string]int64) for _, cluster := range clusters { lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(cluster.Name, key) if err != nil { return statusError, err } if exists { lrs := lrsObj.(*extensionsv1.ReplicaSet) current[cluster.Name] = int64(podStatus[cluster.Name].RunningAndReady) // include pending as well? unschedulable := int64(podStatus[cluster.Name].Unschedulable) if unschedulable > 0 { estimatedCapacity[cluster.Name] = int64(*lrs.Spec.Replicas) - unschedulable } } } scheduleResult := frsc.schedule(frs, clusters, current, estimatedCapacity) glog.V(4).Infof("Start syncing local replicaset %s: %v", key, scheduleResult) fedStatus := extensionsv1.ReplicaSetStatus{ObservedGeneration: frs.Generation} operations := make([]fedutil.FederatedOperation, 0) for clusterName, replicas := range scheduleResult { lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(clusterName, key) if err != nil { return statusError, err } // The object can be modified. 
lrs := &extensionsv1.ReplicaSet{ ObjectMeta: fedutil.DeepCopyRelevantObjectMeta(frs.ObjectMeta), Spec: fedutil.DeepCopyApiTypeOrPanic(frs.Spec).(extensionsv1.ReplicaSetSpec), } specReplicas := int32(replicas) lrs.Spec.Replicas = &specReplicas if !exists { if replicas > 0 { frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "CreateInCluster", "Creating replicaset in cluster %s", clusterName) operations = append(operations, fedutil.FederatedOperation{ Type: fedutil.OperationTypeAdd, Obj: lrs, ClusterName: clusterName, }) } } else { currentLrs := lrsObj.(*extensionsv1.ReplicaSet) // Update existing replica set, if needed. if !fedutil.ObjectMetaAndSpecEquivalent(lrs, currentLrs) { frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "UpdateInCluster", "Updating replicaset in cluster %s", clusterName) operations = append(operations, fedutil.FederatedOperation{ Type: fedutil.OperationTypeUpdate, Obj: lrs, ClusterName: clusterName, }) } fedStatus.Replicas += currentLrs.Status.Replicas fedStatus.FullyLabeledReplicas += currentLrs.Status.FullyLabeledReplicas fedStatus.ReadyReplicas += currentLrs.Status.ReadyReplicas fedStatus.AvailableReplicas += currentLrs.Status.AvailableReplicas } } if fedStatus.Replicas != frs.Status.Replicas || fedStatus.FullyLabeledReplicas != frs.Status.FullyLabeledReplicas || fedStatus.ReadyReplicas != frs.Status.ReadyReplicas || fedStatus.AvailableReplicas != frs.Status.AvailableReplicas { frs.Status = fedStatus _, err = frsc.fedClient.Extensions().ReplicaSets(frs.Namespace).UpdateStatus(frs) if err != nil { return statusError, err } } if len(operations) == 0 { // Everything is in order return statusAllOk, nil } err = frsc.fedUpdater.UpdateWithOnError(operations, updateTimeout, func(op fedutil.FederatedOperation, operror error) { frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "FailedUpdateInCluster", "Replicaset update in cluster %s failed: %v", op.ClusterName, operror) }) if err != nil { glog.Errorf("Failed to execute updates for %s: %v", 
key, err) return statusError, err } // Some operations were made, reconcile after a while. return statusNeedRecheck, nil }