func (frsc *ReplicaSetController) reconcileReplicaSet(key string) (reconciliationStatus, error) {
	if !frsc.isSynced() {
		return statusNotSynced, nil
	}

	glog.V(4).Infof("Start reconcile replicaset %q", key)
	startTime := time.Now()
	// Wrap the deferred log in a closure so the duration is computed when the
	// function returns, not when the defer statement is evaluated.
	defer func() {
		glog.V(4).Infof("Finished reconcile replicaset %q (%v)", key, time.Now().Sub(startTime))
	}()

	objFromStore, exists, err := frsc.replicaSetStore.Indexer.GetByKey(key)
	if err != nil {
		return statusError, err
	}
	if !exists {
		// Don't delete local replicasets for now; stop reconciling this key.
		return statusAllOk, nil
	}

	obj, err := conversion.NewCloner().DeepCopy(objFromStore)
	frs, ok := obj.(*extensionsv1.ReplicaSet)
	if err != nil || !ok {
		glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
		frsc.deliverReplicaSetByKey(key, 0, true)
		return statusError, err
	}

	if frs.DeletionTimestamp != nil {
		if err := frsc.delete(frs); err != nil {
			glog.Errorf("Failed to delete %s: %v", frs, err)
			frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "DeleteFailed",
				"ReplicaSet delete failed: %v", err)
			frsc.deliverReplicaSetByKey(key, 0, true)
			return statusError, err
		}
		return statusAllOk, nil
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for replicaset: %s", frs.Name)
	// Add the required finalizers before creating a replicaset in underlying clusters.
	updatedRsObj, err := frsc.deletionHelper.EnsureFinalizers(frs)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in replicaset %s: %v",
			frs.Name, err)
		frsc.deliverReplicaSetByKey(key, 0, false)
		return statusError, err
	}
	frs = updatedRsObj.(*extensionsv1.ReplicaSet)

	glog.V(3).Infof("Syncing replicaset %s in underlying clusters", frs.Name)

	clusters, err := frsc.fedReplicaSetInformer.GetReadyClusters()
	if err != nil {
		return statusError, err
	}

	// Collect the current pod status in each cluster and compute the schedule.
	allPods, err := frsc.fedPodInformer.GetTargetStore().List()
	if err != nil {
		return statusError, err
	}
	podStatus, err := podanalyzer.AnalysePods(frs.Spec.Selector, allPods, time.Now())
	if err != nil {
		return statusError, err
	}
	current := make(map[string]int64)
	estimatedCapacity := make(map[string]int64)
	for _, cluster := range clusters {
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			return statusError, err
		}
		if exists {
			lrs := lrsObj.(*extensionsv1.ReplicaSet)
			current[cluster.Name] = int64(podStatus[cluster.Name].RunningAndReady) // include pending as well?
			unschedulable := int64(podStatus[cluster.Name].Unschedulable)
			if unschedulable > 0 {
				estimatedCapacity[cluster.Name] = int64(*lrs.Spec.Replicas) - unschedulable
			}
		}
	}

	scheduleResult := frsc.schedule(frs, clusters, current, estimatedCapacity)

	glog.V(4).Infof("Start syncing local replicaset %s: %v", key, scheduleResult)

	// Build one create/update operation per cluster according to the schedule and
	// aggregate the federated status from the replica sets that already exist.
	fedStatus := extensionsv1.ReplicaSetStatus{ObservedGeneration: frs.Generation}
	operations := make([]fedutil.FederatedOperation, 0)
	for clusterName, replicas := range scheduleResult {
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(clusterName, key)
		if err != nil {
			return statusError, err
		}

		lrs := &extensionsv1.ReplicaSet{
			ObjectMeta: fedutil.CopyObjectMeta(frs.ObjectMeta),
			Spec:       frs.Spec,
		}
		specReplicas := int32(replicas)
		lrs.Spec.Replicas = &specReplicas

		if !exists {
			if replicas > 0 {
				frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "CreateInCluster",
					"Creating replicaset in cluster %s", clusterName)

				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeAdd,
					Obj:         lrs,
					ClusterName: clusterName,
				})
			}
		} else {
			currentLrs := lrsObj.(*extensionsv1.ReplicaSet)
			// Update the existing replica set, if needed.
			if !fedutil.ObjectMetaAndSpecEquivalent(lrs, currentLrs) {
				frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "UpdateInCluster",
					"Updating replicaset in cluster %s", clusterName)

				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeUpdate,
					Obj:         lrs,
					ClusterName: clusterName,
				})
			}
			fedStatus.Replicas += currentLrs.Status.Replicas
			fedStatus.FullyLabeledReplicas += currentLrs.Status.FullyLabeledReplicas
			// Leave the replicaset in place even if its replicas dropped to 0.
		}
	}

	if fedStatus.Replicas != frs.Status.Replicas || fedStatus.FullyLabeledReplicas != frs.Status.FullyLabeledReplicas {
		frs.Status = fedStatus
		_, err = frsc.fedClient.Extensions().ReplicaSets(frs.Namespace).UpdateStatus(frs)
		if err != nil {
			return statusError, err
		}
	}

	if len(operations) == 0 {
		// Everything is in order.
		return statusAllOk, nil
	}
	err = frsc.fedUpdater.UpdateWithOnError(operations, updateTimeout,
		func(op fedutil.FederatedOperation, operror error) {
			frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "FailedUpdateInCluster",
				"Replicaset update in cluster %s failed: %v", op.ClusterName, operror)
		})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", key, err)
		return statusError, err
	}

	// Some operations were made; reconcile again after a while.
	return statusNeedRecheck, nil
}
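// The status-returning variant above reports its outcome via a reconciliationStatus
// value (statusNotSynced, statusAllOk, statusError, statusNeedRecheck). Those
// identifiers are defined elsewhere in the controller; the sketch below is a
// plausible minimal definition, and the concrete string values are assumptions
// made for illustration rather than taken from this excerpt.
type reconciliationStatus string

const (
	statusAllOk       = reconciliationStatus("ALL_OK")
	statusNeedRecheck = reconciliationStatus("RECHECK")
	statusError       = reconciliationStatus("ERROR")
	statusNotSynced   = reconciliationStatus("NOSYNC")
)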
func (frsc *ReplicaSetController) reconcileReplicaSet(key string) error {
	if !frsc.isSynced() {
		frsc.deliverReplicaSetByKey(key, clusterAvailableDelay, false)
		return nil
	}

	glog.Infof("Start reconcile replicaset %q", key)
	startTime := time.Now()
	// Wrap the deferred log in a closure so the duration is computed when the
	// function returns, not when the defer statement is evaluated.
	defer func() {
		glog.Infof("Finished reconcile replicaset %q (%v)", key, time.Now().Sub(startTime))
	}()

	obj, exists, err := frsc.replicaSetStore.Store.GetByKey(key)
	if err != nil {
		return err
	}
	if !exists {
		// Don't delete local replicasets for now.
		return nil
	}
	frs := obj.(*extensionsv1.ReplicaSet)

	clusters, err := frsc.fedReplicaSetInformer.GetReadyClusters()
	if err != nil {
		return err
	}

	// Collect the current pod status in each cluster and compute the schedule.
	allPods, err := frsc.fedPodInformer.GetTargetStore().List()
	if err != nil {
		return err
	}
	podStatus, err := AnalysePods(frs, allPods, time.Now())
	if err != nil {
		return err
	}
	current := make(map[string]int64)
	estimatedCapacity := make(map[string]int64)
	for _, cluster := range clusters {
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			return err
		}
		if exists {
			lrs := lrsObj.(*extensionsv1.ReplicaSet)
			current[cluster.Name] = int64(podStatus[cluster.Name].RunningAndReady) // include pending as well?
			unschedulable := int64(podStatus[cluster.Name].Unschedulable)
			if unschedulable > 0 {
				estimatedCapacity[cluster.Name] = int64(*lrs.Spec.Replicas) - unschedulable
			}
		}
	}

	scheduleResult := frsc.schedule(frs, clusters, current, estimatedCapacity)

	glog.Infof("Start syncing local replicaset %v", scheduleResult)

	fedStatus := extensionsv1.ReplicaSetStatus{ObservedGeneration: frs.Generation}
	for clusterName, replicas := range scheduleResult {
		// TODO: updater or parallelizer doesn't help as results are needed for updating fed rs status
		clusterClient, err := frsc.fedReplicaSetInformer.GetClientsetForCluster(clusterName)
		if err != nil {
			return err
		}
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(clusterName, key)
		if err != nil {
			return err
		} else if !exists {
			if replicas > 0 {
				lrs := &extensionsv1.ReplicaSet{
					ObjectMeta: apiv1.ObjectMeta{
						Name:        frs.Name,
						Namespace:   frs.Namespace,
						Labels:      frs.Labels,
						Annotations: frs.Annotations,
					},
					Spec: frs.Spec,
				}
				specReplicas := int32(replicas)
				lrs.Spec.Replicas = &specReplicas
				lrs, err = clusterClient.Extensions().ReplicaSets(frs.Namespace).Create(lrs)
				if err != nil {
					return err
				}
				fedStatus.Replicas += lrs.Status.Replicas
				fedStatus.FullyLabeledReplicas += lrs.Status.FullyLabeledReplicas
			}
		} else {
			lrs := lrsObj.(*extensionsv1.ReplicaSet)
			lrsExpectedSpec := frs.Spec
			specReplicas := int32(replicas)
			lrsExpectedSpec.Replicas = &specReplicas
			if !reflect.DeepEqual(lrs.Spec, lrsExpectedSpec) {
				lrs.Spec = lrsExpectedSpec
				lrs, err = clusterClient.Extensions().ReplicaSets(frs.Namespace).Update(lrs)
				if err != nil {
					return err
				}
			}
			fedStatus.Replicas += lrs.Status.Replicas
			fedStatus.FullyLabeledReplicas += lrs.Status.FullyLabeledReplicas
			// Leave the replicaset in place even if its replicas dropped to 0.
		}
	}

	if fedStatus.Replicas != frs.Status.Replicas || fedStatus.FullyLabeledReplicas != frs.Status.FullyLabeledReplicas {
		frs.Status = fedStatus
		_, err = frsc.fedClient.Extensions().ReplicaSets(frs.Namespace).UpdateStatus(frs)
		if err != nil {
			return err
		}
	}

	return nil
}
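// A minimal sketch of how the status-returning reconcileReplicaSet shown earlier
// could be driven from a work-queue worker. The replicasetWorkQueue field (assumed
// to satisfy the standard workqueue interface with Get/Done/Add) and the requeue
// policy here are illustrative assumptions, not taken from this excerpt; the actual
// controller schedules retries through its own delivery helpers such as
// deliverReplicaSetByKey.
func (frsc *ReplicaSetController) worker() {
	for {
		item, quit := frsc.replicasetWorkQueue.Get()
		if quit {
			return
		}
		key := item.(string)
		status, err := frsc.reconcileReplicaSet(key)
		frsc.replicasetWorkQueue.Done(item)
		if err != nil {
			glog.Errorf("Error reconciling replicaset %q: %v", key, err)
		}
		if err != nil || status == statusNeedRecheck || status == statusNotSynced {
			// Requeue keys that failed or asked for another pass.
			frsc.deliverReplicaSetByKey(key, 0, false)
		}
	}
}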