// manageReplicas checks and updates replicas for the given replication controller.
func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) {
	diff := len(filteredPods) - rc.Spec.Replicas
	rcKey, err := controller.KeyFunc(rc)
	if err != nil {
		glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)
		return
	}
	if diff < 0 {
		diff *= -1
		if diff > rm.burstReplicas {
			diff = rm.burstReplicas
		}
		rm.expectations.ExpectCreations(rcKey, diff)
		wait := sync.WaitGroup{}
		wait.Add(diff)
		glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				if err := rm.podControl.CreatePods(rc.Namespace, rc.Spec.Template, rc); err != nil {
					// Decrement the expected number of creates because the informer won't observe this pod
					glog.V(2).Infof("Failed creation, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name)
					rm.expectations.CreationObserved(rcKey)
					util.HandleError(err)
				}
			}()
		}
		wait.Wait()
	} else if diff > 0 {
		if diff > rm.burstReplicas {
			diff = rm.burstReplicas
		}
		rm.expectations.ExpectDeletions(rcKey, diff)
		glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
		// No need to sort pods if we are about to delete all of them
		if rc.Spec.Replicas != 0 {
			// Sort the pods in the order such that not-ready < ready, unscheduled
			// < scheduled, and pending < running. This ensures that we delete pods
			// in the earlier stages whenever possible.
			sort.Sort(controller.ActivePods(filteredPods))
		}
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				if err := rm.podControl.DeletePod(rc.Namespace, filteredPods[ix].Name); err != nil {
					// Decrement the expected number of deletes because the informer won't observe this deletion
					glog.V(2).Infof("Failed deletion, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name)
					rm.expectations.DeletionObserved(rcKey)
				}
			}(i)
		}
		wait.Wait()
	}
}
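The expectation bookkeeping above is the subtle part: before touching the API, the controller records how many creates (or deletes) it expects the shared informer to report back, and on a failed API call it immediately decrements that count, because a pod that was never created will never produce a watch event. The sketch below illustrates that pattern with a hypothetical in-memory counter; expectationCounter and fakeCreatePod are made up for illustration and are not the real controller.ControllerExpectations type or its API.

package main

import (
	"fmt"
	"sync"
)

// expectationCounter is a hypothetical stand-in for the controller's
// expectations store: a pending-event count per controller key.
type expectationCounter struct {
	mu      sync.Mutex
	pending map[string]int
}

// Expect records that n create (or delete) events are still expected for key.
func (e *expectationCounter) Expect(key string, n int) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.pending[key] += n
}

// Observed lowers the pending count by one, either because the informer saw
// the event or because the operation failed and will never be observed.
func (e *expectationCounter) Observed(key string) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.pending[key]--
}

// Satisfied reports whether all expected events have been accounted for,
// i.e. whether the controller could safely run another sync for key.
func (e *expectationCounter) Satisfied(key string) bool {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.pending[key] <= 0
}

// fakeCreatePod simulates the API call; the second create fails.
func fakeCreatePod(ix int) error {
	if ix == 1 {
		return fmt.Errorf("create failed for pod %d", ix)
	}
	return nil
}

func main() {
	exp := &expectationCounter{pending: map[string]int{}}
	key, diff := "default/frontend", 3

	exp.Expect(key, diff) // expect 3 creates before issuing any of them

	var wg sync.WaitGroup
	wg.Add(diff)
	for i := 0; i < diff; i++ {
		go func(ix int) {
			defer wg.Done()
			if err := fakeCreatePod(ix); err != nil {
				// The informer will never see this pod, so account for it now.
				exp.Observed(key)
			}
		}(i)
	}
	wg.Wait()

	// Two creates succeeded, so two informer events are still outstanding.
	fmt.Println("expectations satisfied:", exp.Satisfied(key)) // false
}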
// manageJob is the analogous pod management loop in the job controller: it
// creates or deletes pods so that the number of active pods matches the job's
// parallelism, and returns the updated active count.
func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *extensions.Job) int {
	var activeLock sync.Mutex
	active := len(activePods)
	parallelism := *job.Spec.Parallelism
	jobKey, err := controller.KeyFunc(job)
	if err != nil {
		glog.Errorf("Couldn't get key for job %#v: %v", job, err)
		return 0
	}
	if active > parallelism {
		diff := active - parallelism
		jm.expectations.ExpectDeletions(jobKey, diff)
		glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff)
		// Sort the pods in the order such that not-ready < ready, unscheduled
		// < scheduled, and pending < running. This ensures that we delete pods
		// in the earlier stages whenever possible.
		sort.Sort(controller.ActivePods(activePods))

		active -= diff
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name); err != nil {
					defer util.HandleError(err)
					// Decrement the expected number of deletes because the informer won't observe this deletion
					jm.expectations.DeletionObserved(jobKey)
					activeLock.Lock()
					active++
					activeLock.Unlock()
				}
			}(i)
		}
		wait.Wait()
	} else if active < parallelism {
		// how many executions are left to run
		diff := *job.Spec.Completions - succeeded
		// limit to parallelism and count active pods as well
		if diff > parallelism {
			diff = parallelism
		}
		diff -= active
		jm.expectations.ExpectCreations(jobKey, diff)
		glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, parallelism, diff)

		active += diff
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				if err := jm.podControl.CreatePods(job.Namespace, &job.Spec.Template, job); err != nil {
					defer util.HandleError(err)
					// Decrement the expected number of creates because the informer won't observe this pod
					jm.expectations.CreationObserved(jobKey)
					activeLock.Lock()
					active--
					activeLock.Unlock()
				}
			}()
		}
		wait.Wait()
	}
	return active
}
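The create branch sizes its batch differently from the replication controller: the number of pods to start is the remaining completions, capped at parallelism, minus the pods that are already active. A minimal sketch of that calculation follows; podsToCreate is a hypothetical helper written only for this illustration (the real controller computes the value inline and does not clamp it at zero).

// podsToCreate mirrors the create-branch arithmetic in manageJob.
func podsToCreate(completions, succeeded, parallelism, active int) int {
	diff := completions - succeeded // executions still needed
	if diff > parallelism {
		diff = parallelism // never run more than parallelism pods at once
	}
	diff -= active // pods already running count toward the cap
	if diff < 0 {
		diff = 0 // clamp added in this sketch only
	}
	return diff
}

For example, with completions=10, succeeded=4, parallelism=3 and active=1, six executions remain, the parallelism cap reduces that to 3, and subtracting the one pod already running leaves 2 pods to create.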