// queuedPodStore: pods queued before processing.
// cache: scheduler cache that might contain assumed pods.
func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulercache.Cache,
	nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate, pod *api.Pod, node *api.Node) (*Scheduler, chan *api.Binding, chan error) {

	scheduler, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap)

	queuedPodStore.Add(pod)
	// queuedPodStore: [foo:8080]
	// cache: []
	scheduler.scheduleOne()
	// queuedPodStore: []
	// cache: [(assumed)foo:8080]

	select {
	case b := <-bindingChan:
		expectBinding := &api.Binding{
			ObjectMeta: api.ObjectMeta{Name: pod.Name},
			Target:     api.ObjectReference{Kind: "Node", Name: node.Name},
		}
		if !reflect.DeepEqual(expectBinding, b) {
			t.Errorf("binding want=%v, got=%v", expectBinding, b)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
	return scheduler, bindingChan, errChan
}
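// Hedged usage sketch (not from the original file; makeHostPortPod, the test
// name, and the setup values are illustrative assumptions): a test can use
// the helper above to get a scheduler whose cache holds an assumed pod on the
// node, then schedule a second pod requesting the same host port and expect a
// fit error on errChan.
func makeHostPortPod(name string, port int32) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Ports: []api.ContainerPort{{HostPort: port}},
			}},
		},
	}
}

func TestAssumedPodBlocksHostPort(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := schedulercache.New(10*time.Minute, stop)
	node := &api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}}
	scache.AddNode(node)
	nodeLister := algorithm.FakeNodeLister([]*api.Node{node})
	predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}

	scheduler, _, errChan := setupTestSchedulerWithOnePodOnNode(
		t, queuedPodStore, scache, nodeLister, predicateMap, makeHostPortPod("foo", 8080), node)

	// The assumed pod still occupies port 8080, so this pod should not fit.
	queuedPodStore.Add(makeHostPortPod("bar", 8080))
	scheduler.scheduleOne()
	select {
	case err := <-errChan:
		// Anything on errChan means the second pod failed scheduling,
		// which is the behavior under test.
		t.Logf("got expected scheduling error: %v", err)
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout waiting for scheduling error")
	}
}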
func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *api.Pod, err error) {
	return func(pod *api.Pod, err error) {
		if err == scheduler.ErrNoNodesAvailable {
			glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
		} else {
			glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err)
		}
		backoff.gc()
		// Retry asynchronously.
		// Note that this is extremely rudimentary; a more realistic error-handling path is needed.
		go func() {
			defer util.HandleCrash()
			// Note: backoff here is keyed by pod name only; the v1 variant later
			// in this section keys by namespace+name instead.
			podID := pod.Name
			podNamespace := pod.Namespace
			backoff.wait(podID)
			// Get the pod again; it may have changed/been scheduled already.
			pod = &api.Pod{}
			err := factory.Client.Get().Namespace(podNamespace).Resource("pods").Name(podID).Do().Into(pod)
			if err != nil {
				if !errors.IsNotFound(err) {
					glog.Errorf("Error getting pod %v for retry: %v; abandoning", podID, err)
				}
				return
			}
			if pod.Spec.NodeName == "" {
				podQueue.Add(pod)
			}
		}()
	}
}
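// A minimal sketch of the per-pod backoff semantics the closure above relies
// on (illustrative only; the real podBackoff type is not shown in this
// excerpt, and these names, bounds, and the constructor are assumptions):
// wait blocks the calling goroutine for an exponentially growing per-key
// duration, and gc would evict entries that have been idle for too long.
type sketchBackoff struct {
	mu        sync.Mutex
	durations map[string]time.Duration
	max       time.Duration
}

func newSketchBackoff(max time.Duration) *sketchBackoff {
	return &sketchBackoff{durations: map[string]time.Duration{}, max: max}
}

func (b *sketchBackoff) wait(key string) {
	b.mu.Lock()
	d, ok := b.durations[key]
	if !ok {
		d = time.Second
	}
	next := 2 * d
	if next > b.max {
		next = b.max
	}
	b.durations[key] = next
	b.mu.Unlock()
	time.Sleep(d) // block the retry goroutine before the pod is re-fetched
}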
func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, nodeLister algorithm.FakeNodeLister,
	predicateMap map[string]algorithm.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {

	algo := NewGenericScheduler(
		scache,
		predicateMap,
		algorithm.EmptyMetadataProducer,
		[]algorithm.PriorityConfig{},
		algorithm.EmptyMetadataProducer,
		[]algorithm.SchedulerExtender{})
	bindingChan := make(chan *v1.Binding, 2)

	cfg := &Config{
		SchedulerCache: scache,
		NodeLister:     nodeLister,
		Algorithm:      algo,
		Binder: fakeBinder{func(b *v1.Binding) error {
			time.Sleep(bindingTime)
			bindingChan <- b
			return nil
		}},
		NextPod: func() *v1.Pod {
			return clientcache.Pop(queuedPodStore).(*v1.Pod)
		},
		Error: func(p *v1.Pod, err error) {
			queuedPodStore.AddIfNotPresent(p)
		},
		Recorder:            &record.FakeRecorder{},
		PodConditionUpdater: fakePodConditionUpdater{},
		StopEverything:      stop,
	}

	return New(cfg), bindingChan
}
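// Hedged usage sketch for the helper above (setup values and the expected
// binding count are illustrative assumptions): make the binder slower than
// the cache's assumed-pod TTL, run the scheduler, and count how many bindings
// arrive. Because the Error func re-queues the pod, the right assertion
// depends on how the TTL and the binding time interact.
stop := make(chan struct{})
defer close(stop)
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
scache := schedulercache.New(100*time.Millisecond, stop) // short assumed-pod TTL
node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1"}}
scache.AddNode(node)
nodeLister := algorithm.FakeNodeLister([]*v1.Node{node})
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}

scheduler, bindingChan := setupTestSchedulerLongBindingWithRetry(
	queuedPodStore, scache, nodeLister, predicateMap, stop, time.Second)
go scheduler.Run()
queuedPodStore.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})

bindings := 0
deadline := time.After(3 * time.Second)
for done := false; !done; {
	select {
	case <-bindingChan:
		bindings++
	case <-deadline:
		done = true
	}
}
// A test built on this helper would now assert on `bindings`, e.g. that a
// slow binding does not cause the same pod to be bound more than once.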
// queuedPodStore: pods queued before processing.
// cache: scheduler cache that might contain assumed pods.
func setupTestSchedulerWithOnePod(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, pod *api.Pod) (*Scheduler, chan *api.Binding, chan error) {
	// Create the scheduler config.
	algo := NewGenericScheduler(
		scache,
		map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts},
		[]algorithm.PriorityConfig{},
		[]algorithm.SchedulerExtender{})
	bindingChan := make(chan *api.Binding, 1)
	errChan := make(chan error, 1)
	cfg := &Config{
		SchedulerCache: scache,
		NodeLister: algorithm.FakeNodeLister(
			[]*api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			bindingChan <- b
			return nil
		}},
		NextPod: func() *api.Pod {
			return clientcache.Pop(queuedPodStore).(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			errChan <- err
		},
		Recorder:            &record.FakeRecorder{},
		PodConditionUpdater: fakePodConditionUpdater{},
	}
	scheduler := New(cfg)

	queuedPodStore.Add(pod)
	// queuedPodStore: [foo:8080]
	// cache: []
	scheduler.scheduleOne()
	// queuedPodStore: []
	// cache: [(assumed)foo:8080]

	select {
	case b := <-bindingChan:
		expectBinding := &api.Binding{
			ObjectMeta: api.ObjectMeta{Name: pod.Name},
			Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
		}
		if !reflect.DeepEqual(expectBinding, b) {
			t.Errorf("binding want=%v, got=%v", expectBinding, b)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
	return scheduler, bindingChan, errChan
}
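// Hedged follow-on sketch (the TTL, the sleep, and the makeHostPortPod helper
// from the earlier sketch are assumptions): once the assumed pod expires from
// the cache, a pod that would have conflicted with it should bind
// successfully, i.e. no "phantom" pod lingers after expiration.
scheduler, bindingChan, _ := setupTestSchedulerWithOnePod(t, queuedPodStore, scache, makeHostPortPod("foo", 8080))

// Assumes scache was built with a short TTL, e.g. schedulercache.New(100*time.Millisecond, stop).
time.Sleep(200 * time.Millisecond)

queuedPodStore.Add(makeHostPortPod("bar", 8080))
scheduler.scheduleOne()
select {
case b := <-bindingChan:
	expectBinding := &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "bar"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if !reflect.DeepEqual(expectBinding, b) {
		t.Errorf("binding want=%v, got=%v", expectBinding, b)
	}
case <-time.After(wait.ForeverTestTimeout):
	t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
}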
func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *v1.Pod, err error) {
	return func(pod *v1.Pod, err error) {
		if err == scheduler.ErrNoNodesAvailable {
			glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
		} else {
			glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err)
		}
		backoff.gc()
		// Retry asynchronously.
		// Note that this is extremely rudimentary; a more realistic error-handling path is needed.
		go func() {
			defer runtime.HandleCrash()
			podID := types.NamespacedName{
				Namespace: pod.Namespace,
				Name:      pod.Name,
			}

			entry := backoff.getEntry(podID)
			if !entry.TryWait(backoff.maxDuration) {
				glog.Warningf("Request for pod %v already in flight, abandoning", podID)
				return
			}

			// Get the pod again; it may have changed/been scheduled already.
			getBackoff := initialGetBackoff
			for {
				pod, err := factory.Client.Core().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{})
				if err == nil {
					if len(pod.Spec.NodeName) == 0 {
						podQueue.AddIfNotPresent(pod)
					}
					break
				}
				if errors.IsNotFound(err) {
					glog.Warningf("Pod %v no longer exists", podID)
					return
				}
				glog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err)
				if getBackoff = getBackoff * 2; getBackoff > maximalGetBackoff {
					getBackoff = maximalGetBackoff
				}
				time.Sleep(getBackoff)
			}
		}()
	}
}
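// The retry loop above references package-level GET backoff bounds that are
// not shown in this excerpt. A plausible definition (the exact values are an
// assumption, not taken from this code):
const (
	initialGetBackoff = 100 * time.Millisecond
	maximalGetBackoff = time.Minute
)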