Example #1
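// queuedPodStore: pods queued before processing.
// scache: scheduler cache that might contain assumed pods.
// bindingTime: how long the fake binder sleeps before each binding is
// delivered on the returned channel.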
func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {
	algo := NewGenericScheduler(
		scache,
		predicateMap,
		algorithm.EmptyMetadataProducer,
		[]algorithm.PriorityConfig{},
		algorithm.EmptyMetadataProducer,
		[]algorithm.SchedulerExtender{})
	bindingChan := make(chan *v1.Binding, 2)
	cfg := &Config{
		SchedulerCache: scache,
		NodeLister:     nodeLister,
		Algorithm:      algo,
		Binder: fakeBinder{func(b *v1.Binding) error {
			time.Sleep(bindingTime)
			bindingChan <- b
			return nil
		}},
		NextPod: func() *v1.Pod {
			return clientcache.Pop(queuedPodStore).(*v1.Pod)
		},
		Error: func(p *v1.Pod, err error) {
			queuedPodStore.AddIfNotPresent(p)
		},
		Recorder:            &record.FakeRecorder{},
		PodConditionUpdater: fakePodConditionUpdater{},
		StopEverything:      stop,
	}
	return New(cfg), bindingChan
}
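A minimal sketch of how a test might drive the helper above; exampleLongBindingFlow is illustrative (not part of the original package) and only uses the helper's return values plus a pod to queue:

func exampleLongBindingFlow(t *testing.T, s *Scheduler, queuedPodStore *clientcache.FIFO, bindingChan chan *v1.Binding, pod *v1.Pod) {
	queuedPodStore.Add(pod)
	// The fake binder sleeps for bindingTime before publishing, so run the
	// scheduling pass in the background and wait with a generous timeout.
	go s.scheduleOne()
	select {
	case b := <-bindingChan:
		t.Logf("pod %q bound to node %q", b.Name, b.Target.Name)
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatal("timed out waiting for binding")
	}
}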
Example #2
// queuedPodStore: pods queued before processing.
// scache: scheduler cache that might contain assumed pods.
func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate) (*Scheduler, chan *api.Binding, chan error) {
	algo := NewGenericScheduler(
		scache,
		predicateMap,
		algorithm.EmptyMetadataProducer,
		[]algorithm.PriorityConfig{},
		algorithm.EmptyMetadataProducer,
		[]algorithm.SchedulerExtender{})
	bindingChan := make(chan *api.Binding, 1)
	errChan := make(chan error, 1)
	cfg := &Config{
		SchedulerCache: scache,
		NodeLister:     nodeLister,
		Algorithm:      algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			bindingChan <- b
			return nil
		}},
		NextPod: func() *api.Pod {
			return clientcache.Pop(queuedPodStore).(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			errChan <- err
		},
		Recorder:            &record.FakeRecorder{},
		PodConditionUpdater: fakePodConditionUpdater{},
	}
	return New(cfg), bindingChan, errChan
}
Example #3
func (f *ConfigFactory) getNextPod() *v1.Pod {
	for {
		pod := cache.Pop(f.PodQueue).(*v1.Pod)
		if f.responsibleForPod(pod) {
			glog.V(4).Infof("About to try and schedule pod %v", pod.Name)
			return pod
		}
	}
}
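Pop blocks until the queue has an item, which is why getNextPod can loop like this without busy-waiting on an empty queue. A self-contained sketch of that blocking behavior, assuming the modern k8s.io/client-go/tools/cache import path (the item type and key function are illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

type item struct {
	id    string
	value int
}

func main() {
	// The key function tells the FIFO how to identify (and de-duplicate) items.
	keyFunc := func(obj interface{}) (string, error) { return obj.(item).id, nil }
	fifo := cache.NewFIFO(keyFunc)

	go fifo.Add(item{id: "a", value: 1}) // Pop below blocks until this Add lands

	popped := cache.Pop(fifo).(item) // blocks while the queue is empty
	fmt.Println(popped.value)        // prints 1
}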
Example #4
// queuedPodStore: pods queued before processing.
// cache: scheduler cache that might contain assumed pods.
func setupTestSchedulerWithOnePod(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, pod *api.Pod) (*Scheduler, chan *api.Binding, chan error) {
	// Create the scheduler config
	algo := NewGenericScheduler(
		scache,
		map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts},
		[]algorithm.PriorityConfig{},
		[]algorithm.SchedulerExtender{})
	bindingChan := make(chan *api.Binding, 1)
	errChan := make(chan error, 1)
	cfg := &Config{
		SchedulerCache: scache,
		NodeLister: algorithm.FakeNodeLister(
			[]*api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			bindingChan <- b
			return nil
		}},
		NextPod: func() *api.Pod {
			return clientcache.Pop(queuedPodStore).(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			errChan <- err
		},
		Recorder:            &record.FakeRecorder{},
		PodConditionUpdater: fakePodConditionUpdater{},
	}
	scheduler := New(cfg)

	queuedPodStore.Add(pod)
	// queuedPodStore: [foo:8080]
	// cache: []

	scheduler.scheduleOne()
	// queuedPodStore: []
	// cache: [(assumed)foo:8080]

	select {
	case b := <-bindingChan:
		expectBinding := &api.Binding{
			ObjectMeta: api.ObjectMeta{Name: "pod.Name"},
			Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
		}
		if !reflect.DeepEqual(expectBinding, b) {
			t.Errorf("binding want=%v, get=%v", expectBinding, b)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
	return scheduler, bindingChan, errChan
}
Example #5
// This test ensures that when an asynchronous state update is received on
// the queue during failed event handling, the updated state is retried,
// NOT the event that failed (which is now stale).
func TestRetryController_realFifoEventOrdering(t *testing.T) {
	keyFunc := func(obj interface{}) (string, error) {
		return obj.(testObj).id, nil
	}

	fifo := kcache.NewFIFO(keyFunc)

	wg := sync.WaitGroup{}
	wg.Add(1)

	controller := &RetryController{
		Queue:        fifo,
		RetryManager: NewQueueRetryManager(fifo, keyFunc, func(_ interface{}, _ error, _ Retry) bool { return true }, flowcontrol.NewTokenBucketRateLimiter(1000, 10)),
		Handle: func(obj interface{}) error {
			if e, a := 1, obj.(testObj).value; e != a {
				t.Fatalf("expected to handle test value %d, got %d", e, a)
			}

			go func() {
				fifo.Add(testObj{"a", 2})
				wg.Done()
			}()
			wg.Wait()
			return fmt.Errorf("retryable error")
		},
	}

	fifo.Add(testObj{"a", 1})
	controller.handleOne(kcache.Pop(fifo))

	if e, a := 1, len(fifo.List()); e != a {
		t.Fatalf("expected queue length %d, got %d", e, a)
	}

	obj := kcache.Pop(fifo)
	if e, a := 2, obj.(testObj).value; e != a {
		t.Fatalf("expected queued value %d, got %d", e, a)
	}
}
Example #6
// WatchPod watches the named pod and returns a function that blocks until
// the next observed state of the pod is available. The stopChannel is used
// to close the reflector backing the watch; the caller is responsible for
// deferring a close on the channel to stop the reflector.
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) func() *api.Pod {
	fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)

	podLW := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			return c.client.Core().Pods(namespace).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			return c.client.Core().Pods(namespace).Watch(options)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &api.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)

	return func() *api.Pod {
		return cache.Pop(queue).(*api.Pod)
	}
}
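A hypothetical caller of WatchPod showing the intended lifecycle; waitForPodUpdate is illustrative, not part of the original code:

// waitForPodUpdate blocks until the reflector delivers the next observed
// state of the named pod, then shuts the reflector down.
func waitForPodUpdate(c *realRecyclerClient, name, namespace string) *api.Pod {
	stopChannel := make(chan struct{})
	defer close(stopChannel) // stops the reflector backing the watch
	getPod := c.WatchPod(name, namespace, stopChannel)
	return getPod()
}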
Example #7
// NewPodWatch creates a pod watching function which is backed by a
// FIFO/reflector pair. This avoids managing watches directly. The given
// stopChannel closes the watch's reflector; it is the caller's
// responsibility to defer closing it to prevent leaking resources.
func NewPodWatch(client kcoreclient.PodInterface, namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
	fieldSelector := fields.OneTermEqualSelector("metadata.name", name)
	podLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			return client.List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			return client.Watch(options)
		},
	}

	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &kapi.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)

	return func() *kapi.Pod {
		obj := cache.Pop(queue)
		return obj.(*kapi.Pod)
	}
}
Example #8
func (q queueWrapper) Pop() interface{} {
	return kcache.Pop(q.queue)
}
Example #9
func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
	// Set up a channel through which we'll funnel log messages from the watcher.
	// This way, we can guarantee that when the test ends no thread will still be
	// trying to write to t.Logf (which it would if we handed t.Logf directly to
	// StartLogging).
	ch := make(chan string)
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case msg := <-ch:
				t.Log(msg)
			case <-done:
				return
			}
		}
	}()
	eventBroadcaster := record.NewBroadcaster()
	watcher := eventBroadcaster.StartLogging(func(format string, args ...interface{}) {
		ch <- fmt.Sprintf(format, args...)
	})
	defer func() {
		watcher.Stop()
		close(done)
		wg.Wait()
	}()

	// Set up stores to test the pod's workflow:
	// - queuedPodStore: pods queued before processing
	// - scheduledPodStore: pods that have a scheduling decision
	scheduledPodStore := clientcache.NewStore(clientcache.MetaNamespaceKeyFunc)
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)

	// Port is the easiest way to cause a fit predicate failure
	podPort := 8080
	firstPod := podWithPort("foo", "", podPort)

	stop := make(chan struct{})
	defer close(stop)
	cache := schedulercache.New(1*time.Second, stop)
	// Create the scheduler config
	algo := NewGenericScheduler(
		cache,
		map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts},
		[]algorithm.PriorityConfig{},
		[]algorithm.SchedulerExtender{})

	var gotBinding *api.Binding
	c := &Config{
		SchedulerCache: cache,
		NodeLister: algorithm.FakeNodeLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			scheduledPodStore.Add(podWithPort(b.Name, b.Target.Name, podPort))
			gotBinding = b
			return nil
		}},
		NextPod: func() *api.Pod {
			return clientcache.Pop(queuedPodStore).(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
	}

	// First scheduling pass should schedule the pod
	s := New(c)
	called := make(chan struct{})
	events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "Scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	queuedPodStore.Add(firstPod)
	// queuedPodStore: [foo:8080]
	// scheduledPodStore: []
	// assumedPods: []

	s.scheduleOne()
	<-called
	// queuedPodStore: []
	// scheduledPodStore: [foo:8080]
	// assumedPods: [foo:8080]

	pod, exists, _ := scheduledPodStore.GetByKey("foo")
	if !exists {
		t.Errorf("Expected scheduled pod store to contain pod")
	}
	pod, exists, _ = queuedPodStore.GetByKey("foo")
	if exists {
		t.Errorf("Did not expect a queued pod, found %+v", pod)
	}

	expectBind := &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", diff.ObjectDiff(ex, ac))
	}

	events.Stop()

	scheduledPodStore.Delete(scheduledPod)

	secondPod := podWithPort("bar", "", podPort)
	queuedPodStore.Add(secondPod)
	// queuedPodStore: [bar:8080]
	// scheduledPodStore: []
	// assumedPods: [foo:8080]

	var waitUntilExpired sync.WaitGroup
	waitUntilExpired.Add(1)
	// waiting for the assumed pod to expire
	go func() {
		defer waitUntilExpired.Done()
		for {
			pods, err := cache.List(labels.Everything())
			if err != nil {
				// t.Fatalf must not be called from a non-test goroutine;
				// report the error and let Wait proceed instead.
				t.Errorf("cache.List failed: %v", err)
				return
			}
			if len(pods) == 0 {
				return
			}
			time.Sleep(1 * time.Second)
		}
	}()
	waitUntilExpired.Wait()

	// The second scheduling pass would fail to schedule if the cache still
	// held the deleted pod as assumed; that expiration normally happens via
	// the cache's TTL, which the loop above waited out.

	called = make(chan struct{})
	events = eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "Scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	s.scheduleOne()
	<-called

	expectBind = &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "bar"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", diff.ObjectDiff(ex, ac))
	}
	events.Stop()
}
Example #10
// RunUntil begins processing resources from the queue asynchronously until
// stopCh is closed.
func (rlf *RateLimitedFunction) RunUntil(stopCh <-chan struct{}) {
	go utilwait.Until(func() { rlf.handleOne(kcache.Pop(rlf.queue)) }, 0, stopCh)
}
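A hypothetical caller, assuming an already-constructed *RateLimitedFunction; closing the channel is what ends the background loop:

func exampleRunUntil(rlf *RateLimitedFunction) {
	stop := make(chan struct{})
	rlf.RunUntil(stop) // items are popped and handled on a background goroutine
	// ... enqueue work and let it drain ...
	close(stop) // utilwait.Until returns once stop is closed, ending processing
}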