Example #1
func CreateRegistry(c RegistryConfig) Registry {
	metrics.Register()
	return &offerStorage{
		RegistryConfig: c,
		offers: cache.NewFIFO(cache.KeyFunc(func(v interface{}) (string, error) {
			perishable, ok := v.(Perishable)
			if !ok {
				return "", fmt.Errorf("expected perishable offer, not '%+v'", v)
			}
			return perishable.Id(), nil
		})),
		listeners: queue.NewDelayFIFO(),
		delayed:   queue.NewDelayQueue(),
		slaves:    newSlaveStorage(),
	}
}
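The KeyFunc passed to cache.NewFIFO above relies on a Perishable interface that this snippet does not define. Judging from the single call to Id(), a minimal sketch could look like the following; the real interface in the offers package presumably carries expiry-related methods as well, which is an assumption here.

// Minimal sketch of the interface the KeyFunc above depends on; the
// real Perishable type likely has more methods than this.
type Perishable interface {
	// Id returns the unique identifier used as the FIFO cache key.
	Id() string
}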
Example #2
func TestSchedulerRateLimitsBinding(t *testing.T) {
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{scheduledPodStore}
	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{queuedPodStore}
	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	// Rate limit to 1 pod: 0.02 QPS with a burst of 1, so the token bucket
	// admits the first bind immediately and throttles the second.
	fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
	c := &Config{
		Modeler: modeler,
		MinionLister: algorithm.FakeMinionLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder:            &record.FakeRecorder{},
		BindPodsRateLimiter: &fr,
	}

	s := New(c)
	firstPod := podWithID("foo", "")
	secondPod := podWithID("boo", "")
	queuedPodStore.Add(firstPod)
	queuedPodStore.Add(secondPod)

	for i, hitRateLimit := range []bool{true, false} {
		s.scheduleOne()
		if fr.acceptValues[i] != hitRateLimit {
			t.Errorf("Unexpected rate limiting, expect rate limit to be: %v but found it was %v", hitRateLimit, fr.acceptValues[i])
		}
	}
}
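FakeRateLimiter is defined elsewhere in the test file. Reconstructed from its usage above (a wrapped token-bucket limiter plus an acceptValues slice that records one bool per scheduling pass), a plausible sketch follows; the method set is assumed to match the util.RateLimiter interface of that era:

// Hypothetical reconstruction: never blocks the scheduler, but records
// whether the wrapped token bucket would have admitted each bind, which
// is what the test asserts against.
type FakeRateLimiter struct {
	r            util.RateLimiter
	acceptValues []bool
}

func (fr *FakeRateLimiter) CanAccept() bool { return true }

func (fr *FakeRateLimiter) Accept() {
	fr.acceptValues = append(fr.acceptValues, fr.r.CanAccept())
}

func (fr *FakeRateLimiter) Stop() {}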
Example #3
// NewConfigFactory initializes a ConfigFactory.
func NewConfigFactory(client *client.Client) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister:     &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:  &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		StopEverything: make(chan struct{}),
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	c.BindPodsRateLimiter = util.NewTokenBucketRateLimiter(BindPodsQps, BindPodsBurst)

	// On add or delete of a scheduled pod, remove it from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plugged-in functions
	// that may need to call it.
	c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
		c.createAssignedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if pod, ok := obj.(*api.Pod); ok {
					c.modeler.LockedAction(func() {
						c.modeler.ForgetPod(pod)
					})
				}
			},
			DeleteFunc: func(obj interface{}) {
				c.modeler.LockedAction(func() {
					switch t := obj.(type) {
					case *api.Pod:
						c.modeler.ForgetPod(t)
					case cache.DeletedFinalStateUnknown:
						c.modeler.ForgetPodByKey(t.Key)
					}
				})
			},
		},
	)

	return c
}
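The DeleteFunc above has to handle two shapes of notification: the pod itself, or a cache.DeletedFinalStateUnknown tombstone that the informer delivers when the watch missed the actual delete event and only the last known key (and possibly a stale copy of the object) survives. Below is a standalone sketch of the same unwrapping pattern; the helper name is hypothetical.

// podFromDeleteEvent unwraps a delete notification into the pod itself,
// or the cache key left behind on a tombstone when the event was missed.
func podFromDeleteEvent(obj interface{}) (*api.Pod, string) {
	switch t := obj.(type) {
	case *api.Pod:
		return t, ""
	case cache.DeletedFinalStateUnknown:
		// Only the last known key is reliable; t.Obj may be stale or nil.
		return nil, t.Key
	}
	return nil, ""
}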
Example #4
// WatchPod watches a single pod and returns a function that pops successive
// versions of it from an internal FIFO. The stopChannel stops the reflector
// backing the watch; the caller is responsible for deferring a close on the
// channel.
func (c *realScrubberClient) WatchPod(name, namespace, resourceVersion string, stopChannel chan struct{}) func() *api.Pod {
	fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)

	podLW := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return c.client.Pods(namespace).List(labels.Everything(), fieldSelector)
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return c.client.Pods(namespace).Watch(labels.Everything(), fieldSelector, resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &api.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)

	return func() *api.Pod {
		obj := queue.Pop()
		return obj.(*api.Pod)
	}
}
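A caller might consume WatchPod along these lines (a hypothetical sketch; the scrubberClient interface, the helper name, the pod name, and the termination condition are all assumptions):

// Hypothetical usage: block for successive versions of one pod until it
// terminates; closing stop tears down the backing reflector.
func waitForPodCompletion(sc scrubberClient) {
	stop := make(chan struct{})
	defer close(stop)

	nextPod := sc.WatchPod("my-pod", api.NamespaceDefault, "", stop)
	for {
		pod := nextPod() // blocks until the reflector delivers a new version
		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
			return
		}
	}
}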
Example #5
func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()

	// Set up the modeler so we control the contents of all three stores:
	// assumed, scheduled, and queued.
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{scheduledPodStore}

	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{queuedPodStore}

	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	// Create a fake clock used to timestamp entries and calculate TTL. Nothing
	// will expire until we advance the clock past the TTL, at which point all
	// entries inserted at fakeTime will expire.
	ttl := 30 * time.Second
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	fakeClock := &util.FakeClock{fakeTime}
	ttlPolicy := &cache.TTLPolicy{ttl, fakeClock}
	assumedPodsStore := cache.NewFakeExpirationStore(
		cache.MetaNamespaceKeyFunc, nil, ttlPolicy, fakeClock)
	modeler.assumedPods = &cache.StoreToPodLister{assumedPodsStore}

	// Port is the easiest way to cause a fit predicate failure
	podPort := 8080
	firstPod := podWithPort("foo", "", podPort)

	// Create the scheduler config
	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{"PodFitsPorts": predicates.PodFitsPorts},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	var gotBinding *api.Binding
	c := &Config{
		Modeler: modeler,
		MinionLister: algorithm.FakeMinionLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			scheduledPodStore.Add(podWithPort(b.Name, b.Target.Name, podPort))
			gotBinding = b
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
	}

	// First scheduling pass should schedule the pod
	s := New(c)
	called := make(chan struct{})
	events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	queuedPodStore.Add(firstPod)
	// queuedPodStore: [foo:8080]
	// scheduledPodStore: []
	// assumedPods: []

	s.scheduleOne()
	// queuedPodStore: []
	// scheduledPodStore: [foo:8080]
	// assumedPods: [foo:8080]

	pod, exists, _ := scheduledPodStore.GetByKey("foo")
	if !exists {
		t.Errorf("Expected scheduled pod store to contain pod")
	}
	pod, exists, _ = queuedPodStore.GetByKey("foo")
	if exists {
		t.Errorf("Did not expect a queued pod, found %+v", pod)
	}
	pod, exists, _ = assumedPodsStore.GetByKey("foo")
	if !exists {
		t.Errorf("Assumed pod store should contain stale pod")
	}

	expectBind := &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}

	<-called
	events.Stop()

	scheduledPodStore.Delete(pod)
	_, exists, _ = assumedPodsStore.Get(pod)
	if !exists {
		t.Errorf("Expected pod %#v in assumed pod store", pod)
	}

	secondPod := podWithPort("bar", "", podPort)
	queuedPodStore.Add(secondPod)
	// queuedPodStore: [bar:8080]
	// scheduledPodStore: []
	// assumedPods: [foo:8080]

	// The second scheduling pass will fail if the store hasn't expired the
	// deleted pod. In production that happens once the TTL elapses; here we
	// advance the fake clock past it.
	fakeClock.Time = fakeClock.Time.Add(ttl + 1)

	called = make(chan struct{})
	events = eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	s.scheduleOne()

	expectBind = &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "bar"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}
	<-called
	events.Stop()
}
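Both tests lean on podWithID and podWithPort helpers defined elsewhere in the test file. Reconstructed from their call sites (an ID, a desired host, and for podWithPort a single host port so that two such pods collide on PodFitsPorts), plausible sketches follow; the PodSpec field names track the API of that era and are assumptions:

// Hypothetical reconstructions of the test helpers used above.
func podWithID(id, desiredHost string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: id},
		Spec:       api.PodSpec{Host: desiredHost},
	}
}

func podWithPort(id, desiredHost string, port int) *api.Pod {
	pod := podWithID(id, desiredHost)
	// One container claiming a single host port is enough for the
	// PodFitsPorts predicate to reject a second pod using the same port.
	pod.Spec.Containers = []api.Container{
		{Name: id, Ports: []api.ContainerPort{{HostPort: port}}},
	}
	return pod
}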