func TestDefaultErrorFunc(t *testing.T) {
	grace := int64(30)
	testPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"},
		Spec: api.PodSpec{
			RestartPolicy:                 api.RestartPolicyAlways,
			DNSPolicy:                     api.DNSClusterFirst,
			TerminationGracePeriodSeconds: &grace,
		},
	}
	handler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: runtime.EncodeOrDie(latest.Codec, testPod),
		T:            t,
	}
	mux := http.NewServeMux()
	// FakeHandler mustn't be sent requests other than the one you want to test.
	mux.Handle(testapi.ResourcePath("pods", "bar", "foo"), &handler)
	server := httptest.NewServer(mux)
	defer server.Close()
	factory := NewConfigFactory(client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}), nil)
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	podBackoff := podBackoff{
		perPodBackoff:   map[string]*backoffEntry{},
		clock:           &fakeClock{},
		defaultDuration: 1 * time.Millisecond,
		maxDuration:     1 * time.Second,
	}
	errFunc := factory.makeDefaultErrorFunc(&podBackoff, queue)

	errFunc(testPod, nil)
	for {
		// This is a terrible way to do this but I plan on replacing this
		// whole error handling system in the future. The test will time
		// out if something doesn't work.
		time.Sleep(10 * time.Millisecond)
		got, exists, _ := queue.Get(testPod)
		if !exists {
			continue
		}
		handler.ValidateRequest(t, testapi.ResourcePath("pods", "bar", "foo"), "GET", nil)
		if e, a := testPod, got; !reflect.DeepEqual(e, a) {
			t.Errorf("Expected %v, got %v", e, a)
		}
		break
	}
}
func CreateRegistry(c RegistryConfig) Registry {
	metrics.Register()
	return &offerStorage{
		RegistryConfig: c,
		offers: cache.NewFIFO(cache.KeyFunc(func(v interface{}) (string, error) {
			perishable, ok := v.(Perishable)
			if !ok {
				return "", fmt.Errorf("expected perishable offer, not '%+v'", v)
			}
			return perishable.Id(), nil
		})),
		listeners: queue.NewDelayFIFO(),
		delayed:   queue.NewDelayQueue(),
		slaves:    newSlaveStorage(),
	}
}
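// The block below is an illustrative sketch, not part of the original source:
// it shows the same cache.KeyFunc pattern used by CreateRegistry, keying a
// FIFO by a domain identifier instead of namespace/name. The identifiable
// interface and fakeOffer type are hypothetical stand-ins for Perishable.
type identifiable interface {
	Id() string
}

type fakeOffer struct{ id string }

func (f *fakeOffer) Id() string { return f.id }

func exampleOfferFIFO() {
	// Key each queued object by its Id() rather than by MetaNamespaceKeyFunc.
	offers := cache.NewFIFO(cache.KeyFunc(func(v interface{}) (string, error) {
		obj, ok := v.(identifiable)
		if !ok {
			return "", fmt.Errorf("expected identifiable offer, not '%+v'", v)
		}
		return obj.Id(), nil
	}))
	offers.Add(&fakeOffer{id: "offer-1"})
	first := offers.Pop().(*fakeOffer) // Pop blocks until an item is queued
	_ = first
}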
func TestSchedulerRateLimitsBinding(t *testing.T) {
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{Store: scheduledPodStore}
	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{Store: queuedPodStore}
	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)
	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	// Rate limit to 1 pod
	fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
	c := &Config{
		Modeler: modeler,
		MinionLister: algorithm.FakeMinionLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder:    fakeBinder{func(b *api.Binding) error { return nil }},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder:            &record.FakeRecorder{},
		BindPodsRateLimiter: &fr,
	}

	s := New(c)
	firstPod := podWithID("foo", "")
	secondPod := podWithID("boo", "")
	queuedPodStore.Add(firstPod)
	queuedPodStore.Add(secondPod)

	for i, hitRateLimit := range []bool{true, false} {
		s.scheduleOne()
		if fr.acceptValues[i] != hitRateLimit {
			t.Errorf("Unexpected rate limiting: expected %v, got %v", hitRateLimit, fr.acceptValues[i])
		}
	}
}
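// Hedged sketch (an assumption, not from the source): a plausible shape for
// the FakeRateLimiter used above. It satisfies the rate-limiter contract the
// scheduler expects and records, on each Accept call, what the wrapped
// token-bucket limiter would have answered, so the test can assert on
// acceptValues. The method names mirror this era's util.RateLimiter interface
// and are themselves assumptions.
type FakeRateLimiter struct {
	r            util.RateLimiter
	acceptValues []bool
}

func (fr *FakeRateLimiter) CanAccept() bool { return true }

func (fr *FakeRateLimiter) Stop() {}

func (fr *FakeRateLimiter) Accept() {
	// Record what the real limiter would have done, without ever blocking.
	fr.acceptValues = append(fr.acceptValues, fr.r.CanAccept())
}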
// NewConfigFactory initializes the factory.
func NewConfigFactory(client *client.Client, rateLimiter util.RateLimiter) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister:       &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:    &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		StopEverything:   make(chan struct{}),
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{Store: c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	c.BindPodsRateLimiter = rateLimiter

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug-in functions that
	// they may need to call.
	c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
		c.createAssignedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if pod, ok := obj.(*api.Pod); ok {
					c.modeler.LockedAction(func() {
						c.modeler.ForgetPod(pod)
					})
				}
			},
			DeleteFunc: func(obj interface{}) {
				c.modeler.LockedAction(func() {
					switch t := obj.(type) {
					case *api.Pod:
						c.modeler.ForgetPod(t)
					case cache.DeletedFinalStateUnknown:
						c.modeler.ForgetPodByKey(t.Key)
					}
				})
			},
		},
	)
	return c
}
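// Illustrative usage sketch (not from the original source): wiring the factory
// into a scheduler. The Create() call is an assumption based on the
// CreateFromKeys reference in the comment above; the host URL and the
// rate-limiter parameters are placeholders.
func exampleNewConfigFactory() {
	kubeClient := client.NewOrDie(&client.Config{Host: "http://127.0.0.1:8080", Version: testapi.Version()})
	// Allow bursts of 20 binds, refilling at 15 binds/sec (placeholder values).
	factory := NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(15.0, 20))
	cfg, err := factory.Create() // assumed to build a scheduler Config from the default provider
	if err != nil {
		panic(err)
	}
	_ = cfg
}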
// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
// to close the reflector backing the watch. The caller is responsible for
// deferring a close on the channel to stop the reflector.
func (c *realScrubberClient) WatchPod(name, namespace, resourceVersion string, stopChannel chan struct{}) func() *api.Pod {
	fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)
	podLW := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return c.client.Pods(namespace).List(labels.Everything(), fieldSelector)
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return c.client.Pods(namespace).Watch(labels.Everything(), fieldSelector, resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &api.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)
	return func() *api.Pod {
		obj := queue.Pop()
		return obj.(*api.Pod)
	}
}
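// Illustrative usage sketch (not from the original source): the returned func
// blocks in queue.Pop() until the reflector delivers the next pod event, and
// closing stopChannel tears the reflector down. sc is assumed to be an
// already-initialized *realScrubberClient; the pod name and namespace are
// placeholders.
func exampleWatchPod(sc *realScrubberClient) {
	stop := make(chan struct{})
	defer close(stop) // stops the reflector backing the watch
	nextPod := sc.WatchPod("mypod", "default", "", stop)
	pod := nextPod() // blocks until the pod is listed or an update arrives
	fmt.Printf("observed pod %s/%s\n", pod.Namespace, pod.Name)
}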
func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()

	// Set up the modeler so we control the contents of all 3 stores: assumed,
	// scheduled and queued.
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{Store: scheduledPodStore}
	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{Store: queuedPodStore}
	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	// Create a fake clock used to timestamp entries and calculate ttl. Nothing
	// will expire until we advance the clock past the ttl, at which point all
	// entries inserted at fakeTime will expire.
	ttl := 30 * time.Second
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	fakeClock := &util.FakeClock{Time: fakeTime}
	ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
	assumedPodsStore := cache.NewFakeExpirationStore(
		cache.MetaNamespaceKeyFunc, nil, ttlPolicy, fakeClock)
	modeler.assumedPods = &cache.StoreToPodLister{Store: assumedPodsStore}

	// Port is the easiest way to cause a fit predicate failure.
	podPort := 8080
	firstPod := podWithPort("foo", "", podPort)

	// Create the scheduler config.
	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{"PodFitsPorts": predicates.PodFitsPorts},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	var gotBinding *api.Binding
	c := &Config{
		Modeler: modeler,
		MinionLister: algorithm.FakeMinionLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			scheduledPodStore.Add(podWithPort(b.Name, b.Target.Name, podPort))
			gotBinding = b
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
	}

	// First scheduling pass should schedule the pod.
	s := New(c)
	called := make(chan struct{})
	events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "Scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	queuedPodStore.Add(firstPod)
	// queuedPodStore: [foo:8080]
	// scheduledPodStore: []
	// assumedPods: []

	s.scheduleOne()
	// queuedPodStore: []
	// scheduledPodStore: [foo:8080]
	// assumedPods: [foo:8080]

	pod, exists, _ := scheduledPodStore.GetByKey("foo")
	if !exists {
		t.Errorf("Expected scheduled pod store to contain pod")
	}
	pod, exists, _ = queuedPodStore.GetByKey("foo")
	if exists {
		t.Errorf("Did not expect a queued pod, found %+v", pod)
	}
	pod, exists, _ = assumedPodsStore.GetByKey("foo")
	if !exists {
		t.Errorf("Assumed pod store should contain stale pod")
	}

	expectBind := &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}
	<-called
	events.Stop()

	scheduledPodStore.Delete(pod)
	_, exists, _ = assumedPodsStore.Get(pod)
	if !exists {
		t.Errorf("Expected pod %#v in assumed pod store", pod)
	}

	secondPod := podWithPort("bar", "", podPort)
	queuedPodStore.Add(secondPod)
	// queuedPodStore: [bar:8080]
	// scheduledPodStore: []
	// assumedPods: [foo:8080]

	// The second scheduling pass will fail to schedule if the store hasn't
	// expired the deleted pod. This would normally happen with a timeout.
	fakeClock.Time = fakeClock.Time.Add(ttl + 1)

	called = make(chan struct{})
	events = eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "Scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	s.scheduleOne()

	expectBind = &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "bar"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}
	<-called
	events.Stop()
}
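// Illustrative sketch (not from the original source) of the TTL-expiry
// mechanics the test above relies on, using the same FakeClock, TTLPolicy and
// NewFakeExpirationStore types: entries stamped at the fake time expire once
// the clock is advanced past the ttl.
func exampleTTLExpiry() {
	ttl := 30 * time.Second
	fakeClock := &util.FakeClock{Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)}
	store := cache.NewFakeExpirationStore(
		cache.MetaNamespaceKeyFunc, nil, &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}, fakeClock)

	store.Add(podWithPort("foo", "", 8080))
	if _, exists, _ := store.GetByKey("foo"); !exists {
		panic("entry should still be fresh")
	}

	fakeClock.Time = fakeClock.Time.Add(ttl + 1) // advance past the ttl
	if _, exists, _ := store.GetByKey("foo"); exists {
		panic("entry should have expired")
	}
}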