func TestBackoffGC(t *testing.T) { id := "_idGC" tc := clock.NewFakeClock(time.Now()) step := time.Second maxDuration := 5 * step b := NewFakeBackOff(step, maxDuration, tc) for i := 0; i <= int(maxDuration/step); i++ { tc.Step(step) b.Next(id, tc.Now()) } lastUpdate := tc.Now() tc.Step(maxDuration + step) b.GC() _, found := b.perItemBackoff[id] if !found { t.Errorf("expected GC to skip entry, elapsed time=%s maxDuration=%s", tc.Now().Sub(lastUpdate), maxDuration) } tc.Step(maxDuration + step) b.GC() r, found := b.perItemBackoff[id] if found { t.Errorf("expected GC of entry after %s got entry %v", tc.Now().Sub(lastUpdate), r) } }
func getTestSQ(startThreads bool, config *github_util.Config, server *httptest.Server) *SubmitQueue { // TODO: Remove this line when we fix the plumbing regarding the fake/real e2e tester. admin.Mux = admin.NewConcurrentMux() sq := new(SubmitQueue) sq.GateApproved = true sq.RequiredStatusContexts = []string{notRequiredReTestContext1, notRequiredReTestContext2} sq.RequiredRetestContexts = []string{requiredReTestContext1, requiredReTestContext2} sq.BlockingJobNames = []string{"foo"} sq.WeakStableJobNames = []string{"bar"} sq.githubE2EQueue = map[int]*github_util.MungeObject{} sq.githubE2EPollTime = 50 * time.Millisecond sq.clock = utilclock.NewFakeClock(time.Time{}) sq.lastMergeTime = sq.clock.Now() sq.lastE2EStable = true sq.prStatus = map[string]submitStatus{} sq.lastPRStatus = map[string]submitStatus{} sq.lgtmTimeCache = mungerutil.NewLabelTimeCache(lgtmLabel) sq.startTime = sq.clock.Now() sq.healthHistory = make([]healthRecord, 0) sq.DoNotMergeMilestones = []string{doNotMergeMilestone} sq.e2e = &fake_e2e.FakeE2ETester{ JobNames: sq.BlockingJobNames, WeakStableJobNames: sq.WeakStableJobNames, } if startThreads { sq.internalInitialize(config, nil, server.URL) sq.EachLoop() } return sq }
func TestTTLPolicy(t *testing.T) { fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) ttl := 30 * time.Second exactlyOnTTL := fakeTime.Add(-ttl) expiredTime := fakeTime.Add(-(ttl + 1)) policy := TTLPolicy{ttl, clock.NewFakeClock(fakeTime)} fakeTimestampedEntry := &timestampedEntry{obj: struct{}{}, timestamp: exactlyOnTTL} if policy.IsExpired(fakeTimestampedEntry) { t.Errorf("TTL cache should not expire entries exactly on ttl") } fakeTimestampedEntry.timestamp = fakeTime if policy.IsExpired(fakeTimestampedEntry) { t.Errorf("TTL Cache should not expire entries before ttl") } fakeTimestampedEntry.timestamp = expiredTime if !policy.IsExpired(fakeTimestampedEntry) { t.Errorf("TTL Cache should expire entries older than ttl") } for _, ttl = range []time.Duration{0, -1} { policy.Ttl = ttl if policy.IsExpired(fakeTimestampedEntry) { t.Errorf("TTL policy should only expire entries when initialized with a ttl > 0") } } }
func TestBackoffReset(t *testing.T) { id := "_idReset" tc := clock.NewFakeClock(time.Now()) step := time.Second maxDuration := step * 5 b := NewFakeBackOff(step, maxDuration, tc) startTime := tc.Now() // get to backoff = maxDuration for i := 0; i <= int(maxDuration/step); i++ { tc.Step(step) b.Next(id, tc.Now()) } // backoff should be capped at maxDuration if !b.IsInBackOffSince(id, tc.Now()) { t.Errorf("expected to be in Backoff got %s", b.Get(id)) } lastUpdate := tc.Now() tc.Step(2*maxDuration + step) // time += 11s, 11 > 2*maxDuration if b.IsInBackOffSince(id, lastUpdate) { t.Errorf("expected to not be in Backoff after reset (start=%s, now=%s, lastUpdate=%s), got %s", startTime, tc.Now(), lastUpdate, b.Get(id)) } }
func TestExpirationBasic(t *testing.T) { unexpectedVal := "bar" expectedVal := "bar2" testObj := testObject{ key: "foo", val: unexpectedVal, } fakeClock := clock.NewFakeClock(time.Now()) objectCache := NewFakeObjectCache(func() (interface{}, error) { return expectedVal, nil }, 1*time.Second, fakeClock) err := objectCache.Add(testObj.key, testObj.val) if err != nil { t.Errorf("Unable to add obj %#v by key: %s", testObj, testObj.key) } // sleep 2s so cache should be expired. fakeClock.Sleep(2 * time.Second) value, err := objectCache.Get(testObj.key) if err != nil { t.Errorf("Unable to get obj %#v by key: %s", testObj, testObj.key) } if value.(string) != expectedVal { t.Errorf("Expected to get cached value: %#v, but got: %s", expectedVal, value.(string)) } }
func NewFakeRecorder() *FakeRecorder { return &FakeRecorder{ source: api.EventSource{Component: "nodeControllerTest"}, events: []*api.Event{}, clock: clock.NewFakeClock(time.Now()), } }
func TestSlowBackoff(t *testing.T) { id := "_idSlow" tc := clock.NewFakeClock(time.Now()) step := time.Second maxDuration := 50 * step b := NewFakeBackOff(step, maxDuration, tc) cases := []time.Duration{0, 1, 2, 4, 8, 16, 32, 50, 50, 50} for ix, c := range cases { tc.Step(step) w := b.Get(id) if w != c*step { t.Errorf("input: '%d': expected %s, got %s", ix, c*step, w) } b.Next(id, tc.Now()) } //Now confirm that the Reset cancels backoff. b.Next(id, tc.Now()) b.Reset(id) if b.Get(id) != 0 { t.Errorf("Reset didn't clear the backoff.") } }
// newTestWatchCache just adds a fake clock. func newTestWatchCache(capacity int) *watchCache { keyFunc := func(obj runtime.Object) (string, error) { return NamespaceKeyFunc("prefix", obj) } wc := newWatchCache(capacity, keyFunc) wc.clock = clock.NewFakeClock(time.Now()) return wc }
func newTestBasicWorkQueue() (*basicWorkQueue, *clock.FakeClock) { fakeClock := clock.NewFakeClock(time.Now()) wq := &basicWorkQueue{ clock: fakeClock, queue: make(map[types.UID]time.Time), } return wq, fakeClock }
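// A minimal, self-contained sketch (not taken from the sources above) of the pattern that helpers
// such as newTestBasicWorkQueue rely on: hand the *clock.FakeClock back to the test so it can drive
// time deterministically with Step instead of sleeping. The expiringEntry type is hypothetical and
// the import path assumes the k8s.io/kubernetes/pkg/util/clock package used throughout these snippets.
package example

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/util/clock"
)

// expiringEntry is a hypothetical value with a deadline; it takes a clock.Clock
// instead of calling time.Now directly so a FakeClock can be injected in tests.
type expiringEntry struct {
	expiresAt time.Time
}

func (e expiringEntry) expired(c clock.Clock) bool {
	return c.Now().After(e.expiresAt)
}

func TestExpiringEntrySketch(t *testing.T) {
	fakeClock := clock.NewFakeClock(time.Now())
	entry := expiringEntry{expiresAt: fakeClock.Now().Add(30 * time.Second)}

	if entry.expired(fakeClock) {
		t.Errorf("entry should not be expired before its deadline")
	}

	// Advance the fake clock past the deadline; no real sleeping is involved.
	fakeClock.Step(31 * time.Second)
	if !entry.expired(fakeClock) {
		t.Errorf("entry should be expired after stepping past its deadline")
	}
}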
// NewFakeControllerExpectationsLookup creates a fake store for PodExpectations. func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *clock.FakeClock) { fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) fakeClock := clock.NewFakeClock(fakeTime) ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock} ttlStore := cache.NewFakeExpirationStore( ExpKeyFunc, nil, ttlPolicy, fakeClock) return &ControllerExpectations{ttlStore}, fakeClock }
func TestDeduping(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) q := newDelayingQueue(fakeClock) first := "foo" q.AddAfter(first, 50*time.Millisecond) if err := waitForWaitingQueueToFill(q); err != nil { t.Fatalf("unexpected err: %v", err) } q.AddAfter(first, 70*time.Millisecond) if err := waitForWaitingQueueToFill(q); err != nil { t.Fatalf("unexpected err: %v", err) } if q.Len() != 0 { t.Errorf("should not have added") } // step past the first block, we should receive now fakeClock.Step(60 * time.Millisecond) if err := waitForAdded(q, 1); err != nil { t.Errorf("should have added") } item, _ := q.Get() q.Done(item) // step past the second add fakeClock.Step(20 * time.Millisecond) if q.Len() != 0 { t.Errorf("should not have added") } // test again, but this time the earlier should override q.AddAfter(first, 50*time.Millisecond) q.AddAfter(first, 30*time.Millisecond) if err := waitForWaitingQueueToFill(q); err != nil { t.Fatalf("unexpected err: %v", err) } if q.Len() != 0 { t.Errorf("should not have added") } fakeClock.Step(40 * time.Millisecond) if err := waitForAdded(q, 1); err != nil { t.Errorf("should have added") } item, _ = q.Get() q.Done(item) // step past the second add fakeClock.Step(20 * time.Millisecond) if q.Len() != 0 { t.Errorf("should not have added") } if q.Len() != 0 { t.Errorf("should not have added") } }
// TestSecondsSinceSync verifies that proper results are returned // when checking the time between syncs func TestSecondsSinceSync(t *testing.T) { tunneler := &SSHTunneler{} assert := assert.New(t) tunneler.lastSync = time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix() // Nano Second. No difference. tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC)) assert.Equal(int64(0), tunneler.SecondsSinceSync()) // Second tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC)) assert.Equal(int64(1), tunneler.SecondsSinceSync()) // Minute tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC)) assert.Equal(int64(60), tunneler.SecondsSinceSync()) // Hour tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC)) assert.Equal(int64(3600), tunneler.SecondsSinceSync()) // Day tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC)) assert.Equal(int64(86400), tunneler.SecondsSinceSync()) // Month tunneler.clock = clock.NewFakeClock(time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC)) assert.Equal(int64(2678400), tunneler.SecondsSinceSync()) // Future Month. Should be -Month. tunneler.lastSync = time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC).Unix() tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC)) assert.Equal(int64(-2678400), tunneler.SecondsSinceSync()) }
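// An illustrative sketch (not from the sources above) of an alternative to the test above: rather
// than constructing a fresh FakeClock for each expected delta, keep a single FakeClock, advance it
// with Step, and measure elapsed fake time with Since. The import path assumes the same
// k8s.io/kubernetes/pkg/util/clock package used throughout these snippets.
package example

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/util/clock"
)

func TestElapsedSecondsSketch(t *testing.T) {
	fakeClock := clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 0, time.UTC))
	lastSync := fakeClock.Now()

	// Advance one hour of fake time; Since reports exactly the stepped duration.
	fakeClock.Step(time.Hour)
	if got := int64(fakeClock.Since(lastSync).Seconds()); got != 3600 {
		t.Errorf("expected 3600 elapsed seconds, got %d", got)
	}
}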
// newTestWatchCache just adds a fake clock. func newTestWatchCache(capacity int) *watchCache { keyFunc := func(obj runtime.Object) (string, error) { return NamespaceKeyFunc("prefix", obj) } getAttrsFunc := func(obj runtime.Object) (labels.Set, fields.Set, error) { return nil, nil, nil } wc := newWatchCache(capacity, keyFunc, getAttrsFunc) wc.clock = clock.NewFakeClock(time.Now()) return wc }
func TestRepositoryBucketAddOversize(t *testing.T) { clock := clock.NewFakeClock(time.Now()) b := repositoryBucket{ clock: clock, } i := 0 for ; i < bucketSize; i++ { ttl := time.Duration(uint64(ttl5m) * uint64(i)) b.Add(ttl, fmt.Sprintf("%d", i)) } if len(b.list) != bucketSize { t.Fatalf("unexpected number of items: %d != %d", len(b.list), bucketSize) } // make first three stale clock.Step(ttl5m * 3) if !b.Has("3") { t.Fatalf("bucket does not contain repository 3") } if len(b.list) != bucketSize-3 { t.Fatalf("unexpected number of items: %d != %d", len(b.list), bucketSize-3) } // add few repos one by one for ; i < bucketSize+5; i++ { ttl := time.Duration(uint64(ttl5m) * uint64(i)) b.Add(ttl, fmt.Sprintf("%d", i)) } if len(b.list) != bucketSize { t.Fatalf("unexpected number of items: %d != %d", len(b.list), bucketSize) } // add few repos at once newRepos := []string{} for ; i < bucketSize+10; i++ { newRepos = append(newRepos, fmt.Sprintf("%d", i)) } b.Add(ttl5m, newRepos...) if len(b.list) != bucketSize { t.Fatalf("unexpected number of items: %d != %d", len(b.list), bucketSize) } for j := 0; j < bucketSize; j++ { expected := fmt.Sprintf("%d", i-bucketSize+j) if b.list[j].repository != expected { t.Fatalf("unexpected repository on index %d: %s != %s", j, b.list[j].repository, expected) } } }
func TestRepositoryBucketCopy(t *testing.T) { now := time.Now() clock := clock.NewFakeClock(now) ttl5m := time.Minute * 5 for _, tc := range []struct { name string entries []bucketEntry expectedRepos []string }{ { name: "no entry", expectedRepos: []string{}, }, { name: "one stale entry", entries: []bucketEntry{ { repository: "1", }, }, expectedRepos: []string{}, }, { name: "two entries", entries: []bucketEntry{ { repository: "a", evictOn: now.Add(ttl5m), }, { repository: "b", evictOn: now.Add(ttl5m), }, }, expectedRepos: []string{"a", "b"}, }, } { b := repositoryBucket{ clock: clock, list: tc.entries, } result := b.Copy() if !reflect.DeepEqual(result, tc.expectedRepos) { t.Errorf("[%s] got unexpected repo list: %s", tc.name, diff.ObjectGoPrintDiff(result, tc.expectedRepos)) } } }
func newTestGenericPLEG() *TestGenericPLEG { fakeRuntime := &containertest.FakeRuntime{} clock := clock.NewFakeClock(time.Time{}) // The channel capacity should be large enough to hold all events in a // single test. pleg := &GenericPLEG{ relistPeriod: time.Hour, runtime: fakeRuntime, eventChannel: make(chan *PodLifecycleEvent, 100), podRecords: make(podRecords), clock: clock, } return &TestGenericPLEG{pleg: pleg, runtime: fakeRuntime, clock: clock} }
func TestRateLimitingQueue(t *testing.T) { limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second) queue := NewRateLimitingQueue(limiter).(*rateLimitingType) fakeClock := clock.NewFakeClock(time.Now()) delayingQueue := &delayingType{ Interface: New(), clock: fakeClock, heartbeat: fakeClock.Tick(maxWait), stopCh: make(chan struct{}), waitingForAddCh: make(chan waitFor, 1000), metrics: newRetryMetrics(""), } queue.DelayingInterface = delayingQueue queue.AddRateLimited("one") waitEntry := <-delayingQueue.waitingForAddCh if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a { t.Errorf("expected %v, got %v", e, a) } queue.AddRateLimited("one") waitEntry = <-delayingQueue.waitingForAddCh if e, a := 2*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a { t.Errorf("expected %v, got %v", e, a) } if e, a := 2, queue.NumRequeues("one"); e != a { t.Errorf("expected %v, got %v", e, a) } queue.AddRateLimited("two") waitEntry = <-delayingQueue.waitingForAddCh if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a { t.Errorf("expected %v, got %v", e, a) } queue.AddRateLimited("two") waitEntry = <-delayingQueue.waitingForAddCh if e, a := 2*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a { t.Errorf("expected %v, got %v", e, a) } queue.Forget("one") if e, a := 0, queue.NumRequeues("one"); e != a { t.Errorf("expected %v, got %v", e, a) } queue.AddRateLimited("one") waitEntry = <-delayingQueue.waitingForAddCh if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a { t.Errorf("expected %v, got %v", e, a) } }
// TestActiveDeadlineHandler verifies the active deadline handler functions as expected. func TestActiveDeadlineHandler(t *testing.T) { pods := newTestPods(4) fakeClock := clock.NewFakeClock(time.Now()) podStatusProvider := &mockPodStatusProvider{pods: pods} fakeRecorder := &record.FakeRecorder{} handler, err := newActiveDeadlineHandler(podStatusProvider, fakeRecorder, fakeClock) if err != nil { t.Fatalf("unexpected error: %v", err) } now := metav1.Now() startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute)) // this pod has exceeded its active deadline exceededActiveDeadlineSeconds := int64(30) pods[0].Status.StartTime = &startTime pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds // this pod has not exceeded its active deadline notYetActiveDeadlineSeconds := int64(120) pods[1].Status.StartTime = &startTime pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds // this pod has no deadline pods[2].Status.StartTime = &startTime pods[2].Spec.ActiveDeadlineSeconds = nil testCases := []struct { pod *v1.Pod expected bool }{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}} for i, testCase := range testCases { if actual := handler.ShouldSync(testCase.pod); actual != testCase.expected { t.Errorf("[%d] ShouldSync expected %#v, got %#v", i, testCase.expected, actual) } actual := handler.ShouldEvict(testCase.pod) if actual.Evict != testCase.expected { t.Errorf("[%d] ShouldEvict.Evict expected %#v, got %#v", i, testCase.expected, actual.Evict) } if testCase.expected { if actual.Reason != reason { t.Errorf("[%d] ShouldEvict.Reason expected %#v, got %#v", i, reason, actual.Reason) } if actual.Message != message { t.Errorf("[%d] ShouldEvict.Message expected %#v, got %#v", i, message, actual.Message) } } } }
func TestAddTwoFireEarly(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) q := newDelayingQueue(fakeClock) first := "foo" second := "bar" third := "baz" q.AddAfter(first, 1*time.Second) q.AddAfter(second, 50*time.Millisecond) if err := waitForWaitingQueueToFill(q); err != nil { t.Fatalf("unexpected err: %v", err) } if q.Len() != 0 { t.Errorf("should not have added") } fakeClock.Step(60 * time.Millisecond) if err := waitForAdded(q, 1); err != nil { t.Fatalf("unexpected err: %v", err) } item, _ := q.Get() if !reflect.DeepEqual(item, second) { t.Errorf("expected %v, got %v", second, item) } q.AddAfter(third, 2*time.Second) fakeClock.Step(1 * time.Second) if err := waitForAdded(q, 1); err != nil { t.Fatalf("unexpected err: %v", err) } item, _ = q.Get() if !reflect.DeepEqual(item, first) { t.Errorf("expected %v, got %v", first, item) } fakeClock.Step(2 * time.Second) if err := waitForAdded(q, 1); err != nil { t.Fatalf("unexpected err: %v", err) } item, _ = q.Get() if !reflect.DeepEqual(item, third) { t.Errorf("expected %v, got %v", third, item) } }
func TestGarbageCollectImageNotOldEnough(t *testing.T) { policy := ImageGCPolicy{ HighThresholdPercent: 90, LowThresholdPercent: 80, MinAge: time.Minute * 1, } fakeRuntime := &containertest.FakeRuntime{} mockCadvisor := new(cadvisortest.Mock) manager := &realImageGCManager{ runtime: fakeRuntime, policy: policy, imageRecords: make(map[string]*imageRecord), cadvisor: mockCadvisor, recorder: &record.FakeRecorder{}, } fakeRuntime.ImageList = []container.Image{ makeImage(0, 1024), makeImage(1, 2048), } // 1 image is in use, and another one is not old enough fakeRuntime.AllPodList = []*containertest.FakePod{ {Pod: &container.Pod{ Containers: []*container.Container{ makeContainer(1), }, }}, } fakeClock := clock.NewFakeClock(time.Now()) t.Log(fakeClock.Now()) require.NoError(t, manager.detectImages(fakeClock.Now())) require.Equal(t, manager.imageRecordsLen(), 2) // no space freed since one image is in use, and another one is not old enough spaceFreed, err := manager.freeSpace(1024, fakeClock.Now()) assert := assert.New(t) require.NoError(t, err) assert.EqualValues(0, spaceFreed) assert.Len(fakeRuntime.ImageList, 2) // move clock by minAge duration, then 1 image will be garbage collected fakeClock.Step(policy.MinAge) spaceFreed, err = manager.freeSpace(1024, fakeClock.Now()) require.NoError(t, err) assert.EqualValues(1024, spaceFreed) assert.Len(fakeRuntime.ImageList, 1) }
func TestSimpleQueue(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) q := newDelayingQueue(fakeClock) first := "foo" q.AddAfter(first, 50*time.Millisecond) if err := waitForWaitingQueueToFill(q); err != nil { t.Fatalf("unexpected err: %v", err) } if q.Len() != 0 { t.Errorf("should not have added") } fakeClock.Step(60 * time.Millisecond) if err := waitForAdded(q, 1); err != nil { t.Errorf("should have added") } item, _ := q.Get() q.Done(item) // step past the next heartbeat fakeClock.Step(10 * time.Second) err := wait.Poll(1*time.Millisecond, 30*time.Millisecond, func() (done bool, err error) { if q.Len() > 0 { return false, fmt.Errorf("added to queue") } return false, nil }) if err != wait.ErrWaitTimeout { t.Errorf("expected timeout, got: %v", err) } if q.Len() != 0 { t.Errorf("should not have added") } }
func TestBackoffHightWaterMark(t *testing.T) { id := "_idHiWaterMark" tc := clock.NewFakeClock(time.Now()) step := time.Second maxDuration := 5 * step b := NewFakeBackOff(step, maxDuration, tc) // get to backoff = maxDuration for i := 0; i <= int(maxDuration/step); i++ { tc.Step(step) b.Next(id, tc.Now()) } // backoff high watermark expires after 2*maxDuration tc.Step(maxDuration + step) b.Next(id, tc.Now()) if b.Get(id) != maxDuration { t.Errorf("expected Backoff to stay at high watermark %s got %s", maxDuration, b.Get(id)) } }
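// A compact sketch (illustrative only, not taken from the sources above) of the Backoff/FakeClock
// interplay the backoff tests above exercise: a failure recorded with Next puts an id into backoff,
// and advancing the fake clock past the current delay takes it out again. It assumes the exported
// flowcontrol.NewFakeBackOff constructor and the same clock package used elsewhere in these snippets.
package example

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/util/clock"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func TestBackoffSketch(t *testing.T) {
	fakeClock := clock.NewFakeClock(time.Now())
	// 1s initial backoff, capped at 10s, driven entirely by the fake clock.
	backoff := flowcontrol.NewFakeBackOff(time.Second, 10*time.Second, fakeClock)

	id := "example-id"
	start := fakeClock.Now()
	backoff.Next(id, start) // record a failure: the per-id backoff becomes 1s

	if !backoff.IsInBackOffSince(id, start) {
		t.Fatalf("expected %q to be in backoff immediately after a failure", id)
	}

	// Step past the 1s delay; the id should no longer be in backoff.
	fakeClock.Step(2 * time.Second)
	if backoff.IsInBackOffSince(id, start) {
		t.Fatalf("expected %q to be out of backoff after advancing the fake clock", id)
	}
}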
func TestAddAndGet(t *testing.T) { testObj := testObject{ key: "foo", val: "bar", } objectCache := NewFakeObjectCache(func() (interface{}, error) { return nil, fmt.Errorf("Unexpected Error: updater should never be called in this test!") }, 1*time.Hour, clock.NewFakeClock(time.Now())) err := objectCache.Add(testObj.key, testObj.val) if err != nil { t.Errorf("Unable to add obj %#v by key: %s", testObj, testObj.key) } value, err := objectCache.Get(testObj.key) if err != nil { t.Errorf("Unable to get obj %#v by key: %s", testObj, testObj.key) } if value.(string) != testObj.val { t.Errorf("Expected to get cached value: %#v, but got: %s", testObj.val, value.(string)) } }
func TestCopyShifting(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) q := newDelayingQueue(fakeClock) first := "foo" second := "bar" third := "baz" q.AddAfter(first, 1*time.Second) q.AddAfter(second, 500*time.Millisecond) q.AddAfter(third, 250*time.Millisecond) if err := waitForWaitingQueueToFill(q); err != nil { t.Fatalf("unexpected err: %v", err) } if q.Len() != 0 { t.Errorf("should not have added") } fakeClock.Step(2 * time.Second) if err := waitForAdded(q, 3); err != nil { t.Fatalf("unexpected err: %v", err) } actualFirst, _ := q.Get() if !reflect.DeepEqual(actualFirst, third) { t.Errorf("expected %v, got %v", third, actualFirst) } actualSecond, _ := q.Get() if !reflect.DeepEqual(actualSecond, second) { t.Errorf("expected %v, got %v", second, actualSecond) } actualThird, _ := q.Get() if !reflect.DeepEqual(actualThird, first) { t.Errorf("expected %v, got %v", first, actualThird) } }
func TestDiskPressureNodeFsInodes(t *testing.T) { // TODO(dashpole): we need to know inodes used when cadvisor supports per container stats podMaker := func(name string, requests api.ResourceList, limits api.ResourceList) (*api.Pod, statsapi.PodStats) { pod := newPod(name, []api.Container{ newContainer(name, requests, limits), }, nil) podStats := newPodInodeStats(pod) return pod, podStats } summaryStatsMaker := func(rootFsInodesFree, rootFsInodes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary { rootFsInodesFreeVal := resource.MustParse(rootFsInodesFree) internalRootFsInodesFree := uint64(rootFsInodesFreeVal.Value()) rootFsInodesVal := resource.MustParse(rootFsInodes) internalRootFsInodes := uint64(rootFsInodesVal.Value()) result := &statsapi.Summary{ Node: statsapi.NodeStats{ Fs: &statsapi.FsStats{ InodesFree: &internalRootFsInodesFree, Inodes: &internalRootFsInodes, }, }, Pods: []statsapi.PodStats{}, } for _, podStat := range podStats { result.Pods = append(result.Pods, podStat) } return result } // TODO(dashpole): pass inodes used in future when supported by cadvisor. podsToMake := []struct { name string requests api.ResourceList limits api.ResourceList }{ {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", "")}, {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", "")}, {name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi")}, {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi")}, {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi")}, {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi")}, } pods := []*api.Pod{} podStats := map[*api.Pod]statsapi.PodStats{} for _, podToMake := range podsToMake { pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits) pods = append(pods, pod) podStats[pod] = podStat } activePodsFunc := func() []*api.Pod { return pods } fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} imageGC := &mockImageGC{freed: int64(0), err: nil} nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} config := Config{ MaxPodGracePeriodSeconds: 5, PressureTransitionPeriod: time.Minute * 5, Thresholds: []Threshold{ { Signal: SignalNodeFsInodesFree, Operator: OpLessThan, Value: ThresholdValue{ Quantity: quantityMustParse("1Mi"), }, }, { Signal: SignalNodeFsInodesFree, Operator: OpLessThan, Value: ThresholdValue{ Quantity: quantityMustParse("2Mi"), }, GracePeriod: time.Minute * 2, }, }, } summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("3Mi", "4Mi", podStats)} manager := &managerImpl{ clock: fakeClock, killPodFunc: podKiller.killPodNow, imageGC: imageGC, config: config, recorder: &record.FakeRecorder{}, summaryProvider: summaryProvider, nodeRef: nodeRef, nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, thresholdsFirstObservedAt: thresholdsObservedAt{}, } // create a best effort pod to test admission podToAdmit, _ := podMaker("pod-to-admit", newResourceList("", ""), newResourceList("", "")) // synchronize manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have inode pressure if manager.IsUnderInodePressure() { t.Errorf("Manager should not report inode pressure") } // try to admit our pod (should succeed) if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit) } // induce soft threshold fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should have inode pressure if !manager.IsUnderInodePressure() { t.Errorf("Manager should report inode pressure since soft threshold was met") } // verify no pod was yet killed because there has not yet been enough time passed. if podKiller.pod != nil { t.Errorf("Manager should not have killed a pod yet, but killed: %v", podKiller.pod) } // step forward in time past the grace period fakeClock.Step(3 * time.Minute) summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should have inode pressure if !manager.IsUnderInodePressure() { t.Errorf("Manager should report inode pressure since soft threshold was met") } // verify the right pod was killed with the right grace period. if podKiller.pod != pods[0] { t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0]) } if podKiller.gracePeriodOverride == nil { t.Errorf("Manager chose to kill pod but should have had a grace period override.") } observedGracePeriod := *podKiller.gracePeriodOverride if observedGracePeriod != manager.config.MaxPodGracePeriodSeconds { t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", manager.config.MaxPodGracePeriodSeconds, observedGracePeriod) } // reset state podKiller.pod = nil podKiller.gracePeriodOverride = nil // remove inode pressure fakeClock.Step(20 * time.Minute) summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have inode pressure if manager.IsUnderInodePressure() { t.Errorf("Manager should not report inode pressure") } // induce inode pressure! fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("0.5Mi", "4Mi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should have inode pressure if !manager.IsUnderInodePressure() { t.Errorf("Manager should report inode pressure") } // check the right pod was killed if podKiller.pod != pods[0] { t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0]) } observedGracePeriod = *podKiller.gracePeriodOverride if observedGracePeriod != int64(0) { t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod) } // try to admit our pod (should fail) if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit) } // reduce inode pressure fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats) podKiller.pod = nil // reset state manager.synchronize(diskInfoProvider, activePodsFunc) // we should have inode pressure (because transition period not yet met) if !manager.IsUnderInodePressure() { t.Errorf("Manager should report inode pressure") } // no pod should have been killed if podKiller.pod != nil { t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) } // try to admit our pod (should fail) if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit) } // move the clock past transition period to ensure that we stop reporting pressure fakeClock.Step(5 * time.Minute) summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats) podKiller.pod = nil // reset state manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have inode pressure (because transition period met) if manager.IsUnderInodePressure() { t.Errorf("Manager should not report inode pressure") } // no pod should have been killed if podKiller.pod != nil { t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) } // try to admit our pod (should succeed) if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit) } }
func TestNodeReclaimFuncs(t *testing.T) { podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*api.Pod, statsapi.PodStats) { pod := newPod(name, []api.Container{ newContainer(name, requests, limits), }, nil) podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed)) return pod, podStats } summaryStatsMaker := func(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary { rootFsVal := resource.MustParse(rootFsAvailableBytes) rootFsBytes := uint64(rootFsVal.Value()) rootFsCapacityBytes := uint64(rootFsVal.Value() * 2) imageFsVal := resource.MustParse(imageFsAvailableBytes) imageFsBytes := uint64(imageFsVal.Value()) imageFsCapacityBytes := uint64(imageFsVal.Value() * 2) result := &statsapi.Summary{ Node: statsapi.NodeStats{ Fs: &statsapi.FsStats{ AvailableBytes: &rootFsBytes, CapacityBytes: &rootFsCapacityBytes, }, Runtime: &statsapi.RuntimeStats{ ImageFs: &statsapi.FsStats{ AvailableBytes: &imageFsBytes, CapacityBytes: &imageFsCapacityBytes, }, }, }, Pods: []statsapi.PodStats{}, } for _, podStat := range podStats { result.Pods = append(result.Pods, podStat) } return result } podsToMake := []struct { name string requests api.ResourceList limits api.ResourceList rootFsUsed string logsFsUsed string perLocalVolumeUsed string }{ {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "500Mi"}, {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), perLocalVolumeUsed: "300Mi"}, {name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), rootFsUsed: "800Mi"}, {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), logsFsUsed: "300Mi"}, {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "800Mi"}, {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "200Mi"}, } pods := []*api.Pod{} podStats := map[*api.Pod]statsapi.PodStats{} for _, podToMake := range podsToMake { pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed) pods = append(pods, pod) podStats[pod] = podStat } activePodsFunc := func() []*api.Pod { return pods } fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} imageGcFree := resource.MustParse("700Mi") imageGC := &mockImageGC{freed: imageGcFree.Value(), err: nil} nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} config := Config{ MaxPodGracePeriodSeconds: 5, PressureTransitionPeriod: time.Minute * 5, Thresholds: []Threshold{ { Signal: SignalNodeFsAvailable, Operator: OpLessThan, Value: ThresholdValue{ Quantity: quantityMustParse("1Gi"), }, MinReclaim: &ThresholdValue{ Quantity: quantityMustParse("500Mi"), }, }, }, } summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("16Gi", "200Gi", podStats)} manager := &managerImpl{ clock: fakeClock, killPodFunc: podKiller.killPodNow, imageGC: imageGC, config: config, recorder: &record.FakeRecorder{}, summaryProvider: summaryProvider, nodeRef: nodeRef, nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, thresholdsFirstObservedAt: thresholdsObservedAt{}, } // synchronize manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have disk pressure if manager.IsUnderDiskPressure() { t.Errorf("Manager should not report disk pressure") } // induce hard threshold fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker(".9Gi", "200Gi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should have disk pressure if !manager.IsUnderDiskPressure() { t.Errorf("Manager should report disk pressure since soft threshold was met") } // verify image gc was invoked if !imageGC.invoked { t.Errorf("Manager should have invoked image gc") } // verify no pod was killed because image gc was sufficient if podKiller.pod != nil { t.Errorf("Manager should not have killed a pod, but killed: %v", podKiller.pod) } // reset state imageGC.invoked = false // remove disk pressure fakeClock.Step(20 * time.Minute) summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have disk pressure if manager.IsUnderDiskPressure() { t.Errorf("Manager should not report disk pressure") } // induce disk pressure! fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("400Mi", "200Gi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should have disk pressure if !manager.IsUnderDiskPressure() { t.Errorf("Manager should report disk pressure") } // ensure image gc was invoked if !imageGC.invoked { t.Errorf("Manager should have invoked image gc") } // check the right pod was killed if podKiller.pod != pods[0] { t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0]) } observedGracePeriod := *podKiller.gracePeriodOverride if observedGracePeriod != int64(0) { t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod) } // reduce disk pressure fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats) imageGC.invoked = false // reset state podKiller.pod = nil // reset state manager.synchronize(diskInfoProvider, activePodsFunc) // we should have disk pressure (because transition period not yet met) if !manager.IsUnderDiskPressure() { t.Errorf("Manager should report disk pressure") } // no image gc should have occurred if imageGC.invoked { t.Errorf("Manager chose to perform image gc when it was not needed") } // no pod should have been killed if podKiller.pod != nil { t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) } // move the clock past transition period to ensure that we stop reporting pressure fakeClock.Step(5 * time.Minute) summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats) imageGC.invoked = false // reset state podKiller.pod = nil // reset state manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have disk pressure (because transition period met) if manager.IsUnderDiskPressure() { t.Errorf("Manager should not report disk pressure") } // no image gc should have occurred if imageGC.invoked { t.Errorf("Manager chose to perform image gc when it was not needed") } // no pod should have been killed if podKiller.pod != nil { t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) } }
// TestMemoryPressure func TestMemoryPressure(t *testing.T) { podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) { pod := newPod(name, []api.Container{ newContainer(name, requests, limits), }, nil) podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet)) return pod, podStats } summaryStatsMaker := func(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary { val := resource.MustParse(nodeAvailableBytes) availableBytes := uint64(val.Value()) WorkingSetBytes := uint64(val.Value()) result := &statsapi.Summary{ Node: statsapi.NodeStats{ Memory: &statsapi.MemoryStats{ AvailableBytes: &availableBytes, WorkingSetBytes: &WorkingSetBytes, }, }, Pods: []statsapi.PodStats{}, } for _, podStat := range podStats { result.Pods = append(result.Pods, podStat) } return result } podsToMake := []struct { name string requests api.ResourceList limits api.ResourceList memoryWorkingSet string }{ {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"}, {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"}, {name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "800Mi"}, {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "300Mi"}, {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "800Mi"}, {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "200Mi"}, } pods := []*api.Pod{} podStats := map[*api.Pod]statsapi.PodStats{} for _, podToMake := range podsToMake { pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet) pods = append(pods, pod) podStats[pod] = podStat } activePodsFunc := func() []*api.Pod { return pods } fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} imageGC := &mockImageGC{freed: int64(0), err: nil} nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} config := Config{ MaxPodGracePeriodSeconds: 5, PressureTransitionPeriod: time.Minute * 5, Thresholds: []Threshold{ { Signal: SignalMemoryAvailable, Operator: OpLessThan, Value: ThresholdValue{ Quantity: quantityMustParse("1Gi"), }, }, { Signal: SignalMemoryAvailable, Operator: OpLessThan, Value: ThresholdValue{ Quantity: quantityMustParse("2Gi"), }, GracePeriod: time.Minute * 2, }, }, } summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)} manager := &managerImpl{ clock: fakeClock, killPodFunc: podKiller.killPodNow, imageGC: imageGC, config: config, recorder: &record.FakeRecorder{}, summaryProvider: summaryProvider, nodeRef: nodeRef, nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, thresholdsFirstObservedAt: thresholdsObservedAt{}, } // create a best effort pod to test admission bestEffortPodToAdmit, _ := podMaker("best-admit", newResourceList("", ""), newResourceList("", ""), "0Gi") burstablePodToAdmit, _ := podMaker("burst-admit", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi"), "0Gi") // synchronize manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have memory pressure if manager.IsUnderMemoryPressure() { t.Errorf("Manager should not report memory pressure") } // try to admit our pods (they should succeed) expected := []bool{true, true} for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) } } // induce soft threshold fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("1500Mi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should have memory pressure if !manager.IsUnderMemoryPressure() { t.Errorf("Manager should report memory pressure since soft threshold was met") } // verify no pod was yet killed because there has not yet been enough time passed. if podKiller.pod != nil { t.Errorf("Manager should not have killed a pod yet, but killed: %v", podKiller.pod) } // step forward in time past the grace period fakeClock.Step(3 * time.Minute) summaryProvider.result = summaryStatsMaker("1500Mi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should have memory pressure if !manager.IsUnderMemoryPressure() { t.Errorf("Manager should report memory pressure since soft threshold was met") } // verify the right pod was killed with the right grace period. if podKiller.pod != pods[0] { t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0]) } if podKiller.gracePeriodOverride == nil { t.Errorf("Manager chose to kill pod but should have had a grace period override.") } observedGracePeriod := *podKiller.gracePeriodOverride if observedGracePeriod != manager.config.MaxPodGracePeriodSeconds { t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", manager.config.MaxPodGracePeriodSeconds, observedGracePeriod) } // reset state podKiller.pod = nil podKiller.gracePeriodOverride = nil // remove memory pressure fakeClock.Step(20 * time.Minute) summaryProvider.result = summaryStatsMaker("3Gi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have memory pressure if manager.IsUnderMemoryPressure() { t.Errorf("Manager should not report memory pressure") } // induce memory pressure! fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("500Mi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should have memory pressure if !manager.IsUnderMemoryPressure() { t.Errorf("Manager should report memory pressure") } // check the right pod was killed if podKiller.pod != pods[0] { t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0]) } observedGracePeriod = *podKiller.gracePeriodOverride if observedGracePeriod != int64(0) { t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod) } // the best-effort pod should not admit, burstable should expected = []bool{false, true} for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) } } // reduce memory pressure fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("2Gi", podStats) podKiller.pod = nil // reset state manager.synchronize(diskInfoProvider, activePodsFunc) // we should have memory pressure (because transition period not yet met) if !manager.IsUnderMemoryPressure() { t.Errorf("Manager should report memory pressure") } // no pod should have been killed if podKiller.pod != nil { t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) } // the best-effort pod should not admit, burstable should expected = []bool{false, true} for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) } } // move the clock past transition period to ensure that we stop reporting pressure fakeClock.Step(5 * time.Minute) summaryProvider.result = summaryStatsMaker("2Gi", podStats) podKiller.pod = nil // reset state manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have memory pressure (because transition period met) if manager.IsUnderMemoryPressure() { t.Errorf("Manager should not report memory pressure") } // no pod should have been killed if podKiller.pod != nil { t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) } // all pods should admit now expected = []bool{true, true} for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) } } }
// TestMinReclaim verifies that min-reclaim works as desired. func TestMinReclaim(t *testing.T) { podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) { pod := newPod(name, []api.Container{ newContainer(name, requests, limits), }, nil) podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet)) return pod, podStats } summaryStatsMaker := func(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary { val := resource.MustParse(nodeAvailableBytes) availableBytes := uint64(val.Value()) WorkingSetBytes := uint64(val.Value()) result := &statsapi.Summary{ Node: statsapi.NodeStats{ Memory: &statsapi.MemoryStats{ AvailableBytes: &availableBytes, WorkingSetBytes: &WorkingSetBytes, }, }, Pods: []statsapi.PodStats{}, } for _, podStat := range podStats { result.Pods = append(result.Pods, podStat) } return result } podsToMake := []struct { name string requests api.ResourceList limits api.ResourceList memoryWorkingSet string }{ {name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"}, {name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"}, {name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "800Mi"}, {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "300Mi"}, {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "800Mi"}, {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "200Mi"}, } pods := []*api.Pod{} podStats := map[*api.Pod]statsapi.PodStats{} for _, podToMake := range podsToMake { pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet) pods = append(pods, pod) podStats[pod] = podStat } activePodsFunc := func() []*api.Pod { return pods } fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} imageGC := &mockImageGC{freed: int64(0), err: nil} nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} config := Config{ MaxPodGracePeriodSeconds: 5, PressureTransitionPeriod: time.Minute * 5, Thresholds: []Threshold{ { Signal: SignalMemoryAvailable, Operator: OpLessThan, Value: ThresholdValue{ Quantity: quantityMustParse("1Gi"), }, MinReclaim: &ThresholdValue{ Quantity: quantityMustParse("500Mi"), }, }, }, } summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)} manager := &managerImpl{ clock: fakeClock, killPodFunc: podKiller.killPodNow, imageGC: imageGC, config: config, recorder: &record.FakeRecorder{}, summaryProvider: summaryProvider, nodeRef: nodeRef, nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, thresholdsFirstObservedAt: thresholdsObservedAt{}, } // synchronize manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have memory pressure if manager.IsUnderMemoryPressure() { t.Errorf("Manager should not report memory pressure") } // induce memory pressure! fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("500Mi", podStats) manager.synchronize(diskInfoProvider, activePodsFunc) // we should have memory pressure if !manager.IsUnderMemoryPressure() { t.Errorf("Manager should report memory pressure") } // check the right pod was killed if podKiller.pod != pods[0] { t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0]) } observedGracePeriod := *podKiller.gracePeriodOverride if observedGracePeriod != int64(0) { t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod) } // reduce memory pressure, but not below the min-reclaim amount fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("1.2Gi", podStats) podKiller.pod = nil // reset state manager.synchronize(diskInfoProvider, activePodsFunc) // we should have memory pressure (because transition period not yet met) if !manager.IsUnderMemoryPressure() { t.Errorf("Manager should report memory pressure") } // check the right pod was killed if podKiller.pod != pods[0] { t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0]) } observedGracePeriod = *podKiller.gracePeriodOverride if observedGracePeriod != int64(0) { t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod) } // reduce memory pressure and ensure the min-reclaim amount fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("2Gi", podStats) podKiller.pod = nil // reset state manager.synchronize(diskInfoProvider, activePodsFunc) // we should have memory pressure (because transition period not yet met) if !manager.IsUnderMemoryPressure() { t.Errorf("Manager should report memory pressure") } // no pod should have been killed if podKiller.pod != nil { t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) } // move the clock past transition period to ensure that we stop reporting pressure fakeClock.Step(5 * time.Minute) summaryProvider.result = summaryStatsMaker("2Gi", podStats) podKiller.pod = nil // reset state manager.synchronize(diskInfoProvider, activePodsFunc) // we should not have memory pressure (because transition period met) if manager.IsUnderMemoryPressure() { t.Errorf("Manager should not report memory pressure") } // no pod should have been killed if podKiller.pod != nil { t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) } }
func newTestDockerSevice() (*dockerService, *dockertools.FakeDockerClient, *clock.FakeClock) { c := dockertools.NewFakeDockerClient() fakeClock := clock.NewFakeClock(time.Time{}) return &dockerService{client: c}, c, fakeClock }
func TestSerializedPuller(t *testing.T) { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "test_pod", Namespace: "test-ns", UID: "bar", ResourceVersion: "42", SelfLink: "/api/v1/pods/foo", }} cases := []struct { containerImage string policy api.PullPolicy calledFunctions []string inspectErr error pullerErr error expectedErr []error }{ { // pull missing image containerImage: "missing_image", policy: api.PullIfNotPresent, calledFunctions: []string{"IsImagePresent", "PullImage"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{nil}}, { // image present, dont pull containerImage: "present_image", policy: api.PullIfNotPresent, calledFunctions: []string{"IsImagePresent"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{nil, nil, nil}}, // image present, pull it {containerImage: "present_image", policy: api.PullAlways, calledFunctions: []string{"IsImagePresent", "PullImage"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{nil, nil, nil}}, // missing image, error PullNever {containerImage: "missing_image", policy: api.PullNever, calledFunctions: []string{"IsImagePresent"}, inspectErr: nil, pullerErr: nil, expectedErr: []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}}, // missing image, unable to inspect {containerImage: "missing_image", policy: api.PullIfNotPresent, calledFunctions: []string{"IsImagePresent"}, inspectErr: errors.New("unknown inspectError"), pullerErr: nil, expectedErr: []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}}, // missing image, unable to fetch {containerImage: "typo_image", policy: api.PullIfNotPresent, calledFunctions: []string{"IsImagePresent", "PullImage"}, inspectErr: nil, pullerErr: errors.New("404"), expectedErr: []error{ErrImagePull, ErrImagePull, ErrImagePullBackOff, ErrImagePull, ErrImagePullBackOff, ErrImagePullBackOff}}, } for i, c := range cases { container := &api.Container{ Name: "container_name", Image: c.containerImage, ImagePullPolicy: c.policy, } backOff := flowcontrol.NewBackOff(time.Second, time.Minute) fakeClock := clock.NewFakeClock(time.Now()) backOff.Clock = fakeClock fakeRuntime := &ctest.FakeRuntime{} fakeRecorder := &record.FakeRecorder{} puller := newSerializedImagePuller(fakeRecorder, fakeRuntime, backOff) fakeRuntime.ImageList = []Image{{"present_image", nil, nil, 0}} fakeRuntime.Err = c.pullerErr fakeRuntime.InspectErr = c.inspectErr for tick, expected := range c.expectedErr { fakeClock.Step(time.Second) err, _ := puller.pullImage(pod, container, nil) fakeRuntime.AssertCalls(c.calledFunctions) assert.Equal(t, expected, err, "in test %d tick=%d", i, tick) } } }