func TestTTLPolicy(t *testing.T) {
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	ttl := 30 * time.Second
	exactlyOnTTL := fakeTime.Add(-ttl)
	expiredTime := fakeTime.Add(-(ttl + 1))

	policy := TTLPolicy{ttl, util.NewFakeClock(fakeTime)}
	fakeTimestampedEntry := &timestampedEntry{obj: struct{}{}, timestamp: exactlyOnTTL}
	if policy.IsExpired(fakeTimestampedEntry) {
		t.Errorf("TTL cache should not expire entries exactly on ttl")
	}
	fakeTimestampedEntry.timestamp = fakeTime
	if policy.IsExpired(fakeTimestampedEntry) {
		t.Errorf("TTL Cache should not expire entries before ttl")
	}
	fakeTimestampedEntry.timestamp = expiredTime
	if !policy.IsExpired(fakeTimestampedEntry) {
		t.Errorf("TTL Cache should expire entries older than ttl")
	}
	for _, ttl = range []time.Duration{0, -1} {
		policy.Ttl = ttl
		if policy.IsExpired(fakeTimestampedEntry) {
			t.Errorf("TTL policy should only expire entries when initialized with a ttl > 0")
		}
	}
}
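The tests in this collection drive time through util.NewFakeClock instead of the wall clock, so expiry and backoff behaviour can be asserted deterministically. Judging only from the methods these examples call (Now, Step, SetTime, Sleep), a minimal stand-in looks roughly like the sketch below; the names and struct here are hypothetical, and the real util.FakeClock does more (tickers, waiters), so treat this as an illustration of the pattern rather than the actual implementation.

package main

import (
	"fmt"
	"time"
)

// fakeClock is a hypothetical, minimal stand-in for util.FakeClock, covering
// only the methods exercised in these examples.
type fakeClock struct {
	now time.Time
}

func newFakeClock(t time.Time) *fakeClock { return &fakeClock{now: t} }

// Now returns the frozen time.
func (c *fakeClock) Now() time.Time { return c.now }

// Step advances the frozen time by d.
func (c *fakeClock) Step(d time.Duration) { c.now = c.now.Add(d) }

// SetTime jumps the frozen time to t.
func (c *fakeClock) SetTime(t time.Time) { c.now = t }

// Sleep advances the frozen time instead of blocking, keeping tests fast.
func (c *fakeClock) Sleep(d time.Duration) { c.Step(d) }

func main() {
	clock := newFakeClock(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
	ttl := 30 * time.Second
	created := clock.Now()

	clock.Step(31 * time.Second)
	fmt.Println("expired:", clock.Now().Sub(created) > ttl) // expired: true
}

The TTL check in TestTTLPolicy above boils down to the same comparison: the entry's age, measured against the fake clock's Now(), versus the configured ttl.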
Example #2
func TestBackoffGC(t *testing.T) {
	id := "_idGC"
	tc := util.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 5 * step

	b := NewFakeBackOff(step, maxDuration, tc)

	for i := 0; i <= int(maxDuration/step); i++ {
		tc.Step(step)
		b.Next(id, tc.Now())
	}
	lastUpdate := tc.Now()
	tc.Step(maxDuration + step)
	b.GC()
	_, found := b.perItemBackoff[id]
	if !found {
		t.Errorf("expected GC to skip entry, elapsed time=%s maxDuration=%s", tc.Now().Sub(lastUpdate), maxDuration)
	}

	tc.Step(maxDuration + step)
	b.GC()
	r, found := b.perItemBackoff[id]
	if found {
		t.Errorf("expected GC of entry after %s got entry %v", tc.Now().Sub(lastUpdate), r)
	}
}
Example #3
func TestExpirationBasic(t *testing.T) {
	unexpectedVal := "bar"
	expectedVal := "bar2"

	testObj := testObject{
		key: "foo",
		val: unexpectedVal,
	}

	fakeClock := util.NewFakeClock(time.Now())

	objectCache := NewFakeObjectCache(func() (interface{}, error) {
		return expectedVal, nil
	}, 1*time.Second, fakeClock)

	err := objectCache.Add(testObj.key, testObj.val)
	if err != nil {
		t.Errorf("Unable to add obj %#v by key: %s", testObj, testObj.key)
	}

	// sleep 2s so cache should be expired.
	fakeClock.Sleep(2 * time.Second)

	value, err := objectCache.Get(testObj.key)
	if err != nil {
		t.Errorf("Unable to get obj %#v by key: %s", testObj, testObj.key)
	}
	if value.(string) != expectedVal {
		t.Errorf("Expected to get cached value: %#v, but got: %s", expectedVal, value.(string))
	}
}
Example #4
func getTestSQ(startThreads bool, config *github_util.Config, server *httptest.Server) *SubmitQueue {
	// TODO: Remove this line when we fix the plumbing regarding the fake/real e2e tester.
	admin.Mux = admin.NewConcurrentMux()
	sq := new(SubmitQueue)
	sq.RequiredStatusContexts = []string{notRequiredReTestContext1, notRequiredReTestContext2}
	sq.RequiredRetestContexts = []string{requiredReTestContext1, requiredReTestContext2}
	sq.BlockingJobNames = []string{"foo"}
	sq.WeakStableJobNames = []string{"bar"}
	sq.githubE2EQueue = map[int]*github_util.MungeObject{}
	sq.githubE2EPollTime = 50 * time.Millisecond

	sq.clock = util.NewFakeClock(time.Time{})
	sq.lastMergeTime = sq.clock.Now()
	sq.lastE2EStable = true
	sq.prStatus = map[string]submitStatus{}
	sq.lastPRStatus = map[string]submitStatus{}
	sq.lgtmTimeCache = mungerutil.NewLabelTimeCache(lgtmLabel)

	sq.startTime = sq.clock.Now()
	sq.healthHistory = make([]healthRecord, 0)

	sq.DoNotMergeMilestones = []string{doNotMergeMilestone}

	sq.e2e = &fake_e2e.FakeE2ETester{
		JobNames:           sq.BlockingJobNames,
		WeakStableJobNames: sq.WeakStableJobNames,
	}

	if startThreads {
		sq.internalInitialize(config, nil, server.URL)
		sq.EachLoop()
	}
	return sq
}
Example #5
func getTestSQ(startThreads bool, config *github_util.Config, server *httptest.Server) *SubmitQueue {
	sq := new(SubmitQueue)
	sq.RequiredStatusContexts = []string{jenkinsUnitContext}
	sq.E2EStatusContext = jenkinsE2EContext
	sq.UnitStatusContext = jenkinsUnitContext
	if server != nil {
		sq.JenkinsHost = server.URL
	}
	sq.JobNames = []string{"foo"}
	sq.WeakStableJobNames = []string{"bar"}
	sq.githubE2EQueue = map[int]*github_util.MungeObject{}
	sq.githubE2EPollTime = 50 * time.Millisecond

	sq.clock = util.NewFakeClock(time.Time{})
	sq.lastMergeTime = sq.clock.Now()
	sq.lastE2EStable = true
	sq.prStatus = map[string]submitStatus{}
	sq.lastPRStatus = map[string]submitStatus{}

	if startThreads {
		sq.internalInitialize(config, nil, server.URL)
		sq.EachLoop()
		sq.userWhitelist.Insert(whitelistUser)
	}
	return sq
}
Example #6
func TestBackoffReset(t *testing.T) {
	id := "_idReset"
	tc := util.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := step * 5
	b := NewFakeBackOff(step, maxDuration, tc)
	startTime := tc.Now()

	// get to backoff = maxDuration
	for i := 0; i <= int(maxDuration/step); i++ {
		tc.Step(step)
		b.Next(id, tc.Now())
	}

	// backoff should be capped at maxDuration
	if !b.IsInBackOffSince(id, tc.Now()) {
		t.Errorf("expected to be in Backoff got %s", b.Get(id))
	}

	lastUpdate := tc.Now()
	tc.Step(2*maxDuration + step) // time += 11s, 11 > 2*maxDuration
	if b.IsInBackOffSince(id, lastUpdate) {
		t.Errorf("now=%s lastUpdate=%s (%s) expected Backoff reset got %s b.lastUpdate=%s", tc.Now(), startTime, tc.Now().Sub(lastUpdate), b.Get(id))
	}
}
Example #7
func TestSlowBackoff(t *testing.T) {
	id := "_idSlow"
	tc := util.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 50 * step

	b := NewFakeBackOff(step, maxDuration, tc)
	cases := []time.Duration{0, 1, 2, 4, 8, 16, 32, 50, 50, 50}
	for ix, c := range cases {
		tc.Step(step)
		w := b.Get(id)
		if w != c*step {
			t.Errorf("input: '%d': expected %s, got %s", ix, c*step, w)
		}
		b.Next(id, tc.Now())
	}

	//Now confirm that the Reset cancels backoff.
	b.Next(id, tc.Now())
	b.Reset(id)
	if b.Get(id) != 0 {
		t.Errorf("Reset didn't clear the backoff.")
	}

}
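The expected sequence in cases above (0, 1, 2, 4, 8, 16, 32, 50, 50, 50) reflects a per-id delay that starts at step, doubles on each Next call, and is capped at maxDuration. Assuming that is what the FakeBackOff used here does, a rough self-contained sketch of the doubling-with-cap behaviour (with hypothetical names, not the real flowcontrol API) is:

package main

import (
	"fmt"
	"time"
)

// backoff is a hypothetical sketch of per-id exponential backoff with a cap,
// mirroring the 0, 1, 2, 4, 8, ... 50, 50 progression asserted above.
type backoff struct {
	step, max time.Duration
	delays    map[string]time.Duration
}

func newBackoff(step, max time.Duration) *backoff {
	return &backoff{step: step, max: max, delays: map[string]time.Duration{}}
}

// Get returns the current delay for id (zero if it has never backed off).
func (b *backoff) Get(id string) time.Duration { return b.delays[id] }

// Next records a failure: the first failure waits step, then doubles, capped at max.
func (b *backoff) Next(id string) {
	d, ok := b.delays[id]
	switch {
	case !ok:
		d = b.step
	case d*2 <= b.max:
		d *= 2
	default:
		d = b.max
	}
	b.delays[id] = d
}

// Reset clears the backoff for id, as TestSlowBackoff expects.
func (b *backoff) Reset(id string) { delete(b.delays, id) }

func main() {
	b := newBackoff(time.Second, 50*time.Second)
	for i := 0; i < 10; i++ {
		fmt.Printf("%d ", b.Get("id")/time.Second) // 0 1 2 4 8 16 32 50 50 50
		b.Next("id")
	}
	fmt.Println()
}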
Example #8
// NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *util.FakeClock) {
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	fakeClock := util.NewFakeClock(fakeTime)
	ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
	ttlStore := cache.NewFakeExpirationStore(
		ExpKeyFunc, nil, ttlPolicy, fakeClock)
	return &ControllerExpectations{ttlStore}, fakeClock
}
Example #9
// TestIsTunnelSyncHealthy verifies that the 600 second lag test
// is honored.
func TestIsTunnelSyncHealthy(t *testing.T) {
	tunneler := &SSHTunneler{}
	master, etcdserver, _, assert := setUp(t)
	defer etcdserver.Terminate(t)
	master.tunneler = tunneler

	// Pass case: 480 second lag (within the 600 second limit)
	tunneler.lastSync = time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix()
	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 9, 1, 1, time.UTC))
	err := master.IsTunnelSyncHealthy(nil)
	assert.NoError(err, "IsTunnelSyncHealthy() should not have returned an error.")

	// Fail case: 660 second lag (over the 600 second limit)
	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 12, 1, 1, time.UTC))
	err = master.IsTunnelSyncHealthy(nil)
	assert.Error(err, "IsTunnelSyncHealthy() should have returned an error.")
}
Example #10
func newTestBasicWorkQueue() (*basicWorkQueue, *util.FakeClock) {
	fakeClock := util.NewFakeClock(time.Now())
	wq := &basicWorkQueue{
		clock: fakeClock,
		queue: make(map[types.UID]time.Time),
	}
	return wq, fakeClock
}
Example #11
func TestDeduping(t *testing.T) {
	fakeClock := util.NewFakeClock(time.Now())
	q := newDelayingQueue(fakeClock)

	first := "foo"

	q.AddAfter(first, 50*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	q.AddAfter(first, 70*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	// step past the first block, we should receive now
	fakeClock.Step(60 * time.Millisecond)
	if err := waitForAdded(q, 1); err != nil {
		t.Errorf("should have added")
	}
	item, _ := q.Get()
	q.Done(item)

	// step past the second add
	fakeClock.Step(20 * time.Millisecond)
	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	// test again, but this time the earlier should override
	q.AddAfter(first, 50*time.Millisecond)
	q.AddAfter(first, 30*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	fakeClock.Step(40 * time.Millisecond)
	if err := waitForAdded(q, 1); err != nil {
		t.Errorf("should have added")
	}
	item, _ = q.Get()
	q.Done(item)

	// step past the second add
	fakeClock.Step(20 * time.Millisecond)
	if q.Len() != 0 {
		t.Errorf("should not have added")
	}
}
Example #12
// TestSecondsSinceSync verifies that proper results are returned
// when checking the time between syncs
func TestSecondsSinceSync(t *testing.T) {
	tunneler := &SSHTunneler{}
	assert := assert.New(t)

	tunneler.lastSync = time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix()

	// Nanosecond difference; rounds down to 0 seconds.
	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC))
	assert.Equal(int64(0), tunneler.SecondsSinceSync())

	// Second
	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC))
	assert.Equal(int64(1), tunneler.SecondsSinceSync())

	// Minute
	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC))
	assert.Equal(int64(60), tunneler.SecondsSinceSync())

	// Hour
	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC))
	assert.Equal(int64(3600), tunneler.SecondsSinceSync())

	// Day
	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC))
	assert.Equal(int64(86400), tunneler.SecondsSinceSync())

	// Month
	tunneler.clock = util.NewFakeClock(time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC))
	assert.Equal(int64(2678400), tunneler.SecondsSinceSync())

	// Future Month. Should be -Month.
	tunneler.lastSync = time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC).Unix()
	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC))
	assert.Equal(int64(-2678400), tunneler.SecondsSinceSync())
}
Example #13
func TestRepositoryBucketAddOversize(t *testing.T) {
	clock := util.NewFakeClock(time.Now())

	b := repositoryBucket{
		clock: clock,
	}

	i := 0
	for ; i < bucketSize; i++ {
		ttl := time.Duration(uint64(ttl5m) * uint64(i))
		b.Add(ttl, fmt.Sprintf("%d", i))
	}
	if len(b.list) != bucketSize {
		t.Fatalf("unexpected number of items: %d != %d", len(b.list), bucketSize)
	}

	// make first three stale
	clock.Step(ttl5m * 3)
	if !b.Has("3") {
		t.Fatalf("bucket does not contain repository 3")
	}
	if len(b.list) != bucketSize-3 {
		t.Fatalf("unexpected number of items: %d != %d", len(b.list), bucketSize-3)
	}

	// add few repos one by one
	for ; i < bucketSize+5; i++ {
		ttl := time.Duration(uint64(ttl5m) * uint64(i))
		b.Add(ttl, fmt.Sprintf("%d", i))
	}
	if len(b.list) != bucketSize {
		t.Fatalf("unexpected number of items: %d != %d", len(b.list), bucketSize)
	}

	// add few repos at once
	newRepos := []string{}
	for ; i < bucketSize+10; i++ {
		newRepos = append(newRepos, fmt.Sprintf("%d", i))
	}
	b.Add(ttl5m, newRepos...)
	if len(b.list) != bucketSize {
		t.Fatalf("unexpected number of items: %d != %d", len(b.list), bucketSize)
	}

	for j := 0; j < bucketSize; j++ {
		expected := fmt.Sprintf("%d", i-bucketSize+j)
		if b.list[j].repository != expected {
			t.Fatalf("unexpected repository on index %d: %s != %s", j, b.list[j].repository, expected)
		}
	}
}
Example #14
func TestRepositoryBucketCopy(t *testing.T) {
	now := time.Now()
	clock := util.NewFakeClock(now)

	ttl5m := time.Minute * 5
	for _, tc := range []struct {
		name          string
		entries       []bucketEntry
		expectedRepos []string
	}{
		{
			name:          "no entry",
			expectedRepos: []string{},
		},

		{
			name: "one stale entry",
			entries: []bucketEntry{
				{
					repository: "1",
				},
			},
			expectedRepos: []string{},
		},

		{
			name: "two entries",
			entries: []bucketEntry{
				{
					repository: "a",
					evictOn:    now.Add(ttl5m),
				},
				{
					repository: "b",
					evictOn:    now.Add(ttl5m),
				},
			},
			expectedRepos: []string{"a", "b"},
		},
	} {
		b := repositoryBucket{
			clock: clock,
			list:  tc.entries,
		}
		result := b.Copy()

		if !reflect.DeepEqual(result, tc.expectedRepos) {
			t.Errorf("[%s] got unexpected repo list: %s", tc.name, diff.ObjectGoPrintDiff(result, tc.expectedRepos))
		}
	}
}
Example #15
// TestActiveDeadlineHandler verifies the active deadline handler functions as expected.
func TestActiveDeadlineHandler(t *testing.T) {
	pods := newTestPods(4)
	fakeClock := util.NewFakeClock(time.Now())
	podStatusProvider := &mockPodStatusProvider{pods: pods}
	fakeRecorder := &record.FakeRecorder{}
	handler, err := newActiveDeadlineHandler(podStatusProvider, fakeRecorder, fakeClock)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	now := unversioned.Now()
	startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))

	// this pod has exceeded its active deadline
	exceededActiveDeadlineSeconds := int64(30)
	pods[0].Status.StartTime = &startTime
	pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds

	// this pod has not exceeded its active deadline
	notYetActiveDeadlineSeconds := int64(120)
	pods[1].Status.StartTime = &startTime
	pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds

	// this pod has no deadline
	pods[2].Status.StartTime = &startTime
	pods[2].Spec.ActiveDeadlineSeconds = nil

	testCases := []struct {
		pod      *api.Pod
		expected bool
	}{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}}

	for i, testCase := range testCases {
		if actual := handler.ShouldSync(testCase.pod); actual != testCase.expected {
			t.Errorf("[%d] ShouldSync expected %#v, got %#v", i, testCase.expected, actual)
		}
		actual := handler.ShouldEvict(testCase.pod)
		if actual.Evict != testCase.expected {
			t.Errorf("[%d] ShouldEvict.Evict expected %#v, got %#v", i, testCase.expected, actual.Evict)
		}
		if testCase.expected {
			if actual.Reason != reason {
				t.Errorf("[%d] ShouldEvict.Reason expected %#v, got %#v", i, message, actual.Reason)
			}
			if actual.Message != message {
				t.Errorf("[%d] ShouldEvict.Message expected %#v, got %#v", i, message, actual.Message)
			}
		}
	}
}
Example #16
func newTestGenericPLEG() *TestGenericPLEG {
	fakeRuntime := &containertest.FakeRuntime{}
	clock := util.NewFakeClock(time.Time{})
	// The channel capacity should be large enough to hold all events in a
	// single test.
	pleg := &GenericPLEG{
		relistPeriod: time.Hour,
		runtime:      fakeRuntime,
		eventChannel: make(chan *PodLifecycleEvent, 100),
		podRecords:   make(podRecords),
		clock:        clock,
	}
	return &TestGenericPLEG{pleg: pleg, runtime: fakeRuntime, clock: clock}
}
Example #17
func TestRateLimitingQueue(t *testing.T) {
	limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
	queue := NewRateLimitingQueue(limiter).(*rateLimitingType)
	fakeClock := util.NewFakeClock(time.Now())
	delayingQueue := &delayingType{
		Interface:       New(),
		clock:           fakeClock,
		heartbeat:       fakeClock.Tick(maxWait),
		stopCh:          make(chan struct{}),
		waitingForAddCh: make(chan waitFor, 1000),
		metrics:         newRetryMetrics(""),
	}
	queue.DelayingInterface = delayingQueue

	queue.AddRateLimited("one")
	waitEntry := <-delayingQueue.waitingForAddCh
	if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	queue.AddRateLimited("one")
	waitEntry = <-delayingQueue.waitingForAddCh
	if e, a := 10*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 2, queue.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	queue.AddRateLimited("two")
	waitEntry = <-delayingQueue.waitingForAddCh
	if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	queue.AddRateLimited("two")
	waitEntry = <-delayingQueue.waitingForAddCh
	if e, a := 10*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	queue.Forget("one")
	if e, a := 0, queue.NumRequeues("one"); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	queue.AddRateLimited("one")
	waitEntry = <-delayingQueue.waitingForAddCh
	if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

}
Example #18
func TestAddTwoFireEarly(t *testing.T) {
	fakeClock := util.NewFakeClock(time.Now())
	q := newDelayingQueue(fakeClock)

	first := "foo"
	second := "bar"
	third := "baz"

	q.AddAfter(first, 1*time.Second)
	q.AddAfter(second, 50*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	fakeClock.Step(60 * time.Millisecond)

	if err := waitForAdded(t, q, 1); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	item, _ := q.Get()
	if !reflect.DeepEqual(item, second) {
		t.Errorf("expected %v, got %v", second, item)
	}

	q.AddAfter(third, 2*time.Second)

	fakeClock.Step(1 * time.Second)
	if err := waitForAdded(t, q, 1); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	item, _ = q.Get()
	if !reflect.DeepEqual(item, first) {
		t.Errorf("expected %v, got %v", first, item)
	}

	fakeClock.Step(2 * time.Second)
	if err := waitForAdded(t, q, 1); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	item, _ = q.Get()
	if !reflect.DeepEqual(item, third) {
		t.Errorf("expected %v, got %v", third, item)
	}

}
Example #19
func TestGarbageCollectImageNotOldEnough(t *testing.T) {
	policy := ImageGCPolicy{
		HighThresholdPercent: 90,
		LowThresholdPercent:  80,
		MinAge:               time.Minute * 1,
	}
	fakeRuntime := &containertest.FakeRuntime{}
	mockCadvisor := new(cadvisortest.Mock)
	manager := &realImageManager{
		runtime:      fakeRuntime,
		policy:       policy,
		imageRecords: make(map[string]*imageRecord),
		cadvisor:     mockCadvisor,
		recorder:     &record.FakeRecorder{},
	}

	fakeRuntime.ImageList = []container.Image{
		makeImage(0, 1024),
		makeImage(1, 2048),
	}
	// 1 image is in use, and another one is not old enough
	fakeRuntime.AllPodList = []*containertest.FakePod{
		{Pod: &container.Pod{
			Containers: []*container.Container{
				makeContainer(1),
			},
		}},
	}

	fakeClock := util.NewFakeClock(time.Now())
	t.Log(fakeClock.Now())
	require.NoError(t, manager.detectImages(fakeClock.Now()))
	require.Equal(t, manager.imageRecordsLen(), 2)
	// no space freed since one image is in use and the other is not old enough
	spaceFreed, err := manager.freeSpace(1024, fakeClock.Now())
	assert := assert.New(t)
	require.NoError(t, err)
	assert.EqualValues(0, spaceFreed)
	assert.Len(fakeRuntime.ImageList, 2)

	// move clock by minAge duration, then 1 image will be garbage collected
	fakeClock.Step(policy.MinAge)
	spaceFreed, err = manager.freeSpace(1024, fakeClock.Now())
	require.NoError(t, err)
	assert.EqualValues(1024, spaceFreed)
	assert.Len(fakeRuntime.ImageList, 1)
}
Example #20
func TestSimpleQueue(t *testing.T) {
	fakeClock := util.NewFakeClock(time.Now())
	q := newDelayingQueue(fakeClock)

	first := "foo"

	q.AddAfter(first, 50*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	fakeClock.Step(60 * time.Millisecond)

	if err := waitForAdded(t, q, 1); err != nil {
		t.Errorf("should have added")
	}
	item, _ := q.Get()
	q.Done(item)

	// step past the next heartbeat
	fakeClock.Step(10 * time.Second)

	err := wait.Poll(1*time.Millisecond, 30*time.Millisecond, func() (done bool, err error) {
		if q.Len() > 0 {
			return false, fmt.Errorf("added to queue")
		}

		return false, nil
	})
	if err != wait.ErrWaitTimeout {
		t.Errorf("expected timeout, got: %v", err)
	}

	if q.Len() != 0 {
		t.Errorf("should not have added")
	}
}
Example #21
func TestBackoffHighWaterMark(t *testing.T) {
	id := "_idHiWaterMark"
	tc := util.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 5 * step
	b := NewFakeBackOff(step, maxDuration, tc)

	// get to backoff = maxDuration
	for i := 0; i <= int(maxDuration/step); i++ {
		tc.Step(step)
		b.Next(id, tc.Now())
	}

	// backoff high watermark expires after 2*maxDuration
	tc.Step(maxDuration + step)
	b.Next(id, tc.Now())

	if b.Get(id) != maxDuration {
		t.Errorf("expected Backoff to stay at high watermark %s got %s", maxDuration, b.Get(id))
	}
}
Example #22
func TestAddAndGet(t *testing.T) {
	testObj := testObject{
		key: "foo",
		val: "bar",
	}
	objectCache := NewFakeObjectCache(func() (interface{}, error) {
		return nil, fmt.Errorf("Unexpected Error: updater should never be called in this test!")
	}, 1*time.Hour, util.NewFakeClock(time.Now()))

	err := objectCache.Add(testObj.key, testObj.val)
	if err != nil {
		t.Errorf("Unable to add obj %#v by key: %s", testObj, testObj.key)
	}
	value, err := objectCache.Get(testObj.key)
	if err != nil {
		t.Errorf("Unable to get obj %#v by key: %s", testObj, testObj.key)
	}
	if value.(string) != testObj.val {
		t.Errorf("Expected to get cached value: %#v, but got: %s", testObj.val, value.(string))
	}

}
Example #23
func TestCopyShifting(t *testing.T) {
	fakeClock := util.NewFakeClock(time.Now())
	q := newDelayingQueue(fakeClock)

	first := "foo"
	second := "bar"
	third := "baz"

	q.AddAfter(first, 1*time.Second)
	q.AddAfter(second, 500*time.Millisecond)
	q.AddAfter(third, 250*time.Millisecond)
	if err := waitForWaitingQueueToFill(q); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	if q.Len() != 0 {
		t.Errorf("should not have added")
	}

	fakeClock.Step(2 * time.Second)

	if err := waitForAdded(t, q, 3); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	actualFirst, _ := q.Get()
	if !reflect.DeepEqual(actualFirst, third) {
		t.Errorf("expected %v, got %v", third, actualFirst)
	}
	actualSecond, _ := q.Get()
	if !reflect.DeepEqual(actualSecond, second) {
		t.Errorf("expected %v, got %v", second, actualSecond)
	}
	actualThird, _ := q.Get()
	if !reflect.DeepEqual(actualThird, first) {
		t.Errorf("expected %v, got %v", first, actualThird)
	}
}
Example #24
func TestPuller(t *testing.T) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:            "test_pod",
			Namespace:       "test-ns",
			UID:             "bar",
			ResourceVersion: "42",
			SelfLink:        "/api/v1/pods/foo",
		}}

	cases := []struct {
		containerImage  string
		policy          api.PullPolicy
		calledFunctions []string
		inspectErr      error
		pullerErr       error
		expectedErr     []error
	}{
		{ // pull missing image
			containerImage:  "missing_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent", "PullImage"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{nil}},

		{ // image present, dont pull
			containerImage:  "present_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{nil, nil, nil}},
		// image present, pull it
		{containerImage: "present_image",
			policy:          api.PullAlways,
			calledFunctions: []string{"IsImagePresent", "PullImage"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{nil, nil, nil}},
		// missing image, error PullNever
		{containerImage: "missing_image",
			policy:          api.PullNever,
			calledFunctions: []string{"IsImagePresent"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}},
		// missing image, unable to inspect
		{containerImage: "missing_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent"},
			inspectErr:      errors.New("unknown inspectError"),
			pullerErr:       nil,
			expectedErr:     []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}},
		// missing image, unable to fetch
		{containerImage: "typo_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent", "PullImage"},
			inspectErr:      nil,
			pullerErr:       errors.New("404"),
			expectedErr:     []error{ErrImagePull, ErrImagePull, ErrImagePullBackOff, ErrImagePull, ErrImagePullBackOff, ErrImagePullBackOff}},
	}

	for i, c := range cases {
		container := &api.Container{
			Name:            "container_name",
			Image:           c.containerImage,
			ImagePullPolicy: c.policy,
		}

		backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
		fakeClock := util.NewFakeClock(time.Now())
		backOff.Clock = fakeClock

		fakeRuntime := &ctest.FakeRuntime{}
		fakeRecorder := &record.FakeRecorder{}
		puller := newParallelImagePuller(fakeRecorder, fakeRuntime, backOff)

		fakeRuntime.ImageList = []Image{{"present_image", nil, nil, 1}}
		fakeRuntime.Err = c.pullerErr
		fakeRuntime.InspectErr = c.inspectErr

		for tick, expected := range c.expectedErr {
			fakeClock.Step(time.Second)
			err, _ := puller.pullImage(pod, container, nil)
			fakeRuntime.AssertCalls(c.calledFunctions)
			assert.Equal(t, expected, err, "in test %d tick=%d", i, tick)
		}

	}
}
Example #25
func TestIsInBackOffSinceUpdate(t *testing.T) {
	id := "_idIsInBackOffSinceUpdate"
	tc := util.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 10 * step
	b := NewFakeBackOff(step, maxDuration, tc)
	startTime := tc.Now()

	cases := []struct {
		tick      time.Duration
		inBackOff bool
		value     int
	}{
		{tick: 0, inBackOff: false, value: 0},
		{tick: 1, inBackOff: false, value: 1},
		{tick: 2, inBackOff: true, value: 2},
		{tick: 3, inBackOff: false, value: 2},
		{tick: 4, inBackOff: true, value: 4},
		{tick: 5, inBackOff: true, value: 4},
		{tick: 6, inBackOff: true, value: 4},
		{tick: 7, inBackOff: false, value: 4},
		{tick: 8, inBackOff: true, value: 8},
		{tick: 9, inBackOff: true, value: 8},
		{tick: 10, inBackOff: true, value: 8},
		{tick: 11, inBackOff: true, value: 8},
		{tick: 12, inBackOff: true, value: 8},
		{tick: 13, inBackOff: true, value: 8},
		{tick: 14, inBackOff: true, value: 8},
		{tick: 15, inBackOff: false, value: 8},
		{tick: 16, inBackOff: true, value: 10},
		{tick: 17, inBackOff: true, value: 10},
		{tick: 18, inBackOff: true, value: 10},
		{tick: 19, inBackOff: true, value: 10},
		{tick: 20, inBackOff: true, value: 10},
		{tick: 21, inBackOff: true, value: 10},
		{tick: 22, inBackOff: true, value: 10},
		{tick: 23, inBackOff: true, value: 10},
		{tick: 24, inBackOff: true, value: 10},
		{tick: 25, inBackOff: false, value: 10},
		{tick: 26, inBackOff: true, value: 10},
		{tick: 27, inBackOff: true, value: 10},
		{tick: 28, inBackOff: true, value: 10},
		{tick: 29, inBackOff: true, value: 10},
		{tick: 30, inBackOff: true, value: 10},
		{tick: 31, inBackOff: true, value: 10},
		{tick: 32, inBackOff: true, value: 10},
		{tick: 33, inBackOff: true, value: 10},
		{tick: 34, inBackOff: true, value: 10},
		{tick: 35, inBackOff: false, value: 10},
		{tick: 56, inBackOff: false, value: 0},
		{tick: 57, inBackOff: false, value: 1},
	}

	for _, c := range cases {
		tc.SetTime(startTime.Add(c.tick * step))
		if c.inBackOff != b.IsInBackOffSinceUpdate(id, tc.Now()) {
			t.Errorf("expected IsInBackOffSinceUpdate %v got %v at tick %s", c.inBackOff, b.IsInBackOffSinceUpdate(id, tc.Now()), c.tick*step)
		}

		if c.inBackOff && (time.Duration(c.value)*step != b.Get(id)) {
			t.Errorf("expected backoff value=%s got %s at tick %s", time.Duration(c.value)*step, b.Get(id), c.tick*step)
		}

		if !c.inBackOff {
			b.Next(id, tc.Now())
		}
	}
}
Example #26
func TestSyncPodBackoff(t *testing.T) {
	var fakeClock = util.NewFakeClock(time.Now())
	startTime := fakeClock.Now()

	dm, fakeDocker := newTestDockerManager()
	containers := []api.Container{
		{Name: "good"},
		{Name: "bad"},
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "podfoo",
			Namespace: "nsnew",
		},
		Spec: api.PodSpec{
			Containers: containers,
		},
	}

	stableId := "k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_nsnew_12345678"
	dockerContainers := []*docker.Container{
		{
			ID:   "9876",
			Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_podfoo_nsnew_12345678_0",
			State: docker.State{
				StartedAt: startTime,
				Running:   true,
			},
		},
		{
			ID:   "1234",
			Name: "/k8s_good." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_podfoo_nsnew_12345678_0",
			State: docker.State{
				StartedAt: startTime,
				Running:   true,
			},
		},
		{
			ID:   "5678",
			Name: "/k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_nsnew_12345678_0",
			State: docker.State{
				ExitCode:   42,
				StartedAt:  startTime,
				FinishedAt: fakeClock.Now(),
			},
		},
	}

	startCalls := []string{"inspect_container", "create", "start", "inspect_container"}
	backOffCalls := []string{"inspect_container"}
	startResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", nil, ""}
	backoffResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", kubecontainer.ErrCrashLoopBackOff, ""}
	tests := []struct {
		tick      int
		backoff   int
		killDelay int
		result    []string
		expectErr bool
	}{
		{1, 1, 1, startCalls, false},
		{2, 2, 2, startCalls, false},
		{3, 2, 3, backOffCalls, true},
		{4, 4, 4, startCalls, false},
		{5, 4, 5, backOffCalls, true},
		{6, 4, 6, backOffCalls, true},
		{7, 4, 7, backOffCalls, true},
		{8, 8, 129, startCalls, false},
		{130, 1, 0, startCalls, false},
	}

	backOff := util.NewBackOff(time.Second, time.Minute)
	backOff.Clock = fakeClock
	for _, c := range tests {
		fakeDocker.SetFakeContainers(dockerContainers)
		fakeClock.SetTime(startTime.Add(time.Duration(c.tick) * time.Second))

		result := runSyncPod(t, dm, fakeDocker, pod, backOff, c.expectErr)
		verifyCalls(t, fakeDocker, c.result)

		// Verify whether the correct sync pod result is generated
		if c.expectErr {
			verifySyncResults(t, []*kubecontainer.SyncResult{backoffResult}, result)
		} else {
			verifySyncResults(t, []*kubecontainer.SyncResult{startResult}, result)
		}

		if backOff.Get(stableId) != time.Duration(c.backoff)*time.Second {
			t.Errorf("At tick %s expected backoff=%s got=%s", time.Duration(c.tick)*time.Second, time.Duration(c.backoff)*time.Second, backOff.Get(stableId))
		}

		if len(fakeDocker.Created) > 0 {
			// pretend kill the container
			fakeDocker.Created = nil
			dockerContainers[2].State.FinishedAt = startTime.Add(time.Duration(c.killDelay) * time.Second)
		}
	}
}
Example #27
// TestMemoryPressure verifies that the eviction manager detects memory pressure,
// kills the expected pod, gates pod admission while under pressure, and clears the
// condition after the transition period has passed.
func TestMemoryPressure(t *testing.T) {
	podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) {
		pod := newPod(name, []api.Container{
			newContainer(name, requests, api.ResourceList{}),
		})
		podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet))
		return pod, podStats
	}
	summaryStatsMaker := func(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
		val := resource.MustParse(nodeAvailableBytes)
		availableBytes := uint64(val.Value())
		result := &statsapi.Summary{
			Node: statsapi.NodeStats{
				Memory: &statsapi.MemoryStats{
					AvailableBytes: &availableBytes,
				},
			},
			Pods: []statsapi.PodStats{},
		}
		for _, podStat := range podStats {
			result.Pods = append(result.Pods, podStat)
		}
		return result
	}
	podsToMake := []struct {
		name             string
		requests         api.ResourceList
		limits           api.ResourceList
		memoryWorkingSet string
	}{
		{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"},
		{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"},
		{name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "800Mi"},
		{name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "300Mi"},
		{name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "800Mi"},
		{name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "200Mi"},
	}
	pods := []*api.Pod{}
	podStats := map[*api.Pod]statsapi.PodStats{}
	for _, podToMake := range podsToMake {
		pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
		pods = append(pods, pod)
		podStats[pod] = podStat
	}
	activePodsFunc := func() []*api.Pod {
		return pods
	}

	fakeClock := util.NewFakeClock(time.Now())
	podKiller := &mockPodKiller{}
	nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

	config := Config{
		PressureTransitionPeriod: time.Minute * 5,
		Thresholds: []Threshold{
			{
				Signal:   SignalMemoryAvailable,
				Operator: OpLessThan,
				Value:    quantityMustParse("1Gi"),
			},
		},
	}
	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
	manager := &managerImpl{
		clock:           fakeClock,
		killPodFunc:     podKiller.killPodNow,
		config:          config,
		recorder:        &record.FakeRecorder{},
		summaryProvider: summaryProvider,
		nodeRef:         nodeRef,
		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
		thresholdsFirstObservedAt:    thresholdsObservedAt{},
	}

	// create a best effort pod to test admission
	bestEffortPodToAdmit, _ := podMaker("best-admit", newResourceList("", ""), newResourceList("", ""), "0Gi")
	burstablePodToAdmit, _ := podMaker("burst-admit", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi"), "0Gi")

	// synchronize
	manager.synchronize(activePodsFunc)

	// we should not have memory pressure
	if manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should not report memory pressure")
	}

	// try to admit our pods (they should succeed)
	expected := []bool{true, true}
	for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
		}
	}

	// induce memory pressure!
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("500Mi", podStats)
	manager.synchronize(activePodsFunc)

	// we should have memory pressure
	if !manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should report memory pressure")
	}

	// check the right pod was killed
	if podKiller.pod != pods[0] {
		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
	}

	// the best-effort pod should not admit, burstable should
	expected = []bool{false, true}
	for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
		}
	}

	// reduce memory pressure
	fakeClock.Step(1 * time.Minute)
	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
	podKiller.pod = nil // reset state
	manager.synchronize(activePodsFunc)

	// we should have memory pressure (because transition period not yet met)
	if !manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should report memory pressure")
	}

	// no pod should have been killed
	if podKiller.pod != nil {
		t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
	}

	// the best-effort pod should not admit, burstable should
	expected = []bool{false, true}
	for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
		}
	}

	// move the clock past transition period to ensure that we stop reporting pressure
	fakeClock.Step(5 * time.Minute)
	summaryProvider.result = summaryStatsMaker("2Gi", podStats)
	podKiller.pod = nil // reset state
	manager.synchronize(activePodsFunc)

	// we should not have memory pressure (because transition period met)
	if manager.IsUnderMemoryPressure() {
		t.Errorf("Manager should not report memory pressure")
	}

	// no pod should have been killed
	if podKiller.pod != nil {
		t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
	}

	// all pods should admit now
	expected = []bool{true, true}
	for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
		}
	}
}
Example #28
func TestEventfNoNamespace(t *testing.T) {
	testPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			SelfLink: "/api/version/pods/foo",
			Name:     "foo",
			UID:      "bar",
		},
	}
	testRef, err := api.GetPartialReference(testPod, "spec.containers[2]")
	if err != nil {
		t.Fatal(err)
	}
	table := []struct {
		obj          k8sruntime.Object
		eventtype    string
		reason       string
		messageFmt   string
		elements     []interface{}
		expect       *api.Event
		expectLog    string
		expectUpdate bool
	}{
		{
			obj:        testRef,
			eventtype:  api.EventTypeNormal,
			reason:     "Started",
			messageFmt: "some verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "default",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "",
					UID:        "bar",
					APIVersion: "version",
					FieldPath:  "spec.containers[2]",
				},
				Reason:  "Started",
				Message: "some verbose message: 1",
				Source:  api.EventSource{Component: "eventTest"},
				Count:   1,
				Type:    api.EventTypeNormal,
			},
			expectLog:    `Event(api.ObjectReference{Kind:"Pod", Namespace:"", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
			expectUpdate: false,
		},
	}

	testCache := map[string]*api.Event{}
	logCalled := make(chan struct{})
	createEvent := make(chan *api.Event)
	updateEvent := make(chan *api.Event)
	patchEvent := make(chan *api.Event)
	testEvents := testEventSink{
		OnCreate: OnCreateFactory(testCache, createEvent),
		OnUpdate: func(event *api.Event) (*api.Event, error) {
			updateEvent <- event
			return event, nil
		},
		OnPatch: OnPatchFactory(testCache, patchEvent),
	}
	eventBroadcaster := NewBroadcasterForTests(0)
	sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)

	clock := util.NewFakeClock(time.Now())
	recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock)

	for index, item := range table {
		clock.Step(1 * time.Second)
		logWatcher1 := eventBroadcaster.StartLogging(t.Logf) // Prove that it is useful
		logWatcher2 := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) {
			if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a {
				t.Errorf("Expected '%v', got '%v'", e, a)
			}
			logCalled <- struct{}{}
		})
		recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)

		<-logCalled

		// validate event
		if item.expectUpdate {
			actualEvent := <-patchEvent
			validateEvent(fmt.Sprintf("%d", index), actualEvent, item.expect, t)
		} else {
			actualEvent := <-createEvent
			validateEvent(fmt.Sprintf("%d", index), actualEvent, item.expect, t)
		}

		logWatcher1.Stop()
		logWatcher2.Stop()
	}
	sinkWatcher.Stop()
}
Example #29
func TestMultiSinkCache(t *testing.T) {
	testPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			SelfLink:  "/api/version/pods/foo",
			Name:      "foo",
			Namespace: "baz",
			UID:       "bar",
		},
	}
	testPod2 := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			SelfLink:  "/api/version/pods/foo",
			Name:      "foo",
			Namespace: "baz",
			UID:       "differentUid",
		},
	}
	testRef, err := api.GetPartialReference(testPod, "spec.containers[2]")
	if err != nil {
		t.Fatal(err)
	}
	testRef2, err := api.GetPartialReference(testPod2, "spec.containers[3]")
	if err != nil {
		t.Fatal(err)
	}
	table := []struct {
		obj          k8sruntime.Object
		eventtype    string
		reason       string
		messageFmt   string
		elements     []interface{}
		expect       *api.Event
		expectLog    string
		expectUpdate bool
	}{
		{
			obj:        testRef,
			eventtype:  api.EventTypeNormal,
			reason:     "Started",
			messageFmt: "some verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "baz",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "baz",
					UID:        "bar",
					APIVersion: "version",
					FieldPath:  "spec.containers[2]",
				},
				Reason:  "Started",
				Message: "some verbose message: 1",
				Source:  api.EventSource{Component: "eventTest"},
				Count:   1,
				Type:    api.EventTypeNormal,
			},
			expectLog:    `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
			expectUpdate: false,
		},
		{
			obj:        testPod,
			eventtype:  api.EventTypeNormal,
			reason:     "Killed",
			messageFmt: "some other verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "baz",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "baz",
					UID:        "bar",
					APIVersion: "version",
				},
				Reason:  "Killed",
				Message: "some other verbose message: 1",
				Source:  api.EventSource{Component: "eventTest"},
				Count:   1,
				Type:    api.EventTypeNormal,
			},
			expectLog:    `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'Killed' some other verbose message: 1`,
			expectUpdate: false,
		},
		{
			obj:        testRef,
			eventtype:  api.EventTypeNormal,
			reason:     "Started",
			messageFmt: "some verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "baz",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "baz",
					UID:        "bar",
					APIVersion: "version",
					FieldPath:  "spec.containers[2]",
				},
				Reason:  "Started",
				Message: "some verbose message: 1",
				Source:  api.EventSource{Component: "eventTest"},
				Count:   2,
				Type:    api.EventTypeNormal,
			},
			expectLog:    `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
			expectUpdate: true,
		},
		{
			obj:        testRef2,
			eventtype:  api.EventTypeNormal,
			reason:     "Started",
			messageFmt: "some verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "baz",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "baz",
					UID:        "differentUid",
					APIVersion: "version",
					FieldPath:  "spec.containers[3]",
				},
				Reason:  "Started",
				Message: "some verbose message: 1",
				Source:  api.EventSource{Component: "eventTest"},
				Count:   1,
				Type:    api.EventTypeNormal,
			},
			expectLog:    `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
			expectUpdate: false,
		},
		{
			obj:        testRef,
			eventtype:  api.EventTypeNormal,
			reason:     "Started",
			messageFmt: "some verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "baz",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "baz",
					UID:        "bar",
					APIVersion: "version",
					FieldPath:  "spec.containers[2]",
				},
				Reason:  "Started",
				Message: "some verbose message: 1",
				Source:  api.EventSource{Component: "eventTest"},
				Count:   3,
				Type:    api.EventTypeNormal,
			},
			expectLog:    `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
			expectUpdate: true,
		},
		{
			obj:        testRef2,
			eventtype:  api.EventTypeNormal,
			reason:     "Stopped",
			messageFmt: "some verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "baz",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "baz",
					UID:        "differentUid",
					APIVersion: "version",
					FieldPath:  "spec.containers[3]",
				},
				Reason:  "Stopped",
				Message: "some verbose message: 1",
				Source:  api.EventSource{Component: "eventTest"},
				Count:   1,
				Type:    api.EventTypeNormal,
			},
			expectLog:    `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
			expectUpdate: false,
		},
		{
			obj:        testRef2,
			eventtype:  api.EventTypeNormal,
			reason:     "Stopped",
			messageFmt: "some verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "baz",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "baz",
					UID:        "differentUid",
					APIVersion: "version",
					FieldPath:  "spec.containers[3]",
				},
				Reason:  "Stopped",
				Message: "some verbose message: 1",
				Source:  api.EventSource{Component: "eventTest"},
				Count:   2,
				Type:    api.EventTypeNormal,
			},
			expectLog:    `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
			expectUpdate: true,
		},
	}

	testCache := map[string]*api.Event{}
	createEvent := make(chan *api.Event)
	updateEvent := make(chan *api.Event)
	patchEvent := make(chan *api.Event)
	testEvents := testEventSink{
		OnCreate: OnCreateFactory(testCache, createEvent),
		OnUpdate: func(event *api.Event) (*api.Event, error) {
			updateEvent <- event
			return event, nil
		},
		OnPatch: OnPatchFactory(testCache, patchEvent),
	}

	testCache2 := map[string]*api.Event{}
	createEvent2 := make(chan *api.Event)
	updateEvent2 := make(chan *api.Event)
	patchEvent2 := make(chan *api.Event)
	testEvents2 := testEventSink{
		OnCreate: OnCreateFactory(testCache2, createEvent2),
		OnUpdate: func(event *api.Event) (*api.Event, error) {
			updateEvent2 <- event
			return event, nil
		},
		OnPatch: OnPatchFactory(testCache2, patchEvent2),
	}

	eventBroadcaster := NewBroadcasterForTests(0)
	clock := util.NewFakeClock(time.Now())
	recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock)

	sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
	for index, item := range table {
		clock.Step(1 * time.Second)
		recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)

		// validate event
		if item.expectUpdate {
			actualEvent := <-patchEvent
			validateEvent(fmt.Sprintf("%d", index), actualEvent, item.expect, t)
		} else {
			actualEvent := <-createEvent
			validateEvent(fmt.Sprintf("%d", index), actualEvent, item.expect, t)
		}
	}

	// Another StartRecordingToSink call should start to record events with new clean cache.
	sinkWatcher2 := eventBroadcaster.StartRecordingToSink(&testEvents2)
	for index, item := range table {
		clock.Step(1 * time.Second)
		recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)

		// validate event
		if item.expectUpdate {
			actualEvent := <-patchEvent2
			validateEvent(fmt.Sprintf("%d", index), actualEvent, item.expect, t)
		} else {
			actualEvent := <-createEvent2
			validateEvent(fmt.Sprintf("%d", index), actualEvent, item.expect, t)
		}
	}

	sinkWatcher.Stop()
	sinkWatcher2.Stop()
}
Example #30
// newTestWatchCache just adds a fake clock.
func newTestWatchCache(capacity int) *watchCache {
	wc := newWatchCache(capacity)
	wc.clock = util.NewFakeClock(time.Now())
	return wc
}