Example #1
func NewFakeKubeRuntimeManager(runtimeService internalApi.RuntimeService, imageService internalApi.ImageManagerService, networkPlugin network.NetworkPlugin, osInterface kubecontainer.OSInterface) (*kubeGenericRuntimeManager, error) {
	recorder := &record.FakeRecorder{}
	kubeRuntimeManager := &kubeGenericRuntimeManager{
		recorder:            recorder,
		cpuCFSQuota:         false,
		livenessManager:     proberesults.NewManager(),
		containerRefManager: kubecontainer.NewRefManager(),
		osInterface:         osInterface,
		networkPlugin:       networkPlugin,
		runtimeHelper:       &fakeRuntimeHelper{},
		runtimeService:      runtimeService,
		imageService:        imageService,
		keyring:             credentialprovider.NewDockerKeyring(),
	}

	typedVersion, err := runtimeService.Version(kubeRuntimeAPIVersion)
	if err != nil {
		return nil, err
	}

	kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodGetter(), kubeRuntimeManager)
	kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
	kubeRuntimeManager.imagePuller = images.NewImageManager(
		kubecontainer.FilterEventRecorder(recorder),
		kubeRuntimeManager,
		flowcontrol.NewBackOff(time.Second, 300*time.Second),
		false)
	kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(
		&fakeHTTP{},
		kubeRuntimeManager,
		kubeRuntimeManager)

	return kubeRuntimeManager, nil
}
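Example #1 wires a &fakeHTTP{} into lifecycle.NewHandlerRunner, which needs an HTTP getter for http-get lifecycle hooks. A minimal sketch of such a stub, assuming the runner only requires a Get(url string) (*http.Response, error) method; the field names are illustrative, not necessarily the repository's actual helper:

package kuberuntime

import "net/http"

// fakeHTTP records the last URL it was asked to fetch and returns a
// canned error, so lifecycle handlers can run without real network calls.
type fakeHTTP struct {
	url string // last URL passed to Get (assumed field, for assertions)
	err error  // error to return from Get
}

// Get satisfies the HTTP-getter interface the handler runner consumes.
func (f *fakeHTTP) Get(url string) (*http.Response, error) {
	f.url = url
	return nil, f.err
}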
Example #2
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	podManager := kubepod.NewBasicPodManager(kubepod.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}
	basePath, err := ioutil.TempDir(os.TempDir(), "kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
	}
	kb.containerManager = cm.NewStubContainerManager()

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
Example #3
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)

	podManager, _ := newFakePodManager()
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}

	kb := &Kubelet{
		rootDirectory:       "/tmp/kubelet",
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		statusManager:       status.NewManager(nil),
		containerRefManager: kubecontainer.NewRefManager(),
		readinessManager:    kubecontainer.NewReadinessManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
	}
	kb.containerManager, _ = newContainerManager(fakeContainerMgrMountInt(), cadvisor, "", "", "")

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
Example #4
func newTestManager() *manager {
	m := NewManager(
		status.NewManager(&testclient.Fake{}, kubepod.NewBasicPodManager(nil)),
		results.NewManager(),
		results.NewManager(),
		nil, // runner
		kubecontainer.NewRefManager(),
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}
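Example #4 and the newTestManager variants that follow all replace the prober's exec implementation with fakeExecProber{probe.Success, nil} so no command actually runs. A minimal sketch of that stub, under the assumption that the exec field takes an exec-prober interface with a three-value Probe method (the exact signature varied across releases, so treat this as illustrative):

package prober

import (
	"k8s.io/kubernetes/pkg/probe"
	"k8s.io/kubernetes/pkg/util/exec"
)

// fakeExecProber replays a canned result and error for every probe,
// so tests never execute a real command inside a container.
type fakeExecProber struct {
	result probe.Result
	err    error
}

// Probe ignores the command and returns the configured outcome.
func (p fakeExecProber) Probe(_ exec.Cmd) (probe.Result, string, error) {
	return p.result, "", p.err
}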
Example #5
func newTestManager() *manager {
	refManager := kubecontainer.NewRefManager()
	refManager.SetRef(testContainerID, &api.ObjectReference{}) // Suppress prober warnings.
	m := NewManager(
		status.NewManager(&testclient.Fake{}, kubepod.NewBasicPodManager(nil)),
		results.NewManager(),
		nil, // runner
		refManager,
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}
Example #6
func newTestManager() *manager {
	const probePeriod = 1
	m := NewManager(
		probePeriod,
		status.NewManager(&testclient.Fake{}),
		results.NewManager(),
		results.NewManager(),
		nil, // runner
		kubecontainer.NewRefManager(),
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}
Example #7
func newTestManager() *manager {
	refManager := kubecontainer.NewRefManager()
	refManager.SetRef(testContainerID, &v1.ObjectReference{}) // Suppress prober warnings.
	podManager := kubepod.NewBasicPodManager(nil)
	// Add test pod to pod manager, so that status manager can get the pod from pod manager if needed.
	podManager.AddPod(getTestPod())
	m := NewManager(
		status.NewManager(&fake.Clientset{}, podManager),
		results.NewManager(),
		nil, // runner
		refManager,
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}
Example #8
func TestHandleCrash(t *testing.T) {
	m := newTestManager()
	w := newTestWorker(m, readiness, api.Probe{})
	m.statusManager.SetPodStatus(w.pod, getRunningStatus())

	expectContinue(t, w, w.doProbe(), "Initial successful probe.")
	expectResult(t, w, results.Success, "Initial successful probe.")

	// Prober starts crashing.
	m.prober = &prober{
		refManager: kubecontainer.NewRefManager(),
		recorder:   &record.FakeRecorder{},
		exec:       crashingExecProber{},
	}

	// doProbe should recover from the crash, and keep going.
	expectContinue(t, w, w.doProbe(), "Crashing probe.")
	expectResult(t, w, results.Success, "Crashing probe unchanged.")
}
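Examples #8 and #9 swap in a crashingExecProber to check that doProbe recovers from a panicking prober and keeps the worker alive. A plausible sketch, assuming the same exec-prober interface as the fakeExecProber sketch above and that the crash is a deliberate panic (the message is illustrative):

package prober

import (
	"k8s.io/kubernetes/pkg/probe"
	"k8s.io/kubernetes/pkg/util/exec"
)

// crashingExecProber panics on every call so tests can verify that the
// probe worker recovers instead of terminating.
type crashingExecProber struct{}

func (p crashingExecProber) Probe(_ exec.Cmd) (probe.Result, string, error) {
	panic("intentional probe crash")
}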
Example #9
func TestHandleCrash(t *testing.T) {
	m := newTestManager()
	m.prober = &prober{
		refManager: kubecontainer.NewRefManager(),
		recorder:   &record.FakeRecorder{},
		exec:       crashingExecProber{},
	}

	w := newTestWorker(m, readiness, api.Probe{})
	m.statusManager.SetPodStatus(w.pod, getRunningStatus())

	// doProbe should recover from the crash, and keep going.
	if !w.doProbe() {
		t.Error("Expected to keep going, but terminated.")
	}
	if _, ok := m.readinessManager.Get(containerID); ok {
		t.Error("Expected readiness to be unchanged from crash.")
	}
}
Example #10
func newTestDockerManager() (*dockertools.DockerManager, *dockertools.FakeDockerClient) {
	fakeDocker := &dockertools.FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}, Errors: make(map[string]error), RemovedImages: sets.String{}}
	fakeRecorder := &record.FakeRecorder{}
	containerRefManager := kubecontainer.NewRefManager()
	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := dockertools.NewFakeDockerManager(
		fakeDocker,
		fakeRecorder,
		prober.FakeProber{},
		containerRefManager,
		&cadvisorApi.MachineInfo{},
		dockertools.PodInfraContainerImage,
		0, 0, "",
		kubecontainer.FakeOS{},
		networkPlugin,
		nil,
		nil)

	return dockerManager, fakeDocker
}
Example #11
func newTestDockerManager() (*dockertools.DockerManager, *dockertools.FakeDockerClient) {
	fakeDocker := dockertools.NewFakeDockerClient()
	fakeRecorder := &record.FakeRecorder{}
	containerRefManager := kubecontainer.NewRefManager()
	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := dockertools.NewFakeDockerManager(
		fakeDocker,
		fakeRecorder,
		proberesults.NewManager(),
		containerRefManager,
		&cadvisorapi.MachineInfo{},
		dockertools.PodInfraContainerImage,
		0, 0, "",
		kubecontainer.FakeOS{},
		networkPlugin,
		nil,
		nil,
		nil)

	return dockerManager, fakeDocker
}
Example #12
func newTestDockerManager() (*dockertools.DockerManager, *dockertools.FakeDockerClient) {
	fakeDocker := dockertools.NewFakeDockerClient()
	fakeRecorder := &record.FakeRecorder{}
	containerRefManager := kubecontainer.NewRefManager()
	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone)
	dockerManager := dockertools.NewFakeDockerManager(
		fakeDocker,
		fakeRecorder,
		proberesults.NewManager(),
		containerRefManager,
		&cadvisorapi.MachineInfo{},
		options.GetDefaultPodInfraContainerImage(),
		0, 0, "",
		&containertest.FakeOS{},
		networkPlugin,
		nil,
		nil,
		nil)

	return dockerManager, fakeDocker
}
Example #13
func newTestDockerManagerWithHTTPClient(fakeHTTPClient *fakeHTTP) (*DockerManager, *FakeDockerClient) {
	fakeDocker := NewFakeDockerClient()
	fakeRecorder := &record.FakeRecorder{}
	containerRefManager := kubecontainer.NewRefManager()
	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := NewFakeDockerManager(
		fakeDocker,
		fakeRecorder,
		proberesults.NewManager(),
		containerRefManager,
		&cadvisorapi.MachineInfo{},
		kubetypes.PodInfraContainerImage,
		0, 0, "",
		kubecontainer.FakeOS{},
		networkPlugin,
		&fakeRuntimeHelper{},
		fakeHTTPClient,
		util.NewBackOff(time.Second, 300*time.Second))

	return dockerManager, fakeDocker
}
Example #14
func NewFakeKubeRuntimeManager(runtimeService internalApi.RuntimeService, imageService internalApi.ImageManagerService) (*kubeGenericRuntimeManager, error) {
	networkPlugin, _ := network.InitNetworkPlugin(
		[]network.NetworkPlugin{},
		"",
		nettest.NewFakeHost(nil),
		componentconfig.HairpinNone,
		"10.0.0.0/8",
	)

	return NewKubeGenericRuntimeManager(
		&record.FakeRecorder{},
		proberesults.NewManager(),
		kubecontainer.NewRefManager(),
		&containertest.FakeOS{},
		networkPlugin,
		&fakeRuntimeHelper{},
		&fakeHTTP{},
		flowcontrol.NewBackOff(time.Second, 300*time.Second),
		false,
		false,
		runtimeService,
		imageService,
	)
}
Example #15
func TestLifeCycleHooks(t *testing.T) {
	runner := lifecycle.NewFakeHandlerRunner()
	fr := newFakeRktInterface()
	fs := newFakeSystemd()

	rkt := &Runtime{
		runner:              runner,
		apisvc:              fr,
		systemd:             fs,
		containerRefManager: kubecontainer.NewRefManager(),
	}

	tests := []struct {
		pod           *api.Pod
		runtimePod    *kubecontainer.Pod
		postStartRuns []string
		preStopRuns   []string
		err           error
	}{
		{
			// Case 0, container without any hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "container-name-1"},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{ID: kubecontainer.BuildContainerID("rkt", "id-1")},
				},
			},
			[]string{},
			[]string{},
			nil,
		},
		{
			// Case 1, containers with post-start and pre-stop hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PostStart: &api.Handler{
									Exec: &api.ExecAction{},
								},
							},
						},
						{
							Name: "container-name-2",
							Lifecycle: &api.Lifecycle{
								PostStart: &api.Handler{
									HTTPGet: &api.HTTPGetAction{},
								},
							},
						},
						{
							Name: "container-name-3",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									Exec: &api.ExecAction{},
								},
							},
						},
						{
							Name: "container-name-4",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									HTTPGet: &api.HTTPGetAction{},
								},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-4"),
						Name: "container-name-4",
					},
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-3"),
						Name: "container-name-3",
					},
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-2"),
						Name: "container-name-2",
					},
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-1"),
						Name: "container-name-1",
					},
				},
			},
			[]string{
				"exec on pod: pod-1_ns-1(uid-1), container: container-name-1: rkt://uuid:container-name-1",
				"http-get on pod: pod-1_ns-1(uid-1), container: container-name-2: rkt://uuid:container-name-2",
			},
			[]string{
				"exec on pod: pod-1_ns-1(uid-1), container: container-name-3: rkt://uuid:container-name-3",
				"http-get on pod: pod-1_ns-1(uid-1), container: container-name-4: rkt://uuid:container-name-4",
			},
			nil,
		},
		{
			// Case 2, one container with invalid hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PostStart: &api.Handler{},
								PreStop:   &api.Handler{},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-1"),
						Name: "container-name-1",
					},
				},
			},
			[]string{},
			[]string{},
			errors.NewAggregate([]error{fmt.Errorf("Invalid handler: %v", &api.Handler{})}),
		},
	}

	for i, tt := range tests {
		testCaseHint := fmt.Sprintf("test case #%d", i)

		pod := &rktapi.Pod{Id: "uuid"}
		for _, c := range tt.runtimePod.Containers {
			pod.Apps = append(pod.Apps, &rktapi.App{
				Name:  c.Name,
				State: rktapi.AppState_APP_STATE_RUNNING,
			})
		}
		fr.pods = []*rktapi.Pod{pod}

		// Run post-start hooks
		err := rkt.runLifecycleHooks(tt.pod, tt.runtimePod, lifecyclePostStartHook)
		assert.Equal(t, tt.err, err, testCaseHint)

		sort.Sort(sortedStringList(tt.postStartRuns))
		sort.Sort(sortedStringList(runner.HandlerRuns))

		assert.Equal(t, tt.postStartRuns, runner.HandlerRuns, testCaseHint)

		runner.Reset()

		// Run pre-stop hooks.
		err = rkt.runLifecycleHooks(tt.pod, tt.runtimePod, lifecyclePreStopHook)
		assert.Equal(t, tt.err, err, testCaseHint)

		sort.Sort(sortedStringList(tt.preStopRuns))
		sort.Sort(sortedStringList(runner.HandlerRuns))

		assert.Equal(t, tt.preStopRuns, runner.HandlerRuns, testCaseHint)

		runner.Reset()
	}
}
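Example #15 and the later rkt tests compare handler runs order-insensitively by sorting both sides with sort.Sort(sortedStringList(...)). A minimal sketch of that helper, assuming it is simply a []string implementing sort.Interface (sort.Strings would do the same job; the named type just makes the conversions in the tests explicit):

package rkt

// sortedStringList adapts a []string to sort.Interface so expected and
// actual string slices can be sorted before comparison.
type sortedStringList []string

func (s sortedStringList) Len() int           { return len(s) }
func (s sortedStringList) Less(i, j int) bool { return s[i] < s[j] }
func (s sortedStringList) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }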
Example #16
// TestProbeContainer tests the functionality of probeContainer.
// Test cases are:
//
// No probe.
// Only LivenessProbe.
// Only ReadinessProbe.
// Both probes.
//
// Also, for each probe there are several cases covering whether the initial
// delay has passed and whether the probe handler returns Success, Failure,
// Unknown, or an error.
//
// PLEASE READ THE PROBE DOCS BEFORE CHANGING THIS TEST IF YOU ARE UNSURE HOW PROBES ARE SUPPOSED TO WORK:
// (See https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/user-guide/pod-states.md#pod-conditions)
func TestProbeContainer(t *testing.T) {
	prober := &prober{
		refManager: kubecontainer.NewRefManager(),
		recorder:   &record.FakeRecorder{},
	}
	containerID := "foobar"
	createdAt := time.Now().Unix()

	tests := []struct {
		testContainer     api.Container
		expectError       bool
		expectedLiveness  probe.Result
		expectedReadiness probe.Result
	}{
		// No probes.
		{
			testContainer:     api.Container{},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Success,
		},
		// Only LivenessProbe. expectedReadiness should always be probe.Success here.
		{
			testContainer: api.Container{
				LivenessProbe: &api.Probe{InitialDelaySeconds: 100},
			},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Success,
		},
		{
			testContainer: api.Container{
				LivenessProbe: &api.Probe{InitialDelaySeconds: -100},
			},
			expectedLiveness:  probe.Unknown,
			expectedReadiness: probe.Success,
		},
		{
			testContainer: api.Container{
				LivenessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
			},
			expectedLiveness:  probe.Failure,
			expectedReadiness: probe.Success,
		},
		{
			testContainer: api.Container{
				LivenessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
			},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Success,
		},
		{
			testContainer: api.Container{
				LivenessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
			},
			expectedLiveness:  probe.Unknown,
			expectedReadiness: probe.Success,
		},
		{
			testContainer: api.Container{
				LivenessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
			},
			expectError:       true,
			expectedLiveness:  probe.Unknown,
			expectedReadiness: probe.Success,
		},
		// Only ReadinessProbe. expectedLiveness should always be probe.Success here.
		{
			testContainer: api.Container{
				ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
			},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Unknown,
		},
		{
			testContainer: api.Container{
				ReadinessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
			},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Success,
		},
		{
			testContainer: api.Container{
				ReadinessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
			},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Success,
		},
		{
			testContainer: api.Container{
				ReadinessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
			},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Success,
		},
		{
			testContainer: api.Container{
				ReadinessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
			},
			expectError:       false,
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Success,
		},
		// Both LivenessProbe and ReadinessProbe.
		{
			testContainer: api.Container{
				LivenessProbe:  &api.Probe{InitialDelaySeconds: 100},
				ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
			},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Unknown,
		},
		{
			testContainer: api.Container{
				LivenessProbe:  &api.Probe{InitialDelaySeconds: 100},
				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
			},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Unknown,
		},
		{
			testContainer: api.Container{
				LivenessProbe:  &api.Probe{InitialDelaySeconds: -100},
				ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
			},
			expectedLiveness:  probe.Unknown,
			expectedReadiness: probe.Unknown,
		},
		{
			testContainer: api.Container{
				LivenessProbe:  &api.Probe{InitialDelaySeconds: -100},
				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
			},
			expectedLiveness:  probe.Unknown,
			expectedReadiness: probe.Unknown,
		},
		{
			testContainer: api.Container{
				LivenessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
			},
			expectedLiveness:  probe.Unknown,
			expectedReadiness: probe.Unknown,
		},
		{
			testContainer: api.Container{
				LivenessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
			},
			expectedLiveness:  probe.Failure,
			expectedReadiness: probe.Unknown,
		},
		{
			testContainer: api.Container{
				LivenessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
				ReadinessProbe: &api.Probe{
					InitialDelaySeconds: -100,
					Handler: api.Handler{
						Exec: &api.ExecAction{},
					},
				},
			},
			expectedLiveness:  probe.Success,
			expectedReadiness: probe.Success,
		},
	}

	for i, test := range tests {
		if test.expectError {
			prober.exec = fakeExecProber{test.expectedLiveness, errors.New("exec error")}
		} else {
			prober.exec = fakeExecProber{test.expectedLiveness, nil}
		}

		liveness, err := prober.ProbeLiveness(&api.Pod{}, api.PodStatus{}, test.testContainer, containerID, createdAt)
		if test.expectError && err == nil {
			t.Errorf("[%d] Expected liveness probe error but no error was returned.", i)
		}
		if !test.expectError && err != nil {
			t.Errorf("[%d] Didn't expect liveness probe error but got: %v", i, err)
		}
		if test.expectedLiveness != liveness {
			t.Errorf("[%d] Expected liveness result to be %v but was %v", i, test.expectedLiveness, liveness)
		}

		// TODO: Test readiness errors
		prober.exec = fakeExecProber{test.expectedReadiness, nil}
		readiness, err := prober.ProbeReadiness(&api.Pod{}, api.PodStatus{}, test.testContainer, containerID)
		if err != nil {
			t.Errorf("[%d] Unexpected readiness probe error: %v", i, err)
		}
		if test.expectedReadiness != readiness {
			t.Errorf("[%d] Expected readiness result to be %v but was %v", i, test.expectedReadiness, readiness)
		}
	}
}
Example #17
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	cadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400 * mb,
		Capacity:  1000 * mb,
		Available: 600 * mb,
	}, nil)
	cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9 * mb,
		Capacity: 10 * mb,
	}, nil)
	podManager := kubepod.NewBasicPodManager(kubepod.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}
	basePath, err := utiltesting.MkTmpdir("kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
		reasonCache:         NewReasonCache(),
		clock:               util.RealClock{},
	}
	kb.containerManager = cm.NewStubContainerManager()

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	// The original test here was meaningless: the fake runtime always returns an
	// empty podStatus, and the original isPodRunning logic happened to return
	// true for an empty podStatus, so the test always passed. The isPodRunning
	// logic has since changed, so to keep the test passing we set the podStatus
	// directly in the fake runtime. The test is still meaningless, because
	// isPodRunning will now always return true, but since runOnce is no longer
	// used in Kubernetes, the cleanup work is deprioritized.
	// TODO(random-liu) Fix the test, make it meaningful.
	fakeRuntime.PodStatus = kubecontainer.PodStatus{
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name:  "bar",
				State: kubecontainer.ContainerStateRunning,
			},
		},
	}
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
Example #18
func TestPreStopHooks(t *testing.T) {
	runner := lifecycle.NewFakeHandlerRunner()
	fr := newFakeRktInterface()
	fs := newFakeSystemd()

	rkt := &Runtime{
		runner:              runner,
		apisvc:              fr,
		systemd:             fs,
		containerRefManager: kubecontainer.NewRefManager(),
	}

	tests := []struct {
		pod         *api.Pod
		runtimePod  *kubecontainer.Pod
		preStopRuns []string
		err         error
	}{
		{
			// Case 0, container without any hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "container-name-1"},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{ID: kubecontainer.BuildContainerID("rkt", "id-1")},
				},
			},
			[]string{},
			nil,
		},
		{
			// Case 1, containers with pre-stop hook.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									Exec: &api.ExecAction{},
								},
							},
						},
						{
							Name: "container-name-2",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									HTTPGet: &api.HTTPGetAction{},
								},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{ID: kubecontainer.BuildContainerID("rkt", "id-1")},
					{ID: kubecontainer.BuildContainerID("rkt", "id-2")},
				},
			},
			[]string{
				"exec on pod: pod-1_ns-1(uid-1), container: container-name-1: rkt://id-1",
				"http-get on pod: pod-1_ns-1(uid-1), container: container-name-2: rkt://id-2",
			},
			nil,
		},
		{
			// Case 2, one container with invalid hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{ID: kubecontainer.BuildContainerID("rkt", "id-1")},
				},
			},
			[]string{},
			errors.NewAggregate([]error{fmt.Errorf("Invalid handler: %v", &api.Handler{})}),
		},
	}

	for i, tt := range tests {
		testCaseHint := fmt.Sprintf("test case #%d", i)

		// Run pre-stop hooks.
		err := rkt.runPreStopHook(tt.pod, tt.runtimePod)
		assert.Equal(t, tt.err, err, testCaseHint)

		sort.Sort(sortedStringList(tt.preStopRuns))
		sort.Sort(sortedStringList(runner.HandlerRuns))

		assert.Equal(t, tt.preStopRuns, runner.HandlerRuns, testCaseHint)

		runner.Reset()
	}
}
Example #19
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)

	podManager, _ := newFakePodManager()

	kb := &Kubelet{
		rootDirectory:       "/tmp/kubelet",
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		statusManager:       newStatusManager(nil),
		containerRefManager: kubecontainer.NewRefManager(),
		readinessManager:    kubecontainer.NewReadinessManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
	}
	kb.containerManager, _ = newContainerManager(cadvisor, "", "", "")

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	podContainers := []docker.APIContainers{
		{
			Names:  []string{"/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&api.Container{Name: "bar"}), 16) + "_foo_new_12345678_42"},
			ID:     "1234",
			Status: "running",
		},
		{
			Names:  []string{"/k8s_net_foo.new.test_abcdefgh_42"},
			ID:     "9876",
			Status: "running",
		},
	}
	kb.dockerClient = &testDocker{
		listContainersResults: []listContainersResult{
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: []docker.APIContainers{}},
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: podContainers},
			{label: "list pod container", containers: podContainers},
			{label: "list pod container", containers: podContainers},
		},
		inspectContainersResults: []inspectContainersResult{
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
		},
		t: t,
	}

	kb.containerRuntime = dockertools.NewFakeDockerManager(
		kb.dockerClient,
		kb.recorder,
		kb.readinessManager,
		kb.containerRefManager,
		dockertools.PodInfraContainerImage,
		0,
		0,
		"",
		kubecontainer.FakeOS{},
		kb.networkPlugin,
		kb,
		nil,
		newKubeletRuntimeHooks(kb.recorder))

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
Example #20
func TestProbe(t *testing.T) {
	prober := &prober{
		refManager: kubecontainer.NewRefManager(),
		recorder:   &record.FakeRecorder{},
	}
	containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"}

	execProbe := &api.Probe{
		Handler: api.Handler{
			Exec: &api.ExecAction{},
		},
	}
	tests := []struct {
		probe          *api.Probe
		execError      bool
		expectError    bool
		execResult     probe.Result
		expectedResult results.Result
	}{
		{ // No probe
			probe:          nil,
			expectedResult: results.Success,
		},
		{ // No handler
			probe:          &api.Probe{},
			expectError:    true,
			expectedResult: results.Failure,
		},
		{ // Probe fails
			probe:          execProbe,
			execResult:     probe.Failure,
			expectedResult: results.Failure,
		},
		{ // Probe succeeds
			probe:          execProbe,
			execResult:     probe.Success,
			expectedResult: results.Success,
		},
		{ // Probe result is unknown
			probe:          execProbe,
			execResult:     probe.Unknown,
			expectedResult: results.Failure,
		},
		{ // Probe has an error
			probe:          execProbe,
			execError:      true,
			expectError:    true,
			execResult:     probe.Unknown,
			expectedResult: results.Failure,
		},
	}

	for i, test := range tests {
		for _, probeType := range [...]probeType{liveness, readiness} {
			testID := fmt.Sprintf("%d-%s", i, probeType)
			testContainer := api.Container{}
			switch probeType {
			case liveness:
				testContainer.LivenessProbe = test.probe
			case readiness:
				testContainer.ReadinessProbe = test.probe
			}
			if test.execError {
				prober.exec = fakeExecProber{test.execResult, errors.New("exec error")}
			} else {
				prober.exec = fakeExecProber{test.execResult, nil}
			}

			result, err := prober.probe(probeType, &api.Pod{}, api.PodStatus{}, testContainer, containerID)
			if test.expectError && err == nil {
				t.Errorf("[%s] Expected probe error but no error was returned.", testID)
			}
			if !test.expectError && err != nil {
				t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err)
			}
			if test.expectedResult != result {
				t.Errorf("[%s] Expected result to be %v but was %v", testID, test.expectedResult, result)
			}
		}
	}
}
Example #21
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisortest.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	cadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400 * mb,
		Capacity:  1000 * mb,
		Available: 600 * mb,
	}, nil)
	cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9 * mb,
		Capacity: 10 * mb,
	}, nil)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &containertest.FakeRuntime{}
	basePath, err := utiltesting.MkTmpdir("kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  &containertest.FakeOS{},
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
		reasonCache:         NewReasonCache(),
		clock:               util.RealClock{},
		kubeClient:          &fake.Clientset{},
		hostname:            testKubeletHostname,
		nodeName:            testKubeletHostname,
	}
	kb.containerManager = cm.NewStubContainerManager()

	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	kb.volumePluginMgr, err =
		NewInitializedVolumePluginMgr(kb, []volume.VolumePlugin{plug})
	if err != nil {
		t.Fatalf("failed to initialize VolumePluginMgr: %v", err)
	}
	kb.volumeManager, err = kubeletvolume.NewVolumeManager(
		true,
		kb.hostname,
		kb.podManager,
		kb.kubeClient,
		kb.volumePluginMgr,
		fakeRuntime)

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kb.nonMasqueradeCIDR)
	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
	volumeStatsAggPeriod := time.Second * 10
	kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)
	nodeRef := &api.ObjectReference{
		Kind:      "Node",
		Name:      kb.nodeName,
		UID:       types.UID(kb.nodeName),
		Namespace: "",
	}
	fakeKillPodFunc := func(pod *api.Pod, podStatus api.PodStatus, gracePeriodOverride *int64) error {
		return nil
	}
	evictionManager, evictionAdmitHandler, err := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, kb.recorder, nodeRef, kb.clock)
	if err != nil {
		t.Fatalf("failed to initialize eviction manager: %v", err)
	}
	kb.evictionManager = evictionManager
	kb.AddPodAdmitHandler(evictionAdmitHandler)
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	// The original test here was meaningless: the fake runtime always returns an
	// empty podStatus, and the original isPodRunning logic happened to return
	// true for an empty podStatus, so the test always passed. The isPodRunning
	// logic has since changed, so to keep the test passing we set the podStatus
	// directly in the fake runtime. The test is still meaningless, because
	// isPodRunning will now always return true, but since runOnce is no longer
	// used in Kubernetes, the cleanup work is deprioritized.
	// TODO(random-liu) Fix the test, make it meaningful.
	fakeRuntime.PodStatus = kubecontainer.PodStatus{
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name:  "bar",
				State: kubecontainer.ContainerStateRunning,
			},
		},
	}
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
Example #22
func TestGarbageCollect(t *testing.T) {
	fr := newFakeRktInterface()
	fs := newFakeSystemd()
	cli := newFakeRktCli()
	fakeOS := kubetesting.NewFakeOS()
	getter := newFakePodGetter()

	rkt := &Runtime{
		os:                  fakeOS,
		cli:                 cli,
		apisvc:              fr,
		podGetter:           getter,
		systemd:             fs,
		containerRefManager: kubecontainer.NewRefManager(),
	}

	fakeApp := &rktapi.App{Name: "app-foo"}

	tests := []struct {
		gcPolicy             kubecontainer.ContainerGCPolicy
		apiPods              []*api.Pod
		pods                 []*rktapi.Pod
		serviceFilesOnDisk   []string
		expectedCommands     []string
		expectedServiceFiles []string
	}{
		// All running pods should not be gc'd.
		// Dead, new pods should not be gc'd.
		// Dead, old pods should be gc'd.
		// Deleted pods should be gc'd.
		// Service files without corresponding pods should be removed.
		{
			kubecontainer.ContainerGCPolicy{
				MinAge:        0,
				MaxContainers: 0,
			},
			[]*api.Pod{
				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-1"}},
				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-2"}},
				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-3"}},
				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-4"}},
			},
			[]*rktapi.Pod{
				{
					Id:        "deleted-foo",
					State:     rktapi.PodState_POD_STATE_EXITED,
					CreatedAt: time.Now().Add(time.Hour).UnixNano(),
					StartedAt: time.Now().Add(time.Hour).UnixNano(),
					Apps:      []*rktapi.App{fakeApp},
					Annotations: []*rktapi.KeyValue{
						{
							Key:   k8sRktUIDAnno,
							Value: "pod-uid-0",
						},
					},
				},
				{
					Id:        "running-foo",
					State:     rktapi.PodState_POD_STATE_RUNNING,
					CreatedAt: 0,
					StartedAt: 0,
					Apps:      []*rktapi.App{fakeApp},
					Annotations: []*rktapi.KeyValue{
						{
							Key:   k8sRktUIDAnno,
							Value: "pod-uid-1",
						},
					},
				},
				{
					Id:        "running-bar",
					State:     rktapi.PodState_POD_STATE_RUNNING,
					CreatedAt: 0,
					StartedAt: 0,
					Apps:      []*rktapi.App{fakeApp},
					Annotations: []*rktapi.KeyValue{
						{
							Key:   k8sRktUIDAnno,
							Value: "pod-uid-2",
						},
					},
				},
				{
					Id:        "dead-old",
					State:     rktapi.PodState_POD_STATE_EXITED,
					CreatedAt: 0,
					StartedAt: 0,
					Apps:      []*rktapi.App{fakeApp},
					Annotations: []*rktapi.KeyValue{
						{
							Key:   k8sRktUIDAnno,
							Value: "pod-uid-3",
						},
					},
				},
				{
					Id:        "dead-new",
					State:     rktapi.PodState_POD_STATE_EXITED,
					CreatedAt: time.Now().Add(time.Hour).UnixNano(),
					StartedAt: time.Now().Add(time.Hour).UnixNano(),
					Apps:      []*rktapi.App{fakeApp},
					Annotations: []*rktapi.KeyValue{
						{
							Key:   k8sRktUIDAnno,
							Value: "pod-uid-4",
						},
					},
				},
			},
			[]string{"k8s_dead-old.service", "k8s_deleted-foo.service", "k8s_non-existing-bar.service"},
			[]string{"rkt rm dead-old", "rkt rm deleted-foo"},
			[]string{"/run/systemd/system/k8s_dead-old.service", "/run/systemd/system/k8s_deleted-foo.service", "/run/systemd/system/k8s_non-existing-bar.service"},
		},
		// gcPolicy.MaxContainers should be enforced.
		// Oldest ones are removed first.
		{
			kubecontainer.ContainerGCPolicy{
				MinAge:        0,
				MaxContainers: 1,
			},
			[]*api.Pod{
				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-0"}},
				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-1"}},
				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-2"}},
			},
			[]*rktapi.Pod{
				{
					Id:        "dead-2",
					State:     rktapi.PodState_POD_STATE_EXITED,
					CreatedAt: 2,
					StartedAt: 2,
					Apps:      []*rktapi.App{fakeApp},
					Annotations: []*rktapi.KeyValue{
						{
							Key:   k8sRktUIDAnno,
							Value: "pod-uid-2",
						},
					},
				},
				{
					Id:        "dead-1",
					State:     rktapi.PodState_POD_STATE_EXITED,
					CreatedAt: 1,
					StartedAt: 1,
					Apps:      []*rktapi.App{fakeApp},
					Annotations: []*rktapi.KeyValue{
						{
							Key:   k8sRktUIDAnno,
							Value: "pod-uid-1",
						},
					},
				},
				{
					Id:        "dead-0",
					State:     rktapi.PodState_POD_STATE_EXITED,
					CreatedAt: 0,
					StartedAt: 0,
					Apps:      []*rktapi.App{fakeApp},
					Annotations: []*rktapi.KeyValue{
						{
							Key:   k8sRktUIDAnno,
							Value: "pod-uid-0",
						},
					},
				},
			},
			[]string{"k8s_dead-0.service", "k8s_dead-1.service", "k8s_dead-2.service"},
			[]string{"rkt rm dead-0", "rkt rm dead-1"},
			[]string{"/run/systemd/system/k8s_dead-0.service", "/run/systemd/system/k8s_dead-1.service"},
		},
	}

	for i, tt := range tests {
		testCaseHint := fmt.Sprintf("test case #%d", i)

		ctrl := gomock.NewController(t)

		fakeOS.ReadDirFn = func(dirname string) ([]os.FileInfo, error) {
			serviceFileNames := tt.serviceFilesOnDisk
			var fileInfos []os.FileInfo

			for _, name := range serviceFileNames {
				mockFI := mock_os.NewMockFileInfo(ctrl)
				mockFI.EXPECT().Name().Return(name)
				fileInfos = append(fileInfos, mockFI)
			}
			return fileInfos, nil
		}

		fr.pods = tt.pods
		for _, p := range tt.apiPods {
			getter.pods[p.UID] = p
		}

		err := rkt.GarbageCollect(tt.gcPolicy)
		assert.NoError(t, err, testCaseHint)

		sort.Sort(sortedStringList(tt.expectedCommands))
		sort.Sort(sortedStringList(cli.cmds))

		assert.Equal(t, tt.expectedCommands, cli.cmds, testCaseHint)

		sort.Sort(sortedStringList(tt.expectedServiceFiles))
		sort.Sort(sortedStringList(fakeOS.Removes))

		assert.Equal(t, tt.expectedServiceFiles, fakeOS.Removes, testCaseHint)

		// Cleanup after each test.
		cli.Reset()
		ctrl.Finish()
		fakeOS.Removes = []string{}
		getter.pods = make(map[types.UID]*api.Pod)
	}
}
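Example #22 fills getter.pods[p.UID] directly and resets it with a fresh map, which suggests the fake pod getter is little more than a UID-keyed map. A sketch under that assumption; the GetPodByUID method name is a guess at how the garbage collector would look pods up, not confirmed by the excerpt:

package rkt

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/types"
)

// fakePodGetter backs pod lookups with a plain map so tests can
// register API pods by UID.
type fakePodGetter struct {
	pods map[types.UID]*api.Pod
}

func newFakePodGetter() *fakePodGetter {
	return &fakePodGetter{pods: make(map[types.UID]*api.Pod)}
}

// GetPodByUID returns the registered pod for uid, if any.
func (f *fakePodGetter) GetPodByUID(uid types.UID) (*api.Pod, bool) {
	p, ok := f.pods[uid]
	return p, ok
}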