func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	podManager := kubepod.NewBasicPodManager(kubepod.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}
	basePath, err := ioutil.TempDir(os.TempDir(), "kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
	}
	kb.containerManager = cm.NewStubContainerManager()
	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}

func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
	podManager, _ := newFakePodManager()
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}
	kb := &Kubelet{
		rootDirectory:       "/tmp/kubelet",
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		statusManager:       status.NewManager(nil),
		containerRefManager: kubecontainer.NewRefManager(),
		readinessManager:    kubecontainer.NewReadinessManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
	}
	kb.containerManager, _ = newContainerManager(fakeContainerMgrMountInt(), cadvisor, "", "", "")
	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}

func newTestManager() *manager {
	m := NewManager(
		status.NewManager(&testclient.Fake{}, kubepod.NewBasicPodManager(nil)),
		results.NewManager(),
		results.NewManager(),
		nil, // runner
		kubecontainer.NewRefManager(),
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}

func newTestManager() *manager {
	refManager := kubecontainer.NewRefManager()
	refManager.SetRef(testContainerID, &api.ObjectReference{}) // Suppress prober warnings.
	m := NewManager(
		status.NewManager(&testclient.Fake{}, kubepod.NewBasicPodManager(nil)),
		results.NewManager(),
		nil, // runner
		refManager,
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}

func newTestManager() *manager {
	const probePeriod = 1
	m := NewManager(
		probePeriod,
		status.NewManager(&testclient.Fake{}),
		results.NewManager(),
		results.NewManager(),
		nil, // runner
		kubecontainer.NewRefManager(),
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}

func newTestManager() *manager {
	refManager := kubecontainer.NewRefManager()
	refManager.SetRef(testContainerID, &v1.ObjectReference{}) // Suppress prober warnings.
	podManager := kubepod.NewBasicPodManager(nil)
	// Add the test pod to the pod manager, so that the status manager can get the pod from the pod manager if needed.
	podManager.AddPod(getTestPod())
	m := NewManager(
		status.NewManager(&fake.Clientset{}, podManager),
		results.NewManager(),
		nil, // runner
		refManager,
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}

func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisortest.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	cadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400 * mb,
		Capacity:  1000 * mb,
		Available: 600 * mb,
	}, nil)
	cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9 * mb,
		Capacity: 10 * mb,
	}, nil)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &containertest.FakeRuntime{}
	basePath, err := utiltesting.MkTmpdir("kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  &containertest.FakeOS{},
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
		reasonCache:         NewReasonCache(),
		clock:               util.RealClock{},
		kubeClient:          &fake.Clientset{},
		hostname:            testKubeletHostname,
		nodeName:            testKubeletHostname,
	}
	kb.containerManager = cm.NewStubContainerManager()

	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	kb.volumePluginMgr, err = NewInitializedVolumePluginMgr(kb, []volume.VolumePlugin{plug})
	if err != nil {
		t.Fatalf("failed to initialize VolumePluginMgr: %v", err)
	}
	kb.volumeManager, err = kubeletvolume.NewVolumeManager(
		true,
		kb.hostname,
		kb.podManager,
		kb.kubeClient,
		kb.volumePluginMgr,
		fakeRuntime)

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kb.nonMasqueradeCIDR)
	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency.
	volumeStatsAggPeriod := time.Second * 10
	kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)
	nodeRef := &api.ObjectReference{
		Kind:      "Node",
		Name:      kb.nodeName,
		UID:       types.UID(kb.nodeName),
		Namespace: "",
	}
	fakeKillPodFunc := func(pod *api.Pod, podStatus api.PodStatus, gracePeriodOverride *int64) error {
		return nil
	}
	evictionManager, evictionAdmitHandler, err := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, kb.recorder, nodeRef, kb.clock)
	if err != nil {
		t.Fatalf("failed to initialize eviction manager: %v", err)
	}
	kb.evictionManager = evictionManager
	kb.AddPodAdmitHandler(evictionAdmitHandler)
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	// The original test here was meaningless, because fakeRuntime always returns an empty podStatus, and the
	// original isPodRunning logic happened to return true for an empty podStatus, so the test always passed.
	// The isPodRunning logic has since changed, so to keep the test passing we set the podStatus directly in
	// the fake runtime. The test is still not meaningful, because isPodRunning will always return true after
	// this, but since runOnce is no longer used in Kubernetes the cleanup work is deprioritized.
	// TODO(random-liu) Fix the test, make it meaningful.
	fakeRuntime.PodStatus = kubecontainer.PodStatus{
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name:  "bar",
				State: kubecontainer.ContainerStateRunning,
			},
		},
	}
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}

func TestDoProbe(t *testing.T) {
	m := newTestManager()

	// Test statuses.
	runningStatus := getTestRunningStatus()
	pendingStatus := getTestRunningStatus()
	pendingStatus.ContainerStatuses[0].State.Running = nil
	terminatedStatus := getTestRunningStatus()
	terminatedStatus.ContainerStatuses[0].State.Running = nil
	terminatedStatus.ContainerStatuses[0].State.Terminated = &api.ContainerStateTerminated{
		StartedAt: unversioned.Now(),
	}
	otherStatus := getTestRunningStatus()
	otherStatus.ContainerStatuses[0].Name = "otherContainer"
	failedStatus := getTestRunningStatus()
	failedStatus.Phase = api.PodFailed

	tests := []struct {
		probe          api.Probe
		podStatus      *api.PodStatus
		expectContinue bool
		expectSet      bool
		expectedResult results.Result
	}{
		{ // No status.
			expectContinue: true,
		},
		{ // Pod failed
			podStatus: &failedStatus,
		},
		{ // No container status
			podStatus:      &otherStatus,
			expectContinue: true,
		},
		{ // Container waiting
			podStatus:      &pendingStatus,
			expectContinue: true,
			expectSet:      true,
		},
		{ // Container terminated
			podStatus: &terminatedStatus,
			expectSet: true,
		},
		{ // Probe successful.
			podStatus:      &runningStatus,
			expectContinue: true,
			expectSet:      true,
			expectedResult: results.Success,
		},
		{ // Initial delay passed
			podStatus: &runningStatus,
			probe: api.Probe{
				InitialDelaySeconds: -100,
			},
			expectContinue: true,
			expectSet:      true,
			expectedResult: results.Success,
		},
	}

	for _, probeType := range [...]probeType{liveness, readiness} {
		for i, test := range tests {
			w := newTestWorker(m, probeType, test.probe)
			if test.podStatus != nil {
				m.statusManager.SetPodStatus(w.pod, *test.podStatus)
			}
			if c := w.doProbe(); c != test.expectContinue {
				t.Errorf("[%s-%d] Expected continue to be %v but got %v", probeType, i, test.expectContinue, c)
			}
			result, ok := resultsManager(m, probeType).Get(testContainerID)
			if ok != test.expectSet {
				t.Errorf("[%s-%d] Expected to have result: %v but got %v", probeType, i, test.expectSet, ok)
			}
			if result != test.expectedResult {
				t.Errorf("[%s-%d] Expected result: %v but got %v", probeType, i, test.expectedResult, result)
			}

			// Clean up.
			m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil))
			resultsManager(m, probeType).Remove(testContainerID)
		}
	}
}

func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	cadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400 * mb,
		Capacity:  1000 * mb,
		Available: 600 * mb,
	}, nil)
	cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9 * mb,
		Capacity: 10 * mb,
	}, nil)
	podManager := kubepod.NewBasicPodManager(kubepod.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}
	basePath, err := utiltesting.MkTmpdir("kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
		reasonCache:         NewReasonCache(),
		clock:               util.RealClock{},
	}
	kb.containerManager = cm.NewStubContainerManager()
	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	// The original test here was meaningless, because fakeRuntime always returns an empty podStatus, and the
	// original isPodRunning logic happened to return true for an empty podStatus, so the test always passed.
	// The isPodRunning logic has since changed, so to keep the test passing we set the podStatus directly in
	// the fake runtime. The test is still not meaningful, because isPodRunning will always return true after
	// this, but since runOnce is no longer used in Kubernetes the cleanup work is deprioritized.
	// TODO(random-liu) Fix the test, make it meaningful.
	fakeRuntime.PodStatus = kubecontainer.PodStatus{
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name:  "bar",
				State: kubecontainer.ContainerStateRunning,
			},
		},
	}
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}

func newTestManager() *manager {
	const probePeriod = 1
	statusManager := status.NewManager(&testclient.Fake{})
	prober := FakeProber{Readiness: probe.Success}
	return NewManager(probePeriod, statusManager, prober).(*manager)
}

func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
	podManager, _ := newFakePodManager()
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})

	kb := &Kubelet{
		rootDirectory:       "/tmp/kubelet",
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		statusManager:       status.NewManager(nil),
		containerRefManager: kubecontainer.NewRefManager(),
		readinessManager:    kubecontainer.NewReadinessManager(),
		podManager:          podManager,
		nodeManager:         &fakeNodeManager{},
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
	}
	kb.containerManager, _ = newContainerManager(cadvisor, "", "", "")
	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	podContainers := []docker.APIContainers{
		{
			Names:  []string{"/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&api.Container{Name: "bar"}), 16) + "_foo_new_12345678_42"},
			ID:     "1234",
			Status: "running",
		},
		{
			Names:  []string{"/k8s_net_foo.new.test_abcdefgh_42"},
			ID:     "9876",
			Status: "running",
		},
	}
	kb.dockerClient = &testDocker{
		listContainersResults: []listContainersResult{
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: []docker.APIContainers{}},
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: podContainers},
			{label: "list pod container", containers: podContainers},
			{label: "list pod container", containers: podContainers},
		},
		inspectContainersResults: []inspectContainersResult{
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State:  docker.State{Running: true, Pid: 42},
				},
			},
		},
		t: t,
	}
	kb.containerRuntime = dockertools.NewFakeDockerManager(
		kb.dockerClient,
		kb.recorder,
		kb.readinessManager,
		kb.containerRefManager,
		&cadvisorApi.MachineInfo{},
		dockertools.PodInfraContainerImage,
		0,
		0,
		"",
		kubecontainer.FakeOS{},
		kb.networkPlugin,
		kb,
		nil)

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}