// TestGetMountedVolumesForPodAndGetVolumesInUse verifies that after a pod's
// volumes are attached and mounted, the volume manager reports exactly that
// pod's volume via GetMountedVolumesForPod and reports the attached volume's
// unique name via GetVolumesInUse.
func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())

	// createObjects supplies a matching node/pod/PV/claim fixture set; all
	// four are preloaded into the fake API client.
	node, pod, pv, claim := createObjects()
	kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

	manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient)
	if err != nil {
		t.Fatalf("Failed to initialize volume manager: %v", err)
	}

	// Start the manager's reconciliation loop; stopCh is closed on test exit
	// to terminate both the manager and the fake status updater below.
	stopCh := make(chan struct{})
	go manager.Run(stopCh)
	defer close(stopCh)

	podManager.SetPods([]*api.Pod{pod})

	// Fake node status update: continually marks the attached volume as
	// in-use so WaitForAttachAndMount can complete.
	go simulateVolumeInUseUpdate(
		api.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
		stopCh,
		manager)

	err = manager.WaitForAttachAndMount(pod)
	if err != nil {
		t.Errorf("Expected success: %v", err)
	}

	// Exactly one volume — the pod's declared volume — must be reported mounted.
	expectedMounted := pod.Spec.Volumes[0].Name
	actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID))
	if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) {
		t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted)
	}

	// The in-use list must match the node's attached-volume name exactly.
	expectedInUse := []api.UniqueVolumeName{api.UniqueVolumeName(node.Status.VolumesAttached[0].Name)}
	actualInUse := manager.GetVolumesInUse()
	if !reflect.DeepEqual(expectedInUse, actualInUse) {
		t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse)
	}
}
func TestRunOnce(t *testing.T) { cadvisor := &cadvisortest.Mock{} cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) cadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400 * mb, Capacity: 1000 * mb, Available: 600 * mb, }, nil) cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 9 * mb, Capacity: 10 * mb, }, nil) podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient()) diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{}) fakeRuntime := &containertest.FakeRuntime{} basePath, err := utiltesting.MkTmpdir("kubelet") if err != nil { t.Fatalf("can't make a temp rootdir %v", err) } defer os.RemoveAll(basePath) kb := &Kubelet{ rootDirectory: basePath, recorder: &record.FakeRecorder{}, cadvisor: cadvisor, nodeLister: testNodeLister{}, nodeInfo: testNodeInfo{}, statusManager: status.NewManager(nil, podManager), containerRefManager: kubecontainer.NewRefManager(), podManager: podManager, os: &containertest.FakeOS{}, diskSpaceManager: diskSpaceManager, containerRuntime: fakeRuntime, reasonCache: NewReasonCache(), clock: util.RealClock{}, kubeClient: &fake.Clientset{}, hostname: testKubeletHostname, nodeName: testKubeletHostname, } kb.containerManager = cm.NewStubContainerManager() plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil} kb.volumePluginMgr, err = NewInitializedVolumePluginMgr(kb, []volume.VolumePlugin{plug}) if err != nil { t.Fatalf("failed to initialize VolumePluginMgr: %v", err) } kb.volumeManager, err = kubeletvolume.NewVolumeManager( true, kb.hostname, kb.podManager, kb.kubeClient, kb.volumePluginMgr, fakeRuntime) kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kb.nonMasqueradeCIDR) // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency volumeStatsAggPeriod := time.Second * 10 kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, 
kb.containerRuntime) nodeRef := &api.ObjectReference{ Kind: "Node", Name: kb.nodeName, UID: types.UID(kb.nodeName), Namespace: "", } fakeKillPodFunc := func(pod *api.Pod, podStatus api.PodStatus, gracePeriodOverride *int64) error { return nil } evictionManager, evictionAdmitHandler, err := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, kb.recorder, nodeRef, kb.clock) if err != nil { t.Fatalf("failed to initialize eviction manager: %v", err) } kb.evictionManager = evictionManager kb.AddPodAdmitHandler(evictionAdmitHandler) if err := kb.setupDataDirs(); err != nil { t.Errorf("Failed to init data dirs: %v", err) } pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, Spec: api.PodSpec{ Containers: []api.Container{ {Name: "bar"}, }, }, }, } podManager.SetPods(pods) // The original test here is totally meaningless, because fakeruntime will always return an empty podStatus. While // the originial logic of isPodRunning happens to return true when podstatus is empty, so the test can always pass. // Now the logic in isPodRunning is changed, to let the test pass, we set the podstatus directly in fake runtime. // This is also a meaningless test, because the isPodRunning will also always return true after setting this. However, // because runonce is never used in kubernetes now, we should deprioritize the cleanup work. // TODO(random-liu) Fix the test, make it meaningful. fakeRuntime.PodStatus = kubecontainer.PodStatus{ ContainerStatuses: []*kubecontainer.ContainerStatus{ { Name: "bar", State: kubecontainer.ContainerStateRunning, }, }, } results, err := kb.runOnce(pods, time.Millisecond) if err != nil { t.Errorf("unexpected error: %v", err) } if results[0].Err != nil { t.Errorf("unexpected run pod error: %v", results[0].Err) } if results[0].Pod.Name != "foo" { t.Errorf("unexpected pod: %q", results[0].Pod.Name) } }
// TestGetExtraSupplementalGroupsForPod verifies that GetExtraSupplementalGroupsForPod
// returns the GID from a PV's gid annotation only when it is a valid integer
// that is not already present in the pod's SupplementalGroups.
func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
	// The fixture PV is discarded; each test case builds its own PV so the
	// gid annotation can vary per case.
	node, pod, _, claim := createObjects()
	existingGid := pod.Spec.SecurityContext.SupplementalGroups[0]

	cases := []struct {
		gidAnnotation string
		expected      []int64
	}{
		{
			// Valid, new GID: surfaced as an extra supplemental group.
			gidAnnotation: "777",
			expected:      []int64{777},
		},
		{
			// GID already in the pod's SupplementalGroups: not duplicated.
			gidAnnotation: strconv.FormatInt(existingGid, 10),
			expected:      []int64{},
		},
		{
			// Non-numeric annotation: ignored.
			gidAnnotation: "a",
			expected:      []int64{},
		},
		{
			// Empty annotation: ignored.
			gidAnnotation: "",
			expected:      []int64{},
		},
	}
	for _, tc := range cases {
		pv := &api.PersistentVolume{
			ObjectMeta: api.ObjectMeta{
				Name: "pvA",
				Annotations: map[string]string{
					volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation,
				},
			},
			Spec: api.PersistentVolumeSpec{
				PersistentVolumeSource: api.PersistentVolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
				ClaimRef: &api.ObjectReference{
					Name: claim.ObjectMeta.Name,
				},
			},
		}
		kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

		manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient)
		if err != nil {
			t.Errorf("Failed to initialize volume manager: %v", err)
			continue
		}

		// stopCh is closed explicitly at the end of each iteration (not
		// deferred, which would keep every manager alive until the test
		// function returns).
		stopCh := make(chan struct{})
		go manager.Run(stopCh)

		podManager.SetPods([]*api.Pod{pod})

		// Fake node status update so WaitForAttachAndMount can complete.
		go simulateVolumeInUseUpdate(
			api.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
			stopCh,
			manager)

		err = manager.WaitForAttachAndMount(pod)
		if err != nil {
			t.Errorf("Expected success: %v", err)
			continue
		}

		actual := manager.GetExtraSupplementalGroupsForPod(pod)
		if !reflect.DeepEqual(tc.expected, actual) {
			t.Errorf("Expected supplemental groups %v, got %v", tc.expected, actual)
		}

		close(stopCh)
	}
}
func newTestManager(kubeClient clientset.Interface) *manager { podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient()) podManager.AddPod(getTestPod()) return NewManager(kubeClient, podManager).(*manager) }
// Stub out mirror client for testing purpose. func newTestManager() (*basicManager, *podtest.FakeMirrorClient) { fakeMirrorClient := podtest.NewFakeMirrorClient() manager := NewBasicPodManager(fakeMirrorClient).(*basicManager) return manager, fakeMirrorClient }
// TestRunOnce exercises Kubelet.runOnce against fully mocked dependencies
// (cadvisor, fake runtime, fake network plugin) and checks that a single pod
// is reported as run successfully. See the inline TODO: the assertion is
// acknowledged as weak because the fake runtime's pod status is set directly.
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisortest.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	cadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400 * mb,
		Capacity:  1000 * mb,
		Available: 600 * mb,
	}, nil)
	cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9 * mb,
		Capacity: 10 * mb,
	}, nil)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &containertest.FakeRuntime{}
	basePath, err := utiltesting.MkTmpdir("kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	// Minimal Kubelet with just the fields runOnce touches populated.
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  containertest.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
		reasonCache:         NewReasonCache(),
		clock:               util.RealClock{},
	}
	kb.containerManager = cm.NewStubContainerManager()

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	// The original test here is totally meaningless, because fakeruntime will always return an empty podStatus. While
	// the originial logic of isPodRunning happens to return true when podstatus is empty, so the test can always pass.
	// Now the logic in isPodRunning is changed, to let the test pass, we set the podstatus directly in fake runtime.
	// This is also a meaningless test, because the isPodRunning will also always return true after setting this. However,
	// because runonce is never used in kubernetes now, we should deprioritize the cleanup work.
	// TODO(random-liu) Fix the test, make it meaningful.
	fakeRuntime.PodStatus = kubecontainer.PodStatus{
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name:  "bar",
				State: kubecontainer.ContainerStateRunning,
			},
		},
	}
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}