func TestPluginValidation(t *testing.T) {
	// The temp dir where test plugins will be stored.
	testPluginPath := tmpDirOrDie()

	// install some random plugin under testPluginPath
	pluginName := selectName()
	defer tearDownPlugin(testPluginPath)
	defer releaseName(pluginName)

	installPluginUnderTest(t, "", testPluginPath, pluginName, nil)

	// modify the perms of the pluginExecutable so that validation fails
	f, err := os.Open(path.Join(testPluginPath, pluginName, pluginName))
	if err != nil {
		t.Errorf("Failed to open plugin executable: %v", err)
	}
	err = f.Chmod(0444)
	if err != nil {
		t.Errorf("Failed to set perms on plugin exec: %v", err)
	}
	f.Close()

	_, err = network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, network.NewFakeHost(nil))
	if err == nil {
		// we expect an error here because validation of the non-executable plugin should have failed
		t.Errorf("Expected an error from InitNetworkPlugin, got nil")
	}
}
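// The helpers used above and in the other exec-plugin tests below (tmpDirOrDie, selectName,
// releaseName, tearDownPlugin) are defined elsewhere in the test file and are not shown in this
// excerpt. The following is only an illustrative sketch of what they could look like, assuming
// plugin names are reserved from a shared pool so concurrent tests do not collide; the names,
// fields, and assumed imports ("fmt", "io/ioutil", "math/rand", "os", "sync",
// "k8s.io/kubernetes/pkg/util/sets") are guesses, not the actual kubelet test helpers.
func tmpDirOrDie() string {
	dir, err := ioutil.TempDir("", "exec-plugin-test")
	if err != nil {
		panic(fmt.Sprintf("error creating temp dir: %v", err))
	}
	return dir
}

var (
	nameLock   sync.Mutex
	namesInUse = sets.NewString()
)

// selectName reserves a random plugin name that no other running test currently uses.
func selectName() string {
	nameLock.Lock()
	defer nameLock.Unlock()
	for {
		name := fmt.Sprintf("test%d", rand.Intn(1000))
		if !namesInUse.Has(name) {
			namesInUse.Insert(name)
			return name
		}
	}
}

// releaseName returns a reserved plugin name to the pool.
func releaseName(name string) {
	nameLock.Lock()
	defer nameLock.Unlock()
	namesInUse.Delete(name)
}

// tearDownPlugin removes the per-test plugin directory.
func tearDownPlugin(testPluginPath string) {
	os.RemoveAll(testPluginPath)
}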
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)

	podManager := kubepod.NewBasicPodManager(kubepod.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}
	basePath, err := ioutil.TempDir(os.TempDir(), "kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory: basePath,
		recorder: &record.FakeRecorder{},
		cadvisor: cadvisor,
		nodeLister: testNodeLister{},
		nodeInfo: testNodeInfo{},
		statusManager: status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager: podManager,
		os: kubecontainer.FakeOS{},
		volumeManager: newVolumeManager(),
		diskSpaceManager: diskSpaceManager,
		containerRuntime: fakeRuntime,
	}
	kb.containerManager = cm.NewStubContainerManager()

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID: "12345678",
				Name: "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)

	podManager, _ := newFakePodManager()
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}

	kb := &Kubelet{
		rootDirectory: "/tmp/kubelet",
		recorder: &record.FakeRecorder{},
		cadvisor: cadvisor,
		nodeLister: testNodeLister{},
		statusManager: status.NewManager(nil),
		containerRefManager: kubecontainer.NewRefManager(),
		readinessManager: kubecontainer.NewReadinessManager(),
		podManager: podManager,
		os: kubecontainer.FakeOS{},
		volumeManager: newVolumeManager(),
		diskSpaceManager: diskSpaceManager,
		containerRuntime: fakeRuntime,
	}
	kb.containerManager, _ = newContainerManager(fakeContainerMgrMountInt(), cadvisor, "", "", "")

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))

	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID: "12345678",
				Name: "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
func newTestDockerManager() (*dockertools.DockerManager, *dockertools.FakeDockerClient) {
	fakeDocker := &dockertools.FakeDockerClient{
		VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"},
		Errors: make(map[string]error),
		RemovedImages: sets.String{},
	}
	fakeRecorder := &record.FakeRecorder{}
	containerRefManager := kubecontainer.NewRefManager()
	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := dockertools.NewFakeDockerManager(
		fakeDocker,
		fakeRecorder,
		prober.FakeProber{},
		containerRefManager,
		&cadvisorApi.MachineInfo{},
		dockertools.PodInfraContainerImage,
		0, 0, "",
		kubecontainer.FakeOS{},
		networkPlugin,
		nil, nil)

	return dockerManager, fakeDocker
}
func newTestDockerManager() (*dockertools.DockerManager, *dockertools.FakeDockerClient) {
	fakeDocker := dockertools.NewFakeDockerClient()
	fakeRecorder := &record.FakeRecorder{}
	containerRefManager := kubecontainer.NewRefManager()
	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := dockertools.NewFakeDockerManager(
		fakeDocker,
		fakeRecorder,
		proberesults.NewManager(),
		containerRefManager,
		&cadvisorapi.MachineInfo{},
		dockertools.PodInfraContainerImage,
		0, 0, "",
		kubecontainer.FakeOS{},
		networkPlugin,
		nil, nil, nil)

	return dockerManager, fakeDocker
}
func newTestDockerManagerWithHTTPClient(fakeHTTPClient *fakeHTTP) (*DockerManager, *FakeDockerClient) {
	fakeDocker := NewFakeDockerClient()
	fakeRecorder := &record.FakeRecorder{}
	containerRefManager := kubecontainer.NewRefManager()
	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := NewFakeDockerManager(
		fakeDocker,
		fakeRecorder,
		proberesults.NewManager(),
		containerRefManager,
		&cadvisorapi.MachineInfo{},
		kubetypes.PodInfraContainerImage,
		0, 0, "",
		kubecontainer.FakeOS{},
		networkPlugin,
		&fakeRuntimeHelper{},
		fakeHTTPClient,
		util.NewBackOff(time.Second, 300*time.Second))

	return dockerManager, fakeDocker
}
func TestPluginValidation(t *testing.T) {
	pluginName := fmt.Sprintf("test%d", rand.Intn(1000))
	defer tearDownPlugin(pluginName)
	installPluginUnderTest(t, "", pluginName, nil)

	// modify the perms of the pluginExecutable so that validation fails
	f, err := os.Open(path.Join(testPluginPath, pluginName, pluginName))
	if err != nil {
		t.Errorf("Failed to open plugin executable: %v", err)
	}
	err = f.Chmod(0444)
	if err != nil {
		t.Errorf("Failed to set perms on plugin exec: %v", err)
	}
	f.Close()

	_, err = network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, network.NewFakeHost(nil))
	if err == nil {
		// we expect an error here because validation of the non-executable plugin should have failed
		t.Errorf("Expected an error from InitNetworkPlugin, got nil")
	}
}
func TestSelectWrongPlugin(t *testing.T) {
	// install some random plugin under testPluginPath
	pluginName := fmt.Sprintf("test%d", rand.Intn(1000))
	defer tearDownPlugin(pluginName)
	installPluginUnderTest(t, "", pluginName, nil)

	wrongPlugin := "abcd"
	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), wrongPlugin, network.NewFakeHost(nil))
	if plug != nil || err == nil {
		t.Errorf("Expected to see an error. Wrong plugin selected.")
	}
}
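// installPluginUnderTest is likewise defined elsewhere in the test file. Below is only a minimal
// sketch of the four-argument variant used in the tests above, assuming a package-level
// testPluginPath and a small shell script that records its arguments in <plugin>.out and prints an
// IP for the status hook; the script body, the "~" vendor separator, and the template fields are
// illustrative assumptions (assumed imports: "os", "path", "text/template").
func installPluginUnderTest(t *testing.T, vendorName, plugName string, execTemplateData *map[string]interface{}) {
	vendoredName := plugName
	if vendorName != "" {
		vendoredName = fmt.Sprintf("%s~%s", vendorName, plugName)
	}
	pluginDir := path.Join(testPluginPath, vendoredName)
	if err := os.MkdirAll(pluginDir, 0777); err != nil {
		t.Fatalf("Failed to create plugin dir %q: %v", pluginDir, err)
	}
	pluginExec := path.Join(pluginDir, plugName)
	f, err := os.OpenFile(pluginExec, os.O_CREATE|os.O_WRONLY, 0777)
	if err != nil {
		t.Fatalf("Failed to create plugin exec %q: %v", pluginExec, err)
	}
	defer f.Close()
	if execTemplateData == nil {
		execTemplateData = &map[string]interface{}{
			"IPAddress":  "10.20.30.40",
			"OutputFile": path.Join(pluginDir, plugName+".out"),
		}
	}
	// The script writes "<verb> <args...>" to OutputFile; the status hook also prints a JSON IP.
	const execScript = `#!/bin/bash
echo -n "$@" > {{.OutputFile}}
if [ "$1" == "status" ]; then
	echo -n '{"ip" : "{{.IPAddress}}"}'
fi
`
	tmpl := template.Must(template.New("plugin").Parse(execScript))
	if err := tmpl.Execute(f, *execTemplateData); err != nil {
		t.Fatalf("Failed to write plugin exec: %v", err)
	}
}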
func TestFindContainersByPod(t *testing.T) {
	tests := []struct {
		containerList       []docker.APIContainers
		exitedContainerList []docker.APIContainers
		all                 bool
		expectedPods        []*kubecontainer.Pod
	}{
		{
			[]docker.APIContainers{
				{ID: "foobar", Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"}},
				{ID: "barbar", Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"}},
				{ID: "baz", Names: []string{"/k8s_baz.1234_qux_ns_1234_42"}},
			},
			[]docker.APIContainers{
				{ID: "barfoo", Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"}},
				{ID: "bazbaz", Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"}},
			},
			false,
			[]*kubecontainer.Pod{
				{
					ID: "1234",
					Name: "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{ID: kubetypes.DockerID("foobar").ContainerID(), Name: "foobar", Hash: 0x1234, State: kubecontainer.ContainerStateUnknown},
						{ID: kubetypes.DockerID("baz").ContainerID(), Name: "baz", Hash: 0x1234, State: kubecontainer.ContainerStateUnknown},
					},
				},
				{
					ID: "2343",
					Name: "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{ID: kubetypes.DockerID("barbar").ContainerID(), Name: "barbar", Hash: 0x1234, State: kubecontainer.ContainerStateUnknown},
					},
				},
			},
		},
		{
			[]docker.APIContainers{
				{ID: "foobar", Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"}},
				{ID: "barbar", Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"}},
				{ID: "baz", Names: []string{"/k8s_baz.1234_qux_ns_1234_42"}},
			},
			[]docker.APIContainers{
				{ID: "barfoo", Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"}},
				{ID: "bazbaz", Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"}},
			},
			true,
			[]*kubecontainer.Pod{
				{
					ID: "1234",
					Name: "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{ID: kubetypes.DockerID("foobar").ContainerID(), Name: "foobar", Hash: 0x1234, State: kubecontainer.ContainerStateUnknown},
						{ID: kubetypes.DockerID("barfoo").ContainerID(), Name: "barfoo", Hash: 0x1234, State: kubecontainer.ContainerStateUnknown},
						{ID: kubetypes.DockerID("baz").ContainerID(), Name: "baz", Hash: 0x1234, State: kubecontainer.ContainerStateUnknown},
					},
				},
				{
					ID: "2343",
					Name: "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{ID: kubetypes.DockerID("barbar").ContainerID(), Name: "barbar", Hash: 0x1234, State: kubecontainer.ContainerStateUnknown},
					},
				},
				{
					ID: "5678",
					Name: "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{ID: kubetypes.DockerID("bazbaz").ContainerID(), Name: "bazbaz", Hash: 0x1234, State: kubecontainer.ContainerStateUnknown},
					},
				},
			},
		},
		{
			[]docker.APIContainers{},
			[]docker.APIContainers{},
			true,
			nil,
		},
	}
	fakeClient := &FakeDockerClient{}
	np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	// image back-off is set to nil; this test shouldn't pull images
	containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil)
	for i, test := range tests {
		fakeClient.ContainerList = test.containerList
		fakeClient.ExitedContainerList = test.exitedContainerList

		result, _ := containerManager.GetPods(test.all)
		for i := range result {
			sort.Sort(containersByID(result[i].Containers))
		}
		for i := range test.expectedPods {
			sort.Sort(containersByID(test.expectedPods[i].Containers))
		}
		sort.Sort(podsByID(result))
		sort.Sort(podsByID(test.expectedPods))
		if !reflect.DeepEqual(test.expectedPods, result) {
			t.Errorf("%d: expected: %#v, saw: %#v", i, test.expectedPods, result)
		}
	}
}
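// containersByID and podsByID are sort wrappers used above and defined elsewhere in the package.
// A plausible minimal sketch of both is given here for reference; the exact field choices
// (Pod.ID and Container.ID.ID) are assumptions about the real helpers, not copied from them.
type containersByID []*kubecontainer.Container

func (b containersByID) Len() int           { return len(b) }
func (b containersByID) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID }

type podsByID []*kubecontainer.Pod

func (b podsByID) Len() int           { return len(b) }
func (b podsByID) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b podsByID) Less(i, j int) bool { return b[i].ID < b[j].ID }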
func TestPluginTearDownHook(t *testing.T) {
	pluginName := fmt.Sprintf("test%d", rand.Intn(1000))
	defer tearDownPlugin(pluginName)
	installPluginUnderTest(t, "", pluginName, nil)

	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, network.NewFakeHost(nil))
	if err != nil {
		t.Fatalf("InitNetworkPlugin() failed: %v", err)
	}
	err = plug.TearDownPod("podNamespace", "podName", "dockerid2345")
	if err != nil {
		t.Errorf("TearDownPod() failed: %v", err)
	}

	// check output of teardown hook
	output, err := ioutil.ReadFile(path.Join(testPluginPath, pluginName, pluginName+".out"))
	if err != nil {
		t.Errorf("Failed to read output file: %v", err)
	}
	expectedOutput := "teardown podNamespace podName dockerid2345"
	if string(output) != expectedOutput {
		t.Errorf("Mismatch in expected output for teardown hook. Expected '%s', got '%s'", expectedOutput, string(output))
	}
}
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	cadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage: 400 * mb,
		Capacity: 1000 * mb,
		Available: 600 * mb,
	}, nil)
	cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage: 9 * mb,
		Capacity: 10 * mb,
	}, nil)

	podManager := kubepod.NewBasicPodManager(kubepod.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}
	basePath, err := utiltesting.MkTmpdir("kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory: basePath,
		recorder: &record.FakeRecorder{},
		cadvisor: cadvisor,
		nodeLister: testNodeLister{},
		nodeInfo: testNodeInfo{},
		statusManager: status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager: podManager,
		os: kubecontainer.FakeOS{},
		volumeManager: newVolumeManager(),
		diskSpaceManager: diskSpaceManager,
		containerRuntime: fakeRuntime,
		reasonCache: NewReasonCache(),
		clock: util.RealClock{},
	}
	kb.containerManager = cm.NewStubContainerManager()

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID: "12345678",
				Name: "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	// The original test here was meaningless, because fakeRuntime would always return an empty podStatus,
	// and the old isPodRunning logic happened to return true for an empty podStatus, so the test always passed.
	// Now that the isPodRunning logic has changed, we set the podStatus directly in the fake runtime so the test
	// still passes. This remains a fairly meaningless test, because isPodRunning will always return true after
	// this, but since runOnce is no longer used in Kubernetes, the cleanup work is deprioritized.
	// TODO(random-liu): Fix the test, make it meaningful.
	fakeRuntime.PodStatus = kubecontainer.PodStatus{
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name: "bar",
				State: kubecontainer.ContainerStateRunning,
			},
		},
	}
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)

	podManager, _ := newFakePodManager()

	kb := &Kubelet{
		rootDirectory: "/tmp/kubelet",
		recorder: &record.FakeRecorder{},
		cadvisor: cadvisor,
		nodeLister: testNodeLister{},
		statusManager: newStatusManager(nil),
		containerRefManager: kubecontainer.NewRefManager(),
		readinessManager: kubecontainer.NewReadinessManager(),
		podManager: podManager,
		os: kubecontainer.FakeOS{},
		volumeManager: newVolumeManager(),
	}
	kb.containerManager, _ = newContainerManager(cadvisor, "", "", "")

	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))

	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	podContainers := []docker.APIContainers{
		{
			Names: []string{"/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&api.Container{Name: "bar"}), 16) + "_foo_new_12345678_42"},
			ID: "1234",
			Status: "running",
		},
		{
			Names: []string{"/k8s_net_foo.new.test_abcdefgh_42"},
			ID: "9876",
			Status: "running",
		},
	}
	kb.dockerClient = &testDocker{
		listContainersResults: []listContainersResult{
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: []docker.APIContainers{}},
			{label: "list pod container", containers: []docker.APIContainers{}},
			{label: "syncPod", containers: podContainers},
			{label: "list pod container", containers: podContainers},
			{label: "list pod container", containers: podContainers},
		},
		inspectContainersResults: []inspectContainersResult{
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State: docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State: docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State: docker.State{Running: true, Pid: 42},
				},
			},
			{
				label: "syncPod",
				container: docker.Container{
					Config: &docker.Config{Image: "someimage"},
					State: docker.State{Running: true, Pid: 42},
				},
			},
		},
		t: t,
	}
	kb.containerRuntime = dockertools.NewFakeDockerManager(
		kb.dockerClient,
		kb.recorder,
		kb.readinessManager,
		kb.containerRefManager,
		dockertools.PodInfraContainerImage,
		0, 0, "",
		kubecontainer.FakeOS{},
		kb.networkPlugin,
		kb,
		nil,
		newKubeletRuntimeHooks(kb.recorder))

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID: "12345678",
				Name: "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
func TestPluginStatusHookIPv6(t *testing.T) {
	// The temp dir where test plugins will be stored.
	testPluginPath := tmpDirOrDie()

	// install some random plugin under testPluginPath
	pluginName := selectName()
	defer tearDownPlugin(testPluginPath)
	defer releaseName(pluginName)

	pluginDir := path.Join(testPluginPath, pluginName)
	execTemplate := &map[string]interface{}{
		"IPAddress":  "fe80::e2cb:4eff:fef9:6710",
		"OutputFile": path.Join(pluginDir, pluginName+".out"),
	}
	installPluginUnderTest(t, "", testPluginPath, pluginName, execTemplate)

	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, network.NewFakeHost(nil))
	if err != nil {
		t.Errorf("InitNetworkPlugin() failed: %v", err)
	}

	ip, err := plug.Status("namespace", "name", "dockerid2345")
	if err != nil {
		t.Errorf("Status() failed: %v", err)
	}

	// check output of status hook
	outPath := path.Join(testPluginPath, pluginName, pluginName+".out")
	output, err := ioutil.ReadFile(outPath)
	if err != nil {
		t.Errorf("ReadFile(%q) failed: %v", outPath, err)
	}
	expectedOutput := "status namespace name dockerid2345"
	if string(output) != expectedOutput {
		t.Errorf("Mismatch in expected output for status hook. Expected %q, got %q", expectedOutput, string(output))
	}
	if ip.IP.String() != "fe80::e2cb:4eff:fef9:6710" {
		t.Errorf("Mismatch in expected output for status hook. Expected 'fe80::e2cb:4eff:fef9:6710', got '%s'", ip.IP.String())
	}
}
func TestPluginStatusHook(t *testing.T) {
	// The temp dir where test plugins will be stored.
	testPluginPath := tmpDirOrDie()

	// install some random plugin under testPluginPath
	pluginName := selectName()
	defer tearDownPlugin(testPluginPath)
	defer releaseName(pluginName)

	installPluginUnderTest(t, "", testPluginPath, pluginName, nil)

	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, network.NewFakeHost(nil))
	if err != nil {
		t.Fatalf("InitNetworkPlugin() failed: %v", err)
	}

	ip, err := plug.Status("namespace", "name", "dockerid2345")
	if err != nil {
		t.Errorf("Expected nil got %v", err)
	}

	// check output of status hook
	output, err := ioutil.ReadFile(path.Join(testPluginPath, pluginName, pluginName+".out"))
	if err != nil {
		t.Errorf("Failed to read output file: %v", err)
	}
	expectedOutput := "status namespace name dockerid2345"
	if string(output) != expectedOutput {
		t.Errorf("Mismatch in expected output for status hook. Expected '%s', got '%s'", expectedOutput, string(output))
	}
	if ip.IP.String() != "10.20.30.40" {
		t.Errorf("Mismatch in expected output for status hook. Expected '10.20.30.40', got '%s'", ip.IP.String())
	}
}
// TestFakePodWorkers verifies that the fakePodWorkers behaves the same way as the real podWorkers
// for their invocation of the syncPodFn.
func TestFakePodWorkers(t *testing.T) {
	// Create components for pod workers.
	fakeDocker := &dockertools.FakeDockerClient{}
	fakeRecorder := &record.FakeRecorder{}
	np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, nil, nil, &cadvisorApi.MachineInfo{}, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil)
	fakeRuntimeCache := kubecontainer.NewFakeRuntimeCache(dockerManager)

	kubeletForRealWorkers := &simpleFakeKubelet{}
	kubeletForFakeWorkers := &simpleFakeKubelet{}

	realPodWorkers := newPodWorkers(fakeRuntimeCache, kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder)
	fakePodWorkers := &fakePodWorkers{kubeletForFakeWorkers.syncPod, fakeRuntimeCache, t}

	tests := []struct {
		pod                    *api.Pod
		mirrorPod              *api.Pod
		containerList          []docker.APIContainers
		containersInRunningPod int
	}{
		{
			&api.Pod{},
			&api.Pod{},
			[]docker.APIContainers{},
			0,
		},
		{
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					UID: "12345678",
					Name: "foo",
					Namespace: "new",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "fooContainer"},
					},
				},
			},
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					UID: "12345678",
					Name: "fooMirror",
					Namespace: "new",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "fooContainerMirror"},
					},
				},
			},
			[]docker.APIContainers{
				{
					// format is
					// k8s_<container-id>_<pod-fullname>_<pod-uid>_<random>
					Names: []string{"/k8s_bar.hash123_foo_new_12345678_0"},
					ID: "1234",
				},
				{
					// pod infra container
					Names: []string{"/k8s_POD.hash123_foo_new_12345678_0"},
					ID: "9876",
				},
			},
			2,
		},
		{
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					UID: "98765",
					Name: "bar",
					Namespace: "new",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "fooContainer"},
					},
				},
			},
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					UID: "98765",
					Name: "fooMirror",
					Namespace: "new",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "fooContainerMirror"},
					},
				},
			},
			[]docker.APIContainers{
				{
					// format is
					// k8s_<container-id>_<pod-fullname>_<pod-uid>_<random>
					Names: []string{"/k8s_bar.hash123_bar_new_98765_0"},
					ID: "1234",
				},
				{
					// pod infra container
					Names: []string{"/k8s_POD.hash123_foo_new_12345678_0"},
					ID: "9876",
				},
			},
			1,
		},
		// Empty running pod.
		{
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					UID: "98765",
					Name: "baz",
					Namespace: "new",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "bazContainer"},
					},
				},
			},
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					UID: "98765",
					Name: "bazMirror",
					Namespace: "new",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "bazContainerMirror"},
					},
				},
			},
			[]docker.APIContainers{
				{
					// format is
					// k8s_<container-id>_<pod-fullname>_<pod-uid>_<random>
					Names: []string{"/k8s_bar.hash123_bar_new_12345678_0"},
					ID: "1234",
				},
				{
					// pod infra container
					Names: []string{"/k8s_POD.hash123_foo_new_12345678_0"},
					ID: "9876",
				},
			},
			0,
		},
	}

	for i, tt := range tests {
		kubeletForRealWorkers.wg.Add(1)

		fakeDocker.ContainerList = tt.containerList
		realPodWorkers.UpdatePod(tt.pod, tt.mirrorPod, func() {})
		fakePodWorkers.UpdatePod(tt.pod, tt.mirrorPod, func() {})

		kubeletForRealWorkers.wg.Wait()

		if !reflect.DeepEqual(kubeletForRealWorkers.pod, kubeletForFakeWorkers.pod) {
			t.Errorf("%d: Expected: %#v, Actual: %#v", i, kubeletForRealWorkers.pod, kubeletForFakeWorkers.pod)
		}

		if !reflect.DeepEqual(kubeletForRealWorkers.mirrorPod, kubeletForFakeWorkers.mirrorPod) {
			t.Errorf("%d: Expected: %#v, Actual: %#v", i, kubeletForRealWorkers.mirrorPod, kubeletForFakeWorkers.mirrorPod)
		}

		if tt.containersInRunningPod != len(kubeletForFakeWorkers.runningPod.Containers) {
			t.Errorf("%d: Expected: %#v, Actual: %#v", i, tt.containersInRunningPod, len(kubeletForFakeWorkers.runningPod.Containers))
		}

		sort.Sort(byContainerName(kubeletForRealWorkers.runningPod))
		sort.Sort(byContainerName(kubeletForFakeWorkers.runningPod))
		if !reflect.DeepEqual(kubeletForRealWorkers.runningPod, kubeletForFakeWorkers.runningPod) {
			t.Errorf("%d: Expected: %#v, Actual: %#v", i, kubeletForRealWorkers.runningPod, kubeletForFakeWorkers.runningPod)
		}
	}
}
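// byContainerName is the sort wrapper used above; it is defined elsewhere in the package. A
// plausible minimal sketch (an assumption, the real helper may differ) that orders a running
// pod's containers by name so the comparison is order independent:
type byContainerName kubecontainer.Pod

func (b byContainerName) Len() int { return len(b.Containers) }

func (b byContainerName) Swap(i, j int) {
	b.Containers[i], b.Containers[j] = b.Containers[j], b.Containers[i]
}

func (b byContainerName) Less(i, j int) bool {
	return b.Containers[i].Name < b.Containers[j].Name
}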
func TestSelectWrongPlugin(t *testing.T) {
	// The temp dir where test plugins will be stored.
	testPluginPath := tmpDirOrDie()

	// install some random plugin under testPluginPath
	pluginName := selectName()
	defer tearDownPlugin(testPluginPath)
	defer releaseName(pluginName)

	installPluginUnderTest(t, "", testPluginPath, pluginName, nil)

	wrongPlugin := "abcd"
	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), wrongPlugin, network.NewFakeHost(nil))
	if plug != nil || err == nil {
		t.Errorf("Expected to see an error. Wrong plugin selected.")
	}
}
func TestSelectVendoredPlugin(t *testing.T) {
	// The temp dir where test plugins will be stored.
	testPluginPath := tmpDirOrDie()

	// install some random plugin under testPluginPath
	pluginName := selectName()
	defer tearDownPlugin(testPluginPath)
	defer releaseName(pluginName)

	vendor := "mycompany"
	installPluginUnderTest(t, vendor, testPluginPath, pluginName, nil)

	vendoredPluginName := fmt.Sprintf("%s/%s", vendor, pluginName)
	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), vendoredPluginName, network.NewFakeHost(nil))
	if err != nil {
		t.Errorf("Failed to select the desired plugin: %v", err)
	}
	if plug.Name() != vendoredPluginName {
		t.Errorf("Wrong plugin selected, chose %s, got %s\n", vendoredPluginName, plug.Name())
	}
}
func TestSelectPlugin(t *testing.T) {
	// install some random plugin under testPluginPath
	pluginName := fmt.Sprintf("test%d", rand.Intn(1000))
	defer tearDownPlugin(pluginName)
	installPluginUnderTest(t, "", pluginName, nil)

	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, network.NewFakeHost(nil))
	if err != nil {
		t.Errorf("Failed to select the desired plugin: %v", err)
	}
	if plug.Name() != pluginName {
		t.Errorf("Wrong plugin selected, chose %s, got %s\n", pluginName, plug.Name())
	}
}
func TestPluginStatusHookIPv6(t *testing.T) {
	pluginName := fmt.Sprintf("test%d", rand.Intn(1000))
	defer tearDownPlugin(pluginName)
	pluginDir := path.Join(testPluginPath, pluginName)
	execTemplate := &map[string]interface{}{
		"IPAddress":  "fe80::e2cb:4eff:fef9:6710",
		"OutputFile": path.Join(pluginDir, pluginName+".out"),
	}
	installPluginUnderTest(t, "", pluginName, execTemplate)

	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, network.NewFakeHost(nil))
	if err != nil {
		t.Fatalf("InitNetworkPlugin() failed: %v", err)
	}
	ip, err := plug.Status("namespace", "name", "dockerid2345")
	if err != nil {
		t.Errorf("Expected nil got %v", err)
	}

	// check output of status hook
	output, err := ioutil.ReadFile(path.Join(testPluginPath, pluginName, pluginName+".out"))
	if err != nil {
		t.Errorf("Failed to read output file: %v", err)
	}
	expectedOutput := "status namespace name dockerid2345"
	if string(output) != expectedOutput {
		t.Errorf("Mismatch in expected output for status hook. Expected '%s', got '%s'", expectedOutput, string(output))
	}
	if ip.IP.String() != "fe80::e2cb:4eff:fef9:6710" {
		t.Errorf("Mismatch in expected output for status hook. Expected 'fe80::e2cb:4eff:fef9:6710', got '%s'", ip.IP.String())
	}
}
func TestPluginStatusHook(t *testing.T) {
	pluginName := fmt.Sprintf("test%d", rand.Intn(1000))
	defer tearDownPlugin(pluginName)
	installPluginUnderTest(t, "", pluginName, nil)

	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, network.NewFakeHost(nil))
	if err != nil {
		t.Fatalf("InitNetworkPlugin() failed: %v", err)
	}
	ip, err := plug.Status("namespace", "name", "dockerid2345")
	if err != nil {
		t.Errorf("Expected nil got %v", err)
	}

	// check output of status hook
	output, err := ioutil.ReadFile(path.Join(testPluginPath, pluginName, pluginName+".out"))
	if err != nil {
		t.Errorf("Failed to read output file: %v", err)
	}
	expectedOutput := "status namespace name dockerid2345"
	if string(output) != expectedOutput {
		t.Errorf("Mismatch in expected output for status hook. Expected '%s', got '%s'", expectedOutput, string(output))
	}
	if ip.IP.String() != "10.20.30.40" {
		t.Errorf("Mismatch in expected output for status hook. Expected '10.20.30.40', got '%s'", ip.IP.String())
	}
}
func createFakeRuntimeCache(fakeRecorder *record.FakeRecorder) kubecontainer.RuntimeCache {
	fakeDocker := &dockertools.FakeDockerClient{}
	np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	dockerManager := dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, nil, nil, &cadvisorApi.MachineInfo{}, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil)
	return kubecontainer.NewFakeRuntimeCache(dockerManager)
}
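// Several of the fakes above appear to rely on InitNetworkPlugin falling back to a default no-op
// plugin when it is given an empty plugin list and an empty plugin name. A small hedged usage
// sketch of that pattern; the helper name newNoopNetworkPluginOrDie is hypothetical, not part of
// the package, and only calls shown elsewhere in this excerpt are used.
func newNoopNetworkPluginOrDie() network.NetworkPlugin {
	plug, err := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err != nil {
		panic(fmt.Sprintf("unexpected error initializing the default network plugin: %v", err))
	}
	return plug
}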
func TestPluginTearDownHook(t *testing.T) {
	// The temp dir where test plugins will be stored.
	testPluginPath := tmpDirOrDie()

	// install some random plugin under testPluginPath
	pluginName := selectName()
	defer tearDownPlugin(testPluginPath)
	defer releaseName(pluginName)

	installPluginUnderTest(t, "", testPluginPath, pluginName, nil)

	plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, network.NewFakeHost(nil))
	if err != nil {
		t.Fatalf("InitNetworkPlugin() failed: %v", err)
	}
	err = plug.TearDownPod("podNamespace", "podName", "dockerid2345")
	if err != nil {
		t.Errorf("TearDownPod() failed: %v", err)
	}

	// check output of teardown hook
	output, err := ioutil.ReadFile(path.Join(testPluginPath, pluginName, pluginName+".out"))
	if err != nil {
		t.Errorf("Failed to read output file: %v", err)
	}
	expectedOutput := "teardown podNamespace podName dockerid2345"
	if string(output) != expectedOutput {
		t.Errorf("Mismatch in expected output for teardown hook. Expected '%s', got '%s'", expectedOutput, string(output))
	}
}