Example #1
// TestTeardownCallsShaper verifies that a `TearDown` call triggers
// `shaper.Reset`.
func TestTeardownCallsShaper(t *testing.T) {
	fexec := &exec.FakeExec{
		CommandScript: []exec.FakeCommandAction{},
		LookPathFunc: func(file string) (string, error) {
			return fmt.Sprintf("/fake-bin/%s", file), nil
		},
	}
	fhost := nettest.NewFakeHost(nil)
	fshaper := &bandwidth.FakeShaper{}
	mockcni := &mock_cni.MockCNI{}
	kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
	kubenet.cniConfig = mockcni
	kubenet.iptables = ipttest.NewFake()
	kubenet.bandwidthShaper = fshaper
	kubenet.hostportHandler = hostporttest.NewFakeHostportHandler()

	mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)

	details := make(map[string]interface{})
	details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = "10.0.0.1/24"
	kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)

	existingContainerID := kubecontainer.BuildContainerID("docker", "123")
	kubenet.podIPs[existingContainerID] = "10.0.0.1"

	if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
		t.Fatalf("Unexpected error in TearDownPod: %v", err)
	}
	assert.Equal(t, []string{"10.0.0.1/32"}, fshaper.ResetCIDRs, "shaper.Reset should have been called")

	mockcni.AssertExpectations(t)
}
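Every example in this set constructs container IDs via kubecontainer.BuildContainerID. A minimal sketch of that helper, assuming ContainerID simply pairs a runtime name with a runtime-scoped ID (the real definitions live in pkg/kubelet/container and may differ in detail):

type ContainerID struct {
	Type string // runtime name, e.g. "docker" or "rkt"
	ID   string // runtime-specific container identifier
}

// BuildContainerID pairs a runtime name with an ID, e.g. ("docker", "123").
func BuildContainerID(typ, ID string) ContainerID {
	return ContainerID{Type: typ, ID: ID}
}

// String renders the "type://ID" form that ParseContainerID (used in
// Example #9) parses back.
func (c ContainerID) String() string {
	return c.Type + "://" + c.ID
}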
Example #2
// TestTearDownWithoutRuntime invokes the plugin without a runtime.
// This is how kubenet is invoked from the CRI.
func TestTearDownWithoutRuntime(t *testing.T) {
	fhost := nettest.NewFakeHost(nil)
	fhost.Legacy = false
	fhost.Runtime = nil
	mockcni := &mock_cni.MockCNI{}

	fexec := &exec.FakeExec{
		CommandScript: []exec.FakeCommandAction{},
		LookPathFunc: func(file string) (string, error) {
			return fmt.Sprintf("/fake-bin/%s", file), nil
		},
	}

	kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
	kubenet.cniConfig = mockcni
	kubenet.iptables = ipttest.NewFake()

	details := make(map[string]interface{})
	details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = "10.0.0.1/24"
	kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)

	existingContainerID := kubecontainer.BuildContainerID("docker", "123")
	kubenet.podIPs[existingContainerID] = "10.0.0.1"

	mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)

	if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
		t.Fatalf("Unexpected error in TearDownPod: %v", err)
	}
	// Assert that the CNI DelNetwork made it through and we didn't crash
	// without a runtime.
	mockcni.AssertExpectations(t)
}
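The mockcni.On(...).Return(nil) and AssertExpectations calls in both tests follow the testify/mock pattern. A sketch of how a mock such as MockCNI is typically declared (an assumption about mock_cni's internals, not its actual source):

type MockCNI struct {
	mock.Mock // embeds testify's call recorder
}

func (m *MockCNI) DelNetwork(net *libcni.NetworkConfig, rt *libcni.RuntimeConf) error {
	// Called records the invocation and matches it against prior On(...)
	// setups; Error(0) returns the first stubbed return value as an error.
	args := m.Called(net, rt)
	return args.Error(0)
}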
Example #3
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
// the sandbox is in ready state.
// For docker, PodSandbox is implemented by a container holding the network
// namespace for the pod.
// Note: docker doesn't use LogDirectory (yet).
func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
	// Step 1: Pull the image for the sandbox.
	image := defaultSandboxImage
	podSandboxImage := ds.podSandboxImage
	if len(podSandboxImage) != 0 {
		image = podSandboxImage
	}

	// NOTE: To use a custom sandbox image in a private repository, users need to configure the nodes with credentials properly.
	// see: http://kubernetes.io/docs/user-guide/images/#configuring-nodes-to-authenticate-to-a-private-repository
	if err := ds.client.PullImage(image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{}); err != nil {
		return "", fmt.Errorf("unable to pull image for the sandbox container: %v", err)
	}

	// Step 2: Create the sandbox container.
	createConfig, err := ds.makeSandboxDockerConfig(config, image)
	if err != nil {
		return "", fmt.Errorf("failed to make sandbox docker config for pod %q: %v", config.Metadata.GetName(), err)
	}
	createResp, err := ds.client.CreateContainer(*createConfig)
	recoverFromConflictIfNeeded(ds.client, err)

	if err != nil || createResp == nil {
		return "", fmt.Errorf("failed to create a sandbox for pod %q: %v", config.Metadata.GetName(), err)
	}

	// Step 3: Start the sandbox container.
	// Assume kubelet's garbage collector will remove the sandbox later
	// if startContainer fails.
	err = ds.client.StartContainer(createResp.ID)
	if err != nil {
		return createResp.ID, fmt.Errorf("failed to start sandbox container for pod %q: %v", config.Metadata.GetName(), err)
	}
	if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() {
		return createResp.ID, nil
	}

	// Step 4: Setup networking for the sandbox.
	// All pod networking is setup by a CNI plugin discovered at startup time.
	// This plugin assigns the pod ip, sets up routes inside the sandbox,
	// creates interfaces etc. In theory, its jurisdiction ends with pod
	// sandbox networking, but it might insert iptables rules or open ports
	// on the host as well, to satisfy parts of the pod spec that aren't
	// recognized by the CNI standard yet.
	cID := kubecontainer.BuildContainerID(runtimeName, createResp.ID)
	err = ds.networkPlugin.SetUpPod(config.GetMetadata().GetNamespace(), config.GetMetadata().GetName(), cID)
	// TODO: Do we need to teardown on failure or can we rely on a StopPodSandbox call with the given ID?
	return createResp.ID, err
}
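The ds.networkPlugin calls in Step 4, and in the next two examples, assume an interface roughly like this trimmed-down sketch (the real pkg/kubelet/network.NetworkPlugin carries additional methods such as Init and Name, and may differ by version):

type NetworkPlugin interface {
	// SetUpPod wires the pod's network namespace after the sandbox starts:
	// IP assignment, routes, interfaces.
	SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error
	// TearDownPod undoes SetUpPod when the sandbox stops.
	TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error
	// GetPodNetworkStatus reports the pod's IP, if the plugin knows it.
	GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*PodNetworkStatus, error)
}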
Example #4
// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated.
// TODO: This function blocks sandbox teardown on networking teardown. Is it
// better to cut our losses, assuming an out-of-band GC routine will clean up
// after us?
func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
	status, err := ds.PodSandboxStatus(podSandboxID)
	if err != nil {
		return fmt.Errorf("Failed to get sandbox status: %v", err)
	}
	if !status.GetLinux().GetNamespaces().GetOptions().GetHostNetwork() {
		m := status.GetMetadata()
		cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID)
		if err := ds.networkPlugin.TearDownPod(m.GetNamespace(), m.GetName(), cID); err != nil {
			// TODO: Figure out a way to retry this error. We can't
			// right now because the plugin throws errors when it doesn't find
			// eth0, which might not exist for various reasons (setup failed,
			// conf changed etc). In theory, it should teardown everything else
			// so there's no need to retry.
			glog.Errorf("Failed to teardown sandbox %v for pod %v/%v: %v", m.GetNamespace(), m.GetName(), podSandboxID, err)
		}
	}
	// TODO: Stop all running containers in the sandbox.
	return ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod)
}
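One hypothetical answer to the teardown-on-failure TODO in RunPodSandbox (Example #3) is to lean on StopPodSandbox, since RunPodSandbox can return a non-empty ID together with an error. A caller sketch, not actual kubelet code:

	id, err := ds.RunPodSandbox(config)
	if err != nil && id != "" {
		// Startup failed after the container was created; StopPodSandbox
		// tears down networking (if it was set up) and stops the container.
		if stopErr := ds.StopPodSandbox(id); stopErr != nil {
			glog.Errorf("Failed to stop partially started sandbox %q: %v", id, stopErr)
		}
	}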
Example #5
// getIPFromPlugin interrogates the network plugin for an IP.
func (ds *dockerService) getIPFromPlugin(sandbox *dockertypes.ContainerJSON) (string, error) {
	metadata, err := parseSandboxName(sandbox.Name)
	if err != nil {
		return "", err
	}
	msg := fmt.Sprintf("Couldn't find network status for %s/%s through plugin", *metadata.Namespace, *metadata.Name)
	if sharesHostNetwork(sandbox) {
		return "", fmt.Errorf("%v: not responsible for host-network sandboxes", msg)
	}
	cID := kubecontainer.BuildContainerID(runtimeName, sandbox.ID)
	networkStatus, err := ds.networkPlugin.GetPodNetworkStatus(*metadata.Namespace, *metadata.Name, cID)
	if err != nil {
		// This might be a sandbox that somehow ended up without a default
		// interface (eth0). We can't distinguish this from a more serious
		// error, so callers should probably treat it as non-fatal.
		return "", fmt.Errorf("%v: %v", msg, err)
	}
	if networkStatus == nil {
		return "", fmt.Errorf("%v: invalid network status for", msg)
	}
	return networkStatus.IP.String(), nil
}
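Because these errors can be benign (per the comment about a missing eth0), a caller would typically degrade gracefully rather than fail the whole status query. A hypothetical call site, as a sketch:

	ip := ""
	if pluginIP, err := ds.getIPFromPlugin(sandbox); err != nil {
		glog.Infof("%v", err) // non-fatal: e.g. a sandbox that lost its eth0
	} else {
		ip = pluginIP
	}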
Example #6
func TestGetPodStatus(t *testing.T) {
	fr := newFakeRktInterface()
	fs := newFakeSystemd()
	r := &Runtime{apisvc: fr, systemd: fs}

	tests := []struct {
		pods   []*rktapi.Pod
		result *kubecontainer.PodStatus
	}{
		// No pods.
		{
			nil,
			&kubecontainer.PodStatus{ID: "42", Name: "guestbook", Namespace: "default"},
		},
		// One pod.
		{
			[]*rktapi.Pod{
				makeRktPod(rktapi.PodState_POD_STATE_RUNNING,
					"uuid-4002", "42", "guestbook", "default",
					"10.10.10.42", "100000", "7",
					[]string{"app-1", "app-2"},
					[]string{"img-id-1", "img-id-2"},
					[]string{"img-name-1", "img-name-2"},
					[]string{"1001", "1002"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED},
					[]int32{0, 0},
				),
			},
			&kubecontainer.PodStatus{
				ID:        "42",
				Name:      "guestbook",
				Namespace: "default",
				IP:        "10.10.10.42",
				ContainerStatuses: []*kubecontainer.ContainerStatus{
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"),
						Name:         "app-1",
						State:        kubecontainer.ContainerStateRunning,
						CreatedAt:    time.Unix(100000, 0),
						StartedAt:    time.Unix(100000, 0),
						Image:        "img-name-1",
						ImageID:      "rkt://img-id-1",
						Hash:         1001,
						RestartCount: 7,
					},
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"),
						Name:         "app-2",
						State:        kubecontainer.ContainerStateExited,
						CreatedAt:    time.Unix(100000, 0),
						StartedAt:    time.Unix(100000, 0),
						Image:        "img-name-2",
						ImageID:      "rkt://img-id-2",
						Hash:         1002,
						RestartCount: 7,
						Reason:       "Completed",
					},
				},
			},
		},
		// Multiple pods.
		{
			[]*rktapi.Pod{
				makeRktPod(rktapi.PodState_POD_STATE_EXITED,
					"uuid-4002", "42", "guestbook", "default",
					"10.10.10.42", "90000", "7",
					[]string{"app-1", "app-2"},
					[]string{"img-id-1", "img-id-2"},
					[]string{"img-name-1", "img-name-2"},
					[]string{"1001", "1002"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED},
					[]int32{0, 0},
				),
				makeRktPod(rktapi.PodState_POD_STATE_RUNNING, // The latest pod is running.
					"uuid-4003", "42", "guestbook", "default",
					"10.10.10.42", "100000", "10",
					[]string{"app-1", "app-2"},
					[]string{"img-id-1", "img-id-2"},
					[]string{"img-name-1", "img-name-2"},
					[]string{"1001", "1002"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED},
					[]int32{0, 1},
				),
			},
			&kubecontainer.PodStatus{
				ID:        "42",
				Name:      "guestbook",
				Namespace: "default",
				IP:        "10.10.10.42",
				// Result should contain all containers.
				ContainerStatuses: []*kubecontainer.ContainerStatus{
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"),
						Name:         "app-1",
						State:        kubecontainer.ContainerStateRunning,
						CreatedAt:    time.Unix(90000, 0),
						StartedAt:    time.Unix(90000, 0),
						Image:        "img-name-1",
						ImageID:      "rkt://img-id-1",
						Hash:         1001,
						RestartCount: 7,
					},
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"),
						Name:         "app-2",
						State:        kubecontainer.ContainerStateExited,
						CreatedAt:    time.Unix(90000, 0),
						StartedAt:    time.Unix(90000, 0),
						Image:        "img-name-2",
						ImageID:      "rkt://img-id-2",
						Hash:         1002,
						RestartCount: 7,
						Reason:       "Completed",
					},
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4003:app-1"),
						Name:         "app-1",
						State:        kubecontainer.ContainerStateRunning,
						CreatedAt:    time.Unix(100000, 0),
						StartedAt:    time.Unix(100000, 0),
						Image:        "img-name-1",
						ImageID:      "rkt://img-id-1",
						Hash:         1001,
						RestartCount: 10,
					},
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4003:app-2"),
						Name:         "app-2",
						State:        kubecontainer.ContainerStateExited,
						CreatedAt:    time.Unix(100000, 0),
						StartedAt:    time.Unix(100000, 0),
						Image:        "img-name-2",
						ImageID:      "rkt://img-id-2",
						Hash:         1002,
						RestartCount: 10,
						ExitCode:     1,
						Reason:       "Error",
					},
				},
			},
		},
	}

	for i, tt := range tests {
		testCaseHint := fmt.Sprintf("test case #%d", i)
		fr.pods = tt.pods

		status, err := r.GetPodStatus("42", "guestbook", "default")
		if err != nil {
			t.Errorf("test case #%d: unexpected error: %v", i, err)
		}

		assert.Equal(t, tt.result, status, testCaseHint)
		assert.Equal(t, []string{"ListPods"}, fr.called, testCaseHint)
		fr.CleanCalls()
	}
}
Example #7
func TestGetPods(t *testing.T) {
	fr := newFakeRktInterface()
	fs := newFakeSystemd()
	r := &Runtime{apisvc: fr, systemd: fs}

	tests := []struct {
		pods   []*rktapi.Pod
		result []*kubecontainer.Pod
	}{
		// No pods.
		{},
		// One pod.
		{
			[]*rktapi.Pod{
				makeRktPod(rktapi.PodState_POD_STATE_RUNNING,
					"uuid-4002", "42", "guestbook", "default",
					"10.10.10.42", "100000", "7",
					[]string{"app-1", "app-2"},
					[]string{"img-id-1", "img-id-2"},
					[]string{"img-name-1", "img-name-2"},
					[]string{"1001", "1002"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED},
					[]int32{0, 0},
				),
			},
			[]*kubecontainer.Pod{
				{
					ID:        "42",
					Name:      "guestbook",
					Namespace: "default",
					Containers: []*kubecontainer.Container{
						{
							ID:      kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"),
							Name:    "app-1",
							Image:   "img-name-1",
							Hash:    1001,
							Created: 100000,
							State:   "running",
						},
						{
							ID:      kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"),
							Name:    "app-2",
							Image:   "img-name-2",
							Hash:    1002,
							Created: 100000,
							State:   "exited",
						},
					},
				},
			},
		},
		// Multiple pods.
		{
			[]*rktapi.Pod{
				makeRktPod(rktapi.PodState_POD_STATE_RUNNING,
					"uuid-4002", "42", "guestbook", "default",
					"10.10.10.42", "100000", "7",
					[]string{"app-1", "app-2"},
					[]string{"img-id-1", "img-id-2"},
					[]string{"img-name-1", "img-name-2"},
					[]string{"1001", "1002"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED},
					[]int32{0, 0},
				),
				makeRktPod(rktapi.PodState_POD_STATE_EXITED,
					"uuid-4003", "43", "guestbook", "default",
					"10.10.10.43", "90000", "7",
					[]string{"app-11", "app-22"},
					[]string{"img-id-11", "img-id-22"},
					[]string{"img-name-11", "img-name-22"},
					[]string{"10011", "10022"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_EXITED, rktapi.AppState_APP_STATE_EXITED},
					[]int32{0, 0},
				),
				makeRktPod(rktapi.PodState_POD_STATE_EXITED,
					"uuid-4004", "43", "guestbook", "default",
					"10.10.10.44", "100000", "8",
					[]string{"app-11", "app-22"},
					[]string{"img-id-11", "img-id-22"},
					[]string{"img-name-11", "img-name-22"},
					[]string{"10011", "10022"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_RUNNING},
					[]int32{0, 0},
				),
			},
			[]*kubecontainer.Pod{
				{
					ID:        "42",
					Name:      "guestbook",
					Namespace: "default",
					Containers: []*kubecontainer.Container{
						{
							ID:      kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"),
							Name:    "app-1",
							Image:   "img-name-1",
							Hash:    1001,
							Created: 100000,
							State:   "running",
						},
						{
							ID:      kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"),
							Name:    "app-2",
							Image:   "img-name-2",
							Hash:    1002,
							Created: 100000,
							State:   "exited",
						},
					},
				},
				{
					ID:        "43",
					Name:      "guestbook",
					Namespace: "default",
					Containers: []*kubecontainer.Container{
						{
							ID:      kubecontainer.BuildContainerID("rkt", "uuid-4003:app-11"),
							Name:    "app-11",
							Image:   "img-name-11",
							Hash:    10011,
							Created: 90000,
							State:   "exited",
						},
						{
							ID:      kubecontainer.BuildContainerID("rkt", "uuid-4003:app-22"),
							Name:    "app-22",
							Image:   "img-name-22",
							Hash:    10022,
							Created: 90000,
							State:   "exited",
						},
						{
							ID:      kubecontainer.BuildContainerID("rkt", "uuid-4004:app-11"),
							Name:    "app-11",
							Image:   "img-name-11",
							Hash:    10011,
							Created: 100000,
							State:   "running",
						},
						{
							ID:      kubecontainer.BuildContainerID("rkt", "uuid-4004:app-22"),
							Name:    "app-22",
							Image:   "img-name-22",
							Hash:    10022,
							Created: 100000,
							State:   "running",
						},
					},
				},
			},
		},
	}

	for i, tt := range tests {
		testCaseHint := fmt.Sprintf("test case #%d", i)
		fr.pods = tt.pods

		pods, err := r.GetPods(true)
		if err != nil {
			t.Errorf("test case #%d: unexpected error: %v", i, err)
		}

		assert.Equal(t, tt.result, pods, testCaseHint)
		assert.Equal(t, []string{"ListPods"}, fr.called, fmt.Sprintf("test case %d: unexpected called list", i))

		fr.CleanCalls()
	}
}
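The boolean passed to GetPods selects whether exited pods are included, which is why the exited uuid-4003 pod still shows up in the expected results above. A usage sketch, assuming the kubecontainer.Runtime signature GetPods(all bool):

	running, err := r.GetPods(false) // running pods only
	if err != nil {
		return err
	}
	all, err := r.GetPods(true) // running and exited pods, as in the test above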
Example #8
func TestGetPodStatus(t *testing.T) {
	fr := newFakeRktInterface()
	fs := newFakeSystemd()
	fos := &containertesting.FakeOS{}
	frh := &fakeRuntimeHelper{}
	r := &Runtime{
		apisvc:        fr,
		systemd:       fs,
		runtimeHelper: frh,
		os:            fos,
	}

	ns := func(seconds int64) int64 {
		return seconds * 1e9
	}

	tests := []struct {
		pods   []*rktapi.Pod
		result *kubecontainer.PodStatus
	}{
		// No pods.
		{
			nil,
			&kubecontainer.PodStatus{ID: "42", Name: "guestbook", Namespace: "default"},
		},
		// One pod.
		{
			[]*rktapi.Pod{
				makeRktPod(rktapi.PodState_POD_STATE_RUNNING,
					"uuid-4002", "42", "guestbook", "default",
					"10.10.10.42", ns(10), ns(20), "7",
					[]string{"app-1", "app-2"},
					[]string{"img-id-1", "img-id-2"},
					[]string{"img-name-1", "img-name-2"},
					[]string{"1001", "1002"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED},
					[]int32{0, 0},
				),
			},
			&kubecontainer.PodStatus{
				ID:        "42",
				Name:      "guestbook",
				Namespace: "default",
				IP:        "10.10.10.42",
				ContainerStatuses: []*kubecontainer.ContainerStatus{
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"),
						Name:         "app-1",
						State:        kubecontainer.ContainerStateRunning,
						CreatedAt:    time.Unix(10, 0),
						StartedAt:    time.Unix(20, 0),
						FinishedAt:   time.Unix(0, 30),
						Image:        "img-name-1:latest",
						ImageID:      "rkt://img-id-1",
						Hash:         1001,
						RestartCount: 7,
					},
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"),
						Name:         "app-2",
						State:        kubecontainer.ContainerStateExited,
						CreatedAt:    time.Unix(10, 0),
						StartedAt:    time.Unix(20, 0),
						FinishedAt:   time.Unix(0, 30),
						Image:        "img-name-2:latest",
						ImageID:      "rkt://img-id-2",
						Hash:         1002,
						RestartCount: 7,
						Reason:       "Completed",
					},
				},
			},
		},
		// Multiple pods.
		{
			[]*rktapi.Pod{
				makeRktPod(rktapi.PodState_POD_STATE_EXITED,
					"uuid-4002", "42", "guestbook", "default",
					"10.10.10.42", ns(10), ns(20), "7",
					[]string{"app-1", "app-2"},
					[]string{"img-id-1", "img-id-2"},
					[]string{"img-name-1", "img-name-2"},
					[]string{"1001", "1002"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED},
					[]int32{0, 0},
				),
				makeRktPod(rktapi.PodState_POD_STATE_RUNNING, // The latest pod is running.
					"uuid-4003", "42", "guestbook", "default",
					"10.10.10.42", ns(10), ns(20), "10",
					[]string{"app-1", "app-2"},
					[]string{"img-id-1", "img-id-2"},
					[]string{"img-name-1", "img-name-2"},
					[]string{"1001", "1002"},
					[]rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED},
					[]int32{0, 1},
				),
			},
			&kubecontainer.PodStatus{
				ID:        "42",
				Name:      "guestbook",
				Namespace: "default",
				IP:        "10.10.10.42",
				// Result should contain all containers.
				ContainerStatuses: []*kubecontainer.ContainerStatus{
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"),
						Name:         "app-1",
						State:        kubecontainer.ContainerStateRunning,
						CreatedAt:    time.Unix(10, 0),
						StartedAt:    time.Unix(20, 0),
						FinishedAt:   time.Unix(0, 30),
						Image:        "img-name-1:latest",
						ImageID:      "rkt://img-id-1",
						Hash:         1001,
						RestartCount: 7,
					},
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"),
						Name:         "app-2",
						State:        kubecontainer.ContainerStateExited,
						CreatedAt:    time.Unix(10, 0),
						StartedAt:    time.Unix(20, 0),
						FinishedAt:   time.Unix(0, 30),
						Image:        "img-name-2:latest",
						ImageID:      "rkt://img-id-2",
						Hash:         1002,
						RestartCount: 7,
						Reason:       "Completed",
					},
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4003:app-1"),
						Name:         "app-1",
						State:        kubecontainer.ContainerStateRunning,
						CreatedAt:    time.Unix(10, 0),
						StartedAt:    time.Unix(20, 0),
						FinishedAt:   time.Unix(0, 30),
						Image:        "img-name-1:latest",
						ImageID:      "rkt://img-id-1",
						Hash:         1001,
						RestartCount: 10,
					},
					{
						ID:           kubecontainer.BuildContainerID("rkt", "uuid-4003:app-2"),
						Name:         "app-2",
						State:        kubecontainer.ContainerStateExited,
						CreatedAt:    time.Unix(10, 0),
						StartedAt:    time.Unix(20, 0),
						FinishedAt:   time.Unix(0, 30),
						Image:        "img-name-2:latest",
						ImageID:      "rkt://img-id-2",
						Hash:         1002,
						RestartCount: 10,
						ExitCode:     1,
						Reason:       "Error",
					},
				},
			},
		},
	}

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	for i, tt := range tests {
		testCaseHint := fmt.Sprintf("test case #%d", i)
		fr.pods = tt.pods

		podTimes := map[string]time.Time{}
		for _, pod := range tt.pods {
			podTimes[podFinishedMarkerPath(r.runtimeHelper.GetPodDir(tt.result.ID), pod.Id)] = tt.result.ContainerStatuses[0].FinishedAt
		}

		r.os.(*containertesting.FakeOS).StatFn = func(name string) (os.FileInfo, error) {
			podTime, ok := podTimes[name]
			if !ok {
				t.Errorf("osStat called with %v, but only knew about %#v", name, podTimes)
			}
			mockFI := mock_os.NewMockFileInfo(ctrl)
			mockFI.EXPECT().ModTime().Return(podTime)
			return mockFI, nil
		}

		status, err := r.GetPodStatus("42", "guestbook", "default")
		if err != nil {
			t.Errorf("test case #%d: unexpected error: %v", i, err)
		}

		assert.Equal(t, tt.result, status, testCaseHint)
		assert.Equal(t, []string{"ListPods"}, fr.called, testCaseHint)
		fr.CleanCalls()
	}
}
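The r.os.(*containertesting.FakeOS).StatFn override relies on FakeOS exposing a pluggable Stat hook. A minimal sketch of the assumed shape of that testing helper:

type FakeOS struct {
	// StatFn, when set, replaces the real os.Stat for code under test.
	StatFn func(string) (os.FileInfo, error)
}

func (f *FakeOS) Stat(name string) (os.FileInfo, error) {
	if f.StatFn != nil {
		return f.StatFn(name)
	}
	return nil, nil
}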
Example #9
func TestLifeCycleHooks(t *testing.T) {
	runner := lifecycle.NewFakeHandlerRunner()
	fr := newFakeRktInterface()
	fs := newFakeSystemd()

	rkt := &Runtime{
		runner:              runner,
		apisvc:              fr,
		systemd:             fs,
		containerRefManager: kubecontainer.NewRefManager(),
	}

	tests := []struct {
		pod           *api.Pod
		runtimePod    *kubecontainer.Pod
		postStartRuns []string
		preStopRuns   []string
		err           error
	}{
		{
			// Case 0, container without any hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "container-name-1"},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{ID: kubecontainer.BuildContainerID("rkt", "id-1")},
				},
			},
			[]string{},
			[]string{},
			nil,
		},
		{
			// Case 1, containers with post-start and pre-stop hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PostStart: &api.Handler{
									Exec: &api.ExecAction{},
								},
							},
						},
						{
							Name: "container-name-2",
							Lifecycle: &api.Lifecycle{
								PostStart: &api.Handler{
									HTTPGet: &api.HTTPGetAction{},
								},
							},
						},
						{
							Name: "container-name-3",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									Exec: &api.ExecAction{},
								},
							},
						},
						{
							Name: "container-name-4",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									HTTPGet: &api.HTTPGetAction{},
								},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-4"),
						Name: "container-name-4",
					},
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-3"),
						Name: "container-name-3",
					},
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-2"),
						Name: "container-name-2",
					},
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-1"),
						Name: "container-name-1",
					},
				},
			},
			[]string{
				"exec on pod: pod-1_ns-1(uid-1), container: container-name-1: rkt://uuid:container-name-1",
				"http-get on pod: pod-1_ns-1(uid-1), container: container-name-2: rkt://uuid:container-name-2",
			},
			[]string{
				"exec on pod: pod-1_ns-1(uid-1), container: container-name-3: rkt://uuid:container-name-3",
				"http-get on pod: pod-1_ns-1(uid-1), container: container-name-4: rkt://uuid:container-name-4",
			},
			nil,
		},
		{
			// Case 2, one container with invalid hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PostStart: &api.Handler{},
								PreStop:   &api.Handler{},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-1"),
						Name: "container-name-1",
					},
				},
			},
			[]string{},
			[]string{},
			errors.NewAggregate([]error{fmt.Errorf("Invalid handler: %v", &api.Handler{})}),
		},
	}

	for i, tt := range tests {
		testCaseHint := fmt.Sprintf("test case #%d", i)

		pod := &rktapi.Pod{Id: "uuid"}
		for _, c := range tt.runtimePod.Containers {
			pod.Apps = append(pod.Apps, &rktapi.App{
				Name:  c.Name,
				State: rktapi.AppState_APP_STATE_RUNNING,
			})
		}
		fr.pods = []*rktapi.Pod{pod}

		// Run post-start hooks.
		err := rkt.runLifecycleHooks(tt.pod, tt.runtimePod, lifecyclePostStartHook)
		assert.Equal(t, tt.err, err, testCaseHint)

		sort.Sort(sortedStringList(tt.postStartRuns))
		sort.Sort(sortedStringList(runner.HandlerRuns))

		assert.Equal(t, tt.postStartRuns, runner.HandlerRuns, testCaseHint)

		runner.Reset()

		// Run pre-stop hooks.
		err = rkt.runLifecycleHooks(tt.pod, tt.runtimePod, lifecyclePreStopHook)
		assert.Equal(t, tt.err, err, testCaseHint)

		sort.Sort(sortedStringList(tt.preStopRuns))
		sort.Sort(sortedStringList(runner.HandlerRuns))

		assert.Equal(t, tt.preStopRuns, runner.HandlerRuns, testCaseHint)

		runner.Reset()
	}
}
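sortedStringList is presumably a small sort.Interface adapter over []string, along these lines (sort.Strings would serve equally well):

type sortedStringList []string

func (s sortedStringList) Len() int           { return len(s) }
func (s sortedStringList) Less(i, j int) bool { return s[i] < s[j] }
func (s sortedStringList) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }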
Example #10
func TestPreStopHooks(t *testing.T) {
	runner := lifecycle.NewFakeHandlerRunner()
	fr := newFakeRktInterface()
	fs := newFakeSystemd()

	rkt := &Runtime{
		runner:              runner,
		apisvc:              fr,
		systemd:             fs,
		containerRefManager: kubecontainer.NewRefManager(),
	}

	tests := []struct {
		pod         *api.Pod
		runtimePod  *kubecontainer.Pod
		preStopRuns []string
		err         error
	}{
		{
			// Case 0, container without any hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "container-name-1"},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{ID: kubecontainer.BuildContainerID("rkt", "id-1")},
				},
			},
			[]string{},
			nil,
		},
		{
			// Case 1, containers with pre-stop hook.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									Exec: &api.ExecAction{},
								},
							},
						},
						{
							Name: "container-name-2",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									HTTPGet: &api.HTTPGetAction{},
								},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{ID: kubecontainer.BuildContainerID("rkt", "id-1")},
					{ID: kubecontainer.BuildContainerID("rkt", "id-2")},
				},
			},
			[]string{
				"exec on pod: pod-1_ns-1(uid-1), container: container-name-1: rkt://id-1",
				"http-get on pod: pod-1_ns-1(uid-1), container: container-name-2: rkt://id-2",
			},
			nil,
		},
		{
			// Case 2, one container with invalid hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{ID: kubecontainer.BuildContainerID("rkt", "id-1")},
				},
			},
			[]string{},
			errors.NewAggregate([]error{fmt.Errorf("Invalid handler: %v", &api.Handler{})}),
		},
	}

	for i, tt := range tests {
		testCaseHint := fmt.Sprintf("test case #%d", i)

		// Run pre-stop hooks.
		err := rkt.runPreStopHook(tt.pod, tt.runtimePod)
		assert.Equal(t, tt.err, err, testCaseHint)

		sort.Sort(sortedStringList(tt.preStopRuns))
		sort.Sort(sortedStringList(runner.HandlerRuns))

		assert.Equal(t, tt.preStopRuns, runner.HandlerRuns, testCaseHint)

		runner.Reset()
	}
}