Example #1
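// logPodInfo collects network-namespace state for each diagnostic pod: it
// resolves the PID of the pod's first container via `docker inspect`, then
// uses nsenter to record the pod's IP addresses and routes under the log dir.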
func (l *LogInterface) logPodInfo(kubeClient *kclientset.Clientset) {
	pods, _, err := GetLocalAndNonLocalDiagnosticPods(kubeClient)
	if err != nil {
		l.Result.Error("DLogNet1003", err, err.Error())
		return
	}

	for _, pod := range pods {
		if len(pod.Status.ContainerStatuses) == 0 {
			continue
		}

		containerID := kcontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
		out, err := exec.Command("docker", []string{"inspect", "-f", "'{{.State.Pid}}'", containerID}...).Output()
		if err != nil {
			l.Result.Error("DLogNet1004", err, fmt.Sprintf("Fetching pid for container %q failed: %s", containerID, err))
			continue
		}
		pid := strings.Trim(string(out), "\n")

		p := LogInterface{
			Result: l.Result,
			Logdir: filepath.Join(l.Logdir, NetworkDiagPodLogDirPrefix, pod.Name),
		}
		p.Run(fmt.Sprintf("nsenter -n -t %s -- ip addr show", pid), "addresses")
		p.Run(fmt.Sprintf("nsenter -n -t %s -- ip route show", pid), "routes")
	}
}
Example #2
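// checkPodToServiceConnection checks connectivity from fromPod to toService by
// entering the pod's network namespace and opening a bash /dev/tcp (or
// /dev/udp) connection to the service's cluster IP and port, then compares the
// result with the connectivity expected from the VNID map.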
func (d CheckServiceNetwork) checkPodToServiceConnection(fromPod *kapi.Pod, toService *kapi.Service) {
	if len(fromPod.Status.ContainerStatuses) == 0 {
		d.res.Error("DSvcNet1008", nil, fmt.Sprintf("ContainerID not found for pod %q", util.PrintPod(fromPod)))
		return
	}

	success := util.ExpectedConnectionStatus(fromPod.Namespace, toService.Namespace, d.vnidMap)

	kexecer := kexec.New()
	containerID := kcontainer.ParseContainerID(fromPod.Status.ContainerStatuses[0].ContainerID).ID
	pid, err := kexecer.Command("docker", "inspect", "-f", "{{.State.Pid}}", containerID).CombinedOutput()
	if err != nil {
		d.res.Error("DSvcNet1009", err, fmt.Sprintf("Fetching pid for pod %q failed. Error: %s", util.PrintPod(fromPod), err))
		return
	}

	// In bash, redirecting to /dev/tcp/HOST/PORT or /dev/udp/HOST/PORT opens a connection
	// to that HOST:PORT. Use this to test connectivity to the service; we can't use ping
	// like in the pod connectivity check because only connections to the correct port
	// get redirected by the iptables rules.
	srvConCmd := fmt.Sprintf("echo -n '' > /dev/%s/%s/%d", strings.ToLower(string(toService.Spec.Ports[0].Protocol)), toService.Spec.ClusterIP, toService.Spec.Ports[0].Port)
	out, err := kexecer.Command("nsenter", "-n", "-t", strings.Trim(fmt.Sprintf("%s", pid), "\n"), "--", "timeout", "1", "bash", "-c", srvConCmd).CombinedOutput()
	if success && err != nil {
		d.res.Error("DSvcNet1010", err, fmt.Sprintf("Connectivity from pod %q to service %q failed. Error: %s, Out: %s", util.PrintPod(fromPod), printService(toService), err, string(out)))
	} else if !success && err == nil {
		msg := fmt.Sprintf("Unexpected connectivity from pod %q to service %q.", util.PrintPod(fromPod), printService(toService))
		d.res.Error("DSvcNet1011", fmt.Errorf("%s", msg), msg)
	}
}
Example #3
// checkPodToPodConnection verifies connection from fromPod to toPod.
// Connection check from toPod to fromPod will be done by the node of toPod.
func (d CheckPodNetwork) checkPodToPodConnection(fromPod, toPod *kapi.Pod) {
	if len(fromPod.Status.ContainerStatuses) == 0 {
		err := fmt.Errorf("ContainerID not found for pod %q", util.PrintPod(fromPod))
		d.res.Error("DPodNet1006", err, err.Error())
		return
	}

	success := util.ExpectedConnectionStatus(fromPod.Namespace, toPod.Namespace, d.vnidMap)

	kexecer := kexec.New()
	containerID := kcontainer.ParseContainerID(fromPod.Status.ContainerStatuses[0].ContainerID).ID
	pid, err := kexecer.Command("docker", "inspect", "-f", "{{.State.Pid}}", containerID).CombinedOutput()
	if err != nil {
		d.res.Error("DPodNet1007", err, fmt.Sprintf("Fetching pid for pod %q, container %q failed. Error: %s", util.PrintPod(fromPod), containerID, err))
		return
	}

	out, err := kexecer.Command("nsenter", "-n", "-t", strings.Trim(fmt.Sprintf("%s", pid), "\n"), "--", "ping", "-c1", "-W2", toPod.Status.PodIP).CombinedOutput()
	if success && err != nil {
		d.res.Error("DPodNet1008", err, fmt.Sprintf("Connectivity from pod %q to pod %q failed. Error: %s, Out: %s", util.PrintPod(fromPod), util.PrintPod(toPod), err, string(out)))
	} else if !success && err == nil {
		msg := fmt.Sprintf("Unexpected connectivity from pod %q to pod %q.", util.PrintPod(fromPod), util.PrintPod(toPod))
		d.res.Error("DPodNet1009", fmt.Errorf("%s", msg), msg)
	}
}
Example #4
// doProbe probes the container once and records the result.
// Returns whether the worker should continue.
func (w *worker) doProbe() (keepGoing bool) {
	defer util.HandleCrash(func(_ interface{}) { keepGoing = true })

	status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID)
	if !ok {
		// Either the pod has not been created yet, or it was already deleted.
		glog.V(3).Infof("No status for pod: %v", kubeletutil.FormatPodName(w.pod))
		return true
	}

	// Worker should terminate if pod is terminated.
	if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded {
		glog.V(3).Infof("Pod %v %v, exiting probe worker",
			kubeletutil.FormatPodName(w.pod), status.Phase)
		return false
	}

	c, ok := api.GetContainerStatus(status.ContainerStatuses, w.container.Name)
	if !ok {
		// Either the container has not been created yet, or it was deleted.
		glog.V(3).Infof("Non-existant container probed: %v - %v",
			kubeletutil.FormatPodName(w.pod), w.container.Name)
		return true // Wait for more information.
	}

	if w.containerID.String() != c.ContainerID {
		if !w.containerID.IsEmpty() {
			w.resultsManager.Remove(w.containerID)
		}
		w.containerID = kubecontainer.ParseContainerID(c.ContainerID)
	}

	if c.State.Running == nil {
		glog.V(3).Infof("Non-running container probed: %v - %v",
			kubeletutil.FormatPodName(w.pod), w.container.Name)
		if !w.containerID.IsEmpty() {
			w.resultsManager.Set(w.containerID, results.Failure, w.pod)
		}
		// Abort if the container will not be restarted.
		return c.State.Terminated == nil ||
			w.pod.Spec.RestartPolicy != api.RestartPolicyNever
	}

	if int64(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
		w.resultsManager.Set(w.containerID, w.initialValue, w.pod)
		return true
	}

	// TODO: Move error handling out of prober.
	result, _ := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID)
	if result != probe.Unknown {
		w.resultsManager.Set(w.containerID, result != probe.Failure, w.pod)
	}

	return true
}
Example #5
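// UpdatePodStatus sets the Ready field on each container status: a container
// is ready only if it is running and either its cached readiness result is
// Success or no readiness probe worker exists for it.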
func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *api.PodStatus) {
	for i, c := range podStatus.ContainerStatuses {
		var ready bool
		if c.State.Running == nil {
			ready = false
		} else if result, ok := m.readinessManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok {
			ready = result == results.Success
		} else {
			// Check whether there is a probe which hasn't run yet.
			_, exists := m.getWorker(podUID, c.Name, readiness)
			ready = !exists
		}
		podStatus.ContainerStatuses[i].Ready = ready
	}
}
Example #6
// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. If the previous flag is set, only the ID of the last terminated container is returned; otherwise, the
// currently running container is preferred over a previous termination. If info about the container is not available, a
// specific error is returned to the end user.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
	var cID string

	cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName)
	// if not found, check the init containers
	if !found {
		cStatus, found = api.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
	}
	if !found {
		return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
	}
	lastState := cStatus.LastTerminationState
	waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated

	switch {
	case previous:
		if lastState.Terminated == nil {
			return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
		}
		cID = lastState.Terminated.ContainerID

	case running != nil:
		cID = cStatus.ContainerID

	case terminated != nil:
		cID = terminated.ContainerID

	case lastState.Terminated != nil:
		cID = lastState.Terminated.ContainerID

	case waiting != nil:
		// output some info for the most common pending failures
		switch reason := waiting.Reason; reason {
		case images.ErrImagePull.Error():
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName)
		case images.ErrImagePullBackOff.Error():
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName)
		default:
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason)
		}
	default:
		// unrecognized state
		return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName)
	}

	return kubecontainer.ParseContainerID(cID), nil
}
Example #7
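// checkNodeConnection pings the given node IP from inside the pod's network
// namespace (PID resolved via `docker inspect`, entered via nsenter) and
// reports a diagnostic error if the ping fails.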
func checkNodeConnection(pod *kapi.Pod, nodeIP string, r types.DiagnosticResult) {
	if len(pod.Status.ContainerStatuses) == 0 {
		err := fmt.Errorf("ContainerID not found for pod %q", util.PrintPod(pod))
		r.Error("DNodeNet1003", err, err.Error())
		return
	}

	kexecer := kexec.New()
	containerID := kcontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
	pid, err := kexecer.Command("docker", "inspect", "-f", "{{.State.Pid}}", containerID).CombinedOutput()
	if err != nil {
		r.Error("DNodeNet1004", err, fmt.Sprintf("Fetching pid for pod %q, container %q failed. Error: %s", util.PrintPod(pod), containerID, err))
		return
	}

	if _, err := kexecer.Command("nsenter", "-n", "-t", strings.Trim(fmt.Sprintf("%s", pid), "\n"), "--", "ping", "-c1", "-W2", nodeIP).CombinedOutput(); err != nil {
		r.Error("DNodeNet1005", err, fmt.Sprintf("Connectivity from pod %q to node %q failed. Error: %s", util.PrintPod(pod), nodeIP, err))
	}
}
Example #8
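// UpdatePodStatus sets the Ready field on every container status: regular
// containers are ready when running with a successful cached readiness result
// (or no readiness worker at all); init containers are ready once they have
// terminated with exit code 0.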
func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *v1.PodStatus) {
	for i, c := range podStatus.ContainerStatuses {
		var ready bool
		if c.State.Running == nil {
			ready = false
		} else if result, ok := m.readinessManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok {
			ready = result == results.Success
		} else {
			// Check whether there is a probe which hasn't run yet.
			_, exists := m.getWorker(podUID, c.Name, readiness)
			ready = !exists
		}
		podStatus.ContainerStatuses[i].Ready = ready
	}
	// init containers are ready if they have exited with success or if a readiness probe has
	// succeeded.
	for i, c := range podStatus.InitContainerStatuses {
		var ready bool
		if c.State.Terminated != nil && c.State.Terminated.ExitCode == 0 {
			ready = true
		}
		podStatus.InitContainerStatuses[i].Ready = ready
	}
}
Example #9
// doProbe probes the container once and records the result.
// Returns whether the worker should continue.
func (w *worker) doProbe() (keepGoing bool) {
	defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true })

	status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID)
	if !ok {
		// Either the pod has not been created yet, or it was already deleted.
		glog.V(3).Infof("No status for pod: %v", format.Pod(w.pod))
		return true
	}

	// Worker should terminate if pod is terminated.
	if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded {
		glog.V(3).Infof("Pod %v %v, exiting probe worker",
			format.Pod(w.pod), status.Phase)
		return false
	}

	c, ok := api.GetContainerStatus(status.ContainerStatuses, w.container.Name)
	if !ok || len(c.ContainerID) == 0 {
		// Either the container has not been created yet, or it was deleted.
		glog.V(3).Infof("Probe target container not found: %v - %v",
			format.Pod(w.pod), w.container.Name)
		return true // Wait for more information.
	}

	if w.containerID.String() != c.ContainerID {
		if !w.containerID.IsEmpty() {
			w.resultsManager.Remove(w.containerID)
		}
		w.containerID = kubecontainer.ParseContainerID(c.ContainerID)
		w.resultsManager.Set(w.containerID, w.initialValue, w.pod)
	}

	if c.State.Running == nil {
		glog.V(3).Infof("Non-running container probed: %v - %v",
			format.Pod(w.pod), w.container.Name)
		if !w.containerID.IsEmpty() {
			w.resultsManager.Set(w.containerID, results.Failure, w.pod)
		}
		// Abort if the container will not be restarted.
		return c.State.Terminated == nil ||
			w.pod.Spec.RestartPolicy != api.RestartPolicyNever
	}

	if int(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
		return true
	}

	result, err := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID)
	if err != nil {
		// Prober error, throw away the result.
		return true
	}

	if w.lastResult == result {
		w.resultRun++
	} else {
		w.lastResult = result
		w.resultRun = 1
	}

	if (result == results.Failure && w.resultRun < w.spec.FailureThreshold) ||
		(result == results.Success && w.resultRun < w.spec.SuccessThreshold) {
		// Success or failure is below threshold - leave the probe state unchanged.
		return true
	}

	w.resultsManager.Set(w.containerID, result, w.pod)

	return true
}
Example #10
// doProbe probes the container once and records the result.
// Returns whether the worker should continue.
func (w *worker) doProbe() (keepGoing bool) {
	defer func() { recover() }() // Actually eat panics (HandleCrash takes care of logging)
	defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true })

	status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID)
	if !ok {
		// Either the pod has not been created yet, or it was already deleted.
		glog.V(3).Infof("No status for pod: %v", format.Pod(w.pod))
		return true
	}

	// Worker should terminate if pod is terminated.
	if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded {
		glog.V(3).Infof("Pod %v %v, exiting probe worker",
			format.Pod(w.pod), status.Phase)
		return false
	}

	c, ok := v1.GetContainerStatus(status.ContainerStatuses, w.container.Name)
	if !ok || len(c.ContainerID) == 0 {
		// Either the container has not been created yet, or it was deleted.
		glog.V(3).Infof("Probe target container not found: %v - %v",
			format.Pod(w.pod), w.container.Name)
		return true // Wait for more information.
	}

	if w.containerID.String() != c.ContainerID {
		if !w.containerID.IsEmpty() {
			w.resultsManager.Remove(w.containerID)
		}
		w.containerID = kubecontainer.ParseContainerID(c.ContainerID)
		w.resultsManager.Set(w.containerID, w.initialValue, w.pod)
		// We've got a new container; resume probing.
		w.onHold = false
	}

	if w.onHold {
		// Worker is on hold until there is a new container.
		return true
	}

	if c.State.Running == nil {
		glog.V(3).Infof("Non-running container probed: %v - %v",
			format.Pod(w.pod), w.container.Name)
		if !w.containerID.IsEmpty() {
			w.resultsManager.Set(w.containerID, results.Failure, w.pod)
		}
		// Abort if the container will not be restarted.
		return c.State.Terminated == nil ||
			w.pod.Spec.RestartPolicy != v1.RestartPolicyNever
	}

	if int32(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
		return true
	}

	result, err := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID)
	if err != nil {
		// Prober error, throw away the result.
		return true
	}

	if w.lastResult == result {
		w.resultRun++
	} else {
		w.lastResult = result
		w.resultRun = 1
	}

	if (result == results.Failure && w.resultRun < int(w.spec.FailureThreshold)) ||
		(result == results.Success && w.resultRun < int(w.spec.SuccessThreshold)) {
		// Success or failure is below threshold - leave the probe state unchanged.
		return true
	}

	w.resultsManager.Set(w.containerID, result, w.pod)

	if w.probeType == liveness && result == results.Failure {
		// The container failed a liveness check, so it will need to be restarted.
		// Stop probing until we see a new container ID. This is to reduce the
		// chance of hitting #21751, where running `docker exec` when a
		// container is being stopped may lead to corrupted container state.
		w.onHold = true
	}

	return true
}
Example #11
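// TestUpdatePodStatus verifies that UpdatePodStatus derives container
// readiness from the cached probe results and from the presence or absence
// of readiness workers.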
func TestUpdatePodStatus(t *testing.T) {
	unprobed := api.ContainerStatus{
		Name:        "unprobed_container",
		ContainerID: "test://unprobed_container_id",
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	probedReady := api.ContainerStatus{
		Name:        "probed_container_ready",
		ContainerID: "test://probed_container_ready_id",
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	probedPending := api.ContainerStatus{
		Name:        "probed_container_pending",
		ContainerID: "test://probed_container_pending_id",
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	probedUnready := api.ContainerStatus{
		Name:        "probed_container_unready",
		ContainerID: "test://probed_container_unready_id",
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	terminated := api.ContainerStatus{
		Name:        "terminated_container",
		ContainerID: "test://terminated_container_id",
		State: api.ContainerState{
			Terminated: &api.ContainerStateTerminated{},
		},
	}
	podStatus := api.PodStatus{
		Phase: api.PodRunning,
		ContainerStatuses: []api.ContainerStatus{
			unprobed, probedReady, probedPending, probedUnready, terminated,
		},
	}

	m := newTestManager()
	// no cleanup: using fake workers.

	// Setup probe "workers" and cached results.
	m.workers = map[probeKey]*worker{
		probeKey{testPodUID, unprobed.Name, liveness}:       {},
		probeKey{testPodUID, probedReady.Name, readiness}:   {},
		probeKey{testPodUID, probedPending.Name, readiness}: {},
		probeKey{testPodUID, probedUnready.Name, readiness}: {},
		probeKey{testPodUID, terminated.Name, readiness}:    {},
	}
	m.readinessManager.Set(kubecontainer.ParseContainerID(probedReady.ContainerID), results.Success, &api.Pod{})
	m.readinessManager.Set(kubecontainer.ParseContainerID(probedUnready.ContainerID), results.Failure, &api.Pod{})
	m.readinessManager.Set(kubecontainer.ParseContainerID(terminated.ContainerID), results.Success, &api.Pod{})

	m.UpdatePodStatus(testPodUID, &podStatus)

	expectedReadiness := map[probeKey]bool{
		probeKey{testPodUID, unprobed.Name, readiness}:      true,
		probeKey{testPodUID, probedReady.Name, readiness}:   true,
		probeKey{testPodUID, probedPending.Name, readiness}: false,
		probeKey{testPodUID, probedUnready.Name, readiness}: false,
		probeKey{testPodUID, terminated.Name, readiness}:    false,
	}
	for _, c := range podStatus.ContainerStatuses {
		expected, ok := expectedReadiness[probeKey{testPodUID, c.Name, readiness}]
		if !ok {
			t.Fatalf("Missing expectation for test case: %v", c.Name)
		}
		if expected != c.Ready {
			t.Errorf("Unexpected readiness for container %v: Expected %v but got %v",
				c.Name, expected, c.Ready)
		}
	}
}
Example #12
							{
								Image:   "gcr.io/google_containers/busybox:1.24",
								Name:    logContName,
								Command: []string{"sh", "-c", "echo " + logString},
							},
						},
					},
				}

				podClient.Create(logPod)
				err := framework.WaitForPodSuccessInNamespace(f.ClientSet, logPodName, ns)
				framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName)

				// get containerID from created Pod
				createdLogPod, err := podClient.Get(logPodName, metav1.GetOptions{})
				logConID := kubecontainer.ParseContainerID(createdLogPod.Status.ContainerStatuses[0].ContainerID)
				framework.ExpectNoError(err, "Failed to get pod: %s", logPodName)

				expectedlogFile := logDir + "/" + logPodName + "_" + ns + "_" + logContName + "-" + logConID.ID + ".log"

				checkPod := &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name: checkPodName,
					},
					Spec: v1.PodSpec{
						// this pod is expected to exit successfully
						RestartPolicy: v1.RestartPolicyNever,
						Containers: []v1.Container{
							{
								Image: "gcr.io/google_containers/busybox:1.24",
								Name:  checkContName,
Example #13
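// TestLifeCycleHooks exercises post-start and pre-stop lifecycle hook handling
// in the rkt runtime against a fake handler runner: containers with no hooks,
// with exec/HTTP hooks, and with invalid (empty) handlers.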
func TestLifeCycleHooks(t *testing.T) {
	runner := lifecycle.NewFakeHandlerRunner()
	fr := newFakeRktInterface()
	fs := newFakeSystemd()

	rkt := &Runtime{
		runner:              runner,
		apisvc:              fr,
		systemd:             fs,
		containerRefManager: kubecontainer.NewRefManager(),
	}

	tests := []struct {
		pod           *api.Pod
		runtimePod    *kubecontainer.Pod
		postStartRuns []string
		preStopRuns   []string
		err           error
	}{
		{
			// Case 0, container without any hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: "container-name-1"},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{ID: kubecontainer.BuildContainerID("rkt", "id-1")},
				},
			},
			[]string{},
			[]string{},
			nil,
		},
		{
			// Case 1, containers with post-start and pre-stop hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PostStart: &api.Handler{
									Exec: &api.ExecAction{},
								},
							},
						},
						{
							Name: "container-name-2",
							Lifecycle: &api.Lifecycle{
								PostStart: &api.Handler{
									HTTPGet: &api.HTTPGetAction{},
								},
							},
						},
						{
							Name: "container-name-3",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									Exec: &api.ExecAction{},
								},
							},
						},
						{
							Name: "container-name-4",
							Lifecycle: &api.Lifecycle{
								PreStop: &api.Handler{
									HTTPGet: &api.HTTPGetAction{},
								},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-4"),
						Name: "container-name-4",
					},
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-3"),
						Name: "container-name-3",
					},
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-2"),
						Name: "container-name-2",
					},
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-1"),
						Name: "container-name-1",
					},
				},
			},
			[]string{
				"exec on pod: pod-1_ns-1(uid-1), container: container-name-1: rkt://uuid:container-name-1",
				"http-get on pod: pod-1_ns-1(uid-1), container: container-name-2: rkt://uuid:container-name-2",
			},
			[]string{
				"exec on pod: pod-1_ns-1(uid-1), container: container-name-3: rkt://uuid:container-name-3",
				"http-get on pod: pod-1_ns-1(uid-1), container: container-name-4: rkt://uuid:container-name-4",
			},
			nil,
		},
		{
			// Case 2, one container with invalid hooks.
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:      "pod-1",
					Namespace: "ns-1",
					UID:       "uid-1",
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name: "container-name-1",
							Lifecycle: &api.Lifecycle{
								PostStart: &api.Handler{},
								PreStop:   &api.Handler{},
							},
						},
					},
				},
			},
			&kubecontainer.Pod{
				Containers: []*kubecontainer.Container{
					{
						ID:   kubecontainer.ParseContainerID("rkt://uuid:container-name-1"),
						Name: "container-name-1",
					},
				},
			},
			[]string{},
			[]string{},
			errors.NewAggregate([]error{fmt.Errorf("Invalid handler: %v", &api.Handler{})}),
		},
	}

	for i, tt := range tests {
		testCaseHint := fmt.Sprintf("test case #%d", i)

		pod := &rktapi.Pod{Id: "uuid"}
		for _, c := range tt.runtimePod.Containers {
			pod.Apps = append(pod.Apps, &rktapi.App{
				Name:  c.Name,
				State: rktapi.AppState_APP_STATE_RUNNING,
			})
		}
		fr.pods = []*rktapi.Pod{pod}

		// Run post-start hooks
		err := rkt.runLifecycleHooks(tt.pod, tt.runtimePod, lifecyclePostStartHook)
		assert.Equal(t, tt.err, err, testCaseHint)

		sort.Sort(sortedStringList(tt.postStartRuns))
		sort.Sort(sortedStringList(runner.HandlerRuns))

		assert.Equal(t, tt.postStartRuns, runner.HandlerRuns, testCaseHint)

		runner.Reset()

		// Run pre-stop hooks.
		err = rkt.runLifecycleHooks(tt.pod, tt.runtimePod, lifecyclePreStopHook)
		assert.Equal(t, tt.err, err, testCaseHint)

		sort.Sort(sortedStringList(tt.preStopRuns))
		sort.Sort(sortedStringList(runner.HandlerRuns))

		assert.Equal(t, tt.preStopRuns, runner.HandlerRuns, testCaseHint)

		runner.Reset()
	}
}
Example #14
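// TestUpdatePodStatus verifies that readiness in the pod status reflects the
// cached readiness values and defaults correctly for unprobed, pending, and
// terminated containers.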
func TestUpdatePodStatus(t *testing.T) {
	const podUID = "pod_uid"
	unprobed := api.ContainerStatus{
		Name:        "unprobed_container",
		ContainerID: "test://unprobed_container_id",
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	probedReady := api.ContainerStatus{
		Name:        "probed_container_ready",
		ContainerID: "test://probed_container_ready_id",
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	probedPending := api.ContainerStatus{
		Name:        "probed_container_pending",
		ContainerID: "test://probed_container_pending_id",
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	probedUnready := api.ContainerStatus{
		Name:        "probed_container_unready",
		ContainerID: "test://probed_container_unready_id",
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
	terminated := api.ContainerStatus{
		Name:        "terminated_container",
		ContainerID: "test://terminated_container_id",
		State: api.ContainerState{
			Terminated: &api.ContainerStateTerminated{},
		},
	}
	podStatus := api.PodStatus{
		Phase: api.PodRunning,
		ContainerStatuses: []api.ContainerStatus{
			unprobed, probedReady, probedPending, probedUnready, terminated,
		},
	}

	m := newTestManager()
	// Setup probe "workers" and cached results.
	m.readinessProbes = map[containerPath]*worker{
		containerPath{podUID, probedReady.Name}:   {},
		containerPath{podUID, probedPending.Name}: {},
		containerPath{podUID, probedUnready.Name}: {},
		containerPath{podUID, terminated.Name}:    {},
	}

	m.readinessCache.setReadiness(kubecontainer.ParseContainerID(probedReady.ContainerID), true)
	m.readinessCache.setReadiness(kubecontainer.ParseContainerID(probedUnready.ContainerID), false)
	m.readinessCache.setReadiness(kubecontainer.ParseContainerID(terminated.ContainerID), true)

	m.UpdatePodStatus(podUID, &podStatus)

	expectedReadiness := map[containerPath]bool{
		containerPath{podUID, unprobed.Name}:      true,
		containerPath{podUID, probedReady.Name}:   true,
		containerPath{podUID, probedPending.Name}: false,
		containerPath{podUID, probedUnready.Name}: false,
		containerPath{podUID, terminated.Name}:    false,
	}
	for _, c := range podStatus.ContainerStatuses {
		expected, ok := expectedReadiness[containerPath{podUID, c.Name}]
		if !ok {
			t.Fatalf("Missing expectation for test case: %v", c.Name)
		}
		if expected != c.Ready {
			t.Errorf("Unexpected readiness for container %v: Expected %v but got %v",
				c.Name, expected, c.Ready)
		}
	}
}
Example #15
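// getPodContainerID returns the runtime ID of the pod's first container, or
// an empty string if no container statuses have been reported yet.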
func getPodContainerID(pod *kapi.Pod) string {
	if len(pod.Status.ContainerStatuses) > 0 {
		return kcontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
	}
	return ""
}