func getRestartDelay(podClient *framework.PodClient, podName string, containerName string) (time.Duration, error) {
	beginTime := time.Now()
	for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
		time.Sleep(time.Second)
		pod, err := podClient.Get(podName, metav1.GetOptions{})
		framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
		status, ok := v1.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
		if !ok {
			framework.Logf("getRestartDelay: status missing")
			continue
		}

		if status.State.Waiting == nil && status.State.Running != nil && status.LastTerminationState.Terminated != nil && status.State.Running.StartedAt.Time.After(beginTime) {
			startedAt := status.State.Running.StartedAt.Time
			finishedAt := status.LastTerminationState.Terminated.FinishedAt.Time
			framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt))
			return startedAt.Sub(finishedAt), nil
		}
	}
	return 0, fmt.Errorf("timeout getting pod restart delay")
}
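// getRestartDelayUsageSketch is a hypothetical illustration, not part of the
// source tests: it shows how getRestartDelay could be called twice against a
// crash-looping container to check that the kubelet's restart back-off grows
// between consecutive restarts. The pod and container names are assumptions
// made for illustration only.
func getRestartDelayUsageSketch(podClient *framework.PodClient) {
	delay1, err := getRestartDelay(podClient, "back-off-pod", "back-off")
	framework.ExpectNoError(err)
	delay2, err := getRestartDelay(podClient, "back-off-pod", "back-off")
	framework.ExpectNoError(err)
	if delay2 < delay1 {
		framework.Failf("expected back-off delay to grow: first %v, then %v", delay1, delay2)
	}
}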
// testHostIP tests that a pod gets a host IP
func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
	By("creating pod")
	podClient.CreateSync(pod)

	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
		if p.Status.HostIP != "" {
			framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds", p.Name, time.Since(t).Seconds())
		}
		framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
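// testHostIPUsageSketch is a hypothetical illustration, not part of the source
// tests: it builds a minimal long-running pod and hands it to testHostIP. The
// pod name, container name, and sleep command are assumptions for illustration;
// the busybox image matches the one used elsewhere in these tests.
func testHostIPUsageSketch(podClient *framework.PodClient) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-host-ip-test"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "test",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: []string{"sleep", "600"},
				},
			},
		},
	}
	testHostIP(podClient, pod)
}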
By("Watching for error events or started pod") // watch for events instead of termination of pod because the kubelet deletes // failed pods without running containers. This would create a race as the pod // might have already been deleted here. ev, err := waitForPodErrorEventOrStarted(pod) Expect(err).NotTo(HaveOccurred()) if ev != nil && ev.Reason == sysctl.UnsupportedReason { framework.Skipf("No sysctl support in Docker <1.12") } Expect(ev).To(BeNil()) By("Waiting for pod completion") err = f.WaitForPodNoLongerRunning(pod.Name) Expect(err).NotTo(HaveOccurred()) pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) By("Checking that the pod succeeded") Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded)) By("Getting logs from the pod") log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) Expect(err).NotTo(HaveOccurred()) By("Checking that the sysctl is actually updated") Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1")) }) It("should support unsafe sysctls which are actually whitelisted", func() { pod := testPod()
By("verifying pod creation was observed") select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { framework.Failf("Failed to observe pod creation: %v", event) } case <-time.After(framework.PodStartTimeout): Fail("Timeout while waiting for pod creation") } // We need to wait for the pod to be running, otherwise the deletion // may be carried out immediately rather than gracefully. framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) // save the running pod pod, err = podClient.Get(pod.Name) Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod") framework.Logf("running pod: %#v", pod) By("deleting the pod gracefully") err = podClient.Delete(pod.Name, api.NewDeleteOptions(30)) Expect(err).NotTo(HaveOccurred(), "failed to delete pod") By("verifying the kubelet observed the termination notice") Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) { podList, err := framework.GetKubeletPods(f.Client, pod.Spec.NodeName) if err != nil { framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err) return false, nil } for _, kubeletPod := range podList.Items {
)

var _ = framework.KubeDescribe("Probing container", func() {
	f := framework.NewDefaultFramework("container-probe")
	var podClient *framework.PodClient
	probe := webserverProbeBuilder{}

	BeforeEach(func() {
		podClient = f.PodClient()
	})

	It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
		p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
		f.WaitForPodReady(p.Name)

		p, err := podClient.Get(p.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		isReady, err := testutils.PodRunningReady(p)
		framework.ExpectNoError(err)
		Expect(isReady).To(BeTrue(), "pod should be ready")

		// We assume the pod became ready when the container became ready. This
		// is true for a single container pod.
		readyTime, err := getTransitionTimeForReadyCondition(p)
		framework.ExpectNoError(err)
		startedTime, err := getContainerStartedTime(p, probTestContainerName)
		framework.ExpectNoError(err)

		framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
		initialDelay := probTestInitialDelaySeconds * time.Second
		if readyTime.Sub(startedTime) < initialDelay {
	})

	It("should evict the pod using the most disk space [Slow]", func() {
		evictionOccurred := false
		nodeDiskPressureCondition := false
		podRescheduleable := false
		Eventually(func() error {
			// Avoid the test using up all the disk space
			err := checkDiskUsage(0.05)
			if err != nil {
				return err
			}

			// The pod should be evicted.
			if !evictionOccurred {
				podData, err := podClient.Get(busyPodName)
				if err != nil {
					return err
				}

				err = verifyPodEviction(podData)
				if err != nil {
					return err
				}

				podData, err = podClient.Get(idlePodName)
				if err != nil {
					return err
				}

				if podData.Status.Phase != api.PodRunning {
)

var _ = framework.KubeDescribe("Probing container", func() {
	f := framework.NewDefaultFramework("container-probe")
	var podClient *framework.PodClient
	probe := webserverProbeBuilder{}

	BeforeEach(func() {
		podClient = f.PodClient()
	})

	It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
		p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
		f.WaitForPodReady(p.Name)

		p, err := podClient.Get(p.Name)
		framework.ExpectNoError(err)
		isReady, err := testutils.PodRunningReady(p)
		framework.ExpectNoError(err)
		Expect(isReady).To(BeTrue(), "pod should be ready")

		// We assume the pod became ready when the container became ready. This
		// is true for a single container pod.
		readyTime, err := getTransitionTimeForReadyCondition(p)
		framework.ExpectNoError(err)
		startedTime, err := getContainerStartedTime(p, probTestContainerName)
		framework.ExpectNoError(err)

		framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
		initialDelay := probTestInitialDelaySeconds * time.Second
		if readyTime.Sub(startedTime) < initialDelay {
				// Don't restart the Pod since it is expected to exit
				RestartPolicy: v1.RestartPolicyNever,
				Containers: []v1.Container{
					{
						Image:   "gcr.io/google_containers/busybox:1.24",
						Name:    podName,
						Command: []string{"/bin/false"},
					},
				},
			},
		})
	})

	It("should have an error terminated reason", func() {
		Eventually(func() error {
			podData, err := podClient.Get(podName)
			if err != nil {
				return err
			}
			if len(podData.Status.ContainerStatuses) != 1 {
				return fmt.Errorf("expected only one container in the pod %q", podName)
			}
			contTerminatedState := podData.Status.ContainerStatuses[0].State.Terminated
			if contTerminatedState == nil {
				return fmt.Errorf("expected state to be terminated. Got pod status: %+v", podData.Status)
			}
			if contTerminatedState.Reason != "Error" {
				return fmt.Errorf("expected terminated state reason to be error. Got %+v", contTerminatedState)
			}
			return nil
		}, time.Minute, time.Second*4).Should(BeNil())
	})

	It("should evict the pod using the most disk space [Slow]", func() {
		evictionOccurred := false
		nodeDiskPressureCondition := false
		podRescheduleable := false
		Eventually(func() error {
			// Avoid the test using up all the disk space
			err := checkDiskUsage(0.05)
			if err != nil {
				return err
			}

			// The pod should be evicted.
			if !evictionOccurred {
				podData, err := podClient.Get(busyPodName, metav1.GetOptions{})
				if err != nil {
					return err
				}

				err = verifyPodEviction(podData)
				if err != nil {
					return err
				}

				podData, err = podClient.Get(idlePodName, metav1.GetOptions{})
				if err != nil {
					return err
				}

				if podData.Status.Phase != v1.PodRunning {
				// Don't restart the Pod since it is expected to exit
				RestartPolicy: v1.RestartPolicyNever,
				Containers: []v1.Container{
					{
						Image:   "gcr.io/google_containers/busybox:1.24",
						Name:    podName,
						Command: []string{"/bin/false"},
					},
				},
			},
		})
	})

	It("should have an error terminated reason", func() {
		Eventually(func() error {
			podData, err := podClient.Get(podName, metav1.GetOptions{})
			if err != nil {
				return err
			}
			if len(podData.Status.ContainerStatuses) != 1 {
				return fmt.Errorf("expected only one container in the pod %q", podName)
			}
			contTerminatedState := podData.Status.ContainerStatuses[0].State.Terminated
			if contTerminatedState == nil {
				return fmt.Errorf("expected state to be terminated. Got pod status: %+v", podData.Status)
			}
			if contTerminatedState.Reason != "Error" {
				return fmt.Errorf("expected terminated state reason to be error. Got %+v", contTerminatedState)
			}
			return nil
		}, time.Minute, time.Second*4).Should(BeNil())