// podRunningOrUnschedulable returns true if the pod is running and ready, or
// if the scheduler has marked it unschedulable.
func podRunningOrUnschedulable(pod *v1.Pod) bool {
	_, cond := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
	if cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
		return true
	}
	running, _ := testutils.PodRunningReady(pod)
	return running
}
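// Usage sketch (illustrative, not part of the original file; the helper name
// countSettledPods is hypothetical): count the pods in a list that have
// "settled", i.e. are running and ready or were rejected as unschedulable.
func countSettledPods(pods []*v1.Pod) int {
	settled := 0
	for _, pod := range pods {
		if podRunningOrUnschedulable(pod) {
			settled++
		}
	}
	return settled
}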
// waitForPods polls every 10 seconds, for up to timeout, until at least
// podCount pods are running and ready. It returns the running and ready pods
// it last observed, which may be fewer than podCount if the timeout is hit.
func waitForPods(f *framework.Framework, podCount int, timeout time.Duration) (runningPods []*v1.Pod) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
		podList, err := f.PodClient().List(v1.ListOptions{})
		if err != nil {
			framework.Logf("Failed to list pods on node: %v", err)
			continue
		}

		runningPods = []*v1.Pod{}
		for i := range podList.Items {
			// Take the address of the slice element, not of the loop
			// variable, so each appended pointer refers to a distinct pod.
			pod := &podList.Items[i]
			if r, err := testutils.PodRunningReady(pod); err != nil || !r {
				continue
			}
			runningPods = append(runningPods, pod)
		}
		framework.Logf("Running pod count %d", len(runningPods))
		if len(runningPods) >= podCount {
			break
		}
	}
	return runningPods
}
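// Usage sketch (illustrative, not from the original file; the helper name is
// hypothetical): how waitForPods is typically driven from a test, failing the
// test when the expected count is not reached within the timeout.
func waitForPodsOrFail(f *framework.Framework, expected int, timeout time.Duration) []*v1.Pod {
	running := waitForPods(f, expected, timeout)
	if len(running) < expected {
		framework.Failf("Only %d of %d pods became running and ready within %v", len(running), expected, timeout)
	}
	return running
}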
// printStatusAndLogsForNotReadyPods prints the status, and the current and
// previous container logs, of every named pod in namespace ns that is not
// running and ready.
func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*v1.Pod) {
	printFn := func(id, log string, err error, previous bool) {
		prefix := "Retrieving log for container"
		if previous {
			prefix = "Retrieving log for the last terminated container"
		}
		if err != nil {
			framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
		} else {
			framework.Logf("%s %s:\n%s\n", prefix, id, log)
		}
	}
	podNameSet := sets.NewString(podNames...)
	for _, p := range pods {
		if p.Namespace != ns {
			continue
		}
		if !podNameSet.Has(p.Name) {
			continue
		}
		if ok, _ := testutils.PodRunningReady(p); ok {
			continue
		}
		framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
		// Print the logs of the containers if the pod is not running and ready.
		for _, container := range p.Status.ContainerStatuses {
			cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
			log, err := framework.GetPodLogs(c, p.Namespace, p.Name, container.Name)
			printFn(cIdentifer, log, err, false)
			// If the container restarted, also fetch the log of its previous
			// instance rather than reprinting the current one.
			if container.RestartCount > 0 {
				log, err := framework.GetPreviousPodLogs(c, p.Namespace, p.Name, container.Name)
				printFn(cIdentifer, log, err, true)
			}
		}
	}
}
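// Conversion sketch (illustrative, not part of the original file):
// printStatusAndLogsForNotReadyPods takes []*v1.Pod, so a freshly listed
// *v1.PodList must have its items converted to pointers first, again taking
// the address of each slice element rather than of a loop variable.
func podListToPointers(podList *v1.PodList) []*v1.Pod {
	pods := make([]*v1.Pod, 0, len(podList.Items))
	for i := range podList.Items {
		pods = append(pods, &podList.Items[i])
	}
	return pods
}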
var _ = framework.KubeDescribe("Probing container", func() {
	f := framework.NewDefaultFramework("container-probe")
	var podClient *framework.PodClient
	probe := webserverProbeBuilder{}

	BeforeEach(func() {
		podClient = f.PodClient()
	})

	It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
		p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
		f.WaitForPodReady(p.Name)

		p, err := podClient.Get(p.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		isReady, err := testutils.PodRunningReady(p)
		framework.ExpectNoError(err)
		Expect(isReady).To(BeTrue(), "pod should be ready")

		// We assume the pod became ready when the container became ready.
		// This is true for a single-container pod.
		readyTime, err := getTransitionTimeForReadyCondition(p)
		framework.ExpectNoError(err)
		startedTime, err := getContainerStartedTime(p, probTestContainerName)
		framework.ExpectNoError(err)

		framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
		initialDelay := probTestInitialDelaySeconds * time.Second
		if readyTime.Sub(startedTime) < initialDelay {
			framework.Failf("Pod became ready before its %v initial delay", initialDelay)
		}
	})
})
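// Illustrative sketch (hypothetical helper, not in the original file): the
// readiness-delay assertion above, factored out. A pod guarded by a readiness
// probe with an initial delay must not report Ready before that delay has
// elapsed after its container started.
func checkReadyAfterInitialDelay(startedTime, readyTime time.Time, initialDelay time.Duration) error {
	elapsed := readyTime.Sub(startedTime)
	if elapsed < initialDelay {
		return fmt.Errorf("pod became ready %v after container start, before its %v initial delay", elapsed, initialDelay)
	}
	return nil
}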