// getPodsScheduled returns the currently scheduled and not-yet-scheduled Pods,
// skipping Pods assigned to master nodes.
func getPodsScheduled(pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
	for _, pod := range pods.Items {
		if !masterNodes.Has(pod.Spec.NodeName) {
			if pod.Spec.NodeName != "" {
				_, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
				// We can't assume that scheduledCondition is always set when a Pod is assigned to a
				// Node; e.g. the DaemonSet controller doesn't set it when assigning a Pod to a Node.
				// The kubelet currently sets this condition when it gets a Pod without it, but
				// expecting it to always be non-nil would introduce a rare race condition.
				if scheduledCondition != nil {
					Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
				}
				scheduledPods = append(scheduledPods, pod)
			} else {
				_, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
				// Keep the Reason access inside the nil check to avoid dereferencing a missing condition.
				if scheduledCondition != nil {
					Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
					if scheduledCondition.Reason == "Unschedulable" {
						notScheduledPods = append(notScheduledPods, pod)
					}
				}
			}
		}
	}
	return
}
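A hedged usage sketch, not from the source: in an e2e-style check, the helper above could back an assertion that every pending pod eventually landed on a node. The names allPods and expectedScheduled are illustrative assumptions.

// Hypothetical usage; allPods is assumed to have been listed from the API server
// and expectedScheduled is whatever count the test expects.
scheduledPods, notScheduledPods := getPodsScheduled(allPods)
Expect(len(notScheduledPods)).To(Equal(0))
Expect(len(scheduledPods)).To(Equal(expectedScheduled))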
func podRunningOrUnschedulable(pod *v1.Pod) bool {
	_, cond := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
	if cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
		return true
	}
	running, _ := testutils.PodRunningReady(pod)
	return running
}
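A minimal sketch, not from the source, of how a condition helper like podRunningOrUnschedulable could drive a polling wait. It assumes the time, k8s.io/apimachinery/pkg/util/wait, and clientset packages are imported; the helper name and parameters are hypothetical, and the exact Pods().Get signature depends on the client-go version in use.

// Hypothetical wait helper: polls until the pod is running and ready, or has
// been reported unschedulable by the scheduler.
func waitForRunningOrUnschedulable(c clientset.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		// Get signature shown for an older, context-free client-go; adjust as needed.
		pod, err := c.Core().Pods(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return podRunningOrUnschedulable(pod), nil
	})
}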
// List returns all unscheduled pods.
func (unschedulablePodLister *UnschedulablePodLister) List() ([]*apiv1.Pod, error) {
	var unschedulablePods []*apiv1.Pod
	allPods, err := unschedulablePodLister.podLister.List(labels.Everything())
	if err != nil {
		return unschedulablePods, err
	}
	for _, pod := range allPods {
		_, condition := apiv1.GetPodCondition(&pod.Status, apiv1.PodScheduled)
		if condition != nil && condition.Status == apiv1.ConditionFalse && condition.Reason == "Unschedulable" {
			unschedulablePods = append(unschedulablePods, pod)
		}
	}
	return unschedulablePods, nil
}
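A hypothetical consumer of the lister above, showing only the intended call pattern; the scaleUp function is an assumption, not part of the source.

// Illustrative only: hand the unschedulable pods to a hypothetical scale-up step.
unschedulablePods, err := unschedulablePodLister.List()
if err != nil {
	glog.Errorf("Failed to list unschedulable pods: %v", err)
	return
}
if len(unschedulablePods) > 0 {
	scaleUp(unschedulablePods) // hypothetical
}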
	Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
	Expect(err).To(BeNil())
}

Expect(endPod.Status.Phase).To(Equal(v1.PodSucceeded))
_, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionTrue))

Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
	Expect(status.Ready).To(BeTrue())
	Expect(status.State.Terminated).NotTo(BeNil())
	Expect(status.State.Terminated.ExitCode).To(BeZero())
}
})

It("should invoke init containers on a RestartAlways pod", func() {
	framework.SkipIfContainerRuntimeIs("rkt") // #25988

	By("creating the pod")
// updateStatusInternal updates the internal status cache, and queues an update to the API server
// if necessary. It returns whether an update was triggered.
// This method IS NOT THREAD SAFE and must be called from a locked function.
func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUpdate bool) bool {
	var oldStatus v1.PodStatus
	cachedStatus, isCached := m.podStatuses[pod.UID]
	if isCached {
		oldStatus = cachedStatus.status
	} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
		oldStatus = mirrorPod.Status
	} else {
		oldStatus = pod.Status
	}

	// Set ReadyCondition.LastTransitionTime.
	if _, readyCondition := v1.GetPodCondition(&status, v1.PodReady); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := metav1.Now()
		_, oldReadyCondition := v1.GetPodCondition(&oldStatus, v1.PodReady)
		if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
			lastTransitionTime = oldReadyCondition.LastTransitionTime
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// Set InitializedCondition.LastTransitionTime.
	if _, initCondition := v1.GetPodCondition(&status, v1.PodInitialized); initCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := metav1.Now()
		_, oldInitCondition := v1.GetPodCondition(&oldStatus, v1.PodInitialized)
		if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status {
			lastTransitionTime = oldInitCondition.LastTransitionTime
		}
		initCondition.LastTransitionTime = lastTransitionTime
	}

	// Ensure that the start time does not change across updates.
	if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
		status.StartTime = oldStatus.StartTime
	} else if status.StartTime.IsZero() {
		// If the status has no start time, set an initial time now.
		now := metav1.Now()
		status.StartTime = &now
	}

	normalizeStatus(pod, &status)
	// The intent here is to prevent concurrent updates to a pod's status from
	// clobbering each other, so the phase of a pod progresses monotonically.
	if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate {
		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
		return false // No new status.
	}

	newStatus := versionedPodStatus{
		status:       status,
		version:      cachedStatus.version + 1,
		podName:      pod.Name,
		podNamespace: pod.Namespace,
	}
	m.podStatuses[pod.UID] = newStatus

	select {
	case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}:
		return true
	default:
		// Let the periodic syncBatch handle the update if the channel is full.
		// We can't block, since we hold the mutex lock.
		glog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v",
			format.Pod(pod), status)
		return false
	}
}
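Because updateStatusInternal is not thread safe, its callers must hold the cache lock. A minimal sketch of such a locked wrapper, assuming the manager guards its cache with a mutex field named podStatusesLock (the field name and the absence of a deep copy of status are assumptions for illustration):

// Hypothetical locked wrapper; updateStatusInternal only runs while the
// status cache mutex is held.
func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()
	m.updateStatusInternal(pod, status, false)
}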