// Returns the lists of currently scheduled and not scheduled Pods.
func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) {
	for _, pod := range pods.Items {
		if !masterNodes.Has(pod.Spec.NodeName) {
			if pod.Spec.NodeName != "" {
				_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
				// We can't assume the scheduledCondition is always set when a Pod is assigned
				// to a Node: the DaemonSet controller, for example, doesn't set it when binding
				// a Pod to a Node. The Kubelet currently sets the condition when it receives a
				// Pod without one, but assuming it is always non-nil would introduce a rare race.
				if scheduledCondition != nil {
					Expect(scheduledCondition.Status).To(Equal(api.ConditionTrue))
				}
				scheduledPods = append(scheduledPods, pod)
			} else {
				_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
				if scheduledCondition != nil {
					Expect(scheduledCondition.Status).To(Equal(api.ConditionFalse))
					// Only dereference Reason inside the nil check; doing so
					// unconditionally would panic when the condition is absent.
					if scheduledCondition.Reason == "Unschedulable" {
						notScheduledPods = append(notScheduledPods, pod)
					}
				}
			}
		}
	}
	return
}
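Since the PodScheduled condition can legitimately be absent, callers have to nil-check the returned pointer before dereferencing it. Below is a minimal, self-contained sketch of that pattern; the PodCondition/PodStatus types and the getCondition helper are illustrative stand-ins for the real api package, not its actual definitions.

package main

import "fmt"

// Illustrative stand-ins for api.PodCondition and api.PodStatus; the real
// types carry many more fields.
type PodCondition struct {
	Type   string
	Status string
	Reason string
}

type PodStatus struct {
	Conditions []PodCondition
}

// getCondition mirrors the shape of api.GetPodCondition: it returns the index
// of the matching condition and a pointer to it, or -1 and nil when absent.
func getCondition(status *PodStatus, condType string) (int, *PodCondition) {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			return i, &status.Conditions[i]
		}
	}
	return -1, nil
}

func main() {
	status := PodStatus{Conditions: []PodCondition{
		{Type: "PodScheduled", Status: "False", Reason: "Unschedulable"},
	}}
	// Nil-check before touching Reason; the condition may be missing entirely.
	if _, cond := getCondition(&status, "PodScheduled"); cond != nil && cond.Reason == "Unschedulable" {
		fmt.Println("pod is unschedulable")
	}
}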
Example #2
func podRunningOrUnschedulable(pod *api.Pod) bool {
	_, cond := api.GetPodCondition(&pod.Status, api.PodScheduled)
	if cond != nil && cond.Status == api.ConditionFalse && cond.Reason == "Unschedulable" {
		return true
	}
	running, _ := framework.PodRunningReady(pod)
	return running
}
Example #3
// Returns the lists of currently scheduled and not scheduled Pods.
func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) {
	for _, pod := range pods.Items {
		if !masterNodes.Has(pod.Spec.NodeName) {
			if pod.Spec.NodeName != "" {
				_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
				Expect(scheduledCondition).NotTo(BeNil())
				Expect(scheduledCondition.Status).To(Equal(api.ConditionTrue))
				scheduledPods = append(scheduledPods, pod)
			} else {
				_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
				Expect(scheduledCondition).NotTo(BeNil())
				Expect(scheduledCondition.Status).To(Equal(api.ConditionFalse))
				notScheduledPods = append(notScheduledPods, pod)
			}
		}
	}
	return
}
Example #4
func resetPodScheduledCondition(kubeClient *kube_client.Client, pod *kube_api.Pod) error {
	_, condition := kube_api.GetPodCondition(&pod.Status, kube_api.PodScheduled)
	if condition != nil {
		condition.Status = kube_api.ConditionUnknown
		condition.LastTransitionTime = kube_api_unversioned.Now()
		_, err := kubeClient.Pods(pod.Namespace).UpdateStatus(pod)
		return err
	}
	return fmt.Errorf("Expected condition PodScheduled")
}
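One detail worth noting: GetPodCondition returns a pointer into the pod's own Conditions slice, which is why mutating condition.Status above changes pod.Status in place and a plain UpdateStatus(pod) call persists the edit. A stand-alone sketch of that aliasing behavior, again with hypothetical stand-in types rather than the real kube_api ones:

package main

import "fmt"

// Hypothetical stand-ins for the kube_api types used above.
type PodCondition struct {
	Type   string
	Status string
}

type PodStatus struct {
	Conditions []PodCondition
}

// getCondition returns a pointer into status.Conditions, matching how
// GetPodCondition behaves in the examples above.
func getCondition(status *PodStatus, condType string) *PodCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			return &status.Conditions[i]
		}
	}
	return nil
}

func main() {
	status := PodStatus{Conditions: []PodCondition{{Type: "PodScheduled", Status: "True"}}}
	if cond := getCondition(&status, "PodScheduled"); cond != nil {
		cond.Status = "Unknown" // writes through to status.Conditions[0]
	}
	fmt.Println(status.Conditions[0].Status) // prints "Unknown"
}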
Example #5
// Returns pods whose PodScheduled condition has a LastTransitionTime after
// the threshold.
// Each pod must be in condition "Scheduled: False; Reason: Unschedulable".
// NOTE: This function must be in sync with resetOldPods.
func filterOldPods(pods []*kube_api.Pod, threshold time.Time) []*kube_api.Pod {
	var result []*kube_api.Pod
	for _, pod := range pods {
		_, condition := kube_api.GetPodCondition(&pod.Status, kube_api.PodScheduled)
		if condition != nil && condition.LastTransitionTime.After(threshold) {
			result = append(result, pod)
		}
	}
	return result
}
Example #6
func resetPodScheduledConditionForPod(kubeClient *kube_client.Client, pod *kube_api.Pod) error {
	_, condition := kube_api.GetPodCondition(&pod.Status, kube_api.PodScheduled)
	if condition != nil {
		glog.V(4).Infof("Reseting pod condition for %s/%s, last transition: %s",
			pod.Namespace, pod.Name, condition.LastTransitionTime.Time.String())
		condition.Status = kube_api.ConditionUnknown
		condition.LastTransitionTime = kube_api_unversioned.Now()
		_, err := kubeClient.Pods(pod.Namespace).UpdateStatus(pod)
		return err
	}
	return fmt.Errorf("Expected condition PodScheduled")
}
Example #7
// Resets pod condition PodScheduled to "unknown" for all the pods with LastTransitionTime
// not after the threshold time.
// NOTE: This function must be in sync with filterOldPods.
func resetOldPods(kubeClient *kube_client.Client, pods []*kube_api.Pod, threshold time.Time) {
	for _, pod := range pods {
		_, condition := kube_api.GetPodCondition(&pod.Status, kube_api.PodScheduled)
		if condition != nil && !condition.LastTransitionTime.After(threshold) {
			glog.V(4).Infof("Reseting pod condition for %s/%s, last transition: %s",
				pod.Namespace, pod.Name, condition.LastTransitionTime.Time.String())
			if err := resetPodScheduledCondition(kubeClient, pod); err != nil {
				glog.Errorf("Error during reseting pod condition for %s/%s: %v", pod.Namespace, pod.Name, err)
			}
		}
	}
}
Example #8
// SlicePodsByPodScheduledTime slices the given pod array into those whose PodScheduled
// condition was updated after the threshold, and the rest.
// Each pod must be in condition "Scheduled: False; Reason: Unschedulable"
func SlicePodsByPodScheduledTime(pods []*kube_api.Pod, threshold time.Time) (oldPods []*kube_api.Pod, newPods []*kube_api.Pod) {
	for _, pod := range pods {
		_, condition := kube_api.GetPodCondition(&pod.Status, kube_api.PodScheduled)
		if condition != nil {
			if condition.LastTransitionTime.After(threshold) {
				newPods = append(newPods, pod)
			} else {
				oldPods = append(oldPods, pod)
			}
		}
	}
	return
}
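The partition above is driven entirely by LastTransitionTime.After(threshold). A small self-contained sketch of the same slicing logic, using a hypothetical Pod stand-in with the timestamp hoisted to a field so the condition lookup from the original can be elided:

package main

import (
	"fmt"
	"time"
)

// Hypothetical stand-in: the real code reads this timestamp from the pod's
// PodScheduled condition.
type Pod struct {
	Name               string
	LastTransitionTime time.Time
}

// slicePodsByTime mirrors the partition above: pods whose condition moved
// after the threshold are "new", the rest are "old".
func slicePodsByTime(pods []*Pod, threshold time.Time) (oldPods, newPods []*Pod) {
	for _, pod := range pods {
		if pod.LastTransitionTime.After(threshold) {
			newPods = append(newPods, pod)
		} else {
			oldPods = append(oldPods, pod)
		}
	}
	return
}

func main() {
	threshold := time.Now()
	pods := []*Pod{
		{Name: "a", LastTransitionTime: threshold.Add(-time.Minute)},
		{Name: "b", LastTransitionTime: threshold.Add(time.Minute)},
	}
	oldPods, newPods := slicePodsByTime(pods, threshold)
	fmt.Println(len(oldPods), len(newPods)) // prints "1 1"
}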
Example #9
// List returns all pods that are currently unschedulable.
func (unschedulablePodLister *UnschedulablePodLister) List() ([]*kube_api.Pod, error) {
	var unschedulablePods []*kube_api.Pod
	allPods, err := unschedulablePodLister.podLister.List(labels.Everything())
	if err != nil {
		return unschedulablePods, err
	}
	for _, pod := range allPods {
		_, condition := kube_api.GetPodCondition(&pod.Status, kube_api.PodScheduled)
		if condition != nil && condition.Status == kube_api.ConditionFalse && condition.Reason == "Unschedulable" {
			unschedulablePods = append(unschedulablePods, pod)
		}
	}
	return unschedulablePods, nil
}
Example #10
// generateAPIPodStatus creates the final API pod status for a pod, given the
// internal pod status.
func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus {
	glog.V(3).Infof("Generating status for %q", format.Pod(pod))

	// Check if an internal module has requested that the pod be evicted.
	for _, podSyncHandler := range kl.PodSyncHandlers {
		if result := podSyncHandler.ShouldEvict(pod); result.Evict {
			return api.PodStatus{
				Phase:   api.PodFailed,
				Reason:  result.Reason,
				Message: result.Message,
			}
		}
	}

	s := kl.convertStatusToAPIStatus(pod, podStatus)

	// Assume info is ready to process
	spec := &pod.Spec
	allStatus := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
	s.Phase = GetPhase(spec, allStatus)
	kl.probeManager.UpdatePodStatus(pod.UID, s)
	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
	// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because
	// convertStatusToAPIStatus() does not create one. If the existing PodStatus has a PodScheduled
	// condition, copy it into s and make sure it is set to true; if it does not, create one set to true.
	if _, oldPodScheduled := api.GetPodCondition(&pod.Status, api.PodScheduled); oldPodScheduled != nil {
		s.Conditions = append(s.Conditions, *oldPodScheduled)
	}
	api.UpdatePodCondition(&pod.Status, &api.PodCondition{
		Type:   api.PodScheduled,
		Status: api.ConditionTrue,
	})

	if !kl.standaloneMode {
		hostIP, err := kl.getHostIPAnyWay()
		if err != nil {
			glog.V(4).Infof("Cannot get host IP: %v", err)
		} else {
			s.HostIP = hostIP.String()
			if podUsesHostNetwork(pod) && s.PodIP == "" {
				s.PodIP = hostIP.String()
			}
		}
	}

	return *s
}
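The comment in the example above describes a copy-or-create rule for the PodScheduled condition: reuse the existing condition verbatim when present, otherwise record one set to true, since the kubelet only ever sees pods that are already bound to it. A minimal sketch of that rule, with an illustrative stand-in type rather than the real api.PodCondition:

package main

import "fmt"

// Illustrative stand-in for api.PodCondition.
type PodCondition struct {
	Type   string
	Status string
}

// ensureScheduledCondition appends the existing PodScheduled condition when
// one exists, preserving its fields verbatim; otherwise it appends a fresh
// condition marked true.
func ensureScheduledCondition(existing *PodCondition, conditions []PodCondition) []PodCondition {
	if existing != nil {
		return append(conditions, *existing)
	}
	// No prior condition: the pod reached this kubelet, so scheduling is
	// treated as true.
	return append(conditions, PodCondition{Type: "PodScheduled", Status: "True"})
}

func main() {
	conds := ensureScheduledCondition(nil, nil)
	fmt.Println(conds[0].Type, conds[0].Status) // prints "PodScheduled True"
}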
Example #11
// updateStatusInternal updates the internal status cache, and queues an update to the api server if
// necessary. Returns whether an update was triggered.
// This method IS NOT THREAD SAFE and must be called from a locked function.
func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, forceUpdate bool) bool {
	var oldStatus api.PodStatus
	cachedStatus, isCached := m.podStatuses[pod.UID]
	if isCached {
		oldStatus = cachedStatus.status
	} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
		oldStatus = mirrorPod.Status
	} else {
		oldStatus = pod.Status
	}

	// Set ReadyCondition.LastTransitionTime.
	if _, readyCondition := api.GetPodCondition(&status, api.PodReady); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		_, oldReadyCondition := api.GetPodCondition(&oldStatus, api.PodReady)
		if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
			lastTransitionTime = oldReadyCondition.LastTransitionTime
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// Set InitializedCondition.LastTransitionTime.
	if _, initCondition := api.GetPodCondition(&status, api.PodInitialized); initCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		_, oldInitCondition := api.GetPodCondition(&oldStatus, api.PodInitialized)
		if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status {
			lastTransitionTime = oldInitCondition.LastTransitionTime
		}
		initCondition.LastTransitionTime = lastTransitionTime
	}

	// ensure that the start time does not change across updates.
	if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
		status.StartTime = oldStatus.StartTime
	} else if status.StartTime.IsZero() {
		// if the status has no start time, we need to set an initial time
		now := unversioned.Now()
		status.StartTime = &now
	}

	normalizeStatus(pod, &status)
	// The intent here is to prevent concurrent updates to a pod's status from
	// clobbering each other so the phase of a pod progresses monotonically.
	if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate {
		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
		return false // No new status.
	}

	newStatus := versionedPodStatus{
		status:       status,
		version:      cachedStatus.version + 1,
		podName:      pod.Name,
		podNamespace: pod.Namespace,
	}
	m.podStatuses[pod.UID] = newStatus

	select {
	case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}:
		return true
	default:
		// Let the periodic syncBatch handle the update if the channel is full.
		// We can't block, since we hold the mutex lock.
		glog.V(4).Infof("Skpping the status update for pod %q for now because the channel is full; status: %+v",
			format.Pod(pod), status)
		return false
	}
}
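The two condition blocks above apply the same rule: LastTransitionTime is carried over unchanged unless the condition's Status actually flipped. A stand-alone sketch of that rule (setTransitionTime and the trimmed PodCondition are illustrative, not part of the real API):

package main

import (
	"fmt"
	"time"
)

// Illustrative stand-in for the relevant api.PodCondition fields.
type PodCondition struct {
	Status             string
	LastTransitionTime time.Time
}

// setTransitionTime keeps the old timestamp when the status is unchanged,
// and stamps the current time when the condition actually transitions.
func setTransitionTime(newCond, oldCond *PodCondition) {
	newCond.LastTransitionTime = time.Now()
	if oldCond != nil && newCond.Status == oldCond.Status {
		newCond.LastTransitionTime = oldCond.LastTransitionTime
	}
}

func main() {
	old := &PodCondition{Status: "True", LastTransitionTime: time.Now().Add(-time.Hour)}
	cur := &PodCondition{Status: "True"}
	setTransitionTime(cur, old)
	// Status did not change, so the hour-old timestamp is preserved.
	fmt.Println(cur.LastTransitionTime.Equal(old.LastTransitionTime)) // prints "true"
}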
Example #12
		startedPod, err := podClient.Create(pod)
		if err != nil {
			framework.Failf("Error creating a pod: %v", err)
		}
		w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
		if err != nil {
			framework.Failf("Error watching a pod: %v", err)
		}
		wr := watch.NewRecorder(w)
		event, err := watch.Until(framework.PodStartTimeout, wr, client.PodCompleted)
		Expect(err).To(BeNil())
		framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
		endPod := event.Object.(*api.Pod)

		Expect(endPod.Status.Phase).To(Equal(api.PodSucceeded))
		_, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
		Expect(init).NotTo(BeNil())
		Expect(init.Status).To(Equal(api.ConditionTrue))

		Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
		for _, status := range endPod.Status.InitContainerStatuses {
			Expect(status.Ready).To(BeTrue())
			Expect(status.State.Terminated).NotTo(BeNil())
			Expect(status.State.Terminated.ExitCode).To(BeZero())
		}
	})

	It("should invoke init containers on a RestartAlways pod", func() {
		framework.SkipIfContainerRuntimeIs("rkt") // #25988
		podClient := f.Client.Pods(f.Namespace.Name)