// Admit rejects a pod if its not safe to admit for node stability. func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult { m.RLock() defer m.RUnlock() if len(m.nodeConditions) == 0 { return lifecycle.PodAdmitResult{Admit: true} } // Check the node conditions to identify the resource under pressure. // The resource can only be either disk or memory; set the default to disk. resource := api.ResourceStorage if hasNodeCondition(m.nodeConditions, api.NodeMemoryPressure) { resource = api.ResourceMemory // the node has memory pressure, admit if not best-effort notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod) if notBestEffort { return lifecycle.PodAdmitResult{Admit: true} } } // reject pods when under memory pressure (if pod is best effort), or if under disk pressure. glog.Warningf("Failed to admit pod %q - node has conditions: %v", format.Pod(attrs.Pod), m.nodeConditions) return lifecycle.PodAdmitResult{ Admit: false, Reason: reason, Message: getMessage(resource), } }
// qosComparator compares pods by QoS (BestEffort < Burstable < Guaranteed) func qosComparator(p1, p2 *api.Pod) int { qosP1 := qos.GetPodQOS(p1) qosP2 := qos.GetPodQOS(p2) // its a tie if qosP1 == qosP2 { return 0 } // if p1 is best effort, we know p2 is burstable or guaranteed if qosP1 == qos.BestEffort { return -1 } // we know p1 and p2 are not besteffort, so if p1 is burstable, p2 must be guaranteed if qosP1 == qos.Burstable { if qosP2 == qos.Guaranteed { return -1 } return 1 } // ok, p1 must be guaranteed. return 1 }
// ResourceConfigForPod takes the input pod and outputs the cgroup resource config. func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig { // sum requests and limits, track if limits were applied for each resource. cpuRequests := int64(0) cpuLimits := int64(0) memoryLimits := int64(0) memoryLimitsDeclared := true cpuLimitsDeclared := true for _, container := range pod.Spec.Containers { cpuRequests += container.Resources.Requests.Cpu().MilliValue() cpuLimits += container.Resources.Limits.Cpu().MilliValue() if container.Resources.Limits.Cpu().IsZero() { cpuLimitsDeclared = false } memoryLimits += container.Resources.Limits.Memory().Value() if container.Resources.Limits.Memory().IsZero() { memoryLimitsDeclared = false } } // convert to CFS values cpuShares := MilliCPUToShares(cpuRequests) cpuQuota, cpuPeriod := MilliCPUToQuota(cpuLimits) // determine the qos class qosClass := qos.GetPodQOS(pod) // build the result result := &ResourceConfig{} if qosClass == qos.Guaranteed { result.CpuShares = &cpuShares result.CpuQuota = &cpuQuota result.CpuPeriod = &cpuPeriod result.Memory = &memoryLimits } else if qosClass == qos.Burstable { result.CpuShares = &cpuShares if cpuLimitsDeclared { result.CpuQuota = &cpuQuota result.CpuPeriod = &cpuPeriod } if memoryLimitsDeclared { result.Memory = &memoryLimits } } else { shares := int64(MinShares) result.CpuShares = &shares } return result }
// GetPodContainerName is a util func takes in a pod as an argument // and returns the pod's cgroup name. We follow a pod cgroup naming format // which is opaque and deterministic. Given a pod it's cgroup would be named // "pod-UID" where the UID is the Pod UID func (m *podContainerManagerImpl) GetPodContainerName(pod *api.Pod) string { podQOS := qos.GetPodQOS(pod) // Get the parent QOS container name var parentContainer string switch podQOS { case qos.Guaranteed: parentContainer = m.qosContainersInfo.Guaranteed case qos.Burstable: parentContainer = m.qosContainersInfo.Burstable case qos.BestEffort: parentContainer = m.qosContainersInfo.BestEffort } podContainer := podCgroupNamePrefix + string(pod.UID) // Get the absolute path of the cgroup return path.Join(parentContainer, podContainer) }
// Admit rejects a pod if its not safe to admit for node stability. func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult { m.RLock() defer m.RUnlock() if len(m.nodeConditions) == 0 { return lifecycle.PodAdmitResult{Admit: true} } notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod) if notBestEffort { return lifecycle.PodAdmitResult{Admit: true} } glog.Warningf("Failed to admit pod %v - %s", format.Pod(attrs.Pod), "node has conditions: %v", m.nodeConditions) // we reject all best effort pods until we are stable. return lifecycle.PodAdmitResult{ Admit: false, Reason: reason, Message: message, } }
// GetPodContainerName returns the CgroupName identifer, and its literal cgroupfs form on the host. func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName, string) { podQOS := qos.GetPodQOS(pod) // Get the parent QOS container name var parentContainer string switch podQOS { case v1.PodQOSGuaranteed: parentContainer = m.qosContainersInfo.Guaranteed case v1.PodQOSBurstable: parentContainer = m.qosContainersInfo.Burstable case v1.PodQOSBestEffort: parentContainer = m.qosContainersInfo.BestEffort } podContainer := podCgroupNamePrefix + string(pod.UID) // Get the absolute path of the cgroup cgroupName := (CgroupName)(path.Join(parentContainer, podContainer)) // Get the literal cgroupfs name cgroupfsName := m.cgroupManager.Name(cgroupName) return cgroupName, cgroupfsName }
// Admit rejects a pod if its not safe to admit for node stability. func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult { m.RLock() defer m.RUnlock() if len(m.nodeConditions) == 0 { return lifecycle.PodAdmitResult{Admit: true} } // the node has memory pressure, admit if not best-effort if hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) { notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod) if notBestEffort { return lifecycle.PodAdmitResult{Admit: true} } } // reject pods when under memory pressure (if pod is best effort), or if under disk pressure. glog.Warningf("Failed to admit pod %v - %s", format.Pod(attrs.Pod), "node has conditions: %v", m.nodeConditions) return lifecycle.PodAdmitResult{ Admit: false, Reason: reason, Message: fmt.Sprintf(message, m.nodeConditions), } }
// isBestEffort reports whether the pod's QoS class is best-effort.
func isBestEffort(pod *api.Pod) bool {
	return qos.BestEffort == qos.GetPodQOS(pod)
}
// Determine if a pod is scheduled with best-effort QoS func isPodBestEffort(pod *v1.Pod) bool { return qos.GetPodQOS(pod) == qos.BestEffort }