Example 1
// GetContainerOOMScoreAdjust returns the amount by which the OOM score of all processes in the
// container should be adjusted.
// The OOM score of a process is the percentage of memory it consumes
// multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000
// and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
// See https://lwn.net/Articles/391222/ for more information.
func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCapacity int64) int {
	switch util.GetPodQos(pod) {
	case util.Guaranteed:
		// Guaranteed containers should be the last to get killed.
		return guaranteedOOMScoreAdj
	case util.BestEffort:
		return besteffortOOMScoreAdj
	}

	// Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally,
	// we want to protect Burstable containers that consume less memory than requested.
	// The formula below is a heuristic. A container requesting 10% of a system's
	// memory will have an OOM score adjust of 900. If a process in container Y
	// uses over 10% of memory, its OOM score will be 1000. The idea is that containers
	// which use more than their request will have an OOM score of 1000 and will be prime
	// targets for OOM kills.
	// Note that this is a heuristic, it won't work if a container has many small processes.
	memoryRequest := container.Resources.Requests.Memory().Value()
	oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
	// A guaranteed pod using 100% of memory can have an OOM score of 1. Ensure
	// that burstable pods have a higher OOM score adjustment.
	if oomScoreAdjust < 2 {
		return 2
	}
	// Give burstable pods a better chance of survival than besteffort pods.
	if int(oomScoreAdjust) == besteffortOOMScoreAdj {
		return int(oomScoreAdjust - 1)
	}
	return int(oomScoreAdjust)
}
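
Worked through by hand, the burstable heuristic above maps a memory request to an adjustment between 2 and 999. The following is a minimal standalone sketch, not part of the original package: burstableOOMScoreAdj is a hypothetical helper that mirrors only the burstable branch, and the 10 GiB capacity and sample requests are assumed values.

package main

import "fmt"

const besteffortOOMScoreAdj = 1000 // mirrors the constant referenced in the snippet above

// burstableOOMScoreAdj is a hypothetical helper reproducing the burstable
// branch of GetContainerOOMScoreAdjust for illustration.
func burstableOOMScoreAdj(memoryRequest, memoryCapacity int64) int {
	oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
	if oomScoreAdjust < 2 {
		return 2
	}
	if int(oomScoreAdjust) == besteffortOOMScoreAdj {
		return int(oomScoreAdjust - 1)
	}
	return int(oomScoreAdjust)
}

func main() {
	capacity := int64(10 << 30) // assumed node capacity: 10 GiB
	fmt.Println(burstableOOMScoreAdj(0, capacity))           // 999: no request, kept just below best-effort
	fmt.Println(burstableOOMScoreAdj(capacity/10, capacity)) // 900: requesting 10% of memory
	fmt.Println(burstableOOMScoreAdj(capacity, capacity))    // 2: requesting all memory, clamped above guaranteed
}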
Example 2
// qos compares pods by QoS (BestEffort < Burstable < Guaranteed)
func qos(p1, p2 *api.Pod) int {
	qosP1 := qosutil.GetPodQos(p1)
	qosP2 := qosutil.GetPodQos(p2)
	// it's a tie
	if qosP1 == qosP2 {
		return 0
	}
	// if p1 is best effort, we know p2 is burstable or guaranteed
	if qosP1 == qosutil.BestEffort {
		return -1
	}
	// we know p1 and p2 are not besteffort, so if p1 is burstable, p2 must be guaranteed
	if qosP1 == qosutil.Burstable {
		if qosP2 == qosutil.Guaranteed {
			return -1
		}
		return 1
	}
	// ok, p1 must be guaranteed.
	return 1
}
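
Because qos returns -1, 0, or 1, it can drive an eviction-order sort directly. A minimal sketch of that pattern, assuming a simplified qosClass stand-in in place of *api.Pod and using sort.Slice from the standard library:

package main

import (
	"fmt"
	"sort"
)

// qosClass is a simplified stand-in for the classes returned by qosutil.GetPodQos.
type qosClass int

const (
	bestEffort qosClass = iota
	burstable
	guaranteed
)

// compareQoS mirrors the ordering encoded by qos(): BestEffort < Burstable < Guaranteed.
func compareQoS(a, b qosClass) int {
	switch {
	case a == b:
		return 0
	case a < b:
		return -1
	}
	return 1
}

func main() {
	pods := []qosClass{guaranteed, bestEffort, burstable, bestEffort}
	// Ascending sort puts the most evictable (best-effort) pods first.
	sort.Slice(pods, func(i, j int) bool { return compareQoS(pods[i], pods[j]) < 0 })
	fmt.Println(pods) // [0 0 1 2]
}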
Example 3
// Admit rejects a pod if it's not safe to admit for node stability.
func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
	m.RLock()
	defer m.RUnlock()
	if len(m.nodeConditions) == 0 {
		return lifecycle.PodAdmitResult{Admit: true}
	}
	notBestEffort := qosutil.BestEffort != qosutil.GetPodQos(attrs.Pod)
	if notBestEffort {
		return lifecycle.PodAdmitResult{Admit: true}
	}
	glog.Warningf("Failed to admit pod %v - node has conditions: %v", format.Pod(attrs.Pod), m.nodeConditions)
	// we reject all best effort pods until we are stable.
	return lifecycle.PodAdmitResult{
		Admit:   false,
		Reason:  reason,
		Message: message,
	}
}
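
Stripped of the locking and logging, the admission rule above is a two-input decision: admit everything while the node reports no conditions, and reject only best-effort pods while it does. A minimal sketch of that decision (admitPod is a hypothetical distillation, not part of the eviction manager's API):

package main

import "fmt"

// admitPod is a hypothetical reduction of the Admit logic above: a pod is
// rejected only when the node has pressure conditions and the pod is best-effort.
func admitPod(nodeHasConditions, podIsBestEffort bool) bool {
	if !nodeHasConditions {
		return true
	}
	return !podIsBestEffort
}

func main() {
	fmt.Println(admitPod(false, true)) // true: no pressure, everything is admitted
	fmt.Println(admitPod(true, false)) // true: pressure, but the pod is not best-effort
	fmt.Println(admitPod(true, true))  // false: pressure + best-effort, rejected until stable
}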
Example 4
File: pods.go Project: ncdc/origin
func isBestEffort(pod *api.Pod) bool {
	return util.GetPodQos(pod) == util.BestEffort
}
Example 5
// isPodBestEffort returns true if the pod is scheduled with best-effort QoS.
func isPodBestEffort(pod *api.Pod) bool {
	return qosutil.GetPodQos(pod) == qosutil.BestEffort
}
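
Both helpers delegate the actual classification to GetPodQos. As a rough sketch of the rules it applies (a simplified stand-in, not the real implementation, which walks api.Pod container resource lists): no requests or limits anywhere makes a pod BestEffort, requests equal to limits for every resource of every container makes it Guaranteed, and anything in between is Burstable.

package main

import "fmt"

// resources and container are simplified stand-ins; the real code inspects
// api.Pod containers and their Resources.Requests / Resources.Limits.
type resources map[string]int64

type container struct {
	requests resources
	limits   resources
}

// classifyQoS sketches the GetPodQos rules: no requests or limits anywhere ->
// BestEffort; requests == limits for every resource of every container ->
// Guaranteed; otherwise -> Burstable.
func classifyQoS(containers []container) string {
	anySet := false
	allEqual := true
	for _, c := range containers {
		if len(c.requests) > 0 || len(c.limits) > 0 {
			anySet = true
		}
		if len(c.requests) != len(c.limits) {
			allEqual = false
			continue
		}
		for name, req := range c.requests {
			if lim, ok := c.limits[name]; !ok || lim != req {
				allEqual = false
			}
		}
	}
	switch {
	case !anySet:
		return "BestEffort"
	case allEqual:
		return "Guaranteed"
	default:
		return "Burstable"
	}
}

func main() {
	none := container{}
	requestOnly := container{requests: resources{"memory": 100 << 20}}
	matched := container{requests: resources{"memory": 100 << 20}, limits: resources{"memory": 100 << 20}}
	fmt.Println(classifyQoS([]container{none}))        // BestEffort
	fmt.Println(classifyQoS([]container{requestOnly})) // Burstable
	fmt.Println(classifyQoS([]container{matched}))     // Guaranteed
}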