// Calculate the resource occupancy on a node. 'node' has information about
// the allocatable resources on the node, and 'nodeInfo' tracks the requests
// of the pods already scheduled there.
func calculateResourceOccupancy(pod *api.Pod, node api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
	totalMilliCPU := nodeInfo.NonZeroRequest().MilliCPU
	totalMemory := nodeInfo.NonZeroRequest().Memory
	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
	capacityMemory := node.Status.Allocatable.Memory().Value()

	// Add the resources requested by the current pod being scheduled.
	// This also helps differentiate between differently sized, but empty, nodes.
	for _, container := range pod.Spec.Containers {
		cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
		totalMilliCPU += cpu
		totalMemory += memory
	}

	cpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name)
	memoryScore := calculateScore(totalMemory, capacityMemory, node.Name)
	glog.V(10).Infof(
		"%v -> %v: Least Requested Priority, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d CPU %d memory",
		pod.Name, node.Name,
		capacityMilliCPU, capacityMemory,
		totalMilliCPU, totalMemory,
		cpuScore, memoryScore,
	)

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: int((cpuScore + memoryScore) / 2),
	}
}
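
// calculateScore is referenced above but not defined in this excerpt. Below
// is a minimal sketch of the least-requested formula it presumably
// implements (unused fraction of capacity, scaled to 0-10); the
// zero-capacity and over-request guards are assumptions, and nodeName is
// kept only because the real helper likely uses it for logging:
func calculateScore(requested, capacity int64, nodeName string) int64 {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		// A node whose existing requests already exceed capacity should
		// not look attractive.
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}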
// Calculates host priority based on the amount of unused resources.
// 'node' has information about the allocatable resources on the node, and
// 'nodeInfo' tracks the requests of the pods already scheduled there.
// TODO: Use Node() from nodeInfo instead of passing it.
func calculateUnusedPriority(pod *api.Pod, podRequests *schedulercache.Resource, node *api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
	allocatableResources := nodeInfo.AllocatableResource()
	totalResources := *podRequests
	totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU
	totalResources.Memory += nodeInfo.NonZeroRequest().Memory

	cpuScore := calculateUnusedScore(totalResources.MilliCPU, allocatableResources.MilliCPU, node.Name)
	memoryScore := calculateUnusedScore(totalResources.Memory, allocatableResources.Memory, node.Name)
	if glog.V(10) {
		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		glog.V(10).Infof(
			"%v -> %v: Least Requested Priority, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d CPU %d memory",
			pod.Name, node.Name,
			allocatableResources.MilliCPU, allocatableResources.Memory,
			totalResources.MilliCPU, totalResources.Memory,
			cpuScore, memoryScore,
		)
	}

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: int((cpuScore + memoryScore) / 2),
	}
}
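
// calculateUnusedScore is likewise not shown in this excerpt; it presumably
// mirrors the calculateScore sketch above, i.e. ((capacity - requested) * 10) / capacity.
// A worked example with illustrative numbers: on a node with 4000 millicores
// and 8000 MB allocatable, a total request of 1000 millicores and 6000 MB gives
//
//	cpuScore    = ((4000-1000)*10)/4000 = 7
//	memoryScore = ((8000-6000)*10)/8000 = 2
//	final score = (7+2)/2               = 4 (integer division)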
// Calculates host priority based on how balanced the node's CPU and memory
// utilization would be if the pod were scheduled there.
func calculateBalancedResourceAllocation(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}

	allocatableResources := nodeInfo.AllocatableResource()
	totalResources := *podRequests
	totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU
	totalResources.Memory += nodeInfo.NonZeroRequest().Memory

	cpuFraction := fractionOfCapacity(totalResources.MilliCPU, allocatableResources.MilliCPU)
	memoryFraction := fractionOfCapacity(totalResources.Memory, allocatableResources.Memory)
	score := 0
	if cpuFraction >= 1 || memoryFraction >= 1 {
		// If requested >= capacity, the corresponding host should never be preferred.
		score = 0
	} else {
		// Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1
		// respectively. Multiplying the absolute value of the difference by 10 scales the value to
		// 0-10, with 0 representing a well balanced allocation and 10 a poorly balanced one.
		// Subtracting it from 10 yields a score that also ranges from 0 to 10, with 10 representing
		// a well balanced allocation.
		diff := math.Abs(cpuFraction - memoryFraction)
		score = int(10 - diff*10)
	}
	if glog.V(10) {
		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		glog.V(10).Infof(
			"%v -> %v: Balanced Resource Allocation, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
			pod.Name, node.Name,
			allocatableResources.MilliCPU, allocatableResources.Memory,
			totalResources.MilliCPU, totalResources.Memory,
			score,
		)
	}

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: score,
	}, nil
}
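
// fractionOfCapacity is referenced above but not defined in this excerpt.
// A minimal sketch; returning 1 for zero capacity is an assumption that
// routes such nodes into the "requested >= capacity" branch (score 0):
func fractionOfCapacity(requested, capacity int64) float64 {
	if capacity == 0 {
		return 1
	}
	return float64(requested) / float64(capacity)
}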
// Older variant of the balanced-allocation priority that takes the node directly.
// TODO: Use Node() from nodeInfo instead of passing it.
func calculateBalancedResourceAllocation(pod *api.Pod, node *api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
	totalMilliCPU := nodeInfo.NonZeroRequest().MilliCPU
	totalMemory := nodeInfo.NonZeroRequest().Memory
	score := 0

	// Add the resources requested by the current pod being scheduled.
	// This also helps differentiate between differently sized, but empty, nodes.
	for i := range pod.Spec.Containers {
		container := &pod.Spec.Containers[i]
		cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
		totalMilliCPU += cpu
		totalMemory += memory
	}

	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
	capacityMemory := node.Status.Allocatable.Memory().Value()

	cpuFraction := fractionOfCapacity(totalMilliCPU, capacityMilliCPU)
	memoryFraction := fractionOfCapacity(totalMemory, capacityMemory)
	if cpuFraction >= 1 || memoryFraction >= 1 {
		// If requested >= capacity, the corresponding host should never be preferred.
		score = 0
	} else {
		// Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1
		// respectively. Multiplying the absolute value of the difference by 10 scales the value to
		// 0-10, with 0 representing a well balanced allocation and 10 a poorly balanced one.
		// Subtracting it from 10 yields a score that also ranges from 0 to 10, with 10 representing
		// a well balanced allocation.
		diff := math.Abs(cpuFraction - memoryFraction)
		score = int(10 - diff*10)
	}

	glog.V(10).Infof(
		"%v -> %v: Balanced Resource Allocation, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
		pod.Name, node.Name,
		capacityMilliCPU, capacityMemory,
		totalMilliCPU, totalMemory,
		score,
	)

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: score,
	}
}
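
// A small self-contained illustration of the scoring branch above; the
// function and its numbers are hypothetical, not part of the scheduler:
func exampleBalancedScore() int {
	// cpuFraction 0.5 and memoryFraction 0.7 differ by 0.2, so the score
	// is int(10 - 0.2*10) = 8; identical fractions would score 10.
	diff := math.Abs(0.5 - 0.7)
	return int(10 - diff*10) // 8
}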