Code example #1
// PodFitsResources calculates fit based on requested, rather than used resources
func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
	podRequest := getResourceRequest(&pod)
	if podRequest.milliCPU == 0 && podRequest.memory == 0 {
		// a pod that requests no resources always fits
		return true, nil
	}
	info, err := r.info.GetNodeInfo(node)
	if err != nil {
		return false, err
	}
	milliCPURequested := 0
	memoryRequested := 0
	for ix := range existingPods {
		existingRequest := getResourceRequest(&existingPods[ix])
		milliCPURequested += existingRequest.milliCPU
		memoryRequested += existingRequest.memory
	}

	// TODO: convert to general purpose resource matching, when pods ask for resources
	totalMilliCPU := int(resources.GetFloatResource(info.NodeResources.Capacity, resources.CPU, 0) * 1000)
	totalMemory := resources.GetIntegerResource(info.NodeResources.Capacity, resources.Memory, 0)

	fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU
	fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory
	glog.V(3).Infof("Calculated fit: cpu: %s, memory %s", fitsCPU, fitsMemory)

	return fitsCPU && fitsMemory, nil
}
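
Both fit checks in this listing lean on a getResourceRequest helper that is not shown. Below is a minimal sketch of what it presumably does, summing per-container requests into a single pod-level struct; the struct shape, the field names, and the per-container CPU/Memory fields are assumptions inferred from the call sites above and from example #3, not from the original source.

// Hypothetical sketch; names and units are inferred, not confirmed.
type resourceRequest struct {
	milliCPU int
	memory   int
}

func getResourceRequest(pod *api.Pod) resourceRequest {
	result := resourceRequest{}
	// Sum each container's declared CPU and memory into one pod-level request.
	for _, container := range pod.Spec.Containers {
		result.milliCPU += container.CPU // assumed to already be in millicores
		result.memory += container.Memory
	}
	return result
}

Under this reading, a pod with no containers (or all-zero requests) yields the zero struct, which is exactly the early-exit case the predicate checks first.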
Code example #2
// PodFitsResources calculates fit based on requested, rather than used resources
func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
	podRequest := getResourceRequest(&pod)
	if podRequest.milliCPU == 0 && podRequest.memory == 0 {
		// a pod that requests no resources always fits
		return true, nil
	}
	info, err := r.info.GetNodeInfo(node)
	if err != nil {
		return false, err
	}

	// check whether a free VM exists; only applies to bridge networking mode
	vmNum := len(info.Spec.VMs)
	if pod.Spec.NetworkMode == api.PodNetworkModeBridge && vmNum <= len(existingPods) {
		return false, nil
	}

	milliCPURequested := 0
	memoryRequested := 0
	coreRequested := 0
	diskRequested := 0
	for ix := range existingPods {
		existingRequest := getResourceRequest(&existingPods[ix])
		milliCPURequested += existingRequest.milliCPU
		memoryRequested += existingRequest.memory
		coreRequested += existingRequest.core
		diskRequested += existingRequest.disk
	}

	// TODO: convert to general purpose resource matching, when pods ask for resources
	totalMilliCPU := int(resources.GetFloatResource(info.Spec.Capacity, resources.CPU, 0) * 1000)
	totalMemory := resources.GetIntegerResource(info.Spec.Capacity, resources.Memory, 0)
	totalCore := resources.GetIntegerResource(info.Spec.Capacity, resources.Core, 0)
	totalDisk := resources.GetIntegerResource(info.Spec.Capacity, resources.Disk, 0)

	fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU
	fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory
	fitsCore := totalCore == 0 || (totalCore-coreRequested) >= podRequest.core
	fitsDisk := totalDisk == 0 || (totalDisk-diskRequested) >= podRequest.disk
	glog.V(3).Infof("Calculated fit: cpu: %s, memory %s, core: %s, disk: %s", fitsCPU, fitsMemory, fitsCore, fitsDisk)

	return fitsCPU && fitsMemory && fitsCore && fitsDisk, nil
}
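
This variant additionally accounts for dedicated cores and disk, and guards bridge-mode pods with an apparent one-VM-per-pod assumption (it rejects the pod once every VM on the node is occupied). The request struct it sums over must therefore carry two extra fields; a hedged sketch of that extended shape, inferred only from the fields read above:

// Assumed extended request shape; only these four fields are read above,
// and the units are guesses.
type resourceRequest struct {
	milliCPU int // shared-CPU request, in millicores
	memory   int // memory request (unit assumed)
	core     int // number of dedicated cores requested
	disk     int // disk request (unit assumed)
}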
Code example #3
// Calculate the occupancy on a node.  'node' has information about the resources on the node.
// 'pods' is a list of pods currently scheduled on the node.
func calculateOccupancy(node api.Minion, pods []api.Pod) HostPriority {
	totalCPU := 0
	totalMemory := 0
	for _, pod := range pods {
		for _, container := range pod.Spec.Containers {
			totalCPU += container.CPU
			totalMemory += container.Memory
		}
	}

	percentageCPU := calculatePercentage(totalCPU, resources.GetIntegerResource(node.Spec.Capacity, resources.CPU, 0))
	percentageMemory := calculatePercentage(totalMemory, resources.GetIntegerResource(node.Spec.Capacity, resources.Memory, 0))
	glog.V(4).Infof("Least Requested Priority, AbsoluteRequested: (%d, %d) Percentage:(%d\\%m, %d\\%)", totalCPU, totalMemory, percentageCPU, percentageMemory)

	return HostPriority{
		host:  node.Name,
		score: int((percentageCPU + percentageMemory) / 2),
	}
}
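
The calculatePercentage helper is also not shown. A minimal sketch consistent with its use above, with a guard so a node reporting zero capacity does not divide by zero; this is an assumption, not the original implementation:

// Hypothetical helper: requested expressed as an integer percentage of capacity.
func calculatePercentage(requested, capacity int) int {
	if capacity == 0 {
		return 0 // avoid division by zero for nodes with unknown capacity
	}
	return (requested * 100) / capacity
}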
Code example #4
func (g *genericScheduler) numaCpuSelect(pod api.Pod, podLister PodLister, nodes api.MinionList) (int, []string, error) {
	var (
		cpuNodeNum       int
		numaCpuSet       []string
		numaSelectMinion int

		noNumaCpuSet       []string
		noNumaSelectMinion int
	)

	machineToPods, err := MapPodsToMachines(podLister)
	if err != nil {
		return -1, nil, err
	}

	reqCore := 0
	for ix := range pod.Spec.Containers {
		reqCore += pod.Spec.Containers[ix].Core
	}

	// no dedicated CPU set requested; nothing to select
	if reqCore == 0 {
		return 0, nil, nil
	}

	numaSelectMinion = -1
	noNumaSelectMinion = -1

	for index, minion := range nodes.Items {
		var set1 []string

		pods := machineToPods[minion.Name]

		coreNum := resources.GetIntegerResource(minion.Spec.Capacity, resources.Core, 24)
		cpuNodeNum = resources.GetIntegerResource(minion.Spec.Capacity, resources.CpuNode, 2)
		cpuMap := bitmap.NewNumaBitmapSize(uint(coreNum), cpuNodeNum)
		cpuMap.SetDefaultBit()

		// mark cores already pinned by existing pods as used
		for _, pod := range pods {
			set := strings.Split(pod.Status.CpuSet, ",")
			for _, c := range set {
				coreNo, err := strconv.Atoi(c)
				if err != nil {
					// skip malformed or empty entries instead of silently pinning core 0
					continue
				}
				cpuMap.SetBit(uint(coreNo), 1)
			}
		}

		freeCores1 := cpuMap.Get0BitOffs()
		if len(freeCores1) < reqCore {
			// not enough free cores on this minion at all; try the next one
			continue
		}
		// record a fallback CPU set that ignores NUMA locality
		for j := 0; j < reqCore; j++ {
			set1 = append(set1, strconv.Itoa(int(freeCores1[j])))
		}
		noNumaCpuSet = set1
		noNumaSelectMinion = index

		var (
			freeCores2 [][]uint
			err2       error
		)
		if val, exists := minion.Labels["numaflat"]; exists && val == "1" {
			freeCores2, err2 = cpuMap.Get0BitOffsNumaVer(uint(cpuNodeNum))
		} else {
			freeCores2, err2 = cpuMap.Get0BitOffsNuma(uint(cpuNodeNum))
		}
		if err2 != nil {
			return -1, nil, err2
		}

		for i := 0; i < cpuNodeNum; i++ {
			offs := freeCores2[i]
			if len(offs) >= reqCore {
				for j := 0; j < reqCore; j++ {
					off := offs[j]
					//cpuMap.SetBit(off, 1)
					numaCpuSet = append(numaCpuSet, strconv.Itoa(int(off)))
				}
				numaSelectMinion = index
				break
			}
		}

		if numaCpuSet != nil {
			break
		}
	} // end of loop over nodes.Items

	if numaCpuSet != nil {
		selectNode := nodes.Items[numaSelectMinion]
		glog.V(3).Infof("Selected NUMA CPU set: %v, Minion index: %d, name: %s", numaCpuSet, numaSelectMinion, selectNode.Name)
		return numaSelectMinion, numaCpuSet, nil
	}
	if noNumaSelectMinion == -1 {
		// no minion had enough free cores; fail instead of indexing Items with -1
		return -1, nil, fmt.Errorf("no minion has %d free cpu cores", reqCore)
	}
	selectNode := nodes.Items[noNumaSelectMinion]
	glog.V(3).Infof("Selected non-NUMA CPU set: %v, Minion index: %d, name: %s", noNumaCpuSet, noNumaSelectMinion, selectNode.Name)
	return noNumaSelectMinion, noNumaCpuSet, nil
}
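
The bitmap package driving this selection is not shown. As a rough illustration of the bookkeeping it presumably provides, here is a self-contained stand-in in which a bool slice plays the role of the per-core bitmap; the method names mirror the call sites above, but everything else is an assumption (the real NewNumaBitmapSize, SetDefaultBit, and the NUMA-aware Get0BitOffsNuma variants are not reconstructed here).

// Hypothetical stand-in for the core bitmap: index = core number,
// true = core already pinned to some pod.
type coreBitmap []bool

func newCoreBitmap(coreNum int) coreBitmap {
	return make(coreBitmap, coreNum)
}

// SetBit marks a core as used, mirroring cpuMap.SetBit(off, 1) above.
func (b coreBitmap) SetBit(off uint) {
	b[off] = true
}

// Get0BitOffs lists the offsets of all free cores, mirroring the call
// that feeds the non-NUMA fallback path in numaCpuSelect.
func (b coreBitmap) Get0BitOffs() []uint {
	var offs []uint
	for i, used := range b {
		if !used {
			offs = append(offs, uint(i))
		}
	}
	return offs
}

The NUMA-aware variants would presumably partition these offsets by CPU node and return one slice per node, which is what the freeCores2[i] indexing in numaCpuSelect expects.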