Code example #1
File: predicates.go  Project: timstclair/kube-contrib
func CheckPodsExceedingFreeResources(pods []*api.Pod, allocatable api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
	totalMilliCPU := allocatable.Cpu().MilliValue()
	totalMemory := allocatable.Memory().Value()
	milliCPURequested := int64(0)
	memoryRequested := int64(0)
	for _, pod := range pods {
		podRequest := getResourceRequest(pod)
		fitsCPU := (totalMilliCPU - milliCPURequested) >= podRequest.milliCPU
		fitsMemory := (totalMemory - memoryRequested) >= podRequest.memory
		if !fitsCPU {
			// the pod doesn't fit due to CPU request
			notFittingCPU = append(notFittingCPU, pod)
			continue
		}
		if !fitsMemory {
			// the pod doesn't fit due to Memory request
			notFittingMemory = append(notFittingMemory, pod)
			continue
		}
		// the pod fits
		milliCPURequested += podRequest.milliCPU
		memoryRequested += podRequest.memory
		fitting = append(fitting, pod)
	}
	return
}
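A minimal usage sketch for the predicate above. This is a hypothetical caller, not part of the project: it assumes the same pre-1.6 api and resource packages the example itself uses, plus glog, and that the pod slice comes from the scheduler's per-node bookkeeping.
// logPodFit is a hypothetical helper showing how the predicate above might be
// driven: it checks a candidate pod set against a node's allocatable resources
// and logs how many pods were rejected and why.
func logPodFit(pods []*api.Pod) {
	allocatable := api.ResourceList{
		api.ResourceCPU:    resource.MustParse("2000m"), // 2 cores = 2000 milliCPU
		api.ResourceMemory: resource.MustParse("4Gi"),
	}
	fitting, notFittingCPU, notFittingMemory := CheckPodsExceedingFreeResources(pods, allocatable)
	glog.V(4).Infof("%d pods fit, %d rejected on CPU, %d rejected on memory",
		len(fitting), len(notFittingCPU), len(notFittingMemory))
}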
Code example #2
File: non_zero.go  Project: xachca/kubernetes-1.2
func GetNonZeroCore(resource *api.ResourceList) int64 {
	var outCore int64
	// Override if un-set, but not if explicitly set to zero
	if _, found := (*resource)[api.ResourceCPU]; !found {
		outCore = DefaultNodeCore
	} else {
		outCore = resource.Cpu().Value()
	}
	return outCore
}
Code example #3
File: non_zero.go  Project: xachca/kubernetes-1.2
// GetNonzeroRequests returns the default resource request if none is found or what is provided on the request
// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
// as an additional argument here) rather than using constants
func GetNonzeroRequests(requests *api.ResourceList) (int64, int64) {
	var outMilliCPU, outMemory int64
	// Override if un-set, but not if explicitly set to zero
	if _, found := (*requests)[api.ResourceCPU]; !found {
		outMilliCPU = DefaultMilliCpuRequest
	} else {
		outMilliCPU = requests.Cpu().MilliValue()
	}
	// Override if un-set, but not if explicitly set to zero
	if _, found := (*requests)[api.ResourceMemory]; !found {
		outMemory = DefaultMemoryRequest
	} else {
		outMemory = requests.Memory().Value()
	}
	return outMilliCPU, outMemory
}
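A sketch of how a helper like this is typically consumed when totalling a pod's requests. The function below is illustrative only; its name and placement are assumed, not taken from the project above.
// totalNonzeroRequests is a hypothetical helper: it sums the requests of all
// containers in a pod, letting GetNonzeroRequests substitute defaults for
// containers that never set a CPU or memory request.
func totalNonzeroRequests(pod *api.Pod) (milliCPU, memory int64) {
	for i := range pod.Spec.Containers {
		requests := &pod.Spec.Containers[i].Resources.Requests
		cpu, mem := GetNonzeroRequests(requests)
		milliCPU += cpu
		memory += mem
	}
	return milliCPU, memory
}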
Code example #4
File: predicates.go  Project: ThunderYe/kubernetes
func CheckPodsExceedingCapacity(pods []*api.Pod, capacity api.ResourceList) (fitting []*api.Pod, notFitting []*api.Pod) {
	totalMilliCPU := capacity.Cpu().MilliValue()
	totalMemory := capacity.Memory().Value()
	milliCPURequested := int64(0)
	memoryRequested := int64(0)
	for _, pod := range pods {
		podRequest := getResourceRequest(pod)
		fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU
		fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory
		if !fitsCPU || !fitsMemory {
			// the pod doesn't fit
			notFitting = append(notFitting, pod)
			continue
		}
		// the pod fits
		milliCPURequested += podRequest.milliCPU
		memoryRequested += podRequest.memory
		fitting = append(fitting, pod)
	}
	return
}
Code example #5
File: priorities.go  Project: richm/origin
// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
// as an additional argument here) rather than using constants
func getNonzeroRequests(requests *api.ResourceList) (int64, int64) {
	var out_millicpu, out_memory int64
	// Override if un-set, but not if explicitly set to zero
	if (*requests.Cpu() == resource.Quantity{}) {
		out_millicpu = defaultMilliCpuRequest
	} else {
		out_millicpu = requests.Cpu().MilliValue()
	}
	// Override if un-set, but not if explicitly set to zero
	if (*requests.Memory() == resource.Quantity{}) {
		out_memory = defaultMemoryRequest
	} else {
		out_memory = requests.Memory().Value()
	}
	return out_millicpu, out_memory
}
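Totals produced this way usually feed a least-requested style score. The helper below sketches that formula on a 0-10 scale; the name calculateScore and the exact integer rounding are assumptions for illustration, not code from the project above.
// calculateScore is a hypothetical scoring helper: the more capacity remains
// free after the pod's (non-zero) requests, the higher the score, clamped to
// the range 0..10. Unknown or exceeded capacity scores zero.
func calculateScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}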
Code example #6
func (cm *containerManagerImpl) setupNode() error {
	f, err := validateSystemRequirements(cm.mountUtil)
	if err != nil {
		return err
	}
	if !f.cpuHardcapping {
		cm.status.SoftRequirements = fmt.Errorf("CPU hardcapping unsupported")
	}
	// TODO: plumb kernel tunable options into container manager, right now, we modify by default
	if err := setupKernelTunables(KernelTunableModify); err != nil {
		return err
	}

	systemContainers := []*systemContainer{}
	if cm.ContainerRuntime == "docker" {
		if cm.RuntimeCgroupsName != "" {
			cont := newSystemCgroups(cm.RuntimeCgroupsName)
			info, err := cm.cadvisorInterface.MachineInfo()
			var capacity = api.ResourceList{}
			// An error here is deliberately ignored: capacity stays empty, so the
			// memory limit below falls back to MinDockerMemoryLimit.
			if err == nil {
				capacity = cadvisor.CapacityFromMachineInfo(info)
			}
			memoryLimit := (int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100))
			if memoryLimit < MinDockerMemoryLimit {
				glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.RuntimeCgroupsName, MinDockerMemoryLimit)
				memoryLimit = MinDockerMemoryLimit
			}

			glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.RuntimeCgroupsName, memoryLimit)

			dockerContainer := &fs.Manager{
				Cgroups: &configs.Cgroup{
					Parent: "/",
					Name:   cm.RuntimeCgroupsName,
					Resources: &configs.Resources{
						Memory:          memoryLimit,
						MemorySwap:      -1,
						AllowAllDevices: true,
					},
				},
			}
			dockerVersion := getDockerVersion(cm.cadvisorInterface)
			cont.ensureStateFunc = func(manager *fs.Manager) error {
				return ensureDockerInContainer(dockerVersion, -900, dockerContainer)
			}
			systemContainers = append(systemContainers, cont)
		} else {
			cm.periodicTasks = append(cm.periodicTasks, func() {
				cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
				if err != nil {
					glog.Error(err)
					return
				}
				glog.V(2).Infof("Discovered runtime cgroups name: %s", cont)
				cm.Lock()
				defer cm.Unlock()
				cm.RuntimeCgroupsName = cont
			})
		}
	}

	if cm.SystemCgroupsName != "" {
		if cm.SystemCgroupsName == "/" {
			return fmt.Errorf("system container cannot be root (\"/\")")
		}
		cont := newSystemCgroups(cm.SystemCgroupsName)
		rootContainer := &fs.Manager{
			Cgroups: &configs.Cgroup{
				Parent: "/",
				Name:   "/",
			},
		}
		cont.ensureStateFunc = func(manager *fs.Manager) error {
			return ensureSystemCgroups(rootContainer, manager)
		}
		systemContainers = append(systemContainers, cont)
	}

	if cm.KubeletCgroupsName != "" {
		cont := newSystemCgroups(cm.KubeletCgroupsName)
		manager := fs.Manager{
			Cgroups: &configs.Cgroup{
				Parent: "/",
				Name:   cm.KubeletCgroupsName,
				Resources: &configs.Resources{
					AllowAllDevices: true,
				},
			},
		}
		cont.ensureStateFunc = func(_ *fs.Manager) error {
			return manager.Apply(os.Getpid())
		}
		systemContainers = append(systemContainers, cont)
	} else {
		cm.periodicTasks = append(cm.periodicTasks, func() {
			cont, err := getContainer(os.Getpid())
			if err != nil {
				glog.Errorf("failed to find cgroups of kubelet - %v", err)
				return
			}
			cm.Lock()
			defer cm.Unlock()

			cm.KubeletCgroupsName = cont
		})
	}

	cm.systemContainers = systemContainers
	return nil
}
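The memory limit handed to the runtime's cgroup in the example above is a fixed percentage of machine capacity with a floor. The sketch below restates that calculation in isolation; both constants are assumed values for illustration, the real ones are defined in the container manager package.
const (
	// Assumed values for illustration only.
	dockerMemoryLimitThresholdPercent = 70
	minDockerMemoryLimit              = 150 * 1024 * 1024 // 150Mi
)

// runtimeMemoryLimit mirrors the calculation used in setupNode: take a
// percentage of machine memory, but never go below the minimum.
func runtimeMemoryLimit(capacity api.ResourceList) int64 {
	limit := capacity.Memory().Value() * dockerMemoryLimitThresholdPercent / 100
	if limit < minDockerMemoryLimit {
		limit = minDockerMemoryLimit
	}
	return limit
}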
Code example #7
func (cm *containerManagerImpl) setupNode() error {
	if err := validateSystemRequirements(cm.mountUtil); err != nil {
		return err
	}
	systemContainers := []*systemContainer{}
	if cm.dockerDaemonContainerName != "" {
		cont := newSystemContainer(cm.dockerDaemonContainerName)

		info, err := cm.cadvisorInterface.MachineInfo()
		var capacity = api.ResourceList{}
		// An error here is deliberately ignored: capacity stays empty, so the
		// memory limit below falls back to MinDockerMemoryLimit.
		if err == nil {
			capacity = CapacityFromMachineInfo(info)
		}
		memoryLimit := (int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100))
		if memoryLimit < MinDockerMemoryLimit {
			glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.dockerDaemonContainerName, MinDockerMemoryLimit)
			memoryLimit = MinDockerMemoryLimit
		}

		glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.dockerDaemonContainerName, memoryLimit)

		dockerContainer := &fs.Manager{
			Cgroups: &configs.Cgroup{
				Name:            cm.dockerDaemonContainerName,
				Memory:          memoryLimit,
				MemorySwap:      -1,
				AllowAllDevices: true,
			},
		}
		cont.ensureStateFunc = func(manager *fs.Manager) error {
			return ensureDockerInContainer(cm.cadvisorInterface, -900, dockerContainer)
		}
		systemContainers = append(systemContainers, cont)
	}

	if cm.systemContainerName != "" {
		if cm.systemContainerName == "/" {
			return fmt.Errorf("system container cannot be root (\"/\")")
		}

		rootContainer := &fs.Manager{
			Cgroups: &configs.Cgroup{
				Name: "/",
			},
		}
		manager := createManager(cm.systemContainerName)

		err := ensureSystemContainer(rootContainer, manager)
		if err != nil {
			return err
		}
		systemContainers = append(systemContainers, newSystemContainer(cm.systemContainerName))
	}

	if cm.kubeletContainerName != "" {
		systemContainers = append(systemContainers, newSystemContainer(cm.kubeletContainerName))
	}
	cm.systemContainers = systemContainers
	return nil
}
Code example #8
func (cm *containerManagerImpl) setupNode() error {
	f, err := validateSystemRequirements(cm.mountUtil)
	if err != nil {
		return err
	}
	if !f.cpuHardcapping {
		cm.status.SoftRequirements = fmt.Errorf("CPU hardcapping unsupported")
	}
	b := KernelTunableModify
	if cm.GetNodeConfig().ProtectKernelDefaults {
		b = KernelTunableError
	}
	if err := setupKernelTunables(b); err != nil {
		return err
	}

	// Setup top level qos containers only if CgroupsPerQOS flag is specified as true
	if cm.NodeConfig.CgroupsPerQOS {
		qosContainersInfo, err := InitQOS(cm.NodeConfig.CgroupDriver, cm.NodeConfig.CgroupRoot, cm.subsystems)
		if err != nil {
			return fmt.Errorf("failed to initialise top level QOS containers: %v", err)
		}
		cm.qosContainers = qosContainersInfo
	}

	systemContainers := []*systemContainer{}
	if cm.ContainerRuntime == "docker" {
		dockerVersion := getDockerVersion(cm.cadvisorInterface)
		if cm.EnableCRI {
			// If kubelet uses CRI, dockershim will manage the cgroups and oom
			// score for the docker processes.
			// In the future, NodeSpec should mandate the cgroup that the
			// runtime processes need to be in. For now, we still check the
			// cgroup for docker periodically, so that kubelet can recognize
			// the cgroup for docker and serve stats for the runtime.
			// TODO(#27097): Fix this after NodeSpec is clearly defined.
			cm.periodicTasks = append(cm.periodicTasks, func() {
				glog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration")
				cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
				if err != nil {
					glog.Error(err)
					return
				}
				glog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont)
				cm.Lock()
				defer cm.Unlock()
				cm.RuntimeCgroupsName = cont
			})
		} else if cm.RuntimeCgroupsName != "" {
			cont := newSystemCgroups(cm.RuntimeCgroupsName)
			var capacity = api.ResourceList{}
			if info, err := cm.cadvisorInterface.MachineInfo(); err == nil {
				capacity = cadvisor.CapacityFromMachineInfo(info)
			}
			memoryLimit := (int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100))
			if memoryLimit < MinDockerMemoryLimit {
				glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.RuntimeCgroupsName, MinDockerMemoryLimit)
				memoryLimit = MinDockerMemoryLimit
			}

			glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.RuntimeCgroupsName, memoryLimit)
			allowAllDevices := true
			dockerContainer := &fs.Manager{
				Cgroups: &configs.Cgroup{
					Parent: "/",
					Name:   cm.RuntimeCgroupsName,
					Resources: &configs.Resources{
						Memory:          memoryLimit,
						MemorySwap:      -1,
						AllowAllDevices: &allowAllDevices,
					},
				},
			}
			cont.ensureStateFunc = func(manager *fs.Manager) error {
				return EnsureDockerInContainer(dockerVersion, qos.DockerOOMScoreAdj, dockerContainer)
			}
			systemContainers = append(systemContainers, cont)
		} else {
			cm.periodicTasks = append(cm.periodicTasks, func() {
				glog.V(10).Infof("Adding docker daemon periodic tasks")
				if err := EnsureDockerInContainer(dockerVersion, qos.DockerOOMScoreAdj, nil); err != nil {
					glog.Error(err)
					return
				}
				cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
				if err != nil {
					glog.Error(err)
					return
				}
				glog.V(2).Infof("Discovered runtime cgroups name: %s", cont)
				cm.Lock()
				defer cm.Unlock()
				cm.RuntimeCgroupsName = cont
			})
		}
	}

	if cm.SystemCgroupsName != "" {
		if cm.SystemCgroupsName == "/" {
			return fmt.Errorf("system container cannot be root (\"/\")")
		}
		cont := newSystemCgroups(cm.SystemCgroupsName)
		cont.ensureStateFunc = func(manager *fs.Manager) error {
			return ensureSystemCgroups("/", manager)
		}
		systemContainers = append(systemContainers, cont)
	}

	if cm.KubeletCgroupsName != "" {
		cont := newSystemCgroups(cm.KubeletCgroupsName)
		allowAllDevices := true
		manager := fs.Manager{
			Cgroups: &configs.Cgroup{
				Parent: "/",
				Name:   cm.KubeletCgroupsName,
				Resources: &configs.Resources{
					AllowAllDevices: &allowAllDevices,
				},
			},
		}
		cont.ensureStateFunc = func(_ *fs.Manager) error {
			return ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, &manager)
		}
		systemContainers = append(systemContainers, cont)
	} else {
		cm.periodicTasks = append(cm.periodicTasks, func() {
			if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil {
				glog.Error(err)
				return
			}
			cont, err := getContainer(os.Getpid())
			if err != nil {
				glog.Errorf("failed to find cgroups of kubelet - %v", err)
				return
			}
			cm.Lock()
			defer cm.Unlock()

			cm.KubeletCgroupsName = cont
		})
	}

	cm.systemContainers = systemContainers
	return nil
}
Code example #9
// TODO(vmarmol): Add limits to the system containers.
// Takes the absolute name of the specified containers.
// Empty container name disables use of the specified container.
func newContainerManager(cadvisorInterface cadvisor.Interface, dockerDaemonContainerName, systemContainerName, kubeletContainerName string) (containerManager, error) {
	systemContainers := []*systemContainer{}

	if dockerDaemonContainerName != "" {
		cont := newSystemContainer(dockerDaemonContainerName)

		info, err := cadvisorInterface.MachineInfo()
		var capacity = api.ResourceList{}
		// An error here is deliberately ignored: capacity stays empty, so the
		// memory limit below falls back to MinDockerMemoryLimit.
		if err == nil {
			capacity = CapacityFromMachineInfo(info)
		}
		memoryLimit := (int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100))
		if memoryLimit < MinDockerMemoryLimit {
			glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, dockerDaemonContainerName, MinDockerMemoryLimit)
			memoryLimit = MinDockerMemoryLimit
		}

		glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", dockerDaemonContainerName, memoryLimit)

		dockerContainer := &fs.Manager{
			Cgroups: &configs.Cgroup{
				Name:            dockerDaemonContainerName,
				Memory:          memoryLimit,
				MemorySwap:      -1,
				AllowAllDevices: true,
			},
		}
		cont.ensureStateFunc = func(manager *fs.Manager) error {
			return ensureDockerInContainer(cadvisorInterface, -900, dockerContainer)
		}
		systemContainers = append(systemContainers, cont)
	}

	if systemContainerName != "" {
		if systemContainerName == "/" {
			return nil, fmt.Errorf("system container cannot be root (\"/\")")
		}

		rootContainer := &fs.Manager{
			Cgroups: &configs.Cgroup{
				Name: "/",
			},
		}
		manager := createManager(systemContainerName)

		err := ensureSystemContainer(rootContainer, manager)
		if err != nil {
			return nil, err
		}
		systemContainers = append(systemContainers, newSystemContainer(systemContainerName))
	}

	if kubeletContainerName != "" {
		systemContainers = append(systemContainers, newSystemContainer(kubeletContainerName))
	}

	// TODO(vmarmol): Add Kube-proxy container.

	return &containerManagerImpl{
		systemContainers: systemContainers,
	}, nil
}