// GetNonzeroRequests returns the default resource request if none is found or what is provided on the request // TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity v1.ResourceList" // as an additional argument here) rather than using constants func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) { var outMilliCPU, outMemory int64 // Override if un-set, but not if explicitly set to zero if _, found := (*requests)[v1.ResourceCPU]; !found { outMilliCPU = DefaultMilliCpuRequest } else { outMilliCPU = requests.Cpu().MilliValue() } // Override if un-set, but not if explicitly set to zero if _, found := (*requests)[v1.ResourceMemory]; !found { outMemory = DefaultMemoryRequest } else { outMemory = requests.Memory().Value() } return outMilliCPU, outMemory }
// setupNode validates host requirements, applies kernel tunables, optionally
// creates the top-level QoS cgroups, and registers the "system containers"
// (runtime, system, kubelet) that the container manager keeps in the desired
// cgroup state. Work that cannot be completed immediately (discovering the
// docker or kubelet cgroup at runtime) is deferred into cm.periodicTasks.
func (cm *containerManagerImpl) setupNode() error {
	f, err := validateSystemRequirements(cm.mountUtil)
	if err != nil {
		return err
	}
	// Missing CPU hard-capping support is recorded as a soft (non-fatal)
	// requirement failure rather than aborting setup.
	if !f.cpuHardcapping {
		cm.status.SoftRequirements = fmt.Errorf("CPU hardcapping unsupported")
	}
	// By default kernel tunables may be modified in place; with
	// ProtectKernelDefaults a mismatch is treated as an error instead.
	b := KernelTunableModify
	if cm.GetNodeConfig().ProtectKernelDefaults {
		b = KernelTunableError
	}
	if err := setupKernelTunables(b); err != nil {
		return err
	}

	// Setup top level qos containers only if CgroupsPerQOS flag is specified as true
	if cm.NodeConfig.CgroupsPerQOS {
		qosContainersInfo, err := InitQOS(cm.NodeConfig.CgroupDriver, cm.NodeConfig.CgroupRoot, cm.subsystems)
		if err != nil {
			return fmt.Errorf("failed to initialise top level QOS containers: %v", err)
		}
		cm.qosContainers = qosContainersInfo
	}

	systemContainers := []*systemContainer{}
	if cm.ContainerRuntime == "docker" {
		dockerVersion := getDockerVersion(cm.cadvisorInterface)
		if cm.EnableCRI {
			// If kubelet uses CRI, dockershim will manage the cgroups and oom
			// score for the docker processes.
			// In the future, NodeSpec should mandate the cgroup that the
			// runtime processes need to be in. For now, we still check the
			// cgroup for docker periodically, so that kubelet can recognize
			// the cgroup for docker and serve stats for the runtime.
			// TODO(#27097): Fix this after NodeSpec is clearly defined.
			cm.periodicTasks = append(cm.periodicTasks, func() {
				glog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration")
				cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
				if err != nil {
					// Best-effort: log and retry on the next periodic run.
					glog.Error(err)
					return
				}
				glog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont)
				// RuntimeCgroupsName is shared state; update it under cm's lock.
				cm.Lock()
				defer cm.Unlock()
				cm.RuntimeCgroupsName = cont
			})
		} else if cm.RuntimeCgroupsName != "" {
			// An explicit runtime cgroup was configured: manage docker's
			// cgroup directly as a system container with a memory limit.
			cont := newSystemCgroups(cm.RuntimeCgroupsName)
			// Capacity defaults to empty (memory reads as 0) if cadvisor
			// machine info is unavailable; the minimum-limit clamp below
			// then takes effect.
			var capacity = v1.ResourceList{}
			if info, err := cm.cadvisorInterface.MachineInfo(); err == nil {
				capacity = cadvisor.CapacityFromMachineInfo(info)
			}
			// Limit docker to a fixed percentage of machine memory, but
			// never below the configured minimum.
			memoryLimit := (int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100))
			if memoryLimit < MinDockerMemoryLimit {
				glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.RuntimeCgroupsName, MinDockerMemoryLimit)
				memoryLimit = MinDockerMemoryLimit
			}

			glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.RuntimeCgroupsName, memoryLimit)

			allowAllDevices := true
			// MemorySwap: -1 disables the swap limit for the runtime cgroup.
			dockerContainer := &fs.Manager{
				Cgroups: &configs.Cgroup{
					Parent: "/",
					Name:   cm.RuntimeCgroupsName,
					Resources: &configs.Resources{
						Memory:          memoryLimit,
						MemorySwap:      -1,
						AllowAllDevices: &allowAllDevices,
					},
				},
			}
			cont.ensureStateFunc = func(manager *fs.Manager) error {
				return EnsureDockerInContainer(dockerVersion, qos.DockerOOMScoreAdj, dockerContainer)
			}
			systemContainers = append(systemContainers, cont)
		} else {
			// No runtime cgroup configured: periodically nudge docker's OOM
			// score and discover the cgroup it actually runs in.
			cm.periodicTasks = append(cm.periodicTasks, func() {
				glog.V(10).Infof("Adding docker daemon periodic tasks")
				if err := EnsureDockerInContainer(dockerVersion, qos.DockerOOMScoreAdj, nil); err != nil {
					glog.Error(err)
					return
				}
				cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
				if err != nil {
					glog.Error(err)
					return
				}
				glog.V(2).Infof("Discovered runtime cgroups name: %s", cont)
				// Publish the discovered cgroup name under cm's lock.
				cm.Lock()
				defer cm.Unlock()
				cm.RuntimeCgroupsName = cont
			})
		}
	}

	if cm.SystemCgroupsName != "" {
		// The system container holds non-kernel processes that are not
		// already in a cgroup; it must not be the root cgroup.
		if cm.SystemCgroupsName == "/" {
			return fmt.Errorf("system container cannot be root (\"/\")")
		}
		cont := newSystemCgroups(cm.SystemCgroupsName)
		cont.ensureStateFunc = func(manager *fs.Manager) error {
			return ensureSystemCgroups("/", manager)
		}
		systemContainers = append(systemContainers, cont)
	}

	if cm.KubeletCgroupsName != "" {
		// An explicit kubelet cgroup was configured: move the kubelet
		// process into it and set its OOM score adjustment.
		cont := newSystemCgroups(cm.KubeletCgroupsName)
		allowAllDevices := true
		manager := fs.Manager{
			Cgroups: &configs.Cgroup{
				Parent: "/",
				Name:   cm.KubeletCgroupsName,
				Resources: &configs.Resources{
					AllowAllDevices: &allowAllDevices,
				},
			},
		}
		cont.ensureStateFunc = func(_ *fs.Manager) error {
			return ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, &manager)
		}
		systemContainers = append(systemContainers, cont)
	} else {
		// No kubelet cgroup configured: periodically set the OOM score and
		// record whatever cgroup the kubelet process is found in.
		cm.periodicTasks = append(cm.periodicTasks, func() {
			if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil {
				glog.Error(err)
				return
			}
			cont, err := getContainer(os.Getpid())
			if err != nil {
				glog.Errorf("failed to find cgroups of kubelet - %v", err)
				return
			}
			// Publish the discovered cgroup name under cm's lock.
			cm.Lock()
			defer cm.Unlock()
			cm.KubeletCgroupsName = cont
		})
	}

	cm.systemContainers = systemContainers
	return nil
}