func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) {
	// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
	// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
	info, err := kl.GetCachedMachineInfo()
	if err != nil {
		// TODO(roberthbailey): This is required for test-cmd.sh to pass.
		// See if the test should be updated instead.
		node.Status.Capacity = api.ResourceList{
			api.ResourceCPU:       *resource.NewMilliQuantity(0, resource.DecimalSI),
			api.ResourceMemory:    resource.MustParse("0Gi"),
			api.ResourcePods:      *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI),
			api.ResourceNvidiaGPU: *resource.NewQuantity(int64(kl.nvidiaGPUs), resource.DecimalSI),
		}
		glog.Errorf("Error getting machine info: %v", err)
	} else {
		node.Status.NodeInfo.MachineID = info.MachineID
		node.Status.NodeInfo.SystemUUID = info.SystemUUID
		node.Status.Capacity = cadvisor.CapacityFromMachineInfo(info)
		if kl.podsPerCore > 0 {
			node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
				int64(math.Min(float64(info.NumCores*kl.podsPerCore), float64(kl.maxPods))), resource.DecimalSI)
		} else {
			node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
				int64(kl.maxPods), resource.DecimalSI)
		}
		node.Status.Capacity[api.ResourceNvidiaGPU] = *resource.NewQuantity(
			int64(kl.nvidiaGPUs), resource.DecimalSI)
		if node.Status.NodeInfo.BootID != "" &&
			node.Status.NodeInfo.BootID != info.BootID {
			// TODO: This requires a transaction, either both node status is updated
			// and event is recorded or neither should happen, see issue #6055.
			kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.NodeRebooted,
				"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
		}
		node.Status.NodeInfo.BootID = info.BootID
	}

	// Set Allocatable.
	node.Status.Allocatable = make(api.ResourceList)
	for k, v := range node.Status.Capacity {
		value := *(v.Copy())
		if kl.reservation.System != nil {
			value.Sub(kl.reservation.System[k])
		}
		if kl.reservation.Kubernetes != nil {
			value.Sub(kl.reservation.Kubernetes[k])
		}
		if value.Sign() < 0 {
			// Negative Allocatable resources don't make sense.
			value.Set(0)
		}
		node.Status.Allocatable[k] = value
	}
}
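// Illustrative sketch (not part of the tree above): how the pod capacity in
// setNodeStatusMachineInfo scales with core count. podCapacity is a
// hypothetical helper; the real logic lives inline in the function above.
// Runnable as a standalone program.
package main

import (
	"fmt"
	"math"
)

// podCapacity mirrors the clamp above: when podsPerCore is set, capacity
// grows with the number of cores but never exceeds the maxPods ceiling.
func podCapacity(numCores, podsPerCore, maxPods int) int64 {
	if podsPerCore > 0 {
		return int64(math.Min(float64(numCores*podsPerCore), float64(maxPods)))
	}
	return int64(maxPods)
}

func main() {
	fmt.Println(podCapacity(8, 10, 110))  // 80: the core-scaled limit wins
	fmt.Println(podCapacity(32, 10, 110)) // 110: maxPods caps the result
}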
func (cm *containerManagerImpl) setupNode() error {
	f, err := validateSystemRequirements(cm.mountUtil)
	if err != nil {
		return err
	}
	if !f.cpuHardcapping {
		cm.status.SoftRequirements = fmt.Errorf("CPU hardcapping unsupported")
	}
	// TODO: plumb kernel tunable options into container manager, right now, we modify by default
	if err := setupKernelTunables(KernelTunableModify); err != nil {
		return err
	}

	systemContainers := []*systemContainer{}
	if cm.ContainerRuntime == "docker" {
		if cm.RuntimeCgroupsName != "" {
			cont := newSystemCgroups(cm.RuntimeCgroupsName)
			var capacity = api.ResourceList{}
			// If cAdvisor cannot report machine info, fall back to zero capacity;
			// the memory limit below is then floored at MinDockerMemoryLimit.
			if info, err := cm.cadvisorInterface.MachineInfo(); err == nil {
				capacity = cadvisor.CapacityFromMachineInfo(info)
			}
			memoryLimit := int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100)
			if memoryLimit < MinDockerMemoryLimit {
				glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.RuntimeCgroupsName, MinDockerMemoryLimit)
				memoryLimit = MinDockerMemoryLimit
			}

			glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.RuntimeCgroupsName, memoryLimit)
			dockerContainer := &fs.Manager{
				Cgroups: &configs.Cgroup{
					Parent: "/",
					Name:   cm.RuntimeCgroupsName,
					Resources: &configs.Resources{
						Memory:          memoryLimit,
						MemorySwap:      -1,
						AllowAllDevices: true,
					},
				},
			}
			dockerVersion := getDockerVersion(cm.cadvisorInterface)
			cont.ensureStateFunc = func(manager *fs.Manager) error {
				return ensureDockerInContainer(dockerVersion, -900, dockerContainer)
			}
			systemContainers = append(systemContainers, cont)
		} else {
			cm.periodicTasks = append(cm.periodicTasks, func() {
				cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
				if err != nil {
					glog.Error(err)
					return
				}
				glog.V(2).Infof("Discovered runtime cgroups name: %s", cont)
				cm.Lock()
				defer cm.Unlock()
				cm.RuntimeCgroupsName = cont
			})
		}
	}

	if cm.SystemCgroupsName != "" {
		if cm.SystemCgroupsName == "/" {
			return fmt.Errorf("system container cannot be root (\"/\")")
		}
		cont := newSystemCgroups(cm.SystemCgroupsName)
		rootContainer := &fs.Manager{
			Cgroups: &configs.Cgroup{
				Parent: "/",
				Name:   "/",
			},
		}
		cont.ensureStateFunc = func(manager *fs.Manager) error {
			return ensureSystemCgroups(rootContainer, manager)
		}
		systemContainers = append(systemContainers, cont)
	}

	if cm.KubeletCgroupsName != "" {
		cont := newSystemCgroups(cm.KubeletCgroupsName)
		manager := fs.Manager{
			Cgroups: &configs.Cgroup{
				Parent: "/",
				Name:   cm.KubeletCgroupsName,
				Resources: &configs.Resources{
					AllowAllDevices: true,
				},
			},
		}
		cont.ensureStateFunc = func(_ *fs.Manager) error {
			return manager.Apply(os.Getpid())
		}
		systemContainers = append(systemContainers, cont)
	} else {
		cm.periodicTasks = append(cm.periodicTasks, func() {
			cont, err := getContainer(os.Getpid())
			if err != nil {
				glog.Errorf("failed to find cgroups of kubelet - %v", err)
				return
			}
			cm.Lock()
			defer cm.Unlock()
			cm.KubeletCgroupsName = cont
		})
	}

	cm.systemContainers = systemContainers
	return nil
}
func (cm *containerManagerImpl) setupNode() error {
	f, err := validateSystemRequirements(cm.mountUtil)
	if err != nil {
		return err
	}
	if !f.cpuHardcapping {
		cm.status.SoftRequirements = fmt.Errorf("CPU hardcapping unsupported")
	}
	b := KernelTunableModify
	if cm.GetNodeConfig().ProtectKernelDefaults {
		b = KernelTunableError
	}
	if err := setupKernelTunables(b); err != nil {
		return err
	}

	// Setup top level qos containers only if CgroupsPerQOS flag is specified as true
	if cm.NodeConfig.CgroupsPerQOS {
		qosContainersInfo, err := InitQOS(cm.NodeConfig.CgroupDriver, cm.NodeConfig.CgroupRoot, cm.subsystems)
		if err != nil {
			return fmt.Errorf("failed to initialise top level QOS containers: %v", err)
		}
		cm.qosContainers = qosContainersInfo
	}

	systemContainers := []*systemContainer{}
	if cm.ContainerRuntime == "docker" {
		dockerVersion := getDockerVersion(cm.cadvisorInterface)
		if cm.EnableCRI {
			// If kubelet uses CRI, dockershim will manage the cgroups and oom
			// score for the docker processes.
			// In the future, NodeSpec should mandate the cgroup that the
			// runtime processes need to be in. For now, we still check the
			// cgroup for docker periodically, so that kubelet can recognize
			// the cgroup for docker and serve stats for the runtime.
			// TODO(#27097): Fix this after NodeSpec is clearly defined.
			cm.periodicTasks = append(cm.periodicTasks, func() {
				glog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration")
				cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
				if err != nil {
					glog.Error(err)
					return
				}
				glog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont)
				cm.Lock()
				defer cm.Unlock()
				cm.RuntimeCgroupsName = cont
			})
		} else if cm.RuntimeCgroupsName != "" {
			cont := newSystemCgroups(cm.RuntimeCgroupsName)
			var capacity = api.ResourceList{}
			if info, err := cm.cadvisorInterface.MachineInfo(); err == nil {
				capacity = cadvisor.CapacityFromMachineInfo(info)
			}
			memoryLimit := int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100)
			if memoryLimit < MinDockerMemoryLimit {
				glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.RuntimeCgroupsName, MinDockerMemoryLimit)
				memoryLimit = MinDockerMemoryLimit
			}

			glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.RuntimeCgroupsName, memoryLimit)
			allowAllDevices := true
			dockerContainer := &fs.Manager{
				Cgroups: &configs.Cgroup{
					Parent: "/",
					Name:   cm.RuntimeCgroupsName,
					Resources: &configs.Resources{
						Memory:          memoryLimit,
						MemorySwap:      -1,
						AllowAllDevices: &allowAllDevices,
					},
				},
			}
			cont.ensureStateFunc = func(manager *fs.Manager) error {
				return EnsureDockerInContainer(dockerVersion, qos.DockerOOMScoreAdj, dockerContainer)
			}
			systemContainers = append(systemContainers, cont)
		} else {
			cm.periodicTasks = append(cm.periodicTasks, func() {
				glog.V(10).Infof("Adding docker daemon periodic tasks")
				if err := EnsureDockerInContainer(dockerVersion, qos.DockerOOMScoreAdj, nil); err != nil {
					glog.Error(err)
					return
				}
				cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
				if err != nil {
					glog.Error(err)
					return
				}
				glog.V(2).Infof("Discovered runtime cgroups name: %s", cont)
				cm.Lock()
				defer cm.Unlock()
				cm.RuntimeCgroupsName = cont
			})
		}
	}

	if cm.SystemCgroupsName != "" {
		if cm.SystemCgroupsName == "/" {
			return fmt.Errorf("system container cannot be root (\"/\")")
		}
		cont := newSystemCgroups(cm.SystemCgroupsName)
		cont.ensureStateFunc = func(manager *fs.Manager) error {
			return ensureSystemCgroups("/", manager)
		}
		systemContainers = append(systemContainers, cont)
	}

	if cm.KubeletCgroupsName != "" {
		cont := newSystemCgroups(cm.KubeletCgroupsName)
		allowAllDevices := true
		manager := fs.Manager{
			Cgroups: &configs.Cgroup{
				Parent: "/",
				Name:   cm.KubeletCgroupsName,
				Resources: &configs.Resources{
					AllowAllDevices: &allowAllDevices,
				},
			},
		}
		cont.ensureStateFunc = func(_ *fs.Manager) error {
			return ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, &manager)
		}
		systemContainers = append(systemContainers, cont)
	} else {
		cm.periodicTasks = append(cm.periodicTasks, func() {
			if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil {
				glog.Error(err)
				return
			}
			cont, err := getContainer(os.Getpid())
			if err != nil {
				glog.Errorf("failed to find cgroups of kubelet - %v", err)
				return
			}
			cm.Lock()
			defer cm.Unlock()
			cm.KubeletCgroupsName = cont
		})
	}

	cm.systemContainers = systemContainers
	return nil
}
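// Illustrative sketch (not part of the tree above): the runtime-cgroup memory
// limit computed in setupNode is a fixed percentage of machine capacity with
// a floor. The constant values below are assumptions standing in for
// DockerMemoryLimitThresholdPercent and MinDockerMemoryLimit; runnable as a
// standalone program.
package main

import "fmt"

const (
	thresholdPercent     = 70                // assumed stand-in for DockerMemoryLimitThresholdPercent
	minDockerMemoryLimit = 150 * 1024 * 1024 // assumed stand-in for MinDockerMemoryLimit (150Mi)
)

// runtimeMemoryLimit mirrors the computation above: take a percentage of
// total machine memory, but never go below the configured minimum.
func runtimeMemoryLimit(machineMemoryBytes int64) int64 {
	limit := machineMemoryBytes * thresholdPercent / 100
	if limit < minDockerMemoryLimit {
		limit = minDockerMemoryLimit
	}
	return limit
}

func main() {
	fmt.Println(runtimeMemoryLimit(8 << 30))  // 70% of 8GiB
	fmt.Println(runtimeMemoryLimit(64 << 20)) // small machine: floored at the minimum
}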
func (cm *containerManagerImpl) setupNode() error {
	if err := validateSystemRequirements(cm.mountUtil); err != nil {
		return err
	}
	// TODO: plumb kernel tunable options into container manager, right now, we modify by default
	if err := setupKernelTunables(KernelTunableModify); err != nil {
		return err
	}

	systemContainers := []*systemContainer{}
	if cm.DockerDaemonContainerName != "" {
		cont := newSystemContainer(cm.DockerDaemonContainerName)
		var capacity = api.ResourceList{}
		// If machine info is unavailable, fall back to zero capacity; the
		// limit below is then floored at MinDockerMemoryLimit.
		if info, err := cm.cadvisorInterface.MachineInfo(); err == nil {
			capacity = cadvisor.CapacityFromMachineInfo(info)
		}
		memoryLimit := int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100)
		if memoryLimit < MinDockerMemoryLimit {
			glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.DockerDaemonContainerName, MinDockerMemoryLimit)
			memoryLimit = MinDockerMemoryLimit
		}

		glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.DockerDaemonContainerName, memoryLimit)
		dockerContainer := &fs.Manager{
			Cgroups: &configs.Cgroup{
				Name:            cm.DockerDaemonContainerName,
				Memory:          memoryLimit,
				MemorySwap:      -1,
				AllowAllDevices: true,
			},
		}
		cont.ensureStateFunc = func(manager *fs.Manager) error {
			return ensureDockerInContainer(cm.cadvisorInterface, -900, dockerContainer)
		}
		systemContainers = append(systemContainers, cont)
	}

	if cm.SystemContainerName != "" {
		if cm.SystemContainerName == "/" {
			return fmt.Errorf("system container cannot be root (\"/\")")
		}
		rootContainer := &fs.Manager{
			Cgroups: &configs.Cgroup{
				Name: "/",
			},
		}
		manager := createManager(cm.SystemContainerName)
		if err := ensureSystemContainer(rootContainer, manager); err != nil {
			return err
		}
		systemContainers = append(systemContainers, newSystemContainer(cm.SystemContainerName))
	}

	if cm.KubeletContainerName != "" {
		systemContainers = append(systemContainers, newSystemContainer(cm.KubeletContainerName))
	}

	cm.systemContainers = systemContainers
	return nil
}