// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed
// attach-detach annotation on a new node and the existing node, returning
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool {
	var (
		existingCMAAnnotation    = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation]
		newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation]
	)

	if newCMAAnnotation == existingCMAAnnotation {
		return false
	}

	// If the just-constructed node and the existing node do
	// not have the same value, update the existing node with
	// the correct value of the annotation.
	if !newSet {
		glog.Info("Controller attach-detach setting changed to false; updating existing Node")
		delete(existingNode.Annotations, volumehelper.ControllerManagedAttachAnnotation)
	} else {
		glog.Info("Controller attach-detach setting changed to true; updating existing Node")
		if existingNode.Annotations == nil {
			existingNode.Annotations = make(map[string]string)
		}
		existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] = newCMAAnnotation
	}

	return true
}
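// Usage sketch (illustrative, not kubelet code): two minimal nodes that
// disagree on the controller-managed attach-detach annotation. `kl` is
// assumed to be the Kubelet from the method above, and the literal value
// "true" is an assumption about what the kubelet writes when the flag is on.
newNode := &v1.Node{}
newNode.Annotations = map[string]string{
	volumehelper.ControllerManagedAttachAnnotation: "true",
}
existingNode := &v1.Node{} // existing object has no annotation set

// The annotation is copied onto existingNode and true is returned,
// signalling that the existing node must be written back.
needsUpdate := kl.reconcileCMADAnnotationWithExistingNode(newNode, existingNode)
_ = needsUpdate // true in this scenario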
// PatchNodeStatus patches node status.
func PatchNodeStatus(c clientset.Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, error) {
	oldData, err := json.Marshal(oldNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
	}

	// Reset spec to make sure only a patch for Status or ObjectMeta is generated.
	// Note that we don't reset ObjectMeta here, because:
	// 1. This aligns with Nodes().UpdateStatus().
	// 2. Some components do use this to update node annotations.
	newNode.Spec = oldNode.Spec
	newData, err := json.Marshal(newNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNode, nodeName, err)
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		return nil, fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
	}

	updatedNode, err := c.Core().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes, "status")
	if err != nil {
		return nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err)
	}
	return updatedNode, nil
}
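// To see the kind of patch PatchNodeStatus sends to the API server, the
// standalone sketch below computes a two-way strategic merge patch for a
// status-only change. The import paths match the vintage of the code above
// and may differ in newer releases; the printed patch is approximate.
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/strategicpatch"
)

func main() {
	oldNode := v1.Node{}
	newNode := v1.Node{}
	newNode.Status.Phase = v1.NodeRunning // the only field that differs

	oldData, _ := json.Marshal(oldNode)
	newData, _ := json.Marshal(newNode)

	// Same call PatchNodeStatus makes: diff the two JSON documents using
	// v1.Node's struct tags to pick merge semantics for lists and maps.
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", patchBytes) // roughly: {"status":{"phase":"Running"}}
}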
func addToBeDeletedTaint(node *apiv1.Node) (bool, error) {
	taints, err := apiv1.GetTaintsFromNodeAnnotations(node.Annotations)
	if err != nil {
		glog.Warningf("Error while getting Taints for node %v: %v", node.Name, err)
		return false, err
	}
	for _, taint := range taints {
		if taint.Key == ToBeDeletedTaint {
			glog.Infof("ToBeDeletedTaint %v already present on node %v", taint, node.Name)
			return false, nil
		}
	}
	taints = append(taints, apiv1.Taint{
		Key:    ToBeDeletedTaint,
		Value:  time.Now().String(),
		Effect: apiv1.TaintEffectNoSchedule,
	})
	taintsJson, err := json.Marshal(taints)
	if err != nil {
		glog.Warningf("Error while adding taints on node %v: %v", node.Name, err)
		return false, err
	}
	if node.Annotations == nil {
		node.Annotations = make(map[string]string)
	}
	node.Annotations[apiv1.TaintsAnnotationKey] = string(taintsJson)
	return true, nil
}
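// Usage sketch (illustrative, not actual cluster-autoscaler code): gate the
// API write on the returned bool so the node is only updated when the taint
// annotation actually changed. `client` is assumed to be a clientset of the
// same vintage as the code above (hence Core().Nodes().Update).
added, err := addToBeDeletedTaint(node)
if err != nil {
	return err
}
if added {
	if _, err := client.Core().Nodes().Update(node); err != nil {
		glog.Warningf("Error while updating node %v: %v", node.Name, err)
		return err
	}
}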
func TestBuildSummary(t *testing.T) {
	node := k8sv1.Node{}
	node.Name = "FooNode"
	nodeConfig := cm.NodeConfig{
		RuntimeCgroupsName: "/docker-daemon",
		SystemCgroupsName:  "/system",
		KubeletCgroupsName: "/kubelet",
	}
	const (
		namespace0 = "test0"
		namespace2 = "test2"
	)
	const (
		seedRoot           = 0
		seedRuntime        = 100
		seedKubelet        = 200
		seedMisc           = 300
		seedPod0Infra      = 1000
		seedPod0Container0 = 2000
		seedPod0Container1 = 2001
		seedPod1Infra      = 3000
		seedPod1Container  = 4000
		seedPod2Infra      = 5000
		seedPod2Container  = 6000
	)
	const (
		pName0 = "pod0"
		pName1 = "pod1"
		pName2 = "pod0" // ensure pName2 conflicts with pName0, but is in a different namespace
	)
	const (
		cName00 = "c0"
		cName01 = "c1"
		cName10 = "c0" // ensure cName10 conflicts with cName00, but is in a different pod
		cName20 = "c1" // ensure cName20 conflicts with cName01, but is in a different pod + namespace
	)
	const (
		rootfsCapacity    = uint64(10000000)
		rootfsAvailable   = uint64(5000000)
		rootfsInodesFree  = uint64(1000)
		rootfsInodes      = uint64(2000)
		imagefsCapacity   = uint64(20000000)
		imagefsAvailable  = uint64(8000000)
		imagefsInodesFree = uint64(2000)
		imagefsInodes     = uint64(4000)
	)

	prf0 := kubestats.PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0}
	prf1 := kubestats.PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1}
	prf2 := kubestats.PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2}
	infos := map[string]v2.ContainerInfo{
		"/":              summaryTestContainerInfo(seedRoot, "", "", ""),
		"/docker-daemon": summaryTestContainerInfo(seedRuntime, "", "", ""),
		"/kubelet":       summaryTestContainerInfo(seedKubelet, "", "", ""),
		"/system":        summaryTestContainerInfo(seedMisc, "", "", ""),
		// Pod0 - Namespace0
		"/pod0-i":  summaryTestContainerInfo(seedPod0Infra, pName0, namespace0, leaky.PodInfraContainerName),
		"/pod0-c0": summaryTestContainerInfo(seedPod0Container0, pName0, namespace0, cName00),
		"/pod0-c1": summaryTestContainerInfo(seedPod0Container1, pName0, namespace0, cName01),
		// Pod1 - Namespace0
		"/pod1-i":  summaryTestContainerInfo(seedPod1Infra, pName1, namespace0, leaky.PodInfraContainerName),
		"/pod1-c0": summaryTestContainerInfo(seedPod1Container, pName1, namespace0, cName10),
		// Pod2 - Namespace2
		"/pod2-i":  summaryTestContainerInfo(seedPod2Infra, pName2, namespace2, leaky.PodInfraContainerName),
		"/pod2-c0": summaryTestContainerInfo(seedPod2Container, pName2, namespace2, cName20),
	}

	freeRootfsInodes := rootfsInodesFree
	totalRootfsInodes := rootfsInodes
	rootfs := v2.FsInfo{
		Capacity:   rootfsCapacity,
		Available:  rootfsAvailable,
		InodesFree: &freeRootfsInodes,
		Inodes:     &totalRootfsInodes,
	}

	freeImagefsInodes := imagefsInodesFree
	totalImagefsInodes := imagefsInodes
	imagefs := v2.FsInfo{
		Capacity:   imagefsCapacity,
		Available:  imagefsAvailable,
		InodesFree: &freeImagefsInodes,
		Inodes:     &totalImagefsInodes,
	}

	// memory limit overrides for each container (used to test available bytes if a memory limit is known)
	memoryLimitOverrides := map[string]uint64{
		"/":        uint64(1 << 30),
		"/pod2-c0": uint64(1 << 15),
	}
	for name, memoryLimitOverride := range memoryLimitOverrides {
		info, found := infos[name]
		if !found {
			t.Errorf("No container defined with name %v", name)
		}
		info.Spec.Memory.Limit = memoryLimitOverride
		infos[name] = info
	}

	sb := &summaryBuilder{
		newFsResourceAnalyzer(&MockStatsProvider{}, time.Minute*5), &node, nodeConfig, rootfs, imagefs, container.ImageStats{}, infos}
	summary, err := sb.build()

	assert.NoError(t, err)
	nodeStats := summary.Node
	assert.Equal(t, "FooNode", nodeStats.NodeName)
	assert.EqualValues(t, testTime(creationTime, seedRoot).Unix(), nodeStats.StartTime.Time.Unix())
	checkCPUStats(t, "Node", seedRoot, nodeStats.CPU)
	checkMemoryStats(t, "Node", seedRoot, infos["/"], nodeStats.Memory)
	checkNetworkStats(t, "Node", seedRoot, nodeStats.Network)

	systemSeeds := map[string]int{
		kubestats.SystemContainerRuntime: seedRuntime,
		kubestats.SystemContainerKubelet: seedKubelet,
		kubestats.SystemContainerMisc:    seedMisc,
	}
	systemContainerToNodeCgroup := map[string]string{
		kubestats.SystemContainerRuntime: nodeConfig.RuntimeCgroupsName,
		kubestats.SystemContainerKubelet: nodeConfig.KubeletCgroupsName,
		kubestats.SystemContainerMisc:    nodeConfig.SystemCgroupsName,
	}
	for _, sys := range nodeStats.SystemContainers {
		name := sys.Name
		info := infos[systemContainerToNodeCgroup[name]]
		seed, found := systemSeeds[name]
		if !found {
			t.Errorf("Unknown SystemContainer: %q", name)
		}
		assert.EqualValues(t, testTime(creationTime, seed).Unix(), sys.StartTime.Time.Unix(), name+".StartTime")
		checkCPUStats(t, name, seed, sys.CPU)
		checkMemoryStats(t, name, seed, info, sys.Memory)
		assert.Nil(t, sys.Logs, name+".Logs")
		assert.Nil(t, sys.Rootfs, name+".Rootfs")
	}

	assert.Equal(t, 3, len(summary.Pods))
	indexPods := make(map[kubestats.PodReference]kubestats.PodStats, len(summary.Pods))
	for _, pod := range summary.Pods {
		indexPods[pod.PodRef] = pod
	}

	// Validate Pod0 Results
	ps, found := indexPods[prf0]
	assert.True(t, found)
	assert.Len(t, ps.Containers, 2)
	indexCon := make(map[string]kubestats.ContainerStats, len(ps.Containers))
	for _, con := range ps.Containers {
		indexCon[con.Name] = con
	}
	con := indexCon[cName00]
	assert.EqualValues(t, testTime(creationTime, seedPod0Container0).Unix(), con.StartTime.Time.Unix())
	checkCPUStats(t, "Pod0Container0", seedPod0Container0, con.CPU)
	checkMemoryStats(t, "Pod0Container0", seedPod0Container0, infos["/pod0-c0"], con.Memory)

	con = indexCon[cName01]
	assert.EqualValues(t, testTime(creationTime, seedPod0Container1).Unix(), con.StartTime.Time.Unix())
	checkCPUStats(t, "Pod0Container1", seedPod0Container1, con.CPU)
	checkMemoryStats(t, "Pod0Container1", seedPod0Container1, infos["/pod0-c1"], con.Memory)

	assert.EqualValues(t, testTime(creationTime, seedPod0Infra).Unix(), ps.StartTime.Time.Unix())
	checkNetworkStats(t, "Pod0", seedPod0Infra, ps.Network)

	// Validate Pod1 Results
	ps, found = indexPods[prf1]
	assert.True(t, found)
	assert.Len(t, ps.Containers, 1)
	con = ps.Containers[0]
	assert.Equal(t, cName10, con.Name)
	checkCPUStats(t, "Pod1Container0", seedPod1Container, con.CPU)
	checkMemoryStats(t, "Pod1Container0", seedPod1Container, infos["/pod1-c0"], con.Memory)
	checkNetworkStats(t, "Pod1", seedPod1Infra, ps.Network)

	// Validate Pod2 Results
	ps, found = indexPods[prf2]
	assert.True(t, found)
	assert.Len(t, ps.Containers, 1)
	con = ps.Containers[0]
	assert.Equal(t, cName20, con.Name)
	checkCPUStats(t, "Pod2Container0", seedPod2Container, con.CPU)
	checkMemoryStats(t, "Pod2Container0", seedPod2Container, infos["/pod2-c0"], con.Memory)
	checkNetworkStats(t, "Pod2", seedPod2Infra, ps.Network)
}