// verifyResource verifies whether resource usage satisfies the limit.
func verifyResource(f *framework.Framework, cpuLimits framework.ContainersCPUSummary,
	memLimits framework.ResourceUsagePerContainer, rc *ResourceCollector) {
	nodeName := framework.TestContext.NodeName

	// Obtain memory PerfData
	usagePerContainer, err := rc.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("%s", formatResourceUsageStats(usagePerContainer))

	usagePerNode := make(framework.ResourceUsagePerNode)
	usagePerNode[nodeName] = usagePerContainer

	// Obtain cpu PerfData
	cpuSummary := rc.GetCPUSummary()
	framework.Logf("%s", formatCPUSummary(cpuSummary))

	cpuSummaryPerNode := make(framework.NodesCPUSummary)
	cpuSummaryPerNode[nodeName] = cpuSummary

	// Log resource usage
	framework.PrintPerfData(framework.ResourceUsageToPerfData(usagePerNode))
	framework.PrintPerfData(framework.CPUUsageToPerfData(cpuSummaryPerNode))

	// Verify resource usage
	verifyMemoryLimits(f.Client, memLimits, usagePerNode)
	verifyCPULimits(cpuLimits, cpuSummaryPerNode)
}
// verifyResource verifies whether resource usage satisfies the limits
// carried in the test-case argument, logging perf data along the way.
func verifyResource(f *framework.Framework, testArg DensityTest, rc *ResourceCollector) {
	nodeName := framework.TestContext.NodeName

	// verify and log memory
	usagePerContainer, err := rc.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("%s", formatResourceUsageStats(usagePerContainer))

	usagePerNode := make(framework.ResourceUsagePerNode)
	usagePerNode[nodeName] = usagePerContainer

	memPerfData := framework.ResourceUsageToPerfData(usagePerNode)
	framework.PrintPerfData(memPerfData)

	verifyMemoryLimits(f.Client, testArg.memLimits, usagePerNode)

	// verify and log cpu
	cpuSummary := rc.GetCPUSummary()
	framework.Logf("%s", formatCPUSummary(cpuSummary))

	cpuSummaryPerNode := make(framework.NodesCPUSummary)
	cpuSummaryPerNode[nodeName] = cpuSummary

	cpuPerfData := framework.CPUUsageToPerfData(cpuSummaryPerNode)
	framework.PrintPerfData(cpuPerfData)

	verifyCPULimits(testArg.cpuLimits, cpuSummaryPerNode)
}
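// The two verifyResource variants above differ only in whether the limits
// arrive individually or bundled in a test-case struct. Below is a minimal
// sketch (not from the original suite) of how a caller might build the
// limits: the container keys come from the kubelet stats package, while the
// numeric thresholds and the function name are illustrative assumptions.
func exampleVerifyResourceUsage(f *framework.Framework, rc *ResourceCollector) {
	cpuLimits := framework.ContainersCPUSummary{
		// container name -> percentile -> max allowed CPU usage in cores
		stats.SystemContainerKubelet: {0.50: 0.20, 0.95: 0.50},
		stats.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
	}
	memLimits := framework.ResourceUsagePerContainer{
		// container name -> max allowed memory RSS in bytes
		stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
		stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 400 * 1024 * 1024},
	}
	verifyResource(f, cpuLimits, memLimits, rc)
}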
// runResourceTrackingTest creates an RC of pause pods sized to podsPerNode,
// monitors node resource usage for monitoringTime, logs the perf data, and
// verifies usage against the expected CPU and memory limits.
func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String,
	rm *framework.ResourceMonitor, expectedCPU map[string]map[float64]float64,
	expectedMemory framework.ResourceUsagePerContainer) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(framework.RunRC(testutils.RCConfig{
		Client:         f.ClientSet,
		InternalClient: f.InternalClientset,
		Name:           rcName,
		Namespace:      f.Namespace.Name,
		Image:          framework.GetPauseImageName(f.ClientSet),
		Replicas:       totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	rm.LogLatest()
	rm.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(f.ClientSet, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(f.ClientSet, nodeNames.List())
	usageSummary, err := rm.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	// TODO(random-liu): Remove the original log when we migrate to new perfdash
	framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
	// Log perf result
	framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
	verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)

	cpuSummary := rm.GetCPUSummary()
	framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
	// Log perf result
	framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
	verifyCPULimits(expectedCPU, cpuSummary)

	By("Deleting the RC")
	framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}
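// A hedged sketch (not part of the original file) of how runResourceTrackingTest
// is typically driven from a table of resourceTest cases; the rTests slice, the
// rm monitor, the nodeNames set, and the function name itself are assumptions
// standing in for setup done by the surrounding Describe/BeforeEach blocks.
func exampleResourceTrackingSuite(f *framework.Framework, rTests []resourceTest,
	nodeNames sets.String, rm *framework.ResourceMonitor) {
	for _, testArg := range rTests {
		itArg := testArg // capture the range variable for the closure below
		name := fmt.Sprintf("resource tracking for %d pods per node", itArg.podsPerNode)
		It(name, func() {
			runResourceTrackingTest(f, itArg.podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits)
		})
	}
}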
				framework.Logf("%s", formatResourceUsageStats(usagePerContainer))

				// Log perf result
				usagePerNode := make(framework.ResourceUsagePerNode)
				usagePerNode[nodeName] = usagePerContainer
				framework.PrintPerfData(framework.ResourceUsageToPerfData(usagePerNode))
				verifyMemoryLimits(f.Client, expectedMemory, usagePerNode)

				cpuSummary := rc.GetCPUSummary()
				framework.Logf("%s", formatCPUSummary(cpuSummary))

				// Log perf result
				cpuSummaryPerNode := make(framework.NodesCPUSummary)
				cpuSummaryPerNode[nodeName] = cpuSummary
				framework.PrintPerfData(framework.CPUUsageToPerfData(cpuSummaryPerNode))
				verifyCPULimits(expectedCPU, cpuSummaryPerNode)
			})
		}
	})
})

type resourceTest struct {
	podsPerNode int
	cpuLimits   framework.ContainersCPUSummary
	memLimits   framework.ResourceUsagePerContainer
}

func verifyMemoryLimits(c *client.Client, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
	if expected == nil {
		return