Example 1
func formatCPUSummary(summary framework.ContainersCPUSummary) string {
	// Example output for a node (the percentiles may differ):
	// CPU usage of containers on node "e2e-test-foo-minion-0vj7":
	// container        5th%  50th% 90th% 95th%
	// "/"              0.051 0.159 0.387 0.455
	// "/runtime        0.000 0.000 0.146 0.166
	// "/kubelet"       0.036 0.053 0.091 0.154
	// "/misc"          0.001 0.001 0.001 0.002
	var summaryStrings []string
	var header []string
	header = append(header, "container")
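	// NOTE: percentiles is assumed to be defined elsewhere in the package as a
	// slice of fractions (e.g. 0.50 for the 50th percentile); it is not shown
	// in this snippet.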
	for _, p := range percentiles {
		header = append(header, fmt.Sprintf("%.0fth%%", p*100))
	}

	buf := &bytes.Buffer{}
	w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
	fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))

	for _, containerName := range framework.TargetContainers() {
		var s []string
		s = append(s, fmt.Sprintf("%q", containerName))
		data, ok := summary[containerName]
		for _, p := range percentiles {
			value := "N/A"
			if ok {
				value = fmt.Sprintf("%.3f", data[p])
			}
			s = append(s, value)
		}
		fmt.Fprintf(w, "%s\n", strings.Join(s, "\t"))
	}
	w.Flush()
	summaryStrings = append(summaryStrings, fmt.Sprintf("CPU usage of containers:\n%s", buf.String()))

	return strings.Join(summaryStrings, "\n")
}
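
// A minimal usage sketch (not from the original file): build a summary by hand
// and log the formatted table. It assumes framework.ContainersCPUSummary is a
// map from container name to a percentile->usage map, as in the e2e framework,
// and that the container names below appear in framework.TargetContainers();
// the numbers are made up for illustration.
func logExampleCPUSummary() {
	summary := framework.ContainersCPUSummary{
		"/kubelet": {0.50: 0.053, 0.95: 0.154},
		"/runtime": {0.50: 0.000, 0.95: 0.166},
	}
	framework.Logf("%s", formatCPUSummary(summary))
}
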
	var rm *framework.ResourceMonitor

	BeforeEach(func() {
		// Wait until the image prepull pods have completed so that they don't
		// affect the runtime CPU usage. Fail the test if prepulling cannot
		// finish in time.
		if err := framework.WaitForPodsSuccess(f.ClientSet, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
			framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout)
		}
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeNames = sets.NewString()
		for _, node := range nodes.Items {
			nodeNames.Insert(node.Name)
		}
		om = framework.NewRuntimeOperationMonitor(f.ClientSet)
		rm = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingPeriod)
		rm.Start()
	})

	AfterEach(func() {
		rm.Stop()
		result := om.GetLatestRuntimeOperationErrorRate()
		framework.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
	})
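
	// Between the BeforeEach/AfterEach hooks above, a test body typically
	// resets the monitor, runs the workload, and then logs what was collected.
	// A rough sketch of that flow, assuming the e2e framework's
	// ResourceMonitor methods Reset and LogLatest; createBatchPods is a
	// hypothetical stand-in for the workload under measurement.
	It("tracks resource usage (sketch)", func() {
		rm.Reset()        // drop samples collected before the workload starts
		createBatchPods() // hypothetical: create pods and wait for steady state
		rm.LogLatest()    // log the most recent resource usage snapshot
	})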
	framework.KubeDescribe("regular resource usage tracking", func() {
		// We assume that the scheduler will make reasonable scheduling choices
		// and assign ~N pods on the node.
		// Although we want to track N pods per node, there are N + add-on pods
		// in the cluster. The cluster add-on pods can be distributed unevenly
		// among the nodes because they are created during the cluster
		// initialization. This *noise* is obvious when N is small. We
		// deliberately set higher resource usage limits to account for
		// the noise.
Example 3
		nodeNames = sets.NewString()
		// If there are a lot of nodes, we don't want to use all of them
		// (if there are 1000 nodes in the cluster, starting 10 pods/node
		// will take ~10 minutes today). And there is also a deletion phase.
		// Instead, we choose at most 10 nodes.
		if numNodes > maxNodesToCheck {
			numNodes = maxNodesToCheck
		}
		for i := 0; i < numNodes; i++ {
			nodeNames.Insert(nodes.Items[i].Name)
		}
		updateNodeLabels(c, nodeNames, nodeLabels, nil)

		// Start resourceMonitor only in small clusters.
		if len(nodes.Items) <= maxNodesToCheck {
			resourceMonitor = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingInterval)
			resourceMonitor.Start()
		}
	})

	AfterEach(func() {
		if resourceMonitor != nil {
			resourceMonitor.Stop()
		}
		// If we added labels to nodes in this test, remove them now.
		updateNodeLabels(c, nodeNames, nil, nodeLabels)
	})
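
// updateNodeLabels is a helper defined elsewhere in this test file; the calls
// above use it to add the node labels before the test and remove them after.
// A hypothetical sketch of its shape follows. The Get/Update signatures on
// c.Core().Nodes() are assumptions tied to the Kubernetes release these tests
// target, and the real helper would also retry on update conflicts.
func updateNodeLabelsSketch(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
	for nodeName := range nodeNames {
		node, err := c.Core().Nodes().Get(nodeName) // assumed signature
		framework.ExpectNoError(err)
		if node.Labels == nil {
			node.Labels = map[string]string{}
		}
		for k, v := range toAdd {
			node.Labels[k] = v
		}
		for k := range toRemove {
			delete(node.Labels, k)
		}
		_, err = c.Core().Nodes().Update(node) // assumed signature
		framework.ExpectNoError(err)
	}
}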

	framework.KubeDescribe("Clean up pods on node", func() {
		type DeleteTest struct {
			podsPerNode int