Example #1
	BeforeEach(func() {
		// Wait until the image prepull pods have completed so that they
		// don't affect the runtime CPU usage. Fail the test if prepulling
		// cannot finish in time.
		if err := framework.WaitForPodsSuccess(f.ClientSet, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
			framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adultrated", imagePrePullingLongTimeout)
		}
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeNames = sets.NewString()
		for _, node := range nodes.Items {
			nodeNames.Insert(node.Name)
		}
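		// Track container-runtime operation errors and poll resource usage
		// of the target system containers while the test runs.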
		om = framework.NewRuntimeOperationMonitor(f.ClientSet)
		rm = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingPeriod)
		rm.Start()
	})

	AfterEach(func() {
		rm.Stop()
		result := om.GetLatestRuntimeOperationErrorRate()
		framework.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
	})
	framework.KubeDescribe("regular resource usage tracking", func() {
		// We assume that the scheduler will make reasonable scheduling choices
		// and assign ~N pods on the node.
		// Although we want to track N pods per node, there are N + add-on pods
		// in the cluster. The cluster add-on pods can be distributed unevenly
		// among the nodes because they are created during the cluster
		// initialization. This *noise* is obvious when N is small. We
		// deliberately set higher resource usage limits to account for the
		// noise.
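
A test body inside this KubeDescribe block would then create its workload and sample the monitors started in BeforeEach. The following is a minimal sketch only, assuming this framework vintage's ResourceMonitor exposes LogLatest and LogCPUSummary helpers; the It block is an illustration, not lines from the original test:

		It("reports resource usage of the tracked containers", func() {
			// Hypothetical body: create the measured pods, wait for them
			// to be running, then dump what the monitors gathered.
			rm.LogLatest()     // assumed helper: log the newest usage sample
			rm.LogCPUSummary() // assumed helper: log CPU usage percentiles
		})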
Example #2
		// If there are a lot of nodes, we don't want to use all of them
		// (if there are 1000 nodes in the cluster, starting 10 pods/node
		// will take ~10 minutes today). And there is also a deletion phase.
		// Instead, we choose at most 10 nodes.
		if numNodes > maxNodesToCheck {
			numNodes = maxNodesToCheck
		}
		for i := 0; i < numNodes; i++ {
			nodeNames.Insert(nodes.Items[i].Name)
		}
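		// Add the test's labels to the selected nodes; AfterEach removes them.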
		updateNodeLabels(c, nodeNames, nodeLabels, nil)

		// Start resourceMonitor only in small clusters.
		if len(nodes.Items) <= maxNodesToCheck {
			resourceMonitor = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingInterval)
			resourceMonitor.Start()
		}
	})

	AfterEach(func() {
		if resourceMonitor != nil {
			resourceMonitor.Stop()
		}
		// If we added labels to nodes in this test, remove them now.
		updateNodeLabels(c, nodeNames, nil, nodeLabels)
	})

	framework.KubeDescribe("Clean up pods on node", func() {
		type DeleteTest struct {
			podsPerNode int
			timeout     time.Duration