		// finish in time.
		if err := framework.WaitForPodsSuccess(f.ClientSet, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
			framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout)
		}
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeNames = sets.NewString()
		for _, node := range nodes.Items {
			nodeNames.Insert(node.Name)
		}
		om = framework.NewRuntimeOperationMonitor(f.ClientSet)
		rm = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingPeriod)
		rm.Start()
	})

	// Stop the resource monitor and report the runtime operation error rates observed during the test.
	AfterEach(func() {
		rm.Stop()
		result := om.GetLatestRuntimeOperationErrorRate()
		framework.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
	})

	framework.KubeDescribe("regular resource usage tracking", func() {
		// We assume that the scheduler will make reasonable scheduling choices
		// and assign ~N pods on the node.
		// Although we want to track N pods per node, there are N + add-on pods
		// in the cluster. The cluster add-on pods can be distributed unevenly
		// among the nodes because they are created during the cluster
		// initialization. This *noise* is obvious when N is small. We
		// deliberately set higher resource usage limits to account for the
		// noise.
		rTests := []resourceTest{
			{
				podsPerNode: 0,
		}
		for i := 0; i < numNodes; i++ {
			nodeNames.Insert(nodes.Items[i].Name)
		}
		updateNodeLabels(c, nodeNames, nodeLabels, nil)

		// Start resourceMonitor only in small clusters.
		if len(nodes.Items) <= maxNodesToCheck {
			resourceMonitor = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingInterval)
			resourceMonitor.Start()
		}
	})

	AfterEach(func() {
		if resourceMonitor != nil {
			resourceMonitor.Stop()
		}

		// If we added labels to nodes in this test, remove them now.
		updateNodeLabels(c, nodeNames, nil, nodeLabels)
	})

	framework.KubeDescribe("Clean up pods on node", func() {
		type DeleteTest struct {
			podsPerNode int
			timeout     time.Duration
		}

		deleteTests := []DeleteTest{
			{podsPerNode: 10, timeout: 1 * time.Minute},
		}

		for _, itArg := range deleteTests {
			name := fmt.Sprintf(