Example #1
// reserveAllCpu reserves the requested number of millicores by creating a
// replication controller of 100-millicore pods and scaling it up, then waits
// until every pod is either running or unschedulable.
func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
	timeout := 5 * time.Minute
	replicas := millicores / 100

	ReserveCpu(f, id, 1, 100)
	framework.ExpectNoError(framework.ScaleRC(f.Client, f.Namespace.Name, id, uint(replicas), false))

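	// Poll every 10 seconds until the timeout elapses, waiting for the RC to reach
	// the desired replica count with every pod running or unschedulable.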
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
		pods, err := framework.GetPodsInNamespace(f.Client, f.Namespace.Name, framework.ImagePullerLabels)
		if err != nil {
			return err
		}

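		// Keep polling until the RC has scaled up to the expected number of pods.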
		if len(pods) != replicas {
			continue
		}

		allRunningOrUnschedulable := true
		for _, pod := range pods {
			if !podRunningOrUnschedulable(pod) {
				allRunningOrUnschedulable = false
				break
			}
		}
		if allRunningOrUnschedulable {
			return nil
		}
	}
	return fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", id, timeout, replicas)
}
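For context, a caller would pass the cluster's total CPU capacity in millicores. The sketch below is a hypothetical call site, not taken from this example: the "overcommit" id, the nodeList variable, and the capacity calculation are assumptions.

	// Hypothetical usage: sum allocatable CPU across the worker nodes and try to
	// reserve all of it, failing the test if the pods never settle.
	totalMillicores := 0
	for _, node := range nodeList.Items {
		allocatable := node.Status.Allocatable[api.ResourceCPU]
		totalMillicores += int(allocatable.MilliValue())
	}
	framework.ExpectNoError(reserveAllCpu(f, "overcommit", totalMillicores))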
Example #2

	BeforeEach(func() {
		c = f.Client
		cs = f.ClientSet
		ns = f.Namespace.Name
		nodeList = &api.NodeList{}

		framework.WaitForAllNodesHealthy(c, time.Minute)
		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(c)

		err := framework.CheckTestingNSDeletedExcept(c, ns)
		framework.ExpectNoError(err)

		// Every test case in this suite assumes that cluster add-on pods stay stable and
		// cannot be run in parallel with any other test that touches Nodes or Pods,
		// because we need precise control over what is running in the cluster.
		systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
		Expect(err).NotTo(HaveOccurred())
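		// Count system pods that are scheduled onto non-master nodes and are not
		// already being deleted.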
		systemPodsNo = 0
		for _, pod := range systemPods {
			if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
				systemPodsNo++
			}
		}

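		// Wait for that many system pods to be running and ready before the test starts.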
		err = framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
		Expect(err).NotTo(HaveOccurred())

		for _, node := range nodeList.Items {
			framework.Logf("\nLogging pods the kubelet thinks are on node %v before test", node.Name)
			framework.PrintAllKubeletPods(c, node.Name)
		}