Example #1
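// CleanUp stops the CPU, memory, and custom-metric consumer goroutines, waits for them to
// finish, and then deletes the consuming RC and its service as well as the controller RC
// and its service.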
func (rc *ResourceConsumer) CleanUp() {
	By(fmt.Sprintf("Removing consuming RC %s", rc.name))
	rc.stopCPU <- 0
	rc.stopMem <- 0
	rc.stopCustomMetric <- 0
	// Wait some time to ensure all child goroutines are finished.
	time.Sleep(10 * time.Second)
	framework.ExpectNoError(framework.DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
	framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
	framework.ExpectNoError(framework.DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.controllerName))
	framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.controllerName))
}
Example #2
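// deleteRC waits up to deletingTime (spreading deletions out over time), then deletes the
// replication controller described by config and fails the test if the deletion errors.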
func deleteRC(wg *sync.WaitGroup, config *framework.RCConfig, deletingTime time.Duration) {
	defer GinkgoRecover()
	defer wg.Done()

	sleepUpTo(deletingTime)
	framework.ExpectNoError(framework.DeleteRC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
}
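The GinkgoRecover/wg.Done pattern shows that deleteRC is meant to run as a goroutine; a minimal caller sketch, assuming a configs slice of *framework.RCConfig and a deletingTime value that are not part of the original snippet:

var wg sync.WaitGroup
wg.Add(len(configs)) // configs is an assumed []*framework.RCConfig prepared elsewhere
for i := range configs {
	go deleteRC(&wg, configs[i], deletingTime) // each goroutine spreads its deletion across deletingTime
}
wg.Wait() // block until every RC has been deleted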
Example #3
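// runResourceTrackingTest creates an RC of podsPerNode pause pods per node, monitors node
// resource usage for monitoringTime while logging periodic summaries, verifies the observed
// memory and CPU usage against the expected limits, and deletes the RC when done.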
func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
	expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(framework.RunRC(framework.RCConfig{
		Client:    f.Client,
		Name:      rcName,
		Namespace: f.Namespace.Name,
		Image:     framework.GetPauseImageName(f.Client),
		Replicas:  totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	rm.LogLatest()
	rm.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(f.Client, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(f.Client, nodeNames.List())
	usageSummary, err := rm.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	// TODO(random-liu): Remove the original log when we migrate to new perfdash
	framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
	// Log perf result
	framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
	verifyMemoryLimits(f.Client, expectedMemory, usageSummary)

	cpuSummary := rm.GetCPUSummary()
	framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
	// Log perf result
	framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
	verifyCPULimits(expectedCPU, cpuSummary)

	By("Deleting the RC")
	framework.DeleteRC(f.Client, f.Namespace.Name, rcName)
}
Example #4
// Check that the pods comprising a replication controller get spread evenly across available zones
func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
	name := "ubelite-spread-rc-" + string(util.NewUUID())
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicaCount,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Delete the replication controller (which scales it to zero first) to get rid of its pods.
		if err := framework.DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()
	// List the pods, making sure we observe all the replicas.
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicaCount)
	Expect(err).NotTo(HaveOccurred())

	// Wait for all of them to be scheduled
	By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled.  Selector: %v", replicaCount, name, selector))
	pods, err = framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())

	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}
Example #5
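// cleanupDensityTest deletes every ReplicationController created for the density test,
// skipping RCs that no longer exist or are already scaled down to zero replicas.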
func cleanupDensityTest(dtc DensityTestConfig) {
	defer GinkgoRecover()
	By("Deleting ReplicationController")
	// We explicitly delete all pods so that the API calls required for deletion are accounted for in the metrics.
	for i := range dtc.Configs {
		rcName := dtc.Configs[i].Name
		rc, err := dtc.Client.ReplicationControllers(dtc.Namespace).Get(rcName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := framework.DeleteRC(dtc.Client, dtc.Namespace, rcName)
			framework.ExpectNoError(err)
		}
	}
}
Example #6
}

var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
	var c *client.Client
	var nodeList *api.NodeList
	var systemPodsNo int
	var totalPodCapacity int64
	var RCName string
	var ns string
	ignoreLabels := framework.ImagePullerLabels

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := framework.DeleteRC(c, ns, RCName)
			framework.ExpectNoError(err)
		}
	})

	f := framework.NewDefaultFramework("sched-pred")

	BeforeEach(func() {
		c = f.Client
		ns = f.Namespace.Name
		nodeList = &api.NodeList{}
		nodes, err := c.Nodes().List(api.ListOptions{})
		masterNodes = sets.NewString()
		for _, node := range nodes.Items {
			if system.IsMasterNode(&node) {
				masterNodes.Insert(node.Name)
Example #7
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(f *framework.Framework, test string, image string) {
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := int32(2)

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Delete the replication controller (which scales it to zero first) to get rid of its pods.
		if err := framework.DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

	pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicas)
	Expect(err).NotTo(HaveOccurred())

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.Client, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
	if err != nil {
		framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
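ServeImageOrFail is a helper meant to be called from a Ginkgo spec; a sketch of such a caller, in which the describe text, test label, and image name are assumptions rather than part of the original source:

var _ = framework.KubeDescribe("ReplicationController", func() {
	f := framework.NewDefaultFramework("replication-controller")

	It("should serve a basic image on each replica [Conformance]", func() {
		// "basic" becomes part of the generated RC name; the image is expected to serve its hostname.
		ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
	})
})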
Example #8
				// Test whether e2e pod startup time is acceptable.
				podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
				framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))

				framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
			}

			By("Deleting ReplicationController")
			// We explicitly delete all pods so that the API calls required for deletion are accounted for in the metrics.
			for i := range RCConfigs {
				rcName := RCConfigs[i].Name
				rc, err := c.ReplicationControllers(ns).Get(rcName)
				if err == nil && rc.Spec.Replicas != 0 {
					By("Cleaning up the replication controller")
					err := framework.DeleteRC(c, ns, rcName)
					framework.ExpectNoError(err)
				}
			}

			By("Removing additional replication controllers if any")
			for i := 1; i <= nodeCount; i++ {
				name := additionalPodsPrefix + "-" + strconv.Itoa(i)
				c.ReplicationControllers(ns).Delete(name)
			}
		})
	}
})

func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
	defer GinkgoRecover()
Example #9
					Image:        framework.GetPauseImageName(f.Client),
					Replicas:     totalPods,
					NodeSelector: nodeLabels,
				})).NotTo(HaveOccurred())
				// Perform a sanity check so that we know all desired pods are
				// running on the nodes according to kubelet. The timeout is set to
				// only 30 seconds here because framework.RunRC already waited for all pods to
				// transition to the running status.
				Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, totalPods,
					time.Second*30)).NotTo(HaveOccurred())
				if resourceMonitor != nil {
					resourceMonitor.LogLatest()
				}

				By("Deleting the RC")
				framework.DeleteRC(f.Client, f.Namespace.Name, rcName)
				// Check that the pods really are gone by querying /runningpods on the
				// node. The /runningpods handler checks the container runtime (or its
				// cache) and returns a list of running pods. Some possible causes of
				// failures are:
				//   - kubelet deadlock
				//   - a bug in graceful termination (if it is enabled)
				//   - docker slow to delete pods (or resource problems causing slowness)
				start := time.Now()
				Expect(waitTillNPodsRunningOnNodes(f.Client, nodeNames, rcName, f.Namespace.Name, 0,
					itArg.timeout)).NotTo(HaveOccurred())
				framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
					time.Since(start))
				if resourceMonitor != nil {
					resourceMonitor.LogCPUSummary()
				}
Example #10
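// runServiceLatencies starts a single-replica pause-pod RC and one shared endpoint watcher,
// then performs total latency measurements (at most inParallel at a time) of how quickly the
// service's endpoint updates propagate, returning the observed durations and an error if any
// attempt failed.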
func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
	cfg := framework.RCConfig{
		Client:       f.Client,
		Image:        framework.GetPauseImageName(f.Client),
		Name:         "svc-latency-rc",
		Namespace:    f.Namespace.Name,
		Replicas:     1,
		PollInterval: time.Second,
	}
	if err := framework.RunRC(cfg); err != nil {
		return nil, err
	}
	defer framework.DeleteRC(f.Client, f.Namespace.Name, cfg.Name)

	// Run a single watcher, to reduce the number of API calls we have to
	// make; this is to minimize the timing error. It's how kube-proxy
	// consumes the endpoints data, so it seems like the right thing to
	// test.
	endpointQueries := newQuerier()
	startEndpointWatcher(f, endpointQueries)
	defer close(endpointQueries.stop)

	// Run one test and throw it away; this is to make sure that the pod's
	// ready status has propagated.
	singleServiceLatency(f, cfg.Name, endpointQueries)

	// These channels are never closed, and each attempt sends on exactly
	// one of these channels, so the sum of the things sent over them will
	// be exactly total.
	errs := make(chan error, total)
	durations := make(chan time.Duration, total)

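	// blocker acts as a counting semaphore so that at most inParallel attempts run concurrently.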
	blocker := make(chan struct{}, inParallel)
	for i := 0; i < total; i++ {
		go func() {
			defer GinkgoRecover()
			blocker <- struct{}{}
			defer func() { <-blocker }()
			if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil {
				errs <- err
			} else {
				durations <- d
			}
		}()
	}

	errCount := 0
	for i := 0; i < total; i++ {
		select {
		case e := <-errs:
			framework.Logf("Got error: %v", e)
			errCount += 1
		case d := <-durations:
			output = append(output, d)
		}
	}
	if errCount != 0 {
		return output, fmt.Errorf("got %v errors", errCount)
	}
	return output, nil
}
Example #11
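// proxyContext registers the proxy e2e tests for the given API version: proxying node logs
// and cadvisor (via both the legacy proxy path and the proxy subresource), and proxying
// HTTP/HTTPS requests through a service and directly to its backing pod.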
func proxyContext(version string) {
	f := framework.NewDefaultFramework("proxy")
	prefix := "/api/" + version

	// Port here has to be kept in sync with default kubelet port.
	It("should proxy logs on node with explicit kubelet port [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":10250/logs/") })
	It("should proxy logs on node [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", "/logs/") })
	It("should proxy to cadvisor [Conformance]", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":4194/containers/") })

	It("should proxy logs on node with explicit kubelet port using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", ":10250/proxy/logs/") })
	It("should proxy logs on node using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })
	It("should proxy to cadvisor using proxy subresource [Conformance]", func() { nodeProxyTest(f, prefix+"/nodes/", ":4194/proxy/containers/") })

	It("should proxy through a service and a pod [Conformance]", func() {
		labels := map[string]string{"proxy-service-target": "true"}
		service, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{
			ObjectMeta: api.ObjectMeta{
				GenerateName: "proxy-service-",
			},
			Spec: api.ServiceSpec{
				Selector: labels,
				Ports: []api.ServicePort{
					{
						Name:       "portname1",
						Port:       80,
						TargetPort: intstr.FromString("dest1"),
					},
					{
						Name:       "portname2",
						Port:       81,
						TargetPort: intstr.FromInt(162),
					},
					{
						Name:       "tlsportname1",
						Port:       443,
						TargetPort: intstr.FromString("tlsdest1"),
					},
					{
						Name:       "tlsportname2",
						Port:       444,
						TargetPort: intstr.FromInt(462),
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())
		defer func(name string) {
			err := f.Client.Services(f.Namespace.Name).Delete(name)
			if err != nil {
				framework.Logf("Failed deleting service %v: %v", name, err)
			}
		}(service.Name)

		// Make an RC with a single pod.
		pods := []*api.Pod{}
		cfg := framework.RCConfig{
			Client:       f.Client,
			Image:        "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
			Name:         service.Name,
			Namespace:    f.Namespace.Name,
			Replicas:     1,
			PollInterval: time.Second,
			Env: map[string]string{
				"SERVE_PORT_80":  `<a href="/rewriteme">test</a>`,
				"SERVE_PORT_160": "foo",
				"SERVE_PORT_162": "bar",

				"SERVE_TLS_PORT_443": `<a href="/tlsrewriteme">test</a>`,
				"SERVE_TLS_PORT_460": `tls baz`,
				"SERVE_TLS_PORT_462": `tls qux`,
			},
			Ports: map[string]int{
				"dest1": 160,
				"dest2": 162,

				"tlsdest1": 460,
				"tlsdest2": 462,
			},
			ReadinessProbe: &api.Probe{
				Handler: api.Handler{
					HTTPGet: &api.HTTPGetAction{
						Port: intstr.FromInt(80),
					},
				},
				InitialDelaySeconds: 1,
				TimeoutSeconds:      5,
				PeriodSeconds:       10,
			},
			Labels:      labels,
			CreatedPods: &pods,
		}
		Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
		defer framework.DeleteRC(f.Client, f.Namespace.Name, cfg.Name)

		Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())

		// Try proxying through the service and directly through the pod.
		svcProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port)
		}
		subresourceServiceProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port) + "/proxy"
		}
		podProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port)
		}
		subresourcePodProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port) + "/proxy"
		}
		expectations := map[string]string{
			svcProxyURL("", "portname1") + "/": "foo",
			svcProxyURL("", "80") + "/":        "foo",
			svcProxyURL("", "portname2") + "/": "bar",
			svcProxyURL("", "81") + "/":        "bar",

			svcProxyURL("http", "portname1") + "/": "foo",
			svcProxyURL("http", "80") + "/":        "foo",
			svcProxyURL("http", "portname2") + "/": "bar",
			svcProxyURL("http", "81") + "/":        "bar",

			svcProxyURL("https", "tlsportname1") + "/": "tls baz",
			svcProxyURL("https", "443") + "/":          "tls baz",
			svcProxyURL("https", "tlsportname2") + "/": "tls qux",
			svcProxyURL("https", "444") + "/":          "tls qux",

			subresourceServiceProxyURL("", "portname1") + "/":         "foo",
			subresourceServiceProxyURL("http", "portname1") + "/":     "foo",
			subresourceServiceProxyURL("", "portname2") + "/":         "bar",
			subresourceServiceProxyURL("http", "portname2") + "/":     "bar",
			subresourceServiceProxyURL("https", "tlsportname1") + "/": "tls baz",
			subresourceServiceProxyURL("https", "tlsportname2") + "/": "tls qux",

			podProxyURL("", "80") + "/":  `<a href="` + podProxyURL("", "80") + `/rewriteme">test</a>`,
			podProxyURL("", "160") + "/": "foo",
			podProxyURL("", "162") + "/": "bar",

			podProxyURL("http", "80") + "/":  `<a href="` + podProxyURL("http", "80") + `/rewriteme">test</a>`,
			podProxyURL("http", "160") + "/": "foo",
			podProxyURL("http", "162") + "/": "bar",

			subresourcePodProxyURL("", "") + "/":        `<a href="` + subresourcePodProxyURL("", "") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("", "80") + "/":      `<a href="` + subresourcePodProxyURL("", "80") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("http", "80") + "/":  `<a href="` + subresourcePodProxyURL("http", "80") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("", "160") + "/":     "foo",
			subresourcePodProxyURL("http", "160") + "/": "foo",
			subresourcePodProxyURL("", "162") + "/":     "bar",
			subresourcePodProxyURL("http", "162") + "/": "bar",

			subresourcePodProxyURL("https", "443") + "/": `<a href="` + subresourcePodProxyURL("https", "443") + `/tlsrewriteme">test</a>`,
			subresourcePodProxyURL("https", "460") + "/": "tls baz",
			subresourcePodProxyURL("https", "462") + "/": "tls qux",

			// TODO: the entries below don't work, but I believe we should make them work.
			// podPrefix + ":dest1": "foo",
			// podPrefix + ":dest2": "bar",
		}

		wg := sync.WaitGroup{}
		errs := []string{}
		errLock := sync.Mutex{}
		recordError := func(s string) {
			errLock.Lock()
			defer errLock.Unlock()
			errs = append(errs, s)
		}
		for i := 0; i < proxyAttempts; i++ {
			for path, val := range expectations {
				wg.Add(1)
				go func(i int, path, val string) {
					defer wg.Done()
					body, status, d, err := doProxy(f, path)
					if err != nil {
						if serr, ok := err.(*errors.StatusError); ok {
							recordError(fmt.Sprintf("%v: path %v gave status error: %+v", i, path, serr.Status()))
						} else {
							recordError(fmt.Sprintf("%v: path %v gave error: %v", i, path, err))
						}
						return
					}
					if status != http.StatusOK {
						recordError(fmt.Sprintf("%v: path %v gave status: %v", i, path, status))
					}
					if e, a := val, string(body); e != a {
						recordError(fmt.Sprintf("%v: path %v: wanted %v, got %v", i, path, e, a))
					}
					if d > proxyHTTPCallTimeout {
						recordError(fmt.Sprintf("%v: path %v took %v > %v", i, path, d, proxyHTTPCallTimeout))
					}
				}(i, path, val)
				// The default client QPS limit is 5, so pace requests to stay under it.
				time.Sleep(200 * time.Millisecond)
			}
		}
		wg.Wait()

		if len(errs) != 0 {
			body, err := f.Client.Pods(f.Namespace.Name).GetLogs(pods[0].Name, &api.PodLogOptions{}).Do().Raw()
			if err != nil {
				framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
			} else {
				framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
			}

			Fail(strings.Join(errs, "\n"))
		}
	})
}
Example #12
			originalSizes[mig] = size
			sum += size
		}
		Expect(nodeCount).Should(Equal(sum))
	})

	AfterEach(func() {
		By(fmt.Sprintf("Restoring initial size of the cluster"))
		setMigSizes(originalSizes)
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount, scaleDownTimeout))
	})

	It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		By("Creating unschedulable pod")
		ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
		defer framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation")

		By("Waiting for scale up hoping it won't happen")
		// Verify that the appropriate event was generated.
		eventFound := false
	EventsLoop:
		for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
			By("Waiting for NotTriggerScaleUp event")
			events, err := f.Client.Events(f.Namespace.Name).List(api.ListOptions{})
			framework.ExpectNoError(err)

			for _, e := range events.Items {
				if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") {
					By("NotTriggerScaleUp event found")
					eventFound = true
					break EventsLoop
Example #13
		err := framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
		for _, rc := range rcs {
			rc.CleanUp()
		}
		framework.ExpectNoError(err)

		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
	})

	It("Should scale cluster size based on cpu reservation", func() {
		setUpAutoscaler("cpu/node_reservation", 0.5, nodeCount, nodeCount+1)

		ReserveCpu(f, "cpu-reservation", 600*nodeCount*coresPerNode)
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))

		framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "cpu-reservation"))
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
	})

	It("Should scale cluster size based on memory utilization", func() {
		setUpAutoscaler("memory/node_utilization", 0.6, nodeCount, nodeCount+1)

		// Consume 60% of total memory capacity
		megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode)
		rcs := createConsumingRCs(f, "mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica)
		err := framework.WaitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
		for _, rc := range rcs {
			rc.CleanUp()
		}
		framework.ExpectNoError(err)
Example #14
}

var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
	var c *client.Client
	var nodeList *api.NodeList
	var systemPodsNo int
	var totalPodCapacity int64
	var RCName string
	var ns string
	ignoreLabels := framework.ImagePullerLabels

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := framework.DeleteRC(c, ns, RCName)
			framework.ExpectNoError(err)
		}
	})

	f := framework.NewDefaultFramework("sched-pred")

	BeforeEach(func() {
		c = f.Client
		ns = f.Namespace.Name
		nodeList = &api.NodeList{}

		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(c)

		err := framework.CheckTestingNSDeletedExcept(c, ns)
		framework.ExpectNoError(err)