Example #1
// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))

	defer GinkgoRecover()

	client := f.Client
	podClient := client.Pods(config.namespace)

	err := podClient.Delete(config.prefix+"-client", nil)
	if err != nil {
		// Log the error before failing the test: if the test has already failed,
		// framework.ExpectNoError() won't print anything to the logs!
		glog.Warningf("Failed to delete client pod: %v", err)
		framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
	}

	if config.serverImage != "" {
		if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
		}
		// See issue #24100.
		// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
		By("sleeping a bit so client can stop and unmount")
		time.Sleep(20 * time.Second)

		err = podClient.Delete(config.prefix+"-server", nil)
		if err != nil {
			glog.Warningf("Failed to delete server pod: %v", err)
			framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
		}
	}
}
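A typical call site builds the VolumeTestConfig first and registers this cleanup with defer, so the pods are removed even if the test body fails part way through. A minimal sketch, assuming an NFS-style config; the image tag is illustrative:

	config := VolumeTestConfig{
		namespace:   f.Namespace.Name,
		prefix:      "nfs",
		serverImage: "gcr.io/google_containers/volume-nfs:0.8", // illustrative image
	}
	defer volumeTestCleanup(f, config)
	// ... start the server pod, then the client pod, and run the volume assertions ...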
Example #2
func waitForPodsOrDie(cs *release_1_4.Clientset, ns string, n int) {
	By("Waiting for all pods to be running")
	err := wait.PollImmediate(framework.Poll, 10*time.Minute, func() (bool, error) {
		selector, err := labels.Parse("foo=bar")
		framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
		pods, err := cs.Core().Pods(ns).List(api.ListOptions{LabelSelector: selector})
		if err != nil {
			return false, err
		}
		if pods == nil {
			return false, fmt.Errorf("pods is nil")
		}
		if len(pods.Items) < n {
			framework.Logf("pods: %v < %v", len(pods.Items), n)
			return false, nil
		}
		ready := 0
		for i := 0; i < n; i++ {
			if pods.Items[i].Status.Phase == apiv1.PodRunning {
				ready++
			}
		}
		if ready < n {
			framework.Logf("running pods: %v < %v", ready, n)
			return false, nil
		}
		return true, nil
	})
	framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}
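The helper needs only the versioned clientset, the namespace, and the minimum number of pods expected to be Running; a minimal usage sketch, where cs is the *release_1_4.Clientset used by the surrounding suite and the count of 3 is illustrative:

	// Fail the spec unless at least three "foo=bar" pods reach Running within 10 minutes.
	waitForPodsOrDie(cs, f.Namespace.Name, 3)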
Example #3
// Must be called within a Context. Allows the function to modify the KubeletConfiguration during the BeforeEach of the context.
// The change is reverted in the AfterEach of the context.
func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(initialConfig *componentconfig.KubeletConfiguration)) {
	var oldCfg *componentconfig.KubeletConfiguration
	BeforeEach(func() {
		configEnabled, err := isKubeletConfigEnabled(f)
		framework.ExpectNoError(err)
		if configEnabled {
			oldCfg, err = getCurrentKubeletConfig()
			framework.ExpectNoError(err)
			clone, err := api.Scheme.DeepCopy(oldCfg)
			framework.ExpectNoError(err)
			newCfg := clone.(*componentconfig.KubeletConfiguration)
			updateFunction(newCfg)
			framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
		} else {
			framework.Logf("The Dynamic Kubelet Configuration feature is not enabled.\n" +
				"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" +
				"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
		}
	})
	AfterEach(func() {
		if oldCfg != nil {
			err := setKubeletConfiguration(f, oldCfg)
			framework.ExpectNoError(err)
		}
	})
}
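Because the helper registers its own BeforeEach and AfterEach, it is meant to be invoked directly inside a Ginkgo Context, before the specs that rely on the changed configuration. A minimal sketch; the MaxPods tweak is purely illustrative:

	Context("with a temporarily modified kubelet configuration", func() {
		tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
			// Hypothetical tweak for this context only; the original value is restored in AfterEach.
			initialConfig.MaxPods = 110
		})

		It("behaves correctly under the modified configuration", func() {
			// Test body runs against the temporary configuration.
		})
	})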
Example #4
func cleanupDensityTest(dtc DensityTestConfig) {
	defer GinkgoRecover()
	By("Deleting created Collections")
	// We explicitly delete all pods so that the API calls needed for deletion are accounted for in the metrics.
	for i := range dtc.Configs {
		name := dtc.Configs[i].GetName()
		namespace := dtc.Configs[i].GetNamespace()
		kind := dtc.Configs[i].GetKind()
		// TODO: Remove Deployment guard once GC is implemented for Deployments.
		if framework.TestContext.GarbageCollectorEnabled && kind != extensions.Kind("Deployment") {
			By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
			err := framework.DeleteResourceAndWaitForGC(dtc.ClientSet, kind, namespace, name)
			framework.ExpectNoError(err)
		} else {
			By(fmt.Sprintf("Cleaning up the %v and pods", kind))
			err := framework.DeleteResourceAndPods(dtc.ClientSet, dtc.InternalClientset, kind, namespace, name)
			framework.ExpectNoError(err)
		}
	}

	// Delete all secrets
	for i := range dtc.SecretConfigs {
		dtc.SecretConfigs[i].Stop()
	}

	for i := range dtc.DaemonConfigs {
		framework.ExpectNoError(framework.DeleteResourceAndPods(
			dtc.ClientSet,
			dtc.InternalClientset,
			extensions.Kind("DaemonSet"),
			dtc.DaemonConfigs[i].Namespace,
			dtc.DaemonConfigs[i].Name,
		))
	}
}
Example #5
func (j *testJig) waitForIngress() {
	// Wait for the loadbalancer IP.
	address, err := framework.WaitForIngressAddress(j.client, j.ing.Namespace, j.ing.Name, lbPollTimeout)
	if err != nil {
		framework.Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout)
	}
	j.address = address
	framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name)
	timeoutClient := &http.Client{Timeout: reqTimeout}

	// Check that all rules respond to a simple GET.
	for _, rules := range j.ing.Spec.Rules {
		proto := "http"
		if len(j.ing.Spec.TLS) > 0 {
			knownHosts := sets.NewString(j.ing.Spec.TLS[0].Hosts...)
			if knownHosts.Has(rules.Host) {
				timeoutClient.Transport, err = buildTransport(rules.Host, j.getRootCA(j.ing.Spec.TLS[0].SecretName))
				framework.ExpectNoError(err)
				proto = "https"
			}
		}
		for _, p := range rules.IngressRuleValue.HTTP.Paths {
			j.curlServiceNodePort(j.ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))
			route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
			framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
			framework.ExpectNoError(pollURL(route, rules.Host, lbPollTimeout, j.pollInterval, timeoutClient, false))
		}
	}
}
Example #6
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	pod := createPausePod(f, conf)
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
	pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return pod
}
Example #7
// setKubeletAPIQPSLimit sets the kubelet API QPS via ConfigMap. The kubelet will restart with the new QPS.
func setKubeletAPIQPSLimit(f *framework.Framework, newAPIQPS int32) {
	const restartGap = 40 * time.Second

	resp := pollConfigz(2*time.Minute, 5*time.Second)
	kubeCfg, err := decodeConfigz(resp)
	framework.ExpectNoError(err)
	framework.Logf("Old QPS limit is: %d\n", kubeCfg.KubeAPIQPS)

	// Set new API QPS limit
	kubeCfg.KubeAPIQPS = newAPIQPS
	// TODO(coufon): createConfigMap should first check whether the ConfigMap already exists and, if so, use updateConfigMap.
	// Calling createConfigMap twice will result in an error. This is fine for the benchmark test because we only run one test on a new node.
	_, err = createConfigMap(f, kubeCfg)
	framework.ExpectNoError(err)

	// Wait for Kubelet to restart
	time.Sleep(restartGap)

	// Check new QPS has been set
	resp = pollConfigz(2*time.Minute, 5*time.Second)
	kubeCfg, err = decodeConfigz(resp)
	framework.ExpectNoError(err)
	framework.Logf("New QPS limit is: %d\n", kubeCfg.KubeAPIQPS)

	// TODO(coufon): check test result to see if we need to retry here
	if kubeCfg.KubeAPIQPS != newAPIQPS {
		framework.Failf("Fail to set new kubelet API QPS limit.")
	}
}
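In a benchmark this is typically called once during setup, before the measurement phase, so that all subsequent kubelet API traffic runs under the new limit. A minimal sketch; the QPS value of 60 is illustrative:

	// Raise the kubelet's API QPS limit before starting the density measurement.
	setKubeletAPIQPSLimit(f, int32(60))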
Example #8
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
	nodes := framework.GetReadySchedulableNodesOrDie(config.f.Client)

	// create pods, one for each node
	createdPods := make([]*api.Pod, 0, len(nodes.Items))
	for i, n := range nodes.Items {
		podName := fmt.Sprintf("%s-%d", podName, i)
		pod := config.createNetShellPodSpec(podName, n.Name)
		pod.ObjectMeta.Labels = selector
		createdPod := config.createPod(pod)
		createdPods = append(createdPods, createdPod)
	}

	// wait until all of them are up
	runningPods := make([]*api.Pod, 0, len(nodes.Items))
	for _, p := range createdPods {
		framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
		rp, err := config.getPodClient().Get(p.Name)
		framework.ExpectNoError(err)
		runningPods = append(runningPods, rp)
	}

	return runningPods
}
Example #9
// Waits until all existing pods are scheduled and returns their count.
func waitForStableCluster(c *client.Client) int {
	timeout := 10 * time.Minute
	startTime := time.Now()

	allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
	framework.ExpectNoError(err)
	// The API server also returns Pods that have succeeded. We need to filter them out.
	currentPods := make([]api.Pod, 0, len(allPods.Items))
	for _, pod := range allPods.Items {
		if pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
			currentPods = append(currentPods, pod)
		}
	}
	allPods.Items = currentPods
	scheduledPods, currentlyNotScheduledPods := getPodsScheduled(allPods)
	for len(currentlyNotScheduledPods) != 0 {
		time.Sleep(2 * time.Second)

		allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
		framework.ExpectNoError(err)
		scheduledPods, currentlyNotScheduledPods = getPodsScheduled(allPods)

		if startTime.Add(timeout).Before(time.Now()) {
			framework.Failf("Timed out after %v waiting for stable cluster.", timeout)
			break
		}
	}
	return len(scheduledPods)
}
Example #10
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	framework.ExpectNoError(f.WaitForPodNoLongerRunning(pod.Name))
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example #11
func (f *Framework) GetUnderlyingFederatedContexts() []E2EContext {
	kubeconfig := framework.KubeConfig{}
	configBytes, err := ioutil.ReadFile(framework.TestContext.KubeConfig)
	framework.ExpectNoError(err)
	err = yaml.Unmarshal(configBytes, &kubeconfig)
	framework.ExpectNoError(err)

	e2eContexts := []E2EContext{}
	for _, context := range kubeconfig.Contexts {
		if strings.HasPrefix(context.Name, "federation") && context.Name != framework.TestContext.FederatedKubeContext {
			user := kubeconfig.FindUser(context.Context.User)
			if user == nil {
				framework.Failf("Could not find user for context %+v", context)
			}

			cluster := kubeconfig.FindCluster(context.Context.Cluster)
			if cluster == nil {
				framework.Failf("Could not find cluster for context %+v", context)
			}

			dnsSubdomainName, err := GetValidDNSSubdomainName(context.Name)
			if err != nil {
				framework.Failf("Could not convert context name %s to a valid dns subdomain name, error: %s", context.Name, err)
			}
			e2eContexts = append(e2eContexts, E2EContext{
				RawName: context.Name,
				Name:    dnsSubdomainName,
				Cluster: cluster,
				User:    user,
			})
		}
	}

	return e2eContexts
}
Example #12
func (p *statefulSetTester) getPodList(ps *apps.StatefulSet) *v1.PodList {
	selector, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector)
	framework.ExpectNoError(err)
	podList, err := p.c.Core().Pods(ps.Namespace).List(v1.ListOptions{LabelSelector: selector.String()})
	framework.ExpectNoError(err)
	return podList
}
Example #13
func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	if shouldRun {
		// The pod needs to start before it stops, so wait for the longer start timeout.
		framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
			f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	} else {
		// Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
		w, err := f.PodClient().Watch(v1.SingleObject(metav1.ObjectMeta{Name: pod.Name}))
		framework.ExpectNoError(err)
		_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
			switch e.Type {
			case watch.Deleted:
				return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
			}
			switch t := e.Object.(type) {
			case *v1.Pod:
				if t.Status.Reason == "AppArmor" {
					return true, nil
				}
			}
			return false, nil
		})
		framework.ExpectNoError(err)
	}
	p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return p.Status
}
Example #14
func runPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
	pod := createPausePod(f, conf)
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
	pod, err := f.Client.Pods(f.Namespace.Name).Get(conf.Name)
	framework.ExpectNoError(err)
	return pod
}
Example #15
func (rc *ResourceConsumer) GetReplicas() int {
	switch rc.kind {
	case kindRC:
		replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		if replicationController == nil {
			framework.Failf(rcIsNil)
		}
		return int(replicationController.Status.Replicas)
	case kindDeployment:
		deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		if deployment == nil {
			framework.Failf(deploymentIsNil)
		}
		return int(deployment.Status.Replicas)
	case kindReplicaSet:
		rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		if rs == nil {
			framework.Failf(rsIsNil)
		}
		return int(rs.Status.Replicas)
	default:
		framework.Failf(invalidKind)
	}
	return 0
}
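In the autoscaling tests a caller usually polls this accessor until the workload converges on the expected size; a minimal sketch using the same wait helper as the other examples (the interval, timeout, and desiredReplicas value are illustrative):

	desiredReplicas := 3
	err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
		return rc.GetReplicas() == desiredReplicas, nil
	})
	framework.ExpectNoError(err, "timed out waiting for %d replicas", desiredReplicas)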
Example #16
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
	nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)

	// To make this test work reasonably fast in large clusters,
	// we limit the number of NetProxyPods to no more than 100,
	// placed on randomly chosen nodes.
	nodes := shuffleNodes(nodeList.Items)
	if len(nodes) > maxNetProxyPodsCount {
		nodes = nodes[:maxNetProxyPodsCount]
	}

	// create pods, one for each node
	createdPods := make([]*api.Pod, 0, len(nodes))
	for i, n := range nodes {
		podName := fmt.Sprintf("%s-%d", podName, i)
		pod := config.createNetShellPodSpec(podName, n.Name)
		pod.ObjectMeta.Labels = selector
		createdPod := config.createPod(pod)
		createdPods = append(createdPods, createdPod)
	}

	// wait until all of them are up
	runningPods := make([]*api.Pod, 0, len(nodes))
	for _, p := range createdPods {
		framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
		rp, err := config.getPodClient().Get(p.Name)
		framework.ExpectNoError(err)
		runningPods = append(runningPods, rp)
	}

	return runningPods
}
Example #17
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
	podClient := f.PodClient()
	ns := f.Namespace.Name
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	containerName := pod.Spec.Containers[0].Name
	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()
	By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
	podClient.Create(pod)

	// Wait until the pod is not pending. (Here we need to check for something other than
	// 'Pending', rather than checking for 'Running', since when failures occur we go to
	// 'Terminated', which can cause indefinite blocking.)
	framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
		fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
	framework.Logf("Started pod %s in namespace %s", pod.Name, ns)

	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
	initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
	framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)

	// Wait for the restart state to be as desired.
	deadline := time.Now().Add(timeout)
	lastRestartCount := initialRestartCount
	observedRestarts := int32(0)
	for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
		pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
		restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
		if restartCount != lastRestartCount {
			framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
				ns, pod.Name, restartCount, time.Since(start))
			if restartCount < lastRestartCount {
				framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
					ns, pod.Name, lastRestartCount, restartCount)
			}
		}
		observedRestarts = restartCount - initialRestartCount
		if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
			// Stop once we have observed at least expectNumRestarts restarts.
			break
		}
		lastRestartCount = restartCount
	}

	// If we expected 0 restarts, fail if we observed any restarts.
	// If we expected n restarts (n > 0), fail if we observed < n restarts.
	if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
		int(observedRestarts) < expectNumRestarts) {
		framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
			ns, pod.Name, expectNumRestarts, observedRestarts)
	}
}
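A caller supplies a pod whose container carries a liveness probe and states how many restarts it expects to see within the timeout. A minimal sketch; the busybox image tag and the two-minute timeout are illustrative:

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "liveness-exec", Labels: map[string]string{"test": "liveness"}},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "liveness",
				Image:   "gcr.io/google_containers/busybox:1.24",
				Command: []string{"/bin/sh", "-c", "touch /tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
				LivenessProbe: &v1.Probe{
					Handler:             v1.Handler{Exec: &v1.ExecAction{Command: []string{"cat", "/tmp/health"}}},
					InitialDelaySeconds: 15,
					FailureThreshold:    1,
				},
			}},
		},
	}
	// The probe starts failing after ten seconds, so expect at least one restart.
	runLivenessTest(f, pod, 1, 2*time.Minute)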
Example #18
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	// The pod needs to start before it stops, so wait for the longer start timeout.
	framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
		f.Client, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example #19
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
	defer GinkgoRecover()

	// Create all secrets
	for i := range dtc.SecretConfigs {
		dtc.SecretConfigs[i].Run()
	}

	for i := range dtc.DaemonConfigs {
		dtc.DaemonConfigs[i].Run()
	}

	// Start all replication controllers.
	startTime := time.Now()
	wg := sync.WaitGroup{}
	wg.Add(len(dtc.Configs))
	for i := range dtc.Configs {
		config := dtc.Configs[i]
		go func() {
			defer GinkgoRecover()
			// Call wg.Done() in a defer to avoid blocking the whole test
			// in case of an error from RunRC.
			defer wg.Done()
			framework.ExpectNoError(config.Run())
		}()
	}
	logStopCh := make(chan struct{})
	go logPodStartupStatus(dtc.ClientSet, dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
	wg.Wait()
	startupTime := time.Now().Sub(startTime)
	close(logStopCh)
	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))

	// Print some data about Pod to Node allocation
	By("Printing Pod to Node allocation data")
	podList, err := dtc.ClientSet.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
	framework.ExpectNoError(err)
	pausePodAllocation := make(map[string]int)
	systemPodAllocation := make(map[string][]string)
	for _, pod := range podList.Items {
		if pod.Namespace == api.NamespaceSystem {
			systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
		} else {
			pausePodAllocation[pod.Spec.NodeName]++
		}
	}
	nodeNames := make([]string, 0)
	for k := range pausePodAllocation {
		nodeNames = append(nodeNames, k)
	}
	sort.Strings(nodeNames)
	for _, node := range nodeNames {
		framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
	}
	return startupTime
}
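In the density suite this start phase is typically paired with the cleanup helper from Example #4, with the cleanup deferred so it also runs when the measurement fails; a minimal sketch, assuming a prepared DensityTestConfig named dtc:

	defer cleanupDensityTest(dtc)
	e2eStartupTime := runDensityTest(dtc)
	framework.Logf("Cluster saturation time: %v", e2eStartupTime)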
Example #20
func cleanupPods(c *client.Client, ns string) {
	By("Removing all pods in namespace " + ns)
	pods, err := c.Pods(ns).List(api.ListOptions{})
	framework.ExpectNoError(err)
	opt := api.NewDeleteOptions(0)
	for _, p := range pods.Items {
		framework.ExpectNoError(c.Pods(ns).Delete(p.ObjectMeta.Name, opt))
	}
}
Example #21
func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
	By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
	_, err := c.Services(ns).Create(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: intstr.FromInt(targetPort),
			}},

			Selector: map[string]string{
				"name": name,
			},
		},
	})
	framework.ExpectNoError(err)

	rcConfig := framework.RCConfig{
		Client:     c,
		Image:      resourceConsumerImage,
		Name:       name,
		Namespace:  ns,
		Timeout:    timeoutRC,
		Replicas:   replicas,
		CpuRequest: cpuLimitMillis,
		CpuLimit:   cpuLimitMillis,
		MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
		MemLimit:   memLimitMb * 1024 * 1024,
	}

	switch kind {
	case kindRC:
		framework.ExpectNoError(framework.RunRC(rcConfig))
		break
	case kindDeployment:
		dpConfig := framework.DeploymentConfig{
			RCConfig: rcConfig,
		}
		framework.ExpectNoError(framework.RunDeployment(dpConfig))
		break
	case kindReplicaSet:
		rsConfig := framework.ReplicaSetConfig{
			RCConfig: rcConfig,
		}
		framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
		break
	default:
		framework.Failf(invalidKind)
	}

	// Make sure endpoints are propagated.
	// TODO(piosz): replace sleep with endpoints watch.
	time.Sleep(10 * time.Second)
}
Example #22
func clusterIsReadyOrFail(f *framework.Framework, context *framework.E2EContext) {
	c, err := f.FederationClientset_1_5.Federation().Clusters().Get(context.Name)
	framework.ExpectNoError(err, fmt.Sprintf("get cluster: %+v", err))
	if c.ObjectMeta.Name != context.Name {
		framework.Failf("cluster name does not match input context: actual=%+v, expected=%+v", c, context)
	}
	err = isReady(context.Name, f.FederationClientset_1_5)
	framework.ExpectNoError(err, fmt.Sprintf("unexpected error in verifying if cluster %s is ready: %+v", context.Name, err))
	framework.Logf("Cluster %s is Ready", context.Name)
}
Example #23
func (rc *ResourceConsumer) CleanUp() {
	By(fmt.Sprintf("Removing consuming RC %s", rc.name))
	rc.stopCPU <- 0
	rc.stopMem <- 0
	rc.stopCustomMetric <- 0
	// Wait some time to ensure all child goroutines are finished.
	time.Sleep(10 * time.Second)
	framework.ExpectNoError(framework.DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
	framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
}
Example #24
func setMigSizes(sizes map[string]int) {
	for mig, desiredSize := range sizes {
		currentSize, err := GroupSize(mig)
		framework.ExpectNoError(err)
		if desiredSize != currentSize {
			By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
			err = ResizeGroup(mig, int32(desiredSize))
			framework.ExpectNoError(err)
		}
	}
}
Example #25
File: load.go Project: jumpkick/kubernetes
func deleteRC(wg *sync.WaitGroup, config *testutils.RCConfig, deletingTime time.Duration) {
	defer GinkgoRecover()
	defer wg.Done()

	sleepUpTo(deletingTime)
	if framework.TestContext.GarbageCollectorEnabled {
		framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
	} else {
		framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
	}
}
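Since deleteRC calls wg.Done itself, the caller bumps the WaitGroup before launching one goroutine per controller and then waits for all deletions to finish; a minimal sketch (the configs slice and the ten-minute deleting window are illustrative):

	wg := sync.WaitGroup{}
	wg.Add(len(configs))
	for i := range configs {
		go deleteRC(&wg, configs[i], 10*time.Minute)
	}
	wg.Wait()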
Example #26
// sendOneConsumeMemRequest sends a POST request for memory consumption
func (rc *ResourceConsumer) sendOneConsumeMemRequest(megabytes int, durationSec int) {
	defer GinkgoRecover()
	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
	framework.ExpectNoError(err)
	_, err = proxyRequest.Namespace(rc.framework.Namespace.Name).
		Name(rc.name).
		Suffix("ConsumeMem").
		Param("megabytes", strconv.Itoa(megabytes)).
		Param("durationSec", strconv.Itoa(durationSec)).
		DoRaw()
	framework.ExpectNoError(err)
}
Example #27
// ingFromManifest reads a .json/.yaml file and returns the ingress in it.
func ingFromManifest(fileName string) *extensions.Ingress {
	var ing extensions.Ingress
	framework.Logf("Parsing ingress from %v", fileName)
	data, err := ioutil.ReadFile(fileName)
	framework.ExpectNoError(err)

	json, err := utilyaml.ToJSON(data)
	framework.ExpectNoError(err)

	framework.ExpectNoError(runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &ing))
	return &ing
}
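The returned object is typically created straight away through the suite's clientset; a minimal usage sketch, where client and ns stand in for the suite's clientset and namespace and the manifest path is illustrative:

	ing := ingFromManifest("testdata/ingress.yaml")
	_, err := client.Extensions().Ingresses(ns).Create(ing)
	framework.ExpectNoError(err)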
Example #28
func (rc *ResourceConsumer) CleanUp() {
	By(fmt.Sprintf("Removing consuming RC %s", rc.name))
	close(rc.stopCPU)
	close(rc.stopMem)
	close(rc.stopCustomMetric)
	// Wait some time to ensure all child goroutines are finished.
	time.Sleep(10 * time.Second)
	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.name))
	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil))
	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.controllerName))
	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil))
}
Example #29
// sendOneConsumeCustomMetric sends a POST request for custom metric consumption
func (rc *ResourceConsumer) sendOneConsumeCustomMetric(delta int, durationSec int) {
	defer GinkgoRecover()
	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.Client, rc.framework.Client.Post())
	framework.ExpectNoError(err)
	_, err = proxyRequest.Namespace(rc.framework.Namespace.Name).
		Name(rc.name).
		Suffix("BumpMetric").
		Param("metric", customMetricName).
		Param("delta", strconv.Itoa(delta)).
		Param("durationSec", strconv.Itoa(durationSec)).
		DoRaw()
	framework.ExpectNoError(err)
}
Example #30
// sendConsumeMemRequest sends a POST request for memory consumption
func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
	framework.ExpectNoError(err)
	req := proxyRequest.Namespace(rc.framework.Namespace.Name).
		Name(rc.controllerName).
		Suffix("ConsumeMem").
		Param("megabytes", strconv.Itoa(megabytes)).
		Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
		Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
	framework.Logf("URL: %v", *req.URL())
	_, err = req.DoRaw()
	framework.ExpectNoError(err)
}