Example #1
// runDensitySeqTest runs the density sequential pod creation test
func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest) (time.Duration, []framework.PodLatencyData) {
	const (
		podType               = "density_test_pod"
		sleepBeforeCreatePods = 30 * time.Second
	)
	bgPods := newTestPods(testArg.bgPodsNr, ImageRegistry[pauseImage], "background_pod")
	testPods := newTestPods(testArg.podsNr, ImageRegistry[pauseImage], podType)

	By("Creating a batch of background pods")

	// CreateBatch is synchronized; all pods are running when it returns
	f.PodClient().CreateBatch(bgPods)

	time.Sleep(sleepBeforeCreatePods)

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out
	defer deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
	defer rc.Stop()

	// Create pods sequentially (back-to-back). e2eLags have been sorted.
	batchlag, e2eLags := createBatchPodSequential(f, testPods)

	// Log throughput data.
	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testArg.getTestName())

	return batchlag, e2eLags
}
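The densityTest type is not shown in this example. The following is a hedged sketch inferred only from the fields used above (podsNr, bgPodsNr, getTestName); the test-name format and the illustrative call are assumptions, not from the original suite.
// Hypothetical sketch of densityTest, inferred from the fields referenced above.
type densityTest struct {
	podsNr   int // number of test pods created back-to-back
	bgPodsNr int // number of background pods created first
}

func (t densityTest) getTestName() string {
	return fmt.Sprintf("density_seq_%d_pods_%d_background", t.podsNr, t.bgPodsNr)
}

// Illustrative call, where rc is the suite's ResourceCollector:
// batchLag, e2eLags := runDensitySeqTest(f, rc, densityTest{podsNr: 10, bgPodsNr: 50})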
Example #2
// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))

	defer GinkgoRecover()

	podClient := f.PodClient()

	err := podClient.Delete(config.prefix+"-client", nil)
	if err != nil {
		// Log the error before failing test: if the test has already failed,
		// framework.ExpectNoError() won't print anything to logs!
		glog.Warningf("Failed to delete client pod: %v", err)
		framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
	}

	if config.serverImage != "" {
		if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to wait for client pod to terminate: %v", err)
		}
		// See issue #24100.
		// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
		By("sleeping a bit so client can stop and unmount")
		time.Sleep(20 * time.Second)

		err = podClient.Delete(config.prefix+"-server", nil)
		if err != nil {
			glog.Warningf("Failed to delete server pod: %v", err)
			framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
		}
	}
}
Example #3
func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   podName,
			Labels: map[string]string{"name": podName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "dapi-container",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: []string{"sh", "-c", "env"},
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("250m"),
							api.ResourceMemory: resource.MustParse("32Mi"),
						},
						Limits: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("1250m"),
							api.ResourceMemory: resource.MustParse("64Mi"),
						},
					},
					Env: env,
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	// TODO(random-liu): Change TestContainerOutputRegexp to use PodClient and avoid MungeSpec explicitly
	f.PodClient().MungeSpec(pod)

	f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
}
Example #4
func reportLogsFromFluentdPod(f *framework.Framework) error {
	synthLoggerPod, err := f.PodClient().Get(synthLoggerPodName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("Failed to get synth logger pod due to %v", err)
	}

	synthLoggerNodeName := synthLoggerPod.Spec.NodeName
	if synthLoggerNodeName == "" {
		return errors.New("Synthlogger pod is not assigned to the node")
	}

	label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"}))
	options := v1.ListOptions{LabelSelector: label.String()}
	fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
	if err != nil {
		return fmt.Errorf("Failed to list fluentd pods due to %v", err)
	}

	for _, fluentdPod := range fluentdPods.Items {
		if fluentdPod.Spec.NodeName == synthLoggerNodeName {
			containerName := fluentdPod.Spec.Containers[0].Name
			logs, err := framework.GetPodLogs(f.ClientSet, api.NamespaceSystem, fluentdPod.Name, containerName)
			if err != nil {
				return fmt.Errorf("Failed to get logs from fluentd pod %s due to %v", fluentdPod.Name, err)
			}
			framework.Logf("Logs from fluentd pod %s:\n%s", fluentdPod.Name, logs)
			return nil
		}
	}

	return fmt.Errorf("Failed to find fluentd pod running on node %s", synthLoggerNodeName)
}
Example #5
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	framework.ExpectNoError(f.WaitForPodNoLongerRunning(pod.Name))
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example #6
func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	if shouldRun {
		// The pod needs to start before it stops, so wait for the longer start timeout.
		framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
			f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	} else {
		// Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
		w, err := f.PodClient().Watch(v1.SingleObject(metav1.ObjectMeta{Name: pod.Name}))
		framework.ExpectNoError(err)
		_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
			switch e.Type {
			case watch.Deleted:
				return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
			}
			switch t := e.Object.(type) {
			case *v1.Pod:
				if t.Status.Reason == "AppArmor" {
					return true, nil
				}
			}
			return false, nil
		})
		framework.ExpectNoError(err)
	}
	p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return p.Status
}
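A hedged usage sketch for this variant; the profile names are illustrative and the "AppArmor" reason check mirrors the status value this test framework reports for rejected pods.
// Hypothetical sketch (not from the original suite): exercise both branches of runAppArmorTest.
func sketchAppArmorUsage(f *framework.Framework) {
	// An unloaded profile should leave the pod pending with reason "AppArmor".
	rejected := runAppArmorTest(f, false, "localhost/profile-that-is-not-loaded") // hypothetical profile name
	if rejected.Reason != "AppArmor" {
		framework.Failf("expected the pod to be blocked by AppArmor, got reason %q", rejected.Reason)
	}
	// A loadable profile should let the pod run to completion.
	allowed := runAppArmorTest(f, true, "runtime/default")
	framework.Logf("pod with runtime/default profile ended in phase %v", allowed.Phase)
}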
Example #7
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
	podClient := f.PodClient()
	ns := f.Namespace.Name
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	containerName := pod.Spec.Containers[0].Name
	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()
	By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
	podClient.Create(pod)

	// Wait until the pod is not pending. (Here we need to check for something other
	// than 'Pending', rather than waiting for 'Running', since when failures occur the
	// pod goes to 'Terminated', which can cause indefinite blocking.)
	framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
		fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
	framework.Logf("Started pod %s in namespace %s", pod.Name, ns)

	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
	initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
	framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)

	// Wait for the restart state to be as desired.
	deadline := time.Now().Add(timeout)
	lastRestartCount := initialRestartCount
	observedRestarts := int32(0)
	for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
		pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
		restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
		if restartCount != lastRestartCount {
			framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
				ns, pod.Name, restartCount, time.Since(start))
			if restartCount < lastRestartCount {
				framework.Failf("Restart count should increment monotonically: restart count of pod %s/%s changed from %d to %d",
					ns, pod.Name, lastRestartCount, restartCount)
			}
		}
		observedRestarts = restartCount - initialRestartCount
		if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
			// Stop if we have observed at least expectNumRestarts restarts.
			break
		}
		lastRestartCount = restartCount
	}

	// If we expected 0 restarts, fail if we observed any restarts.
	// If we expected n restarts (n > 0), fail if we observed < n restarts.
	if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
		int(observedRestarts) < expectNumRestarts) {
		framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
			ns, pod.Name, expectNumRestarts, observedRestarts)
	}
}
Example #8
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	// The pod needs to start before it stops, so wait for the longer start timeout.
	framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
		f.Client, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example #9
// createBatchPodWithRateControl creates a batch of pods concurrently, using one goroutine per creation,
// with an interval between creations for throughput control.
func createBatchPodWithRateControl(f *framework.Framework, pods []*api.Pod, interval time.Duration) map[string]unversioned.Time {
	createTimes := make(map[string]unversioned.Time)
	for _, pod := range pods {
		createTimes[pod.ObjectMeta.Name] = unversioned.Now()
		go f.PodClient().Create(pod)
		time.Sleep(interval)
	}
	return createTimes
}
Example #10
// createBatchPodSequential creates pods back-to-back in sequence.
func createBatchPodSequential(f *framework.Framework, pods []*api.Pod) (time.Duration, []framework.PodLatencyData) {
	batchStartTime := unversioned.Now()
	e2eLags := make([]framework.PodLatencyData, 0)
	for _, pod := range pods {
		create := unversioned.Now()
		f.PodClient().CreateSync(pod)
		e2eLags = append(e2eLags,
			framework.PodLatencyData{Name: pod.Name, Latency: unversioned.Now().Time.Sub(create.Time)})
	}
	batchLag := unversioned.Now().Time.Sub(batchStartTime.Time)
	sort.Sort(framework.LatencySlice(e2eLags))
	return batchLag, e2eLags
}
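Because e2eLags comes back sorted, percentile latencies can be read off directly. The helper below is a hypothetical sketch, not part of the framework; it only relies on the Name/Latency fields used above.
// latencyPercentile reads the latency at percentile p (1-100) from a slice already
// sorted in ascending order, as createBatchPodSequential guarantees.
func latencyPercentile(sorted []framework.PodLatencyData, p int) time.Duration {
	if len(sorted) == 0 {
		return 0
	}
	idx := (len(sorted)*p+99)/100 - 1
	if idx < 0 {
		idx = 0
	}
	return sorted[idx].Latency
}

// Illustrative usage:
// batchLag, e2eLags := createBatchPodSequential(f, testPods)
// framework.Logf("batch lag %v, p50 %v, p99 %v",
// 	batchLag, latencyPercentile(e2eLags, 50), latencyPercentile(e2eLags, 99))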
Example #11
func createMemhogPod(f *framework.Framework, genName string, ctnName string, res api.ResourceRequirements) *api.Pod {
	env := []api.EnvVar{
		{
			Name: "MEMORY_LIMIT",
			ValueFrom: &api.EnvVarSource{
				ResourceFieldRef: &api.ResourceFieldSelector{
					Resource: "limits.memory",
				},
			},
		},
	}

	// If there is a limit specified, pass 80% of it for -mem-total, otherwise use the downward API
	// to pass limits.memory, which will be the total memory available.
	// This helps prevent a guaranteed pod from triggering an OOM kill due to its low memory limit,
	// which will cause the test to fail inappropriately.
	var memLimit string
	if limit, ok := res.Limits["memory"]; ok {
		memLimit = strconv.Itoa(int(
			float64(limit.Value()) * 0.8))
	} else {
		memLimit = "$(MEMORY_LIMIT)"
	}

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			GenerateName: genName,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyNever,
			Containers: []api.Container{
				{
					Name:            ctnName,
					Image:           "gcr.io/google-containers/stress:v1",
					ImagePullPolicy: "Always",
					Env:             env,
					// 60 min timeout * 60s / tick per 10s = 360 ticks before timeout => ~11.11Mi/tick
					// to fill ~4Gi of memory, so initial ballpark 12Mi/tick.
					// We might see flakes due to timeout if the total memory on the nodes increases.
					Args:      []string{"-mem-alloc-size", "12Mi", "-mem-alloc-sleep", "10s", "-mem-total", memLimit},
					Resources: res,
				},
			},
		},
	}
	// The generated pod.Name will be on the pod spec returned by CreateSync
	pod = f.PodClient().CreateSync(pod)
	glog.Infof("pod created with name: %s", pod.Name)
	return pod
}
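A hedged usage sketch: a Guaranteed-QoS memhog pod (requests equal to limits) exercises the 80%-of-limit branch above. The resource values and names are illustrative.
// Hypothetical sketch of calling createMemhogPod with a Guaranteed-QoS resource spec.
func sketchGuaranteedMemhog(f *framework.Framework) *api.Pod {
	res := api.ResourceRequirements{
		Requests: api.ResourceList{api.ResourceMemory: resource.MustParse("100Mi")},
		Limits:   api.ResourceList{api.ResourceMemory: resource.MustParse("100Mi")},
	}
	memhogPod := createMemhogPod(f, "memhog-guaranteed-", "memhog-ctn", res)
	glog.Infof("created guaranteed memhog pod %s", memhogPod.Name)
	return memhogPod
}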
Example #12
// runResourceUsageTest runs the resource usage test
func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg resourceTest) {
	const (
		// The monitoring time for one test
		monitoringTime = 10 * time.Minute
		// The periodic reporting period
		reportingPeriod = 5 * time.Minute
		// sleep for an interval here to measure steady data
		sleepAfterCreatePods = 10 * time.Second
	)
	pods := newTestPods(testArg.podsNr, ImageRegistry[pauseImage], "test_pod")

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out
	defer deletePodsSync(f, append(pods, getCadvisorPod()))
	defer rc.Stop()

	By("Creating a batch of Pods")
	f.PodClient().CreateBatch(pods)

	// wait for a while to let the node be steady
	time.Sleep(sleepAfterCreatePods)

	// Log once and flush the stats.
	rc.LogLatest()
	rc.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPods(f.Client)
	}

	By("Reporting overall resource usage")
	logPods(f.Client)
}
Example #13
// deletePodsSync deletes a list of pods and blocks until they disappear.
func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		go func(pod *v1.Pod) {
			defer wg.Done()

			err := f.PodClient().Delete(pod.ObjectMeta.Name, v1.NewDeleteOptions(30))
			Expect(err).NotTo(HaveOccurred())

			Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
				30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
		}(pod)
	}
	wg.Wait()
}
Example #14
func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)),
			Annotations: map[string]string{
				apparmor.ContainerAnnotationKeyPrefix + "test": profile,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "test",
				Image:   "gcr.io/google_containers/busybox:1.24",
				Command: []string{"touch", "foo"},
			}},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	return f.PodClient().Create(pod)
}
Example #15
func createPodWithAppArmor(f *framework.Framework, profile string) *api.Pod {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)),
			Annotations: map[string]string{
				"container.apparmor.security.alpha.kubernetes.io/test": profile,
			},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name:    "test",
				Image:   ImageRegistry[busyBoxImage],
				Command: []string{"touch", "foo"},
			}},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	return f.PodClient().Create(pod)
}
Example #16
func createSynthLogger(f *framework.Framework, linesCount int) {
	f.PodClient().Create(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      synthLoggerPodName,
			Namespace: f.Namespace.Name,
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyOnFailure,
			Containers: []v1.Container{
				{
					Name:  synthLoggerPodName,
					Image: "gcr.io/google_containers/busybox:1.24",
					// notice: the subshell syntax is escaped with `$$`
					Command: []string{"/bin/sh", "-c", fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo $i; i=`expr $i + 1`; done", linesCount)},
				},
			},
		},
	})
}
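A hedged sketch of pairing createSynthLogger with reportLogsFromFluentdPod from Example #4. The line count is illustrative, and waiting for the logger pod to finish is left to whatever helper the suite normally uses.
// Hypothetical sketch: emit a known number of lines, then dump the co-located fluentd logs.
func sketchSynthLoggerUsage(f *framework.Framework) {
	createSynthLogger(f, 100)
	// ... wait for the synth logger pod to complete with the suite's usual helper ...
	if err := reportLogsFromFluentdPod(f); err != nil {
		framework.Logf("could not report fluentd logs: %v", err)
	}
}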
Example #17
func createSummaryTestPods(f *framework.Framework, names ...string) {
	pods := make([]*v1.Pod, 0, len(names))
	for _, name := range names {
		pods = append(pods, &v1.Pod{
			ObjectMeta: v1.ObjectMeta{
				Name: name,
			},
			Spec: v1.PodSpec{
				RestartPolicy: v1.RestartPolicyAlways,
				Containers: []v1.Container{
					{
						Name:    "busybox-container",
						Image:   "gcr.io/google_containers/busybox:1.24",
						Command: []string{"sh", "-c", "ping -c 1 google.com; while true; do echo 'hello world' >> /test-empty-dir-mnt/file ; sleep 1; done"},
						Resources: v1.ResourceRequirements{
							Limits: v1.ResourceList{
								// Must set memory limit to get MemoryStats.AvailableBytes
								v1.ResourceMemory: resource.MustParse("10M"),
							},
						},
						VolumeMounts: []v1.VolumeMount{
							{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
						},
					},
				},
				SecurityContext: &v1.PodSecurityContext{
					SELinuxOptions: &v1.SELinuxOptions{
						Level: "s0",
					},
				},
				Volumes: []v1.Volume{
					// TODO(#28393): Test secret volumes
					// TODO(#28394): Test hostpath volumes
					{Name: "test-empty-dir", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
				},
			},
		})
	}
	f.PodClient().CreateBatch(pods)
}
Example #18
// waitForPods waits up to timeout for at least pod_count pods to be running.
// It returns the pods that are running when it stops waiting, whether the count was reached or the timeout was hit.
func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (runningPods []*v1.Pod) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
		podList, err := f.PodClient().List(v1.ListOptions{})
		if err != nil {
			framework.Logf("Failed to list pods on node: %v", err)
			continue
		}

		runningPods = []*v1.Pod{}
		for i := range podList.Items {
			// Take the address of the slice element rather than the loop variable,
			// so each appended pointer refers to a distinct pod.
			pod := &podList.Items[i]
			if r, err := testutils.PodRunningReady(pod); err != nil || !r {
				continue
			}
			runningPods = append(runningPods, pod)
		}
		framework.Logf("Running pod count %d", len(runningPods))
		if len(runningPods) >= pod_count {
			break
		}
	}
	return runningPods
}
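A minimal usage sketch; the expected pod count and timeout are illustrative values.
// Hypothetical sketch: fail the test if the expected pods never come up.
func sketchWaitForPodsUsage(f *framework.Framework) {
	const expectedPodCount = 10
	running := waitForPods(f, expectedPodCount, 5*time.Minute)
	if len(running) < expectedPodCount {
		framework.Failf("only %d of %d pods were running after the timeout", len(running), expectedPodCount)
	}
}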
Example #19
// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees the data from the server pod.
func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume v1.VolumeSource, fsGroup *int64, expectedContent string) {
	By(fmt.Sprint("starting ", config.prefix, " client"))
	clientPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.prefix + "-client",
			Labels: map[string]string{
				"role": config.prefix + "-client",
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:       config.prefix + "-client",
					Image:      "gcr.io/google_containers/busybox:1.24",
					WorkingDir: "/opt",
					// An imperative and easily debuggable container which reads vol contents for
					// us to scan in the tests or by eye.
					// We expect that /opt is empty in the minimal containers which we use in this test.
					Command: []string{
						"/bin/sh",
						"-c",
						"while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/  ; sleep 2 ; done ",
					},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      config.prefix + "-volume",
							MountPath: "/opt/",
						},
					},
				},
			},
			SecurityContext: &v1.PodSecurityContext{
				SELinuxOptions: &v1.SELinuxOptions{
					Level: "s0:c0,c1",
				},
			},
			Volumes: []v1.Volume{
				{
					Name:         config.prefix + "-volume",
					VolumeSource: volume,
				},
			},
		},
	}
	podClient := f.PodClient()

	if fsGroup != nil {
		clientPod.Spec.SecurityContext.FSGroup = fsGroup
	}
	clientPod = podClient.CreateSync(clientPod)

	By("Checking that text file contents are perfect.")
	result := f.ExecCommandInPod(clientPod.Name, "cat", "/opt/index.html")
	var err error
	if !strings.Contains(result, expectedContent) {
		err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedContent, result)
	}
	Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.")

	if fsGroup != nil {

		By("Checking fsGroup is correct.")
		_, err := framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute)
		Expect(err).NotTo(HaveOccurred(), "failed: getting the right privileges in the file %v", int(*fsGroup))
	}
}
Example #20
// runEvictionTest sets up a testing environment given the provided pods, and checks a few things:
//		It ensures that the desired testCondition is actually triggered.
//		It ensures that evictionPriority 0 pods are not evicted.
//		It ensures that lower evictionPriority pods are always evicted before higher evictionPriority pods (2 evicted before 1, etc.).
//		It ensures that all lower evictionPriority pods are eventually evicted.
// runEvictionTest then cleans up the testing environment by deleting the provided pods, and ensures that testCondition no longer exists.
func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs []podTestSpec,
	evictionTestTimeout time.Duration, hasPressureCondition func(*framework.Framework, string) (bool, error)) {

	Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {

		BeforeEach(func() {
			By("setting up pods to be used by tests")
			for _, spec := range podTestSpecs {
				By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
				f.PodClient().CreateSync(&spec.pod)
			}
		})

		It(fmt.Sprintf("should eventually see %s, and then evict all of the correct pods", testCondition), func() {
			Eventually(func() error {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				if hasPressure {
					return nil
				}
				return fmt.Errorf("Condition: %s not encountered", testCondition)
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())

			Eventually(func() error {
				// Gather current information
				updatedPodList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
				if err != nil {
					return err
				}
				updatedPods := updatedPodList.Items
				for _, p := range updatedPods {
					framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
				}
				_, err = hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))

				By("checking eviction ordering and ensuring important pods don't fail")
				done := true
				for _, priorityPodSpec := range podTestSpecs {
					var priorityPod v1.Pod
					for _, p := range updatedPods {
						if p.Name == priorityPodSpec.pod.Name {
							priorityPod = p
						}
					}
					Expect(priorityPod).NotTo(BeNil())

					// Check eviction ordering.
					// Note: it is alright for a priority 1 and priority 2 pod (for example) to fail in the same round
					for _, lowPriorityPodSpec := range podTestSpecs {
						var lowPriorityPod v1.Pod
						for _, p := range updatedPods {
							if p.Name == lowPriorityPodSpec.pod.Name {
								lowPriorityPod = p
							}
						}
						Expect(lowPriorityPod).NotTo(BeNil())
						if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
							Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
								fmt.Sprintf("%s pod failed before %s pod", priorityPodSpec.pod.Name, lowPriorityPodSpec.pod.Name))
						}
					}

					// EvictionPriority 0 pods should not fail
					if priorityPodSpec.evictionPriority == 0 {
						Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
							fmt.Sprintf("%s pod failed (and shouldn't have failed)", priorityPod.Name))
					}

					// If a pod that is not evictionPriority 0 has not been evicted, we are not done
					if priorityPodSpec.evictionPriority != 0 && priorityPod.Status.Phase != v1.PodFailed {
						done = false
					}
				}
				if done {
					return nil
				}
				return fmt.Errorf("pods that caused %s have not been evicted.", testCondition)
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())
		})

		AfterEach(func() {
			By("making sure conditions eventually return to normal")
			Eventually(func() bool {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				return hasPressure
			}, evictionTestTimeout, evictionPollInterval).Should(BeFalse())

			By("making sure conditions do not return")
			Consistently(func() bool {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				return hasPressure
			}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeFalse())

			By("making sure we can start a new pod after the test")
			podName := "test-admit-pod"
			f.PodClient().Create(&v1.Pod{
				ObjectMeta: v1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image: "gcr.io/google_containers/busybox:1.24",
							Name:  podName,
						},
					},
				},
			})
			if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
				logPodEvents(f)
				logNodeEvents(f)
			}
		})
	})
}
Example #21
// runEvictionTest sets up a testing environment given the provided pods, and checks a few things:
//		It ensures that the desired testCondition is actually triggered.
//		It ensures that evictionPriority 0 pods are not evicted.
//		It ensures that lower evictionPriority pods are always evicted before higher evictionPriority pods (2 evicted before 1, etc.).
//		It ensures that all lower evictionPriority pods are eventually evicted.
// runEvictionTest then cleans up the testing environment by deleting the provided pods, and ensures that testCondition no longer exists.
func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs []podTestSpec,
	evictionTestTimeout time.Duration, hasPressureCondition func(*framework.Framework, string) (bool, error)) {

	Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {

		BeforeEach(func() {
			By("setting up pods to be used by tests")
			for _, spec := range podTestSpecs {
				By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
				f.PodClient().CreateSync(&spec.pod)
			}
		})

		It(fmt.Sprintf("should eventually see %s, and then evict all of the correct pods", testCondition), func() {
			Eventually(func() error {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				if hasPressure {
					return nil
				}
				return fmt.Errorf("Condition: %s not encountered", testCondition)
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())

			Eventually(func() error {
				// Gather current information
				updatedPodList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
				if err != nil {
					return err
				}
				updatedPods := updatedPodList.Items
				for _, p := range updatedPods {
					framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
				}
				_, err = hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))

				By("checking eviction ordering and ensuring important pods don't fail")
				done := true
				for _, priorityPodSpec := range podTestSpecs {
					var priorityPod v1.Pod
					for _, p := range updatedPods {
						if p.Name == priorityPodSpec.pod.Name {
							priorityPod = p
						}
					}
					Expect(priorityPod).NotTo(BeNil())

					// Check eviction ordering.
					// Note: it is alright for a priority 1 and priority 2 pod (for example) to fail in the same round
					for _, lowPriorityPodSpec := range podTestSpecs {
						var lowPriorityPod v1.Pod
						for _, p := range updatedPods {
							if p.Name == lowPriorityPodSpec.pod.Name {
								lowPriorityPod = p
							}
						}
						Expect(lowPriorityPod).NotTo(BeNil())
						if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
							Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
								fmt.Sprintf("%s pod failed before %s pod", priorityPodSpec.pod.Name, lowPriorityPodSpec.pod.Name))
						}
					}

					// EvictionPriority 0 pods should not fail
					if priorityPodSpec.evictionPriority == 0 {
						Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
							fmt.Sprintf("%s pod failed (and shouldn't have failed)", priorityPod.Name))
					}

					// If a pod that is not evictionPriority 0 has not been evicted, we are not done
					if priorityPodSpec.evictionPriority != 0 && priorityPod.Status.Phase != v1.PodFailed {
						done = false
					}
				}
				if done {
					return nil
				}
				return fmt.Errorf("pods that caused %s have not been evicted.", testCondition)
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())

			// We observe pressure from the API server. The eviction manager observes pressure from the kubelet's internal stats.
			// This means the eviction manager will observe pressure before we do, creating a delay between when the eviction manager
			// evicts a pod and when we observe the pressure by querying the API server. Add a delay here to account for this.
			By("making sure pressure from test has surfaced before continuing")
			time.Sleep(pressureDelay)

			By("making sure conditions eventually return to normal")
			Eventually(func() error {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				if hasPressure {
					return fmt.Errorf("Conditions haven't returned to normal, we still have %s", testCondition)
				}
				return nil
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())

			By("making sure conditions do not return")
			Consistently(func() error {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				if hasPressure {
					return fmt.Errorf("%s disappeared and then reappeared", testCondition)
				}
				return nil
			}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil())

			By("making sure we can start a new pod after the test")
			podName := "test-admit-pod"
			f.PodClient().CreateSync(&v1.Pod{
				ObjectMeta: v1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image: framework.GetPauseImageNameForHostArch(),
							Name:  podName,
						},
					},
				},
			})
		})

		AfterEach(func() {
			By("deleting pods")
			for _, spec := range podTestSpecs {
				By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
				f.PodClient().DeleteSync(spec.pod.Name, &v1.DeleteOptions{}, podDisappearTimeout)
			}

			if CurrentGinkgoTestDescription().Failed {
				if framework.TestContext.DumpLogsOnFailure {
					logPodEvents(f)
					logNodeEvents(f)
				}
				By("sleeping to allow for cleanup of test")
				time.Sleep(postTestConditionMonitoringPeriod)
			}
		})
	})
}
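The podTestSpec type does not appear in these examples. Judging only from the fields used above (pod, evictionPriority), a plausible sketch and an illustrative invocation might look like this; the condition name, pod values, and pressure check are placeholders.
// Hypothetical sketch of podTestSpec, inferred from its usage above.
type podTestSpec struct {
	// evictionPriority 0 means the pod must never be evicted;
	// higher numbers are expected to be evicted earlier (2 before 1, and so on).
	evictionPriority int
	pod              v1.Pod
}

// Illustrative call:
// runEvictionTest(f, "DiskPressure", []podTestSpec{
// 	{evictionPriority: 1, pod: diskHogPod},
// 	{evictionPriority: 0, pod: innocentPod},
// }, evictionTestTimeout, hasDiskPressure)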
Example #22
func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
	var err error
	gitServerPodName := "git-server-" + string(uuid.NewUUID())
	containerPort := 8000

	labels := map[string]string{"name": gitServerPodName}

	gitServerPod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name:   gitServerPodName,
			Labels: labels,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "git-repo",
					Image:           "gcr.io/google_containers/fakegitserver:0.1",
					ImagePullPolicy: "IfNotPresent",
					Ports: []v1.ContainerPort{
						{ContainerPort: int32(containerPort)},
					},
				},
			},
		},
	}
	f.PodClient().CreateSync(gitServerPod)

	// Portal IP and port
	httpPort := 2345

	gitServerSvc := &v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name: "git-server-svc",
		},
		Spec: v1.ServiceSpec{
			Selector: labels,
			Ports: []v1.ServicePort{
				{
					Name:       "http-portal",
					Port:       int32(httpPort),
					TargetPort: intstr.FromInt(containerPort),
				},
			},
		},
	}

	if gitServerSvc, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
		framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
	}

	return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
		By("Cleaning up the git server pod")
		if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, v1.NewDeleteOptions(0)); err != nil {
			framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
		}
		By("Cleaning up the git server svc")
		if err := f.ClientSet.Core().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
			framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
		}
	}
}
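A hedged usage sketch; the gitRepo volume wiring and the volume name are illustrative.
// Hypothetical sketch: start the fake git server, schedule its cleanup, and point a
// gitRepo volume at the returned URL.
func sketchGitServerUsage(f *framework.Framework) {
	gitURL, gitRepo, cleanup := createGitServer(f)
	defer cleanup() // tears down the git server pod and service at the end of the test body
	vol := v1.Volume{
		Name: "git-volume",
		VolumeSource: v1.VolumeSource{
			GitRepo: &v1.GitRepoVolumeSource{
				Repository: gitURL,
				Directory:  gitRepo,
			},
		},
	}
	_ = vol // mount this volume into a test pod spec and create the pod here
}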
Example #23
func createCadvisorPod(f *framework.Framework) {
	f.PodClient().CreateSync(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: cadvisorPodName,
		},
		Spec: api.PodSpec{
			// It uses a host port for the tests to collect data.
			// Currently we can not use port mapping in test-e2e-node.
			SecurityContext: &api.PodSecurityContext{
				HostNetwork: true,
			},
			Containers: []api.Container{
				{
					Image: cadvisorImageName,
					Name:  cadvisorPodName,
					Ports: []api.ContainerPort{
						{
							Name:          "http",
							HostPort:      cadvisorPort,
							ContainerPort: cadvisorPort,
							Protocol:      api.ProtocolTCP,
						},
					},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "sys",
							ReadOnly:  true,
							MountPath: "/sys",
						},
						{
							Name:      "var-run",
							ReadOnly:  false,
							MountPath: "/var/run",
						},
						{
							Name:      "docker",
							ReadOnly:  true,
							MountPath: "/var/lib/docker/",
						},
						{
							Name:      "rootfs",
							ReadOnly:  true,
							MountPath: "/rootfs",
						},
					},
					Args: []string{
						"--profiling",
						fmt.Sprintf("--housekeeping_interval=%ds", houseKeepingInterval),
						fmt.Sprintf("--port=%d", cadvisorPort),
					},
				},
			},
			Volumes: []api.Volume{
				{
					Name:         "rootfs",
					VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/"}},
				},
				{
					Name:         "var-run",
					VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/var/run"}},
				},
				{
					Name:         "sys",
					VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/sys"}},
				},
				{
					Name:         "docker",
					VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/var/lib/docker"}},
				},
			},
		},
	})
}
Example #24
// Tests the following:
// 	pods are created, and all containers restart the specified number of times
// 	while containers are running, the number of copies of a single container does not exceed maxPerPodContainer
// 	while containers are running, the total number of containers does not exceed maxTotalContainers
// 	while containers are running, if not constrained by maxPerPodContainer or maxTotalContainers, keep an extra copy of each container
// 	once pods are killed, all containers are eventually cleaned up
func containerGCTest(f *framework.Framework, test testRun) {
	Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
		BeforeEach(func() {
			realPods := getPods(test.testPods)
			f.PodClient().CreateBatch(realPods)
			By("Making sure all containers restart the specified number of times")
			Eventually(func() error {
				for _, podSpec := range test.testPods {
					updatedPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(podSpec.podName, metav1.GetOptions{})
					if err != nil {
						return err
					}
					if len(updatedPod.Status.ContainerStatuses) != podSpec.numContainers {
						return fmt.Errorf("expected pod %s to have %d containers, actual: %d",
							updatedPod.Name, podSpec.numContainers, len(updatedPod.Status.ContainerStatuses))
					}
					for _, containerStatus := range updatedPod.Status.ContainerStatuses {
						if containerStatus.RestartCount != podSpec.restartCount {
							return fmt.Errorf("pod %s had container with restartcount %d.  Should have been at least %d",
								updatedPod.Name, containerStatus.RestartCount, podSpec.restartCount)
						}
					}
				}
				return nil
			}, setupDuration, runtimePollInterval).Should(BeNil())
		})

		It("Should eventually garbage collect containers when we exceed the number of dead containers per container", func() {
			totalContainers := 0
			for _, pod := range test.testPods {
				totalContainers += pod.numContainers*2 + 1
			}
			Eventually(func() error {
				total := 0
				for _, pod := range test.testPods {
					containerNames, err := pod.getContainerNames()
					if err != nil {
						return err
					}
					total += len(containerNames)
					// Check maxPerPodContainer for each container in the pod
					for i := 0; i < pod.numContainers; i++ {
						containerCount := 0
						for _, containerName := range containerNames {
							if strings.Contains(containerName, pod.getContainerName(i)) {
								containerCount += 1
							}
						}
						if containerCount > maxPerPodContainer+1 {
							return fmt.Errorf("expected number of copies of container: %s, to be <= maxPerPodContainer: %d; list of containers: %v",
								pod.getContainerName(i), maxPerPodContainer, containerNames)
						}
					}
				}
				// Check maxTotalContainers. Currently, the default is -1, so this will never happen until we can configure maxTotalContainers.
				if maxTotalContainers > 0 && totalContainers <= maxTotalContainers && total > maxTotalContainers {
					return fmt.Errorf("expected total number of containers: %v, to be <= maxTotalContainers: %v", total, maxTotalContainers)
				}
				return nil
			}, garbageCollectDuration, runtimePollInterval).Should(BeNil())

			if maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers
				By("Making sure the kubelet consistently keeps around an extra copy of each container.")
				Consistently(func() error {
					for _, pod := range test.testPods {
						containerNames, err := pod.getContainerNames()
						if err != nil {
							return err
						}
						for i := 0; i < pod.numContainers; i++ {
							containerCount := 0
							for _, containerName := range containerNames {
								if strings.Contains(containerName, pod.getContainerName(i)) {
									containerCount += 1
								}
							}
							if pod.restartCount > 0 && containerCount < maxPerPodContainer+1 {
								return fmt.Errorf("expected pod %v to have extra copies of old containers", pod.podName)
							}
						}
					}
					return nil
				}, garbageCollectDuration, runtimePollInterval).Should(BeNil())
			}
		})

		AfterEach(func() {
			for _, pod := range test.testPods {
				By(fmt.Sprintf("Deleting Pod %v", pod.podName))
				f.PodClient().DeleteSync(pod.podName, &v1.DeleteOptions{}, defaultRuntimeRequestTimeoutDuration)
			}

			By("Making sure all containers get cleaned up")
			Eventually(func() error {
				for _, pod := range test.testPods {
					containerNames, err := pod.getContainerNames()
					if err != nil {
						return err
					}
					if len(containerNames) > 0 {
						return fmt.Errorf("%v containers still remain", containerNames)
					}
				}
				return nil
			}, garbageCollectDuration, runtimePollInterval).Should(BeNil())

			if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
				logNodeEvents(f)
				logPodEvents(f)
			}
		})
	})
}
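Neither testRun nor its per-pod spec type is shown here. The following is a hedged sketch inferred purely from the fields and calls above; the function fields stand in for methods whose implementations are runtime-specific.
// Hypothetical sketch of the types containerGCTest consumes, inferred from usage.
type testPodSpec struct {
	podName       string
	numContainers int
	restartCount  int32
	// getContainerNames lists the container instances the runtime currently holds for
	// this pod; getContainerName returns the name used for the i-th container.
	getContainerNames func() ([]string, error)
	getContainerName  func(i int) string
}

type testRun struct {
	testName string
	testPods []*testPodSpec
}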
Example #25
// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *v1.Pod {
	podClient := f.PodClient()

	portCount := len(config.serverPorts)
	serverPodPorts := make([]v1.ContainerPort, portCount)

	for i := 0; i < portCount; i++ {
		portName := fmt.Sprintf("%s-%d", config.prefix, i)

		serverPodPorts[i] = v1.ContainerPort{
			Name:          portName,
			ContainerPort: int32(config.serverPorts[i]),
			Protocol:      v1.ProtocolTCP,
		}
	}

	volumeCount := len(config.volumes)
	volumes := make([]v1.Volume, volumeCount)
	mounts := make([]v1.VolumeMount, volumeCount)

	i := 0
	for src, dst := range config.volumes {
		mountName := fmt.Sprintf("path%d", i)
		volumes[i].Name = mountName
		volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
			Path: src,
		}

		mounts[i].Name = mountName
		mounts[i].ReadOnly = false
		mounts[i].MountPath = dst

		i++
	}

	By(fmt.Sprint("creating ", config.prefix, " server pod"))
	privileged := new(bool)
	*privileged = true
	serverPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.prefix + "-server",
			Labels: map[string]string{
				"role": config.prefix + "-server",
			},
		},

		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  config.prefix + "-server",
					Image: config.serverImage,
					SecurityContext: &v1.SecurityContext{
						Privileged: privileged,
					},
					Args:         config.serverArgs,
					Ports:        serverPodPorts,
					VolumeMounts: mounts,
				},
			},
			Volumes: volumes,
		},
	}
	serverPod = podClient.CreateSync(serverPod)

	By("locating the server pod")
	pod, err := podClient.Get(serverPod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err)

	By("sleeping a bit to give the server time to start")
	time.Sleep(20 * time.Second)
	return pod
}
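Examples #2, #19, and #25 are meant to work together. The sketch below is a hedged end-to-end outline; the NFS image tag, port, and expected content string are illustrative assumptions.
// Hypothetical sketch: start a volume server pod, mount its export in a client pod,
// verify the content, and clean both up afterwards.
func sketchVolumeServerUsage(f *framework.Framework) {
	config := VolumeTestConfig{
		namespace:   f.Namespace.Name,
		prefix:      "nfs",
		serverImage: "gcr.io/google_containers/volume-nfs:0.8", // illustrative image tag
		serverPorts: []int{2049},
	}
	defer volumeTestCleanup(f, config)

	serverPod := startVolumeServer(f, config)
	serverIP := serverPod.Status.PodIP

	volume := v1.VolumeSource{
		NFS: &v1.NFSVolumeSource{
			Server:   serverIP,
			Path:     "/",
			ReadOnly: true,
		},
	}
	testVolumeClient(f, config, volume, nil, "Hello from NFS!")
}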
Example #26
func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64) {
	var (
		name            = "configmap-test-volume-map-" + string(util.NewUUID())
		volumeName      = "configmap-volume"
		volumeMountPath = "/etc/configmap-volume"
		configMap       = newConfigMap(f, name)
	)

	By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))

	var err error
	if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
		framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
	}

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "pod-configmaps-" + string(util.NewUUID()),
		},
		Spec: api.PodSpec{
			SecurityContext: &api.PodSecurityContext{},
			Volumes: []api.Volume{
				{
					Name: volumeName,
					VolumeSource: api.VolumeSource{
						ConfigMap: &api.ConfigMapVolumeSource{
							LocalObjectReference: api.LocalObjectReference{
								Name: name,
							},
							Items: []api.KeyToPath{
								{
									Key:  "data-2",
									Path: "path/to/data-2",
								},
							},
						},
					},
				},
			},
			Containers: []api.Container{
				{
					Name:  "configmap-volume-test",
					Image: "gcr.io/google_containers/mounttest:0.6",
					Args:  []string{"--file_content=/etc/configmap-volume/path/to/data-2"},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumeMountPath,
							ReadOnly:  true,
						},
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}

	if uid != 0 {
		pod.Spec.SecurityContext.RunAsUser = &uid
	}

	if fsGroup != 0 {
		pod.Spec.SecurityContext.FSGroup = &fsGroup
	}

	f.PodClient().MungeSpec(pod)

	framework.TestContainerOutput("consume configMaps", f.Client, pod, 0, []string{
		"content of file \"/etc/configmap-volume/path/to/data-2\": value-2",
	}, f.Namespace.Name)
}