Example 1
// runDensitySeqTest runs the density sequential pod creation test
func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest) (time.Duration, []framework.PodLatencyData) {
	const (
		podType               = "density_test_pod"
		sleepBeforeCreatePods = 30 * time.Second
	)
	bgPods := newTestPods(testArg.bgPodsNr, ImageRegistry[pauseImage], "background_pod")
	testPods := newTestPods(testArg.podsNr, ImageRegistry[pauseImage], podType)

	By("Creating a batch of background pods")

	// CreateBatch is synchronized; all pods are running when it returns.
	f.PodClient().CreateBatch(bgPods)

	time.Sleep(sleepBeforeCreatePods)

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out.
	defer deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
	defer rc.Stop()

	// Create pods sequentially (back-to-back). e2eLags have been sorted.
	batchlag, e2eLags := createBatchPodSequential(f, testPods)

	// Log throughput data.
	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testArg.getTestName())

	return batchlag, e2eLags
}
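The sorted e2eLags returned above lend themselves to percentile reporting. What follows is a minimal sketch of that reduction, assuming only the sort order that createBatchPodSequential guarantees; podLatency merely mirrors framework.PodLatencyData, and percentile is illustrative rather than the framework's actual reporting code (it needs only the standard time package).

// podLatency mirrors framework.PodLatencyData; redeclared here for illustration.
type podLatency struct {
	Name    string
	Latency time.Duration
}

// percentile returns the p-th percentile latency, assuming lags is sorted ascending.
func percentile(lags []podLatency, p int) time.Duration {
	if len(lags) == 0 {
		return 0
	}
	idx := (len(lags)*p+99)/100 - 1 // 0-based ceiling index
	if idx < 0 {
		idx = 0
	}
	return lags[idx].Latency
}

For example, with lags of 800ms, 1.2s, and 4s, percentile(lags, 50) yields 1.2s and percentile(lags, 90) yields 4s.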
Example 2
func doTest0644FSGroup(f *framework.Framework, image string, medium api.StorageMedium) {
	var (
		volumePath = "/test-volume"
		filePath   = path.Join(volumePath, "test-file")
		source     = &api.EmptyDirVolumeSource{Medium: medium}
		pod        = testPodWithVolume(image, volumePath, source)
	)

	pod.Spec.Containers[0].Args = []string{
		fmt.Sprintf("--fs_type=%v", volumePath),
		fmt.Sprintf("--new_file_0644=%v", filePath),
		fmt.Sprintf("--file_perm=%v", filePath),
	}

	fsGroup := int64(123)
	pod.Spec.SecurityContext.FSGroup = &fsGroup

	msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
	out := []string{
		"perms of file \"/test-volume/test-file\": -rw-r--r--",
		"content of file \"/test-volume/test-file\": mount-tester new file",
	}
	if medium == api.StorageMediumMemory {
		out = append(out, "mount type of \"/test-volume\": tmpfs")
	}
	f.TestContainerOutput(msg, pod, 0, out)
}
Example 3
func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   podName,
			Labels: map[string]string{"name": podName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "dapi-container",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: []string{"sh", "-c", "env"},
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("250m"),
							api.ResourceMemory: resource.MustParse("32Mi"),
						},
						Limits: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("1250m"),
							api.ResourceMemory: resource.MustParse("64Mi"),
						},
					},
					Env: env,
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}

	f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
}
Example 4
func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   podName,
			Labels: map[string]string{"name": podName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "dapi-container",
					Image:   ImageRegistry[busyBoxImage],
					Command: []string{"sh", "-c", "env"},
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("250m"),
							api.ResourceMemory: resource.MustParse("32Mi"),
						},
						Limits: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("1250m"),
							api.ResourceMemory: resource.MustParse("64Mi"),
						},
					},
					Env: env,
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	// TODO(random-liu): Change TestContainerOutputRegexp to use PodClient and avoid MungeSpec explicitly
	f.PodClient().MungeSpec(pod)

	f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
}
Example 5
// doTestWriteAndReadToLocalSsd writes a file to a local SSD mount, reads it back,
// and verifies the data via the container output.
func doTestWriteAndReadToLocalSsd(f *framework.Framework) {
	var pod = testPodWithSsd("echo 'hello world' > /mnt/disks/ssd0/data  && sleep 1 && cat /mnt/disks/ssd0/data")
	var msg string
	var out = []string{"hello world"}

	f.TestContainerOutput(msg, pod, 0, out)
}
Example 6
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {

	By("submitting the pod to kubernetes")
	podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

	By("retrieving the pod")
	pod, err := podClient.Get(pod.Name)
	if err != nil {
		framework.Failf("Failed to get pod %s: %v", pod.Name, err)
	}
	// Try to find the expected value for each expected name.
	By("looking for the results for each expected name from probers")
	assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)

	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
Example 7
func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) {
	for filePath, expectedContents := range fileAndContentToVerify {
		var value string
		// Retry to tolerate transient failures while reading the content.
		for i := 0; i < maxReadRetry; i++ {
			v, err := f.ReadFileViaContainer(podName, containerName, filePath)
			value = v
			if err != nil {
				framework.Logf("Error reading file: %v", err)
			}
			framework.ExpectNoError(err)
			framework.Logf("Read file %q with content: %v (iteration %d)", filePath, v, i)
			if strings.TrimSpace(v) != strings.TrimSpace(expectedContents) {
				framework.Logf("Warning: read content <%q> does not match execpted content <%q>.", v, expectedContents)
				size, err := f.CheckFileSizeViaContainer(podName, containerName, filePath)
				if err != nil {
					framework.Logf("Error checking file size: %v", err)
				}
				framework.Logf("Check file %q size: %q", filePath, size)
			} else {
				break
			}
		}
		Expect(strings.TrimSpace(value)).To(Equal(strings.TrimSpace(expectedContents)))
	}
}
Example 8
// Delete the passed in pod.
func deletePod(f *framework.Framework, c *client.Client, ns string, pod *api.Pod) error {

	framework.Logf("Deleting pod %v", pod.Name)
	err := c.Pods(ns).Delete(pod.Name, nil)
	if err != nil {
		return fmt.Errorf("Pod %v encountered a delete error: %v", pod.Name, err)
	}

	// Wait for pod to terminate
	err = f.WaitForPodTerminated(pod.Name, "")
	if err != nil && !apierrs.IsNotFound(err) {
		return fmt.Errorf("Pod %v will not teminate: %v", pod.Name, err)
	}

	// Re-get the pod to double check that it has been deleted; expect err
	// Note: Get() writes a log error if the pod is not found
	_, err = c.Pods(ns).Get(pod.Name)
	if err == nil {
		return fmt.Errorf("Pod %v has been deleted but able to re-Get the deleted pod", pod.Name)
	}
	if !apierrs.IsNotFound(err) {
		return fmt.Errorf("Pod %v has been deleted but still exists: %v", pod.Name, err)
	}

	framework.Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
	return nil
}
Example 9
func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	if shouldRun {
		// The pod needs to start before it stops, so wait for the longer start timeout.
		framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
			f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	} else {
		// Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
		w, err := f.PodClient().Watch(v1.SingleObject(metav1.ObjectMeta{Name: pod.Name}))
		framework.ExpectNoError(err)
		_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
			switch e.Type {
			case watch.Deleted:
				return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
			}
			switch t := e.Object.(type) {
			case *v1.Pod:
				if t.Status.Reason == "AppArmor" {
					return true, nil
				}
			}
			return false, nil
		})
		framework.ExpectNoError(err)
	}
	p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return p.Status
}
Example 10
func reportLogsFromFluentdPod(f *framework.Framework) error {
	synthLoggerPod, err := f.PodClient().Get(synthLoggerPodName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("Failed to get synth logger pod due to %v", err)
	}

	synthLoggerNodeName := synthLoggerPod.Spec.NodeName
	if synthLoggerNodeName == "" {
		return errors.New("Synth logger pod is not assigned to a node")
	}

	label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"}))
	options := v1.ListOptions{LabelSelector: label.String()}
	fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
	if err != nil {
		return fmt.Errorf("Failed to list fluentd pods due to %v", err)
	}

	for _, fluentdPod := range fluentdPods.Items {
		if fluentdPod.Spec.NodeName == synthLoggerNodeName {
			containerName := fluentdPod.Spec.Containers[0].Name
			logs, err := framework.GetPodLogs(f.ClientSet, api.NamespaceSystem, fluentdPod.Name, containerName)
			if err != nil {
				return fmt.Errorf("Failed to get logs from fluentd pod %s due to %v", fluentdPod.Name, err)
			}
			framework.Logf("Logs from fluentd pod %s:\n%s", fluentdPod.Name, logs)
			return nil
		}
	}

	return fmt.Errorf("Failed to find fluentd pod running on node %s", synthLoggerNodeName)
}
Example 11
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	framework.ExpectNoError(f.WaitForPodNoLongerRunning(pod.Name))
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example 12
// Cannot be moved to util, as By and Expect must be called from within a Ginkgo test unit.
func registerClusters(clusters map[string]*cluster, userAgentName, federationName string, f *framework.Framework) string {
	contexts := f.GetUnderlyingFederatedContexts()

	for _, context := range contexts {
		createClusterObjectOrFail(f, &context)
	}

	By("Obtaining a list of all the clusters")
	clusterList := waitForAllClustersReady(f, len(contexts))

	framework.Logf("Checking that %d clusters are Ready", len(contexts))
	for _, context := range contexts {
		clusterIsReadyOrFail(f, &context)
	}
	framework.Logf("%d clusters are Ready", len(contexts))

	primaryClusterName := clusterList.Items[0].Name
	By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
	for i, c := range clusterList.Items {
		framework.Logf("Creating a clientset for the cluster %s", c.Name)
		Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
		clusters[c.Name] = &cluster{c.Name, createClientsetForCluster(c, i, userAgentName), false, nil}
	}
	createNamespaceInClusters(clusters, f)
	return primaryClusterName
}
Example 13
func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string) {

	By("submitting the pod to kubernetes")
	podClient := f.Client.Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

	By("retrieving the pod")
	pod, err := podClient.Get(pod.Name)
	if err != nil {
		framework.Failf("Failed to get pod %s: %v", pod.Name, err)
	}
	// Try to find results for each expected name.
	By("looking for the results for each expected name from probiers")
	assertFilesExist(fileNames, "results", pod, f.Client)

	// TODO: probe from the host, too.

	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
Example 14
// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))

	defer GinkgoRecover()

	client := f.Client
	podClient := client.Pods(config.namespace)

	err := podClient.Delete(config.prefix+"-client", nil)
	if err != nil {
		// Log the error before failing test: if the test has already failed,
		// framework.ExpectNoError() won't print anything to logs!
		glog.Warningf("Failed to delete client pod: %v", err)
		framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
	}

	if config.serverImage != "" {
		if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
		}
		// See issue #24100.
		// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
		By("sleeping a bit so client can stop and unmount")
		time.Sleep(20 * time.Second)

		err = podClient.Delete(config.prefix+"-server", nil)
		if err != nil {
			glog.Warningf("Failed to delete server pod: %v", err)
			framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
		}
	}
}
Example 15
func discoverService(f *framework.Framework, name string, exists bool) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   FederatedServicePod,
			Labels: map[string]string{"name": FederatedServicePod},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "federated-service-discovery-container",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: []string{"sh", "-c", "nslookup", name},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	if exists {
		f.TestContainerOutputRegexp("federated service discovery", pod, 0, []string{
			`Name:\s+` + FederatedDNS1123Regexp + `\nAddress \d+:\s+` + FederatedIPAddrRegexp + `\s+` + FederatedDNS1123Regexp,
		})
	} else {
		f.TestContainerOutputRegexp("federated service discovery", pod, 0, []string{
			`nslookup: can't resolve '` + name + `'`,
		})
	}
}
Example 16
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
	podClient := f.PodClient()
	ns := f.Namespace.Name
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	containerName := pod.Spec.Containers[0].Name
	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()
	By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
	podClient.Create(pod)

	// Wait until the pod is not pending. (Here we check for something other than
	// 'Pending' rather than waiting for 'Running', since when failures occur the pod
	// goes to 'Terminated', which could block indefinitely.)
	framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
		fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
	framework.Logf("Started pod %s in namespace %s", pod.Name, ns)

	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
	initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
	framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)

	// Wait for the restart state to be as desired.
	deadline := time.Now().Add(timeout)
	lastRestartCount := initialRestartCount
	observedRestarts := int32(0)
	for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
		pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
		restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
		if restartCount != lastRestartCount {
			framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
				ns, pod.Name, restartCount, time.Since(start))
			if restartCount < lastRestartCount {
				framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
					ns, pod.Name, lastRestartCount, restartCount)
			}
		}
		observedRestarts = restartCount - initialRestartCount
		if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
			// Stop if we have observed at least expectNumRestarts restarts.
			break
		}
		lastRestartCount = restartCount
	}

	// If we expected 0 restarts, fail if observed any restart.
	// If we expected n restarts (n > 0), fail if we observed < n restarts.
	if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
		int(observedRestarts) < expectNumRestarts) {
		framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
			ns, pod.Name, expectNumRestarts, observedRestarts)
	}
}
Example 17
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	// The pod needs to start before it stops, so wait for the longer start timeout.
	framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
		f.Client, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example 18
// createBatchPodWithRateControl creates a batch of pods concurrently, using one goroutine per creation.
// Creations are spaced by an interval to control throughput.
func createBatchPodWithRateControl(f *framework.Framework, pods []*api.Pod, interval time.Duration) map[string]unversioned.Time {
	createTimes := make(map[string]unversioned.Time)
	for _, pod := range pods {
		createTimes[pod.ObjectMeta.Name] = unversioned.Now()
		go f.PodClient().Create(pod)
		time.Sleep(interval)
	}
	return createTimes
}
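The createTimes map above records when each pod was submitted; pairing it with observed running times yields per-pod startup latency. The helper below is a hypothetical sketch (a runningTimes map, gathered for instance from a pod watch, is an assumption, and time.Time stands in for unversioned.Time):

// toLatencies pairs submission times with observed running times;
// pods that never reached Running are skipped.
func toLatencies(createTimes, runningTimes map[string]time.Time) []time.Duration {
	latencies := make([]time.Duration, 0, len(createTimes))
	for name, created := range createTimes {
		if running, ok := runningTimes[name]; ok {
			latencies = append(latencies, running.Sub(created))
		}
	}
	return latencies
}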
Example 19
func createNamespaces(f *framework.Framework, nodeCount, podsPerNode int) []*api.Namespace {
	namespaceCount := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
	namespaces := []*api.Namespace{}
	for i := 1; i <= namespaceCount; i++ {
		namespace, err := f.CreateNamespace(fmt.Sprintf("load-%d-nodepods-%d", podsPerNode, i), nil)
		framework.ExpectNoError(err)
		namespaces = append(namespaces, namespace)
	}
	return namespaces
}
Example 20
func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {
	By("creating the target pod")
	pod := pfPod("", "10", "10", "100", bindAddress)
	if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil {
		framework.Failf("Couldn't create pod: %v", err)
	}
	if err := f.WaitForPodReady(pod.Name); err != nil {
		framework.Failf("Pod did not start running: %v", err)
	}
	defer func() {
		logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
		if err != nil {
			framework.Logf("Error getting pod log: %v", err)
		} else {
			framework.Logf("Pod log:\n%s", logs)
		}
	}()

	By("Running 'kubectl port-forward'")
	cmd := runPortForward(f.Namespace.Name, pod.Name, 80)
	defer cmd.Stop()

	By("Dialing the local port")
	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
	if err != nil {
		framework.Failf("Couldn't connect to port %d: %v", cmd.port, err)
	}
	defer func() {
		By("Closing the connection to the local port")
		conn.Close()
	}()

	By("Reading data from the local port")
	fromServer, err := ioutil.ReadAll(conn)
	if err != nil {
		framework.Failf("Unexpected error reading data from the server: %v", err)
	}

	if e, a := strings.Repeat("x", 100), string(fromServer); e != a {
		framework.Failf("Expected %q from server, got %q", e, a)
	}

	By("Waiting for the target pod to stop running")
	if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
		framework.Failf("Container did not terminate: %v", err)
	}

	By("Verifying logs")
	logOutput, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
	if err != nil {
		framework.Failf("Error retrieving pod logs: %v", err)
	}
	verifyLogMessage(logOutput, "Accepted client connection")
	verifyLogMessage(logOutput, "Done")
}
Example 21
func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*api.Namespace, error) {
	namespaces := []*api.Namespace{}
	for i := 1; i <= namespaceCount; i++ {
		namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil)
		if err != nil {
			return []*api.Namespace{}, err
		}
		namespaces = append(namespaces, namespace)
	}
	return namespaces, nil
}
Example 22
func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) {
	for filePath, expectedContents := range fileAndContentToVerify {
		v, err := f.ReadFileViaContainer(podName, containerName, filePath)
		if err != nil {
			framework.Logf("Error reading file: %v", err)
		}
		framework.ExpectNoError(err)
		framework.Logf("Read file %q with content: %v", filePath, v)
		Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(expectedContents)))
	}
}
Example 23
// RunLogPodsWithSleepOf creates a pod on every node, logs continuously (with "sleep" pauses), and verifies that the log string
// was produced in each and every pod at least once.  The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {

	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	totalPods := len(nodes.Items)
	Expect(totalPods).NotTo(Equal(0))

	kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text.

	appName := "logging-soak" + podname
	podLabels := f.CreatePodsPerNodeForSimpleApp(
		appName,
		func(n v1.Node) v1.PodSpec {
			return v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "logging-soak",
					Image: "gcr.io/google_containers/busybox:1.24",
					Args: []string{
						"/bin/sh",
						"-c",
						fmt.Sprintf("while true ; do echo %v ; sleep %v; done", kilobyte, sleep.Seconds()),
					},
				}},
				NodeName:      n.Name,
				RestartPolicy: v1.RestartPolicyAlways,
			}
		},
		totalPods,
	)

	logSoakVerification := f.NewClusterVerification(
		f.Namespace,
		framework.PodStateVerification{
			Selectors:   podLabels,
			ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
			// We don't validate total log data, since there is no guarantee all logs will be stored forever.
			// Instead, we just validate that some logs are being written to stdout.
			Verify: func(p v1.Pod) (bool, error) {
				s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
				return s != "", err
			},
		},
	)

	largeClusterForgiveness := time.Duration(len(nodes.Items)/5) * time.Second // i.e. a 100 node cluster gets an extra 20 seconds to complete.
	pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)

	if err != nil {
		framework.Failf("Error in wait... %v", err)
	} else if len(pods) < totalPods {
		framework.Failf("Only got %v out of %v", len(pods), totalPods)
	}
}
Example 24
// Delete the passed in pod.
func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *v1.Pod) {

	framework.Logf("Deleting pod %v", pod.Name)
	err := c.Core().Pods(ns).Delete(pod.Name, nil)
	Expect(err).NotTo(HaveOccurred())

	// Wait for pod to terminate.  Expect apierr NotFound
	err = f.WaitForPodTerminated(pod.Name, "")
	Expect(err).To(HaveOccurred())
	Expect(apierrs.IsNotFound(err)).To(BeTrue())

	framework.Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
}
Example 25
// createBatchPodSequential creates pods back-to-back in sequence.
func createBatchPodSequential(f *framework.Framework, pods []*api.Pod) (time.Duration, []framework.PodLatencyData) {
	batchStartTime := unversioned.Now()
	e2eLags := make([]framework.PodLatencyData, 0)
	for _, pod := range pods {
		create := unversioned.Now()
		f.PodClient().CreateSync(pod)
		e2eLags = append(e2eLags,
			framework.PodLatencyData{Name: pod.Name, Latency: unversioned.Now().Time.Sub(create.Time)})
	}
	batchLag := unversioned.Now().Time.Sub(batchStartTime.Time)
	sort.Sort(framework.LatencySlice(e2eLags))
	return batchLag, e2eLags
}
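An aggregate creation throughput also falls out of batchLag directly. Below is a tiny illustrative helper; podsPerSecond is not a framework function, and logPodCreateThroughput in Example 1 presumably reports a similar figure:

// podsPerSecond converts a batch creation lag into pods created per second.
func podsPerSecond(podsNr int, batchLag time.Duration) float64 {
	if batchLag <= 0 {
		return 0
	}
	return float64(podsNr) / batchLag.Seconds()
}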
Example 26
func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
	var err error

	By("Creating a test namespace")
	namespace, err := f.CreateNamespace("nsdeletetest", nil)
	Expect(err).NotTo(HaveOccurred())

	By("Waiting for a default service account to be provisioned in namespace")
	err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
	Expect(err).NotTo(HaveOccurred())

	By("Creating a service in the namespace")
	serviceName := "test-service"
	labels := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	service := &v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name: serviceName,
		},
		Spec: v1.ServiceSpec{
			Selector: labels,
			Ports: []v1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(80),
			}},
		},
	}
	service, err = f.ClientSet.Core().Services(namespace.Name).Create(service)
	Expect(err).NotTo(HaveOccurred())

	By("Deleting the namespace")
	err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil)
	Expect(err).NotTo(HaveOccurred())

	By("Waiting for the namespace to be removed.")
	maxWaitSeconds := int64(60)
	framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
		func() (bool, error) {
			_, err = f.ClientSet.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{})
			if err != nil && errors.IsNotFound(err) {
				return true, nil
			}
			return false, nil
		}))

	By("Verifying there is no service in the namespace")
	_, err = f.ClientSet.Core().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
	Expect(err).To(HaveOccurred())
}
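The wait.Poll call above follows a common poll-until-timeout contract: run the condition every interval until it reports done, returns an error, or the timeout elapses. Here is a stdlib-only restatement of that contract for clarity; it is a sketch, not the actual wait.Poll implementation:

func pollUntil(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v", timeout)
		}
		time.Sleep(interval)
	}
}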
Example 27
func createMemhogPod(f *framework.Framework, genName string, ctnName string, res api.ResourceRequirements) *api.Pod {
	env := []api.EnvVar{
		{
			Name: "MEMORY_LIMIT",
			ValueFrom: &api.EnvVarSource{
				ResourceFieldRef: &api.ResourceFieldSelector{
					Resource: "limits.memory",
				},
			},
		},
	}

	// If a limit is specified, pass 80% of it for -mem-total; otherwise use the downward API
	// to pass limits.memory, which will be the total memory available.
	// This helps prevent a guaranteed pod from triggering an OOM kill due to its low memory limit,
	// which would cause the test to fail inappropriately.
	var memLimit string
	if limit, ok := res.Limits["memory"]; ok {
		memLimit = strconv.Itoa(int(
			float64(limit.Value()) * 0.8))
	} else {
		memLimit = "$(MEMORY_LIMIT)"
	}

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			GenerateName: genName,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyNever,
			Containers: []api.Container{
				{
					Name:            ctnName,
					Image:           "gcr.io/google-containers/stress:v1",
					ImagePullPolicy: "Always",
					Env:             env,
					// 60 min timeout * 60s / tick per 10s = 360 ticks before timeout => ~11.11Mi/tick
					// to fill ~4Gi of memory, so initial ballpark 12Mi/tick.
					// We might see flakes due to timeout if the total memory on the nodes increases.
					Args:      []string{"-mem-alloc-size", "12Mi", "-mem-alloc-sleep", "10s", "-mem-total", memLimit},
					Resources: res,
				},
			},
		},
	}
	// The generated pod.Name will be on the pod spec returned by CreateSync
	pod = f.PodClient().CreateSync(pod)
	glog.Infof("pod created with name: %s", pod.Name)
	return pod
}
Example 28
func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
	var err error

	By("Creating a test namespace")
	namespace, err := f.CreateNamespace("nsdeletetest", nil)
	Expect(err).NotTo(HaveOccurred())

	By("Waiting for a default service account to be provisioned in namespace")
	err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
	Expect(err).NotTo(HaveOccurred())

	By("Creating a pod in the namespace")
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name: "test-pod",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "nginx",
					Image: framework.GetPauseImageName(f.ClientSet),
				},
			},
		},
	}
	pod, err = f.ClientSet.Core().Pods(namespace.Name).Create(pod)
	Expect(err).NotTo(HaveOccurred())

	By("Waiting for the pod to have running status")
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))

	By("Deleting the namespace")
	err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil)
	Expect(err).NotTo(HaveOccurred())

	By("Waiting for the namespace to be removed.")
	maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
	framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
		func() (bool, error) {
			_, err = f.ClientSet.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{})
			if err != nil && errors.IsNotFound(err) {
				return true, nil
			}
			return false, nil
		}))

	By("Verifying there is no pod in the namespace")
	_, err = f.ClientSet.Core().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
	Expect(err).To(HaveOccurred())
}
Example 29
func validateDNSResults(f *e2e.Framework, pod *api.Pod, fileNames sets.String, expect int) {
	By("submitting the pod to kubernetes")
	podClient := f.Client.Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		e2e.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	Expect(f.WaitForPodRunning(pod.Name)).To(BeNil())
	Expect(wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		pod, err := podClient.Get(pod.Name)
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case api.PodSucceeded:
			return true, nil
		case api.PodFailed:
			return false, fmt.Errorf("pod failed")
		default:
			return false, nil
		}
	})).To(BeNil())

	By("retrieving the pod logs")
	r, err := podClient.GetLogs(pod.Name, &api.PodLogOptions{Container: "querier"}).Stream()
	if err != nil {
		e2e.Failf("Failed to get pod logs %s: %v", pod.Name, err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		e2e.Failf("Failed to read pod logs %s: %v", pod.Name, err)
	}

	// Try to find results for each expected name.
	By("looking for the results for each expected name from probiers")

	if err := assertLinesExist(fileNames, expect, bytes.NewBuffer(out)); err != nil {
		e2e.Logf("Got results from pod:\n%s", out)
		e2e.Failf("Unexpected results: %v", err)
	}

	e2e.Logf("DNS probes using %s succeeded\n", pod.Name)
}
Example 30
// runResourceUsageTest runs the resource usage test
func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg resourceTest) {
	const (
		// The monitoring time for one test
		monitoringTime = 10 * time.Minute
		// The periodic reporting period
		reportingPeriod = 5 * time.Minute
		// sleep for an interval here to measure steady data
		sleepAfterCreatePods = 10 * time.Second
	)
	pods := newTestPods(testArg.podsNr, ImageRegistry[pauseImage], "test_pod")

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out.
	defer deletePodsSync(f, append(pods, getCadvisorPod()))
	defer rc.Stop()

	By("Creating a batch of Pods")
	f.PodClient().CreateBatch(pods)

	// wait for a while to let the node be steady
	time.Sleep(sleepAfterCreatePods)

	// Log once and flush the stats.
	rc.LogLatest()
	rc.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPods(f.Client)
	}

	By("Reporting overall resource usage")
	logPods(f.Client)
}
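The monitoring loop above sleeps in reportingPeriod steps but never past the deadline. The same logic, extracted as a stdlib-only restatement (time.Until requires Go 1.8+; older code spells it deadline.Sub(time.Now()), as in the loop above):

// sleepUntilNextReport sleeps for reportingPeriod, clamped to the time
// remaining before deadline.
func sleepUntilNextReport(deadline time.Time, reportingPeriod time.Duration) {
	if timeLeft := time.Until(deadline); timeLeft < reportingPeriod {
		time.Sleep(timeLeft)
	} else {
		time.Sleep(reportingPeriod)
	}
}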