Code Example #1
File: density_test.go  Project: alex-mohr/kubernetes
// runDensitySeqTest runs the density sequential pod creation test
func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string) (time.Duration, []framework.PodLatencyData) {
	const (
		podType               = "density_test_pod"
		sleepBeforeCreatePods = 30 * time.Second
	)
	bgPods := newTestPods(testArg.bgPodsNr, framework.GetPauseImageNameForHostArch(), "background_pod")
	testPods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)

	By("Creating a batch of background pods")

	// CreateBatch is synchronized; all pods are running when it returns.
	f.PodClient().CreateBatch(bgPods)

	time.Sleep(sleepBeforeCreatePods)

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out
	defer deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
	defer rc.Stop()

	// Create pods sequentially (back-to-back). e2eLags have been sorted.
	batchlag, e2eLags := createBatchPodSequential(f, testPods)

	// Log throughput data.
	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo)

	return batchlag, e2eLags
}
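
The createBatchPodSequential helper referenced above is not part of this excerpt. As a rough orientation, a back-to-back creation loop with per-pod e2e latency measurement could look like the sketch below; the function name, pod type, and use of time.Since are illustrative assumptions rather than the project's actual helper, and the sketch assumes the same imports as the surrounding density test file (time, sort, framework, v1).

// Illustrative sketch only -- not the real createBatchPodSequential.
func createBatchPodSequentialSketch(f *framework.Framework, pods []*v1.Pod) (time.Duration, []framework.PodLatencyData) {
	batchStart := time.Now()
	e2eLags := make([]framework.PodLatencyData, 0, len(pods))
	for _, pod := range pods {
		createStart := time.Now()
		// CreateSync blocks until the pod is observed running.
		f.PodClient().CreateSync(pod)
		e2eLags = append(e2eLags,
			framework.PodLatencyData{Name: pod.Name, Latency: time.Since(createStart)})
	}
	// Total wall-clock time for the whole batch.
	batchLag := time.Since(batchStart)
	// Sort latencies so callers such as runDensitySeqTest receive them in ascending order.
	sort.Sort(framework.LatencySlice(e2eLags))
	return batchLag, e2eLags
}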
Code Example #2
func createIdlePod(podName string, podClient *framework.PodClient) {
	podClient.Create(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyNever,
			Containers: []api.Container{
				{
					Image: framework.GetPauseImageNameForHostArch(),
					Name:  podName,
				},
			},
		},
	})
}
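
A possible call site for createIdlePod, shown only as a hedged usage example inside a test body where f is the *framework.Framework; the pod count and naming scheme are made up for illustration.

// Illustrative usage only: create ten idle pause pods named idle-pod-0 .. idle-pod-9.
podClient := f.PodClient()
for i := 0; i < 10; i++ {
	createIdlePod(fmt.Sprintf("idle-pod-%d", i), podClient)
}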
Code Example #3
// runResourceUsageTest runs the resource usage test
func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg resourceTest) {
	const (
		// The monitoring time for one test
		monitoringTime = 10 * time.Minute
		// The periodic reporting period
		reportingPeriod = 5 * time.Minute
		// sleep for an interval here to measure steady data
		sleepAfterCreatePods = 10 * time.Second
	)
	pods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), "test_pod")

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out
	defer deletePodsSync(f, append(pods, getCadvisorPod()))
	defer rc.Stop()

	By("Creating a batch of Pods")
	f.PodClient().CreateBatch(pods)

	// wait for a while to let the node be steady
	time.Sleep(sleepAfterCreatePods)

	// Log once and flush the stats.
	rc.LogLatest()
	rc.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPods(f.Client)
	}

	By("Reporting overall resource usage")
	logPods(f.Client)
}
Code Example #4
File: util.go  Project: olegshaldybin/kubernetes
func makePodSpec() api.PodSpec {
	return api.PodSpec{
		Containers: []api.Container{{
			Name:  "pause",
			Image: e2e.GetPauseImageNameForHostArch(),
			Ports: []api.ContainerPort{{ContainerPort: 80}},
			Resources: api.ResourceRequirements{
				Limits: api.ResourceList{
					api.ResourceCPU:    resource.MustParse("100m"),
					api.ResourceMemory: resource.MustParse("500Mi"),
				},
				Requests: api.ResourceList{
					api.ResourceCPU:    resource.MustParse("100m"),
					api.ResourceMemory: resource.MustParse("500Mi"),
				},
			},
		}},
	}
}
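
makePodSpec only builds the spec; a caller still has to wrap it in an api.Pod before submitting it. A hedged usage sketch follows; the pod name and the client variable c are assumptions for illustration, not code from util.go.

// Illustrative only: wrap the spec in a Pod object and submit it through
// whatever pod client the surrounding utility code provides (c is assumed).
pod := &api.Pod{
	ObjectMeta: api.ObjectMeta{Name: "pause-pod-0"},
	Spec:       makePodSpec(),
}
// e.g. c.Pods(namespace).Create(pod), depending on the client in use.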
Code Example #5
File: simple_mount.go  Project: kubernetes/kubernetes
	// This is a very simple test that exercises the Kubelet's mounter code path.
	// If the mount fails, the pod will not be able to run, and CreateSync will timeout.
	It("should be able to mount an emptydir on a container", func() {
		pod := &v1.Pod{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Pod",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "simple-mount-pod",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name:  "simple-mount-container",
						Image: framework.GetPauseImageNameForHostArch(),
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "simply-mounted-volume",
								MountPath: "/opt/",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "simply-mounted-volume",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: "Memory",
							},
Code Example #6
File: restart_test.go  Project: alex-mohr/kubernetes
		podCount            = 100
		podCreationInterval = 100 * time.Millisecond
		recoverTimeout      = 5 * time.Minute
		startTimeout        = 3 * time.Minute
		// restartCount is chosen so even with minPods we exhaust the default
		// allocation of a /24.
		minPods      = 50
		restartCount = 6
	)

	f := framework.NewDefaultFramework("restart-test")
	Context("Docker Daemon", func() {
		Context("Network", func() {
			It("should recover from ip leak", func() {

				pods := newTestPods(podCount, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
				By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
				createBatchPodWithRateControl(f, pods, podCreationInterval)
				defer deletePodsSync(f, pods)

				// Give the node some time to stabilize, assume pods that enter RunningReady within
				// startTimeout fit on the node and the node is now saturated.
				runningPods := waitForPods(f, podCount, startTimeout)
				if len(runningPods) < minPods {
					framework.Failf("Failed to start %d pods, cannot test that restarting docker doesn't leak IPs", minPods)
				}

				for i := 0; i < restartCount; i += 1 {
					By(fmt.Sprintf("Restarting Docker Daemon iteration %d", i))

					// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494
Code Example #7
File: density_test.go  Project: alex-mohr/kubernetes
// runDensityBatchTest runs the density batch pod creation test
func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string,
	isLogTimeSeries bool) (time.Duration, []framework.PodLatencyData) {
	const (
		podType               = "density_test_pod"
		sleepBeforeCreatePods = 30 * time.Second
	)
	var (
		mutex      = &sync.Mutex{}
		watchTimes = make(map[string]metav1.Time, 0)
		stopCh     = make(chan struct{})
	)

	// create test pod data structure
	pods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)

	// the controller watches the change of pod status
	controller := newInformerWatchPod(f, mutex, watchTimes, podType)
	go controller.Run(stopCh)
	defer close(stopCh)

	// TODO(coufon): in the test we found that the kubelet starts while it is still busy with something else, so
	// 'syncLoop' does not respond to pod creation immediately. Creating the first pod has a delay of around 5s.
	// The node status is already 'ready', so waiting for and checking node readiness does not help here.
	// Wait here for a grace period to let 'syncLoop' become ready.
	time.Sleep(sleepBeforeCreatePods)

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out
	defer deletePodsSync(f, append(pods, getCadvisorPod()))
	defer rc.Stop()

	By("Creating a batch of pods")
	// createBatchPodWithRateControl returns a map from pod name to creation timestamp.
	createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)

	By("Waiting for all Pods to be observed by the watch...")

	Eventually(func() bool {
		return len(watchTimes) == testArg.podsNr
	}, 10*time.Minute, 10*time.Second).Should(BeTrue())

	if len(watchTimes) < testArg.podsNr {
		framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
	}

	// Analyze results
	var (
		firstCreate metav1.Time
		lastRunning metav1.Time
		init        = true
		e2eLags     = make([]framework.PodLatencyData, 0)
	)

	for name, create := range createTimes {
		watch, ok := watchTimes[name]
		Expect(ok).To(Equal(true))

		e2eLags = append(e2eLags,
			framework.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})

		if !init {
			if firstCreate.Time.After(create.Time) {
				firstCreate = create
			}
			if lastRunning.Time.Before(watch.Time) {
				lastRunning = watch
			}
		} else {
			init = false
			firstCreate, lastRunning = create, watch
		}
	}

	sort.Sort(framework.LatencySlice(e2eLags))
	batchLag := lastRunning.Time.Sub(firstCreate.Time)

	// Log time series data.
	if isLogTimeSeries {
		logDensityTimeSeries(rc, createTimes, watchTimes, testInfo)
	}
	// Log throughput data.
	logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr, testInfo)

	return batchLag, e2eLags
}
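
newInformerWatchPod itself is not shown in this excerpt. Given how watchTimes is consumed above, the handler it installs must essentially record the first timestamp at which each pod is observed in the Running phase, guarded by the shared mutex. The following is a hedged sketch of that bookkeeping only; the function name and its wiring into the informer are assumptions, not the real helper.

// Illustrative sketch of the informer callback's bookkeeping, not the real
// newInformerWatchPod: remember the first time each pod is seen Running.
func recordFirstRunning(mutex *sync.Mutex, watchTimes map[string]metav1.Time, p *v1.Pod) {
	mutex.Lock()
	defer mutex.Unlock()
	if p.Status.Phase == v1.PodRunning {
		if _, found := watchTimes[p.Name]; !found {
			watchTimes[p.Name] = metav1.Now()
		}
	}
}

The real helper presumably registers logic like this as the informer's add/update handler for pods of the given podType.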
Code Example #8
// runEvictionTest sets up a testing environment given the provided pods, and checks a few things:
//		It ensures that the desired testCondition is actually triggered.
//		It ensures that evictionPriority 0 pods are not evicted.
//		It ensures that pods with a higher evictionPriority number are always evicted before pods with a lower one (2 evicted before 1, etc.).
//		It ensures that all pods with a non-zero evictionPriority are eventually evicted.
// runEvictionTest then cleans up the testing environment by deleting the provided pods, and ensures that testCondition no longer exists.
func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs []podTestSpec,
	evictionTestTimeout time.Duration, hasPressureCondition func(*framework.Framework, string) (bool, error)) {

	Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {

		BeforeEach(func() {
			By("seting up pods to be used by tests")
			for _, spec := range podTestSpecs {
				By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
				f.PodClient().CreateSync(&spec.pod)
			}
		})

		It(fmt.Sprintf("should eventually see %s, and then evict all of the correct pods", testCondition), func() {
			Eventually(func() error {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				if hasPressure {
					return nil
				}
				return fmt.Errorf("Condition: %s not encountered", testCondition)
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())

			Eventually(func() error {
				// Gather current information
				updatedPodList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
				framework.ExpectNoError(err, "listing pods during eviction test")
				updatedPods := updatedPodList.Items
				for _, p := range updatedPods {
					framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
				}
				_, err = hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))

				By("checking eviction ordering and ensuring important pods dont fail")
				done := true
				for _, priorityPodSpec := range podTestSpecs {
					var priorityPod v1.Pod
					for _, p := range updatedPods {
						if p.Name == priorityPodSpec.pod.Name {
							priorityPod = p
						}
					}
					Expect(priorityPod).NotTo(BeNil())

					// Check eviction ordering.
					// Note: it is alright for a priority 1 and priority 2 pod (for example) to fail in the same round
					for _, lowPriorityPodSpec := range podTestSpecs {
						var lowPriorityPod v1.Pod
						for _, p := range updatedPods {
							if p.Name == lowPriorityPodSpec.pod.Name {
								lowPriorityPod = p
							}
						}
						Expect(lowPriorityPod).NotTo(BeNil())
						if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
							Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
								fmt.Sprintf("%s pod failed before %s pod", priorityPodSpec.pod.Name, lowPriorityPodSpec.pod.Name))
						}
					}

					// EvictionPriority 0 pods should not fail
					if priorityPodSpec.evictionPriority == 0 {
						Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
							fmt.Sprintf("%s pod failed (and shouldn't have failed)", priorityPod.Name))
					}

					// If a pod that is not evictionPriority 0 has not been evicted, we are not done
					if priorityPodSpec.evictionPriority != 0 && priorityPod.Status.Phase != v1.PodFailed {
						done = false
					}
				}
				if done {
					return nil
				}
				return fmt.Errorf("pods that caused %s have not been evicted.", testCondition)
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())

			// We observe pressure from the API server, while the eviction manager observes pressure from the kubelet's internal stats.
			// This means the eviction manager will observe pressure before we do, creating a lag between when the eviction manager
			// evicts a pod and when we observe the pressure by querying the API server. Sleep here to account for that lag.
			By("making sure pressure from test has surfaced before continuing")
			time.Sleep(pressureDelay)

			By("making sure conditions eventually return to normal")
			Eventually(func() error {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				if hasPressure {
					return fmt.Errorf("Conditions havent returned to normal, we still have %s", testCondition)
				}
				return nil
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())

			By("making sure conditions do not return")
			Consistently(func() error {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				if hasPressure {
					return fmt.Errorf("%s dissappeared and then reappeared", testCondition)
				}
				return nil
			}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil())

			By("making sure we can start a new pod after the test")
			podName := "test-admit-pod"
			f.PodClient().CreateSync(&v1.Pod{
				ObjectMeta: v1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image: framework.GetPauseImageNameForHostArch(),
							Name:  podName,
						},
					},
				},
			})
		})

		AfterEach(func() {
			By("deleting pods")
			for _, spec := range podTestSpecs {
				By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
				f.PodClient().DeleteSync(spec.pod.Name, &v1.DeleteOptions{}, podDisappearTimeout)
			}

			if CurrentGinkgoTestDescription().Failed {
				if framework.TestContext.DumpLogsOnFailure {
					logPodEvents(f)
					logNodeEvents(f)
				}
				By("sleeping to allow for cleanup of test")
				time.Sleep(postTestConditionMonitoringPeriod)
			}
		})
	})
}
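
The podTestSpec type is not included in this excerpt. From the way its fields are used above (spec.pod handed to CreateSync, evictionPriority compared as an integer with 0 meaning "must not be evicted"), it is presumably close to the inferred sketch below; check the project's eviction test file for the authoritative definition.

// Inferred shape, shown only for orientation -- not copied from the project.
type podTestSpec struct {
	// evictionPriority 0 pods must never be evicted; among the rest, a pod with a
	// higher number is expected to be evicted before one with a lower number.
	evictionPriority int
	pod              v1.Pod
}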