Example 1
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	if _, err := podClient.Create(pod); err != nil {
		Failf("Failed to create pod: %v", err)
	}
	By("ensuring that pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	err := waitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())
	// Try to make sure the pod gets a hostIP.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			Failf("Gave up waiting for hostIP of pod %s after %v seconds",
				p.Name, time.Since(t).Seconds())
		}
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
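A minimal sketch of how this helper might be driven, assuming the e2e framework used above; the pause-image pod and its name below are illustrative and not part of the original:

// Hypothetical caller: build a one-container pause pod and hand it to testHostIP.
pod := &api.Pod{
	ObjectMeta: api.ObjectMeta{Name: "pod-hostip-test"},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{Name: "test", Image: "gcr.io/google_containers/pause"},
		},
	},
}
testHostIP(c, ns, pod)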
Example 2
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
Example 3
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix, e.g. heapster-monitoring-v1, and this will change after a rolling
	// update, e.g. to heapster-monitoring-v2. By using a label query we can also check for the
	// situation where both a heapster-monitoring-v1 and a heapster-monitoring-v2 replication
	// controller are running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
		if err != nil {
			return nil, err
		}
		if len(rcList.Items) != 1 {
			return nil, fmt.Errorf("expected to find one replica for RC with label %s but got %d",
				rcLabel, len(rcList.Items))
		}
		for _, rc := range rcList.Items {
			podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
Example 4
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return nil, err
		}

		created := []api.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp != nil {
				continue
			}
			created = append(created, pod)
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)

		if len(created) == replicas {
			pods.Items = created
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
Example 5
func waitForPodRunning(c *client.Client, pod *api.Pod, out io.Writer) (status api.PodPhase, err error) {
	for {
		pod, err := c.Pods(pod.Namespace).Get(pod.Name)
		if err != nil {
			return api.PodUnknown, err
		}
		ready := false
		if pod.Status.Phase == api.PodRunning {
			ready = true
			for _, status := range pod.Status.ContainerStatuses {
				if !status.Ready {
					ready = false
					break
				}
			}
			if ready {
				return api.PodRunning, nil
			}
		}
		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
			return pod.Status.Phase, nil
		}
		fmt.Fprintf(out, "Waiting for pod %s/%s to be running, status is %s, pod ready: %v\n", pod.Namespace, pod.Name, pod.Status.Phase, ready)
		time.Sleep(2 * time.Second)
		continue
	}
}
Example 6
func runSchedulerNoPhantomPodsTest(client *client.Client) {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "c1",
					Image: "kubernetes/pause",
					Ports: []api.ContainerPort{
						{ContainerPort: 1234, HostPort: 9999},
					},
					ImagePullPolicy: api.PullIfNotPresent,
				},
			},
		},
	}

	// Assuming we only have two kubelets, the third pod here won't schedule
	// if the scheduler doesn't correctly handle the delete for the second
	// pod.
	pod.ObjectMeta.Name = "phantom.foo"
	foo, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, foo.Namespace, foo.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}

	pod.ObjectMeta.Name = "phantom.bar"
	bar, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, bar.Namespace, bar.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}

	// Delete a pod to free up room.
	glog.Infof("Deleting pod %v", bar.Name)
	err = client.Pods(api.NamespaceDefault).Delete(bar.Name, api.NewDeleteOptions(0))
	if err != nil {
		glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
	}

	pod.ObjectMeta.Name = "phantom.baz"
	baz, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
		if pod, perr := client.Pods(api.NamespaceDefault).Get("phantom.bar"); perr == nil {
			glog.Fatalf("FAILED: 'phantom.bar' was never deleted: %#v", pod)
		} else {
			glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
		}
	}

	glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
Example 7
func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error {
	pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
	if err == nil {
		Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
	} else {
		Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
	}
	return err
}
Example 8
func cleanupPods(c *client.Client, ns string) {
	By("Removing all pods in namespace " + ns)
	pods, err := c.Pods(ns).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	opt := api.NewDeleteOptions(0)
	for _, p := range pods.Items {
		expectNoError(c.Pods(ns).Delete(p.ObjectMeta.Name, opt))
	}
}
Example 9
func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
	portsByUID := make(PortsByPodUID)

	for name, portList := range expectedEndpoints {
		pod, err := c.Pods(ns).Get(name)
		if err != nil {
			Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
		}
		portsByUID[pod.ObjectMeta.UID] = portList
	}
	// Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
	return portsByUID
}
Example 10
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
// and a list of node names on which those containers restarted.
func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Selector) (int, []string) {
	pods, err := c.Pods(ns).List(labelSelector, fields.Everything())
	expectNoError(err)
	failedContainers := 0
	containerRestartNodes := sets.NewString()
	for _, p := range pods.Items {
		for _, v := range FailedContainers(&p) {
			failedContainers = failedContainers + v.restarts
			containerRestartNodes.Insert(p.Spec.NodeName)
		}
	}
	return failedContainers, containerRestartNodes.List()
}
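A sketch of the before/after usage such a helper supports (assumed here, not taken from the original source): snapshot the restart count, perform the disruptive action under test, then compare.

// Illustrative only: fail if any matching container restarted during the disruption.
preRestarts, _ := getContainerRestarts(c, ns, labelSelector)
// ... perform the disruptive action under test ...
postRestarts, badNodes := getContainerRestarts(c, ns, labelSelector)
if postRestarts != preRestarts {
	Failf("Restart count went from %d to %d; restarts observed on nodes %v", preRestarts, postRestarts, badNodes)
}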
Example 11
func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, authenticated bool, canRead bool, canWrite bool) {
	testSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Name: "testSecret"},
		Data:       map[string][]byte{"test": []byte("data")},
	}

	readOps := []testOperation{
		func() error { _, err := c.Secrets(ns).List(labels.Everything(), fields.Everything()); return err },
		func() error { _, err := c.Pods(ns).List(labels.Everything(), fields.Everything()); return err },
	}
	writeOps := []testOperation{
		func() error { _, err := c.Secrets(ns).Create(testSecret); return err },
		func() error { return c.Secrets(ns).Delete(testSecret.Name) },
	}

	for _, op := range readOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canRead && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canRead && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}

	for _, op := range writeOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canWrite && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canWrite && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
}
Example 12
func handleAttachReplicationController(c *client.Client, controller *api.ReplicationController, opts *AttachOptions) error {
	var pods *api.PodList
	for pods == nil || len(pods.Items) == 0 {
		var err error
		if pods, err = c.Pods(controller.Namespace).List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything()); err != nil {
			return err
		}
		if len(pods.Items) == 0 {
			fmt.Fprint(opts.Out, "Waiting for pod to be scheduled\n")
			time.Sleep(2 * time.Second)
		}
	}
	pod := &pods.Items[0]
	return handleAttachPod(c, pod, opts)
}
Example 13
// Wait for all pods to become Running.  Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
	return wait.Poll(poll, jobTimeout, func() (bool, error) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return false, err
		}
		count := 0
		for _, p := range pods.Items {
			if p.Status.Phase == api.PodRunning {
				count++
			}
		}
		return count == parallelism, nil
	})
}
Example 14
func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(podNamespace).Get(podName)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error so we want to retry.
			return false, nil
		}
		if pod.Spec.NodeName == "" {
			return false, nil
		}
		return true, nil
	}
}
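Like podRunning in Example 6, the returned wait.ConditionFunc is meant to be consumed by wait.Poll; a sketch, with interval and timeout values chosen arbitrarily here:

// Sketch: poll until the pod is bound to a node (the timeouts are illustrative).
if err := wait.Poll(time.Second, 30*time.Second, podScheduled(c, podNamespace, podName)); err != nil {
	glog.Fatalf("FAILED: pod %q was never scheduled: %v", podName, err)
}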
Example 15
func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRestarts int) {
	By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns))
	_, err := c.Pods(ns).Create(podDescr)
	expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))

	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		c.Pods(ns).Delete(podDescr.Name, api.NewDeleteOptions(0))
	}()

	// Wait until the pod is no longer pending. (Here we check for something other than
	// 'Pending' rather than waiting for 'Running', since when failures occur the pod goes
	// to 'Terminated', which would block indefinitely.)
	expectNoError(waitForPodNotPending(c, ns, podDescr.Name),
		fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns))
	By(fmt.Sprintf("Started pod %s in namespace %s", podDescr.Name, ns))

	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := c.Pods(ns).Get(podDescr.Name)
	expectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns))
	initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
	By(fmt.Sprintf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount))

	// Wait for the restart state to be as desired.
	deadline := time.Now().Add(2 * time.Minute)
	lastRestartCount := initialRestartCount
	observedRestarts := 0
	for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
		pod, err = c.Pods(ns).Get(podDescr.Name)
		expectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name))
		restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
		if restartCount != lastRestartCount {
			By(fmt.Sprintf("Restart count of pod %s/%s is now %d (%v elapsed)",
				ns, podDescr.Name, restartCount, time.Since(start)))
			if restartCount < lastRestartCount {
				Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
					ns, podDescr.Name, lastRestartCount, restartCount)
			}
		}
		observedRestarts = restartCount - initialRestartCount
		if expectNumRestarts > 0 && observedRestarts >= expectNumRestarts {
			// Stop if we have observed more than expectNumRestarts restarts.
			break
		}
		lastRestartCount = restartCount
	}

	// If we expected 0 restarts, fail if observed any restart.
	// If we expected n restarts (n > 0), fail if we observed < n restarts.
	if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
		observedRestarts < expectNumRestarts) {
		Failf("pod %s/%s - expected number of restarts: %t, found restarts: %t",
			ns, podDescr.Name, expectNumRestarts, observedRestarts)
	}
}
Example 16
// kubectlLogLBController logs kubectl debug output for the L7 controller pod.
func kubectlLogLBController(c *client.Client, ns string) {
	selector := labels.SelectorFromSet(labels.Set(controllerLabels))
	podList, err := c.Pods(api.NamespaceAll).List(selector, fields.Everything())
	if err != nil {
		Logf("Cannot log L7 controller output, error listing pods %v", err)
		return
	}
	if len(podList.Items) == 0 {
		Logf("Loadbalancer controller pod not found")
		return
	}
	for _, p := range podList.Items {
		Logf("\nLast 100 log lines of %v\n", p.Name)
		l, _ := runKubectl("logs", p.Name, fmt.Sprintf("--namespace=%v", ns), "-c", lbContainerName, "--tail=100")
		Logf(l)
	}
}
Example 17
func podRunning(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(podNamespace).Get(podName)
		if apierrors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error so we want to retry, but log the error.
			glog.Errorf("Error when reading pod %q: %v", podName, err)
			return false, nil
		}
		if pod.Status.Phase != api.PodRunning {
			return false, nil
		}
		return true, nil
	}
}
Example 18
func verifyResult(c *client.Client, podName string, ns string, oldNotRunning int) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	_, notRunningPods := getPodsNumbers(allPods)

	schedEvents, err := c.Events(ns).List(
		labels.Everything(),
		fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      podName,
			"involvedObject.namespace": ns,
			"source":                   "scheduler",
			"reason":                   "FailedScheduling",
		}.AsSelector())
	expectNoError(err)
	// If we failed to find an event whose reason starts with a capital letter,
	// try looking for one starting with a lowercase letter, for backward compatibility.
	// If we don't do this we end up in #15806.
	// TODO: remove this block when we don't care about supporting v1.0 too much.
	if len(schedEvents.Items) == 0 {
		schedEvents, err = c.Events(ns).List(
			labels.Everything(),
			fields.Set{
				"involvedObject.kind":      "Pod",
				"involvedObject.name":      podName,
				"involvedObject.namespace": ns,
				"source":                   "scheduler",
				"reason":                   "failedScheduling",
			}.AsSelector())
		expectNoError(err)
	}

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		} else {
			return ""
		}
	}

	Expect(notRunningPods).To(Equal(1+oldNotRunning), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods)))
	Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods)))
}
Example 19
func forEachPod(c *client.Client, ns, selectorKey, selectorValue string, fn func(api.Pod)) {
	var pods *api.PodList
	var err error
	for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
		pods, err = c.Pods(ns).List(labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})), fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		if len(pods.Items) > 0 {
			break
		}
	}
	if pods == nil || len(pods.Items) == 0 {
		Failf("No pods found")
	}
	for _, pod := range pods.Items {
		err = waitForPodRunningInNamespace(c, pod.Name, ns)
		Expect(err).NotTo(HaveOccurred())
		fn(pod)
	}
}
Example 20
// Clean both server and client pods.
func volumeTestCleanup(client *client.Client, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))

	defer GinkgoRecover()

	podClient := client.Pods(config.namespace)

	// Ignore all errors; the pods may not even have been created.
	podClient.Delete(config.prefix+"-client", nil)
	podClient.Delete(config.prefix+"-server", nil)
}
Example 21
func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
	By(fmt.Sprintf("creating pod %s in namespace %s", name, ns))
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "test",
					Image: "gcr.io/google_containers/pause",
					Ports: containerPorts,
				},
			},
		},
	}
	_, err := c.Pods(ns).Create(pod)
	Expect(err).NotTo(HaveOccurred())
}
Example 22
// podsOnMinions returns a condition that is true when all of the selected pods are bound to a node and running.
func podsOnMinions(c *client.Client, podNamespace string, labelSelector labels.Selector) wait.ConditionFunc {
	// Wait until all pods are running on the node.
	return func() (bool, error) {
		pods, err := c.Pods(podNamespace).List(labelSelector, fields.Everything())
		if err != nil {
			glog.Infof("Unable to get pods to list: %v", err)
			return false, nil
		}
		for i := range pods.Items {
			pod := pods.Items[i]
			podString := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)
			glog.Infof("Check whether pod %q exists on node %q", podString, pod.Spec.NodeName)
			if len(pod.Spec.NodeName) == 0 {
				glog.Infof("Pod %q is not bound to a host yet", podString)
				return false, nil
			}
			if pod.Status.Phase != api.PodRunning {
				return false, nil
			}
		}
		return true, nil
	}
}
Example 23
func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) {
	By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
	i := 1
	for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(1 * time.Second) {
		endpoints, err := c.Endpoints(namespace).Get(serviceName)
		if err != nil {
			Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
			continue
		}
		// Logf("Found endpoints %v", endpoints)

		portsByPodUID := getContainerPortsByPodUID(endpoints)
		// Logf("Found port by pod UID %v", portsByPodUID)

		expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints)
		if len(portsByPodUID) == len(expectedEndpoints) {
			validatePortsOrFail(portsByPodUID, expectedPortsByPodUID)
			Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
				serviceName, namespace, expectedEndpoints, time.Since(start))
			return
		}

		if i%5 == 0 {
			Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
		}
		i++
	}

	if pods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()); err == nil {
		for _, pod := range pods.Items {
			Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
		}
	} else {
		Logf("Can't list pod debug info: %v", err)
	}
	Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, serviceStartTimeout)
}
Example 24
func createRunningPod(wg *sync.WaitGroup, c *client.Client, name, ns, image string, labels map[string]string) {
	defer GinkgoRecover()
	defer wg.Done()
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  name,
					Image: image,
				},
			},
		},
	}
	_, err := c.Pods(ns).Create(pod)
	expectNoError(err)
	expectNoError(waitForPodRunningInNamespace(c, name, ns))
}
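Because createRunningPod takes a *sync.WaitGroup and calls wg.Done via defer, it is built to be launched concurrently; a hypothetical caller (names, count, and image are illustrative) might look like:

// Illustrative concurrent usage of createRunningPod.
var wg sync.WaitGroup
for i := 0; i < 3; i++ {
	name := fmt.Sprintf("pause-pod-%d", i)
	wg.Add(1)
	go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause", map[string]string{"name": name})
}
wg.Wait()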
Example 25
// startPods is a simplified version of RunRC that does not create an RC, but instead creates plain Pods.
// It requires passing a whole Pod definition, which is needed to test various scheduler predicates.
func startPods(c *client.Client, replicas int, ns string, podNamePrefix string, pod api.Pod) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	podsRunningBefore, _ := getPodsNumbers(allPods)

	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.Spec.Containers[0].Name = podName
		_, err = c.Pods(ns).Create(&pod)
		expectNoError(err)
	}

	// Wait for pods to start running.  Note: this is a functional
	// test, not a performance test, so the timeout needs to be
	// sufficiently long that it's only triggered if things are
	// completely broken vs. running slowly.
	timeout := 10 * time.Minute
	startTime := time.Now()
	currentlyRunningPods := 0
	for podsRunningBefore+replicas != currentlyRunningPods {
		allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
		expectNoError(err)
		runningPods := 0
		for _, pod := range allPods.Items {
			if pod.Status.Phase == api.PodRunning {
				runningPods += 1
			}
		}
		currentlyRunningPods = runningPods
		Logf("%v pods running", currentlyRunningPods)
		if startTime.Add(timeout).Before(time.Now()) {
			Logf("Timed out after %v waiting for pods to start running.", timeout)
			break
		}
		time.Sleep(5 * time.Second)
	}
	Expect(currentlyRunningPods).To(Equal(podsRunningBefore + replicas))
}
Example 26
func testPreStop(c *client.Client, ns string) {
	// This is the server that will receive the preStop notification
	podDescr := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "server",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "server",
					Image: "gcr.io/google_containers/nettest:1.6",
					Ports: []api.ContainerPort{{ContainerPort: 8080}},
				},
			},
		},
	}
	By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
	_, err := c.Pods(ns).Create(podDescr)
	expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))

	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("Deleting the server pod")
		c.Pods(ns).Delete(podDescr.Name, nil)
	}()

	By("Waiting for pods to come up.")
	err = waitForPodRunningInNamespace(c, podDescr.Name, ns)
	expectNoError(err, "waiting for server pod to start")

	val := "{\"Source\": \"prestop\"}"

	podOut, err := c.Pods(ns).Get(podDescr.Name)
	expectNoError(err, "getting pod info")

	preStopDescr := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "tester",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "tester",
					Image:   "gcr.io/google_containers/busybox",
					Command: []string{"sleep", "600"},
					Lifecycle: &api.Lifecycle{
						PreStop: &api.Handler{
							Exec: &api.ExecAction{
								Command: []string{
									"wget", "-O-", "--post-data=" + val, fmt.Sprintf("http://%s:8080/write", podOut.Status.PodIP),
								},
							},
						},
					},
				},
			},
		},
	}

	By(fmt.Sprintf("Creating tester pod %s in namespace %s", podDescr.Name, ns))
	_, err = c.Pods(ns).Create(preStopDescr)
	expectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name))
	deletePreStop := true

	// At the end of the test, clean up by removing the pod.
	defer func() {
		if deletePreStop {
			By("Deleting the tester pod")
			c.Pods(ns).Delete(preStopDescr.Name, nil)
		}
	}()

	err = waitForPodRunningInNamespace(c, preStopDescr.Name, ns)
	expectNoError(err, "waiting for tester pod to start")

	// Delete the pod with the preStop handler.
	By("Deleting pre-stop pod")
	err = c.Pods(ns).Delete(preStopDescr.Name, nil)
	if err == nil {
		deletePreStop = false
	}
	expectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name))

	// Validate that the server received the web poke.
	err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
		if body, err := c.Get().
			Namespace(ns).Prefix("proxy").
			Resource("pods").
			Name(podDescr.Name).
			Suffix("read").
			DoRaw(); err != nil {
			By(fmt.Sprintf("Error validating prestop: %v", err))
		} else {
			Logf("Saw: %s", string(body))
			state := State{}
			err := json.Unmarshal(body, &state)
			if err != nil {
				Logf("Error parsing: %v", err)
				return false, nil
			}
			if state.Received["prestop"] != 0 {
				return true, nil
			}
		}
		return false, nil
	})
	expectNoError(err, "validating pre-stop.")
}
Example 27
	})

	// This test verifies that the max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
	// and cannot be run in parallel with any other test that touches Nodes or Pods, because to check
	// whether max-pods is working we need to fully saturate the cluster and keep it in that state for a few seconds.
	It("validates MaxPods limit number of pods that are allowed to run", func() {
		totalPodCapacity = 0

		for _, node := range nodeList.Items {
			podCapacity, found := node.Status.Capacity["pods"]
			Expect(found).To(Equal(true))
			totalPodCapacity += podCapacity.Value()
			Logf("Node: %v", node)
		}

		allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
		expectNoError(err)
		currentlyRunningPods, currentlyDeadPods := getPodsNumbers(allPods)
		podsNeededForSaturation := int(totalPodCapacity) - currentlyRunningPods

		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))

		startPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   "",
				Labels: map[string]string{"name": ""},
			},
			Spec: api.PodSpec{
Example 28
func deletePodOrFail(c *client.Client, ns, name string) {
	By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns))
	err := c.Pods(ns).Delete(name, nil)
	Expect(err).NotTo(HaveOccurred())
}
Example 29
				Selector: labels,
				Ports: []api.ServicePort{{
					Port:       80,
					TargetPort: util.NewIntOrStringFromInt(80),
				}},
			},
		}
		_, err := c.Services(ns).Create(service)
		Expect(err).NotTo(HaveOccurred())

		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{})

		names := map[string]bool{}
		defer func() {
			for name := range names {
				err := c.Pods(ns).Delete(name, nil)
				Expect(err).NotTo(HaveOccurred())
			}
		}()

		name1 := "pod1"
		name2 := "pod2"

		createPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
		names[name1] = true
		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}})

		createPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
		names[name2] = true
		validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}})
Example 30
		err = c.PersistentVolumeClaims(ns).Delete(pvc.Name)
		Expect(err).NotTo(HaveOccurred())

		// Allow the recycler a chance to catch up. It has to perform an NFS scrub, which can be slow in e2e.
		waitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 5*time.Second, 300*time.Second)

		pv, err = c.PersistentVolumes().Get(pv.Name)
		Expect(err).NotTo(HaveOccurred())
		if pv.Spec.ClaimRef != nil {
			Failf("Expected PersistentVolume to be unbound, but found non-nil ClaimRef: %+v", pv)
		}

		// The NFS Server pod we're using contains an index.html file
		// Verify the file was really scrubbed from the volume
		podTemplate := makeCheckPod(ns, serverIP)
		checkpod, err := c.Pods(ns).Create(podTemplate)
		expectNoError(err, "Failed to create checker pod: %v", err)
		err = waitForPodSuccessInNamespace(c, checkpod.Name, checkpod.Spec.Containers[0].Name, checkpod.Namespace)
		Expect(err).NotTo(HaveOccurred())
	})
})

func makePersistentVolume(serverIP string) *api.PersistentVolume {
	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "nfs-",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("2Gi"),