Code example #1
var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
	f := framework.NewDefaultFramework("container-lifecycle-hook")
	var podClient *framework.PodClient
	const (
		podCheckInterval     = 1 * time.Second
		podWaitTimeout       = 2 * time.Minute
		postStartWaitTimeout = 2 * time.Minute
		preStopWaitTimeout   = 30 * time.Second
	)
	Context("when create a pod with lifecycle hook", func() {
		BeforeEach(func() {
			podClient = f.PodClient()
		})

		Context("when it is exec hook", func() {
			var file string
			testPodWithExecHook := func(podWithHook *v1.Pod) {
				podCheckHook := getExecHookTestPod("pod-check-hook",
					// Wait until the file is created.
					[]string{"sh", "-c", fmt.Sprintf("while [ ! -e %s ]; do sleep 1; done", file)},
				)
				By("create the pod with lifecycle hook")
				podClient.CreateSync(podWithHook)
				if podWithHook.Spec.Containers[0].Lifecycle.PostStart != nil {
					By("create the hook check pod")
					podClient.Create(podCheckHook)
					By("wait for the hook check pod to success")
					podClient.WaitForSuccess(podCheckHook.Name, postStartWaitTimeout)
				}
				By("delete the pod with lifecycle hook")
				podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
				if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
					By("create the hook check pod")
					podClient.Create(podCheckHook)
					By("wait for the prestop check pod to success")
					podClient.WaitForSuccess(podCheckHook.Name, preStopWaitTimeout)
				}
			}

			BeforeEach(func() {
				file = "/tmp/test-" + string(uuid.NewUUID())
			})

			AfterEach(func() {
				By("cleanup the temporary file created in the test.")
				cleanupPod := getExecHookTestPod("pod-clean-up", []string{"rm", file})
				podClient.Create(cleanupPod)
				podClient.WaitForSuccess(cleanupPod.Name, podWaitTimeout)
			})

			It("should execute poststart exec hook properly [Conformance]", func() {
				podWithHook := getExecHookTestPod("pod-with-poststart-exec-hook",
					// Block forever
					[]string{"tail", "-f", "/dev/null"},
				)
				podWithHook.Spec.Containers[0].Lifecycle = &v1.Lifecycle{
					PostStart: &v1.Handler{
						Exec: &v1.ExecAction{Command: []string{"touch", file}},
					},
				}
				testPodWithExecHook(podWithHook)
			})

			It("should execute prestop exec hook properly [Conformance]", func() {
				podWithHook := getExecHookTestPod("pod-with-prestop-exec-hook",
					// Block forever
					[]string{"tail", "-f", "/dev/null"},
				)
				podWithHook.Spec.Containers[0].Lifecycle = &v1.Lifecycle{
					PreStop: &v1.Handler{
						Exec: &v1.ExecAction{Command: []string{"touch", file}},
					},
				}
				testPodWithExecHook(podWithHook)
			})
		})

		Context("when it is http hook", func() {
			var targetIP string
			podHandleHookRequest := &v1.Pod{
				ObjectMeta: v1.ObjectMeta{
					Name: "pod-handle-http-request",
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "pod-handle-http-request",
							Image: "gcr.io/google_containers/netexec:1.7",
							Ports: []v1.ContainerPort{
								{
									ContainerPort: 8080,
									Protocol:      v1.ProtocolTCP,
								},
							},
						},
					},
				},
			}
			BeforeEach(func() {
				By("create the container to handle the HTTPGet hook request.")
				newPod := podClient.CreateSync(podHandleHookRequest)
				targetIP = newPod.Status.PodIP
			})
			testPodWithHttpHook := func(podWithHook *v1.Pod) {
				By("create the pod with lifecycle hook")
				podClient.CreateSync(podWithHook)
				if podWithHook.Spec.Containers[0].Lifecycle.PostStart != nil {
					By("check poststart hook")
					Eventually(func() error {
						return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
							`GET /echo\?msg=poststart`)
					}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
				}
				By("delete the pod with lifecycle hook")
				podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
				if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
					By("check prestop hook")
					Eventually(func() error {
						return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
							`GET /echo\?msg=prestop`)
					}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
				}
			}
			It("should execute poststart http hook properly [Conformance]", func() {
				podWithHook := &v1.Pod{
					ObjectMeta: v1.ObjectMeta{
						Name: "pod-with-poststart-http-hook",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name:  "pod-with-poststart-http-hook",
								Image: framework.GetPauseImageNameForHostArch(),
								Lifecycle: &v1.Lifecycle{
									PostStart: &v1.Handler{
										HTTPGet: &v1.HTTPGetAction{
											Path: "/echo?msg=poststart",
											Host: targetIP,
											Port: intstr.FromInt(8080),
										},
									},
								},
							},
						},
					},
				}
				testPodWithHttpHook(podWithHook)
			})
			It("should execute prestop http hook properly [Conformance]", func() {
				podWithHook := &v1.Pod{
					ObjectMeta: v1.ObjectMeta{
						Name: "pod-with-prestop-http-hook",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name:  "pod-with-prestop-http-hook",
								Image: framework.GetPauseImageNameForHostArch(),
								Lifecycle: &v1.Lifecycle{
									PreStop: &v1.Handler{
										HTTPGet: &v1.HTTPGetAction{
											Path: "/echo?msg=prestop",
											Host: targetIP,
											Port: intstr.FromInt(8080),
										},
									},
								},
							},
						},
					},
				}
				testPodWithHttpHook(podWithHook)
			})
		})
	})
})
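The helper getExecHookTestPod is referenced above but not included in this excerpt. A minimal sketch of what such a constructor could look like follows; the busybox image and the hostPath volume on /tmp (so the file touched by the hook is visible to the separate check pod) are assumptions, not the upstream implementation.

func getExecHookTestPod(name string, cmd []string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name: name,
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			// Assumption: /tmp is shared via a hostPath volume so that the file
			// created by the lifecycle hook is visible to the check pod.
			Volumes: []v1.Volume{
				{
					Name: "tmpfs",
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{Path: "/tmp"},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:         name,
					Image:        "gcr.io/google_containers/busybox:1.24", // assumed test image
					Command:      cmd,
					VolumeMounts: []v1.VolumeMount{{Name: "tmpfs", MountPath: "/tmp"}},
				},
			},
		},
	}
}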
Code example #2
File: disruption.go Project: paralin/kubernetes
var _ = framework.KubeDescribe("DisruptionController", func() {
	f := framework.NewDefaultFramework("disruption")
	var ns string
	var cs *kubernetes.Clientset

	BeforeEach(func() {
		// skip on GKE since alpha features are disabled
		framework.SkipIfProviderIs("gke")

		cs = f.StagingClient
		ns = f.Namespace.Name
	})

	It("should create a PodDisruptionBudget", func() {
		createPodDisruptionBudgetOrDie(cs, ns, intstr.FromString("1%"))
	})

	It("should update PodDisruptionBudget status", func() {
		createPodDisruptionBudgetOrDie(cs, ns, intstr.FromInt(2))

		createPodsOrDie(cs, ns, 3)
		waitForPodsOrDie(cs, ns, 3)

		// Since disruptionAllowed starts out 0, if we see it ever become positive,
		// that means the controller is working.
		err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
			pdb, err := cs.Policy().PodDisruptionBudgets(ns).Get("foo")
			if err != nil {
				return false, err
			}
			return pdb.Status.PodDisruptionsAllowed > 0, nil
		})
		Expect(err).NotTo(HaveOccurred())

	})

	evictionCases := []struct {
		description    string
		minAvailable   intstr.IntOrString
		podCount       int
		replicaSetSize int32
		shouldDeny     bool
		exclusive      bool
	}{
		{
			description:  "no PDB",
			minAvailable: intstr.FromString(""),
			podCount:     1,
			shouldDeny:   false,
		}, {
			description:  "too few pods, absolute",
			minAvailable: intstr.FromInt(2),
			podCount:     2,
			shouldDeny:   true,
		}, {
			description:  "enough pods, absolute",
			minAvailable: intstr.FromInt(2),
			podCount:     3,
			shouldDeny:   false,
		}, {
			description:    "enough pods, replicaSet, percentage",
			minAvailable:   intstr.FromString("90%"),
			replicaSetSize: 10,
			exclusive:      false,
			shouldDeny:     false,
		}, {
			description:    "too few pods, replicaSet, percentage",
			minAvailable:   intstr.FromString("90%"),
			replicaSetSize: 10,
			exclusive:      true,
			shouldDeny:     true,
		},
	}
	for i := range evictionCases {
		c := evictionCases[i]
		expectation := "should allow an eviction"
		if c.shouldDeny {
			expectation = "should not allow an eviction"
		}
		It(fmt.Sprintf("evictions: %s => %s", c.description, expectation), func() {
			createPodsOrDie(cs, ns, c.podCount)
			if c.replicaSetSize > 0 {
				createReplicaSetOrDie(cs, ns, c.replicaSetSize, c.exclusive)
			}

			if c.minAvailable.String() != "" {
				createPodDisruptionBudgetOrDie(cs, ns, c.minAvailable)
			}

			// Locate a running pod.
			var pod v1.Pod
			err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
				podList, err := cs.Pods(ns).List(v1.ListOptions{})
				if err != nil {
					return false, err
				}

				for i := range podList.Items {
					if podList.Items[i].Status.Phase == v1.PodRunning {
						pod = podList.Items[i]
						return true, nil
					}
				}

				return false, nil
			})
			Expect(err).NotTo(HaveOccurred())

			e := &policy.Eviction{
				ObjectMeta: v1.ObjectMeta{
					Name:      pod.Name,
					Namespace: ns,
				},
			}

			if c.shouldDeny {
				// Since disruptionAllowed starts out false, wait at least 60s hoping that
				// this gives the controller enough time to have truly set the status.
				time.Sleep(timeout)

				err = cs.Pods(ns).Evict(e)
				Expect(err).Should(MatchError("Cannot evict pod as it would violate the pod's disruption budget."))
			} else {
				// Only wait for running pods in the "allow" case
				// because one of shouldDeny cases relies on the
				// replicaSet not fitting on the cluster.
				waitForPodsOrDie(cs, ns, c.podCount+int(c.replicaSetSize))

				// Since disruptionAllowed starts out false, if an eviction is ever allowed,
				// that means the controller is working.
				err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
					err = cs.Pods(ns).Evict(e)
					if err != nil {
						return false, nil
					} else {
						return true, nil
					}
				})
				Expect(err).NotTo(HaveOccurred())
			}
		})
	}

})
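The createPodDisruptionBudgetOrDie helper is not part of this excerpt. A rough sketch under assumptions follows: the PDB name "foo" matches the object polled in the status test above, while the selector labels and the exact LabelSelector meta type (metav1 vs. the older unversioned package) are guesses rather than the upstream code.

func createPodDisruptionBudgetOrDie(cs *kubernetes.Clientset, ns string, minAvailable intstr.IntOrString) {
	pdb := policy.PodDisruptionBudget{
		ObjectMeta: v1.ObjectMeta{
			Name:      "foo", // the status test above polls for a PDB named "foo"
			Namespace: ns,
		},
		Spec: policy.PodDisruptionBudgetSpec{
			// Assumed selector; it has to match the labels of the pods created by
			// createPodsOrDie / createReplicaSetOrDie for the eviction cases to work.
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
			MinAvailable: minAvailable,
		},
	}
	_, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
	Expect(err).NotTo(HaveOccurred())
}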
Code example #3
File: dns.go Project: jeremyeder/kubernetes
var _ = framework.KubeDescribe("DNS", func() {
	f := framework.NewDefaultFramework("dns")

	It("should provide DNS for the cluster [Conformance]", func() {
		verifyDNSPodIsRunning(f)

		// All the names we need to be able to resolve.
		// TODO: Spin up a separate test service and test that dns works for that service.
		namesToResolve := []string{
			"kubernetes.default",
			"kubernetes.default.svc",
			"kubernetes.default.svc.cluster.local",
			"google.com",
		}
		// Added due to #8512. This is critical for GCE and GKE deployments.
		if framework.ProviderIs("gce", "gke") {
			namesToResolve = append(namesToResolve, "metadata")
		}

		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "wheezy", f.Namespace.Name)
		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "jessie", f.Namespace.Name)
		By("Running these commands on wheezy:" + wheezyProbeCmd + "\n")
		By("Running these commands on jessie:" + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a pod to probe DNS")
		pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
		validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
	})

	It("should provide DNS for services [Conformance]", func() {
		verifyDNSPodIsRunning(f)

		// Create a test headless service.
		By("Creating a test headless service")
		testServiceSelector := map[string]string{
			"dns-test": "true",
		}
		headlessService := createServiceSpec(dnsTestServiceName, true, testServiceSelector)
		_, err := f.Client.Services(f.Namespace.Name).Create(headlessService)
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			By("deleting the test headless service")
			defer GinkgoRecover()
			f.Client.Services(f.Namespace.Name).Delete(headlessService.Name)
		}()

		regularService := createServiceSpec("test-service-2", false, testServiceSelector)
		_, err = f.Client.Services(f.Namespace.Name).Create(regularService)
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			By("deleting the test service")
			defer GinkgoRecover()
			f.Client.Services(f.Namespace.Name).Delete(regularService.Name)
		}()

		// All the names we need to be able to resolve.
		// TODO: Create more endpoints and ensure that multiple A records are returned
		// for headless service.
		namesToResolve := []string{
			fmt.Sprintf("%s", headlessService.Name),
			fmt.Sprintf("%s.%s", headlessService.Name, f.Namespace.Name),
			fmt.Sprintf("%s.%s.svc", headlessService.Name, f.Namespace.Name),
			fmt.Sprintf("_http._tcp.%s.%s.svc", headlessService.Name, f.Namespace.Name),
			fmt.Sprintf("_http._tcp.%s.%s.svc", regularService.Name, f.Namespace.Name),
		}

		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "wheezy", f.Namespace.Name)
		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "jessie", f.Namespace.Name)
		By("Running these commands on wheezy:" + wheezyProbeCmd + "\n")
		By("Running these commands on jessie:" + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a pod to probe DNS")
		pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
		pod.ObjectMeta.Labels = testServiceSelector

		validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
	})

	It("should provide DNS for pods for Hostname and Subdomain Annotation", func() {
		verifyDNSPodIsRunning(f)

		// Create a test headless service.
		By("Creating a test headless service")
		testServiceSelector := map[string]string{
			"dns-test-hostname-attribute": "true",
		}
		serviceName := "dns-test-service-2"
		podHostname := "dns-querier-2"
		headlessService := createServiceSpec(serviceName, true, testServiceSelector)
		_, err := f.Client.Services(f.Namespace.Name).Create(headlessService)
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			By("deleting the test headless service")
			defer GinkgoRecover()
			f.Client.Services(f.Namespace.Name).Delete(headlessService.Name)
		}()

		hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podHostname, serviceName, f.Namespace.Name)
		wheezyProbeCmd, wheezyFileNames := createProbeCommand([]string{hostFQDN}, []string{hostFQDN, podHostname}, "wheezy", f.Namespace.Name)
		jessieProbeCmd, jessieFileNames := createProbeCommand([]string{hostFQDN}, []string{hostFQDN, podHostname}, "jessie", f.Namespace.Name)
		By("Running these commands on wheezy:" + wheezyProbeCmd + "\n")
		By("Running these commands on jessie:" + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a pod to probe DNS")
		pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
		pod1.ObjectMeta.Labels = testServiceSelector
		pod1.ObjectMeta.Annotations = map[string]string{
			pod.PodHostnameAnnotation:  podHostname,
			pod.PodSubdomainAnnotation: serviceName,
		}

		validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...))
	})
})
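createServiceSpec and createDNSPod are helpers defined elsewhere in dns.go. A sketch of what a createServiceSpec-style helper plausibly does, using the same api types as the rest of the file; the port number and port name are placeholders (the _http._tcp SRV lookups above suggest a TCP port named "http").

func createServiceSpec(serviceName string, isHeadless bool, selector map[string]string) *api.Service {
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: serviceName,
		},
		Spec: api.ServiceSpec{
			Selector: selector,
			Ports: []api.ServicePort{
				// Assumed port definition.
				{Name: "http", Port: 80, Protocol: api.ProtocolTCP},
			},
		},
	}
	if isHeadless {
		// A headless service is expressed by setting ClusterIP to "None".
		svc.Spec.ClusterIP = "None"
	}
	return svc
}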
Code example #4
	cm      *v1.ConfigMap
	isValid bool

	dnsPod      *v1.Pod
	utilPod     *v1.Pod
	utilService *v1.Service
}

var _ = framework.KubeDescribe("DNS config map", func() {
	test := &dnsConfigMapTest{
		f:    framework.NewDefaultFramework("dns-config-map"),
		ns:   "kube-system",
		name: "kube-dns",
	}

	BeforeEach(func() {
		test.c = test.f.ClientSet
	})

	It("should be able to change configuration", func() {
		test.run()
	})
})

func (t *dnsConfigMapTest) init() {
	By("Finding a DNS pod")
	label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
	options := v1.ListOptions{LabelSelector: label.String()}

	pods, err := t.f.ClientSet.Core().Pods("kube-system").List(options)
	Expect(err).NotTo(HaveOccurred())
Code example #5
var _ = framework.KubeDescribe("Network", func() {
	const (
		testDaemonHttpPort    = 11301
		testDaemonTcpPort     = 11302
		timeoutSeconds        = 10
		postFinTimeoutSeconds = 5
	)

	fr := framework.NewDefaultFramework("network")

	It("should set TCP CLOSE_WAIT timeout", func() {
		nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
		ips := collectAddresses(nodes, api.NodeInternalIP)

		if len(nodes.Items) < 2 {
			framework.Skipf(
				"Test requires >= 2 Ready nodes, but there are only %v nodes",
				len(nodes.Items))
		}

		type NodeInfo struct {
			node   *api.Node
			name   string
			nodeIp string
		}

		clientNodeInfo := NodeInfo{
			node:   &nodes.Items[0],
			name:   nodes.Items[0].Name,
			nodeIp: ips[0],
		}

		serverNodeInfo := NodeInfo{
			node:   &nodes.Items[1],
			name:   nodes.Items[1].Name,
			nodeIp: ips[1],
		}

		zero := int64(0)

		clientPodSpec := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:      "e2e-net-client",
				Namespace: fr.Namespace.Name,
				Labels:    map[string]string{"app": "e2e-net-client"},
			},
			Spec: api.PodSpec{
				NodeName: clientNodeInfo.name,
				Containers: []api.Container{
					{
						Name:            "e2e-net-client",
						Image:           kubeProxyE2eImage,
						ImagePullPolicy: "Always",
						Command: []string{
							"/net", "-serve", fmt.Sprintf("0.0.0.0:%d", testDaemonHttpPort),
						},
					},
				},
				TerminationGracePeriodSeconds: &zero,
			},
		}

		serverPodSpec := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:      "e2e-net-server",
				Namespace: fr.Namespace.Name,
				Labels:    map[string]string{"app": "e2e-net-server"},
			},
			Spec: api.PodSpec{
				NodeName: serverNodeInfo.name,
				Containers: []api.Container{
					{
						Name:            "e2e-net-server",
						Image:           kubeProxyE2eImage,
						ImagePullPolicy: "Always",
						Command: []string{
							"/net",
							"-runner", "nat-closewait-server",
							"-options",
							fmt.Sprintf(`{"LocalAddr":"0.0.0.0:%v", "PostFindTimeoutSeconds":%v}`,
								testDaemonTcpPort,
								postFinTimeoutSeconds),
						},
						Ports: []api.ContainerPort{
							{
								Name:          "tcp",
								ContainerPort: testDaemonTcpPort,
								HostPort:      testDaemonTcpPort,
							},
						},
					},
				},
				TerminationGracePeriodSeconds: &zero,
			},
		}

		By(fmt.Sprintf(
			"Launching a server daemon on node %v (node ip: %v, image: %v)",
			serverNodeInfo.name,
			serverNodeInfo.nodeIp,
			kubeProxyE2eImage))
		fr.PodClient().CreateSync(serverPodSpec)

		By(fmt.Sprintf(
			"Launching a client daemon on node %v (node ip: %v, image: %v)",
			clientNodeInfo.name,
			clientNodeInfo.nodeIp,
			kubeProxyE2eImage))
		fr.PodClient().CreateSync(clientPodSpec)

		By("Make client connect")

		options := nat.CloseWaitClientOptions{
			RemoteAddr: fmt.Sprintf("%v:%v",
				serverNodeInfo.nodeIp, testDaemonTcpPort),
			TimeoutSeconds:        timeoutSeconds,
			PostFinTimeoutSeconds: 0,
			LeakConnection:        true,
		}

		jsonBytes, err := json.Marshal(options)
		framework.ExpectNoError(err)
		cmd := fmt.Sprintf(
			`curl -X POST http://localhost:%v/run/nat-closewait-client -d `+
				`'%v' 2>/dev/null`,
			testDaemonHttpPort,
			string(jsonBytes))
		framework.RunHostCmdOrDie(fr.Namespace.Name, "e2e-net-client", cmd)

		<-time.After(time.Duration(1) * time.Second)

		By("Checking /proc/net/nf_conntrack for the timeout")
		// If test flakes occur here, then this check should be performed
		// in a loop as there may be a race with the client connecting.
		framework.IssueSSHCommandWithResult(
			fmt.Sprintf("sudo cat /proc/net/ip_conntrack | grep 'dport=%v'",
				testDaemonTcpPort),
			framework.TestContext.Provider,
			clientNodeInfo.node)

		// Timeout in seconds is available as the third column from
		// /proc/net/ip_conntrack.
		result, err := framework.IssueSSHCommandWithResult(
			fmt.Sprintf(
				"sudo cat /proc/net/ip_conntrack "+
					"| grep 'CLOSE_WAIT.*dst=%v.*dport=%v' "+
					"| awk '{print $3}'",
				serverNodeInfo.nodeIp,
				testDaemonTcpPort),
			framework.TestContext.Provider,
			clientNodeInfo.node)
		framework.ExpectNoError(err)

		timeoutSeconds, err := strconv.Atoi(strings.TrimSpace(result.Stdout))
		framework.ExpectNoError(err)

		// These must be synchronized from the default values set in
		// pkg/apis/../defaults.go ConntrackTCPCloseWaitTimeout. The
		// current defaults are hidden in the initialization code.
		const epsilonSeconds = 10
		const expectedTimeoutSeconds = 60 * 60

		framework.Logf("conntrack entry timeout was: %v, expected: %v",
			timeoutSeconds, expectedTimeoutSeconds)

		Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(
			BeNumerically("<", (epsilonSeconds)))
	})
})
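collectAddresses is referenced above but not shown. A small sketch of such a helper, assuming it returns one address of the requested type per node:

func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
	ips := []string{}
	for i := range nodes.Items {
		for _, addr := range nodes.Items[i].Status.Addresses {
			if addr.Type == addressType {
				ips = append(ips, addr.Address)
				// Assumption: the first matching address per node is sufficient.
				break
			}
		}
	}
	return ips
}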
Code example #6
File: service_accounts.go Project: ncdc/kubernetes
var _ = framework.KubeDescribe("ServiceAccounts", func() {
	f := framework.NewDefaultFramework("svcaccounts")

	It("should ensure a single API token exists", func() {
		// wait for the service account to reference a single secret
		var secrets []api.ObjectReference
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
			By("waiting for a single token reference")
			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
			if apierrors.IsNotFound(err) {
				framework.Logf("default service account was not found")
				return false, nil
			}
			if err != nil {
				framework.Logf("error getting default service account: %v", err)
				return false, err
			}
			switch len(sa.Secrets) {
			case 0:
				framework.Logf("default service account has no secret references")
				return false, nil
			case 1:
				framework.Logf("default service account has a single secret reference")
				secrets = sa.Secrets
				return true, nil
			default:
				return false, fmt.Errorf("default service account has too many secret references: %#v", sa.Secrets)
			}
		}))

		// make sure the reference doesn't flutter
		{
			By("ensuring the single token reference persists")
			time.Sleep(2 * time.Second)
			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
			framework.ExpectNoError(err)
			Expect(sa.Secrets).To(Equal(secrets))
		}

		// delete the referenced secret
		By("deleting the service account token")
		framework.ExpectNoError(f.Client.Secrets(f.Namespace.Name).Delete(secrets[0].Name))

		// wait for the referenced secret to be removed, and another one autocreated
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
			By("waiting for a new token reference")
			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
			if err != nil {
				framework.Logf("error getting default service account: %v", err)
				return false, err
			}
			switch len(sa.Secrets) {
			case 0:
				framework.Logf("default service account has no secret references")
				return false, nil
			case 1:
				if sa.Secrets[0] == secrets[0] {
					framework.Logf("default service account still has the deleted secret reference")
					return false, nil
				}
				framework.Logf("default service account has a new single secret reference")
				secrets = sa.Secrets
				return true, nil
			default:
				return false, fmt.Errorf("default service account has too many secret references: %#v", sa.Secrets)
			}
		}))

		// make sure the reference doesn't flutter
		{
			By("ensuring the single token reference persists")
			time.Sleep(2 * time.Second)
			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
			framework.ExpectNoError(err)
			Expect(sa.Secrets).To(Equal(secrets))
		}

		// delete the reference from the service account
		By("deleting the reference to the service account token")
		{
			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
			framework.ExpectNoError(err)
			sa.Secrets = nil
			_, updateErr := f.Client.ServiceAccounts(f.Namespace.Name).Update(sa)
			framework.ExpectNoError(updateErr)
		}

		// wait for another one to be autocreated
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
			By("waiting for a new token to be created and added")
			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
			if err != nil {
				framework.Logf("error getting default service account: %v", err)
				return false, err
			}
			switch len(sa.Secrets) {
			case 0:
				framework.Logf("default service account has no secret references")
				return false, nil
			case 1:
				framework.Logf("default service account has a new single secret reference")
				secrets = sa.Secrets
				return true, nil
			default:
				return false, fmt.Errorf("default service account has too many secret references: %#v", sa.Secrets)
			}
		}))

		// make sure the reference doesn't flutter
		{
			By("ensuring the single token reference persists")
			time.Sleep(2 * time.Second)
			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
			framework.ExpectNoError(err)
			Expect(sa.Secrets).To(Equal(secrets))
		}
	})

	It("should mount an API token into pods [Conformance]", func() {
		var tokenContent string
		var rootCAContent string

		// Standard get, update retry loop
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
			By("getting the auto-created API token")
			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
			if apierrors.IsNotFound(err) {
				framework.Logf("default service account was not found")
				return false, nil
			}
			if err != nil {
				framework.Logf("error getting default service account: %v", err)
				return false, err
			}
			if len(sa.Secrets) == 0 {
				framework.Logf("default service account has no secret references")
				return false, nil
			}
			for _, secretRef := range sa.Secrets {
				secret, err := f.Client.Secrets(f.Namespace.Name).Get(secretRef.Name)
				if err != nil {
					framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
					continue
				}
				if secret.Type == api.SecretTypeServiceAccountToken {
					tokenContent = string(secret.Data[api.ServiceAccountTokenKey])
					rootCAContent = string(secret.Data[api.ServiceAccountRootCAKey])
					return true, nil
				}
			}

			framework.Logf("default service account has no secret references to valid service account tokens")
			return false, nil
		}))

		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: "pod-service-account-" + string(uuid.NewUUID()),
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "token-test",
						Image: "gcr.io/google_containers/mounttest:0.7",
						Args: []string{
							fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountTokenKey),
						},
					},
					{
						Name:  "root-ca-test",
						Image: "gcr.io/google_containers/mounttest:0.7",
						Args: []string{
							fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountRootCAKey),
						},
					},
				},
				RestartPolicy: api.RestartPolicyNever,
			},
		}

		supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.Client)
		if supportsTokenNamespace {
			pod.Spec.Containers = append(pod.Spec.Containers, api.Container{
				Name:  "namespace-test",
				Image: "gcr.io/google_containers/mounttest:0.7",
				Args: []string{
					fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountNamespaceKey),
				},
			})
		}

		f.TestContainerOutput("consume service account token", pod, 0, []string{
			fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountTokenKey, tokenContent),
		})
		f.TestContainerOutput("consume service account root CA", pod, 1, []string{
			fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountRootCAKey, rootCAContent),
		})

		if supportsTokenNamespace {
			f.TestContainerOutput("consume service account namespace", pod, 2, []string{
				fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountNamespaceKey, f.Namespace.Name),
			})
		}
	})
})
Code example #7
File: mesos.go Project: jonboulle/kubernetes
var _ = framework.KubeDescribe("Mesos", func() {
	f := framework.NewDefaultFramework("pods")
	var c clientset.Interface
	var ns string

	BeforeEach(func() {
		framework.SkipUnlessProviderIs("mesos/docker")
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	It("applies slave attributes as labels", func() {
		nodeClient := f.ClientSet.Core().Nodes()

		rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"})
		options := v1.ListOptions{LabelSelector: rackA.String()}
		nodes, err := nodeClient.List(options)
		if err != nil {
			framework.Failf("Failed to query for node: %v", err)
		}
		Expect(len(nodes.Items)).To(Equal(1))

		var addr string
		for _, a := range nodes.Items[0].Status.Addresses {
			if a.Type == v1.NodeInternalIP {
				addr = a.Address
			}
		}
		Expect(addr).NotTo(Equal(""))
	})

	It("starts static pods on every node in the mesos cluster", func() {
		client := f.ClientSet
		framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready")

		nodelist := framework.GetReadySchedulableNodesOrDie(client)
		const ns = "static-pods"
		numpods := int32(len(nodelist.Items))
		framework.ExpectNoError(framework.WaitForPodsRunningReady(client, ns, numpods, wait.ForeverTestTimeout, map[string]string{}, false),
			fmt.Sprintf("number of static pods in namespace %s is %d", ns, numpods))
	})

	It("schedules pods annotated with roles on correct slaves", func() {
		// launch a pod to find a node which can launch a pod. We intentionally do
		// not just take the node list and choose the first of them. Depending on the
		// cluster and the scheduler it might be that a "normal" pod cannot be
		// scheduled onto it.
		By("Trying to launch a pod with a label to get a node which can launch it.")
		podName := "with-label"
		_, err := c.Core().Pods(ns).Create(&v1.Pod{
			TypeMeta: metav1.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: v1.ObjectMeta{
				Name: podName,
				Annotations: map[string]string{
					"k8s.mesosphere.io/roles": "public",
				},
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name:  podName,
						Image: framework.GetPauseImageName(f.ClientSet),
					},
				},
			},
		})
		framework.ExpectNoError(err)

		framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, podName, ns))
		pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
		framework.ExpectNoError(err)

		nodeClient := f.ClientSet.Core().Nodes()

		// schedule onto node with rack=2 being assigned to the "public" role
		rack2 := labels.SelectorFromSet(map[string]string{
			"k8s.mesosphere.io/attribute-rack": "2",
		})
		options := v1.ListOptions{LabelSelector: rack2.String()}
		nodes, err := nodeClient.List(options)
		framework.ExpectNoError(err)

		Expect(nodes.Items[0].Name).To(Equal(pod.Spec.NodeName))
	})
})
Code example #8
File: batch_v1_jobs.go Project: CodeJuan/kubernetes
var _ = framework.KubeDescribe("V1Job", func() {
	f := framework.NewDefaultFramework("v1job")
	parallelism := int32(2)
	completions := int32(4)
	lotsOfFailures := int32(5) // more than completions

	// Simplest case: all pods succeed promptly
	It("should run a job to completion when tasks succeed", func() {
		By("Creating a job")
		job := newTestV1Job("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})

	// Pods sometimes fail, but eventually succeed.
	It("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
		By("Creating a job")
		// One failure, then a success, local restarts.
		// We can't use the random failure approach used by the
		// non-local test below, because kubelet will throttle
		// frequently failing containers in a given pod, ramping
		// up to 5 minutes between restarts, making test timeouts
		// due to successive failures too likely with a reasonable
		// test timeout.
		job := newTestV1Job("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})

	// Pods sometimes fail, but eventually succeed, after pod restarts
	It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
		By("Creating a job")
		// 50% chance of container success, local restarts.
		// Can't use the failOnce approach because that relies
		// on an emptyDir, which is not preserved across new pods.
		// Worst case analysis: 15 failures, each taking 1 minute to
		// run due to some slowness, 1 in 2^15 chance of happening,
		// causing test flake.  Should be very rare.
		job := newTestV1Job("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should keep restarting failed pods", func() {
		By("Creating a job")
		job := newTestV1Job("fail", "all-fail", api.RestartPolicyNever, parallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job shows many failures")
		err = wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
			curr, err := f.Client.Batch().Jobs(f.Namespace.Name).Get(job.Name)
			if err != nil {
				return false, err
			}
			return curr.Status.Failed > lotsOfFailures, nil
		})
		Expect(err).NotTo(HaveOccurred())
	})

	It("should scale a job up", func() {
		startParallelism := int32(1)
		endParallelism := int32(2)
		By("Creating a job")
		job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job up")
		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should scale a job down", func() {
		startParallelism := int32(2)
		endParallelism := int32(1)
		By("Creating a job")
		job := newTestV1Job("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job down")
		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should delete a job", func() {
		By("Creating a job")
		job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == parallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, parallelism)
		Expect(err).NotTo(HaveOccurred())

		By("delete a job")
		reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = f.Client.Batch().Jobs(f.Namespace.Name).Get(job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())
	})

	It("should fail a job", func() {
		By("Creating a job")
		job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		activeDeadlineSeconds := int64(10)
		job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was failed")
		err = waitForV1JobFail(f.Client, f.Namespace.Name, job.Name)
		Expect(err).NotTo(HaveOccurred())
	})
})
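waitForV1JobFinish is not reproduced in this excerpt. A sketch of the polling it presumably performs, mirroring the Batch() client calls used in the failure test above (the receiver type and exact signature are assumptions):

func waitForV1JobFinish(c *client.Client, ns, jobName string, completions int32) error {
	return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
		curr, err := c.Batch().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		// The job is considered finished once the number of succeeded pods
		// reaches the requested completion count.
		return curr.Status.Succeeded >= completions, nil
	})
}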
Code example #9
File: security_context.go Project: Q-Lee/kubernetes
var _ = framework.KubeDescribe("Security Context [Feature:SecurityContext]", func() {
	f := framework.NewDefaultFramework("security-context")

	It("should support pod.Spec.SecurityContext.SupplementalGroups", func() {
		pod := scTestPod(false, false)
		pod.Spec.Containers[0].Command = []string{"id", "-G"}
		pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678}
		groups := []string{"1234", "5678"}
		f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups)
	})

	It("should support pod.Spec.SecurityContext.RunAsUser", func() {
		pod := scTestPod(false, false)
		var uid int64 = 1001
		pod.Spec.SecurityContext.RunAsUser = &uid
		pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"}

		f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
			fmt.Sprintf("%v", uid),
		})
	})

	It("should support container.SecurityContext.RunAsUser", func() {
		pod := scTestPod(false, false)
		var uid int64 = 1001
		var overrideUid int64 = 1002
		pod.Spec.SecurityContext.RunAsUser = &uid
		pod.Spec.Containers[0].SecurityContext = new(api.SecurityContext)
		pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUid
		pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"}

		f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
			fmt.Sprintf("%v", overrideUid),
		})
	})

	It("should support volume SELinux relabeling", func() {
		testPodSELinuxLabeling(f, false, false)
	})

	It("should support volume SELinux relabeling when using hostIPC", func() {
		testPodSELinuxLabeling(f, true, false)
	})

	It("should support volume SELinux relabeling when using hostPID", func() {
		testPodSELinuxLabeling(f, false, true)
	})

	It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp]", func() {
		// TODO: port to SecurityContext as soon as seccomp is out of alpha
		pod := scTestPod(false, false)
		pod.Annotations[api.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined"
		pod.Annotations[api.SeccompPodAnnotationKey] = "docker/default"
		pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
		f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
	})

	It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp]", func() {
		// TODO: port to SecurityContext as soon as seccomp is out of alpha
		pod := scTestPod(false, false)
		pod.Annotations[api.SeccompPodAnnotationKey] = "unconfined"
		pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
		f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
	})

	It("should support seccomp alpha docker/default annotation [Feature:Seccomp]", func() {
		// TODO: port to SecurityContext as soon as seccomp is out of alpha
		pod := scTestPod(false, false)
		pod.Annotations[api.SeccompContainerAnnotationKeyPrefix+"test-container"] = "docker/default"
		pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
		f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered
	})

	It("should support seccomp default which is unconfined [Feature:Seccomp]", func() {
		// TODO: port to SecurityContext as soon as seccomp is out of alpha
		pod := scTestPod(false, false)
		pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
		f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
	})
})
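scTestPod builds the pod that these cases mutate. A sketch under assumptions: a single container named "test-container" (the seccomp annotations above reference that name), an initialized annotation map, hostIPC/hostPID flags on the pod-level security context, and a placeholder image.

func scTestPod(hostIPC bool, hostPID bool) *api.Pod {
	podName := "security-context-" + string(uuid.NewUUID())
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:        podName,
			Labels:      map[string]string{"name": podName},
			Annotations: map[string]string{}, // the seccomp tests above add annotations to this map
		},
		Spec: api.PodSpec{
			SecurityContext: &api.PodSecurityContext{
				HostIPC: hostIPC,
				HostPID: hostPID,
			},
			Containers: []api.Container{
				{
					Name:  "test-container",
					Image: "gcr.io/google_containers/busybox:1.24", // assumed image
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
}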
Code example #10
File: limit_range.go Project: ipbabble/kubernetes
var _ = framework.KubeDescribe("LimitRange", func() {
	f := framework.NewDefaultFramework("limitrange")

	It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
		By("Creating a LimitRange")

		min := getResourceList("50m", "100Mi")
		max := getResourceList("500m", "500Mi")
		defaultLimit := getResourceList("500m", "500Mi")
		defaultRequest := getResourceList("100m", "200Mi")
		maxLimitRequestRatio := api.ResourceList{}
		limitRange := newLimitRange("limit-range", api.LimitTypeContainer,
			min, max,
			defaultLimit, defaultRequest,
			maxLimitRequestRatio)
		limitRange, err := f.Client.LimitRanges(f.Namespace.Name).Create(limitRange)
		Expect(err).NotTo(HaveOccurred())

		By("Fetching the LimitRange to ensure it has proper values")
		limitRange, err = f.Client.LimitRanges(f.Namespace.Name).Get(limitRange.Name)
		expected := api.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
		actual := api.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
		err = equalResourceRequirement(expected, actual)
		Expect(err).NotTo(HaveOccurred())

		By("Creating a Pod with no resource requirements")
		pod := newTestPod("pod-no-resources", api.ResourceList{}, api.ResourceList{})
		pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring Pod has resource requirements applied from LimitRange")
		pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		for i := range pod.Spec.Containers {
			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
			if err != nil {
				// Print the pod to help in debugging.
				framework.Logf("Pod %+v does not have the expected requirements", pod)
				Expect(err).NotTo(HaveOccurred())
			}
		}

		By("Creating a Pod with partial resource requirements")
		pod = newTestPod("pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", ""))
		pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring Pod has merged resource requirements applied from LimitRange")
		pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		// This is an interesting case, so it's worth a comment
		// If you specify a Limit, and no Request, the Limit will default to the Request
		// This means that the LimitRange.DefaultRequest will ONLY take effect if a container.resources.limit is not supplied
		expected = api.ResourceRequirements{Requests: getResourceList("300m", "150Mi"), Limits: getResourceList("300m", "500Mi")}
		for i := range pod.Spec.Containers {
			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
			if err != nil {
				// Print the pod to help in debugging.
				framework.Logf("Pod %+v does not have the expected requirements", pod)
				Expect(err).NotTo(HaveOccurred())
			}
		}

		By("Failing to create a Pod with less than min resources")
		pod = newTestPod(podName, getResourceList("10m", "50Mi"), api.ResourceList{})
		pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
		Expect(err).To(HaveOccurred())

		By("Failing to create a Pod with more than max resources")
		pod = newTestPod(podName, getResourceList("600m", "600Mi"), api.ResourceList{})
		pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
		Expect(err).To(HaveOccurred())
	})

})
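getResourceList and newLimitRange are helpers defined elsewhere in limit_range.go. A sketch of a getResourceList-style helper, assuming an empty string means "not set":

func getResourceList(cpu, memory string) api.ResourceList {
	res := api.ResourceList{}
	if cpu != "" {
		res[api.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[api.ResourceMemory] = resource.MustParse(memory)
	}
	return res
}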
Code example #11
File: reboot.go Project: ncdc/kubernetes
var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
	var f *framework.Framework

	BeforeEach(func() {
		// These tests require SSH to nodes, so the provider check should be identical to the one there
		// (the limiting factor is the implementation of util.go's framework.GetSigner(...)).

		// Cluster must support node reboot
		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
	})

	AfterEach(func() {
		if CurrentGinkgoTestDescription().Failed {
			// Most of the reboot tests just make sure that addon/system pods are running, so dump
			// events for the kube-system namespace on failures
			namespaceName := api.NamespaceSystem
			By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
			events, err := f.Client.Events(namespaceName).List(api.ListOptions{})
			Expect(err).NotTo(HaveOccurred())

			for _, e := range events.Items {
				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
			}
		}
		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.  Most tests
		// make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test
		// that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that
		// was recently rebooted. There's no good way to framework.Poll for proxies being closed, so we sleep.
		//
		// TODO(cjcullen) reduce this sleep (#19314)
		if framework.ProviderIs("gke") {
			By("waiting 5 minutes for all dead tunnels to be dropped")
			time.Sleep(5 * time.Minute)
		}
	})

	f = framework.NewDefaultFramework("reboot")

	It("each node by ordering clean reboot and ensure they function upon restart", func() {
		// clean shutdown and restart
		// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted.
		testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &")
	})

	It("each node by ordering unclean reboot and ensure they function upon restart", func() {
		// unclean shutdown and restart
		// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
		testReboot(f.Client, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
	})

	It("each node by triggering kernel panic and ensure they function upon restart", func() {
		// kernel panic
		// We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered.
		testReboot(f.Client, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
	})

	It("each node by switching off the network interface and ensure they function upon switch on", func() {
		// switch the network interface off for a while to simulate a network outage
		// We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down.
		testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo ip link set eth0 down && sleep 120 && sudo ip link set eth0 up' >/dev/null 2>&1 &")
	})

	It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {
		// tell the firewall to drop all inbound packets for a while
		// We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets.
		// We still accept packets sent from localhost to prevent monit from restarting kubelet.
		testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo iptables -I INPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I INPUT 2 -j DROP && "+
			" sleep 120 && sudo iptables -D INPUT -j DROP && sudo iptables -D INPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &")
	})

	It("each node by dropping all outbound packets for a while and ensure they function afterwards", func() {
		// tell the firewall to drop all outbound packets for a while
		// We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets.
		// We still accept packets sent to localhost to prevent monit from restarting kubelet.
		testReboot(f.Client, "nohup sh -c 'sleep 10 &&  sudo iptables -I OUTPUT 1 -s 127.0.0.1 -j ACCEPT && sudo iptables -I OUTPUT 2 -j DROP && "+
			" sleep 120 && sudo iptables -D OUTPUT -j DROP && sudo iptables -D OUTPUT -s 127.0.0.1 -j ACCEPT' >/dev/null 2>&1 &")
	})
})
Code example #12
File: petset.go Project: abutcher/kubernetes
var _ = framework.KubeDescribe("StatefulSet", func() {
	f := framework.NewDefaultFramework("statefulset")
	var ns string
	var c clientset.Interface

	BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	framework.KubeDescribe("Basic StatefulSet functionality", func() {
		psName := "ss"
		labels := map[string]string{
			"foo": "bar",
			"baz": "blah",
		}
		headlessSvcName := "test"
		var petMounts, podMounts []v1.VolumeMount
		var ps *apps.StatefulSet

		BeforeEach(func() {
			petMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
			podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
			ps = newStatefulSet(psName, ns, headlessSvcName, 2, petMounts, podMounts, labels)

			By("Creating service " + headlessSvcName + " in namespace " + ns)
			headlessService := createServiceSpec(headlessSvcName, "", true, labels)
			_, err := c.Core().Services(ns).Create(headlessService)
			Expect(err).NotTo(HaveOccurred())
		})

		AfterEach(func() {
			if CurrentGinkgoTestDescription().Failed {
				dumpDebugInfo(c, ns)
			}
			framework.Logf("Deleting all statefulset in ns %v", ns)
			deleteAllStatefulSets(c, ns)
		})

		It("should provide basic identity", func() {
			By("Creating statefulset " + psName + " in namespace " + ns)
			*(ps.Spec.Replicas) = 3
			setInitializedAnnotation(ps, "false")

			_, err := c.Apps().StatefulSets(ns).Create(ps)
			Expect(err).NotTo(HaveOccurred())

			pst := statefulSetTester{c: c}

			By("Saturating stateful set " + ps.Name)
			pst.saturate(ps)

			By("Verifying statefulset mounted data directory is usable")
			framework.ExpectNoError(pst.checkMount(ps, "/data"))

			By("Verifying statefulset provides a stable hostname for each pod")
			framework.ExpectNoError(pst.checkHostname(ps))

			cmd := "echo $(hostname) > /data/hostname; sync;"
			By("Running " + cmd + " in all stateful pods")
			framework.ExpectNoError(pst.execInPets(ps, cmd))

			By("Restarting statefulset " + ps.Name)
			pst.restart(ps)
			pst.saturate(ps)

			By("Verifying statefulset mounted data directory is usable")
			framework.ExpectNoError(pst.checkMount(ps, "/data"))

			cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
			By("Running " + cmd + " in all stateful pods")
			framework.ExpectNoError(pst.execInPets(ps, cmd))
		})

		It("should handle healthy stateful pod restarts during scale", func() {
			By("Creating statefulset " + psName + " in namespace " + ns)
			*(ps.Spec.Replicas) = 2
			setInitializedAnnotation(ps, "false")

			_, err := c.Apps().StatefulSets(ns).Create(ps)
			Expect(err).NotTo(HaveOccurred())

			pst := statefulSetTester{c: c}

			pst.waitForRunningAndReady(1, ps)

			By("Marking stateful pod at index 0 as healthy.")
			pst.setHealthy(ps)

			By("Waiting for stateful pod at index 1 to enter running.")
			pst.waitForRunningAndReady(2, ps)

			// Now we have 1 healthy and 1 unhealthy stateful pod. Deleting the healthy stateful pod should *not*
			// create a new stateful pod till the remaining stateful pod becomes healthy, which won't happen till
			// we set the healthy bit.

			By("Deleting healthy stateful pod at index 0.")
			pst.deletePetAtIndex(0, ps)

			By("Confirming stateful pod at index 0 is not recreated.")
			pst.confirmPetCount(1, ps, 10*time.Second)

			By("Deleting unhealthy stateful pod at index 1.")
			pst.deletePetAtIndex(1, ps)

			By("Confirming all stateful pods in statefulset are created.")
			pst.saturate(ps)
		})

		It("should allow template updates", func() {
			By("Creating stateful set " + psName + " in namespace " + ns)
			*(ps.Spec.Replicas) = 2

			ps, err := c.Apps().StatefulSets(ns).Create(ps)
			Expect(err).NotTo(HaveOccurred())

			pst := statefulSetTester{c: c}

			pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)

			newImage := newNginxImage
			oldImage := ps.Spec.Template.Spec.Containers[0].Image
			By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
			Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
			_, err = framework.UpdateStatefulSetWithRetries(c, ns, ps.Name, func(update *apps.StatefulSet) {
				update.Spec.Template.Spec.Containers[0].Image = newImage
			})
			Expect(err).NotTo(HaveOccurred())

			updateIndex := 0
			By(fmt.Sprintf("Deleting stateful pod at index %d", updateIndex))
			pst.deletePetAtIndex(updateIndex, ps)

			By("Waiting for all stateful pods to be running again")
			pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)

			By(fmt.Sprintf("Verifying stateful pod at index %d is updated", updateIndex))
			verify := func(pod *v1.Pod) {
				podImage := pod.Spec.Containers[0].Image
				Expect(podImage).To(Equal(newImage), fmt.Sprintf("Expected stateful pod image %s updated to %s", podImage, newImage))
			}
			pst.verifyPodAtIndex(updateIndex, ps, verify)
		})

		It("Scaling down before scale up is finished should wait until current pod will be running and ready before it will be removed", func() {
			By("Creating stateful set " + psName + " in namespace " + ns + ", and pausing scale operations after each pod")
			testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{
				Path: "/index.html",
				Port: intstr.IntOrString{IntVal: 80}}}}
			ps := newStatefulSet(psName, ns, headlessSvcName, 1, nil, nil, labels)
			ps.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe
			setInitializedAnnotation(ps, "false")
			ps, err := c.Apps().StatefulSets(ns).Create(ps)
			Expect(err).NotTo(HaveOccurred())
			pst := &statefulSetTester{c: c}
			pst.waitForRunningAndReady(1, ps)

			By("Scaling up stateful set " + psName + " to 3 replicas and pausing after 2nd pod")
			pst.setHealthy(ps)
			pst.updateReplicas(ps, 3)
			pst.waitForRunningAndReady(2, ps)

			By("Before scale up finished setting 2nd pod to be not ready by breaking readiness probe")
			pst.breakProbe(ps, testProbe)
			pst.waitForRunningAndNotReady(2, ps)

			By("Continue scale operation after the 2nd pod, and scaling down to 1 replica")
			pst.setHealthy(ps)
			pst.updateReplicas(ps, 1)

			By("Verifying that the 2nd pod wont be removed if it is not running and ready")
			pst.confirmPetCount(2, ps, 10*time.Second)
			expectedPodName := ps.Name + "-1"
			expectedPod, err := f.ClientSet.Core().Pods(ns).Get(expectedPodName, metav1.GetOptions{})
			Expect(err).NotTo(HaveOccurred())
			watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.SingleObject(
				v1.ObjectMeta{
					Name:            expectedPod.Name,
					ResourceVersion: expectedPod.ResourceVersion,
				},
			))
			Expect(err).NotTo(HaveOccurred())

			By("Verifying the 2nd pod is removed only when it becomes running and ready")
			pst.restoreProbe(ps, testProbe)
			_, err = watch.Until(statefulsetTimeout, watcher, func(event watch.Event) (bool, error) {
				pod := event.Object.(*v1.Pod)
				if event.Type == watch.Deleted && pod.Name == expectedPodName {
					return false, fmt.Errorf("Pod %v was deleted before enter running", pod.Name)
				}
				framework.Logf("Observed event %v for pod %v. Phase %v, Pod is ready %v",
					event.Type, pod.Name, pod.Status.Phase, v1.IsPodReady(pod))
				if pod.Name != expectedPodName {
					return false, nil
				}
				if pod.Status.Phase == v1.PodRunning && v1.IsPodReady(pod) {
					return true, nil
				}
				return false, nil
			})
			Expect(err).NotTo(HaveOccurred())
		})

		It("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
			psLabels := klabels.Set(labels)
			By("Initializing watcher for selector " + psLabels.String())
			watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{
				LabelSelector: psLabels.AsSelector().String(),
			})
			Expect(err).NotTo(HaveOccurred())

			By("Creating stateful set " + psName + " in namespace " + ns)
			testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{
				Path: "/index.html",
				Port: intstr.IntOrString{IntVal: 80}}}}
			ps := newStatefulSet(psName, ns, headlessSvcName, 1, nil, nil, psLabels)
			ps.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe
			ps, err = c.Apps().StatefulSets(ns).Create(ps)
			Expect(err).NotTo(HaveOccurred())

			By("Waiting until all stateful set " + psName + " replicas will be running in namespace " + ns)
			pst := &statefulSetTester{c: c}
			pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)

			By("Confirming that stateful set scale up will halt with unhealthy stateful pod")
			pst.breakProbe(ps, testProbe)
			pst.waitForRunningAndNotReady(*ps.Spec.Replicas, ps)
			pst.updateReplicas(ps, 3)
			pst.confirmPetCount(1, ps, 10*time.Second)

			By("Scaling up stateful set " + psName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
			pst.restoreProbe(ps, testProbe)
			pst.waitForRunningAndReady(3, ps)

			By("Verifying that stateful set " + psName + " was scaled up in order")
			expectedOrder := []string{psName + "-0", psName + "-1", psName + "-2"}
			_, err = watch.Until(statefulsetTimeout, watcher, func(event watch.Event) (bool, error) {
				if event.Type != watch.Added {
					return false, nil
				}
				pod := event.Object.(*v1.Pod)
				if pod.Name == expectedOrder[0] {
					expectedOrder = expectedOrder[1:]
				}
				return len(expectedOrder) == 0, nil

			})
			Expect(err).NotTo(HaveOccurred())

			By("Scale down will halt with unhealthy stateful pod")
			watcher, err = f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{
				LabelSelector: psLabels.AsSelector().String(),
			})
			Expect(err).NotTo(HaveOccurred())

			pst.breakProbe(ps, testProbe)
			pst.waitForRunningAndNotReady(3, ps)
			pst.updateReplicas(ps, 0)
			pst.confirmPetCount(3, ps, 10*time.Second)

			By("Scaling down stateful set " + psName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
			pst.restoreProbe(ps, testProbe)
			pst.scale(ps, 0)

			By("Verifying that stateful set " + psName + " was scaled down in reverse order")
			expectedOrder = []string{psName + "-2", psName + "-1", psName + "-0"}
			_, err = watch.Until(statefulsetTimeout, watcher, func(event watch.Event) (bool, error) {
				if event.Type != watch.Deleted {
					return false, nil
				}
				pod := event.Object.(*v1.Pod)
				if pod.Name == expectedOrder[0] {
					expectedOrder = expectedOrder[1:]
				}
				return len(expectedOrder) == 0, nil

			})
			Expect(err).NotTo(HaveOccurred())
		})
	})

	framework.KubeDescribe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() {
		var pst *statefulSetTester
		var appTester *clusterAppTester

		BeforeEach(func() {
			pst = &statefulSetTester{c: c}
			appTester = &clusterAppTester{tester: pst, ns: ns}
		})

		AfterEach(func() {
			if CurrentGinkgoTestDescription().Failed {
				dumpDebugInfo(c, ns)
			}
			framework.Logf("Deleting all statefulset in ns %v", ns)
			deleteAllStatefulSets(c, ns)
		})

		It("should creating a working zookeeper cluster", func() {
			appTester.pet = &zookeeperTester{tester: pst}
			appTester.run()
		})

		It("should creating a working redis cluster", func() {
			appTester.pet = &redisTester{tester: pst}
			appTester.run()
		})

		It("should creating a working mysql cluster", func() {
			appTester.pet = &mysqlGaleraTester{tester: pst}
			appTester.run()
		})

		It("should creating a working CockroachDB cluster", func() {
			appTester.pet = &cockroachDBTester{tester: pst}
			appTester.run()
		})
	})
})
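A minimal standalone sketch of the ordinal-ordering check used in the watch.Until predicates above: a name is consumed from the front of expectedOrder only when it arrives in order, and the check succeeds once the slice is empty. This is plain Go with a fabricated event stream; it is not the framework's API, just an illustration of the slice-consumption idea.

package main

import "fmt"

func main() {
	// Expected creation order for a 3-replica StatefulSet named "ss".
	expectedOrder := []string{"ss-0", "ss-1", "ss-2"}
	// Fabricated stream of pod names as they might arrive from a watch.
	observed := []string{"ss-0", "some-unrelated-pod", "ss-1", "ss-2"}

	for _, name := range observed {
		if len(expectedOrder) > 0 && name == expectedOrder[0] {
			// Only an in-order arrival advances the expectation.
			expectedOrder = expectedOrder[1:]
		}
		if len(expectedOrder) == 0 {
			fmt.Println("all stateful pods observed in ordinal order")
			return
		}
	}
	// An out-of-order arrival is never matched again, so the check
	// would keep waiting until the surrounding timeout fires.
	fmt.Printf("still waiting for: %v\n", expectedOrder)
}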
Code example #13
	}

	// We should have exceeded the finalTransactionsExpected number of transactions.
	// If this fails but transactions are still being created, we may need to recalibrate
	// the finalTransactionsExpected value - or else your cluster is broken/slow.
	Ω(totalTransactions).Should(BeNumerically(">", finalTransactionsExpected))
}

var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {

	BeforeEach(func() {
		// The shell scripts in k8petstore break on Jenkins... A pure Go rewrite is in progress.
		framework.SkipUnlessProviderIs("local")
	})

	// The number of nodes dictates total number of generators/transaction expectations.
	var nodeCount int
	f := framework.NewDefaultFramework("petstore")

	It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeCount = len(nodes.Items)

		loadGenerators := nodeCount
		restServers := nodeCount
		fmt.Printf("load generators / rest servers [ %v  /  %v ] ", loadGenerators, restServers)
		runK8petstore(restServers, loadGenerators, f.ClientSet, f.Namespace.Name, k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout)
	})

})
Code example #14
File: downward_api.go Project: CodeJuan/kubernetes
var _ = framework.KubeDescribe("Downward API", func() {
	f := framework.NewDefaultFramework("downward-api")

	It("should provide pod name and namespace as env vars [Conformance]", func() {
		podName := "downward-api-" + string(uuid.NewUUID())
		env := []api.EnvVar{
			{
				Name: "POD_NAME",
				ValueFrom: &api.EnvVarSource{
					FieldRef: &api.ObjectFieldSelector{
						APIVersion: "v1",
						FieldPath:  "metadata.name",
					},
				},
			},
			{
				Name: "POD_NAMESPACE",
				ValueFrom: &api.EnvVarSource{
					FieldRef: &api.ObjectFieldSelector{
						APIVersion: "v1",
						FieldPath:  "metadata.namespace",
					},
				},
			},
		}

		expectations := []string{
			fmt.Sprintf("POD_NAME=%v", podName),
			fmt.Sprintf("POD_NAMESPACE=%v", f.Namespace.Name),
		}

		testDownwardAPI(f, podName, env, expectations)
	})

	It("should provide pod IP as an env var", func() {
		podName := "downward-api-" + string(uuid.NewUUID())
		env := []api.EnvVar{
			{
				Name: "POD_IP",
				ValueFrom: &api.EnvVarSource{
					FieldRef: &api.ObjectFieldSelector{
						APIVersion: "v1",
						FieldPath:  "status.podIP",
					},
				},
			},
		}

		expectations := []string{
			"POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
		}

		testDownwardAPI(f, podName, env, expectations)
	})

	It("should provide container's limits.cpu/memory and requests.cpu/memory as env vars", func() {
		podName := "downward-api-" + string(uuid.NewUUID())
		env := []api.EnvVar{
			{
				Name: "CPU_LIMIT",
				ValueFrom: &api.EnvVarSource{
					ResourceFieldRef: &api.ResourceFieldSelector{
						Resource: "limits.cpu",
					},
				},
			},
			{
				Name: "MEMORY_LIMIT",
				ValueFrom: &api.EnvVarSource{
					ResourceFieldRef: &api.ResourceFieldSelector{
						Resource: "limits.memory",
					},
				},
			},
			{
				Name: "CPU_REQUEST",
				ValueFrom: &api.EnvVarSource{
					ResourceFieldRef: &api.ResourceFieldSelector{
						Resource: "requests.cpu",
					},
				},
			},
			{
				Name: "MEMORY_REQUEST",
				ValueFrom: &api.EnvVarSource{
					ResourceFieldRef: &api.ResourceFieldSelector{
						Resource: "requests.memory",
					},
				},
			},
		}
		expectations := []string{
			fmt.Sprintf("CPU_LIMIT=2"),
			fmt.Sprintf("MEMORY_LIMIT=67108864"),
			fmt.Sprintf("CPU_REQUEST=1"),
			fmt.Sprintf("MEMORY_REQUEST=33554432"),
		}

		testDownwardAPI(f, podName, env, expectations)
	})

	It("should provide default limits.cpu/memory from node capacity", func() {
		podName := "downward-api-" + string(uuid.NewUUID())
		env := []api.EnvVar{
			{
				Name: "CPU_LIMIT",
				ValueFrom: &api.EnvVarSource{
					ResourceFieldRef: &api.ResourceFieldSelector{
						Resource: "limits.cpu",
					},
				},
			},
			{
				Name: "MEMORY_LIMIT",
				ValueFrom: &api.EnvVarSource{
					ResourceFieldRef: &api.ResourceFieldSelector{
						Resource: "limits.memory",
					},
				},
			},
		}
		expectations := []string{
			fmt.Sprintf("CPU_LIMIT=[1-9]"),
			fmt.Sprintf("MEMORY_LIMIT=[1-9]"),
		}
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": podName},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:    "dapi-container",
						Image:   "gcr.io/google_containers/busybox:1.24",
						Command: []string{"sh", "-c", "env"},
						Env:     env,
					},
				},
				RestartPolicy: api.RestartPolicyNever,
			},
		}

		testDownwardAPIUsingPod(f, pod, env, expectations)
	})
})
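The POD_IP expectation above is a regular expression rather than a literal value, since the IP is only known at runtime. A small standalone sketch, using only the standard regexp package and a fabricated output line, of how such an expectation can be checked:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same shape as the POD_IP expectation above: four dot-separated digit groups.
	pattern := regexp.MustCompile(`POD_IP=(?:\d+)\.(?:\d+)\.(?:\d+)\.(?:\d+)`)
	// Fabricated container output line.
	output := "POD_IP=10.0.0.42"
	fmt.Println("matches:", pattern.MatchString(output)) // matches: true
}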
Code example #15
File: etcd_failure.go Project: alex-mohr/kubernetes
var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() {

	f := framework.NewDefaultFramework("etcd-failure")

	BeforeEach(func() {
		// This test requires:
		// - SSH
		// - master access
		// ... so the provider check should be identical to the intersection of
		// providers that provide those capabilities.
		framework.SkipUnlessProviderIs("gce")

		Expect(framework.RunRC(testutils.RCConfig{
			Client:    f.ClientSet,
			Name:      "baz",
			Namespace: f.Namespace.Name,
			Image:     framework.GetPauseImageName(f.ClientSet),
			Replicas:  1,
		})).NotTo(HaveOccurred())
	})

	It("should recover from network partition with master", func() {
		etcdFailTest(
			f,
			"sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
			"sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP",
		)
	})

	It("should recover from SIGKILL", func() {
		etcdFailTest(
			f,
			"pgrep etcd | xargs -I {} sudo kill -9 {}",
			"echo 'do nothing. monit should restart etcd.'",
		)
	})
})
Code example #16
File: simple_mount.go Project: kubernetes/kubernetes
var _ = framework.KubeDescribe("SimpleMount", func() {
	f := framework.NewDefaultFramework("simple-mount-test")

	// This is a very simple test that exercises the Kubelet's mounter code path.
	// If the mount fails, the pod will not be able to run, and CreateSync will time out.
	It("should be able to mount an emptydir on a container", func() {
		pod := &v1.Pod{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Pod",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "simple-mount-pod",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name:  "simple-mount-container",
						Image: framework.GetPauseImageNameForHostArch(),
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "simply-mounted-volume",
								MountPath: "/opt/",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "simply-mounted-volume",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: "Memory",
							},
						},
					},
				},
			},
		}
		podClient := f.PodClient()
		pod = podClient.CreateSync(pod)

	})
})
Code example #17
var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
	f := framework.NewDefaultFramework("gcl-logging")

	BeforeEach(func() {
		// TODO (crassirostris): Expand to GKE once the test is stable
		framework.SkipUnlessProviderIs("gce")
	})

	It("should check that logs from containers are ingested in GCL", func() {
		By("Running synthetic logger")
		createSynthLogger(f, expectedLinesCount)
		defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{})
		err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
		framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))

		By("Waiting for logs to ingest")
		totalMissing := expectedLinesCount
		for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(ingestionRetryDelay) {
			var err error
			totalMissing, err = getMissingLinesCountGcl(f, synthLoggerPodName, expectedLinesCount)
			if err != nil {
				framework.Logf("Failed to get missing lines count due to %v", err)
				totalMissing = expectedLinesCount
			} else if totalMissing > 0 {
				framework.Logf("Still missing %d lines", totalMissing)
			}

			if totalMissing == 0 {
				break
			}
		}

		if totalMissing > 0 {
			if err := reportLogsFromFluentdPod(f); err != nil {
				framework.Logf("Failed to report logs from fluentd pod due to %v", err)
			}
		}

		Expect(totalMissing).To(Equal(0), "Some log lines are still missing")
	})
})
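The ingestion wait above is a poll-until-timeout loop. A standalone sketch of the same pattern, where the check function is a hypothetical stand-in for getMissingLinesCountGcl and the timeout and retry delay are made-up values:

package main

import (
	"fmt"
	"time"
)

// pollUntilIngested retries check until it reports zero missing lines or the
// timeout elapses, mirroring the loop structure in the test above.
func pollUntilIngested(timeout, retryDelay time.Duration, check func() (int, error)) int {
	missing := -1
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(retryDelay) {
		m, err := check()
		if err != nil {
			fmt.Println("check failed:", err)
			continue
		}
		missing = m
		if missing == 0 {
			break
		}
		fmt.Println("still missing", missing, "lines")
	}
	return missing
}

func main() {
	attempts := 0
	// Hypothetical check that "ingests" everything on the third attempt.
	check := func() (int, error) {
		attempts++
		if attempts < 3 {
			return 10 - attempts, nil
		}
		return 0, nil
	}
	fmt.Println("missing at the end:", pollUntilIngested(2*time.Second, 100*time.Millisecond, check))
}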
Code example #18
var _ = framework.KubeDescribe("MetricsGrabber", func() {
	f := framework.NewDefaultFramework("metrics-grabber")
	var c clientset.Interface
	var grabber *metrics.MetricsGrabber
	BeforeEach(func() {
		var err error
		c = f.ClientSet
		framework.ExpectNoError(err)
		grabber, err = metrics.NewMetricsGrabber(c, true, true, true, true)
		framework.ExpectNoError(err)
	})

	It("should grab all metrics from API server.", func() {
		By("Connecting to /metrics endpoint")
		response, err := grabber.GrabFromApiServer()
		framework.ExpectNoError(err)
		Expect(response).NotTo(BeEmpty())
	})

	It("should grab all metrics from a Kubelet.", func() {
		By("Proxying to Node through the API server")
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		Expect(nodes.Items).NotTo(BeEmpty())
		response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
		framework.ExpectNoError(err)
		Expect(response).NotTo(BeEmpty())
	})

	It("should grab all metrics from a Scheduler.", func() {
		By("Proxying to Pod through the API server")
		// Check if master Node is registered
		nodes, err := c.Core().Nodes().List(v1.ListOptions{})
		framework.ExpectNoError(err)

		var masterRegistered = false
		for _, node := range nodes.Items {
			if strings.HasSuffix(node.Name, "master") {
				masterRegistered = true
			}
		}
		if !masterRegistered {
			framework.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.")
			return
		}
		response, err := grabber.GrabFromScheduler()
		framework.ExpectNoError(err)
		Expect(response).NotTo(BeEmpty())
	})

	It("should grab all metrics from a ControllerManager.", func() {
		By("Proxying to Pod through the API server")
		// Check if master Node is registered
		nodes, err := c.Core().Nodes().List(v1.ListOptions{})
		framework.ExpectNoError(err)

		var masterRegistered = false
		for _, node := range nodes.Items {
			if strings.HasSuffix(node.Name, "master") {
				masterRegistered = true
			}
		}
		if !masterRegistered {
			framework.Logf("Master is node api.Registry. Skipping testing ControllerManager metrics.")
			return
		}
		response, err := grabber.GrabFromControllerManager()
		framework.ExpectNoError(err)
		Expect(response).NotTo(BeEmpty())
	})
})
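Both the Scheduler and ControllerManager tests above skip themselves when no node name ends in "master". A minimal sketch that factors that check into a helper; the node names here are fabricated:

package main

import (
	"fmt"
	"strings"
)

// masterRegistered reports whether any node name carries the "master" suffix,
// which is how the tests above decide whether control-plane metrics are reachable.
func masterRegistered(nodeNames []string) bool {
	for _, name := range nodeNames {
		if strings.HasSuffix(name, "master") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(masterRegistered([]string{"e2e-node-1", "e2e-master"})) // true
	fmt.Println(masterRegistered([]string{"worker-a", "worker-b"}))     // false
}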
Code example #19
var _ = framework.KubeDescribe("Networking", func() {
	var svcname = "nettest"
	f := framework.NewDefaultFramework(svcname)

	BeforeEach(func() {
		// Assert basic external connectivity.
		// Since this is not really a test of Kubernetes in any way, we
		// leave it as a pre-test assertion rather than a Ginkgo test.
		By("Executing a successful http request from the external internet")
		resp, err := http.Get("http://google.com")
		if err != nil {
			framework.Failf("Unable to connect/talk to the internet: %v", err)
		}
		if resp.StatusCode != http.StatusOK {
			framework.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp)
		}
	})

	It("should provide Internet connection for containers [Conformance]", func() {
		By("Running container which tries to wget google.com")
		framework.ExpectNoError(framework.CheckConnectivityToHost(f, "", "wget-test", "google.com", 30))
	})

	// First test because it has no dependencies on variables created later on.
	It("should provide unchanging, static URL paths for kubernetes api services [Conformance]", func() {
		tests := []struct {
			path string
		}{
			{path: "/healthz"},
			{path: "/api"},
			{path: "/apis"},
			{path: "/logs"},
			{path: "/metrics"},
			{path: "/swaggerapi"},
			{path: "/version"},
			// TODO: test proxy links here
		}
		for _, test := range tests {
			By(fmt.Sprintf("testing: %s", test.path))
			data, err := f.Client.RESTClient.Get().
				AbsPath(test.path).
				DoRaw()
			if err != nil {
				framework.Failf("Failed: %v\nBody: %s", err, string(data))
			}
		}
	})

	It("should check kube-proxy urls", func() {
		// TODO: this is overkill; we just need the host networking pod
		// to hit the kube-proxy URLs.
		config := framework.NewNetworkingTestConfig(f)

		By("checking kube-proxy URLs")
		config.GetSelfURL("/healthz", "ok")
		config.GetSelfURL("/proxyMode", "iptables") // the default
	})

	// TODO: Remove [Slow] when this has had enough bake time to prove presubmit worthiness.
	framework.KubeDescribe("Granular Checks: Services [Slow]", func() {

		It("should function for pod-Service: http", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHttpPort))
			config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())

			By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.ExternalAddrs[0], config.NodeHttpPort))
			config.DialFromTestContainer("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
		})

		It("should function for pod-Service: udp", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUdpPort))
			config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())

			By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.ExternalAddrs[0], config.NodeUdpPort))
			config.DialFromTestContainer("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
		})

		It("should function for node-Service: http", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHttpPort))
			config.DialFromNode("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())

			By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort))
			config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
		})

		It("should function for node-Service: udp", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUdpPort))
			config.DialFromNode("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())

			By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort))
			config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
		})

		It("should function for endpoint-Service: http", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHttpPort))
			config.DialFromEndpointContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())

			By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHttpPort))
			config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
		})

		It("should function for endpoint-Service: udp", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUdpPort))
			config.DialFromEndpointContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())

			By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUdpPort))
			config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
		})

		It("should update endpoints: http", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHttpPort))
			config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())

			config.DeleteNetProxyPod()

			By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHttpPort))
			config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
		})

		It("should update endpoints: udp", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUdpPort))
			config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())

			config.DeleteNetProxyPod()

			By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUdpPort))
			config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
		})

		// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
		It("should update nodePort: http [Slow]", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort))
			config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())

			config.DeleteNodePortService()

			By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort))
			config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, config.MaxTries, sets.NewString())
		})

		// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
		It("should update nodePort: udp [Slow]", func() {
			config := framework.NewNetworkingTestConfig(f)
			By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort))
			config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())

			config.DeleteNodePortService()

			By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort))
			config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, config.MaxTries, sets.NewString())
		})
		// TODO: Test sessionAffinity #31712
	})
})
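The static-URL test above walks a fixed list of API server paths through the framework's REST client. A standalone sketch of the same idea using only net/http, with a hypothetical, unauthenticated API server address:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical insecure API server endpoint; the real test goes through
	// the authenticated framework client instead.
	base := "http://127.0.0.1:8080"
	paths := []string{"/healthz", "/api", "/apis", "/version"}

	for _, p := range paths {
		resp, err := http.Get(base + p)
		if err != nil {
			fmt.Printf("GET %s failed: %v\n", p, err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("GET %s -> %d (%d bytes)\n", p, resp.StatusCode, len(body))
	}
}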
Code example #20
File: dns.go Project: paralin/kubernetes
var _ = framework.KubeDescribe("DNS", func() {
	f := framework.NewDefaultFramework("dns")

	It("should provide DNS for the cluster [Conformance]", func() {
		// All the names we need to be able to resolve.
		// TODO: Spin up a separate test service and test that dns works for that service.
		namesToResolve := []string{
			"kubernetes.default",
			"kubernetes.default.svc",
			"kubernetes.default.svc.cluster.local",
			"google.com",
		}
		// Added due to #8512. This is critical for GCE and GKE deployments.
		if framework.ProviderIs("gce", "gke") {
			namesToResolve = append(namesToResolve, "metadata")
		}
		hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name)
		hostEntries := []string{hostFQDN, dnsTestPodHostName}
		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name)
		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name)
		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
		By("Running these commands on jessie: " + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a pod to probe DNS")
		pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)
		validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
	})

	It("should provide DNS for services [Conformance]", func() {
		// Create a test headless service.
		By("Creating a test headless service")
		testServiceSelector := map[string]string{
			"dns-test": "true",
		}
		headlessService := createServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
		_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			By("deleting the test headless service")
			defer GinkgoRecover()
			f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
		}()

		regularService := createServiceSpec("test-service-2", "", false, testServiceSelector)
		regularService, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(regularService)
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			By("deleting the test service")
			defer GinkgoRecover()
			f.ClientSet.Core().Services(f.Namespace.Name).Delete(regularService.Name, nil)
		}()

		// All the names we need to be able to resolve.
		// TODO: Create more endpoints and ensure that multiple A records are returned
		// for headless service.
		namesToResolve := []string{
			fmt.Sprintf("%s", headlessService.Name),
			fmt.Sprintf("%s.%s", headlessService.Name, f.Namespace.Name),
			fmt.Sprintf("%s.%s.svc", headlessService.Name, f.Namespace.Name),
			fmt.Sprintf("_http._tcp.%s.%s.svc", headlessService.Name, f.Namespace.Name),
			fmt.Sprintf("_http._tcp.%s.%s.svc", regularService.Name, f.Namespace.Name),
		}

		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name)
		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name)
		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
		By("Running these commands on jessie: " + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a pod to probe DNS")
		pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, false)
		pod.ObjectMeta.Labels = testServiceSelector

		validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
	})

	It("should provide DNS for pods for Hostname and Subdomain Annotation", func() {
		// Create a test headless service.
		By("Creating a test headless service")
		testServiceSelector := map[string]string{
			"dns-test-hostname-attribute": "true",
		}
		serviceName := "dns-test-service-2"
		podHostname := "dns-querier-2"
		headlessService := createServiceSpec(serviceName, "", true, testServiceSelector)
		_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			By("deleting the test headless service")
			defer GinkgoRecover()
			f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
		}()

		hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podHostname, serviceName, f.Namespace.Name)
		hostNames := []string{hostFQDN, podHostname}
		namesToResolve := []string{hostFQDN}
		wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostNames, "", "wheezy", f.Namespace.Name)
		jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostNames, "", "jessie", f.Namespace.Name)
		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
		By("Running these commands on jessie: " + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a pod to probe DNS")
		pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)
		pod1.ObjectMeta.Labels = testServiceSelector
		pod1.ObjectMeta.Annotations = map[string]string{
			pod.PodHostnameAnnotation:  podHostname,
			pod.PodSubdomainAnnotation: serviceName,
		}

		validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...))
	})

	It("should provide DNS for ExternalName services", func() {
		// Create a test ExternalName service.
		By("Creating a test externalName service")
		serviceName := "dns-test-service-3"
		externalNameService := createServiceSpec(serviceName, "foo.example.com", false, nil)
		_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(externalNameService)
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			By("deleting the test externalName service")
			defer GinkgoRecover()
			f.ClientSet.Core().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
		}()

		hostFQDN := fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name)
		wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
		jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
		By("Running these commands on jessie: " + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a pod to probe DNS")
		pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)

		validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.")

		// Test changing the externalName field
		By("changing the externalName to bar.example.com")
		_, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
			s.Spec.ExternalName = "bar.example.com"
		})
		Expect(err).NotTo(HaveOccurred())
		wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
		jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
		By("Running these commands on jessie: " + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a second pod to probe DNS")
		pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)

		validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.")

		// Test changing type from ExternalName to ClusterIP
		By("changing the service to type=ClusterIP")
		_, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
			s.Spec.Type = v1.ServiceTypeClusterIP
			s.Spec.ClusterIP = "127.1.2.3"
			s.Spec.Ports = []v1.ServicePort{
				{Port: 80, Name: "http", Protocol: "TCP"},
			}
		})
		Expect(err).NotTo(HaveOccurred())
		wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy")
		jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie")
		By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
		By("Running these commands on jessie: " + jessieProbeCmd + "\n")

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a third pod to probe DNS")
		pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)

		validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, "127.1.2.3")
	})
})
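The DNS tests above repeatedly assemble cluster DNS names by hand. A small sketch of the name shapes they rely on, assuming the default "cluster.local" domain and fabricated namespace, service, and pod names:

package main

import "fmt"

func main() {
	const clusterDomain = "cluster.local" // default; clusters can be configured differently
	namespace := "e2e-tests-dns-abcde"    // fabricated
	service := "dns-test-service"
	podHostname := "dns-querier-1"

	serviceFQDN := fmt.Sprintf("%s.%s.svc.%s", service, namespace, clusterDomain)
	podFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, service, namespace, clusterDomain)
	srvName := fmt.Sprintf("_http._tcp.%s.%s.svc.%s", service, namespace, clusterDomain)

	fmt.Println(serviceFQDN) // dns-test-service.e2e-tests-dns-abcde.svc.cluster.local
	fmt.Println(podFQDN)     // dns-querier-1.dns-test-service.e2e-tests-dns-abcde.svc.cluster.local
	fmt.Println(srvName)     // _http._tcp.dns-test-service.e2e-tests-dns-abcde.svc.cluster.local
}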
Code example #21
var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
	var c *client.Client
	var cs clientset.Interface
	var nodeList *api.NodeList
	var systemPodsNo int
	var totalPodCapacity int64
	var RCName string
	var ns string
	f := framework.NewDefaultFramework("sched-pred")
	ignoreLabels := framework.ImagePullerLabels

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := framework.DeleteRCAndPods(c, f.ClientSet, ns, RCName)
			framework.ExpectNoError(err)
		}
	})

	BeforeEach(func() {
		c = f.Client
		cs = f.ClientSet
		ns = f.Namespace.Name
		nodeList = &api.NodeList{}

		framework.WaitForAllNodesHealthy(c, time.Minute)
		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(c)

		err := framework.CheckTestingNSDeletedExcept(c, ns)
		framework.ExpectNoError(err)

		// Every test case in this suite assumes that cluster add-on pods stay stable and
		// cannot be run in parallel with any other test that touches Nodes or Pods.
		// This is because we need precise control over what is running in the cluster.
		systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
		Expect(err).NotTo(HaveOccurred())
		systemPodsNo = 0
		for _, pod := range systemPods {
			if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
				systemPodsNo++
			}
		}

		err = framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
		Expect(err).NotTo(HaveOccurred())

		for _, node := range nodeList.Items {
			framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
			framework.PrintAllKubeletPods(c, node.Name)
		}

	})

	// This test verifies that the max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
	// and cannot be run in parallel with any other test that touches Nodes or Pods. This is because, to check
	// whether max-pods is working, we need to fully saturate the cluster and keep it in that state for a few seconds.
	//
	// Slow PR #13315 (8 min)
	It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() {
		totalPodCapacity = 0

		for _, node := range nodeList.Items {
			framework.Logf("Node: %v", node)
			podCapacity, found := node.Status.Capacity["pods"]
			Expect(found).To(Equal(true))
			totalPodCapacity += podCapacity.Value()
		}

		currentlyScheduledPods := framework.WaitForStableCluster(c, masterNodes)
		podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods

		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))

		// As the pods are distributed randomly among nodes,
		// it can easily happen that all nodes are saturated
		// and there is no need to create additional pods.
		// StartPods requires at least one pod to replicate.
		if podsNeededForSaturation > 0 {
			framework.ExpectNoError(testutils.StartPods(c, podsNeededForSaturation, ns, "maxp",
				*initPausePod(f, pausePodConfig{
					Name:   "",
					Labels: map[string]string{"name": ""},
				}), true, framework.Logf))
		}
		podName := "additional-pod"
		createPausePod(f, pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "additional"},
		})
		waitForScheduler()
		verifyResult(c, podsNeededForSaturation, 1, ns)
	})

	// This test verifies that we don't allow scheduling of pods such that the sum of the pods' limits exceeds a machine's capacity.
	// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
	// This is because we need precise control over what is running in the cluster.
	It("validates resource limits of pods that are allowed to run [Conformance]", func() {
		nodeMaxCapacity := int64(0)

		nodeToCapacityMap := make(map[string]int64)
		for _, node := range nodeList.Items {
			capacity, found := node.Status.Capacity["cpu"]
			Expect(found).To(Equal(true))
			nodeToCapacityMap[node.Name] = capacity.MilliValue()
			if nodeMaxCapacity < capacity.MilliValue() {
				nodeMaxCapacity = capacity.MilliValue()
			}
		}
		framework.WaitForStableCluster(c, masterNodes)

		pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
		framework.ExpectNoError(err)
		for _, pod := range pods.Items {
			_, found := nodeToCapacityMap[pod.Spec.NodeName]
			if found && pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
				framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
				nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
			}
		}

		var podsNeededForSaturation int

		milliCpuPerPod := nodeMaxCapacity / maxNumberOfPods
		if milliCpuPerPod < minPodCPURequest {
			milliCpuPerPod = minPodCPURequest
		}
		framework.Logf("Using pod capacity: %vm", milliCpuPerPod)
		for name, leftCapacity := range nodeToCapacityMap {
			framework.Logf("Node: %v has cpu capacity: %vm", name, leftCapacity)
			podsNeededForSaturation += (int)(leftCapacity / milliCpuPerPod)
		}

		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation))

		// As the pods are distributed randomly among nodes,
		// it can easily happen that all nodes are saturated
		// and there is no need to create additional pods.
		// StartPods requires at least one pod to replicate.
		if podsNeededForSaturation > 0 {
			framework.ExpectNoError(testutils.StartPods(c, podsNeededForSaturation, ns, "overcommit",
				*initPausePod(f, pausePodConfig{
					Name:   "",
					Labels: map[string]string{"name": ""},
					Resources: &api.ResourceRequirements{
						Limits: api.ResourceList{
							"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
						},
						Requests: api.ResourceList{
							"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
						},
					},
				}), true, framework.Logf))
		}
		podName := "additional-pod"
		createPausePod(f, pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "additional"},
			Resources: &api.ResourceRequirements{
				Limits: api.ResourceList{
					"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
				},
			},
		})
		waitForScheduler()
		verifyResult(c, podsNeededForSaturation, 1, ns)
	})

	// The test nodes do not carry the requested label, hence it should be impossible to schedule a Pod with a
	// nonempty Selector set.
	It("validates that NodeSelector is respected if not matching [Conformance]", func() {
		By("Trying to schedule Pod with nonempty NodeSelector.")
		podName := "restricted-pod"

		framework.WaitForStableCluster(c, masterNodes)

		createPausePod(f, pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "restricted"},
			NodeSelector: map[string]string{
				"label": "nonempty",
			},
		})

		waitForScheduler()
		verifyResult(c, 0, 1, ns)
	})

	It("validates that a pod with an invalid NodeAffinity is rejected", func() {
		By("Trying to launch a pod with an invalid Affinity data.")
		podName := "without-label"
		_, err := c.Pods(ns).Create(initPausePod(f, pausePodConfig{
			Name: podName,
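			// The affinity annotation below is intentionally malformed (empty matchExpressions and a
			// trailing comma), so the create call is expected to fail with an Invalid error.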
			Affinity: `{
				"nodeAffinity": {
					"requiredDuringSchedulingIgnoredDuringExecution": {
						"nodeSelectorTerms": [{
							"matchExpressions": []
						}]
					},
				}
			}`,
		}))

		if err == nil || !errors.IsInvalid(err) {
			framework.Failf("Expect error of invalid, got : %v", err)
		}

		// Wait a bit to allow scheduler to do its thing if the pod is not rejected.
		waitForScheduler()
	})

	It("validates that NodeSelector is respected if matching [Conformance]", func() {
		nodeName := getNodeThatCanRunPod(f)

		By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "42"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		pod := createPausePod(f, pausePodConfig{
			Name: labelPodName,
			NodeSelector: map[string]string{
				"kubernetes.io/hostname": nodeName,
				k: v,
			},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion))
		labelPod, err := c.Pods(ns).Get(labelPodName)
		framework.ExpectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// The test nodes do not carry the requested label, hence it should be impossible to schedule a Pod with a
	// non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
	It("validates that NodeAffinity is respected if not matching", func() {
		By("Trying to schedule Pod with nonempty NodeSelector.")
		podName := "restricted-pod"

		framework.WaitForStableCluster(c, masterNodes)

		createPausePod(f, pausePodConfig{
			Name: podName,
			Affinity: `{
				"nodeAffinity": {
					"requiredDuringSchedulingIgnoredDuringExecution": {
						"nodeSelectorTerms": [
							{
								"matchExpressions": [{
									"key": "foo",
									"operator": "In",
									"values": ["bar", "value2"]
								}]
							},
							{
								"matchExpressions": [{
									"key": "diffkey",
									"operator": "In",
									"values": ["wrong", "value2"]
								}]
							}
						]
					}
				}
			}`,
			Labels: map[string]string{"name": "restricted"},
		})
		waitForScheduler()
		verifyResult(c, 0, 1, ns)
	})

	// Keep the same steps as the NodeSelector test,
	// but specify the Affinity in Pod.Annotations instead of NodeSelector.
	It("validates that required NodeAffinity setting is respected if matching", func() {
		nodeName := getNodeThatCanRunPod(f)

		By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "42"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		pod := createPausePod(f, pausePodConfig{
			Name: labelPodName,
			Affinity: `{
				"nodeAffinity": {
					"requiredDuringSchedulingIgnoredDuringExecution": {
						"nodeSelectorTerms": [{
							"matchExpressions": [{
								"key": "kubernetes.io/hostname",
								"operator": "In",
								"values": ["` + nodeName + `"]
							},{
								"key": "` + k + `",
								"operator": "In",
								"values": ["` + v + `"]
							}]
						}]
					}
				}
			}`,
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion))
		labelPod, err := c.Pods(ns).Get(labelPodName)
		framework.ExpectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// Verify that an escaped JSON string of NodeAffinity in a YAML PodSpec works.
	It("validates that embedding the JSON NodeAffinity setting as a string in the annotation value work", func() {
		nodeName := getNodeThatCanRunPod(f)

		By("Trying to apply a label with fake az info on the found node.")
		k := "kubernetes.io/e2e-az-name"
		v := "e2e-az1"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.")
		pod := createPodWithNodeAffinity(f)

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, ""))
		labelPod, err := c.Pods(ns).Get(pod.Name)
		framework.ExpectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// The labelSelector operator is DoesNotExist, but values are supplied in the requiredDuringSchedulingIgnoredDuringExecution
	// part of podAffinity, so validation fails.
	It("validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid", func() {
		By("Trying to launch a pod with an invalid pod Affinity data.")
		podName := "without-label-" + string(uuid.NewUUID())
		_, err := c.Pods(ns).Create(initPausePod(f, pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "without-label"},
			Affinity: `{
				"podAffinity": {
					"requiredDuringSchedulingIgnoredDuringExecution": [{
						"weight": 0,
						"podAffinityTerm": {
							"labelSelector": {
								"matchExpressions": [{
									"key": "service",
									"operator": "DoesNotExist",
									"values":["securityscan"]
								}]
							},
							"namespaces": [],
							"topologyKey": "kubernetes.io/hostname"
						}
					}]
				 }
			}`,
		}))

		if err == nil || !errors.IsInvalid(err) {
			framework.Failf("Expect error of invalid, got : %v", err)
		}

		// Wait a bit to allow scheduler to do its thing if the pod is not rejected.
		waitForScheduler()
	})

	// The test nodes do not have any matching pod, hence it should be impossible to schedule a Pod with pod affinity.
	It("validates that Inter-pod-Affinity is respected if not matching", func() {
		By("Trying to schedule Pod with nonempty Pod Affinity.")
		framework.WaitForStableCluster(c, masterNodes)
		podName := "without-label-" + string(uuid.NewUUID())
		createPausePod(f, pausePodConfig{
			Name: podName,
			Affinity: `{
				"podAffinity": {
					"requiredDuringSchedulingIgnoredDuringExecution": [{
						"labelSelector":{
							"matchExpressions": [{
								"key": "service",
								"operator": "In",
								"values": ["securityscan", "value2"]
							}]
						},
						"topologyKey": "kubernetes.io/hostname"
					}]
				}
			}`,
		})

		waitForScheduler()
		verifyResult(c, 0, 1, ns)
	})

	// test the pod affinity successful matching scenario.
	It("validates that InterPodAffinity is respected if matching", func() {
		nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)

		By("Trying to apply a random label on the found node.")
		k := "e2e.inter-pod-affinity.kubernetes.io/zone"
		v := "china-e2etest"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to launch the pod, now with podAffinity.")
		labelPodName := "with-podaffinity-" + string(uuid.NewUUID())
		pod := createPausePod(f, pausePodConfig{
			Name: labelPodName,
			Affinity: `{
				"podAffinity": {
					"requiredDuringSchedulingIgnoredDuringExecution": [{
						"labelSelector": {
							"matchExpressions": [{
								"key": "security",
								"operator": "In",
								"values": ["S1", "value2"]
							}]
						},
						"topologyKey": "` + k + `",
						"namespaces":["` + ns + `"]
					}]
				}
			}`,
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion))
		labelPod, err := c.Pods(ns).Get(labelPodName)
		framework.ExpectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// Test that when the pod anti-affinity rule is not satisfied, the pod stays pending.
	It("validates that InterPodAntiAffinity is respected if matching 2", func() {
		// launch pods to find nodes which can launch a pod. We intentionally do
		// not just take the node list and choose the first and the second of them.
		// Depending on the cluster and the scheduler it might be that a "normal" pod
		// cannot be scheduled onto it.
		By("Launching two pods on two distinct nodes to get two node names")
		CreateHostPortPods(f, "host-port", 2, true)
		defer framework.DeleteRCAndPods(c, f.ClientSet, ns, "host-port")
		podList, err := c.Pods(ns).List(api.ListOptions{})
		ExpectNoError(err)
		Expect(len(podList.Items)).To(Equal(2))
		nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
		Expect(nodeNames[0]).ToNot(Equal(nodeNames[1]))

		By("Applying a random label to both nodes.")
		k := "e2e.inter-pod-affinity.kubernetes.io/zone"
		v := "china-e2etest"
		for _, nodeName := range nodeNames {
			framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
			framework.ExpectNodeHasLabel(cs, nodeName, k, v)
			defer framework.RemoveLabelOffNode(cs, nodeName, k)
		}

		By("Trying to launch another pod on the first node with the service label.")
		podName := "with-label-" + string(uuid.NewUUID())

		runPausePod(f, pausePodConfig{
			Name:         podName,
			Labels:       map[string]string{"service": "S1"},
			NodeSelector: map[string]string{k: v}, // only launch on our two nodes
		})

		By("Trying to launch another pod, now with podAntiAffinity with same Labels.")
		labelPodName := "with-podantiaffinity-" + string(uuid.NewUUID())
		createPausePod(f, pausePodConfig{
			Name:         labelPodName,
			Labels:       map[string]string{"service": "Diff"},
			NodeSelector: map[string]string{k: v}, // only launch on our two nodes, contradicting the podAntiAffinity
			Affinity: `{
				"podAntiAffinity": {
					"requiredDuringSchedulingIgnoredDuringExecution": [{
						"labelSelector":{
							"matchExpressions": [{
								"key": "service",
								"operator": "In",
								"values": ["S1", "value2"]
							}]
						},
						"topologyKey": "` + k + `",
						"namespaces": ["` + ns + `"]
					}]
				}
			}`,
		})

		waitForScheduler()
		verifyResult(c, 3, 1, ns)
	})

	// test the pod affinity successful matching scenario with multiple Label Operators.
	It("validates that InterPodAffinity is respected if matching with multiple Affinities", func() {
		nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)

		By("Trying to apply a random label on the found node.")
		k := "e2e.inter-pod-affinity.kubernetes.io/zone"
		v := "kubernetes-e2e"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to launch the pod, now with multiple pod affinities with diff LabelOperators.")
		labelPodName := "with-podaffinity-" + string(uuid.NewUUID())
		pod := createPausePod(f, pausePodConfig{
			Name: labelPodName,
			Affinity: `{
				"podAffinity": {
					"requiredDuringSchedulingIgnoredDuringExecution": [{
						"labelSelector":{
							"matchExpressions": [{
								"key": "security",
								"operator": "In",
								"values": ["S1", "value2"]
							},
							{
								"key": "security",
								"operator": "NotIn",
								"values": ["S2"]
							},
							{
								"key": "security",
								"operator":"Exists"
							}]
						},
						"topologyKey": "` + k + `"
					}]
				}
			}`,
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion))
		labelPod, err := c.Pods(ns).Get(labelPodName)
		framework.ExpectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// test the pod affinity and anti affinity successful matching scenario.
	It("validates that InterPod Affinity and AntiAffinity is respected if matching", func() {
		nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)

		By("Trying to apply a random label on the found node.")
		k := "e2e.inter-pod-affinity.kubernetes.io/zone"
		v := "e2e-testing"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to launch the pod, now with Pod affinity and anti affinity.")
		pod := createPodWithPodAffinity(f, k)

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, pod.ResourceVersion))
		labelPod, err := c.Pods(ns).Get(pod.Name)
		framework.ExpectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// Verify that an escaped JSON string of pod affinity and pod anti affinity in a YAML PodSpec works.
	It("validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work", func() {
		nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)

		By("Trying to apply a label with fake az info on the found node.")
		k := "e2e.inter-pod-affinity.kubernetes.io/zone"
		v := "e2e-az1"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		By("Trying to launch a pod that with PodAffinity & PodAntiAffinity setting as embedded JSON string in the annotation value.")
		pod := createPodWithPodAffinity(f, "kubernetes.io/hostname")
		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, pod.ResourceVersion))
		labelPod, err := c.Pods(ns).Get(pod.Name)
		framework.ExpectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// 1. Run a pod to get an available node, then delete the pod
	// 2. Taint the node with a random taint
	// 3. Try to relaunch the pod with tolerations that tolerate the taint on the node,
	// and with the pod's nodeName set to the name of the node found in step 1
	It("validates that taints-tolerations is respected if matching", func() {
		nodeName := getNodeThatCanRunPodWithoutToleration(f)

		By("Trying to apply a random taint on the found node.")
		testTaint := api.Taint{
			Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
			Value:  "testing-taint-value",
			Effect: api.TaintEffectNoSchedule,
		}
		framework.AddOrUpdateTaintOnNode(c, nodeName, testTaint)
		framework.ExpectNodeHasTaint(c, nodeName, testTaint)
		defer framework.RemoveTaintOffNode(c, nodeName, testTaint)

		By("Trying to apply a random label on the found node.")
		labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
		labelValue := "testing-label-value"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
		framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
		defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)

		By("Trying to relaunch the pod, now with tolerations.")
		tolerationPodName := "with-tolerations"
		pod := createPausePod(f, pausePodConfig{
			Name: tolerationPodName,
			Annotations: map[string]string{
				"scheduler.alpha.kubernetes.io/tolerations": `
					[
						{
							"key": "` + testTaint.Key + `",
							"value": "` + testTaint.Value + `",
							"effect": "` + string(testTaint.Effect) + `"
						}
					]`,
			},
			NodeSelector: map[string]string{labelKey: labelValue},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new taint yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, tolerationPodName, pod.ResourceVersion))
		deployedPod, err := c.Pods(ns).Get(tolerationPodName)
		framework.ExpectNoError(err)
		Expect(deployedPod.Spec.NodeName).To(Equal(nodeName))
	})

	// 1. Run a pod to get an available node, then delete the pod
	// 2. Taint the node with a random taint
	// 3. Try to relaunch the pod, still with no tolerations,
	// and the pod's nodeName set to the name of the node found in step 1
	It("validates that taints-tolerations is respected if not matching", func() {
		nodeName := getNodeThatCanRunPodWithoutToleration(f)

		By("Trying to apply a random taint on the found node.")
		testTaint := api.Taint{
			Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
			Value:  "testing-taint-value",
			Effect: api.TaintEffectNoSchedule,
		}
		framework.AddOrUpdateTaintOnNode(c, nodeName, testTaint)
		framework.ExpectNodeHasTaint(c, nodeName, testTaint)
		defer framework.RemoveTaintOffNode(c, nodeName, testTaint)

		By("Trying to apply a random label on the found node.")
		labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
		labelValue := "testing-label-value"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
		framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
		defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)

		By("Trying to relaunch the pod, still no tolerations.")
		podNameNoTolerations := "still-no-tolerations"
		createPausePod(f, pausePodConfig{
			Name:         podNameNoTolerations,
			NodeSelector: map[string]string{labelKey: labelValue},
		})

		waitForScheduler()
		verifyResult(c, 0, 1, ns)

		By("Removing taint off the node")
		framework.RemoveTaintOffNode(c, nodeName, testTaint)

		waitForScheduler()
		verifyResult(c, 1, 0, ns)
	})
})
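
The tolerations annotation in the matching test above is assembled by hand-concatenating a JSON string, which is easy to get wrong once quoting or escaping is involved. Below is a minimal sketch, not taken from the test suite, of producing the same payload with encoding/json; the struct and helper names are illustrative only, and the result would be stored under the same "scheduler.alpha.kubernetes.io/tolerations" annotation key used above.

package main

import (
	"encoding/json"
	"fmt"
)

// tolerationEntry mirrors the fields written into the annotation above:
// "key", "value" and "effect".
type tolerationEntry struct {
	Key    string `json:"key"`
	Value  string `json:"value"`
	Effect string `json:"effect"`
}

// buildTolerationsAnnotation is a hypothetical helper that produces the same
// JSON payload as the hand-built string in the test, letting encoding/json
// handle quoting and escaping.
func buildTolerationsAnnotation(key, value, effect string) (string, error) {
	b, err := json.Marshal([]tolerationEntry{{Key: key, Value: value, Effect: effect}})
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	ann, err := buildTolerationsAnnotation("kubernetes.io/e2e-taint-key-example", "testing-taint-value", "NoSchedule")
	if err != nil {
		panic(err)
	}
	fmt.Println(ann)
}
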
Code Example #22
var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
	if isAppArmorEnabled() {
		BeforeEach(func() {
			By("Loading AppArmor profiles for testing")
			framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
		})
		Context("when running with AppArmor", func() {
			f := framework.NewDefaultFramework("apparmor-test")

			It("should reject an unloaded profile", func() {
				status := runAppArmorTest(f, false, apparmor.ProfileNamePrefix+"non-existant-profile")
				expectSoftRejection(status)
			})
			It("should enforce a profile blocking writes", func() {
				status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write")
				if len(status.ContainerStatuses) == 0 {
					framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
					return
				}
				state := status.ContainerStatuses[0].State.Terminated
				Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)

			})
			It("should enforce a permissive profile", func() {
				status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
				if len(status.ContainerStatuses) == 0 {
					framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
					return
				}
				state := status.ContainerStatuses[0].State.Terminated
				Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
			})
		})
	} else {
		Context("when running without AppArmor", func() {
			f := framework.NewDefaultFramework("apparmor-test")

			It("should reject a pod with an AppArmor profile", func() {
				status := runAppArmorTest(f, false, apparmor.ProfileRuntimeDefault)
				expectSoftRejection(status)
			})
		})
	}
})
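
The profiles passed to runAppArmorTest above end up on a pod through the beta per-container AppArmor annotation. The fragment below is a sketch of that wiring under the assumption that the key follows the documented "container.apparmor.security.beta.kubernetes.io/<container-name>" convention; runAppArmorTest itself is not shown in this excerpt, and the image is a placeholder.

// Sketch only: attach an AppArmor profile to a container via the beta
// annotation. Assumes the same v1 API types and the apparmor constants
// referenced by the tests above.
profile := apparmor.ProfileNamePrefix + apparmorProfilePrefix + "deny-write"
pod := &v1.Pod{
	ObjectMeta: v1.ObjectMeta{
		Name: "apparmor-annotation-sketch",
		Annotations: map[string]string{
			// assumed beta annotation key format: prefix + container name
			"container.apparmor.security.beta.kubernetes.io/test-container": profile,
		},
	},
	Spec: v1.PodSpec{
		RestartPolicy: v1.RestartPolicyNever,
		Containers: []v1.Container{
			{
				Name:    "test-container",
				Image:   "gcr.io/google_containers/busybox:1.24", // placeholder image
				Command: []string{"sh", "-c", "echo test > /tmp/out"},
			},
		},
	},
}
_ = pod
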
Code Example #23
var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
	var nodeNames sets.String
	f := framework.NewDefaultFramework("kubelet-perf")
	var om *framework.RuntimeOperationMonitor
	var rm *framework.ResourceMonitor

	BeforeEach(func() {
		// Wait until the image prepull pods have completed so that they don't
		// affect the runtime CPU usage. Fail the test if prepulling cannot
		// finish in time.
		if err := framework.WaitForPodsSuccess(f.ClientSet, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
			framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout)
		}
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeNames = sets.NewString()
		for _, node := range nodes.Items {
			nodeNames.Insert(node.Name)
		}
		om = framework.NewRuntimeOperationMonitor(f.ClientSet)
		rm = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingPeriod)
		rm.Start()
	})

	AfterEach(func() {
		rm.Stop()
		result := om.GetLatestRuntimeOperationErrorRate()
		framework.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
	})
	framework.KubeDescribe("regular resource usage tracking", func() {
		// We assume that the scheduler will make reasonable scheduling choices
		// and assign ~N pods on the node.
		// Although we want to track N pods per node, there are N + add-on pods
		// in the cluster. The cluster add-on pods can be distributed unevenly
		// among the nodes because they are created during the cluster
		// initialization. This *noise* is obvious when N is small. We
		// deliberately set higher resource usage limits to account for the
		// noise.
		rTests := []resourceTest{
			{
				podsPerNode: 0,
				cpuLimits: framework.ContainersCPUSummary{
					stats.SystemContainerKubelet: {0.50: 0.06, 0.95: 0.08},
					stats.SystemContainerRuntime: {0.50: 0.05, 0.95: 0.06},
				},
				// We set the memory limits generously because the distribution
				// of the addon pods affects the memory usage on each node.
				memLimits: framework.ResourceUsagePerContainer{
					stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 70 * 1024 * 1024},
					// The detail can be found at https://github.com/kubernetes/kubernetes/issues/28384#issuecomment-244158892
					stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 125 * 1024 * 1024},
				},
			},
			{
				podsPerNode: 35,
				cpuLimits: framework.ContainersCPUSummary{
					stats.SystemContainerKubelet: {0.50: 0.12, 0.95: 0.14},
					stats.SystemContainerRuntime: {0.50: 0.05, 0.95: 0.07},
				},
				// We set the memory limits generously because the distribution
				// of the addon pods affects the memory usage on each node.
				memLimits: framework.ResourceUsagePerContainer{
					stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 70 * 1024 * 1024},
					stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
				},
			},
			{
				cpuLimits: framework.ContainersCPUSummary{
					stats.SystemContainerKubelet: {0.50: 0.17, 0.95: 0.22},
					stats.SystemContainerRuntime: {0.50: 0.06, 0.95: 0.09},
				},
				podsPerNode: 100,
				// We set the memory limits generously because the distribution
				// of the addon pods affects the memory usage on each node.
				memLimits: framework.ResourceUsagePerContainer{
					stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 80 * 1024 * 1024},
					stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
				},
			},
		}
		for _, testArg := range rTests {
			itArg := testArg
			podsPerNode := itArg.podsPerNode
			name := fmt.Sprintf(
				"resource tracking for %d pods per node", podsPerNode)
			It(name, func() {
				runResourceTrackingTest(f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits)
			})
		}
	})
	framework.KubeDescribe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() {
		density := []int{100}
		for i := range density {
			podsPerNode := density[i]
			name := fmt.Sprintf(
				"resource tracking for %d pods per node", podsPerNode)
			It(name, func() {
				runResourceTrackingTest(f, podsPerNode, nodeNames, rm, nil, nil)
			})
		}
	})
})
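
Each resourceTest entry above pairs per-percentile CPU limits with per-container memory RSS limits. The sketch below, using simplified stand-in types rather than the framework's real ContainersCPUSummary and ResourceUsagePerContainer, shows the kind of comparison such a table implies; the names checkLimits, cpuSummary and memUsage are illustrative only, not the framework's API.

package main

import "fmt"

type cpuSummary map[string]map[float64]float64 // container -> percentile -> CPU cores
type memUsage map[string]uint64                // container -> RSS bytes

// checkLimits reports every observed value that exceeds its configured limit.
func checkLimits(observedCPU, cpuLimits cpuSummary, observedMem, memLimits memUsage) []error {
	var violations []error
	for container, limits := range cpuLimits {
		for pct, limit := range limits {
			if got := observedCPU[container][pct]; got > limit {
				violations = append(violations, fmt.Errorf("%s: CPU p%.0f usage %.3f > limit %.3f", container, pct*100, got, limit))
			}
		}
	}
	for container, limit := range memLimits {
		if got := observedMem[container]; got > limit {
			violations = append(violations, fmt.Errorf("%s: RSS %d > limit %d bytes", container, got, limit))
		}
	}
	return violations
}

func main() {
	cpuLimits := cpuSummary{"kubelet": {0.50: 0.12, 0.95: 0.14}}
	memLimits := memUsage{"kubelet": 70 * 1024 * 1024}
	observedCPU := cpuSummary{"kubelet": {0.50: 0.10, 0.95: 0.16}} // p95 over the limit
	observedMem := memUsage{"kubelet": 60 * 1024 * 1024}
	for _, v := range checkLimits(observedCPU, cpuLimits, observedMem, memLimits) {
		fmt.Println("violation:", v)
	}
}
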
Code Example #24
File: privileged.go Project: RyanBinfeng/kubernetes
type PrivilegedPodTestConfig struct {
	privilegedPod *api.Pod
	f             *framework.Framework
	hostExecPod   *api.Pod
}

var _ = framework.KubeDescribe("PrivilegedPod", func() {
	f := framework.NewDefaultFramework("e2e-privilegedpod")
	config := &PrivilegedPodTestConfig{
		f: f,
	}
	It("should test privileged pod", func() {
		config.hostExecPod = framework.LaunchHostExecPod(config.f.Client, config.f.Namespace.Name, "hostexec")

		By("Creating a privileged pod")
		config.createPrivilegedPod()

		By("Executing privileged command on privileged container")
		config.runPrivilegedCommandOnPrivilegedContainer()

		By("Executing privileged command on non-privileged container")
		config.runPrivilegedCommandOnNonPrivilegedContainer()
	})
})

func (config *PrivilegedPodTestConfig) runPrivilegedCommandOnPrivilegedContainer() {
	outputMap := config.dialFromContainer(config.privilegedPod.Status.PodIP, privilegedHttpPort)
	if len(outputMap["error"]) > 0 {
		framework.Failf("Privileged command failed unexpectedly on privileged container, output:%v", outputMap)
	}
}
Code Example #25
var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
	f := framework.NewDefaultFramework("autoscaling")
	var c clientset.Interface
	var nodeCount int
	var coresPerNode int
	var memCapacityMb int
	var originalSizes map[string]int

	BeforeEach(func() {
		c = f.ClientSet
		framework.SkipUnlessProviderIs("gce", "gke")

		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())
		cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
		mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
		coresPerNode = int((&cpu).MilliValue() / 1000)
		memCapacityMb = int((&mem).Value() / 1024 / 1024)

		originalSizes = make(map[string]int)
		sum := 0
		for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
			size, err := GroupSize(mig)
			framework.ExpectNoError(err)
			By(fmt.Sprintf("Initial size of %s: %d", mig, size))
			originalSizes[mig] = size
			sum += size
		}
		Expect(nodeCount).Should(Equal(sum))

		if framework.ProviderIs("gke") {
			val, err := isAutoscalerEnabled(3)
			framework.ExpectNoError(err)
			if !val {
				err = enableAutoscaler("default-pool", 3, 5)
				framework.ExpectNoError(err)
			}
		}
	})

	AfterEach(func() {
		By("Restoring initial size of the cluster")
		setMigSizes(originalSizes)
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount, scaleDownTimeout))
	})

	It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		By("Creating unschedulable pod")
		ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

		By("Waiting for scale up hoping it won't happen")
		// Verify that the appropriate event was generated.
		eventFound := false
	EventsLoop:
		for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
			By("Waiting for NotTriggerScaleUp event")
			events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(v1.ListOptions{})
			framework.ExpectNoError(err)

			for _, e := range events.Items {
				if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") {
					By("NotTriggerScaleUp event found")
					eventFound = true
					break EventsLoop
				}
			}
		}
		Expect(eventFound).Should(Equal(true))
		// Verify that the cluster size has not changed.
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size <= nodeCount }, time.Second))
	})

	It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

		// Verify that the cluster size has increased
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
	})

	It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		framework.SkipUnlessProviderIs("gke")

		By("Creating new node-pool with one n1-standard-4 machine")
		const extraPoolName = "extra-pool"
		addNodePool(extraPoolName, "n1-standard-4", 1)
		defer deleteNodePool(extraPoolName)
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
		glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")

		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

		// Verify that the cluster size has increased
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
	})

	It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		framework.SkipUnlessProviderIs("gke")

		By("Creating new node-pool with one n1-standard-4 machine")
		const extraPoolName = "extra-pool"
		addNodePool(extraPoolName, "n1-standard-4", 1)
		defer deleteNodePool(extraPoolName)
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
		framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
		framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
	})

	It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		CreateHostPortPods(f, "host-port", nodeCount+2, false)
		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "host-port")

		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
	})

	It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		labels := map[string]string{"cluster-autoscaling-test.special-node": "true"}

		By("Finding the smallest MIG")
		minMig := ""
		minSize := nodeCount
		for mig, size := range originalSizes {
			if size <= minSize {
				minMig = mig
				minSize = size
			}
		}

		removeLabels := func(nodesToClean sets.String) {
			By("Removing labels from nodes")
			updateNodeLabels(c, nodesToClean, nil, labels)
		}

		nodes, err := GetGroupNodes(minMig)
		framework.ExpectNoError(err)
		nodesSet := sets.NewString(nodes...)
		defer removeLabels(nodesSet)
		By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
		updateNodeLabels(c, nodesSet, labels, nil)

		CreateNodeSelectorPods(f, "node-selector", minSize+1, labels, false)

		By("Waiting for new node to appear and annotating it")
		WaitForGroupSize(minMig, int32(minSize+1))
		// Verify that the cluster size has increased
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

		newNodes, err := GetGroupNodes(minMig)
		framework.ExpectNoError(err)
		newNodesSet := sets.NewString(newNodes...)
		newNodesSet.Delete(nodes...)
		if len(newNodesSet) > 1 {
			By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
			glog.Infof("Usually only 1 new node is expected, investigating")
			glog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
			if output, err := exec.Command("gcloud", "compute", "instances", "list",
				"--project="+framework.TestContext.CloudConfig.ProjectID,
				"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
				glog.Infof("Gcloud compute instances list: %s", output)
			} else {
				glog.Errorf("Failed to get instances list: %v", err)
			}

			for newNode := range newNodesSet {
				if output, err := exec.Command("gcloud", "compute", "instances", "describe",
					newNode,
					"--project="+framework.TestContext.CloudConfig.ProjectID,
					"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
					glog.Infof("Gcloud compute instances describe: %s", output)
				} else {
					glog.Errorf("Failed to get instances describe: %v", err)
				}
			}

			// TODO: possibly remove broken node from newNodesSet to prevent removeLabel from crashing.
			// However at this moment we DO WANT it to crash so that we don't check all test runs for the
			// rare behavior, but only the broken ones.
		}
		By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
		registeredNodes := sets.NewString()
		for nodeName := range newNodesSet {
			node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{})
			if err == nil && node != nil {
				registeredNodes.Insert(nodeName)
			} else {
				glog.Errorf("Failed to get node %v: %v", nodeName, err)
			}
		}
		By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
		updateNodeLabels(c, registeredNodes, labels, nil)
		defer removeLabels(registeredNodes)

		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
		framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "node-selector"))
	})

	It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		framework.SkipUnlessProviderIs("gke")

		By("Creating new node-pool with one n1-standard-4 machine")
		const extraPoolName = "extra-pool"
		addNodePool(extraPoolName, "n1-standard-4", 1)
		defer deleteNodePool(extraPoolName)
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
		framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))

		By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
		ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

		// Apparently the GKE master is restarted a couple of minutes after the node pool is
		// added, resetting all the timers in the scale-down code. Adding 5 extra minutes to
		// work around this issue.
		// TODO: Remove the extra time when GKE restart is fixed.
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+2, scaleUpTimeout+5*time.Minute))
	})

	It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]", func() {
		By("Manually increase cluster size")
		increasedSize := 0
		newSizes := make(map[string]int)
		for key, val := range originalSizes {
			newSizes[key] = val + 2
			increasedSize += val + 2
		}
		setMigSizes(newSizes)
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size >= increasedSize }, scaleUpTimeout))

		By("Some node should be removed")
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size < increasedSize }, scaleDownTimeout))
	})

	It("should correctly scale down after a node is not needed when there is a non-autoscaled pool [Feature:ClusterSizeAutoscalingScaleDown]", func() {
		framework.SkipUnlessProviderIs("gke")

		By("Manually increase cluster size")
		increasedSize := 0
		newSizes := make(map[string]int)
		for key, val := range originalSizes {
			newSizes[key] = val + 2
			increasedSize += val + 2
		}
		setMigSizes(newSizes)
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size >= increasedSize }, scaleUpTimeout))

		const extraPoolName = "extra-pool"
		addNodePool(extraPoolName, "n1-standard-1", 3)
		defer deleteNodePool(extraPoolName)

		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size >= increasedSize+3 }, scaleUpTimeout))

		By("Some node should be removed")
		// Apparently the GKE master is restarted a couple of minutes after the node pool is
		// added, resetting all the timers in the scale-down code. Adding 10 extra minutes to
		// work around this issue.
		// TODO: Remove the extra time when GKE restart is fixed.
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size < increasedSize+3 }, scaleDownTimeout+10*time.Minute))
	})
})
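
Most of the autoscaling tests above block on WaitForClusterSizeFunc with a size predicate and a timeout. As a rough, self-contained illustration of that pattern (not the framework's actual implementation), the sketch below polls a pluggable size getter until the predicate holds or the deadline passes; waitForSize and its arguments are names assumed here.

package main

import (
	"fmt"
	"time"
)

// waitForSize polls getSize until pred(size) is true or the timeout expires.
func waitForSize(getSize func() (int, error), pred func(int) bool, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		size, err := getSize()
		if err == nil && pred(size) {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v waiting for cluster size predicate (last size %d, last err %v)", timeout, size, err)
		}
		time.Sleep(interval)
	}
}

func main() {
	nodeCount := 3
	// Stand-in for listing ready, schedulable nodes: pretend one node joins per poll.
	fakeSize := func() (int, error) { nodeCount++; return nodeCount, nil }
	err := waitForSize(fakeSize, func(size int) bool { return size >= 5 }, 10*time.Millisecond, time.Second)
	fmt.Println("scale up observed, err =", err)
}
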
Code Example #26
File: ubernetes_lite.go Project: ZenoRewn/origin
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Ubernetes Lite", func() {
	f := framework.NewDefaultFramework("ubernetes-lite")
	var zoneCount int
	var err error
	image := "gcr.io/google_containers/serve_hostname:v1.4"
	BeforeEach(func() {
		if zoneCount <= 0 {
			zoneCount, err = getZoneCount(f.Client)
			Expect(err).NotTo(HaveOccurred())
		}
		By(fmt.Sprintf("Checking for multi-zone cluster.  Zone count = %d", zoneCount))
		framework.SkipUnlessAtLeast(zoneCount, 2, "Zone count is %d, only run for multi-zone clusters, skipping test")
		framework.SkipUnlessProviderIs("gce", "gke", "aws")
		// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
	})
	It("should spread the pods of a service across zones", func() {
		SpreadServiceOrFail(f, (2*zoneCount)+1, image)
	})

	It("should spread the pods of a replication controller across zones", func() {
		SpreadRCOrFail(f, int32((2*zoneCount)+1), image)
	})
})

// Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
	// First create the service
Code Example #27
var _ = framework.KubeDescribe("Docker Containers", func() {
	f := framework.NewDefaultFramework("containers")

	It("should use the image defaults if command and args are blank [Conformance]", func() {
		f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
			"[/ep default arguments]",
		})
	})

	It("should be able to override the image's default arguments (docker cmd) [Conformance]", func() {
		pod := entrypointTestPod()
		pod.Spec.Containers[0].Args = []string{"override", "arguments"}

		f.TestContainerOutput("override arguments", pod, 0, []string{
			"[/ep override arguments]",
		})
	})

	// Note: when you override the entrypoint, the image's arguments (docker cmd)
	// are ignored.
	It("should be able to override the image's default command (docker entrypoint) [Conformance]", func() {
		pod := entrypointTestPod()
		pod.Spec.Containers[0].Command = []string{"/ep-2"}

		f.TestContainerOutput("override command", pod, 0, []string{
			"[/ep-2]",
		})
	})

	It("should be able to override the image's default command and arguments [Conformance]", func() {
		pod := entrypointTestPod()
		pod.Spec.Containers[0].Command = []string{"/ep-2"}
		pod.Spec.Containers[0].Args = []string{"override", "arguments"}

		f.TestContainerOutput("override all", pod, 0, []string{
			"[/ep-2 override arguments]",
		})
	})
})
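
The four cases above pin down how Command and Args interact with the image's ENTRYPOINT and CMD: leaving both unset uses the image defaults, setting Args alone keeps the entrypoint, and setting Command alone drops the image's default arguments. The self-contained sketch below models that resolution rule directly (it is an illustration of the documented behavior, not Kubernetes code), and its printed output matches the four expected strings in the tests.

package main

import "fmt"

// effectiveCommandLine resolves the container command line from the image's
// ENTRYPOINT/CMD and the pod's Command/Args overrides.
func effectiveCommandLine(imageEntrypoint, imageCmd, command, args []string) []string {
	switch {
	case len(command) == 0 && len(args) == 0:
		return append(append([]string{}, imageEntrypoint...), imageCmd...) // image defaults
	case len(command) == 0:
		return append(append([]string{}, imageEntrypoint...), args...) // Args replaces CMD
	case len(args) == 0:
		return append([]string{}, command...) // Command replaces ENTRYPOINT; image CMD is ignored
	default:
		return append(append([]string{}, command...), args...)
	}
}

func main() {
	entrypoint, cmd := []string{"/ep"}, []string{"default", "arguments"}
	fmt.Println(effectiveCommandLine(entrypoint, cmd, nil, nil))                               // [/ep default arguments]
	fmt.Println(effectiveCommandLine(entrypoint, cmd, nil, []string{"override", "arguments"})) // [/ep override arguments]
	fmt.Println(effectiveCommandLine(entrypoint, cmd, []string{"/ep-2"}, nil))                 // [/ep-2]
	fmt.Println(effectiveCommandLine(entrypoint, cmd, []string{"/ep-2"}, []string{"override", "arguments"})) // [/ep-2 override arguments]
}
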
Code Example #28
File: summary_test.go Project: nak3/kubernetes
var _ = framework.KubeDescribe("Summary API", func() {
	f := framework.NewDefaultFramework("summary-test")
	Context("when querying /stats/summary", func() {
		AfterEach(func() {
			if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
				framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
			}
		})
		It("should report resource usage through the stats api", func() {
			const pod0 = "stats-busybox-0"
			const pod1 = "stats-busybox-1"

			By("Creating test pods")
			createSummaryTestPods(f, pod0, pod1)
			// Wait for cAdvisor to collect 2 stats points
			time.Sleep(15 * time.Second)

			// Setup expectations.
			const (
				kb int64 = 1000
				mb int64 = 1000 * kb
				gb int64 = 1000 * mb
				tb int64 = 1000 * gb

				maxStartAge = time.Hour * 24 * 365 // 1 year
				maxStatsAge = time.Minute
			)
			fsCapacityBounds := bounded(100*mb, 100*gb)
			// Expectations for system containers.
			sysContExpectations := gstruct.MatchAllFields(gstruct.Fields{
				"Name":      gstruct.Ignore(),
				"StartTime": recent(maxStartAge),
				"CPU": ptrMatchAllFields(gstruct.Fields{
					"Time":                 recent(maxStatsAge),
					"UsageNanoCores":       bounded(100000, 2E9),
					"UsageCoreNanoSeconds": bounded(10000000, 1E15),
				}),
				"Memory": ptrMatchAllFields(gstruct.Fields{
					"Time": recent(maxStatsAge),
					// We don't limit system container memory.
					"AvailableBytes":  BeNil(),
					"UsageBytes":      bounded(1*mb, 10*gb),
					"WorkingSetBytes": bounded(1*mb, 10*gb),
					"RSSBytes":        bounded(1*mb, 1*gb),
					"PageFaults":      bounded(1000, 1E9),
					"MajorPageFaults": bounded(0, 100000),
				}),
				"Rootfs":             BeNil(),
				"Logs":               BeNil(),
				"UserDefinedMetrics": BeEmpty(),
			})
			systemContainers := gstruct.Elements{
				"kubelet": sysContExpectations,
				"runtime": sysContExpectations,
			}
			// The Kubelet only manages the 'misc' system container if the host is not running systemd.
			if !systemdutil.IsRunningSystemd() {
				framework.Logf("Host not running systemd; expecting 'misc' system container.")
				systemContainers["misc"] = sysContExpectations
			}
			// Expectations for pods.
			podExpectations := gstruct.MatchAllFields(gstruct.Fields{
				"PodRef":    gstruct.Ignore(),
				"StartTime": recent(maxStartAge),
				"Containers": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
					"busybox-container": gstruct.MatchAllFields(gstruct.Fields{
						"Name":      Equal("busybox-container"),
						"StartTime": recent(maxStartAge),
						"CPU": ptrMatchAllFields(gstruct.Fields{
							"Time":                 recent(maxStatsAge),
							"UsageNanoCores":       bounded(100000, 100000000),
							"UsageCoreNanoSeconds": bounded(10000000, 1000000000),
						}),
						"Memory": ptrMatchAllFields(gstruct.Fields{
							"Time":            recent(maxStatsAge),
							"AvailableBytes":  bounded(1*mb, 10*mb),
							"UsageBytes":      bounded(10*kb, 5*mb),
							"WorkingSetBytes": bounded(10*kb, mb),
							"RSSBytes":        bounded(1*kb, mb),
							"PageFaults":      bounded(100, 100000),
							"MajorPageFaults": bounded(0, 10),
						}),
						"Rootfs": ptrMatchAllFields(gstruct.Fields{
							"AvailableBytes": fsCapacityBounds,
							"CapacityBytes":  fsCapacityBounds,
							"UsedBytes":      bounded(kb, 10*mb),
							"InodesFree":     bounded(1E4, 1E8),
							"Inodes":         bounded(1E4, 1E8),
							"InodesUsed":     bounded(0, 1E8),
						}),
						"Logs": ptrMatchAllFields(gstruct.Fields{
							"AvailableBytes": fsCapacityBounds,
							"CapacityBytes":  fsCapacityBounds,
							"UsedBytes":      bounded(kb, 10*mb),
							"InodesFree":     bounded(1E4, 1E8),
							"Inodes":         bounded(1E4, 1E8),
							"InodesUsed":     bounded(0, 1E8),
						}),
						"UserDefinedMetrics": BeEmpty(),
					}),
				}),
				"Network": ptrMatchAllFields(gstruct.Fields{
					"Time":     recent(maxStatsAge),
					"RxBytes":  bounded(10, 10*mb),
					"RxErrors": bounded(0, 1000),
					"TxBytes":  bounded(10, 10*mb),
					"TxErrors": bounded(0, 1000),
				}),
				"VolumeStats": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
					"test-empty-dir": gstruct.MatchAllFields(gstruct.Fields{
						"Name": Equal("test-empty-dir"),
						"FsStats": gstruct.MatchAllFields(gstruct.Fields{
							"AvailableBytes": fsCapacityBounds,
							"CapacityBytes":  fsCapacityBounds,
							"UsedBytes":      bounded(kb, 1*mb),
							"InodesFree":     bounded(1E4, 1E8),
							"Inodes":         bounded(1E4, 1E8),
							"InodesUsed":     bounded(0, 1E8),
						}),
					}),
				}),
			})
			matchExpectations := ptrMatchAllFields(gstruct.Fields{
				"Node": gstruct.MatchAllFields(gstruct.Fields{
					"NodeName":         Equal(framework.TestContext.NodeName),
					"StartTime":        recent(maxStartAge),
					"SystemContainers": gstruct.MatchAllElements(summaryObjectID, systemContainers),
					"CPU": ptrMatchAllFields(gstruct.Fields{
						"Time":                 recent(maxStatsAge),
						"UsageNanoCores":       bounded(100E3, 2E9),
						"UsageCoreNanoSeconds": bounded(1E9, 1E15),
					}),
					"Memory": ptrMatchAllFields(gstruct.Fields{
						"Time":            recent(maxStatsAge),
						"AvailableBytes":  bounded(100*mb, 100*gb),
						"UsageBytes":      bounded(10*mb, 10*gb),
						"WorkingSetBytes": bounded(10*mb, 10*gb),
						"RSSBytes":        bounded(1*kb, 1*gb),
						"PageFaults":      bounded(1000, 1E9),
						"MajorPageFaults": bounded(0, 100000),
					}),
					// TODO(#28407): Handle non-eth0 network interface names.
					"Network": Or(BeNil(), ptrMatchAllFields(gstruct.Fields{
						"Time":     recent(maxStatsAge),
						"RxBytes":  bounded(1*mb, 100*gb),
						"RxErrors": bounded(0, 100000),
						"TxBytes":  bounded(10*kb, 10*gb),
						"TxErrors": bounded(0, 100000),
					})),
					"Fs": ptrMatchAllFields(gstruct.Fields{
						"AvailableBytes": fsCapacityBounds,
						"CapacityBytes":  fsCapacityBounds,
						"UsedBytes":      bounded(kb, 10*gb),
						"InodesFree":     bounded(1E4, 1E8),
						"Inodes":         bounded(1E4, 1E8),
						"InodesUsed":     bounded(0, 1E8),
					}),
					"Runtime": ptrMatchAllFields(gstruct.Fields{
						"ImageFs": ptrMatchAllFields(gstruct.Fields{
							"AvailableBytes": fsCapacityBounds,
							"CapacityBytes":  fsCapacityBounds,
							"UsedBytes":      bounded(kb, 10*gb),
							"InodesFree":     bounded(1E4, 1E8),
							"Inodes":         bounded(1E4, 1E8),
							"InodesUsed":     bounded(0, 1E8),
						}),
					}),
				}),
				// Ignore extra pods since the tests run in parallel.
				"Pods": gstruct.MatchElements(summaryObjectID, gstruct.IgnoreExtras, gstruct.Elements{
					fmt.Sprintf("%s::%s", f.Namespace.Name, pod0): podExpectations,
					fmt.Sprintf("%s::%s", f.Namespace.Name, pod1): podExpectations,
				}),
			})

			By("Validating /stats/summary")
			// Give pods a minute to actually start up.
			Eventually(getNodeSummary, 1*time.Minute, 15*time.Second).Should(matchExpectations)
			// Then the summary should match the expectations a few more times.
			Consistently(getNodeSummary, 30*time.Second, 15*time.Second).Should(matchExpectations)
		})
	})
})
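
The expectations above lean on two small helpers that this excerpt does not show, ptrMatchAllFields and bounded. A plausible sketch of them, built only from standard Gomega and gstruct matchers, follows; the real helpers in summary_test.go may differ, so treat this as an illustration of the pattern rather than the actual implementation.

package summarysketch

import (
	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
	"github.com/onsi/gomega/types"
)

// ptrMatchAllFields dereferences a pointer and requires every struct field to match.
func ptrMatchAllFields(fields gstruct.Fields) types.GomegaMatcher {
	return gstruct.PointTo(gstruct.MatchAllFields(fields))
}

// bounded succeeds when a numeric value lies within [lower, upper].
func bounded(lower, upper interface{}) types.GomegaMatcher {
	return gomega.SatisfyAll(
		gomega.BeNumerically(">=", lower),
		gomega.BeNumerically("<=", upper),
	)
}
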
Code Example #29
File: empty_dir.go Project: RyanBinfeng/kubernetes
var _ = framework.KubeDescribe("EmptyDir volumes", func() {

	f := framework.NewDefaultFramework("emptydir")

	Context("when FSGroup is specified [Feature:FSGroup]", func() {
		It("new files should be created with FSGroup ownership when container is root", func() {
			doTestSetgidFSGroup(f, testImageRootUid, api.StorageMediumMemory)
		})

		It("new files should be created with FSGroup ownership when container is non-root", func() {
			doTestSetgidFSGroup(f, testImageNonRootUid, api.StorageMediumMemory)
		})

		It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
			doTest0644FSGroup(f, testImageRootUid, api.StorageMediumMemory)
		})

		It("volume on default medium should have the correct mode using FSGroup", func() {
			doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumDefault)
		})

		It("volume on tmpfs should have the correct mode using FSGroup", func() {
			doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumMemory)
		})
	})

	It("volume on tmpfs should have the correct mode [Conformance]", func() {
		doTestVolumeMode(f, testImageRootUid, api.StorageMediumMemory)
	})

	It("should support (root,0644,tmpfs) [Conformance]", func() {
		doTest0644(f, testImageRootUid, api.StorageMediumMemory)
	})

	It("should support (root,0666,tmpfs) [Conformance]", func() {
		doTest0666(f, testImageRootUid, api.StorageMediumMemory)
	})

	It("should support (root,0777,tmpfs) [Conformance]", func() {
		doTest0777(f, testImageRootUid, api.StorageMediumMemory)
	})

	It("should support (non-root,0644,tmpfs) [Conformance]", func() {
		doTest0644(f, testImageNonRootUid, api.StorageMediumMemory)
	})

	It("should support (non-root,0666,tmpfs) [Conformance]", func() {
		doTest0666(f, testImageNonRootUid, api.StorageMediumMemory)
	})

	It("should support (non-root,0777,tmpfs) [Conformance]", func() {
		doTest0777(f, testImageNonRootUid, api.StorageMediumMemory)
	})

	It("volume on default medium should have the correct mode [Conformance]", func() {
		doTestVolumeMode(f, testImageRootUid, api.StorageMediumDefault)
	})

	It("should support (root,0644,default) [Conformance]", func() {
		doTest0644(f, testImageRootUid, api.StorageMediumDefault)
	})

	It("should support (root,0666,default) [Conformance]", func() {
		doTest0666(f, testImageRootUid, api.StorageMediumDefault)
	})

	It("should support (root,0777,default) [Conformance]", func() {
		doTest0777(f, testImageRootUid, api.StorageMediumDefault)
	})

	It("should support (non-root,0644,default) [Conformance]", func() {
		doTest0644(f, testImageNonRootUid, api.StorageMediumDefault)
	})

	It("should support (non-root,0666,default) [Conformance]", func() {
		doTest0666(f, testImageNonRootUid, api.StorageMediumDefault)
	})

	It("should support (non-root,0777,default) [Conformance]", func() {
		doTest0777(f, testImageNonRootUid, api.StorageMediumDefault)
	})
})
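
The FSGroup cases above boil down to a pod whose security context sets an fsGroup and which mounts an emptyDir volume, backed by tmpfs when the medium is Memory. The fragment below sketches that setup using the same api types the tests reference; the actual doTest* helpers are not shown in this excerpt, so the field values and the use of testImageRootUid as the container image are assumptions for illustration.

// Sketch only: an fsGroup in the pod security context plus a memory-backed
// emptyDir volume, the shape of pod the FSGroup tests above construct.
fsGroup := int64(123)
pod := &api.Pod{
	ObjectMeta: api.ObjectMeta{Name: "emptydir-fsgroup-sketch"},
	Spec: api.PodSpec{
		RestartPolicy: api.RestartPolicyNever,
		SecurityContext: &api.PodSecurityContext{
			FSGroup: &fsGroup, // files created in the volume should get this group
		},
		Containers: []api.Container{
			{
				Name:         "test-container",
				Image:        testImageRootUid, // image constant referenced by the tests above (assumed usable here)
				VolumeMounts: []api.VolumeMount{{Name: "test-volume", MountPath: "/test-volume"}},
			},
		},
		Volumes: []api.Volume{
			{
				Name: "test-volume",
				VolumeSource: api.VolumeSource{
					EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory},
				},
			},
		},
	},
}
_ = pod
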
Code Example #30
File: pd.go Project: spxtr/kubernetes
var _ = framework.KubeDescribe("Pod Disks", func() {
	var (
		podClient  v1core.PodInterface
		nodeClient v1core.NodeInterface
		host0Name  types.NodeName
		host1Name  types.NodeName
		nodes      *v1.NodeList
	)
	f := framework.NewDefaultFramework("pod-disks")

	BeforeEach(func() {
		framework.SkipUnlessNodeCountIsAtLeast(2)

		podClient = f.ClientSet.Core().Pods(f.Namespace.Name)
		nodeClient = f.ClientSet.Core().Nodes()
		nodes = framework.GetReadySchedulableNodesOrDie(f.ClientSet)

		Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")

		host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
		host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)

		mathrand.Seed(time.Now().UTC().UnixNano())
	})

	It("should schedule a pod w/ a RW PD, ungracefully remove it, then schedule it on another host [Slow]", func() {
		framework.SkipUnlessProviderIs("gce", "gke", "aws")

		By("creating PD")
		diskName, err := createPDWithRetry()
		framework.ExpectNoError(err, "Error creating PD")

		host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
		host1Pod := testPDPod([]string{diskName}, host1Name, false /* readOnly */, 1 /* numContainers */)
		containerName := "mycontainer"

		defer func() {
			// Teardown pods, PD. Ignore errors.
			// Teardown should do nothing unless test failed.
			By("cleaning up PD-RW test environment")
			podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
			podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0))
			detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
		}()

		By("submitting host0Pod to kubernetes")
		_, err = podClient.Create(host0Pod)
		framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))

		framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))

		testFile := "/testpd1/tracker"
		testFileContents := fmt.Sprintf("%v", mathrand.Int())

		framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
		framework.Logf("Wrote value: %v", testFileContents)

		// Verify that the disk shows up in node 1's VolumeInUse list
		framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))

		By("deleting host0Pod")
		// Delete pod with 0 grace period
		framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")

		By("submitting host1Pod to kubernetes")
		_, err = podClient.Create(host1Pod)
		framework.ExpectNoError(err, "Failed to create host1Pod")

		framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name))

		v, err := f.ReadFileViaContainer(host1Pod.Name, containerName, testFile)
		framework.ExpectNoError(err)
		framework.Logf("Read value: %v", v)

		Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents)))

		// Verify that disk is removed from node 1's VolumeInUse list
		framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))

		By("deleting host1Pod")
		framework.ExpectNoError(podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1Pod")

		By("Test completed successfully, waiting for PD to safely detach")
		waitForPDDetach(diskName, host0Name)
		waitForPDDetach(diskName, host1Name)

		return
	})

	It("Should schedule a pod w/ a RW PD, gracefully remove it, then schedule it on another host [Slow]", func() {
		framework.SkipUnlessProviderIs("gce", "gke", "aws")

		By("creating PD")
		diskName, err := createPDWithRetry()
		framework.ExpectNoError(err, "Error creating PD")

		host0Pod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
		host1Pod := testPDPod([]string{diskName}, host1Name, false /* readOnly */, 1 /* numContainers */)
		containerName := "mycontainer"

		defer func() {
			// Teardown pods, PD. Ignore errors.
			// Teardown should do nothing unless test failed.
			By("cleaning up PD-RW test environment")
			podClient.Delete(host0Pod.Name, &v1.DeleteOptions{})
			podClient.Delete(host1Pod.Name, &v1.DeleteOptions{})
			detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
		}()

		By("submitting host0Pod to kubernetes")
		_, err = podClient.Create(host0Pod)
		framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))

		framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))

		testFile := "/testpd1/tracker"
		testFileContents := fmt.Sprintf("%v", mathrand.Int())

		framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
		framework.Logf("Wrote value: %v", testFileContents)

		// Verify that the disk shows up in node 1's VolumeInUse list
		framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))

		By("deleting host0Pod")
		// Delete pod with default grace period 30s
		framework.ExpectNoError(podClient.Delete(host0Pod.Name, &v1.DeleteOptions{}), "Failed to delete host0Pod")

		By("submitting host1Pod to kubernetes")
		_, err = podClient.Create(host1Pod)
		framework.ExpectNoError(err, "Failed to create host1Pod")

		framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name))

		v, err := f.ReadFileViaContainer(host1Pod.Name, containerName, testFile)
		framework.ExpectNoError(err)
		framework.Logf("Read value: %v", v)

		Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents)))

		// Verify that disk is removed from node 1's VolumeInUse list
		framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))

		By("deleting host1Pod")
		framework.ExpectNoError(podClient.Delete(host1Pod.Name, &v1.DeleteOptions{}), "Failed to delete host1Pod")

		By("Test completed successfully, waiting for PD to safely detach")
		waitForPDDetach(diskName, host0Name)
		waitForPDDetach(diskName, host1Name)

		return
	})

	It("should schedule a pod w/ a readonly PD on two hosts, then remove both ungracefully. [Slow]", func() {
		framework.SkipUnlessProviderIs("gce", "gke")

		By("creating PD")
		diskName, err := createPDWithRetry()
		framework.ExpectNoError(err, "Error creating PD")

		rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
		host0ROPod := testPDPod([]string{diskName}, host0Name, true /* readOnly */, 1 /* numContainers */)
		host1ROPod := testPDPod([]string{diskName}, host1Name, true /* readOnly */, 1 /* numContainers */)

		defer func() {
			By("cleaning up PD-RO test environment")
			// Teardown pods, PD. Ignore errors.
			// Teardown should do nothing unless test failed.
			podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0))
			podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0))
			podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0))
			detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
		}()

		By("submitting rwPod to ensure PD is formatted")
		_, err = podClient.Create(rwPod)
		framework.ExpectNoError(err, "Failed to create rwPod")
		framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
		// Delete pod with 0 grace period
		framework.ExpectNoError(podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0)), "Failed to delete rwPod")
		framework.ExpectNoError(waitForPDDetach(diskName, host0Name))

		By("submitting host0ROPod to kubernetes")
		_, err = podClient.Create(host0ROPod)
		framework.ExpectNoError(err, "Failed to create host0ROPod")

		By("submitting host1ROPod to kubernetes")
		_, err = podClient.Create(host1ROPod)
		framework.ExpectNoError(err, "Failed to create host1ROPod")

		framework.ExpectNoError(f.WaitForPodRunningSlow(host0ROPod.Name))

		framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))

		By("deleting host0ROPod")
		framework.ExpectNoError(podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0ROPod")

		By("deleting host1ROPod")
		framework.ExpectNoError(podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1ROPod")

		By("Test completed successfully, waiting for PD to safely detach")
		waitForPDDetach(diskName, host0Name)
		waitForPDDetach(diskName, host1Name)
	})

	It("Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully. [Slow]", func() {
		framework.SkipUnlessProviderIs("gce", "gke")

		By("creating PD")
		diskName, err := createPDWithRetry()
		framework.ExpectNoError(err, "Error creating PD")

		rwPod := testPDPod([]string{diskName}, host0Name, false /* readOnly */, 1 /* numContainers */)
		host0ROPod := testPDPod([]string{diskName}, host0Name, true /* readOnly */, 1 /* numContainers */)
		host1ROPod := testPDPod([]string{diskName}, host1Name, true /* readOnly */, 1 /* numContainers */)

		defer func() {
			By("cleaning up PD-RO test environment")
			// Teardown pods, PD. Ignore errors.
			// Teardown should do nothing unless test failed.
			podClient.Delete(rwPod.Name, &v1.DeleteOptions{})
			podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{})
			podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{})
			detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
		}()

		By("submitting rwPod to ensure PD is formatted")
		_, err = podClient.Create(rwPod)
		framework.ExpectNoError(err, "Failed to create rwPod")
		framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
		// Delete pod with default grace period 30s
		framework.ExpectNoError(podClient.Delete(rwPod.Name, &v1.DeleteOptions{}), "Failed to delete rwPod")
		framework.ExpectNoError(waitForPDDetach(diskName, host0Name))

		By("submitting host0ROPod to kubernetes")
		_, err = podClient.Create(host0ROPod)
		framework.ExpectNoError(err, "Failed to create host0ROPod")

		By("submitting host1ROPod to kubernetes")
		_, err = podClient.Create(host1ROPod)
		framework.ExpectNoError(err, "Failed to create host1ROPod")

		framework.ExpectNoError(f.WaitForPodRunningSlow(host0ROPod.Name))

		framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))

		By("deleting host0ROPod")
		framework.ExpectNoError(podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host0ROPod")

		By("deleting host1ROPod")
		framework.ExpectNoError(podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host1ROPod")

		By("Test completed successfully, waiting for PD to safely detach")
		waitForPDDetach(diskName, host0Name)
		waitForPDDetach(diskName, host1Name)
	})

	It("should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession [Slow]", func() {
		framework.SkipUnlessProviderIs("gce", "gke", "aws")

		By("creating PD")
		diskName, err := createPDWithRetry()
		framework.ExpectNoError(err, "Error creating PD")
		numContainers := 4
		var host0Pod *v1.Pod

		defer func() {
			By("cleaning up PD-RW test environment")
			// Teardown pods, PD. Ignore errors.
			// Teardown should do nothing unless test failed.
			if host0Pod != nil {
				podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
			}
			detachAndDeletePDs(diskName, []types.NodeName{host0Name})
		}()

		fileAndContentToVerify := make(map[string]string)
		for i := 0; i < 3; i++ {
			framework.Logf("PD Read/Writer Iteration #%v", i)
			By("submitting host0Pod to kubernetes")
			host0Pod = testPDPod([]string{diskName}, host0Name, false /* readOnly */, numContainers)
			_, err = podClient.Create(host0Pod)
			framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))

			framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))

			// randomly select a container and read/verify pd contents from it
			containerName := fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
			verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)

			// Randomly select a container to write a file to PD from
			containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
			testFile := fmt.Sprintf("/testpd1/tracker%v", i)
			testFileContents := fmt.Sprintf("%v", mathrand.Int())
			fileAndContentToVerify[testFile] = testFileContents
			framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
			framework.Logf("Wrote value: \"%v\" to PD %q from pod %q container %q", testFileContents, diskName, host0Pod.Name, containerName)

			// Randomly select a container and read/verify pd contents from it
			containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
			verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)

			By("deleting host0Pod")
			framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
		}

		By("Test completed successfully, waiting for PD to safely detach")
		waitForPDDetach(diskName, host0Name)
	})

	It("should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession [Slow]", func() {
		framework.SkipUnlessProviderIs("gce", "gke", "aws")

		By("creating PD1")
		disk1Name, err := createPDWithRetry()
		framework.ExpectNoError(err, "Error creating PD1")
		By("creating PD2")
		disk2Name, err := createPDWithRetry()
		framework.ExpectNoError(err, "Error creating PD2")
		var host0Pod *v1.Pod

		defer func() {
			By("cleaning up PD-RW test environment")
			// Teardown pods, PD. Ignore errors.
			// Teardown should do nothing unless test failed.
			if host0Pod != nil {
				podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
			}
			detachAndDeletePDs(disk1Name, []types.NodeName{host0Name})
			detachAndDeletePDs(disk2Name, []types.NodeName{host0Name})
		}()

		containerName := "mycontainer"
		fileAndContentToVerify := make(map[string]string)
		for i := 0; i < 3; i++ {
			framework.Logf("PD Read/Writer Iteration #%v", i)
			By("submitting host0Pod to kubernetes")
			host0Pod = testPDPod([]string{disk1Name, disk2Name}, host0Name, false /* readOnly */, 1 /* numContainers */)
			_, err = podClient.Create(host0Pod)
			framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))

			framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))

			// Read/verify pd contents for both disks from container
			verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)

			// Write a file to both PDs from container
			testFilePD1 := fmt.Sprintf("/testpd1/tracker%v", i)
			testFilePD2 := fmt.Sprintf("/testpd2/tracker%v", i)
			testFilePD1Contents := fmt.Sprintf("%v", mathrand.Int())
			testFilePD2Contents := fmt.Sprintf("%v", mathrand.Int())
			fileAndContentToVerify[testFilePD1] = testFilePD1Contents
			fileAndContentToVerify[testFilePD2] = testFilePD2Contents
			framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD1, testFilePD1Contents))
			framework.Logf("Wrote value: \"%v\" to PD1 (%q) from pod %q container %q", testFilePD1Contents, disk1Name, host0Pod.Name, containerName)
			framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD2, testFilePD2Contents))
			framework.Logf("Wrote value: \"%v\" to PD2 (%q) from pod %q container %q", testFilePD2Contents, disk2Name, host0Pod.Name, containerName)

			// Read/verify pd contents for both disks from container
			verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)

			By("deleting host0Pod")
			framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
		}

		By("Test completed successfully, waiting for PD to safely detach")
		waitForPDDetach(disk1Name, host0Name)
		waitForPDDetach(disk2Name, host0Name)
	})

	It("should be able to detach from a node which was deleted [Slow] [Disruptive]", func() {
		framework.SkipUnlessProviderIs("gce")

		initialGroupSize, err := GroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup)
		framework.ExpectNoError(err, "Error getting group size")

		By("Creating a pd")
		diskName, err := createPDWithRetry()
		framework.ExpectNoError(err, "Error creating a pd")

		host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)

		containerName := "mycontainer"

		defer func() {
			By("Cleaning up PD-RW test env")
			podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
			detachAndDeletePDs(diskName, []types.NodeName{host0Name})
		}()

		By("submitting host0Pod to kubernetes")
		_, err = podClient.Create(host0Pod)
		framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0pod: %v", err))

		framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))

		testFile := "/testpd1/tracker"
		testFileContents := fmt.Sprintf("%v", mathrand.Int())

		framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
		framework.Logf("Wrote value: %v", testFileContents)

		// Verify that disk shows up in node 0's volumeInUse list
		framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/))

		output, err := exec.Command("gcloud", "compute", "instances", "list").CombinedOutput()
		framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances %v", err))
		Expect(strings.Contains(string(output), string(host0Name))).To(BeTrue())

		By("deleting host0")

		output, err = exec.Command("gcloud", "compute", "instances", "delete", string(host0Name), "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
		framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0: %v", err))

		output, err = exec.Command("gcloud", "compute", "instances", "list").CombinedOutput()
		framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances %v", err))
		Expect(strings.Contains(string(output), string(host0Name))).To(BeFalse())

		// The disk should be detached from host0 on its deletion
		By("Waiting for pd to detach from host0")
		waitForPDDetach(diskName, host0Name)
		framework.ExpectNoError(WaitForGroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup, int32(initialGroupSize)), "Unable to get back the cluster to initial size")
		return
	})

	It("should be able to detach from a node whose api object was deleted [Slow] [Disruptive]", func() {
		framework.SkipUnlessProviderIs("gce")
		initialGroupSize, err := GroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup)
		framework.ExpectNoError(err, "Error getting group size")
		By("Creating a pd")
		diskName, err := createPDWithRetry()
		framework.ExpectNoError(err, "Error creating a pd")

		host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)
		originalCount := len(nodes.Items)
		containerName := "mycontainer"
		nodeToDelete := &nodes.Items[0]
		defer func() error {
			By("Cleaning up PD-RW test env")
			detachAndDeletePDs(diskName, []types.NodeName{host0Name})
			nodeToDelete.ObjectMeta.SetResourceVersion("0")
			// need to set the resource version or else the Create() fails
			_, err := nodeClient.Create(nodeToDelete)
			framework.ExpectNoError(err, "Unable to re-create the deleted node")
			framework.ExpectNoError(WaitForGroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup, int32(initialGroupSize)), "Unable to get the node group back to the original size")
			framework.WaitForNodeToBeReady(f.ClientSet, nodeToDelete.Name, nodeStatusTimeout)
			if len(nodes.Items) != originalCount {
				return fmt.Errorf("The node count is not back to original count")
			}
			return nil
		}()

		By("submitting host0Pod to kubernetes")
		_, err = podClient.Create(host0Pod)
		framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0pod: %v", err))

		framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))

		testFile := "/testpd1/tracker"
		testFileContents := fmt.Sprintf("%v", mathrand.Int())

		framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
		framework.Logf("Wrote value: %v", testFileContents)

		// Verify that disk shows up in node 0's volumeInUse list
		framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/))

		By("deleting api object of host0")
		framework.ExpectNoError(nodeClient.Delete(string(host0Name), v1.NewDeleteOptions(0)), "Unable to delete host0")

		By("deleting host0pod")
		framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Unable to delete host0Pod")
		// The disk should be detached from host0 on its deletion
		By("Waiting for pd to detach from host0")
		framework.ExpectNoError(waitForPDDetach(diskName, host0Name), "Timed out waiting for detach pd")
	})
})
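
Several of the disk tests above gate on waitForPDInVolumesInUse, which watches whether a disk name does or does not appear in the node's reported in-use volumes. The fragment below is a minimal sketch of that kind of poll against Node.Status.VolumesInUse; it abstracts the node lookup behind a function so it does not pin any particular client interface, assumes the usual fmt, strings, time and v1 imports, and is not the helper's actual implementation.

// Sketch only: poll a node's Status.VolumesInUse until diskName does (or does
// not) appear, or the timeout expires.
func waitForDiskInVolumesInUse(getNode func() (*v1.Node, error), diskName string, timeout time.Duration, shouldExist bool) error {
	deadline := time.Now().Add(timeout)
	for {
		node, err := getNode()
		if err == nil {
			found := false
			for _, volumeInUse := range node.Status.VolumesInUse {
				if strings.Contains(string(volumeInUse), diskName) {
					found = true
					break
				}
			}
			if found == shouldExist {
				return nil
			}
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v waiting for disk %q in VolumesInUse (shouldExist=%v)", timeout, diskName, shouldExist)
		}
		time.Sleep(5 * time.Second)
	}
}
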