Example #1
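// InMultiTenantContext wraps the spec body in a Ginkgo Context that is
// skipped unless the network plugin isolates namespaces (multi-tenant mode).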
func InMultiTenantContext(body func()) {
	Context("when using a multi-tenant plugin", func() {
		BeforeEach(func() {
			if !pluginIsolatesNamespaces() {
				e2e.Skipf("Not a multi-tenant plugin.")
			}
		})

		body()
	})
}
Example #2
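// InSingleTenantContext wraps the spec body in a Ginkgo Context that is
// skipped when the network plugin isolates namespaces, so it only runs
// against single-tenant plugins.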
func InSingleTenantContext(body func()) {
	Context("when using a single-tenant plugin", func() {
		BeforeEach(func() {
			if pluginIsolatesNamespaces() {
				e2e.Skipf("Not a single-tenant plugin.")
			}
		})

		body()
	})
}
Example #3
// SkipUnlessFederated skips the test unless the federation namespace exists in the underlying cluster.
func SkipUnlessFederated(c clientset.Interface) {
	federationNS := os.Getenv("FEDERATION_NAMESPACE")
	if federationNS == "" {
		federationNS = federationapi.FederationNamespaceSystem
	}

	_, err := c.Core().Namespaces().Get(federationNS, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			framework.Skipf("Could not find federation namespace %s: skipping federated test", federationNS)
		} else {
			framework.Failf("Unexpected error getting namespace: %v", err)
		}
	}
}
Example #4
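// checkPodIsolation launches a webserver pod through f1 on the first ready node
// and checks connectivity to it from a pod created through f2; with
// DIFFERENT_NODE the client runs on a second node, skipping if only one exists.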
func checkPodIsolation(f1, f2 *e2e.Framework, nodeType NodeType) error {
	nodes := e2e.GetReadySchedulableNodesOrDie(f1.Client)
	var serverNode, clientNode *api.Node
	serverNode = &nodes.Items[0]
	if nodeType == DIFFERENT_NODE {
		if len(nodes.Items) == 1 {
			e2e.Skipf("Only one node is available in this environment")
		}
		clientNode = &nodes.Items[1]
	} else {
		clientNode = serverNode
	}

	podName := "isolation-webserver"
	defer f1.Client.Pods(f1.Namespace.Name).Delete(podName, nil)
	ip := e2e.LaunchWebserverPod(f1, podName, serverNode.Name)

	return checkConnectivityToHost(f2, clientNode.Name, "isolation-wget", ip, 10)
}
Example #5
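// checkServiceConnectivity exposes a webserver service from serverFramework and
// checks that a pod created through clientFramework can reach its service IP;
// numNodes == 2 places the client on a second node, skipping on one-node clusters.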
func checkServiceConnectivity(serverFramework, clientFramework *e2e.Framework, numNodes int) error {
	nodes := e2e.GetReadySchedulableNodesOrDie(serverFramework.Client)
	var serverNode, clientNode *api.Node
	serverNode = &nodes.Items[0]
	if numNodes == 2 {
		if len(nodes.Items) == 1 {
			e2e.Skipf("Only one node is available in this environment")
		}
		clientNode = &nodes.Items[1]
	} else {
		clientNode = serverNode
	}

	podName := "service-webserver"
	defer serverFramework.Client.Pods(serverFramework.Namespace.Name).Delete(podName, nil)
	ip := launchWebserverService(serverFramework, podName, serverNode.Name)

	return checkConnectivityToHost(clientFramework, clientNode.Name, "service-wget", ip, 10)
}
Example #6
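// checkServiceConnectivity is a variant keyed on NodeType rather than a node
// count; it also generates a unique pod/service name and cleans up both the
// pod and the service on exit.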
func checkServiceConnectivity(serverFramework, clientFramework *e2e.Framework, nodeType NodeType) error {
	nodes := e2e.GetReadySchedulableNodesOrDie(serverFramework.Client)
	var serverNode, clientNode *api.Node
	serverNode = &nodes.Items[0]
	if nodeType == DIFFERENT_NODE {
		if len(nodes.Items) == 1 {
			e2e.Skipf("Only one node is available in this environment")
		}
		clientNode = &nodes.Items[1]
	} else {
		clientNode = serverNode
	}

	podName := api.SimpleNameGenerator.GenerateName("service-")
	defer serverFramework.Client.Pods(serverFramework.Namespace.Name).Delete(podName, nil)
	defer serverFramework.Client.Services(serverFramework.Namespace.Name).Delete(podName)
	ip := launchWebserverService(serverFramework, podName, serverNode.Name)

	return checkConnectivityToHost(clientFramework, clientNode.Name, "service-wget", ip, 10)
}
Example #7
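// checkPodIsolation is a variant that lists ready nodes with e2e.GetReadyNodes
// and fails the test outright if the node list cannot be retrieved.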
func checkPodIsolation(f1, f2 *e2e.Framework, numNodes int) error {
	nodes, err := e2e.GetReadyNodes(f1)
	if err != nil {
		e2e.Failf("Failed to list nodes: %v", err)
	}
	var serverNode, clientNode *api.Node
	serverNode = &nodes.Items[0]
	if numNodes == 2 {
		if len(nodes.Items) == 1 {
			e2e.Skipf("Only one node is available in this environment")
		}
		clientNode = &nodes.Items[1]
	} else {
		clientNode = serverNode
	}

	podName := "isolation-webserver"
	defer f1.Client.Pods(f1.Namespace.Name).Delete(podName, nil)
	ip := e2e.LaunchWebserverPod(f1, podName, serverNode.Name)

	return checkConnectivityToHost(f2, clientNode.Name, "isolation-wget", ip, 10)
}
Example #8
	const (
		testDaemonHttpPort    = 11301
		testDaemonTcpPort     = 11302
		timeoutSeconds        = 10
		postFinTimeoutSeconds = 5
	)

	fr := framework.NewDefaultFramework("network")

	It("should set TCP CLOSE_WAIT timeout", func() {
		nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
		ips := collectAddresses(nodes, api.NodeInternalIP)

		if len(nodes.Items) < 2 {
			framework.Skipf(
				"Test requires >= 2 Ready nodes, but there are only %v nodes",
				len(nodes.Items))
		}

		type NodeInfo struct {
			node   *api.Node
			name   string
			nodeIp string
		}

		clientNodeInfo := NodeInfo{
			node:   &nodes.Items[0],
			name:   nodes.Items[0].Name,
			nodeIp: ips[0],
		}
Example #9
				Value: "1",
			},
		})
		pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}

		By("Creating a pod with the kernel.shm_rmid_forced sysctl")
		pod = podClient.Create(pod)

		By("Watching for error events or started pod")
		// watch for events instead of termination of pod because the kubelet deletes
		// failed pods without running containers. This would create a race as the pod
		// might have already been deleted here.
		ev, err := waitForPodErrorEventOrStarted(pod)
		Expect(err).NotTo(HaveOccurred())
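		// Docker versions without sysctl support surface an "unsupported" event
		// instead of starting the pod, in which case the test is skipped.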
		if ev != nil && ev.Reason == sysctl.UnsupportedReason {
			framework.Skipf("No sysctl support in Docker <1.12")
		}
		Expect(ev).To(BeNil())

		By("Waiting for pod completion")
		err = f.WaitForPodNoLongerRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())

		By("Checking that the pod succeeded")
		Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))

		By("Getting logs from the pod")
		log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
		Expect(err).NotTo(HaveOccurred())
Example #10
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Kubernetes Dashboard", func() {
	const (
		uiServiceName = "kubernetes-dashboard"
		uiAppName     = uiServiceName
		uiNamespace   = api.NamespaceSystem

		serverStartTimeout = 1 * time.Minute
	)

	f := framework.NewDefaultFramework(uiServiceName)

	It("should check that the kubernetes-dashboard instance is alive", func() {
		framework.Skipf("UI is disabled")
		By("Checking whether the kubernetes-dashboard service exists.")
		err := framework.WaitForService(f.Client, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout)
		Expect(err).NotTo(HaveOccurred())

		By("Checking to make sure the kubernetes-dashboard pods are running")
		selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName}))
		err = framework.WaitForPodsWithLabelRunning(f.Client, uiNamespace, selector)
		Expect(err).NotTo(HaveOccurred())

		By("Checking to make sure we get a response from the kubernetes-dashboard.")
		err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) {
			var status int
			proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
			if errProxy != nil {
				framework.Logf("Get services proxy request failed: %v", errProxy)
Example #11
	f := framework.NewDefaultFramework("kubelet-eviction-manager")
	var podClient *framework.PodClient
	var c clientset.Interface

	BeforeEach(func() {
		podClient = f.PodClient()
		c = f.ClientSet
	})

	Describe("hard eviction test", func() {
		Context("pod using the most disk space gets evicted when the node disk usage is above the eviction hard threshold", func() {
			var busyPodName, idlePodName, verifyPodName string

			BeforeEach(func() {
				if !isImageSupported() {
					framework.Skipf("test skipped because the image is not supported by the test")
				}
				if !evictionOptionIsSet() {
					framework.Skipf("test skipped because eviction option is not set")
				}

				busyPodName = "to-evict" + string(uuid.NewUUID())
				idlePodName = "idle" + string(uuid.NewUUID())
				verifyPodName = "verify" + string(uuid.NewUUID())
				createIdlePod(idlePodName, podClient)
				podClient.Create(&api.Pod{
					ObjectMeta: api.ObjectMeta{
						Name: busyPodName,
					},
					Spec: api.PodSpec{
						RestartPolicy: api.RestartPolicyNever,
Example #12
			framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag)
			framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag)
			err = framework.WaitForPodNameRunningInNamespace(c, "rethinkdb-admin", ns)
			Expect(err).NotTo(HaveOccurred())
			checkDbInstances()
			content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
			Expect(err).NotTo(HaveOccurred())
			if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") {
				framework.Failf("RethinkDB console is not running")
			}
		})
	})

	framework.KubeDescribe("Hazelcast", func() {
		It("should create and scale hazelcast", func() {
			framework.Skipf("Skipping because of upstream race condition. Remove Skip when https://github.com/pires/hazelcast-kubernetes-bootstrapper/issues/9 is fixed")

			mkpath := func(file string) string {
				return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/hazelcast", file)
			}
			serviceYaml := mkpath("hazelcast-service.yaml")
			controllerYaml := mkpath("hazelcast-controller.yaml")
			nsFlag := fmt.Sprintf("--namespace=%v", ns)

			By("starting hazelcast")
			framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
			framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
			label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "hazelcast"}))
			err := framework.WaitForPodsWithLabelRunning(c, ns, label)
			Expect(err).NotTo(HaveOccurred())
			forEachPod("name", "hazelcast", func(pod api.Pod) {
Example #13
}

// These tests need privileged containers, which are disabled by default.  Run
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
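// These specs are additionally skipped in BeforeEach when the node distro does
// not support the NFS-based test setup.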
var _ = framework.KubeDescribe("GCP Volumes", func() {
	f := framework.NewDefaultFramework("gcp-volume")

	// If 'false', the test won't clear its volumes upon completion. Useful for debugging,
	// note that namespace deletion is handled by delete-namespace flag
	clean := true
	// filled in BeforeEach
	var namespace *v1.Namespace

	BeforeEach(func() {
		if !isTestEnabled(f.ClientSet) {
			framework.Skipf("NFS tests are not supported for this distro")
		}
		namespace = f.Namespace
	})

	////////////////////////////////////////////////////////////////////////
	// NFS
	////////////////////////////////////////////////////////////////////////

	framework.KubeDescribe("NFSv4", func() {
		It("should be mountable for NFSv4 [Volume]", func() {
			config := VolumeTestConfig{
				namespace:   namespace.Name,
				prefix:      "nfs",
				serverImage: "gcr.io/google_containers/volume-nfs:0.8",
				serverPorts: []int{2049},
Example #14
	// How long to wait for a scheduledjob
	scheduledJobTimeout = 5 * time.Minute
)

var _ = framework.KubeDescribe("ScheduledJob", func() {
	options := framework.FrameworkOptions{
		ClientQPS:    20,
		ClientBurst:  50,
		GroupVersion: &unversioned.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"},
	}
	f := framework.NewFramework("scheduledjob", options, nil)

	BeforeEach(func() {
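		// Skip the whole suite when the batch/v2alpha1 ScheduledJobs API is not
		// available in the cluster under test.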
		if _, err := f.Client.Batch().ScheduledJobs(f.Namespace.Name).List(api.ListOptions{}); err != nil {
			if apierrs.IsNotFound(err) {
				framework.Skipf("Could not find ScheduledJobs resource, skipping test: %#v", err)
			}
		}
	})

	// multiple jobs running at once
	It("should schedule multiple jobs concurrently", func() {
		By("Creating a scheduledjob")
		scheduledJob := newTestScheduledJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, true)
		scheduledJob, err := createScheduledJob(f.Client, f.Namespace.Name, scheduledJob)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring more than one job is running at a time")
		err = waitForActiveJobs(f.Client, f.Namespace.Name, scheduledJob.Name, 2)
		Expect(err).NotTo(HaveOccurred())
Example #15
			defer f.Client.Pods(f.Namespace.Name).Delete(podName, nil)
			ip := framework.LaunchWebserverPod(f, podName, node.Name)

			By("Checking that the webserver is accessible from a pod on the same node")
			framework.ExpectNoError(framework.CheckConnectivityToHost(f, node.Name, "same-node-wget", ip, connectivityTimeout))
		})

		It("should function for pod communication between nodes", func() {

			podClient := f.Client.Pods(f.Namespace.Name)

			By("Picking multiple nodes")
			nodes := framework.GetReadySchedulableNodesOrDie(f.Client)

			if len(nodes.Items) == 1 {
				framework.Skipf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)
			}

			node1 := nodes.Items[0]
			node2 := nodes.Items[1]

			By("Creating a webserver pod")
			podName := "different-node-webserver"
			defer podClient.Delete(podName, nil)
			ip := framework.LaunchWebserverPod(f, podName, node1.Name)

			By("Checking that the webserver is accessible from a pod on a different node")
			framework.ExpectNoError(framework.CheckConnectivityToHost(f, node2.Name, "different-node-wget", ip, connectivityTimeout))
		})
	})
})