Example #1
func getMultipleNodes(f *e2e.Framework) (nodes *api.NodeList) {
	nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		e2e.Failf("Failed to list nodes: %v", err)
	}
	// Previous tests may have caused failures of some nodes. Skip
	// 'Not Ready' nodes, just in case (there is no need to fail the test).
	filterNodes(nodes, func(node api.Node) bool {
		return isNodeReadySetAsExpected(&node, true)
	})

	if len(nodes.Items) == 0 {
		e2e.Failf("No Ready nodes found.")
	}
	if len(nodes.Items) == 1 {
		// In general, the test requires two nodes. But for local development, a one-node
		// cluster is often created for simplicity and speed (see issue #10012).
		// We permit a one-node test only in some cases.
		if !providerIs("local") {
			e2e.Failf("The test requires two Ready nodes on %s, but found just one.", exutil.TestContext.Provider)
		}
		e2e.Logf("Only one ready node is detected. The test has limited scope in such setting. " +
			"Rerun it with at least two nodes to get complete coverage.")
	}
	return
}
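The filterNodes and isNodeReadySetAsExpected helpers used above are not shown in this listing. As a rough, hypothetical sketch of the shape they are assumed to have (an in-place filter over nodes.Items plus a check of the node's Ready condition), something like the following would fit; it is an illustration, not the project's actual implementation.

// Hypothetical sketch of the helpers assumed by getMultipleNodes.
// Uses the same api package (k8s.io/kubernetes/pkg/api) as the example above.
func filterNodes(nodes *api.NodeList, keep func(node api.Node) bool) {
	filtered := nodes.Items[:0]
	for _, node := range nodes.Items {
		if keep(node) {
			filtered = append(filtered, node)
		}
	}
	nodes.Items = filtered
}

func isNodeReadySetAsExpected(node *api.Node, wantReady bool) bool {
	for _, cond := range node.Status.Conditions {
		if cond.Type == api.NodeReady {
			return (cond.Status == api.ConditionTrue) == wantReady
		}
	}
	// No Ready condition reported; treat the node as not Ready.
	return !wantReady
}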
Example #2
func validateDNSResults(f *e2e.Framework, pod *api.Pod, fileNames sets.String, expect int) {
	By("submitting the pod to kubernetes")
	podClient := f.Client.Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		e2e.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	Expect(f.WaitForPodRunning(pod.Name)).To(BeNil())
	Expect(wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		pod, err := podClient.Get(pod.Name)
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case api.PodSucceeded:
			return true, nil
		case api.PodFailed:
			return false, fmt.Errorf("pod failed")
		default:
			return false, nil
		}
	})).To(BeNil())

	By("retrieving the pod logs")
	r, err := podClient.GetLogs(pod.Name, &api.PodLogOptions{Container: "querier"}).Stream()
	if err != nil {
		e2e.Failf("Failed to get pod logs %s: %v", pod.Name, err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		e2e.Failf("Failed to read pod logs %s: %v", pod.Name, err)
	}

	// Try to find results for each expected name.
	By("looking for the results for each expected name from probiers")

	if err := assertLinesExist(fileNames, expect, bytes.NewBuffer(out)); err != nil {
		e2e.Logf("Got results from pod:\n%s", out)
		e2e.Failf("Unexpected results: %v", err)
	}

	e2e.Logf("DNS probes using %s succeeded\n", pod.Name)
}
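assertLinesExist is another helper that does not appear in this listing. One plausible reading, assuming every expected file name must show up at least `expect` times in the querier output, is sketched below; the real helper may count or match differently.

// Hypothetical sketch of assertLinesExist; assumes each expected name must
// appear at least `expect` times in the probe output. Assumed imports:
// bufio, fmt, io, strings, and k8s.io/kubernetes/pkg/util/sets.
func assertLinesExist(fileNames sets.String, expect int, r io.Reader) error {
	counts := map[string]int{}
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if fileNames.Has(line) {
			counts[line]++
		}
	}
	if err := scanner.Err(); err != nil {
		return err
	}
	for _, name := range fileNames.List() {
		if counts[name] < expect {
			return fmt.Errorf("expected %d occurrences of %q, saw %d", expect, name, counts[name])
		}
	}
	return nil
}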
Example #3
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It("should build a "+c.repoName+" image and run it in a pod", func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("starting a build")
				buildName, err := oc.Run("start-build").Args(c.buildConfigName).Output()
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFunc, exutil.CheckBuildFailedFunc)
				if err != nil {
					logs, _ := oc.Run("build-logs").Args(buildName).Output()
					e2e.Failf("build failed: %s", logs)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the deployment to be complete")
				err = exutil.WaitForADeployment(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, exutil.CheckDeploymentCompletedFunc, exutil.CheckDeploymentFailedFunc)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, 10*time.Second)
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
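NewSampleRepoTest only builds the test closure; a caller still has to register it with ginkgo. A hypothetical registration from within the same package might look like the following. The SampleRepoConfig field names are inferred from the c.<field> references above, and all values (names, URL, expected string) are placeholders.

// Hypothetical usage of NewSampleRepoTest; field names follow the c.<field>
// references in the function above, and every value here is a placeholder.
var _ = g.Describe("[image_ecosystem] ruby sample repository", NewSampleRepoTest(
	SampleRepoConfig{
		repoName:             "ruby-hello-world",
		templateURL:          "https://example.com/ruby-hello-world-template.json",
		buildConfigName:      "ruby-hello-world",
		deploymentConfigName: "ruby-hello-world",
		serviceName:          "ruby-hello-world",
		appPath:              "/",
		expectedString:       "Hello, world!",
	},
))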
Example #4
func checkPodIsolation(f1, f2 *e2e.Framework, numNodes int) error {
	nodes, err := e2e.GetReadyNodes(f1)
	if err != nil {
		e2e.Failf("Failed to list nodes: %v", err)
	}
	var serverNode, clientNode *api.Node
	serverNode = &nodes.Items[0]
	if numNodes == 2 {
		if len(nodes.Items) == 1 {
			e2e.Skipf("Only one node is available in this environment")
		}
		clientNode = &nodes.Items[1]
	} else {
		clientNode = serverNode
	}

	podName := "isolation-webserver"
	defer f1.Client.Pods(f1.Namespace.Name).Delete(podName, nil)
	ip := e2e.LaunchWebserverPod(f1, podName, serverNode.Name)

	return checkConnectivityToHost(f2, clientNode.Name, "isolation-wget", ip, 10)
}
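checkConnectivityToHost, used here and in the following examples, is also not shown. A hypothetical sketch of the shape it is assumed to have, launching a one-shot wget pod on the chosen node and polling it to completion with the same wait.Poll pattern as Example #2, follows; it is an illustration, not the project's actual helper.

// Hypothetical sketch of checkConnectivityToHost: run a one-shot wget pod on
// nodeName against host and treat pod success as proof of connectivity.
func checkConnectivityToHost(f *e2e.Framework, nodeName, podName, host string, timeout int) error {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: podName},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "wget",
					Image:   "gcr.io/google_containers/busybox",
					Command: []string{"wget", "-s", "-T", fmt.Sprintf("%d", timeout), host},
				},
			},
			NodeName:      nodeName,
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	podClient := f.Client.Pods(f.Namespace.Name)
	if _, err := podClient.Create(pod); err != nil {
		return err
	}
	defer podClient.Delete(podName, nil)
	// Poll the pod phase, mirroring the wait.Poll pattern from Example #2.
	return wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		p, err := podClient.Get(podName)
		if err != nil {
			return false, err
		}
		switch p.Status.Phase {
		case api.PodSucceeded:
			return true, nil
		case api.PodFailed:
			return false, fmt.Errorf("wget pod %q failed", podName)
		default:
			return false, nil
		}
	})
}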
Example #5
func checkServiceConnectivity(serverFramework, clientFramework *e2e.Framework, numNodes int) error {
	nodes, err := e2e.GetReadyNodes(serverFramework)
	if err != nil {
		e2e.Failf("Failed to list nodes: %v", err)
	}
	var serverNode, clientNode *api.Node
	serverNode = &nodes.Items[0]
	if numNodes == 2 {
		if len(nodes.Items) == 1 {
			e2e.Skipf("Only one node is available in this environment")
		}
		clientNode = &nodes.Items[1]
	} else {
		clientNode = serverNode
	}

	podName := "service-webserver"
	defer serverFramework.Client.Pods(serverFramework.Namespace.Name).Delete(podName, nil)
	ip := launchWebserverService(serverFramework, podName, serverNode.Name)

	return checkConnectivityToHost(clientFramework, clientNode.Name, "service-wget", ip, 10)
}
Example #6
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("getting the Docker image reference from ImageStream")
			imageName, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "internal-image", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("instantiating a pod and service with the new image")
			err = oc.Run("new-app").Args("-f", podAndServiceFixture, "-p", "IMAGE_NAME="+imageName).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for the service to become available")
			err = oc.KubeFramework().WaitForAnEndpoint(buildTestService)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod container has saved artifacts")
			out, err := oc.Run("exec").Args("-p", buildTestPod, "--", "curl", "http://0.0.0.0:8080").Output()
			if err != nil {
				logs, _ := oc.Run("logs").Args(buildTestPod).Output()
				e2e.Failf("Failed to curl in application container: \n%q, pod logs: \n%q", out, logs)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			if !strings.Contains(out, "artifacts exist") {
				logs, _ := oc.Run("logs").Args(buildTestPod).Output()
				e2e.Failf("Pod %q does not contain expected artifacts: %q\n%q", buildTestPod, out, logs)
			}
		})
	})
})
Example #7
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("getting the Docker image reference from ImageStream")
			imageName, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("writing the pod defintion to a file")
			outputPath := filepath.Join(exutil.TestContext.OutputDir, oc.Namespace()+"-sample-pod.json")
			pod := exutil.CreatePodForImage(imageName)
			err = exutil.WriteObjectToFile(pod, outputPath)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("calling oc create -f %q", outputPath))
			err = oc.Run("create").Args("-f", outputPath).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to be running")
			err = oc.KubeFramework().WaitForPodRunning(pod.Name)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod container has TEST_ENV variable set")
			out, err := oc.Run("exec").Args("-p", pod.Name, "--", "curl", "http://0.0.0.0:8080").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			if !strings.Contains(out, "success") {
				e2e.Failf("Pod %q contains does not contain expected variable: %q", pod.Name, out)
			}
		})
	})
})
Example #8
	. "github.com/onsi/ginkgo"
)

var _ = Describe("networking: sanity", func() {
	svcname := "net-sanity"
	timeout := 10

	f := e2e.NewFramework(svcname)

	It("should function for pod communication on a single node", func() {

		By("Picking a node")
		nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
		if err != nil {
			e2e.Failf("Failed to list nodes: %v", err)
		}
		node := nodes.Items[0]

		By("Creating a webserver pod")
		podName := "same-node-webserver"
		defer f.Client.Pods(f.Namespace.Name).Delete(podName, nil)
		ip := launchWebserverPod(f, podName, node.Name)

		By("Checking that the webserver is accessible from a pod on the same node")
		expectNoError(checkConnectivityToHost(f, node.Name, "same-node-wget", ip, timeout))
	})

	It("should function for pod communication between nodes", func() {

		podClient := f.Client.Pods(f.Namespace.Name)
Example #9
			g.By("expecting the pod to be running")
			err = oc.KubeFramework().WaitForPodRunning(pod.Name)
			o.Expect(err).NotTo(o.HaveOccurred())

			// Even though the pod is running, the app isn't always started,
			// so wait until the WEBrick output mentions port 8080 before curling.
			logs := ""
			count := 0
			maxCount := 30
			for !strings.Contains(logs, "8080") && count < maxCount {
				logs, _ = oc.Run("logs").Args(pod.Name).Output()
				time.Sleep(time.Second)
				count++
			}
			if count == maxCount {
				e2e.Failf("Never saw port 8080 open in pod logs")
			}

			g.By("expecting the pod container has saved artifacts")
			out, err := oc.Run("exec").Args("-p", pod.Name, "--", "curl", "http://0.0.0.0:8080").Output()
			if err != nil {
				logs, _ = oc.Run("logs").Args(pod.Name).Output()
				e2e.Failf("Failed to curl in application container: \n%q, pod logs: \n%q", out, logs)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			if !strings.Contains(out, "artifacts exist") {
				logs, _ = oc.Run("logs").Args(pod.Name).Output()
				e2e.Failf("Pod %q does not contain expected artifacts: %q\n%q", pod.Name, out, logs)
			}
		})
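The manual retry loop above (repeated in Examples #13 and #15) could also be written with the wait.Poll helper that Example #2 already uses. A minimal sketch, assuming the same oc and pod variables and the same thirty-second budget as above:

// Hypothetical rewrite of the log-polling loop using wait.Poll; assumes the
// same oc and pod variables as the example above.
err = wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
	logs, _ := oc.Run("logs").Args(pod.Name).Output()
	return strings.Contains(logs, "8080"), nil
})
if err != nil {
	e2e.Failf("Never saw port 8080 open in pod logs")
}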
Example #10
			err = waitForRouterOKResponse(healthzURI, routerIP, 2*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for the valid route to respond")
			err = waitForRouterOKResponse(routerURL, "first.example.com", 2*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())

			for _, host := range []string{"second.example.com", "third.example.com"} {
				g.By(fmt.Sprintf("checking that %s does not match a route", host))
				req, err := requestViaReverseProxy("GET", routerURL, host)
				o.Expect(err).NotTo(o.HaveOccurred())
				resp, err := http.DefaultClient.Do(req)
				o.Expect(err).NotTo(o.HaveOccurred())
				resp.Body.Close()
				if resp.StatusCode != http.StatusServiceUnavailable {
					e2e.Failf("should have had a 503 status code for %s", host)
				}
			}
		})
		g.It("should override the route host with a custom value", func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)
			ns := oc.KubeFramework().Namespace.Name

			g.By(fmt.Sprintf("creating a scoped router from a config file %q", configPath))

			var routerIP string
			err := wait.Poll(time.Second, 2*time.Minute, func() (bool, error) {
				pod, err := oc.KubeFramework().Client.Pods(ns).Get("router-override")
				if err != nil {
					return false, err
				}
Example #11
			err := oc.Run("create").Args("-f", imageStreamFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("calling oc create -f %q", stiBuildFixture))
			err = oc.Run("create").Args("-f", stiBuildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build")
			buildName, err := oc.Run("start-build").Args("test").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("o.Expecting the S2I build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				logs, _ := oc.Run("build-logs").Args(buildName).Output()
				e2e.Failf("build failed: %s", logs)
			}

			g.By("getting the Docker image reference from ImageStream")
			imageRef, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			imageLabels, err := eximages.GetImageLabels(oc.REST().ImageStreamImages(oc.Namespace()), "test", imageRef)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("inspecting the new image for proper Docker labels")
			err = ExpectOpenShiftLabels(imageLabels)
			o.Expect(err).NotTo(o.HaveOccurred())
		})
	})
Example #12
			g.By("expecting the build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("test", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("getting the Docker image reference from ImageStream")
			imageName, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("instantiating a pod and service with the new image")
			err = oc.Run("new-app").Args("-f", podAndServiceFixture, "-p", "IMAGE_NAME="+imageName).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for the service to become available")
			err = oc.KubeFramework().WaitForAnEndpoint(buildTestService)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod container has TEST_ENV variable set")
			out, err := oc.Run("exec").Args("-p", buildTestPod, "--", "curl", "http://0.0.0.0:8080").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			if !strings.Contains(out, "success") {
				e2e.Failf("expected 'success' response when executing curl in %q, got %q", buildTestPod, out)
			}
		})
	})
})
Example #13
			g.By("expecting the pod to be running")
			err = oc.KubeFramework().WaitForPodRunning(pod.Name)
			o.Expect(err).NotTo(o.HaveOccurred())

			// Even though the pod is running, the app isn't always started,
			// so wait until the WEBrick output mentions port 8080 before curling.
			logs := ""
			count := 0
			maxCount := 30
			for !strings.Contains(logs, "8080") && count < maxCount {
				logs, _ = oc.Run("logs").Args(pod.Name).Output()
				time.Sleep(time.Second)
				count++
			}
			if count == maxCount {
				e2e.Failf("Never saw port 8080 open in pod logs")
			}

			g.By("expecting the pod container has saved artifacts")
			out, err := oc.Run("exec").Args("-p", pod.Name, "--", "curl", "http://0.0.0.0:8080").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			if !strings.Contains(out, "artifacts exist") {
				logs, _ = oc.Run("logs").Args(pod.Name).Output()
				e2e.Failf("Pod %q does not contain expected artifacts: %q\n%q", pod.Name, out, logs)
			}
		})
	})
})
Example #14
			}()
			o.Expect(err).NotTo(o.HaveOccurred())

			var result string
			found := false
			for i := 0; i < 12; i++ {
				g.By("Verifying that files are copied to the container")
				result, _ = oc.Run("rsh").Args(podName, "ls", "/tmp/rsync/subdir1").Output()
				if strings.Contains(result, "file1") {
					found = true
					break
				}
				time.Sleep(5 * time.Second)
			}
			if !found {
				e2e.Failf("Directory does not contain expected files: \n%s", result)
			}

			g.By("renaming file1 to file2")
			subdir1file2 := filepath.Join(subdir1, "file2")
			err = os.Rename(subdir1file1, subdir1file2)
			o.Expect(err).NotTo(o.HaveOccurred())

			found = false
			for i := 0; i < 12; i++ {
				g.By("Verifying that files are copied to the container")
				result, _ = oc.Run("rsh").Args(podName, "ls", "/tmp/rsync/subdir1").Output()
				if strings.Contains(result, "file2") && !strings.Contains(result, "file1") {
					found = true
					break
				}
Example #15
			g.By(fmt.Sprintf("calling oc create -f %q", outputPath))
			err = oc.Run("create").Args("-f", outputPath).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to be running")
			err = oc.KubeFramework().WaitForPodRunning(pod.Name)
			o.Expect(err).NotTo(o.HaveOccurred())

			// Even though the pod is running, the app isn't always started,
			// so wait until the WEBrick output mentions port 8080 before curling.
			logs := ""
			count := 0
			for !strings.Contains(logs, "8080") && count < 10 {
				logs, _ = oc.Run("logs").Args(pod.Name).Output()
				time.Sleep(time.Second)
				count++
			}

			g.By("expecting the pod container has saved artifacts")
			out, err := oc.Run("exec").Args("-p", pod.Name, "--", "curl", "http://0.0.0.0:8080").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			if !strings.Contains(out, "artifacts exist") {
				logs, _ = oc.Run("logs").Args(pod.Name).Output()
				e2e.Failf("Pod %q does not contain expected artifacts: %q\n%q", pod.Name, out, logs)
			}
		})
	})
})
Example #16
			g.By("expecting the build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFunc, exutil.CheckBuildFailedFunc)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("getting the Docker image reference from ImageStream")
			imageName, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("writing the pod definition to a file")
			pod := exutil.GetPodForContainer(kapi.Container{
				Image: imageName,
				Name:  "test",
			})
			_, err = oc.KubeREST().Pods(oc.Namespace()).Create(pod)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to be running")
			err = oc.KubeFramework().WaitForPodRunning(pod.Name)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod container has TEST_ENV variable set")
			out, err := oc.Run("exec").Args("-p", pod.Name, "--", "curl", "http://0.0.0.0:8080").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			if !strings.Contains(out, "success") {
				e2e.Failf("expected 'success' response when executing curl in %q, got %q", pod.Name, out)
			}
		})
	})
})
Example #17
// FatalErr exits the test in case a fatal error has occurred.
func FatalErr(msg interface{}) {
	e2e.Failf("%v", msg)
}
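A typical call site, as a minimal illustration (fixturePath is a placeholder):

// Hypothetical usage of FatalErr: abort the test on an unrecoverable setup error.
if err := oc.Run("create").Args("-f", fixturePath).Execute(); err != nil {
	FatalErr(err)
}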
Example #18
	for _, sub := range ep.Subsets {
		for _, addr := range sub.Addresses {
			ips.Insert(addr.IP)
		}
	}
	return ips.List()
}

var _ = Describe("DNS", func() {
	f := e2e.NewDefaultFramework("dns")

	It("should answer endpoint and wildcard queries for the cluster [Conformance]", func() {
		e2e.ClusterDNSVerifier(f)

		if _, err := f.Client.Services(f.Namespace.Name).Create(createServiceSpec("headless", true, nil)); err != nil {
			e2e.Failf("unable to create headless service: %v", err)
		}
		if _, err := f.Client.Endpoints(f.Namespace.Name).Create(createEndpointSpec("headless")); err != nil {
			e2e.Failf("unable to create clusterip endpoints: %v", err)
		}
		if _, err := f.Client.Services(f.Namespace.Name).Create(createServiceSpec("clusterip", false, nil)); err != nil {
			e2e.Failf("unable to create clusterip service: %v", err)
		}
		if _, err := f.Client.Endpoints(f.Namespace.Name).Create(createEndpointSpec("clusterip")); err != nil {
			e2e.Failf("unable to create clusterip endpoints: %v", err)
		}

		ep, err := f.Client.Endpoints("default").Get("kubernetes")
		if err != nil {
			e2e.Failf("unable to find endpoints for kubernetes.default: %v", err)
		}