// init initialize the extended testing suite. // You can set these environment variables to configure extended tests: // KUBECONFIG - Path to kubeconfig containing embedded authinfo // TEST_REPORT_DIR - If set, JUnit output will be written to this directory for each test // TEST_REPORT_FILE_NAME - If set, will determine the name of the file that JUnit output is written to func InitTest() { extendedOutputDir := filepath.Join(os.TempDir(), "openshift-extended-tests") os.MkdirAll(extendedOutputDir, 0777) TestContext.VerifyServiceAccount = true TestContext.RepoRoot = os.Getenv("KUBE_REPO_ROOT") TestContext.KubectlPath = "kubectl" TestContext.KubeConfig = KubeConfigPath() os.Setenv("KUBECONFIG", TestContext.KubeConfig) reportDir = os.Getenv("TEST_REPORT_DIR") reportFileName = os.Getenv("TEST_REPORT_FILE_NAME") if reportFileName == "" { reportFileName = "junit" } //flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, KubeConfigPath(), "Path to kubeconfig containing embedded authinfo.") flag.StringVar(&TestContext.OutputDir, "extended-tests-output-dir", extendedOutputDir, "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.") rflag.StringVar(&config.GinkgoConfig.FocusString, "focus", "", "DEPRECATED: use --ginkgo.focus") // Ensure that Kube tests run privileged (like they do upstream) ginkgo.JustBeforeEach(ensureKubeE2EPrivilegedSA) // Override the default Kubernetes E2E configuration e2e.SetTestContext(TestContext) }
// init initialize the extended testing suite. // You can set these environment variables to configure extended tests: // KUBECONFIG - Path to kubeconfig containing embedded authinfo func InitTest() { // Turn on verbose by default to get spec names config.DefaultReporterConfig.Verbose = false // Turn on EmitSpecProgress to get spec progress (especially on interrupt) config.GinkgoConfig.EmitSpecProgress = false // Randomize specs as well as suites config.GinkgoConfig.RandomizeAllSpecs = false extendedOutputDir := filepath.Join(os.TempDir(), "openshift-extended-tests") os.MkdirAll(extendedOutputDir, 0777) TestContext.VerifyServiceAccount = true TestContext.RepoRoot = os.Getenv("KUBE_REPO_ROOT") TestContext.KubectlPath = "kubectl" TestContext.KubeConfig = KubeConfigPath() os.Setenv("KUBECONFIG", TestContext.KubeConfig) //flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, KubeConfigPath(), "Path to kubeconfig containing embedded authinfo.") flag.StringVar(&TestContext.OutputDir, "extended-tests-output-dir", extendedOutputDir, "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.") // Ensure that Kube tests run privileged (like they do upstream) ginkgo.JustBeforeEach(ensureKubeE2EPrivilegedSA) // Override the default Kubernetes E2E configuration e2e.SetTestContext(TestContext) }
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template // from a url, kick off the buildconfig defined in that template, wait for the build/deploy, // and then confirm the application is serving an expected string value. func NewSampleRepoTest(c SampleRepoConfig) func() { return func() { defer g.GinkgoRecover() var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath()) g.JustBeforeEach(func() { g.By("Waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("Building "+c.repoName+" app from new-app", func() { g.It(fmt.Sprintf("should build a "+c.repoName+" image and run it in a pod"), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) exutil.CheckOpenShiftNamespaceImageStreams(oc) g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template")) err := oc.Run("new-app").Args("-f", c.templateURL).Execute() o.Expect(err).NotTo(o.HaveOccurred()) // all the templates automatically start a build. 
buildName := c.buildConfigName + "-1" g.By("expecting the build is in the Complete phase") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) if err != nil { exutil.DumpBuildLogs(c.buildConfigName, oc) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the app deployment to be complete") err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc) o.Expect(err).NotTo(o.HaveOccurred()) if len(c.dbDeploymentConfigName) > 0 { g.By("expecting the db deployment to be complete") err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc) o.Expect(err).NotTo(o.HaveOccurred()) } g.By("expecting the service is available") serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(serviceIP).ShouldNot(o.Equal("")) g.By("expecting an endpoint is available") err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName) o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying string from app request") response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, time.Duration(30*time.Second)) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(response).Should(o.ContainSubstring(c.expectedString)) }) }) } }
// init initialize the extended testing suite.
// You can set these environment variables to configure extended tests:
// KUBECONFIG - Path to kubeconfig containing embedded authinfo
// TEST_REPORT_DIR - If set, JUnit output will be written to this directory for each test
// TEST_REPORT_FILE_NAME - If set, will determine the name of the file that JUnit output is written to
func InitTest() {
	// Scratch directory for test artifacts; creation is best-effort and any
	// failure surfaces later when a test tries to write into it.
	extendedOutputDir := filepath.Join(os.TempDir(), "openshift-extended-tests")
	os.MkdirAll(extendedOutputDir, 0777)

	// Namespace cleanup is on unless DELETE_NAMESPACE is explicitly "false".
	TestContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false"
	TestContext.VerifyServiceAccount = true
	TestContext.RepoRoot = os.Getenv("KUBE_REPO_ROOT")
	// Volume directory defaults to the origin layout when VOLUME_DIR is unset.
	TestContext.KubeVolumeDir = os.Getenv("VOLUME_DIR")
	if len(TestContext.KubeVolumeDir) == 0 {
		TestContext.KubeVolumeDir = "/var/lib/origin/volumes"
	}
	TestContext.KubectlPath = "kubectl"
	TestContext.KubeConfig = KubeConfigPath()
	// Export the resolved kubeconfig so child processes see the same cluster.
	os.Setenv("KUBECONFIG", TestContext.KubeConfig)

	// load and set the host variable for kubectl
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: TestContext.KubeConfig}, &clientcmd.ConfigOverrides{})
	cfg, err := clientConfig.ClientConfig()
	if err != nil {
		// A kubeconfig that cannot be loaded makes every test meaningless;
		// FatalErr aborts the suite here rather than later.
		FatalErr(err)
	}
	TestContext.Host = cfg.Host

	// JUnit reporting configuration; the report file name defaults to "junit".
	reportDir = os.Getenv("TEST_REPORT_DIR")
	reportFileName = os.Getenv("TEST_REPORT_FILE_NAME")
	if reportFileName == "" {
		reportFileName = "junit"
	}
	// quiet suppresses per-test output when TEST_OUTPUT_QUIET=true
	// (presumably consumed by the suite's reporter; verify at the usage site).
	quiet = os.Getenv("TEST_OUTPUT_QUIET") == "true"
	//flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, KubeConfigPath(), "Path to kubeconfig containing embedded authinfo.")
	flag.StringVar(&TestContext.OutputDir, "extended-tests-output-dir", extendedOutputDir, "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
	rflag.StringVar(&config.GinkgoConfig.FocusString, "focus", "", "DEPRECATED: use --ginkgo.focus")

	// Ensure that Kube tests run privileged (like they do upstream)
	ginkgo.JustBeforeEach(ensureKubeE2EPrivilegedSA)

	// Override the default Kubernetes E2E configuration
	e2e.SetTestContext(TestContext)
}
) var _ = g.Describe("[builds][Slow] build can have Docker image source", func() { defer g.GinkgoRecover() var ( buildFixture = exutil.FixturePath("testdata", "test-imagesource-build.yaml") oc = exutil.NewCLI("build-image-source", exutil.KubeConfigPath()) imageSourceLabel = exutil.ParseLabelsOrDie("app=imagesourceapp") imageDockerLabel = exutil.ParseLabelsOrDie("app=imagedockerapp") ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for imagestreams to be imported") err = exutil.WaitForAnImageStream(oc.AdminREST().ImageStreams("openshift"), "jenkins", exutil.CheckImageStreamLatestTagPopulatedFn, exutil.CheckImageStreamTagNotFoundFn) o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("build with image source", func() { g.It("should complete successfully and contain the expected file", func() { g.By("Creating build configs for source build") err := oc.Run("create").Args("-f", buildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting the source strategy build") err = oc.Run("start-build").Args("imagesourcebuild").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the builds to complete successfully") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "imagesourcebuild-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
var ( oc = exutil.NewCLI("build-dockerfile-env", exutil.KubeConfigPath()) testDockerfile = ` FROM openshift/origin-base USER 1001 ` testDockerfile2 = ` FROM centos:7 RUN yum install -y httpd USER 1001 ` ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.SetOutputDir(exutil.TestContext.OutputDir) }) g.Describe("being created from new-build", func() { g.It("should create a image via new-build", func() { g.By(fmt.Sprintf("calling oc new-build with Dockerfile")) err := oc.Run("new-build").Args("-D", "-").InputString(testDockerfile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") bc, err := oc.REST().BuildConfigs(oc.Namespace()).Get("origin-base") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(bc.Spec.Source.Git).To(o.BeNil()) o.Expect(bc.Spec.Source.Dockerfile).NotTo(o.BeNil()) o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile))
exutil "github.com/openshift/origin/test/extended/util" ) var _ = g.Describe("[builds][Slow] starting a build using CLI", func() { defer g.GinkgoRecover() var ( buildFixture = exutil.FixturePath("testdata", "test-build.json") exampleGemfile = exutil.FixturePath("testdata", "test-build-app", "Gemfile") exampleBuild = exutil.FixturePath("testdata", "test-build-app") oc = exutil.NewCLI("cli-start-build", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.Run("create").Args("-f", buildFixture).Execute() }) g.Describe("oc start-build --wait", func() { g.It("should start a build and wait for the build to complete", func() { g.By("starting the build with --wait flag") br, err := exutil.StartBuildAndWait(oc, "sample-build", "--wait") o.Expect(err).NotTo(o.HaveOccurred()) br.AssertSuccess() }) g.It("should start a build and wait for the build to fail", func() { g.By("starting the build with --wait flag but wrong --commit") br, _ := exutil.StartBuildAndWait(oc, "sample-build", "--wait", "--commit=fffffff") br.AssertFailure()
buildclient "github.com/openshift/origin/pkg/build/client" buildutil "github.com/openshift/origin/pkg/build/util" exutil "github.com/openshift/origin/test/extended/util" ) var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func() { defer g.GinkgoRecover() var ( // Use invalid source here as we don't care about the result oc = exutil.NewCLI("cli-build-run-policy", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) // Create all fixtures oc.Run("create").Args("-f", exutil.FixturePath("testdata", "run_policy")).Execute() }) g.Describe("build configuration with Parallel build run policy", func() { g.It("runs the builds in parallel", func() { g.By("starting multiple builds") var ( startedBuilds []string counter int ) bcName := "sample-parallel-build" buildWatch, err := oc.Client().Builds(oc.Namespace()).Watch(kapi.ListOptions{ LabelSelector: buildutil.BuildConfigSelector(bcName),
defer g.GinkgoRecover() const ( buildTestPod = "build-test-pod" buildTestService = "build-test-svc" ) var ( templateFixture = exutil.FixturePath("testdata", "incremental-auth-build.json") podAndServiceFixture = exutil.FixturePath("testdata", "test-build-podsvc.json") oc = exutil.NewCLI("build-sti-inc", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("Building from a template", func() { g.It(fmt.Sprintf("should create a build from %q template and run it", templateFixture), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc new-app -f %q", templateFixture)) err := oc.Run("new-app").Args("-f", templateFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") out, err := oc.Run("start-build").Args("initial-build").Output() fmt.Fprintf(g.GinkgoWriter, "\ninitial-build start-build output:\n%s\n", out) o.Expect(err).NotTo(o.HaveOccurred())
templatePath = exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json") sourcePath1 = exutil.FixturePath("..", "..", "examples", "image-streams") sourcePath2 = exutil.FixturePath("..", "..", "examples", "sample-app") strategies = []string{"rsync", "rsync-daemon", "tar"} ) var podName string g.JustBeforeEach(func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc new-app -f %q", templatePath)) err := oc.Run("new-app").Args("-f", templatePath).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the jenkins service get endpoints") err = oc.KubeFramework().WaitForAnEndpoint("jenkins") o.Expect(err).NotTo(o.HaveOccurred()) g.By("Getting the jenkins pod name") selector, _ := labels.Parse("name=jenkins") pods, err := oc.KubeREST().Pods(oc.Namespace()).List(selector, fields.Everything()) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods.Items)).ToNot(o.BeZero()) podName = pods.Items[0].Name }) g.Describe("copy by strategy", func() { testRsyncFn := func(strategy string) func() { return func() { g.By(fmt.Sprintf("Calling oc rsync %s %s:/tmp --strategy=%s", sourcePath1, podName, strategy)) err := oc.Run("rsync").Args(
type cleanUpContainer struct { imageNames []string } var _ = g.Describe("[images] prune images", func() { defer g.GinkgoRecover() var oc = exutil.NewCLI("prune-images", exutil.KubeConfigPath()) var originalAcceptSchema2 *bool g.JustBeforeEach(func() { if originalAcceptSchema2 == nil { accepts, err := doesRegistryAcceptSchema2(oc) o.Expect(err).NotTo(o.HaveOccurred()) originalAcceptSchema2 = &accepts } err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("give a user %s a right to prune images with %s role", oc.Username(), "system:image-pruner")) err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "system:image-pruner", oc.Username()).Execute() o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("of schema 1", func() { g.JustBeforeEach(func() { if *originalAcceptSchema2 { g.By("ensure the registry does not accept schema 2") err := ensureRegistryAcceptsSchema2(oc, false) o.Expect(err).NotTo(o.HaveOccurred()) } })
"os" g "github.com/onsi/ginkgo" "github.com/openshift/origin/test/common/build" exutil "github.com/openshift/origin/test/extended/util" ) var _ = g.Describe("[bldcompat][Slow][Compatibility] build controller", func() { defer g.GinkgoRecover() var ( oc = exutil.NewCLI("compat-build-controllers", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { os.Setenv("OS_TEST_NAMESPACE", oc.Namespace()) }) g.Describe("RunBuildControllerTest", func() { g.It("should succeed", func() { build.RunBuildControllerTest(g.GinkgoT(), oc.AdminClient(), oc.AdminKubeClient()) }) }) g.Describe("RunBuildPodControllerTest", func() { g.It("should succeed", func() { build.RunBuildPodControllerTest(g.GinkgoT(), oc.AdminClient(), oc.AdminKubeClient()) }) }) g.Describe("RunImageChangeTriggerTest [SkipPrevControllers]", func() { g.It("should succeed", func() { build.RunImageChangeTriggerTest(g.GinkgoT(), oc.AdminClient())
exutil "github.com/openshift/origin/test/extended/util" ) var _ = g.Describe("[builds][Slow] testing build configuration hooks", func() { defer g.GinkgoRecover() var ( buildFixture = exutil.FixturePath("testdata", "test-build-postcommit.json") oc = exutil.NewCLI("cli-test-hooks", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.Run("create").Args("-f", buildFixture).Execute() g.By("waiting for istag to initialize") exutil.WaitForAnImageStreamTag(oc, oc.Namespace(), "busybox", "1") }) g.Describe("testing postCommit hook", func() { g.It("successful postCommit script with args", func() { err := oc.Run("patch").Args("bc/busybox", "-p", "{\"spec\":{\"postCommit\":{\"script\":\"echo hello $1\",\"args\":[\"world\"],\"command\":null}}}").Execute() o.Expect(err).NotTo(o.HaveOccurred()) br, _ := exutil.StartBuildAndWait(oc, "busybox") br.AssertSuccess() o.Expect(br.Logs()).To(o.ContainSubstring("hello world")) })
var ( oc = exutil.NewCLI("extended-build", exutil.KubeConfigPath()) testDataDir = exutil.FixturePath("testdata", "build-extended") runnerConf = filepath.Join(testDataDir, "jvm-runner.yaml") runnerWithScriptsConf = filepath.Join(testDataDir, "jvm-runner-with-scripts.yaml") scriptsFromRepoBc = filepath.Join(testDataDir, "bc-scripts-in-repo.yaml") scriptsFromUrlBc = filepath.Join(testDataDir, "bc-scripts-by-url.yaml") scriptsFromImageBc = filepath.Join(testDataDir, "bc-scripts-in-the-image.yaml") ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) // we have to wait until image stream tag will be available, otherwise // `oc start-build` will fail with 'imagestreamtags "wildfly:10.0" not found' error. // See this issue for details: https://github.com/openshift/origin/issues/10103 err = exutil.WaitForAnImageStreamTag(oc, "openshift", "wildfly", "10.0") o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("with scripts from the source repository", func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.It("should use assemble-runtime script from the source repository", func() { g.By("creating jvm-runner configuration") err := exutil.CreateResource(runnerConf, oc) o.Expect(err).NotTo(o.HaveOccurred())
// map of all resources created from the fixtures var resources map[string][]string g.JustBeforeEach(func() { g.By("Creating the resources") rawResources, rawResourceNames, err := createFixture(oc, fixture) o.Expect(err).ToNot(o.HaveOccurred()) resources = make(map[string][]string) for i, resource := range rawResources { resources[resource] = append(resources[resource], rawResourceNames[i]) } g.By("Creating the idling file") serviceNames := resources["service"] targetFile, err := ioutil.TempFile(exutil.TestContext.OutputDir, "idling-services-") o.Expect(err).ToNot(o.HaveOccurred()) defer targetFile.Close() idlingFile = targetFile.Name() _, err = targetFile.Write([]byte(strings.Join(serviceNames, "\n"))) o.Expect(err).ToNot(o.HaveOccurred()) g.By("Waiting for the endpoints to exist") serviceName := resources["service"][0] g.By("Waiting for endpoints to be up") err = waitForEndpointsAvailable(oc, serviceName) o.Expect(err).ToNot(o.HaveOccurred()) }) g.AfterEach(func() { g.By("Cleaning up the idling file")