// End-to-end smoke test of the OpenShift Jenkins plugin: a job is created
// through the raw Jenkins HTTP API, triggered, and its side effects on the
// OpenShift project (deployments) plus its console output are verified.
g.Context("jenkins-plugin test context ", func() {
	g.It("jenkins-plugin test case execution", func() {
		g.By("create jenkins job config xml file, convert to bytes for http post")
		data := jenkinsJobBytes("testjob-plugin.xml", oc.Namespace())

		g.By("make http request to create job")
		// 200 is the status Jenkins returns for a successful createItem POST.
		immediateInteractionWithJenkins(fmt.Sprintf("http://%s/createItem?name=test-plugin-job", hostPort), "POST", bytes.NewBuffer(data), 200)

		g.By("make http request to kick off build")
		// 201 Created is returned when the build is accepted onto the queue.
		immediateInteractionWithJenkins(fmt.Sprintf("http://%s/job/test-plugin-job/build?delay=0sec", hostPort), "POST", nil, 201)

		// the build and deployment is by far the most time consuming portion of the test jenkins job;
		// we leverage some of the openshift utilities for waiting for the deployment before we poll
		// jenkins for the successful job completion
		g.By("waiting for frontend, frontend-prod deployments as signs that the build has finished")
		// NOTE(review): WaitForADeploymentToComplete is called with two
		// arguments here but with a trailing `oc` argument in sibling tests —
		// confirm which signature this file's vendored exutil exposes.
		err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend")
		if err != nil {
			// Surface deployment logs before failing the assertion below.
			exutil.DumpDeploymentLogs("frontend", oc)
		}
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend-prod")
		if err != nil {
			exutil.DumpDeploymentLogs("frontend-prod", oc)
		}
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("get build console logs and see if succeeded")
		// Poll job run #1's console until it reports success or times out.
		err = waitForJenkinsActivity(fmt.Sprintf("http://%s/job/test-plugin-job/1/console", hostPort), "Finished: SUCCESS", 200)
		o.Expect(err).NotTo(o.HaveOccurred())
	})
})
// NOTE(review): this chunk begins inside an enclosing setup closure whose
// opening lines are outside this view; the statements below complete it.
g.By("waiting for builder service account")
// Builds cannot run until the project's "builder" service account exists.
err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
o.Expect(err).NotTo(o.HaveOccurred())
})

// Verifies that a Jenkins pipeline executing on a maven slave pod builds the
// sample app and leaves its service reachable.
g.Context("Pipeline with maven slave", func() {
	g.It("Should build and complete successfully", func() {
		// Deploy Jenkins
		g.By(fmt.Sprintf("calling oc new-app -f %q", jenkinsTemplatePath))
		err := oc.Run("new-app").Args("-f", jenkinsTemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// instantiate the template
		g.By(fmt.Sprintf("calling oc new-app -f %q", mavenSlavePipelinePath))
		err = oc.Run("new-app").Args("-f", mavenSlavePipelinePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// start the build
		g.By("starting the pipeline build and waiting for it to complete")
		br, _ := exutil.StartBuildAndWait(oc, "openshift-jee-sample")
		br.AssertSuccess()

		// wait for the service to be running
		g.By("expecting the openshift-jee-sample service to be deployed and running")
		_, err = exutil.GetEndpointAddress(oc, "openshift-jee-sample")
		o.Expect(err).NotTo(o.HaveOccurred())
	})
})

// NOTE(review): this Context's body continues past the end of this chunk.
g.Context("Orchestration pipeline", func() {
	g.It("Should build and complete successfully", func() {
		// Deploy Jenkins
// NOTE(review): chunk begins inside an enclosing teardown closure;
// ResetImage presumably restores the imagestream altered for the bad-image
// force-pull scenarios — confirm against the surrounding file.
exutil.ResetImage(resetData)
})
g.JustBeforeEach(func() {
	g.By("waiting for builder service account")
	// Builds cannot start until the project's "builder" service account exists.
	err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
	o.Expect(err).NotTo(o.HaveOccurred())
})

// forcePull=false with a bad local image: the build is expected to reuse the
// cached image, so the before/after builder images should be identical.
g.Context("\n FORCE PULL TEST: when s2i force pull is false and the image is bad", func() {
	g.It("\n FORCE PULL TEST s2i false", func() {
		fpFalseS2I := exutil.FixturePath("fixtures", "forcepull-false-s2i.json")
		g.By(fmt.Sprintf("\n%s FORCE PULL TEST s2i false: calling create on %s",
			time.Now().Format(time.RFC850), fpFalseS2I))
		exutil.StartBuild(fpFalseS2I, buildPrefix, oc)
		exutil.WaitForBuild("FORCE PULL TEST s2i false: ", buildName, oc)
		exutil.VerifyImagesSame(s2iDockBldr, custBldr, "FORCE PULL TEST s2i false: ")
	})
})

// forcePull=true: the builder must re-pull the image.
// NOTE(review): this It's body continues past the end of this chunk.
g.Context("\n FORCE PULL TEST: when s2i force pull is true and the image is bad", func() {
	g.It("\n FORCE PULL TEST s2i true", func() {
		fpTrueS2I := exutil.FixturePath("fixtures", "forcepull-true-s2i.json")
		g.By(fmt.Sprintf("\n%s FORCE PULL TEST s2i true: calling create on %s",
			time.Now().Format(time.RFC850), fpTrueS2I))
		exutil.StartBuild(fpTrueS2I, buildPrefix, oc)
		exutil.WaitForBuild("FORCE PULL TEST s2i true: ", buildName, oc)
// NOTE(review): chunk begins inside an enclosing closure; the first two lines
// complete it, and the trailing "})" closes an unseen enclosing Describe.
o.Expect(err).NotTo(o.HaveOccurred())
})

// Deploys Jenkins manually from a template, then verifies a JenkinsPipeline
// build succeeds against that instance.
g.Context("Manual deploy the jenkins and trigger a jenkins pipeline build", func() {
	g.It("JenkinsPipeline build should succeed when manual deploy the jenkins service", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.By(fmt.Sprintf("calling oc new-app -f %q", jenkinsTemplatePath))
		err := oc.Run("new-app").Args("-f", jenkinsTemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		//wait for the jenkins deployment complete
		g.By("waiting the jenkins service deployed")
		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins", oc)
		if err != nil {
			// Surface the jenkins deployment logs before failing.
			exutil.DumpDeploymentLogs("jenkins", oc)
		}
		o.Expect(err).NotTo(o.HaveOccurred())

		// create the pipeline build example
		g.By(fmt.Sprintf("calling oc new-app -f %q", pipelineTemplatePath))
		err = oc.Run("new-app").Args("-f", pipelineTemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("starting a pipeline build")
		br, _ := exutil.StartBuildAndWait(oc, "sample-pipeline")
		if !br.BuildSuccess {
			// Dump jenkins logs to aid debugging before the assertion fails.
			exutil.DumpDeploymentLogs("jenkins", oc)
		}
		br.AssertSuccess()
	})
})
})
g.Context("jenkins-plugin test context ", func() { g.It("jenkins-plugin test trigger build", func() { jobName := "test-build-job" data := j.readJenkinsJob("build-job.xml", oc.Namespace()) j.createItem(jobName, data) jmon := j.startJob(jobName) jmon.await(10 * time.Minute) // the build and deployment is by far the most time consuming portion of the test jenkins job; // we leverage some of the openshift utilities for waiting for the deployment before we poll // jenkins for the successful job completion g.By("waiting for frontend, frontend-prod deployments as signs that the build has finished") err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend", oc) o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend-prod", oc) o.Expect(err).NotTo(o.HaveOccurred()) g.By("get build console logs and see if succeeded") logs, err := j.waitForContent("Finished: SUCCESS", 200, 10*time.Minute, "job/%s/lastBuild/consoleText", jobName) ginkgolog("\n\nJenkins logs>\n%s\n\n", logs) o.Expect(err).NotTo(o.HaveOccurred()) }) g.It("jenkins-plugin test trigger build with envs", func() { jobName := "test-build-with-env-job" data := j.readJenkinsJob("build-with-env-job.xml", oc.Namespace()) j.createItem(jobName, data) jmon := j.startJob(jobName) jmon.await(10 * time.Minute) logs, err := j.getLastJobConsoleLogs(jobName) ginkgolog("\n\nJenkins logs>\n%s\n\n", logs) o.Expect(err).NotTo(o.HaveOccurred()) // the build and deployment is by far the most time consuming portion of the test jenkins job; // we leverage some of the openshift utilities for waiting for the deployment before we poll // jenkins for the successful job completion g.By("waiting for frontend deployments as signs that the build has finished") err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend", oc) o.Expect(err).NotTo(o.HaveOccurred()) 
g.By("get build console logs and see if succeeded") _, err = j.waitForContent("Finished: SUCCESS", 200, 10*time.Minute, "job/%s/lastBuild/consoleText", jobName) assertEnvVars(oc, "frontend-", map[string]string{ "a": "b", "C": "D", "e": "", }) }) g.It("jenkins-plugin test trigger build DSL", func() { buildsBefore, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) data, err := j.buildDSLJob(oc.Namespace(), "node{", "openshiftBuild( namespace:'PROJECT_NAME', bldCfg: 'frontend', env: [ [ name : 'a', value : 'b' ], [ name : 'C', value : 'D' ], [ name : 'e', value : '' ] ] )", "}", ) jobName := "test-build-dsl-job" j.createItem(jobName, data) monitor := j.startJob(jobName) err = monitor.await(10 * time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) err = wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) { buildsAfter, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) return (len(buildsAfter.Items) != len(buildsBefore.Items)), nil }) buildsAfter, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(buildsAfter.Items)).To(o.Equal(len(buildsBefore.Items) + 1)) log, err := j.getLastJobConsoleLogs(jobName) ginkgolog("Job logs>>\n%s\n\n", log) assertEnvVars(oc, "frontend-", map[string]string{ "a": "b", "C": "D", "e": "", }) }) g.It("jenkins-plugin test exec DSL", func() { // Create a running pod in which we can execute our commands oc.Run("new-app").Args("https://github.com/openshift/ruby-hello-world").Execute() var targetPod *kapi.Pod err := wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) { pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) for _, p := range pods.Items { if !strings.Contains(p.Name, "build") && !strings.Contains(p.Name, "deploy") && p.Status.Phase == "Running" { targetPod = &p return true, nil } } return 
false, nil }) o.Expect(err).NotTo(o.HaveOccurred()) targetContainer := targetPod.Spec.Containers[0] data, err := j.buildDSLJob(oc.Namespace(), "node{", fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', command: [ 'echo', 'hello', 'world', '1' ] )", targetPod.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', command: 'echo', arguments : [ 'hello', 'world', '2' ] )", targetPod.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', command: 'echo', arguments : [ [ value: 'hello' ], [ value : 'world' ], [ value : '3' ] ] )", targetPod.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', container: '%s', command: [ 'echo', 'hello', 'world', '4' ] )", targetPod.Name, targetContainer.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', container: '%s', command: 'echo', arguments : [ 'hello', 'world', '5' ] )", targetPod.Name, targetContainer.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', container: '%s', command: 'echo', arguments : [ [ value: 'hello' ], [ value : 'world' ], [ value : '6' ] ] )", targetPod.Name, targetContainer.Name), "}", ) jobName := "test-exec-dsl-job" j.createItem(jobName, data) monitor := j.startJob(jobName) err = monitor.await(10 * time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) log, err := j.getLastJobConsoleLogs(jobName) ginkgolog("Job logs>>\n%s\n\n", log) o.Expect(strings.Contains(log, "hello world 1")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 2")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 3")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 4")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 5")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 6")).To(o.BeTrue()) }) g.It("jenkins-plugin test multitag", func() { loadFixture(oc, "multitag-template.json") err := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) { _, err := 
oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "orig") if err != nil { return false, nil } return true, nil }) o.Expect(err).NotTo(o.HaveOccurred()) jobName := "test-multitag-job" data := j.readJenkinsJob("multitag-job.xml", oc.Namespace()) j.createItem(jobName, data) monitor := j.startJob(jobName) err = monitor.await(10 * time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) log, err := j.getLastJobConsoleLogs(jobName) ginkgolog("Job logs>>\n%s\n\n", log) // Assert stream tagging results _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod") o.Expect(err).NotTo(o.HaveOccurred()) // 1 to N mapping _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod2") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod3") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod4") o.Expect(err).NotTo(o.HaveOccurred()) // N to 1 mapping _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod5") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag2", "prod5") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "prod5") o.Expect(err).NotTo(o.HaveOccurred()) // N to N mapping _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod6") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag2", "prod7") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "prod8") o.Expect(err).NotTo(o.HaveOccurred()) // N to N mapping with creation _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag4", "prod9") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag5", "prod10") o.Expect(err).NotTo(o.HaveOccurred()) _, err = 
oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag6", "prod11") o.Expect(err).NotTo(o.HaveOccurred()) }) g.It("jenkins-plugin test multitag DSL", func() { testNamespace := oc.Namespace() loadFixture(oc, "multitag-template.json") err := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) { _, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "orig") if err != nil { return false, nil } return true, nil }) o.Expect(err).NotTo(o.HaveOccurred()) anotherNamespace := oc.Namespace() + "-multitag-target" oc.Run("new-project").Args(anotherNamespace).Execute() time.Sleep(10 * time.Second) // Give project time to initialize policies. // Allow jenkins service account to edit the new namespace oc.SetNamespace(anotherNamespace) err = oc.Run("policy").Args("add-role-to-user", "edit", "system:serviceaccount:"+j.namespace+":jenkins").Execute() o.Expect(err).NotTo(o.HaveOccurred()) oc.SetNamespace(testNamespace) ginkgolog("Using testNamespace: %q and currentNamespace: %q", testNamespace, oc.Namespace()) data, err := j.buildDSLJob(oc.Namespace(), "node{", "openshiftTag( namespace:'PROJECT_NAME', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag', destTag: 'prod' )", "openshiftTag( namespace:'PROJECT_NAME', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag2', destTag: 'prod1, prod2, prod3' )", "openshiftTag( namespace:'PROJECT_NAME', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag2,multitag7', destTag: 'prod4' )", "openshiftTag( namespace:'PROJECT_NAME', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag5,multitag6', destTag: 'prod5, prod6' )", fmt.Sprintf("openshiftTag( namespace:'PROJECT_NAME', destinationNamespace: '%s', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag', destTag: 'prod' )", anotherNamespace), fmt.Sprintf("openshiftTag( namespace:'PROJECT_NAME', destinationNamespace: '%s', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag2', destTag: 'prod1, prod2, prod3' )", 
anotherNamespace), fmt.Sprintf("openshiftTag( namespace:'PROJECT_NAME', destinationNamespace: '%s', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag2,multitag7', destTag: 'prod4' )", anotherNamespace), fmt.Sprintf("openshiftTag( namespace:'PROJECT_NAME', destinationNamespace: '%s', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag5,multitag6', destTag: 'prod5, prod6' )", anotherNamespace), "}", ) jobName := "test-multitag-dsl-job" j.createItem(jobName, data) monitor := j.startJob(jobName) err = monitor.await(10 * time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) time.Sleep(10 * time.Second) log, err := j.getLastJobConsoleLogs(jobName) o.Expect(err).NotTo(o.HaveOccurred()) ginkgolog("Job logs>>\n%s\n\n", log) // Assert stream tagging results for _, namespace := range []string{oc.Namespace(), anotherNamespace} { g.By("Checking tags in namespace: " + namespace) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag", "prod") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod1") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod2") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod3") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod4") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag5", "prod5") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag6", "prod6") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag7", "prod4") o.Expect(err).NotTo(o.HaveOccurred()) } }) testImageStreamSCM := func(jobXMLFile string) { jobName := "test-imagestream-scm" g.By("creating a jenkins job with an imagestream SCM") data := j.readJenkinsJob(jobXMLFile, oc.Namespace()) j.createItem(jobName, data) // 
Because polling is enabled, a job should start automatically and fail // Wait for it to run and fail tree := url.QueryEscape("jobs[name,color]") xpath := url.QueryEscape("//job/name[text()='test-imagestream-scm']/../color") jobStatusURI := "api/xml?tree=%s&xpath=%s" g.By("waiting for initial job to complete") wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) { result, status, err := j.getResource(jobStatusURI, tree, xpath) o.Expect(err).NotTo(o.HaveOccurred()) if status == 200 && strings.Contains(result, "red") { return true, nil } return false, nil }) // Create a new imagestream tag and expect a job to be kicked off // that will create a new tag in the current namespace g.By("creating an imagestream tag in the current project") oc.Run("tag").Args("openshift/jenkins:latest", fmt.Sprintf("%s/testimage:v1", oc.Namespace())).Execute() // Wait after the image has been tagged for the Jenkins job to run // and create the new imagestream/tag g.By("verifying that the job ran by looking for the resulting imagestream tag") err := exutil.TimedWaitForAnImageStreamTag(oc, oc.Namespace(), "localjenkins", "develop", 10*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) } g.It("jenkins-plugin test imagestream SCM", func() { testImageStreamSCM("imagestream-scm-job.xml") }) g.It("jenkins-plugin test imagestream SCM DSL", func() { testImageStreamSCM("imagestream-scm-dsl-job.xml") }) g.It("jenkins-plugin test connection test", func() { jobName := "test-build-job" data := j.readJenkinsJob("build-job.xml", oc.Namespace()) j.createItem(jobName, data) g.By("trigger test connection logic, check for success") testConnectionBody := bytes.NewBufferString("apiURL=&authToken=") result, code, err := j.post(testConnectionBody, "job/test-build-job/descriptorByName/com.openshift.jenkins.plugins.pipeline.OpenShiftBuilder/testConnection", "application/x-www-form-urlencoded") if code != 200 { err = fmt.Errorf("Expected return code of 200") } if matched, _ := 
regexp.MatchString(".*Connection successful.*", result); !matched { err = fmt.Errorf("Expecting 'Connection successful', Got: %s", result) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("trigger test connection logic, check for failure") testConnectionBody = bytes.NewBufferString("apiURL=https%3A%2F%2F1.2.3.4&authToken=") result, code, err = j.post(testConnectionBody, "job/test-build-job/descriptorByName/com.openshift.jenkins.plugins.pipeline.OpenShiftBuilder/testConnection", "application/x-www-form-urlencoded") if code != 200 { err = fmt.Errorf("Expected return code of 200") } if matched, _ := regexp.MatchString(".*Connection unsuccessful.*", result); !matched { err = fmt.Errorf("Expecting 'Connection unsuccessful', Got: %s", result) } o.Expect(err).NotTo(o.HaveOccurred()) }) })
// ForcePull matrix: every builder strategy (s2i, docker, custom) is run with
// forcePull disabled and then enabled against the same app/lang build.
g.Context("ForcePull test context ", func() {
	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		// Builds cannot run until the project's "builder" service account exists.
		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.It("ForcePull test case execution", func() {
		// Table of scenarios; order and arguments mirror the original call
		// sequence exactly. `flag` is doTest's third argument (its semantics
		// are defined where doTest is declared).
		scenarios := []struct {
			step   string
			prefix string
			desc   string
			flag   bool
		}{
			{"when s2i force pull is false", buildPrefixFS, "s2i false app/lang build", true},
			{"when s2i force pull is true", buildPrefixTS, "s2i true app/lang build", false},
			{"when docker force pull is false", buildPrefixFD, "dock false app/lang build", true},
			{"docker when force pull is true", buildPrefixTD, "dock true app/lang build", false},
			{"when custom force pull is false", buildPrefixFC, "cust false app/lang build", true},
			{"when custom force pull is true", buildPrefixTC, "cust true app/lang build", false},
		}
		for _, s := range scenarios {
			g.By(s.step)
			doTest(s.prefix, s.desc, s.flag, oc)
		}
	})
})
g.Context("jenkins-plugin test context ", func() { g.It("jenkins-plugin test trigger build", func() { jobName := "test-build-job" data := j.readJenkinsJob("build-job.xml", oc.Namespace()) j.createItem(jobName, data) jmon := j.startJob(jobName) jmon.await(10 * time.Minute) // the build and deployment is by far the most time consuming portion of the test jenkins job; // we leverage some of the openshift utilities for waiting for the deployment before we poll // jenkins for the successful job completion g.By("waiting for frontend, frontend-prod deployments as signs that the build has finished") err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend", oc) o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend-prod", oc) o.Expect(err).NotTo(o.HaveOccurred()) g.By("get build console logs and see if succeeded") logs, err := j.waitForContent("Finished: SUCCESS", 200, 10*time.Minute, "job/%s/lastBuild/consoleText", jobName) ginkgolog("\n\nJenkins logs>\n%s\n\n", logs) o.Expect(err).NotTo(o.HaveOccurred()) }) g.It("jenkins-plugin test trigger build with envs", func() { jobName := "test-build-with-env-job" data := j.readJenkinsJob("build-with-env-job.xml", oc.Namespace()) j.createItem(jobName, data) jmon := j.startJob(jobName) jmon.await(10 * time.Minute) logs, err := j.getLastJobConsoleLogs(jobName) ginkgolog("\n\nJenkins logs>\n%s\n\n", logs) o.Expect(err).NotTo(o.HaveOccurred()) // the build and deployment is by far the most time consuming portion of the test jenkins job; // we leverage some of the openshift utilities for waiting for the deployment before we poll // jenkins for the successful job completion g.By("waiting for frontend deployments as signs that the build has finished") err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend", oc) o.Expect(err).NotTo(o.HaveOccurred()) 
g.By("get build console logs and see if succeeded") _, err = j.waitForContent("Finished: SUCCESS", 200, 10*time.Minute, "job/%s/lastBuild/consoleText", jobName) assertEnvVars(oc, "frontend-", map[string]string{ "a": "b", "C": "D", "e": "", }) }) g.It("jenkins-plugin test trigger build DSL", func() { buildsBefore, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) data, err := j.buildDSLJob(oc.Namespace(), "node{", "openshiftBuild( namespace:'PROJECT_NAME', bldCfg: 'frontend', env: [ [ name : 'a', value : 'b' ], [ name : 'C', value : 'D' ], [ name : 'e', value : '' ] ] )", "}", ) jobName := "test-build-dsl-job" j.createItem(jobName, data) monitor := j.startJob(jobName) err = monitor.await(10 * time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) err = wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) { buildsAfter, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) return (len(buildsAfter.Items) != len(buildsBefore.Items)), nil }) buildsAfter, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(buildsAfter.Items)).To(o.Equal(len(buildsBefore.Items) + 1)) log, err := j.getLastJobConsoleLogs(jobName) ginkgolog("Job logs>>\n%s\n\n", log) assertEnvVars(oc, "frontend-", map[string]string{ "a": "b", "C": "D", "e": "", }) }) g.It("jenkins-plugin test exec DSL", func() { // Create a running pod in which we can execute our commands oc.Run("new-app").Args("https://github.com/openshift/ruby-hello-world").Execute() var targetPod *kapi.Pod err := wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) { pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) for _, p := range pods.Items { if !strings.Contains(p.Name, "build") && !strings.Contains(p.Name, "deploy") && p.Status.Phase == "Running" { targetPod = &p return true, nil } } return 
false, nil }) o.Expect(err).NotTo(o.HaveOccurred()) targetContainer := targetPod.Spec.Containers[0] data, err := j.buildDSLJob(oc.Namespace(), "node{", fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', command: [ 'echo', 'hello', 'world', '1' ] )", targetPod.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', command: 'echo', arguments : [ 'hello', 'world', '2' ] )", targetPod.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', command: 'echo', arguments : [ [ value: 'hello' ], [ value : 'world' ], [ value : '3' ] ] )", targetPod.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', container: '%s', command: [ 'echo', 'hello', 'world', '4' ] )", targetPod.Name, targetContainer.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', container: '%s', command: 'echo', arguments : [ 'hello', 'world', '5' ] )", targetPod.Name, targetContainer.Name), fmt.Sprintf("openshiftExec( namespace:'PROJECT_NAME', pod: '%s', container: '%s', command: 'echo', arguments : [ [ value: 'hello' ], [ value : 'world' ], [ value : '6' ] ] )", targetPod.Name, targetContainer.Name), "}", ) jobName := "test-exec-dsl-job" j.createItem(jobName, data) monitor := j.startJob(jobName) err = monitor.await(10 * time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) log, err := j.getLastJobConsoleLogs(jobName) ginkgolog("Job logs>>\n%s\n\n", log) o.Expect(strings.Contains(log, "hello world 1")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 2")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 3")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 4")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 5")).To(o.BeTrue()) o.Expect(strings.Contains(log, "hello world 6")).To(o.BeTrue()) }) g.It("jenkins-plugin test multitag", func() { loadFixture(oc, "multitag-template.json") err := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) { _, err := 
oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "orig") if err != nil { return false, nil } return true, nil }) o.Expect(err).NotTo(o.HaveOccurred()) jobName := "test-multitag-job" data := j.readJenkinsJob("multitag-job.xml", oc.Namespace()) j.createItem(jobName, data) monitor := j.startJob(jobName) err = monitor.await(10 * time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) log, err := j.getLastJobConsoleLogs(jobName) ginkgolog("Job logs>>\n%s\n\n", log) // Assert stream tagging results _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod") o.Expect(err).NotTo(o.HaveOccurred()) // 1 to N mapping _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod2") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod3") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod4") o.Expect(err).NotTo(o.HaveOccurred()) // N to 1 mapping _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod5") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag2", "prod5") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "prod5") o.Expect(err).NotTo(o.HaveOccurred()) // N to N mapping _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod6") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag2", "prod7") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "prod8") o.Expect(err).NotTo(o.HaveOccurred()) // N to N mapping with creation _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag4", "prod9") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag5", "prod10") o.Expect(err).NotTo(o.HaveOccurred()) _, err = 
oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag6", "prod11") o.Expect(err).NotTo(o.HaveOccurred()) }) g.It("jenkins-plugin test multitag DSL", func() { testNamespace := oc.Namespace() loadFixture(oc, "multitag-template.json") err := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) { _, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "orig") if err != nil { return false, nil } return true, nil }) o.Expect(err).NotTo(o.HaveOccurred()) anotherNamespace := oc.Namespace() + "-multitag-target" oc.Run("new-project").Args(anotherNamespace).Execute() time.Sleep(10 * time.Second) // Give project time to initialize policies. // Allow jenkins service account to edit the new namespace oc.SetNamespace(anotherNamespace) err = oc.Run("policy").Args("add-role-to-user", "edit", "system:serviceaccount:"+j.namespace+":jenkins").Execute() o.Expect(err).NotTo(o.HaveOccurred()) oc.SetNamespace(testNamespace) ginkgolog("Using testNamespace: %q and currentNamespace: %q", testNamespace, oc.Namespace()) data, err := j.buildDSLJob(oc.Namespace(), "node{", "openshiftTag( namespace:'PROJECT_NAME', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag', destTag: 'prod' )", "openshiftTag( namespace:'PROJECT_NAME', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag2', destTag: 'prod1, prod2, prod3' )", "openshiftTag( namespace:'PROJECT_NAME', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag2,multitag7', destTag: 'prod4' )", "openshiftTag( namespace:'PROJECT_NAME', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag5,multitag6', destTag: 'prod5, prod6' )", fmt.Sprintf("openshiftTag( namespace:'PROJECT_NAME', destinationNamespace: '%s', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag', destTag: 'prod' )", anotherNamespace), fmt.Sprintf("openshiftTag( namespace:'PROJECT_NAME', destinationNamespace: '%s', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag2', destTag: 'prod1, prod2, prod3' )", 
anotherNamespace), fmt.Sprintf("openshiftTag( namespace:'PROJECT_NAME', destinationNamespace: '%s', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag2,multitag7', destTag: 'prod4' )", anotherNamespace), fmt.Sprintf("openshiftTag( namespace:'PROJECT_NAME', destinationNamespace: '%s', srcStream: 'multitag', srcTag: 'orig', destStream: 'multitag5,multitag6', destTag: 'prod5, prod6' )", anotherNamespace), "}", ) jobName := "test-multitag-dsl-job" j.createItem(jobName, data) monitor := j.startJob(jobName) err = monitor.await(10 * time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) time.Sleep(10 * time.Second) log, err := j.getLastJobConsoleLogs(jobName) o.Expect(err).NotTo(o.HaveOccurred()) ginkgolog("Job logs>>\n%s\n\n", log) // Assert stream tagging results for _, namespace := range []string{oc.Namespace(), anotherNamespace} { g.By("Checking tags in namespace: " + namespace) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag", "prod") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod1") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod2") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod3") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod4") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag5", "prod5") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag6", "prod6") o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.REST().ImageStreamTags(namespace).Get("multitag7", "prod4") o.Expect(err).NotTo(o.HaveOccurred()) } }) })
// NOTE(review): chunk begins inside an enclosing setup closure; the first
// "})" below belongs to it, and the trailing Describe is not closed in this
// view.
g.By("Waiting for endpoints to be up")
err = waitForEndpointsAvailable(oc, serviceName)
o.Expect(err).ToNot(o.HaveOccurred())
})

g.AfterEach(func() {
	g.By("Cleaning up the idling file")
	// Best-effort removal; the error is deliberately ignored during cleanup.
	os.Remove(idlingFile)
})

// Idling behavior for a service backed by different scalable resource kinds.
g.Describe("idling", func() {
	g.Context("with a single service and DeploymentConfig [Conformance]", func() {
		g.BeforeEach(func() {
			framework.BeforeEach()
			// fixture is read by the suite's setup (outside this view) to
			// choose which echo-server manifest to load.
			fixture = echoServerFixture
		})

		g.It("should idle the service and DeploymentConfig properly", func() {
			checkSingleIdle(oc, idlingFile, resources, "deploymentconfig", "DeploymentConfig")
		})
	})

	g.Context("with a single service and ReplicationController", func() {
		g.BeforeEach(func() {
			framework.BeforeEach()
			fixture = echoServerRcFixture
		})

		g.It("should idle the service and ReplicationController properly", func() {
			checkSingleIdle(oc, idlingFile, resources, "replicationcontroller", "ReplicationController")
		})
	})