func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) {
	itBodyValue := reflect.ValueOf(itBody)
	if itBodyValue.Kind() != reflect.Func {
		panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody))
	}

	if pending {
		ginkgo.PDescribe(description, func() {
			for _, entry := range entries {
				entry.generateIt(itBodyValue)
			}
		})
	} else if focused {
		ginkgo.FDescribe(description, func() {
			for _, entry := range entries {
				entry.generateIt(itBodyValue)
			}
		})
	} else {
		ginkgo.Describe(description, func() {
			for _, entry := range entries {
				entry.generateIt(itBodyValue)
			}
		})
	}
}
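// Usage sketch (illustrative, not from the source): how a table-driven spec
// might be declared, assuming this package also exposes public DescribeTable
// and Entry wrappers that ultimately call describeTable above. The spec name,
// body, and entry values below are hypothetical.
var _ = DescribeTable("integer addition",
	func(a, b, expected int) {
		Expect(a + b).To(Equal(expected))
	},
	Entry("positive operands", 1, 2, 3),
	Entry("negative operands", -1, -2, -3),
)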
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It(fmt.Sprintf("should build a "+c.repoName+" image and run it in a pod"), func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				exutil.CheckOpenShiftNamespaceImageStreams(oc)
				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				// all the templates automatically start a build.
				buildName := c.buildConfigName + "-1"

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
				if err != nil {
					exutil.DumpBuildLogs(c.buildConfigName, oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the app deployment to be complete")
				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				if len(c.dbDeploymentConfigName) > 0 {
					g.By("expecting the db deployment to be complete")
					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
					o.Expect(err).NotTo(o.HaveOccurred())
				}

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying string from app request")
				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, time.Duration(30*time.Second))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
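// Registration sketch (illustrative, not from the source): how a sample-repo
// test could be wired up with NewSampleRepoTest from within the same package,
// since the SampleRepoConfig fields are unexported. Every value below is a
// placeholder assumption, not a URL or name taken from the source.
var _ = g.Describe("[image_ecosystem] sample repo smoke test", NewSampleRepoTest(
	SampleRepoConfig{
		repoName:             "example",
		templateURL:          "https://example.com/example-template.json",
		buildConfigName:      "example",
		serviceName:          "example",
		deploymentConfigName: "example",
		appPath:              "/",
		expectedString:       "Welcome",
	},
))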
var _ = g.Describe("[builds][Slow] build can have Docker image source", func() { defer g.GinkgoRecover() var ( buildFixture = exutil.FixturePath("testdata", "test-imagesource-build.yaml") oc = exutil.NewCLI("build-image-source", exutil.KubeConfigPath()) imageSourceLabel = exutil.ParseLabelsOrDie("app=imagesourceapp") imageDockerLabel = exutil.ParseLabelsOrDie("app=imagedockerapp") ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for imagestreams to be imported") err = exutil.WaitForAnImageStream(oc.AdminREST().ImageStreams("openshift"), "jenkins", exutil.CheckImageStreamLatestTagPopulatedFn, exutil.CheckImageStreamTagNotFoundFn) o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("build with image source", func() { g.It("should complete successfully and contain the expected file", func() { g.By("Creating build configs for source build") err := oc.Run("create").Args("-f", buildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting the source strategy build") err = oc.Run("start-build").Args("imagesourcebuild").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the builds to complete successfully") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "imagesourcebuild-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) if err != nil { exutil.DumpBuildLogs("imagesourcebuild", oc) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the pod to deploy successfully") pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), imageSourceLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods)).To(o.Equal(1)) pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0]) o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the pod to contain the file from the input image") out, err := oc.Run("exec").Args(pod.Name, "-c", pod.Spec.Containers[0].Name, "--", "ls", "injected/dir").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("jenkins.war")) }) }) g.Describe("build with image docker", func() { g.It("should complete successfully and contain the expected file", func() { g.By("Creating build configs for docker build") err := oc.Run("create").Args("-f", buildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting the docker strategy build") err = oc.Run("start-build").Args("imagedockerbuild").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("expect the builds to complete successfully") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "imagedockerbuild-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) if err != nil { exutil.DumpBuildLogs("imagedockerbuild", oc) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("expect the pod to deploy successfully") pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), imageDockerLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods)).To(o.Equal(1)) pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0]) o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the pod to contain the file from the input image") out, err := oc.Run("exec").Args(pod.Name, "-c", pod.Spec.Containers[0].Name, "--", "ls", "injected/dir").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("jenkins.war")) }) }) })
var _ = g.Describe("[builds] can use build secrets", func() { defer g.GinkgoRecover() var ( buildSecretBaseDir = exutil.FixturePath("fixtures", "build-secrets") secretsFixture = filepath.Join(buildSecretBaseDir, "test-secret.json") secondSecretsFixture = filepath.Join(buildSecretBaseDir, "test-secret-2.json") isFixture = filepath.Join(buildSecretBaseDir, "test-is.json") dockerBuildFixture = filepath.Join(buildSecretBaseDir, "test-docker-build.json") sourceBuildFixture = filepath.Join(buildSecretBaseDir, "test-sti-build.json") oc = exutil.NewCLI("build-secrets", exutil.KubeConfigPath()) ) g.Describe("build with secrets", func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.It("should print the secrets during the source strategy build", func() { g.By("creating the sample secret files") err := oc.Run("create").Args("-f", secretsFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("create").Args("-f", secondSecretsFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("creating the sample source build config and image stream") err = oc.Run("create").Args("-f", isFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("create").Args("-f", sourceBuildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting the sample source build") out, err := oc.Run("start-build").Args("test", "--follow", "--wait").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("secret1=secret1")) o.Expect(out).To(o.ContainSubstring("secret3=secret3")) o.Expect(out).To(o.ContainSubstring("relative-secret1=secret1")) o.Expect(out).To(o.ContainSubstring("relative-secret3=secret3")) g.By("checking the status of the build") build, err := oc.REST().Builds(oc.Namespace()).Get("test-1") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseComplete)) g.By("getting the image name") image, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest") o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying the build secrets are not present in the output image") pod := exutil.GetPodForContainer(kapi.Container{Name: "test", Image: image}) oc.KubeFramework().TestContainerOutput("test-build-secret-source", pod, 0, []string{ "relative-secret1=empty", "secret3=empty", }) }) g.It("should print the secrets during the docker strategy build", func() { g.By("creating the sample secret files") err := oc.Run("create").Args("-f", secretsFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("create").Args("-f", secondSecretsFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("creating the sample source build config and image stream") err = oc.Run("create").Args("-f", isFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("create").Args("-f", dockerBuildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting the sample source build") out, err := oc.Run("start-build").Args("test", "--follow", "--wait").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("secret1=secret1")) o.Expect(out).To(o.ContainSubstring("relative-secret2=secret2")) g.By("checking the status of the build") build, err := oc.REST().Builds(oc.Namespace()).Get("test-1") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseComplete)) }) }) })
var _ = bdd.Describe("Writers", func() { bdd.BeforeEach(func() { reset.Enable() }) bdd.AfterEach(func() { reset.Disable() }) bdd.It("ErrorWriter", func() { w := ErrorWriter(0) validErrorWrite(w, 10, 0) w = ErrorWriter(5) validErrorWriteSuccess(w, 3) validErrorWrite(w, 10, 2) validErrorWrite(w, 10, 0) w = ErrorWriter(5) validErrorWriteSuccess(w, 5) validErrorWrite(w, 10, 0) }) bdd.It("SlowWriter", func() { buf := &bytes.Buffer{} w := NewSlowWriter(buf, 15*time.Millisecond) tStart := time.Now() Ω(w.Write([]byte("abc"))).Should(Equal(3)) tEnd := time.Now() d := tEnd.Sub(tStart) Ω(buf.Bytes()).Should(Equal([]byte("abc"))) Ω(d > 15*time.Millisecond).Should(BeTrue()) Ω(d < 25*time.Millisecond).Should(BeTrue()) tStart = time.Now() Ω(w.Write([]byte("cde"))).Should(Equal(3)) tEnd = time.Now() Ω(buf.Bytes()).Should(Equal([]byte("abccde"))) d = tEnd.Sub(tStart) Ω(d > 15*time.Millisecond).Should(BeTrue()) Ω(d < 25*time.Millisecond).Should(BeTrue()) }) })
var (
	postgreSQLReplicationTemplate = "https://raw.githubusercontent.com/openshift/postgresql/master/examples/replica/postgresql_replica.json"
	postgreSQLEphemeralTemplate   = exutil.FixturePath("..", "..", "examples", "db-templates", "postgresql-ephemeral-template.json")
	postgreSQLHelperName          = "postgresql-helper"
	postgreSQLImages              = []string{
		"openshift/postgresql-92-centos7",
		"centos/postgresql-94-centos7",
		"registry.access.redhat.com/openshift3/postgresql-92-rhel7",
		"registry.access.redhat.com/rhscl/postgresql-94-rhel7",
	}
)

var _ = g.Describe("[LocalNode][images][postgresql][Slow] openshift postgresql replication", func() {
	defer g.GinkgoRecover()

	for i, image := range postgreSQLImages {
		oc := exutil.NewCLI(fmt.Sprintf("postgresql-replication-%d", i), exutil.KubeConfigPath())
		testFn := PostgreSQLReplicationTestFactory(oc, image)
		g.It(fmt.Sprintf("postgresql replication works for %s", image), testFn)
	}
})

// CreatePostgreSQLReplicationHelpers creates a set of PostgreSQL helpers for the master,
// the slaves, and an extra helper that is used for the remote login test.
func CreatePostgreSQLReplicationHelpers(c kclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
	podNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", masterDeployment)), exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())
	masterPod := podNames[0]

	slavePods, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", slaveDeployment)), exutil.CheckPodIsRunningFn, slaveCount, 3*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())
var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func() { defer g.GinkgoRecover() var ( // Use invalid source here as we don't care about the result oc = exutil.NewCLI("cli-build-run-policy", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) // Create all fixtures oc.Run("create").Args("-f", exutil.FixturePath("testdata", "run_policy")).Execute() }) g.Describe("build configuration with Parallel build run policy", func() { g.It("runs the builds in parallel", func() { g.By("starting multiple builds") var ( startedBuilds []string counter int ) bcName := "sample-parallel-build" buildWatch, err := oc.Client().Builds(oc.Namespace()).Watch(kapi.ListOptions{ LabelSelector: buildutil.BuildConfigSelector(bcName), }) defer buildWatch.Stop() // Start first build stdout, _, err := exutil.StartBuild(oc, bcName, "-o=name") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.TrimSpace(stdout)).ShouldNot(o.HaveLen(0)) // extract build name from "build/buildName" resource id startedBuilds = append(startedBuilds, strings.TrimSpace(strings.Split(stdout, "/")[1])) // Wait for it to become running for { event := <-buildWatch.ResultChan() build := event.Object.(*buildapi.Build) o.Expect(buildutil.IsBuildComplete(build)).Should(o.BeFalse()) if build.Name == startedBuilds[0] && build.Status.Phase == buildapi.BuildPhaseRunning { break } } for i := 0; i < 2; i++ { stdout, _, err = exutil.StartBuild(oc, bcName, "-o=name") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(strings.TrimSpace(stdout)).ShouldNot(o.HaveLen(0)) startedBuilds = append(startedBuilds, strings.TrimSpace(strings.Split(stdout, "/")[1])) } o.Expect(err).NotTo(o.HaveOccurred()) for { event := <-buildWatch.ResultChan() build := event.Object.(*buildapi.Build) if build.Name == startedBuilds[0] { if buildutil.IsBuildComplete(build) { break } continue } // When the the other two builds we started after waiting for the first // build to become running are Pending, verify the first build is still // running (so the other two builds are started in parallel with first // build). // TODO: This might introduce flakes in case the first build complete // sooner or fail. 
if build.Status.Phase == buildapi.BuildPhasePending { c := buildclient.NewOSClientBuildClient(oc.Client()) firstBuildRunning := false _, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool { if b.Name == startedBuilds[0] && b.Status.Phase == buildapi.BuildPhaseRunning { firstBuildRunning = true } return false }) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(firstBuildRunning).Should(o.BeTrue()) counter++ } // When the build failed or completed prematurely, fail the test o.Expect(buildutil.IsBuildComplete(build)).Should(o.BeFalse()) if counter == 2 { break } } o.Expect(counter).Should(o.BeEquivalentTo(2)) }) }) g.Describe("build configuration with Serial build run policy", func() { g.It("runs the builds in serial order", func() { g.By("starting multiple builds") var ( startedBuilds []string counter int ) bcName := "sample-serial-build" buildVerified := map[string]bool{} for i := 0; i < 3; i++ { stdout, _, err := exutil.StartBuild(oc, bcName, "-o=name") o.Expect(err).NotTo(o.HaveOccurred()) startedBuilds = append(startedBuilds, strings.TrimSpace(strings.Split(stdout, "/")[1])) } buildWatch, err := oc.Client().Builds(oc.Namespace()).Watch(kapi.ListOptions{ LabelSelector: buildutil.BuildConfigSelector(bcName), }) defer buildWatch.Stop() o.Expect(err).NotTo(o.HaveOccurred()) for { event := <-buildWatch.ResultChan() build := event.Object.(*buildapi.Build) if build.Status.Phase == buildapi.BuildPhaseRunning { // Ignore events from complete builds (if there are any) if we already // verified the build. if _, exists := buildVerified[build.Name]; exists { continue } // Verify there are no other running or pending builds than this // build as serial build always runs alone. c := buildclient.NewOSClientBuildClient(oc.Client()) builds, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool { if b.Name == build.Name { return false } if b.Status.Phase == buildapi.BuildPhaseRunning || b.Status.Phase == buildapi.BuildPhasePending { return true } return false }) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(builds.Items).Should(o.BeEmpty()) // The builds should start in the same order as they were created. o.Expect(build.Name).Should(o.BeEquivalentTo(startedBuilds[counter])) buildVerified[build.Name] = true counter++ } if counter == len(startedBuilds) { break } } }) }) g.Describe("build configuration with SerialLatestOnly build run policy", func() { g.It("runs the builds in serial order but cancel previous builds", func() { g.By("starting multiple builds") var ( startedBuilds []string expectedRunningBuild int wasCancelled bool ) bcName := "sample-serial-latest-only-build" buildVerified := map[string]bool{} buildWatch, err := oc.Client().Builds(oc.Namespace()).Watch(kapi.ListOptions{ LabelSelector: buildutil.BuildConfigSelector(bcName), }) defer buildWatch.Stop() o.Expect(err).NotTo(o.HaveOccurred()) stdout, _, err := exutil.StartBuild(oc, bcName, "-o=name") o.Expect(err).NotTo(o.HaveOccurred()) startedBuilds = append(startedBuilds, strings.TrimSpace(strings.Split(stdout, "/")[1])) // Wait for the first build to become running for { event := <-buildWatch.ResultChan() build := event.Object.(*buildapi.Build) if build.Name == startedBuilds[0] { if build.Status.Phase == buildapi.BuildPhaseRunning { buildVerified[build.Name] = true // now expect the last build to be run. 
expectedRunningBuild = 2 break } o.Expect(buildutil.IsBuildComplete(build)).Should(o.BeFalse()) } } // Trigger two more builds for i := 0; i < 2; i++ { stdout, _, err = exutil.StartBuild(oc, bcName, "-o=name") o.Expect(err).NotTo(o.HaveOccurred()) startedBuilds = append(startedBuilds, strings.TrimSpace(strings.Split(stdout, "/")[1])) } // Verify that the first build will complete and the next build to run // will be the last build created. for { event := <-buildWatch.ResultChan() build := event.Object.(*buildapi.Build) e2e.Logf("got event for build %s with phase %s", build.Name, build.Status.Phase) // The second build should be cancelled if build.Status.Phase == buildapi.BuildPhaseCancelled { if build.Name == startedBuilds[1] { buildVerified[build.Name] = true wasCancelled = true } } // Only first and third build should actually run (serially). if build.Status.Phase == buildapi.BuildPhaseRunning { // Ignore events from complete builds (if there are any) if we already // verified the build. if _, exists := buildVerified[build.Name]; exists { continue } // Verify there are no other running or pending builds than this // build as serial build always runs alone. c := buildclient.NewOSClientBuildClient(oc.Client()) builds, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool { e2e.Logf("[%s] build %s is %s", build.Name, b.Name, b.Status.Phase) if b.Name == build.Name { return false } if b.Status.Phase == buildapi.BuildPhaseRunning || b.Status.Phase == buildapi.BuildPhasePending { return true } return false }) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(builds.Items).Should(o.BeEmpty()) // The builds should start in the same order as they were created. o.Expect(build.Name).Should(o.BeEquivalentTo(startedBuilds[expectedRunningBuild])) buildVerified[build.Name] = true } if len(buildVerified) == len(startedBuilds) { break } } o.Expect(wasCancelled).Should(o.BeEquivalentTo(true)) }) }) })
var _ = g.Describe("deploymentconfigs", func() { defer g.GinkgoRecover() var ( oc = exutil.NewCLI("cli-deployment", exutil.KubeConfigPath()) deploymentFixture = exutil.FixturePath("testdata", "test-deployment-test.yaml") simpleDeploymentFixture = exutil.FixturePath("testdata", "deployment-simple.yaml") customDeploymentFixture = exutil.FixturePath("testdata", "custom-deployment.yaml") generationFixture = exutil.FixturePath("testdata", "generation-test.yaml") pausedDeploymentFixture = exutil.FixturePath("testdata", "paused-deployment.yaml") failedHookFixture = exutil.FixturePath("testdata", "failing-pre-hook.yaml") brokenDeploymentFixture = exutil.FixturePath("testdata", "test-deployment-broken.yaml") historyLimitedDeploymentFixture = exutil.FixturePath("testdata", "deployment-history-limit.yaml") minReadySecondsFixture = exutil.FixturePath("testdata", "deployment-min-ready-seconds.yaml") multipleICTFixture = exutil.FixturePath("testdata", "deployment-example.yaml") tagImagesFixture = exutil.FixturePath("testdata", "tag-images-deployment.yaml") ) g.Describe("when run iteratively", func() { g.AfterEach(func() { failureTrap(oc, "deployment-simple", g.CurrentGinkgoTestDescription().Failed) }) g.It("should only deploy the last deployment [Conformance]", func() { _, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output() o.Expect(err).NotTo(o.HaveOccurred()) iterations := 15 for i := 0; i < iterations; i++ { if rand.Float32() < 0.2 { time.Sleep(time.Duration(rand.Float32() * rand.Float32() * float32(time.Second))) } switch n := rand.Float32(); { case n < 0.4: // trigger a new deployment e2e.Logf("%02d: triggering a new deployment with config change", i) out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("updated")) case n < 0.7: // cancel any running deployment e2e.Logf("%02d: cancelling deployment", i) if out, err := oc.Run("deploy").Args("dc/deployment-simple", "--cancel").Output(); err != nil { // TODO: we should fix this if !strings.Contains(out, "the object has been modified") { o.Expect(err).NotTo(o.HaveOccurred()) } e2e.Logf("--cancel deployment failed due to conflict: %v", err) } case n < 0.0: // delete the deployer pod - disabled because it forces the system to wait for the sync loop e2e.Logf("%02d: deleting one or more deployer pods", i) _, rcs, pods, err := deploymentInfo(oc, "deployment-simple") if err != nil { e2e.Logf("%02d: unable to get deployment info: %v", i, err) continue } all, err := deploymentPods(pods) if err != nil { e2e.Logf("%02d: unable to get deployment pods: %v", i, err) continue } if len(all) == 0 { e2e.Logf("%02d: no deployer pods", i) continue } top := len(rcs) - 1 for j := top; i >= top-1 && j >= 0; j-- { pods, ok := all[rcs[j].Name] if !ok { e2e.Logf("%02d: no deployer pod for rc %q", i, rcs[j].Name) continue } for _, pod := range pods { e2e.Logf("%02d: deleting deployer pod %s", i, pod.Name) options := kapi.NewDeleteOptions(0) if rand.Float32() < 0.5 { options = nil } if err := oc.KubeREST().Pods(oc.Namespace()).Delete(pod.Name, options); err != nil { e2e.Logf("%02d: unable to delete deployer pod %q: %v", i, pod.Name, err) } } } e2e.Logf("%02d: triggering a new deployment with config change", i) out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("updated")) default: // wait for the deployment to be running 
e2e.Logf("%02d: waiting for current deployment to start running", i) o.Expect(waitForLatestCondition(oc, "deployment-simple", deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred()) } } // trigger one more deployment, just in case we cancelled the latest output out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", iterations)).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("updated")) g.By("verifying all but terminal deployment is marked complete") o.Expect(waitForLatestCondition(oc, "deployment-simple", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) }) g.It("should immediately start a new deployment [Conformance]", func() { resource, name, err := createFixture(oc, simpleDeploymentFixture) o.Expect(err).NotTo(o.HaveOccurred()) _, err = oc.Run("set", "env").Args(resource, "TRY=ONCE").Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("by checking that the deployment config has the correct version")) err = wait.PollImmediate(500*time.Millisecond, 5*time.Second, func() (bool, error) { dc, _, _, err := deploymentInfo(oc, name) if err != nil { return false, nil } return dc.Status.LatestVersion == 2, nil }) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("by checking that the second deployment exists")) err = wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) { _, rcs, _, err := deploymentInfo(oc, name) if err != nil { return false, nil } secondDeploymentExists := false for _, rc := range rcs { if rc.Name == deployutil.DeploymentNameForConfigVersion(name, 2) { secondDeploymentExists = true break } } return secondDeploymentExists, nil }) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("by checking that the first deployer was deleted and the second deployer exists")) err = wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) { _, _, pods, err := deploymentInfo(oc, name) if err != nil { return false, nil } deploymentNamesToDeployers, err := deploymentPods(pods) if err != nil { return false, nil } firstDeploymentName := deployutil.DeploymentNameForConfigVersion(name, 1) firstDeployerRemoved := len(deploymentNamesToDeployers[firstDeploymentName]) == 0 secondDeploymentName := deployutil.DeploymentNameForConfigVersion(name, 2) secondDeployerExists := len(deploymentNamesToDeployers[secondDeploymentName]) == 1 return firstDeployerRemoved && secondDeployerExists, nil }) o.Expect(err).NotTo(o.HaveOccurred()) }) }) g.Describe("with test deployments", func() { g.AfterEach(func() { failureTrap(oc, "deployment-test", g.CurrentGinkgoTestDescription().Failed) }) g.It("should run a deployment to completion and then scale to zero [Conformance]", func() { out, err := oc.Run("create").Args("-f", deploymentFixture).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, "deployment-test", deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred()) out, err = oc.Run("logs").Args("-f", "dc/deployment-test").Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the logs for substrings\n%s", out)) o.Expect(out).To(o.ContainSubstring("deployment-test-1 to 2")) o.Expect(out).To(o.ContainSubstring("--> pre: Success")) o.Expect(out).To(o.ContainSubstring("--> Success")) g.By("verifying the deployment is marked complete and scaled to zero") o.Expect(waitForLatestCondition(oc, "deployment-test", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) g.By("verifying that 
scaling does not result in new pods") out, err = oc.Run("scale").Args("dc/deployment-test", "--replicas=1").Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By("ensuring no scale up of the deployment happens") wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) { rc, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get("deployment-test-1") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(rc.Spec.Replicas).Should(o.BeEquivalentTo(0)) o.Expect(rc.Status.Replicas).Should(o.BeEquivalentTo(0)) return false, nil }) g.By("verifying the scale is updated on the deployment config") config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get("deployment-test") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(config.Spec.Replicas).Should(o.BeEquivalentTo(1)) o.Expect(config.Spec.Test).Should(o.BeTrue()) g.By("deploying a few more times") for i := 0; i < 3; i++ { out, err = oc.Run("deploy").Args("--latest", "--follow", "deployment-test").Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying the deployment is marked complete and scaled to zero") o.Expect(waitForLatestCondition(oc, "deployment-test", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the logs for substrings\n%s", out)) o.Expect(out).To(o.ContainSubstring(fmt.Sprintf("deployment-test-%d up to 1", i+2))) o.Expect(out).To(o.ContainSubstring("--> pre: Success")) o.Expect(out).To(o.ContainSubstring("test pre hook executed")) o.Expect(out).To(o.ContainSubstring("--> Success")) } }) }) g.Describe("when tagging images", func() { g.AfterEach(func() { failureTrap(oc, "tag-images", g.CurrentGinkgoTestDescription().Failed) }) g.It("should successfully tag the deployed image [Conformance]", func() { _, name, err := createFixture(oc, tagImagesFixture) o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying the deployment is marked complete") o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) g.By("verifying the post deployment action happened: tag is set") var out string pollErr := wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { out, err = oc.Run("get").Args("istag/sample-stream:deployed").Output() if errors.IsNotFound(err) { return false, nil } if err != nil { return false, err } return true, nil }) if pollErr == wait.ErrWaitTimeout { pollErr = err } o.Expect(pollErr).NotTo(o.HaveOccurred()) if !strings.Contains(out, "origin-pod") { err = fmt.Errorf("expected %q to be part of the image reference in %q", "origin-pod", out) o.Expect(err).NotTo(o.HaveOccurred()) } }) }) g.Describe("with multiple image change triggers", func() { g.AfterEach(func() { failureTrap(oc, "example", g.CurrentGinkgoTestDescription().Failed) }) g.It("should run a successful deployment [Conformance]", func() { _, name, err := createFixture(oc, multipleICTFixture) o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying the deployment is marked complete") o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) }) }) g.Describe("with enhanced status", func() { g.AfterEach(func() { failureTrap(oc, "deployment-simple", g.CurrentGinkgoTestDescription().Failed) }) g.It("should include various info in status [Conformance]", func() { resource, name, err := createFixture(oc, simpleDeploymentFixture) o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying the deployment is marked complete") o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, 
deploymentReachedCompletion)).NotTo(o.HaveOccurred()) g.By("verifying that status.replicas is set") replicas, err := oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.replicas}\"").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(replicas).To(o.ContainSubstring("2")) g.By("verifying that status.updatedReplicas is set") updatedReplicas, err := oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.updatedReplicas}\"").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(updatedReplicas).To(o.ContainSubstring("2")) g.By("verifying that status.availableReplicas is set") availableReplicas, err := oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.availableReplicas}\"").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(availableReplicas).To(o.ContainSubstring("2")) g.By("verifying that status.unavailableReplicas is set") unavailableReplicas, err := oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.unavailableReplicas}\"").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(unavailableReplicas).To(o.ContainSubstring("0")) }) }) g.Describe("with custom deployments", func() { g.AfterEach(func() { failureTrap(oc, "custom-deployment", g.CurrentGinkgoTestDescription().Failed) }) g.It("should run the custom deployment steps [Conformance]", func() { out, err := oc.Run("create").Args("-f", customDeploymentFixture).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, "custom-deployment", deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred()) out, err = oc.Run("deploy").Args("--follow", "dc/custom-deployment").Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying the deployment is marked complete") o.Expect(waitForLatestCondition(oc, "custom-deployment", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the logs for substrings\n%s", out)) o.Expect(out).To(o.ContainSubstring("--> pre: Running hook pod ...")) o.Expect(out).To(o.ContainSubstring("test pre hook executed")) o.Expect(out).To(o.ContainSubstring("--> Scaling custom-deployment-1 to 2")) o.Expect(out).To(o.ContainSubstring("--> Reached 50%")) o.Expect(out).To(o.ContainSubstring("Halfway")) o.Expect(out).To(o.ContainSubstring("Finished")) o.Expect(out).To(o.ContainSubstring("--> Success")) }) }) g.Describe("viewing rollout history", func() { g.AfterEach(func() { failureTrap(oc, "deployment-simple", g.CurrentGinkgoTestDescription().Failed) }) g.It("should print the rollout history [Conformance]", func() { resource, name, err := createFixture(oc, simpleDeploymentFixture) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) _, err = oc.REST().DeploymentConfigs(oc.Namespace()).Get(name) o.Expect(err).NotTo(o.HaveOccurred()) _, err = client.UpdateConfigWithRetries(oc.REST(), oc.Namespace(), name, func(dc *deployapi.DeploymentConfig) { one := int64(1) dc.Spec.Template.Spec.TerminationGracePeriodSeconds = &one }) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) out, err := oc.Run("rollout").Args("history", resource).Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the history for substrings\n%s", out)) o.Expect(out).To(o.ContainSubstring("deploymentconfigs \"deployment-simple\" history viewed")) o.Expect(out).To(o.ContainSubstring("REVISION STATUS CAUSE")) 
o.Expect(out).To(o.ContainSubstring("1 Complete caused by a config change")) o.Expect(out).To(o.ContainSubstring("2 Complete caused by a config change")) }) }) g.Describe("generation", func() { g.AfterEach(func() { failureTrap(oc, "generation-test", g.CurrentGinkgoTestDescription().Failed) }) g.It("should deploy based on a status version bump [Conformance]", func() { resource, name, err := createFixture(oc, generationFixture) o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying that both latestVersion and generation are updated") version, err := oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.latestVersion}\"").Output() version = strings.Trim(version, "\"") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the latest version for %s: %s", resource, version)) o.Expect(version).To(o.ContainSubstring("1")) generation, err := oc.Run("get").Args(resource, "--output=jsonpath=\"{.metadata.generation}\"").Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the generation for %s: %s", resource, generation)) o.Expect(generation).To(o.ContainSubstring("1")) g.By("verifying the deployment is marked complete") err = wait.Poll(100*time.Millisecond, 1*time.Minute, func() (bool, error) { rc, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get(name + "-" + version) o.Expect(err).NotTo(o.HaveOccurred()) return deployutil.IsTerminatedDeployment(rc), nil }) o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying that scaling updates the generation") _, err = oc.Run("scale").Args(resource, "--replicas=2").Output() o.Expect(err).NotTo(o.HaveOccurred()) generation, err = oc.Run("get").Args(resource, "--output=jsonpath=\"{.metadata.generation}\"").Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the generation for %s: %s", resource, generation)) o.Expect(generation).To(o.ContainSubstring("2")) g.By("deploying a second time [new client]") _, err = oc.Run("deploy").Args("--latest", name).Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying that both latestVersion and generation are updated") version, err = oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.latestVersion}\"").Output() version = strings.Trim(version, "\"") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the latest version for %s: %s", resource, version)) o.Expect(version).To(o.ContainSubstring("2")) generation, err = oc.Run("get").Args(resource, "--output=jsonpath=\"{.metadata.generation}\"").Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the generation for %s: %s", resource, generation)) o.Expect(generation).To(o.ContainSubstring("3")) g.By("verifying that observedGeneration equals generation") err = wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { dc, _, _, err := deploymentInfo(oc, name) o.Expect(err).NotTo(o.HaveOccurred()) return deployutil.HasSynced(dc), nil }) }) }) g.Describe("paused", func() { g.AfterEach(func() { failureTrap(oc, "paused", g.CurrentGinkgoTestDescription().Failed) }) g.It("should disable actions on deployments [Conformance]", func() { resource, name, err := createFixture(oc, pausedDeploymentFixture) o.Expect(err).NotTo(o.HaveOccurred()) _, rcs, _, err := deploymentInfo(oc, name) o.Expect(err).NotTo(o.HaveOccurred()) if len(rcs) != 0 { o.Expect(fmt.Errorf("expected no deployment, found %#v", rcs[0])).NotTo(o.HaveOccurred()) } g.By("verifying that we cannot start a new deployment") out, err := oc.Run("deploy").Args(resource, "--latest").Output() 
o.Expect(err).To(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("cannot deploy a paused deployment config")) g.By("verifying that we cannot cancel a deployment") out, err = oc.Run("deploy").Args(resource, "--cancel").Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("cannot cancel a paused deployment config")) g.By("verifying that we cannot retry a deployment") out, err = oc.Run("deploy").Args(resource, "--retry").Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("cannot retry a paused deployment config")) g.By("verifying that we cannot rollback a deployment") out, err = oc.Run("rollback").Args(resource, "--to-version", "1").Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("cannot rollback a paused deployment config")) _, rcs, _, err = deploymentInfo(oc, name) o.Expect(err).NotTo(o.HaveOccurred()) if len(rcs) != 0 { o.Expect(fmt.Errorf("expected no deployment, found %#v", rcs[0])).NotTo(o.HaveOccurred()) } _, err = client.UpdateConfigWithRetries(oc.REST(), oc.Namespace(), name, func(dc *deployapi.DeploymentConfig) { // TODO: oc rollout pause should patch instead of making a full update dc.Spec.Paused = false }) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) }) }) g.Describe("with failing hook", func() { g.AfterEach(func() { failureTrap(oc, "hook", g.CurrentGinkgoTestDescription().Failed) }) g.It("should get all logs from retried hooks [Conformance]", func() { resource, name, err := createFixture(oc, failedHookFixture) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentPreHookRetried)).NotTo(o.HaveOccurred()) out, err := oc.Run("logs").Args(resource).Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking the logs for substrings\n%s", out)) o.Expect(out).To(o.ContainSubstring("--> pre: Running hook pod ...")) o.Expect(out).To(o.ContainSubstring("no such file or directory")) o.Expect(out).To(o.ContainSubstring("--> pre: Retrying hook pod (retry #1)")) }) }) g.Describe("rolled back", func() { g.AfterEach(func() { failureTrap(oc, "deployment-simple", g.CurrentGinkgoTestDescription().Failed) }) g.It("should rollback to an older deployment [Conformance]", func() { resource, name, err := createFixture(oc, simpleDeploymentFixture) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) _, err = oc.Run("deploy").Args(name, "--latest").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) g.By("verifying that we are on the second version") version, err := oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.latestVersion}\"").Output() o.Expect(err).NotTo(o.HaveOccurred()) version = strings.Trim(version, "\"") o.Expect(version).To(o.ContainSubstring("2")) g.By("verifying that we can rollback") _, err = oc.Run("rollout").Args("undo", resource).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) g.By("verifying that we are on the third version") version, err = oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.latestVersion}\"").Output() o.Expect(err).NotTo(o.HaveOccurred()) version = 
strings.Trim(version, "\"") o.Expect(version).To(o.ContainSubstring("3")) }) }) g.Describe("reaper", func() { g.AfterEach(func() { failureTrap(oc, "brokendeployment", g.CurrentGinkgoTestDescription().Failed) }) g.It("should delete all failed deployer pods and hook pods [Conformance]", func() { resource, name, err := createFixture(oc, brokenDeploymentFixture) o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for the deployment to complete") err = waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion) o.Expect(err).To(o.HaveOccurred()) g.By("fetching the deployer pod") out, err := oc.Run("get").Args("pod", fmt.Sprintf("%s-1-deploy", name)).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("Error")) g.By("fetching the pre-hook pod") out, err = oc.Run("get").Args("pod", fmt.Sprintf("%s-1-hook-pre", name)).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("Error")) g.By("deleting the deployment config") out, err = oc.Run("delete").Args(resource).Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By("fetching the deployer pod") out, err = oc.Run("get").Args("pod", fmt.Sprintf("%s-1-deploy", name)).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("not found")) g.By("fetching the pre-hook pod") out, err = oc.Run("get").Args("pod", fmt.Sprintf("%s-1-hook-pre", name)).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("not found")) }) }) g.Describe("with revision history limits", func() { g.AfterEach(func() { failureTrap(oc, "history-limit", g.CurrentGinkgoTestDescription().Failed) }) g.It("should never persist more old deployments than acceptable after being observed by the controller [Conformance]", func() { revisionHistoryLimit := 3 // as specified in the fixture _, err := oc.Run("create").Args("-f", historyLimitedDeploymentFixture).Output() o.Expect(err).NotTo(o.HaveOccurred()) iterations := 10 for i := 0; i < iterations; i++ { o.Expect(waitForLatestCondition(oc, "history-limit", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred(), "the current deployment needs to have finished before attempting to trigger a new deployment through configuration change") e2e.Logf("%02d: triggering a new deployment with config change", i) out, err := oc.Run("set", "env").Args("dc/history-limit", fmt.Sprintf("A=%d", i)).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("updated")) } o.Expect(waitForLatestCondition(oc, "history-limit", deploymentRunTimeout, checkDeploymentConfigHasSynced)).NotTo(o.HaveOccurred(), "the controller needs to have synced with the updated deployment configuration before checking that the revision history limits are being adhered to") deploymentConfig, deployments, _, err := deploymentInfo(oc, "history-limit") o.Expect(err).NotTo(o.HaveOccurred()) // sanity check to ensure that the following asertion on the amount of old deployments is valid o.Expect(*deploymentConfig.Spec.RevisionHistoryLimit).To(o.Equal(int32(revisionHistoryLimit))) // we need to filter out any deployments that we don't care about, // namely the active deployment and any newer deployments oldDeployments := deployutil.DeploymentsForCleanup(deploymentConfig, deployments) // we should not have more deployments than acceptable o.Expect(len(oldDeployments)).To(o.BeNumerically("==", revisionHistoryLimit)) // the deployments we continue to keep should be the latest ones for _, deployment := range oldDeployments { 
o.Expect(deployutil.DeploymentVersionFor(&deployment)).To(o.BeNumerically(">=", iterations-revisionHistoryLimit)) } }) }) g.Describe("with minimum ready seconds set", func() { g.AfterEach(func() { failureTrap(oc, "minreadytest", g.CurrentGinkgoTestDescription().Failed) }) g.It("should not transition the deployment to Complete before satisfied [Conformance]", func() { _, name, err := createFixture(oc, minReadySecondsFixture) o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying the deployment is marked running") o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred()) g.By("verifying that all pods are ready") config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name) o.Expect(err).NotTo(o.HaveOccurred()) selector := labels.Set(config.Spec.Selector).AsSelector() opts := kapi.ListOptions{LabelSelector: selector} ready := 0 if err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { pods, err := oc.KubeREST().Pods(oc.Namespace()).List(opts) if err != nil { return false, nil } ready = 0 for i := range pods.Items { pod := pods.Items[i] if kapi.IsPodReady(&pod) { ready++ } } return len(pods.Items) == ready, nil }); err != nil { o.Expect(fmt.Errorf("deployment config %q never became ready (ready: %d, desired: %d)", config.Name, ready, config.Spec.Replicas)).NotTo(o.HaveOccurred()) } g.By("verifying that the deployment is still running") latestName := deployutil.DeploymentNameForConfigVersion(name, config.Status.LatestVersion) latest, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get(latestName) o.Expect(err).NotTo(o.HaveOccurred()) if deployutil.IsTerminatedDeployment(latest) { o.Expect(fmt.Errorf("expected deployment %q not to have terminated", latest.Name)).NotTo(o.HaveOccurred()) } o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred()) }) }) })
var _ = g.Describe("[builds] build with empty source", func() { defer g.GinkgoRecover() var ( buildFixture = exutil.FixturePath("..", "extended", "testdata", "test-nosrc-build.json") oc = exutil.NewCLI("cli-build-nosrc", exutil.KubeConfigPath()) exampleBuild = exutil.FixturePath("..", "extended", "testdata", "test-build-app") ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.Run("create").Args("-f", buildFixture).Execute() }) g.Describe("started build", func() { g.It("should build even with an empty source in build config", func() { g.By("starting the build with --wait flag") out, err := oc.Run("start-build").Args("nosrc-build", "--wait", fmt.Sprintf("--from-dir=%s", exampleBuild)).Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying build success") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "nosrc-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) g.By(fmt.Sprintf("verifying the build %q status", out)) build, err := oc.REST().Builds(oc.Namespace()).Get("nosrc-build-1") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Spec.Source.Dockerfile).To(o.BeNil()) o.Expect(build.Spec.Source.Git).To(o.BeNil()) o.Expect(build.Spec.Source.Images).To(o.BeNil()) o.Expect(build.Spec.Source.Binary).NotTo(o.BeNil()) }) }) })
var _ = g.Describe("builds: parallel: oc start-build", func() { defer g.GinkgoRecover() var ( buildFixture = exutil.FixturePath("..", "extended", "fixtures", "test-build.json") oc = exutil.NewCLI("cli-start-build", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.Run("create").Args("-f", buildFixture).Execute() }) g.Describe("oc start-build --wait", func() { g.It("should start a build and wait for the build to complete", func() { g.By("starting the build with --wait flag") out, err := oc.Run("start-build").Args("sample-build", "--wait").Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("verifying the build %q status", out)) build, err := oc.REST().Builds(oc.Namespace()).Get(out) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseComplete)) }) g.It("should start a build and wait for the build to fail", func() { g.By("starting the build with --wait flag but wrong --commit") out, err := oc.Run("start-build"). Args("sample-build", "--wait", "--commit", "fffffff"). Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring(`status is "Failed"`)) }) }) g.Describe("cancelling build started by oc start-build --wait", func() { g.It("should start a build and wait for the build to cancel", func() { g.By("starting the build with --wait flag") var wg sync.WaitGroup wg.Add(1) go func() { defer g.GinkgoRecover() out, err := oc.Run("start-build").Args("sample-build", "--wait").Output() defer wg.Done() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring(`status is "Cancelled"`)) }() g.By("getting the build name") var buildName string wait.Poll(time.Duration(100*time.Millisecond), time.Duration(60*time.Second), func() (bool, error) { out, err := oc.Run("get"). Args("build", "--template", "{{ (index .items 0).metadata.name }}").Output() // Give it second chance in case the build resource was not created yet if err != nil || len(out) == 0 { return false, nil } buildName = out return true, nil }) o.Expect(buildName).ToNot(o.BeEmpty()) g.By(fmt.Sprintf("cancelling the build %q", buildName)) err := oc.Run("cancel-build").Args(buildName).Execute() o.Expect(err).ToNot(o.HaveOccurred()) wg.Wait() }) }) })
var _ = g.Describe("images: s2i: perl", func() { defer g.GinkgoRecover() var ( dancerTemplate = "https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql.json" oc = exutil.NewCLI("s2i-perl", exutil.KubeConfigPath()) modifyCommand = []string{"sed", "-ie", `s/data => \$data\[0\]/data => "1337"/`, "lib/default.pm"} pageCountFunc = func(count int) string { return fmt.Sprintf(`<span class="code" id="count-value">%d</span>`, count) } dcName = "dancer-mysql-example-1" dcLabel = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName)) ) g.Describe("Dancer example", func() { g.It(fmt.Sprintf("should work with hot deploy"), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc new-app -f %q", dancerTemplate)) err := oc.Run("new-app").Args("-f", dancerTemplate).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for build to finish") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "dancer-mysql-example-1", exutil.CheckBuildSuccessFunc, exutil.CheckBuildFailedFunc) o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for endpoint") err = oc.KubeFramework().WaitForAnEndpoint("dancer-mysql-example") o.Expect(err).NotTo(o.HaveOccurred()) assertPageCountIs := func(i int) { _, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFunc, 1, 120*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) result, err := CheckPageContains(oc, "dancer-mysql-example", "", pageCountFunc(i)) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result).To(o.BeTrue()) } g.By("checking page count") assertPageCountIs(1) assertPageCountIs(2) g.By("modifying the source code with disabled hot deploy") RunInPodContainer(oc, dcLabel, modifyCommand) assertPageCountIs(3) pods, err := oc.KubeREST().Pods(oc.Namespace()).List(dcLabel, nil) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods.Items)).To(o.Equal(1)) g.By("turning on hot-deploy") err = oc.Run("env").Args("rc", dcName, "PERL_APACHE2_RELOAD=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 60*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("modifying the source code with enabled hot deploy") RunInPodContainer(oc, dcLabel, modifyCommand) assertPageCountIs(1337) }) }) })
var _ = g.Describe("[builds][Slow] the s2i build should support proxies", func() { defer g.GinkgoRecover() var ( buildFixture = exutil.FixturePath("..", "extended", "fixtures", "test-build-proxy.json") oc = exutil.NewCLI("build-proxy", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.Run("create").Args("-f", buildFixture).Execute() }) g.Describe("start build with broken proxy", func() { g.It("should start a build and wait for the build to to fail", func() { g.By("starting the build with --wait and --follow flags") out, err := oc.Run("start-build").Args("sample-build", "--follow", "--wait").Output() if err != nil { fmt.Fprintln(g.GinkgoWriter, out) } o.Expect(err).To(o.HaveOccurred()) g.By("verifying the build sample-app-1 output") // The git ls-remote check should exit the build when the remote // repository is not accessible. It should never get to the clone. o.Expect(out).NotTo(o.ContainSubstring("clone")) o.Expect(out).To(o.ContainSubstring(`unable to access 'https://github.com/openshift/ruby-hello-world.git/': Failed connect to 127.0.0.1:3128`)) g.By("verifying the build sample-build-1 status") build, err := oc.REST().Builds(oc.Namespace()).Get("sample-build-1") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed)) }) }) })
var _ = g.Describe("images: s2i: python", func() { defer g.GinkgoRecover() var ( oc = exutil.NewCLI("s2i-python", exutil.KubeConfigPath()) djangoRepository = "https://github.com/openshift/django-ex.git" modifyCommand = []string{"sed", "-ie", `s/'count': PageView.objects.count()/'count': 1337/`, "welcome/views.py"} pageCountFn = func(count int) string { return fmt.Sprintf("Page views: %d", count) } dcName = "django-ex-1" dcLabel = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName)) ) g.Describe("Django example", func() { g.It(fmt.Sprintf("should work with hot deploy"), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc new-app %s", djangoRepository)) err := oc.Run("new-app").Args(djangoRepository, "--strategy=source").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for build to finish") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "django-ex-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for endpoint") err = oc.KubeFramework().WaitForAnEndpoint("django-ex") o.Expect(err).NotTo(o.HaveOccurred()) assertPageCountIs := func(i int) { _, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 120*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) result, err := CheckPageContains(oc, "django-ex", "", pageCountFn(i)) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result).To(o.BeTrue()) } g.By("checking page count") assertPageCountIs(1) assertPageCountIs(2) g.By("modifying the source code with disabled hot deploy") RunInPodContainer(oc, dcLabel, modifyCommand) assertPageCountIs(3) pods, err := oc.KubeREST().Pods(oc.Namespace()).List(dcLabel, nil) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods.Items)).To(o.Equal(1)) g.By("turning on hot-deploy") err = oc.Run("env").Args("rc", dcName, "APP_CONFIG=conf/reload.py").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 60*time.Second) o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("modifying the source code with enabled hot deploy") RunInPodContainer(oc, dcLabel, modifyCommand) assertPageCountIs(1337) }) }) })
var _ = g.Describe("[image_ecosystem][mongodb][Slow] openshift mongodb replication (with petset)", func() { defer g.GinkgoRecover() const templatePath = "https://raw.githubusercontent.com/sclorg/mongodb-container/master/examples/petset/mongodb-petset-persistent.yaml" oc := exutil.NewCLI("mongodb-petset-replica", exutil.KubeConfigPath()).Verbose() g.Describe("creating from a template", func() { g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By("creating persistent volumes") _, err := exutil.SetupHostPathVolumes( oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "256Mi", 3, ) o.Expect(err).NotTo(o.HaveOccurred()) defer func() { // We're removing only PVs because all other things will be removed // together with namespace. err := exutil.CleanupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace()) if err != nil { fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't cleanup persistent volumes: %v", err) } }() g.By("creating a new app") o.Expect( oc.Run("new-app").Args( "-f", templatePath, "-p", "VOLUME_CAPACITY=256Mi", "-p", "MEMORY_LIMIT=512Mi", "-p", "MONGODB_IMAGE=centos/mongodb-32-centos7", "-p", "MONGODB_SERVICE_NAME=mongodb-replicaset", ).Execute(), ).Should(o.Succeed()) g.By("waiting for pods to running") podNames, err := exutil.WaitForPods( oc.KubeREST().Pods(oc.Namespace()), exutil.ParseLabelsOrDie("name=mongodb-replicaset"), exutil.CheckPodIsRunningFn, 3, 2*time.Minute, ) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(podNames).Should(o.HaveLen(3)) g.By("expecting that we can insert a new record on primary node") mongo := dbutil.NewMongoDB(podNames[0]) replicaSet := mongo.(exutil.ReplicaSet) _, err = replicaSet.QueryPrimary(oc, `db.test.save({ "status" : "passed" })`) o.Expect(err).ShouldNot(o.HaveOccurred()) g.By("expecting that we can read a record from all members") for _, podName := range podNames { o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed()) } g.By("restarting replica set") err = oc.Run("delete").Args("pods", "--all", "-n", oc.Namespace()).Execute() o.Expect(err).ShouldNot(o.HaveOccurred()) g.By("waiting for restarting of the pods") podNames, err = exutil.WaitForPods( oc.KubeREST().Pods(oc.Namespace()), exutil.ParseLabelsOrDie("name=mongodb-replicaset"), exutil.CheckPodIsRunningFn, 3, 2*time.Minute, ) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(podNames).Should(o.HaveLen(3)) g.By("expecting that we can read a record from all members after its restart") for _, podName := range podNames { o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed()) } }) }) })
var _ = g.Describe("[job] openshift can execute jobs", func() { defer g.GinkgoRecover() var ( configPath = exeutil.FixturePath("fixtures", "job-controller.yaml") oc = exeutil.NewCLI("job-controller", exeutil.KubeConfigPath()) ) g.Describe("controller", func() { g.It("should create and run a job in user project", func() { oc.SetOutputDir(exeutil.TestContext.OutputDir) g.By(fmt.Sprintf("creating a job from %q", configPath)) err := oc.Run("create").Args("-f", configPath).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("Waiting for pod...")) podNames, err := exeutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), exeutil.ParseLabelsOrDie("app=pi"), exeutil.CheckPodIsSucceededFn, 1, 2*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(podNames)).Should(o.Equal(1)) podName := podNames[0] g.By("retrieving logs from pod " + podName) logs, err := oc.Run("logs").Args(podName).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(logs).Should(o.Equal("3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117068")) g.By("checking job status") jobs, err := oc.KubeREST().Jobs(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exeutil.ParseLabelsOrDie("app=pi")}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(jobs.Items)).Should(o.Equal(1)) job := jobs.Items[0] o.Expect(len(job.Status.Conditions)).Should(o.Equal(1)) o.Expect(job.Status.Conditions[0].Type).Should(o.Equal(kapiextensions.JobComplete)) }) }) })
var _ = g.Describe("[builds][Slow] result image should have proper labels set", func() { defer g.GinkgoRecover() var ( imageStreamFixture = exutil.FixturePath("..", "integration", "testdata", "test-image-stream.json") stiBuildFixture = exutil.FixturePath("testdata", "test-s2i-build.json") dockerBuildFixture = exutil.FixturePath("testdata", "test-docker-build.json") oc = exutil.NewCLI("build-sti-labels", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("S2I build from a template", func() { g.It(fmt.Sprintf("should create a image from %q template with proper Docker labels", stiBuildFixture), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc create -f %q", imageStreamFixture)) err := oc.Run("create").Args("-f", imageStreamFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("calling oc create -f %q", stiBuildFixture)) err = oc.Run("create").Args("-f", stiBuildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") br, err := exutil.StartBuildAndWait(oc, "test") br.AssertSuccess() g.By("getting the Docker image reference from ImageStream") imageRef, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest") o.Expect(err).NotTo(o.HaveOccurred()) imageLabels, err := eximages.GetImageLabels(oc.REST().ImageStreamImages(oc.Namespace()), "test", imageRef) o.Expect(err).NotTo(o.HaveOccurred()) g.By("inspecting the new image for proper Docker labels") err = ExpectOpenShiftLabels(imageLabels) o.Expect(err).NotTo(o.HaveOccurred()) }) }) g.Describe("Docker build from a template", func() { g.It(fmt.Sprintf("should create a image from %q template with proper Docker labels", dockerBuildFixture), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc create -f %q", imageStreamFixture)) err := oc.Run("create").Args("-f", imageStreamFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("calling oc create -f %q", dockerBuildFixture)) err = oc.Run("create").Args("-f", dockerBuildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") br, err := exutil.StartBuildAndWait(oc, "test") br.AssertSuccess() g.By("getting the Docker image reference from ImageStream") imageRef, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest") o.Expect(err).NotTo(o.HaveOccurred()) imageLabels, err := eximages.GetImageLabels(oc.REST().ImageStreamImages(oc.Namespace()), "test", imageRef) o.Expect(err).NotTo(o.HaveOccurred()) g.By("inspecting the new image for proper Docker labels") err = ExpectOpenShiftLabels(imageLabels) o.Expect(err).NotTo(o.HaveOccurred()) }) }) })
var _ = g.Describe("[builds][Slow] incremental s2i build", func() { defer g.GinkgoRecover() const ( buildTestPod = "build-test-pod" buildTestService = "build-test-svc" ) var ( templateFixture = exutil.FixturePath("testdata", "incremental-auth-build.json") podAndServiceFixture = exutil.FixturePath("testdata", "test-build-podsvc.json") oc = exutil.NewCLI("build-sti-inc", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("Building from a template", func() { g.It(fmt.Sprintf("should create a build from %q template and run it", templateFixture), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc new-app -f %q", templateFixture)) err := oc.Run("new-app").Args("-f", templateFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") out, err := oc.Run("start-build").Args("initial-build").Output() fmt.Fprintf(g.GinkgoWriter, "\ninitial-build start-build output:\n%s\n", out) o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the build is in Complete phase") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "initial-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) if err != nil { exutil.DumpBuildLogs("initial-build", oc) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build using the image produced by the last build") out, err = oc.Run("start-build").Args("internal-build").Output() fmt.Fprintf(g.GinkgoWriter, "\ninternal-build start-build output:\n%s\n", out) o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the build is in Complete phase") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "internal-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) if err != nil { exutil.DumpBuildLogs("internal-build", oc) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("getting the Docker image reference from ImageStream") imageName, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "internal-image", "latest") o.Expect(err).NotTo(o.HaveOccurred()) g.By("instantiating a pod and service with the new image") err = oc.Run("new-app").Args("-f", podAndServiceFixture, "-p", "IMAGE_NAME="+imageName).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for the service to become available") err = oc.KubeFramework().WaitForAnEndpoint(buildTestService) o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the pod container has saved artifacts") out, err = oc.Run("exec").Args("-p", buildTestPod, "--", "curl", "http://0.0.0.0:8080").Output() if err != nil { logs, _ := oc.Run("logs").Args(buildTestPod).Output() e2e.Failf("Failed to curl in application container: \n%q, pod logs: \n%q", out, logs) } o.Expect(err).NotTo(o.HaveOccurred()) if !strings.Contains(out, "artifacts exist") { logs, _ := oc.Run("logs").Args(buildTestPod).Output() e2e.Failf("Pod %q does not contain expected artifacts: %q\n%q", buildTestPod, out, logs) } }) }) })
var _ = g.Describe("[builds][quota][Slow] docker build with a quota", func() { defer g.GinkgoRecover() const ( buildTestPod = "build-test-pod" buildTestService = "build-test-svc" ) var ( buildFixture = exutil.FixturePath("testdata", "test-docker-build-quota.json") oc = exutil.NewCLI("docker-build-quota", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("Building from a template", func() { g.It("should create a docker build with a quota and run it", func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc create -f %q", buildFixture)) err := oc.Run("create").Args("-f", buildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") _, err = oc.Run("start-build").Args("docker-build-quota", "--from-dir", exutil.FixturePath("testdata", "build-quota")).Output() o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the build is in Failed phase") // note that success and fail functions are intentionally reversed because we want to wait for failure. err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "docker-build-quota-1", exutil.CheckBuildFailedFn, exutil.CheckBuildSuccessFn) if err != nil { exutil.DumpBuildLogs("docker-build-quota", oc) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the build logs to contain the correct cgroups values") out, err := oc.Run("logs").Args(fmt.Sprintf("build/docker-build-quota-1")).Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).To(o.ContainSubstring("MEMORY=209715200")) o.Expect(out).To(o.ContainSubstring("MEMORYSWAP=209715200")) o.Expect(out).To(o.ContainSubstring("SHARES=61")) o.Expect(out).To(o.ContainSubstring("PERIOD=100000")) o.Expect(out).To(o.ContainSubstring("QUOTA=6000")) }) }) })
var _ = g.Describe("[images][php][Slow] hot deploy for openshift php image", func() { defer g.GinkgoRecover() var ( cakephpTemplate = "https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json" oc = exutil.NewCLI("s2i-php", exutil.KubeConfigPath()) hotDeployParam = "OPCACHE_REVALIDATE_FREQ=0" modifyCommand = []string{"sed", "-ie", `s/\$result\['c'\]/1337/`, "app/View/Layouts/default.ctp"} pageCountFn = func(count int) string { return fmt.Sprintf(`<span class="code" id="count-value">%d</span>`, count) } dcName = "cakephp-mysql-example-1" dcLabel = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName)) ) g.Describe("CakePHP example", func() { g.It(fmt.Sprintf("should work with hot deploy"), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc new-app -f %q -p %q", cakephpTemplate, hotDeployParam)) err := oc.Run("new-app").Args("-f", cakephpTemplate, "-p", hotDeployParam).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for build to finish") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for endpoint") err = oc.KubeFramework().WaitForAnEndpoint("cakephp-mysql-example") o.Expect(err).NotTo(o.HaveOccurred()) assertPageCountIs := func(i int) { _, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) result, err := CheckPageContains(oc, "cakephp-mysql-example", "", pageCountFn(i)) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result).To(o.BeTrue()) } g.By("checking page count") assertPageCountIs(1) assertPageCountIs(2) g.By("modifying the source code with disabled hot deploy") RunInPodContainer(oc, dcLabel, modifyCommand) g.By("checking page count after modifying the source code") assertPageCountIs(1337) }) }) })
var _ = g.Describe("[imageapis] openshift limit range admission", func() { defer g.GinkgoRecover() var oc = exutil.NewCLI("limitrange-admission", exutil.KubeConfigPath()) g.JustBeforeEach(func() { g.By("Waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) }) // needs to be run at the of of each It; cannot be run in AfterEach which is run after the project // is destroyed tearDown := func(oc *exutil.CLI) { g.By(fmt.Sprintf("Deleting limit range %s", limitRangeName)) oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Delete(limitRangeName, nil) deleteTestImagesAndStreams(oc) } g.It(fmt.Sprintf("should deny a push of built image exceeding %s limit", imageapi.LimitTypeImage), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) defer tearDown(oc) dClient, err := testutil.NewDockerClient() o.Expect(err).NotTo(o.HaveOccurred()) _, err = createLimitRangeOfType(oc, imageapi.LimitTypeImage, kapi.ResourceList{ kapi.ResourceStorage: resource.MustParse("10Ki"), }) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push an image exceeding size limit with just 1 layer")) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "middle", 16000, 1, false) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push an image exceeding size limit in total")) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "middle", 16000, 5, false) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push an image with one big layer below size limit")) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "small", 8000, 1, true) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push an image below size limit")) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "small", 8000, 2, true) o.Expect(err).NotTo(o.HaveOccurred()) }) g.It(fmt.Sprintf("should deny a push of built image exceeding limit on %s resource", imageapi.ResourceImageStreamImages), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) defer tearDown(oc) limits := kapi.ResourceList{ imageapi.ResourceImageStreamTags: resource.MustParse("0"), imageapi.ResourceImageStreamImages: resource.MustParse("0"), } _, err := createLimitRangeOfType(oc, imageapi.LimitTypeImageStream, limits) o.Expect(err).NotTo(o.HaveOccurred()) dClient, err := testutil.NewDockerClient() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image exceeding limits %v", limits)) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "refused", imageSize, 1, false) o.Expect(err).NotTo(o.HaveOccurred()) limits, err = bumpLimit(oc, imageapi.ResourceImageStreamImages, "1") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below limits %v", limits)) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "first", imageSize, 2, true) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image exceeding limits %v", limits)) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "second", imageSize, 2, false) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below limits %v to another image stream", limits)) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), 
"another", "second", imageSize, 1, true) o.Expect(err).NotTo(o.HaveOccurred()) limits, err = bumpLimit(oc, imageapi.ResourceImageStreamImages, "2") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below limits %v", limits)) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "another", "third", imageSize, 1, true) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image exceeding limits %v", limits)) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "another", "fourth", imageSize, 1, false) o.Expect(err).NotTo(o.HaveOccurred()) g.By(`removing tag "second" from "another" image stream`) err = oc.Client().ImageStreamTags(oc.Namespace()).Delete("another", "second") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below limits %v", limits)) err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "another", "replenish", imageSize, 1, true) o.Expect(err).NotTo(o.HaveOccurred()) }) g.It(fmt.Sprintf("should deny a docker image reference exceeding limit on %s resource", imageapi.ResourceImageStreamTags), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) defer tearDown(oc) tag2Image, err := buildAndPushTestImagesTo(oc, "src", "tag", 2) o.Expect(err).NotTo(o.HaveOccurred()) limit := kapi.ResourceList{imageapi.ResourceImageStreamTags: resource.MustParse("0")} _, err = createLimitRangeOfType(oc, imageapi.LimitTypeImageStream, limit) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to tag a docker image exceeding limit %v", limit)) out, err := oc.Run("import-image").Args("stream:dockerimage", "--confirm", "--insecure", "--from", tag2Image["tag1"].DockerImageReference).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring("exceeds the maximum limit")) o.Expect(out).Should(o.ContainSubstring(string(imageapi.ResourceImageStreamTags))) limit, err = bumpLimit(oc, imageapi.ResourceImageStreamTags, "1") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to tag a docker image below limit %v", limit)) err = oc.Run("import-image").Args("stream:dockerimage", "--confirm", "--insecure", "--from", tag2Image["tag1"].DockerImageReference).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitForAnImageStreamTag(oc, oc.Namespace(), "stream", "dockerimage") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to tag a docker image exceeding limit %v", limit)) is, err := oc.Client().ImageStreams(oc.Namespace()).Get("stream") o.Expect(err).NotTo(o.HaveOccurred()) is.Spec.Tags["foo"] = imageapi.TagReference{ Name: "foo", From: &kapi.ObjectReference{ Kind: "DockerImage", Name: tag2Image["tag2"].DockerImageReference, }, ImportPolicy: imageapi.TagImportPolicy{ Insecure: true, }, } _, err = oc.Client().ImageStreams(oc.Namespace()).Update(is) o.Expect(err).To(o.HaveOccurred()) o.Expect(quotautil.IsErrorQuotaExceeded(err)).Should(o.Equal(true)) g.By("re-tagging the image under different tag") is, err = oc.Client().ImageStreams(oc.Namespace()).Get("stream") o.Expect(err).NotTo(o.HaveOccurred()) is.Spec.Tags["duplicate"] = imageapi.TagReference{ Name: "duplicate", From: &kapi.ObjectReference{ Kind: "DockerImage", Name: tag2Image["tag1"].DockerImageReference, }, ImportPolicy: imageapi.TagImportPolicy{ Insecure: true, }, } _, err = oc.Client().ImageStreams(oc.Namespace()).Update(is) o.Expect(err).NotTo(o.HaveOccurred()) }) g.It(fmt.Sprintf("should deny an import of a repository exceeding limit on 
%s resource", imageapi.ResourceImageStreamTags), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) defer tearDown(oc) maxBulkImport, err := getMaxImagesBulkImportedPerRepository() o.Expect(err).NotTo(o.HaveOccurred()) s1tag2Image, err := buildAndPushTestImagesTo(oc, "src1st", "tag", maxBulkImport+1) s2tag2Image, err := buildAndPushTestImagesTo(oc, "src2nd", "t", 2) o.Expect(err).NotTo(o.HaveOccurred()) limit := kapi.ResourceList{ imageapi.ResourceImageStreamTags: *resource.NewQuantity(int64(maxBulkImport)+1, resource.DecimalSI), imageapi.ResourceImageStreamImages: *resource.NewQuantity(int64(maxBulkImport)+1, resource.DecimalSI), } _, err = createLimitRangeOfType(oc, imageapi.LimitTypeImageStream, limit) o.Expect(err).NotTo(o.HaveOccurred()) s1ref, err := imageapi.ParseDockerImageReference(s1tag2Image["tag1"].DockerImageReference) o.Expect(err).NotTo(o.HaveOccurred()) s1ref.Tag = "" s1ref.ID = "" s2ref, err := imageapi.ParseDockerImageReference(s2tag2Image["t1"].DockerImageReference) o.Expect(err).NotTo(o.HaveOccurred()) s2ref.Tag = "" s2ref.ID = "" g.By(fmt.Sprintf("trying to import from repository %q below quota %v", s1ref.Exact(), limit)) err = oc.Run("import-image").Args("bulkimport", "--confirm", "--insecure", "--all", "--from", s1ref.Exact()).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitForAnImageStreamTag(oc, oc.Namespace(), "bulkimport", "tag1") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to import tags from repository %q exceeding quota %v", s2ref.Exact(), limit)) out, err := oc.Run("import-image").Args("bulkimport", "--confirm", "--insecure", "--all", "--from", s2ref.Exact()).Output() o.Expect(err).To(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring("exceeds the maximum limit")) o.Expect(out).Should(o.ContainSubstring(string(imageapi.ResourceImageStreamTags))) o.Expect(out).Should(o.ContainSubstring(string(imageapi.ResourceImageStreamImages))) }) })
var _ = g.Describe("[builds][Slow] starting a build using CLI", func() { defer g.GinkgoRecover() var ( buildFixture = exutil.FixturePath("testdata", "test-build.json") exampleGemfile = exutil.FixturePath("testdata", "test-build-app", "Gemfile") exampleBuild = exutil.FixturePath("testdata", "test-build-app") oc = exutil.NewCLI("cli-start-build", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.Run("create").Args("-f", buildFixture).Execute() }) g.Describe("oc start-build --wait", func() { g.It("should start a build and wait for the build to complete", func() { g.By("starting the build with --wait flag") br, err := exutil.StartBuildAndWait(oc, "sample-build", "--wait") o.Expect(err).NotTo(o.HaveOccurred()) br.AssertSuccess() }) g.It("should start a build and wait for the build to fail", func() { g.By("starting the build with --wait flag but wrong --commit") br, _ := exutil.StartBuildAndWait(oc, "sample-build", "--wait", "--commit=fffffff") br.AssertFailure() o.Expect(br.StartBuildErr).To(o.HaveOccurred()) // start-build should detect the build error with --wait flag o.Expect(br.StartBuildStdErr).Should(o.ContainSubstring(`status is "Failed"`)) }) }) g.Describe("override environment", func() { g.It("should accept environment variables", func() { g.By("starting the build with -e FOO=bar,VAR=test") br, err := exutil.StartBuildAndWait(oc, "sample-build", "-e", "FOO=bar,VAR=test") br.AssertSuccess() buildLog, err := br.Logs() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("verifying the build output contains the env vars")) o.Expect(buildLog).To(o.ContainSubstring("FOO=bar")) o.Expect(buildLog).To(o.ContainSubstring("VAR=test")) g.By(fmt.Sprintf("verifying the build output contains inherited env vars")) // This variable is not set and thus inherited from the original build config o.Expect(buildLog).To(o.ContainSubstring("BAR=test")) }) g.It("BUILD_LOGLEVEL in buildconfig should create verbose output", func() { g.By("starting the build with buildconfig strategy env BUILD_LOGLEVEL=5") br, err := exutil.StartBuildAndWait(oc, "sample-verbose-build") br.AssertSuccess() buildLog, err := br.Logs() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("verifying the build output is verbose")) o.Expect(buildLog).To(o.ContainSubstring("Creating a new S2I builder")) }) g.It("BUILD_LOGLEVEL in buildconfig can be overridden by build-loglevel", func() { g.By("starting the build with buildconfig strategy env BUILD_LOGLEVEL=5 but build-loglevel=1") br, err := exutil.StartBuildAndWait(oc, "sample-verbose-build", "--build-loglevel=1") br.AssertSuccess() buildLog, err := br.Logs() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("verifying the build output is not verbose")) o.Expect(buildLog).NotTo(o.ContainSubstring("Creating a new S2I builder")) }) }) g.Describe("binary builds", func() { var commit string g.It("should accept --from-file as input", func() { g.By("starting the build with a Dockerfile") br, err := exutil.StartBuildAndWait(oc, "sample-build", fmt.Sprintf("--from-file=%s", exampleGemfile)) br.AssertSuccess() buildLog, err := br.Logs() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("verifying the build %q status", br.BuildPath)) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("Uploading file")) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("as binary input for the build ...")) 
o.Expect(buildLog).To(o.ContainSubstring("Your bundle is complete")) }) g.It("should accept --from-dir as input", func() { g.By("starting the build with a directory") br, err := exutil.StartBuildAndWait(oc, "sample-build", fmt.Sprintf("--from-dir=%s", exampleBuild)) br.AssertSuccess() buildLog, err := br.Logs() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("verifying the build %q status", br.BuildPath)) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("Uploading directory")) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("as binary input for the build ...")) o.Expect(buildLog).To(o.ContainSubstring("Your bundle is complete")) }) g.It("should accept --from-repo as input", func() { g.By("starting the build with a Git repository") br, err := exutil.StartBuildAndWait(oc, "sample-build", fmt.Sprintf("--from-repo=%s", exampleBuild)) br.AssertSuccess() buildLog, err := br.Logs() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("Uploading")) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring(`at commit "HEAD"`)) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("as binary input for the build ...")) o.Expect(buildLog).To(o.ContainSubstring("Your bundle is complete")) }) g.It("should accept --from-repo with --commit as input", func() { g.By("starting the build with a Git repository") gitCmd := exec.Command("git", "rev-parse", "HEAD~1") gitCmd.Dir = exampleBuild commitByteArray, err := gitCmd.CombinedOutput() commit = strings.TrimSpace(string(commitByteArray[:])) o.Expect(err).NotTo(o.HaveOccurred()) br, err := exutil.StartBuildAndWait(oc, "sample-build", fmt.Sprintf("--commit=%s", commit), fmt.Sprintf("--from-repo=%s", exampleBuild)) br.AssertSuccess() buildLog, err := br.Logs() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("Uploading")) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring(fmt.Sprintf("at commit \"%s\"", commit))) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("as binary input for the build ...")) o.Expect(buildLog).To(o.ContainSubstring(fmt.Sprintf("\"commit\":\"%s\"", commit))) o.Expect(buildLog).To(o.ContainSubstring("Your bundle is complete")) }) // run one valid binary build so we can do --from-build later g.It("should reject binary build requests without a --from-xxxx value", func() { g.By("starting a valid build with a directory") br, err := exutil.StartBuildAndWait(oc, "sample-build-binary", "--follow", fmt.Sprintf("--from-dir=%s", exampleBuild)) br.AssertSuccess() buildLog, err := br.Logs() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("Uploading directory")) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("as binary input for the build ...")) o.Expect(buildLog).To(o.ContainSubstring("Your bundle is complete")) g.By("starting a build without a --from-xxxx value") br, err = exutil.StartBuildAndWait(oc, "sample-build-binary") o.Expect(br.StartBuildErr).To(o.HaveOccurred()) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("has no valid source inputs")) g.By("starting a build from an existing binary build") br, err = exutil.StartBuildAndWait(oc, "sample-build-binary", fmt.Sprintf("--from-build=%s", "sample-build-binary-1")) o.Expect(br.StartBuildErr).To(o.HaveOccurred()) o.Expect(br.StartBuildStdErr).To(o.ContainSubstring("has no valid source inputs")) }) }) g.Describe("cancelling build started by oc start-build --wait", func() { g.It("should start a build and wait for the build to cancel", func() { g.By("starting the 
build with --wait flag") var wg sync.WaitGroup wg.Add(1) go func() { defer g.GinkgoRecover() defer wg.Done() _, stderr, err := exutil.StartBuild(oc, "sample-build", "--wait") o.Expect(err).To(o.HaveOccurred()) o.Expect(stderr).Should(o.ContainSubstring(`status is "Cancelled"`)) }() g.By("getting the build name") var buildName string wait.Poll(time.Duration(100*time.Millisecond), 1*time.Minute, func() (bool, error) { out, err := oc.Run("get"). Args("build", "--template", "{{ (index .items 0).metadata.name }}").Output() // Give it second chance in case the build resource was not created yet if err != nil || len(out) == 0 { return false, nil } buildName = out return true, nil }) o.Expect(buildName).ToNot(o.BeEmpty()) g.By(fmt.Sprintf("cancelling the build %q", buildName)) err := oc.Run("cancel-build").Args(buildName).Execute() o.Expect(err).ToNot(o.HaveOccurred()) wg.Wait() }) }) })
var _ = g.Describe("cli: parallel: oc rsync", func() { defer g.GinkgoRecover() var ( oc = exutil.NewCLI("cli-rsync", exutil.KubeConfigPath()) templatePath = exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json") sourcePath1 = exutil.FixturePath("..", "..", "examples", "image-streams") sourcePath2 = exutil.FixturePath("..", "..", "examples", "sample-app") strategies = []string{"rsync", "rsync-daemon", "tar"} ) var podName string g.JustBeforeEach(func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc new-app -f %q", templatePath)) err := oc.Run("new-app").Args("-f", templatePath).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the jenkins service get endpoints") err = oc.KubeFramework().WaitForAnEndpoint("jenkins") o.Expect(err).NotTo(o.HaveOccurred()) g.By("Getting the jenkins pod name") selector, _ := labels.Parse("name=jenkins") pods, err := oc.KubeREST().Pods(oc.Namespace()).List(selector, fields.Everything()) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods.Items)).ToNot(o.BeZero()) podName = pods.Items[0].Name }) g.Describe("copy by strategy", func() { testRsyncFn := func(strategy string) func() { return func() { g.By(fmt.Sprintf("Calling oc rsync %s %s:/tmp --strategy=%s", sourcePath1, podName, strategy)) err := oc.Run("rsync").Args( sourcePath1, fmt.Sprintf("%s:/tmp", podName), fmt.Sprintf("--strategy=%s", strategy)).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Verifying that files are copied to the container") result, err := oc.Run("rsh").Args(podName, "ls", "/tmp/image-streams").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result).To(o.ContainSubstring("image-streams-centos7.json")) g.By(fmt.Sprintf("Calling oc rsync %s/ %s:/tmp/image-streams --strategy=%s --delete", sourcePath2, podName, strategy)) err = oc.Run("rsync").Args( sourcePath2+"/", fmt.Sprintf("%s:/tmp/image-streams", podName), fmt.Sprintf("--strategy=%s", strategy), "--delete").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Verifying that the expected files are in the container") result, err = oc.Run("rsh").Args(podName, "ls", "/tmp/image-streams").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result).To(o.ContainSubstring("application-template-stibuild.json")) o.Expect(result).NotTo(o.ContainSubstring("image-streams-centos7.json")) g.By("Creating a local temporary directory") tempDir, err := ioutil.TempDir("", "rsync") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("Copying files from container to local directory: oc rsync %s:/tmp/image-streams/ %s --strategy=%s", podName, tempDir, strategy)) err = oc.Run("rsync").Args( fmt.Sprintf("%s:/tmp/image-streams/", podName), tempDir, fmt.Sprintf("--strategy=%s", strategy)).Execute() g.By(fmt.Sprintf("Verifying that files were copied to the local directory")) files, err := ioutil.ReadDir(tempDir) o.Expect(err).NotTo(o.HaveOccurred()) found := false for _, f := range files { if strings.Contains(f.Name(), "application-template-stibuild.json") { found = true break } } o.Expect(found).To(o.BeTrue()) g.By(fmt.Sprintf("Copying files from container to local directory with --delete: oc rsync %s:/tmp/image-streams/ %s --strategy=%s", podName, tempDir, strategy)) originalName := "application-template-stibuild.json" modifiedName := "application-template-stirenamed.json" err = os.Rename(filepath.Join(tempDir, originalName), filepath.Join(tempDir, modifiedName)) o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("rsync").Args( 
fmt.Sprintf("%s:/tmp/image-streams/", podName), tempDir, "--delete", fmt.Sprintf("--strategy=%s", strategy)).Execute() g.By(fmt.Sprintf("Verifying that the expected files are in the local directory")) o.Expect(err).NotTo(o.HaveOccurred()) // After the copy with --delete, the file with 'modifiedName' should have been deleted // and the file with 'originalName' should have been restored. foundOriginal := false foundModified := false files, err = ioutil.ReadDir(tempDir) for _, f := range files { if strings.Contains(f.Name(), originalName) { foundOriginal = true } if strings.Contains(f.Name(), modifiedName) { foundModified = true } } g.By("Verifying original file is in the local directory") o.Expect(foundOriginal).To(o.BeTrue()) g.By("Verifying renamed file is not in the local directory") o.Expect(foundModified).To(o.BeFalse()) g.By("Getting an error if copying to a destination directory where there is no write permission") result, err = oc.Run("rsync").Args( sourcePath1, fmt.Sprintf("%s:/", podName), fmt.Sprintf("--strategy=%s", strategy)).Output() o.Expect(err).To(o.HaveOccurred()) } } for _, strategy := range strategies { g.It(fmt.Sprintf("should copy files with the %s strategy", strategy), testRsyncFn(strategy)) } }) g.Describe("rsync specific flags", func() { g.It("should honor the --exclude flag", func() { g.By(fmt.Sprintf("Calling oc rsync %s %s:/tmp --exclude=image-streams-rhel7.json", sourcePath1, podName)) err := oc.Run("rsync").Args( sourcePath1, fmt.Sprintf("%s:/tmp", podName), "--exclude=image-streams-rhel7.json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Verifying that files are copied to the container") result, err := oc.Run("rsh").Args(podName, "ls", "/tmp/image-streams").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result).To(o.ContainSubstring("image-streams-centos7.json")) o.Expect(result).NotTo(o.ContainSubstring("image-streams-rhel7.json")) }) g.It("should honor the --include flag", func() { g.By(fmt.Sprintf("Calling oc rsync %s %s:/tmp --exclude=*.json --include=image-streams-rhel7.json", sourcePath1, podName)) err := oc.Run("rsync").Args( sourcePath1, fmt.Sprintf("%s:/tmp", podName), "--exclude=*.json", "--include=image-streams-rhel7.json").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("Verifying that files are copied to the container") result, err := oc.Run("rsh").Args(podName, "ls", "/tmp/image-streams").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result).To(o.ContainSubstring("image-streams-rhel7.json")) o.Expect(result).NotTo(o.ContainSubstring("image-streams-centos7.json")) }) g.It("should honor the --progress flag", func() { g.By(fmt.Sprintf("Calling oc rsync %s %s:/tmp --progress", sourcePath1, podName)) result, err := oc.Run("rsync").Args( sourcePath1, fmt.Sprintf("%s:/tmp", podName), "--progress").Output() o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result).To(o.ContainSubstring("100%")) }) g.It("should honor the --no-perms flag", func() { g.By("Creating a temporary destination directory") tempDir, err := ioutil.TempDir("", "rsync") o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("Copying the jenkins directory from the pod to the temp directory: oc rsync %s:/var/lib/jenkins %s", podName, tempDir)) err = oc.Run("rsync").Args( fmt.Sprintf("%s:/var/lib/jenkins", podName), tempDir).Execute() o.Expect(err).NotTo(o.HaveOccurred()) localJenkinsDir := filepath.Join(tempDir, "jenkins") g.By("By changing the permissions on the local jenkins directory") err = os.Chmod(localJenkinsDir, 0700) 
o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("Copying the local jenkins directory to the pod with no flags: oc rsync %s/ %s:/var/lib/jenkins", localJenkinsDir, podName)) err = oc.Run("rsync").Args( fmt.Sprintf("%s/", localJenkinsDir), fmt.Sprintf("%s:/var/lib/jenkins", podName)).Execute() // An error should occur trying to set permissions on the directory o.Expect(err).To(o.HaveOccurred()) g.By(fmt.Sprintf("Copying the local jenkins directory to the pod with: oc rsync %s/ %s:/var/lib/jenkins --no-perms", localJenkinsDir, podName)) err = oc.Run("rsync").Args( fmt.Sprintf("%s/", localJenkinsDir), fmt.Sprintf("%s:/var/lib/jenkins", podName), "--no-perms").Execute() o.Expect(err).NotTo(o.HaveOccurred()) }) }) })
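For quick reference, the flag combinations exercised by the rsync specs above correspond to invocations of this shape (pod name and paths are placeholders):

// oc rsync ./local-dir <pod>:/tmp --strategy=rsync              (also rsync-daemon or tar)
// oc rsync ./local-dir/ <pod>:/remote/dir --delete               (make the destination mirror the source)
// oc rsync <pod>:/remote/dir/ ./local-dir --strategy=tar         (copy from the pod to a local directory)
// oc rsync ./local-dir <pod>:/tmp --exclude=*.json --include=image-streams-rhel7.json
// oc rsync <pod>:/var/lib/jenkins ./local-dir --no-perms         (skip permission changes on the destination)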
var _ = g.Describe("[builds][Slow] build can have Dockerfile input", func() { defer g.GinkgoRecover() var ( oc = exutil.NewCLI("build-dockerfile-env", exutil.KubeConfigPath()) testDockerfile = ` FROM openshift/origin-base USER 1001 ` testDockerfile2 = ` FROM centos:7 RUN yum install -y httpd USER 1001 ` ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) oc.SetOutputDir(exutil.TestContext.OutputDir) }) g.Describe("being created from new-build", func() { g.It("should create a image via new-build", func() { g.By(fmt.Sprintf("calling oc new-build with Dockerfile")) err := oc.Run("new-build").Args("-D", "-").InputString(testDockerfile).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") bc, err := oc.REST().BuildConfigs(oc.Namespace()).Get("origin-base") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(bc.Spec.Source.Git).To(o.BeNil()) o.Expect(bc.Spec.Source.Dockerfile).NotTo(o.BeNil()) o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile)) buildName := "origin-base-1" g.By("expecting the Dockerfile build is in Complete phase") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) o.Expect(err).NotTo(o.HaveOccurred()) g.By("getting the build Docker image reference from ImageStream") image, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("origin-base", "latest") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(image.Image.DockerImageMetadata.Config.User).To(o.Equal("1001")) }) g.It("should create a image via new-build and infer the origin tag", func() { g.By(fmt.Sprintf("calling oc new-build with Dockerfile that uses the same tag as the output")) err := oc.Run("new-build").Args("-D", "-").InputString(testDockerfile2).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") bc, err := oc.REST().BuildConfigs(oc.Namespace()).Get("centos") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(bc.Spec.Source.Git).To(o.BeNil()) o.Expect(bc.Spec.Source.Dockerfile).NotTo(o.BeNil()) o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile2)) o.Expect(bc.Spec.Output.To).ToNot(o.BeNil()) o.Expect(bc.Spec.Output.To.Name).To(o.Equal("centos:latest")) buildName := "centos-1" g.By("expecting the Dockerfile build is in Complete phase") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) o.Expect(err).NotTo(o.HaveOccurred()) g.By("getting the built Docker image reference from ImageStream") image, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("centos", "latest") o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(image.Image.DockerImageMetadata.Config.User).To(o.Equal("1001")) g.By("checking for the imported tag") _, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("centos", "7") o.Expect(err).NotTo(o.HaveOccurred()) }) }) })
var _ = g.Describe("[image_ecosystem][mongodb] openshift mongodb replication", func() { defer g.GinkgoRecover() const ( templatePath = "https://raw.githubusercontent.com/sclorg/mongodb-container/master/2.4/examples/replica/mongodb-clustered.json" deploymentConfigName = "mongodb" expectedValue = `{ "status" : "passed" }` insertCmd = "db.bar.save(" + expectedValue + ")" ) const ( expectedReplicasAfterDeployment = 3 expectedReplicasAfterScalingUp = expectedReplicasAfterDeployment + 2 ) oc := exutil.NewCLI("mongodb-replica", exutil.KubeConfigPath()).Verbose() g.Describe("creating from a template", func() { g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() { exutil.CheckOpenShiftNamespaceImageStreams(oc) g.By("creating a new app") o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed()) g.By("waiting for the deployment to complete") err := exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), deploymentConfigName, oc) o.Expect(err).NotTo(o.HaveOccurred()) podNames := waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterDeployment, "mongodb-replica") mongo := db.NewMongoDB(podNames[0]) g.By(fmt.Sprintf("expecting that replica set have %d members", expectedReplicasAfterDeployment)) assertMembersInReplica(oc, mongo, expectedReplicasAfterDeployment) g.By("expecting that we can insert a new record on primary node") replicaSet := mongo.(exutil.ReplicaSet) _, err = replicaSet.QueryPrimary(oc, insertCmd) o.Expect(err).ShouldNot(o.HaveOccurred()) g.By("expecting that we can read a record from all members") for _, podName := range podNames { tryToReadFromPod(oc, podName, expectedValue) } g.By(fmt.Sprintf("scaling deployment config %s to %d replicas", deploymentConfigName, expectedReplicasAfterScalingUp)) err = oc.Run("scale").Args("dc", deploymentConfigName, "--replicas="+fmt.Sprint(expectedReplicasAfterScalingUp), "--timeout=30s").Execute() o.Expect(err).NotTo(o.HaveOccurred()) podNames = waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterScalingUp, "mongodb-replica") mongo = db.NewMongoDB(podNames[0]) g.By("expecting that scaling replica set up should have more members") assertMembersInReplica(oc, mongo, expectedReplicasAfterScalingUp) }) }) })
err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1) pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods.Items)).To(o.Equal(1)) g.By("after slave is scaled to 0 and then back to 4 replicas") err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute() o.Expect(err).NotTo(o.HaveOccurred()) assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4) } } var _ = g.Describe("images: mysql: replication", func() { defer g.GinkgoRecover() ocs := make([]*exutil.CLI, len(templatePaths)) for i, template := range templatePaths { ocs[i] = exutil.NewCLI(fmt.Sprintf("mysql-replication-%d", i), exutil.KubeConfigPath()) g.It(fmt.Sprintf("MySQL replication template %s", template), replicationTestFactory(ocs[i], template)) } })
var _ = g.Describe("[jenkins][Slow] openshift pipeline plugin", func() { defer g.GinkgoRecover() var oc = exutil.NewCLI("jenkins-plugin", exutil.KubeConfigPath()) var hostPort string g.BeforeEach(func() { g.By("set up policy for jenkins jobs") err := oc.Run("policy").Args("add-role-to-user", "edit", "system:serviceaccount:"+oc.Namespace()+":default").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("kick off the build for the jenkins ephermeral and application templates") tag := []string{"openshift/jenkins-plugin-snapshot-test:latest"} hexIDs, err := exutil.DumpAndReturnTagging(tag) var jenkinsEphemeralPath string var testingSnapshot bool if len(hexIDs) > 0 && err == nil { // found an openshift pipeline plugin test image, must be testing a proposed change to the plugin jenkinsEphemeralPath = exutil.FixturePath("fixtures", "jenkins-ephemeral-template-test-new-plugin.json") testingSnapshot = true } else { // no test image, testing the base jenkins image with the current, supported version of the plugin jenkinsEphemeralPath = exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json") } err = oc.Run("new-app").Args(jenkinsEphemeralPath).Execute() o.Expect(err).NotTo(o.HaveOccurred()) jenkinsApplicationPath := exutil.FixturePath("..", "..", "examples", "jenkins", "application-template.json") err = oc.Run("new-app").Args(jenkinsApplicationPath).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for jenkins deployment") err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins") if err != nil { exutil.DumpDeploymentLogs("jenkins", oc) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("get ip and port for jenkins service") serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output() o.Expect(err).NotTo(o.HaveOccurred()) port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output() o.Expect(err).NotTo(o.HaveOccurred()) hostPort = fmt.Sprintf("%s:%s", serviceIP, port) g.By("wait for jenkins to come up") err = waitForJenkinsActivity(fmt.Sprintf("http://%s", hostPort), "", 200) o.Expect(err).NotTo(o.HaveOccurred()) if testingSnapshot { g.By("verifying the test image is being used") // for the test image, confirm that a snapshot version of the plugin is running in the jenkins image we'll test against err = waitForJenkinsActivity(fmt.Sprintf("http://%s/pluginManager/plugin/openshift-pipeline/thirdPartyLicenses", hostPort), `About OpenShift Pipeline Jenkins Plugin ([0-9\.]+)-SNAPSHOT`, 200) } }) g.Context("jenkins-plugin test context ", func() { g.It("jenkins-plugin test case execution", func() { g.By("create jenkins job config xml file, convert to bytes for http post") data := jenkinsJobBytes("testjob-plugin.xml", oc.Namespace()) g.By("make http request to create job") immediateInteractionWithJenkins(fmt.Sprintf("http://%s/createItem?name=test-plugin-job", hostPort), "POST", bytes.NewBuffer(data), 200) g.By("make http request to kick off build") immediateInteractionWithJenkins(fmt.Sprintf("http://%s/job/test-plugin-job/build?delay=0sec", hostPort), "POST", nil, 201) // the build and deployment is by far the most time consuming portion of the test jenkins job; // we leverage some of the openshift utilities for waiting for the deployment before we poll // jenkins for the sucessful job completion g.By("waiting for frontend, frontend-prod deployments as signs that 
the build has finished") err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend") if err != nil { exutil.DumpDeploymentLogs("frontend", oc) } o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend-prod") if err != nil { exutil.DumpDeploymentLogs("frontend-prod", oc) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("get build console logs and see if succeeded") err = waitForJenkinsActivity(fmt.Sprintf("http://%s/job/test-plugin-job/1/console", hostPort), "Finished: SUCCESS", 200) o.Expect(err).NotTo(o.HaveOccurred()) }) }) })
var _ = g.Describe("[builds][Conformance] build without output image", func() { defer g.GinkgoRecover() var ( dockerImageFixture = exutil.FixturePath("fixtures", "test-docker-no-outputname.json") s2iImageFixture = exutil.FixturePath("fixtures", "test-s2i-no-outputname.json") oc = exutil.NewCLI("build-no-outputname", exutil.KubeConfigPath()) ) g.Describe("building from templates", func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.It(fmt.Sprintf("should create an image from %q docker template without an output image reference defined", dockerImageFixture), func() { err := oc.Run("create").Args("-f", dockerImageFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting build to pass without an output image reference specified") out, err := oc.Run("start-build").Args("test-docker", "--follow", "--wait").Output() if err != nil { fmt.Fprintln(g.GinkgoWriter, out) } o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring(`Build does not have an Output defined, no output image was pushed to a registry.`)) }) g.It(fmt.Sprintf("should create an image from %q S2i template without an output image reference defined", s2iImageFixture), func() { err := oc.Run("create").Args("-f", s2iImageFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting build to pass without an output image reference specified") out, err := oc.Run("start-build").Args("test-sti", "--follow", "--wait").Output() if err != nil { fmt.Fprintln(g.GinkgoWriter, out) } o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(out).Should(o.ContainSubstring(`Build does not have an Output defined, no output image was pushed to a registry.`)) }) }) })
var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl image", func() { defer g.GinkgoRecover() var ( dancerTemplate = "https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql.json" oc = exutil.NewCLI("s2i-perl", exutil.KubeConfigPath()) modifyCommand = []string{"sed", "-ie", `s/data => \$data\[0\]/data => "1337"/`, "lib/default.pm"} pageCountFn = func(count int) string { return fmt.Sprintf(`<span class="code" id="count-value">%d</span>`, count) } dcName = "dancer-mysql-example-1" dcLabel = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName)) ) g.Describe("Dancer example", func() { g.It(fmt.Sprintf("should work with hot deploy"), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) exutil.CheckOpenShiftNamespaceImageStreams(oc) g.By(fmt.Sprintf("calling oc new-app -f %q", dancerTemplate)) err := oc.Run("new-app").Args("-f", dancerTemplate).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for build to finish") err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), "dancer-mysql-example-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) if err != nil { exutil.DumpBuildLogs("dancer-mysql-example", oc) } o.Expect(err).NotTo(o.HaveOccurred()) // oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, prefacing with our WaitForADeploymentToComplete, // which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "dancer-mysql-example", oc) o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for endpoint") err = oc.KubeFramework().WaitForAnEndpoint("dancer-mysql-example") o.Expect(err).NotTo(o.HaveOccurred()) assertPageCountIs := func(i int) { _, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) result, err := CheckPageContains(oc, "dancer-mysql-example", "", pageCountFn(i)) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result).To(o.BeTrue()) } g.By("checking page count") assertPageCountIs(1) assertPageCountIs(2) g.By("modifying the source code with disabled hot deploy") RunInPodContainer(oc, dcLabel, modifyCommand) assertPageCountIs(3) pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods.Items)).To(o.Equal(1)) g.By("turning on hot-deploy") err = oc.Run("env").Args("rc", dcName, "PERL_APACHE2_RELOAD=true").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("modifying the source code with enabled hot deploy") RunInPodContainer(oc, dcLabel, modifyCommand) assertPageCountIs(1337) }) }) })
var _ = g.Describe("[images][mongodb] openshift mongodb image", func() { defer g.GinkgoRecover() templatePath := exutil.FixturePath("..", "..", "examples", "db-templates", "mongodb-ephemeral-template.json") oc := exutil.NewCLI("mongodb-create", exutil.KubeConfigPath()).Verbose() g.Describe("creating from a template", func() { g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() { g.By("creating a new app") o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed()) g.By("waiting for the deployment to complete") err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mongodb") if err != nil { exutil.DumpDeploymentLogs("mongodb", oc) } o.Expect(err).ShouldNot(o.HaveOccurred()) g.By("expecting the mongodb pod is running") podNames, err := exutil.WaitForPods( oc.KubeREST().Pods(oc.Namespace()), exutil.ParseLabelsOrDie("name=mongodb"), exutil.CheckPodIsRunningFn, 1, 1*time.Minute, ) o.Expect(err).ShouldNot(o.HaveOccurred()) o.Expect(podNames).Should(o.HaveLen(1)) g.By("expecting the mongodb service is answering for ping") mongo := db.NewMongoDB(podNames[0]) ok, err := mongo.IsReady(oc) o.Expect(err).ShouldNot(o.HaveOccurred()) o.Expect(ok).Should(o.BeTrue()) g.By("expecting that we can insert a new record") result, err := mongo.Query(oc, `db.foo.save({ "status": "passed" })`) o.Expect(err).ShouldNot(o.HaveOccurred()) o.Expect(result).Should(o.ContainSubstring(`WriteResult({ "nInserted" : 1 })`)) g.By("expecting that we can read a record") findCmd := "printjson(db.foo.find({}, {_id: 0}).toArray())" // don't include _id field to output because it changes every time result, err = mongo.Query(oc, findCmd) o.Expect(err).ShouldNot(o.HaveOccurred()) o.Expect(result).Should(o.ContainSubstring(`{ "status" : "passed" }`)) }) }) })
var _ = g.Describe("[builds][pullsecret][Conformance] docker build using a pull secret", func() { defer g.GinkgoRecover() const ( buildTestPod = "build-test-pod" buildTestService = "build-test-svc" ) var ( buildFixture = exutil.FixturePath("testdata", "test-docker-build-pullsecret.json") oc = exutil.NewCLI("docker-build-pullsecret", exutil.KubeConfigPath()) ) g.JustBeforeEach(func() { g.By("waiting for builder service account") err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace())) o.Expect(err).NotTo(o.HaveOccurred()) }) g.Describe("Building from a template", func() { g.It("should create a docker build that pulls using a secret run it", func() { oc.SetOutputDir(exutil.TestContext.OutputDir) g.By(fmt.Sprintf("calling oc create -f %q", buildFixture)) err := oc.Run("create").Args("-f", buildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") out, err := oc.Run("start-build").Args("docker-build").Output() fmt.Fprintf(g.GinkgoWriter, "\nstart-build output:\n%s\n", out) o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the build succeeds") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "docker-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) if err != nil { exutil.DumpBuildLogs("docker-build", oc) } o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a second build that pulls the image from the first build") out, err = oc.Run("start-build").Args("docker-build-pull").Output() fmt.Fprintf(g.GinkgoWriter, "\nstart-build output:\n%s\n", out) o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the build succeeds") err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "docker-build-pull-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn) if err != nil { exutil.DumpBuildLogs("docker-build-pull", oc) } o.Expect(err).NotTo(o.HaveOccurred()) }) }) })