Example #1
func (t TableEntry) generateIt(itBody reflect.Value) {
	if t.Pending {
		ginkgo.PIt(t.Description)
		return
	}

	values := []reflect.Value{}
	for i, param := range t.Parameters {
		var value reflect.Value

		if param == nil {
			inType := itBody.Type().In(i)
			value = reflect.Zero(inType)
		} else {
			value = reflect.ValueOf(param)
		}

		values = append(values, value)
	}

	body := func() {
		itBody.Call(values)
	}

	if t.Focused {
		ginkgo.FIt(t.Description, body)
	} else {
		ginkgo.It(t.Description, body)
	}
}
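This method is the engine of a table-driven spec: each TableEntry reflects its parameters into a shared test body. A minimal sketch of a wrapper that drives such entries, modeled on ginkgo's table extension (the surrounding API is assumed, not shown in this example):

// DescribeTable registers one It per entry, all sharing the same body.
// Sketch only: assumes the TableEntry type used by generateIt above.
func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
	ginkgo.Describe(description, func() {
		body := reflect.ValueOf(itBody)
		for _, entry := range entries {
			entry.generateIt(body)
		}
	})
	return true
}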
Example #2
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It(fmt.Sprintf("should build a "+c.repoName+" image and run it in a pod"), func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				exutil.CheckOpenShiftNamespaceImageStreams(oc)
				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				// all the templates automatically start a build.
				buildName := c.buildConfigName + "-1"

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
				if err != nil {
					exutil.DumpBuildLogs(c.buildConfigName, oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the app deployment to be complete")
				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				if len(c.dbDeploymentConfigName) > 0 {
					g.By("expecting the db deployment to be complete")
					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
					o.Expect(err).NotTo(o.HaveOccurred())
				}

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying string from app request")
				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, 30*time.Second)
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
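A hedged sketch of how this factory could be registered as a spec. The SampleRepoConfig field names are taken from their uses above; the values are illustrative only:

var _ = g.Describe("samplerepo: ruby", NewSampleRepoTest(SampleRepoConfig{
	repoName:               "ruby",
	templateURL:            "https://example.com/ruby-template.json", // illustrative URL
	buildConfigName:        "ruby-sample-build",
	deploymentConfigName:   "frontend",
	dbDeploymentConfigName: "", // empty: skips the db deployment wait above
	serviceName:            "frontend",
	appPath:                "/",
	expectedString:         "Welcome",
}))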
Example #3
func (t TableEntry) generateIt(itBody reflect.Value) {
	if t.Pending {
		ginkgo.PIt(t.Description)
		return
	}

	values := []reflect.Value{}
	for _, param := range t.Parameters {
		values = append(values, reflect.ValueOf(param))
	}

	body := func() {
		itBody.Call(values)
	}

	if t.Focused {
		ginkgo.FIt(t.Description, body)
	} else {
		ginkgo.It(t.Description, body)
	}
}
Example #4
	})

	g.Describe("build with image source", func() {
		g.It("should complete successfully and contain the expected file", func() {
			g.By("Creating build configs for source build")
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("starting the source strategy build")
			err = oc.Run("start-build").Args("imagesourcebuild").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("expecting the builds to complete successfully")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "imagesourcebuild-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("imagesourcebuild", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to deploy successfully")
			pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), imageSourceLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(len(pods)).To(o.Equal(1))
			pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to contain the file from the input image")
			out, err := oc.Run("exec").Args(pod.Name, "-c", pod.Spec.Containers[0].Name, "--", "ls", "injected/dir").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("jenkins.war"))
		})
	})
	g.Describe("build with image docker", func() {
		g.It("should complete successfully and contain the expected file", func() {
Example #5
File: secrets.go Project: grs/origin
		g.It("should print the secrets during the source strategy build", func() {
			g.By("creating the sample secret files")
			err := oc.Run("create").Args("-f", secretsFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("create").Args("-f", secondSecretsFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("creating the sample source build config and image stream")
			err = oc.Run("create").Args("-f", isFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			err = oc.Run("create").Args("-f", sourceBuildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting the sample source build")
			out, err := oc.Run("start-build").Args("test", "--follow", "--wait").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("secret1=secret1"))
			o.Expect(out).To(o.ContainSubstring("secret3=secret3"))
			o.Expect(out).To(o.ContainSubstring("relative-secret1=secret1"))
			o.Expect(out).To(o.ContainSubstring("relative-secret3=secret3"))

			g.By("checking the status of the build")
			build, err := oc.REST().Builds(oc.Namespace()).Get("test-1")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseComplete))

			g.By("getting the image name")
			image, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("verifying the build secrets are not present in the output image")
			pod := exutil.GetPodForContainer(kapi.Container{Name: "test", Image: image})
			oc.KubeFramework().TestContainerOutput("test-build-secret-source", pod, 0, []string{
				"relative-secret1=empty",
				"secret3=empty",
			})
		})
Example #6
	bdd.BeforeEach(func() {
		reset.Enable()
	})

	bdd.AfterEach(func() {
		reset.Disable()
	})

	bdd.It("ErrorWriter", func() {
		w := ErrorWriter(0)
		validErrorWrite(w, 10, 0)

		w = ErrorWriter(5)
		validErrorWriteSuccess(w, 3)
		validErrorWrite(w, 10, 2)
		validErrorWrite(w, 10, 0)

		w = ErrorWriter(5)
		validErrorWriteSuccess(w, 5)
		validErrorWrite(w, 10, 0)
	})

	bdd.It("SlowWriter", func() {
		buf := &bytes.Buffer{}
		w := NewSlowWriter(buf, 15*time.Millisecond)
		tStart := time.Now()
		Ω(w.Write([]byte("abc"))).Should(Equal(3))
		tEnd := time.Now()
		d := tEnd.Sub(tStart)
		Ω(buf.Bytes()).Should(Equal([]byte("abc")))
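The snippet exercises two test doubles. One way ErrorWriter could be implemented, inferred from the assertions above (ErrorWriter(n) accepts n bytes in total, then fails every write; an assumption, not the package's actual code):

import (
	"errors"
	"io"
)

// errorWriter accepts up to `remaining` bytes in total, then errors.
type errorWriter struct{ remaining int }

// ErrorWriter returns a writer that starts failing once n bytes have been written.
func ErrorWriter(n int) io.Writer { return &errorWriter{remaining: n} }

func (w *errorWriter) Write(p []byte) (int, error) {
	if len(p) <= w.remaining {
		w.remaining -= len(p)
		return len(p), nil
	}
	// Partial write: report how many bytes fit, then fail.
	n := w.remaining
	w.remaining = 0
	return n, errors.New("ErrorWriter: write limit exceeded")
}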
Example #7
	postgreSQLHelperName          = "postgresql-helper"
	postgreSQLImages              = []string{
		"openshift/postgresql-92-centos7",
		"centos/postgresql-94-centos7",
		"registry.access.redhat.com/openshift3/postgresql-92-rhel7",
		"registry.access.redhat.com/rhscl/postgresql-94-rhel7",
	}
)

var _ = g.Describe("[LocalNode][images][postgresql][Slow] openshift postgresql replication", func() {
	defer g.GinkgoRecover()

	for i, image := range postgreSQLImages {
		oc := exutil.NewCLI(fmt.Sprintf("postgresql-replication-%d", i), exutil.KubeConfigPath())
		testFn := PostgreSQLReplicationTestFactory(oc, image)
		g.It(fmt.Sprintf("postgresql replication works for %s", image), testFn)
	}
})

// CreatePostgreSQLReplicationHelpers creates a set of PostgreSQL helpers for the master,
// the slaves, and an extra helper that is used for the remote login test.
func CreatePostgreSQLReplicationHelpers(c kclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
	podNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", masterDeployment)), exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())
	masterPod := podNames[0]

	slavePods, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", slaveDeployment)), exutil.CheckPodIsRunningFn, slaveCount, 3*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())

	// Create PostgreSQL helper for master
	master := db.NewPostgreSQL(masterPod, "")
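The snippet cuts off after the master helper. A hedged continuation of the same pattern, assumed from the function signature and the WaitForPods calls above (not the project's verbatim code):

	// Create PostgreSQL helpers for the slaves and for the extra remote-login helper.
	slaves := make([]exutil.Database, len(slavePods))
	for i, slavePod := range slavePods {
		slaves[i] = db.NewPostgreSQL(slavePod, masterPod)
	}

	helperNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", helperDeployment)), exutil.CheckPodIsRunningFn, 1, 1*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())
	helper := db.NewPostgreSQL(helperNames[0], masterPod)

	return master, slaves, helper
}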
Example #8
		g.It("runs the builds in parallel", func() {
			g.By("starting multiple builds")
			var (
				startedBuilds []string
				counter       int
			)
			bcName := "sample-parallel-build"

			buildWatch, err := oc.Client().Builds(oc.Namespace()).Watch(kapi.ListOptions{
				LabelSelector: buildutil.BuildConfigSelector(bcName),
			})
			o.Expect(err).NotTo(o.HaveOccurred())
			defer buildWatch.Stop()

			// Start first build
			stdout, _, err := exutil.StartBuild(oc, bcName, "-o=name")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(strings.TrimSpace(stdout)).ShouldNot(o.HaveLen(0))
			// extract build name from "build/buildName" resource id
			startedBuilds = append(startedBuilds, strings.TrimSpace(strings.Split(stdout, "/")[1]))

			// Wait for it to become running
			for {
				event := <-buildWatch.ResultChan()
				build := event.Object.(*buildapi.Build)
				o.Expect(buildutil.IsBuildComplete(build)).Should(o.BeFalse())
				if build.Name == startedBuilds[0] && build.Status.Phase == buildapi.BuildPhaseRunning {
					break
				}
			}

			for i := 0; i < 2; i++ {
				stdout, _, err = exutil.StartBuild(oc, bcName, "-o=name")
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(strings.TrimSpace(stdout)).ShouldNot(o.HaveLen(0))
				startedBuilds = append(startedBuilds, strings.TrimSpace(strings.Split(stdout, "/")[1]))
			}

			o.Expect(err).NotTo(o.HaveOccurred())

			for {
				event := <-buildWatch.ResultChan()
				build := event.Object.(*buildapi.Build)
				if build.Name == startedBuilds[0] {
					if buildutil.IsBuildComplete(build) {
						break
					}
					continue
				}
				// When the other two builds we started after waiting for the first
				// build to become running are Pending, verify the first build is still
				// running (so the other two builds were started in parallel with the
				// first build).
				// TODO: This might introduce flakes in case the first build completes
				// sooner or fails.
				if build.Status.Phase == buildapi.BuildPhasePending {
					c := buildclient.NewOSClientBuildClient(oc.Client())
					firstBuildRunning := false
					_, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool {
						if b.Name == startedBuilds[0] && b.Status.Phase == buildapi.BuildPhaseRunning {
							firstBuildRunning = true
						}
						return false
					})
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(firstBuildRunning).Should(o.BeTrue())
					counter++
				}
				// When the build failed or completed prematurely, fail the test
				o.Expect(buildutil.IsBuildComplete(build)).Should(o.BeFalse())
				if counter == 2 {
					break
				}
			}
			o.Expect(counter).Should(o.BeEquivalentTo(2))
		})
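The bare receives on buildWatch.ResultChan() block forever if the watch stalls. A hedged variant of the first wait loop with a timeout guard (same names as above; the timeout value is illustrative):

			// Wait for the first build to start running, but give up eventually.
			timeout := time.After(10 * time.Minute)
			for running := false; !running; {
				select {
				case event := <-buildWatch.ResultChan():
					build := event.Object.(*buildapi.Build)
					o.Expect(buildutil.IsBuildComplete(build)).Should(o.BeFalse())
					running = build.Name == startedBuilds[0] && build.Status.Phase == buildapi.BuildPhaseRunning
				case <-timeout:
					g.Fail("timed out waiting for the first build to start running")
				}
			}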
Example #9
File: deployments.go Project: ncdc/origin
		g.It("should only deploy the last deployment [Conformance]", func() {
			_, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			iterations := 15
			for i := 0; i < iterations; i++ {
				if rand.Float32() < 0.2 {
					time.Sleep(time.Duration(rand.Float32() * rand.Float32() * float32(time.Second)))
				}
				switch n := rand.Float32(); {

				case n < 0.4:
					// trigger a new deployment
					e2e.Logf("%02d: triggering a new deployment with config change", i)
					out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output()
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(out).To(o.ContainSubstring("updated"))

				case n < 0.7:
					// cancel any running deployment
					e2e.Logf("%02d: cancelling deployment", i)
					if out, err := oc.Run("deploy").Args("dc/deployment-simple", "--cancel").Output(); err != nil {
						// TODO: we should fix this
						if !strings.Contains(out, "the object has been modified") {
							o.Expect(err).NotTo(o.HaveOccurred())
						}
						e2e.Logf("--cancel deployment failed due to conflict: %v", err)
					}

				case n < 0.0:
					// delete the deployer pod - disabled because it forces the system to wait for the sync loop
					e2e.Logf("%02d: deleting one or more deployer pods", i)
					_, rcs, pods, err := deploymentInfo(oc, "deployment-simple")
					if err != nil {
						e2e.Logf("%02d: unable to get deployment info: %v", i, err)
						continue
					}
					all, err := deploymentPods(pods)
					if err != nil {
						e2e.Logf("%02d: unable to get deployment pods: %v", i, err)
						continue
					}
					if len(all) == 0 {
						e2e.Logf("%02d: no deployer pods", i)
						continue
					}
					top := len(rcs) - 1
					for j := top; j >= top-1 && j >= 0; j-- {
						pods, ok := all[rcs[j].Name]
						if !ok {
							e2e.Logf("%02d: no deployer pod for rc %q", i, rcs[j].Name)
							continue
						}
						for _, pod := range pods {
							e2e.Logf("%02d: deleting deployer pod %s", i, pod.Name)
							options := kapi.NewDeleteOptions(0)
							if rand.Float32() < 0.5 {
								options = nil
							}
							if err := oc.KubeREST().Pods(oc.Namespace()).Delete(pod.Name, options); err != nil {
								e2e.Logf("%02d: unable to delete deployer pod %q: %v", i, pod.Name, err)
							}
						}
					}
					e2e.Logf("%02d: triggering a new deployment with config change", i)
					out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output()
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(out).To(o.ContainSubstring("updated"))

				default:
					// wait for the deployment to be running
					e2e.Logf("%02d: waiting for current deployment to start running", i)
					o.Expect(waitForLatestCondition(oc, "deployment-simple", deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred())
				}
			}

			// trigger one more deployment, just in case we cancelled the latest output
			out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", iterations)).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("updated"))

			g.By("verifying all but terminal deployment is marked complete")
			o.Expect(waitForLatestCondition(oc, "deployment-simple", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred())
		})
Example #10
File: nosrc.go Project: sgallagher/origin
		oc           = exutil.NewCLI("cli-build-nosrc", exutil.KubeConfigPath())
		exampleBuild = exutil.FixturePath("..", "extended", "testdata", "test-build-app")
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()
	})

	g.Describe("started build", func() {
		g.It("should build even with an empty source in build config", func() {
			g.By("starting the build with --wait flag")
			out, err := oc.Run("start-build").Args("nosrc-build", "--wait", fmt.Sprintf("--from-dir=%s", exampleBuild)).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("verifying build success")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "nosrc-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("verifying the build %q status", out))
			build, err := oc.REST().Builds(oc.Namespace()).Get("nosrc-build-1")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(build.Spec.Source.Dockerfile).To(o.BeNil())
			o.Expect(build.Spec.Source.Git).To(o.BeNil())
			o.Expect(build.Spec.Source.Images).To(o.BeNil())
			o.Expect(build.Spec.Source.Binary).NotTo(o.BeNil())
		})
	})
})
Example #11
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()
	})

	g.Describe("oc start-build --wait", func() {
		g.It("should start a build and wait for the build to complete", func() {
			g.By("starting the build with --wait flag")
			out, err := oc.Run("start-build").Args("sample-build", "--wait").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("verifying the build %q status", out))
			build, err := oc.REST().Builds(oc.Namespace()).Get(out)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseComplete))

		})

		g.It("should start a build and wait for the build to fail", func() {
			g.By("starting the build with --wait flag but wrong --commit")
			out, err := oc.Run("start-build").
				Args("sample-build", "--wait", "--commit", "fffffff").
				Output()
			o.Expect(err).To(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(`status is "Failed"`))
		})
	})
Example #12
File: s2i_perl.go Project: ncantor/origin
		g.It(fmt.Sprintf("should work with hot deploy"), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc new-app -f %q", dancerTemplate))
			err := oc.Run("new-app").Args("-f", dancerTemplate).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for build to finish")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "dancer-mysql-example-1", exutil.CheckBuildSuccessFunc, exutil.CheckBuildFailedFunc)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for endpoint")
			err = oc.KubeFramework().WaitForAnEndpoint("dancer-mysql-example")
			o.Expect(err).NotTo(o.HaveOccurred())

			assertPageCountIs := func(i int) {
				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFunc, 1, 120*time.Second)
				o.Expect(err).NotTo(o.HaveOccurred())

				result, err := CheckPageContains(oc, "dancer-mysql-example", "", pageCountFunc(i))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(result).To(o.BeTrue())
			}

			g.By("checking page count")
			assertPageCountIs(1)
			assertPageCountIs(2)

			g.By("modifying the source code with disabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			assertPageCountIs(3)

			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(dcLabel, nil)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(len(pods.Items)).To(o.Equal(1))

			g.By("turning on hot-deploy")
			err = oc.Run("env").Args("rc", dcName, "PERL_APACHE2_RELOAD=true").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 60*time.Second)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("modifying the source code with enabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			assertPageCountIs(1337)
		})
Example #13
File: proxy.go Project: RomainVabre/origin
	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()
	})

	g.Describe("start build with broken proxy", func() {
		g.It("should start a build and wait for the build to to fail", func() {
			g.By("starting the build with --wait and --follow flags")
			out, err := oc.Run("start-build").Args("sample-build", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
			o.Expect(err).To(o.HaveOccurred())
			g.By("verifying the build sample-app-1 output")
			// The git ls-remote check should exit the build when the remote
			// repository is not accessible. It should never get to the clone.
			o.Expect(out).NotTo(o.ContainSubstring("clone"))
			o.Expect(out).To(o.ContainSubstring(`unable to access 'https://github.com/openshift/ruby-hello-world.git/': Failed connect to 127.0.0.1:3128`))
			g.By("verifying the build sample-build-1 status")
			build, err := oc.REST().Builds(oc.Namespace()).Get("sample-build-1")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed))
		})

	})

})
Example #14
		g.It(fmt.Sprintf("should work with hot deploy"), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc new-app %s", djangoRepository))
			err := oc.Run("new-app").Args(djangoRepository, "--strategy=source").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for build to finish")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "django-ex-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for endpoint")
			err = oc.KubeFramework().WaitForAnEndpoint("django-ex")
			o.Expect(err).NotTo(o.HaveOccurred())

			assertPageCountIs := func(i int) {
				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 120*time.Second)
				o.Expect(err).NotTo(o.HaveOccurred())

				result, err := CheckPageContains(oc, "django-ex", "", pageCountFn(i))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(result).To(o.BeTrue())
			}

			g.By("checking page count")
			assertPageCountIs(1)
			assertPageCountIs(2)

			g.By("modifying the source code with disabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			assertPageCountIs(3)

			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(dcLabel, nil)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(len(pods.Items)).To(o.Equal(1))

			g.By("turning on hot-deploy")
			err = oc.Run("env").Args("rc", dcName, "APP_CONFIG=conf/reload.py").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 60*time.Second)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("modifying the source code with enabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			assertPageCountIs(1337)
		})
		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By("creating persistent volumes")
			_, err := exutil.SetupHostPathVolumes(
				oc.AdminKubeREST().PersistentVolumes(),
				oc.Namespace(),
				"256Mi",
				3,
			)
			o.Expect(err).NotTo(o.HaveOccurred())

			defer func() {
				// We're removing only PVs because all other things will be removed
				// together with namespace.
				err := exutil.CleanupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace())
				if err != nil {
					fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't cleanup persistent volumes: %v", err)
				}
			}()

			g.By("creating a new app")
			o.Expect(
				oc.Run("new-app").Args(
					"-f", templatePath,
					"-p", "VOLUME_CAPACITY=256Mi",
					"-p", "MEMORY_LIMIT=512Mi",
					"-p", "MONGODB_IMAGE=centos/mongodb-32-centos7",
					"-p", "MONGODB_SERVICE_NAME=mongodb-replicaset",
				).Execute(),
			).Should(o.Succeed())

			g.By("waiting for pods to running")
			podNames, err := exutil.WaitForPods(
				oc.KubeREST().Pods(oc.Namespace()),
				exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
				exutil.CheckPodIsRunningFn,
				3,
				2*time.Minute,
			)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(podNames).Should(o.HaveLen(3))

			g.By("expecting that we can insert a new record on primary node")
			mongo := dbutil.NewMongoDB(podNames[0])
			replicaSet := mongo.(exutil.ReplicaSet)
			_, err = replicaSet.QueryPrimary(oc, `db.test.save({ "status" : "passed" })`)
			o.Expect(err).ShouldNot(o.HaveOccurred())

			g.By("expecting that we can read a record from all members")
			for _, podName := range podNames {
				o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed())
			}

			g.By("restarting replica set")
			err = oc.Run("delete").Args("pods", "--all", "-n", oc.Namespace()).Execute()
			o.Expect(err).ShouldNot(o.HaveOccurred())

			g.By("waiting for restarting of the pods")
			podNames, err = exutil.WaitForPods(
				oc.KubeREST().Pods(oc.Namespace()),
				exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
				exutil.CheckPodIsRunningFn,
				3,
				2*time.Minute,
			)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(podNames).Should(o.HaveLen(3))

			g.By("expecting that we can read a record from all members after its restart")
			for _, podName := range podNames {
				o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed())
			}
		})
Example #16
	})

	g.Describe("S2I build from a template", func() {
		g.It(fmt.Sprintf("should create a image from %q template with proper Docker labels", stiBuildFixture), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc create -f %q", imageStreamFixture))
			err := oc.Run("create").Args("-f", imageStreamFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("calling oc create -f %q", stiBuildFixture))
			err = oc.Run("create").Args("-f", stiBuildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build")
			br, err := exutil.StartBuildAndWait(oc, "test")
			br.AssertSuccess()

			g.By("getting the Docker image reference from ImageStream")
			imageRef, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			imageLabels, err := eximages.GetImageLabels(oc.REST().ImageStreamImages(oc.Namespace()), "test", imageRef)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("inspecting the new image for proper Docker labels")
			err = ExpectOpenShiftLabels(imageLabels)
			o.Expect(err).NotTo(o.HaveOccurred())
		})
	})

	g.Describe("Docker build from a template", func() {
Example #17
		g.It(fmt.Sprintf("should create a build from %q template and run it", templateFixture), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc new-app -f %q", templateFixture))
			err := oc.Run("new-app").Args("-f", templateFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build")
			out, err := oc.Run("start-build").Args("initial-build").Output()
			fmt.Fprintf(g.GinkgoWriter, "\ninitial-build start-build output:\n%s\n", out)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "initial-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("initial-build", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build using the image produced by the last build")
			out, err = oc.Run("start-build").Args("internal-build").Output()
			fmt.Fprintf(g.GinkgoWriter, "\ninternal-build start-build output:\n%s\n", out)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "internal-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("internal-build", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("getting the Docker image reference from ImageStream")
			imageName, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "internal-image", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("instantiating a pod and service with the new image")
			err = oc.Run("new-app").Args("-f", podAndServiceFixture, "-p", "IMAGE_NAME="+imageName).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for the service to become available")
			err = oc.KubeFramework().WaitForAnEndpoint(buildTestService)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod container has saved artifacts")
			out, err = oc.Run("exec").Args("-p", buildTestPod, "--", "curl", "http://0.0.0.0:8080").Output()
			if err != nil {
				logs, _ := oc.Run("logs").Args(buildTestPod).Output()
				e2e.Failf("Failed to curl in application container: \n%q, pod logs: \n%q", out, logs)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			if !strings.Contains(out, "artifacts exist") {
				logs, _ := oc.Run("logs").Args(buildTestPod).Output()
				e2e.Failf("Pod %q does not contain expected artifacts: %q\n%q", buildTestPod, out, logs)
			}
		})
Example #18
	g.Describe("Building from a template", func() {
		g.It("should create a docker build with a quota and run it", func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc create -f %q", buildFixture))
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build")
			_, err = oc.Run("start-build").Args("docker-build-quota", "--from-dir", exutil.FixturePath("testdata", "build-quota")).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build is in Failed phase")
			// note that success and fail functions are intentionally reversed because we want to wait for failure.
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "docker-build-quota-1", exutil.CheckBuildFailedFn, exutil.CheckBuildSuccessFn)
			if err != nil {
				exutil.DumpBuildLogs("docker-build-quota", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build logs to contain the correct cgroups values")
			out, err := oc.Run("logs").Args(fmt.Sprintf("build/docker-build-quota-1")).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("MEMORY=209715200"))
			o.Expect(out).To(o.ContainSubstring("MEMORYSWAP=209715200"))
			o.Expect(out).To(o.ContainSubstring("SHARES=61"))
			o.Expect(out).To(o.ContainSubstring("PERIOD=100000"))
			o.Expect(out).To(o.ContainSubstring("QUOTA=6000"))
		})
	})
})
Example #19
		g.It(fmt.Sprintf("should work with hot deploy"), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc new-app -f %q -p %q", cakephpTemplate, hotDeployParam))
			err := oc.Run("new-app").Args("-f", cakephpTemplate, "-p", hotDeployParam).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for build to finish")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for endpoint")
			err = oc.KubeFramework().WaitForAnEndpoint("cakephp-mysql-example")
			o.Expect(err).NotTo(o.HaveOccurred())

			assertPageCountIs := func(i int) {
				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
				o.Expect(err).NotTo(o.HaveOccurred())

				result, err := CheckPageContains(oc, "cakephp-mysql-example", "", pageCountFn(i))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(result).To(o.BeTrue())
			}

			g.By("checking page count")

			assertPageCountIs(1)
			assertPageCountIs(2)

			g.By("modifying the source code with disabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			g.By("checking page count after modifying the source code")
			assertPageCountIs(1337)
		})
	}

	g.It(fmt.Sprintf("should deny a push of built image exceeding %s limit", imageapi.LimitTypeImage), func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer tearDown(oc)

		dClient, err := testutil.NewDockerClient()
		o.Expect(err).NotTo(o.HaveOccurred())

		_, err = createLimitRangeOfType(oc, imageapi.LimitTypeImage, kapi.ResourceList{
			kapi.ResourceStorage: resource.MustParse("10Ki"),
		})
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("trying to push an image exceeding size limit with just 1 layer"))
		err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "middle", 16000, 1, false)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("trying to push an image exceeding size limit in total"))
		err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "middle", 16000, 5, false)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("trying to push an image with one big layer below size limit"))
		err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "small", 8000, 1, true)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("trying to push an image below size limit"))
		err = imagesutil.BuildAndPushImageOfSizeWithBuilder(oc, dClient, oc.Namespace(), "sized", "small", 8000, 2, true)
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.It(fmt.Sprintf("should deny a push of built image exceeding limit on %s resource", imageapi.ResourceImageStreamImages), func() {
Example #21
		exampleGemfile = exutil.FixturePath("testdata", "test-build-app", "Gemfile")
		exampleBuild   = exutil.FixturePath("testdata", "test-build-app")
		oc             = exutil.NewCLI("cli-start-build", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()
	})

	g.Describe("oc start-build --wait", func() {
		g.It("should start a build and wait for the build to complete", func() {
			g.By("starting the build with --wait flag")
			br, err := exutil.StartBuildAndWait(oc, "sample-build", "--wait")
			o.Expect(err).NotTo(o.HaveOccurred())
			br.AssertSuccess()
		})

		g.It("should start a build and wait for the build to fail", func() {
			g.By("starting the build with --wait flag but wrong --commit")
			br, _ := exutil.StartBuildAndWait(oc, "sample-build", "--wait", "--commit=fffffff")
			br.AssertFailure()
			o.Expect(br.StartBuildErr).To(o.HaveOccurred()) // start-build should detect the build error with --wait flag
			o.Expect(br.StartBuildStdErr).Should(o.ContainSubstring(`status is "Failed"`))
		})
	})

	g.Describe("override environment", func() {
		g.It("should accept environment variables", func() {
			g.By("starting the build with -e FOO=bar,VAR=test")
Example #22
				o.Expect(foundOriginal).To(o.BeTrue())

				g.By("Verifying renamed file is not in the local directory")
				o.Expect(foundModified).To(o.BeFalse())

				g.By("Getting an error if copying to a destination directory where there is no write permission")
				result, err = oc.Run("rsync").Args(
					sourcePath1,
					fmt.Sprintf("%s:/", podName),
					fmt.Sprintf("--strategy=%s", strategy)).Output()
				o.Expect(err).To(o.HaveOccurred())
			}
		}

		for _, strategy := range strategies {
			g.It(fmt.Sprintf("should copy files with the %s strategy", strategy), testRsyncFn(strategy))
		}
	})

	g.Describe("rsync specific flags", func() {

		g.It("should honor the --exclude flag", func() {
			g.By(fmt.Sprintf("Calling oc rsync %s %s:/tmp --exclude=image-streams-rhel7.json", sourcePath1, podName))
			err := oc.Run("rsync").Args(
				sourcePath1,
				fmt.Sprintf("%s:/tmp", podName),
				"--exclude=image-streams-rhel7.json").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("Verifying that files are copied to the container")
			result, err := oc.Run("rsh").Args(podName, "ls", "/tmp/image-streams").Output()
Example #23
File: dockerfile.go Project: richm/origin
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.SetOutputDir(exutil.TestContext.OutputDir)
	})

	g.Describe("being created from new-build", func() {
		g.It("should create a image via new-build", func() {
			g.By(fmt.Sprintf("calling oc new-build with Dockerfile"))
			err := oc.Run("new-build").Args("-D", "-").InputString(testDockerfile).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build")
			bc, err := oc.REST().BuildConfigs(oc.Namespace()).Get("origin-base")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(bc.Spec.Source.Git).To(o.BeNil())
			o.Expect(bc.Spec.Source.Dockerfile).NotTo(o.BeNil())
			o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile))

			buildName := "origin-base-1"
			g.By("expecting the Dockerfile build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("getting the build Docker image reference from ImageStream")
			image, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("origin-base", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(image.Image.DockerImageMetadata.Config.User).To(o.Equal("1001"))
		})

		g.It("should create a image via new-build and infer the origin tag", func() {
			g.By(fmt.Sprintf("calling oc new-build with Dockerfile that uses the same tag as the output"))
			err := oc.Run("new-build").Args("-D", "-").InputString(testDockerfile2).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
Example #24
		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {

			exutil.CheckOpenShiftNamespaceImageStreams(oc)
			g.By("creating a new app")
			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())

			g.By("waiting for the deployment to complete")
			err := exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), deploymentConfigName, oc)
			o.Expect(err).NotTo(o.HaveOccurred())

			podNames := waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterDeployment, "mongodb-replica")
			mongo := db.NewMongoDB(podNames[0])

			g.By(fmt.Sprintf("expecting that replica set have %d members", expectedReplicasAfterDeployment))
			assertMembersInReplica(oc, mongo, expectedReplicasAfterDeployment)

			g.By("expecting that we can insert a new record on primary node")
			replicaSet := mongo.(exutil.ReplicaSet)
			_, err = replicaSet.QueryPrimary(oc, insertCmd)
			o.Expect(err).ShouldNot(o.HaveOccurred())

			g.By("expecting that we can read a record from all members")
			for _, podName := range podNames {
				tryToReadFromPod(oc, podName, expectedValue)
			}

			g.By(fmt.Sprintf("scaling deployment config %s to %d replicas", deploymentConfigName, expectedReplicasAfterScalingUp))

			err = oc.Run("scale").Args("dc", deploymentConfigName, "--replicas="+fmt.Sprint(expectedReplicasAfterScalingUp), "--timeout=30s").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			podNames = waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterScalingUp, "mongodb-replica")
			mongo = db.NewMongoDB(podNames[0])

			g.By("expecting that scaling replica set up should have more members")
			assertMembersInReplica(oc, mongo, expectedReplicasAfterScalingUp)
		})
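A plausible shape for the waitForNumberOfPodsWithLabel helper used above, assumed from the exutil calls seen in the other examples (not the project's actual definition):

func waitForNumberOfPodsWithLabel(oc *exutil.CLI, count int, label string) []string {
	g.By(fmt.Sprintf("expecting %d running pods with label name=%s", count, label))
	podNames, err := exutil.WaitForPods(
		oc.KubeClient().Core().Pods(oc.Namespace()),
		exutil.ParseLabelsOrDie("name="+label),
		exutil.CheckPodIsRunningFn,
		count,
		4*time.Minute,
	)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(podNames).Should(o.HaveLen(count))
	return podNames
}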
Example #25
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		g.By("after slave is scaled to 0 and then back to 4 replicas")
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
	}
}

var _ = g.Describe("images: mysql: replication", func() {
	defer g.GinkgoRecover()

	ocs := make([]*exutil.CLI, len(templatePaths))
	for i, template := range templatePaths {
		ocs[i] = exutil.NewCLI(fmt.Sprintf("mysql-replication-%d", i), exutil.KubeConfigPath())
		g.It(fmt.Sprintf("MySQL replication template %s", template), replicationTestFactory(ocs[i], template))
	}
})
Example #26
		g.It("jenkins-plugin test case execution", func() {

			g.By("create jenkins job config xml file, convert to bytes for http post")
			data := jenkinsJobBytes("testjob-plugin.xml", oc.Namespace())

			g.By("make http request to create job")
			immediateInteractionWithJenkins(fmt.Sprintf("http://%s/createItem?name=test-plugin-job", hostPort), "POST", bytes.NewBuffer(data), 200)

			g.By("make http request to kick off build")
			immediateInteractionWithJenkins(fmt.Sprintf("http://%s/job/test-plugin-job/build?delay=0sec", hostPort), "POST", nil, 201)

			// the build and deployment are by far the most time-consuming portion of the test jenkins job;
			// we leverage some of the openshift utilities for waiting for the deployment before we poll
			// jenkins for the successful job completion
			g.By("waiting for frontend, frontend-prod deployments as signs that the build has finished")
			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend")
			if err != nil {
				exutil.DumpDeploymentLogs("frontend", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend-prod")
			if err != nil {
				exutil.DumpDeploymentLogs("frontend-prod", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("get build console logs and see if succeeded")
			err = waitForJenkinsActivity(fmt.Sprintf("http://%s/job/test-plugin-job/1/console", hostPort), "Finished: SUCCESS", 200)
			o.Expect(err).NotTo(o.HaveOccurred())

		})
Example #27
	defer g.GinkgoRecover()
	var (
		dockerImageFixture = exutil.FixturePath("fixtures", "test-docker-no-outputname.json")
		s2iImageFixture    = exutil.FixturePath("fixtures", "test-s2i-no-outputname.json")
		oc                 = exutil.NewCLI("build-no-outputname", exutil.KubeConfigPath())
	)

	g.Describe("building from templates", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.It(fmt.Sprintf("should create an image from %q docker template without an output image reference defined", dockerImageFixture), func() {
			err := oc.Run("create").Args("-f", dockerImageFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting build to pass without an output image reference specified")
			out, err := oc.Run("start-build").Args("test-docker", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(`Build does not have an Output defined, no output image was pushed to a registry.`))
		})

		g.It(fmt.Sprintf("should create an image from %q S2i template without an output image reference defined", s2iImageFixture), func() {
			err := oc.Run("create").Args("-f", s2iImageFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting build to pass without an output image reference specified")
			out, err := oc.Run("start-build").Args("test-sti", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
Example #28
		g.It(fmt.Sprintf("should work with hot deploy"), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			exutil.CheckOpenShiftNamespaceImageStreams(oc)
			g.By(fmt.Sprintf("calling oc new-app -f %q", dancerTemplate))
			err := oc.Run("new-app").Args("-f", dancerTemplate).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for build to finish")
			err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), "dancer-mysql-example-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("dancer-mysql-example", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our
			// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
			// service coming up stems from a failed deployment
			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "dancer-mysql-example", oc)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for endpoint")
			err = oc.KubeFramework().WaitForAnEndpoint("dancer-mysql-example")
			o.Expect(err).NotTo(o.HaveOccurred())

			assertPageCountIs := func(i int) {
				_, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
				o.Expect(err).NotTo(o.HaveOccurred())

				result, err := CheckPageContains(oc, "dancer-mysql-example", "", pageCountFn(i))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(result).To(o.BeTrue())
			}

			g.By("checking page count")
			assertPageCountIs(1)
			assertPageCountIs(2)

			g.By("modifying the source code with disabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			assertPageCountIs(3)

			pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(len(pods.Items)).To(o.Equal(1))

			g.By("turning on hot-deploy")
			err = oc.Run("env").Args("rc", dcName, "PERL_APACHE2_RELOAD=true").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("modifying the source code with enabled hot deploy")
			RunInPodContainer(oc, dcLabel, modifyCommand)
			assertPageCountIs(1337)
		})
Example #29
		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {

			g.By("creating a new app")
			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())

			g.By("waiting for the deployment to complete")
			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mongodb")
			if err != nil {
				exutil.DumpDeploymentLogs("mongodb", oc)
			}
			o.Expect(err).ShouldNot(o.HaveOccurred())

			g.By("expecting the mongodb pod is running")
			podNames, err := exutil.WaitForPods(
				oc.KubeREST().Pods(oc.Namespace()),
				exutil.ParseLabelsOrDie("name=mongodb"),
				exutil.CheckPodIsRunningFn,
				1,
				1*time.Minute,
			)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(podNames).Should(o.HaveLen(1))

			g.By("expecting the mongodb service is answering for ping")
			mongo := db.NewMongoDB(podNames[0])
			ok, err := mongo.IsReady(oc)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(ok).Should(o.BeTrue())

			g.By("expecting that we can insert a new record")
			result, err := mongo.Query(oc, `db.foo.save({ "status": "passed" })`)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(result).Should(o.ContainSubstring(`WriteResult({ "nInserted" : 1 })`))

			g.By("expecting that we can read a record")
			findCmd := "printjson(db.foo.find({}, {_id: 0}).toArray())" // don't include _id field to output because it changes every time
			result, err = mongo.Query(oc, findCmd)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(result).Should(o.ContainSubstring(`{ "status" : "passed" }`))
		})
Example #30
		g.It("should create a docker build that pulls using a secret run it", func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc create -f %q", buildFixture))
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build")
			out, err := oc.Run("start-build").Args("docker-build").Output()
			fmt.Fprintf(g.GinkgoWriter, "\nstart-build output:\n%s\n", out)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build succeeds")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "docker-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("docker-build", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a second build that pulls the image from the first build")
			out, err = oc.Run("start-build").Args("docker-build-pull").Output()
			fmt.Fprintf(g.GinkgoWriter, "\nstart-build output:\n%s\n", out)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build succeeds")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "docker-build-pull-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("docker-build-pull", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
		})