// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It("should build a "+c.repoName+" image and run it in a pod", func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)
				exutil.CheckOpenShiftNamespaceImageStreams(oc)

				g.By("calling oc new-app with the " + c.repoName + " example template")
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				// all the templates automatically start a build
				buildName := c.buildConfigName + "-1"

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
				if err != nil {
					exutil.DumpBuildLogs(c.buildConfigName, oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the app deployment to be complete")
				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				if len(c.dbDeploymentConfigName) > 0 {
					g.By("expecting the db deployment to be complete")
					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
					o.Expect(err).NotTo(o.HaveOccurred())
				}

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying string from app request")
				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, time.Duration(30*time.Second))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
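// SampleRepoConfig is not shown in this excerpt; the sketch below reconstructs
// a minimal definition from the fields NewSampleRepoTest actually reads. The
// field names match the usage above, but the real struct may carry more fields.
type SampleRepoConfig struct {
	repoName               string // prefix for the test namespace/CLI
	templateURL            string // template passed to `oc new-app -f`
	buildConfigName        string // build config defined by the template; "<name>-1" is its first build
	serviceName            string // service fronting the deployed app
	deploymentConfigName   string // application deployment config to wait on
	dbDeploymentConfigName string // optional database deployment config; empty disables the DB wait
	expectedString         string // substring expected in the app's HTTP response
	appPath                string // request path appended to http://<serviceIP>:8080
}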
)

var _ = g.Describe("[images][mongodb] openshift mongodb image", func() {
	defer g.GinkgoRecover()

	templatePath := exutil.FixturePath("..", "..", "examples", "db-templates", "mongodb-ephemeral-template.json")
	oc := exutil.NewCLI("mongodb-create", exutil.KubeConfigPath()).Verbose()

	g.Describe("creating from a template", func() {
		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
			g.By("creating a new app")
			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())

			g.By("waiting for the deployment to complete")
			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mongodb")
			if err != nil {
				exutil.DumpDeploymentLogs("mongodb", oc)
			}
			o.Expect(err).ShouldNot(o.HaveOccurred())

			g.By("expecting the mongodb pod is running")
			podNames, err := exutil.WaitForPods(
				oc.KubeREST().Pods(oc.Namespace()),
				exutil.ParseLabelsOrDie("name=mongodb"),
				exutil.CheckPodIsRunningFn,
				1,
				1*time.Minute,
			)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(podNames).Should(o.HaveLen(1))
exutil.CheckOpenShiftNamespaceImageStreams(oc)

g.By(fmt.Sprintf("calling oc new-app -f %q", dancerTemplate))
err := oc.Run("new-app").Args("-f", dancerTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for build to finish")
err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), "dancer-mysql-example-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
if err != nil {
	exutil.DumpBuildLogs("dancer-mysql-example", oc)
}
o.Expect(err).NotTo(o.HaveOccurred())

// oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, prefacing with our
// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
// service coming up stems from a failed deployment
err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "dancer-mysql-example", oc)
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for endpoint")
err = oc.KubeFramework().WaitForAnEndpoint("dancer-mysql-example")
o.Expect(err).NotTo(o.HaveOccurred())

assertPageCountIs := func(i int) {
	_, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())

	result, err := CheckPageContains(oc, "dancer-mysql-example", "", pageCountFn(i))
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(result).To(o.BeTrue())
}
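// dcLabel and pageCountFn are defined elsewhere in this test file. Plausible
// definitions, assuming the dancer sample app exposes a "Page views" counter
// (the label selector and the exact page text are assumptions, not taken from
// this excerpt):
var (
	dcLabel     = exutil.ParseLabelsOrDie("deployment=dancer-mysql-example-1")
	pageCountFn = func(count int) string { return fmt.Sprintf("Page views: %d", count) }
)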
if len(hexIDs) > 0 && err == nil {
	// found an openshift pipeline plugin test image, must be testing a proposed change to the plugin
	jenkinsEphemeralPath = exutil.FixturePath("fixtures", "jenkins-ephemeral-template-test-new-plugin.json")
	testingSnapshot = true
} else {
	// no test image, testing the base jenkins image with the current, supported version of the plugin
	jenkinsEphemeralPath = exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json")
}
err = oc.Run("new-app").Args(jenkinsEphemeralPath).Execute()
o.Expect(err).NotTo(o.HaveOccurred())

jenkinsApplicationPath := exutil.FixturePath("..", "..", "examples", "jenkins", "application-template.json")
err = oc.Run("new-app").Args(jenkinsApplicationPath).Execute()
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for jenkins deployment")
err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins")
if err != nil {
	exutil.DumpDeploymentLogs("jenkins", oc)
}
o.Expect(err).NotTo(o.HaveOccurred())

g.By("get ip and port for jenkins service")
serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
hostPort = fmt.Sprintf("%s:%s", serviceIP, port)

g.By("wait for jenkins to come up")
err = waitForJenkinsActivity(fmt.Sprintf("http://%s", hostPort), "", 200)
o.Expect(err).NotTo(o.HaveOccurred())
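// waitForJenkinsActivity is a helper defined elsewhere in this suite. A minimal
// sketch of what it plausibly does, assuming it polls the URL until the response
// carries the expected status code and, when given, a verification string in the
// body (the interval, timeout, and imports of "net/http", "io/ioutil", "strings",
// and the kube "wait" package are assumptions):
func waitForJenkinsActivity(uri, verificationString string, status int) error {
	return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		resp, err := http.Get(uri)
		if err != nil {
			return false, nil // jenkins not reachable yet; keep polling
		}
		defer resp.Body.Close()
		if resp.StatusCode != status {
			return false, nil
		}
		if verificationString == "" {
			return true, nil
		}
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return false, nil
		}
		return strings.Contains(string(body), verificationString), nil
	})
}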
const (
	expectedReplicasAfterDeployment = 3
	expectedReplicasAfterScalingUp  = expectedReplicasAfterDeployment + 2
)

oc := exutil.NewCLI("mongodb-replica", exutil.KubeConfigPath()).Verbose()

g.Describe("creating from a template", func() {
	g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
		exutil.CheckOpenShiftNamespaceImageStreams(oc)

		g.By("creating a new app")
		o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())

		g.By("waiting for the deployment to complete")
		err := exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), deploymentConfigName, oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		podNames := waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterDeployment, "mongodb-replica")
		mongo := db.NewMongoDB(podNames[0])

		g.By(fmt.Sprintf("expecting that the replica set has %d members", expectedReplicasAfterDeployment))
		assertMembersInReplica(oc, mongo, expectedReplicasAfterDeployment)

		g.By("expecting that we can insert a new record on the primary node")
		replicaSet := mongo.(exutil.ReplicaSet)
		_, err = replicaSet.QueryPrimary(oc, insertCmd)
		o.Expect(err).ShouldNot(o.HaveOccurred())

		g.By("expecting that we can read a record from all members")
		for _, podName := range podNames {
const (
	expectedReplicasAfterDeployment = 3
	expectedReplicasAfterScalingUp  = expectedReplicasAfterDeployment + 2
)

oc := exutil.NewCLI("mongodb-replica", exutil.KubeConfigPath()).Verbose()

g.Describe("creating from a template", func() {
	g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
		g.By("creating a new app")
		o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())

		g.By("waiting for the deployment to complete")
		o.Expect(exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), deploymentConfigName)).Should(o.Succeed())

		podNames := waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterDeployment, "mongodb-replica")
		mongo := db.NewMongoDB(podNames[0])

		g.By(fmt.Sprintf("expecting that the replica set has %d members", expectedReplicasAfterDeployment))
		assertMembersInReplica(oc, mongo, expectedReplicasAfterDeployment)

		g.By("expecting that we can insert a new record on the primary node")
		replicaSet := mongo.(exutil.ReplicaSet)
		_, err := replicaSet.QueryPrimary(oc, insertCmd)
		o.Expect(err).ShouldNot(o.HaveOccurred())

		g.By("expecting that we can read a record from all members")
		for _, podName := range podNames {
			tryToReadFromPod(oc, podName, expectedValue)
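// waitForNumberOfPodsWithLabel is shared by both mongodb replica excerpts above.
// A sketch of a plausible implementation built on exutil.WaitForPods, which the
// mongodb-create excerpt already uses directly (the "name=" label key and the
// timeout are assumptions):
func waitForNumberOfPodsWithLabel(oc *exutil.CLI, number int, label string) []string {
	g.By(fmt.Sprintf("expecting %d pods with label name=%s to be running", number, label))
	podNames, err := exutil.WaitForPods(
		oc.KubeClient().Core().Pods(oc.Namespace()),
		exutil.ParseLabelsOrDie("name="+label),
		exutil.CheckPodIsRunningFn,
		number,
		4*time.Minute,
	)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(podNames).Should(o.HaveLen(number))
	return podNames
}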
err := exutil.WaitForOpenShiftNamespaceImageStreams(oc)
o.Expect(err).NotTo(o.HaveOccurred())

g.By(fmt.Sprintf("calling oc new-app %s", djangoRepository))
err = oc.Run("new-app").Args(djangoRepository, "--strategy=source").Execute()
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for build to finish")
err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), "django-ex-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
if err != nil {
	exutil.DumpBuildLogs("django-ex", oc)
}
o.Expect(err).NotTo(o.HaveOccurred())

// oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, prefacing with our
// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
// service coming up stems from a failed deployment
err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "django-ex", oc)
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for endpoint")
err = oc.KubeFramework().WaitForAnEndpoint("django-ex")
o.Expect(err).NotTo(o.HaveOccurred())

assertPageCountIs := func(i int) {
	_, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())

	result, err := CheckPageContains(oc, "django-ex", "", pageCountFn(i))
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(result).To(o.BeTrue())
}
oc.SetOutputDir(exutil.TestContext.OutputDir)

g.By(fmt.Sprintf("calling oc new-app -f %q", railsTemplate))
err := oc.Run("new-app").Args("-f", railsTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for build to finish")
err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
if err != nil {
	exutil.DumpBuildLogs("rails-postgresql-example", oc)
}
o.Expect(err).NotTo(o.HaveOccurred())

// oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, prefacing with our
// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
// service coming up stems from a failed deployment
err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "rails-postgresql-example")
if err != nil {
	exutil.DumpDeploymentLogs("rails-postgresql-example", oc)
}
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for endpoint")
err = oc.KubeFramework().WaitForAnEndpoint("rails-postgresql-example")
o.Expect(err).NotTo(o.HaveOccurred())

assertPageContent := func(content string) {
	_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())

	result, err := CheckPageContains(oc, "rails-postgresql-example", "", content)
	o.Expect(err).NotTo(o.HaveOccurred())
g.JustBeforeEach(func() {
	g.By("waiting for builder service account")
	err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
	o.Expect(err).NotTo(o.HaveOccurred())
})

g.Context("Manually deploy jenkins and trigger a jenkins pipeline build", func() {
	g.It("JenkinsPipeline build should succeed when the jenkins service is deployed manually", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.By(fmt.Sprintf("calling oc new-app -f %q", jenkinsTemplatePath))
		err := oc.Run("new-app").Args("-f", jenkinsTemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// wait for the jenkins deployment to complete
		g.By("waiting for the jenkins service to be deployed")
		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins", oc)
		if err != nil {
			exutil.DumpDeploymentLogs("jenkins", oc)
		}
		o.Expect(err).NotTo(o.HaveOccurred())

		// create the pipeline build example
		g.By(fmt.Sprintf("calling oc new-app -f %q", pipelineTemplatePath))
		err = oc.Run("new-app").Args("-f", pipelineTemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("starting a pipeline build")
		br, _ := exutil.StartBuildAndWait(oc, "sample-pipeline")
		if !br.BuildSuccess {
			exutil.DumpDeploymentLogs("jenkins", oc)
		}
		br.AssertSuccess()
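// jenkinsTemplatePath and pipelineTemplatePath are defined earlier in the file.
// Plausible definitions: the jenkins-ephemeral path matches the one used in the
// jenkins excerpt above, while the samplepipeline fixture name is an assumption
// inferred from the "sample-pipeline" build config started below.
// jenkinsTemplatePath := exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json")
// pipelineTemplatePath := exutil.FixturePath("..", "..", "examples", "jenkins", "pipeline", "samplepipeline.yaml")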
g.By("obtaining the configured API server host from config") adminClientConfig, err := testutil.GetClusterAdminClientConfig(exutil.KubeConfigPath()) o.Expect(err).NotTo(o.HaveOccurred()) hostURL, err := url.Parse(adminClientConfig.Host) o.Expect(err).NotTo(o.HaveOccurred()) host, err := hostname(hostURL.Host) o.Expect(err).NotTo(o.HaveOccurred()) routeSuffix := fmt.Sprintf("%s.%s", host, hostNameSuffix) g.By(fmt.Sprintf("calling oc new-app -f %q -p ROUTE_SUFFIX=%s", gitServerYaml, routeSuffix)) err = oc.Run("new-app").Args("-f", gitServerYaml, "-p", fmt.Sprintf("ROUTE_SUFFIX=%s", routeSuffix)).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the deployment of the gitserver to be in the Complete phase") err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), gitServerDeploymentConfigName) o.Expect(err).NotTo(o.HaveOccurred()) sourceSecretName := secretFunc() sourceURL := fmt.Sprintf(urlTemplate, routeSuffix) g.By(fmt.Sprintf("creating a new BuildConfig by calling oc new-app -f %q -p SOURCE_SECRET=%s,SOURCE_URL=%s", testBuildFixture, sourceSecretName, sourceURL)) err = oc.Run("new-app").Args("-f", testBuildFixture, "-p", fmt.Sprintf("SOURCE_SECRET=%s,SOURCE_URL=%s", sourceSecretName, sourceURL)).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") buildName, err := oc.Run("start-build").Args(buildConfigName).Output() o.Expect(err).NotTo(o.HaveOccurred())
func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.Client(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
		o.Expect(err).NotTo(o.HaveOccurred())

		exutil.CheckOpenShiftNamespaceImageStreams(oc)

		err = oc.Run("new-app").Args("-f", tc.TemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, prefacing with our
		// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
		// service coming up stems from a failed deployment
		g.By("waiting for the deployment to complete")
		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), helperName, oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for an endpoint")
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			g.By("creating replication helpers")
			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeClient().Core().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test if we can query as root
			g.By("wait for mysql-master endpoint")
			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			err := helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with random name
			g.By("create new table")
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data is present on master
			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}

			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		if tc.SkipReplication {
			return
		}

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		// NOTE: Commented out; the current template does not support multiple replicas.
		/*
			g.By("after slave is scaled to 0 and then back to 4 replicas")
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
		*/
	}
}
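// cleanup is deferred at the top of replicationTestFactory. A sketch of what it
// plausibly does, assuming it dumps deployment logs on failure and tears down
// the host-path volumes created by exutil.SetupHostPathVolumes (the exact calls
// are assumptions; CleanupHostPathVolumes is assumed to mirror the Setup helper):
func cleanup(oc *exutil.CLI) {
	if g.CurrentGinkgoTestDescription().Failed {
		exutil.DumpDeploymentLogs("mysql-master", oc)
		exutil.DumpDeploymentLogs("mysql-slave", oc)
	}
	exutil.CleanupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace())
}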
exutil.CheckOpenShiftNamespaceImageStreams(oc)

g.By(fmt.Sprintf("calling oc new-app -f %q -p %q", cakephpTemplate, hotDeployParam))
err := oc.Run("new-app").Args("-f", cakephpTemplate, "-p", hotDeployParam).Execute()
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for build to finish")
err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
if err != nil {
	exutil.DumpBuildLogs("cakephp-mysql-example", oc)
}
o.Expect(err).NotTo(o.HaveOccurred())

// oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, prefacing with our
// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure in the
// service coming up stems from a failed deployment
err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "cakephp-mysql-example", oc)
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for endpoint")
err = oc.KubeFramework().WaitForAnEndpoint("cakephp-mysql-example")
o.Expect(err).NotTo(o.HaveOccurred())

assertPageCountIs := func(i int) {
	_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())

	result, err := CheckPageContains(oc, "cakephp-mysql-example", "", pageCountFn(i))
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(result).To(o.BeTrue())
}
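// CheckPageContains is shared by the cakephp, dancer, django, and rails excerpts
// above. A sketch of a plausible implementation, assuming a helper that resolves
// the service endpoint and fetches the page with the same exutil.FetchURL used in
// NewSampleRepoTest (exutil.GetEndpointAddress and the retry timeout are
// assumptions; assumes the "strings" import):
func CheckPageContains(oc *exutil.CLI, endpoint, path, contents string) (bool, error) {
	address, err := exutil.GetEndpointAddress(oc, endpoint)
	if err != nil {
		return false, err
	}
	response, err := exutil.FetchURL(fmt.Sprintf("http://%s/%s", address, path), 3*time.Minute)
	if err != nil {
		return false, err
	}
	return strings.Contains(response, contents), nil
}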