Example #1
// PanicsWithSubstring returns a gomega matcher, like Panics() but matches substring of err.Error().
func PanicsWithSubstring(substr string) types.GomegaMatcher {
	return &panicsWithSubstring{
		substr: substr,
		panics: panics{
			err: gomega.WithTransform(func(v interface{}) string {
				return fmt.Sprint(v)
			}, gomega.ContainSubstring(substr)),
		},
	}
}
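A hedged usage sketch: the matcher is applied to a zero-argument function, as with gomega's built-in Panic(); doSomethingRisky is a hypothetical function that panics with a value whose string form contains "boom".

// Hypothetical spec; doSomethingRisky is a stand-in for the code under test.
gomega.Expect(func() { doSomethingRisky() }).To(PanicsWithSubstring("boom"))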
Example #2
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It(fmt.Sprintf("should build a "+c.repoName+" image and run it in a pod"), func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				exutil.CheckOpenShiftNamespaceImageStreams(oc)
				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				// all the templates automatically start a build.
				buildName := c.buildConfigName + "-1"

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
				if err != nil {
					exutil.DumpBuildLogs(c.buildConfigName, oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the app deployment to be complete")
				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				if len(c.dbDeploymentConfigName) > 0 {
					g.By("expecting the db deployment to be complete")
					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
					o.Expect(err).NotTo(o.HaveOccurred())
				}

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying string from app request")
				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, 30*time.Second)
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
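A plausible registration of the generated closure as a top-level Ginkgo container. The SampleRepoConfig field names are inferred from their usage above; every value below is a hypothetical placeholder, not a real template.

var _ = g.Describe("[image_ecosystem] rails sample repo", NewSampleRepoTest(
	SampleRepoConfig{
		repoName:               "rails",
		templateURL:            "https://example.com/rails-postgresql.json", // hypothetical URL
		buildConfigName:        "rails-postgresql-example",
		deploymentConfigName:   "rails-postgresql-example",
		dbDeploymentConfigName: "postgresql",
		serviceName:            "rails-postgresql-example",
		appPath:                "/articles",
		expectedString:         "Listing articles",
	},
))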
Example #3
func tryToReadFromPod(oc *exutil.CLI, podName, expectedValue string) {
	// don't include the _id field in the output because it changes every time
	findCmd := "rs.slaveOk(); printjson(db.bar.find({}, {_id: 0}).toArray())"

	fmt.Fprintf(g.GinkgoWriter, "DEBUG: reading record from pod %v\n", podName)

	mongoPod := db.NewMongoDB(podName)
	result, err := mongoPod.Query(oc, findCmd)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(result).Should(o.ContainSubstring(expectedValue))
}
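A hedged usage sketch: reading the same expected record back from every member of a replica set. The pod names are hypothetical.

for _, podName := range []string{"mongodb-replica-0", "mongodb-replica-1"} {
	tryToReadFromPod(oc, podName, `{ "status" : "passed" }`)
}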
Example #4
func checkSingleIdle(oc *exutil.CLI, idlingFile string, resources map[string][]string, resourceName string, kind string) {
	g.By("Idling the service")
	_, err := oc.Run("idle").Args("--resource-names-file", idlingFile).Output()
	o.Expect(err).ToNot(o.HaveOccurred())

	g.By("Ensuring the scale is zero (and stays zero)")
	objName := resources[resourceName][0]
	// make sure we don't get woken up by an incorrect router health check or anything like that
	o.Consistently(func() (string, error) {
		return oc.Run("get").Args(resourceName+"/"+objName, "--output=jsonpath=\"{.spec.replicas}\"").Output()
	}, 20*time.Second, 500*time.Millisecond).Should(o.ContainSubstring("0"))

	g.By("Fetching the service and checking the annotations are present")
	serviceName := resources["service"][0]
	endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
	o.Expect(err).NotTo(o.HaveOccurred())

	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.IdledAtAnnotation))
	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.UnidleTargetAnnotation))

	g.By("Checking the idled-at time")
	idledAtAnnotation := endpoints.Annotations[unidlingapi.IdledAtAnnotation]
	idledAtTime, err := time.Parse(time.RFC3339, idledAtAnnotation)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(idledAtTime).To(o.BeTemporally("~", time.Now(), 5*time.Minute))

	g.By("Checking the idle targets")
	unidleTargetAnnotation := endpoints.Annotations[unidlingapi.UnidleTargetAnnotation]
	unidleTargets := []unidlingapi.RecordedScaleReference{}
	err = json.Unmarshal([]byte(unidleTargetAnnotation), &unidleTargets)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(unidleTargets).To(o.Equal([]unidlingapi.RecordedScaleReference{
		{
			Replicas: 2,
			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
				Name: resources[resourceName][0],
				Kind: kind,
			},
		},
	}))
}
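A hypothetical invocation, assuming the resources map is keyed by resource type with object names as values, matching the lookups above (note the function hard-codes an expectation of 2 recorded replicas).

resources := map[string][]string{
	"deploymentconfig": {"idling-echo"}, // hypothetical object name
	"service":          {"idling-echo"},
}
checkSingleIdle(oc, idlingFile, resources, "deploymentconfig", "DeploymentConfig")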
Example #5
func testPruneImages(oc *exutil.CLI, schemaVersion int) {
	var mediaType string
	switch schemaVersion {
	case 1:
		mediaType = schema1.MediaTypeManifest
	case 2:
		mediaType = schema2.MediaTypeManifest
	default:
		g.Fail(fmt.Sprintf("unexpected schema version %d", schemaVersion))
	}

	oc.SetOutputDir(exutil.TestContext.OutputDir)
	outSink := g.GinkgoWriter

	cleanUp := cleanUpContainer{}
	defer tearDownPruneImagesTest(oc, &cleanUp)

	dClient, err := testutil.NewDockerClient()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By(fmt.Sprintf("build two images using Docker and push them as schema %d", schemaVersion))
	imgPruneName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgPruneName)
	pruneSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	imgKeepName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgKeepName)
	keepSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(pruneSize < keepSize).To(o.BeTrue())

	g.By(fmt.Sprintf("ensure uploaded image is of schema %d", schemaVersion))
	imgPrune, err := oc.AsAdmin().Client().Images().Get(imgPruneName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgPrune.DockerImageManifestMediaType).To(o.Equal(mediaType))
	imgKeep, err := oc.AsAdmin().Client().Images().Get(imgKeepName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgKeep.DockerImageManifestMediaType).To(o.Equal(mediaType))

	g.By("prune the first image uploaded (dry-run)")
	output, err := oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("verify images, layers and configs about to be pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	noConfirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(noConfirmSize).To(o.Equal(keepSize))

	g.By("prune the first image uploaded (confirm)")
	output, err = oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0", "--confirm").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("verify images, layers and configs about to be pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	confirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("confirming storage size: sizeOfKeepImage=%d <= sizeAfterPrune=%d < beforePruneSize=%d", imgKeep.DockerImageMetadata.Size, confirmSize, keepSize))
	o.Expect(confirmSize >= imgKeep.DockerImageMetadata.Size).To(o.BeTrue())
	o.Expect(confirmSize < keepSize).To(o.BeTrue())
	g.By(fmt.Sprintf("confirming pruned size: sizeOfPruneImage=%d <= (sizeAfterPrune=%d - sizeBeforePrune=%d)", imgPrune, keepSize, confirmSize))
	o.Expect(imgPrune.DockerImageMetadata.Size <= keepSize-confirmSize).To(o.BeTrue())
}
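The schemaVersion switch at the top suggests the function is registered once per manifest schema, roughly like this (a sketch, not the original wiring):

g.It("should prune schema 1 images", func() { testPruneImages(oc, 1) })
g.It("should prune schema 2 images", func() { testPruneImages(oc, 2) })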
Example #6
			g.By(fmt.Sprintf("verifying the build %q status", out))
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "sample-build-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("sample-build", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.It("should start a build and wait for the build to fail", func() {
			g.By("starting the build with --wait flag but wrong --commit")
			out, err := oc.Run("start-build").
				Args("sample-build", "--wait", "--commit", "fffffff").
				Output()
			o.Expect(err).To(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(`status is "Failed"`))
		})
	})

	g.Describe("override environment", func() {
		g.It("should accept environment variables", func() {
			g.By("starting the build with -e FOO=bar")
			out, err := oc.Run("start-build").Args("sample-build", "--follow", "--wait", "-e", "FOO=bar,VAR=test").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By(fmt.Sprintf("verifying the build output contains the env var"))
			o.Expect(out).To(o.ContainSubstring("FOO=bar"))
			// This variable is not set and thus inherited from the original build
			// config
			o.Expect(out).To(o.ContainSubstring("BAR=test"))
			o.Expect(out).To(o.ContainSubstring("VAR=test"))
			g.By(fmt.Sprintf("verifying the build %q status", out))
Example #7
			if err != nil {
				exutil.DumpBuildLogs("imagesourcebuild", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to deploy successfully")
			pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), imageSourceLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(len(pods)).To(o.Equal(1))
			pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to contain the file from the input image")
			out, err := oc.Run("exec").Args(pod.Name, "-c", pod.Spec.Containers[0].Name, "--", "ls", "injected/dir").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("jenkins.war"))
		})
	})
	g.Describe("build with image docker", func() {
		g.It("should complete successfully and contain the expected file", func() {
			g.By("Creating build configs for docker build")
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("starting the docker strategy build")
			err = oc.Run("start-build").Args("imagedockerbuild").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("expect the builds to complete successfully")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "imagedockerbuild-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs("imagedockerbuild", oc)
			}
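The excerpt is cut off here; following the pattern used throughout these examples, the line after the log dump would assert on the error:

			// fail the spec if the build did not complete (logs were dumped above)
			o.Expect(err).NotTo(o.HaveOccurred())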
Example #8
	g.Describe("copy by strategy", func() {

		testRsyncFn := func(strategy string) func() {
			return func() {
				g.By(fmt.Sprintf("Calling oc rsync %s %s:/tmp --strategy=%s", sourcePath1, podName, strategy))
				err := oc.Run("rsync").Args(
					sourcePath1,
					fmt.Sprintf("%s:/tmp", podName),
					fmt.Sprintf("--strategy=%s", strategy)).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("Verifying that files are copied to the container")
				result, err := oc.Run("rsh").Args(podName, "ls", "/tmp/image-streams").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(result).To(o.ContainSubstring("image-streams-centos7.json"))

				g.By(fmt.Sprintf("Calling oc rsync %s/ %s:/tmp/image-streams --strategy=%s --delete", sourcePath2, podName, strategy))
				err = oc.Run("rsync").Args(
					sourcePath2+"/",
					fmt.Sprintf("%s:/tmp/image-streams", podName),
					fmt.Sprintf("--strategy=%s", strategy),
					"--delete").Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("Verifying that the expected files are in the container")
				result, err = oc.Run("rsh").Args(podName, "ls", "/tmp/image-streams").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(result).To(o.ContainSubstring("application-template-stibuild.json"))
				o.Expect(result).NotTo(o.ContainSubstring("image-streams-centos7.json"))
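Since testRsyncFn returns the spec body as a closure, a plausible continuation registers one It per strategy; the strategy names below are oc rsync's documented strategies, assumed rather than taken from this excerpt.

for _, strategy := range []string{"rsync", "rsync-daemon", "tar"} {
	g.It(fmt.Sprintf("should copy files with the %s strategy", strategy), testRsyncFn(strategy))
}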
Example #9
			_, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			iterations := 15
			for i := 0; i < iterations; i++ {
				if rand.Float32() < 0.2 {
					time.Sleep(time.Duration(rand.Float32() * rand.Float32() * float32(time.Second)))
				}
				switch n := rand.Float32(); {

				case n < 0.4:
					// trigger a new deployment
					e2e.Logf("%02d: triggering a new deployment with config change", i)
					out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output()
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(out).To(o.ContainSubstring("updated"))

				case n < 0.7:
					// cancel any running deployment
					e2e.Logf("%02d: cancelling deployment", i)
					if out, err := oc.Run("deploy").Args("dc/deployment-simple", "--cancel").Output(); err != nil {
						// TODO: we should fix this
						if !strings.Contains(out, "the object has been modified") {
							o.Expect(err).NotTo(o.HaveOccurred())
						}
						e2e.Logf("--cancel deployment failed due to conflict: %v", err)
					}

				case n < 0.0:
					// delete the deployer pod - disabled because it forces the system to wait for the sync loop
					e2e.Logf("%02d: deleting one or more deployer pods", i)
Example #10
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("waiting for the build to complete")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
			if err != nil {
				exutil.DumpBuildLogs(buildConfigName, oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			buildLog, err := oc.Run("logs").Args("--follow", "build/"+buildName).Output()
			if err != nil {
				e2e.Failf("Failed to fetch build logs of %q: %v", buildLog, err)
			}

			g.By("expecting that .s2i/bin/assemble-runtime was executed")
			o.Expect(buildLog).To(o.ContainSubstring(`Using "assemble-runtime" installed from "<source-dir>/.s2i/bin/assemble-runtime"`))
			o.Expect(buildLog).To(o.ContainSubstring(".s2i/bin/assemble-runtime: assembling app within runtime image"))

			g.By("expecting that environment variable from BuildConfig is available")
			o.Expect(buildLog).To(o.ContainSubstring(".s2i/bin/assemble-runtime: USING_ENV_FROM_BUILD_CONFIG=yes"))

			g.By("expecting that environment variable from .s2i/environment is available")
			o.Expect(buildLog).To(o.ContainSubstring(".s2i/bin/assemble-runtime: USING_ENV_FROM_FILE=yes"))
		})
	})

	g.Describe("with scripts from URL", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.It("should use assemble-runtime script from URL", func() {
			const buildConfigName = "java-extended-build-from-url"
Example #11
		g.It("should create a docker build with a quota and run it", func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc create -f %q", buildFixture))
			err := oc.Run("create").Args("-f", buildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build")
			_, err = oc.Run("start-build").Args("docker-build-quota", "--from-dir", exutil.FixturePath("testdata", "build-quota")).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build is in Failed phase")
			// note that success and fail functions are intentionally reversed because we want to wait for failure.
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "docker-build-quota-1", exutil.CheckBuildFailedFn, exutil.CheckBuildSuccessFn)
			if err != nil {
				exutil.DumpBuildLogs("docker-build-quota", oc)
			}
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build logs to contain the correct cgroups values")
			out, err := oc.Run("logs").Args(fmt.Sprintf("build/docker-build-quota-1")).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("MEMORY=209715200"))
			o.Expect(out).To(o.ContainSubstring("MEMORYSWAP=209715200"))
			o.Expect(out).To(o.ContainSubstring("SHARES=61"))
			o.Expect(out).To(o.ContainSubstring("PERIOD=100000"))
			o.Expect(out).To(o.ContainSubstring("QUOTA=6000"))
		})
	})
})
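The asserted cgroup values decode to concrete limits: 209715200 bytes is 200Mi, 61 shares correspond to a 60m CPU limit (60 × 1024 / 1000, truncated), and QUOTA=6000 against the default 100000µs PERIOD is the same 60m. A hedged reconstruction of the limits the build-quota fixture presumably sets:

// Not the actual fixture; reconstructed from the asserted cgroup values above.
resources := kapi.ResourceRequirements{
	Limits: kapi.ResourceList{
		kapi.ResourceCPU:    resource.MustParse("60m"),   // SHARES=61, QUOTA=6000
		kapi.ResourceMemory: resource.MustParse("200Mi"), // MEMORY=MEMORYSWAP=209715200
	},
}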
Example #12
		}

		g.By(fmt.Sprintf("creating resource quota with a limit %s=%d", imageapi.ResourceImages, 0))
		_, err = oc.AdminKubeREST().ResourceQuotas(oc.Namespace()).Create(rq)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for resource quota to get in sync")
		err = waitForLimitSync(oc, quotaName, rq.Spec.Hard)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("trying to tag an image exceeding %s=%d quota", imageapi.ResourceImages, 0))
		out, err := oc.Run("tag").Args(sharedProjectName+"/src:1", "is:1").Output()
		o.Expect(err).To(o.HaveOccurred())
		o.Expect(out).Should(o.MatchRegexp("(?i)exceeded quota"))
		o.Expect(out).Should(o.ContainSubstring(string(imageapi.ResourceImages)))

		g.By(fmt.Sprintf("bump the %s quota to %d", imageapi.ResourceImages, 1))
		rq, err = oc.AdminKubeREST().ResourceQuotas(oc.Namespace()).Get(quotaName)
		o.Expect(err).NotTo(o.HaveOccurred())
		rq.Spec.Hard[imageapi.ResourceImages] = resource.MustParse("1")
		_, err = oc.AdminKubeREST().ResourceQuotas(oc.Namespace()).Update(rq)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = waitForLimitSync(oc, quotaName, rq.Spec.Hard)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("trying to tag an image below %s=%d quota", imageapi.ResourceImages, 1))
		out, err = oc.Run("tag").Args(sharedProjectName+"/src:1", "is:1").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		used, err := waitForResourceQuotaSync(oc, quotaName, rq.Spec.Hard)
		o.Expect(err).NotTo(o.HaveOccurred())
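The excerpt depends on an rq object defined above the cut; given the %s=%d log messages, a plausible reconstruction of the quota fixture is:

// Hypothetical reconstruction, not the original fixture.
rq := &kapi.ResourceQuota{
	ObjectMeta: kapi.ObjectMeta{Name: quotaName},
	Spec: kapi.ResourceQuotaSpec{
		Hard: kapi.ResourceList{
			imageapi.ResourceImages: resource.MustParse("0"),
		},
	},
}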
Example #13
		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()

		g.By("waiting for istag to initialize")
		exutil.WaitForAnImageStreamTag(oc, oc.Namespace(), "busybox", "1")
	})

	g.Describe("testing postCommit hook", func() {

		g.It("successful postCommit script with args", func() {
			err := oc.Run("patch").Args("bc/busybox", "-p", "{\"spec\":{\"postCommit\":{\"script\":\"echo hello $1\",\"args\":[\"world\"],\"command\":null}}}").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			br, _ := exutil.StartBuildAndWait(oc, "busybox")
			br.AssertSuccess()
			o.Expect(br.Logs()).To(o.ContainSubstring("hello world"))
		})

		g.It("successful postCommit explicit command", func() {
			err := oc.Run("patch").Args("bc/busybox", "-p", "{\"spec\":{\"postCommit\":{\"command\":[\"sh\",\"-c\"],\"args\":[\"echo explicit command\"],\"script\":\"\"}}}").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			br, _ := exutil.StartBuildAndWait(oc, "busybox")
			br.AssertSuccess()
			o.Expect(br.Logs()).To(o.ContainSubstring("explicit command"))
		})

		g.It("successful postCommit default entrypoint", func() {
			err := oc.Run("patch").Args("bc/busybox", "-p", "{\"spec\":{\"postCommit\":{\"args\":[\"echo\",\"default entrypoint\"],\"command\":null,\"script\":\"\"}}}").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			br, _ := exutil.StartBuildAndWait(oc, "busybox")
			br.AssertSuccess()
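The three JSON patches above exercise the three shapes of the postCommit hook. Expressed as build API structs (field names per BuildPostCommitSpec; the mapping is a sketch):

script := buildapi.BuildPostCommitSpec{Script: "echo hello $1", Args: []string{"world"}}                        // script with args
command := buildapi.BuildPostCommitSpec{Command: []string{"sh", "-c"}, Args: []string{"echo explicit command"}} // explicit command
entrypoint := buildapi.BuildPostCommitSpec{Args: []string{"echo", "default entrypoint"}}                        // args appended to the image entrypoint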
Example #14
				It("User must be saved in the database", func() {
					user := gory.Build("userOk").(*models.User)
					userTest := models.User{
						IdUser: user.IdUser,
					}
					userTest, err := userTest.Get()

					Expect(err).To(gomega.BeNil())
					Expect(user.Name).To(gomega.Equal(userTest.Name))
				})

				It("Response need have hash token", func() {
					serve.Handler.ServeHTTP(recorder, request)
					Expect(recorder.Code).To(gomega.Equal(200))
					Expect(recorder.HeaderMap["Content-Type"][0]).
						To(gomega.ContainSubstring("application/json; charset=UTF-8"))

					data := myCloser{bytes.NewBufferString(recorder.Body.String())}
					token, err := DecodeToken(data)
					Expect(err).To(gomega.BeNil())
					Expect(token).ShouldNot(gomega.BeZero())
				})
			})
		})

		Describe("POST /auth/login", func() {

			Context("With invalid JSON", func() {

				BeforeEach(func() {
					user := models.User{}
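The myCloser wrapper used above is not shown in the excerpt; a plausible definition wraps *bytes.Buffer so the body satisfies io.ReadCloser for DecodeToken:

// Assumed helper: embed *bytes.Buffer for Read, add a no-op Close.
type myCloser struct {
	*bytes.Buffer
}

func (myCloser) Close() error { return nil }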
Example #16
			_, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			iterations := 15
			for i := 0; i < iterations; i++ {
				if rand.Float32() < 0.2 {
					time.Sleep(time.Duration(rand.Float32() * rand.Float32() * float32(time.Second)))
				}
				switch n := rand.Float32(); {

				case n < 0.4:
					// trigger a new deployment
					e2e.Logf("%02d: triggering a new deployment with config change", i)
					out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output()
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(out).To(o.ContainSubstring("updated"))

				case n < 0.7:
					// cancel any running deployment
					e2e.Logf("%02d: cancelling deployment", i)
					if out, err := oc.Run("rollout").Args("cancel", "dc/deployment-simple").Output(); err != nil {
						// TODO: we should fix this
						if !strings.Contains(out, "the object has been modified") {
							o.Expect(err).NotTo(o.HaveOccurred())
						}
						e2e.Logf("rollout cancel deployment failed due to conflict: %v", err)
					}

				case n < 0.0:
					// delete the deployer pod - disabled because it forces the system to wait for the sync loop
					e2e.Logf("%02d: deleting one or more deployer pods", i)
Example #17
	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
		oc.Run("create").Args("-f", buildFixture).Execute()
	})

	g.Describe("start build with broken proxy", func() {
		g.It("should start a build and wait for the build to to fail", func() {
			g.By("starting the build with --wait and --follow flags")
			out, err := oc.Run("start-build").Args("sample-build", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
			o.Expect(err).To(o.HaveOccurred())
			g.By("verifying the build sample-app-1 output")
			// The git ls-remote check should exit the build when the remote
			// repository is not accessible. It should never get to the clone.
			o.Expect(out).NotTo(o.ContainSubstring("clone"))
			o.Expect(out).To(o.ContainSubstring(`unable to access 'https://github.com/openshift/ruby-hello-world.git/': Failed connect to 127.0.0.1:3128`))
			g.By("verifying the build sample-build-1 status")
			build, err := oc.REST().Builds(oc.Namespace()).Get("sample-build-1")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed))
		})

	})

})
Example #19
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("verifying the build %q status", out))
			build, err := oc.REST().Builds(oc.Namespace()).Get(out)
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseComplete))

		})

		g.It("should start a build and wait for the build to fail", func() {
			g.By("starting the build with --wait flag but wrong --commit")
			out, err := oc.Run("start-build").
				Args("sample-build", "--wait", "--commit", "fffffff").
				Output()
			o.Expect(err).To(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(`status is "Failed"`))
		})
	})

	g.Describe("cancelling build started by oc start-build --wait", func() {
		g.It("should start a build and wait for the build to cancel", func() {
			g.By("starting the build with --wait flag")
			var wg sync.WaitGroup
			wg.Add(1)
			go func() {
				defer g.GinkgoRecover()
				defer wg.Done()
				out, err := oc.Run("start-build").Args("sample-build", "--wait").Output()
				o.Expect(err).To(o.HaveOccurred())
				o.Expect(out).Should(o.ContainSubstring(`status is "Cancelled"`))
			}()
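The excerpt ends just after the goroutine is launched; a plausible remainder cancels the build from the main goroutine and then blocks on the WaitGroup so the goroutine's expectations run. The build name is an assumption following the naming in the surrounding examples.

			// A real spec would first wait for sample-build-1 to exist; elided here.
			g.By("cancelling the build")
			err := oc.Run("cancel-build").Args("sample-build-1").Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			wg.Wait() // let the goroutine's assertions run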
Example #21
			_, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			iterations := 15
			for i := 0; i < iterations; i++ {
				if rand.Float32() < 0.2 {
					time.Sleep(time.Duration(rand.Float32() * rand.Float32() * float32(time.Second)))
				}
				switch n := rand.Float32(); {

				case n < 0.4:
					// trigger a new deployment
					e2e.Logf("%02d: triggering a new deployment with config change", i)
					out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output()
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(out).To(o.ContainSubstring("updated"))

				case n < 0.7:
					// cancel any running deployment
					e2e.Logf("%02d: cancelling deployment", i)
					if out, err := oc.Run("deploy").Args("dc/deployment-simple", "--cancel").Output(); err != nil {
						// TODO: we should fix this
						if !strings.Contains(out, "the object has been modified") {
							o.Expect(err).NotTo(o.HaveOccurred())
						}
						e2e.Logf("--cancel deployment failed due to conflict: %v", err)
					}

				case n < 0.0:
					// delete the deployer pod - disabled because it forces the system to wait for the sync loop
					e2e.Logf("%02d: deleting one or more deployer pods", i)
	g.It(fmt.Sprintf("should deny a docker image reference exceeding limit on %s resource", imageapi.ResourceImageStreamTags), func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer tearDown(oc)

		tag2Image, err := buildAndPushTestImagesTo(oc, "src", "tag", 2)
		o.Expect(err).NotTo(o.HaveOccurred())

		limit := kapi.ResourceList{imageapi.ResourceImageStreamTags: resource.MustParse("0")}
		_, err = createLimitRangeOfType(oc, imageapi.LimitTypeImageStream, limit)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("trying to tag a docker image exceeding limit %v", limit))
		out, err := oc.Run("import-image").Args("stream:dockerimage", "--confirm", "--insecure", "--from", tag2Image["tag1"].DockerImageReference).Output()
		o.Expect(err).To(o.HaveOccurred())
		o.Expect(out).Should(o.ContainSubstring("exceeds the maximum limit"))
		o.Expect(out).Should(o.ContainSubstring(string(imageapi.ResourceImageStreamTags)))

		limit, err = bumpLimit(oc, imageapi.ResourceImageStreamTags, "1")
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("trying to tag a docker image below limit %v", limit))
		err = oc.Run("import-image").Args("stream:dockerimage", "--confirm", "--insecure", "--from", tag2Image["tag1"].DockerImageReference).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitForAnImageStreamTag(oc, oc.Namespace(), "stream", "dockerimage")
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By(fmt.Sprintf("trying to tag a docker image exceeding limit %v", limit))
		is, err := oc.Client().ImageStreams(oc.Namespace()).Get("stream")
		o.Expect(err).NotTo(o.HaveOccurred())
		is.Spec.Tags["foo"] = imageapi.TagReference{
Example #22
			err := oc.Run("create").Args("-f", secretsFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.Run("create").Args("-f", secondSecretsFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("creating the sample source build config and image stream")
			err = oc.Run("create").Args("-f", isFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			err = oc.Run("create").Args("-f", sourceBuildFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting the sample source build")
			out, err := oc.Run("start-build").Args("test", "--follow", "--wait").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("secret1=secret1"))
			o.Expect(out).To(o.ContainSubstring("secret3=secret3"))
			o.Expect(out).To(o.ContainSubstring("relative-secret1=secret1"))
			o.Expect(out).To(o.ContainSubstring("relative-secret3=secret3"))

			g.By("checking the status of the build")
			build, err := oc.REST().Builds(oc.Namespace()).Get("test-1")
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseComplete))

			g.By("getting the image name")
			image, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("verifying the build secrets are not present in the output image")
			pod := exutil.GetPodForContainer(kapi.Container{Name: "test", Image: image})
Example #23
					})

					oc.KubeFramework().TestContainerOutput(getPodNameForTest(image, t), pod, 0, []string{t.Expected})

					g.By(fmt.Sprintf("creating a sample pod for %q", t.DockerImageReference))
					pod = exutil.GetPodForContainer(kapi.Container{
						Image:   t.DockerImageReference,
						Name:    "test",
						Command: []string{"/usr/bin/sleep", "infinity"},
					})
					_, err := oc.KubeClient().Core().Pods(oc.Namespace()).Create(pod)
					o.Expect(err).NotTo(o.HaveOccurred())

					err = oc.KubeFramework().WaitForPodRunning(pod.Name)
					o.Expect(err).NotTo(o.HaveOccurred())

					g.By("calling the binary using 'oc exec /bin/bash -c'")
					out, err := oc.Run("exec").Args("-p", pod.Name, "--", "/bin/bash", "-c", t.Cmd).Output()
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(out).Should(o.ContainSubstring(t.Expected))

					g.By("calling the binary using 'oc exec /bin/sh -ic'")
					out, err = oc.Run("exec").Args("-p", pod.Name, "--", "/bin/sh", "-ic", t.Cmd).Output()
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(out).Should(o.ContainSubstring(t.Expected))
				})
			})
		}
	}
})
Example #24
var _ = g.Describe("deployments: parallel: test deployment", func() {
	defer g.GinkgoRecover()
	var (
		deploymentFixture = exutil.FixturePath("..", "extended", "fixtures", "test-deployment-test.yaml")
		oc                = exutil.NewCLI("cli-deployment", exutil.KubeConfigPath())
	)

	g.Describe("test deployment", func() {
		g.It("should run a deployment to completion and then scale to zero", func() {
			out, err := oc.Run("create").Args("-f", deploymentFixture).Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			out, err = oc.Run("logs").Args("-f", "dc/deployment-test").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By(fmt.Sprintf("checking the logs for substrings\n%s", out))
			o.Expect(out).To(o.ContainSubstring("deployment-test-1 to 2"))
			o.Expect(out).To(o.ContainSubstring("Pre hook finished"))
			o.Expect(out).To(o.ContainSubstring("Deployment deployment-test-1 successfully made active"))

			g.By("verifying the deployment is marked complete and scaled to zero")
			err = wait.Poll(100*time.Millisecond, 1*time.Minute, func() (bool, error) {
				rc, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get("deployment-test-1")
				o.Expect(err).NotTo(o.HaveOccurred())
				status := rc.Annotations[deployapi.DeploymentStatusAnnotation]
				if deployapi.DeploymentStatus(status) != deployapi.DeploymentStatusComplete {
					return false, nil
				}
				if rc.Spec.Replicas != 0 {
					return false, nil
				}
				if rc.Status.Replicas != 0 {
Example #25
	})

	g.Describe("oc start-build --wait", func() {
		g.It("should start a build and wait for the build to complete", func() {
			g.By("starting the build with --wait flag")
			br, err := exutil.StartBuildAndWait(oc, "sample-build", "--wait")
			o.Expect(err).NotTo(o.HaveOccurred())
			br.AssertSuccess()
		})

		g.It("should start a build and wait for the build to fail", func() {
			g.By("starting the build with --wait flag but wrong --commit")
			br, _ := exutil.StartBuildAndWait(oc, "sample-build", "--wait", "--commit=fffffff")
			br.AssertFailure()
			o.Expect(br.StartBuildErr).To(o.HaveOccurred()) // start-build should detect the build error with --wait flag
			o.Expect(br.StartBuildStdErr).Should(o.ContainSubstring(`status is "Failed"`))
		})
	})

	g.Describe("override environment", func() {
		g.It("should accept environment variables", func() {
			g.By("starting the build with -e FOO=bar,VAR=test")
			br, err := exutil.StartBuildAndWait(oc, "sample-build", "-e", "FOO=bar,VAR=test")
			br.AssertSuccess()
			buildLog, err := br.Logs()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("verifying the build output contains the env vars"))
			o.Expect(buildLog).To(o.ContainSubstring("FOO=bar"))
			o.Expect(buildLog).To(o.ContainSubstring("VAR=test"))
Example #26
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(serviceIP).NotTo(o.BeEmpty())
			port, err := oc.Run("get").Args("svc", "jenkins", "--config",
				exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(port).NotTo(o.BeEmpty())

			jenkinsUri := fmt.Sprintf("http://%s:%s", serviceIP, port)
			g.By(fmt.Sprintf("wait for jenkins to come up at %q", jenkinsUri))
			err = waitForJenkinsActivity(jenkinsUri, "", 200)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("inspecting the Jenkins master logs the slave image should be registered")
			out, err := oc.Run("logs").Args("dc/jenkins").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).To(o.ContainSubstring("Adding image ruby-22-centos7-jenkins-slave:latest as Kubernetes slave"))

			g.By("kick the ruby-hello-world-test job")
			immediateInteractionWithJenkins(fmt.Sprintf("%s/job/ruby-hello-world-test/build?delay=0sec", jenkinsUri), "POST", nil, 201)
			verifyPodProvisioned := func() (bool, error) {
				out, err := oc.Run("logs").Args("dc/jenkins").Output()
				if err != nil {
					return false, err
				}
				return strings.Contains(out, "Kubernetes Pod Template provisioning successfully completed"), nil
			}

			err = wait.Poll(2*time.Second, 5*time.Minute, verifyPodProvisioned)
			o.Expect(err).NotTo(o.HaveOccurred())
		})
	})
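A hedged sketch of the waitForJenkinsActivity helper used above, assuming net/http, io/ioutil, strings, and the Kubernetes wait package; the real implementation may differ.

func waitForJenkinsActivity(uri, verificationString string, status int) error {
	return wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		resp, err := http.Get(uri)
		if err != nil {
			return false, nil // Jenkins may not be up yet; keep polling
		}
		defer resp.Body.Close()
		if resp.StatusCode != status {
			return false, nil
		}
		if verificationString == "" {
			return true, nil
		}
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return false, nil
		}
		return strings.Contains(string(body), verificationString), nil
	})
}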
Example #27
	)

	g.Describe("building from templates", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.It(fmt.Sprintf("should create an image from %q docker template without an output image reference defined", dockerImageFixture), func() {
			err := oc.Run("create").Args("-f", dockerImageFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting build to pass without an output image reference specified")
			out, err := oc.Run("start-build").Args("test-docker", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(`Build does not have an Output defined, no output image was pushed to a registry.`))
		})

		g.It(fmt.Sprintf("should create an image from %q S2i template without an output image reference defined", s2iImageFixture), func() {
			err := oc.Run("create").Args("-f", s2iImageFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting build to pass without an output image reference specified")
			out, err := oc.Run("start-build").Args("test-sti", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(`Build does not have an Output defined, no output image was pushed to a registry.`))
		})
	})
Example #28
	)

	g.Describe("building from templates", func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)

		g.It(fmt.Sprintf("should create an image from %q docker template without an output image reference defined", dockerImageFixture), func() {
			err := oc.Run("create").Args("-f", dockerImageFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting build to pass without an output image reference specified")
			out, err := oc.Run("start-build").Args("test-docker", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(`Build complete, no image push requested`))
		})

		g.It(fmt.Sprintf("should create an image from %q S2i template without an output image reference defined", s2iImageFixture), func() {
			err := oc.Run("create").Args("-f", s2iImageFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting build to pass without an output image reference specified")
			out, err := oc.Run("start-build").Args("test-sti", "--follow", "--wait").Output()
			if err != nil {
				fmt.Fprintln(g.GinkgoWriter, out)
			}
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(`Build complete, no image push requested`))
		})
	})
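Examples #27 and #28 assert different messages for the same no-output condition, suggesting the wording changed between releases; a version-tolerant assertion could accept either using gomega's Or combinator:

o.Expect(out).Should(o.Or(
	o.ContainSubstring("no output image was pushed to a registry"),
	o.ContainSubstring("Build complete, no image push requested"),
))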
Example #29
			podNames, err := exutil.WaitForPods(
				oc.KubeREST().Pods(oc.Namespace()),
				exutil.ParseLabelsOrDie("name=mongodb"),
				exutil.CheckPodIsRunningFn,
				1,
				1*time.Minute,
			)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(podNames).Should(o.HaveLen(1))

			g.By("expecting the mongodb service is answering for ping")
			mongo := db.NewMongoDB(podNames[0])
			ok, err := mongo.IsReady(oc)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(ok).Should(o.BeTrue())

			g.By("expecting that we can insert a new record")
			result, err := mongo.Query(oc, `db.foo.save({ "status": "passed" })`)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(result).Should(o.ContainSubstring(`WriteResult({ "nInserted" : 1 })`))

			g.By("expecting that we can read a record")
			findCmd := "printjson(db.foo.find({}, {_id: 0}).toArray())" // don't include _id field to output because it changes every time
			result, err = mongo.Query(oc, findCmd)
			o.Expect(err).ShouldNot(o.HaveOccurred())
			o.Expect(result).Should(o.ContainSubstring(`{ "status" : "passed" }`))
		})
	})

})
			err := oc.Run("create").Args("-f", sourceFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting the source build with --wait flag and short timeout")
			br, err := exutil.StartBuildAndWait(oc, "source-build", "--wait")
			o.Expect(br.StartBuildErr).To(o.HaveOccurred()) // start-build should detect the build error

			g.By("verifying the build status")
			o.Expect(br.BuildAttempt).To(o.BeTrue())                                            // the build should have been attempted
			o.Expect(br.Build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed)) // the build should have failed

			g.By("verifying the build pod status")
			pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(buildapi.GetBuildPodName(br.Build))
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(pod.Status.Phase).Should(o.BeEquivalentTo(kapi.PodFailed))
			o.Expect(pod.Status.Reason).Should(o.ContainSubstring("DeadlineExceeded"))

		})
	})

	g.Describe("oc start-build docker-build --wait", func() {
		g.It("Docker: should start a build and wait for the build failed and build pod being killed by kubelet", func() {

			g.By("calling oc create docker-build")
			err := oc.Run("create").Args("-f", dockerFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting the docker build with --wait flag and short timeout")
			br, err := exutil.StartBuildAndWait(oc, "docker-build", "--wait")
			o.Expect(br.StartBuildErr).To(o.HaveOccurred()) // start-build should detect the build error