// StartBuildFromJSON creates a build config from the supplied JSON file (not a template)
// and then starts a build, using the supplied oc/cli client for both operations; ginkgo
// error checking included.
func StartBuildFromJSON(jsonFile, buildPrefix string, oc *CLI) {
	err := oc.Run("create").Args("-f", jsonFile).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	out, berr := oc.Run("start-build").Args(buildPrefix).Output()
	fmt.Fprintf(g.GinkgoWriter, "\nstart-build output:\n%s\n", out)
	o.Expect(berr).NotTo(o.HaveOccurred())
}
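// Illustrative usage (sketch): the fixture path and build prefix below are hypothetical,
// and oc is assumed to come from the surrounding suite. Error assertions happen inside
// the helper, so the caller needs no additional checks.
func exampleStartBuildFromJSON(oc *CLI) {
	StartBuildFromJSON("fixtures/test-build.json", "sample-build", oc)
}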
// patchTemplate finds BuildConfigs in a template, changes their source type to Binary,
// and removes all triggers.
func patchTemplate(filename string, outDir string) string {
	inputJson, err := ioutil.ReadFile(filename)
	o.Expect(err).ToNot(o.HaveOccurred())

	var template map[string]interface{}
	err = json.Unmarshal(inputJson, &template)
	o.Expect(err).ToNot(o.HaveOccurred())

	for _, obj := range template["objects"].([]interface{}) {
		bc := obj.(map[string]interface{})
		if kind := bc["kind"].(string); kind != "BuildConfig" {
			continue
		}
		spec := bc["spec"].(map[string]interface{})
		spec["triggers"] = []interface{}{}

		source := spec["source"].(map[string]interface{})
		source["type"] = "Binary"
		delete(source, "git")
		delete(source, "contextDir")
	}

	outputJson, err := json.MarshalIndent(template, "", " ")
	o.Expect(err).ToNot(o.HaveOccurred())

	basename := filepath.Base(filename)
	outputFile := filepath.Join(outDir, basename)
	err = ioutil.WriteFile(outputFile, outputJson, 0644)
	o.Expect(err).ToNot(o.HaveOccurred())

	return outputFile
}
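// Illustrative usage (sketch): patch a fixture template, instantiate it, and then start
// the build explicitly. The template path and build config name are hypothetical, and
// oc/outDir/srcDir are assumed to come from the surrounding test.
func examplePatchTemplate(oc *exutil.CLI, outDir, srcDir string) {
	patched := patchTemplate("fixtures/sample-template.json", outDir)
	err := oc.Run("new-app").Args("-f", patched).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	// triggers were removed and the source type switched to Binary, so the build
	// must be started explicitly from local content
	err = oc.Run("start-build").Args("sample", "--from-dir", srcDir, "--wait").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
}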
// doTest corrupts the builder image, runs the build identified by bldPrefix, and
// verifies whether the resulting image matches the corruptor; debugStr is for ginkgo
// debug output.
func doTest(bldPrefix, debugStr string, same bool, oc *exutil.CLI) {
	// corrupt the builder image
	exutil.CorruptImage(fullImageName, corruptor)

	if bldPrefix == buildPrefixFC || bldPrefix == buildPrefixTC {
		// grant access to the custom build strategy
		err := oc.AsAdmin().Run("adm").Args("policy", "add-cluster-role-to-user", "system:build-strategy-custom", oc.Username()).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		defer func() {
			err = oc.AsAdmin().Run("adm").Args("policy", "remove-cluster-role-from-user", "system:build-strategy-custom", oc.Username()).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
		}()
	}

	// kick off the app/lang build and verify the builder image accordingly
	_, err := exutil.StartBuildAndWait(oc, bldPrefix)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	if same {
		exutil.VerifyImagesSame(fullImageName, corruptor, debugStr)
	} else {
		exutil.VerifyImagesDifferent(fullImageName, corruptor, debugStr)
	}

	// reset corrupted tagging for next test
	exutil.ResetImage(resetData)
	// dump tags/hexids for debug
	_, err = exutil.DumpAndReturnTagging(tags)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
}
// compareSchemas asserts that actual and expected contain the same schemas,
// ignoring property order.
func compareSchemas(actual, expected []*schema.Schema) {
	g.Expect(actual).To(g.HaveLen(len(expected)))

	for i, s := range actual {
		sortProperties(s)
		sortProperties(expected[i])
		g.Expect(s).To(g.Equal(expected[i]))
	}
}
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It("should build a "+c.repoName+" image and run it in a pod", func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				exutil.CheckOpenShiftNamespaceImageStreams(oc)
				g.By("calling oc new-app with the " + c.repoName + " example template")
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				// all the templates automatically start a build
				buildName := c.buildConfigName + "-1"

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
				if err != nil {
					exutil.DumpBuildLogs(c.buildConfigName, oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the app deployment to be complete")
				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				if len(c.dbDeploymentConfigName) > 0 {
					g.By("expecting the db deployment to be complete")
					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
					o.Expect(err).NotTo(o.HaveOccurred())
				}

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying string from app request")
				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, 30*time.Second)
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
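// Illustrative usage (sketch): the returned func is handed straight to g.Describe so
// ginkgo registers the whole suite for one sample repo; all field values below are
// hypothetical.
var _ = g.Describe("[image_ecosystem] rails sample repo", NewSampleRepoTest(
	SampleRepoConfig{
		repoName:               "rails",
		templateURL:            "https://example.com/rails-postgresql.json",
		buildConfigName:        "rails-postgresql-example",
		serviceName:            "rails-postgresql-example",
		deploymentConfigName:   "rails-postgresql-example",
		dbDeploymentConfigName: "database",
		expectedString:         "Welcome",
		appPath:                "/",
	},
))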
// Validate create/delete of objects
func validateCreateDelete(create bool, key, out string, err error) {
	ginkgolog("\nOBJ: %s\n", out)
	if create {
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(strings.Contains(out, key)).To(o.BeTrue())
	} else {
		o.Expect(err).To(o.HaveOccurred())
	}
}
// jenkinsJobBytes loads a Jenkins job config fixture, substitutes PROJECT_NAME with the
// supplied namespace, and returns the resulting bytes.
func jenkinsJobBytes(filename, namespace string) []byte {
	pre := exutil.FixturePath("fixtures", filename)
	post := exutil.ArtifactPath(filename)
	err := exutil.VarSubOnFile(pre, post, "PROJECT_NAME", namespace)
	o.Expect(err).NotTo(o.HaveOccurred())

	data, err := ioutil.ReadFile(post)
	o.Expect(err).NotTo(o.HaveOccurred())
	return data
}
// MakeContentFile creates a temporary file with content to upload to S3.
func MakeContentFile(content string) string {
	tmpFile, err := ioutil.TempFile("", "s3cli-test-content")
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	_, err = tmpFile.Write([]byte(content))
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	err = tmpFile.Close()
	gomega.Expect(err).ToNot(gomega.HaveOccurred())
	return tmpFile.Name()
}
// VerifyPlan returns an http.HandlerFunc that decodes a build plan from the request
// body and asserts it matches the expected plan.
func VerifyPlan(expectedPlan atc.Plan) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var plan atc.Plan
		err := json.NewDecoder(r.Body).Decode(&plan)
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		gomega.Expect(plan).To(testhelpers.MatchPlan(expectedPlan))
	}
}
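// Illustrative usage (sketch): combine VerifyPlan with gomega's ghttp test server;
// the endpoint path and expectedPlan value are hypothetical.
func exampleVerifyPlan(server *ghttp.Server, expectedPlan atc.Plan) {
	server.AppendHandlers(ghttp.CombineHandlers(
		ghttp.VerifyRequest("POST", "/api/v1/pipelines"),
		VerifyPlan(expectedPlan),
	))
}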
// tryToReadFromPod queries the MongoDB instance in the given pod and expects the
// result to contain expectedValue.
func tryToReadFromPod(oc *exutil.CLI, podName, expectedValue string) {
	// don't include the _id field in the output because it changes every time
	findCmd := "rs.slaveOk(); printjson(db.bar.find({}, {_id: 0}).toArray())"

	fmt.Fprintf(g.GinkgoWriter, "DEBUG: reading record from pod %v\n", podName)

	mongoPod := db.NewMongoDB(podName)
	result, err := mongoPod.Query(oc, findCmd)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(result).Should(o.ContainSubstring(expectedValue))
}
// MakeConfigFile creates a config file from a S3Cli config struct.
func MakeConfigFile(cfg *config.S3Cli) string {
	cfgBytes, err := json.Marshal(cfg)
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	tmpFile, err := ioutil.TempFile("", "s3cli-test")
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	_, err = tmpFile.Write(cfgBytes)
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	err = tmpFile.Close()
	gomega.Expect(err).ToNot(gomega.HaveOccurred())
	return tmpFile.Name()
}
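// Illustrative usage (sketch): the two helpers pair naturally in test setup. The
// struct fields are left empty here because only the file plumbing is being shown;
// how the paths are handed to the s3cli binary under test is outside this sketch.
func exampleConfigAndContentFiles() {
	configPath := MakeConfigFile(&config.S3Cli{}) // populate fields per the real struct
	contentPath := MakeContentFile("sample payload")
	defer os.Remove(configPath)
	defer os.Remove(contentPath)
	// hand configPath/contentPath to the s3cli binary under test, e.g. via os/exec
}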
func waitForNumberOfPodsWithLabel(oc *exutil.CLI, number int, label string) []string {
	g.By(fmt.Sprintf("expecting that there are %d running pods with label name=%s", number, label))

	podNames, err := exutil.WaitForPods(
		oc.KubeClient().Core().Pods(oc.Namespace()),
		exutil.ParseLabelsOrDie("name="+label),
		exutil.CheckPodIsRunningFn,
		number,
		1*time.Minute,
	)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(podNames).Should(o.HaveLen(number))

	return podNames
}
func assertEnvVars(oc *exutil.CLI, buildPrefix string, varsToFind map[string]string) {
	buildList, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
	o.Expect(err).NotTo(o.HaveOccurred())

	// Ensure that expected start-build environment variables were injected
	for _, build := range buildList.Items {
		ginkgolog("Found build: %q", build.GetName())
		if strings.HasPrefix(build.GetName(), buildPrefix) {
			envs := []kapi.EnvVar{}
			if build.Spec.Strategy.DockerStrategy != nil && build.Spec.Strategy.DockerStrategy.Env != nil {
				envs = build.Spec.Strategy.DockerStrategy.Env
			} else if build.Spec.Strategy.SourceStrategy != nil && build.Spec.Strategy.SourceStrategy.Env != nil {
				envs = build.Spec.Strategy.SourceStrategy.Env
			} else {
				continue
			}

			for k, v := range varsToFind {
				found := false
				for _, env := range envs {
					ginkgolog("Found %s=%s in build %s", env.Name, env.Value, build.GetName())
					if k == env.Name && v == env.Value {
						found = true
						break
					}
				}
				o.ExpectWithOffset(1, found).To(o.BeTrue())
			}
		}
	}
}
// RetryUntilResult runs the provided cmd repeatedly, once every period, up to the
// supplied timeout, until the cmd result matches the supplied expectedCmdResult.
func RetryUntilResult(command model.Cmd, expectedCmdResult model.CmdResult, period, timeout time.Duration) bool {
	var actualCmdResult model.CmdResult

	fmt.Fprintf(ginkgo.GinkgoWriter, "Waiting up to %d seconds for `%s` to return expected cmdResult %s...\n",
		int(timeout.Seconds()), command.CommandLineString, expectedCmdResult.String())

	tck := time.NewTicker(period)
	tmr := time.NewTimer(timeout)
	defer tck.Stop()
	defer tmr.Stop()

	for {
		select {
		case <-tck.C:
			sess, err := StartCmd(command)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			sessWait := sess.Wait()
			actualCmdResult = model.CmdResult{
				Out:      sessWait.Out.Contents(),
				Err:      sessWait.Err.Contents(),
				ExitCode: sessWait.ExitCode(),
			}
			if actualCmdResult.Satisfies(expectedCmdResult) {
				return true
			}
		case <-tmr.C:
			fmt.Fprintf(ginkgo.GinkgoWriter, "FAIL: Actual cmdResult '%v' does not match expected cmdResult '%v'\n",
				actualCmdResult, expectedCmdResult)
			return false
		}
	}
}
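// Illustrative usage (sketch): poll a command once a second for up to a minute. The
// model.Cmd construction is hypothetical beyond the CommandLineString field used above.
func exampleRetryUntilResult() {
	cmd := model.Cmd{CommandLineString: "kubectl get pods"}
	expected := model.CmdResult{ExitCode: 0}
	gomega.Expect(RetryUntilResult(cmd, expected, 1*time.Second, 1*time.Minute)).To(gomega.BeTrue())
}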
// DumpAndReturnTagging takes an array of tags and obtains the hex image IDs, dumps
// them to ginkgo for printing, and then returns them.
func DumpAndReturnTagging(tags []string) []string {
	hexIDs, err := GetImageIDForTags(tags)
	o.Expect(err).NotTo(o.HaveOccurred())
	for i, hexID := range hexIDs {
		g.By(fmt.Sprintf("tag %s hex id %s ", tags[i], hexID))
	}
	return hexIDs
}
// assertMembersInReplica waits for the given database pod to become a member of the
// replica set and verifies the member count reported by MongoDB.
func assertMembersInReplica(oc *exutil.CLI, db exutil.Database, expectedReplicas int) {
	isMasterCmd := "printjson(db.isMaster())"
	getReplicaHostsCmd := "print(db.isMaster().hosts.length)"

	// the pod is running, but we need to wait until it is really ready (has become a
	// member of the replica set)
	err := exutil.WaitForQueryOutputSatisfies(oc, db, 1*time.Minute, false, isMasterCmd, func(commandOutput string) bool {
		return commandOutput != ""
	})
	o.Expect(err).ShouldNot(o.HaveOccurred())

	isMasterOutput, _ := db.Query(oc, isMasterCmd)
	fmt.Fprintf(g.GinkgoWriter, "DEBUG: Output of the db.isMaster() command: %v\n", isMasterOutput)

	members, err := db.Query(oc, getReplicaHostsCmd)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(members).Should(o.Equal(strconv.Itoa(expectedReplicas)))
}
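// Illustrative usage (sketch): verify every pod in a MongoDB replica set reports the
// expected member count; the label value and replica count are hypothetical.
func exampleAssertMembersInReplica(oc *exutil.CLI) {
	podNames := waitForNumberOfPodsWithLabel(oc, 3, "mongodb-replica")
	for _, podName := range podNames {
		assertMembersInReplica(oc, db.NewMongoDB(podName), 3)
	}
}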
// CorruptImage is a helper that tags the image to be corrupted, the corruptee, as the
// corruptor string, resulting in the wrong image being used when the corruptee is
// referenced later on; strategy is for ginkgo debug; ginkgo error checking leveraged.
func CorruptImage(corruptee, corruptor, strategy string) {
	g.By(fmt.Sprintf("\n%s Calling docker tag to corrupt %s builder image %s by tagging %s", time.Now().Format(time.RFC850), strategy, corruptee, corruptor))

	cerr := TagImage(corruptee, corruptor)

	g.By(fmt.Sprintf("\n%s Tagging %s to %s complete with err %v", time.Now().Format(time.RFC850), corruptor, corruptee, cerr))
	o.Expect(cerr).NotTo(o.HaveOccurred())
}
// CorruptImage is a helper that tags the image to be corrupted, the corruptee, as the
// corruptor string, resulting in the wrong image being used when the corruptee is
// referenced later on; ginkgo error checking leveraged.
func CorruptImage(corruptee, corruptor string) {
	g.By(fmt.Sprintf("Calling docker tag to corrupt builder image %s by tagging %s", corruptee, corruptor))

	cerr := TagImage(corruptee, corruptor)

	g.By(fmt.Sprintf("Tagging %s to %s complete with err %v", corruptor, corruptee, cerr))
	o.Expect(cerr).NotTo(o.HaveOccurred())

	VerifyImagesSame(corruptee, corruptor, "image corruption")
}
// immediateInteractionWithJenkins sends a single HTTP request to the Jenkins server
// and asserts the expected status code.
func immediateInteractionWithJenkins(uri, method string, body io.Reader, status int) {
	req, err := http.NewRequest(method, uri, body)
	o.Expect(err).NotTo(o.HaveOccurred())

	if body != nil {
		req.Header.Set("Content-Type", "application/xml")
		// jenkins will return 417 if we have an expect hdr
		req.Header.Del("Expect")
	}
	req.SetBasicAuth("admin", "password")

	client := &http.Client{}
	resp, err := client.Do(req)
	o.Expect(err).NotTo(o.HaveOccurred())
	defer resp.Body.Close()
	o.Expect(resp.StatusCode).To(o.BeEquivalentTo(status))
}
// ResetImage is a helper that allows the programmer to undo any corruption performed
// by CorruptImage; ginkgo error checking leveraged.
func ResetImage(tags map[string]string) {
	fmt.Fprintf(g.GinkgoWriter, "Calling docker tag to reset images\n")
	for corruptedTag, goodTag := range tags {
		err := TagImage(corruptedTag, goodTag)
		fmt.Fprintf(g.GinkgoWriter, "Reset for %s to %s complete with err %v\n", corruptedTag, goodTag, err)
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
// ResetImage is a helper that allows the programmer to undo any corruption performed
// by CorruptImage; ginkgo error checking leveraged.
func ResetImage(tags map[string]string) {
	g.By("Calling docker tag to reset images")
	for corruptedTag, goodTag := range tags {
		err := TagImage(corruptedTag, goodTag)
		g.By(fmt.Sprintf("Reset for %s to %s complete with err %v", corruptedTag, goodTag, err))
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
// GenerateRandomString generates a random string of len 25.
func GenerateRandomString() string {
	size := 25
	randBytes := make([]byte, size)
	for i := range randBytes {
		randInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(alphanum))))
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		randBytes[i] = alphanum[randInt.Uint64()]
	}
	return string(randBytes)
}
// NewRef creates a jenkins reference from an OC client.
func NewRef(oc *exutil.CLI) *JenkinsRef {
	g.By("get ip and port for jenkins service")
	serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("get admin password")
	password := GetAdminPassword(oc)
	o.Expect(password).ShouldNot(o.BeEmpty())

	return &JenkinsRef{
		oc:        oc,
		host:      serviceIP,
		port:      port,
		namespace: oc.Namespace(),
		password:  password,
	}
}
// buildAndPushImage tries to build an image. The image is stored as an image stream tag <name>:<tag>.
// If shouldBeDenied is true, a build will be expected to fail with a denied error.
func buildAndPushImage(oc *exutil.CLI, namespace, name, tag string, shouldBeDenied bool) {
	istName := name
	if tag != "" {
		istName += ":" + tag
	}

	g.By(fmt.Sprintf("building an image %q", istName))

	bc, err := oc.REST().BuildConfigs(namespace).Get(name)
	if err == nil {
		g.By(fmt.Sprintf("changing build config %s to store result into %s", name, istName))
		o.Expect(bc.Spec.BuildSpec.Output.To.Kind).To(o.Equal("ImageStreamTag"))
		bc.Spec.BuildSpec.Output.To.Name = istName
		_, err := oc.REST().BuildConfigs(namespace).Update(bc)
		o.Expect(err).NotTo(o.HaveOccurred())
	} else {
		g.By(fmt.Sprintf("creating a new build config %s with output to %s", name, istName))
		err = oc.Run("new-build").Args(
			"--binary",
			"--name", name,
			"--to", istName).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	}

	tempDir, err := ioutil.TempDir("", "name-build")
	o.Expect(err).NotTo(o.HaveOccurred())

	err = createRandomBlob(path.Join(tempDir, "data"), imageSize)
	o.Expect(err).NotTo(o.HaveOccurred())
	err = ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte("FROM scratch\nCOPY data /data\n"), 0644)
	o.Expect(err).NotTo(o.HaveOccurred())

	err = oc.Run("start-build").Args(name, "--from-dir", tempDir, "--wait").Execute()
	if shouldBeDenied {
		o.Expect(err).To(o.HaveOccurred())
		out, err := oc.Run("logs").Args("bc/" + name).Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(out).Should(o.MatchRegexp("(?i)Failed to push image:.*denied"))
	} else {
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
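// Illustrative usage (sketch): exercise an image size/quota limit by pushing twice;
// the image name and tag values are hypothetical.
func exampleBuildAndPushImage(oc *exutil.CLI) {
	// the first push is expected to be admitted
	buildAndPushImage(oc, oc.Namespace(), "sized", "v1", false)
	// the second is expected to be rejected with a denied error
	buildAndPushImage(oc, oc.Namespace(), "sized", "v2", true)
}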
// checkSingleIdle idles the given resource and verifies that its scale drops to zero
// and that the service endpoints carry the expected idling annotations.
func checkSingleIdle(oc *exutil.CLI, idlingFile string, resources map[string][]string, resourceName string, kind string) {
	g.By("Idling the service")
	_, err := oc.Run("idle").Args("--resource-names-file", idlingFile).Output()
	o.Expect(err).ToNot(o.HaveOccurred())

	g.By("Ensuring the scale is zero")
	objName := resources[resourceName][0]
	replicas, err := oc.Run("get").Args(resourceName+"/"+objName, "--output=jsonpath=\"{.spec.replicas}\"").Output()
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(replicas).To(o.ContainSubstring("0"))

	g.By("Fetching the service and checking the annotations are present")
	serviceName := resources["service"][0]
	endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.IdledAtAnnotation))
	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.UnidleTargetAnnotation))

	g.By("Checking the idled-at time")
	idledAtAnnotation := endpoints.Annotations[unidlingapi.IdledAtAnnotation]
	idledAtTime, err := time.Parse(time.RFC3339, idledAtAnnotation)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(idledAtTime).To(o.BeTemporally("~", time.Now(), 5*time.Minute))

	g.By("Checking the idle targets")
	unidleTargetAnnotation := endpoints.Annotations[unidlingapi.UnidleTargetAnnotation]
	unidleTargets := []unidlingapi.RecordedScaleReference{}
	err = json.Unmarshal([]byte(unidleTargetAnnotation), &unidleTargets)
	o.Expect(err).ToNot(o.HaveOccurred())
	o.Expect(unidleTargets).To(o.Equal([]unidlingapi.RecordedScaleReference{
		{
			Replicas: 2,
			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
				Name: resources[resourceName][0],
				Kind: kind,
			},
		},
	}))
}
// DumpImage is a helper that inspects the named image and logs some ginkgo debug.
func DumpImage(name string) {
	fmt.Fprintf(g.GinkgoWriter, "Calling docker inspect for image %s\n", name)
	image, err := InspectImage(name)
	o.Expect(err).NotTo(o.HaveOccurred())
	if image != nil {
		fmt.Fprintf(g.GinkgoWriter, "Returned docker image %+v\n", image)
		fmt.Fprintf(g.GinkgoWriter, "Container config %+v and user %s\n", image.ContainerConfig, image.ContainerConfig.User)
		if image.Config != nil {
			fmt.Fprintf(g.GinkgoWriter, "Image config %+v and user %s\n", image.Config, image.Config.User)
		}
	}
}
// VerifyImagesDifferent will take the two supplied image tags and see if they reference
// different hexadecimal image IDs; strategy is for ginkgo debug; ginkgo error checking
// leveraged.
func VerifyImagesDifferent(comp1, comp2, strategy string) {
	tag1 := comp1 + ":latest"
	tag2 := comp2 + ":latest"
	comps := []string{tag1, tag2}
	retIDs, gerr := GetImageIDForTags(comps)

	o.Expect(gerr).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("%s compare image - %s, %s, %s, %s", strategy, tag1, tag2, retIDs[0], retIDs[1]))
	o.Ω(len(retIDs[0])).Should(o.BeNumerically(">", 0))
	o.Ω(len(retIDs[1])).Should(o.BeNumerically(">", 0))
	o.Ω(retIDs[0] != retIDs[1]).Should(o.BeTrue())
}
// getAdminPassword extracts the Jenkins admin password from the deployment config's
// environment, falling back to the default when it is not set.
func getAdminPassword(oc *exutil.CLI) string {
	envs, err := oc.Run("set").Args("env", "dc/jenkins", "--list").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	kvs := strings.Split(envs, "\n")
	for _, kv := range kvs {
		if strings.HasPrefix(kv, "JENKINS_PASSWORD=") {
			s := strings.Split(kv, "=")
			fmt.Fprintf(g.GinkgoWriter, "\nJenkins admin password %s\n", s[1])
			return s[1]
		}
	}
	return "password"
}
// VerifyImagesSame will take the two supplied image tags and see if they reference the
// same hexadecimal image ID; strategy is for debug.
func VerifyImagesSame(comp1, comp2, strategy string) {
	tag1 := comp1 + ":latest"
	tag2 := comp2 + ":latest"
	comps := []string{tag1, tag2}
	retIDs, gerr := GetImageIDForTags(comps)

	o.Expect(gerr).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("\n%s %s compare image - %s, %s, %s, %s", time.Now().Format(time.RFC850), strategy, tag1, tag2, retIDs[0], retIDs[1]))
	o.Ω(len(retIDs[0])).Should(o.BeNumerically(">", 0))
	o.Ω(len(retIDs[1])).Should(o.BeNumerically(">", 0))
	o.Ω(retIDs[0]).Should(o.Equal(retIDs[1]))
}
// VerifyImagesSame will take the two supplied image tags and see if they reference the
// same hexadecimal image ID; strategy is for debug.
func VerifyImagesSame(comp1, comp2, strategy string) {
	tag1 := comp1 + ":latest"
	tag2 := comp2 + ":latest"
	comps := []string{tag1, tag2}
	retIDs, gerr := GetImageIDForTags(comps)

	o.Expect(gerr).NotTo(o.HaveOccurred())
	fmt.Fprintf(g.GinkgoWriter, "%s compare image - %s, %s, %s, %s\n", strategy, tag1, tag2, retIDs[0], retIDs[1])
	o.Ω(len(retIDs[0])).Should(o.BeNumerically(">", 0))
	o.Ω(len(retIDs[1])).Should(o.BeNumerically(">", 0))
	o.Ω(retIDs[0]).Should(o.Equal(retIDs[1]))
}