// StartBuildFromJSON creates a build config from the supplied JSON file (not a template)
// and then starts a build, using the supplied oc/cli client for both operations;
// ginkgo error checking included.
func StartBuildFromJSON(jsonFile, buildPrefix string, oc *CLI) {
	err := oc.Run("create").Args("-f", jsonFile).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	out, berr := oc.Run("start-build").Args(buildPrefix).Output()
	fmt.Fprintf(g.GinkgoWriter, "\nstart-build output:\n%s\n", out)
	o.Expect(berr).NotTo(o.HaveOccurred())
}
func doTest(bldPrefix, debugStr string, same bool, oc *exutil.CLI) {
	// corrupt the builder image
	exutil.CorruptImage(fullImageName, corruptor)

	if bldPrefix == buildPrefixFC || bldPrefix == buildPrefixTC {
		// grant access to the custom build strategy
		err := oc.AsAdmin().Run("adm").Args("policy", "add-cluster-role-to-user", "system:build-strategy-custom", oc.Username()).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		defer func() {
			err = oc.AsAdmin().Run("adm").Args("policy", "remove-cluster-role-from-user", "system:build-strategy-custom", oc.Username()).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())
		}()
	}

	// kick off the app/lang build and verify the builder image accordingly
	_, err := exutil.StartBuildAndWait(oc, bldPrefix)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	if same {
		exutil.VerifyImagesSame(fullImageName, corruptor, debugStr)
	} else {
		exutil.VerifyImagesDifferent(fullImageName, corruptor, debugStr)
	}

	// reset corrupted tagging for next test
	exutil.ResetImage(resetData)
	// dump tags/hexids for debug
	_, err = exutil.DumpAndReturnTagging(tags)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
}
// await waits for the build number on the Jenkins job to change and for that
// build to finish. Returns an error if the timeout expires.
func (jmon *JobMon) await(timeout time.Duration) error {
	err := wait.Poll(10*time.Second, timeout, func() (bool, error) {
		buildNumber, err := jmon.j.getJobBuildNumber(jmon.jobName, time.Minute)
		o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

		ginkgolog("Checking build number for job %q current[%v] vs last[%v]", jmon.jobName, buildNumber, jmon.lastBuildNumber)
		if buildNumber == jmon.lastBuildNumber {
			return false, nil
		}

		if jmon.buildNumber == "" {
			jmon.buildNumber = buildNumber
		}
		body, status, err := jmon.j.getResource("job/%s/%s/api/json?depth=1", jmon.jobName, jmon.buildNumber)
		o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
		o.ExpectWithOffset(1, status).To(o.Equal(200))

		body = strings.ToLower(body)
		if strings.Contains(body, "\"building\":true") {
			ginkgolog("Jenkins job %q still building:\n%s\n\n", jmon.jobName, body)
			return false, nil
		}
		if strings.Contains(body, "\"result\":null") {
			ginkgolog("Jenkins job %q still building result:\n%s\n\n", jmon.jobName, body)
			return false, nil
		}
		ginkgolog("Jenkins job %q build complete:\n%s\n\n", jmon.jobName, body)
		return true, nil
	})
	return err
}
// patchTemplate finds BuildConfigs in a template, changes their source type to Binary, and removes all triggers.
func patchTemplate(filename string, outDir string) string {
	inputJson, err := ioutil.ReadFile(filename)
	o.Expect(err).ToNot(o.HaveOccurred())

	var template map[string]interface{}
	err = json.Unmarshal(inputJson, &template)
	o.Expect(err).ToNot(o.HaveOccurred())

	for _, obj := range template["objects"].([]interface{}) {
		bc := obj.(map[string]interface{})
		if kind := bc["kind"].(string); kind != "BuildConfig" {
			continue
		}
		spec := bc["spec"].(map[string]interface{})
		spec["triggers"] = []interface{}{}

		source := spec["source"].(map[string]interface{})
		source["type"] = "Binary"
		delete(source, "git")
		delete(source, "contextDir")
	}

	outputJson, err := json.MarshalIndent(template, "", " ")
	o.Expect(err).ToNot(o.HaveOccurred())

	basename := filepath.Base(filename)
	outputFile := filepath.Join(outDir, basename)
	err = ioutil.WriteFile(outputFile, outputJson, 0644)
	o.Expect(err).ToNot(o.HaveOccurred())

	return outputFile
}
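// A minimal usage sketch for patchTemplate (the fixture name and output
// directory are hypothetical, not taken from any specific test): patch the
// template, then create its objects so the now-binary BuildConfigs can be
// driven explicitly by the test.
func examplePatchTemplate(oc *exutil.CLI) {
	patched := patchTemplate(exutil.FixturePath("testdata", "application-template.json"), os.TempDir())
	err := oc.Run("create").Args("-f", patched).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
}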
// Validate create/delete of objects
func validateCreateDelete(create bool, key, out string, err error) {
	ginkgolog("\nOBJ: %s\n", out)
	if create {
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(strings.Contains(out, key)).To(o.BeTrue())
	} else {
		o.Expect(err).To(o.HaveOccurred())
	}
}
// jenkinsJobBytes returns the contents of a Jenkins job fixture with instances
// of "PROJECT_NAME" replaced by the specified namespace.
func jenkinsJobBytes(filename, namespace string) []byte {
	pre := exutil.FixturePath("fixtures", filename)
	post := exutil.ArtifactPath(filename)
	err := exutil.VarSubOnFile(pre, post, "PROJECT_NAME", namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	data, err := ioutil.ReadFile(post)
	o.Expect(err).NotTo(o.HaveOccurred())
	return data
}
// Returns the content of a Jenkins job XML file. Instances of the
// string "PROJECT_NAME" are replaced with the specified namespace.
func (j *JenkinsRef) readJenkinsJob(filename, namespace string) string {
	pre := exutil.FixturePath("testdata", "jenkins-plugin", filename)
	post := exutil.ArtifactPath(filename)
	err := exutil.VarSubOnFile(pre, post, "PROJECT_NAME", namespace)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	data, err := ioutil.ReadFile(post)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	return string(data)
}
// MakeContentFile creates a temporary file with content to upload to S3.
func MakeContentFile(content string) string {
	tmpFile, err := ioutil.TempFile("", "s3cli-test-content")
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	_, err = tmpFile.Write([]byte(content))
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	err = tmpFile.Close()
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	return tmpFile.Name()
}
// MakeConfigFile creates a config file from an S3Cli config struct.
func MakeConfigFile(cfg *config.S3Cli) string {
	cfgBytes, err := json.Marshal(cfg)
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	tmpFile, err := ioutil.TempFile("", "s3cli-test")
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	_, err = tmpFile.Write(cfgBytes)
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	err = tmpFile.Close()
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	return tmpFile.Name()
}
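// A minimal usage sketch for the two fixture helpers above; the zero-value
// config and the cleanup calls are assumptions, not part of the helpers.
func exampleS3CliFixtures() {
	configPath := MakeConfigFile(&config.S3Cli{}) // a real test would populate the config fields
	contentPath := MakeContentFile("expected object body")
	defer os.Remove(configPath)
	defer os.Remove(contentPath)
	// configPath and contentPath can now be passed to the s3cli binary under test.
}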
// ReadJenkinsJobUsingVars returns the content of a Jenkins job XML file. Instances of the
// string "PROJECT_NAME" are replaced with the specified namespace.
// Variables named in the vars map will also be replaced with their
// corresponding value.
func (j *JenkinsRef) ReadJenkinsJobUsingVars(filename, namespace string, vars map[string]string) string {
	pre := exutil.FixturePath("testdata", "jenkins-plugin", filename)
	post := exutil.ArtifactPath(filename)

	if vars == nil {
		vars = map[string]string{}
	}
	vars["PROJECT_NAME"] = namespace
	err := exutil.VarSubOnFile(pre, post, vars)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	data, err := ioutil.ReadFile(post)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	return string(data)
}
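// A usage sketch for ReadJenkinsJobUsingVars; the job file name and the extra
// variable are hypothetical.
func exampleReadJob(j *JenkinsRef, oc *exutil.CLI) string {
	return j.ReadJenkinsJobUsingVars("build-job.xml", oc.Namespace(), map[string]string{
		"JOB_SUFFIX": "demo",
	})
}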
// assertEnvVars verifies that builds with the given prefix carry the expected
// start-build environment variables.
func assertEnvVars(oc *exutil.CLI, buildPrefix string, varsToFind map[string]string) {
	buildList, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
	o.Expect(err).NotTo(o.HaveOccurred())

	// Ensure that expected start-build environment variables were injected
	for _, build := range buildList.Items {
		ginkgolog("Found build: %q", build.GetName())
		if strings.HasPrefix(build.GetName(), buildPrefix) {
			envs := []kapi.EnvVar{}
			if build.Spec.Strategy.DockerStrategy != nil && build.Spec.Strategy.DockerStrategy.Env != nil {
				envs = build.Spec.Strategy.DockerStrategy.Env
			} else if build.Spec.Strategy.SourceStrategy != nil && build.Spec.Strategy.SourceStrategy.Env != nil {
				envs = build.Spec.Strategy.SourceStrategy.Env
			} else {
				continue
			}

			for k, v := range varsToFind {
				found := false
				for _, env := range envs {
					ginkgolog("Found %s=%s in build %s", env.Name, env.Value, build.GetName())
					if k == env.Name && v == env.Value {
						found = true
						break
					}
				}
				o.ExpectWithOffset(1, found).To(o.BeTrue())
			}
		}
	}
}
// RetryUntilResult runs the provided cmd repeatedly, once every period,
// up to the supplied timeout, until the cmd result matches the supplied
// expectedCmdResult.
func RetryUntilResult(command model.Cmd, expectedCmdResult model.CmdResult, period, timeout time.Duration) bool {
	var actualCmdResult model.CmdResult

	fmt.Fprintf(ginkgo.GinkgoWriter, "Waiting up to %d seconds for `%s` to return expected cmdResult %s...\n",
		int(timeout.Seconds()), command.CommandLineString, expectedCmdResult.String())

	tck := time.NewTicker(period)
	tmr := time.NewTimer(timeout)
	defer tck.Stop()
	defer tmr.Stop()

	for {
		select {
		case <-tck.C:
			sess, err := StartCmd(command)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			sessWait := sess.Wait()
			actualCmdResult = model.CmdResult{
				Out:      sessWait.Out.Contents(),
				Err:      sessWait.Err.Contents(),
				ExitCode: sessWait.ExitCode(),
			}
			if actualCmdResult.Satisfies(expectedCmdResult) {
				return true
			}
		case <-tmr.C:
			fmt.Fprintf(ginkgo.GinkgoWriter, "FAIL: Actual cmdResult '%v' does not match expected cmdResult '%v'\n",
				actualCmdResult, expectedCmdResult)
			return false
		}
	}
}
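// The ticker-versus-timer select loop above is a general polling pattern. A
// minimal stand-alone sketch of the same idea (the condition function is
// illustrative and not part of these test utilities):
func pollUntil(condition func() bool, period, timeout time.Duration) bool {
	tick := time.NewTicker(period)
	expire := time.NewTimer(timeout)
	defer tick.Stop()
	defer expire.Stop()
	for {
		select {
		case <-tick.C:
			if condition() {
				return true // condition met before the deadline
			}
		case <-expire.C:
			return false // deadline reached first
		}
	}
}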
// Post sends a POST to the Jenkins server. Returns response body and status code or an error.
func (j *JenkinsRef) Post(reqBody io.Reader, resourcePathFormat, contentType string, a ...interface{}) (string, int, error) {
	uri := j.BuildURI(resourcePathFormat, a...)

	req, err := http.NewRequest("POST", uri, reqBody)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	// http://stackoverflow.com/questions/17714494/golang-http-request-results-in-eof-errors-when-making-multiple-requests-successi
	req.Close = true

	if reqBody != nil {
		req.Header.Set("Content-Type", contentType)
		req.Header.Del("Expect") // jenkins will return 417 if we have an expect hdr
	}
	req.SetBasicAuth("admin", j.password)

	client := &http.Client{}
	ginkgolog("Posting to Jenkins resource: %q", uri)
	resp, err := client.Do(req)
	if err != nil {
		return "", 0, fmt.Errorf("Error posting request to %q: %v", uri, err)
	}
	defer resp.Body.Close()

	status := resp.StatusCode
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", 0, fmt.Errorf("Error reading Post response body %q: %v", uri, err)
	}
	return string(body), status, nil
}
// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
// and then confirm the application is serving an expected string value.
func NewSampleRepoTest(c SampleRepoConfig) func() {
	return func() {
		defer g.GinkgoRecover()
		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())

		g.JustBeforeEach(func() {
			g.By("Waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.Describe("Building "+c.repoName+" app from new-app", func() {
			g.It(fmt.Sprintf("should build a "+c.repoName+" image and run it in a pod"), func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				exutil.CheckOpenShiftNamespaceImageStreams(oc)
				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
				err := oc.Run("new-app").Args("-f", c.templateURL).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				// all the templates automatically start a build.
				buildName := c.buildConfigName + "-1"

				g.By("expecting the build is in the Complete phase")
				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
				if err != nil {
					exutil.DumpBuildLogs(c.buildConfigName, oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting the app deployment to be complete")
				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				if len(c.dbDeploymentConfigName) > 0 {
					g.By("expecting the db deployment to be complete")
					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
					o.Expect(err).NotTo(o.HaveOccurred())
				}

				g.By("expecting the service is available")
				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(serviceIP).ShouldNot(o.Equal(""))

				g.By("expecting an endpoint is available")
				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("verifying string from app request")
				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, time.Duration(30*time.Second))
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
			})
		})
	}
}
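// A registration sketch for NewSampleRepoTest. The field names come from how
// the function body reads the config; the values below are purely illustrative.
var _ = g.Describe("[image_ecosystem] sample repo", NewSampleRepoTest(SampleRepoConfig{
	repoName:             "nodejs",
	templateURL:          "https://example.com/nodejs-template.json",
	buildConfigName:      "nodejs-example",
	serviceName:          "nodejs-example",
	deploymentConfigName: "nodejs-example",
	expectedString:       "Welcome",
	appPath:              "/",
}))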
func assertMembersInReplica(oc *exutil.CLI, db exutil.Database, expectedReplicas int) {
	isMasterCmd := "printjson(db.isMaster())"
	getReplicaHostsCmd := "print(db.isMaster().hosts.length)"

	// The pod is running, but we still need to wait until it is actually ready
	// (i.e., it has become a member of the replica set).
	err := exutil.WaitForQueryOutputSatisfies(oc, db, 1*time.Minute, false, isMasterCmd, func(commandOutput string) bool {
		return commandOutput != ""
	})
	o.Expect(err).ShouldNot(o.HaveOccurred())

	isMasterOutput, _ := db.Query(oc, isMasterCmd)
	fmt.Fprintf(g.GinkgoWriter, "DEBUG: Output of the db.isMaster() command: %v\n", isMasterOutput)

	members, err := db.Query(oc, getReplicaHostsCmd)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(members).Should(o.Equal(strconv.Itoa(expectedReplicas)))
}
// CorruptImage is a helper that tags the image to be corrupted (the corruptee) with the
// corruptor string, so the wrong image is used when the corruptee is referenced later on;
// strategy is included for ginkgo debug output; ginkgo error checking is leveraged.
func CorruptImage(corruptee, corruptor, strategy string) {
	g.By(fmt.Sprintf("\n%s Calling docker tag to corrupt %s builder image %s by tagging %s", time.Now().Format(time.RFC850), strategy, corruptee, corruptor))

	cerr := TagImage(corruptee, corruptor)

	g.By(fmt.Sprintf("\n%s Tagging %s to %s complete with err %v", time.Now().Format(time.RFC850), corruptor, corruptee, cerr))
	o.Expect(cerr).NotTo(o.HaveOccurred())
}
// DumpAndReturnTagging takes an array of tags and obtains the hex image IDs, dumps them to ginkgo for printing, and then returns them.
func DumpAndReturnTagging(tags []string) []string {
	hexIDs, err := GetImageIDForTags(tags)
	o.Expect(err).NotTo(o.HaveOccurred())
	for i, hexID := range hexIDs {
		g.By(fmt.Sprintf("tag %s hex id %s ", tags[i], hexID))
	}
	return hexIDs
}
// VerifyPlan returns an http.HandlerFunc that asserts the request body decodes to the expected plan.
func VerifyPlan(expectedPlan atc.Plan) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var plan atc.Plan
		err := json.NewDecoder(r.Body).Decode(&plan)
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		gomega.Expect(plan).To(testhelpers.MatchPlan(expectedPlan))
	}
}
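// A usage sketch for VerifyPlan with net/http/httptest; the request path and
// the way the plan JSON is posted are illustrative assumptions.
func exampleVerifyPlan(expectedPlan atc.Plan, planJSON []byte) {
	server := httptest.NewServer(VerifyPlan(expectedPlan))
	defer server.Close()

	resp, err := http.Post(server.URL+"/plan", "application/json", bytes.NewReader(planJSON))
	gomega.Expect(err).ToNot(gomega.HaveOccurred())
	defer resp.Body.Close()
}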
// CorruptImage is a helper that tags the image to be corrupted (the corruptee) with the
// corruptor string, so the wrong image is used when the corruptee is referenced later on;
// ginkgo error checking is leveraged.
func CorruptImage(corruptee, corruptor string) {
	g.By(fmt.Sprintf("Calling docker tag to corrupt builder image %s by tagging %s", corruptee, corruptor))

	cerr := TagImage(corruptee, corruptor)

	g.By(fmt.Sprintf("Tagging %s to %s complete with err %v", corruptor, corruptee, cerr))
	o.Expect(cerr).NotTo(o.HaveOccurred())
	VerifyImagesSame(corruptee, corruptor, "image corruption")
}
// StartJob triggers a named Jenkins job. The job can be monitored with the
// returned object.
func (j *JenkinsRef) StartJob(jobName string) *JobMon {
	lastBuildNumber, err := j.GetJobBuildNumber(jobName, time.Minute)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	jmon := &JobMon{
		j:               j,
		lastBuildNumber: lastBuildNumber,
		buildNumber:     "",
		jobName:         jobName,
	}

	ginkgolog("Current build number for [%s]: %q", jobName, jmon.lastBuildNumber)
	g.By(fmt.Sprintf("Starting jenkins job: %s", jobName))

	_, status, err := j.PostXML(nil, "job/%s/build?delay=0sec", jobName)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	o.ExpectWithOffset(1, status).To(o.Equal(201))

	return jmon
}
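// A usage sketch combining StartJob with the JobMon.await helper shown
// earlier; the job name and timeout are hypothetical.
func exampleStartAndAwait(j *JenkinsRef) {
	jmon := j.StartJob("sample-pipeline")
	err := jmon.await(10 * time.Minute)
	o.Expect(err).NotTo(o.HaveOccurred())
}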
// GenerateRandomString generates a random string of len 25.
func GenerateRandomString() string {
	size := 25
	randBytes := make([]byte, size)
	for i := range randBytes {
		randInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(alphanum))))
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		randBytes[i] = alphanum[randInt.Uint64()]
	}
	return string(randBytes)
}
// MakeRequest makes an HTTP request with the given method, checks the response status code,
// and returns the body as a string along with the response.
func MakeRequest(method, url, body string, expectedCode int) (string, *http.Response) {
	log15.Debug("MakeRequest", "verb", method, "url", url)

	// prepare the request body
	var bodyReader io.Reader
	if body != "" {
		bodyReader = strings.NewReader(body)
	}

	// make the request
	req, _ := http.NewRequest(method, url, bodyReader)
	resp, err := http.DefaultClient.Do(req)

	// check the response for basic validity
	gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
	gomega.Ω(resp.StatusCode).Should(gomega.Equal(expectedCode))
	gomega.Ω(resp.Header.Get("Content-Type")).ShouldNot(gomega.BeEmpty())

	// read the response body and close it to avoid leaking the connection
	defer resp.Body.Close()
	respBody, err := ioutil.ReadAll(resp.Body)
	gomega.Ω(err).ShouldNot(gomega.HaveOccurred())

	return string(respBody), resp
}
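// A usage sketch for MakeRequest against an httptest server; the handler and
// path are illustrative, not part of the helper.
func exampleMakeRequest() {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"status":"ok"}`))
	}))
	defer server.Close()

	respBody, _ := MakeRequest("GET", server.URL+"/health", "", 200)
	gomega.Ω(respBody).Should(gomega.ContainSubstring("ok"))
}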
// ResetImage is a helper that allows the programmer to undo any corruption performed by CorruptImage; ginkgo error checking leveraged.
func ResetImage(tags map[string]string) {
	fmt.Fprintf(g.GinkgoWriter, "Calling docker tag to reset images\n")
	for corruptedTag, goodTag := range tags {
		err := TagImage(corruptedTag, goodTag)
		fmt.Fprintf(g.GinkgoWriter, "Reset for %s to %s complete with err %v\n", corruptedTag, goodTag, err)
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
// immediateInteractionWithJenkins makes a single HTTP request to the Jenkins server
// and asserts that the expected status code is returned.
func immediateInteractionWithJenkins(uri, method string, body io.Reader, status int) {
	req, err := http.NewRequest(method, uri, body)
	o.Expect(err).NotTo(o.HaveOccurred())

	if body != nil {
		req.Header.Set("Content-Type", "application/xml")
		// jenkins will return 417 if we have an expect hdr
		req.Header.Del("Expect")
	}
	req.SetBasicAuth("admin", "password")

	client := &http.Client{}
	resp, err := client.Do(req)
	o.Expect(err).NotTo(o.HaveOccurred())
	defer resp.Body.Close()
	o.Expect(resp.StatusCode).To(o.BeEquivalentTo(status))
}
// ResetImage is a helper that allows the programmer to undo any corruption performed by CorruptImage; ginkgo error checking leveraged.
func ResetImage(tags map[string]string) {
	g.By("Calling docker tag to reset images")
	for corruptedTag, goodTag := range tags {
		err := TagImage(corruptedTag, goodTag)
		g.By(fmt.Sprintf("Reset for %s to %s complete with err %v", corruptedTag, goodTag, err))
		o.Expect(err).NotTo(o.HaveOccurred())
	}
}
func doTest(bldPrefix, debugStr string, same bool, oc *exutil.CLI) {
	// corrupt the builder image
	exutil.CorruptImage(fullImageName, corruptor)

	// kick off the app/lang build and verify the builder image accordingly
	_, err := exutil.StartBuildAndWait(oc, bldPrefix)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	if same {
		exutil.VerifyImagesSame(fullImageName, corruptor, debugStr)
	} else {
		exutil.VerifyImagesDifferent(fullImageName, corruptor, debugStr)
	}

	// reset corrupted tagging for next test
	exutil.ResetImage(resetData)
	// dump tags/hexids for debug
	_, err = exutil.DumpAndReturnTagging(tags)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
}
func tryToReadFromPod(oc *exutil.CLI, podName, expectedValue string) {
	// exclude the _id field from the output because it changes every time
	findCmd := "rs.slaveOk(); printjson(db.bar.find({}, {_id: 0}).toArray())"

	fmt.Fprintf(g.GinkgoWriter, "DEBUG: reading record from pod %v\n", podName)

	mongoPod := db.NewMongoDB(podName)
	result, err := mongoPod.Query(oc, findCmd)
	o.Expect(err).ShouldNot(o.HaveOccurred())
	o.Expect(result).Should(o.ContainSubstring(expectedValue))
}
// initExecPod stands up a simple pod which can be used for exec commands.
func initExecPod(oc *exutil.CLI) *kapi.Pod {
	// Create a running pod in which we can execute our commands
	oc.Run("run").Args("centos", "--image", "centos:7", "--command", "--", "sleep", "1800").Execute()

	var targetPod *kapi.Pod
	err := wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
		o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
		for _, p := range pods.Items {
			if strings.HasPrefix(p.Name, "centos") && !strings.Contains(p.Name, "deploy") && p.Status.Phase == "Running" {
				// copy the loop variable before taking its address
				pod := p
				targetPod = &pod
				return true, nil
			}
		}
		return false, nil
	})
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	return targetPod
}
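// A usage sketch for initExecPod; running curl inside the pod via `oc exec`
// and the target URL are assumptions about how a caller might use the helper.
func exampleExecPod(oc *exutil.CLI) {
	pod := initExecPod(oc)
	out, err := oc.Run("exec").Args(pod.Name, "--", "curl", "-s", "http://jenkins:8080/login").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	ginkgolog("exec output: %s", out)
}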
// Finds the pod running Jenkins
func FindJenkinsPod(oc *exutil.CLI) *kapi.Pod {
	pods, err := exutil.GetDeploymentConfigPods(oc, "jenkins")
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	if pods == nil || pods.Items == nil {
		g.Fail("No pods matching jenkins deploymentconfig in namespace " + oc.Namespace())
	}

	o.ExpectWithOffset(1, len(pods.Items)).To(o.Equal(1))
	return &pods.Items[0]
}
// NewRef creates a jenkins reference from an OC client
func NewRef(oc *exutil.CLI) *JenkinsRef {
	g.By("get ip and port for jenkins service")
	serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("get admin password")
	password := GetAdminPassword(oc)
	o.Expect(password).ShouldNot(o.BeEmpty())

	j := &JenkinsRef{
		oc:        oc,
		host:      serviceIP,
		port:      port,
		namespace: oc.Namespace(),
		password:  password,
	}
	return j
}