// Fail reports a failure through func Fail(failureMessage string, msgAndArgs ...interface{}) bool { message := messageFromMsgAndArgs(msgAndArgs...) if len(message) > 0 { ginkgo.Fail(fmt.Sprintf("\r%s\r\tLocation:\t%s\n\r\tError:\t\t%s\n\r\tMessages:\t%s\n\r", getWhitespaceString(), CallerInfo(), failureMessage, message)) } else { ginkgo.Fail(fmt.Sprintf("\r%s\r\tLocation:\t%s\n\r\tError:\t\t%s\n\r", getWhitespaceString(), CallerInfo(), failureMessage)) } return false }
// ExitCode returns the exit code of the process, or -1 if the process has not // exited. It can be used with the gexec.Exit matcher. func (r *Runner) ExitCode() int { if r.sessionReady == nil { ginkgo.Fail(fmt.Sprintf("ginkgomon.Runner '%s' improperly created without using New", r.Name)) } <-r.sessionReady return r.session.ExitCode() }
// Buffer returns a gbytes.Buffer, for use with the gbytes.Say matcher. func (r *Runner) Buffer() *gbytes.Buffer { if r.sessionReady == nil { ginkgo.Fail(fmt.Sprintf("ginkgomon.Runner '%s' improperly created without using New", r.Name)) } <-r.sessionReady return r.session.Buffer() }
// Takes a slice of errors and asserts that there were none provided // When failing, appends error messages together on newlines and // provides a count of how many errors were passed in func AssertNoErrors(errs []error) { if len(errs) > 0 { var concatErrors string for _, err := range errs { concatErrors = concatErrors + err.Error() + "\n" } ginkgo.Fail(fmt.Sprintf("Expected no errors, but there were %d.\n%s", len(errs), concatErrors)) } }
func Cmd(cmd string) string { ginkgo.GinkgoWriter.Write([]byte(fmt.Sprintf("Running command [%s]\n", cmd))) out, err := exec.Command("bash", "-c", cmd).Output() if err != nil { ginkgo.GinkgoWriter.Write(out) ginkgo.GinkgoWriter.Write(err.(*exec.ExitError).Stderr) ginkgo.Fail("Command failed") } return strings.TrimSpace(string(out)) }
func freePort() string { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { ginkgo.Fail(err.Error(), 1) } defer listener.Close() address := listener.Addr().String() addressParts := strings.SplitN(address, ":", 2) return addressParts[1] }
// Finds the pod running Jenkins func FindJenkinsPod(oc *exutil.CLI) *kapi.Pod { pods, err := exutil.GetDeploymentConfigPods(oc, "jenkins") o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) if pods == nil || pods.Items == nil { g.Fail("No pods matching jenkins deploymentconfig in namespace " + oc.Namespace()) } o.ExpectWithOffset(1, len(pods.Items)).To(o.Equal(1)) return &pods.Items[0] }
func Invoke(runner ifrit.Runner) ifrit.Process { process := ifrit.Background(runner) select { case <-process.Ready(): case err := <-process.Wait(): ginkgo.Fail(fmt.Sprintf("process failed to start: %s", err)) } return process }
func AssertPanic(panicValue interface{}, callback func()) { defer func() { value := recover() if value != panicValue { panic(value) } }() callback() ginkgo.Fail("Expected a panic, but got none!") }
func fetchDatabases() (*db.DB, *gobble.DB) { env := application.NewEnvironment() sqlDB, err := sql.Open("mysql", env.DatabaseURL) if err != nil { ginkgo.Fail(err.Error(), 1) } database := db.NewDatabase(sqlDB, db.Config{DefaultTemplatePath: path.Join(env.RootPath, "templates", "default.json")}) gobbleDB := gobble.NewDatabase(sqlDB) return database, gobbleDB }
func (f *FakeCC) ServeHTTP(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(ginkgo.GinkgoWriter, "[FAKE CC] Handling request: %s\n", r.URL.Path) endpoints := map[string]func(http.ResponseWriter, *http.Request){ "/staging/droplets/.*/upload": f.handleDropletUploadRequest, } for pattern, handler := range endpoints { re := regexp.MustCompile(pattern) matches := re.FindStringSubmatch(r.URL.Path) if matches != nil { handler(w, r) return } } ginkgo.Fail(fmt.Sprintf("[FAKE CC] No matching endpoint handler for %s", r.URL.Path)) }
// ServeHTTP plays back the configured request handlers in order, one per
// incoming request. Once all handlers are exhausted, further requests either
// receive UnhandledRequestStatusCode (when AllowUnhandledRequests is set) or
// fail the current spec. Every handled request is recorded for later
// inspection.
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Serialize requests so handler playback and bookkeeping stay consistent.
	s.writeLock.Lock()
	defer s.writeLock.Unlock()
	if s.calls < len(s.requestHandlers) {
		// Dispatch to the next queued handler.
		s.requestHandlers[s.calls](w, req)
	} else {
		if s.AllowUnhandledRequests {
			// Drain and close the body before responding; the read result
			// (and any read error) is deliberately discarded.
			ioutil.ReadAll(req.Body)
			req.Body.Close()
			w.WriteHeader(s.UnhandledRequestStatusCode)
		} else {
			// ginkgo.Fail panics, so in this branch the request is NOT
			// recorded and the call counter is NOT advanced below.
			ginkgo.Fail(fmt.Sprintf("Received unhandled request:\n%s", format.Object(req, 1)))
		}
	}
	s.receivedRequests = append(s.receivedRequests, req)
	s.calls++
}
// Setup provisions the test context as the admin user: it creates a quota
// definition, the regular test user, an organization, and binds the quota to
// that organization.
func (context *ConfiguredContext) Setup() {
	cf.AsUser(context.AdminUserContext(), func() {
		definition := quotaDefinition{
			Name:                    context.quotaDefinitionName,
			TotalServices:           "100",
			TotalRoutes:             "1000",
			MemoryLimit:             "10G",
			NonBasicServicesAllowed: true,
		}

		args := []string{
			"create-quota",
			context.quotaDefinitionName,
			"-m", definition.MemoryLimit,
			"-r", definition.TotalRoutes,
			"-s", definition.TotalServices,
		}
		if definition.NonBasicServicesAllowed {
			args = append(args, "--allow-paid-service-plans")
		}
		Eventually(cf.Cf(args...), 30).Should(Exit(0))

		// Creating the user succeeds either when the CLI prints "OK" or when
		// the user already exists in UAA; anything else within 30 seconds is
		// a failure.
		createUserSession := cf.Cf("create-user", context.regularUserUsername, context.regularUserPassword)
		select {
		case <-createUserSession.Out.Detect("OK"):
		case <-createUserSession.Out.Detect("scim_resource_already_exists"):
		case <-time.After(30 * time.Second):
			ginkgo.Fail("Failed to create user")
		}
		// Release the Detect goroutines regardless of which case fired.
		createUserSession.Out.CancelDetects()

		Eventually(cf.Cf("create-org", context.organizationName), 30).Should(Exit(0))
		Eventually(cf.Cf("set-quota", context.organizationName, definition.Name), 30).Should(Exit(0))
	})
}
// Fatalf satisfies gomock's TestReporter by translating a fatal mock error
// into a Ginkgo failure. The offset of 3 skips the gomock call frames so the
// failure is attributed to the test's own call site.
func (g GomockTestReporter) Fatalf(format string, args ...interface{}) {
	message := fmt.Sprintf(format, args...)
	ginkgo.Fail(message, 3)
}
func (h *TestHandler) logError(msg string, args ...interface{}) { println(fmt.Sprintf(msg, args...)) ginkgo.Fail("failed") }
func testPruneImages(oc *exutil.CLI, schemaVersion int) { var mediaType string switch schemaVersion { case 1: mediaType = schema1.MediaTypeManifest case 2: mediaType = schema2.MediaTypeManifest default: g.Fail(fmt.Sprintf("unexpected schema version %d", schemaVersion)) } oc.SetOutputDir(exutil.TestContext.OutputDir) outSink := g.GinkgoWriter cleanUp := cleanUpContainer{} defer tearDownPruneImagesTest(oc, &cleanUp) dClient, err := testutil.NewDockerClient() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("build two images using Docker and push them as schema %d", schemaVersion)) imgPruneName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true) o.Expect(err).NotTo(o.HaveOccurred()) cleanUp.imageNames = append(cleanUp.imageNames, imgPruneName) pruneSize, err := getRegistryStorageSize(oc) o.Expect(err).NotTo(o.HaveOccurred()) imgKeepName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true) o.Expect(err).NotTo(o.HaveOccurred()) cleanUp.imageNames = append(cleanUp.imageNames, imgKeepName) keepSize, err := getRegistryStorageSize(oc) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(pruneSize < keepSize).To(o.BeTrue()) g.By(fmt.Sprintf("ensure uploaded image is of schema %d", schemaVersion)) imgPrune, err := oc.AsAdmin().Client().Images().Get(imgPruneName) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(imgPrune.DockerImageManifestMediaType).To(o.Equal(mediaType)) imgKeep, err := oc.AsAdmin().Client().Images().Get(imgKeepName) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(imgKeep.DockerImageManifestMediaType).To(o.Equal(mediaType)) g.By("prune the first image uploaded (dry-run)") output, err := oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0").Output() g.By("verify images, layers and configs about to be pruned") o.Expect(output).To(o.ContainSubstring(imgPruneName)) if schemaVersion == 1 { 
o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID)) } else { o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID)) } for _, layer := range imgPrune.DockerImageLayers { if !strings.Contains(output, layer.Name) { o.Expect(output).To(o.ContainSubstring(layer.Name)) } } o.Expect(output).NotTo(o.ContainSubstring(imgKeepName)) o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID)) for _, layer := range imgKeep.DockerImageLayers { if !strings.Contains(output, layer.Name) { o.Expect(output).NotTo(o.ContainSubstring(layer.Name)) } } noConfirmSize, err := getRegistryStorageSize(oc) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(noConfirmSize).To(o.Equal(keepSize)) g.By("prune the first image uploaded (confirm)") output, err = oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0", "--confirm").Output() g.By("verify images, layers and configs about to be pruned") o.Expect(output).To(o.ContainSubstring(imgPruneName)) if schemaVersion == 1 { o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID)) } else { o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID)) } for _, layer := range imgPrune.DockerImageLayers { if !strings.Contains(output, layer.Name) { o.Expect(output).To(o.ContainSubstring(layer.Name)) } } o.Expect(output).NotTo(o.ContainSubstring(imgKeepName)) o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID)) for _, layer := range imgKeep.DockerImageLayers { if !strings.Contains(output, layer.Name) { o.Expect(output).NotTo(o.ContainSubstring(layer.Name)) } } confirmSize, err := getRegistryStorageSize(oc) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("confirming storage size: sizeOfKeepImage=%d <= sizeAfterPrune=%d < beforePruneSize=%d", imgKeep.DockerImageMetadata.Size, confirmSize, keepSize)) o.Expect(confirmSize >= imgKeep.DockerImageMetadata.Size).To(o.BeTrue()) o.Expect(confirmSize < 
keepSize).To(o.BeTrue()) g.By(fmt.Sprintf("confirming pruned size: sizeOfPruneImage=%d <= (sizeAfterPrune=%d - sizeBeforePrune=%d)", imgPrune, keepSize, confirmSize)) o.Expect(imgPrune.DockerImageMetadata.Size <= keepSize-confirmSize).To(o.BeTrue()) }
package compose_test

import (
	"os"
	"testing"

	"github.com/crowley-io/macchiato"
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestCompose wires Gomega's fail handler into Ginkgo and runs the suite
// through macchiato.
func TestCompose(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	macchiato.RunSpecs(t, "Compose Suite")
}

// pwd holds the working directory, captured once before the suite runs.
var pwd string

var _ = ginkgo.BeforeSuite(func() {
	wd, err := os.Getwd()
	if err != nil {
		ginkgo.Fail(err.Error())
	}
	pwd = wd
})
err error builds [4]string ) g.By("starting multiple builds") for i := range builds { builds[i], err = oc.Run("start-build").Args("sample-build").Output() o.Expect(err).NotTo(o.HaveOccurred()) } g.By("deleting the buildconfig") err = oc.Run("delete").Args("bc/sample-build").Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for builds to clear") err = wait.Poll(3*time.Second, 3*time.Minute, func() (bool, error) { out, err := oc.Run("get").Args("-o", "name", "builds").Output() o.Expect(err).NotTo(o.HaveOccurred()) if len(out) == 0 { return true, nil } return false, nil }) if err == wait.ErrWaitTimeout { g.Fail("timed out waiting for builds to clear") } }) }) })