func (t *testRunner) runParallelGinkgoSuite(suite testSuite) bool {
	completions := make(chan bool)

	for cpu := 0; cpu < t.numCPU; cpu++ {
		config.GinkgoConfig.ParallelNode = cpu + 1
		config.GinkgoConfig.ParallelTotal = t.numCPU

		args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
		args = append(args, t.commonArgs(suite)...)

		buffer := new(bytes.Buffer)
		t.reports = append(t.reports, buffer)

		go t.runCommand(suite.path, args, nil, buffer, completions)
	}

	passed := true

	for cpu := 0; cpu < t.numCPU; cpu++ {
		passed = <-completions && passed
	}

	for _, report := range t.reports {
		fmt.Print(report.String())
	}

	os.Stdout.Sync()

	return passed
}
func (t *TestRunner) runAndStreamParallelGinkgoSuite() bool {
	completions := make(chan bool)
	writers := make([]*logWriter, t.numCPU)

	for cpu := 0; cpu < t.numCPU; cpu++ {
		config.GinkgoConfig.ParallelNode = cpu + 1
		config.GinkgoConfig.ParallelTotal = t.numCPU

		ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)

		writers[cpu] = newLogWriter(fmt.Sprintf("[%d]", cpu+1))

		go t.run(ginkgoArgs, nil, writers[cpu], completions)
	}

	passed := true

	for cpu := 0; cpu < t.numCPU; cpu++ {
		passed = <-completions && passed
	}

	for _, writer := range writers {
		writer.Close()
	}

	os.Stdout.Sync()

	return passed
}
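// newLogWriter is not defined in this section, and its signature differs
// between the versions above (a prefix string here; a writer plus node index
// in the later versions). Below is a minimal sketch of the shape both uses
// imply, assuming "sync" and "io" are imported alongside the packages already
// used here; all names are illustrative, not Ginkgo's actual API. The idea is
// to tag every complete line with a per-node prefix so interleaved output
// from parallel nodes stays attributable.
type logWriterSketch struct {
	mu     sync.Mutex
	out    io.Writer
	prefix string
	buf    []byte
}

func newLogWriterSketch(out io.Writer, node int) *logWriterSketch {
	return &logWriterSketch{out: out, prefix: fmt.Sprintf("[%d] ", node)}
}

func (w *logWriterSketch) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.buf = append(w.buf, p...)
	// Emit only complete lines, each carrying the node prefix.
	for {
		i := bytes.IndexByte(w.buf, '\n')
		if i < 0 {
			break
		}
		fmt.Fprintf(w.out, "%s%s\n", w.prefix, w.buf[:i])
		w.buf = w.buf[i+1:]
	}
	return len(p), nil
}

func (w *logWriterSketch) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	// Flush any trailing partial line so nothing is lost on shutdown.
	if len(w.buf) > 0 {
		fmt.Fprintf(w.out, "%s%s\n", w.prefix, w.buf)
		w.buf = nil
	}
	return nil
}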
func runSuite(suite suite) bool {
	completions := make(chan bool)

	if runMagicI {
		runGoI(suite.path, race)
	}

	if suite.isGinkgo {
		for cpu := 0; cpu < numCPU; cpu++ {
			config.GinkgoConfig.ParallelNode = cpu + 1
			config.GinkgoConfig.ParallelTotal = numCPU

			args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
			if race {
				args = append([]string{"--race"}, args...)
			}
			if cover {
				args = append([]string{"--cover", "--coverprofile=" + suite.packageName + ".coverprofile"}, args...)
			}

			var writer io.Writer
			if numCPU > 1 {
				buffer := new(bytes.Buffer)
				reports = append(reports, buffer)
				writer = buffer
			} else {
				writer = os.Stdout
			}

			go runCommand(suite.path, args, writer, completions)
		}

		passed := true

		for cpu := 0; cpu < numCPU; cpu++ {
			passed = <-completions && passed
		}

		if numCPU > 1 {
			printToScreen()
		}

		return passed
	} else {
		args := []string{}
		if race {
			args = append(args, "--race")
		}
		if cover {
			args = append(args, "--cover", "--coverprofile="+suite.packageName+".out")
		}

		go runCommand(suite.path, args, os.Stdout, completions)

		return <-completions
	}
}
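// runGoI above is not defined in this section; its name and the runMagicI
// guard suggest it shells out to "go test -i" to precompile the suite's
// dependencies (a historical optimization; the -i flag is deprecated in
// modern Go toolchains). A minimal sketch under that assumption, using
// os/exec; the name runGoISketch is illustrative:
func runGoISketch(path string, race bool) {
	args := []string{"test", "-i"}
	if race {
		args = append(args, "-race")
	}
	cmd := exec.Command("go", args...)
	cmd.Dir = path
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Printf("Failed to compile %s: %s\n", path, err)
	}
}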
func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
	completions := make(chan RunResult)
	writers := make([]*logWriter, t.numCPU)

	server, err := remote.NewServer(t.numCPU)
	if err != nil {
		panic("Failed to start parallel spec server")
	}

	server.Start()
	defer server.Close()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		config.GinkgoConfig.ParallelNode = cpu + 1
		config.GinkgoConfig.ParallelTotal = t.numCPU
		config.GinkgoConfig.SyncHost = server.Address()

		ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)

		writers[cpu] = newLogWriter(os.Stdout, cpu+1)

		cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)

		server.RegisterAlive(cpu+1, func() bool {
			if cmd.ProcessState == nil {
				return true
			}
			return !cmd.ProcessState.Exited()
		})

		go t.run(cmd, completions)
	}

	res := PassingRunResult()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		res = res.Merge(<-completions)
	}

	for _, writer := range writers {
		writer.Close()
	}

	os.Stdout.Sync()

	if t.cover || t.coverPkg != "" {
		t.combineCoverprofiles()
	}

	return res
}
func (t *TestRunner) runParallelGinkgoSuite() RunResult {
	result := make(chan bool)
	completions := make(chan RunResult)
	writers := make([]*logWriter, t.numCPU)
	reports := make([]*bytes.Buffer, t.numCPU)

	stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
	aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)

	server, err := remote.NewServer(t.numCPU)
	if err != nil {
		panic("Failed to start parallel spec server")
	}

	server.RegisterReporters(aggregator)
	server.Start()
	defer server.Close()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		config.GinkgoConfig.ParallelNode = cpu + 1
		config.GinkgoConfig.ParallelTotal = t.numCPU
		config.GinkgoConfig.SyncHost = server.Address()
		config.GinkgoConfig.StreamHost = server.Address()

		ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)

		reports[cpu] = &bytes.Buffer{}
		writers[cpu] = newLogWriter(reports[cpu], cpu+1)

		cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)

		server.RegisterAlive(cpu+1, func() bool {
			if cmd.ProcessState == nil {
				return true
			}
			return !cmd.ProcessState.Exited()
		})

		go t.run(cmd, completions)
	}

	res := PassingRunResult()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		res = res.Merge(<-completions)
	}

	//all test processes are done, at this point
	//we should be able to wait for the aggregator to tell us that it's done
	select {
	case <-result:
		fmt.Println("")
	case <-time.After(time.Second):
		//the aggregator never got back to us! something must have gone wrong
		fmt.Println(`
	 -------------------------------------------------------------------
	|                                                                   |
	|  Ginkgo timed out waiting for all parallel nodes to report back!  |
	|                                                                   |
	 -------------------------------------------------------------------
`)

		os.Stdout.Sync()

		for _, writer := range writers {
			writer.Close()
		}

		for _, report := range reports {
			fmt.Print(report.String())
		}

		os.Stdout.Sync()
	}

	if t.cover || t.coverPkg != "" {
		t.combineCoverprofiles()
	}

	return res
}
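// RunResult, PassingRunResult, and Merge are assumed by the two runners
// above but not defined in this section. A plausible minimal shape,
// illustrative rather than Ginkgo's actual type: a merged result passes
// only if every node passed, and programmatic focus on any node carries
// over to the merged result.
type RunResult struct {
	Passed               bool
	HasProgrammaticFocus bool
}

func PassingRunResult() RunResult {
	return RunResult{Passed: true}
}

func (r RunResult) Merge(other RunResult) RunResult {
	return RunResult{
		Passed:               r.Passed && other.Passed,
		HasProgrammaticFocus: r.HasProgrammaticFocus || other.HasProgrammaticFocus,
	}
}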
func (t *TestRunner) runSerialGinkgoSuite() RunResult {
	ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
}
func (t *testRunner) runSerialGinkgoSuite(suite testSuite) bool {
	args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	args = append(args, t.commonArgs(suite)...)
	return t.runCommand(suite.path, args, nil, os.Stdout, nil)
}
func (t *testRunner) runAndStreamParallelGinkgoSuite(suite testSuite) bool {
	result := make(chan bool, 0)

	stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
	aggregator := aggregator.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)

	server, err := remote.NewServer()
	if err != nil {
		panic("Failed to start parallel spec server")
	}
	server.RegisterReporters(aggregator)
	server.Start()
	serverAddress := server.Address()

	completions := make(chan bool)

	for cpu := 0; cpu < t.numCPU; cpu++ {
		config.GinkgoConfig.ParallelNode = cpu + 1
		config.GinkgoConfig.ParallelTotal = t.numCPU

		args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
		args = append(args, t.commonArgs(suite)...)

		env := os.Environ()
		env = append(env, fmt.Sprintf("GINKGO_REMOTE_REPORTING_SERVER=%s", serverAddress))

		buffer := new(bytes.Buffer)
		t.reports = append(t.reports, buffer)

		go t.runCommand(suite.path, args, env, buffer, completions)
	}

	for cpu := 0; cpu < t.numCPU; cpu++ {
		<-completions
	}

	//all test processes are done, at this point
	//we should be able to wait for the aggregator to tell us that it's done
	var passed = false
	select {
	case passed = <-result:
		//the aggregator is done and can tell us whether or not the suite passed
	case <-time.After(time.Second):
		//the aggregator never got back to us! something must have gone wrong
		fmt.Println("")
		fmt.Println("")
		fmt.Println(" ----------------------------------------------------------- ")
		fmt.Println(" |                                                         | ")
		fmt.Println(" | Ginkgo timed out waiting for all parallel nodes to end! | ")
		fmt.Println(" |            Here is some salvaged output:                | ")
		fmt.Println(" |                                                         | ")
		fmt.Println(" ----------------------------------------------------------- ")
		fmt.Println("")
		fmt.Println("")

		os.Stdout.Sync()

		time.Sleep(time.Second)

		for _, report := range t.reports {
			fmt.Print(report.String())
		}

		os.Stdout.Sync()
	}

	server.Stop()

	return passed
}
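// A note on the env var above: GINKGO_REMOTE_REPORTING_SERVER is the contract
// this runner assumes with each child process. Presumably the child detects
// the variable and installs a forwarding reporter that streams spec events to
// the aggregator's server at that address, which is what lets the parent
// reassemble one coherent report from t.numCPU buffered runs.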
func (t *TestRunner) runSerialGinkgoSuite() bool {
	ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	return t.run(ginkgoArgs, nil, os.Stdout, nil)
}
func (t *testRunner) runSerialGinkgoSuite(suite *testsuite.TestSuite) bool {
	ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	return t.runCompiledSuite(suite, ginkgoArgs, nil, os.Stdout, nil)
}
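// A hedged sketch of how a caller might pick between the RunResult-returning
// entry points above: the serial runner for a single node, the streaming
// runner when live interleaved output is wanted, and the aggregating runner
// otherwise. The t.stream field and the function name are assumptions for
// illustration, not a confirmed part of the runner's state.
func (t *TestRunner) runGinkgoSuiteSketch() RunResult {
	if t.numCPU <= 1 {
		return t.runSerialGinkgoSuite()
	}
	if t.stream {
		return t.runAndStreamParallelGinkgoSuite()
	}
	return t.runParallelGinkgoSuite()
}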