// Builds a single target
func buildTarget(tid int, state *core.BuildState, target *core.BuildTarget) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("%s", r)
			}
		}
	}()

	if err := target.CheckDependencyVisibility(state.Graph); err != nil {
		return err
	}
	// We can't do this check until build time, until then we don't know what all the outputs
	// will be (eg. for filegroups that collect outputs of other rules).
	if err := target.CheckDuplicateOutputs(); err != nil {
		return err
	}
	// This must run before we can leave this function successfully by any path.
	if target.PreBuildFunction != 0 {
		log.Debug("Running pre-build function for %s", target.Label)
		if err := parse.RunPreBuildFunction(tid, state, target); err != nil {
			return err
		}
		log.Debug("Finished pre-build function for %s", target.Label)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Preparing...")
	var postBuildOutput string
	if state.PrepareOnly && state.IsOriginalTarget(target.Label) {
		if target.IsFilegroup() {
			return fmt.Errorf("Filegroup targets don't have temporary directories")
		}
		if err := prepareDirectories(target); err != nil {
			return err
		}
		if err := prepareSources(state.Graph, target); err != nil {
			return err
		}
		return stopTarget
	}
	if !needsBuilding(state, target, false) {
		log.Debug("Not rebuilding %s, nothing's changed", target.Label)
		postBuildOutput = runPostBuildFunctionIfNeeded(tid, state, target)
		// If a post-build function ran it may modify the rule definition. In that case we
		// need to check again whether the rule needs building.
		if target.PostBuildFunction == 0 || !needsBuilding(state, target, true) {
			target.SetState(core.Reused)
			state.LogBuildResult(tid, target.Label, core.TargetCached, "Unchanged")
			return nil // Nothing needs to be done.
		}
		log.Debug("Rebuilding %s after post-build function", target.Label)
	}
	if target.IsFilegroup() {
		log.Debug("Building %s...", target.Label)
		return buildFilegroup(tid, state, target)
	}
	oldOutputHash, outputHashErr := OutputHash(target)
	if err := prepareDirectories(target); err != nil {
		return fmt.Errorf("Error preparing directories for %s: %s", target.Label, err)
	}

	retrieveArtifacts := func() bool {
		state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Checking cache...")
		if _, retrieved := retrieveFromCache(state, target); retrieved {
			log.Debug("Retrieved artifacts for %s from cache", target.Label)
			checkLicences(state, target)
			newOutputHash, err := calculateAndCheckRuleHash(state, target)
			if err != nil { // Most likely hash verification failure
				log.Warning("Error retrieving cached artifacts for %s: %s", target.Label, err)
				RemoveOutputs(target)
				return false
			} else if outputHashErr != nil || !bytes.Equal(oldOutputHash, newOutputHash) {
				target.SetState(core.Cached)
				state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached")
			} else {
				target.SetState(core.Unchanged)
				state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached (unchanged)")
			}
			return true // got from cache
		}
		return false
	}

	cacheKey := mustShortTargetHash(state, target)
	if state.Cache != nil {
		// Note that ordering here is quite sensitive since the post-build function can modify
		// what we would retrieve from the cache.
		if target.PostBuildFunction != 0 {
			log.Debug("Checking for post-build output file for %s in cache...", target.Label)
			if (*state.Cache).RetrieveExtra(target, cacheKey, core.PostBuildOutputFileName(target)) {
				postBuildOutput = runPostBuildFunctionIfNeeded(tid, state, target)
				if retrieveArtifacts() {
					return nil
				}
			}
		} else if retrieveArtifacts() {
			return nil
		}
	}
	if err := prepareSources(state.Graph, target); err != nil {
		return fmt.Errorf("Error preparing sources for %s: %s", target.Label, err)
	}

	state.LogBuildResult(tid, target.Label, core.TargetBuilding, target.BuildingDescription)
	replacedCmd := replaceSequences(target)
	env := core.StampedBuildEnvironment(state, target, false, cacheKey)
	log.Debug("Building target %s\nENVIRONMENT:\n%s\n%s", target.Label, strings.Join(env, "\n"), replacedCmd)
	out, combined, err := core.ExecWithTimeoutShell(target.TmpDir(), env, target.BuildTimeout, state.Config.Build.Timeout, state.ShowAllOutput, replacedCmd)
	if err != nil {
		if state.Verbosity >= 4 {
			return fmt.Errorf("Error building target %s: %s\nENVIRONMENT:\n%s\n%s\n%s",
				target.Label, err, strings.Join(env, "\n"), target.GetCommand(), combined)
		}
		return fmt.Errorf("Error building target %s: %s\n%s", target.Label, err, combined)
	}
	if target.PostBuildFunction != 0 {
		out = bytes.TrimSpace(out)
		sout := string(out)
		if postBuildOutput != "" {
			// We've already run the post-build function once, it's not safe to do it again (e.g. if adding new
			// targets, it will likely fail). Theoretically it should get the same output this time and hence would
			// do the same thing, since it had all the same inputs.
			// Obviously we can't be 100% sure that will be the case, so issue a warning if not...
			if postBuildOutput != sout {
				log.Warning("The build output for %s differs from what we got back from the cache earlier.\n"+
					"This implies your target's output is nondeterministic; Please won't re-run the\n"+
					"post-build function, which will *probably* be okay, but Please can't be sure.\n"+
					"See https://github.com/thought-machine/please/issues/113 for more information.", target.Label)
				log.Debug("Cached build output for %s: %s\n\nNew build output: %s", target.Label, postBuildOutput, sout)
			}
		} else if err := parse.RunPostBuildFunction(tid, state, target, sout); err != nil {
			return err
		}
		storePostBuildOutput(state, target, out)
	}
	checkLicences(state, target)
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Collecting outputs...")
	extraOuts, outputsChanged, err := moveOutputs(state, target)
	if err != nil {
		return fmt.Errorf("Error moving outputs for target %s: %s", target.Label, err)
	}
	if _, err = calculateAndCheckRuleHash(state, target); err != nil {
		return err
	}
	if outputsChanged {
		target.SetState(core.Built)
	} else {
		target.SetState(core.Unchanged)
	}
	if state.Cache != nil {
		state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Storing...")
		newCacheKey := mustShortTargetHash(state, target)
		(*state.Cache).Store(target, newCacheKey)
		if target.PostBuildFunction != 0 {
			// NB. Important this is stored with the earlier hash - if we calculate the hash
			// now, it might be different, and we could of course never retrieve it again.
			(*state.Cache).StoreExtra(target, cacheKey, core.PostBuildOutputFileName(target))
		}
		for _, out := range extraOuts {
			(*state.Cache).StoreExtra(target, newCacheKey, out)
		}
	}
	// Clean up the temporary directory once it's done.
	if state.CleanWorkdirs {
		if err := os.RemoveAll(target.TmpDir()); err != nil {
			log.Warning("Failed to remove temporary directory for %s: %s", target.Label, err)
		}
	}
	if outputsChanged {
		state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built")
	} else {
		state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built (unchanged)")
	}
	return nil
}
func addTarget(state *core.BuildState, i int) *core.BuildTarget {
	// Create and add a new target, with a parent and a dependency.
	target := core.NewBuildTarget(label(i))
	target.Command = "__FILEGROUP__" // Will mean it doesn't have to shell out to anything.
	target.SetState(core.Active)
	state.Graph.AddTarget(target)
	if i <= size {
		if i > 10 {
			target.Flakiness = i // Stash this here, will be useful later.
			target.PostBuildFunction = reflect.ValueOf(&postBuildFunc).Pointer()
		}
		if i < size/10 {
			for j := 0; j < 10; j++ {
				dep := label(i*10 + j)
				log.Info("Adding dependency %s -> %s", target.Label, dep)
				target.AddDependency(dep)
				state.Graph.AddDependency(target.Label, dep)
			}
		} else {
			// These are buildable now
			state.AddPendingBuild(target.Label, false)
		}
	}
	state.AddActiveTarget()
	return target
}
// prepareAndRunTest sets up a test directory and runs the test.
func prepareAndRunTest(tid int, state *core.BuildState, target *core.BuildTarget) (out []byte, err error) {
	if err = prepareTestDir(state.Graph, target); err != nil {
		state.LogBuildError(tid, target.Label, core.TargetTestFailed, err, "Failed to prepare test directory for %s: %s", target.Label, err)
		return []byte{}, err
	}
	return runPossiblyContainerisedTest(state, target)
}
func Test(tid int, state *core.BuildState, label core.BuildLabel) {
	state.LogBuildResult(tid, label, core.TargetTesting, "Testing...")
	startTime := time.Now()
	target := state.Graph.TargetOrDie(label)
	test(tid, state, label, target)
	metrics.Record(target, time.Since(startTime))
}
// setWindowTitle sets the title of the current shell window based on the current build state.
func setWindowTitle(state *core.BuildState) {
	if state == nil {
		SetWindowTitle("plz: finishing up")
	} else {
		SetWindowTitle(fmt.Sprintf("plz: %d / %d tasks, %3.1fs",
			state.NumDone(), state.NumActive(), time.Since(startTime).Seconds()))
	}
}
func printLines(state *core.BuildState, buildingTargets []buildingTarget, maxLines, cols int) {
	now := time.Now()
	printf("Building [%d/%d, %3.1fs]:\n", state.NumDone(), state.NumActive(), time.Since(startTime).Seconds())
	for i := 0; i < len(buildingTargets) && i < maxLines; i++ {
		buildingTargets[i].Lock()
		// Take a local copy of the structure, which isn't *that* big, so we don't need to retain the lock
		// while we do potentially blocking things like printing.
		target := buildingTargets[i].buildingTargetData
		buildingTargets[i].Unlock()
		label := target.Label.Parent()
		if target.Active {
			lprintf(cols, "${BOLD_WHITE}=> [%4.1fs] ${RESET}%s%s ${BOLD_WHITE}%s${ERASE_AFTER}\n",
				now.Sub(target.Started).Seconds(), target.Colour, label, target.Description)
		} else if time.Since(target.Finished).Seconds() < 0.5 {
			// Only display finished targets for half a second after they're done.
			duration := target.Finished.Sub(target.Started).Seconds()
			if target.Failed {
				lprintf(cols, "${BOLD_RED}=> [%4.1fs] ${RESET}%s%s ${BOLD_RED}Failed${ERASE_AFTER}\n",
					duration, target.Colour, label)
			} else if target.Cached {
				lprintf(cols, "${BOLD_WHITE}=> [%4.1fs] ${RESET}%s%s ${BOLD_GREY}%s${ERASE_AFTER}\n",
					duration, target.Colour, label, target.Description)
			} else {
				lprintf(cols, "${BOLD_WHITE}=> [%4.1fs] ${RESET}%s%s ${WHITE}%s${ERASE_AFTER}\n",
					duration, target.Colour, label, target.Description)
			}
		} else {
			printf("${BOLD_GREY}=|${ERASE_AFTER}\n")
		}
	}
	printf("${RESET}")
}
func printBuildResults(state *core.BuildState, duration float64) {
	// Count incrementality.
	totalBuilt := 0
	totalReused := 0
	for _, target := range state.Graph.AllTargets() {
		if target.State() == core.Built {
			totalBuilt++
		} else if target.State() == core.Reused {
			totalReused++
		}
	}
	incrementality := 100.0 // avoid NaN when nothing was built or reused
	if totalBuilt+totalReused > 0 {
		incrementality = 100.0 * float64(totalReused) / float64(totalBuilt+totalReused)
	}
	// Print this stuff so we always see it.
	printf("Build finished; total time %0.2fs, incrementality %.1f%%. Outputs:\n", duration, incrementality)
	for _, label := range state.ExpandOriginalTargets() {
		target := state.Graph.TargetOrDie(label)
		fmt.Printf("%s:\n", label)
		for _, result := range buildResult(target) {
			fmt.Printf("  %s\n", result)
		}
	}
}
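
// Worked example of the incrementality figure above (illustrative numbers only):
// with 900 targets reused and 100 rebuilt,
// incrementality = 100 * 900 / (900 + 100) = 90.0%.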
func findOriginalTask(state *core.BuildState, target core.BuildLabel) {
	if target.IsAllSubpackages() {
		for pkg := range utils.FindAllSubpackages(state.Config, target.PackageName, "") {
			state.AddOriginalTarget(core.NewBuildLabel(pkg, "all"))
		}
	} else {
		state.AddOriginalTarget(target)
	}
}
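
// Illustrative usage (the package names here are hypothetical): given a
// subpackages label such as //src/..., findOriginalTask registers the :all
// pseudo-target of every package beneath it, e.g.
//
//	findOriginalTask(state, core.ParseBuildLabel("//src/...", ""))
//	// roughly equivalent to state.AddOriginalTarget(core.NewBuildLabel("src/foo", "all")), etc.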
func MonitorState(state *core.BuildState, numThreads int, plainOutput, keepGoing, shouldBuild, shouldTest, shouldRun bool, traceFile string) bool {
	failedTargetMap := map[core.BuildLabel]error{}
	buildingTargets := make([]buildingTarget, numThreads)
	displayDone := make(chan interface{})
	stop := make(chan interface{})
	if !plainOutput {
		go display(state, &buildingTargets, stop, displayDone)
	}
	aggregatedResults := core.TestResults{}
	failedTargets := []core.BuildLabel{}
	failedNonTests := []core.BuildLabel{}
	for result := range state.Results {
		processResult(state, result, buildingTargets, &aggregatedResults, plainOutput, keepGoing,
			&failedTargets, &failedNonTests, failedTargetMap, traceFile != "")
	}
	if !plainOutput {
		stop <- struct{}{}
		<-displayDone
	}
	if traceFile != "" {
		writeTrace(traceFile)
	}
	duration := time.Since(startTime).Seconds()
	if len(failedNonTests) > 0 { // Something failed in the build step.
		if state.Verbosity > 0 {
			printFailedBuildResults(failedNonTests, failedTargetMap, duration)
		}
		// Die immediately and unsuccessfully; this avoids awkward interactions with
		// --failing_tests_ok later on.
		os.Exit(-1)
	}
	// Check that all the targets we wanted to build actually have been built.
	for _, label := range state.ExpandOriginalTargets() {
		if target := state.Graph.Target(label); target == nil {
			log.Fatalf("Target %s doesn't exist in build graph", label)
		} else if (state.NeedHashesOnly || state.PrepareOnly) && target.State() == core.Stopped {
			// Do nothing, we will output about this shortly.
		} else if shouldBuild && target.State() < core.Built && len(failedTargetMap) == 0 {
			cycle := graphCycleMessage(state.Graph, target)
			log.Fatalf("Target %s hasn't built but we have no pending tasks left.\n%s", label, cycle)
		}
	}
	if state.Verbosity > 0 && shouldBuild {
		if shouldTest { // Got to the test phase, report their results.
			printTestResults(state, aggregatedResults, failedTargets, duration)
		} else if state.NeedHashesOnly {
			printHashes(state, duration)
		} else if state.PrepareOnly {
			printTempDirs(state, duration)
		} else if !shouldRun { // Must be plz build or similar, report build outputs.
			printBuildResults(state, duration)
		}
	}
	return len(failedTargetMap) == 0
}
func printTempDirs(state *core.BuildState, duration float64) {
	fmt.Printf("Temp directories prepared, total time %0.2fs:\n", duration)
	for _, label := range state.ExpandOriginalTargets() {
		target := state.Graph.TargetOrDie(label)
		cmd := build.ReplaceSequences(target, target.GetCommand())
		env := core.BuildEnvironment(state, target, false)
		fmt.Printf("  %s: %s\n", label, target.TmpDir())
		fmt.Printf("    Command: %s\n", cmd)
		fmt.Printf("   Expanded: %s\n", os.Expand(cmd, core.ReplaceEnvironment(env)))
	}
}
func printHashes(state *core.BuildState, duration float64) {
	fmt.Printf("Hashes calculated, total time %0.2fs:\n", duration)
	for _, label := range state.ExpandOriginalTargets() {
		hash, err := build.OutputHash(state.Graph.TargetOrDie(label))
		if err != nil {
			fmt.Printf("  %s: cannot calculate: %s\n", label, err)
		} else {
			fmt.Printf("  %s: %s\n", label, hex.EncodeToString(hash))
		}
	}
}
// findOriginalTasks finds the original parse tasks for the original set of targets.
func findOriginalTasks(state *core.BuildState, targets []core.BuildLabel) {
	for _, target := range targets {
		if target == core.BuildLabelStdin {
			for label := range utils.ReadStdin() {
				findOriginalTask(state, core.ParseBuildLabels([]string{label})[0])
			}
		} else {
			findOriginalTask(state, target)
		}
	}
	state.TaskDone() // initial target adding counts as one.
}
func logTestSuccess(state *core.BuildState, tid int, label core.BuildLabel, results core.TestResults, coverage core.TestCoverage) {
	var description string
	tests := pluralise("test", results.NumTests)
	if results.Skipped != 0 || results.ExpectedFailures != 0 {
		failures := pluralise("failure", results.ExpectedFailures)
		description = fmt.Sprintf("%d %s passed. %d skipped, %d expected %s",
			results.NumTests, tests, results.Skipped, results.ExpectedFailures, failures)
	} else {
		description = fmt.Sprintf("%d %s passed.", results.NumTests, tests)
	}
	state.LogTestResult(tid, label, core.TargetTested, results, coverage, nil, description)
}
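
// Example of the description format produced above (illustrative numbers only):
//
//	"12 tests passed. 1 skipped, 2 expected failures"
//
// or, with no skips or expected failures, simply "12 tests passed."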
func Build(tid int, state *core.BuildState, label core.BuildLabel) {
	start := time.Now()
	target := state.Graph.TargetOrDie(label)
	target.SetState(core.Building)
	if err := buildTarget(tid, state, target); err != nil {
		if err == stopTarget {
			target.SetState(core.Stopped)
			state.LogBuildResult(tid, target.Label, core.TargetBuildStopped, "Build stopped")
			return
		}
		state.LogBuildError(tid, label, core.TargetBuildFailed, err, "Build failed: %s", err)
		if err := RemoveOutputs(target); err != nil {
			log.Errorf("Failed to remove outputs for %s: %s", target.Label, err)
		}
		target.SetState(core.Failed)
		return
	}
	metrics.Record(target, time.Since(start))
	// Add any of the reverse deps that are now fully built to the queue.
	for _, reverseDep := range state.Graph.ReverseDependencies(target) {
		if reverseDep.State() == core.Active && state.Graph.AllDepsBuilt(reverseDep) && reverseDep.SyncUpdateState(core.Active, core.Pending) {
			state.AddPendingBuild(reverseDep.Label, false)
		}
	}
	if target.IsTest && state.NeedTests {
		state.AddPendingTest(target.Label)
	}
	parse.UndeferAnyParses(state, target)
}
// please mimics the core build 'loop' from src/please.go.
func please(tid int, state *core.BuildState) {
	for {
		label, _, t := state.NextTask()
		switch t {
		case core.Stop, core.Kill:
			return
		case core.Build:
			Build(tid, state, label)
		default:
			panic(fmt.Sprintf("unexpected task type: %d", t))
		}
		state.TaskDone()
	}
}
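
// A minimal sketch of how the loop above might be driven in this benchmark: one
// goroutine per "thread", each draining tasks until NextTask returns Stop/Kill.
// runWorkers is a hypothetical helper (not part of the original source) and
// assumes a `sync` import.
func runWorkers(state *core.BuildState, numThreads int) {
	var wg sync.WaitGroup
	for tid := 0; tid < numThreads; tid++ {
		wg.Add(1)
		go func(tid int) {
			defer wg.Done()
			please(tid, state) // Runs until a Stop/Kill task is received.
		}(tid)
	}
	wg.Wait() // All workers have seen Stop/Kill; the task queue is drained.
}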
func updateTarget(state *core.BuildState, plainOutput bool, buildingTarget *buildingTarget, label core.BuildLabel,
	active bool, failed bool, cached bool, description string, err error, colour string) {
	updateTarget2(buildingTarget, label, active, failed, cached, description, err, colour)
	if plainOutput {
		if failed {
			log.Errorf("%s: %s", label.String(), description)
		} else if !active {
			tasks := pluralise(state.NumActive(), "task", "tasks")
			log.Notice("[%d/%s] %s: %s [%3.1fs]", state.NumDone(), tasks, label.String(), description,
				time.Since(buildingTarget.Started).Seconds())
		} else {
			log.Info("%s: %s", label.String(), description)
		}
	}
}
func please(tid int, state *core.BuildState, parsePackageOnly bool, include, exclude []string) {
	for {
		label, dependor, t := state.NextTask()
		switch t {
		case core.Stop, core.Kill:
			return
		case core.Parse, core.SubincludeParse:
			parse.Parse(tid, state, label, dependor, parsePackageOnly, include, exclude)
		case core.Build, core.SubincludeBuild:
			build.Build(tid, state, label)
		case core.Test:
			test.Test(tid, state, label)
		}
		state.TaskDone()
	}
}
// Return true if the rule needs building, false if the existing outputs are OK.
func needsBuilding(state *core.BuildState, target *core.BuildTarget, postBuild bool) bool {
	// Check the dependencies first, because they don't need any disk I/O.
	if target.NeedsTransitiveDependencies {
		if anyDependencyHasChanged(target) {
			return true // one of the transitive deps has changed, need to rebuild
		}
	} else {
		for _, dep := range target.Dependencies() {
			if dep.State() < core.Unchanged {
				log.Debug("Need to rebuild %s, %s has changed", target.Label, dep.Label)
				return true // dependency has just been rebuilt, do this too.
			}
		}
	}
	oldRuleHash, oldConfigHash, oldSourceHash := readRuleHashFile(ruleHashFileName(target), postBuild)
	if !bytes.Equal(oldConfigHash, state.Hashes.Config) {
		if len(oldConfigHash) == 0 {
			// Small nicety to make it a bit clearer what's going on.
			log.Debug("Need to build %s, outputs aren't there", target.Label)
		} else {
			log.Debug("Need to rebuild %s, config has changed (was %s, need %s)", target.Label, b64(oldConfigHash), b64(state.Hashes.Config))
		}
		return true
	}
	newRuleHash := RuleHash(target, false, postBuild)
	if !bytes.Equal(oldRuleHash, newRuleHash) {
		log.Debug("Need to rebuild %s, rule has changed (was %s, need %s)", target.Label, b64(oldRuleHash), b64(newRuleHash))
		return true
	}
	newSourceHash, err := sourceHash(state.Graph, target)
	if err != nil || !bytes.Equal(oldSourceHash, newSourceHash) {
		log.Debug("Need to rebuild %s, sources have changed (was %s, need %s)", target.Label, b64(oldSourceHash), b64(newSourceHash))
		return true
	}
	// Check the outputs of this rule exist. This would only happen if the user had
	// removed them but it's incredibly aggravating if you remove an output and the
	// rule won't rebuild itself.
	for _, output := range target.Outputs() {
		realOutput := path.Join(target.OutDir(), output)
		if !core.PathExists(realOutput) {
			log.Debug("Output %s doesn't exist for rule %s; will rebuild.", realOutput, target.Label)
			return true
		}
	}
	// Maybe we've forced a rebuild. Do this last; might be interesting to see if it needed building anyway.
	return state.ForceRebuild && (state.IsOriginalTarget(target.Label) || state.IsOriginalTarget(target.Label.Parent()))
}
// UndeferAnyParses un-defers the parsing of a package if it depended on some subinclude target being built.
func UndeferAnyParses(state *core.BuildState, target *core.BuildTarget) {
	pendingTargetMutex.Lock()
	defer pendingTargetMutex.Unlock()
	if m, present := deferredParses[target.Label.PackageName]; present {
		if s, present := m[target.Label.Name]; present {
			for _, deferredPackageName := range s {
				log.Debug("Undeferring parse of %s", deferredPackageName)
				state.AddPendingParse(
					core.BuildLabel{PackageName: deferredPackageName, Name: getDependingTarget(deferredPackageName)},
					core.BuildLabel{PackageName: deferredPackageName, Name: "_UNDEFER_"},
					false,
				)
			}
			delete(m, target.Label.Name) // Don't need this any more
		}
	}
}
// AddOriginalTargetsToCoverage adds empty coverage entries for any files covered by the
// original query that we haven't discovered through tests to the overall report.
// The coverage reports only contain information about files that were covered during
// tests, so it's important that we identify anything with zero coverage here.
// This is made trickier by attempting to reconcile coverage targets from languages like
// Java that don't preserve the original file structure, which requires a slightly fuzzy match.
func AddOriginalTargetsToCoverage(state *core.BuildState, includeAllFiles bool) {
	// First we collect all the source files from all relevant targets.
	allFiles := map[string]bool{}
	doneTargets := map[*core.BuildTarget]bool{}
	// Track the set of packages the user ran tests from; we only show coverage metrics from them.
	coveragePackages := map[string]bool{}
	for _, label := range state.OriginalTargets {
		coveragePackages[label.PackageName] = true
	}
	for _, label := range state.ExpandOriginalTargets() {
		collectAllFiles(state, state.Graph.TargetOrDie(label), coveragePackages, allFiles, doneTargets, includeAllFiles)
	}
	// Now merge the recorded coverage so far into them.
	recordedCoverage := state.Coverage
	state.Coverage = core.TestCoverage{Tests: recordedCoverage.Tests, Files: map[string][]core.LineCoverage{}}
	mergeCoverage(state, recordedCoverage, coveragePackages, allFiles, includeAllFiles)
}
func assertPendingBuilds(t *testing.T, state *core.BuildState, targets ...string) {
	state.Stop(1)
	pending := []core.BuildLabel{}
	for {
		label, _, typ := state.NextTask()
		if typ == core.Stop {
			break
		} else if typ != core.Build && typ != core.SubincludeBuild {
			t.Errorf("Unexpected non-build task")
		} else {
			pending = append(pending, label)
		}
	}
	expected := []core.BuildLabel{}
	for _, target := range targets {
		expected = append(expected, core.ParseBuildLabel(target, ""))
	}
	assert.Equal(t, expected, pending)
}
// calculateAndCheckRuleHash checks the output hash for a rule.
func calculateAndCheckRuleHash(state *core.BuildState, target *core.BuildTarget) ([]byte, error) {
	hash, err := OutputHash(target)
	if err != nil {
		return nil, err
	}
	if err = checkRuleHashes(target, hash); err != nil {
		if state.NeedHashesOnly && (state.IsOriginalTarget(target.Label) || state.IsOriginalTarget(target.Label.Parent())) {
			return nil, stopTarget
		} else if state.VerifyHashes {
			return nil, err
		} else {
			log.Warning("%s", err)
		}
	}
	if err := writeRuleHashFile(state, target); err != nil {
		return nil, fmt.Errorf("Attempting to create hash file: %s", err)
	}
	return hash, nil
}
func processResult(state *core.BuildState, result *core.BuildResult, buildingTargets []buildingTarget, aggregatedResults *core.TestResults,
	plainOutput bool, keepGoing bool, failedTargets, failedNonTests *[]core.BuildLabel, failedTargetMap map[core.BuildLabel]error, shouldTrace bool) {
	label := result.Label
	active := result.Status == core.PackageParsing || result.Status == core.TargetBuilding || result.Status == core.TargetTesting
	failed := result.Status == core.ParseFailed || result.Status == core.TargetBuildFailed || result.Status == core.TargetTestFailed
	cached := result.Status == core.TargetCached || result.Tests.Cached
	stopped := result.Status == core.TargetBuildStopped
	if shouldTrace {
		addTrace(result, buildingTargets[result.ThreadId].Label, active)
	}
	if failed && result.Tests.NumTests == 0 && result.Tests.Failed == 0 {
		result.Tests.NumTests = 1
		result.Tests.Failed = 1 // Ensure there's one test failure when there are no results to parse.
	}
	// Only aggregate test results the first time it finishes.
	if buildingTargets[result.ThreadId].Active && !active {
		aggregatedResults.Aggregate(result.Tests)
	}
	target := state.Graph.Target(label)
	updateTarget(state, plainOutput, &buildingTargets[result.ThreadId], label, active, failed, cached, result.Description, result.Err, targetColour(target))
	if failed {
		failedTargetMap[label] = result.Err
		// Don't stop here after a test failure; aggregate them for later.
		if !keepGoing && result.Status != core.TargetTestFailed {
			// Reset colour so the entire compiler error output doesn't appear red.
			log.Errorf("%s failed:${RESET}\n%s", result.Label, result.Err)
			state.KillAll()
		} else if !plainOutput { // plain output will have already logged this
			log.Errorf("%s failed: %s", result.Label, result.Err)
		}
		*failedTargets = append(*failedTargets, label)
		if result.Status != core.TargetTestFailed {
			*failedNonTests = append(*failedNonTests, label)
		}
	} else if stopped {
		failedTargetMap[result.Label] = nil
	} else if plainOutput && state.ShowTestOutput && result.Status == core.TargetTested {
		// If using interactive output we'll print it afterwards.
		printf("Finished test %s:\n%s\n", label, target.Results.Output)
	}
}
// Adds a single target to the build queue.
func addDep(state *core.BuildState, label, dependor core.BuildLabel, rescan, forceBuild bool) {
	// Stop at any package that's not loaded yet.
	if state.Graph.Package(label.PackageName) == nil {
		state.AddPendingParse(label, dependor, false)
		return
	}
	target := state.Graph.Target(label)
	if target == nil {
		log.Fatalf("Target %s (referenced by %s) doesn't exist\n", label, dependor)
	}
	if target.State() >= core.Active && !rescan && !forceBuild {
		return // Target is already tagged to be built and likely on the queue.
	}
	// Only do this bit if we actually need to build the target.
	if !target.SyncUpdateState(core.Inactive, core.Semiactive) && !rescan && !forceBuild {
		return
	}
	if state.NeedBuild || forceBuild {
		if target.SyncUpdateState(core.Semiactive, core.Active) {
			state.AddActiveTarget()
			if target.IsTest && state.NeedTests {
				state.AddActiveTarget() // Tests count twice if we're gonna run them.
			}
		}
	}
	// If this target has no deps, add it to the queue now, otherwise handle its deps.
	// Only add if we need to build targets (not if we're just parsing) but we might need it to parse...
	if target.State() == core.Active && state.Graph.AllDepsBuilt(target) {
		if target.SyncUpdateState(core.Active, core.Pending) {
			state.AddPendingBuild(label, dependor.IsAllTargets())
		}
		if !rescan {
			return
		}
	}
	for _, dep := range target.DeclaredDependencies() {
		// Check the require/provide stuff; we may need to add a different target.
		if len(target.Requires) > 0 {
			if depTarget := state.Graph.Target(dep); depTarget != nil && len(depTarget.Provides) > 0 {
				for _, provided := range depTarget.ProvideFor(target) {
					addDep(state, provided, label, false, forceBuild)
				}
				continue
			}
		}
		addDep(state, dep, label, false, forceBuild)
	}
}
// buildFilegroup runs the manual build steps for a filegroup rule.
// We don't force this to be done in bash to avoid errors with maximum command lengths,
// and it's actually quite fiddly to get just right there.
func buildFilegroup(tid int, state *core.BuildState, target *core.BuildTarget) error {
	if err := prepareDirectory(target.OutDir(), false); err != nil {
		return err
	}
	if err := os.RemoveAll(ruleHashFileName(target)); err != nil {
		return err
	}
	changed := false
	outDir := target.OutDir()
	for _, source := range target.Sources {
		fullPaths := source.FullPaths(state.Graph)
		for i, sourcePath := range source.LocalPaths(state.Graph) {
			outPath := path.Join(outDir, sourcePath)
			c, err := moveOutput(target, fullPaths[i], outPath, true)
			if err != nil {
				return err
			}
			changed = changed || c
		}
	}
	if target.HasLabel("py") && !target.IsBinary {
		// Pre-emptively create __init__.py files so the outputs can be loaded dynamically.
		// It's a bit cheeky to do non-essential language-specific logic, but this enables
		// a lot of relatively normal Python workflows.
		// Errors are deliberately ignored.
		createInitPy(outDir)
	}
	if _, err := calculateAndCheckRuleHash(state, target); err != nil {
		return err
	} else if changed {
		target.SetState(core.Built)
	} else {
		target.SetState(core.Unchanged)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built")
	return nil
}
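
// A rough sketch of the createInitPy behaviour described above, under the
// assumption that it simply drops an empty __init__.py into each output
// directory that lacks one; the project's real implementation may differ.
// createInitPySketch is a hypothetical name and assumes a `path/filepath` import.
func createInitPySketch(dir string) {
	filepath.Walk(dir, func(p string, info os.FileInfo, err error) error {
		if err == nil && info.IsDir() {
			initPy := path.Join(p, "__init__.py")
			if !core.PathExists(initPy) {
				if f, err := os.Create(initPy); err == nil {
					f.Close()
				}
			}
		}
		return nil // Errors are deliberately ignored, as noted above.
	})
}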
// Parse parses the package corresponding to a single build label. The label can be :all to add all targets in a package.
// It is not an error if the package has already been parsed.
//
// By default, after the package is parsed, any targets that are now needed for the build and ready
// to be built are queued, and any new packages are queued for parsing. When a specific label is requested
// this is straightforward, but when parsing for pseudo-targets like :all and ..., various flags affect it:
// If 'noDeps' is true, then no new packages will be added and no new targets queued.
// 'include' and 'exclude' refer to the labels of targets to be added. If 'include' is non-empty then only
// targets with at least one matching label are added. Any targets with a label in 'exclude' are not added.
func Parse(tid int, state *core.BuildState, label, dependor core.BuildLabel, noDeps bool, include, exclude []string) {
	defer func() {
		if r := recover(); r != nil {
			state.LogBuildError(tid, label, core.ParseFailed, fmt.Errorf("%s", r), "Failed to parse package")
		}
	}()
	// First see if this package already exists; once it's in the graph it will have been parsed.
	pkg := state.Graph.Package(label.PackageName)
	if pkg != nil {
		// Does exist, all we need to do is toggle on this target.
		activateTarget(state, pkg, label, dependor, noDeps, include, exclude)
		return
	}
	// We use the name here to signal undeferring of a package. If we get that we need to retry the package regardless.
	if dependor.Name != "_UNDEFER_" && !firstToParse(label, dependor) {
		// Check this again to avoid a potential race.
		if pkg = state.Graph.Package(label.PackageName); pkg != nil {
			activateTarget(state, pkg, label, dependor, noDeps, include, exclude)
		} else {
			log.Debug("Adding pending parse for %s", label)
		}
		return
	}
	// If we get here then it falls to us to parse this package.
	state.LogBuildResult(tid, label, core.PackageParsing, "Parsing...")
	pkg = parsePackage(state, label, dependor)
	if pkg == nil {
		state.LogBuildResult(tid, label, core.PackageParsed, "Deferred")
		return
	}
	// Now add any lurking pending targets for this package.
	pendingTargetMutex.Lock()
	pending := pendingTargets[label.PackageName]                       // Must be present.
	pendingTargets[label.PackageName] = map[string][]core.BuildLabel{} // Empty this to free memory, but leave a sentinel.
	pendingTargetMutex.Unlock()                                        // Nothing will look up this package in the map again.
	for targetName, dependors := range pending {
		for _, dependor := range dependors {
			lbl := core.BuildLabel{PackageName: label.PackageName, Name: targetName}
			activateTarget(state, pkg, lbl, dependor, noDeps, include, exclude)
		}
	}
	state.LogBuildResult(tid, label, core.PackageParsed, "Parsed")
}
// RunPreBuildFunction runs a pre-build callback function registered on a build target via pre_build = <...>.
//
// This is called before the target is built. It doesn't receive any output like the post-build one does but can
// be useful for other things; for example if you want to investigate a target's transitive labels to adjust
// its build command, you have to do that here (because in general the transitive dependencies aren't known
// when the rule is evaluated).
func RunPreBuildFunction(tid int, state *core.BuildState, target *core.BuildTarget) error {
	state.LogBuildResult(tid, target.Label, core.PackageParsing, fmt.Sprintf("Running pre-build function for %s", target.Label))
	pkg := state.Graph.Package(target.Label.PackageName)
	pkg.BuildCallbackMutex.Lock()
	defer pkg.BuildCallbackMutex.Unlock()
	if err := runPreBuildFunction(pkg, target); err != nil {
		state.LogBuildError(tid, target.Label, core.ParseFailed, err, "Failed pre-build function for %s", target.Label)
		return err
	}
	rescanDeps(state, pkg)
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, fmt.Sprintf("Finished pre-build function for %s", target.Label))
	return nil
}
func test(tid int, state *core.BuildState, label core.BuildLabel, target *core.BuildTarget) {
	startTime := time.Now()
	hash, err := build.RuntimeHash(state, target)
	if err != nil {
		state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to calculate target hash")
		return
	}
	// Check the cached output files if the target wasn't rebuilt.
	hash = core.CollapseHash(hash)
	hashStr := base64.RawURLEncoding.EncodeToString(hash)
	resultsFileName := fmt.Sprintf(".test_results_%s_%s", label.Name, hashStr)
	coverageFileName := fmt.Sprintf(".test_coverage_%s_%s", label.Name, hashStr)
	outputFile := path.Join(target.TestDir(), "test.results")
	coverageFile := path.Join(target.TestDir(), "test.coverage")
	cachedOutputFile := path.Join(target.OutDir(), resultsFileName)
	cachedCoverageFile := path.Join(target.OutDir(), coverageFileName)
	needCoverage := state.NeedCoverage && !target.NoTestOutput

	cachedTest := func() {
		log.Debug("Not re-running test %s; got cached results.", label)
		coverage := parseCoverageFile(target, cachedCoverageFile)
		results, err := parseTestResults(target, cachedOutputFile, true)
		target.Results.Duration = time.Since(startTime).Seconds()
		target.Results.Cached = true
		if err != nil {
			state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to parse cached test file %s", cachedOutputFile)
		} else if results.Failed > 0 {
			panic("Test results with failures shouldn't be cached.")
		} else {
			logTestSuccess(state, tid, label, results, coverage)
		}
	}

	moveAndCacheOutputFiles := func(results core.TestResults, coverage core.TestCoverage) bool {
		// Never cache test results when given arguments; the results may be incomplete.
		if len(state.TestArgs) > 0 {
			log.Debug("Not caching results for %s, we passed it arguments", label)
			return true
		}
		if err := moveAndCacheOutputFile(state, target, hash, outputFile, cachedOutputFile, resultsFileName, dummyOutput); err != nil {
			state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
			return false
		}
		if needCoverage || core.PathExists(coverageFile) {
			if err := moveAndCacheOutputFile(state, target, hash, coverageFile, cachedCoverageFile, coverageFileName, dummyCoverage); err != nil {
				state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test coverage file")
				return false
			}
		}
		for _, output := range target.TestOutputs {
			tmpFile := path.Join(target.TestDir(), output)
			outFile := path.Join(target.OutDir(), output)
			if err := moveAndCacheOutputFile(state, target, hash, tmpFile, outFile, output, ""); err != nil {
				state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
				return false
			}
		}
		return true
	}

	needToRun := func() bool {
		if target.State() == core.Unchanged && core.PathExists(cachedOutputFile) {
			// Output file exists already and appears to be valid. We might still need to rerun though
			// if the coverage files aren't available.
			if needCoverage && !core.PathExists(cachedCoverageFile) {
				return true
			}
			return false
		}
		// Check the cache for these artifacts.
		if state.Cache == nil {
			return true
		}
		cache := *state.Cache
		if !cache.RetrieveExtra(target, hash, resultsFileName) {
			return true
		}
		if needCoverage && !cache.RetrieveExtra(target, hash, coverageFileName) {
			return true
		}
		for _, output := range target.TestOutputs {
			if !cache.RetrieveExtra(target, hash, output) {
				return true
			}
		}
		return false
	}

	// Don't cache when doing multiple runs, presumably the user explicitly wants to check it.
	if state.NumTestRuns <= 1 && !needToRun() {
		cachedTest()
		return
	}
	// Remove any cached test result file.
	if err := RemoveCachedTestFiles(target); err != nil {
		state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to remove cached test files")
		return
	}
	numSucceeded := 0
	numFlakes := 0
	numRuns, successesRequired := calcNumRuns(state.NumTestRuns, target.Flakiness)
	var resultErr error
	resultMsg := ""
	var coverage core.TestCoverage
	for i := 0; i < numRuns && numSucceeded < successesRequired; i++ {
		if numRuns > 1 {
			state.LogBuildResult(tid, label, core.TargetTesting, fmt.Sprintf("Testing (%d of %d)...", i+1, numRuns))
		}
		out, err := prepareAndRunTest(tid, state, target)
		duration := time.Since(startTime).Seconds()
		startTime = time.Now() // reset this for next time

		// This is all pretty involved; there are lots of different possibilities of what could happen.
		// The contract is that the test must return zero on success or non-zero on failure (Unix FTW).
		// If it's successful, it must produce a parseable file named "test.results" in its temp folder
		// (alternatively, this can be a directory containing parseable files).
		// Tests can opt out of the file requirement individually, in which case they're judged only
		// by their return value.
		// But of course, we still have to consider all the alternatives here and handle them nicely.
		target.Results.Output = string(out)
		if err != nil && target.Results.Output == "" {
			target.Results.Output = err.Error()
		}
		target.Results.TimedOut = err == context.DeadlineExceeded
		coverage = parseCoverageFile(target, coverageFile)
		target.Results.Duration += duration
		if !core.PathExists(outputFile) {
			if err == nil && target.NoTestOutput {
				target.Results.NumTests++
				target.Results.Passed++
				numSucceeded++
			} else if err == nil {
				target.Results.NumTests++
				target.Results.Failed++
				target.Results.Failures = append(target.Results.Failures, core.TestFailure{
					Name:   "Missing results",
					Stdout: string(out),
				})
				resultErr = fmt.Errorf("Test failed to produce output results file")
				resultMsg = fmt.Sprintf("Test apparently succeeded but failed to produce %s. Output: %s", outputFile, string(out))
				numFlakes++
			} else {
				target.Results.NumTests++
				target.Results.Failed++
				target.Results.Failures = append(target.Results.Failures, core.TestFailure{
					Name:   "Test failed with no results",
					Stdout: string(out),
				})
				numFlakes++
				resultErr = err
				resultMsg = fmt.Sprintf("Test failed with no results. Output: %s", string(out))
			}
		} else {
			results, err2 := parseTestResults(target, outputFile, false)
			if err2 != nil {
				resultErr = err2
				resultMsg = fmt.Sprintf("Couldn't parse test output file: %s. Stdout: %s", err2, string(out))
				numFlakes++
			} else if err != nil && results.Failed == 0 {
				// Add a failure result to the test so it shows up in the final aggregation.
				target.Results.Failed = 1
				target.Results.Failures = append(results.Failures, core.TestFailure{
					Name:   "Return value",
					Type:   fmt.Sprintf("%s", err),
					Stdout: string(out),
				})
				numFlakes++
				resultErr = err
				resultMsg = fmt.Sprintf("Test returned nonzero but reported no errors: %s. Output: %s", err, string(out))
			} else if err == nil && results.Failed != 0 {
				resultErr = fmt.Errorf("Test returned 0 but still reported failures")
				resultMsg = fmt.Sprintf("Test returned 0 but still reported failures. Stdout: %s", string(out))
				numFlakes++
			} else if results.Failed != 0 {
				resultErr = fmt.Errorf("Tests failed")
				resultMsg = fmt.Sprintf("Tests failed. Stdout: %s", string(out))
				numFlakes++
			} else {
				numSucceeded++
				if !state.ShowTestOutput {
					// Save a bit of memory; if we're not printing results on success we will never use them again.
					target.Results.Output = ""
				}
			}
		}
	}
	if numSucceeded >= successesRequired {
		target.Results.Failures = nil // Remove any failures, they don't count
		target.Results.Failed = 0     // (they'll be picked up as flakes below)
		if numSucceeded > 0 && numFlakes > 0 {
			target.Results.Flakes = numFlakes
		}
		// Success, clean things up.
		if moveAndCacheOutputFiles(target.Results, coverage) {
			logTestSuccess(state, tid, label, target.Results, coverage)
		}
		// Clean up the test directory.
		if state.CleanWorkdirs {
			if err := os.RemoveAll(target.TestDir()); err != nil {
				log.Warning("Failed to remove test directory for %s: %s", target.Label, err)
			}
		}
	} else {
		state.LogTestResult(tid, label, core.TargetTestFailed, target.Results, coverage, resultErr, resultMsg)
	}
}