// buildTarget builds a single target. It returns nil on success, the sentinel
// stopTarget when the build is deliberately halted after preparation
// (prepare-only mode), or an error on failure. Panics raised by helpers are
// recovered and converted into the returned error.
func buildTarget(tid int, state *core.BuildState, target *core.BuildTarget) (err error) {
	// Convert any panic into an ordinary error return so one bad target
	// doesn't take down the whole build.
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("%s", r)
			}
		}
	}()
	if err := target.CheckDependencyVisibility(state.Graph); err != nil {
		return err
	}
	// We can't do this check until build time, until then we don't know what all the outputs
	// will be (eg. for filegroups that collect outputs of other rules).
	if err := target.CheckDuplicateOutputs(); err != nil {
		return err
	}
	// This must run before we can leave this function successfully by any path.
	// NOTE(review): PreBuildFunction is compared against 0, so it appears to be a
	// numeric handle rather than a Go func value — confirm against its declaration.
	if target.PreBuildFunction != 0 {
		log.Debug("Running pre-build function for %s", target.Label)
		if err := parse.RunPreBuildFunction(tid, state, target); err != nil {
			return err
		}
		log.Debug("Finished pre-build function for %s", target.Label)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Preparing...")
	var postBuildOutput string
	// Prepare-only mode: set up the temp dir and sources, then stop before running anything.
	if state.PrepareOnly && state.IsOriginalTarget(target.Label) {
		if target.IsFilegroup() {
			return fmt.Errorf("Filegroup targets don't have temporary directories")
		}
		if err := prepareDirectories(target); err != nil {
			return err
		}
		if err := prepareSources(state.Graph, target); err != nil {
			return err
		}
		return stopTarget
	}
	if !needsBuilding(state, target, false) {
		log.Debug("Not rebuilding %s, nothing's changed", target.Label)
		postBuildOutput = runPostBuildFunctionIfNeeded(tid, state, target)
		// If a post-build function ran it may modify the rule definition. In that case we
		// need to check again whether the rule needs building.
		if target.PostBuildFunction == 0 || !needsBuilding(state, target, true) {
			target.SetState(core.Reused)
			state.LogBuildResult(tid, target.Label, core.TargetCached, "Unchanged")
			return nil // Nothing needs to be done.
		} else {
			log.Debug("Rebuilding %s after post-build function", target.Label)
		}
	}
	// Filegroups have their own dedicated build path with no shell command.
	if target.IsFilegroup() {
		log.Debug("Building %s...", target.Label)
		return buildFilegroup(tid, state, target)
	}
	// Hash the existing outputs (if any) so we can later tell whether a cache
	// retrieval actually changed anything. An error here just means "no prior outputs".
	oldOutputHash, outputHashErr := OutputHash(target)
	if err := prepareDirectories(target); err != nil {
		return fmt.Errorf("Error preparing directories for %s: %s", target.Label, err)
	}
	// retrieveArtifacts attempts a cache fetch; returns true if the target is now
	// satisfied from the cache (state set and result logged), false to fall through
	// to a real build.
	retrieveArtifacts := func() bool {
		state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Checking cache...")
		if _, retrieved := retrieveFromCache(state, target); retrieved {
			log.Debug("Retrieved artifacts for %s from cache", target.Label)
			checkLicences(state, target)
			newOutputHash, err := calculateAndCheckRuleHash(state, target)
			if err != nil { // Most likely hash verification failure
				log.Warning("Error retrieving cached artifacts for %s: %s", target.Label, err)
				RemoveOutputs(target)
				return false
			} else if outputHashErr != nil || !bytes.Equal(oldOutputHash, newOutputHash) {
				target.SetState(core.Cached)
				state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached")
			} else {
				target.SetState(core.Unchanged)
				state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached (unchanged)")
			}
			return true // got from cache
		}
		return false
	}
	cacheKey := mustShortTargetHash(state, target)
	if state.Cache != nil {
		// Note that ordering here is quite sensitive since the post-build function can modify
		// what we would retrieve from the cache.
		if target.PostBuildFunction != 0 {
			log.Debug("Checking for post-build output file for %s in cache...", target.Label)
			if (*state.Cache).RetrieveExtra(target, cacheKey, core.PostBuildOutputFileName(target)) {
				postBuildOutput = runPostBuildFunctionIfNeeded(tid, state, target)
				if retrieveArtifacts() {
					return nil
				}
			}
		} else if retrieveArtifacts() {
			return nil
		}
	}
	// Cache miss (or no cache): do a real build.
	if err := prepareSources(state.Graph, target); err != nil {
		return fmt.Errorf("Error preparing sources for %s: %s", target.Label, err)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, target.BuildingDescription)
	replacedCmd := replaceSequences(target)
	env := core.StampedBuildEnvironment(state, target, false, cacheKey)
	log.Debug("Building target %s\nENVIRONMENT:\n%s\n%s", target.Label, strings.Join(env, "\n"), replacedCmd)
	out, combined, err := core.ExecWithTimeoutShell(target.TmpDir(), env, target.BuildTimeout, state.Config.Build.Timeout, state.ShowAllOutput, replacedCmd)
	if err != nil {
		// At high verbosity include the full environment and command for debugging.
		if state.Verbosity >= 4 {
			return fmt.Errorf("Error building target %s: %s\nENVIRONMENT:\n%s\n%s\n%s", target.Label, err, strings.Join(env, "\n"), target.GetCommand(), combined)
		}
		return fmt.Errorf("Error building target %s: %s\n%s", target.Label, err, combined)
	}
	if target.PostBuildFunction != 0 {
		out = bytes.TrimSpace(out)
		sout := string(out)
		if postBuildOutput != "" {
			// We've already run the post-build function once, it's not safe to do it again (e.g. if adding new
			// targets, it will likely fail). Theoretically it should get the same output this time and hence would
			// do the same thing, since it had all the same inputs.
			// Obviously we can't be 100% sure that will be the case, so issue a warning if not...
			if postBuildOutput != sout {
				log.Warning("The build output for %s differs from what we got back from the cache earlier.\n"+
					"This implies your target's output is nondeterministic; Please won't re-run the\n"+
					"post-build function, which will *probably* be okay, but Please can't be sure.\n"+
					"See https://github.com/thought-machine/please/issues/113 for more information.", target.Label)
				log.Debug("Cached build output for %s: %s\n\nNew build output: %s", target.Label, postBuildOutput, sout)
			}
		} else if err := parse.RunPostBuildFunction(tid, state, target, sout); err != nil {
			return err
		}
		storePostBuildOutput(state, target, out)
	}
	checkLicences(state, target)
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Collecting outputs...")
	extraOuts, outputsChanged, err := moveOutputs(state, target)
	if err != nil {
		return fmt.Errorf("Error moving outputs for target %s: %s", target.Label, err)
	}
	if _, err = calculateAndCheckRuleHash(state, target); err != nil {
		return err
	}
	if outputsChanged {
		target.SetState(core.Built)
	} else {
		target.SetState(core.Unchanged)
	}
	if state.Cache != nil {
		state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Storing...")
		newCacheKey := mustShortTargetHash(state, target)
		(*state.Cache).Store(target, newCacheKey)
		if target.PostBuildFunction != 0 {
			// NB. Important this is stored with the earlier hash - if we calculate the hash
			// now, it might be different, and we could of course never retrieve it again.
			(*state.Cache).StoreExtra(target, cacheKey, core.PostBuildOutputFileName(target))
		}
		for _, out := range extraOuts {
			(*state.Cache).StoreExtra(target, newCacheKey, out)
		}
	}
	// Clean up the temporary directory once it's done.
	if state.CleanWorkdirs {
		if err := os.RemoveAll(target.TmpDir()); err != nil {
			log.Warning("Failed to remove temporary directory for %s: %s", target.Label, err)
		}
	}
	if outputsChanged {
		state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built")
	} else {
		state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built (unchanged)")
	}
	return nil
}
// Build builds a single target (which must already exist in the build graph),
// records metrics on success, and enqueues any follow-up work that has become
// possible: reverse dependencies that are now fully built, the target's own
// test if tests are needed, and any parses deferred on this target.
func Build(tid int, state *core.BuildState, label core.BuildLabel) {
	start := time.Now()
	target := state.Graph.TargetOrDie(label)
	target.SetState(core.Building)
	if err := buildTarget(tid, state, target); err != nil {
		if err == stopTarget {
			// Not a real failure: the build was deliberately stopped after
			// preparation (see buildTarget's prepare-only path).
			target.SetState(core.Stopped)
			state.LogBuildResult(tid, target.Label, core.TargetBuildStopped, "Build stopped")
			return
		}
		state.LogBuildError(tid, label, core.TargetBuildFailed, err, "Build failed: %s", err)
		// Remove outputs of a failed build so a later run can't mistake them for valid.
		if err := RemoveOutputs(target); err != nil {
			log.Errorf("Failed to remove outputs for %s: %s", target.Label, err)
		}
		target.SetState(core.Failed)
		return
	}
	metrics.Record(target, time.Since(start))
	// Add any of the reverse deps that are now fully built to the queue.
	// SyncUpdateState guards against two builders queueing the same dependent.
	for _, reverseDep := range state.Graph.ReverseDependencies(target) {
		if reverseDep.State() == core.Active && state.Graph.AllDepsBuilt(reverseDep) && reverseDep.SyncUpdateState(core.Active, core.Pending) {
			state.AddPendingBuild(reverseDep.Label, false)
		}
	}
	if target.IsTest && state.NeedTests {
		state.AddPendingTest(target.Label)
	}
	parse.UndeferAnyParses(state, target)
}
func Test(tid int, state *core.BuildState, label core.BuildLabel) { state.LogBuildResult(tid, label, core.TargetTesting, "Testing...") startTime := time.Now() target := state.Graph.TargetOrDie(label) test(tid, state, label, target) metrics.Record(target, time.Since(startTime)) }
// RunPreBuildFunction runs a pre-build callback function registered on a build target via pre_build = <...>. // // This is called before the target is built. It doesn't receive any output like the post-build one does but can // be useful for other things; for example if you want to investigate a target's transitive labels to adjust // its build command, you have to do that here (because in general the transitive dependencies aren't known // when the rule is evaluated). func RunPreBuildFunction(tid int, state *core.BuildState, target *core.BuildTarget) error { state.LogBuildResult(tid, target.Label, core.PackageParsing, fmt.Sprintf("Running pre-build function for %s", target.Label)) pkg := state.Graph.Package(target.Label.PackageName) pkg.BuildCallbackMutex.Lock() defer pkg.BuildCallbackMutex.Unlock() if err := runPreBuildFunction(pkg, target); err != nil { state.LogBuildError(tid, target.Label, core.ParseFailed, err, "Failed pre-build function for %s", target.Label) return err } rescanDeps(state, pkg) state.LogBuildResult(tid, target.Label, core.TargetBuilding, fmt.Sprintf("Finished pre-build function for %s", target.Label)) return nil }
// buildFilegroup runs the manual build steps for a filegroup rule. // We don't force this to be done in bash to avoid errors with maximum command lengths, // and it's actually quite fiddly to get just so there. func buildFilegroup(tid int, state *core.BuildState, target *core.BuildTarget) error { if err := prepareDirectory(target.OutDir(), false); err != nil { return err } if err := os.RemoveAll(ruleHashFileName(target)); err != nil { return err } changed := false outDir := target.OutDir() for _, source := range target.Sources { fullPaths := source.FullPaths(state.Graph) for i, sourcePath := range source.LocalPaths(state.Graph) { outPath := path.Join(outDir, sourcePath) c, err := moveOutput(target, fullPaths[i], outPath, true) if err != nil { return err } changed = changed || c } } if target.HasLabel("py") && !target.IsBinary { // Pre-emptively create __init__.py files so the outputs can be loaded dynamically. // It's a bit cheeky to do non-essential language-specific logic but this enables // a lot of relatively normal Python workflows. // Errors are deliberately ignored. createInitPy(outDir) } if _, err := calculateAndCheckRuleHash(state, target); err != nil { return err } else if changed { target.SetState(core.Built) } else { target.SetState(core.Unchanged) } state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built") return nil }
// Parse parses the package corresponding to a single build label. The label can be :all to add all targets in a package.
// It is not an error if the package has already been parsed.
//
// By default, after the package is parsed, any targets that are now needed for the build and ready
// to be built are queued, and any new packages are queued for parsing. When a specific label is requested
// this is straightforward, but when parsing for pseudo-targets like :all and ..., various flags affect it:
// If 'noDeps' is true, then no new packages will be added and no new targets queued.
// 'include' and 'exclude' refer to the labels of targets to be added. If 'include' is non-empty then only
// targets with at least one matching label are added. Any targets with a label in 'exclude' are not added.
func Parse(tid int, state *core.BuildState, label, dependor core.BuildLabel, noDeps bool, include, exclude []string) {
	// Report a parse failure rather than crashing the process if anything panics below.
	defer func() {
		if r := recover(); r != nil {
			state.LogBuildError(tid, label, core.ParseFailed, fmt.Errorf("%s", r), "Failed to parse package")
		}
	}()
	// First see if this package already exists; once it's in the graph it will have been parsed.
	pkg := state.Graph.Package(label.PackageName)
	if pkg != nil {
		// Does exist, all we need to do is toggle on this target
		activateTarget(state, pkg, label, dependor, noDeps, include, exclude)
		return
	}
	// We use the name here to signal undeferring of a package. If we get that we need to retry the package regardless.
	// Otherwise, only the first caller to request a package actually parses it; later
	// callers are recorded by firstToParse and activated once the parse completes.
	if dependor.Name != "_UNDEFER_" && !firstToParse(label, dependor) {
		// Check this again to avoid a potential race
		if pkg = state.Graph.Package(label.PackageName); pkg != nil {
			activateTarget(state, pkg, label, dependor, noDeps, include, exclude)
		} else {
			log.Debug("Adding pending parse for %s", label)
		}
		return
	}
	// If we get here then it falls to us to parse this package
	state.LogBuildResult(tid, label, core.PackageParsing, "Parsing...")
	pkg = parsePackage(state, label, dependor)
	if pkg == nil {
		// A nil package here means the parse was deferred, not that it failed.
		state.LogBuildResult(tid, label, core.PackageParsed, "Deferred")
		return
	}
	// Now add any lurking pending targets for this package.
	pendingTargetMutex.Lock()
	pending := pendingTargets[label.PackageName]                 // Must be present.
	pendingTargets[label.PackageName] = map[string][]core.BuildLabel{} // Empty this to free memory, but leave a sentinel
	pendingTargetMutex.Unlock()                                  // Nothing will look up this package in the map again.
	for targetName, dependors := range pending {
		for _, dependor := range dependors {
			lbl := core.BuildLabel{PackageName: label.PackageName, Name: targetName}
			activateTarget(state, pkg, lbl, dependor, noDeps, include, exclude)
		}
	}
	state.LogBuildResult(tid, label, core.PackageParsed, "Parsed")
}
// test runs the tests for a single target. It handles reuse of cached results,
// multiple runs for flaky tests, parsing of result/coverage files, and moving
// outputs into the target's out directory (and the cache) on success.
func test(tid int, state *core.BuildState, label core.BuildLabel, target *core.BuildTarget) {
	startTime := time.Now()
	hash, err := build.RuntimeHash(state, target)
	if err != nil {
		state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to calculate target hash")
		return
	}
	// Check the cached output files if the target wasn't rebuilt.
	hash = core.CollapseHash(hash)
	hashStr := base64.RawURLEncoding.EncodeToString(hash)
	// Cached result/coverage filenames embed the hash so stale results can't match.
	resultsFileName := fmt.Sprintf(".test_results_%s_%s", label.Name, hashStr)
	coverageFileName := fmt.Sprintf(".test_coverage_%s_%s", label.Name, hashStr)
	outputFile := path.Join(target.TestDir(), "test.results")
	coverageFile := path.Join(target.TestDir(), "test.coverage")
	cachedOutputFile := path.Join(target.OutDir(), resultsFileName)
	cachedCoverageFile := path.Join(target.OutDir(), coverageFileName)
	needCoverage := state.NeedCoverage && !target.NoTestOutput
	// cachedTest reports a previously-cached, passing result instead of re-running.
	cachedTest := func() {
		log.Debug("Not re-running test %s; got cached results.", label)
		coverage := parseCoverageFile(target, cachedCoverageFile)
		results, err := parseTestResults(target, cachedOutputFile, true)
		target.Results.Duration = time.Since(startTime).Seconds()
		target.Results.Cached = true
		if err != nil {
			state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to parse cached test file %s", cachedOutputFile)
		} else if results.Failed > 0 {
			panic("Test results with failures shouldn't be cached.")
		} else {
			logTestSuccess(state, tid, label, results, coverage)
		}
	}
	// moveAndCacheOutputFiles moves result/coverage/declared output files into the
	// out dir (and cache); returns false if any move failed (already logged).
	moveAndCacheOutputFiles := func(results core.TestResults, coverage core.TestCoverage) bool {
		// Never cache test results when given arguments; the results may be incomplete.
		if len(state.TestArgs) > 0 {
			log.Debug("Not caching results for %s, we passed it arguments", label)
			return true
		}
		if err := moveAndCacheOutputFile(state, target, hash, outputFile, cachedOutputFile, resultsFileName, dummyOutput); err != nil {
			state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
			return false
		}
		if needCoverage || core.PathExists(coverageFile) {
			if err := moveAndCacheOutputFile(state, target, hash, coverageFile, cachedCoverageFile, coverageFileName, dummyCoverage); err != nil {
				state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test coverage file")
				return false
			}
		}
		for _, output := range target.TestOutputs {
			tmpFile := path.Join(target.TestDir(), output)
			outFile := path.Join(target.OutDir(), output)
			if err := moveAndCacheOutputFile(state, target, hash, tmpFile, outFile, output, ""); err != nil {
				state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
				return false
			}
		}
		return true
	}
	// needToRun reports whether we actually have to execute the test, i.e. no
	// valid cached results exist locally or in the cache.
	needToRun := func() bool {
		if target.State() == core.Unchanged && core.PathExists(cachedOutputFile) {
			// Output file exists already and appears to be valid. We might still need to rerun though
			// if the coverage files aren't available.
			if needCoverage && !core.PathExists(cachedCoverageFile) {
				return true
			}
			return false
		}
		// Check the cache for these artifacts.
		if state.Cache == nil {
			return true
		}
		cache := *state.Cache
		if !cache.RetrieveExtra(target, hash, resultsFileName) {
			return true
		}
		if needCoverage && !cache.RetrieveExtra(target, hash, coverageFileName) {
			return true
		}
		for _, output := range target.TestOutputs {
			if !cache.RetrieveExtra(target, hash, output) {
				return true
			}
		}
		return false
	}
	// Don't cache when doing multiple runs, presumably the user explicitly wants to check it.
	if state.NumTestRuns <= 1 && !needToRun() {
		cachedTest()
		return
	}
	// Remove any cached test result file.
	if err := RemoveCachedTestFiles(target); err != nil {
		state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to remove cached test files")
		return
	}
	numSucceeded := 0
	numFlakes := 0
	// calcNumRuns combines the requested run count with the target's declared flakiness.
	numRuns, successesRequired := calcNumRuns(state.NumTestRuns, target.Flakiness)
	var resultErr error
	resultMsg := ""
	var coverage core.TestCoverage
	for i := 0; i < numRuns && numSucceeded < successesRequired; i++ {
		if numRuns > 1 {
			state.LogBuildResult(tid, label, core.TargetTesting, fmt.Sprintf("Testing (%d of %d)...", i+1, numRuns))
		}
		out, err := prepareAndRunTest(tid, state, target)
		duration := time.Since(startTime).Seconds()
		startTime = time.Now() // reset this for next time
		// This is all pretty involved; there are lots of different possibilities of what could happen.
		// The contract is that the test must return zero on success or non-zero on failure (Unix FTW).
		// If it's successful, it must produce a parseable file named "test.results" in its temp folder.
		// (alternatively, this can be a directory containing parseable files).
		// Tests can opt out of the file requirement individually, in which case they're judged only
		// by their return value.
		// But of course, we still have to consider all the alternatives here and handle them nicely.
		target.Results.Output = string(out)
		if err != nil && target.Results.Output == "" {
			target.Results.Output = err.Error()
		}
		target.Results.TimedOut = err == context.DeadlineExceeded
		coverage = parseCoverageFile(target, coverageFile)
		target.Results.Duration += duration
		if !core.PathExists(outputFile) {
			// No results file: pass only if the test opted out of producing one and exited zero.
			if err == nil && target.NoTestOutput {
				target.Results.NumTests += 1
				target.Results.Passed += 1
				numSucceeded++
			} else if err == nil {
				target.Results.NumTests++
				target.Results.Failed++
				target.Results.Failures = append(target.Results.Failures, core.TestFailure{
					Name:   "Missing results",
					Stdout: string(out),
				})
				resultErr = fmt.Errorf("Test failed to produce output results file")
				resultMsg = fmt.Sprintf("Test apparently succeeded but failed to produce %s. Output: %s", outputFile, string(out))
				numFlakes++
			} else {
				target.Results.NumTests++
				target.Results.Failed++
				target.Results.Failures = append(target.Results.Failures, core.TestFailure{
					Name:   "Test failed with no results",
					Stdout: string(out),
				})
				numFlakes++
				resultErr = err
				resultMsg = fmt.Sprintf("Test failed with no results. Output: %s", string(out))
			}
		} else {
			results, err2 := parseTestResults(target, outputFile, false)
			if err2 != nil {
				resultErr = err2
				resultMsg = fmt.Sprintf("Couldn't parse test output file: %s. Stdout: %s", err2, string(out))
				numFlakes++
			} else if err != nil && results.Failed == 0 {
				// Add a failure result to the test so it shows up in the final aggregation.
				// NOTE(review): this appends results.Failures (the parsed file's failures)
				// onto target.Results.Failures — presumably intentional aggregation; confirm.
				target.Results.Failed = 1
				target.Results.Failures = append(results.Failures, core.TestFailure{
					Name:   "Return value",
					Type:   fmt.Sprintf("%s", err),
					Stdout: string(out),
				})
				numFlakes++
				resultErr = err
				resultMsg = fmt.Sprintf("Test returned nonzero but reported no errors: %s. Output: %s", err, string(out))
			} else if err == nil && results.Failed != 0 {
				resultErr = fmt.Errorf("Test returned 0 but still reported failures")
				resultMsg = fmt.Sprintf("Test returned 0 but still reported failures. Stdout: %s", string(out))
				numFlakes++
			} else if results.Failed != 0 {
				resultErr = fmt.Errorf("Tests failed")
				resultMsg = fmt.Sprintf("Tests failed. Stdout: %s", string(out))
				numFlakes++
			} else {
				numSucceeded++
				if !state.ShowTestOutput {
					// Save a bit of memory, if we're not printing results on success we will never use them again.
					target.Results.Output = ""
				}
			}
		}
	}
	if numSucceeded >= successesRequired {
		target.Results.Failures = nil // Remove any failures, they don't count
		target.Results.Failed = 0     // (they'll be picked up as flakes below)
		if numSucceeded > 0 && numFlakes > 0 {
			target.Results.Flakes = numFlakes
		}
		// Success, clean things up
		if moveAndCacheOutputFiles(target.Results, coverage) {
			logTestSuccess(state, tid, label, target.Results, coverage)
		}
		// Clean up the test directory.
		if state.CleanWorkdirs {
			if err := os.RemoveAll(target.TestDir()); err != nil {
				log.Warning("Failed to remove test directory for %s: %s", target.Label, err)
			}
		}
	} else {
		state.LogTestResult(tid, label, core.TargetTestFailed, target.Results, coverage, resultErr, resultMsg)
	}
}