// Builds a single target
func buildTarget(tid int, state *core.BuildState, target *core.BuildTarget) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("%s", r)
			}
		}
	}()
	if err := target.CheckDependencyVisibility(state.Graph); err != nil {
		return err
	}
	// We can't do this check until build time, until then we don't know what all the outputs
	// will be (eg. for filegroups that collect outputs of other rules).
	if err := target.CheckDuplicateOutputs(); err != nil {
		return err
	}
	// This must run before we can leave this function successfully by any path.
	if target.PreBuildFunction != 0 {
		log.Debug("Running pre-build function for %s", target.Label)
		if err := parse.RunPreBuildFunction(tid, state, target); err != nil {
			return err
		}
		log.Debug("Finished pre-build function for %s", target.Label)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Preparing...")
	var postBuildOutput string
	if state.PrepareOnly && state.IsOriginalTarget(target.Label) {
		if target.IsFilegroup() {
			return fmt.Errorf("Filegroup targets don't have temporary directories")
		}
		if err := prepareDirectories(target); err != nil {
			return err
		}
		if err := prepareSources(state.Graph, target); err != nil {
			return err
		}
		return stopTarget
	}
	if !needsBuilding(state, target, false) {
		log.Debug("Not rebuilding %s, nothing's changed", target.Label)
		postBuildOutput = runPostBuildFunctionIfNeeded(tid, state, target)
		// If a post-build function ran it may modify the rule definition. In that case we
		// need to check again whether the rule needs building.
		if target.PostBuildFunction == 0 || !needsBuilding(state, target, true) {
			target.SetState(core.Reused)
			state.LogBuildResult(tid, target.Label, core.TargetCached, "Unchanged")
			return nil // Nothing needs to be done.
		} else {
			log.Debug("Rebuilding %s after post-build function", target.Label)
		}
	}
	if target.IsFilegroup() {
		log.Debug("Building %s...", target.Label)
		return buildFilegroup(tid, state, target)
	}
	oldOutputHash, outputHashErr := OutputHash(target)
	if err := prepareDirectories(target); err != nil {
		return fmt.Errorf("Error preparing directories for %s: %s", target.Label, err)
	}
	retrieveArtifacts := func() bool {
		state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Checking cache...")
		if _, retrieved := retrieveFromCache(state, target); retrieved {
			log.Debug("Retrieved artifacts for %s from cache", target.Label)
			checkLicences(state, target)
			newOutputHash, err := calculateAndCheckRuleHash(state, target)
			if err != nil { // Most likely hash verification failure
				log.Warning("Error retrieving cached artifacts for %s: %s", target.Label, err)
				RemoveOutputs(target)
				return false
			} else if outputHashErr != nil || !bytes.Equal(oldOutputHash, newOutputHash) {
				target.SetState(core.Cached)
				state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached")
			} else {
				target.SetState(core.Unchanged)
				state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached (unchanged)")
			}
			return true // got from cache
		}
		return false
	}
	cacheKey := mustShortTargetHash(state, target)
	if state.Cache != nil {
		// Note that ordering here is quite sensitive since the post-build function can modify
		// what we would retrieve from the cache.
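		// Concretely: fetch any stored post-build output first, re-run the post-build
		// function on it, and only then look for the artifacts, since the function may
		// change the set of outputs we'd expect to find in the cache.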
		if target.PostBuildFunction != 0 {
			log.Debug("Checking for post-build output file for %s in cache...", target.Label)
			if (*state.Cache).RetrieveExtra(target, cacheKey, core.PostBuildOutputFileName(target)) {
				postBuildOutput = runPostBuildFunctionIfNeeded(tid, state, target)
				if retrieveArtifacts() {
					return nil
				}
			}
		} else if retrieveArtifacts() {
			return nil
		}
	}
	if err := prepareSources(state.Graph, target); err != nil {
		return fmt.Errorf("Error preparing sources for %s: %s", target.Label, err)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, target.BuildingDescription)
	replacedCmd := replaceSequences(target)
	env := core.StampedBuildEnvironment(state, target, false, cacheKey)
	log.Debug("Building target %s\nENVIRONMENT:\n%s\n%s", target.Label, strings.Join(env, "\n"), replacedCmd)
	out, combined, err := core.ExecWithTimeoutShell(target.TmpDir(), env, target.BuildTimeout, state.Config.Build.Timeout, state.ShowAllOutput, replacedCmd)
	if err != nil {
		if state.Verbosity >= 4 {
			return fmt.Errorf("Error building target %s: %s\nENVIRONMENT:\n%s\n%s\n%s",
				target.Label, err, strings.Join(env, "\n"), target.GetCommand(), combined)
		}
		return fmt.Errorf("Error building target %s: %s\n%s", target.Label, err, combined)
	}
	if target.PostBuildFunction != 0 {
		out = bytes.TrimSpace(out)
		sout := string(out)
		if postBuildOutput != "" {
			// We've already run the post-build function once, it's not safe to do it again (e.g. if adding new
			// targets, it will likely fail). Theoretically it should get the same output this time and hence would
			// do the same thing, since it had all the same inputs.
			// Obviously we can't be 100% sure that will be the case, so issue a warning if not...
			if postBuildOutput != sout {
				log.Warning("The build output for %s differs from what we got back from the cache earlier.\n"+
					"This implies your target's output is nondeterministic; Please won't re-run the\n"+
					"post-build function, which will *probably* be okay, but Please can't be sure.\n"+
					"See https://github.com/thought-machine/please/issues/113 for more information.", target.Label)
				log.Debug("Cached build output for %s: %s\n\nNew build output: %s", target.Label, postBuildOutput, sout)
			}
		} else if err := parse.RunPostBuildFunction(tid, state, target, sout); err != nil {
			return err
		}
		storePostBuildOutput(state, target, out)
	}
	checkLicences(state, target)
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Collecting outputs...")
	extraOuts, outputsChanged, err := moveOutputs(state, target)
	if err != nil {
		return fmt.Errorf("Error moving outputs for target %s: %s", target.Label, err)
	}
	if _, err = calculateAndCheckRuleHash(state, target); err != nil {
		return err
	}
	if outputsChanged {
		target.SetState(core.Built)
	} else {
		target.SetState(core.Unchanged)
	}
	if state.Cache != nil {
		state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Storing...")
		newCacheKey := mustShortTargetHash(state, target)
		(*state.Cache).Store(target, newCacheKey)
		if target.PostBuildFunction != 0 {
			// NB. Important this is stored with the earlier hash - if we calculate the hash
			// now, it might be different, and we could of course never retrieve it again.
			(*state.Cache).StoreExtra(target, cacheKey, core.PostBuildOutputFileName(target))
		}
		for _, out := range extraOuts {
			(*state.Cache).StoreExtra(target, newCacheKey, out)
		}
	}
	// Clean up the temporary directory once it's done.
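	// Failure to remove it is deliberately only a warning; a leftover temp dir
	// shouldn't fail an otherwise successful build.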
	if state.CleanWorkdirs {
		if err := os.RemoveAll(target.TmpDir()); err != nil {
			log.Warning("Failed to remove temporary directory for %s: %s", target.Label, err)
		}
	}
	if outputsChanged {
		state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built")
	} else {
		state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built (unchanged)")
	}
	return nil
}
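
// recoverToError is a small, self-contained sketch (a hypothetical helper, not
// part of the original package) of the defer/recover pattern used at the top of
// buildTarget: any panic raised while building is converted into an ordinary
// error return instead of crashing the worker goroutine.
func recoverToError(f func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e // Propagate the original error value unchanged.
			} else {
				err = fmt.Errorf("%s", r) // Wrap non-error panic values.
			}
		}
	}()
	f()
	return nil
}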
// Return true if the rule needs building, false if the existing outputs are OK.
func needsBuilding(state *core.BuildState, target *core.BuildTarget, postBuild bool) bool {
	// Check the dependencies first, because they don't need any disk I/O.
	if target.NeedsTransitiveDependencies {
		if anyDependencyHasChanged(target) {
			return true // one of the transitive deps has changed, need to rebuild
		}
	} else {
		for _, dep := range target.Dependencies() {
			if dep.State() < core.Unchanged {
				log.Debug("Need to rebuild %s, %s has changed", target.Label, dep.Label)
				return true // dependency has just been rebuilt, do this too.
			}
		}
	}
	oldRuleHash, oldConfigHash, oldSourceHash := readRuleHashFile(ruleHashFileName(target), postBuild)
	if !bytes.Equal(oldConfigHash, state.Hashes.Config) {
		if len(oldConfigHash) == 0 {
			// Small nicety to make it a bit clearer what's going on.
			log.Debug("Need to build %s, outputs aren't there", target.Label)
		} else {
			log.Debug("Need to rebuild %s, config has changed (was %s, need %s)", target.Label, b64(oldConfigHash), b64(state.Hashes.Config))
		}
		return true
	}
	newRuleHash := RuleHash(target, false, postBuild)
	if !bytes.Equal(oldRuleHash, newRuleHash) {
		log.Debug("Need to rebuild %s, rule has changed (was %s, need %s)", target.Label, b64(oldRuleHash), b64(newRuleHash))
		return true
	}
	newSourceHash, err := sourceHash(state.Graph, target)
	if err != nil || !bytes.Equal(oldSourceHash, newSourceHash) {
		log.Debug("Need to rebuild %s, sources have changed (was %s, need %s)", target.Label, b64(oldSourceHash), b64(newSourceHash))
		return true
	}
	// Check the outputs of this rule exist. This would only happen if the user had
	// removed them but it's incredibly aggravating if you remove an output and the
	// rule won't rebuild itself.
	for _, output := range target.Outputs() {
		realOutput := path.Join(target.OutDir(), output)
		if !core.PathExists(realOutput) {
			log.Debug("Output %s doesn't exist for rule %s; will rebuild.", realOutput, target.Label)
			return true
		}
	}
	// Maybe we've forced a rebuild. Do this last; might be interesting to see if it needed building anyway.
	return state.ForceRebuild && (state.IsOriginalTarget(target.Label) || state.IsOriginalTarget(target.Label.Parent()))
}
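
// anyDependencyHasChangedSketch is illustrative only; it is a hypothetical
// stand-in, not the real anyDependencyHasChanged used above. It shows how a
// transitive walk over dependencies could look using only calls that already
// appear in this file (Dependencies and State); the real implementation may
// differ, e.g. by deduplicating shared dependencies so they aren't visited twice.
func anyDependencyHasChangedSketch(target *core.BuildTarget) bool {
	for _, dep := range target.Dependencies() {
		// Anything below core.Unchanged means the dependency was just rebuilt,
		// so this target needs rebuilding too.
		if dep.State() < core.Unchanged || anyDependencyHasChangedSketch(dep) {
			return true
		}
	}
	return false
}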
// calculateAndCheckRuleHash checks the output hash for a rule.
func calculateAndCheckRuleHash(state *core.BuildState, target *core.BuildTarget) ([]byte, error) {
	hash, err := OutputHash(target)
	if err != nil {
		return nil, err
	}
	if err = checkRuleHashes(target, hash); err != nil {
		if state.NeedHashesOnly && (state.IsOriginalTarget(target.Label) || state.IsOriginalTarget(target.Label.Parent())) {
			return nil, stopTarget
		} else if state.VerifyHashes {
			return nil, err
		} else {
			log.Warning("%s", err)
		}
	}
	if err := writeRuleHashFile(state, target); err != nil {
		return nil, fmt.Errorf("Attempting to create hash file: %s", err)
	}
	return hash, nil
}
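
// isStop is a hypothetical convenience (not in the original file), shown only to
// make the sentinel explicit: stopTarget, as returned above on the PrepareOnly
// and NeedHashesOnly paths, signals "stop processing this target" to the caller
// rather than a genuine build failure.
func isStop(err error) bool {
	return err == stopTarget
}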