// Moves a single output file from the target's temporary directory to its final output path.
// Returns true if the output changed, false if the existing output was already up to date.
func moveOutput(target *core.BuildTarget, tmpOutput, realOutput string, filegroup bool) (bool, error) {
	// Hash the new output first so we can compare it against any existing file.
	newHash, err := pathHash(tmpOutput, false)
	if err != nil {
		return true, err
	}
	realOutputExists := core.PathExists(realOutput)
	// If this is a filegroup we hardlink the outputs over and so the two files may actually be
	// the same file. If so don't do anything else and especially don't delete & recreate the
	// file because other things might be using it already (because more than one filegroup can
	// own the same file).
	if filegroup && realOutputExists && core.IsSameFile(tmpOutput, realOutput) {
		movePathHash(tmpOutput, realOutput, filegroup) // make sure this is updated regardless
		return false, nil
	}
	if realOutputExists {
		if oldHash, err := pathHash(realOutput, false); err != nil {
			return true, err
		} else if bytes.Equal(oldHash, newHash) {
			// We already have the same file in the current location. Don't bother moving it.
			log.Debug("Checking %s vs. %s, hashes match", tmpOutput, realOutput)
			return false, nil
		}
		if err := os.RemoveAll(realOutput); err != nil {
			return true, err
		}
	}
	movePathHash(tmpOutput, realOutput, filegroup)
	// Check if we need a directory for this output.
	dir := path.Dir(realOutput)
	if !core.PathExists(dir) {
		if err := os.MkdirAll(dir, core.DirPermissions); err != nil {
			return true, err
		}
	}
	// If the output file is in plz-out/tmp we can just move it to save time, otherwise we need
	// to copy so we don't move files from other directories.
	if strings.HasPrefix(tmpOutput, target.TmpDir()) {
		if err := os.Rename(tmpOutput, realOutput); err != nil {
			return true, err
		}
	} else {
		if err := core.RecursiveCopyFile(tmpOutput, realOutput, target.OutMode(), filegroup, false); err != nil {
			if filegroup && os.IsExist(err) && core.IsSameFile(tmpOutput, realOutput) {
				// It's possible for two filegroups to race building simultaneously. In that
				// case one will fail with an ErrExist, which is OK as far as we're concerned
				// here as long as the file we tried to write really is the same as the input.
				return true, nil
			}
			return true, err
		}
	}
	if target.IsBinary {
		if err := os.Chmod(realOutput, target.OutMode()); err != nil {
			return true, err
		}
	}
	return true, nil
}

// Builds a single target
func buildTarget(tid int, state *core.BuildState, target *core.BuildTarget) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("%s", r)
			}
		}
	}()

	if err := target.CheckDependencyVisibility(state.Graph); err != nil {
		return err
	}
	// We can't do this check until build time, until then we don't know what all the outputs
	// will be (eg. for filegroups that collect outputs of other rules).
	if err := target.CheckDuplicateOutputs(); err != nil {
		return err
	}
	// This must run before we can leave this function successfully by any path.
	if target.PreBuildFunction != 0 {
		log.Debug("Running pre-build function for %s", target.Label)
		if err := parse.RunPreBuildFunction(tid, state, target); err != nil {
			return err
		}
		log.Debug("Finished pre-build function for %s", target.Label)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Preparing...")
	var postBuildOutput string
	if state.PrepareOnly && state.IsOriginalTarget(target.Label) {
		if target.IsFilegroup() {
			return fmt.Errorf("Filegroup targets don't have temporary directories")
		}
		if err := prepareDirectories(target); err != nil {
			return err
		}
		if err := prepareSources(state.Graph, target); err != nil {
			return err
		}
		return stopTarget
	}
	if !needsBuilding(state, target, false) {
		log.Debug("Not rebuilding %s, nothing's changed", target.Label)
		postBuildOutput = runPostBuildFunctionIfNeeded(tid, state, target)
		// If a post-build function ran it may modify the rule definition. In that case we
		// need to check again whether the rule needs building.
		if target.PostBuildFunction == 0 || !needsBuilding(state, target, true) {
			target.SetState(core.Reused)
			state.LogBuildResult(tid, target.Label, core.TargetCached, "Unchanged")
			return nil // Nothing needs to be done.
		} else {
			log.Debug("Rebuilding %s after post-build function", target.Label)
		}
	}
	if target.IsFilegroup() {
		log.Debug("Building %s...", target.Label)
		return buildFilegroup(tid, state, target)
	}
	oldOutputHash, outputHashErr := OutputHash(target)
	if err := prepareDirectories(target); err != nil {
		return fmt.Errorf("Error preparing directories for %s: %s", target.Label, err)
	}
	retrieveArtifacts := func() bool {
		state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Checking cache...")
		if _, retrieved := retrieveFromCache(state, target); retrieved {
			log.Debug("Retrieved artifacts for %s from cache", target.Label)
			checkLicences(state, target)
			newOutputHash, err := calculateAndCheckRuleHash(state, target)
			if err != nil { // Most likely hash verification failure
				log.Warning("Error retrieving cached artifacts for %s: %s", target.Label, err)
				RemoveOutputs(target)
				return false
			} else if outputHashErr != nil || !bytes.Equal(oldOutputHash, newOutputHash) {
				target.SetState(core.Cached)
				state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached")
			} else {
				target.SetState(core.Unchanged)
				state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached (unchanged)")
			}
			return true // got from cache
		}
		return false
	}
	cacheKey := mustShortTargetHash(state, target)
	if state.Cache != nil {
		// Note that ordering here is quite sensitive since the post-build function can modify
		// what we would retrieve from the cache.
		if target.PostBuildFunction != 0 {
			log.Debug("Checking for post-build output file for %s in cache...", target.Label)
			if (*state.Cache).RetrieveExtra(target, cacheKey, core.PostBuildOutputFileName(target)) {
				postBuildOutput = runPostBuildFunctionIfNeeded(tid, state, target)
				if retrieveArtifacts() {
					return nil
				}
			}
		} else if retrieveArtifacts() {
			return nil
		}
	}
	if err := prepareSources(state.Graph, target); err != nil {
		return fmt.Errorf("Error preparing sources for %s: %s", target.Label, err)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, target.BuildingDescription)
	replacedCmd := replaceSequences(target)
	env := core.StampedBuildEnvironment(state, target, false, cacheKey)
	log.Debug("Building target %s\nENVIRONMENT:\n%s\n%s", target.Label, strings.Join(env, "\n"), replacedCmd)
	out, combined, err := core.ExecWithTimeoutShell(target.TmpDir(), env, target.BuildTimeout, state.Config.Build.Timeout, state.ShowAllOutput, replacedCmd)
	if err != nil {
		if state.Verbosity >= 4 {
			return fmt.Errorf("Error building target %s: %s\nENVIRONMENT:\n%s\n%s\n%s",
				target.Label, err, strings.Join(env, "\n"), target.GetCommand(), combined)
		}
		return fmt.Errorf("Error building target %s: %s\n%s", target.Label, err, combined)
	}
	if target.PostBuildFunction != 0 {
		out = bytes.TrimSpace(out)
		sout := string(out)
		if postBuildOutput != "" {
			// We've already run the post-build function once, it's not safe to do it again (e.g. if adding new
			// targets, it will likely fail). Theoretically it should get the same output this time and hence would
			// do the same thing, since it had all the same inputs.
			// Obviously we can't be 100% sure that will be the case, so issue a warning if not...
			if postBuildOutput != sout {
				log.Warning("The build output for %s differs from what we got back from the cache earlier.\n"+
					"This implies your target's output is nondeterministic; Please won't re-run the\n"+
					"post-build function, which will *probably* be okay, but Please can't be sure.\n"+
					"See https://github.com/thought-machine/please/issues/113 for more information.", target.Label)
				log.Debug("Cached build output for %s: %s\n\nNew build output: %s", target.Label, postBuildOutput, sout)
			}
		} else if err := parse.RunPostBuildFunction(tid, state, target, sout); err != nil {
			return err
		}
		storePostBuildOutput(state, target, out)
	}
	checkLicences(state, target)
	state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Collecting outputs...")
	extraOuts, outputsChanged, err := moveOutputs(state, target)
	if err != nil {
		return fmt.Errorf("Error moving outputs for target %s: %s", target.Label, err)
	}
	if _, err = calculateAndCheckRuleHash(state, target); err != nil {
		return err
	}
	if outputsChanged {
		target.SetState(core.Built)
	} else {
		target.SetState(core.Unchanged)
	}
	if state.Cache != nil {
		state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Storing...")
		newCacheKey := mustShortTargetHash(state, target)
		(*state.Cache).Store(target, newCacheKey)
		if target.PostBuildFunction != 0 {
			// NB. Important this is stored with the earlier hash - if we calculate the hash
			// now, it might be different, and we could of course never retrieve it again.
			(*state.Cache).StoreExtra(target, cacheKey, core.PostBuildOutputFileName(target))
		}
		for _, out := range extraOuts {
			(*state.Cache).StoreExtra(target, newCacheKey, out)
		}
	}
	// Clean up the temporary directory once it's done.
	if state.CleanWorkdirs {
		if err := os.RemoveAll(target.TmpDir()); err != nil {
			log.Warning("Failed to remove temporary directory for %s: %s", target.Label, err)
		}
	}
	if outputsChanged {
		state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built")
	} else {
		state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built (unchanged)")
	}
	return nil
}

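// Moves all of a target's declared outputs from its temporary build directory to the output
// directory, returning any extra (optional) outputs discovered and whether any output changed.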
func moveOutputs(state *core.BuildState, target *core.BuildTarget) ([]string, bool, error) {
	// Before we write any outputs, we must remove the old hash file to avoid it being
	// left in an inconsistent state.
	if err := os.RemoveAll(ruleHashFileName(target)); err != nil {
		return nil, true, err
	}
	changed := false
	tmpDir := target.TmpDir()
	outDir := target.OutDir()
	for _, output := range target.Outputs() {
		tmpOutput := path.Join(tmpDir, output)
		realOutput := path.Join(outDir, output)
		if !core.PathExists(tmpOutput) {
			return nil, true, fmt.Errorf("Rule %s failed to create output %s", target.Label, tmpOutput)
		}
		// If the output is a symlink, dereference it. Otherwise, for efficiency,
		// we can just move it without a full copy (saves copying large .jar files etc).
		dereferencedPath, err := filepath.EvalSymlinks(tmpOutput)
		if err != nil {
			return nil, true, err
		}
		// NB. false -> not filegroup, we wouldn't be here if it was.
		outputChanged, err := moveOutput(target, dereferencedPath, realOutput, false)
		if err != nil {
			return nil, true, err
		}
		changed = changed || outputChanged
	}
	if changed {
		log.Debug("Outputs for %s have changed", target.Label)
	} else {
		log.Debug("Outputs for %s are unchanged", target.Label)
	}
	// Optional outputs get moved but don't contribute to the hash or to incrementality.
	// Glob patterns are supported on these.
	extraOuts := []string{}
	for _, output := range core.Glob(tmpDir, target.OptionalOutputs, nil, nil, true) {
		log.Debug("Discovered optional output %s", output)
		tmpOutput := path.Join(tmpDir, output)
		realOutput := path.Join(outDir, output)
		if _, err := moveOutput(target, tmpOutput, realOutput, false); err != nil {
			return nil, changed, err
		}
		extraOuts = append(extraOuts, output)
	}
	return extraOuts, changed, nil
}

// Prepares the output directories for a target
func prepareDirectories(target *core.BuildTarget) error {
	if err := prepareDirectory(target.TmpDir(), true); err != nil {
		return err
	}
	if err := prepareDirectory(target.OutDir(), false); err != nil {
		return err
	}
	// Nicety for the build rules: create any directories that it's
	// declared it'll create files in.
	for _, out := range target.Outputs() {
		if dir := path.Dir(out); dir != "." {
			outPath := path.Join(target.TmpDir(), dir)
			if !core.PathExists(outPath) {
				if err := os.MkdirAll(outPath, core.DirPermissions); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
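
// For context: prepareDirectory is defined elsewhere in this package and is not shown in this
// section; its signature is implied by the calls above. A minimal sketch of the behaviour
// prepareDirectories relies on (an assumption; the real helper may handle more edge cases)
// would look roughly like:
//
//	func prepareDirectory(directory string, remove bool) error {
//		// Optionally wipe the directory first (used for the temp dir, not the out dir).
//		if remove && core.PathExists(directory) {
//			if err := os.RemoveAll(directory); err != nil {
//				return err
//			}
//		}
//		return os.MkdirAll(directory, core.DirPermissions)
//	}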