// retrieveArtifacts fetches the artifacts for a target from the RPC cache and
// writes them into its output directory, returning true on success.
func (cache *rpcCache) retrieveArtifacts(target *core.BuildTarget, req *pb.RetrieveRequest, remove bool) bool {
	ctx, cancel := context.WithTimeout(context.Background(), cache.timeout)
	defer cancel()
	response, err := cache.client.Retrieve(ctx, req)
	if err != nil {
		log.Warning("Failed to retrieve artifacts for %s: %s", target.Label, err)
		cache.error()
		return false
	} else if !response.Success {
		// Quiet, this is almost certainly just a 'not found'.
		log.Debug("Couldn't retrieve artifacts for %s [key %s] from RPC cache", target.Label,
			base64.RawURLEncoding.EncodeToString(req.Hash))
		return false
	}
	// Remove any existing outputs first; this is important for cases where the output is a
	// directory, because we get back individual artifacts, and we need to make sure that
	// only the retrieved artifacts are present in the output.
	if remove {
		for _, out := range target.Outputs() {
			out := path.Join(target.OutDir(), out)
			if err := os.RemoveAll(out); err != nil {
				log.Error("Failed to remove artifact %s: %s", out, err)
				return false
			}
		}
	}
	for _, artifact := range response.Artifacts {
		if !cache.writeFile(target, artifact.File, artifact.Body) {
			return false
		}
	}
	// Sanity check: if we don't get anything back, assume it probably wasn't really a success.
	return len(response.Artifacts) > 0
}
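
// Illustrative usage sketch, not part of the original source: how a cache
// front-end might call retrieveArtifacts. Only the Hash field of the request
// is known from the code above; anything else here is an assumption.
func (cache *rpcCache) retrieveExample(target *core.BuildTarget, key []byte) bool {
	req := &pb.RetrieveRequest{Hash: key}
	// remove=true so directory outputs end up containing only retrieved files.
	return cache.retrieveArtifacts(target, req, true)
}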
// makeJSONTarget converts a single build target into its JSON representation.
func makeJSONTarget(graph *core.BuildGraph, target *core.BuildTarget) JSONTarget {
	t := JSONTarget{}
	for in := range core.IterSources(graph, target) {
		t.Inputs = append(t.Inputs, in.Src)
	}
	for _, out := range target.Outputs() {
		t.Outputs = append(t.Outputs, path.Join(target.Label.PackageName, out))
	}
	for _, src := range target.AllSourcePaths(graph) {
		t.Sources = append(t.Sources, src)
	}
	for _, dep := range target.Dependencies() {
		t.Deps = append(t.Deps, dep.Label.String())
	}
	for data := range core.IterRuntimeFiles(graph, target, false) {
		t.Data = append(t.Data, data.Src)
	}
	t.Labels = target.Labels
	t.Requires = target.Requires
	rawHash := append(build.RuleHash(target, true, false), core.State.Hashes.Config...)
	t.Hash = base64.RawStdEncoding.EncodeToString(rawHash)
	t.Test = target.IsTest
	t.Binary = target.IsBinary
	t.TestOnly = target.TestOnly
	return t
}
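
// Illustrative usage sketch, not part of the original source: serialising the
// JSONTarget, e.g. for a query command. Assumes encoding/json is imported and
// that the JSONTarget struct carries appropriate json tags.
func exampleDumpTarget(graph *core.BuildGraph, target *core.BuildTarget) ([]byte, error) {
	t := makeJSONTarget(graph, target)
	return json.MarshalIndent(t, "", "    ")
}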
// cacheArtifacts yields all cacheable artifacts from this target. Useful for cache
// implementations so they don't have to reinvent logic around post-build functions etc.
func cacheArtifacts(target *core.BuildTarget) <-chan string {
	ch := make(chan string, 10)
	go func() {
		for _, out := range target.Outputs() {
			ch <- out
		}
		close(ch)
	}()
	return ch
}
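
// Illustrative usage sketch, not part of the original source: a cache
// implementation draining cacheArtifacts. storeOne is a hypothetical
// per-artifact store function supplied by the caller.
func exampleStoreAll(target *core.BuildTarget, storeOne func(out string) error) error {
	for out := range cacheArtifacts(target) {
		if err := storeOne(out); err != nil {
			return err
		}
	}
	return nil
}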
// RemoveOutputs removes all generated outputs for a rule.
func RemoveOutputs(target *core.BuildTarget) error {
	if err := os.Remove(ruleHashFileName(target)); err != nil && !os.IsNotExist(err) {
		return err
	}
	for _, output := range target.Outputs() {
		if err := os.RemoveAll(path.Join(target.OutDir(), output)); err != nil {
			return err
		}
	}
	return nil
}
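
// Illustrative usage sketch, not part of the original source: removing the
// outputs of a set of targets, as a clean operation might do.
func exampleCleanTargets(targets []*core.BuildTarget) error {
	for _, target := range targets {
		if err := RemoveOutputs(target); err != nil {
			return err
		}
	}
	return nil
}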
// buildResult returns the paths of a target's outputs; relative to the repo root
// if plz was invoked there, otherwise absolute.
func buildResult(target *core.BuildTarget) []string {
	results := []string{}
	if target != nil {
		for _, out := range target.Outputs() {
			if core.StartedAtRepoRoot() {
				results = append(results, path.Join(target.OutDir(), out))
			} else {
				results = append(results, path.Join(core.RepoRoot, target.OutDir(), out))
			}
		}
	}
	return results
}
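
// Illustrative usage sketch, not part of the original source: printing the
// outputs of a built target from wherever plz was invoked.
func examplePrintResults(target *core.BuildTarget) {
	for _, result := range buildResult(target) {
		fmt.Println(result)
	}
}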
// OutputHash calculates the hash of a target's outputs.
func OutputHash(target *core.BuildTarget) ([]byte, error) {
	h := sha1.New()
	for _, output := range target.Outputs() {
		// NB. Always force a recalculation of the output hashes here. Memoisation is not
		// useful because by definition we are rebuilding a target, and can actively hurt
		// in cases where we compare the retrieved cache artifacts with what was there before.
		h2, err := pathHash(path.Join(target.OutDir(), output), true)
		if err != nil {
			return nil, err
		}
		h.Write(h2)
	}
	return h.Sum(nil), nil
}
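
// Illustrative usage sketch, not part of the original source: verifying a
// target's outputs against a previously recorded hash.
func exampleVerifyOutputs(target *core.BuildTarget, expected []byte) (bool, error) {
	h, err := OutputHash(target)
	if err != nil {
		return false, err
	}
	return bytes.Equal(h, expected), nil
}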
// moveOutputs moves all the declared outputs of a target from its temporary build
// directory to their final locations, returning any optional outputs discovered
// and whether any of the declared outputs changed.
func moveOutputs(state *core.BuildState, target *core.BuildTarget) ([]string, bool, error) {
	// Before we write any outputs, we must remove the old hash file to avoid it being
	// left in an inconsistent state.
	if err := os.RemoveAll(ruleHashFileName(target)); err != nil {
		return nil, true, err
	}
	changed := false
	tmpDir := target.TmpDir()
	outDir := target.OutDir()
	for _, output := range target.Outputs() {
		tmpOutput := path.Join(tmpDir, output)
		realOutput := path.Join(outDir, output)
		if !core.PathExists(tmpOutput) {
			return nil, true, fmt.Errorf("Rule %s failed to create output %s", target.Label, tmpOutput)
		}
		// If the output is a symlink, dereference it. Otherwise, for efficiency,
		// we can just move it without a full copy (saves copying large .jar files etc).
		dereferencedPath, err := filepath.EvalSymlinks(tmpOutput)
		if err != nil {
			return nil, true, err
		}
		// NB. false -> not filegroup, we wouldn't be here if it was.
		outputChanged, err := moveOutput(target, dereferencedPath, realOutput, false)
		if err != nil {
			return nil, true, err
		}
		changed = changed || outputChanged
	}
	if changed {
		log.Debug("Outputs for %s have changed", target.Label)
	} else {
		log.Debug("Outputs for %s are unchanged", target.Label)
	}
	// Optional outputs get moved but don't contribute to the hash or to incrementality.
	// Glob patterns are supported on these.
	extraOuts := []string{}
	for _, output := range core.Glob(tmpDir, target.OptionalOutputs, nil, nil, true) {
		log.Debug("Discovered optional output %s", output)
		tmpOutput := path.Join(tmpDir, output)
		realOutput := path.Join(outDir, output)
		if _, err := moveOutput(target, tmpOutput, realOutput, false); err != nil {
			return nil, changed, err
		}
		extraOuts = append(extraOuts, output)
	}
	return extraOuts, changed, nil
}
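
// Illustrative usage sketch, not part of the original source: finalising a
// build once the command has succeeded. recordRuleHash is a hypothetical
// stand-in for whatever rewrites the hash file that moveOutputs removed.
func exampleFinaliseOutputs(state *core.BuildState, target *core.BuildTarget, recordRuleHash func(*core.BuildTarget) error) error {
	extraOuts, changed, err := moveOutputs(state, target)
	if err != nil {
		return err
	}
	log.Debug("Moved outputs for %s (changed: %v, optional outputs: %d)", target.Label, changed, len(extraOuts))
	return recordRuleHash(target)
}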
// needsBuilding returns true if the rule needs building, false if the existing outputs are OK.
func needsBuilding(state *core.BuildState, target *core.BuildTarget, postBuild bool) bool {
	// Check the dependencies first, because they don't need any disk I/O.
	if target.NeedsTransitiveDependencies {
		if anyDependencyHasChanged(target) {
			return true // one of the transitive deps has changed, need to rebuild
		}
	} else {
		for _, dep := range target.Dependencies() {
			if dep.State() < core.Unchanged {
				log.Debug("Need to rebuild %s, %s has changed", target.Label, dep.Label)
				return true // dependency has just been rebuilt, do this too.
			}
		}
	}
	oldRuleHash, oldConfigHash, oldSourceHash := readRuleHashFile(ruleHashFileName(target), postBuild)
	if !bytes.Equal(oldConfigHash, state.Hashes.Config) {
		if len(oldConfigHash) == 0 {
			// Small nicety to make it a bit clearer what's going on.
			log.Debug("Need to build %s, outputs aren't there", target.Label)
		} else {
			log.Debug("Need to rebuild %s, config has changed (was %s, need %s)", target.Label, b64(oldConfigHash), b64(state.Hashes.Config))
		}
		return true
	}
	newRuleHash := RuleHash(target, false, postBuild)
	if !bytes.Equal(oldRuleHash, newRuleHash) {
		log.Debug("Need to rebuild %s, rule has changed (was %s, need %s)", target.Label, b64(oldRuleHash), b64(newRuleHash))
		return true
	}
	newSourceHash, err := sourceHash(state.Graph, target)
	if err != nil || !bytes.Equal(oldSourceHash, newSourceHash) {
		log.Debug("Need to rebuild %s, sources have changed (was %s, need %s)", target.Label, b64(oldSourceHash), b64(newSourceHash))
		return true
	}
	// Check that the outputs of this rule exist. This would only happen if the user had
	// removed them, but it's incredibly aggravating if you remove an output and the
	// rule won't rebuild itself.
	for _, output := range target.Outputs() {
		realOutput := path.Join(target.OutDir(), output)
		if !core.PathExists(realOutput) {
			log.Debug("Output %s doesn't exist for rule %s; will rebuild.", realOutput, target.Label)
			return true
		}
	}
	// Maybe we've forced a rebuild. Do this last; it might be interesting to see if it needed building anyway.
	return state.ForceRebuild && (state.IsOriginalTarget(target.Label) || state.IsOriginalTarget(target.Label.Parent()))
}
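
// Illustrative usage sketch, not part of the original source: the
// short-circuit a build step might take around needsBuilding.
func exampleMaybeBuild(state *core.BuildState, target *core.BuildTarget) bool {
	if !needsBuilding(state, target, false) { // false: not a post-build check
		log.Debug("Not rebuilding %s, nothing has changed", target.Label)
		return false // existing outputs are still valid
	}
	return true // caller should run the full build
}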
// prepareDirectories prepares the temporary build and output directories for a target.
func prepareDirectories(target *core.BuildTarget) error {
	if err := prepareDirectory(target.TmpDir(), true); err != nil {
		return err
	}
	if err := prepareDirectory(target.OutDir(), false); err != nil {
		return err
	}
	// Nicety for the build rules: create any directories that the rule has
	// declared it'll create files in.
	for _, out := range target.Outputs() {
		if dir := path.Dir(out); dir != "." {
			outPath := path.Join(target.TmpDir(), dir)
			if !core.PathExists(outPath) {
				if err := os.MkdirAll(outPath, core.DirPermissions); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
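
// Illustrative sketch, not part of the original source: a plausible
// prepareDirectory as called above, assuming `remove` means the directory is
// wiped first (as a tmp dir would be) rather than merely ensured to exist.
func examplePrepareDirectory(directory string, remove bool) error {
	if remove && core.PathExists(directory) {
		if err := os.RemoveAll(directory); err != nil {
			return err
		}
	}
	return os.MkdirAll(directory, core.DirPermissions)
}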
// checkAndReplaceSequence validates that a $(...) sequence in a build command makes
// sense for the given dependency, then replaces it with the dependency's outputs.
func checkAndReplaceSequence(target, dep *core.BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test, allOutputs, tool bool) string {
	if allOutputs && !multiple && len(dep.Outputs()) != 1 {
		// Label must have only one output.
		panic(fmt.Sprintf("Rule %s can't use %s; %s has multiple outputs.", target.Label, in, dep.Label))
	} else if runnable && !dep.IsBinary {
		panic(fmt.Sprintf("Rule %s can't $(exe %s), it's not executable", target.Label, dep.Label))
	} else if runnable && len(dep.Outputs()) == 0 {
		panic(fmt.Sprintf("Rule %s is tagged as binary but produces no output.", dep.Label))
	}
	if hash {
		return base64.RawURLEncoding.EncodeToString(mustShortTargetHash(core.State, dep))
	}
	output := ""
	for _, out := range dep.Outputs() {
		if allOutputs || out == in {
			if tool {
				abs, err := filepath.Abs(handleDir(dep.OutDir(), out, dir))
				if err != nil {
					log.Fatalf("Couldn't calculate absolute path: %s", err)
				}
				output += quote(abs) + " "
			} else {
				output += quote(fileDestination(target, dep, out, dir, outPrefix, test)) + " "
			}
			if dir {
				break
			}
		}
	}
	if runnable && dep.HasLabel("java_non_exe") {
		// The target is a Java target that isn't self-executable, so it needs something to run it.
		output = "java -jar " + output
	}
	return strings.TrimRight(output, " ")
}
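
// Illustrative usage sketch, not part of the original source: expanding a
// $(exe //some:binary) sequence. The positional flag values mirror the
// parameter list above and are assumptions about how callers set them.
func exampleExpandExe(target, dep *core.BuildTarget, in string) string {
	// runnable=true, multiple=false, dir=false, outPrefix=false, hash=false,
	// test=false, allOutputs=true, tool=false
	return checkAndReplaceSequence(target, dep, in, true, false, false, false, false, false, true, false)
}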