Example #1
1
func moveOutput(target *core.BuildTarget, tmpOutput, realOutput string, filegroup bool) (bool, error) {
	// hash the file
	newHash, err := pathHash(tmpOutput, false)
	if err != nil {
		return true, err
	}
	realOutputExists := core.PathExists(realOutput)
	// If this is a filegroup we hardlink the outputs over and so the two files may actually be
	// the same file. If so don't do anything else and especially don't delete & recreate the
	// file because other things might be using it already (because more than one filegroup can
	// own the same file).
	if filegroup && realOutputExists && core.IsSameFile(tmpOutput, realOutput) {
		movePathHash(tmpOutput, realOutput, filegroup) // make sure this is updated regardless
		return false, nil
	}
	if realOutputExists {
		if oldHash, err := pathHash(realOutput, false); err != nil {
			return true, err
		} else if bytes.Equal(oldHash, newHash) {
			// We already have the same file in the current location. Don't bother moving it.
			log.Debug("Checking %s vs. %s, hashes match", tmpOutput, realOutput)
			return false, nil
		}
		if err := os.RemoveAll(realOutput); err != nil {
			return true, err
		}
	}
	movePathHash(tmpOutput, realOutput, filegroup)
	// Check if we need a directory for this output.
	dir := path.Dir(realOutput)
	if !core.PathExists(dir) {
		if err := os.MkdirAll(dir, core.DirPermissions); err != nil {
			return true, err
		}
	}
	// If the output file is in plz-out/tmp we can just move it to save time; otherwise we
	// need to copy so that we don't move files out of other directories.
	if strings.HasPrefix(tmpOutput, target.TmpDir()) {
		if err := os.Rename(tmpOutput, realOutput); err != nil {
			return true, err
		}
	} else {
		if err := core.RecursiveCopyFile(tmpOutput, realOutput, target.OutMode(), filegroup, false); err != nil {
			if filegroup && os.IsExist(err) && core.IsSameFile(tmpOutput, realOutput) {
				// It's possible for two filegroups to race building simultaneously. In that
				// case one will fail with an ErrExist, which is OK as far as we're concerned
				// here as long as the file we tried to write really is the same as the input.
				return true, nil
			}
			return true, err
		}
	}
	if target.IsBinary {
		if err := os.Chmod(realOutput, target.OutMode()); err != nil {
			return true, err
		}
	}
	return true, nil
}
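// core.IsSameFile is not shown in this example. A minimal sketch of such a check,
// assuming it only needs to detect hardlinks to the same inode and using only the
// standard library's os package (an illustration, not the actual implementation):
func isSameFileSketch(a, b string) bool {
	infoA, errA := os.Stat(a)
	infoB, errB := os.Stat(b)
	// os.SameFile reports whether both FileInfos describe the same underlying
	// file, which is the case for two hardlinks pointing at one inode.
	return errA == nil && errB == nil && os.SameFile(infoA, infoB)
}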
Example #2
0
func (cache *dirCache) RetrieveExtra(target *core.BuildTarget, key []byte, out string) bool {
	outDir := path.Join(core.RepoRoot, target.OutDir())
	cacheDir := cache.getPath(target, key)
	cachedOut := path.Join(cacheDir, out)
	realOut := path.Join(outDir, out)
	if !core.PathExists(cachedOut) {
		log.Debug("%s: %s doesn't exist in dir cache", target.Label, cachedOut)
		return false
	}
	log.Debug("Retrieving %s: %s from dir cache...", target.Label, cachedOut)
	if dir := path.Dir(realOut); dir != "." {
		if err := os.MkdirAll(dir, core.DirPermissions); err != nil {
			log.Warning("Failed to create output directory %s: %s", dir, err)
			return false
		}
	}
	// It seems to be quite important that we unlink the existing file first to avoid ETXTBSY errors
	// in cases where we're running an existing binary (as Please does during bootstrap, for example).
	if err := os.RemoveAll(realOut); err != nil {
		log.Warning("Failed to unlink existing output %s: %s", realOut, err)
		return false
	}
	// Recursively hardlink files back out of the cache
	if err := core.RecursiveCopyFile(cachedOut, realOut, fileMode(target), true, true); err != nil {
		log.Warning("Failed to move cached file to output: %s -> %s: %s", cachedOut, realOut, err)
		return false
	}
	log.Debug("Retrieved %s: %s from dir cache", target.Label, cachedOut)
	return true
}
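// core.RecursiveCopyFile is not shown here; judging by the comment above, one of its
// boolean arguments asks it to hardlink rather than copy. A hedged single-file sketch
// of that idea, using only os and io/ioutil (linkOrCopy is hypothetical, for
// illustration only):
func linkOrCopy(from, to string, mode os.FileMode) error {
	if err := os.Link(from, to); err == nil {
		return nil // Hardlink succeeded; both paths now share one inode.
	}
	// Fall back to a plain copy, e.g. when linking across filesystems fails.
	data, err := ioutil.ReadFile(from)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(to, data, mode)
}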
Example #3
0
func (cache *rpcCache) retrieveArtifacts(target *core.BuildTarget, req *pb.RetrieveRequest, remove bool) bool {
	ctx, cancel := context.WithTimeout(context.Background(), cache.timeout)
	defer cancel()
	response, err := cache.client.Retrieve(ctx, req)
	if err != nil {
		log.Warning("Failed to retrieve artifacts for %s", target.Label)
		cache.error()
		return false
	} else if !response.Success {
		// Quiet, this is almost certainly just a 'not found'
		log.Debug("Couldn't retrieve artifacts for %s [key %s] from RPC cache", target.Label, base64.RawURLEncoding.EncodeToString(req.Hash))
		return false
	}
	// Remove any existing outputs first; this is important for cases where the output is a
	// directory, because we get back individual artifacts, and we need to make sure that
	// only the retrieved artifacts are present in the output.
	if remove {
		for _, out := range target.Outputs() {
			out := path.Join(target.OutDir(), out)
			if err := os.RemoveAll(out); err != nil {
				log.Error("Failed to remove artifact %s: %s", out, err)
				return false
			}
		}
	}
	for _, artifact := range response.Artifacts {
		if !cache.writeFile(target, artifact.File, artifact.Body) {
			return false
		}
	}
	// Sanity check: if we don't get anything back, assume it probably wasn't really a success.
	return len(response.Artifacts) > 0
}
Example #4
0
// Collects all the source files from a single target
func collectAllFiles(state *core.BuildState, target *core.BuildTarget, coveragePackages, allFiles map[string]bool, doneTargets map[*core.BuildTarget]bool, includeAllFiles bool) {
	doneTargets[target] = true
	if !includeAllFiles && !coveragePackages[target.Label.PackageName] {
		return
	}
	// Small hack here; explore these targets when we don't have any sources yet. Helps languages
	// like Java where we generate a wrapper target with a complete one immediately underneath.
	// TODO(pebers): do we still need this now we have Java sourcemaps?
	if !target.OutputIsComplete || len(allFiles) == 0 {
		for _, dep := range target.Dependencies() {
			if !doneTargets[dep] {
				collectAllFiles(state, dep, coveragePackages, allFiles, doneTargets, includeAllFiles)
			}
		}
	}
	if target.IsTest {
		return // Test sources don't count for coverage.
	}
	for _, path := range target.AllSourcePaths(state.Graph) {
		extension := filepath.Ext(path)
		for _, ext := range state.Config.Cover.FileExtension {
			if ext == extension {
				allFiles[path] = target.IsTest || target.TestOnly // Skip test source files from actual coverage display
				break
			}
		}
	}
}
Example #5
0
func (cache *httpCache) StoreExtra(target *core.BuildTarget, key []byte, file string) {
	if cache.Writeable {
		artifact := path.Join(
			cache.OSName,
			target.Label.PackageName,
			target.Label.Name,
			base64.RawURLEncoding.EncodeToString(key),
			file,
		)
		log.Info("Storing %s: %s in http cache...", target.Label, artifact)

		// NB. Don't need to close this file, http.Post will do it for us.
		file, err := os.Open(path.Join(target.OutDir(), file))
		if err != nil {
			log.Warning("Failed to read artifact: %s", err)
			return
		}
		response, err := http.Post(cache.Url+"/artifact/"+artifact, "application/octet-stream", file)
		if err != nil {
			log.Warning("Failed to send artifact to %s: %s", cache.Url+"/artifact/"+artifact, err)
			return
		}
		defer response.Body.Close()
		if response.StatusCode < 200 || response.StatusCode > 299 {
			log.Warning("Failed to send artifact to %s: got response %s", cache.Url+"/artifact/"+artifact, response.Status)
		}
	}
}
Example #6
0
func getLabels(target *core.BuildTarget, prefix string, minState core.BuildTargetState) []string {
	if target.State() < minState {
		log.Fatalf("get_labels called on a target that is not yet built: %s", target.Label)
	}
	labels := map[string]bool{}
	done := map[*core.BuildTarget]bool{}
	var getLabels func(*core.BuildTarget)
	getLabels = func(t *core.BuildTarget) {
		for _, label := range t.Labels {
			if strings.HasPrefix(label, prefix) {
				labels[strings.TrimSpace(strings.TrimPrefix(label, prefix))] = true
			}
		}
		done[t] = true
		if !t.OutputIsComplete || t == target {
			for _, dep := range t.Dependencies() {
				if !done[dep] {
					getLabels(dep)
				}
			}
		}
	}
	getLabels(target)
	ret := make([]string, len(labels))
	i := 0
	for label := range labels {
		ret[i] = label
		i++
	}
	sort.Strings(ret)
	return ret
}
func checkAndReplaceSequence(target, dep *core.BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test, allOutputs, tool bool) string {
	if allOutputs && !multiple && len(dep.Outputs()) != 1 {
		// Label must have only one output.
		panic(fmt.Sprintf("Rule %s can't use %s; %s has multiple outputs.", target.Label, in, dep.Label))
	} else if runnable && !dep.IsBinary {
		panic(fmt.Sprintf("Rule %s can't $(exe %s), it's not executable", target.Label, dep.Label))
	} else if runnable && len(dep.Outputs()) == 0 {
		panic(fmt.Sprintf("Rule %s is tagged as binary but produces no output.", dep.Label))
	}
	if hash {
		return base64.RawURLEncoding.EncodeToString(mustShortTargetHash(core.State, dep))
	}
	output := ""
	for _, out := range dep.Outputs() {
		if allOutputs || out == in {
			if tool {
				abs, err := filepath.Abs(handleDir(dep.OutDir(), out, dir))
				if err != nil {
					log.Fatalf("Couldn't calculate relative path: %s", err)
				}
				output += quote(abs) + " "
			} else {
				output += quote(fileDestination(target, dep, out, dir, outPrefix, test)) + " "
			}
			if dir {
				break
			}
		}
	}
	if runnable && dep.HasLabel("java_non_exe") {
		// The target is a Java target that isn't self-executable, hence it needs something to run it.
		output = "java -jar " + output
	}
	return strings.TrimRight(output, " ")
}
Example #8
0
// Attempts to detect cycles in the build graph. Returns an empty slice if none is found,
// otherwise returns a slice of targets describing the cycle.
func findGraphCycle(graph *core.BuildGraph, target *core.BuildTarget) []*core.BuildTarget {
	index := func(haystack []*core.BuildTarget, needle *core.BuildTarget) int {
		for i, straw := range haystack {
			if straw == needle {
				return i
			}
		}
		return -1
	}

	var detectCycle func(*core.BuildTarget, []*core.BuildTarget) []*core.BuildTarget
	detectCycle = func(target *core.BuildTarget, deps []*core.BuildTarget) []*core.BuildTarget {
		if i := index(deps, target); i != -1 {
			return deps[i:]
		}
		deps = append(deps, target)
		for _, dep := range target.Dependencies() {
			if cycle := detectCycle(dep, deps); len(cycle) > 0 {
				return cycle
			}
		}
		return nil
	}
	return detectCycle(target, nil)
}
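// A hedged usage sketch: reportCycles is hypothetical and assumes the graph exposes
// an AllTargets() accessor; it simply runs the detection above over every target and
// aborts on the first cycle found.
func reportCycles(graph *core.BuildGraph) {
	for _, target := range graph.AllTargets() {
		if cycle := findGraphCycle(graph, target); len(cycle) > 0 {
			labels := make([]string, len(cycle))
			for i, t := range cycle {
				labels[i] = t.Label.String()
			}
			log.Fatalf("Dependency cycle found: %s", strings.Join(labels, " -> "))
		}
	}
}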
Example #9
0
func (cache *rpcCache) loadArtifacts(target *core.BuildTarget, file string) ([]*pb.Artifact, int, error) {
	artifacts := []*pb.Artifact{}
	outDir := target.OutDir()
	root := path.Join(outDir, file)
	totalSize := 1000 // Allow a little space for encoding overhead.
	err := filepath.Walk(root, func(name string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		} else if !info.IsDir() {
			content, err := ioutil.ReadFile(name)
			if err != nil {
				return err
			}
			artifacts = append(artifacts, &pb.Artifact{
				Package: target.Label.PackageName,
				Target:  target.Label.Name,
				File:    name[len(outDir)+1:],
				Body:    content,
			})
			totalSize += len(content)
		}
		return nil
	})
	return artifacts, totalSize, err
}
func fileDestination(target, dep *core.BuildTarget, out string, dir, outPrefix, test bool) string {
	if outPrefix {
		return handleDir(dep.OutDir(), out, dir)
	}
	if test && target == dep {
		// Slightly fiddly case because tests put binaries in a possibly slightly unusual place.
		return "./" + out
	}
	return handleDir(dep.Label.PackageName, out, dir)
}
Example #11
0
// Yields all cacheable artifacts from this target. Useful for cache implementations
// so they don't have to reinvent logic around post-build functions etc.
func cacheArtifacts(target *core.BuildTarget) <-chan string {
	ch := make(chan string, 10)
	go func() {
		for _, out := range target.Outputs() {
			ch <- out
		}
		close(ch)
	}()
	return ch
}
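// A minimal consumption sketch: cache implementations range over the channel and
// store each artifact in turn. The storer interface here is hypothetical, modelled
// on the StoreExtra signatures shown in the other examples.
type extraStorer interface {
	StoreExtra(target *core.BuildTarget, key []byte, file string)
}

func storeAllArtifacts(cache extraStorer, target *core.BuildTarget, key []byte) {
	for out := range cacheArtifacts(target) {
		cache.StoreExtra(target, key, out)
	}
}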
Example #12
0
// RemoveOutputs removes all generated outputs for a rule.
func RemoveOutputs(target *core.BuildTarget) error {
	if err := os.Remove(ruleHashFileName(target)); err != nil && !os.IsNotExist(err) {
		return err
	}
	for _, output := range target.Outputs() {
		if err := os.RemoveAll(path.Join(target.OutDir(), output)); err != nil {
			return err
		}
	}
	return nil
}
Example #13
0
// For targets that have post-build functions, we have to store the target's output
// and retrieve it later to feed to that function.
func loadPostBuildOutput(state *core.BuildState, target *core.BuildTarget) string {
	// Normally filegroups don't have post-build functions, but we use this sometimes for testing.
	if target.IsFilegroup() {
		return ""
	}
	out, err := ioutil.ReadFile(postBuildOutputFileName(target))
	if err != nil {
		panic(err)
	}
	return string(out)
}
Example #14
0
// retrieveFile retrieves a single file (or directory) from a Docker container.
func retrieveFile(target *core.BuildTarget, cid []byte, filename string, warn bool) {
	log.Debug("Attempting to retrieve file %s for %s...", filename, target.Label)
	timeout := core.State.Config.Docker.ResultsTimeout
	cmd := []string{"docker", "cp", string(cid) + ":/tmp/test/" + filename, target.TestDir()}
	if out, err := core.ExecWithTimeoutSimple(timeout, cmd...); err != nil {
		if warn {
			log.Warning("Failed to retrieve results for %s: %s [%s]", target.Label, err, out)
		} else {
			log.Debug("Failed to retrieve results for %s: %s [%s]", target.Label, err, out)
		}
	}
}
Example #15
0
func runTest(state *core.BuildState, target *core.BuildTarget) ([]byte, error) {
	replacedCmd := build.ReplaceTestSequences(target, target.GetTestCommand())
	env := core.BuildEnvironment(state, target, true)
	if len(state.TestArgs) > 0 {
		args := strings.Join(state.TestArgs, " ")
		replacedCmd += " " + args
		env = append(env, "TESTS="+args)
	}
	log.Debug("Running test %s\nENVIRONMENT:\n%s\n%s", target.Label, strings.Join(env, "\n"), replacedCmd)
	_, out, err := core.ExecWithTimeoutShell(target.TestDir(), env, target.TestTimeout, state.Config.Test.Timeout, state.ShowAllOutput, replacedCmd)
	return out, err
}
func replaceSequenceLabel(target *core.BuildTarget, label core.BuildLabel, in string, runnable, multiple, dir, outPrefix, hash, test, allOutputs bool) string {
	// Check this label is a dependency of the target, otherwise it's not allowed.
	if label == target.Label { // targets can always use themselves.
		return checkAndReplaceSequence(target, target, in, runnable, multiple, dir, outPrefix, hash, test, allOutputs, false)
	}
	deps := target.DependenciesFor(label)
	if len(deps) == 0 {
		panic(fmt.Sprintf("Rule %s can't use %s; doesn't depend on target %s", target.Label, in, label))
	}
	// TODO(pebers): this does not correctly handle the case where there are multiple deps here
	//               (but is better than the previous case where it never worked at all)
	return checkAndReplaceSequence(target, deps[0], in, runnable, multiple, dir, outPrefix, hash, test, allOutputs, target.IsTool(label))
}
Example #17
0
func (cache *rpcCache) writeFile(target *core.BuildTarget, file string, body []byte) bool {
	out := path.Join(target.OutDir(), file)
	if err := os.MkdirAll(path.Dir(out), core.DirPermissions); err != nil {
		log.Warning("Failed to create directory for artifacts: %s", err)
		return false
	}
	if err := core.WriteFile(bytes.NewReader(body), out, fileMode(target)); err != nil {
		log.Warning("RPC cache failed to write file %s", err)
		return false
	}
	log.Debug("Retrieved %s - %s from RPC cache", target.Label, file)
	return true
}
Example #18
0
func prepareTestDir(graph *core.BuildGraph, target *core.BuildTarget) error {
	if err := os.RemoveAll(target.TestDir()); err != nil {
		return err
	}
	if err := os.MkdirAll(target.TestDir(), core.DirPermissions); err != nil {
		return err
	}
	for out := range core.IterRuntimeFiles(graph, target, true) {
		if err := core.PrepareSourcePair(out); err != nil {
			return err
		}
	}
	return nil
}
Example #19
0
// OutputHash calculates the hash of a target's outputs.
func OutputHash(target *core.BuildTarget) ([]byte, error) {
	h := sha1.New()
	for _, output := range target.Outputs() {
		// NB. Always force a recalculation of the output hashes here. Memoisation is not
		//     useful because by definition we are rebuilding a target, and can actively hurt
		//     in cases where we compare the retrieved cache artifacts with what was there before.
		h2, err := pathHash(path.Join(target.OutDir(), output), true)
		if err != nil {
			return nil, err
		}
		h.Write(h2)
	}
	return h.Sum(nil), nil
}
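// The same hash-of-hashes pattern, illustrated on plain files: each file is hashed
// individually and the per-file digests are hashed together, so the result changes
// if any single output changes. combineFileHashes is a hypothetical helper shown
// only to clarify the combining step above; it uses only crypto/sha1 and io/ioutil.
func combineFileHashes(paths []string) ([]byte, error) {
	h := sha1.New()
	for _, p := range paths {
		content, err := ioutil.ReadFile(p)
		if err != nil {
			return nil, err
		}
		sum := sha1.Sum(content)
		h.Write(sum[:]) // Feed each per-file digest into the combined hash.
	}
	return h.Sum(nil), nil
}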
Example #20
0
// This is just a simple DFS through the graph.
func printSomePath(graph *core.BuildGraph, target1, target2 *core.BuildTarget) bool {
	if target1 == target2 {
		fmt.Printf("Found path:\n  %s\n", target1.Label)
		return true
	}
	for _, target := range graph.ReverseDependencies(target2) {
		if printSomePath(graph, target1, target) {
			if target2.Parent(graph) != target {
				fmt.Printf("  %s\n", target2.Label)
			}
			return true
		}
	}
	return false
}
// Replaces a single escape sequence in a command.
func replaceSequence(target *core.BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test bool) string {
	if core.LooksLikeABuildLabel(in) {
		label := core.ParseBuildLabel(in, target.Label.PackageName)
		return replaceSequenceLabel(target, label, in, runnable, multiple, dir, outPrefix, hash, test, true)
	}
	for _, src := range target.AllSources() {
		if label := src.Label(); label != nil && src.String() == in {
			return replaceSequenceLabel(target, *label, in, runnable, multiple, dir, outPrefix, hash, test, false)
		}
	}
	if hash {
		return base64.RawURLEncoding.EncodeToString(mustPathHash(path.Join(target.Label.PackageName, in)))
	}
	return quote(path.Join(target.Label.PackageName, in))
}
Example #22
0
func printTarget(state *core.BuildState, target *core.BuildTarget, indent string, targets map[*core.BuildTarget]bool, unique bool) {
	if unique && targets[target] {
		return
	}
	targets[target] = true
	if target.ShouldInclude(state.Include, state.Exclude) {
		fmt.Printf("%s%s\n", indent, target.Label)
	}
	if !unique {
		indent = indent + "  "
	}
	for _, dep := range target.Dependencies() {
		printTarget(state, dep, indent, targets, unique)
	}
}
Example #23
0
func (cache *httpCache) writeFile(target *core.BuildTarget, file string, r io.Reader) bool {
	outFile := path.Join(target.OutDir(), file)
	if err := os.MkdirAll(path.Dir(outFile), core.DirPermissions); err != nil {
		log.Errorf("Failed to create directory: %s", err)
		return false
	}
	f, err := os.OpenFile(outFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, fileMode(target))
	if err != nil {
		log.Errorf("Failed to open file: %s", err)
		return false
	}
	defer f.Close()
	if _, err := io.Copy(f, r); err != nil {
		log.Errorf("Failed to write file: %s", err)
		return false
	}
	log.Info("Retrieved %s from http cache", target.Label)
	return true
}
Example #24
0
// RuleHash calculates a hash for the relevant bits of this rule that affect its output.
// Optionally it can include parts of the rule that affect runtime (most obviously test-time).
// Note that we have to hash on the declared fields; we obviously can't hash pointers etc.
// incrementality_test will warn if new fields are added to the struct but not here.
func RuleHash(target *core.BuildTarget, runtime, postBuild bool) []byte {
	if runtime || (postBuild && target.PostBuildFunction != 0) {
		return ruleHash(target, runtime)
	}
	// Non-post-build hashes get stored on the target itself.
	if len(target.RuleHash) != 0 {
		return target.RuleHash
	}
	target.RuleHash = ruleHash(target, false) // This is never a runtime hash.
	return target.RuleHash
}
Example #25
0
func (cache *dirCache) storeFile(target *core.BuildTarget, out, cacheDir string) {
	log.Debug("Storing %s: %s in dir cache...", target.Label, out)
	if dir := path.Dir(out); dir != "." {
		if err := os.MkdirAll(path.Join(cacheDir, dir), core.DirPermissions); err != nil {
			log.Warning("Failed to create cache directory %s: %s", path.Join(cacheDir, dir), err)
			return
		}
	}
	outFile := path.Join(core.RepoRoot, target.OutDir(), out)
	cachedFile := path.Join(cacheDir, out)
	// Remove anything existing
	if err := os.RemoveAll(cachedFile); err != nil {
		log.Warning("Failed to remove existing cached file %s: %s", cachedFile, err)
	} else if err := os.MkdirAll(cacheDir, core.DirPermissions); err != nil {
		log.Warning("Failed to create cache directory %s: %s", cacheDir, err)
		return
	} else if err := core.RecursiveCopyFile(outFile, cachedFile, fileMode(target), true, true); err != nil {
		// Cannot hardlink files into the cache, must copy them for reals.
		log.Warning("Failed to store cache file %s: %s", cachedFile, err)
	}
}
Example #26
0
func makeJSONTarget(graph *core.BuildGraph, target *core.BuildTarget) JSONTarget {
	t := JSONTarget{}
	for in := range core.IterSources(graph, target) {
		t.Inputs = append(t.Inputs, in.Src)
	}
	for _, out := range target.Outputs() {
		t.Outputs = append(t.Outputs, path.Join(target.Label.PackageName, out))
	}
	for _, src := range target.AllSourcePaths(graph) {
		t.Sources = append(t.Sources, src)
	}
	for _, dep := range target.Dependencies() {
		t.Deps = append(t.Deps, dep.Label.String())
	}
	for data := range core.IterRuntimeFiles(graph, target, false) {
		t.Data = append(t.Data, data.Src)
	}
	t.Labels = target.Labels
	t.Requires = target.Requires
	rawHash := append(build.RuleHash(target, true, false), core.State.Hashes.Config...)
	t.Hash = base64.RawStdEncoding.EncodeToString(rawHash)
	t.Test = target.IsTest
	t.Binary = target.IsBinary
	t.TestOnly = target.TestOnly
	return t
}
Example #27
0
func (m *metrics) record(target *core.BuildTarget, duration time.Duration) {
	if target.Results.NumTests > 0 {
		// Tests have run
		m.cacheCounter.WithLabelValues(b(target.Results.Cached)).Inc()
		m.testCounter.WithLabelValues(b(target.Results.Failed == 0)).Inc()
		if target.Results.Cached {
			m.cacheHistogram.WithLabelValues().Observe(duration.Seconds())
		} else if target.Results.Failed == 0 {
			m.testHistogram.WithLabelValues().Observe(duration.Seconds())
		}
	} else {
		// Build has run
		state := target.State()
		m.cacheCounter.WithLabelValues(b(state == core.Cached)).Inc()
		m.buildCounter.WithLabelValues(b(state != core.Failed), b(state != core.Reused)).Inc()
		if state == core.Cached {
			m.cacheHistogram.WithLabelValues().Observe(duration.Seconds())
		} else if state != core.Failed && state >= core.Built {
			m.buildHistogram.WithLabelValues().Observe(duration.Seconds())
		}
	}
	m.newMetrics = true
}
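// The counters and histograms used above are constructed elsewhere. A hedged sketch
// of what that construction might look like with the Prometheus Go client
// (github.com/prometheus/client_golang/prometheus); the metric and label names here
// are placeholders, not the real ones.
var (
	testCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "test_runs_total",
		Help: "Number of test runs, labelled by whether they passed.",
	}, []string{"success"})
	buildCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "build_runs_total",
		Help: "Number of builds, labelled by success and incrementality.",
	}, []string{"success", "incremental"})
)

// The real b() helper is not shown in this example; a sketch consistent with how
// record() calls it (bool in, label value out) might look like this:
func b(v bool) string {
	if v {
		return "true"
	}
	return "false"
}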
Example #28
0
// buildFilegroup runs the manual build steps for a filegroup rule.
// We don't force this to be done in bash to avoid errors with maximum command lengths,
// and it's actually quite fiddly to get just right there.
func buildFilegroup(tid int, state *core.BuildState, target *core.BuildTarget) error {
	if err := prepareDirectory(target.OutDir(), false); err != nil {
		return err
	}
	if err := os.RemoveAll(ruleHashFileName(target)); err != nil {
		return err
	}
	changed := false
	outDir := target.OutDir()
	for _, source := range target.Sources {
		fullPaths := source.FullPaths(state.Graph)
		for i, sourcePath := range source.LocalPaths(state.Graph) {
			outPath := path.Join(outDir, sourcePath)
			c, err := moveOutput(target, fullPaths[i], outPath, true)
			if err != nil {
				return err
			}
			changed = changed || c
		}
	}
	if target.HasLabel("py") && !target.IsBinary {
		// Pre-emptively create __init__.py files so the outputs can be loaded dynamically.
		// It's a bit cheeky to do non-essential language-specific logic but this enables
		// a lot of relatively normal Python workflows.
		// Errors are deliberately ignored.
		createInitPy(outDir)
	}
	if _, err := calculateAndCheckRuleHash(state, target); err != nil {
		return err
	} else if changed {
		target.SetState(core.Built)
	} else {
		target.SetState(core.Unchanged)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built")
	return nil
}
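// createInitPy is referenced above but not shown. A hedged sketch of what such a
// helper might do (an assumption, not the actual implementation): walk the output
// directory and drop an empty __init__.py into any directory missing one.
func createInitPySketch(dir string) {
	filepath.Walk(dir, func(name string, info os.FileInfo, err error) error {
		if err == nil && info.IsDir() {
			initPy := path.Join(name, "__init__.py")
			if !core.PathExists(initPy) {
				// Errors are deliberately ignored, matching the caller's comment.
				if f, err := os.OpenFile(initPy, os.O_CREATE|os.O_WRONLY, 0644); err == nil {
					f.Close()
				}
			}
		}
		return nil
	})
}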
Example #29
0
func buildResult(target *core.BuildTarget) []string {
	results := []string{}
	if target != nil {
		for _, out := range target.Outputs() {
			if core.StartedAtRepoRoot() {
				results = append(results, path.Join(target.OutDir(), out))
			} else {
				results = append(results, path.Join(core.RepoRoot, target.OutDir(), out))
			}
		}
	}
	return results
}
Example #30
0
// Prepares the output directories for a target
func prepareDirectories(target *core.BuildTarget) error {
	if err := prepareDirectory(target.TmpDir(), true); err != nil {
		return err
	}
	if err := prepareDirectory(target.OutDir(), false); err != nil {
		return err
	}
	// Nicety for the build rules: create any directories that the target has
	// declared it will create files in.
	for _, out := range target.Outputs() {
		if dir := path.Dir(out); dir != "." {
			outPath := path.Join(target.TmpDir(), dir)
			if !core.PathExists(outPath) {
				if err := os.MkdirAll(outPath, core.DirPermissions); err != nil {
					return err
				}
			}
		}
	}
	return nil
}