Example #1
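// loadArtifacts walks the given path under the target's output directory and collects each file
// into a cache Artifact, returning the artifacts plus an estimate of their total encoded size.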
func (cache *rpcCache) loadArtifacts(target *core.BuildTarget, file string) ([]*pb.Artifact, int, error) {
	artifacts := []*pb.Artifact{}
	outDir := target.OutDir()
	root := path.Join(outDir, file)
	totalSize := 1000 // Allow a little space for encoding overhead.
	err := filepath.Walk(root, func(name string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		} else if !info.IsDir() {
			content, err := ioutil.ReadFile(name)
			if err != nil {
				return err
			}
			artifacts = append(artifacts, &pb.Artifact{
				Package: target.Label.PackageName,
				Target:  target.Label.Name,
				File:    name[len(outDir)+1:],
				Body:    content,
			})
			totalSize += len(content)
		}
		return nil
	})
	return artifacts, totalSize, err
}
Example #2
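// StoreExtra uploads a single file from the target's output directory to the HTTP cache,
// keyed by OS name, target label and the given cache key. It does nothing if the cache isn't writeable.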
func (cache *httpCache) StoreExtra(target *core.BuildTarget, key []byte, file string) {
	if cache.Writeable {
		artifact := path.Join(
			cache.OSName,
			target.Label.PackageName,
			target.Label.Name,
			base64.RawURLEncoding.EncodeToString(key),
			file,
		)
		log.Info("Storing %s: %s in http cache...", target.Label, artifact)

		// NB. Don't need to close this file, http.Post will do it for us.
		file, err := os.Open(path.Join(target.OutDir(), file))
		if err != nil {
			log.Warning("Failed to read artifact: %s", err)
			return
		}
		response, err := http.Post(cache.Url+"/artifact/"+artifact, "application/octet-stream", file)
		if err != nil {
			// http.Post returns a nil response on error, so bail out before touching its body.
			log.Warning("Failed to send artifact to %s: %s", cache.Url+"/artifact/"+artifact, err)
			return
		}
		if response.StatusCode < 200 || response.StatusCode > 299 {
			log.Warning("Failed to send artifact to %s: got response %s", cache.Url+"/artifact/"+artifact, response.Status)
		}
		response.Body.Close()
	}
}
Example #3
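// RetrieveExtra copies a single output for the given target and key out of the directory cache
// into its real output location, returning true on success.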
func (cache *dirCache) RetrieveExtra(target *core.BuildTarget, key []byte, out string) bool {
	outDir := path.Join(core.RepoRoot, target.OutDir())
	cacheDir := cache.getPath(target, key)
	cachedOut := path.Join(cacheDir, out)
	realOut := path.Join(outDir, out)
	if !core.PathExists(cachedOut) {
		log.Debug("%s: %s doesn't exist in dir cache", target.Label, cachedOut)
		return false
	}
	log.Debug("Retrieving %s: %s from dir cache...", target.Label, cachedOut)
	if dir := path.Dir(realOut); dir != "." {
		if err := os.MkdirAll(dir, core.DirPermissions); err != nil {
			log.Warning("Failed to create output directory %s: %s", dir, err)
			return false
		}
	}
	// It seems to be quite important that we unlink the existing file first to avoid ETXTBSY errors
	// in cases where we're running an existing binary (as Please does during bootstrap, for example).
	if err := os.RemoveAll(realOut); err != nil {
		log.Warning("Failed to unlink existing output %s: %s", realOut, err)
		return false
	}
	// Recursively hardlink files back out of the cache
	if err := core.RecursiveCopyFile(cachedOut, realOut, fileMode(target), true, true); err != nil {
		log.Warning("Failed to move cached file to output: %s -> %s: %s", cachedOut, realOut, err)
		return false
	}
	log.Debug("Retrieved %s: %s from dir cache", target.Label, cachedOut)
	return true
}
Example #4
// checkAndReplaceSequence validates and expands one command replacement sequence for the given
// dependency, returning its quoted output paths, its short hash, or a runnable invocation as appropriate.
func checkAndReplaceSequence(target, dep *core.BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test, allOutputs, tool bool) string {
	if allOutputs && !multiple && len(dep.Outputs()) != 1 {
		// Label must have only one output.
		panic(fmt.Sprintf("Rule %s can't use %s; %s has multiple outputs.", target.Label, in, dep.Label))
	} else if runnable && !dep.IsBinary {
		panic(fmt.Sprintf("Rule %s can't $(exe %s), it's not executable", target.Label, dep.Label))
	} else if runnable && len(dep.Outputs()) == 0 {
		panic(fmt.Sprintf("Rule %s is tagged as binary but produces no output.", dep.Label))
	}
	if hash {
		return base64.RawURLEncoding.EncodeToString(mustShortTargetHash(core.State, dep))
	}
	output := ""
	for _, out := range dep.Outputs() {
		if allOutputs || out == in {
			if tool {
				abs, err := filepath.Abs(handleDir(dep.OutDir(), out, dir))
				if err != nil {
					log.Fatalf("Couldn't calculate relative path: %s", err)
				}
				output += quote(abs) + " "
			} else {
				output += quote(fileDestination(target, dep, out, dir, outPrefix, test)) + " "
			}
			if dir {
				break
			}
		}
	}
	if runnable && dep.HasLabel("java_non_exe") {
		// The target is a Java target that isn't self-executable, hence it needs something to run it.
		output = "java -jar " + output
	}
	return strings.TrimRight(output, " ")
}
Example #5
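// retrieveArtifacts performs the RPC retrieval call and writes any returned artifacts into the
// target's output directory, optionally removing its existing outputs first.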
func (cache *rpcCache) retrieveArtifacts(target *core.BuildTarget, req *pb.RetrieveRequest, remove bool) bool {
	ctx, cancel := context.WithTimeout(context.Background(), cache.timeout)
	defer cancel()
	response, err := cache.client.Retrieve(ctx, req)
	if err != nil {
		log.Warning("Failed to retrieve artifacts for %s", target.Label)
		cache.error()
		return false
	} else if !response.Success {
		// Quiet, this is almost certainly just a 'not found'
		log.Debug("Couldn't retrieve artifacts for %s [key %s] from RPC cache", target.Label, base64.RawURLEncoding.EncodeToString(req.Hash))
		return false
	}
	// Remove any existing outputs first; this is important for cases where the output is a
	// directory, because we get back individual artifacts, and we need to make sure that
	// only the retrieved artifacts are present in the output.
	if remove {
		for _, out := range target.Outputs() {
			out := path.Join(target.OutDir(), out)
			if err := os.RemoveAll(out); err != nil {
				log.Error("Failed to remove artifact %s: %s", out, err)
				return false
			}
		}
	}
	for _, artifact := range response.Artifacts {
		if !cache.writeFile(target, artifact.File, artifact.Body) {
			return false
		}
	}
	// Sanity check: if we don't get anything back, assume it probably wasn't really a success.
	return len(response.Artifacts) > 0
}
Example #6
// fileDestination returns the path at which a dependency's output should be referenced from the
// build command, relative to the appropriate output or package directory.
func fileDestination(target, dep *core.BuildTarget, out string, dir, outPrefix, test bool) string {
	if outPrefix {
		return handleDir(dep.OutDir(), out, dir)
	}
	if test && target == dep {
		// Slightly fiddly case because tests put binaries in a possibly slightly unusual place.
		return "./" + out
	}
	return handleDir(dep.Label.PackageName, out, dir)
}
Example #7
// RemoveOutputs removes all generated outputs for a rule.
func RemoveOutputs(target *core.BuildTarget) error {
	if err := os.Remove(ruleHashFileName(target)); err != nil && !os.IsNotExist(err) {
		return err
	}
	for _, output := range target.Outputs() {
		if err := os.RemoveAll(path.Join(target.OutDir(), output)); err != nil {
			return err
		}
	}
	return nil
}
Example #8
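// writeFile writes a single artifact body retrieved from the RPC cache into the target's
// output directory, returning true on success.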
func (cache *rpcCache) writeFile(target *core.BuildTarget, file string, body []byte) bool {
	out := path.Join(target.OutDir(), file)
	if err := os.MkdirAll(path.Dir(out), core.DirPermissions); err != nil {
		log.Warning("Failed to create directory for artifacts: %s", err)
		return false
	}
	if err := core.WriteFile(bytes.NewReader(body), out, fileMode(target)); err != nil {
		log.Warning("RPC cache failed to write file %s", err)
		return false
	}
	log.Debug("Retrieved %s - %s from RPC cache", target.Label, file)
	return true
}
Example #9
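// buildResult returns the output paths for a target, prefixed with the repo root when the
// build wasn't started from it.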
func buildResult(target *core.BuildTarget) []string {
	results := []string{}
	if target != nil {
		for _, out := range target.Outputs() {
			if core.StartedAtRepoRoot() {
				results = append(results, path.Join(target.OutDir(), out))
			} else {
				results = append(results, path.Join(core.RepoRoot, target.OutDir(), out))
			}
		}
	}
	return results
}
Example #10
// OutputHash calculates the hash of a target's outputs.
func OutputHash(target *core.BuildTarget) ([]byte, error) {
	h := sha1.New()
	for _, output := range target.Outputs() {
		// NB. Always force a recalculation of the output hashes here. Memoisation is not
		//     useful because by definition we are rebuilding a target, and can actively hurt
		//     in cases where we compare the retrieved cache artifacts with what was there before.
		h2, err := pathHash(path.Join(target.OutDir(), output), true)
		if err != nil {
			return nil, err
		}
		h.Write(h2)
	}
	return h.Sum(nil), nil
}
Example #11
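// moveOutputs moves a target's declared outputs from its temporary build directory into its
// output directory, returning any optional outputs found and whether anything changed.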
func moveOutputs(state *core.BuildState, target *core.BuildTarget) ([]string, bool, error) {
	// Before we write any outputs, we must remove the old hash file to avoid it being
	// left in an inconsistent state.
	if err := os.RemoveAll(ruleHashFileName(target)); err != nil {
		return nil, true, err
	}
	changed := false
	tmpDir := target.TmpDir()
	outDir := target.OutDir()
	for _, output := range target.Outputs() {
		tmpOutput := path.Join(tmpDir, output)
		realOutput := path.Join(outDir, output)
		if !core.PathExists(tmpOutput) {
			return nil, true, fmt.Errorf("Rule %s failed to create output %s", target.Label, tmpOutput)
		}
		// If output is a symlink, dereference it. Otherwise, for efficiency,
		// we can just move it without a full copy (saves copying large .jar files etc).
		dereferencedPath, err := filepath.EvalSymlinks(tmpOutput)
		if err != nil {
			return nil, true, err
		}
		// NB. false -> not filegroup, we wouldn't be here if it was.
		outputChanged, err := moveOutput(target, dereferencedPath, realOutput, false)
		if err != nil {
			return nil, true, err
		}
		changed = changed || outputChanged
	}
	if changed {
		log.Debug("Outputs for %s have changed", target.Label)
	} else {
		log.Debug("Outputs for %s are unchanged", target.Label)
	}
	// Optional outputs get moved but don't contribute to the hash or to incrementality.
	// Glob patterns are supported on these.
	extraOuts := []string{}
	for _, output := range core.Glob(tmpDir, target.OptionalOutputs, nil, nil, true) {
		log.Debug("Discovered optional output %s", output)
		tmpOutput := path.Join(tmpDir, output)
		realOutput := path.Join(outDir, output)
		if _, err := moveOutput(target, tmpOutput, realOutput, false); err != nil {
			return nil, changed, err
		}
		extraOuts = append(extraOuts, output)
	}
	return extraOuts, changed, nil
}
Example #12
// Return true if the rule needs building, false if the existing outputs are OK.
func needsBuilding(state *core.BuildState, target *core.BuildTarget, postBuild bool) bool {
	// Check the dependencies first, because they don't need any disk I/O.
	if target.NeedsTransitiveDependencies {
		if anyDependencyHasChanged(target) {
			return true // one of the transitive deps has changed, need to rebuild
		}
	} else {
		for _, dep := range target.Dependencies() {
			if dep.State() < core.Unchanged {
				log.Debug("Need to rebuild %s, %s has changed", target.Label, dep.Label)
				return true // dependency has just been rebuilt, do this too.
			}
		}
	}
	oldRuleHash, oldConfigHash, oldSourceHash := readRuleHashFile(ruleHashFileName(target), postBuild)
	if !bytes.Equal(oldConfigHash, state.Hashes.Config) {
		if len(oldConfigHash) == 0 {
			// Small nicety to make it a bit clearer what's going on.
			log.Debug("Need to build %s, outputs aren't there", target.Label)
		} else {
			log.Debug("Need to rebuild %s, config has changed (was %s, need %s)", target.Label, b64(oldConfigHash), b64(state.Hashes.Config))
		}
		return true
	}
	newRuleHash := RuleHash(target, false, postBuild)
	if !bytes.Equal(oldRuleHash, newRuleHash) {
		log.Debug("Need to rebuild %s, rule has changed (was %s, need %s)", target.Label, b64(oldRuleHash), b64(newRuleHash))
		return true
	}
	newSourceHash, err := sourceHash(state.Graph, target)
	if err != nil || !bytes.Equal(oldSourceHash, newSourceHash) {
		log.Debug("Need to rebuild %s, sources have changed (was %s, need %s)", target.Label, b64(oldSourceHash), b64(newSourceHash))
		return true
	}
	// Check the outputs of this rule exist. This would only happen if the user had
	// removed them but it's incredibly aggravating if you remove an output and the
	// rule won't rebuild itself.
	for _, output := range target.Outputs() {
		realOutput := path.Join(target.OutDir(), output)
		if !core.PathExists(realOutput) {
			log.Debug("Output %s doesn't exist for rule %s; will rebuild.", realOutput, target.Label)
			return true
		}
	}
	// Maybe we've forced a rebuild. Do this last; might be interesting to see if it needed building anyway.
	return state.ForceRebuild && (state.IsOriginalTarget(target.Label) || state.IsOriginalTarget(target.Label.Parent()))
}
Example #13
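// writeFile streams a single artifact retrieved from the HTTP cache into the target's
// output directory, returning true on success.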
func (cache *httpCache) writeFile(target *core.BuildTarget, file string, r io.Reader) bool {
	outFile := path.Join(target.OutDir(), file)
	if err := os.MkdirAll(path.Dir(outFile), core.DirPermissions); err != nil {
		log.Errorf("Failed to create directory: %s", err)
		return false
	}
	f, err := os.OpenFile(outFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, fileMode(target))
	if err != nil {
		log.Errorf("Failed to open file: %s", err)
		return false
	}
	defer f.Close()
	if _, err := io.Copy(f, r); err != nil {
		log.Errorf("Failed to write file: %s", err)
		return false
	}
	log.Info("Retrieved %s from http cache", target.Label)
	return true
}
Example #14
// Prepares the output directories for a target
func prepareDirectories(target *core.BuildTarget) error {
	if err := prepareDirectory(target.TmpDir(), true); err != nil {
		return err
	}
	if err := prepareDirectory(target.OutDir(), false); err != nil {
		return err
	}
	// Nicety for the build rules: create any directories that the target
	// has declared it'll create files in.
	for _, out := range target.Outputs() {
		if dir := path.Dir(out); dir != "." {
			outPath := path.Join(target.TmpDir(), dir)
			if !core.PathExists(outPath) {
				if err := os.MkdirAll(outPath, core.DirPermissions); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
Example #15
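// storeFile copies a single output of a target into the given cache directory, replacing any
// previously cached version of it.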
func (cache *dirCache) storeFile(target *core.BuildTarget, out, cacheDir string) {
	log.Debug("Storing %s: %s in dir cache...", target.Label, out)
	if dir := path.Dir(out); dir != "." {
		if err := os.MkdirAll(path.Join(cacheDir, dir), core.DirPermissions); err != nil {
			log.Warning("Failed to create cache directory %s: %s", path.Join(cacheDir, dir), err)
			return
		}
	}
	outFile := path.Join(core.RepoRoot, target.OutDir(), out)
	cachedFile := path.Join(cacheDir, out)
	// Remove anything existing
	if err := os.RemoveAll(cachedFile); err != nil {
		log.Warning("Failed to remove existing cached file %s: %s", cachedFile, err)
	} else if err := os.MkdirAll(cacheDir, core.DirPermissions); err != nil {
		log.Warning("Failed to create cache directory %s: %s", cacheDir, err)
		return
	} else if err := core.RecursiveCopyFile(outFile, cachedFile, fileMode(target), true, true); err != nil {
		// Cannot hardlink files into the cache, must copy them for reals.
		log.Warning("Failed to store cache file %s: %s", cachedFile, err)
	}
}
Example #16
// buildFilegroup runs the manual build steps for a filegroup rule.
// We don't do this in bash, partly to avoid errors from hitting maximum command lengths
// and partly because it's quite fiddly to get exactly right there anyway.
func buildFilegroup(tid int, state *core.BuildState, target *core.BuildTarget) error {
	if err := prepareDirectory(target.OutDir(), false); err != nil {
		return err
	}
	if err := os.RemoveAll(ruleHashFileName(target)); err != nil {
		return err
	}
	changed := false
	outDir := target.OutDir()
	for _, source := range target.Sources {
		fullPaths := source.FullPaths(state.Graph)
		for i, sourcePath := range source.LocalPaths(state.Graph) {
			outPath := path.Join(outDir, sourcePath)
			c, err := moveOutput(target, fullPaths[i], outPath, true)
			if err != nil {
				return err
			}
			changed = changed || c
		}
	}
	if target.HasLabel("py") && !target.IsBinary {
		// Pre-emptively create __init__.py files so the outputs can be loaded dynamically.
		// It's a bit cheeky to do non-essential language-specific logic but this enables
		// a lot of relatively normal Python workflows.
		// Errors are deliberately ignored.
		createInitPy(outDir)
	}
	if _, err := calculateAndCheckRuleHash(state, target); err != nil {
		return err
	} else if changed {
		target.SetState(core.Built)
	} else {
		target.SetState(core.Unchanged)
	}
	state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built")
	return nil
}
Example #17
// RemoveCachedTestFiles removes any cached test or coverage result files for a target.
func RemoveCachedTestFiles(target *core.BuildTarget) error {
	if err := removeAnyFilesWithPrefix(target.OutDir(), ".test_results_"+target.Label.Name); err != nil {
		return err
	}
	if err := removeAnyFilesWithPrefix(target.OutDir(), ".test_coverage_"+target.Label.Name); err != nil {
		return err
	}
	for _, output := range target.TestOutputs {
		if err := os.RemoveAll(path.Join(target.OutDir(), output)); err != nil {
			return err
		}
	}
	return nil
}
Example #18
// Returns the filename we'll store the hashes for this target in.
func ruleHashFileName(target *core.BuildTarget) string {
	return path.Join(target.OutDir(), ".rule_hash_"+target.Label.Name)
}
Example #19
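// test runs a single test target, possibly several times for flaky tests, reusing cached results
// where possible, and records the parsed results and coverage on the target.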
func test(tid int, state *core.BuildState, label core.BuildLabel, target *core.BuildTarget) {
	startTime := time.Now()
	hash, err := build.RuntimeHash(state, target)
	if err != nil {
		state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to calculate target hash")
		return
	}
	// Check the cached output files if the target wasn't rebuilt.
	hash = core.CollapseHash(hash)
	hashStr := base64.RawURLEncoding.EncodeToString(hash)
	resultsFileName := fmt.Sprintf(".test_results_%s_%s", label.Name, hashStr)
	coverageFileName := fmt.Sprintf(".test_coverage_%s_%s", label.Name, hashStr)
	outputFile := path.Join(target.TestDir(), "test.results")
	coverageFile := path.Join(target.TestDir(), "test.coverage")
	cachedOutputFile := path.Join(target.OutDir(), resultsFileName)
	cachedCoverageFile := path.Join(target.OutDir(), coverageFileName)
	needCoverage := state.NeedCoverage && !target.NoTestOutput

	cachedTest := func() {
		log.Debug("Not re-running test %s; got cached results.", label)
		coverage := parseCoverageFile(target, cachedCoverageFile)
		results, err := parseTestResults(target, cachedOutputFile, true)
		target.Results.Duration = time.Since(startTime).Seconds()
		target.Results.Cached = true
		if err != nil {
			state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to parse cached test file %s", cachedOutputFile)
		} else if results.Failed > 0 {
			panic("Test results with failures shouldn't be cached.")
		} else {
			logTestSuccess(state, tid, label, results, coverage)
		}
	}

	moveAndCacheOutputFiles := func(results core.TestResults, coverage core.TestCoverage) bool {
		// Never cache test results when given arguments; the results may be incomplete.
		if len(state.TestArgs) > 0 {
			log.Debug("Not caching results for %s, we passed it arguments", label)
			return true
		}
		if err := moveAndCacheOutputFile(state, target, hash, outputFile, cachedOutputFile, resultsFileName, dummyOutput); err != nil {
			state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
			return false
		}
		if needCoverage || core.PathExists(coverageFile) {
			if err := moveAndCacheOutputFile(state, target, hash, coverageFile, cachedCoverageFile, coverageFileName, dummyCoverage); err != nil {
				state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test coverage file")
				return false
			}
		}
		for _, output := range target.TestOutputs {
			tmpFile := path.Join(target.TestDir(), output)
			outFile := path.Join(target.OutDir(), output)
			if err := moveAndCacheOutputFile(state, target, hash, tmpFile, outFile, output, ""); err != nil {
				state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
				return false
			}
		}
		return true
	}

	needToRun := func() bool {
		if target.State() == core.Unchanged && core.PathExists(cachedOutputFile) {
			// Output file exists already and appears to be valid. We might still need to rerun though
			// if the coverage files aren't available.
			if needCoverage && !core.PathExists(cachedCoverageFile) {
				return true
			}
			return false
		}
		// Check the cache for these artifacts.
		if state.Cache == nil {
			return true
		}
		cache := *state.Cache
		if !cache.RetrieveExtra(target, hash, resultsFileName) {
			return true
		}
		if needCoverage && !cache.RetrieveExtra(target, hash, coverageFileName) {
			return true
		}
		for _, output := range target.TestOutputs {
			if !cache.RetrieveExtra(target, hash, output) {
				return true
			}
		}
		return false
	}

	// Don't cache when doing multiple runs, presumably the user explicitly wants to check it.
	if state.NumTestRuns <= 1 && !needToRun() {
		cachedTest()
		return
	}
	// Remove any cached test result file.
	if err := RemoveCachedTestFiles(target); err != nil {
		state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to remove cached test files")
		return
	}
	numSucceeded := 0
	numFlakes := 0
	numRuns, successesRequired := calcNumRuns(state.NumTestRuns, target.Flakiness)
	var resultErr error
	resultMsg := ""
	var coverage core.TestCoverage
	for i := 0; i < numRuns && numSucceeded < successesRequired; i++ {
		if numRuns > 1 {
			state.LogBuildResult(tid, label, core.TargetTesting, fmt.Sprintf("Testing (%d of %d)...", i+1, numRuns))
		}
		out, err := prepareAndRunTest(tid, state, target)
		duration := time.Since(startTime).Seconds()
		startTime = time.Now() // reset this for next time

		// This is all pretty involved; there are lots of different possibilities of what could happen.
		// The contract is that the test must return zero on success or non-zero on failure (Unix FTW).
		// If it's successful, it must produce a parseable file named "test.results" in its temp folder.
		// (alternatively, this can be a directory containing parseable files).
		// Tests can opt out of the file requirement individually, in which case they're judged only
		// by their return value.
		// But of course, we still have to consider all the alternatives here and handle them nicely.
		target.Results.Output = string(out)
		if err != nil && target.Results.Output == "" {
			target.Results.Output = err.Error()
		}
		target.Results.TimedOut = err == context.DeadlineExceeded
		coverage = parseCoverageFile(target, coverageFile)
		target.Results.Duration += duration
		if !core.PathExists(outputFile) {
			if err == nil && target.NoTestOutput {
				target.Results.NumTests++
				target.Results.Passed++
				numSucceeded++
			} else if err == nil {
				target.Results.NumTests++
				target.Results.Failed++
				target.Results.Failures = append(target.Results.Failures, core.TestFailure{
					Name:   "Missing results",
					Stdout: string(out),
				})
				resultErr = fmt.Errorf("Test failed to produce output results file")
				resultMsg = fmt.Sprintf("Test apparently succeeded but failed to produce %s. Output: %s", outputFile, string(out))
				numFlakes++
			} else {
				target.Results.NumTests++
				target.Results.Failed++
				target.Results.Failures = append(target.Results.Failures, core.TestFailure{
					Name:   "Test failed with no results",
					Stdout: string(out),
				})
				numFlakes++
				resultErr = err
				resultMsg = fmt.Sprintf("Test failed with no results. Output: %s", string(out))
			}
		} else {
			results, err2 := parseTestResults(target, outputFile, false)
			if err2 != nil {
				resultErr = err2
				resultMsg = fmt.Sprintf("Couldn't parse test output file: %s. Stdout: %s", err2, string(out))
				numFlakes++
			} else if err != nil && results.Failed == 0 {
				// Add a failure result to the test so it shows up in the final aggregation.
				target.Results.Failed = 1
				target.Results.Failures = append(results.Failures, core.TestFailure{
					Name:   "Return value",
					Type:   fmt.Sprintf("%s", err),
					Stdout: string(out),
				})
				numFlakes++
				resultErr = err
				resultMsg = fmt.Sprintf("Test returned nonzero but reported no errors: %s. Output: %s", err, string(out))
			} else if err == nil && results.Failed != 0 {
				resultErr = fmt.Errorf("Test returned 0 but still reported failures")
				resultMsg = fmt.Sprintf("Test returned 0 but still reported failures. Stdout: %s", string(out))
				numFlakes++
			} else if results.Failed != 0 {
				resultErr = fmt.Errorf("Tests failed")
				resultMsg = fmt.Sprintf("Tests failed. Stdout: %s", string(out))
				numFlakes++
			} else {
				numSucceeded++
				if !state.ShowTestOutput {
					// Save a bit of memory, if we're not printing results on success we will never use them again.
					target.Results.Output = ""
				}
			}
		}
	}
	if numSucceeded >= successesRequired {
		target.Results.Failures = nil // Remove any failures, they don't count
		target.Results.Failed = 0     // (they'll be picked up as flakes below)
		if numSucceeded > 0 && numFlakes > 0 {
			target.Results.Flakes = numFlakes
		}
		// Success, clean things up
		if moveAndCacheOutputFiles(target.Results, coverage) {
			logTestSuccess(state, tid, label, target.Results, coverage)
		}
		// Clean up the test directory.
		if state.CleanWorkdirs {
			if err := os.RemoveAll(target.TestDir()); err != nil {
				log.Warning("Failed to remove test directory for %s: %s", target.Label, err)
			}
		}
	} else {
		state.LogTestResult(tid, label, core.TargetTestFailed, target.Results, coverage, resultErr, resultMsg)
	}
}
Example #20
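// postBuildOutputFileName returns the file in which the output of a target's post-build function is stored.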
func postBuildOutputFileName(target *core.BuildTarget) string {
	return path.Join(target.OutDir(), core.PostBuildOutputFileName(target))
}