// moveOutput moves an output from the rule's temporary directory to its final location.
// It returns true if the output has changed, i.e. it wasn't already present with the same hash.
func moveOutput(target *core.BuildTarget, tmpOutput, realOutput string, filegroup bool) (bool, error) {
    // Hash the file.
    newHash, err := pathHash(tmpOutput, false)
    if err != nil {
        return true, err
    }
    realOutputExists := core.PathExists(realOutput)
    // If this is a filegroup we hardlink the outputs over and so the two files may actually be
    // the same file. If so don't do anything else, and especially don't delete & recreate the
    // file, because other things might be using it already (more than one filegroup can
    // own the same file).
    if filegroup && realOutputExists && core.IsSameFile(tmpOutput, realOutput) {
        movePathHash(tmpOutput, realOutput, filegroup) // make sure this is updated regardless
        return false, nil
    }
    if realOutputExists {
        if oldHash, err := pathHash(realOutput, false); err != nil {
            return true, err
        } else if bytes.Equal(oldHash, newHash) {
            // We already have the same file in the current location. Don't bother moving it.
            log.Debug("Checking %s vs. %s, hashes match", tmpOutput, realOutput)
            return false, nil
        }
        if err := os.RemoveAll(realOutput); err != nil {
            return true, err
        }
    }
    movePathHash(tmpOutput, realOutput, filegroup)
    // Check if we need a directory for this output.
    dir := path.Dir(realOutput)
    if !core.PathExists(dir) {
        if err := os.MkdirAll(dir, core.DirPermissions); err != nil {
            return true, err
        }
    }
    // If the output file is in plz-out/tmp we can just move it to save time, otherwise we need
    // to copy so we don't move files from other directories.
    if strings.HasPrefix(tmpOutput, target.TmpDir()) {
        if err := os.Rename(tmpOutput, realOutput); err != nil {
            return true, err
        }
    } else {
        if err := core.RecursiveCopyFile(tmpOutput, realOutput, target.OutMode(), filegroup, false); err != nil {
            if filegroup && os.IsExist(err) && core.IsSameFile(tmpOutput, realOutput) {
                // It's possible for two filegroups to race building simultaneously. In that
                // case one will fail with an ErrExist, which is OK as far as we're concerned
                // here as long as the file we tried to write really is the same as the input.
                return true, nil
            }
            return true, err
        }
    }
    if target.IsBinary {
        if err := os.Chmod(realOutput, target.OutMode()); err != nil {
            return true, err
        }
    }
    return true, nil
}
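// pathHash and movePathHash (used above) aren't shown in this section. pathHash is
// presumably a (memoised) content hash of a path, with movePathHash updating the memoised
// entry when the file moves. A minimal non-memoised sketch for regular files only, under
// those assumptions (imports: crypto/sha1, io, os):
func pathHashSketch(name string) ([]byte, error) {
    f, err := os.Open(name)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    h := sha1.New()
    if _, err := io.Copy(h, f); err != nil {
        return nil, err
    }
    return h.Sum(nil), nil
}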
// maybeFork will fork & detach if background is true. First it will rename the out and
// cache dirs so it's safe to run another plz in this repo, then fork & detach child
// processes to do the actual cleaning.
// The parent will then die quietly and the children will continue to actually remove the
// directories.
func maybeFork(outDir, cacheDir string, cleanCache bool) error {
    rm, err := exec.LookPath("rm")
    if err != nil {
        return err
    }
    if !core.PathExists(outDir) || !core.PathExists(cacheDir) {
        return nil
    }
    newOutDir, err := moveDir(outDir)
    if err != nil {
        return err
    }
    args := []string{rm, "-rf", newOutDir}
    if cleanCache {
        newCacheDir, err := moveDir(cacheDir)
        if err != nil {
            return err
        }
        args = append(args, newCacheDir)
    }
    // Note that we can't fork() directly and continue running Go code, but ForkExec() works okay.
    _, err = syscall.ForkExec(rm, args, nil)
    if err == nil {
        // Success if we get here.
        fmt.Println("Cleaning in background; you may continue to do pleasing things in this repo in the meantime.")
        os.Exit(0)
    }
    return err
}
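// moveDir (called above) isn't shown here; it's presumably renaming the directory to a
// scratch name and returning the new path, so the original path can be recreated
// immediately while the old contents are deleted in the background. A hypothetical sketch:
func moveDirSketch(dir string) (string, error) {
    newDir := fmt.Sprintf("%s_cleaning_%d", dir, os.Getpid())
    return newDir, os.Rename(dir, newDir)
}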
// createInitPy creates an empty __init__.py in the given directory, and recursively in
// its parents up to the top of the generated-code tree, so the generated modules are importable.
func createInitPy(dir string) {
    if core.PathExists(path.Join(dir, "__init__.py")) {
        return
    }
    if f, err := os.OpenFile(path.Join(dir, "__init__.py"), os.O_RDONLY|os.O_CREATE, 0444); err == nil {
        f.Close()
    }
    dir = path.Dir(dir)
    if dir != core.GenDir && dir != "." && !core.PathExists(path.Join(dir, "__init__.py")) {
        createInitPy(dir)
    }
}
func TestClean(t *testing.T) {
    httpcache.Clean(target)
    filename := path.Join("src/cache/test_data", osName, "pkg/name/label_name")
    if core.PathExists(filename) {
        t.Errorf("File %s was not removed from cache.", filename)
    }
}
// scan walks the cache directory and populates the in-memory index of cached files.
func (cache *Cache) scan() {
    cache.cachedFiles = cmap.New()
    cache.totalSize = 0
    if !core.PathExists(cache.rootPath) {
        if err := os.MkdirAll(cache.rootPath, core.DirPermissions); err != nil {
            log.Fatalf("Failed to create cache directory %s: %s", cache.rootPath, err)
        }
        return
    }
    log.Info("Scanning cache directory %s...", cache.rootPath)
    filepath.Walk(cache.rootPath, func(name string, info os.FileInfo, err error) error {
        if err != nil {
            log.Fatalf("%s", err)
        } else if !info.IsDir() { // We don't have directory entries.
            name = name[len(cache.rootPath)+1:]
            log.Debug("Found file %s", name)
            size := info.Size()
            cache.cachedFiles.Set(name, &cachedFile{
                lastReadTime: time.Unix(tools.AccessTime(info), 0),
                readCount:    0,
                size:         size,
            })
            cache.totalSize += size
        }
        return nil
    })
    log.Info("Scan complete, found %d entries", cache.cachedFiles.Count())
}
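// tools.AccessTime (used above) isn't shown in this section; it presumably extracts the
// last-access time from the platform-specific stat structure. A minimal Linux-only sketch
// (other platforms name the field differently, e.g. Atimespec on Darwin):
func accessTimeSketch(info os.FileInfo) int64 {
    return info.Sys().(*syscall.Stat_t).Atim.Sec
}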
// CheckAndUpdate checks whether we should update Please and does so if needed.
// If an update is required this function never returns: it either dies on failure or,
// on success, execs the new Please. Conversely, if no update is required it returns
// normally. It may adjust the version in the configuration.
// updatesEnabled indicates whether updates are enabled (i.e. not run with --noupdate).
// updateCommand indicates whether an update was specifically requested (e.g. via `plz update`).
// forceUpdate indicates whether the user passed --force on the command line, in which case
// we always update even if the version already exists.
func CheckAndUpdate(config *core.Configuration, updatesEnabled, updateCommand, forceUpdate bool) {
    if !forceUpdate && !shouldUpdate(config, updatesEnabled, updateCommand) {
        return
    }
    word := describe(config.Please.Version, core.PleaseVersion, true)
    log.Warning("%s to Please version %s (currently %s)", word, config.Please.Version, core.PleaseVersion)

    // Must lock here so that the update process doesn't race when running two instances
    // simultaneously.
    core.AcquireRepoLock()
    defer core.ReleaseRepoLock()

    // If the destination exists and the user passed --force, remove it to force a redownload.
    newDir := core.ExpandHomePath(path.Join(config.Please.Location, config.Please.Version.String()))
    log.Notice("%s", newDir)
    if forceUpdate && core.PathExists(newDir) {
        if err := os.RemoveAll(newDir); err != nil {
            log.Fatalf("Failed to remove existing directory: %s", err)
        }
    }

    // Download it.
    newPlease := downloadAndLinkPlease(config)

    // Now run the new one.
    args := append([]string{newPlease}, os.Args[1:]...)
    log.Info("Executing %s", strings.Join(args, " "))
    if err := syscall.Exec(newPlease, args, os.Environ()); err != nil {
        log.Fatalf("Failed to exec new Please version %s: %s", newPlease, err)
    }
    // Shouldn't ever get here: we should have either exec'd or died above.
    panic("please update failed in an unexpected and exciting way")
}
// RetrieveExtra retrieves a single output for a target from the dir cache.
func (cache *dirCache) RetrieveExtra(target *core.BuildTarget, key []byte, out string) bool {
    outDir := path.Join(core.RepoRoot, target.OutDir())
    cacheDir := cache.getPath(target, key)
    cachedOut := path.Join(cacheDir, out)
    realOut := path.Join(outDir, out)
    if !core.PathExists(cachedOut) {
        log.Debug("%s: %s doesn't exist in dir cache", target.Label, cachedOut)
        return false
    }
    log.Debug("Retrieving %s: %s from dir cache...", target.Label, cachedOut)
    if dir := path.Dir(realOut); dir != "." {
        if err := os.MkdirAll(dir, core.DirPermissions); err != nil {
            log.Warning("Failed to create output directory %s: %s", dir, err)
            return false
        }
    }
    // It seems to be quite important that we unlink the existing file first to avoid ETXTBSY errors
    // in cases where we're running an existing binary (as Please does during bootstrap, for example).
    if err := os.RemoveAll(realOut); err != nil {
        log.Warning("Failed to unlink existing output %s: %s", realOut, err)
        return false
    }
    // Recursively hardlink files back out of the cache.
    if err := core.RecursiveCopyFile(cachedOut, realOut, fileMode(target), true, true); err != nil {
        log.Warning("Failed to move cached file to output: %s -> %s: %s", cachedOut, realOut, err)
        return false
    }
    log.Debug("Retrieved %s: %s from dir cache", target.Label, cachedOut)
    return true
}
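// fileMode (referenced above) isn't shown here; it presumably chooses permission bits
// based on whether the target is binary. A hypothetical sketch:
func fileModeSketch(target *core.BuildTarget) os.FileMode {
    if target.IsBinary {
        return 0555 // r-xr-xr-x
    }
    return 0444 // r--r--r--
}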
func TestStore(t *testing.T) {
    target.AddOutput("testfile")
    httpcache.Store(target, []byte("test_key"))
    abs, _ := filepath.Abs(path.Join("src/cache/test_data", osName, "pkg/name", "label_name"))
    if !core.PathExists(abs) {
        t.Errorf("Test file %s was not stored in cache.", abs)
    }
}
func TestDownloadNewPlease(t *testing.T) {
    c := makeConfig("downloadnewplease")
    downloadPlease(c)
    // Should have written the new file.
    assert.True(t, core.PathExists(path.Join(c.Please.Location, c.Please.Version.String(), "please")))
    // Should not have written this yet though.
    assert.False(t, core.PathExists(path.Join(c.Please.Location, "please")))
    // Panics because it's not a valid .tar.gz.
    c.Please.Version = *semver.New("1.0.0")
    assert.Panics(t, func() { downloadPlease(c) })
    // Panics because it doesn't exist.
    c.Please.Version = *semver.New("2.0.0")
    assert.Panics(t, func() { downloadPlease(c) })
    // Panics because of an invalid URL.
    c.Please.DownloadLocation = "notaurl"
    assert.Panics(t, func() { downloadPlease(c) })
}
func clean(path string) {
    if core.PathExists(path) {
        log.Info("Cleaning path %s", path)
        if err := os.RemoveAll(path); err != nil {
            log.Fatalf("Failed to clean path %s: %s", path, err)
        }
    }
}
func prepareDirectory(directory string, remove bool) error {
    if remove && core.PathExists(directory) {
        if err := os.RemoveAll(directory); err != nil {
            return err
        }
    }
    return os.MkdirAll(directory, core.DirPermissions) // drwxrwxr-x
}
func TestStore(t *testing.T) {
    target := core.NewBuildTarget(label)
    target.AddOutput("testfile2")
    rpccache.Store(target, []byte("test_key"))
    expectedPath := path.Join("src/cache/test_data", osName, "pkg/name", "label_name", "dGVzdF9rZXk", target.Outputs()[0])
    if !core.PathExists(expectedPath) {
        t.Errorf("Test file %s was not stored in cache.", expectedPath)
    }
}
func TestLinkNewFile(t *testing.T) {
    c := makeConfig("linknewfile")
    dir := path.Join(c.Please.Location, c.Please.Version.String())
    assert.NoError(t, os.MkdirAll(dir, core.DirPermissions))
    assert.NoError(t, ioutil.WriteFile(path.Join(dir, "please"), []byte("test"), 0775))
    linkNewFile(c, "please")
    assert.True(t, core.PathExists(path.Join(c.Please.Location, "please")))
    assert.NoError(t, ioutil.WriteFile(path.Join(c.Please.Location, "exists"), []byte("test"), 0775))
}
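// linkNewFile (exercised above) isn't shown in this section; it presumably points a
// symlink in Please.Location at the versioned copy of the file. A hypothetical sketch:
func linkNewFileSketch(c *core.Configuration, file string) {
    link := path.Join(c.Please.Location, file)
    dest := path.Join(c.Please.Location, c.Please.Version.String(), file)
    if err := os.RemoveAll(link); err != nil {
        log.Fatalf("Failed to remove existing link %s: %s", link, err)
    }
    if err := os.Symlink(dest, link); err != nil {
        log.Fatalf("Failed to link %s -> %s: %s", link, dest, err)
    }
}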
// Retrieve retrieves all of a target's artifacts from the dir cache.
func (cache *dirCache) Retrieve(target *core.BuildTarget, key []byte) bool {
    cacheDir := cache.getPath(target, key)
    if !core.PathExists(cacheDir) {
        log.Debug("%s: %s doesn't exist in dir cache", target.Label, cacheDir)
        return false
    }
    for out := range cacheArtifacts(target) {
        if !cache.RetrieveExtra(target, key, out) {
            return false
        }
    }
    return true
}
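// cacheArtifacts (ranged over above) isn't shown in this section; the range suggests it
// yields artifact names over a channel (or map). A hypothetical channel-based sketch
// covering the target's declared outputs:
func cacheArtifactsSketch(target *core.BuildTarget) <-chan string {
    ch := make(chan string)
    go func() {
        defer close(ch)
        for _, out := range target.Outputs() {
            ch <- out
        }
    }()
    return ch
}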
// downloadAndLinkPlease downloads a new Please version and links it into place, if needed.
// It returns the new location and dies on failure.
func downloadAndLinkPlease(config *core.Configuration) string {
    config.Please.Location = core.ExpandHomePath(config.Please.Location)
    newPlease := path.Join(config.Please.Location, config.Please.Version.String(), "please")
    if !core.PathExists(newPlease) {
        downloadPlease(config)
    }
    if !verifyNewPlease(newPlease, config.Please.Version.String()) {
        cleanDir(path.Join(config.Please.Location, config.Please.Version.String()))
        log.Fatalf("Not continuing.")
    }
    linkNewPlease(config)
    return newPlease
}
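// verifyNewPlease (called above) isn't shown here; it presumably runs the downloaded
// binary with --version and checks the reported version before linking it into place.
// A hypothetical sketch (imports: os/exec, strings):
func verifyNewPleaseSketch(newPlease, version string) bool {
    output, err := exec.Command(newPlease, "--version").Output()
    if err != nil {
        log.Error("Failed to run new Please: %s", err)
        return false
    }
    if !strings.Contains(string(output), version) {
        log.Error("Downloaded binary reported an unexpected version: %s", output)
        return false
    }
    return true
}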
func TestStoreAndRetrieve(t *testing.T) {
    target := core.NewBuildTarget(label)
    target.AddOutput("testfile3")
    rpccache.Store(target, []byte("test_key"))
    // Remove the file so we can test retrieval correctly.
    outPath := path.Join(target.OutDir(), target.Outputs()[0])
    if err := os.Remove(outPath); err != nil {
        t.Errorf("Failed to remove artifact: %s", err)
    }
    if !rpccache.Retrieve(target, []byte("test_key")) {
        t.Error("Artifact expected and not found.")
    } else if !core.PathExists(outPath) {
        t.Errorf("Artifact %s doesn't exist after alleged cache retrieval", outPath)
    }
}
// moveAndCacheOutputFile moves an output file from `from` to `to` and stores it in the
// cache. If `from` doesn't exist and `dummy` is non-empty, it writes the dummy content
// to `to` instead (e.g. a dummy coverage file to record that it's been done for a test).
func moveAndCacheOutputFile(state *core.BuildState, target *core.BuildTarget, hash []byte, from, to, filename, dummy string) error {
    if !core.PathExists(from) {
        if dummy == "" {
            return nil
        }
        if err := ioutil.WriteFile(to, []byte(dummy), 0644); err != nil {
            return err
        }
    } else if err := os.Rename(from, to); err != nil {
        return err
    }
    if state.Cache != nil {
        (*state.Cache).StoreExtra(target, hash, filename)
    }
    return nil
}
func initialiseInterpreterFrom(enginePath string) bool {
    if !core.PathExists(enginePath) {
        return false
    }
    log.Debug("Attempting to load engine from %s", enginePath)
    cEnginePath := C.CString(enginePath)
    defer C.free(unsafe.Pointer(cEnginePath))
    result := C.InitialiseInterpreter(cEnginePath)
    if result != 0 {
        // Low level of logging because it's allowable to fail on libplease_parser_pypy,
        // which we try first.
        log.Notice("Failed to initialise interpreter from %s: %s", enginePath, C.GoString(C.dlerror()))
        return false
    }
    log.Info("Using parser engine from %s", enginePath)
    return true
}
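// The caller (not shown) presumably tries candidate engines in order of preference,
// falling back when one fails to load; the comment above says libplease_parser_pypy is
// tried first. A hypothetical sketch (the second engine name is an assumption):
func initialiseInterpreterSketch(location string) {
    for _, engine := range []string{"libplease_parser_pypy.so", "libplease_parser_python.so"} {
        if initialiseInterpreterFrom(path.Join(location, engine)) {
            return
        }
    }
    log.Fatalf("Failed to initialise any parser engine")
}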
// moveOutputs moves all the declared outputs of a rule from its temporary directory to
// their final locations. It returns any extra optional outputs discovered, and whether
// any output has changed.
func moveOutputs(state *core.BuildState, target *core.BuildTarget) ([]string, bool, error) {
    // Before we write any outputs, we must remove the old hash file to avoid it being
    // left in an inconsistent state.
    if err := os.RemoveAll(ruleHashFileName(target)); err != nil {
        return nil, true, err
    }
    changed := false
    tmpDir := target.TmpDir()
    outDir := target.OutDir()
    for _, output := range target.Outputs() {
        tmpOutput := path.Join(tmpDir, output)
        realOutput := path.Join(outDir, output)
        if !core.PathExists(tmpOutput) {
            return nil, true, fmt.Errorf("Rule %s failed to create output %s", target.Label, tmpOutput)
        }
        // If the output is a symlink, dereference it. Otherwise, for efficiency,
        // we can just move it without a full copy (saves copying large .jar files etc).
        dereferencedPath, err := filepath.EvalSymlinks(tmpOutput)
        if err != nil {
            return nil, true, err
        }
        // NB. false -> not filegroup, we wouldn't be here if it was.
        outputChanged, err := moveOutput(target, dereferencedPath, realOutput, false)
        if err != nil {
            return nil, true, err
        }
        changed = changed || outputChanged
    }
    if changed {
        log.Debug("Outputs for %s have changed", target.Label)
    } else {
        log.Debug("Outputs for %s are unchanged", target.Label)
    }
    // Optional outputs get moved but don't contribute to the hash or to incrementality.
    // Glob patterns are supported on these.
    extraOuts := []string{}
    for _, output := range core.Glob(tmpDir, target.OptionalOutputs, nil, nil, true) {
        log.Debug("Discovered optional output %s", output)
        tmpOutput := path.Join(tmpDir, output)
        realOutput := path.Join(outDir, output)
        if _, err := moveOutput(target, tmpOutput, realOutput, false); err != nil {
            return nil, changed, err
        }
        extraOuts = append(extraOuts, output)
    }
    return extraOuts, changed, nil
}
// needsBuilding returns true if the rule needs building, false if the existing outputs are OK.
func needsBuilding(state *core.BuildState, target *core.BuildTarget, postBuild bool) bool {
    // Check the dependencies first, because they don't need any disk I/O.
    if target.NeedsTransitiveDependencies {
        if anyDependencyHasChanged(target) {
            return true // one of the transitive deps has changed, need to rebuild
        }
    } else {
        for _, dep := range target.Dependencies() {
            if dep.State() < core.Unchanged {
                log.Debug("Need to rebuild %s, %s has changed", target.Label, dep.Label)
                return true // dependency has just been rebuilt, do this too.
            }
        }
    }
    oldRuleHash, oldConfigHash, oldSourceHash := readRuleHashFile(ruleHashFileName(target), postBuild)
    if !bytes.Equal(oldConfigHash, state.Hashes.Config) {
        if len(oldConfigHash) == 0 {
            // Small nicety to make it a bit clearer what's going on.
            log.Debug("Need to build %s, outputs aren't there", target.Label)
        } else {
            log.Debug("Need to rebuild %s, config has changed (was %s, need %s)", target.Label, b64(oldConfigHash), b64(state.Hashes.Config))
        }
        return true
    }
    newRuleHash := RuleHash(target, false, postBuild)
    if !bytes.Equal(oldRuleHash, newRuleHash) {
        log.Debug("Need to rebuild %s, rule has changed (was %s, need %s)", target.Label, b64(oldRuleHash), b64(newRuleHash))
        return true
    }
    newSourceHash, err := sourceHash(state.Graph, target)
    if err != nil || !bytes.Equal(oldSourceHash, newSourceHash) {
        log.Debug("Need to rebuild %s, sources have changed (was %s, need %s)", target.Label, b64(oldSourceHash), b64(newSourceHash))
        return true
    }
    // Check that the outputs of this rule exist. This would only happen if the user had
    // removed them, but it's incredibly aggravating if you remove an output and the
    // rule won't rebuild itself.
    for _, output := range target.Outputs() {
        realOutput := path.Join(target.OutDir(), output)
        if !core.PathExists(realOutput) {
            log.Debug("Output %s doesn't exist for rule %s; will rebuild.", realOutput, target.Label)
            return true
        }
    }
    // Maybe we've forced a rebuild. Do this last; might be interesting to see if it needed building anyway.
    return state.ForceRebuild && (state.IsOriginalTarget(target.Label) || state.IsOriginalTarget(target.Label.Parent()))
}
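// b64 (used in the log messages above) isn't shown in this section; it's presumably a
// small helper that renders a hash printable. A minimal sketch, assuming empty slices
// get a placeholder (import: encoding/base64):
func b64Sketch(h []byte) string {
    if len(h) == 0 {
        return "<not found>"
    }
    return base64.RawStdEncoding.EncodeToString(h)
}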
func parseTestResultsDir(target *core.BuildTarget, outputDir string) (core.TestResults, error) {
    results := core.TestResults{}
    if !core.PathExists(outputDir) {
        return results, fmt.Errorf("Didn't find any test results in %s", outputDir)
    }
    err := filepath.Walk(outputDir, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        } else if !info.IsDir() {
            fileResults, err := parseTestResultsImpl(target, path)
            if err != nil {
                return fmt.Errorf("Error parsing %s: %s", path, err)
            }
            results.Aggregate(fileResults)
        }
        return nil
    })
    return results, err
}
// prepareDirectories prepares the temporary and output directories for a target.
func prepareDirectories(target *core.BuildTarget) error {
    if err := prepareDirectory(target.TmpDir(), true); err != nil {
        return err
    }
    if err := prepareDirectory(target.OutDir(), false); err != nil {
        return err
    }
    // Nicety for the build rules: pre-create any directories the target has declared
    // it'll create files in.
    for _, out := range target.Outputs() {
        if dir := path.Dir(out); dir != "." {
            outPath := path.Join(target.TmpDir(), dir)
            if !core.PathExists(outPath) {
                if err := os.MkdirAll(outPath, core.DirPermissions); err != nil {
                    return err
                }
            }
        }
    }
    return nil
}
// parsePackage performs the initial parse of a package.
// It's assumed that the caller used firstToParse to ascertain that they only call this
// once per package.
func parsePackage(state *core.BuildState, label, dependor core.BuildLabel) *core.Package {
    packageName := label.PackageName
    pkg := core.NewPackage(packageName)
    if pkg.Filename = buildFileName(state, packageName); pkg.Filename == "" {
        exists := core.PathExists(packageName)
        // Handle quite a few cases to provide more obvious error messages.
        if dependor != core.OriginalTarget && exists {
            panic(fmt.Sprintf("%s depends on %s, but there's no BUILD file in %s/", dependor, label, packageName))
        } else if dependor != core.OriginalTarget {
            panic(fmt.Sprintf("%s depends on %s, but the directory %s doesn't exist", dependor, label, packageName))
        } else if exists {
            panic(fmt.Sprintf("Can't build %s; there's no BUILD file in %s/", label, packageName))
        }
        panic(fmt.Sprintf("Can't build %s; the directory %s doesn't exist", label, packageName))
    }
    if parsePackageFile(state, pkg.Filename, pkg) {
        return nil // Indicates deferral
    }
    for _, target := range pkg.Targets {
        state.Graph.AddTarget(target)
        for _, out := range target.DeclaredOutputs() {
            pkg.MustRegisterOutput(out, target)
        }
        for _, out := range target.TestOutputs {
            if !core.IsGlob(out) {
                pkg.MustRegisterOutput(out, target)
            }
        }
    }
    // Do this in a separate loop so we get intra-package dependencies right now.
    for _, target := range pkg.Targets {
        for _, dep := range target.DeclaredDependencies() {
            state.Graph.AddDependency(target.Label, dep)
        }
    }
    // Calling this means nobody else will add entries to pendingTargets for this package.
    state.Graph.AddPackage(pkg)
    return pkg
}
func queryCompletionPackages(config *core.Configuration, query, repoRoot string) {
    root := path.Join(repoRoot, query)
    origRoot := root
    if !core.PathExists(root) {
        root = path.Dir(root)
    }
    packages := []string{}
    for pkg := range utils.FindAllSubpackages(config, root, origRoot) {
        if strings.HasPrefix(pkg, origRoot) {
            packages = append(packages, pkg[len(repoRoot):])
        }
    }
    // If there's only one package it must be the match, but we deliberately present two
    // variants of it; if we offered a single option, bash completion would assume that's
    // the full completion.
    if len(packages) == 1 {
        fmt.Printf("/%s:\n", packages[0])
        fmt.Printf("/%s:all\n", packages[0])
    } else {
        for _, pkg := range packages {
            fmt.Printf("/%s\n", pkg)
        }
    }
    os.Exit(0) // Don't need to run a full-blown parse, get out now.
}
func TestOutputFolderExists(t *testing.T) {
    if !core.PathExists(cachePath) {
        t.Errorf("%s does not exist.", cachePath)
    }
}
func TestDownloadAndLinkPleaseBadVersion(t *testing.T) {
    c := makeConfig("downloadandlink")
    assert.Panics(t, func() { downloadAndLinkPlease(c) })
    // Should have deleted the thing it downloaded.
    assert.False(t, core.PathExists(path.Join(c.Please.Location, c.Please.Version.String())))
}
func TestDownloadAndLinkPlease(t *testing.T) {
    c := makeConfig("downloadandlink")
    c.Please.Version = core.PleaseVersion
    newPlease := downloadAndLinkPlease(c)
    assert.True(t, core.PathExists(newPlease))
}
func test(tid int, state *core.BuildState, label core.BuildLabel, target *core.BuildTarget) {
    startTime := time.Now()
    hash, err := build.RuntimeHash(state, target)
    if err != nil {
        state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to calculate target hash")
        return
    }
    // Check the cached output files if the target wasn't rebuilt.
    hash = core.CollapseHash(hash)
    hashStr := base64.RawURLEncoding.EncodeToString(hash)
    resultsFileName := fmt.Sprintf(".test_results_%s_%s", label.Name, hashStr)
    coverageFileName := fmt.Sprintf(".test_coverage_%s_%s", label.Name, hashStr)
    outputFile := path.Join(target.TestDir(), "test.results")
    coverageFile := path.Join(target.TestDir(), "test.coverage")
    cachedOutputFile := path.Join(target.OutDir(), resultsFileName)
    cachedCoverageFile := path.Join(target.OutDir(), coverageFileName)
    needCoverage := state.NeedCoverage && !target.NoTestOutput

    cachedTest := func() {
        log.Debug("Not re-running test %s; got cached results.", label)
        coverage := parseCoverageFile(target, cachedCoverageFile)
        results, err := parseTestResults(target, cachedOutputFile, true)
        target.Results.Duration = time.Since(startTime).Seconds()
        target.Results.Cached = true
        if err != nil {
            state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to parse cached test file %s", cachedOutputFile)
        } else if results.Failed > 0 {
            panic("Test results with failures shouldn't be cached.")
        } else {
            logTestSuccess(state, tid, label, results, coverage)
        }
    }

    moveAndCacheOutputFiles := func(results core.TestResults, coverage core.TestCoverage) bool {
        // Never cache test results when given arguments; the results may be incomplete.
        if len(state.TestArgs) > 0 {
            log.Debug("Not caching results for %s, we passed it arguments", label)
            return true
        }
        if err := moveAndCacheOutputFile(state, target, hash, outputFile, cachedOutputFile, resultsFileName, dummyOutput); err != nil {
            state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
            return false
        }
        if needCoverage || core.PathExists(coverageFile) {
            if err := moveAndCacheOutputFile(state, target, hash, coverageFile, cachedCoverageFile, coverageFileName, dummyCoverage); err != nil {
                state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test coverage file")
                return false
            }
        }
        for _, output := range target.TestOutputs {
            tmpFile := path.Join(target.TestDir(), output)
            outFile := path.Join(target.OutDir(), output)
            if err := moveAndCacheOutputFile(state, target, hash, tmpFile, outFile, output, ""); err != nil {
                state.LogTestResult(tid, label, core.TargetTestFailed, results, coverage, err, "Failed to move test output file")
                return false
            }
        }
        return true
    }

    needToRun := func() bool {
        if target.State() == core.Unchanged && core.PathExists(cachedOutputFile) {
            // The output file exists already and appears to be valid. We might still need
            // to rerun though if the coverage files aren't available.
            if needCoverage && !core.PathExists(cachedCoverageFile) {
                return true
            }
            return false
        }
        // Check the cache for these artifacts.
        if state.Cache == nil {
            return true
        }
        cache := *state.Cache
        if !cache.RetrieveExtra(target, hash, resultsFileName) {
            return true
        }
        if needCoverage && !cache.RetrieveExtra(target, hash, coverageFileName) {
            return true
        }
        for _, output := range target.TestOutputs {
            if !cache.RetrieveExtra(target, hash, output) {
                return true
            }
        }
        return false
    }

    // Don't use cached results when doing multiple runs; presumably the user explicitly
    // wants to check it.
    if state.NumTestRuns <= 1 && !needToRun() {
        cachedTest()
        return
    }
    // Remove any cached test result file.
    if err := RemoveCachedTestFiles(target); err != nil {
        state.LogBuildError(tid, label, core.TargetTestFailed, err, "Failed to remove cached test files")
        return
    }

    numSucceeded := 0
    numFlakes := 0
    numRuns, successesRequired := calcNumRuns(state.NumTestRuns, target.Flakiness)
    var resultErr error
    resultMsg := ""
    var coverage core.TestCoverage
    for i := 0; i < numRuns && numSucceeded < successesRequired; i++ {
        if numRuns > 1 {
            state.LogBuildResult(tid, label, core.TargetTesting, fmt.Sprintf("Testing (%d of %d)...", i+1, numRuns))
        }
        out, err := prepareAndRunTest(tid, state, target)
        duration := time.Since(startTime).Seconds()
        startTime = time.Now() // reset this for next time

        // This is all pretty involved; there are lots of different possibilities of what could happen.
        // The contract is that the test must return zero on success or non-zero on failure (Unix FTW).
        // If it's successful, it must produce a parseable file named "test.results" in its temp folder
        // (alternatively, this can be a directory containing parseable files).
        // Tests can opt out of the file requirement individually, in which case they're judged only
        // by their return value.
        // But of course, we still have to consider all the alternatives here and handle them nicely.
        target.Results.Output = string(out)
        if err != nil && target.Results.Output == "" {
            target.Results.Output = err.Error()
        }
        target.Results.TimedOut = err == context.DeadlineExceeded
        coverage = parseCoverageFile(target, coverageFile)
        target.Results.Duration += duration
        if !core.PathExists(outputFile) {
            if err == nil && target.NoTestOutput {
                target.Results.NumTests++
                target.Results.Passed++
                numSucceeded++
            } else if err == nil {
                target.Results.NumTests++
                target.Results.Failed++
                target.Results.Failures = append(target.Results.Failures, core.TestFailure{
                    Name:   "Missing results",
                    Stdout: string(out),
                })
                resultErr = fmt.Errorf("Test failed to produce output results file")
                resultMsg = fmt.Sprintf("Test apparently succeeded but failed to produce %s. Output: %s", outputFile, string(out))
                numFlakes++
            } else {
                target.Results.NumTests++
                target.Results.Failed++
                target.Results.Failures = append(target.Results.Failures, core.TestFailure{
                    Name:   "Test failed with no results",
                    Stdout: string(out),
                })
                numFlakes++
                resultErr = err
                resultMsg = fmt.Sprintf("Test failed with no results. Output: %s", string(out))
            }
        } else {
            results, err2 := parseTestResults(target, outputFile, false)
            if err2 != nil {
                resultErr = err2
                resultMsg = fmt.Sprintf("Couldn't parse test output file: %s. Stdout: %s", err2, string(out))
                numFlakes++
            } else if err != nil && results.Failed == 0 {
                // Add a failure result to the test so it shows up in the final aggregation.
                target.Results.Failed = 1
                target.Results.Failures = append(results.Failures, core.TestFailure{
                    Name:   "Return value",
                    Type:   fmt.Sprintf("%s", err),
                    Stdout: string(out),
                })
                numFlakes++
                resultErr = err
                resultMsg = fmt.Sprintf("Test returned nonzero but reported no errors: %s. Output: %s", err, string(out))
            } else if err == nil && results.Failed != 0 {
                resultErr = fmt.Errorf("Test returned 0 but still reported failures")
                resultMsg = fmt.Sprintf("Test returned 0 but still reported failures. Stdout: %s", string(out))
                numFlakes++
            } else if results.Failed != 0 {
                resultErr = fmt.Errorf("Tests failed")
                resultMsg = fmt.Sprintf("Tests failed. Stdout: %s", string(out))
                numFlakes++
            } else {
                numSucceeded++
                if !state.ShowTestOutput {
                    // Save a bit of memory; if we're not printing results on success we'll never use them again.
                    target.Results.Output = ""
                }
            }
        }
    }
    if numSucceeded >= successesRequired {
        target.Results.Failures = nil // Remove any failures, they don't count...
        target.Results.Failed = 0     // (they'll be picked up as flakes below).
        if numSucceeded > 0 && numFlakes > 0 {
            target.Results.Flakes = numFlakes
        }
        // Success; clean things up.
        if moveAndCacheOutputFiles(target.Results, coverage) {
            logTestSuccess(state, tid, label, target.Results, coverage)
        }
        // Clean up the test directory.
        if state.CleanWorkdirs {
            if err := os.RemoveAll(target.TestDir()); err != nil {
                log.Warning("Failed to remove test directory for %s: %s", target.Label, err)
            }
        }
    } else {
        state.LogTestResult(tid, label, core.TargetTestFailed, target.Results, coverage, resultErr, resultMsg)
    }
}