Code example #1
File: tester.go Project: mcellteam/nutmeg
// checkFilesEmpty tests that all simulation output files listed in the
// test case were created by the run and are either empty or non-empty
// (optionally matching an expected file size) depending on the provided
// switch.
func checkFilesEmpty(test *TestData, c *tomlParser.TestCase,
	empty bool) error {

	var fileList []string
	for _, fileName := range c.FileNames {
		files, err := misc.GenerateFileList(fileName, c.IDRange)
		if err != nil {
			return err
		}
		fileList = append(fileList, files...)
	}

	if len(fileList) == 0 {
		return fmt.Errorf("no files to test specified")
	}

	// choose the size predicate up front: for the empty check a file passes
	// only if its size is zero; otherwise it must be non-empty or, when
	// c.FileSize is set, match that exact size. message names the failing
	// condition for the final error report.
	var sizeCheck func(int64) bool
	var message string
	if empty {
		sizeCheck = func(s int64) bool { return s == 0 }
		message = "non-empty"
	} else {
		sizeCheck = func(s int64) bool {
			if c.FileSize == 0 {
				return s != 0
			}
			return s == c.FileSize
		}
		message = "empty"
	}

	var badFileList []string
	for _, fileName := range fileList {
		filePaths, err := file.GetDataPaths(test.Path, fileName, test.Run.Seed, 1)
		if err != nil {
			return fmt.Errorf("failed to construct data path for file %s:\n%s",
				fileName, err)
		}

		for _, filePath := range filePaths {
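			// a failed stat (missing file) fails the check just like a wrong size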
			fi, err := os.Stat(filePath)
			if err != nil || !sizeCheck(fi.Size()) {
				badFileList = append(badFileList, filePath)
			}
		}
	}

	if len(badFileList) != 0 {
		badFiles := strings.Join(badFileList, "\n\t\t")
		return fmt.Errorf("the following files were either missing, %s, or had "+
			"the wrong size:\n\n\t\t%s", message, badFiles)
	}
	return nil
}
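
The sizeCheck closure above picks the pass condition once, up front, instead of re-branching on every file. The following self-contained sketch shows the same pattern outside of nutmeg; all names in it (checkSizes, the example paths) are illustrative and not part of the project:

package main

import (
	"fmt"
	"os"
)

// checkSizes reports files whose on-disk size fails the given predicate,
// mirroring the sizeCheck/message pattern of checkFilesEmpty above.
func checkSizes(paths []string, sizeOK func(int64) bool, failure string) error {
	var bad []string
	for _, p := range paths {
		fi, err := os.Stat(p)
		// a missing file is treated the same as one with a wrong size
		if err != nil || !sizeOK(fi.Size()) {
			bad = append(bad, p)
		}
	}
	if len(bad) != 0 {
		return fmt.Errorf("files missing or %s: %v", failure, bad)
	}
	return nil
}

func main() {
	// expect both output files to be empty, as CHECK_EMPTY_FILES would
	err := checkSizes([]string{"out/run_1.dat", "out/run_2.dat"},
		func(s int64) bool { return s == 0 }, "non-empty")
	fmt.Println(err)
}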
Code example #2
File: tester.go Project: mcellteam/nutmeg
// Run takes the output of an MCell run on a test and analyses it as
// requested by the checks in the TestDescription.
func Run(test *TestData, result chan *TestResult) {

	// tests which don't require loading of reaction data output
	nonDataParseTests := []string{"DIFF_FILE_CONTENT", "FILE_MATCH_PATTERN",
		"CHECK_TRIGGERS", "CHECK_EXPRESSIONS", "CHECK_LEGACY_VOL_OUTPUT",
		"CHECK_EMPTY_FILE", "CHECK_ASCII_VIZ_OUTPUT", "CHECK_CHECKPOINT"}

	for _, c := range test.Checks {
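		// each check is handled independently; its outcome is reported on
		// the result channel via recordResult at the end of the iteration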

		dataPaths, err := file.GetDataPaths(test.Path, c.DataFile, test.Run.Seed,
			test.Run.NumSeeds)
		if err != nil {
			result <- &TestResult{test.Path, false, c.TestType, fmt.Sprint(err)}
			continue
		}

		// load the data for test types which need it
		var data []*file.Columns
		var stringData []*file.StringColumns
		// NOTE: only attempt to parse data for the test cases which need it
		if c.DataFile != "" && !misc.ContainsString(nonDataParseTests, c.TestType) {
			data, err = file.LoadData(dataPaths, c.HaveHeader, c.AverageData)
			if err != nil {
				result <- &TestResult{test.Path, false, c.TestType, fmt.Sprint(err)}
				continue
			}
		} else if c.TestType == "CHECK_TRIGGERS" {
			stringData, err = file.LoadStringData(dataPaths, c.HaveHeader)
			if err != nil {
				result <- &TestResult{test.Path, false, c.TestType, fmt.Sprint(err)}
				continue
			}
		}

		// execute requested tests on data
		var testErr error
		switch c.TestType {
		case "CHECK_SUCCESS":
			if test.SimStatus == nil {
				result <- &TestResult{test.Path, false, "CHECK_SUCCESS",
					"simulations did not run or return an exit status"}
				return // if simulation fails we won't continue testing
			}

			// to cut down on the amount of output (particularly in the case
			// of multiple seeds) we report failure if any of the runs within
			// a test fails, and success otherwise
			for _, testRun := range test.SimStatus {
				if !testRun.Success {
					message := strings.Join([]string{testRun.ExitMessage, testRun.StdErrContent}, "\n")
					result <- &TestResult{test.Path, false, "CHECK_SUCCESS", message}
					return // if simulation fails we won't continue testing
				}
			}

		case "CHECK_EXIT_CODE":
			for _, testRun := range test.SimStatus {
				if c.ExitCode != testRun.ExitCode {
					testErr = fmt.Errorf("expected exit code %d but got %d instead",
						c.ExitCode, testRun.ExitCode)
				}
			}

		case "CHECK_NONEMPTY_FILES":
			testErr = checkFilesEmpty(test, c, false)

		case "CHECK_EMPTY_FILES":
			testErr = checkFilesEmpty(test, c, true)

		case "CHECK_CHECKPOINT":
			testErr = checkCheckPoint(test.Path, c)

		case "CHECK_LEGACY_VOL_OUTPUT":
			for _, p := range dataPaths {
				if testErr = checkLegacyVolOutput(p, c); testErr != nil {
					break
				}
			}

		case "CHECK_ASCII_VIZ_OUTPUT":
			for _, p := range dataPaths {
				if testErr = checkASCIIVizOutput(p); testErr != nil {
					break
				}
			}

		case "DIFF_FILE_CONTENT":
			for _, p := range dataPaths {
				if testErr = diffFileContent(test.Path, p, c.TemplateFile,
					c.TemplateParameters); testErr != nil {
					break
				}
			}

		case "COUNT_CONSTRAINTS":
			for i, d := range data {
				if testErr = checkCountConstraints(d, dataPaths[i], c.MinTime, c.MaxTime,
					c.CountConstraints); testErr != nil {
					break
				}
			}

		case "COUNT_MINMAX":
			for i, d := range data {
				if testErr = checkCountMinmax(d, dataPaths[i], c.MinTime, c.MaxTime,
					c.CountMaximum, c.CountMinimum); testErr != nil {
					break
				}
			}

		case "FILE_MATCH_PATTERN":
			for _, dataPath := range dataPaths {
				if testErr = fileMatchPattern(dataPath, c.MatchPattern, c.NumMatches); testErr != nil {
					break
				}
			}

		case "CHECK_EXPRESSIONS":
			for _, dataPath := range dataPaths {
				if testErr = checkExpressions(dataPath); testErr != nil {
					break
				}
			}

		case "COMPARE_COUNTS":
			// only one of absDeviation or relDeviation can be defined
			if (len(c.AbsDeviation) > 0) && (len(c.RelDeviation) > 0) {
				testErr = fmt.Errorf("absDeviation and relDeviation are mutually exclusive")
				break
			}

			referencePath := filepath.Join(test.Path, c.ReferenceFile)
			refData, err := file.ReadCounts(referencePath, c.HaveHeader)
			if err != nil {
				testErr = err
				break
			}
			for i, d := range data {
				if testErr = compareCounts(d, refData, c.AbsDeviation, c.RelDeviation,
					dataPaths[i], c.MinTime, c.MaxTime); testErr != nil {
					break
				}
			}

		case "COUNT_EQUILIBRIUM":
			for i, d := range data {
				if testErr = checkCountEquilibrium(d, dataPaths[i], c.MinTime, c.MaxTime,
					c.Means, c.Tolerances); testErr != nil {
					break
				}
			}

		case "POSITIVE_COUNTS":
			for i, d := range data {
				if testErr = checkPositiveOrZeroCounts(d, dataPaths[i], c.MinTime,
					c.MaxTime, false); testErr != nil {
					break
				}
			}

		case "POSITIVE_OR_ZERO_COUNTS":
			for i, d := range data {
				if testErr = checkPositiveOrZeroCounts(d, dataPaths[i], c.MinTime,
					c.MaxTime, true); testErr != nil {
					break
				}
			}

		case "ZERO_COUNTS":
			for i, d := range data {
				if testErr = checkZeroCounts(d, dataPaths[i], c.MinTime,
					c.MaxTime); testErr != nil {
					break
				}
			}

		case "COUNT_RATES":
			for i, d := range data {
				if testErr = countRates(d, dataPaths[i], c.MinTime, c.MaxTime,
					c.BaseTime, c.Means, c.Tolerances); testErr != nil {
					break
				}
			}

		case "CHECK_TRIGGERS":
			for i, d := range stringData {
				if testErr = checkTriggers(d, dataPaths[i], c.MinTime, c.MaxTime,
					c.TriggerType, c.HaveExactTime, c.OutputTime, c.Xrange, c.Yrange,
					c.Zrange); testErr != nil {
					break
				}
			}

		default:
			testErr = fmt.Errorf("unknown test type: %s", c.TestType)
		}
		recordResult(result, c.TestType, test.Path, testErr)
	}
}
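
Run streams one TestResult per check over the result channel, so a caller typically launches it in a goroutine and drains the channel. Below is a minimal driver sketch; the result struct merely mirrors the positional TestResult literals seen above (path, success flag, test type, message), and its field names are assumptions rather than nutmeg's actual definition:

package main

import "fmt"

// result mimics the shape of the TestResult literals used in Run above;
// the real type lives in nutmeg and may use different field names.
type result struct {
	Path     string
	Success  bool
	TestType string
	Message  string
}

func main() {
	results := make(chan *result)

	// in nutmeg each test would call Run(test, results) in its own
	// goroutine; this stand-in emits one fake result and closes the channel
	go func() {
		results <- &result{"tests/empty_file", true, "CHECK_EMPTY_FILES", ""}
		close(results)
	}()

	for r := range results {
		status := "PASS"
		if !r.Success {
			status = "FAIL"
		}
		fmt.Printf("[%s] %s %s %s\n", status, r.Path, r.TestType, r.Message)
	}
}

Note that Run itself never closes the channel, so a real driver has to know how many results to expect (roughly one per check, fewer after an early CHECK_SUCCESS return) or coordinate shutdown separately, for example with a sync.WaitGroup.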