Example #1
// CreateMakefile creates a Makefile to build a tree. The cwd should
// be the root of the tree you want to make (due to some probably
// unnecessary assumptions that CreateMakefile makes).
func CreateMakefile() (*makex.Makefile, error) {
	localRepo, err := OpenRepo(".")
	if err != nil {
		return nil, err
	}
	buildStore, err := buildstore.LocalRepo(localRepo.RootDir)
	if err != nil {
		return nil, err
	}

	treeConfig, err := config.ReadCached(buildStore.Commit(localRepo.CommitID))
	if err != nil {
		return nil, err
	}
	if len(treeConfig.SourceUnits) == 0 {
		log.Printf("No source unit files found. Did you mean to run `%s config`? (This is not an error; it just means that srclib didn't find anything to build or analyze here.)", srclib.CommandName)
	}

	// TODO(sqs): buildDataDir is hardcoded.
	buildDataDir := filepath.Join(buildstore.BuildDataDirName, localRepo.CommitID)
	mf, err := plan.CreateMakefile(buildDataDir, buildStore, localRepo.VCSType, treeConfig)
	if err != nil {
		return nil, err
	}
	return mf, nil
}
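
A minimal usage sketch for this function (makex.Marshal is the same serializer the tests below use; log and os are assumed imports, and this caller is not part of the original source):

func main() {
	mf, err := CreateMakefile()
	if err != nil {
		log.Fatal(err)
	}
	// Serialize the Makefile the same way TestCreateMakefile does below.
	out, err := makex.Marshal(mf)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}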
Example #2
func TestCreateMakefile(t *testing.T) {
	oldChooseTool := toolchain.ChooseTool
	defer func() { toolchain.ChooseTool = oldChooseTool }()

	toolchain.ChooseTool = func(op, unitType string) (*srclib.ToolRef, error) {
		return &srclib.ToolRef{
			Toolchain: "tc",
			Subcmd:    "t",
		}, nil
	}
	buildDataDir := "testdata"
	c := &config.Tree{
		SourceUnits: []*unit.SourceUnit{
			{
				Key: unit.Key{
					Name: "n",
					Type: "t",
				},
				Info: unit.Info{
					Files: []string{"f"},
					Ops: map[string][]byte{
						"graph":      nil,
						"depresolve": nil,
					},
				},
			},
		},
	}

	mf, err := plan.CreateMakefile(buildDataDir, nil, "", c)
	if err != nil {
		t.Fatal(err)
	}

	want := `
.PHONY: all

all: testdata/n/t.depresolve.json testdata/n/t.graph.json

testdata/n/t.depresolve.json: testdata/n/t.unit.json
	srclib tool "tc" "t" < $^ 1> $@

testdata/n/t.graph.json: testdata/n/t.unit.json
	srclib tool "tc" "t" < $< | srclib internal normalize-graph-data --unit-type "t" --dir . 1> $@

.DELETE_ON_ERROR:
`

	gotBytes, err := makex.Marshal(mf)
	if err != nil {
		t.Fatal(err)
	}

	want = strings.TrimSpace(want)
	got := string(bytes.TrimSpace(gotBytes))

	if got != want {
		t.Errorf("got makefile:\n==========\n%s\n==========\n\nwant makefile:\n==========\n%s\n==========", got, want)
	}
}
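
The save-and-restore stubbing of toolchain.ChooseTool at the top of this test is a common pattern for package-level function hooks. A sketch of the same idea as a reusable helper, assuming Go 1.14+ for t.Cleanup (not part of the original test):

// stubChooseTool installs a fake toolchain.ChooseTool for the duration of
// the test and restores the original when the test finishes.
func stubChooseTool(t *testing.T, fn func(op, unitType string) (*srclib.ToolRef, error)) {
	t.Helper()
	old := toolchain.ChooseTool
	toolchain.ChooseTool = fn
	t.Cleanup(func() { toolchain.ChooseTool = old })
}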
Example #3
File: make_cmd.go Project: jpoler/srclib
// CreateMakefile creates a Makefile to build a tree. The cwd should
// be the root of the tree you want to make (due to some probably
// unnecessary assumptions that CreateMakefile makes).
func CreateMakefile(execOpt ToolchainExecOpt, verbose bool) (*makex.Makefile, error) {
	localRepo, err := OpenRepo(".")
	if err != nil {
		return nil, err
	}
	buildStore, err := buildstore.LocalRepo(localRepo.RootDir)
	if err != nil {
		return nil, err
	}

	treeConfig, err := config.ReadCached(buildStore.Commit(localRepo.CommitID))
	if err != nil {
		return nil, err
	}
	if len(treeConfig.SourceUnits) == 0 {
		log.Printf("No source unit files found. Did you mean to run `%s config`? (This is not an error; it just means that srclib didn't find anything to build or analyze here.)", srclib.CommandName)
	}

	toolchainExecOptArgs, err := flagutil.MarshalArgs(&execOpt)
	if err != nil {
		return nil, err
	}

	// TODO(sqs): buildDataDir is hardcoded.
	buildDataDir := filepath.Join(buildstore.BuildDataDirName, localRepo.CommitID)
	mf, err := plan.CreateMakefile(buildDataDir, buildStore, localRepo.VCSType, treeConfig, plan.Options{
		ToolchainExecOpt: strings.Join(toolchainExecOptArgs, " "),
		Verbose:          verbose,
	})
	if err != nil {
		return nil, err
	}
	return mf, nil
}
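
Compared with Example #1, this variant threads the toolchain execution flags and verbosity into the plan via plan.Options. A hypothetical call site (ToolchainExecOpt's fields are not shown in this snippet, so the zero value stands in):

// buildTree is a hypothetical caller: default exec options, verbose on.
func buildTree() (*makex.Makefile, error) {
	return CreateMakefile(ToolchainExecOpt{}, true)
}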
Example #4
func TestCreateMakefile(t *testing.T) {
	buildDataDir := "testdata"
	c := &config.Tree{
		SourceUnits: []*unit.SourceUnit{
			{
				Name:  "n",
				Type:  "t",
				Files: []string{"f"},
				Ops: map[string]*srclib.ToolRef{
					"graph":      {Toolchain: "tc", Subcmd: "t"},
					"depresolve": {Toolchain: "tc", Subcmd: "t"},
				},
			},
		},
	}

	mf, err := plan.CreateMakefile(buildDataDir, nil, "", c, plan.Options{NoCache: true})
	if err != nil {
		t.Fatal(err)
	}

	sep := string(filepath.Separator)

	want := `
all: testdata` + sep + `n` + sep + `t.graph.json testdata` + sep + `n` + sep + `t.depresolve.json

testdata` + sep + `n` + sep + `t.graph.json: testdata` + sep + `n` + sep + `t.unit.json f
	src tool  "tc" "t" < $< | src internal normalize-graph-data --unit-type "t" --dir . 1> $@

testdata` + sep + `n` + sep + `t.depresolve.json: testdata` + sep + `n` + sep + `t.unit.json
	src tool  "tc" "t" < $^ 1> $@

.DELETE_ON_ERROR:
`

	gotBytes, err := makex.Marshal(mf)
	if err != nil {
		t.Fatal(err)
	}

	want = strings.TrimSpace(want)
	got := string(bytes.TrimSpace(gotBytes))

	if got != want {
		t.Errorf("got makefile:\n==========\n%s\n==========\n\nwant makefile:\n==========\n%s\n==========", got, want)
	}
}
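
Two details of the expected Makefile above are easy to misread: the doubled space after "src tool" is presumably where the empty ToolchainExecOpt argument string is interpolated (compare the single space in Example #2), and the targets are spliced together with filepath.Separator so the assertion also holds on Windows. A sketch of an equivalent way to build those expected paths:

	// Equivalent expected-target construction without manual separator
	// splicing (a sketch, not from the original test):
	graphTarget := filepath.Join("testdata", "n", "t.graph.json")
	depresolveTarget := filepath.Join("testdata", "n", "t.depresolve.json")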
Example #5
// Import imports build data into a RepoStore or MultiRepoStore.
func Import(buildDataFS vfs.FileSystem, stor interface{}, opt ImportOpt) error {
	// Traverse the build data directory for this repo and commit to
	// create the makefile that lists the targets (which are the data
	// files we will import).
	treeConfig, err := config.ReadCached(buildDataFS)
	if err != nil {
		return fmt.Errorf("error calling config.ReadCached: %s", err)
	}
	mf, err := plan.CreateMakefile(".", nil, "", treeConfig, plan.Options{NoCache: true})
	if err != nil {
		return fmt.Errorf("error calling plan.Makefile: %s", err)
	}

	var (
		mu               sync.Mutex
		hasIndexableData bool
	)

	par := parallel.NewRun(10)
	for _, rule_ := range mf.Rules {
		rule := rule_

		if opt.Unit != "" || opt.UnitType != "" {
			type ruleForSourceUnit interface {
				SourceUnit() *unit.SourceUnit
			}
			if rule, ok := rule.(ruleForSourceUnit); ok {
				u := rule.SourceUnit()
				if (opt.Unit != "" && u.Name != opt.Unit) || (opt.UnitType != "" && u.Type != opt.UnitType) {
					continue
				}
			} else {
				// Skip all non-source-unit rules if --unit or
				// --unit-type are specified.
				continue
			}
		}

		par.Do(func() error {
			switch rule := rule.(type) {
			case *grapher.GraphUnitRule:
				var data graph.Output
				if err := readJSONFileFS(buildDataFS, rule.Target(), &data); err != nil {
					if os.IsNotExist(err) {
						log.Printf("Warning: no build data for unit %s %s.", rule.Unit.Type, rule.Unit.Name)
						return nil
					}
					return fmt.Errorf("error reading JSON file %s for unit %s %s: %s", rule.Target(), rule.Unit.Type, rule.Unit.Name, err)
				}
				if opt.DryRun || GlobalOpt.Verbose {
					log.Printf("# Importing graph data (%d defs, %d refs, %d docs, %d anns) for unit %s %s", len(data.Defs), len(data.Refs), len(data.Docs), len(data.Anns), rule.Unit.Type, rule.Unit.Name)
					if opt.DryRun {
						return nil
					}
				}

				// HACK: Transfer docs to [def].Docs.
				docsByPath := make(map[string]*graph.Doc, len(data.Docs))
				for _, doc := range data.Docs {
					docsByPath[doc.Path] = doc
				}
				for _, def := range data.Defs {
					if doc, present := docsByPath[def.Path]; present {
						def.Docs = append(def.Docs, &graph.DefDoc{Format: doc.Format, Data: doc.Data})
					}
				}

				switch imp := stor.(type) {
				case store.RepoImporter:
					if err := imp.Import(opt.CommitID, rule.Unit, data); err != nil {
						return fmt.Errorf("error running store.RepoImporter.Import: %s", err)
					}
				case store.MultiRepoImporter:
					if err := imp.Import(opt.Repo, opt.CommitID, rule.Unit, data); err != nil {
						return fmt.Errorf("error running store.MultiRepoImporter.Import: %s", err)
					}
				default:
					return fmt.Errorf("store (type %T) does not implement importing", stor)
				}

				mu.Lock()
				hasIndexableData = true
				mu.Unlock()
			}
			return nil
		})
	}
	if err := par.Wait(); err != nil {
		return err
	}

	if hasIndexableData && !opt.NoIndex {
		if GlobalOpt.Verbose {
			log.Printf("# Building indexes")
		}
		switch s := stor.(type) {
		case store.RepoIndexer:
			if err := s.Index(opt.CommitID); err != nil {
				return fmt.Errorf("Error indexing commit %s: %s", opt.CommitID, err)
			}
		case store.MultiRepoIndexer:
			if err := s.Index(opt.Repo, opt.CommitID); err != nil {
				return fmt.Errorf("error indexing %s@%s: %s", opt.Repo, opt.CommitID, err)
			}
		}
	}

	return nil
}
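
The parallel.NewRun(10)/par.Do pattern above caps the import at ten concurrent rules. For comparison, a sketch of the same shape using golang.org/x/sync/errgroup (SetLimit needs x/sync v0.1.0+; importRule is a hypothetical stand-in for the per-rule body, and makex.Rule is assumed to be the element type of mf.Rules):

// importRules runs importRule over every rule with bounded concurrency.
func importRules(mf *makex.Makefile, importRule func(makex.Rule) error) error {
	g := new(errgroup.Group)
	g.SetLimit(10) // at most 10 rules imported concurrently
	for _, rule := range mf.Rules {
		rule := rule // capture per iteration (pre-Go 1.22)
		g.Go(func() error { return importRule(rule) })
	}
	return g.Wait()
}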
Example #6
File: store_cmds.go Project: xuy/srclib
// Import imports build data into a RepoStore or MultiRepoStore.
func Import(buildDataFS vfs.FileSystem, stor interface{}, opt ImportOpt) error {
	// Traverse the build data directory for this repo and commit to
	// create the makefile that lists the targets (which are the data
	// files we will import).
	treeConfig, err := config.ReadCached(buildDataFS)
	if err != nil {
		return fmt.Errorf("error calling config.ReadCached: %s", err)
	}
	mf, err := plan.CreateMakefile(".", nil, "", treeConfig)
	if err != nil {
		return fmt.Errorf("error calling plan.Makefile: %s", err)
	}

	// hasIndexableData is set if at least one source unit's graph data is
	// successfully imported to the graph store.
	//
	// The flag is written concurrently from the goroutines spawned below and
	// only read after par.Wait returns. Even though every write moves it in
	// the same direction (false to true), unsynchronized concurrent writes
	// are still a data race under the Go memory model, so it is guarded by
	// a mutex.
	var (
		mu               sync.Mutex
		hasIndexableData bool
	)

	importGraphData := func(graphFile string, sourceUnit *unit.SourceUnit) error {
		var data graph.Output
		if err := readJSONFileFS(buildDataFS, graphFile, &data); err != nil {
			if err == errEmptyJSONFile {
				log.Printf("Warning: the JSON file is empty for unit %s %s.", sourceUnit.Type, sourceUnit.Name)
				return nil
			}
			if os.IsNotExist(err) {
				log.Printf("Warning: no build data for unit %s %s.", sourceUnit.Type, sourceUnit.Name)
				return nil
			}
			return fmt.Errorf("error reading JSON file %s for unit %s %s: %s", graphFile, sourceUnit.Type, sourceUnit.Name, err)
		}
		if opt.DryRun || GlobalOpt.Verbose {
			log.Printf("# Importing graph data (%d defs, %d refs, %d docs, %d anns) for unit %s %s", len(data.Defs), len(data.Refs), len(data.Docs), len(data.Anns), sourceUnit.Type, sourceUnit.Name)
			if opt.DryRun {
				return nil
			}
		}

		// HACK: Transfer docs to [def].Docs.
		docsByPath := make(map[string]*graph.Doc, len(data.Docs))
		for _, doc := range data.Docs {
			docsByPath[doc.Path] = doc
		}
		for _, def := range data.Defs {
			if doc, present := docsByPath[def.Path]; present {
				def.Docs = append(def.Docs, &graph.DefDoc{Format: doc.Format, Data: doc.Data})
			}
		}

		switch imp := stor.(type) {
		case store.RepoImporter:
			if err := imp.Import(opt.CommitID, sourceUnit, data); err != nil {
				return fmt.Errorf("error running store.RepoImporter.Import: %s", err)
			}
		case store.MultiRepoImporter:
			if err := imp.Import(opt.Repo, opt.CommitID, sourceUnit, data); err != nil {
				return fmt.Errorf("error running store.MultiRepoImporter.Import: %s", err)
			}
		default:
			return fmt.Errorf("store (type %T) does not implement importing", stor)
		}

		mu.Lock()
		hasIndexableData = true
		mu.Unlock()

		return nil
	}

	par := parallel.NewRun(10)
	for _, rule_ := range mf.Rules {
		rule := rule_
		switch rule := rule.(type) {
		case *grapher.GraphUnitRule:
			if (opt.Unit != "" && rule.Unit.Name != opt.Unit) || (opt.UnitType != "" && rule.Unit.Type != opt.UnitType) {
				continue
			}
			par.Acquire()
			go func() {
				defer par.Release()
				if err := importGraphData(rule.Target(), rule.Unit); err != nil {
					par.Error(err)
				}
			}()
		case *grapher.GraphMultiUnitsRule:
			for target_, sourceUnit_ := range rule.Targets() {
				target, sourceUnit := target_, sourceUnit_
				if (opt.Unit != "" && sourceUnit.Name != opt.Unit) || (opt.UnitType != "" && sourceUnit.Type != opt.UnitType) {
					continue
				}
				par.Acquire()
				go func() {
					defer par.Release()
					if err := importGraphData(target, sourceUnit); err != nil {
						par.Error(err)
					}
				}()
			}
		}
	}
	if err := par.Wait(); err != nil {
		return err
	}

	if hasIndexableData && !opt.NoIndex {
		if GlobalOpt.Verbose {
			log.Printf("# Building indexes")
		}
		switch s := stor.(type) {
		case store.RepoIndexer:
			if err := s.Index(opt.CommitID); err != nil {
				return fmt.Errorf("Error indexing commit %s: %s", opt.CommitID, err)
			}
		case store.MultiRepoIndexer:
			if err := s.Index(opt.Repo, opt.CommitID); err != nil {
				return fmt.Errorf("error indexing %s@%s: %s", opt.Repo, opt.CommitID, err)
			}
		}
	}

	switch imp := stor.(type) {
	case store.RepoImporter:
		if err := imp.CreateVersion(opt.CommitID); err != nil {
			return fmt.Errorf("error running store.RepoImporter.CreateVersion: %s", err)
		}
	case store.MultiRepoImporter:
		if err := imp.CreateVersion(opt.Repo, opt.CommitID); err != nil {
			return fmt.Errorf("error running store.MultiRepoImporter.CreateVersion: %s", err)
		}
	}

	return nil
}
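
Both Import variants lean on readJSONFileFS and errEmptyJSONFile, which are defined elsewhere in srclib. A minimal sketch of the behavior the call sites above imply (an assumption, not the actual implementation):

var errEmptyJSONFile = errors.New("empty JSON file")

// readJSONFileFS opens path on the given VFS and decodes it as JSON into v.
// Open is expected to return an error satisfying os.IsNotExist when the
// file is absent, which the callers above downgrade to a warning.
func readJSONFileFS(fs vfs.FileSystem, path string, v interface{}) error {
	f, err := fs.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	b, err := ioutil.ReadAll(f)
	if err != nil {
		return err
	}
	if len(b) == 0 {
		return errEmptyJSONFile
	}
	return json.Unmarshal(b, v)
}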
Example #7
func coverage(repo *Repo) (map[string]*cvg.Coverage, error) {
	// Gather file data
	codeFileData := make(map[string]*codeFileDatum) // data for each file needed to compute coverage
	// Propagate errors from the walk instead of silently dropping them.
	walkErr := filepath.Walk(repo.RootDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if filepath.IsAbs(path) {
			var err error
			path, err = filepath.Rel(repo.RootDir, path)
			if err != nil {
				return err
			}
		}

		if info.IsDir() {
			if strings.HasPrefix(info.Name(), ".") {
				return filepath.SkipDir // don't search hidden directories
			}
			return nil
		}

		path = filepath.ToSlash(path)

		ext := strings.ToLower(filepath.Ext(path))
		if lang, isCodeFile := extToLang[ext]; isCodeFile {

			// Skip special files (auto-generated, temporary, ...).
			if shouldIgnoreFile(path, lang) {
				return nil
			}

			b, err := ioutil.ReadFile(path)
			if err != nil {
				return err
			}
			loc := numLines(b)
			codeFileData[path] = &codeFileDatum{LoC: loc, Language: lang}
		}
		return nil
	})
	if walkErr != nil {
		return nil, walkErr
	}

	// Gather ref/def data for each file
	bdfs, err := GetBuildDataFS(repo.CommitID)
	if err != nil {
		return nil, err
	}
	treeConfig, err := config.ReadCached(bdfs)
	if err != nil {
		return nil, fmt.Errorf("error calling config.ReadCached: %s", err)
	}
	mf, err := plan.CreateMakefile(".", nil, "", treeConfig)
	if err != nil {
		return nil, fmt.Errorf("error calling plan.Makefile: %s", err)
	}

	defKeys := make(map[graph.DefKey]struct{})
	data := make([]graph.Output, 0, len(mf.Rules))

	parseGraphData := func(graphFile string, sourceUnit *unit.SourceUnit) error {
		var item graph.Output
		if err := readJSONFileFS(bdfs, graphFile, &item); err != nil {
			if err == errEmptyJSONFile {
				log.Printf("Warning: the JSON file is empty for unit %s %s.", sourceUnit.Type, sourceUnit.Name)
				return nil
			}
			if os.IsNotExist(err) {
				log.Printf("Warning: no build data for unit %s %s.", sourceUnit.Type, sourceUnit.Name)
				return nil
			}
			return fmt.Errorf("error reading JSON file %s for unit %s %s: %s", graphFile, sourceUnit.Type, sourceUnit.Name, err)
		}
		data = append(data, item)

		for _, file := range sourceUnit.Files {
			if datum, exists := codeFileData[file]; exists {
				datum.Seen = true
			}
		}

		for _, def := range item.Defs {
			defKeys[def.DefKey] = struct{}{}
		}

		return nil
	}

	for _, rule_ := range mf.Rules {
		switch rule := rule_.(type) {
		case *grapher.GraphUnitRule:
			if err := parseGraphData(rule.Target(), rule.Unit); err != nil {
				return nil, err
			}
		case *grapher.GraphMultiUnitsRule:
			for target, sourceUnit := range rule.Targets() {
				if err := parseGraphData(target, sourceUnit); err != nil {
					return nil, err
				}
			}
		}
	}

	missingKeys := make(map[graph.DefKey]struct{})

	for _, item := range data {
		var validRefs []*graph.Ref
		for _, ref := range item.Refs {
			if datum, exists := codeFileData[ref.File]; exists {
				datum.NumRefs++

				if ref.DefUnitType == "URL" || ref.DefRepo != "" {
					validRefs = append(validRefs, ref)
					datum.NumRefsValid++
				} else if _, defExists := defKeys[ref.DefKey()]; defExists {
					validRefs = append(validRefs, ref)
					datum.NumRefsValid++
				} else if GlobalOpt.Verbose {
					if _, reported := missingKeys[ref.DefKey()]; !reported {
						missingKeys[ref.DefKey()] = struct{}{}
						sample := ref.DefKey().Path
						candidates := make([]graph.DefKey, 0, 1)
						for key := range defKeys {
							if key.Path == sample {
								candidates = append(candidates, key)
							}
						}
						log.Printf("No matching def for %s, candidates are %v", ref.String(), candidates)
					}
				}
			}
		}

		for _, def := range item.Defs {
			if datum, exists := codeFileData[def.File]; exists {
				datum.NumDefs++
			}
		}
	}

	// Compute coverage from per-file data
	type langStats struct {
		numFiles          int
		numIndexedFiles   int
		numDefs           int
		numRefs           int
		numRefsValid      int
		uncoveredFiles    []string
		undiscoveredFiles []string
		loc               int
	}
	stats := make(map[string]*langStats)
	for file, datum := range codeFileData {
		if _, exist := stats[datum.Language]; !exist {
			stats[datum.Language] = &langStats{}
		}

		s := stats[datum.Language]
		s.loc += datum.LoC
		s.numDefs += datum.NumDefs
		s.numRefs += datum.NumRefs
		s.numRefsValid += datum.NumRefsValid
		if datum.Seen {
			// this file is listed in the source unit and found by the scanner
			s.numFiles++
			density := float64(datum.NumDefs+datum.NumRefsValid) / float64(datum.LoC)
			if density > fileTokThresh {
				s.numIndexedFiles++
			} else {
				if GlobalOpt.Verbose {
					log.Printf("Uncovered file %s - density: %f, defs: %d, refs: %d, lines of code: %d",
						file, density, datum.NumDefs, datum.NumRefsValid, datum.LoC)
				}
				s.uncoveredFiles = append(s.uncoveredFiles, file)
			}
		} else {
			// this file is not listed in the source unit but found by the scanner
			if GlobalOpt.Verbose {
				log.Printf("Undiscovered file %s", file)
			}
			s.undiscoveredFiles = append(s.undiscoveredFiles, file)
		}
	}

	cov := make(map[string]*cvg.Coverage)
	for lang, s := range stats {
		cov[lang] = &cvg.Coverage{
			FileScore:         divideSentinel(float64(s.numIndexedFiles), float64(s.numFiles), -1),
			RefScore:          divideSentinel(float64(s.numRefsValid), float64(s.numRefs), -1),
			TokDensity:        divideSentinel(float64(s.numDefs+s.numRefs), float64(s.loc), -1),
			UncoveredFiles:    s.uncoveredFiles,
			UndiscoveredFiles: s.undiscoveredFiles,
		}
	}
	return cov, nil
}
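
coverage depends on a few helpers that are not shown here. Sketches of two of them, reconstructed from the call sites above (assumptions, not the srclib originals):

// codeFileDatum accumulates the per-file counters coverage aggregates.
type codeFileDatum struct {
	LoC          int    // lines of code, from numLines
	Language     string // language inferred from the file extension
	Seen         bool   // listed in some source unit's Files
	NumDefs      int
	NumRefs      int
	NumRefsValid int
}

// divideSentinel returns a/b, or sentinel when b is zero, so languages
// with no files or refs yield -1 scores instead of NaN.
func divideSentinel(a, b, sentinel float64) float64 {
	if b == 0 {
		return sentinel
	}
	return a / b
}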