Example No. 1
func printFilenames(fset *token.FileSet, info *loader.PackageInfo) {
	var names []string
	for _, f := range info.Files {
		names = append(names, filepath.Base(fset.File(f.Pos()).Name()))
	}
	fmt.Printf("%s.Files: %s\n", info.Pkg.Path(), names)
}
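A minimal usage sketch (the printFmtFilenames name and the choice of the "fmt" package are only illustrative), assuming the enclosing package imports golang.org/x/tools/go/loader:
func printFmtFilenames() error {
	var conf loader.Config
	conf.Import("fmt")
	prog, err := conf.Load()
	if err != nil {
		return err
	}
	// prog.Fset covers every file the loader parsed.
	printFilenames(prog.Fset, prog.Imported["fmt"])
	return nil
}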
Example No. 2
// A deterministic ordering for token.Pos that doesn't
// depend on the order in which packages were loaded.
func lessPos(fset *token.FileSet, x, y token.Pos) bool {
	fx := fset.File(x)
	fy := fset.File(y)
	if fx != fy {
		return fx.Name() < fy.Name()
	}
	return x < y
}
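A small sketch of using lessPos with sort.Slice; the sortedPoses helper, file names, and contents below are arbitrary:
func sortedPoses() []token.Pos {
	fset := token.NewFileSet()
	fb, _ := parser.ParseFile(fset, "b.go", "package p", 0)
	fa, _ := parser.ParseFile(fset, "a.go", "package p", 0)
	poses := []token.Pos{fb.Pos(), fa.Pos()}
	// Order by file name first, then by offset within the file, so the
	// result does not depend on the order the files entered the FileSet.
	sort.Slice(poses, func(i, j int) bool { return lessPos(fset, poses[i], poses[j]) })
	return poses // the "a.go" position now sorts before the "b.go" one
}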
Example No. 3
func offsetLine(fset *token.FileSet, af *ast.File, offset int) (line int) {
	defer func() {
		if err := recover(); err != nil {
			line = 0
		}
	}()
	return fset.File(af.Pos()).Position(token.Pos(offset)).Line
}
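A brief sketch of the behaviour (offsetLineDemo and the source text are arbitrary): a position inside the file maps to its line, while an out-of-range offset makes Position panic and the deferred recover turns that into line 0:
func offsetLineDemo() {
	fset := token.NewFileSet()
	af, _ := parser.ParseFile(fset, "x.go", "package p\n\nvar x int\n", 0)
	fmt.Println(offsetLine(fset, af, 12))    // a position inside the file
	fmt.Println(offsetLine(fset, af, 1<<20)) // out of range: recovers and prints 0
}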
Example No. 4
// positionFor returns the Position for Pos p in FileSet fset, or nil if p is not valid in fset.
func positionFor(p token.Pos, fset *token.FileSet) *token.Position {
	if p != token.NoPos && fset != nil {
		if f := fset.File(p); f != nil {
			// Prevent panic
			if f.Base() <= int(p) && int(p) <= f.Base()+f.Size() {
				p := f.Position(p)
				return &p
			}
		}
	}
	return nil
}
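A short usage sketch (describePos is a made-up name); invalid positions simply come back as nil:
func describePos() {
	fset := token.NewFileSet()
	f, _ := parser.ParseFile(fset, "p.go", "package p\nvar answer = 42\n", 0)
	if pos := positionFor(f.Name.Pos(), fset); pos != nil {
		fmt.Println(pos) // p.go:1:9
	}
	fmt.Println(positionFor(token.NoPos, fset)) // <nil>
}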
Example No. 5
func getFilePath(fset *token.FileSet, path string, pos token.Pos) string {
	onDiskFile := fset.File(pos)

	if onDiskFile != nil {
		if rel, err := filepath.Rel(path, onDiskFile.Name()); err == nil {
			return rel
		} else {
			return onDiskFile.Name()
		}
	}
	return ""
}
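A usage sketch, assuming the file is parsed from disk so that a relative path against a base directory makes sense; the /tmp/demo paths and showRelPath name are hypothetical:
func showRelPath() error {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "/tmp/demo/main.go", nil, 0)
	if err != nil {
		return err
	}
	// Prints "main.go" when the file sits under /tmp/demo, the full
	// file name when no relative path can be computed, "" if unknown.
	fmt.Println(getFilePath(fset, "/tmp/demo", f.Pos()))
	return nil
}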
Example No. 6
func extractMultipleStatementsAsFunc(
	astFile *ast.File,
	fileSet *token.FileSet,
	stmtsToExtract []ast.Node,
	parentNode ast.Node,
	extractedFuncName string) {
	params := varIdentsUsedIn(stmtsToExtract)
	varsDeclaredWithinStmtsToExtract := varIdentsDeclaredWithin(stmtsToExtract)
	util.MapStringAstIdentRemoveKeys(params, namesOf(varsDeclaredWithinStmtsToExtract))
	util.MapStringAstIdentRemoveKeys(params, namesOf(globalVarIdents(astFile)))

	allStmts := stmtsFromBlockStmt(parentNode)
	indexOfExtractedStmt := indexOf(stmtsToExtract[0].(ast.Stmt), *allStmts)
	varsUsedAfterwards := overlappingVarsIdentsUsedIn((*allStmts)[indexOfExtractedStmt+len(stmtsToExtract):], varsDeclaredWithinStmtsToExtract)

	newStmt := funcCallStmt(varsUsedAfterwards, extractedFuncName, params, (*allStmts)[indexOfExtractedStmt].Pos())
	replaceStmtsWithFuncCallStmt(newStmt,
		allStmts,
		indexOfExtractedStmt, len(stmtsToExtract))

	areaRemoved := areaRemoved(fileSet, (stmtsToExtract)[0].Pos(), (stmtsToExtract)[len(stmtsToExtract)-1].End())
	lineLengths := lineLengthsFrom(fileSet)
	lineNum, numLinesToCut, newLineLength := replacementModifications(fileSet, (stmtsToExtract)[0].Pos(), (stmtsToExtract)[len(stmtsToExtract)-1].End(), newStmt.End(), lineLengths, areaRemoved)

	shiftPosesAfterPos(astFile, newStmt, (stmtsToExtract)[len(stmtsToExtract)-1].End(), newStmt.End()-stmtsToExtract[len(stmtsToExtract)-1].End())

	multipleStmtFuncDecl := CopyNode(multipleStmtFuncDeclWith(
		extractedFuncName,
		fieldsFrom(params),
		stmtsFromNodes(stmtsToExtract),
		exprsFrom(varsUsedAfterwards),
	)).(*ast.FuncDecl)
	var moveOffset token.Pos
	RecalcPoses(multipleStmtFuncDecl, astFile.End()+2, &moveOffset, 0)
	astFile.Decls = append(astFile.Decls, multipleStmtFuncDecl)

	areaToBeAppended := insertionModificationsForStmts(astFile, multipleStmtFuncDecl, areaRemoved, exprsFrom(varsUsedAfterwards))

	lineLengths = append(
		lineLengths[:lineNum+1],
		lineLengths[lineNum+1+numLinesToCut:]...)
	lineLengths[lineNum] = newLineLength
	lineLengths = append(lineLengths, areaToBeAppended...)

	newFileSet := token.NewFileSet()
	newFileSet.AddFile(fileSet.File(1).Name(), 1, int(astFile.End()))
	newFileSet.File(1).SetLines(ConvertLineLengthsToLineOffsets(lineLengths))
	*fileSet = *newFileSet

	moveComments(astFile, moveOffset /*, needs a range to restrict which comments to move*/)
}
Example No. 7
// nodeAtOffset returns the ast.Node at the given offset.
// Node types: *ast.Ident, *ast.SelectorExpr, *ast.ImportSpec
func nodeAtOffset(af *ast.File, fset *token.FileSet, offset int) (ast.Node, error) {
	file := fset.File(af.Pos())
	if file == nil {
		return nil, errors.New("ast.File not in token.FileSet")
	}
	// Prevent file.Pos from panicking.
	if offset < 0 || file.Size() < offset {
		return nil, fmt.Errorf("invalid offset: %d", offset)
	}
	v := &offsetVisitor{pos: file.Pos(offset)}
	ast.Walk(v, af)
	if v.node == nil {
		return nil, fmt.Errorf("no node at offset: %d", offset)
	}
	return v.node, nil
}
Example No. 8
// findInterval parses input and returns the [start, end) positions of
// the first occurrence of substr in input.  f==nil indicates failure;
// an error has already been reported in that case.
//
func findInterval(t *testing.T, fset *token.FileSet, input, substr string) (f *ast.File, start, end token.Pos) {
	f, err := parser.ParseFile(fset, "<input>", input, 0)
	if err != nil {
		t.Errorf("parse error: %s", err)
		return
	}

	i := strings.Index(input, substr)
	if i < 0 {
		t.Errorf("%q is not a substring of input", substr)
		f = nil
		return
	}

	filePos := fset.File(f.Package)
	return f, filePos.Pos(i), filePos.Pos(i + len(substr))
}
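A sketch of driving this helper from a test; the input program and substring are arbitrary:
func TestFindInterval(t *testing.T) {
	fset := token.NewFileSet()
	f, start, end := findInterval(t, fset, "package p\nvar x, y int\n", "x, y")
	if f == nil {
		return // findInterval has already reported the failure
	}
	// start and end span the first occurrence of "x, y" in the input.
	t.Logf("%s .. %s", fset.Position(start), fset.Position(end))
}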
Example No. 9
func printSites(sites []copySite, fset *token.FileSet, w io.Writer) {
	sort.Sort(sortedCopySites{sites: sites, fset: fset})
	for _, site := range sites {
		f := site.fun
		shouldBe := site.shouldBe
		sb := sentence(shouldBe)
		msg := "should be made into"
		if len(shouldBe) > 1 {
			msg += " pointers"
		} else {
			msg += " a pointer"
		}
		pos := site.fun.Pos()
		file := fset.File(pos)
		position := file.Position(pos)
		fmt.Fprintf(w, "%s:%d:%d: %s %s (%s)\n", file.Name(), position.Line, position.Column, sb, msg, f)
	}
}
Example No. 10
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(fset *token.FileSet, f *ast.File) {
	for i, d := range f.Decls {
		d, ok := d.(*ast.GenDecl)
		if !ok || d.Tok != token.IMPORT {
			// Not an import declaration, so we're done.
			// Imports are always first.
			break
		}

		if len(d.Specs) == 0 {
			// Empty import block, remove it.
			f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		}

		if !d.Lparen.IsValid() {
			// Not a block: sorted by default.
			continue
		}

		// Remove all newlines and sort the specs
		pline := -1
		for _, s := range d.Specs {
			line := fset.Position(s.Pos()).Line
			if pline == -1 {
				pline = line
				continue
			}
			for line-pline > 1 {
				fset.File(s.Pos()).MergeLine(line - 1)
				line = fset.Position(s.Pos()).Line
			}
		}
		d.Specs = sortSpecs(fset, f, d.Specs)

		// Deduping can leave a blank line before the rparen; clean that up.
		if len(d.Specs) > 0 {
			lastSpec := d.Specs[len(d.Specs)-1]
			lastLine := fset.Position(lastSpec.Pos()).Line
			if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
				fset.File(d.Rparen).MergeLine(rParenLine - 1)
			}
		}
	}
}
Example No. 11
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(fset *token.FileSet, f *ast.File) {
	for i, d := range f.Decls {
		d, ok := d.(*ast.GenDecl)
		if !ok || d.Tok != token.IMPORT {
			// Not an import declaration, so we're done.
			// Imports are always first.
			break
		}

		if len(d.Specs) == 0 {
			// Empty import block, remove it.
			f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		}

		if !d.Lparen.IsValid() {
			// Not a block: sorted by default.
			continue
		}

		// Identify and sort runs of specs on successive lines.
		i := 0
		specs := d.Specs[:0]
		for j, s := range d.Specs {
			if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
				// j begins a new run.  End this one.
				specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
				i = j
			}
		}
		specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
		d.Specs = specs

		// Deduping can leave a blank line before the rparen; clean that up.
		if len(d.Specs) > 0 {
			lastSpec := d.Specs[len(d.Specs)-1]
			lastLine := fset.Position(lastSpec.Pos()).Line
			if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
				fset.File(d.Rparen).MergeLine(rParenLine - 1)
			}
		}
	}
}
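A minimal round trip for this version (it also relies on the sortSpecs helper shown in a later example); formatImports is an illustrative name, and the usual bytes, go/parser, and go/printer imports are assumed:
func formatImports(src string) (string, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "src.go", src, parser.ParseComments)
	if err != nil {
		return "", err
	}
	sortImports(fset, f) // sorts and dedups the import blocks in place
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, fset, f); err != nil {
		return "", err
	}
	return buf.String(), nil
}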
Example No. 12
func (oil OverlappingImportsLinter) RunLint(
	fset *token.FileSet,
	nodes chan ast.Node,
	lints chan Lint,
	wg *sync.WaitGroup) {
	wg.Add(1)
	imports := make(map[string]string)
	filename := ""
	for node := range nodes {
		switch is := node.(type) {
		case *ast.ImportSpec:
			if filename == "" {
				filename = fset.File(is.Pos()).Name()
			}
			if is.Name != nil {
				imports[is.Path.Value] = is.Name.String()
			} else {
				path := strings.Trim(is.Path.Value, "\"")
				parts := strings.Split(path, "/")
				imports[is.Path.Value] = parts[len(parts)-1]
			}
		}
	}

	localNameCount := make(map[string]int)
	for _, localName := range imports {
		localNameCount[localName]++
	}

	for localName, count := range localNameCount {
		if localName == "." {
			count += 1
		}
		if count > 1 {
			lints <- OverlappingImportsLint{oil,
				filename, localName, count}
		}
	}
	wg.Done()
}
Example No. 13
// Walk the AST and extract function declarations, building up the
// indexes.
func processAst(a *ast.File, fset *token.FileSet, types, functions map[string][]*string, includeBody bool) {
	ast.Inspect(a, func(node ast.Node) bool {
		buf.Reset()
		switch x := node.(type) {
		case *ast.FuncDecl:
			pos := fset.File(node.Pos())
			fmt.Fprintf(buf, "%s:%d: \n", pos.Name(), pos.Line(node.Pos()))
			if !includeBody {
				x.Body = nil
			}
			printer.Fprint(buf, fset, x)

			function := buf.String()
			add(functions, x.Name.Name, &function)
			if x.Recv != nil {
				buf.Reset()
				printer.Fprint(buf, fset, x.Recv.List[0].Type)
				thetype := strings.Trim(buf.String(), "*")
				add(types, thetype, &function)
			}
		}
		return true
	})
}
Example No. 14
// getSourceCodeLineNumber returns the line number in the parsed source code, according to the token's position.
func getSourceCodeLineNumber(fileSet *token.FileSet, position token.Pos) int {
	return fileSet.File(position).Line(position)
}
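A brief sketch: any token.Pos pulled from the parsed AST maps back to its 1-based source line. firstDeclLine and the source literal are illustrative:
func firstDeclLine() int {
	fset := token.NewFileSet()
	f, _ := parser.ParseFile(fset, "demo.go", "package demo\n\nfunc F() {}\n", 0)
	return getSourceCodeLineNumber(fset, f.Decls[0].Pos()) // 3
}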
Example No. 15
// AddNamedImport adds the import path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
//	AddNamedImport(fset, f, "pathpkg", "path")
// adds
//	import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
	if imports(f, ipath) {
		return false
	}

	newImport := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: strconv.Quote(ipath),
		},
	}
	if name != "" {
		newImport.Name = &ast.Ident{Name: name}
	}

	// Find an import decl to add to.
	// The goal is to find an existing import
	// whose import path has the longest shared
	// prefix with ipath.
	var (
		bestMatch  = -1         // length of longest shared prefix
		lastImport = -1         // index in f.Decls of the file's final import decl
		impDecl    *ast.GenDecl // import decl containing the best match
		impIndex   = -1         // spec index in impDecl containing the best match
	)
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if ok && gen.Tok == token.IMPORT {
			lastImport = i
			// Do not add to import "C", to avoid disrupting the
			// association with its doc comment, breaking cgo.
			if declImports(gen, "C") {
				continue
			}

			// Match an empty import decl if that's all that is available.
			if len(gen.Specs) == 0 && bestMatch == -1 {
				impDecl = gen
			}

			// Compute longest shared prefix with imports in this group.
			for j, spec := range gen.Specs {
				impspec := spec.(*ast.ImportSpec)
				n := matchLen(importPath(impspec), ipath)
				if n > bestMatch {
					bestMatch = n
					impDecl = gen
					impIndex = j
				}
			}
		}
	}

	// If no import decl found, add one after the last import.
	if impDecl == nil {
		impDecl = &ast.GenDecl{
			Tok: token.IMPORT,
		}
		if lastImport >= 0 {
			impDecl.TokPos = f.Decls[lastImport].End()
		} else {
			// There are no existing imports.
			// Our new import goes after the package declaration and after
			// the comment, if any, that starts on the same line as the
			// package declaration.
			impDecl.TokPos = f.Package

			file := fset.File(f.Package)
			pkgLine := file.Line(f.Package)
			for _, c := range f.Comments {
				if file.Line(c.Pos()) > pkgLine {
					break
				}
				impDecl.TokPos = c.End()
			}
		}
		f.Decls = append(f.Decls, nil)
		copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
		f.Decls[lastImport+1] = impDecl
	}

	// Insert new import at insertAt.
	insertAt := 0
	if impIndex >= 0 {
		// insert after the found import
		insertAt = impIndex + 1
	}
	impDecl.Specs = append(impDecl.Specs, nil)
	copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
	impDecl.Specs[insertAt] = newImport
	pos := impDecl.Pos()
	if insertAt > 0 {
		// If there is a comment after an existing import, preserve the comment
		// position by adding the new import after the comment.
		if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
			pos = spec.Comment.End()
		} else {
			// Assign same position as the previous import,
			// so that the sorter sees it as being in the same block.
			pos = impDecl.Specs[insertAt-1].Pos()
		}
	}
	if newImport.Name != nil {
		newImport.Name.NamePos = pos
	}
	newImport.Path.ValuePos = pos
	newImport.EndPos = pos

	// Clean up parens. impDecl contains at least one spec.
	if len(impDecl.Specs) == 1 {
		// Remove unneeded parens.
		impDecl.Lparen = token.NoPos
	} else if !impDecl.Lparen.IsValid() {
		// impDecl needs parens added.
		impDecl.Lparen = impDecl.Specs[0].Pos()
	}

	f.Imports = append(f.Imports, newImport)

	if len(f.Decls) <= 1 {
		return true
	}

	// Merge all the import declarations into the first one.
	var first *ast.GenDecl
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
			continue
		}
		if first == nil {
			first = gen
			continue // Don't touch the first one.
		}
		// We now know there is more than one package in this import
		// declaration. Ensure that it ends up parenthesized.
		first.Lparen = first.Pos()
		// Move the imports of the other import declaration to the first one.
		for _, spec := range gen.Specs {
			spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
			first.Specs = append(first.Specs, spec)
		}
		f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		i--
	}

	return true
}
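A usage sketch mirroring the call from the doc comment; addPathImport is an illustrative name, and the bytes, go/parser, and go/printer imports are assumed:
func addPathImport(src string) (string, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "main.go", src, parser.ParseComments)
	if err != nil {
		return "", err
	}
	AddNamedImport(fset, f, "pathpkg", "path") // adds: import pathpkg "path"
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, fset, f); err != nil {
		return "", err
	}
	return buf.String(), nil
}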
Example No. 16
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
	var delspecs []*ast.ImportSpec
	var delcomments []*ast.CommentGroup

	// Find the import nodes that import path, if any.
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j := 0; j < len(gen.Specs); j++ {
			spec := gen.Specs[j]
			impspec := spec.(*ast.ImportSpec)
			if impspec.Name == nil && name != "" {
				continue
			}
			if impspec.Name != nil && impspec.Name.Name != name {
				continue
			}
			if importPath(impspec) != path {
				continue
			}

			// We found an import spec that imports path.
			// Delete it.
			delspecs = append(delspecs, impspec)
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]

			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
				i--
				break
			} else if len(gen.Specs) == 1 {
				if impspec.Doc != nil {
					delcomments = append(delcomments, impspec.Doc)
				}
				if impspec.Comment != nil {
					delcomments = append(delcomments, impspec.Comment)
				}
				for _, cg := range f.Comments {
					// Found comment on the same line as the import spec.
					if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
						delcomments = append(delcomments, cg)
						break
					}
				}

				gen.Lparen = token.NoPos // drop parens
				spec := gen.Specs[0].(*ast.ImportSpec)
				if spec.Doc != nil {
					// Move the documentation above the import statement.
					gen.TokPos = spec.Doc.End() + 1
				}

				for _, cg := range f.Comments {
					if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line {
						for fset.Position(gen.TokPos).Line != fset.Position(spec.Pos()).Line {
							fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
						}
						break
					}
				}
			}
			if j > 0 {
				lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
				lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
				line := fset.Position(impspec.Path.ValuePos).Line

				// We deleted an entry but now there may be
				// a blank line-sized hole where the import was.
				if line-lastLine > 1 {
					// There was a blank line immediately preceding the deleted import,
					// so there's no need to close the hole.
					// Do nothing.
				} else {
					// There was no blank line. Close the hole.
					fset.File(gen.Rparen).MergeLine(line)
				}
			}
			j--
		}
	}

	// Delete imports from f.Imports.
	for i := 0; i < len(f.Imports); i++ {
		imp := f.Imports[i]
		for j, del := range delspecs {
			if imp == del {
				copy(f.Imports[i:], f.Imports[i+1:])
				f.Imports = f.Imports[:len(f.Imports)-1]
				copy(delspecs[j:], delspecs[j+1:])
				delspecs = delspecs[:len(delspecs)-1]
				i--
				break
			}
		}
	}

	// Delete comments from f.Comments.
	for i := 0; i < len(f.Comments); i++ {
		cg := f.Comments[i]
		for j, del := range delcomments {
			if cg == del {
				copy(f.Comments[i:], f.Comments[i+1:])
				f.Comments = f.Comments[:len(f.Comments)-1]
				copy(delcomments[j:], delcomments[j+1:])
				delcomments = delcomments[:len(delcomments)-1]
				i--
				break
			}
		}
	}

	if len(delspecs) > 0 {
		panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
	}

	return
}
Example No. 17
func toSourcePos(fset *token.FileSet, node ast.Node) SourcePos {
	file := fset.File(node.Pos())
	start := file.Offset(node.Pos())
	end := file.Offset(node.End())
	return SourcePos(fmt.Sprintf("%s:#%d,#%d", file.Name(), start, end))
}
Example No. 18
func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
	// Can't short-circuit here even if specs are already sorted,
	// since they might yet need deduplication.
	// A lone import, however, may be safely ignored.
	if len(specs) <= 1 {
		return specs
	}

	// Record positions for specs.
	pos := make([]posSpan, len(specs))
	for i, s := range specs {
		pos[i] = posSpan{s.Pos(), s.End()}
	}

	// Identify comments in this range.
	// Any comment from pos[0].Start to the final line counts.
	lastLine := fset.Position(pos[len(pos)-1].End).Line
	cstart := len(f.Comments)
	cend := len(f.Comments)
	for i, g := range f.Comments {
		if g.Pos() < pos[0].Start {
			continue
		}
		if i < cstart {
			cstart = i
		}
		if fset.Position(g.End()).Line > lastLine {
			cend = i
			break
		}
	}
	comments := f.Comments[cstart:cend]

	// Assign each comment to the import spec preceding it.
	importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
	specIndex := 0
	for _, g := range comments {
		for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
			specIndex++
		}
		s := specs[specIndex].(*ast.ImportSpec)
		importComment[s] = append(importComment[s], g)
	}

	// Sort the import specs by import path.
	// Remove duplicates, when possible without data loss.
	// Reassign the import paths to have the same position sequence.
	// Reassign each comment to abut the end of its spec.
	// Sort the comments by new position.
	sort.Sort(byImportSpec(specs))

	// Dedup. Thanks to our sorting, we can just consider
	// adjacent pairs of imports.
	deduped := specs[:0]
	for i, s := range specs {
		if i == len(specs)-1 || !collapse(s, specs[i+1]) {
			deduped = append(deduped, s)
		} else {
			p := s.Pos()
			fset.File(p).MergeLine(fset.Position(p).Line)
		}
	}
	specs = deduped

	// Fix up comment positions
	for i, s := range specs {
		s := s.(*ast.ImportSpec)
		if s.Name != nil {
			s.Name.NamePos = pos[i].Start
		}
		s.Path.ValuePos = pos[i].Start
		s.EndPos = pos[i].End
		for _, g := range importComment[s] {
			for _, c := range g.List {
				c.Slash = pos[i].End
			}
		}
	}

	sort.Sort(byCommentPos(comments))

	return specs
}
Example No. 19
func filename(file *ast.File, fset *token.FileSet) string {
	return fset.File(file.Pos()).Name()
}
Example No. 20
func positionString(pos token.Pos, fset *token.FileSet) string {
	position := fset.File(pos).PositionFor(pos, false)
	return fmt.Sprintf("[%v]", position)
}
Example No. 21
func (compiler *compiler) Compile(fset *token.FileSet, files []*ast.File, importpath string) (m *Module, err error) {
	// FIXME create a compilation state, rather than storing in 'compiler'.
	compiler.fileset = fset
	compiler.initfuncs = nil
	compiler.varinitfuncs = nil

	// If no import path is specified, or the package's
	// name (not path) is "main", then set the import
	// path to be the same as the package's name.
	if importpath == "" || files[0].Name.String() == "main" {
		importpath = files[0].Name.String()
	}

	// Type-check, and store object data.
	compiler.typeinfo.Types = make(map[ast.Expr]types.Type)
	compiler.typeinfo.Values = make(map[ast.Expr]exact.Value)
	compiler.typeinfo.Objects = make(map[*ast.Ident]types.Object)
	compiler.typeinfo.Implicits = make(map[ast.Node]types.Object)
	compiler.typeinfo.Selections = make(map[*ast.SelectorExpr]*types.Selection)
	compiler.objectdata = make(map[types.Object]*ObjectData)
	compiler.methodsets = make(map[types.Type]*methodset)
	compiler.exportedtypes = nil
	compiler.llvmtypes = NewLLVMTypeMap(compiler.target)
	pkg, err := compiler.typecheck(importpath, fset, files)
	if err != nil {
		return nil, err
	}
	compiler.pkg = pkg

	// Create a Module, which contains the LLVM bitcode. Dispose it on panic,
	// otherwise we'll set a finalizer at the end. The caller may invoke
	// Dispose manually, which will render the finalizer a no-op.
	modulename := importpath
	compiler.module = &Module{llvm.NewModule(modulename), modulename, false}
	compiler.module.SetTarget(compiler.TargetTriple)
	compiler.module.SetDataLayout(compiler.target.String())
	defer func() {
		if e := recover(); e != nil {
			compiler.module.Dispose()
			panic(e)
		}
	}()

	// Create a struct responsible for mapping static types to LLVM types,
	// and to runtime/dynamic type values.
	var resolver Resolver = compiler
	compiler.FunctionCache = NewFunctionCache(compiler)
	compiler.types = NewTypeMap(compiler.llvmtypes, compiler.module.Module, importpath, compiler.FunctionCache, resolver)

	// Create a Builder, for building LLVM instructions.
	compiler.builder = newBuilder(compiler.types)
	defer compiler.builder.Dispose()

	compiler.debug_info = &llvm.DebugInfo{}
	// Compile each file in the package.
	for _, file := range files {
		compiler.compile_unit = &llvm.CompileUnitDescriptor{
			Language: llvm.DW_LANG_Go,
			Path:     llvm.FileDescriptor(fset.File(file.Pos()).Name()),
			Producer: LLGOProducer,
			Runtime:  LLGORuntimeVersion,
		}
		compiler.pushDebugContext(&compiler.compile_unit.Path)

		for _, decl := range file.Decls {
			compiler.VisitDecl(decl)
		}
		compiler.popDebugContext()
		if len(compiler.debug_context) > 0 {
			log.Panicln(compiler.debug_context)
		}
		compiler.module.AddNamedMetadataOperand("llvm.dbg.cu", compiler.debug_info.MDNode(compiler.compile_unit))
	}

	// Export runtime type information.
	compiler.exportRuntimeTypes()

	// Wrap "main.main" in a call to runtime.main.
	if importpath == "main" {
		err = compiler.createMainFunction()
		if err != nil {
			return nil, err
		}
	} else {
		var e = exporter{compiler: compiler}
		if err := e.Export(pkg); err != nil {
			return nil, err
		}
	}

	// Create global constructors. The initfuncs/varinitfuncs
	// slices are in the order of visitation; we generate the
	// list of constructors in the reverse order.
	//
	// The llgo linker will link modules in the order of
	// package dependency, i.e. if A requires B, then llgo-link
	// will link the modules in the order A, B. The "runtime"
	// package is always last.
	//
	// At program initialisation, the runtime initialisation
	// function (runtime.main) will invoke the constructors
	// in reverse order.
	var initfuncs [][]llvm.Value
	if compiler.varinitfuncs != nil {
		initfuncs = append(initfuncs, compiler.varinitfuncs)
	}
	if compiler.initfuncs != nil {
		initfuncs = append(initfuncs, compiler.initfuncs)
	}
	if initfuncs != nil {
		ctortype := llvm.PointerType(llvm.Int8Type(), 0)
		var ctors []llvm.Value
		var index int = 0
		for _, initfuncs := range initfuncs {
			for _, fnptr := range initfuncs {
				name := fmt.Sprintf("__llgo.ctor.%s.%d", importpath, index)
				fnptr.SetName(name)
				fnptr = llvm.ConstBitCast(fnptr, ctortype)
				ctors = append(ctors, fnptr)
				index++
			}
		}
		for i, n := 0, len(ctors); i < n/2; i++ {
			ctors[i], ctors[n-i-1] = ctors[n-i-1], ctors[i]
		}
		ctorsInit := llvm.ConstArray(ctortype, ctors)
		ctorsVar := llvm.AddGlobal(compiler.module.Module, ctorsInit.Type(), "runtime.ctors")
		ctorsVar.SetInitializer(ctorsInit)
		ctorsVar.SetLinkage(llvm.AppendingLinkage)
	}

	// Create debug metadata.
	//compiler.createMetadata()

	return compiler.module, nil
}
Example No. 22
// DeleteImport deletes the import path from the file f, if present.
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
	var delspecs []*ast.ImportSpec

	// Find the import nodes that import path, if any.
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j := 0; j < len(gen.Specs); j++ {
			spec := gen.Specs[j]
			impspec := spec.(*ast.ImportSpec)
			if importPath(impspec) != path {
				continue
			}

			// We found an import spec that imports path.
			// Delete it.
			delspecs = append(delspecs, impspec)
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]

			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
				i--
				break
			} else if len(gen.Specs) == 1 {
				gen.Lparen = token.NoPos // drop parens
			}
			if j > 0 {
				lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
				lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
				line := fset.Position(impspec.Path.ValuePos).Line

				// We deleted an entry but now there may be
				// a blank line-sized hole where the import was.
				if line-lastLine > 1 {
					// There was a blank line immediately preceding the deleted import,
					// so there's no need to close the hole.
					// Do nothing.
				} else {
					// There was no blank line. Close the hole.
					fset.File(gen.Rparen).MergeLine(line)
				}
			}
			j--
		}
	}

	// Delete them from f.Imports.
	for i := 0; i < len(f.Imports); i++ {
		imp := f.Imports[i]
		for j, del := range delspecs {
			if imp == del {
				copy(f.Imports[i:], f.Imports[i+1:])
				f.Imports = f.Imports[:len(f.Imports)-1]
				copy(delspecs[j:], delspecs[j+1:])
				delspecs = delspecs[:len(delspecs)-1]
				i--
				break
			}
		}
	}

	if len(delspecs) > 0 {
		panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
	}

	return
}
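A matching sketch for the delete direction; dropImport is an illustrative name, with the same assumed bytes, go/parser, and go/printer imports as the earlier sketches:
func dropImport(src, path string) (string, bool, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "main.go", src, parser.ParseComments)
	if err != nil {
		return "", false, err
	}
	deleted := DeleteImport(fset, f, path)
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, fset, f); err != nil {
		return "", false, err
	}
	return buf.String(), deleted, nil
}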
Example No. 23
func lineLengthsFrom(fileSet *token.FileSet) []int {
	return lineLengthsFromLines(strings.Split(util.ReadFileAsStringOrPanic(fileSet.File(1).Name()), "\n"))
}
Example No. 24
func extractExpressionAsFunc(
	astFile *ast.File,
	fileSet *token.FileSet,
	expr ast.Expr,
	parent ast.Node,
	extractedFuncName string) {
	params := varIdentsUsedIn([]ast.Node{expr})
	util.MapStringAstIdentRemoveKeys(params, namesOf(globalVarIdents(astFile)))

	newExpr := CopyNode(callExprWith(extractedFuncName, params)).(ast.Expr)
	RecalcPoses(newExpr, expr.Pos(), nil, 0)
	switch typedNode := parent.(type) {
	case *ast.AssignStmt:
		for i, rhs := range typedNode.Rhs {
			if rhs == expr {
				typedNode.Rhs[i] = newExpr
			}
		}
		for i, lhs := range typedNode.Lhs {
			if lhs == expr {
				typedNode.Lhs[i] = newExpr
			}
		}
	case *ast.CallExpr:
		for i, arg := range typedNode.Args {
			if arg == expr {
				typedNode.Args[i] = newExpr
			}
		}
	case *ast.ExprStmt:
		typedNode.X = newExpr

	case *ast.ReturnStmt:
		for i, result := range typedNode.Results {
			if result == expr {
				typedNode.Results[i] = newExpr
			}
		}
	case *ast.IfStmt:
		if typedNode.Cond == expr {
			typedNode.Cond = newExpr
		}

	case *ast.CaseClause:
		for i, caseExpr := range typedNode.List {
			if caseExpr == expr {
				typedNode.List[i] = newExpr
			}
		}

	case *ast.SwitchStmt:
		if typedNode.Tag == expr {
			typedNode.Tag = newExpr
		}

	case *ast.ForStmt:
		if typedNode.Cond == expr {
			typedNode.Cond = newExpr
		}

	case *ast.RangeStmt:
		if typedNode.Key == expr {
			typedNode.Key = newExpr
		} else if typedNode.Value == expr {
			typedNode.Value = newExpr
		} else if typedNode.X == expr {
			typedNode.X = newExpr
		}

	case *ast.SendStmt:
		if typedNode.Chan == expr {
			typedNode.Chan = newExpr
		} else if typedNode.Value == expr {
			typedNode.Value = newExpr
		}

	case *ast.IncDecStmt:
		if typedNode.X == expr {
			typedNode.X = newExpr
		}

	case *ast.ValueSpec:
		for i, value := range typedNode.Values {
			if value == expr {
				typedNode.Values[i] = newExpr
			}
		}

	default:
		panic(fmt.Sprintf("Type %v not supported yet", reflect.TypeOf(parent)))
	}

	areaRemoved := areaRemoved(fileSet, expr.Pos(), expr.End())
	lineLengths := lineLengthsFrom(fileSet)
	lineNum, numLinesToCut, newLineLength := replacementModifications(fileSet, expr.Pos(), expr.End(), newExpr.End(), lineLengths, areaRemoved)

	shiftPosesAfterPos(astFile, newExpr, expr.End(), newExpr.End()-expr.End())

	singleExprStmtFuncDeclWith := CopyNode(singleExprStmtFuncDeclWith(extractedFuncName, fieldsFrom(params), expr)).(*ast.FuncDecl)
	var moveOffset token.Pos
	RecalcPoses(singleExprStmtFuncDeclWith, astFile.End()+2, &moveOffset, 0)
	astFile.Decls = append(astFile.Decls, singleExprStmtFuncDeclWith)

	areaToBeAppended := insertionModifications(astFile, singleExprStmtFuncDeclWith, areaRemoved)

	lineLengths = append(
		lineLengths[:lineNum+1],
		lineLengths[lineNum+1+numLinesToCut:]...)
	lineLengths[lineNum] = newLineLength
	lineLengths = append(lineLengths, areaToBeAppended...)

	newFileSet := token.NewFileSet()
	newFileSet.AddFile(fileSet.File(1).Name(), 1, int(astFile.End()))
	success := newFileSet.File(1).SetLines(ConvertLineLengthsToLineOffsets(lineLengths))
	if !success {
		panic("Could not SetLines on File.")
	}
	*fileSet = *newFileSet

	moveComments(astFile, moveOffset /*, needs a range to restrict which comments to move*/)
}