// Imports returns the file imports grouped by paragraph.
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
	var groups [][]*ast.ImportSpec

	for _, decl := range f.Decls {
		genDecl, ok := decl.(*ast.GenDecl)
		if !ok || genDecl.Tok != token.IMPORT {
			break
		}

		group := []*ast.ImportSpec{}

		var lastLine int
		for _, spec := range genDecl.Specs {
			importSpec := spec.(*ast.ImportSpec)
			pos := importSpec.Path.ValuePos
			line := fset.Position(pos).Line
			if lastLine > 0 && pos > 0 && line-lastLine > 1 {
				groups = append(groups, group)
				group = []*ast.ImportSpec{}
			}
			group = append(group, importSpec)
			lastLine = line
		}
		groups = append(groups, group)
	}

	return groups
}
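// Usage sketch for Imports: a hedged, self-contained example (not part of
// the original source). It parses a file whose import block has two
// paragraphs and prints each group. The src literal is an assumption made
// up for illustration; Imports is assumed reachable as astutil.Imports
// (golang.org/x/tools/go/ast/astutil).
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

const src = `package p

import (
	"fmt"
	"os"

	"strings"
)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Expect two groups, ["fmt" "os"] and ["strings"], split at the blank line.
	for i, group := range astutil.Imports(fset, f) {
		fmt.Printf("group %d:\n", i)
		for _, spec := range group {
			fmt.Printf("\t%s\n", spec.Path.Value)
		}
	}
}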
// compareErrors compares the map of expected error messages with the list
// of found errors and reports discrepancies.
//
func compareErrors(t *testing.T, fset *token.FileSet, expected map[token.Pos]string, found scanner.ErrorList) {
	for _, error := range found {
		// error.Pos is a token.Position, but we want
		// a token.Pos so we can do a map lookup
		pos := getPos(fset, error.Pos.Filename, error.Pos.Offset)
		if msg, found := expected[pos]; found {
			// we expect a message at pos; check if it matches
			rx, err := regexp.Compile(msg)
			if err != nil {
				t.Errorf("%s: %v", error.Pos, err)
				continue
			}
			if match := rx.MatchString(error.Msg); !match {
				t.Errorf("%s: %q does not match %q", error.Pos, error.Msg, msg)
				continue
			}
			// we have a match - eliminate this error
			delete(expected, pos)
		} else {
			// To keep in mind when analyzing failed test output:
			// If the same error position occurs multiple times in errors,
			// this message will be triggered (because the first error at
			// the position removes this position from the expected errors).
			t.Errorf("%s: unexpected error: %s", error.Pos, error.Msg)
		}
	}

	// there should be no expected errors left
	if len(expected) > 0 {
		t.Errorf("%d errors not reported:", len(expected))
		for pos, msg := range expected {
			t.Errorf("%s: %s\n", fset.Position(pos), msg)
		}
	}
}
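// A minimal sketch of the getPos helper assumed by compareErrors above
// (hypothetical; the real test helper may differ): it maps a filename and
// byte offset back to a token.Pos by searching the file set.
func getPos(fset *token.FileSet, filename string, offset int) token.Pos {
	var pos token.Pos
	fset.Iterate(func(f *token.File) bool {
		if f.Name() == filename {
			pos = f.Pos(offset) // convert the offset to a Pos within this file
			return false        // stop iterating
		}
		return true
	})
	return pos
}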
// Eval returns the type and, if constant, the value for the
// expression expr, evaluated at position pos of package pkg,
// which must have been derived from type-checking an AST with
// complete position information relative to the provided file
// set.
//
// If the expression contains function literals, their bodies
// are ignored (i.e., the bodies are not type-checked).
//
// If pkg == nil, the Universe scope is used and the provided
// position pos is ignored. If pkg != nil, and pos is invalid,
// the package scope is used. Otherwise, pos must belong to the
// package.
//
// An error is returned if pos is not within the package or
// if the node cannot be evaluated.
//
// Note: Eval should not be used instead of running Check to compute
// types and values, but in addition to Check. Eval will re-evaluate
// its argument each time, and it also does not know about the context
// in which an expression is used (e.g., an assignment). Thus, top-
// level untyped constants will return an untyped type rather than the
// respective context-specific type.
//
func Eval(fset *token.FileSet, pkg *Package, pos token.Pos, expr string) (TypeAndValue, error) {
	// determine scope
	var scope *Scope
	if pkg == nil {
		scope = Universe
		pos = token.NoPos
	} else if !pos.IsValid() {
		scope = pkg.scope
	} else {
		// The package scope extent (position information) may be
		// incorrect (files spread across a wide range of fset
		// positions) - ignore it and just consider its children
		// (file scopes).
		for _, fscope := range pkg.scope.children {
			if scope = fscope.Innermost(pos); scope != nil {
				break
			}
		}
		if scope == nil || debug {
			s := scope
			for s != nil && s != pkg.scope {
				s = s.parent
			}
			// s == nil || s == pkg.scope
			if s == nil {
				return TypeAndValue{}, fmt.Errorf("no position %s found in package %s", fset.Position(pos), pkg.name)
			}
		}
	}

	// parse expression
	node, err := parser.ParseExprFrom(fset, "eval", expr, 0)
	if err != nil {
		return TypeAndValue{}, err
	}

	// initialize checker
	check := NewChecker(nil, fset, pkg, nil)
	check.scope = scope
	check.pos = pos
	defer check.handleBailout(&err)

	// evaluate node
	var x operand
	check.rawExpr(&x, node, nil)
	return TypeAndValue{x.mode, x.typ, x.val}, err
}
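// Usage sketch for Eval: a hedged, self-contained example (not part of the
// original source) that evaluates a constant expression in the Universe
// scope, so no type-checked package is needed (pkg == nil, pos is ignored).
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	fset := token.NewFileSet()
	tv, err := types.Eval(fset, nil, token.NoPos, "1 << 10")
	if err != nil {
		panic(err)
	}
	fmt.Println(tv.Type, tv.Value) // untyped int 1024
}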
// makeErrList converts errs to a scanner.ErrorList, preserving position
// information for *types.Error values; any other error is recorded with
// an empty position.
func makeErrList(fset *token.FileSet, errs []error) scanner.ErrorList {
	var errList scanner.ErrorList
	for _, err := range errs {
		if v, ok := err.(*types.Error); ok {
			errList = append(errList, &scanner.Error{
				Pos: fset.Position(v.Pos),
				Msg: v.Msg,
			})
		} else {
			errList = append(errList, &scanner.Error{
				Pos: token.Position{},
				Msg: err.Error(),
			})
		}
	}
	return errList
}
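// A hedged usage sketch for makeErrList (the helper name typeCheckErrors
// and the package path "p" are assumptions): collect type-checker errors
// via types.Config.Error, convert them, and sort them for stable reporting.
func typeCheckErrors(fset *token.FileSet, files []*ast.File) scanner.ErrorList {
	var errs []error
	conf := types.Config{
		Error: func(err error) { errs = append(errs, err) }, // called for every error found
	}
	conf.Check("p", fset, files, nil) // the returned error is also delivered via conf.Error
	errList := makeErrList(fset, errs)
	errList.Sort() // scanner.ErrorList sorts by file, line, column
	return errList
}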
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(fset *token.FileSet, f *ast.File) {
	for i, d := range f.Decls {
		d, ok := d.(*ast.GenDecl)
		if !ok || d.Tok != token.IMPORT {
			// Not an import declaration, so we're done.
			// Imports are always first.
			break
		}

		if len(d.Specs) == 0 {
			// Empty import block, remove it.
			f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		}

		if !d.Lparen.IsValid() {
			// Not a block: sorted by default.
			continue
		}

		// Identify and sort runs of specs on successive lines.
		i := 0
		specs := d.Specs[:0]
		for j, s := range d.Specs {
			if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
				// j begins a new run. End this one.
				specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
				i = j
			}
		}
		specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
		d.Specs = specs

		// Deduping can leave a blank line before the rparen; clean that up.
		if len(d.Specs) > 0 {
			lastSpec := d.Specs[len(d.Specs)-1]
			lastLine := fset.Position(lastSpec.Pos()).Line
			if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
				fset.File(d.Rparen).MergeLine(rParenLine - 1)
			}
		}
	}
}
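// A hedged usage sketch (not part of the original source): this logic is
// reachable through the exported go/ast.SortImports, which sorts runs and
// drops the duplicate "fmt" in the src literal assumed below.
package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

const src = `package p

import (
	"os"
	"fmt"
	"fmt"
)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	ast.SortImports(fset, f) // sorts the run and removes the duplicate import
	printer.Fprint(os.Stdout, fset, f)
}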
// NewCommentMap creates a new comment map by associating comment groups
// of the comments list with the nodes of the AST specified by node.
//
// A comment group g is associated with a node n if:
//
//   - g starts on the same line as n ends
//   - g starts on the line immediately following n, and there is
//     at least one empty line after g and before the next node
//   - g starts before n and is not associated to the node before n
//     via the previous rules
//
// NewCommentMap tries to associate a comment group to the "largest"
// node possible: For instance, if the comment is a line comment
// trailing an assignment, the comment is associated with the entire
// assignment rather than just the last operand in the assignment.
//
func NewCommentMap(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap {
	if len(comments) == 0 {
		return nil // no comments to map
	}

	cmap := make(CommentMap)

	// set up comment reader r
	tmp := make([]*CommentGroup, len(comments))
	copy(tmp, comments) // don't change incoming comments
	sortComments(tmp)
	r := commentListReader{fset: fset, list: tmp} // !r.eol() because len(comments) > 0
	r.next()

	// create node list in lexical order
	nodes := nodeList(node)
	nodes = append(nodes, nil) // append sentinel

	// set up iteration variables
	var (
		p     Node           // previous node
		pend  token.Position // end of p
		pg    Node           // previous node group (enclosing nodes of "importance")
		pgend token.Position // end of pg
		stack nodeStack      // stack of node groups
	)

	for _, q := range nodes {
		var qpos token.Position
		if q != nil {
			qpos = fset.Position(q.Pos()) // current node position
		} else {
			// set fake sentinel position to infinity so that
			// all comments get processed before the sentinel
			const infinity = 1 << 30
			qpos.Offset = infinity
			qpos.Line = infinity
		}

		// process comments before current node
		for r.end.Offset <= qpos.Offset {
			// determine recent node group
			if top := stack.pop(r.comment.Pos()); top != nil {
				pg = top
				pgend = fset.Position(pg.End())
			}
			// Try to associate a comment first with a node group
			// (i.e., a node of "importance" such as a declaration);
			// if that fails, try to associate it with the most recent
			// node.
			// TODO(gri) try to simplify the logic below
			var assoc Node
			switch {
			case pg != nil &&
				(pgend.Line == r.pos.Line ||
					pgend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line):
				// 1) comment starts on same line as previous node group ends, or
				// 2) comment starts on the line immediately after the
				//    previous node group and there is an empty line before
				//    the current node
				// => associate comment with previous node group
				assoc = pg
			case p != nil &&
				(pend.Line == r.pos.Line ||
					pend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line ||
					q == nil):
				// same rules apply as above for p rather than pg,
				// but also associate with p if we are at the end (q == nil)
				assoc = p
			default:
				// otherwise, associate comment with current node
				if q == nil {
					// we can only reach here if there was no p
					// which would imply that there were no nodes
					panic("internal error: no comments should be associated with sentinel")
				}
				assoc = q
			}
			cmap.addComment(assoc, r.comment)
			if r.eol() {
				return cmap
			}
			r.next()
		}

		// update previous node
		p = q
		pend = fset.Position(p.End())

		// update previous node group if we see an "important" node
		switch q.(type) {
		case *File, *Field, Decl, Spec, Stmt:
			stack.push(q)
		}
	}

	return cmap
}
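// Usage sketch for NewCommentMap: a hedged, self-contained example (not
// part of the original source; the src literal is an assumption). It builds
// the map for a parsed file and prints which node each comment group was
// attached to; both comments should attach to the *ast.GenDecl.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package p

// v is a variable.
var v int // trailing comment
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	cmap := ast.NewCommentMap(fset, f, f.Comments)
	for node, groups := range cmap {
		for _, g := range groups {
			fmt.Printf("%T: %q\n", node, g.Text())
		}
	}
}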
// sortSpecs sorts the given import specs by import path, removes
// duplicates where possible, and fixes up spec and comment positions
// to preserve the original layout.
func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
	// Can't short-circuit here even if specs are already sorted,
	// since they might yet need deduplication.
	// A lone import, however, may be safely ignored.
	if len(specs) <= 1 {
		return specs
	}

	// Record positions for specs.
	pos := make([]posSpan, len(specs))
	for i, s := range specs {
		pos[i] = posSpan{s.Pos(), s.End()}
	}

	// Identify comments in this range.
	// Any comment from pos[0].Start to the final line counts.
	lastLine := fset.Position(pos[len(pos)-1].End).Line
	cstart := len(f.Comments)
	cend := len(f.Comments)
	for i, g := range f.Comments {
		if g.Pos() < pos[0].Start {
			continue
		}
		if i < cstart {
			cstart = i
		}
		if fset.Position(g.End()).Line > lastLine {
			cend = i
			break
		}
	}
	comments := f.Comments[cstart:cend]

	// Assign each comment to the import spec preceding it.
	importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
	specIndex := 0
	for _, g := range comments {
		for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
			specIndex++
		}
		s := specs[specIndex].(*ast.ImportSpec)
		importComment[s] = append(importComment[s], g)
	}

	// Sort the import specs by import path.
	// Remove duplicates, when possible without data loss.
	// Reassign the import paths to have the same position sequence.
	// Reassign each comment to abut the end of its spec.
	// Sort the comments by new position.
	sort.Sort(byImportSpec(specs))

	// Dedup. Thanks to our sorting, we can just consider
	// adjacent pairs of imports.
	deduped := specs[:0]
	for i, s := range specs {
		if i == len(specs)-1 || !collapse(s, specs[i+1]) {
			deduped = append(deduped, s)
		} else {
			p := s.Pos()
			fset.File(p).MergeLine(fset.Position(p).Line)
		}
	}
	specs = deduped

	// Fix up comment positions
	for i, s := range specs {
		s := s.(*ast.ImportSpec)
		if s.Name != nil {
			s.Name.NamePos = pos[i].Start
		}
		s.Path.ValuePos = pos[i].Start
		s.EndPos = pos[i].End
		for _, g := range importComment[s] {
			for _, c := range g.List {
				c.Slash = pos[i].End
			}
		}
	}

	sort.Sort(byCommentPos(comments))

	return specs
}
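// A minimal sketch of the collapse predicate used in the dedup step above
// (hypothetical; the real helper may differ): two adjacent sorted specs
// collapse only if they import the same path under the same name and
// dropping the earlier one would lose no comment.
func collapse(prev, next ast.Spec) bool {
	p := prev.(*ast.ImportSpec)
	n := next.(*ast.ImportSpec)
	if p.Path.Value != n.Path.Value {
		return false
	}
	if (p.Name == nil) != (n.Name == nil) {
		return false
	}
	if p.Name != nil && p.Name.Name != n.Name.Name {
		return false
	}
	return p.Comment == nil // keep both if prev carries a trailing comment
}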
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
	var delspecs []*ast.ImportSpec

	// Find the import nodes that import path, if any.
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j := 0; j < len(gen.Specs); j++ {
			spec := gen.Specs[j]
			impspec := spec.(*ast.ImportSpec)
			if impspec.Name == nil && name != "" {
				continue
			}
			if impspec.Name != nil && impspec.Name.Name != name {
				continue
			}
			if importPath(impspec) != path {
				continue
			}

			// We found an import spec that imports path.
			// Delete it.
			delspecs = append(delspecs, impspec)
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]

			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
				i--
				break
			} else if len(gen.Specs) == 1 {
				gen.Lparen = token.NoPos // drop parens
			}
			if j > 0 {
				lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
				lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
				line := fset.Position(impspec.Path.ValuePos).Line

				// We deleted an entry but now there may be
				// a blank line-sized hole where the import was.
				if line-lastLine > 1 {
					// There was a blank line immediately preceding the deleted import,
					// so there's no need to close the hole.
					// Do nothing.
				} else {
					// There was no blank line. Close the hole.
					fset.File(gen.Rparen).MergeLine(line)
				}
			}
			j--
		}
	}

	// Delete them from f.Imports.
	for i := 0; i < len(f.Imports); i++ {
		imp := f.Imports[i]
		for j, del := range delspecs {
			if imp == del {
				copy(f.Imports[i:], f.Imports[i+1:])
				f.Imports = f.Imports[:len(f.Imports)-1]
				copy(delspecs[j:], delspecs[j+1:])
				delspecs = delspecs[:len(delspecs)-1]
				i--
				break
			}
		}
	}

	if len(delspecs) > 0 {
		panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
	}

	return
}
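// Usage sketch for DeleteNamedImport: a hedged, self-contained example (not
// part of the original source; the src literal is an assumption). It removes
// the named import myio "io" from a parsed file; since one spec remains,
// the parens are dropped and the file prints as `import "fmt"`. The function
// is assumed reachable via golang.org/x/tools/go/ast/astutil.
package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

const src = `package p

import (
	"fmt"
	myio "io"
)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	astutil.DeleteNamedImport(fset, f, "myio", "io")
	printer.Fprint(os.Stdout, fset, f)
}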