// format parses src, prints the corresponding AST, verifies the resulting
// src is syntactically correct, and returns the resulting src or an error
// if any.
func format(src []byte, mode checkMode) ([]byte, error) {
	// parse src
	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
	if err != nil {
		return nil, fmt.Errorf("parse: %s\n%s", err, src)
	}

	// filter exports if necessary
	if mode&export != 0 {
		ast.FileExports(f) // ignore result
		f.Comments = nil   // don't print comments that are not in AST
	}

	// determine printer configuration
	cfg := Config{Tabwidth: tabwidth}
	if mode&rawFormat != 0 {
		cfg.Mode |= RawFormat
	}

	// print AST
	var buf bytes.Buffer
	if err := cfg.Fprint(&buf, fset, f); err != nil {
		return nil, fmt.Errorf("print: %s", err)
	}

	// make sure formatted output is syntactically correct
	res := buf.Bytes()
	if _, err := parser.ParseFile(fset, "", res, 0); err != nil {
		return nil, fmt.Errorf("re-parse: %s\n%s", err, buf.Bytes())
	}

	return res, nil
}
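// A minimal usage sketch (not part of the original test file): round-trip a
// small source snippet through format above. Assumes the package-level fset
// and the checkMode bits (export, rawFormat) that format relies on.
func formatExample() {
	src := []byte("package p\n\nfunc   f() { println(42) }\n")
	out, err := format(src, 0) // zero mode: keep all decls, default layout
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}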
func doParseFiles(filePaths []string, fset *token.FileSet) (*token.FileSet, []*ast.File, error) {
	if fset == nil {
		fset = token.NewFileSet()
	}
	util.Info("parsing files %v", filePaths)
	astFiles := make([]*ast.File, 0, len(filePaths))
	for _, f := range filePaths {
		// XXX: Skip files whose package name ends with _test: check()
		// currently fails when test files are mixed into the package
		// under analysis. The _test package needs to be analyzed
		// separately from the package it tests.
		tempFset := token.NewFileSet()
		astFile, err := parser.ParseFile(tempFset, f, nil, 0)
		if err != nil {
			return nil, nil, err
		}
		if !strings.HasSuffix(astFile.Name.Name, "_test") {
			astFile, err := parser.ParseFile(fset, f, nil, 0)
			if err != nil {
				return nil, nil, err
			}
			astFiles = append(astFiles, astFile)
		}
	}
	iterateFunc := func(f *token.File) bool {
		util.Debug("\t%s", f.Name())
		return true
	}
	fset.Iterate(iterateFunc)
	return fset, astFiles, nil
}
// Parse parses the source in filename and returns a list of tags.
// If filename is "-", the source is read from standard input.
func Parse(filename string) ([]Tag, error) {
	p := &tagParser{
		fset:  token.NewFileSet(),
		tags:  []Tag{},
		types: make([]string, 0),
	}

	var f *ast.File
	var err error
	if filename != "-" {
		f, err = parser.ParseFile(p.fset, filename, nil, 0)
		if err != nil {
			return nil, err
		}
	} else {
		src, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return nil, err
		}
		f, err = parser.ParseFile(p.fset, "", src, 0)
		if err != nil {
			return nil, err
		}
	}

	// package
	p.parsePackage(f)
	// imports
	p.parseImports(f)
	// declarations
	p.parseDeclarations(f)

	return p.tags, nil
}
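// Usage sketch: generate tags for one file. The printed form of Tag is
// whatever the surrounding package defines; fmt's default formatting is
// enough for a demo.
func printTags() {
	tags, err := Parse("main.go")
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range tags {
		fmt.Println(tag)
	}
}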
// NewParser creates a new Parser reference from the given options.
func NewParser(opts *ParserOptions) (*Parser, error) {
	if opts == nil {
		return nil, errors.New("file, src or dir is not specified")
	}

	var mode parser.Mode
	if opts.Comments {
		mode = parser.ParseComments
	}

	fset := token.NewFileSet()
	p := &Parser{fset: fset}

	var err error
	switch {
	case opts.File != "":
		p.file, err = parser.ParseFile(fset, opts.File, nil, mode)
		if err != nil {
			return nil, err
		}
	case opts.Dir != "":
		p.pkgs, err = parser.ParseDir(fset, opts.Dir, nil, mode)
		if err != nil {
			return nil, err
		}
	case opts.Src != nil:
		p.file, err = parser.ParseFile(fset, "src.go", opts.Src, mode)
		if err != nil {
			return nil, err
		}
	default:
		return nil, errors.New("file, src or dir is not specified")
	}

	return p, nil
}
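// Usage sketch: parse an in-memory snippet. The ParserOptions field types
// are inferred from the switch above (File, Dir string; Src accepts what
// parser.ParseFile accepts; Comments bool).
func newParserExample() (*Parser, error) {
	return NewParser(&ParserOptions{
		Src:      []byte("package demo\n\nfunc Hello() string { return \"hi\" }\n"),
		Comments: true,
	})
}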
func checkGoFile(filePath string) bool {
	if strings.HasSuffix(filePath, ".go") {
		var (
			fset       = token.NewFileSet()
			astFile    *ast.File
			err        error
			hasGoglImp = false
		)
		// First pass: imports only, to see whether the file uses go-opengl at all.
		if astFile, err = parser.ParseFile(fset, filePath, nil, parser.ImportsOnly); err != nil {
			panic(err)
		}
		for _, s := range astFile.Imports {
			if hasGoglImp = strings.Contains(s.Path.Value, "github.com/go3d/go-opengl/core/"); hasGoglImp {
				break
			}
		}
		// Second pass: parse the full file, but only if the import is present.
		if hasGoglImp {
			if astFile, err = parser.ParseFile(fset, filePath, nil, 0); err != nil {
				panic(err)
			}
			curFilePath = filePath
			ast.Inspect(astFile, inspectNode)
		}
	}
	return true
}
func applyPatches(file *ast.File, fileSet *token.FileSet, importPath string) *ast.File {
	removeImport := func(path string) {
		for _, decl := range file.Decls {
			if d, ok := decl.(*ast.GenDecl); ok && d.Tok == token.IMPORT {
				for j, spec := range d.Specs {
					value := spec.(*ast.ImportSpec).Path.Value
					if value[1:len(value)-1] == path {
						d.Specs[j] = d.Specs[len(d.Specs)-1]
						d.Specs = d.Specs[:len(d.Specs)-1]
						return
					}
				}
			}
		}
	}

	removeFunction := func(name string) {
		for i, decl := range file.Decls {
			if d, ok := decl.(*ast.FuncDecl); ok {
				if d.Name.String() == name {
					file.Decls[i] = file.Decls[len(file.Decls)-1]
					file.Decls = file.Decls[:len(file.Decls)-1]
					return
				}
			}
		}
	}

	basename := filepath.Base(fileSet.Position(file.Pos()).Filename)
	switch {
	case importPath == "bytes_test" && basename == "equal_test.go":
		file, _ = parser.ParseFile(fileSet, basename, "package bytes_test", 0)
	case importPath == "crypto/rc4" && basename == "rc4_ref.go": // see https://codereview.appspot.com/40540049/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !amd64,!arm,!386\n\npackage rc4\n\n// XORKeyStream sets dst to the result of XORing src with the key stream.\n// Dst and src may be the same slice but otherwise should not overlap.\nfunc (c *Cipher) XORKeyStream(dst, src []byte) {\n i, j := c.i, c.j\n for k, v := range src {\n i += 1\n j += uint8(c.s[i])\n c.s[i], c.s[j] = c.s[j], c.s[i]\n dst[k] = v ^ uint8(c.s[uint8(c.s[i]+c.s[j])])\n }\n c.i, c.j = i, j\n}", 0)
	case importPath == "encoding/json" && basename == "stream_test.go":
		removeImport("net")
		removeFunction("TestBlocking")
	case importPath == "math/big" && basename == "arith.go": // see https://codereview.appspot.com/55470046/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// This file provides Go implementations of elementary multi-precision\n// arithmetic operations on word vectors. Needed for platforms without\n// assembly implementations of these routines.\n\npackage big\n\n// A Word represents a single digit of a multi-precision unsigned integer.\ntype Word uintptr\n\nconst (\n // Compute the size _S of a Word in bytes.\n _m = ^Word(0)\n _logS = _m>>8&1 + _m>>16&1 + _m>>32&1\n _S = 1 << _logS\n\n _W = _S << 3 // word size in bits\n _B = 1 << _W // digit base\n _M = _B - 1 // digit mask\n\n _W2 = _W / 2 // half word size in bits\n _B2 = 1 << _W2 // half digit base\n _M2 = _B2 - 1 // half digit mask\n)\n\n// ----------------------------------------------------------------------------\n// Elementary operations on words\n//\n// These operations are used by the vector operations below.\n\n// z1<<_W + z0 = x+y+c, with c == 0 or 1\nfunc addWW_g(x, y, c Word) (z1, z0 Word) {\n yc := y + c\n z0 = x + yc\n if z0 < x || yc < y {\n z1 = 1\n }\n return\n}\n\n// z1<<_W + z0 = x-y-c, with c == 0 or 1\nfunc subWW_g(x, y, c Word) (z1, z0 Word) {\n yc := y + c\n z0 = x - yc\n if z0 > x || yc < y {\n z1 = 1\n }\n return\n}\n\n// z1<<_W + z0 = x*y\n// Adapted from Warren, Hacker's Delight, p. 132.\nfunc mulWW_g(x, y Word) (z1, z0 Word) {\n x0 := x & _M2\n x1 := x >> _W2\n y0 := y & _M2\n y1 := y >> _W2\n w0 := x0 * y0\n t := x1*y0 + w0>>_W2\n w1 := t & _M2\n w2 := t >> _W2\n w1 += x0 * y1\n z1 = x1*y1 + w2 + w1>>_W2\n z0 = x * y\n return\n}\n\n// z1<<_W + z0 = x*y + c\nfunc mulAddWWW_g(x, y, c Word) (z1, z0 Word) {\n z1, zz0 := mulWW(x, y)\n if z0 = zz0 + c; z0 < zz0 {\n z1++\n }\n return\n}\n\n// Length of x in bits.\nfunc bitLen_g(x Word) (n int) {\n for ; x >= 0x8000; x >>= 16 {\n n += 16\n }\n if x >= 0x80 {\n x >>= 8\n n += 8\n }\n if x >= 0x8 {\n x >>= 4\n n += 4\n }\n if x >= 0x2 {\n x >>= 2\n n += 2\n }\n if x >= 0x1 {\n n++\n }\n return\n}\n\n// log2 computes the integer binary logarithm of x.\n// The result is the integer n for which 2^n <= x < 2^(n+1).\n// If x == 0, the result is -1.\nfunc log2(x Word) int {\n return bitLen(x) - 1\n}\n\n// Number of leading zeros in x.\nfunc leadingZeros(x Word) uint {\n return uint(_W - bitLen(x))\n}\n\n// q = (u1<<_W + u0 - r)/y\n// Adapted from Warren, Hacker's Delight, p. 152.\nfunc divWW_g(u1, u0, v Word) (q, r Word) {\n if u1 >= v {\n return 1<<_W - 1, 1<<_W - 1\n }\n\n s := leadingZeros(v)\n v <<= s\n\n vn1 := v >> _W2\n vn0 := v & _M2\n un32 := u1<<s | u0>>(_W-s)\n un10 := u0 << s\n un1 := un10 >> _W2\n un0 := un10 & _M2\n q1 := un32 / vn1\n rhat := un32 - q1*vn1\n\n for q1 >= _B2 || q1*vn0 > _B2*rhat+un1 {\n q1--\n rhat += vn1\n if rhat >= _B2 {\n break\n }\n }\n\n un21 := un32*_B2 + un1 - q1*v\n q0 := un21 / vn1\n rhat = un21 - q0*vn1\n\n for q0 >= _B2 || q0*vn0 > _B2*rhat+un0 {\n q0--\n rhat += vn1\n if rhat >= _B2 {\n break\n }\n }\n\n return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s\n}\n\nfunc addVV_g(z, x, y []Word) (c Word) {\n for i := range z {\n c, z[i] = addWW_g(x[i], y[i], c)\n }\n return\n}\n\nfunc subVV_g(z, x, y []Word) (c Word) {\n for i := range z {\n c, z[i] = subWW_g(x[i], y[i], c)\n }\n return\n}\n\nfunc addVW_g(z, x []Word, y Word) (c Word) {\n c = y\n for i := range z {\n c, z[i] = addWW_g(x[i], c, 0)\n }\n return\n}\n\nfunc subVW_g(z, x []Word, y Word) (c Word) {\n c = y\n for i := range z {\n c, z[i] = subWW_g(x[i], c, 0)\n }\n return\n}\n\nfunc shlVU_g(z, x []Word, s uint) (c Word) {\n if n := len(z); n > 0 {\n ŝ := _W - s\n w1 := x[n-1]\n c = w1 >> ŝ\n for i := n - 1; i > 0; i-- {\n w := w1\n w1 = x[i-1]\n z[i] = w<<s | w1>>ŝ\n }\n z[0] = w1 << s\n }\n return\n}\n\nfunc shrVU_g(z, x []Word, s uint) (c Word) {\n if n := len(z); n > 0 {\n ŝ := _W - s\n w1 := x[0]\n c = w1 << ŝ\n for i := 0; i < n-1; i++ {\n w := w1\n w1 = x[i+1]\n z[i] = w>>s | w1<<ŝ\n }\n z[n-1] = w1 >> s\n }\n return\n}\n\nfunc mulAddVWW_g(z, x []Word, y, r Word) (c Word) {\n c = r\n for i := range z {\n c, z[i] = mulAddWWW_g(x[i], y, c)\n }\n return\n}\n\nfunc addMulVVW_g(z, x []Word, y Word) (c Word) {\n for i := range z {\n z1, z0 := mulAddWWW_g(x[i], y, z[i])\n c, z[i] = addWW_g(z0, c, 0)\n c += z1\n }\n return\n}\n\nfunc divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {\n r = xn\n for i := len(z) - 1; i >= 0; i-- {\n z[i], r = divWW_g(r, x[i], y)\n }\n return\n}", 0)
	case importPath == "reflect_test" && basename == "all_test.go":
		removeImport("unsafe")
		removeFunction("TestAlignment")
		removeFunction("TestSliceOverflow")
	case importPath == "runtime" && strings.HasPrefix(basename, "zgoarch_"):
		file, _ = parser.ParseFile(fileSet, basename, "package runtime\nconst theGoarch = `js`\n", 0)
	case importPath == "sync/atomic_test" && basename == "atomic_test.go":
		removeFunction("TestUnaligned64")
	case importPath == "text/template/parse" && basename == "lex.go": // this patch will be removed as soon as goroutines are supported
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\npackage parse\nimport (\n \"container/list\"\n \"fmt\"\n \"strings\"\n \"unicode\"\n \"unicode/utf8\"\n)\n// item represents a token or text string returned from the scanner.\ntype item struct {\n typ itemType // The type of this item.\n pos Pos // The starting position, in bytes, of this item in the input string.\n val string // The value of this item.\n}\nfunc (i item) String() string {\n switch {\n case i.typ == itemEOF:\n return \"EOF\"\n case i.typ == itemError:\n return i.val\n case i.typ > itemKeyword:\n return fmt.Sprintf(\"<%s>\", i.val)\n case len(i.val) > 10:\n return fmt.Sprintf(\"%.10q...\", i.val)\n }\n return fmt.Sprintf(\"%q\", i.val)\n}\n// itemType identifies the type of lex items.\ntype itemType int\nconst (\n itemError itemType = iota // error occurred; value is text of error\n itemBool // boolean constant\n itemChar // printable ASCII character; grab bag for comma etc.\n itemCharConstant // character constant\n itemComplex // complex constant (1+2i); imaginary is just a number\n itemColonEquals // colon-equals (':=') introducing a declaration\n itemEOF\n itemField // alphanumeric identifier starting with '.'\n itemIdentifier // alphanumeric identifier not starting with '.'\n itemLeftDelim // left action delimiter\n itemLeftParen // '(' inside action\n itemNumber // simple number, including imaginary\n itemPipe // pipe symbol\n itemRawString // raw quoted string (includes quotes)\n itemRightDelim // right action delimiter\n itemRightParen // ')' inside action\n itemSpace // run of spaces separating arguments\n itemString // quoted string (includes quotes)\n itemText // plain text\n itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'\n // Keywords appear after all the rest.\n itemKeyword // used only to delimit the keywords\n itemDot // the cursor, spelled '.'\n itemDefine // define keyword\n itemElse // else keyword\n itemEnd // end keyword\n itemIf // if keyword\n itemNil // the untyped nil constant, easiest to treat as a keyword\n itemRange // range keyword\n itemTemplate // template keyword\n itemWith // with keyword\n)\nvar key = map[string]itemType{\n \".\": itemDot,\n \"define\": itemDefine,\n \"else\": itemElse,\n \"end\": itemEnd,\n \"if\": itemIf,\n \"range\": itemRange,\n \"nil\": itemNil,\n \"template\": itemTemplate,\n \"with\": itemWith,\n}\nconst eof = -1\n// stateFn represents the state of the scanner as a function that returns the next state.\ntype stateFn func(*lexer) stateFn\n// lexer holds the state of the scanner.\ntype lexer struct {\n name string // the name of the input; used only for error reports\n input string // the string being scanned\n leftDelim string // start of action\n rightDelim string // end of action\n state stateFn // the next lexing function to enter\n pos Pos // current position in the input\n start Pos // start position of this item\n width Pos // width of last rune read from input\n lastPos Pos // position of most recent item returned by nextItem\n items *list.List // scanned items\n parenDepth int // nesting depth of ( ) exprs\n}\n// next returns the next rune in the input.\nfunc (l *lexer) next() rune {\n if int(l.pos) >= len(l.input) {\n l.width = 0\n return eof\n }\n r, w := utf8.DecodeRuneInString(l.input[l.pos:])\n l.width = Pos(w)\n l.pos += l.width\n return r\n}\n// peek returns but does not consume the next rune in the input.\nfunc (l *lexer) peek() rune {\n r := l.next()\n l.backup()\n return r\n}\n// backup steps back one rune. Can only be called once per call of next.\nfunc (l *lexer) backup() {\n l.pos -= l.width\n}\n// emit passes an item back to the client.\nfunc (l *lexer) emit(t itemType) {\n l.items.PushBack(item{t, l.start, l.input[l.start:l.pos]})\n l.start = l.pos\n}\n// ignore skips over the pending input before this point.\nfunc (l *lexer) ignore() {\n l.start = l.pos\n}\n// accept consumes the next rune if it's from the valid set.\nfunc (l *lexer) accept(valid string) bool {\n if strings.IndexRune(valid, l.next()) >= 0 {\n return true\n }\n l.backup()\n return false\n}\n// acceptRun consumes a run of runes from the valid set.\nfunc (l *lexer) acceptRun(valid string) {\n for strings.IndexRune(valid, l.next()) >= 0 {\n }\n l.backup()\n}\n// lineNumber reports which line we're on, based on the position of\n// the previous item returned by nextItem. Doing it this way\n// means we don't have to worry about peek double counting.\nfunc (l *lexer) lineNumber() int {\n return 1 + strings.Count(l.input[:l.lastPos], \"\\n\")\n}\n// errorf returns an error token and terminates the scan by passing\n// back a nil pointer that will be the next state, terminating l.nextItem.\nfunc (l *lexer) errorf(format string, args ...interface{}) stateFn {\n l.items.PushBack(item{itemError, l.start, fmt.Sprintf(format, args...)})\n return nil\n}\n// nextItem returns the next item from the input.\nfunc (l *lexer) nextItem() item {\n element := l.items.Front()\n for element == nil {\n l.state = l.state(l)\n element = l.items.Front()\n }\n l.items.Remove(element)\n item := element.Value.(item)\n l.lastPos = item.pos\n return item\n}\n// lex creates a new scanner for the input string.\nfunc lex(name, input, left, right string) *lexer {\n if left == \"\" {\n left = leftDelim\n }\n if right == \"\" {\n right = rightDelim\n }\n l := &lexer{\n name: name,\n input: input,\n leftDelim: left,\n rightDelim: right,\n items: list.New(),\n }\n l.state = lexText\n return l\n}\n// state functions\nconst (\n leftDelim = \"{{\"\n rightDelim = \"}}\"\n leftComment = \"/*\"\n rightComment = \"*/\"\n)\n// lexText scans until an opening action delimiter, \"{{\".\nfunc lexText(l *lexer) stateFn {\n for {\n if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {\n if l.pos > l.start {\n l.emit(itemText)\n }\n return lexLeftDelim\n }\n if l.next() == eof {\n break\n }\n }\n // Correctly reached EOF.\n if l.pos > l.start {\n l.emit(itemText)\n }\n l.emit(itemEOF)\n return nil\n}\n// lexLeftDelim scans the left delimiter, which is known to be present.\nfunc lexLeftDelim(l *lexer) stateFn {\n l.pos += Pos(len(l.leftDelim))\n if strings.HasPrefix(l.input[l.pos:], leftComment) {\n return lexComment\n }\n l.emit(itemLeftDelim)\n l.parenDepth = 0\n return lexInsideAction\n}\n// lexComment scans a comment. The left comment marker is known to be present.\nfunc lexComment(l *lexer) stateFn {\n l.pos += Pos(len(leftComment))\n i := strings.Index(l.input[l.pos:], rightComment)\n if i < 0 {\n return l.errorf(\"unclosed comment\")\n }\n l.pos += Pos(i + len(rightComment))\n if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n return l.errorf(\"comment ends before closing delimiter\")\n }\n l.pos += Pos(len(l.rightDelim))\n l.ignore()\n return lexText\n}\n// lexRightDelim scans the right delimiter, which is known to be present.\nfunc lexRightDelim(l *lexer) stateFn {\n l.pos += Pos(len(l.rightDelim))\n l.emit(itemRightDelim)\n return lexText\n}\n// lexInsideAction scans the elements inside action delimiters.\nfunc lexInsideAction(l *lexer) stateFn {\n // Either number, quoted string, or identifier.\n // Spaces separate arguments; runs of spaces turn into itemSpace.\n // Pipe symbols separate and are emitted.\n if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n if l.parenDepth == 0 {\n return lexRightDelim\n }\n return l.errorf(\"unclosed left paren\")\n }\n switch r := l.next(); {\n case r == eof || isEndOfLine(r):\n return l.errorf(\"unclosed action\")\n case isSpace(r):\n return lexSpace\n case r == ':':\n if l.next() != '=' {\n return l.errorf(\"expected :=\")\n }\n l.emit(itemColonEquals)\n case r == '|':\n l.emit(itemPipe)\n case r == '\"':\n return lexQuote\n case r == '`':\n return lexRawQuote\n case r == '$':\n return lexVariable\n case r == '\\'':\n return lexChar\n case r == '.':\n // special look-ahead for \".field\" so we don't break l.backup().\n if l.pos < Pos(len(l.input)) {\n r := l.input[l.pos]\n if r < '0' || '9' < r {\n return lexField\n }\n }\n fallthrough // '.' can start a number.\n case r == '+' || r == '-' || ('0' <= r && r <= '9'):\n l.backup()\n return lexNumber\n case isAlphaNumeric(r):\n l.backup()\n return lexIdentifier\n case r == '(':\n l.emit(itemLeftParen)\n l.parenDepth++\n return lexInsideAction\n case r == ')':\n l.emit(itemRightParen)\n l.parenDepth--\n if l.parenDepth < 0 {\n return l.errorf(\"unexpected right paren %#U\", r)\n }\n return lexInsideAction\n case r <= unicode.MaxASCII && unicode.IsPrint(r):\n l.emit(itemChar)\n return lexInsideAction\n default:\n return l.errorf(\"unrecognized character in action: %#U\", r)\n }\n return lexInsideAction\n}\n// lexSpace scans a run of space characters.\n// One space has already been seen.\nfunc lexSpace(l *lexer) stateFn {\n for isSpace(l.peek()) {\n l.next()\n }\n l.emit(itemSpace)\n return lexInsideAction\n}\n// lexIdentifier scans an alphanumeric.\nfunc lexIdentifier(l *lexer) stateFn {\nLoop:\n for {\n switch r := l.next(); {\n case isAlphaNumeric(r):\n // absorb.\n default:\n l.backup()\n word := l.input[l.start:l.pos]\n if !l.atTerminator() {\n return l.errorf(\"bad character %#U\", r)\n }\n switch {\n case key[word] > itemKeyword:\n l.emit(key[word])\n case word[0] == '.':\n l.emit(itemField)\n case word == \"true\", word == \"false\":\n l.emit(itemBool)\n default:\n l.emit(itemIdentifier)\n }\n break Loop\n }\n }\n return lexInsideAction\n}\n// lexField scans a field: .Alphanumeric.\n// The . has been scanned.\nfunc lexField(l *lexer) stateFn {\n return lexFieldOrVariable(l, itemField)\n}\n// lexVariable scans a Variable: $Alphanumeric.\n// The $ has been scanned.\nfunc lexVariable(l *lexer) stateFn {\n if l.atTerminator() { // Nothing interesting follows -> \"$\".\n l.emit(itemVariable)\n return lexInsideAction\n }\n return lexFieldOrVariable(l, itemVariable)\n}\n// lexVariable scans a field or variable: [.$]Alphanumeric.\n// The . or $ has been scanned.\nfunc lexFieldOrVariable(l *lexer, typ itemType) stateFn {\n if l.atTerminator() { // Nothing interesting follows -> \".\" or \"$\".\n if typ == itemVariable {\n l.emit(itemVariable)\n } else {\n l.emit(itemDot)\n }\n return lexInsideAction\n }\n var r rune\n for {\n r = l.next()\n if !isAlphaNumeric(r) {\n l.backup()\n break\n }\n }\n if !l.atTerminator() {\n return l.errorf(\"bad character %#U\", r)\n }\n l.emit(typ)\n return lexInsideAction\n}\n// atTerminator reports whether the input is at valid termination character to\n// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases\n// like \"$x+2\" not being acceptable without a space, in case we decide one\n// day to implement arithmetic.\nfunc (l *lexer) atTerminator() bool {\n r := l.peek()\n if isSpace(r) || isEndOfLine(r) {\n return true\n }\n switch r {\n case eof, '.', ',', '|', ':', ')', '(':\n return true\n }\n // Does r start the delimiter? This can be ambiguous (with delim==\"//\", $x/2 will\n // succeed but should fail) but only in extremely rare cases caused by willfully\n // bad choice of delimiter.\n if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {\n return true\n }\n return false\n}\n// lexChar scans a character constant. The initial quote is already\n// scanned. Syntax checking is done by the parser.\nfunc lexChar(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case '\\\\':\n if r := l.next(); r != eof && r != '\\n' {\n break\n }\n fallthrough\n case eof, '\\n':\n return l.errorf(\"unterminated character constant\")\n case '\\'':\n break Loop\n }\n }\n l.emit(itemCharConstant)\n return lexInsideAction\n}\n// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This\n// isn't a perfect number scanner - for instance it accepts \".\" and \"0x0.2\"\n// and \"089\" - but when it's wrong the input is invalid and the parser (via\n// strconv) will notice.\nfunc lexNumber(l *lexer) stateFn {\n if !l.scanNumber() {\n return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n }\n if sign := l.peek(); sign == '+' || sign == '-' {\n // Complex: 1+2i. No spaces, must end in 'i'.\n if !l.scanNumber() || l.input[l.pos-1] != 'i' {\n return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n }\n l.emit(itemComplex)\n } else {\n l.emit(itemNumber)\n }\n return lexInsideAction\n}\nfunc (l *lexer) scanNumber() bool {\n // Optional leading sign.\n l.accept(\"+-\")\n // Is it hex?\n digits := \"0123456789\"\n if l.accept(\"0\") && l.accept(\"xX\") {\n digits = \"0123456789abcdefABCDEF\"\n }\n l.acceptRun(digits)\n if l.accept(\".\") {\n l.acceptRun(digits)\n }\n if l.accept(\"eE\") {\n l.accept(\"+-\")\n l.acceptRun(\"0123456789\")\n }\n // Is it imaginary?\n l.accept(\"i\")\n // Next thing mustn't be alphanumeric.\n if isAlphaNumeric(l.peek()) {\n l.next()\n return false\n }\n return true\n}\n// lexQuote scans a quoted string.\nfunc lexQuote(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case '\\\\':\n if r := l.next(); r != eof && r != '\\n' {\n break\n }\n fallthrough\n case eof, '\\n':\n return l.errorf(\"unterminated quoted string\")\n case '\"':\n break Loop\n }\n }\n l.emit(itemString)\n return lexInsideAction\n}\n// lexRawQuote scans a raw quoted string.\nfunc lexRawQuote(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case eof, '\\n':\n return l.errorf(\"unterminated raw quoted string\")\n case '`':\n break Loop\n }\n }\n l.emit(itemRawString)\n return lexInsideAction\n}\n// isSpace reports whether r is a space character.\nfunc isSpace(r rune) bool {\n return r == ' ' || r == '\\t'\n}\n// isEndOfLine reports whether r is an end-of-line character.\nfunc isEndOfLine(r rune) bool {\n return r == '\\r' || r == '\\n'\n}\n// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.\nfunc isAlphaNumeric(r rune) bool {\n return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)\n}", 0)
	}
	return file
}
func parse(fset *token.FileSet, src []byte) (interface{}, error) {
	// Try as a complete source file.
	file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
	if err == nil {
		return file, nil
	}
	// If the source is missing a package clause, try as a source fragment; otherwise fail.
	if !strings.Contains(err.Error(), "expected 'package'") {
		return nil, err
	}

	// Try as a declaration list by prepending a package clause in front of src.
	// Use ';' not '\n' to keep line numbers intact.
	psrc := append([]byte("package p;"), src...)
	file, err = parser.ParseFile(fset, "", psrc, parser.ParseComments)
	if err == nil {
		return file.Decls, nil
	}
	// If the source is missing a declaration, try as a statement list; otherwise fail.
	if !strings.Contains(err.Error(), "expected declaration") {
		return nil, err
	}

	// Try as a statement list by wrapping a function around src.
	fsrc := append(append([]byte("package p; func _() {"), src...), '}')
	file, err = parser.ParseFile(fset, "", fsrc, parser.ParseComments)
	if err == nil {
		return file.Decls[0].(*ast.FuncDecl).Body.List, nil
	}

	// Failed, and out of options.
	return nil, err
}
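// A small demonstration (not from the original source) of the fallback
// chain in parse: each fragment succeeds at a different stage and thus
// yields a different concrete type.
func parseFragments() {
	fset := token.NewFileSet()
	for _, src := range []string{
		"package p\nvar x = 1", // whole file       -> *ast.File
		"var x = 1",            // declaration list -> []ast.Decl
		"x := 1",               // statement list   -> []ast.Stmt
	} {
		node, err := parse(fset, []byte(src))
		fmt.Printf("%T %v\n", node, err)
	}
}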
// GetCurrentAppSettings loads the current settings by type-checking the
// settings source file and reading the values of its constants.
func GetCurrentAppSettings(settings_path string) (settings map[string]string, err error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, default_settings_path, nil, 0)
	if err != nil {
		// Fall back to the relative path.
		f, err = parser.ParseFile(fset, default_relative_settings_path, nil, 0)
		if err != nil {
			return nil, err
		}
	}

	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("wapour/settings", fset, []*ast.File{f}, nil)
	if err != nil {
		return nil, err
	}

	settings = make(map[string]string)
	for _, word := range initial {
		existing_set := pkg.Scope().Lookup(word).(*types.Const).Val().String()
		settings[word] = existing_set
	}
	return settings, err
}
func GetPackageList() {
	fset := token.NewFileSet()
	for _, fname := range files {
		file, err := parser.ParseFile(fset, fname, nil, parser.PackageClauseOnly)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		pname := file.Name.Name
		if pname == "main" {
			fullfile, err := parser.ParseFile(fset, fname, nil, 0)
			if err == nil {
				v := &MainCheckVisitor{fname: fname}
				ast.Walk(v, fullfile)
				if v.hasMain {
					// get the package name from the filename
					fparts := strings.Split(fname, ".")
					basename := path.Base(fparts[0])
					packages[basename] = nil
				} else {
					packages[pname] = nil
				}
			}
		} else {
			packages[file.Name.Name] = nil
		}
	}
}
// Print parses filename (or source, if non-nil) and returns the
// pretty-printed program text.
func Print(filename string, source interface{}) (pretty string, err error) {
	fset := token.NewFileSet()
	fileAst, err := parser.ParseFile(fset, filename, source, parser.ParseComments)

	// Make common corrections for snippet pastes: add a package clause
	// if the snippet lacks one, then retry the parse.
	if err != nil && source != nil {
		src := source.(string)
		if m, _ := regexp.MatchString(`^package`, src); !m {
			src = "package main\n\n" + src
		}
		if fileAst, err = parser.ParseFile(fset, filename, src, parser.ParseComments); err != nil {
			return
		}
	}

	var buf bytes.Buffer
	err = (&printer.Config{
		Mode:     printer.UseSpaces | printer.TabIndent,
		Tabwidth: 4,
	}).Fprint(&buf, fset, fileAst)
	pretty = buf.String()
	return
}
// rewriteImportsInFile rewrites import paths with gx vendored names;
// inspired by godeps rewrite.
func rewriteImportsInFile(fi string, rw func(string) string) error {
	cfg := &printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 8}
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, fi, nil, parser.ParseComments)
	if err != nil {
		return err
	}

	var changed bool
	for _, imp := range file.Imports {
		p, err := strconv.Unquote(imp.Path.Value)
		if err != nil {
			return err
		}
		np := rw(p)
		if np != p {
			changed = true
			imp.Path.Value = strconv.Quote(np)
		}
	}
	if !changed {
		return nil
	}

	// Re-print and re-parse so that ast.SortImports sees consistent
	// position information for the rewritten paths.
	buf := bufpool.Get().(*bytes.Buffer)
	if err = cfg.Fprint(buf, fset, file); err != nil {
		return err
	}
	fset = token.NewFileSet()
	file, err = parser.ParseFile(fset, fi, buf, parser.ParseComments)
	if err != nil {
		return err
	}
	buf.Reset()
	bufpool.Put(buf)

	ast.SortImports(fset, file)

	// Write to a temp file, then atomically rename over the original.
	wpath := fi + ".temp"
	w, err := os.Create(wpath)
	if err != nil {
		return err
	}
	if err = cfg.Fprint(w, fset, file); err != nil {
		return err
	}
	if err = w.Close(); err != nil {
		return err
	}
	return os.Rename(wpath, fi)
}
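// Hypothetical call (the gx hash prefix below is made up for illustration):
// strip a vendored prefix from every import in one file.
func undoVendorExample() error {
	return rewriteImportsInFile("main.go", func(p string) string {
		return strings.TrimPrefix(p, "gx/ipfs/QmExampleHash/")
	})
}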
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, stdin bool) (*ast.File, func(orig, src []byte) []byte, error) {
	// Try as whole source file.
	file, err := parser.ParseFile(fset, filename, src, parserMode)
	if err == nil {
		return file, nil, nil
	}
	// If the error is that the source file didn't begin with a
	// package line and this is standard input, fall through to
	// try as a source fragment. Stop and return on any other error.
	if !stdin || !strings.Contains(err.Error(), "expected 'package'") {
		return nil, nil, err
	}

	// If this is a declaration list, make it a source file
	// by inserting a package clause.
	// Insert using a ;, not a newline, so that the line numbers
	// in psrc match the ones in src.
	psrc := append([]byte("package p;"), src...)
	file, err = parser.ParseFile(fset, filename, psrc, parserMode)
	if err == nil {
		adjust := func(orig, src []byte) []byte {
			// Remove the package clause.
			// Gofmt has turned the ; into a \n.
			src = src[len("package p\n"):]
			return matchSpace(orig, src)
		}
		return file, adjust, nil
	}
	// If the error is that the source file didn't begin with a
	// declaration, fall through to try as a statement list.
	// Stop and return on any other error.
	if !strings.Contains(err.Error(), "expected declaration") {
		return nil, nil, err
	}

	// If this is a statement list, make it a source file
	// by inserting a package clause and turning the list
	// into a function body. This handles expressions too.
	// Insert using a ;, not a newline, so that the line numbers
	// in fsrc match the ones in src.
	fsrc := append(append([]byte("package p; func _() {"), src...), '}')
	file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
	if err == nil {
		adjust := func(orig, src []byte) []byte {
			// Remove the wrapping.
			// Gofmt has turned the ; into a \n\n.
			src = src[len("package p\n\nfunc _() {"):]
			src = src[:len(src)-len("}\n")]
			// Gofmt has also indented the function body one level.
			// Remove that indent.
			src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
			return matchSpace(orig, src)
		}
		return file, adjust, nil
	}

	// Failed, and out of options.
	return nil, nil, err
}
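// Sketch of how a caller is assumed to combine parse with the returned
// adjust function (gofmt's real driver also applies its own printer and
// tabwriter settings; this is simplified):
func formatStdin(fset *token.FileSet, src []byte) ([]byte, error) {
	file, adjust, err := parse(fset, "<standard input>", src, true)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, fset, file); err != nil {
		return nil, err
	}
	out := buf.Bytes()
	if adjust != nil {
		out = adjust(src, out)
	}
	return out, nil
}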
func getImports(dir string) *DepSet {
	var pf *ast.File
	var err error
	fset = token.NewFileSet()
	files := getGoFiles(dir)
	dset := NewDepSet()
	for _, file := range files {
		if showCalls {
			pf, err = parser.ParseFile(fset, file, nil, 0)
			if err != nil {
				continue
			}
			// Build a map with the imports mapping to the files
			for _, s := range pf.Imports {
				pname := strings.Trim(s.Path.Value, "\"")
				if debug {
					fmt.Printf("Import found: %s imports %s\n", file, pname)
				}
				dset.AddImport(pname, file)
			}
			if debug {
				for _, pkg := range dset.Packages() {
					fmt.Printf("Imported: %s\n", pkg)
				}
			}
			// ast.Inspect(pf, parseCalls)
			ast.Walk(finder{
				find: findCalls,
				dset: dset,
			}, pf)
		} else {
			pf, err = parser.ParseFile(fset, file, nil, parser.ImportsOnly)
			if err != nil {
				continue
			}
			// Build a map with the imports mapping to the files
			for _, s := range pf.Imports {
				pname := strings.Trim(s.Path.Value, "\"")
				dset.AddImport(pname, file)
			}
		}
		/*
		 * We'll need to AND the imports and the keys in the
		 * package calls map to trim out keys from the latter
		 * which are not actually packages.
		 * We also need to figure out how to account for methods -
		 * since the qualifier (X node) in those cases is the
		 * method receiver, not the package.
		 */
	}
	return dset
}
func (b *builder) merge() ([]byte, error) {
	var buf bytes.Buffer
	if err := b.tpl.Execute(&buf, b); err != nil {
		return nil, err
	}

	f, err := parser.ParseFile(b.fset, "", &buf, 0)
	if err != nil {
		return nil, err
	}

	// b.imports(f)
	b.deleteImports(f)
	b.files["main.go"] = f

	pkg, _ := ast.NewPackage(b.fset, b.files, nil, nil)
	pkg.Name = "main"
	ret := ast.MergePackageFiles(pkg, 0)

	// TODO: we re-print and re-parse the merged file because the merged
	// AST's position information is otherwise unreliable.
	buf.Reset()
	if err = format.Node(&buf, b.fset, ret); err != nil {
		return nil, err
	}
	ret, err = parser.ParseFile(b.fset, "", buf.Bytes(), 0)
	if err != nil {
		return nil, err
	}

	for _, spec := range b.imports {
		var name string
		if spec.Name != nil {
			name = spec.Name.Name
		}
		ipath, _ := strconv.Unquote(spec.Path.Value)
		addImport(b.fset, ret, name, ipath)
	}

	buf.Reset()
	if err := format.Node(&buf, b.fset, ret); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
func main() {
	fileSet := token.NewFileSet()
	comFileSet := token.NewFileSet() // The FileSet used to parse the COM declarations.
	mod := newModule(comFileSet)

	// Iterate over all the files specified on the command line.
	for _, filename := range os.Args[1:] {
		f, err := parser.ParseFile(fileSet, filename, nil, parser.ParseComments)
		if err != nil {
			log.Fatalln("error parsing the source file", filename, ":", err)
		}

		// Iterate over all the comments in the current file, picking out the ones that
		// start with "com". Join them into a long string to be parsed, starting with a
		// package declaration.
		chunks := []string{"package " + f.Name.Name}
		for _, cg := range f.Comments {
			for _, c := range cg.List {
				text := strings.TrimSpace(strings.Trim(c.Text, "/*"))
				if strings.HasPrefix(text, "com") {
					text = text[len("com"):]
					if text == "" || !strings.Contains(" \t\n", text[:1]) {
						continue
					}
					text = strings.TrimSpace(text)
					chunks = append(chunks, text)
				}
			}
		}
		comDecls := strings.Join(chunks, "\n")

		// Now parse the concatenated result as a Go source file.
		comAST, err := parser.ParseFile(comFileSet, filename, comDecls, parser.ParseComments)
		if err != nil {
			log.Fatalln("error parsing the COM declarations from", filename, ":", err)
		}
		err = mod.loadFile(comAST)
		if err != nil {
			log.Fatalln("error loading declarations from", filename, "into module:", err)
		}
	}

	err := mod.write(os.Stdout)
	if err != nil {
		log.Fatalln("error generating output:", err)
	}
}
// rewriteGoFile rewrites import statements in the named file
// according to the rules for func qualify.
func rewriteGoFile(name, qual string, paths []string) error {
	debugln("rewriteGoFile", name, ",", qual, ",", paths)
	printerConfig := &printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, name, nil, parser.ParseComments)
	if err != nil {
		return err
	}

	var changed bool
	for _, s := range f.Imports {
		name, err := strconv.Unquote(s.Path.Value)
		if err != nil {
			return err // can't happen
		}
		q := qualify(unqualify(name), qual, paths)
		if q != name {
			s.Path.Value = strconv.Quote(q)
			changed = true
		}
	}
	if !changed {
		return nil
	}

	var buffer bytes.Buffer
	if err = printerConfig.Fprint(&buffer, fset, f); err != nil {
		return err
	}
	fset = token.NewFileSet()
	f, err = parser.ParseFile(fset, name, &buffer, parser.ParseComments)
	if err != nil {
		return err
	}
	ast.SortImports(fset, f)

	tpath := name + ".temp"
	t, err := os.Create(tpath)
	if err != nil {
		return err
	}
	if err = printerConfig.Fprint(t, fset, f); err != nil {
		return err
	}
	if err = t.Close(); err != nil {
		return err
	}
	// This is required before the rename on windows.
	if err = os.Remove(name); err != nil {
		return err
	}
	return os.Rename(tpath, name)
}
func pkgForPath(path string) (*types.Package, error) {
	// collect filenames
	ctxt := build.Default
	pkginfo, err := ctxt.Import(path, "", 0)
	if err != nil {
		return nil, err
	}
	filenames := append(pkginfo.GoFiles, pkginfo.CgoFiles...)

	// parse files
	fset := token.NewFileSet()
	files := make([]*ast.File, len(filenames))
	for i, filename := range filenames {
		var err error
		files[i], err = parser.ParseFile(fset, filepath.Join(pkginfo.Dir, filename), nil, 0)
		if err != nil {
			return nil, err
		}
	}

	// typecheck files
	// (we only care about exports and thus can ignore function bodies)
	conf := types.Config{IgnoreFuncBodies: true, FakeImportC: true}
	return conf.Check(path, fset, files, nil)
}
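// Usage sketch: type-check a standard library package and list its
// package-level names (function bodies are skipped, per the config above).
func printFmtScope() {
	pkg, err := pkgForPath("fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Scope().Names())
}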
func TestIssue8518(t *testing.T) {
	fset := token.NewFileSet()
	conf := Config{
		Packages: make(map[string]*Package),
		Error:    func(err error) { t.Log(err) }, // don't exit after first error
		Import: func(imports map[string]*Package, path string) (*Package, error) {
			return imports[path], nil
		},
	}
	makePkg := func(path, src string) {
		f, err := parser.ParseFile(fset, path, src, 0)
		if err != nil {
			t.Fatal(err)
		}
		pkg, _ := conf.Check(path, fset, []*ast.File{f}, nil) // errors logged via conf.Error
		conf.Packages[path] = pkg
	}

	const libSrc = `
package a
import "missing"
const C1 = foo
const C2 = missing.C
`

	const mainSrc = `
package main
import "a"
var _ = a.C1
var _ = a.C2
`

	makePkg("a", libSrc)
	makePkg("main", mainSrc) // don't crash when type-checking this package
}
func TestMultiFileInitOrder(t *testing.T) {
	fset := token.NewFileSet()
	mustParse := func(src string) *ast.File {
		f, err := parser.ParseFile(fset, "main", src, 0)
		if err != nil {
			t.Fatal(err)
		}
		return f
	}

	fileA := mustParse(`package main; var a = 1`)
	fileB := mustParse(`package main; var b = 2`)

	// The initialization order must not depend on the parse
	// order of the files, only on the presentation order to
	// the type-checker.
	for _, test := range []struct {
		files []*ast.File
		want  string
	}{
		{[]*ast.File{fileA, fileB}, "[a = 1 b = 2]"},
		{[]*ast.File{fileB, fileA}, "[b = 2 a = 1]"},
	} {
		var info Info
		if _, err := new(Config).Check("main", fset, test.files, &info); err != nil {
			t.Fatal(err)
		}
		if got := fmt.Sprint(info.InitOrder); got != test.want {
			t.Fatalf("got %s; want %s", got, test.want)
		}
	}
}
func main() {
	fset := token.NewFileSet()

	// Parse the input string, []byte, or io.Reader,
	// recording position information in fset.
	// ParseFile returns an *ast.File, a syntax tree.
	f, err := parser.ParseFile(fset, "hello.go", hello, 0)
	if err != nil {
		log.Fatal(err) // parse error
	}

	// A Config controls various options of the type checker.
	// The defaults work fine except for one setting:
	// we must specify how to deal with imports.
	conf := types.Config{Importer: importer.Default()}

	// Type-check the package containing only file f.
	// Check returns a *types.Package.
	pkg, err := conf.Check("cmd/hello", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err) // type error
	}

	fmt.Printf("Package %q\n", pkg.Path())
	fmt.Printf("Name: %s\n", pkg.Name())
	fmt.Printf("Imports: %s\n", pkg.Imports())
	fmt.Printf("Scope: %s\n", pkg.Scope())
}
// LintFiles lints a set of files of a single package.
// The argument is a map of filename to source.
func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) {
	if len(files) == 0 {
		return nil, nil
	}
	pkg := &pkg{
		fset:  token.NewFileSet(),
		files: make(map[string]*file),
	}
	var pkgName string
	for filename, src := range files {
		f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments)
		if err != nil {
			return nil, err
		}
		if pkgName == "" {
			pkgName = f.Name.Name
		} else if f.Name.Name != pkgName {
			return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName)
		}
		pkg.files[filename] = &file{
			pkg:      pkg,
			f:        f,
			fset:     pkg.fset,
			src:      src,
			filename: filename,
		}
	}
	return pkg.lint(), nil
}
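// Usage sketch: lint a single in-memory file; the default formatting of
// Problem is enough for a demo.
func lintExample() {
	l := new(Linter)
	problems, err := l.LintFiles(map[string][]byte{
		"main.go": []byte("package main\n\nfunc main() {}\n"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range problems {
		fmt.Println(p)
	}
}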
func GetDeps(source string) (pkg, target string, deps, funcs, cflags, ldflags []string, err error) {
	isTest := strings.HasSuffix(source, "_test.go") && Test
	var file *ast.File
	flag := parser.ParseComments
	if !isTest {
		flag |= parser.ImportsOnly
	}
	file, err = parser.ParseFile(token.NewFileSet(), source, nil, flag)
	if err != nil {
		return
	}

	w := &Walker{
		Name:       "",
		Target:     "",
		pkgPos:     0,
		Deps:       []string{},
		Funcs:      []string{},
		CGoLDFlags: []string{},
		CGoCFlags:  []string{},
		ScanFuncs:  isTest,
	}
	ast.Walk(w, file)

	deps = w.Deps
	pkg = w.Name
	target = w.Target
	funcs = w.Funcs
	cflags = RemoveDups(w.CGoCFlags)
	ldflags = RemoveDups(w.CGoLDFlags)
	return
}
func loadExportsGoPath(dir string) map[string]bool {
	exports := make(map[string]bool)
	buildPkg, err := build.ImportDir(dir, 0)
	if err != nil {
		if strings.Contains(err.Error(), "no buildable Go source files in") {
			return nil
		}
		fmt.Fprintf(os.Stderr, "could not import %q: %v", dir, err)
		return nil
	}
	fset := token.NewFileSet()
	for _, file := range buildPkg.GoFiles {
		f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)
		if err != nil {
			fmt.Fprintf(os.Stderr, "could not parse %q: %v", file, err)
			continue
		}
		for name := range f.Scope.Objects {
			if ast.IsExported(name) {
				exports[name] = true
			}
		}
	}
	return exports
}
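// Minimal driver sketch for loadExportsGoPath (the directory path is
// illustrative); a nil map simply produces no output.
func printExports() {
	for name := range loadExportsGoPath("/home/user/go/src/example") {
		fmt.Println(name)
	}
}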
func TestEvalContext(t *testing.T) {
	skipSpecialPlatforms(t)

	src := `
package p
import "fmt"
import m "math"
const c = 3.0
type T []int
func f(a int, s string) float64 {
	fmt.Println("calling f")
	_ = m.Pi // use package math
	const d int = c + 1
	var x int
	x = a + len(s)
	return float64(x)
}
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p", src, 0)
	if err != nil {
		t.Fatal(err)
	}

	pkg, err := Check("p", fset, []*ast.File{file})
	if err != nil {
		t.Fatal(err)
	}

	pkgScope := pkg.Scope()
	if n := pkgScope.NumChildren(); n != 1 {
		t.Fatalf("got %d file scopes, want 1", n)
	}

	fileScope := pkgScope.Child(0)
	if n := fileScope.NumChildren(); n != 1 {
		t.Fatalf("got %d functions scopes, want 1", n)
	}

	funcScope := fileScope.Child(0)

	var tests = []string{
		`true => true, untyped bool`,
		`fmt.Println => , func(a ...interface{}) (n int, err error)`,
		`c => 3, untyped float`,
		`T => , p.T`,
		`a => , int`,
		`s => , string`,
		`d => 4, int`,
		`x => , int`,
		`d/c => 1, int`,
		`c/2 => 3/2, untyped float`,
		`m.Pi < m.E => false, untyped bool`,
	}
	for _, test := range tests {
		str, typ := split(test, ", ")
		str, val := split(str, "=>")
		testEval(t, pkg, funcScope, str, nil, typ, val)
	}
}
// Rewrite modifies the AST to rewrite import statements and package import comments.
// src must be compatible with go/parser.ParseFile: the type of the argument
// for the src parameter must be string, []byte, or io.Reader.
//
// A return of nil, nil (no result, no error) means no changes are needed.
func Rewrite(fname string, src interface{}, prefix string, remove bool) (buf *bytes.Buffer, err error) {
	// Create the AST by parsing src.
	fset := token.NewFileSet() // positions are relative to fset
	f, err := parser.ParseFile(fset, fname, src, parser.ParseComments)
	if err != nil {
		log.Printf("Error parsing file %s, source: [%s], error: %s", fname, src, err)
		return nil, err
	}

	// Normalize the prefix so that it ends with a trailing slash.
	if prefix != "" && prefix[len(prefix)-1] != '/' {
		prefix += "/"
	}

	changed, err := RewriteImports(f, prefix, remove)
	if err != nil {
		log.Printf("Error rewriting imports in the AST: file %s - %s", fname, err)
		return nil, err
	}
	changed2, err := RewriteImportComments(f, fset, prefix, remove)
	if err != nil {
		log.Printf("Error rewriting import comments in the AST: file %s - %s", fname, err)
		return nil, err
	}

	if !changed && !changed2 {
		return nil, nil
	}

	buf = &bytes.Buffer{}
	err = format.Node(buf, fset, f)
	return buf, err
}
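// Usage sketch: prefix the imports of one file and print the rewritten
// source. The prefix value is illustrative; a nil, nil return means the
// file needed no changes.
func rewriteExample() {
	buf, err := Rewrite("main.go", nil, "vendor/example.com", false)
	if err != nil {
		log.Fatal(err)
	}
	if buf != nil {
		os.Stdout.Write(buf.Bytes())
	}
}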
// ParseGeneralApiInfo reads web/main.go to collect the general API info.
func (parser *Parser) ParseGeneralApiInfo(mainApiFile string) {
	fileSet := token.NewFileSet()
	fileTree, err := goparser.ParseFile(fileSet, mainApiFile, nil, goparser.ParseComments)
	if err != nil {
		log.Fatalf("Can not parse general API information: %v\n", err)
	}

	parser.Listing.SwaggerVersion = SwaggerVersion
	if fileTree.Comments != nil {
		for _, comment := range fileTree.Comments {
			for _, commentLine := range strings.Split(comment.Text(), "\n") {
				attribute := strings.ToLower(strings.Split(commentLine, " ")[0])
				switch attribute {
				case "@apiversion":
					parser.Listing.ApiVersion = strings.TrimSpace(commentLine[len(attribute):])
				case "@apititle":
					parser.Listing.Infos.Title = strings.TrimSpace(commentLine[len(attribute):])
				case "@apidescription":
					parser.Listing.Infos.Description = strings.TrimSpace(commentLine[len(attribute):])
				case "@termsofserviceurl":
					parser.Listing.Infos.TermsOfServiceUrl = strings.TrimSpace(commentLine[len(attribute):])
				case "@contact":
					parser.Listing.Infos.Contact = strings.TrimSpace(commentLine[len(attribute):])
				case "@licenseurl":
					parser.Listing.Infos.LicenseUrl = strings.TrimSpace(commentLine[len(attribute):])
				case "@license":
					parser.Listing.Infos.License = strings.TrimSpace(commentLine[len(attribute):])
				}
			}
		}
	}
}
// Process takes a Go source file and bumps version declaration according to conf.
// Returns the modified code and a map from identifiers to updated versions and an error, if any.
func (conf Config) Process(filename string, src interface{}) ([]byte, map[string]string, error) {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
	if err != nil {
		return nil, nil, err
	}

	versions, err := conf.ProcessNode(fset, file)
	if err != nil {
		return nil, nil, err
	}

	var buf bytes.Buffer
	err = printer.Fprint(&buf, fset, file)
	if err != nil {
		return nil, nil, err
	}

	out := buf.Bytes()
	out, err = format.Source(out)
	if err != nil {
		return nil, nil, err
	}
	return out, versions, nil
}
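// Call sketch: the zero-value Config is assumed usable here; real callers
// would set whatever bumping rules the surrounding package defines.
func processExample() {
	var conf Config
	out, versions, err := conf.Process("version.go", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%v\n%s", versions, out)
}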
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) {
	var files []*File
	var astFiles []*ast.File
	g.pkg = new(Package)
	fs := token.NewFileSet()
	for _, name := range names {
		if !strings.HasSuffix(name, ".go") {
			continue
		}
		parsedFile, err := parser.ParseFile(fs, name, text, 0)
		if err != nil {
			log.Fatalf("parsing package: %s: %s", name, err)
		}
		astFiles = append(astFiles, parsedFile)
		files = append(files, &File{
			file: parsedFile,
			pkg:  g.pkg,
		})
	}
	if len(astFiles) == 0 {
		log.Fatalf("%s: no buildable Go files", directory)
	}
	g.pkg.name = astFiles[0].Name.Name
	g.pkg.files = files
	g.pkg.dir = directory
	// Type check the package.
	g.pkg.check(fs, astFiles)
}
func TestIssue7245(t *testing.T) {
	src := `
package p
func (T) m() (res bool) { return }
type T struct{} // receiver type after method declaration
`
	f, err := parser.ParseFile(fset, "", src, 0)
	if err != nil {
		t.Fatal(err)
	}

	var conf Config
	defs := make(map[*ast.Ident]Object)
	_, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Defs: defs})
	if err != nil {
		t.Fatal(err)
	}

	m := f.Decls[0].(*ast.FuncDecl)
	res1 := defs[m.Name].(*Func).Type().(*Signature).Results().At(0)
	res2 := defs[m.Type.Results.List[0].Names[0]].(*Var)

	if res1 != res2 {
		t.Errorf("got %s (%p) != %s (%p)", res1, res1, res2, res2)
	}
}
func getLocalImports(filename string) (imports map[string]bool, err error) {
	source, err := ioutil.ReadFile(filename)
	if err != nil {
		return
	}
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, filename, source, parser.ImportsOnly)
	if err != nil {
		return
	}
	for _, decl := range file.Decls {
		importDecl, ok := decl.(*ast.GenDecl)
		if !ok {
			continue
		}
		for _, spec := range importDecl.Specs {
			importSpec, ok := spec.(*ast.ImportSpec)
			if !ok {
				continue
			}
			importPath, _ := strconv.Unquote(importSpec.Path.Value)
			if len(importPath) > 0 && importPath[0] == '.' {
				if imports == nil {
					imports = make(map[string]bool)
				}
				dir, _ := path.Split(filename)
				imports[path.Join(dir, path.Clean(importPath))] = true
			}
		}
	}
	return
}
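// Driver sketch: print each local (dot-relative) import of a file.
func showLocalImports(filename string) {
	imports, err := getLocalImports(filename)
	if err != nil {
		log.Fatal(err)
	}
	for dir := range imports {
		fmt.Println(dir)
	}
}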