func (p *printer) file(src *ast.File) { p.setComment(src.Doc) p.print(src.Pos(), token.PACKAGE, blank) p.expr(src.Name) if len(src.Decls) > 0 { tok := token.ILLEGAL for _, d := range src.Decls { prev := tok tok = declToken(d) // if the declaration token changed (e.g., from CONST to TYPE) // or the next declaration has documentation associated with it, // print an empty line between top-level declarations // (because p.linebreak is called with the position of d, which // is past any documentation, the minimum requirement is satisfied // even w/o the extra getDoc(d) nil-check - leave it in case the // linebreak logic improves - there's already a TODO). min := 1 if prev != tok || getDoc(d) != nil { min = 2 } p.linebreak(p.lineFor(d.Pos()), min, ignore, false) p.decl(d) } } p.print(newline) }
func generateGodebugIdentifiers(f *ast.File) { // Variables that won't have suffixes. idents.ctx = createConflictFreeName("ctx", f, false) idents.ok = createConflictFreeName("ok", f, false) idents.scope = createConflictFreeName("scope", f, false) idents.receiver = createConflictFreeName("receiver", f, false) idents.recoverChan = createConflictFreeName("rr", f, false) idents.recoverChanChan = createConflictFreeName("r", f, false) idents.recovers = createConflictFreeName("recovers", f, false) idents.panicVal = createConflictFreeName("v", f, false) idents.panicChan = createConflictFreeName("panicChan", f, false) idents.godebug = generateGodebugPkgName(f) // Variables that will have suffixes. idents.result = createConflictFreeName("result", f, true) idents.input = createConflictFreeName("input", f, true) // Variables with names derived from the filename. base := strings.Map(func(r rune) rune { if !unicode.In(r, unicode.Digit, unicode.Letter) { return '_' } return r }, filepath.Base(fs.Position(f.Pos()).Filename)) if !unicode.IsLetter(rune(base[0])) { // identifiers must start with letters base = "a" + base } idents.fileScope = createConflictFreeName(base+"_scope", f, false) idents.fileContents = createConflictFreeName(base+"_contents", f, false) }
func (p *printer) file(src *ast.File) { p.setComment(src.Doc) p.print(src.Pos(), token.PACKAGE, blank) p.expr(src.Name) p.declList(src.Decls) p.print(newline) }
// applyPatches rewrites a handful of standard-library files that this
// compiler target cannot handle as-is: depending on (importPath, basename)
// it strips individual imports/functions from the parsed file, or replaces
// the whole file by re-parsing a patched source held in a string literal.
// It returns the (possibly replaced) *ast.File.
func applyPatches(file *ast.File, fileSet *token.FileSet, importPath string) *ast.File {
	// removeImport deletes the import spec with the given path from the
	// file's import declarations (swap-with-last; spec order not preserved).
	removeImport := func(path string) {
		for _, decl := range file.Decls {
			if d, ok := decl.(*ast.GenDecl); ok && d.Tok == token.IMPORT {
				for j, spec := range d.Specs {
					// Path.Value includes the surrounding quotes; strip them.
					value := spec.(*ast.ImportSpec).Path.Value
					if value[1:len(value)-1] == path {
						d.Specs[j] = d.Specs[len(d.Specs)-1]
						d.Specs = d.Specs[:len(d.Specs)-1]
						return
					}
				}
			}
		}
	}
	// removeFunction deletes the top-level function with the given name
	// (swap-with-last; declaration order not preserved).
	removeFunction := func(name string) {
		for i, decl := range file.Decls {
			if d, ok := decl.(*ast.FuncDecl); ok {
				if d.Name.String() == name {
					file.Decls[i] = file.Decls[len(file.Decls)-1]
					file.Decls = file.Decls[:len(file.Decls)-1]
					return
				}
			}
		}
	}
	basename := filepath.Base(fileSet.Position(file.Pos()).Filename)
	switch {
	case importPath == "bytes_test" && basename == "equal_test.go":
		file, _ = parser.ParseFile(fileSet, basename, "package bytes_test", 0)
	case importPath == "crypto/rc4" && basename == "rc4_ref.go":
		// see https://codereview.appspot.com/40540049/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !amd64,!arm,!386\n\npackage rc4\n\n// XORKeyStream sets dst to the result of XORing src with the key stream.\n// Dst and src may be the same slice but otherwise should not overlap.\nfunc (c *Cipher) XORKeyStream(dst, src []byte) {\n i, j := c.i, c.j\n for k, v := range src {\n i += 1\n j += uint8(c.s[i])\n c.s[i], c.s[j] = c.s[j], c.s[i]\n dst[k] = v ^ uint8(c.s[uint8(c.s[i]+c.s[j])])\n }\n c.i, c.j = i, j\n}", 0)
	case importPath == "encoding/json" && basename == "stream_test.go":
		removeImport("net")
		removeFunction("TestBlocking")
	case importPath == "math/big" && basename == "arith.go":
		// see https://codereview.appspot.com/55470046/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// This file provides Go implementations of elementary multi-precision\n// arithmetic operations on word vectors. Needed for platforms without\n// assembly implementations of these routines.\n\npackage big\n\n// A Word represents a single digit of a multi-precision unsigned integer.\ntype Word uintptr\n\nconst (\n // Compute the size _S of a Word in bytes.\n _m = ^Word(0)\n _logS = _m>>8&1 + _m>>16&1 + _m>>32&1\n _S = 1 << _logS\n\n _W = _S << 3 // word size in bits\n _B = 1 << _W // digit base\n _M = _B - 1 // digit mask\n\n _W2 = _W / 2 // half word size in bits\n _B2 = 1 << _W2 // half digit base\n _M2 = _B2 - 1 // half digit mask\n)\n\n// ----------------------------------------------------------------------------\n// Elementary operations on words\n//\n// These operations are used by the vector operations below.\n\n// z1<<_W + z0 = x+y+c, with c == 0 or 1\nfunc addWW_g(x, y, c Word) (z1, z0 Word) {\n yc := y + c\n z0 = x + yc\n if z0 < x || yc < y {\n z1 = 1\n }\n return\n}\n\n// z1<<_W + z0 = x-y-c, with c == 0 or 1\nfunc subWW_g(x, y, c Word) (z1, z0 Word) {\n yc := y + c\n z0 = x - yc\n if z0 > x || yc < y {\n z1 = 1\n }\n return\n}\n\n// z1<<_W + z0 = x*y\n// Adapted from Warren, Hacker's Delight, p. 132.\nfunc mulWW_g(x, y Word) (z1, z0 Word) {\n x0 := x & _M2\n x1 := x >> _W2\n y0 := y & _M2\n y1 := y >> _W2\n w0 := x0 * y0\n t := x1*y0 + w0>>_W2\n w1 := t & _M2\n w2 := t >> _W2\n w1 += x0 * y1\n z1 = x1*y1 + w2 + w1>>_W2\n z0 = x * y\n return\n}\n\n// z1<<_W + z0 = x*y + c\nfunc mulAddWWW_g(x, y, c Word) (z1, z0 Word) {\n z1, zz0 := mulWW(x, y)\n if z0 = zz0 + c; z0 < zz0 {\n z1++\n }\n return\n}\n\n// Length of x in bits.\nfunc bitLen_g(x Word) (n int) {\n for ; x >= 0x8000; x >>= 16 {\n n += 16\n }\n if x >= 0x80 {\n x >>= 8\n n += 8\n }\n if x >= 0x8 {\n x >>= 4\n n += 4\n }\n if x >= 0x2 {\n x >>= 2\n n += 2\n }\n if x >= 0x1 {\n n++\n }\n return\n}\n\n// log2 computes the integer binary logarithm of x.\n// The result is the integer n for which 2^n <= x < 2^(n+1).\n// If x == 0, the result is -1.\nfunc log2(x Word) int {\n return bitLen(x) - 1\n}\n\n// Number of leading zeros in x.\nfunc leadingZeros(x Word) uint {\n return uint(_W - bitLen(x))\n}\n\n// q = (u1<<_W + u0 - r)/y\n// Adapted from Warren, Hacker's Delight, p. 152.\nfunc divWW_g(u1, u0, v Word) (q, r Word) {\n if u1 >= v {\n return 1<<_W - 1, 1<<_W - 1\n }\n\n s := leadingZeros(v)\n v <<= s\n\n vn1 := v >> _W2\n vn0 := v & _M2\n un32 := u1<<s | u0>>(_W-s)\n un10 := u0 << s\n un1 := un10 >> _W2\n un0 := un10 & _M2\n q1 := un32 / vn1\n rhat := un32 - q1*vn1\n\n for q1 >= _B2 || q1*vn0 > _B2*rhat+un1 {\n q1--\n rhat += vn1\n if rhat >= _B2 {\n break\n }\n }\n\n un21 := un32*_B2 + un1 - q1*v\n q0 := un21 / vn1\n rhat = un21 - q0*vn1\n\n for q0 >= _B2 || q0*vn0 > _B2*rhat+un0 {\n q0--\n rhat += vn1\n if rhat >= _B2 {\n break\n }\n }\n\n return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s\n}\n\nfunc addVV_g(z, x, y []Word) (c Word) {\n for i := range z {\n c, z[i] = addWW_g(x[i], y[i], c)\n }\n return\n}\n\nfunc subVV_g(z, x, y []Word) (c Word) {\n for i := range z {\n c, z[i] = subWW_g(x[i], y[i], c)\n }\n return\n}\n\nfunc addVW_g(z, x []Word, y Word) (c Word) {\n c = y\n for i := range z {\n c, z[i] = addWW_g(x[i], c, 0)\n }\n return\n}\n\nfunc subVW_g(z, x []Word, y Word) (c Word) {\n c = y\n for i := range z {\n c, z[i] = subWW_g(x[i], c, 0)\n }\n return\n}\n\nfunc shlVU_g(z, x []Word, s uint) (c Word) {\n if n := len(z); n > 0 {\n ŝ := _W - s\n w1 := x[n-1]\n c = w1 >> ŝ\n for i := n - 1; i > 0; i-- {\n w := w1\n w1 = x[i-1]\n z[i] = w<<s | w1>>ŝ\n }\n z[0] = w1 << s\n }\n return\n}\n\nfunc shrVU_g(z, x []Word, s uint) (c Word) {\n if n := len(z); n > 0 {\n ŝ := _W - s\n w1 := x[0]\n c = w1 << ŝ\n for i := 0; i < n-1; i++ {\n w := w1\n w1 = x[i+1]\n z[i] = w>>s | w1<<ŝ\n }\n z[n-1] = w1 >> s\n }\n return\n}\n\nfunc mulAddVWW_g(z, x []Word, y, r Word) (c Word) {\n c = r\n for i := range z {\n c, z[i] = mulAddWWW_g(x[i], y, c)\n }\n return\n}\n\nfunc addMulVVW_g(z, x []Word, y Word) (c Word) {\n for i := range z {\n z1, z0 := mulAddWWW_g(x[i], y, z[i])\n c, z[i] = addWW_g(z0, c, 0)\n c += z1\n }\n return\n}\n\nfunc divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {\n r = xn\n for i := len(z) - 1; i >= 0; i-- {\n z[i], r = divWW_g(r, x[i], y)\n }\n return\n}", 0)
	case importPath == "reflect_test" && basename == "all_test.go":
		removeImport("unsafe")
		removeFunction("TestAlignment")
		removeFunction("TestSliceOverflow")
	case importPath == "runtime" && strings.HasPrefix(basename, "zgoarch_"):
		file, _ = parser.ParseFile(fileSet, basename, "package runtime\nconst theGoarch = `js`\n", 0)
	case importPath == "sync/atomic_test" && basename == "atomic_test.go":
		removeFunction("TestUnaligned64")
	case importPath == "text/template/parse" && basename == "lex.go":
		// this patch will be removed as soon as goroutines are supported
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\npackage parse\nimport (\n \"container/list\"\n \"fmt\"\n \"strings\"\n \"unicode\"\n \"unicode/utf8\"\n)\n// item represents a token or text string returned from the scanner.\ntype item struct {\n typ itemType // The type of this item.\n pos Pos // The starting position, in bytes, of this item in the input string.\n val string // The value of this item.\n}\nfunc (i item) String() string {\n switch {\n case i.typ == itemEOF:\n return \"EOF\"\n case i.typ == itemError:\n return i.val\n case i.typ > itemKeyword:\n return fmt.Sprintf(\"<%s>\", i.val)\n case len(i.val) > 10:\n return fmt.Sprintf(\"%.10q...\", i.val)\n }\n return fmt.Sprintf(\"%q\", i.val)\n}\n// itemType identifies the type of lex items.\ntype itemType int\nconst (\n itemError itemType = iota // error occurred; value is text of error\n itemBool // boolean constant\n itemChar // printable ASCII character; grab bag for comma etc.\n itemCharConstant // character constant\n itemComplex // complex constant (1+2i); imaginary is just a number\n itemColonEquals // colon-equals (':=') introducing a declaration\n itemEOF\n itemField // alphanumeric identifier starting with '.'\n itemIdentifier // alphanumeric identifier not starting with '.'\n itemLeftDelim // left action delimiter\n itemLeftParen // '(' inside action\n itemNumber // simple number, including imaginary\n itemPipe // pipe symbol\n itemRawString // raw quoted string (includes quotes)\n itemRightDelim // right action delimiter\n itemRightParen // ')' inside action\n itemSpace // run of spaces separating arguments\n itemString // quoted string (includes quotes)\n itemText // plain text\n itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'\n // Keywords appear after all the rest.\n itemKeyword // used only to delimit the keywords\n itemDot // the cursor, spelled '.'\n itemDefine // define keyword\n itemElse // else keyword\n itemEnd // end keyword\n itemIf // if keyword\n itemNil // the untyped nil constant, easiest to treat as a keyword\n itemRange // range keyword\n itemTemplate // template keyword\n itemWith // with keyword\n)\nvar key = map[string]itemType{\n \".\": itemDot,\n \"define\": itemDefine,\n \"else\": itemElse,\n \"end\": itemEnd,\n \"if\": itemIf,\n \"range\": itemRange,\n \"nil\": itemNil,\n \"template\": itemTemplate,\n \"with\": itemWith,\n}\nconst eof = -1\n// stateFn represents the state of the scanner as a function that returns the next state.\ntype stateFn func(*lexer) stateFn\n// lexer holds the state of the scanner.\ntype lexer struct {\n name string // the name of the input; used only for error reports\n input string // the string being scanned\n leftDelim string // start of action\n rightDelim string // end of action\n state stateFn // the next lexing function to enter\n pos Pos // current position in the input\n start Pos // start position of this item\n width Pos // width of last rune read from input\n lastPos Pos // position of most recent item returned by nextItem\n items *list.List // scanned items\n parenDepth int // nesting depth of ( ) exprs\n}\n// next returns the next rune in the input.\nfunc (l *lexer) next() rune {\n if int(l.pos) >= len(l.input) {\n l.width = 0\n return eof\n }\n r, w := utf8.DecodeRuneInString(l.input[l.pos:])\n l.width = Pos(w)\n l.pos += l.width\n return r\n}\n// peek returns but does not consume the next rune in the input.\nfunc (l *lexer) peek() rune {\n r := l.next()\n l.backup()\n return r\n}\n// backup steps back one rune. Can only be called once per call of next.\nfunc (l *lexer) backup() {\n l.pos -= l.width\n}\n// emit passes an item back to the client.\nfunc (l *lexer) emit(t itemType) {\n l.items.PushBack(item{t, l.start, l.input[l.start:l.pos]})\n l.start = l.pos\n}\n// ignore skips over the pending input before this point.\nfunc (l *lexer) ignore() {\n l.start = l.pos\n}\n// accept consumes the next rune if it's from the valid set.\nfunc (l *lexer) accept(valid string) bool {\n if strings.IndexRune(valid, l.next()) >= 0 {\n return true\n }\n l.backup()\n return false\n}\n// acceptRun consumes a run of runes from the valid set.\nfunc (l *lexer) acceptRun(valid string) {\n for strings.IndexRune(valid, l.next()) >= 0 {\n }\n l.backup()\n}\n// lineNumber reports which line we're on, based on the position of\n// the previous item returned by nextItem. Doing it this way\n// means we don't have to worry about peek double counting.\nfunc (l *lexer) lineNumber() int {\n return 1 + strings.Count(l.input[:l.lastPos], \"\\n\")\n}\n// errorf returns an error token and terminates the scan by passing\n// back a nil pointer that will be the next state, terminating l.nextItem.\nfunc (l *lexer) errorf(format string, args ...interface{}) stateFn {\n l.items.PushBack(item{itemError, l.start, fmt.Sprintf(format, args...)})\n return nil\n}\n// nextItem returns the next item from the input.\nfunc (l *lexer) nextItem() item {\n element := l.items.Front()\n for element == nil {\n l.state = l.state(l)\n element = l.items.Front()\n }\n l.items.Remove(element)\n item := element.Value.(item)\n l.lastPos = item.pos\n return item\n}\n// lex creates a new scanner for the input string.\nfunc lex(name, input, left, right string) *lexer {\n if left == \"\" {\n left = leftDelim\n }\n if right == \"\" {\n right = rightDelim\n }\n l := &lexer{\n name: name,\n input: input,\n leftDelim: left,\n rightDelim: right,\n items: list.New(),\n }\n l.state = lexText\n return l\n}\n// state functions\nconst (\n leftDelim = \"{{\"\n rightDelim = \"}}\"\n leftComment = \"/*\"\n rightComment = \"*/\"\n)\n// lexText scans until an opening action delimiter, \"{{\".\nfunc lexText(l *lexer) stateFn {\n for {\n if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {\n if l.pos > l.start {\n l.emit(itemText)\n }\n return lexLeftDelim\n }\n if l.next() == eof {\n break\n }\n }\n // Correctly reached EOF.\n if l.pos > l.start {\n l.emit(itemText)\n }\n l.emit(itemEOF)\n return nil\n}\n// lexLeftDelim scans the left delimiter, which is known to be present.\nfunc lexLeftDelim(l *lexer) stateFn {\n l.pos += Pos(len(l.leftDelim))\n if strings.HasPrefix(l.input[l.pos:], leftComment) {\n return lexComment\n }\n l.emit(itemLeftDelim)\n l.parenDepth = 0\n return lexInsideAction\n}\n// lexComment scans a comment. The left comment marker is known to be present.\nfunc lexComment(l *lexer) stateFn {\n l.pos += Pos(len(leftComment))\n i := strings.Index(l.input[l.pos:], rightComment)\n if i < 0 {\n return l.errorf(\"unclosed comment\")\n }\n l.pos += Pos(i + len(rightComment))\n if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n return l.errorf(\"comment ends before closing delimiter\")\n }\n l.pos += Pos(len(l.rightDelim))\n l.ignore()\n return lexText\n}\n// lexRightDelim scans the right delimiter, which is known to be present.\nfunc lexRightDelim(l *lexer) stateFn {\n l.pos += Pos(len(l.rightDelim))\n l.emit(itemRightDelim)\n return lexText\n}\n// lexInsideAction scans the elements inside action delimiters.\nfunc lexInsideAction(l *lexer) stateFn {\n // Either number, quoted string, or identifier.\n // Spaces separate arguments; runs of spaces turn into itemSpace.\n // Pipe symbols separate and are emitted.\n if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n if l.parenDepth == 0 {\n return lexRightDelim\n }\n return l.errorf(\"unclosed left paren\")\n }\n switch r := l.next(); {\n case r == eof || isEndOfLine(r):\n return l.errorf(\"unclosed action\")\n case isSpace(r):\n return lexSpace\n case r == ':':\n if l.next() != '=' {\n return l.errorf(\"expected :=\")\n }\n l.emit(itemColonEquals)\n case r == '|':\n l.emit(itemPipe)\n case r == '\"':\n return lexQuote\n case r == '`':\n return lexRawQuote\n case r == '$':\n return lexVariable\n case r == '\\'':\n return lexChar\n case r == '.':\n // special look-ahead for \".field\" so we don't break l.backup().\n if l.pos < Pos(len(l.input)) {\n r := l.input[l.pos]\n if r < '0' || '9' < r {\n return lexField\n }\n }\n fallthrough // '.' can start a number.\n case r == '+' || r == '-' || ('0' <= r && r <= '9'):\n l.backup()\n return lexNumber\n case isAlphaNumeric(r):\n l.backup()\n return lexIdentifier\n case r == '(':\n l.emit(itemLeftParen)\n l.parenDepth++\n return lexInsideAction\n case r == ')':\n l.emit(itemRightParen)\n l.parenDepth--\n if l.parenDepth < 0 {\n return l.errorf(\"unexpected right paren %#U\", r)\n }\n return lexInsideAction\n case r <= unicode.MaxASCII && unicode.IsPrint(r):\n l.emit(itemChar)\n return lexInsideAction\n default:\n return l.errorf(\"unrecognized character in action: %#U\", r)\n }\n return lexInsideAction\n}\n// lexSpace scans a run of space characters.\n// One space has already been seen.\nfunc lexSpace(l *lexer) stateFn {\n for isSpace(l.peek()) {\n l.next()\n }\n l.emit(itemSpace)\n return lexInsideAction\n}\n// lexIdentifier scans an alphanumeric.\nfunc lexIdentifier(l *lexer) stateFn {\nLoop:\n for {\n switch r := l.next(); {\n case isAlphaNumeric(r):\n // absorb.\n default:\n l.backup()\n word := l.input[l.start:l.pos]\n if !l.atTerminator() {\n return l.errorf(\"bad character %#U\", r)\n }\n switch {\n case key[word] > itemKeyword:\n l.emit(key[word])\n case word[0] == '.':\n l.emit(itemField)\n case word == \"true\", word == \"false\":\n l.emit(itemBool)\n default:\n l.emit(itemIdentifier)\n }\n break Loop\n }\n }\n return lexInsideAction\n}\n// lexField scans a field: .Alphanumeric.\n// The . has been scanned.\nfunc lexField(l *lexer) stateFn {\n return lexFieldOrVariable(l, itemField)\n}\n// lexVariable scans a Variable: $Alphanumeric.\n// The $ has been scanned.\nfunc lexVariable(l *lexer) stateFn {\n if l.atTerminator() { // Nothing interesting follows -> \"$\".\n l.emit(itemVariable)\n return lexInsideAction\n }\n return lexFieldOrVariable(l, itemVariable)\n}\n// lexVariable scans a field or variable: [.$]Alphanumeric.\n// The . or $ has been scanned.\nfunc lexFieldOrVariable(l *lexer, typ itemType) stateFn {\n if l.atTerminator() { // Nothing interesting follows -> \".\" or \"$\".\n if typ == itemVariable {\n l.emit(itemVariable)\n } else {\n l.emit(itemDot)\n }\n return lexInsideAction\n }\n var r rune\n for {\n r = l.next()\n if !isAlphaNumeric(r) {\n l.backup()\n break\n }\n }\n if !l.atTerminator() {\n return l.errorf(\"bad character %#U\", r)\n }\n l.emit(typ)\n return lexInsideAction\n}\n// atTerminator reports whether the input is at valid termination character to\n// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases\n// like \"$x+2\" not being acceptable without a space, in case we decide one\n// day to implement arithmetic.\nfunc (l *lexer) atTerminator() bool {\n r := l.peek()\n if isSpace(r) || isEndOfLine(r) {\n return true\n }\n switch r {\n case eof, '.', ',', '|', ':', ')', '(':\n return true\n }\n // Does r start the delimiter? This can be ambiguous (with delim==\"//\", $x/2 will\n // succeed but should fail) but only in extremely rare cases caused by willfully\n // bad choice of delimiter.\n if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {\n return true\n }\n return false\n}\n// lexChar scans a character constant. The initial quote is already\n// scanned. Syntax checking is done by the parser.\nfunc lexChar(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case '\\\\':\n if r := l.next(); r != eof && r != '\\n' {\n break\n }\n fallthrough\n case eof, '\\n':\n return l.errorf(\"unterminated character constant\")\n case '\\'':\n break Loop\n }\n }\n l.emit(itemCharConstant)\n return lexInsideAction\n}\n// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This\n// isn't a perfect number scanner - for instance it accepts \".\" and \"0x0.2\"\n// and \"089\" - but when it's wrong the input is invalid and the parser (via\n// strconv) will notice.\nfunc lexNumber(l *lexer) stateFn {\n if !l.scanNumber() {\n return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n }\n if sign := l.peek(); sign == '+' || sign == '-' {\n // Complex: 1+2i. No spaces, must end in 'i'.\n if !l.scanNumber() || l.input[l.pos-1] != 'i' {\n return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n }\n l.emit(itemComplex)\n } else {\n l.emit(itemNumber)\n }\n return lexInsideAction\n}\nfunc (l *lexer) scanNumber() bool {\n // Optional leading sign.\n l.accept(\"+-\")\n // Is it hex?\n digits := \"0123456789\"\n if l.accept(\"0\") && l.accept(\"xX\") {\n digits = \"0123456789abcdefABCDEF\"\n }\n l.acceptRun(digits)\n if l.accept(\".\") {\n l.acceptRun(digits)\n }\n if l.accept(\"eE\") {\n l.accept(\"+-\")\n l.acceptRun(\"0123456789\")\n }\n // Is it imaginary?\n l.accept(\"i\")\n // Next thing mustn't be alphanumeric.\n if isAlphaNumeric(l.peek()) {\n l.next()\n return false\n }\n return true\n}\n// lexQuote scans a quoted string.\nfunc lexQuote(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case '\\\\':\n if r := l.next(); r != eof && r != '\\n' {\n break\n }\n fallthrough\n case eof, '\\n':\n return l.errorf(\"unterminated quoted string\")\n case '\"':\n break Loop\n }\n }\n l.emit(itemString)\n return lexInsideAction\n}\n// lexRawQuote scans a raw quoted string.\nfunc lexRawQuote(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case eof, '\\n':\n return l.errorf(\"unterminated raw quoted string\")\n case '`':\n break Loop\n }\n }\n l.emit(itemRawString)\n return lexInsideAction\n}\n// isSpace reports whether r is a space character.\nfunc isSpace(r rune) bool {\n return r == ' ' || r == '\\t'\n}\n// isEndOfLine reports whether r is an end-of-line character.\nfunc isEndOfLine(r rune) bool {\n return r == '\\r' || r == '\\n'\n}\n// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.\nfunc isAlphaNumeric(r rune) bool {\n return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)\n}", 0)
	}
	return file
}
func offsetLine(fset *token.FileSet, af *ast.File, offset int) (line int) { defer func() { if err := recover(); err != nil { line = 0 } }() return fset.File(af.Pos()).Position(token.Pos(offset)).Line }
func parseFileset(wks filesystem.WorkspaceFS, f *ast.File, fs *token.FileSet) []TokenPosition { var res []TokenPosition if f.Name != nil { res = appendTokenPosition(wks, res, fs.Position(f.Pos()), f.Name.Name, "", PACKAGE) } for _, i := range f.Imports { if i.Path != nil { res = appendTokenPosition(wks, res, fs.Position(f.Pos()), i.Path.Value, "", IMPORT) } } for _, d := range f.Decls { switch x := d.(type) { case *ast.FuncDecl: pos := fs.Position(x.Pos()) if x.Recv != nil && len(x.Recv.List) > 0 { var recv *string n := x.Recv.List[0].Type id, ok := n.(*ast.Ident) if !ok { sid, ok := n.(*ast.StarExpr) if ok { id, ok = sid.X.(*ast.Ident) if ok { r := fmt.Sprintf("*%s", id.Name) recv = &r } } } else { recv = &id.Name } res = appendTokenPosition(wks, res, pos, x.Name.Name, *recv, METH) } else { res = appendTokenPosition(wks, res, pos, x.Name.Name, "", FUNC) } case *ast.GenDecl: switch x.Tok { case token.CONST: for _, s := range x.Specs { vs := s.(*ast.ValueSpec) res = appendTokenPosition(wks, res, fs.Position(vs.Pos()), vs.Names[0].Name, "", CONST) } case token.VAR: for _, s := range x.Specs { vs := s.(*ast.ValueSpec) res = appendTokenPosition(wks, res, fs.Position(vs.Pos()), vs.Names[0].Name, "", VAR) } case token.TYPE: for _, s := range x.Specs { vs := s.(*ast.TypeSpec) res = appendTokenPosition(wks, res, fs.Position(vs.Pos()), vs.Name.Name, "", TYPE) } } } } return res }
func (scp *schemaParser) packageForFile(gofile *ast.File) (*loader.PackageInfo, error) { for pkg, pkgInfo := range scp.program.AllPackages { if pkg.Name() == gofile.Name.Name { return pkgInfo, nil } } fn := scp.program.Fset.File(gofile.Pos()).Name() return nil, fmt.Errorf("unable to determine package for %s", fn) }
// nodeAtOffset, returns the ast.Node for the given offset. // Node types: *ast.Ident, *ast.SelectorExpr, *ast.ImportSpec func nodeAtOffset(af *ast.File, fset *token.FileSet, offset int) (ast.Node, error) { file := fset.File(af.Pos()) if file == nil { return nil, errors.New("ast.File not in token.FileSet") } // Prevent file.Pos from panicking. if offset < 0 || file.Size() < offset { return nil, fmt.Errorf("invalid offset: %d", offset) } v := &offsetVisitor{pos: file.Pos(offset)} ast.Walk(v, af) if v.node == nil { return nil, fmt.Errorf("no node at offset: %d", offset) } return v.node, nil }
func (p *printer) file(src *ast.File) { p.setComment(src.Doc) p.print(src.Pos(), token.PACKAGE, blank) p.expr(src.Name, ignoreMultiLine) if len(src.Decls) > 0 { tok := token.ILLEGAL for _, d := range src.Decls { prev := tok tok = declToken(d) // if the declaration token changed (e.g., from CONST to TYPE) // print an empty line between top-level declarations min := 1 if prev != tok { min = 2 } p.linebreak(d.Pos().Line, min, ignore, false) p.decl(d, ignoreMultiLine) } } p.print(newline) }
// PathEnclosingInterval returns the node that encloses the source
// interval [start, end), and all its ancestors up to the AST root.
//
// The definition of "enclosing" used by this function considers
// additional whitespace abutting a node to be enclosed by it.
// In this example:
//
//              z := x + y // add them
//                   <-A->
//                  <----B----->
//
// the ast.BinaryExpr(+) node is considered to enclose interval B
// even though its [Pos()..End()) is actually only interval A.
// This behaviour makes user interfaces more tolerant of imperfect
// input.
//
// This function treats tokens as nodes, though they are not included
// in the result. e.g. PathEnclosingInterval("+") returns the
// enclosing ast.BinaryExpr("x + y").
//
// If start==end, the 1-char interval following start is used instead.
//
// The 'exact' result is true if the interval contains only path[0]
// and perhaps some adjacent whitespace.  It is false if the interval
// overlaps multiple children of path[0], or if it contains only
// interior whitespace of path[0].
// In this example:
//
//              z := x + y // add them
//                <--C-->     <---E-->
//                  ^
//                  D
//
// intervals C, D and E are inexact.  C is contained by the
// z-assignment statement, because it spans three of its children (:=,
// x, +).  So too is the 1-char interval D, because it contains only
// interior whitespace of the assignment.  E is considered interior
// whitespace of the BlockStmt containing the assignment.
//
// Precondition: [start, end) both lie within the same file as root.
// TODO(adonovan): return (nil, false) in this case and remove precond.
// Requires FileSet; see tokenFileContainsPos.
//
// Postcondition: path is never nil; it always contains at least 'root'.
//
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
	// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging

	// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
	// Note: visit narrows the captured start/end to the current node's
	// interval as it recurses, and appends each visited node to path
	// (reversed to root-first order after the walk).
	var visit func(node ast.Node) bool
	visit = func(node ast.Node) bool {
		path = append(path, node)

		nodePos := node.Pos()
		nodeEnd := node.End()

		// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging

		// Intersect [start, end) with interval of node.
		if start < nodePos {
			start = nodePos
		}
		if end > nodeEnd {
			end = nodeEnd
		}

		// Find sole child that contains [start, end).
		children := childrenOf(node)
		l := len(children)
		for i, child := range children {
			// [childPos, childEnd) is unaugmented interval of child.
			childPos := child.Pos()
			childEnd := child.End()

			// [augPos, augEnd) is whitespace-augmented interval of child.
			augPos := childPos
			augEnd := childEnd
			if i > 0 {
				augPos = children[i-1].End() // start of preceding whitespace
			}
			if i < l-1 {
				nextChildPos := children[i+1].Pos()
				// Does [start, end) lie between child and next child?
				if start >= augEnd && end <= nextChildPos {
					return false // inexact match
				}
				augEnd = nextChildPos // end of following whitespace
			}

			// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
			// 	i, augPos, augEnd, start, end) // debugging

			// Does augmented child strictly contain [start, end)?
			if augPos <= start && end <= augEnd {
				_, isToken := child.(tokenNode)
				return isToken || visit(child)
			}

			// Does [start, end) overlap multiple children?
			// i.e. left-augmented child contains start
			// but LR-augmented child does not contain end.
			if start < childEnd && end > augEnd {
				break
			}
		}

		// No single child contained [start, end),
		// so node is the result.  Is it exact?

		// (It's tempting to put this condition before the
		// child loop, but it gives the wrong result in the
		// case where a node (e.g. ExprStmt) and its sole
		// child have equal intervals.)
		if start == nodePos && end == nodeEnd {
			return true // exact match
		}

		return false // inexact: overlaps multiple children
	}

	if start > end {
		start, end = end, start
	}

	if start < root.End() && end > root.Pos() {
		if start == end {
			end = start + 1 // empty interval => interval of size 1
		}
		exact = visit(root)

		// Reverse the path:
		for i, l := 0, len(path); i < l/2; i++ {
			path[i], path[l-1-i] = path[l-1-i], path[i]
		}
	} else {
		// Selection lies within whitespace preceding the
		// first (or following the last) declaration in the file.
		// The result nonetheless always includes the ast.File.
		path = append(path, root)
	}
	return
}
func (w *PkgWalker) Import(parentDir string, name string, conf *PkgConfig) (pkg *types.Package, err error) { defer func() { err := recover() if err != nil && typeVerbose { log.Println(err) } }() if strings.HasPrefix(name, ".") && parentDir != "" { name = filepath.Join(parentDir, name) } pkg = w.imported[name] if pkg != nil { if pkg == &w.importing { return nil, fmt.Errorf("cycle importing package %q", name) } return pkg, nil } if typeVerbose { log.Println("parser pkg", name) } var bp *build.Package if filepath.IsAbs(name) { bp, err = w.context.ImportDir(name, 0) } else { bp, err = w.context.Import(name, "", 0) } checkName := name if bp.ImportPath == "." { checkName = bp.Name } else { checkName = bp.ImportPath } if err != nil { return nil, err //if _, nogo := err.(*build.NoGoError); nogo { // return //} //return //log.Fatalf("pkg %q, dir %q: ScanDir: %v", name, info.Dir, err) } filenames := append(append([]string{}, bp.GoFiles...), bp.CgoFiles...) filenames = append(filenames, bp.TestGoFiles...) 
if name == "runtime" { n := fmt.Sprintf("zgoos_%s.go", w.context.GOOS) if !contains(filenames, n) { filenames = append(filenames, n) } n = fmt.Sprintf("zgoarch_%s.go", w.context.GOARCH) if !contains(filenames, n) { filenames = append(filenames, n) } } parserFiles := func(filenames []string, cursor *FileCursor) (files []*ast.File) { for _, file := range filenames { var f *ast.File if cursor != nil && cursor.fileName == file { f, err = w.parseFile(bp.Dir, file, cursor.src) cursor.pos = token.Pos(w.fset.File(f.Pos()).Base()) + token.Pos(cursor.cursorPos) cursor.fileDir = bp.Dir } else { f, err = w.parseFile(bp.Dir, file, nil) } if err != nil && typeVerbose { log.Printf("error parsing package %s: %s\n", name, err) } files = append(files, f) } return } files := parserFiles(filenames, conf.Cursor) xfiles := parserFiles(bp.XTestGoFiles, conf.Cursor) typesConf := types.Config{ IgnoreFuncBodies: conf.IgnoreFuncBodies, FakeImportC: true, Packages: w.gcimporter, Import: func(imports map[string]*types.Package, name string) (pkg *types.Package, err error) { if pkg != nil { return pkg, nil } if conf.AllowBinary && w.isBinaryPkg(name) { pkg = w.gcimporter[name] if pkg != nil && pkg.Complete() { return } pkg, err = gcimporter.Import(imports, name) if pkg != nil && pkg.Complete() { w.gcimporter[name] = pkg return } } return w.Import(bp.Dir, name, &PkgConfig{IgnoreFuncBodies: true, AllowBinary: true}) }, Error: func(err error) { if typeVerbose { log.Println(err) } }, } if pkg == nil { pkg, err = typesConf.Check(checkName, w.fset, files, conf.Info) } w.imported[name] = pkg if len(xfiles) > 0 { xpkg, _ := typesConf.Check(checkName+"_test", w.fset, xfiles, conf.Info) w.imported[checkName+"_test"] = xpkg } return }
func filename(file *ast.File, fset *token.FileSet) string { return fset.File(file.Pos()).Name() }