func insertIntVar(file *ast.File, name string, value int) {
	var before, after []ast.Decl
	if len(file.Decls) > 0 {
		hasImport := false
		if genDecl, ok := file.Decls[0].(*ast.GenDecl); ok {
			hasImport = genDecl.Tok == token.IMPORT
		}
		if hasImport {
			before, after = []ast.Decl{file.Decls[0]}, file.Decls[1:]
		} else {
			after = file.Decls
		}
	}
	file.Decls = append(before,
		&ast.GenDecl{
			Tok: token.VAR,
			Specs: []ast.Spec{
				&ast.ValueSpec{
					Names: []*ast.Ident{ast.NewIdent(name)},
					Type:  ast.NewIdent("int"),
					Values: []ast.Expr{
						&ast.BasicLit{
							Kind:  token.INT,
							Value: fmt.Sprintf("%d", value),
						},
					},
				},
			},
		},
	)
	file.Decls = append(file.Decls, after...)
}
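// A minimal usage sketch for insertIntVar (illustrative, not part of the
// original snippet). It assumes the standard go/parser, go/printer, go/token,
// and os imports are available; it parses a small file and inserts
// `var answer int = 42` right after the import declaration.
func exampleInsertIntVar() {
	fset := token.NewFileSet()
	src := "package main\n\nimport \"fmt\"\n\nfunc main() { fmt.Println(answer) }\n"
	file, err := parser.ParseFile(fset, "main.go", src, 0)
	if err != nil {
		panic(err)
	}
	insertIntVar(file, "answer", 42)
	printer.Fprint(os.Stdout, fset, file) // prints the patched source
}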
func applyPatches(file *ast.File, fileSet *token.FileSet, importPath string) *ast.File {
	removeImport := func(path string) {
		for _, decl := range file.Decls {
			if d, ok := decl.(*ast.GenDecl); ok && d.Tok == token.IMPORT {
				for j, spec := range d.Specs {
					value := spec.(*ast.ImportSpec).Path.Value
					if value[1:len(value)-1] == path {
						d.Specs[j] = d.Specs[len(d.Specs)-1]
						d.Specs = d.Specs[:len(d.Specs)-1]
						return
					}
				}
			}
		}
	}

	removeFunction := func(name string) {
		for i, decl := range file.Decls {
			if d, ok := decl.(*ast.FuncDecl); ok {
				if d.Name.String() == name {
					file.Decls[i] = file.Decls[len(file.Decls)-1]
					file.Decls = file.Decls[:len(file.Decls)-1]
					return
				}
			}
		}
	}

	basename := filepath.Base(fileSet.Position(file.Pos()).Filename)
	switch {
	case importPath == "bytes_test" && basename == "equal_test.go":
		file, _ = parser.ParseFile(fileSet, basename, "package bytes_test", 0)
	case importPath == "crypto/rc4" && basename == "rc4_ref.go":
		// see https://codereview.appspot.com/40540049/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !amd64,!arm,!386\n\npackage rc4\n\n// XORKeyStream sets dst to the result of XORing src with the key stream.\n// Dst and src may be the same slice but otherwise should not overlap.\nfunc (c *Cipher) XORKeyStream(dst, src []byte) {\n i, j := c.i, c.j\n for k, v := range src {\n i += 1\n j += uint8(c.s[i])\n c.s[i], c.s[j] = c.s[j], c.s[i]\n dst[k] = v ^ uint8(c.s[uint8(c.s[i]+c.s[j])])\n }\n c.i, c.j = i, j\n}", 0)
	case importPath == "encoding/json" && basename == "stream_test.go":
		removeImport("net")
		removeFunction("TestBlocking")
	case importPath == "math/big" && basename == "arith.go":
		// see https://codereview.appspot.com/55470046/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// This file provides Go implementations of elementary multi-precision\n// arithmetic operations on word vectors. Needed for platforms without\n// assembly implementations of these routines.\n\npackage big\n\n// A Word represents a single digit of a multi-precision unsigned integer.\ntype Word uintptr\n\nconst (\n // Compute the size _S of a Word in bytes.\n _m = ^Word(0)\n _logS = _m>>8&1 + _m>>16&1 + _m>>32&1\n _S = 1 << _logS\n\n _W = _S << 3 // word size in bits\n _B = 1 << _W // digit base\n _M = _B - 1 // digit mask\n\n _W2 = _W / 2 // half word size in bits\n _B2 = 1 << _W2 // half digit base\n _M2 = _B2 - 1 // half digit mask\n)\n\n// ----------------------------------------------------------------------------\n// Elementary operations on words\n//\n// These operations are used by the vector operations below.\n\n// z1<<_W + z0 = x+y+c, with c == 0 or 1\nfunc addWW_g(x, y, c Word) (z1, z0 Word) {\n yc := y + c\n z0 = x + yc\n if z0 < x || yc < y {\n z1 = 1\n }\n return\n}\n\n// z1<<_W + z0 = x-y-c, with c == 0 or 1\nfunc subWW_g(x, y, c Word) (z1, z0 Word) {\n yc := y + c\n z0 = x - yc\n if z0 > x || yc < y {\n z1 = 1\n }\n return\n}\n\n// z1<<_W + z0 = x*y\n// Adapted from Warren, Hacker's Delight, p. 132.\nfunc mulWW_g(x, y Word) (z1, z0 Word) {\n x0 := x & _M2\n x1 := x >> _W2\n y0 := y & _M2\n y1 := y >> _W2\n w0 := x0 * y0\n t := x1*y0 + w0>>_W2\n w1 := t & _M2\n w2 := t >> _W2\n w1 += x0 * y1\n z1 = x1*y1 + w2 + w1>>_W2\n z0 = x * y\n return\n}\n\n// z1<<_W + z0 = x*y + c\nfunc mulAddWWW_g(x, y, c Word) (z1, z0 Word) {\n z1, zz0 := mulWW(x, y)\n if z0 = zz0 + c; z0 < zz0 {\n z1++\n }\n return\n}\n\n// Length of x in bits.\nfunc bitLen_g(x Word) (n int) {\n for ; x >= 0x8000; x >>= 16 {\n n += 16\n }\n if x >= 0x80 {\n x >>= 8\n n += 8\n }\n if x >= 0x8 {\n x >>= 4\n n += 4\n }\n if x >= 0x2 {\n x >>= 2\n n += 2\n }\n if x >= 0x1 {\n n++\n }\n return\n}\n\n// log2 computes the integer binary logarithm of x.\n// The result is the integer n for which 2^n <= x < 2^(n+1).\n// If x == 0, the result is -1.\nfunc log2(x Word) int {\n return bitLen(x) - 1\n}\n\n// Number of leading zeros in x.\nfunc leadingZeros(x Word) uint {\n return uint(_W - bitLen(x))\n}\n\n// q = (u1<<_W + u0 - r)/y\n// Adapted from Warren, Hacker's Delight, p. 152.\nfunc divWW_g(u1, u0, v Word) (q, r Word) {\n if u1 >= v {\n return 1<<_W - 1, 1<<_W - 1\n }\n\n s := leadingZeros(v)\n v <<= s\n\n vn1 := v >> _W2\n vn0 := v & _M2\n un32 := u1<<s | u0>>(_W-s)\n un10 := u0 << s\n un1 := un10 >> _W2\n un0 := un10 & _M2\n q1 := un32 / vn1\n rhat := un32 - q1*vn1\n\n for q1 >= _B2 || q1*vn0 > _B2*rhat+un1 {\n q1--\n rhat += vn1\n if rhat >= _B2 {\n break\n }\n }\n\n un21 := un32*_B2 + un1 - q1*v\n q0 := un21 / vn1\n rhat = un21 - q0*vn1\n\n for q0 >= _B2 || q0*vn0 > _B2*rhat+un0 {\n q0--\n rhat += vn1\n if rhat >= _B2 {\n break\n }\n }\n\n return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s\n}\n\nfunc addVV_g(z, x, y []Word) (c Word) {\n for i := range z {\n c, z[i] = addWW_g(x[i], y[i], c)\n }\n return\n}\n\nfunc subVV_g(z, x, y []Word) (c Word) {\n for i := range z {\n c, z[i] = subWW_g(x[i], y[i], c)\n }\n return\n}\n\nfunc addVW_g(z, x []Word, y Word) (c Word) {\n c = y\n for i := range z {\n c, z[i] = addWW_g(x[i], c, 0)\n }\n return\n}\n\nfunc subVW_g(z, x []Word, y Word) (c Word) {\n c = y\n for i := range z {\n c, z[i] = subWW_g(x[i], c, 0)\n }\n return\n}\n\nfunc shlVU_g(z, x []Word, s uint) (c Word) {\n if n := len(z); n > 0 {\n ŝ := _W - s\n w1 := x[n-1]\n c = w1 >> ŝ\n for i := n - 1; i > 0; i-- {\n w := w1\n w1 = x[i-1]\n z[i] = w<<s | w1>>ŝ\n }\n z[0] = w1 << s\n }\n return\n}\n\nfunc shrVU_g(z, x []Word, s uint) (c Word) {\n if n := len(z); n > 0 {\n ŝ := _W - s\n w1 := x[0]\n c = w1 << ŝ\n for i := 0; i < n-1; i++ {\n w := w1\n w1 = x[i+1]\n z[i] = w>>s | w1<<ŝ\n }\n z[n-1] = w1 >> s\n }\n return\n}\n\nfunc mulAddVWW_g(z, x []Word, y, r Word) (c Word) {\n c = r\n for i := range z {\n c, z[i] = mulAddWWW_g(x[i], y, c)\n }\n return\n}\n\nfunc addMulVVW_g(z, x []Word, y Word) (c Word) {\n for i := range z {\n z1, z0 := mulAddWWW_g(x[i], y, z[i])\n c, z[i] = addWW_g(z0, c, 0)\n c += z1\n }\n return\n}\n\nfunc divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {\n r = xn\n for i := len(z) - 1; i >= 0; i-- {\n z[i], r = divWW_g(r, x[i], y)\n }\n return\n}", 0)
	case importPath == "reflect_test" && basename == "all_test.go":
		removeImport("unsafe")
		removeFunction("TestAlignment")
		removeFunction("TestSliceOverflow")
	case importPath == "runtime" && strings.HasPrefix(basename, "zgoarch_"):
		file, _ = parser.ParseFile(fileSet, basename, "package runtime\nconst theGoarch = `js`\n", 0)
	case importPath == "sync/atomic_test" && basename == "atomic_test.go":
		removeFunction("TestUnaligned64")
	case importPath == "text/template/parse" && basename == "lex.go":
		// this patch will be removed as soon as goroutines are supported
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\npackage parse\nimport (\n \"container/list\"\n \"fmt\"\n \"strings\"\n \"unicode\"\n \"unicode/utf8\"\n)\n// item represents a token or text string returned from the scanner.\ntype item struct {\n typ itemType // The type of this item.\n pos Pos // The starting position, in bytes, of this item in the input string.\n val string // The value of this item.\n}\nfunc (i item) String() string {\n switch {\n case i.typ == itemEOF:\n return \"EOF\"\n case i.typ == itemError:\n return i.val\n case i.typ > itemKeyword:\n return fmt.Sprintf(\"<%s>\", i.val)\n case len(i.val) > 10:\n return fmt.Sprintf(\"%.10q...\", i.val)\n }\n return fmt.Sprintf(\"%q\", i.val)\n}\n// itemType identifies the type of lex items.\ntype itemType int\nconst (\n itemError itemType = iota // error occurred; value is text of error\n itemBool // boolean constant\n itemChar // printable ASCII character; grab bag for comma etc.\n itemCharConstant // character constant\n itemComplex // complex constant (1+2i); imaginary is just a number\n itemColonEquals // colon-equals (':=') introducing a declaration\n itemEOF\n itemField // alphanumeric identifier starting with '.'\n itemIdentifier // alphanumeric identifier not starting with '.'\n itemLeftDelim // left action delimiter\n itemLeftParen // '(' inside action\n itemNumber // simple number, including imaginary\n itemPipe // pipe symbol\n itemRawString // raw quoted string (includes quotes)\n itemRightDelim // right action delimiter\n itemRightParen // ')' inside action\n itemSpace // run of spaces separating arguments\n itemString // quoted string (includes quotes)\n itemText // plain text\n itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'\n // Keywords appear after all the rest.\n itemKeyword // used only to delimit the keywords\n itemDot // the cursor, spelled '.'\n itemDefine // define keyword\n itemElse // else keyword\n itemEnd // end keyword\n itemIf // if keyword\n itemNil // the untyped nil constant, easiest to treat as a keyword\n itemRange // range keyword\n itemTemplate // template keyword\n itemWith // with keyword\n)\nvar key = map[string]itemType{\n \".\": itemDot,\n \"define\": itemDefine,\n \"else\": itemElse,\n \"end\": itemEnd,\n \"if\": itemIf,\n \"range\": itemRange,\n \"nil\": itemNil,\n \"template\": itemTemplate,\n \"with\": itemWith,\n}\nconst eof = -1\n// stateFn represents the state of the scanner as a function that returns the next state.\ntype stateFn func(*lexer) stateFn\n// lexer holds the state of the scanner.\ntype lexer struct {\n name string // the name of the input; used only for error reports\n input string // the string being scanned\n leftDelim string // start of action\n rightDelim string // end of action\n state stateFn // the next lexing function to enter\n pos Pos // current position in the input\n start Pos // start position of this item\n width Pos // width of last rune read from input\n lastPos Pos // position of most recent item returned by nextItem\n items *list.List // scanned items\n parenDepth int // nesting depth of ( ) exprs\n}\n// next returns the next rune in the input.\nfunc (l *lexer) next() rune {\n if int(l.pos) >= len(l.input) {\n l.width = 0\n return eof\n }\n r, w := utf8.DecodeRuneInString(l.input[l.pos:])\n l.width = Pos(w)\n l.pos += l.width\n return r\n}\n// peek returns but does not consume the next rune in the input.\nfunc (l *lexer) peek() rune {\n r := l.next()\n l.backup()\n return r\n}\n// backup steps back one rune. Can only be called once per call of next.\nfunc (l *lexer) backup() {\n l.pos -= l.width\n}\n// emit passes an item back to the client.\nfunc (l *lexer) emit(t itemType) {\n l.items.PushBack(item{t, l.start, l.input[l.start:l.pos]})\n l.start = l.pos\n}\n// ignore skips over the pending input before this point.\nfunc (l *lexer) ignore() {\n l.start = l.pos\n}\n// accept consumes the next rune if it's from the valid set.\nfunc (l *lexer) accept(valid string) bool {\n if strings.IndexRune(valid, l.next()) >= 0 {\n return true\n }\n l.backup()\n return false\n}\n// acceptRun consumes a run of runes from the valid set.\nfunc (l *lexer) acceptRun(valid string) {\n for strings.IndexRune(valid, l.next()) >= 0 {\n }\n l.backup()\n}\n// lineNumber reports which line we're on, based on the position of\n// the previous item returned by nextItem. Doing it this way\n// means we don't have to worry about peek double counting.\nfunc (l *lexer) lineNumber() int {\n return 1 + strings.Count(l.input[:l.lastPos], \"\\n\")\n}\n// errorf returns an error token and terminates the scan by passing\n// back a nil pointer that will be the next state, terminating l.nextItem.\nfunc (l *lexer) errorf(format string, args ...interface{}) stateFn {\n l.items.PushBack(item{itemError, l.start, fmt.Sprintf(format, args...)})\n return nil\n}\n// nextItem returns the next item from the input.\nfunc (l *lexer) nextItem() item {\n element := l.items.Front()\n for element == nil {\n l.state = l.state(l)\n element = l.items.Front()\n }\n l.items.Remove(element)\n item := element.Value.(item)\n l.lastPos = item.pos\n return item\n}\n// lex creates a new scanner for the input string.\nfunc lex(name, input, left, right string) *lexer {\n if left == \"\" {\n left = leftDelim\n }\n if right == \"\" {\n right = rightDelim\n }\n l := &lexer{\n name: name,\n input: input,\n leftDelim: left,\n rightDelim: right,\n items: list.New(),\n }\n l.state = lexText\n return l\n}\n// state functions\nconst (\n leftDelim = \"{{\"\n rightDelim = \"}}\"\n leftComment = \"/*\"\n rightComment = \"*/\"\n)\n// lexText scans until an opening action delimiter, \"{{\".\nfunc lexText(l *lexer) stateFn {\n for {\n if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {\n if l.pos > l.start {\n l.emit(itemText)\n }\n return lexLeftDelim\n }\n if l.next() == eof {\n break\n }\n }\n // Correctly reached EOF.\n if l.pos > l.start {\n l.emit(itemText)\n }\n l.emit(itemEOF)\n return nil\n}\n// lexLeftDelim scans the left delimiter, which is known to be present.\nfunc lexLeftDelim(l *lexer) stateFn {\n l.pos += Pos(len(l.leftDelim))\n if strings.HasPrefix(l.input[l.pos:], leftComment) {\n return lexComment\n }\n l.emit(itemLeftDelim)\n l.parenDepth = 0\n return lexInsideAction\n}\n// lexComment scans a comment. The left comment marker is known to be present.\nfunc lexComment(l *lexer) stateFn {\n l.pos += Pos(len(leftComment))\n i := strings.Index(l.input[l.pos:], rightComment)\n if i < 0 {\n return l.errorf(\"unclosed comment\")\n }\n l.pos += Pos(i + len(rightComment))\n if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n return l.errorf(\"comment ends before closing delimiter\")\n }\n l.pos += Pos(len(l.rightDelim))\n l.ignore()\n return lexText\n}\n// lexRightDelim scans the right delimiter, which is known to be present.\nfunc lexRightDelim(l *lexer) stateFn {\n l.pos += Pos(len(l.rightDelim))\n l.emit(itemRightDelim)\n return lexText\n}\n// lexInsideAction scans the elements inside action delimiters.\nfunc lexInsideAction(l *lexer) stateFn {\n // Either number, quoted string, or identifier.\n // Spaces separate arguments; runs of spaces turn into itemSpace.\n // Pipe symbols separate and are emitted.\n if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n if l.parenDepth == 0 {\n return lexRightDelim\n }\n return l.errorf(\"unclosed left paren\")\n }\n switch r := l.next(); {\n case r == eof || isEndOfLine(r):\n return l.errorf(\"unclosed action\")\n case isSpace(r):\n return lexSpace\n case r == ':':\n if l.next() != '=' {\n return l.errorf(\"expected :=\")\n }\n l.emit(itemColonEquals)\n case r == '|':\n l.emit(itemPipe)\n case r == '\"':\n return lexQuote\n case r == '`':\n return lexRawQuote\n case r == '$':\n return lexVariable\n case r == '\\'':\n return lexChar\n case r == '.':\n // special look-ahead for \".field\" so we don't break l.backup().\n if l.pos < Pos(len(l.input)) {\n r := l.input[l.pos]\n if r < '0' || '9' < r {\n return lexField\n }\n }\n fallthrough // '.' can start a number.\n case r == '+' || r == '-' || ('0' <= r && r <= '9'):\n l.backup()\n return lexNumber\n case isAlphaNumeric(r):\n l.backup()\n return lexIdentifier\n case r == '(':\n l.emit(itemLeftParen)\n l.parenDepth++\n return lexInsideAction\n case r == ')':\n l.emit(itemRightParen)\n l.parenDepth--\n if l.parenDepth < 0 {\n return l.errorf(\"unexpected right paren %#U\", r)\n }\n return lexInsideAction\n case r <= unicode.MaxASCII && unicode.IsPrint(r):\n l.emit(itemChar)\n return lexInsideAction\n default:\n return l.errorf(\"unrecognized character in action: %#U\", r)\n }\n return lexInsideAction\n}\n// lexSpace scans a run of space characters.\n// One space has already been seen.\nfunc lexSpace(l *lexer) stateFn {\n for isSpace(l.peek()) {\n l.next()\n }\n l.emit(itemSpace)\n return lexInsideAction\n}\n// lexIdentifier scans an alphanumeric.\nfunc lexIdentifier(l *lexer) stateFn {\nLoop:\n for {\n switch r := l.next(); {\n case isAlphaNumeric(r):\n // absorb.\n default:\n l.backup()\n word := l.input[l.start:l.pos]\n if !l.atTerminator() {\n return l.errorf(\"bad character %#U\", r)\n }\n switch {\n case key[word] > itemKeyword:\n l.emit(key[word])\n case word[0] == '.':\n l.emit(itemField)\n case word == \"true\", word == \"false\":\n l.emit(itemBool)\n default:\n l.emit(itemIdentifier)\n }\n break Loop\n }\n }\n return lexInsideAction\n}\n// lexField scans a field: .Alphanumeric.\n// The . has been scanned.\nfunc lexField(l *lexer) stateFn {\n return lexFieldOrVariable(l, itemField)\n}\n// lexVariable scans a Variable: $Alphanumeric.\n// The $ has been scanned.\nfunc lexVariable(l *lexer) stateFn {\n if l.atTerminator() { // Nothing interesting follows -> \"$\".\n l.emit(itemVariable)\n return lexInsideAction\n }\n return lexFieldOrVariable(l, itemVariable)\n}\n// lexVariable scans a field or variable: [.$]Alphanumeric.\n// The . or $ has been scanned.\nfunc lexFieldOrVariable(l *lexer, typ itemType) stateFn {\n if l.atTerminator() { // Nothing interesting follows -> \".\" or \"$\".\n if typ == itemVariable {\n l.emit(itemVariable)\n } else {\n l.emit(itemDot)\n }\n return lexInsideAction\n }\n var r rune\n for {\n r = l.next()\n if !isAlphaNumeric(r) {\n l.backup()\n break\n }\n }\n if !l.atTerminator() {\n return l.errorf(\"bad character %#U\", r)\n }\n l.emit(typ)\n return lexInsideAction\n}\n// atTerminator reports whether the input is at valid termination character to\n// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases\n// like \"$x+2\" not being acceptable without a space, in case we decide one\n// day to implement arithmetic.\nfunc (l *lexer) atTerminator() bool {\n r := l.peek()\n if isSpace(r) || isEndOfLine(r) {\n return true\n }\n switch r {\n case eof, '.', ',', '|', ':', ')', '(':\n return true\n }\n // Does r start the delimiter? This can be ambiguous (with delim==\"//\", $x/2 will\n // succeed but should fail) but only in extremely rare cases caused by willfully\n // bad choice of delimiter.\n if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {\n return true\n }\n return false\n}\n// lexChar scans a character constant. The initial quote is already\n// scanned. Syntax checking is done by the parser.\nfunc lexChar(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case '\\\\':\n if r := l.next(); r != eof && r != '\\n' {\n break\n }\n fallthrough\n case eof, '\\n':\n return l.errorf(\"unterminated character constant\")\n case '\\'':\n break Loop\n }\n }\n l.emit(itemCharConstant)\n return lexInsideAction\n}\n// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This\n// isn't a perfect number scanner - for instance it accepts \".\" and \"0x0.2\"\n// and \"089\" - but when it's wrong the input is invalid and the parser (via\n// strconv) will notice.\nfunc lexNumber(l *lexer) stateFn {\n if !l.scanNumber() {\n return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n }\n if sign := l.peek(); sign == '+' || sign == '-' {\n // Complex: 1+2i. No spaces, must end in 'i'.\n if !l.scanNumber() || l.input[l.pos-1] != 'i' {\n return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n }\n l.emit(itemComplex)\n } else {\n l.emit(itemNumber)\n }\n return lexInsideAction\n}\nfunc (l *lexer) scanNumber() bool {\n // Optional leading sign.\n l.accept(\"+-\")\n // Is it hex?\n digits := \"0123456789\"\n if l.accept(\"0\") && l.accept(\"xX\") {\n digits = \"0123456789abcdefABCDEF\"\n }\n l.acceptRun(digits)\n if l.accept(\".\") {\n l.acceptRun(digits)\n }\n if l.accept(\"eE\") {\n l.accept(\"+-\")\n l.acceptRun(\"0123456789\")\n }\n // Is it imaginary?\n l.accept(\"i\")\n // Next thing mustn't be alphanumeric.\n if isAlphaNumeric(l.peek()) {\n l.next()\n return false\n }\n return true\n}\n// lexQuote scans a quoted string.\nfunc lexQuote(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case '\\\\':\n if r := l.next(); r != eof && r != '\\n' {\n break\n }\n fallthrough\n case eof, '\\n':\n return l.errorf(\"unterminated quoted string\")\n case '\"':\n break Loop\n }\n }\n l.emit(itemString)\n return lexInsideAction\n}\n// lexRawQuote scans a raw quoted string.\nfunc lexRawQuote(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case eof, '\\n':\n return l.errorf(\"unterminated raw quoted string\")\n case '`':\n break Loop\n }\n }\n l.emit(itemRawString)\n return lexInsideAction\n}\n// isSpace reports whether r is a space character.\nfunc isSpace(r rune) bool {\n return r == ' ' || r == '\\t'\n}\n// isEndOfLine reports whether r is an end-of-line character.\nfunc isEndOfLine(r rune) bool {\n return r == '\\r' || r == '\\n'\n}\n// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.\nfunc isAlphaNumeric(r rune) bool {\n return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)\n}", 0)
	}
	return file
}
func InstrumentFunctionsAndInterfaces(f *ast.File) bool {
	addMock4goImport := false
	ast.Inspect(f, func(n ast.Node) bool {
		switch x := n.(type) {
		case *ast.FuncDecl:
			if instrumentFunction(x) {
				addMock4goImport = true
			}
			if x.Recv != nil {
				// fieldList := x.Recv.List[0]
				// name := fieldList.Names[0]
				// with receiver
			} else {
				// without receiver
			}
		case *ast.GenDecl:
			if x.Tok == token.TYPE {
				typeSpec := x.Specs[0].(*ast.TypeSpec)
				if interfaceType, ok := typeSpec.Type.(*ast.InterfaceType); ok {
					if interfaceType.Incomplete {
						// TODO: what should we do here
						panic("incomplete interface type")
					}
					decls := instrumentInterface(typeSpec.Name.Name, interfaceType)
					addMock4goImport = true
					f.Decls = append(f.Decls, decls...)
				}
			}
		}
		return true
	})
	return addMock4goImport
}
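// A standalone sketch of the ast.Inspect traversal pattern used above
// (illustrative, not part of the original): the callback runs for every node
// in the tree and descends further only while it returns true.
func countFuncDecls(f *ast.File) int {
	n := 0
	ast.Inspect(f, func(node ast.Node) bool {
		if _, ok := node.(*ast.FuncDecl); ok {
			n++
		}
		return true
	})
	return n
}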
func removeImport(file *ast.File, ipath string) {
	var j int
	for j < len(file.Decls) {
		gen, ok := file.Decls[j].(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			j++
			continue
		}
		var i int
		for i < len(gen.Specs) {
			impspec := gen.Specs[i].(*ast.ImportSpec)
			if importPath(impspec) != ipath {
				i++
				continue
			}
			// If we found a match, pop the spec from gen.Specs
			gen.Specs[i] = gen.Specs[len(gen.Specs)-1]
			gen.Specs = gen.Specs[:len(gen.Specs)-1]
		}
		if len(gen.Specs) > 0 {
			j++
			continue
		}
		// Remove entire import decl if no imports left in it
		file.Decls[j] = file.Decls[len(file.Decls)-1]
		file.Decls = file.Decls[:len(file.Decls)-1]
	}
}
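// The loop above removes specs with the swap-with-last idiom: O(1) per
// removal, at the cost of losing the original import order. A standalone
// sketch of the same idiom (illustrative, not from the original source):
func swapRemoveSpec(specs []ast.Spec, i int) []ast.Spec {
	specs[i] = specs[len(specs)-1]
	return specs[:len(specs)-1]
}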
func transformRegisterPkgValues(file *ast.File, typeNames []string) {
	// Create init function declaration
	fdecl := &ast.FuncDecl{
		Doc:  nil,
		Recv: nil,
		Name: &ast.Ident{Name: "init"},
		Type: &ast.FuncType{},
		Body: &ast.BlockStmt{},
	}
	file.Decls = append(file.Decls, fdecl)
	// Add type registrations to fdecl.Body.List
	for _, name := range typeNames {
		stmt := &ast.ExprStmt{
			X: &ast.CallExpr{
				Fun: &ast.SelectorExpr{
					X:   &ast.Ident{Name: "circuit"}, // Refers to import circuit/use/circuit
					Sel: &ast.Ident{Name: "RegisterValue"},
				},
				Args: []ast.Expr{
					&ast.CompositeLit{
						Type: &ast.Ident{Name: name},
					},
				},
			},
		}
		fdecl.Body.List = append(fdecl.Body.List, stmt)
	}
}
func sortDecls(merged *ast.File) {
	var sortedDecls []ast.Decl
	for _, decl := range merged.Decls {
		if x, ok := decl.(*ast.GenDecl); ok && x.Tok == token.PACKAGE {
			sortedDecls = append(sortedDecls, decl)
		}
	}
	/*for _, decl := range merged.Decls {
		if x, ok := decl.(*ast.GenDecl); ok && x.Tok == token.IMPORT {
			sortedDecls = append(sortedDecls, decl)
			goon.DumpExpr(decl)
		}
	}*/
	var specs []ast.Spec
	for _, importSpec := range merged.Imports {
		if importSpec.Name != nil && importSpec.Name.Name == "." {
			continue
		}
		importSpec.EndPos = 0
		specs = append(specs, importSpec)
	}
	sortedDecls = append(sortedDecls, &ast.GenDecl{
		Tok:    token.IMPORT,
		Lparen: (token.Pos)(1), // Needs to be non-zero to be considered as a group.
		Specs:  specs,
	})
	//goon.DumpExpr(sortedDecls[len(sortedDecls)-1])
	for _, decl := range merged.Decls {
		if x, ok := decl.(*ast.GenDecl); ok && (x.Tok == token.IMPORT || x.Tok == token.PACKAGE) {
			continue
		}
		sortedDecls = append(sortedDecls, decl)
	}
	merged.Decls = sortedDecls
}
// addImport adds the import path to the file f, if absent.
func addImport(f *ast.File, path string) {
	if imports(f, path) {
		return
	}
	newImport := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: strconv.Quote(path),
		},
	}
	var impdecl *ast.GenDecl
	// Find an import decl to add to.
	for _, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if ok && gen.Tok == token.IMPORT {
			impdecl = gen
			break
		}
	}
	// No import decl found. Add one.
	if impdecl == nil {
		impdecl = &ast.GenDecl{
			Tok: token.IMPORT,
		}
		f.Decls = append(f.Decls, nil)
		copy(f.Decls[1:], f.Decls)
		f.Decls[0] = impdecl
	}
	// Ensure the import decl has parentheses, if needed.
	if len(impdecl.Specs) > 0 && !impdecl.Lparen.IsValid() {
		impdecl.Lparen = impdecl.Pos()
	}
	// Assume the import paths are alphabetically ordered.
	// If they are not, the result is ugly, but legal.
	insertAt := len(impdecl.Specs) // default to end of specs
	for i, spec := range impdecl.Specs {
		impspec := spec.(*ast.ImportSpec)
		if importPath(impspec) > path {
			insertAt = i
			break
		}
	}
	impdecl.Specs = append(impdecl.Specs, nil)
	copy(impdecl.Specs[insertAt+1:], impdecl.Specs[insertAt:])
	impdecl.Specs[insertAt] = newImport
	f.Imports = append(f.Imports, newImport)
}
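// Hedged usage sketch for addImport (not from the original source). The
// helpers imports() and importPath() referenced above are assumed to be
// defined elsewhere in the same package:
//
//	fset := token.NewFileSet()
//	f, _ := parser.ParseFile(fset, "x.go", "package x", 0)
//	addImport(f, "strconv") // results in: import "strconv"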
func mergeASTFile(dst, src *ast.File) *ast.File {
	if dst == nil {
		return src
	}
	if dst.Doc == nil {
		// Keep an existing package doc comment; only adopt src's when absent.
		dst.Doc = src.Doc
	}
	dst.Decls = append(dst.Decls, src.Decls...)
	return dst
}
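// Illustrative use of mergeASTFile (not from the original source): parse two
// files of the same package and fold the second one's declarations into the
// first.
func mergeExample() *ast.File {
	fset := token.NewFileSet()
	a, _ := parser.ParseFile(fset, "a.go", "package p\nfunc A() {}\n", 0)
	b, _ := parser.ParseFile(fset, "b.go", "package p\nfunc B() {}\n", 0)
	return mergeASTFile(a, b) // the result now declares both A and B
}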
// fileExports removes unexported declarations from src in place.
//
func (r *reader) fileExports(src *ast.File) {
	j := 0
	for _, d := range src.Decls {
		if r.filterDecl(d) {
			src.Decls[j] = d
			j++
		}
	}
	src.Decls = src.Decls[0:j]
}
func removeEmptyDeclGroups(f *ast.File) {
	i := 0
	for _, d := range f.Decls {
		if g, ok := d.(*ast.GenDecl); !ok || !isEmpty(f, g) {
			f.Decls[i] = d
			i++
		}
	}
	f.Decls = f.Decls[:i]
}
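// fileExports and removeEmptyDeclGroups above both use the classic in-place
// filter idiom: advance a write index over the survivors, then truncate. A
// generic sketch of the same shape (illustrative only):
func filterDecls(decls []ast.Decl, keep func(ast.Decl) bool) []ast.Decl {
	i := 0
	for _, d := range decls {
		if keep(d) {
			decls[i] = d
			i++
		}
	}
	return decls[:i]
}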
// Inline replaces each instance of identifier k with v.Ident in ast.File f,
// for k, v := range m.
// For all inlines that were triggered it also adds imports from v.Imports to f.
// In addition, it removes top level type declarations of the form
//	type k ...
// for all k in m.
//
// Every k in m should be a valid identifier.
// Every v.Ident should be a valid expression.
func Inline(fset *token.FileSet, f *ast.File, m map[string]Target) error {
	// Build the inline map.
	im := map[string]reflect.Value{}
	for k, v := range m {
		expr, err := parser.ParseExpr(k)
		if err != nil {
			return fmt.Errorf("failed to parse `%s`: %s", k, err)
		}
		if _, ok := expr.(*ast.Ident); !ok {
			return fmt.Errorf("expected identifier, got %s which is %T", k, expr)
		}
		expr, err = parser.ParseExpr(v.Ident)
		if err != nil {
			return fmt.Errorf("failed to parse `%s`: %s", v.Ident, err)
		}
		s := v.Ident
		if _, ok := expr.(*ast.StarExpr); ok {
			s = fmt.Sprintf("(%s)", s)
		}
		im[k] = reflect.ValueOf(ast.Ident{Name: s})
	}
	// Filter `type XXX ...` declarations out if we are inlining XXX.
	cmap := ast.NewCommentMap(fset, f, f.Comments)
	to := 0
	for _, d := range f.Decls {
		skip := false
		if t, ok := d.(*ast.GenDecl); ok {
			for _, s := range t.Specs {
				ts, ok := s.(*ast.TypeSpec)
				if !ok {
					continue
				}
				if _, ok = im[ts.Name.String()]; ok {
					skip = true
				}
			}
		}
		if !skip {
			f.Decls[to] = d
			to++
		}
	}
	if to != len(f.Decls) {
		f.Decls = f.Decls[:to]
		// Remove comments for the declarations that were filtered out.
		f.Comments = cmap.Filter(f).Comments()
	}
	// Add imports for the inlines that were triggered.
	for k := range inline(im, f) {
		for _, imp := range m[k].Imports {
			astutil.AddImport(fset, f, imp)
		}
	}
	return nil
}
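// Hypothetical call to Inline (illustrative; it assumes Target is the struct
// used above, with the Ident and Imports fields). This would replace every
// identifier Number with int64 and drop any `type Number ...` declaration:
//
//	err := Inline(fset, f, map[string]Target{
//		"Number": {Ident: "int64"},
//	})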
func extractMultipleStatementsAsFunc(
	astFile *ast.File,
	fileSet *token.FileSet,
	stmtsToExtract []ast.Node,
	parentNode ast.Node,
	extractedFuncName string) {
	params := varIdentsUsedIn(stmtsToExtract)
	varsDeclaredWithinStmtsToExtract := varIdentsDeclaredWithin(stmtsToExtract)
	util.MapStringAstIdentRemoveKeys(params, namesOf(varsDeclaredWithinStmtsToExtract))
	util.MapStringAstIdentRemoveKeys(params, namesOf(globalVarIdents(astFile)))
	allStmts := stmtsFromBlockStmt(parentNode)
	indexOfExtractedStmt := indexOf(stmtsToExtract[0].(ast.Stmt), *allStmts)
	varsUsedAfterwards := overlappingVarsIdentsUsedIn((*allStmts)[indexOfExtractedStmt+len(stmtsToExtract):], varsDeclaredWithinStmtsToExtract)
	newStmt := funcCallStmt(varsUsedAfterwards, extractedFuncName, params, (*allStmts)[indexOfExtractedStmt].Pos())
	replaceStmtsWithFuncCallStmt(newStmt, allStmts, indexOfExtractedStmt, len(stmtsToExtract))
	areaRemoved := areaRemoved(fileSet, stmtsToExtract[0].Pos(), stmtsToExtract[len(stmtsToExtract)-1].End())
	lineLengths := lineLengthsFrom(fileSet)
	lineNum, numLinesToCut, newLineLength := replacementModifications(fileSet, stmtsToExtract[0].Pos(), stmtsToExtract[len(stmtsToExtract)-1].End(), newStmt.End(), lineLengths, areaRemoved)
	shiftPosesAfterPos(astFile, newStmt, stmtsToExtract[len(stmtsToExtract)-1].End(), newStmt.End()-stmtsToExtract[len(stmtsToExtract)-1].End())
	multipleStmtFuncDecl := CopyNode(multipleStmtFuncDeclWith(
		extractedFuncName,
		fieldsFrom(params),
		stmtsFromNodes(stmtsToExtract),
		exprsFrom(varsUsedAfterwards),
	)).(*ast.FuncDecl)
	var moveOffset token.Pos
	RecalcPoses(multipleStmtFuncDecl, astFile.End()+2, &moveOffset, 0)
	astFile.Decls = append(astFile.Decls, multipleStmtFuncDecl)
	areaToBeAppended := insertionModificationsForStmts(astFile, multipleStmtFuncDecl, areaRemoved, exprsFrom(varsUsedAfterwards))
	lineLengths = append(
		lineLengths[:lineNum+1],
		lineLengths[lineNum+1+numLinesToCut:]...)
	lineLengths[lineNum] = newLineLength
	lineLengths = append(lineLengths, areaToBeAppended...)
	newFileSet := token.NewFileSet()
	newFileSet.AddFile(fileSet.File(1).Name(), 1, int(astFile.End()))
	newFileSet.File(1).SetLines(ConvertLineLengthsToLineOffsets(lineLengths))
	*fileSet = *newFileSet
	moveComments(astFile, moveOffset /*, needs a range to restrict which comments to move*/)
}
// deleteImport deletes the import path from the file f, if present.
func deleteImport(f *ast.File, path string) (deleted bool) {
	oldImport := importSpec(f, path)
	// Find the import node that imports path, if any.
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j, spec := range gen.Specs {
			impspec := spec.(*ast.ImportSpec)
			if oldImport != impspec {
				continue
			}
			// We found an import spec that imports path.
			// Delete it.
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]
			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
			} else if len(gen.Specs) == 1 {
				gen.Lparen = token.NoPos // drop parens
			}
			if j > 0 {
				// We deleted an entry but now there will be
				// a blank line-sized hole where the import was.
				// Close the hole by making the previous
				// import appear to "end" where this one did.
				gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End()
			}
			break
		}
	}
	// Delete it from f.Imports.
	for i, imp := range f.Imports {
		if imp == oldImport {
			copy(f.Imports[i:], f.Imports[i+1:])
			f.Imports = f.Imports[:len(f.Imports)-1]
			break
		}
	}
	return
}
func (b *builder) deleteMainFunc(f *ast.File) {
	var decls []ast.Decl
	for _, d := range f.Decls {
		fun, ok := d.(*ast.FuncDecl)
		if !ok {
			decls = append(decls, d)
			continue
		}
		if fun.Name.Name != "main" {
			decls = append(decls, fun)
		}
	}
	f.Decls = decls
}
func AddMock4goImport(f *ast.File) {
	importSpec := &ast.ImportSpec{
		Name: makeIdent("mock4go"),
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: fmt.Sprintf("%#v", Mock4goImport),
		},
	}
	importDecl := &ast.GenDecl{
		Tok:   token.IMPORT,
		Specs: []ast.Spec{importSpec},
	}
	f.Decls = append([]ast.Decl{importDecl}, f.Decls...)
}
func addImport(file *ast.File, pkg string) {
	decl := &ast.GenDecl{
		Tok: token.IMPORT,
	}
	file.Decls = append(file.Decls, nil)
	copy(file.Decls[1:], file.Decls[:])
	file.Decls[0] = decl
	spec := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: pkg,
		},
	}
	decl.Specs = append(decl.Specs, spec)
	file.Imports = append(file.Imports, spec)
}
func AddDecl(fset *token.FileSet, filename string, file *ast.File, withFileSet *token.FileSet, withFileName string, withFile *ast.File, withPosStart, withPosEnd token.Position, identMap st.IdentifierMap) (bool, *token.FileSet, *ast.File, *errors.GoRefactorError) {
	withNode := FindNode(withFileSet, withFile, withPosStart, withPosEnd)
	if withNode == nil {
		return false, nil, nil, errors.PrinterError("couldn't find node with given positions")
	}
	if _, ok := withNode.(ast.Decl); !ok {
		return false, nil, nil, errors.PrinterError("node is not a declaration")
	}
	withTokFile := GetFileFromFileSet(withFileSet, withFileName)
	withOldSize := withTokFile.Size()
	l := int(withNode.End() - withNode.Pos())
	fset, file = ReparseFile(file, filename, l, identMap)
	if filename == withFileName {
		withFileSet, withFile = fset, file
		withNode = FindNode(withFileSet, withFile, withPosStart, withPosEnd)
	}
	withNode = utils.CopyAstNode(withNode)
	tokFile := GetFileFromFileSet(fset, filename)
	withTokFile = GetFileFromFileSet(withFileSet, withFileName)
	if tokFile == nil || withTokFile == nil {
		return false, nil, nil, errors.PrinterError("couldn't find file " + filename + " in fileset")
	}
	lines := GetLines(tokFile)
	fmt.Printf("linesCount = %d\n", len(lines))
	tokFile.SetLines(lines[:len(lines)-l])
	lines = GetLines(tokFile)
	for i, offset := range lines {
		fmt.Printf("%d -> %s(%d)\n", i+1, fset.Position(tokFile.Pos(offset)), offset)
	}
	withNodeLines, _ := GetRangeLines(withTokFile, withNode.Pos(), withNode.End(), withOldSize)
	withMod := int(tokFile.Offset(file.Decls[len(file.Decls)-1].End()) + 1 - withTokFile.Offset(withNode.Pos()))
	fmt.Printf("withMod: %v\n", withMod)
	for i := range withNodeLines {
		withNodeLines[i] += withMod
	}
	tokFile.SetLines(addLinesOfRange(withTokFile.Offset(withNode.Pos()), withTokFile.Offset(withNode.End()), lines, withNodeLines, -1)) // to the end
	file.Decls = append(file.Decls, withNode.(ast.Decl))
	return true, fset, file, nil
}
func (b *builder) deleteImports(f *ast.File) {
	var decls []ast.Decl
	for _, d := range f.Decls {
		gen, ok := d.(*ast.GenDecl)
		if ok && gen.Tok == token.IMPORT {
			for _, spec := range gen.Specs {
				impspec := spec.(*ast.ImportSpec)
				if !b.hasImport(impspec) {
					b.imports = append(b.imports, impspec)
				}
			}
			continue
		}
		decls = append(decls, d)
	}
	f.Decls = decls
}
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(fset *token.FileSet, f *ast.File) {
	for i, d := range f.Decls {
		d, ok := d.(*ast.GenDecl)
		if !ok || d.Tok != token.IMPORT {
			// Not an import declaration, so we're done.
			// Imports are always first.
			break
		}
		if len(d.Specs) == 0 {
			// Empty import block, remove it.
			f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		}
		if !d.Lparen.IsValid() {
			// Not a block: sorted by default.
			continue
		}
		// Remove all newlines and sort the specs
		pline := -1
		for _, s := range d.Specs {
			line := fset.Position(s.Pos()).Line
			if pline == -1 {
				pline = line
				continue
			}
			for line-pline > 1 {
				fset.File(s.Pos()).MergeLine(line - 1)
				line = fset.Position(s.Pos()).Line
			}
		}
		d.Specs = sortSpecs(fset, f, d.Specs)
		// Deduping can leave a blank line before the rparen; clean that up.
		if len(d.Specs) > 0 {
			lastSpec := d.Specs[len(d.Specs)-1]
			lastLine := fset.Position(lastSpec.Pos()).Line
			if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
				fset.File(d.Rparen).MergeLine(rParenLine - 1)
			}
		}
	}
}
// deleteImport deletes the import path from the file f, if present.
func deleteImport(f *ast.File, path string) {
	oldImport := importSpec(f, path)
	// Find the import node that imports path, if any.
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j, spec := range gen.Specs {
			impspec := spec.(*ast.ImportSpec)
			if oldImport != impspec {
				continue
			}
			// We found an import spec that imports path.
			// Delete it.
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]
			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
			} else if len(gen.Specs) == 1 {
				gen.Lparen = token.NoPos // drop parens
			}
			break
		}
	}
	// Delete it from f.Imports.
	for i, imp := range f.Imports {
		if imp == oldImport {
			copy(f.Imports[i:], f.Imports[i+1:])
			f.Imports = f.Imports[:len(f.Imports)-1]
			break
		}
	}
}
// prependImport prepends, in place, an import of the given name to the
// list of imports in the *ast.File.
func prependImport(astfile *ast.File, name string) {
	litvalue := fmt.Sprintf("\"%s\"", name)
	// XXX The newline here at the beginning of the comment is a bit of
	// a hack. We'd like to find a way to include this comment
	// above or to the right of the added import in the printed
	// code. However, the spacing doesn't come out correctly if
	// just inserted into the AST as you'd think. So in order for
	// the generated code to be idempotent under gofmt, we have to
	// insert a newline here at the beginning.
	commentText := fmt.Sprintf("\n// *** IMPORT ADDED BY %s ***", args.progUpper)
	comment := &ast.CommentGroup{
		List: []*ast.Comment{
			&ast.Comment{
				Slash: token.NoPos,
				Text:  commentText,
			},
		},
	}
	decl := &ast.GenDecl{
		Doc:    comment,
		TokPos: token.NoPos,
		Tok:    token.IMPORT,
		Lparen: token.NoPos,
		Specs: []ast.Spec{
			&ast.ImportSpec{
				Doc:  nil,
				Name: nil,
				Path: &ast.BasicLit{
					ValuePos: token.NoPos,
					Kind:     token.STRING,
					Value:    litvalue,
				},
				Comment: nil,
				EndPos:  token.NoPos,
			},
		},
		Rparen: token.NoPos,
	}
	astfile.Decls = append([]ast.Decl{decl}, astfile.Decls...)
}
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(fset *token.FileSet, f *ast.File) {
	for i, d := range f.Decls {
		d, ok := d.(*ast.GenDecl)
		if !ok || d.Tok != token.IMPORT {
			// Not an import declaration, so we're done.
			// Imports are always first.
			break
		}
		if len(d.Specs) == 0 {
			// Empty import block, remove it.
			f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		}
		if !d.Lparen.IsValid() {
			// Not a block: sorted by default.
			continue
		}
		// Identify and sort runs of specs on successive lines.
		i := 0
		specs := d.Specs[:0]
		for j, s := range d.Specs {
			if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
				// j begins a new run. End this one.
				specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
				i = j
			}
		}
		specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
		d.Specs = specs
		// Deduping can leave a blank line before the rparen; clean that up.
		if len(d.Specs) > 0 {
			lastSpec := d.Specs[len(d.Specs)-1]
			lastLine := fset.Position(lastSpec.Pos()).Line
			if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
				fset.File(d.Rparen).MergeLine(rParenLine - 1)
			}
		}
	}
}
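// Both sortImports variants rely on (*token.File).MergeLine, which joins a
// line with the following one in the file's line table, effectively deleting
// a newline without reprinting the source. A hedged sketch of the call shape:
//
//	tf := fset.File(d.Rparen)
//	tf.MergeLine(fset.Position(d.Rparen).Line - 1) // pull the ')' up one line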
/*
 * Given a test func named TestDoesSomethingNeat, rewrites it as
 * It("does something neat", func() { __test_body_here__ }) and adds it
 * to the Describe's list of statements
 */
func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
	var funcIndex int = -1
	for index, child := range rootNode.Decls {
		if child == testFunc {
			funcIndex = index
			break
		}
	}
	if funcIndex < 0 {
		panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
	}
	var block *ast.BlockStmt = blockStatementFromDescribe(describe)
	block.List = append(block.List, createItStatementForTestFunc(testFunc))
	replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))
	// remove the old test func from the root node's declarations
	rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
	return
}
func Json2Ast(jsonReader io.Reader) (*ast.File, error) {
	var v map[string]interface{}
	var file *ast.File
	var err error
	dec := json.NewDecoder(jsonReader)
	file = &ast.File{
		Name: ast.NewIdent("main"),
	}
	if err := dec.Decode(&v); err != nil {
		return nil, err
	}
	var decls []ast.Decl
	if decls, err = convertMap("Top", v); err != nil {
		return nil, err
	}
	file.Decls = decls
	return file, nil
}
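// Driving Json2Ast with an in-memory document (illustrative; the JSON shape
// that convertMap expects is defined elsewhere, so the payload below is only
// a placeholder):
//
//	f, err := Json2Ast(strings.NewReader(`{"Top": []}`))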
// AddNamedImport adds the import path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
//	AddNamedImport(fset, f, "pathpkg", "path")
// adds
//	import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
	if imports(f, ipath) {
		return false
	}
	newImport := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: strconv.Quote(ipath),
		},
	}
	if name != "" {
		newImport.Name = &ast.Ident{Name: name}
	}
	// Find an import decl to add to.
	var (
		bestMatch  = -1
		lastImport = -1
		impDecl    *ast.GenDecl
		impIndex   = -1
		hasImports = false
	)
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if ok && gen.Tok == token.IMPORT {
			hasImports = true
			lastImport = i
			// Do not add to import "C", to avoid disrupting the
			// association with its doc comment, breaking cgo.
			if declImports(gen, "C") {
				continue
			}
			// Compute longest shared prefix with imports in this block.
			for j, spec := range gen.Specs {
				impspec := spec.(*ast.ImportSpec)
				n := matchLen(importPath(impspec), ipath)
				if n > bestMatch {
					bestMatch = n
					impDecl = gen
					impIndex = j
				}
			}
		}
	}
	// If no import decl found, add one after the last import.
	if impDecl == nil {
		// TODO(bradfitz): remove this hack. See comment below on
		// addImportViaSourceModification.
		if !hasImports {
			f2, err := addImportViaSourceModification(fset, f, name, ipath)
			if err == nil {
				*f = *f2
				return true
			}
			log.Printf("addImportViaSourceModification error: %v", err)
		}
		// TODO(bradfitz): fix above and resume using this old code:
		impDecl = &ast.GenDecl{
			Tok: token.IMPORT,
		}
		f.Decls = append(f.Decls, nil)
		copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
		f.Decls[lastImport+1] = impDecl
	}
	// Ensure the import decl has parentheses, if needed.
	if len(impDecl.Specs) > 0 && !impDecl.Lparen.IsValid() {
		impDecl.Lparen = impDecl.Pos()
	}
	insertAt := impIndex + 1
	if insertAt == 0 {
		insertAt = len(impDecl.Specs)
	}
	impDecl.Specs = append(impDecl.Specs, nil)
	copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
	impDecl.Specs[insertAt] = newImport
	if insertAt > 0 {
		// Assign same position as the previous import,
		// so that the sorter sees it as being in the same block.
		prev := impDecl.Specs[insertAt-1]
		newImport.Path.ValuePos = prev.Pos()
		newImport.EndPos = prev.Pos()
	}
	if len(impDecl.Specs) > 1 && impDecl.Lparen == 0 {
		// set Lparen to something not zero, so the printer prints
		// the full block rather just the first ImportSpec.
		impDecl.Lparen = 1
	}
	f.Imports = append(f.Imports, newImport)
	return true
}
// AddNamedImport adds the import path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
//	AddNamedImport(fset, f, "pathpkg", "path")
// adds
//	import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
	if imports(f, ipath) {
		return false
	}
	newImport := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: strconv.Quote(ipath),
		},
	}
	if name != "" {
		newImport.Name = &ast.Ident{Name: name}
	}
	// Find an import decl to add to.
	// The goal is to find an existing import
	// whose import path has the longest shared
	// prefix with ipath.
	var (
		bestMatch  = -1          // length of longest shared prefix
		lastImport = -1          // index in f.Decls of the file's final import decl
		impDecl    *ast.GenDecl  // import decl containing the best match
		impIndex   = -1          // spec index in impDecl containing the best match
	)
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if ok && gen.Tok == token.IMPORT {
			lastImport = i
			// Do not add to import "C", to avoid disrupting the
			// association with its doc comment, breaking cgo.
			if declImports(gen, "C") {
				continue
			}
			// Match an empty import decl if that's all that is available.
			if len(gen.Specs) == 0 && bestMatch == -1 {
				impDecl = gen
			}
			// Compute longest shared prefix with imports in this group.
			for j, spec := range gen.Specs {
				impspec := spec.(*ast.ImportSpec)
				n := matchLen(importPath(impspec), ipath)
				if n > bestMatch {
					bestMatch = n
					impDecl = gen
					impIndex = j
				}
			}
		}
	}
	// If no import decl found, add one after the last import.
	if impDecl == nil {
		impDecl = &ast.GenDecl{
			Tok: token.IMPORT,
		}
		if lastImport >= 0 {
			impDecl.TokPos = f.Decls[lastImport].End()
		} else {
			// There are no existing imports.
			// Our new import goes after the package declaration and after
			// the comment, if any, that starts on the same line as the
			// package declaration.
			impDecl.TokPos = f.Package
			file := fset.File(f.Package)
			pkgLine := file.Line(f.Package)
			for _, c := range f.Comments {
				if file.Line(c.Pos()) > pkgLine {
					break
				}
				impDecl.TokPos = c.End()
			}
		}
		f.Decls = append(f.Decls, nil)
		copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
		f.Decls[lastImport+1] = impDecl
	}
	// Insert new import at insertAt.
	insertAt := 0
	if impIndex >= 0 {
		// insert after the found import
		insertAt = impIndex + 1
	}
	impDecl.Specs = append(impDecl.Specs, nil)
	copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
	impDecl.Specs[insertAt] = newImport
	pos := impDecl.Pos()
	if insertAt > 0 {
		// If there is a comment after an existing import, preserve the comment
		// position by adding the new import after the comment.
		if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
			pos = spec.Comment.End()
		} else {
			// Assign same position as the previous import,
			// so that the sorter sees it as being in the same block.
			pos = impDecl.Specs[insertAt-1].Pos()
		}
	}
	if newImport.Name != nil {
		newImport.Name.NamePos = pos
	}
	newImport.Path.ValuePos = pos
	newImport.EndPos = pos
	// Clean up parens. impDecl contains at least one spec.
	if len(impDecl.Specs) == 1 {
		// Remove unneeded parens.
		impDecl.Lparen = token.NoPos
	} else if !impDecl.Lparen.IsValid() {
		// impDecl needs parens added.
		impDecl.Lparen = impDecl.Specs[0].Pos()
	}
	f.Imports = append(f.Imports, newImport)
	if len(f.Decls) <= 1 {
		return true
	}
	// Merge all the import declarations into the first one.
	var first *ast.GenDecl
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
			continue
		}
		if first == nil {
			first = gen
			continue // Don't touch the first one.
		}
		// We now know there is more than one package in this import
		// declaration. Ensure that it ends up parenthesized.
		first.Lparen = first.Pos()
		// Move the imports of the other import declaration to the first one.
		for _, spec := range gen.Specs {
			spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
			first.Specs = append(first.Specs, spec)
		}
		f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		i--
	}
	return true
}
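// Usage sketch for AddNamedImport, matching the doc comment above:
//
//	added := AddNamedImport(fset, f, "pathpkg", "path")
//	// f now contains: import pathpkg "path"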
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
	var delspecs []*ast.ImportSpec
	var delcomments []*ast.CommentGroup
	// Find the import nodes that import path, if any.
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j := 0; j < len(gen.Specs); j++ {
			spec := gen.Specs[j]
			impspec := spec.(*ast.ImportSpec)
			if impspec.Name == nil && name != "" {
				continue
			}
			if impspec.Name != nil && impspec.Name.Name != name {
				continue
			}
			if importPath(impspec) != path {
				continue
			}
			// We found an import spec that imports path.
			// Delete it.
			delspecs = append(delspecs, impspec)
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]
			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
				i--
				break
			} else if len(gen.Specs) == 1 {
				if impspec.Doc != nil {
					delcomments = append(delcomments, impspec.Doc)
				}
				if impspec.Comment != nil {
					delcomments = append(delcomments, impspec.Comment)
				}
				for _, cg := range f.Comments {
					// Found comment on the same line as the import spec.
					if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
						delcomments = append(delcomments, cg)
						break
					}
				}
				gen.Lparen = token.NoPos // drop parens
				spec := gen.Specs[0].(*ast.ImportSpec)
				if spec.Doc != nil {
					// Move the documentation above the import statement.
					gen.TokPos = spec.Doc.End() + 1
				}
				for _, cg := range f.Comments {
					if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line {
						for fset.Position(gen.TokPos).Line != fset.Position(spec.Pos()).Line {
							fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
						}
						break
					}
				}
			}
			if j > 0 {
				lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
				lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
				line := fset.Position(impspec.Path.ValuePos).Line
				// We deleted an entry but now there may be
				// a blank line-sized hole where the import was.
				if line-lastLine > 1 {
					// There was a blank line immediately preceding the deleted import,
					// so there's no need to close the hole.
					// Do nothing.
				} else {
					// There was no blank line. Close the hole.
					fset.File(gen.Rparen).MergeLine(line)
				}
			}
			j--
		}
	}
	// Delete imports from f.Imports.
	for i := 0; i < len(f.Imports); i++ {
		imp := f.Imports[i]
		for j, del := range delspecs {
			if imp == del {
				copy(f.Imports[i:], f.Imports[i+1:])
				f.Imports = f.Imports[:len(f.Imports)-1]
				copy(delspecs[j:], delspecs[j+1:])
				delspecs = delspecs[:len(delspecs)-1]
				i--
				break
			}
		}
	}
	// Delete comments from f.Comments.
	for i := 0; i < len(f.Comments); i++ {
		cg := f.Comments[i]
		for j, del := range delcomments {
			if cg == del {
				copy(f.Comments[i:], f.Comments[i+1:])
				f.Comments = f.Comments[:len(f.Comments)-1]
				copy(delcomments[j:], delcomments[j+1:])
				delcomments = delcomments[:len(delcomments)-1]
				i--
				break
			}
		}
	}
	if len(delspecs) > 0 {
		panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
	}
	return
}
func imp(fset *token.FileSet, af *ast.File, toggle []ImportDeclArg) *ast.File {
	add := map[ImportDecl]bool{}
	del := map[ImportDecl]bool{}
	for _, sda := range toggle {
		sd := ImportDecl{
			Path: sda.Path,
			Name: sda.Name,
		}
		if sda.Add {
			add[sd] = true
		} else {
			del[sd] = true
		}
	}
	var firstDecl *ast.GenDecl
	imports := map[ImportDecl]bool{}
	for _, decl := range af.Decls {
		if gdecl, ok := decl.(*ast.GenDecl); ok && len(gdecl.Specs) > 0 {
			hasC := false
			sj := 0
			for _, spec := range gdecl.Specs {
				if ispec, ok := spec.(*ast.ImportSpec); ok {
					sd := ImportDecl{
						Path: unquote(ispec.Path.Value),
					}
					if ispec.Name != nil {
						sd.Name = ispec.Name.String()
					}
					if sd.Path == "C" {
						hasC = true
					} else if del[sd] {
						if sj > 0 {
							if lspec, ok := gdecl.Specs[sj-1].(*ast.ImportSpec); ok {
								lspec.EndPos = ispec.Pos()
							}
						}
						continue
					} else {
						imports[sd] = true
					}
				}
				gdecl.Specs[sj] = spec
				sj += 1
			}
			gdecl.Specs = gdecl.Specs[:sj]
			if !hasC && firstDecl == nil {
				firstDecl = gdecl
			}
		}
	}
	if len(add) > 0 {
		if firstDecl == nil {
			firstDecl = &ast.GenDecl{
				Tok:    token.IMPORT,
				Lparen: 1,
			}
			af.Decls = append(af.Decls, firstDecl)
		} else if firstDecl.Lparen == token.NoPos {
			firstDecl.Lparen = 1
		}
		for sd := range add {
			if !imports[sd] {
				ispec := &ast.ImportSpec{
					Path: &ast.BasicLit{
						Value: quote(sd.Path),
						Kind:  token.STRING,
					},
				}
				if sd.Name != "" {
					ispec.Name = &ast.Ident{
						Name: sd.Name,
					}
				}
				firstDecl.Specs = append(firstDecl.Specs, ispec)
				imports[sd] = true
			}
		}
	}
	dj := 0
	for _, decl := range af.Decls {
		if gdecl, ok := decl.(*ast.GenDecl); ok {
			if len(gdecl.Specs) == 0 {
				continue
			}
		}
		af.Decls[dj] = decl
		dj += 1
	}
	af.Decls = af.Decls[:dj]
	ast.SortImports(fset, af)
	return af
}
// DeleteImport deletes the import path from the file f, if present.
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
	var delspecs []*ast.ImportSpec
	// Find the import nodes that import path, if any.
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j := 0; j < len(gen.Specs); j++ {
			spec := gen.Specs[j]
			impspec := spec.(*ast.ImportSpec)
			if importPath(impspec) != path {
				continue
			}
			// We found an import spec that imports path.
			// Delete it.
			delspecs = append(delspecs, impspec)
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]
			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
				i--
				break
			} else if len(gen.Specs) == 1 {
				gen.Lparen = token.NoPos // drop parens
			}
			if j > 0 {
				lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
				lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
				line := fset.Position(impspec.Path.ValuePos).Line
				// We deleted an entry but now there may be
				// a blank line-sized hole where the import was.
				if line-lastLine > 1 {
					// There was a blank line immediately preceding the deleted import,
					// so there's no need to close the hole.
					// Do nothing.
				} else {
					// There was no blank line. Close the hole.
					fset.File(gen.Rparen).MergeLine(line)
				}
			}
			j--
		}
	}
	// Delete them from f.Imports.
	for i := 0; i < len(f.Imports); i++ {
		imp := f.Imports[i]
		for j, del := range delspecs {
			if imp == del {
				copy(f.Imports[i:], f.Imports[i+1:])
				f.Imports = f.Imports[:len(f.Imports)-1]
				copy(delspecs[j:], delspecs[j+1:])
				delspecs = delspecs[:len(delspecs)-1]
				i--
				break
			}
		}
	}
	if len(delspecs) > 0 {
		panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
	}
	return
}
func (in *instrumenter) instrumentFile(f *ast.File, fset *token.FileSet, pkgpath string) error {
	pkgObj := in.instrumented[pkgpath]
	pkgCreated := false
	if pkgObj == nil {
		pkgCreated = true
		pkgObj = gocov.RegisterPackage(f.Name.Name) // FIXME(axw) use full package path
		in.instrumented[pkgpath] = pkgObj
	}
	state := &state{fset, f, pkgObj, nil}
	ast.Walk(&funcVisitor{state}, f)
	// Count the number of import GenDecl's. They're always first.
	nImportDecls := 0
	for _, decl := range f.Decls {
		if decl, ok := decl.(*ast.GenDecl); !ok || decl.Tok != token.IMPORT {
			break
		}
		nImportDecls++
	}
	// Redirect imports of instrumented packages.
	in.redirectImports(f)
	// Insert variable declarations for registered objects.
	var vardecls []ast.Decl
	pkgvarname := fmt.Sprint(pkgObj)
	if pkgCreated {
		// FIXME(axw) use full package path
		value := makeCall("gocov.RegisterPackage", makeLit(f.Name.Name))
		vardecls = append(vardecls, makeVarDecl(pkgvarname, value))
	}
	for _, fn := range state.functions {
		fnvarname := fmt.Sprint(fn)
		value := makeCall(pkgvarname+".RegisterFunction",
			makeLit(fn.Name), makeLit(fn.File),
			makeLit(fn.Start), makeLit(fn.End))
		vardecls = append(vardecls, makeVarDecl(fnvarname, value))
		for _, stmt := range fn.Statements {
			varname := fmt.Sprint(stmt)
			value := makeCall(fnvarname+".RegisterStatement",
				makeLit(stmt.Start), makeLit(stmt.End))
			vardecls = append(vardecls, makeVarDecl(varname, value))
		}
	}
	if len(f.Decls) > 0 {
		vardecls = append(vardecls, f.Decls[nImportDecls:]...)
		f.Decls = append(f.Decls[:nImportDecls], vardecls...)
	} else {
		f.Decls = vardecls
	}
	// Add a "gocov" import.
	if pkgCreated {
		gocovImportSpec := &ast.ImportSpec{
			Path: makeLit(gocovPackagePath).(*ast.BasicLit),
		}
		gocovImportGenDecl := &ast.GenDecl{
			Tok:   token.IMPORT,
			Specs: []ast.Spec{gocovImportSpec},
		}
		tail := make([]ast.Decl, len(f.Decls)-nImportDecls)
		copy(tail, f.Decls[nImportDecls:])
		head := append(f.Decls[:nImportDecls], gocovImportGenDecl)
		f.Decls = append(head, tail...)
	}
	// Clear out all cached comments. This forces the AST printer to use
	// node comments instead, repositioning them correctly.
	f.Comments = nil
	return nil
}