func extractGoType(messages messageMap, fset *token.FileSet, f *ast.File, typ string) error {
	// for castings
	calls, err := astutil.Calls(fset, f, typ)
	if err != nil {
		return err
	}
	for _, c := range calls {
		if len(c.Args) > 0 {
			lit, pos := astutil.StringLiteral(fset, c.Args[0])
			if pos == nil {
				p := fset.Position(c.Pos())
				log.Debugf("Skipping cast to %s (%v) - not a literal", typ, p)
				continue
			}
			comment := comments(fset, f, pos)
			if err := messages.AddString(&astutil.String{Value: lit, Position: pos}, comment); err != nil {
				return err
			}
		}
	}

	strings, err := astutil.Strings(fset, f, typ)
	if err != nil {
		return err
	}
	for _, s := range strings {
		comment := comments(fset, f, s.Position)
		if err := messages.AddString(s, comment); err != nil {
			return err
		}
	}
	return nil
}
func (r *definitionResult) toSerial(res *serial.Result, fset *token.FileSet) {
	definition := &serial.Definition{
		Desc:   r.descr,
		ObjPos: fset.Position(r.pos).String(),
	}
	res.Definition = definition
}
func checkIdents(t *testing.T, fset *token.FileSet, file *ast.File, idents map[identPos]interface{}, defs map[defPath]struct{}, g *Grapher, printAll bool) {
	ast.Inspect(file, func(n ast.Node) bool {
		if x, ok := n.(*ast.Ident); ok && !ignoreIdent(g, x) {
			pos, end := fset.Position(x.Pos()), fset.Position(x.End())
			if printAll {
				t.Logf("ident %q at %s:%d-%d", x.Name, pos.Filename, pos.Offset, end.Offset)
			}
			ip := identPos{pos.Filename, uint32(pos.Offset), uint32(end.Offset)}
			if obj, present := idents[ip]; !present {
				t.Errorf("unresolved ident %q at %s", x.Name, pos)
			} else if ref, ok := obj.(*Ref); ok {
				if printAll {
					t.Logf("ref to %+v from ident %q at %s:%d-%d", ref.Def, x.Name, pos.Filename, pos.Offset, end.Offset)
				}
				if *resolve {
					if _, resolved := defs[ref.Def.defPath()]; !resolved && !ignoreRef(ref.Def.defPath()) {
						t.Errorf("unresolved ref %q to %+v at %s", x.Name, ref.Def.defPath(), pos)
						unresolvedIdents++
					}
				}
			}
			return false
		}
		return true
	})
}
func FindAllExports(pkg *types.Package, fset *token.FileSet) []UnexportCandidate {
	candidates := []UnexportCandidate{}
	for _, name := range pkg.Scope().Names() {
		obj := pkg.Scope().Lookup(name)
		if !obj.Exported() {
			continue
		}
		displayName := obj.Name()
		if _, ok := obj.(*types.Func); ok {
			displayName += "()"
		}
		candidate := UnexportCandidate{obj.Name(), displayName, fset.Position(obj.Pos())}
		candidates = append(candidates, candidate)
		if tn, ok := obj.(*types.TypeName); ok {
			if str, ok := tn.Type().Underlying().(*types.Struct); ok {
				candidates = append(candidates, findStructFields(str, obj.Name(), fset)...)
			}
			ptrType := types.NewPointer(tn.Type())
			methodSet := types.NewMethodSet(ptrType)
			for i := 0; i < methodSet.Len(); i++ {
				methodSel := methodSet.At(i)
				method := methodSel.Obj()
				// Skip unexported methods and methods promoted from embedded
				// fields. The best available signal for promotion is a
				// selection index path longer than 1.
				if !method.Exported() || len(methodSel.Index()) > 1 {
					continue
				}
				candidate := UnexportCandidate{method.Name(), obj.Name() + "." + method.Name() + "()", fset.Position(method.Pos())}
				candidates = append(candidates, candidate)
			}
		}
	}
	return candidates
}
func getSourceString(node ast.Node, fset *token.FileSet) string {
	p1 := fset.Position(node.Pos())
	p2 := fset.Position(node.End())
	b := getFileBytes(p1.Filename)
	return string(b[p1.Offset:p2.Offset])
}
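// A minimal, self-contained sketch of the same offset-slicing idea, reading
// the source from an in-memory buffer instead of the getFileBytes helper
// (which is not shown here). Position.Offset is a byte offset into the file,
// so src[pos.Offset:end.Offset] recovers the exact source text of a node.
// exampleSourceSlice is illustrative only.
// (assumes imports: "fmt", "go/ast", "go/parser", "go/token", "log")
func exampleSourceSlice() {
	src := []byte("package p\n\nfunc add(a, b int) int { return a + b }\n")
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		if fn, ok := n.(*ast.FuncDecl); ok {
			p1 := fset.Position(fn.Pos())
			p2 := fset.Position(fn.End())
			fmt.Println(string(src[p1.Offset:p2.Offset])) // prints the full func declaration
		}
		return true
	})
}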
func (d docFinder) Find(af *ast.File, fset *token.FileSet) *token.Position {
	if len(af.Comments) != 0 {
		p := fset.Position(af.Comments[0].Pos())
		return &p
	}
	return nil
}
func (r *describeStmtResult) toSerial(res *serial.Result, fset *token.FileSet) {
	res.Describe = &serial.Describe{
		Desc:   r.description,
		Pos:    fset.Position(r.node.Pos()).String(),
		Detail: "unknown",
	}
}
// compareErrors compares the map of expected error messages with the list
// of found errors and reports discrepancies.
func compareErrors(t *testing.T, fset *token.FileSet, expected map[token.Pos]string, found scanner.ErrorList) {
	for _, error := range found {
		// error.Pos is a token.Position, but we want a token.Pos
		// so we can do a map lookup.
		pos := getPos(fset, error.Pos.Filename, error.Pos.Offset)
		if msg, found := expected[pos]; found {
			// We expect a message at pos; check if it matches.
			rx, err := regexp.Compile(msg)
			if err != nil {
				t.Errorf("%s: %v", error.Pos, err)
				continue
			}
			if match := rx.MatchString(error.Msg); !match {
				t.Errorf("%s: %q does not match %q", error.Pos, error.Msg, msg)
				continue
			}
			// We have a match - eliminate this error.
			delete(expected, pos)
		} else {
			// To keep in mind when analyzing failed test output:
			// if the same error position occurs multiple times in the list,
			// this message is triggered (because the first error at the
			// position removes it from the expected errors).
			t.Errorf("%s: unexpected error: %s", error.Pos, error.Msg)
		}
	}
	// There should be no expected errors left.
	if len(expected) > 0 {
		t.Errorf("%d errors not reported:", len(expected))
		for pos, msg := range expected {
			t.Errorf("%s: %s\n", fset.Position(pos), msg)
		}
	}
}
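// getPos above is referenced but not defined in this section. The sketch
// below is a plausible reconstruction (an assumption, not necessarily the
// original): it maps a filename/offset pair back to a token.Pos by scanning
// the FileSet for the matching token.File.
func getPos(fset *token.FileSet, filename string, offset int) token.Pos {
	found := token.NoPos
	fset.Iterate(func(f *token.File) bool {
		if f.Name() == filename {
			found = f.Pos(offset) // Pos panics if offset > f.Size()
			return false          // stop iterating
		}
		return true
	})
	return found
}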
func (c *CodeGenCreaters) processComments(fileSet *token.FileSet, commentGroup *ast.CommentGroup, typ string, spec CodeGenSpec) ([]CodeGen, error) {
	codeGens := []CodeGen{}
	if commentGroup == nil || len(commentGroup.List) == 0 {
		return codeGens, nil
	}
	for _, comment := range commentGroup.List {
		if comment.Text[:2] == "//" {
			content := strings.TrimSpace(comment.Text[2:])
			if len(content) == 0 || content[0] != '@' {
				continue
			}
			for _, codeGenCreater := range (*c)[typ] {
				if codeGen, err := codeGenCreater(content, spec); err == nil {
					codeGens = append(codeGens, codeGen)
				} else {
					if err == NotMatch {
						continue
					}
					position := fileSet.Position(comment.Pos())
					return nil, NewErrorFromPosition(position, err)
				}
			}
		}
	}
	return codeGens, nil
}
func ModifyLine(fset *token.FileSet, file *ast.File, filename string, identMap st.IdentifierMap, Pos token.Pos, mod int) (*token.FileSet, *ast.File, int) {
	baseMod := 1
	if mod > 0 {
		tokFile := GetFileFromFileSet(fset, filename)
		baseMod = tokFile.Base()
		Pos -= token.Pos(tokFile.Base()) - 1
		fset, file = ReparseFile(file, filename, mod, identMap)
		tokFile = GetFileFromFileSet(fset, filename)
		lines := GetLines(tokFile)
		tokFile.SetLines(lines[:len(lines)-mod])
	}
	tokFile := GetFileFromFileSet(fset, filename)
	lines := GetLines(tokFile)
	for i, offset := range lines {
		fmt.Printf("%d -> %s(%d)\n", i+1, fset.Position(tokFile.Pos(offset)), offset)
	}
	var li int
	for i, l := range lines {
		if l > tokFile.Offset(Pos) {
			li = i
			break
		}
	}
	for i := li; i < len(lines); i++ {
		lines[i] += mod
	}
	tokFile.SetLines(lines)
	for i, offset := range lines {
		fmt.Printf("%d -> %s(%d)\n", i+1, fset.Position(tokFile.Pos(offset)), offset)
	}
	return fset, file, baseMod
}
// Scan app source code for calls to X.Y(), where X is of type *Validation.
//
// Recognize these scenarios:
// - "Y" = "Validation" and is a member of the receiver.
//   (The common case for inline validation)
// - "X" is passed in to the func as a parameter.
//   (For structs implementing Validated)
//
// The line number to which a validation call is attributed is that of the
// surrounding ExprStmt. This is so that it matches what runtime.Callers()
// reports.
//
// The end result is that we can set the default validation key for each call
// to be the same as the local variable.
func getValidationKeys(fset *token.FileSet, funcDecl *ast.FuncDecl, imports map[string]string) map[int]string {
	var (
		lineKeys = make(map[int]string)
		// Check the func parameters and the receiver's members for the *revel.Validation type.
		validationParam = getValidationParameter(funcDecl, imports)
	)
	ast.Inspect(funcDecl.Body, func(node ast.Node) bool {
		// e.g. c.Validation.Required(arg) or v.Required(arg)
		callExpr, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		// e.g. c.Validation.Required or v.Required
		funcSelector, ok := callExpr.Fun.(*ast.SelectorExpr)
		if !ok {
			return true
		}
		switch x := funcSelector.X.(type) {
		case *ast.SelectorExpr: // e.g. c.Validation
			if x.Sel.Name != "Validation" {
				return true
			}
		case *ast.Ident: // e.g. v
			if validationParam == nil || x.Obj != validationParam {
				return true
			}
		default:
			return true
		}
		if len(callExpr.Args) == 0 {
			return true
		}
		// If the argument is a binary expression, take the first expression.
		// (e.g. c.Validation.Required(myName != ""))
		arg := callExpr.Args[0]
		if binExpr, ok := arg.(*ast.BinaryExpr); ok {
			arg = binExpr.X
		}
		// If it's a literal, skip it.
		if _, ok = arg.(*ast.BasicLit); ok {
			return true
		}
		if typeExpr := NewTypeExpr("", arg); typeExpr.Valid {
			lineKeys[fset.Position(callExpr.End()).Line] = typeExpr.TypeName("")
		}
		return true
	})
	return lineKeys
}
func DeleteNode(fset *token.FileSet, filename string, file *ast.File, posStart, posEnd token.Position) (bool, *errors.GoRefactorError) {
	tokFile := GetFileFromFileSet(fset, filename)
	if tokFile == nil {
		return false, errors.PrinterError("couldn't find file " + filename + " in fileset")
	}
	node := FindNode(fset, file, posStart, posEnd)
	if node == nil {
		return false, errors.PrinterError("couldn't find node with given positions")
	}
	lines := GetLines(tokFile)
	for i, offset := range lines {
		fmt.Printf("%d -> %s(%d)\n", i+1, fset.Position(tokFile.Pos(offset)), offset)
	}
	nodeLines, firstLine := GetRangeLines(tokFile, node.Pos(), node.End(), tokFile.Size())
	if _, ok := deleteNode(fset, posStart, posEnd, file); !ok {
		return false, errors.PrinterError("didn't find node to delete")
	}
	deleteCommentsInRange(file, node.Pos(), node.End())
	inc := -int(node.End() - node.Pos())
	fmt.Printf("\n%v, %v, mod = %v\n", nodeLines, firstLine, inc)
	FixPositions(node.Pos(), inc, file, true)
	tokFile.SetLines(removeLinesOfRange(tokFile.Offset(node.Pos()), tokFile.Offset(node.End()), lines, nodeLines, firstLine))
	return true, nil
}
func AddLineForRange(fset *token.FileSet, filename string, Pos, End token.Pos) {
	tokFile := GetFileFromFileSet(fset, filename)
	lines := GetLines(tokFile)
	for i, offset := range lines {
		fmt.Printf("%d -> %s(%d)\n", i+1, fset.Position(tokFile.Pos(offset)), offset)
	}
	var li int
	for i, l := range lines {
		if l > tokFile.Offset(Pos) {
			li = i
			break
		}
	}
	if li == 0 {
		li = len(lines) - 1
	}
	//mod := int(End-Pos) + tokFile.Offset(Pos) - lines[li-1]
	fmt.Printf("node --------- %d %d\n", tokFile.Offset(Pos), tokFile.Offset(End))
	mod := int(End - Pos)
	fmt.Printf("mod = %d\n", mod)
	newLines := make([]int, len(lines)+1)
	copy(newLines, lines[0:li-1])
	//newLines[li-1] = tokFile.Offset(Pos) - (tokFile.Offset(Pos) - lines[li-1])
	newLines[li-1] = tokFile.Offset(Pos)
	for i := li - 1; i < len(lines); i++ {
		newLines[i+1] = lines[i] + mod
	}
	tokFile.SetLines(newLines)
	for i, offset := range newLines {
		fmt.Printf("%d -> %s(%d)\n", i+1, fset.Position(tokFile.Pos(offset)), offset)
	}
}
func (r *describePackageResult) toSerial(res *serial.Result, fset *token.FileSet) {
	var members []*serial.DescribeMember
	for _, mem := range r.members {
		typ := mem.obj.Type()
		var val string
		switch mem := mem.obj.(type) {
		case *types.Const:
			val = mem.Val().String()
		case *types.TypeName:
			typ = typ.Underlying()
		}
		members = append(members, &serial.DescribeMember{
			Name:    mem.obj.Name(),
			Type:    typ.String(),
			Value:   val,
			Pos:     fset.Position(mem.obj.Pos()).String(),
			Kind:    tokenOf(mem.obj),
			Methods: methodsToSerial(r.pkg, mem.methods, fset),
		})
	}
	res.Describe = &serial.Describe{
		Desc:   r.description,
		Pos:    fset.Position(r.node.Pos()).String(),
		Detail: "package",
		Package: &serial.DescribePackage{
			Path:    r.pkg.Path(),
			Members: members,
		},
	}
}
func applyPatches(file *ast.File, fileSet *token.FileSet, importPath string) *ast.File {
	removeImport := func(path string) {
		for _, decl := range file.Decls {
			if d, ok := decl.(*ast.GenDecl); ok && d.Tok == token.IMPORT {
				for j, spec := range d.Specs {
					value := spec.(*ast.ImportSpec).Path.Value
					if value[1:len(value)-1] == path {
						d.Specs[j] = d.Specs[len(d.Specs)-1]
						d.Specs = d.Specs[:len(d.Specs)-1]
						return
					}
				}
			}
		}
	}

	removeFunction := func(name string) {
		for i, decl := range file.Decls {
			if d, ok := decl.(*ast.FuncDecl); ok {
				if d.Name.String() == name {
					file.Decls[i] = file.Decls[len(file.Decls)-1]
					file.Decls = file.Decls[:len(file.Decls)-1]
					return
				}
			}
		}
	}

	basename := filepath.Base(fileSet.Position(file.Pos()).Filename)
	switch {
	case importPath == "bytes_test" && basename == "equal_test.go":
		file, _ = parser.ParseFile(fileSet, basename, "package bytes_test", 0)

	case importPath == "crypto/rc4" && basename == "rc4_ref.go": // see https://codereview.appspot.com/40540049/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !amd64,!arm,!386\n\npackage rc4\n\n// XORKeyStream sets dst to the result of XORing src with the key stream.\n// Dst and src may be the same slice but otherwise should not overlap.\nfunc (c *Cipher) XORKeyStream(dst, src []byte) {\n i, j := c.i, c.j\n for k, v := range src {\n i += 1\n j += uint8(c.s[i])\n c.s[i], c.s[j] = c.s[j], c.s[i]\n dst[k] = v ^ uint8(c.s[uint8(c.s[i]+c.s[j])])\n }\n c.i, c.j = i, j\n}", 0)

	case importPath == "encoding/json" && basename == "stream_test.go":
		removeImport("net")
		removeFunction("TestBlocking")

	case importPath == "math/big" && basename == "arith.go": // see https://codereview.appspot.com/55470046/
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// This file provides Go implementations of elementary multi-precision\n// arithmetic operations on word vectors. Needed for platforms without\n// assembly implementations of these routines.\n\npackage big\n\n// A Word represents a single digit of a multi-precision unsigned integer.\ntype Word uintptr\n\nconst (\n // Compute the size _S of a Word in bytes.\n _m = ^Word(0)\n _logS = _m>>8&1 + _m>>16&1 + _m>>32&1\n _S = 1 << _logS\n\n _W = _S << 3 // word size in bits\n _B = 1 << _W // digit base\n _M = _B - 1 // digit mask\n\n _W2 = _W / 2 // half word size in bits\n _B2 = 1 << _W2 // half digit base\n _M2 = _B2 - 1 // half digit mask\n)\n\n// ----------------------------------------------------------------------------\n// Elementary operations on words\n//\n// These operations are used by the vector operations below.\n\n// z1<<_W + z0 = x+y+c, with c == 0 or 1\nfunc addWW_g(x, y, c Word) (z1, z0 Word) {\n yc := y + c\n z0 = x + yc\n if z0 < x || yc < y {\n z1 = 1\n }\n return\n}\n\n// z1<<_W + z0 = x-y-c, with c == 0 or 1\nfunc subWW_g(x, y, c Word) (z1, z0 Word) {\n yc := y + c\n z0 = x - yc\n if z0 > x || yc < y {\n z1 = 1\n }\n return\n}\n\n// z1<<_W + z0 = x*y\n// Adapted from Warren, Hacker's Delight, p. 132.\nfunc mulWW_g(x, y Word) (z1, z0 Word) {\n x0 := x & _M2\n x1 := x >> _W2\n y0 := y & _M2\n y1 := y >> _W2\n w0 := x0 * y0\n t := x1*y0 + w0>>_W2\n w1 := t & _M2\n w2 := t >> _W2\n w1 += x0 * y1\n z1 = x1*y1 + w2 + w1>>_W2\n z0 = x * y\n return\n}\n\n// z1<<_W + z0 = x*y + c\nfunc mulAddWWW_g(x, y, c Word) (z1, z0 Word) {\n z1, zz0 := mulWW(x, y)\n if z0 = zz0 + c; z0 < zz0 {\n z1++\n }\n return\n}\n\n// Length of x in bits.\nfunc bitLen_g(x Word) (n int) {\n for ; x >= 0x8000; x >>= 16 {\n n += 16\n }\n if x >= 0x80 {\n x >>= 8\n n += 8\n }\n if x >= 0x8 {\n x >>= 4\n n += 4\n }\n if x >= 0x2 {\n x >>= 2\n n += 2\n }\n if x >= 0x1 {\n n++\n }\n return\n}\n\n// log2 computes the integer binary logarithm of x.\n// The result is the integer n for which 2^n <= x < 2^(n+1).\n// If x == 0, the result is -1.\nfunc log2(x Word) int {\n return bitLen(x) - 1\n}\n\n// Number of leading zeros in x.\nfunc leadingZeros(x Word) uint {\n return uint(_W - bitLen(x))\n}\n\n// q = (u1<<_W + u0 - r)/y\n// Adapted from Warren, Hacker's Delight, p. 152.\nfunc divWW_g(u1, u0, v Word) (q, r Word) {\n if u1 >= v {\n return 1<<_W - 1, 1<<_W - 1\n }\n\n s := leadingZeros(v)\n v <<= s\n\n vn1 := v >> _W2\n vn0 := v & _M2\n un32 := u1<<s | u0>>(_W-s)\n un10 := u0 << s\n un1 := un10 >> _W2\n un0 := un10 & _M2\n q1 := un32 / vn1\n rhat := un32 - q1*vn1\n\n for q1 >= _B2 || q1*vn0 > _B2*rhat+un1 {\n q1--\n rhat += vn1\n if rhat >= _B2 {\n break\n }\n }\n\n un21 := un32*_B2 + un1 - q1*v\n q0 := un21 / vn1\n rhat = un21 - q0*vn1\n\n for q0 >= _B2 || q0*vn0 > _B2*rhat+un0 {\n q0--\n rhat += vn1\n if rhat >= _B2 {\n break\n }\n }\n\n return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s\n}\n\nfunc addVV_g(z, x, y []Word) (c Word) {\n for i := range z {\n c, z[i] = addWW_g(x[i], y[i], c)\n }\n return\n}\n\nfunc subVV_g(z, x, y []Word) (c Word) {\n for i := range z {\n c, z[i] = subWW_g(x[i], y[i], c)\n }\n return\n}\n\nfunc addVW_g(z, x []Word, y Word) (c Word) {\n c = y\n for i := range z {\n c, z[i] = addWW_g(x[i], c, 0)\n }\n return\n}\n\nfunc subVW_g(z, x []Word, y Word) (c Word) {\n c = y\n for i := range z {\n c, z[i] = subWW_g(x[i], c, 0)\n }\n return\n}\n\nfunc shlVU_g(z, x []Word, s uint) (c Word) {\n if n := len(z); n > 0 {\n ŝ := _W - s\n w1 := x[n-1]\n c = w1 >> ŝ\n for i := n - 1; i > 0; i-- {\n w := w1\n w1 = x[i-1]\n z[i] = w<<s | w1>>ŝ\n }\n z[0] = w1 << s\n }\n return\n}\n\nfunc shrVU_g(z, x []Word, s uint) (c Word) {\n if n := len(z); n > 0 {\n ŝ := _W - s\n w1 := x[0]\n c = w1 << ŝ\n for i := 0; i < n-1; i++ {\n w := w1\n w1 = x[i+1]\n z[i] = w>>s | w1<<ŝ\n }\n z[n-1] = w1 >> s\n }\n return\n}\n\nfunc mulAddVWW_g(z, x []Word, y, r Word) (c Word) {\n c = r\n for i := range z {\n c, z[i] = mulAddWWW_g(x[i], y, c)\n }\n return\n}\n\nfunc addMulVVW_g(z, x []Word, y Word) (c Word) {\n for i := range z {\n z1, z0 := mulAddWWW_g(x[i], y, z[i])\n c, z[i] = addWW_g(z0, c, 0)\n c += z1\n }\n return\n}\n\nfunc divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {\n r = xn\n for i := len(z) - 1; i >= 0; i-- {\n z[i], r = divWW_g(r, x[i], y)\n }\n return\n}", 0)

	case importPath == "reflect_test" && basename == "all_test.go":
		removeImport("unsafe")
		removeFunction("TestAlignment")
		removeFunction("TestSliceOverflow")

	case importPath == "runtime" && strings.HasPrefix(basename, "zgoarch_"):
		file, _ = parser.ParseFile(fileSet, basename, "package runtime\nconst theGoarch = `js`\n", 0)

	case importPath == "sync/atomic_test" && basename == "atomic_test.go":
		removeFunction("TestUnaligned64")

	case importPath == "text/template/parse" && basename == "lex.go": // this patch will be removed as soon as goroutines are supported
		file, _ = parser.ParseFile(fileSet, basename, "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\npackage parse\nimport (\n \"container/list\"\n \"fmt\"\n \"strings\"\n \"unicode\"\n \"unicode/utf8\"\n)\n// item represents a token or text string returned from the scanner.\ntype item struct {\n typ itemType // The type of this item.\n pos Pos // The starting position, in bytes, of this item in the input string.\n val string // The value of this item.\n}\nfunc (i item) String() string {\n switch {\n case i.typ == itemEOF:\n return \"EOF\"\n case i.typ == itemError:\n return i.val\n case i.typ > itemKeyword:\n return fmt.Sprintf(\"<%s>\", i.val)\n case len(i.val) > 10:\n return fmt.Sprintf(\"%.10q...\", i.val)\n }\n return fmt.Sprintf(\"%q\", i.val)\n}\n// itemType identifies the type of lex items.\ntype itemType int\nconst (\n itemError itemType = iota // error occurred; value is text of error\n itemBool // boolean constant\n itemChar // printable ASCII character; grab bag for comma etc.\n itemCharConstant // character constant\n itemComplex // complex constant (1+2i); imaginary is just a number\n itemColonEquals // colon-equals (':=') introducing a declaration\n itemEOF\n itemField // alphanumeric identifier starting with '.'\n itemIdentifier // alphanumeric identifier not starting with '.'\n itemLeftDelim // left action delimiter\n itemLeftParen // '(' inside action\n itemNumber // simple number, including imaginary\n itemPipe // pipe symbol\n itemRawString // raw quoted string (includes quotes)\n itemRightDelim // right action delimiter\n itemRightParen // ')' inside action\n itemSpace // run of spaces separating arguments\n itemString // quoted string (includes quotes)\n itemText // plain text\n itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'\n // Keywords appear after all the rest.\n itemKeyword // used only to delimit the keywords\n itemDot // the cursor, spelled '.'\n itemDefine // define keyword\n itemElse // else keyword\n itemEnd // end keyword\n itemIf // if keyword\n itemNil // the untyped nil constant, easiest to treat as a keyword\n itemRange // range keyword\n itemTemplate // template keyword\n itemWith // with keyword\n)\nvar key = map[string]itemType{\n \".\": itemDot,\n \"define\": itemDefine,\n \"else\": itemElse,\n \"end\": itemEnd,\n \"if\": itemIf,\n \"range\": itemRange,\n \"nil\": itemNil,\n \"template\": itemTemplate,\n \"with\": itemWith,\n}\nconst eof = -1\n// stateFn represents the state of the scanner as a function that returns the next state.\ntype stateFn func(*lexer) stateFn\n// lexer holds the state of the scanner.\ntype lexer struct {\n name string // the name of the input; used only for error reports\n input string // the string being scanned\n leftDelim string // start of action\n rightDelim string // end of action\n state stateFn // the next lexing function to enter\n pos Pos // current position in the input\n start Pos // start position of this item\n width Pos // width of last rune read from input\n lastPos Pos // position of most recent item returned by nextItem\n items *list.List // scanned items\n parenDepth int // nesting depth of ( ) exprs\n}\n// next returns the next rune in the input.\nfunc (l *lexer) next() rune {\n if int(l.pos) >= len(l.input) {\n l.width = 0\n return eof\n }\n r, w := utf8.DecodeRuneInString(l.input[l.pos:])\n l.width = Pos(w)\n l.pos += l.width\n return r\n}\n// peek returns but does not consume the next rune in the input.\nfunc (l *lexer) peek() rune {\n r := l.next()\n l.backup()\n return r\n}\n// backup steps back one rune. Can only be called once per call of next.\nfunc (l *lexer) backup() {\n l.pos -= l.width\n}\n// emit passes an item back to the client.\nfunc (l *lexer) emit(t itemType) {\n l.items.PushBack(item{t, l.start, l.input[l.start:l.pos]})\n l.start = l.pos\n}\n// ignore skips over the pending input before this point.\nfunc (l *lexer) ignore() {\n l.start = l.pos\n}\n// accept consumes the next rune if it's from the valid set.\nfunc (l *lexer) accept(valid string) bool {\n if strings.IndexRune(valid, l.next()) >= 0 {\n return true\n }\n l.backup()\n return false\n}\n// acceptRun consumes a run of runes from the valid set.\nfunc (l *lexer) acceptRun(valid string) {\n for strings.IndexRune(valid, l.next()) >= 0 {\n }\n l.backup()\n}\n// lineNumber reports which line we're on, based on the position of\n// the previous item returned by nextItem. Doing it this way\n// means we don't have to worry about peek double counting.\nfunc (l *lexer) lineNumber() int {\n return 1 + strings.Count(l.input[:l.lastPos], \"\\n\")\n}\n// errorf returns an error token and terminates the scan by passing\n// back a nil pointer that will be the next state, terminating l.nextItem.\nfunc (l *lexer) errorf(format string, args ...interface{}) stateFn {\n l.items.PushBack(item{itemError, l.start, fmt.Sprintf(format, args...)})\n return nil\n}\n// nextItem returns the next item from the input.\nfunc (l *lexer) nextItem() item {\n element := l.items.Front()\n for element == nil {\n l.state = l.state(l)\n element = l.items.Front()\n }\n l.items.Remove(element)\n item := element.Value.(item)\n l.lastPos = item.pos\n return item\n}\n// lex creates a new scanner for the input string.\nfunc lex(name, input, left, right string) *lexer {\n if left == \"\" {\n left = leftDelim\n }\n if right == \"\" {\n right = rightDelim\n }\n l := &lexer{\n name: name,\n input: input,\n leftDelim: left,\n rightDelim: right,\n items: list.New(),\n }\n l.state = lexText\n return l\n}\n// state functions\nconst (\n leftDelim = \"{{\"\n rightDelim = \"}}\"\n leftComment = \"/*\"\n rightComment = \"*/\"\n)\n// lexText scans until an opening action delimiter, \"{{\".\nfunc lexText(l *lexer) stateFn {\n for {\n if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {\n if l.pos > l.start {\n l.emit(itemText)\n }\n return lexLeftDelim\n }\n if l.next() == eof {\n break\n }\n }\n // Correctly reached EOF.\n if l.pos > l.start {\n l.emit(itemText)\n }\n l.emit(itemEOF)\n return nil\n}\n// lexLeftDelim scans the left delimiter, which is known to be present.\nfunc lexLeftDelim(l *lexer) stateFn {\n l.pos += Pos(len(l.leftDelim))\n if strings.HasPrefix(l.input[l.pos:], leftComment) {\n return lexComment\n }\n l.emit(itemLeftDelim)\n l.parenDepth = 0\n return lexInsideAction\n}\n// lexComment scans a comment. The left comment marker is known to be present.\nfunc lexComment(l *lexer) stateFn {\n l.pos += Pos(len(leftComment))\n i := strings.Index(l.input[l.pos:], rightComment)\n if i < 0 {\n return l.errorf(\"unclosed comment\")\n }\n l.pos += Pos(i + len(rightComment))\n if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n return l.errorf(\"comment ends before closing delimiter\")\n }\n l.pos += Pos(len(l.rightDelim))\n l.ignore()\n return lexText\n}\n// lexRightDelim scans the right delimiter, which is known to be present.\nfunc lexRightDelim(l *lexer) stateFn {\n l.pos += Pos(len(l.rightDelim))\n l.emit(itemRightDelim)\n return lexText\n}\n// lexInsideAction scans the elements inside action delimiters.\nfunc lexInsideAction(l *lexer) stateFn {\n // Either number, quoted string, or identifier.\n // Spaces separate arguments; runs of spaces turn into itemSpace.\n // Pipe symbols separate and are emitted.\n if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {\n if l.parenDepth == 0 {\n return lexRightDelim\n }\n return l.errorf(\"unclosed left paren\")\n }\n switch r := l.next(); {\n case r == eof || isEndOfLine(r):\n return l.errorf(\"unclosed action\")\n case isSpace(r):\n return lexSpace\n case r == ':':\n if l.next() != '=' {\n return l.errorf(\"expected :=\")\n }\n l.emit(itemColonEquals)\n case r == '|':\n l.emit(itemPipe)\n case r == '\"':\n return lexQuote\n case r == '`':\n return lexRawQuote\n case r == '$':\n return lexVariable\n case r == '\\'':\n return lexChar\n case r == '.':\n // special look-ahead for \".field\" so we don't break l.backup().\n if l.pos < Pos(len(l.input)) {\n r := l.input[l.pos]\n if r < '0' || '9' < r {\n return lexField\n }\n }\n fallthrough // '.' can start a number.\n case r == '+' || r == '-' || ('0' <= r && r <= '9'):\n l.backup()\n return lexNumber\n case isAlphaNumeric(r):\n l.backup()\n return lexIdentifier\n case r == '(':\n l.emit(itemLeftParen)\n l.parenDepth++\n return lexInsideAction\n case r == ')':\n l.emit(itemRightParen)\n l.parenDepth--\n if l.parenDepth < 0 {\n return l.errorf(\"unexpected right paren %#U\", r)\n }\n return lexInsideAction\n case r <= unicode.MaxASCII && unicode.IsPrint(r):\n l.emit(itemChar)\n return lexInsideAction\n default:\n return l.errorf(\"unrecognized character in action: %#U\", r)\n }\n return lexInsideAction\n}\n// lexSpace scans a run of space characters.\n// One space has already been seen.\nfunc lexSpace(l *lexer) stateFn {\n for isSpace(l.peek()) {\n l.next()\n }\n l.emit(itemSpace)\n return lexInsideAction\n}\n// lexIdentifier scans an alphanumeric.\nfunc lexIdentifier(l *lexer) stateFn {\nLoop:\n for {\n switch r := l.next(); {\n case isAlphaNumeric(r):\n // absorb.\n default:\n l.backup()\n word := l.input[l.start:l.pos]\n if !l.atTerminator() {\n return l.errorf(\"bad character %#U\", r)\n }\n switch {\n case key[word] > itemKeyword:\n l.emit(key[word])\n case word[0] == '.':\n l.emit(itemField)\n case word == \"true\", word == \"false\":\n l.emit(itemBool)\n default:\n l.emit(itemIdentifier)\n }\n break Loop\n }\n }\n return lexInsideAction\n}\n// lexField scans a field: .Alphanumeric.\n// The . has been scanned.\nfunc lexField(l *lexer) stateFn {\n return lexFieldOrVariable(l, itemField)\n}\n// lexVariable scans a Variable: $Alphanumeric.\n// The $ has been scanned.\nfunc lexVariable(l *lexer) stateFn {\n if l.atTerminator() { // Nothing interesting follows -> \"$\".\n l.emit(itemVariable)\n return lexInsideAction\n }\n return lexFieldOrVariable(l, itemVariable)\n}\n// lexVariable scans a field or variable: [.$]Alphanumeric.\n// The . or $ has been scanned.\nfunc lexFieldOrVariable(l *lexer, typ itemType) stateFn {\n if l.atTerminator() { // Nothing interesting follows -> \".\" or \"$\".\n if typ == itemVariable {\n l.emit(itemVariable)\n } else {\n l.emit(itemDot)\n }\n return lexInsideAction\n }\n var r rune\n for {\n r = l.next()\n if !isAlphaNumeric(r) {\n l.backup()\n break\n }\n }\n if !l.atTerminator() {\n return l.errorf(\"bad character %#U\", r)\n }\n l.emit(typ)\n return lexInsideAction\n}\n// atTerminator reports whether the input is at valid termination character to\n// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases\n// like \"$x+2\" not being acceptable without a space, in case we decide one\n// day to implement arithmetic.\nfunc (l *lexer) atTerminator() bool {\n r := l.peek()\n if isSpace(r) || isEndOfLine(r) {\n return true\n }\n switch r {\n case eof, '.', ',', '|', ':', ')', '(':\n return true\n }\n // Does r start the delimiter? This can be ambiguous (with delim==\"//\", $x/2 will\n // succeed but should fail) but only in extremely rare cases caused by willfully\n // bad choice of delimiter.\n if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {\n return true\n }\n return false\n}\n// lexChar scans a character constant. The initial quote is already\n// scanned. Syntax checking is done by the parser.\nfunc lexChar(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case '\\\\':\n if r := l.next(); r != eof && r != '\\n' {\n break\n }\n fallthrough\n case eof, '\\n':\n return l.errorf(\"unterminated character constant\")\n case '\\'':\n break Loop\n }\n }\n l.emit(itemCharConstant)\n return lexInsideAction\n}\n// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This\n// isn't a perfect number scanner - for instance it accepts \".\" and \"0x0.2\"\n// and \"089\" - but when it's wrong the input is invalid and the parser (via\n// strconv) will notice.\nfunc lexNumber(l *lexer) stateFn {\n if !l.scanNumber() {\n return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n }\n if sign := l.peek(); sign == '+' || sign == '-' {\n // Complex: 1+2i. No spaces, must end in 'i'.\n if !l.scanNumber() || l.input[l.pos-1] != 'i' {\n return l.errorf(\"bad number syntax: %q\", l.input[l.start:l.pos])\n }\n l.emit(itemComplex)\n } else {\n l.emit(itemNumber)\n }\n return lexInsideAction\n}\nfunc (l *lexer) scanNumber() bool {\n // Optional leading sign.\n l.accept(\"+-\")\n // Is it hex?\n digits := \"0123456789\"\n if l.accept(\"0\") && l.accept(\"xX\") {\n digits = \"0123456789abcdefABCDEF\"\n }\n l.acceptRun(digits)\n if l.accept(\".\") {\n l.acceptRun(digits)\n }\n if l.accept(\"eE\") {\n l.accept(\"+-\")\n l.acceptRun(\"0123456789\")\n }\n // Is it imaginary?\n l.accept(\"i\")\n // Next thing mustn't be alphanumeric.\n if isAlphaNumeric(l.peek()) {\n l.next()\n return false\n }\n return true\n}\n// lexQuote scans a quoted string.\nfunc lexQuote(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case '\\\\':\n if r := l.next(); r != eof && r != '\\n' {\n break\n }\n fallthrough\n case eof, '\\n':\n return l.errorf(\"unterminated quoted string\")\n case '\"':\n break Loop\n }\n }\n l.emit(itemString)\n return lexInsideAction\n}\n// lexRawQuote scans a raw quoted string.\nfunc lexRawQuote(l *lexer) stateFn {\nLoop:\n for {\n switch l.next() {\n case eof, '\\n':\n return l.errorf(\"unterminated raw quoted string\")\n case '`':\n break Loop\n }\n }\n l.emit(itemRawString)\n return lexInsideAction\n}\n// isSpace reports whether r is a space character.\nfunc isSpace(r rune) bool {\n return r == ' ' || r == '\\t'\n}\n// isEndOfLine reports whether r is an end-of-line character.\nfunc isEndOfLine(r rune) bool {\n return r == '\\r' || r == '\\n'\n}\n// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.\nfunc isAlphaNumeric(r rune) bool {\n return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)\n}", 0)
	}

	return file
}
// SortImports sorts runs of consecutive import lines in import blocks in f.
func SortImports(fset *token.FileSet, f *File) {
	for _, d := range f.Decls {
		d, ok := d.(*GenDecl)
		if !ok || d.Tok != token.IMPORT {
			// Not an import declaration, so we're done.
			// Imports are always first.
			break
		}

		if d.Lparen == token.NoPos {
			// Not a block: sorted by default.
			continue
		}

		// Identify and sort runs of specs on successive lines.
		i := 0
		for j, s := range d.Specs {
			if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
				// j begins a new run. End this one.
				sortSpecs(fset, f, d.Specs[i:j])
				i = j
			}
		}
		sortSpecs(fset, f, d.Specs[i:])
	}
}
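// A minimal driver, assuming the function above behaves like (or is) the
// standard library's ast.SortImports, which it mirrors: parse with comments,
// sort, and print the rewritten file. exampleSortImports is illustrative only.
// (assumes imports: "go/ast", "go/parser", "go/printer", "go/token", "log", "os")
func exampleSortImports() {
	src := `package p

import (
	"os"
	"fmt"
)
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	ast.SortImports(fset, f) // "fmt" now sorts before "os"
	printer.Fprint(os.Stdout, fset, f)
}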
// Imports returns the file imports grouped by paragraph.
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
	var groups [][]*ast.ImportSpec

	for _, decl := range f.Decls {
		genDecl, ok := decl.(*ast.GenDecl)
		if !ok || genDecl.Tok != token.IMPORT {
			break
		}

		group := []*ast.ImportSpec{}

		var lastLine int
		for _, spec := range genDecl.Specs {
			importSpec := spec.(*ast.ImportSpec)
			pos := importSpec.Path.ValuePos
			line := fset.Position(pos).Line
			if lastLine > 0 && pos > 0 && line-lastLine > 1 {
				groups = append(groups, group)
				group = []*ast.ImportSpec{}
			}
			group = append(group, importSpec)
			lastLine = line
		}
		groups = append(groups, group)
	}

	return groups
}
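// A small usage sketch: a blank line between import paths starts a new group,
// so the file below yields two groups. exampleImportGroups is illustrative only.
// (assumes imports: "fmt", "go/parser", "go/token", "log")
func exampleImportGroups() {
	src := `package p

import (
	"fmt"
	"os"

	"example.com/extra"
)
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	for i, group := range Imports(fset, f) {
		for _, spec := range group {
			fmt.Printf("group %d: %s\n", i, spec.Path.Value)
		}
	}
	// group 0: "fmt" and "os"; group 1: "example.com/extra"
}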
func lintCheckFlagParse(fset *token.FileSet, af *ast.File, res []AcLintReport) []AcLintReport {
	reps := []AcLintReport{}
	foundParse := false
	ast.Inspect(af, func(node ast.Node) bool {
		switch c := node.(type) {
		case *ast.CallExpr:
			if sel, ok := c.Fun.(*ast.SelectorExpr); ok {
				if id, ok := sel.X.(*ast.Ident); ok && id.Name == "flag" {
					if sel.Sel.String() == "Parse" {
						foundParse = true
					} else if !foundParse && c != nil {
						tp := fset.Position(c.Pos())
						if tp.IsValid() {
							reps = append(reps, AcLintReport{
								Row: tp.Line - 1,
								Col: tp.Column - 1,
								Msg: "Cannot find corresponding call to flag.Parse()",
							})
						}
					}
				}
			}
		}
		return !foundParse
	})
	if !foundParse {
		res = append(res, reps...)
	}
	return res
}
// toViewNode converts the AST node (which corresponds to code[pos:end]) into a view model.
func toViewNode(code string, fset *token.FileSet, a *astNode, pos int, end int) *viewNode {
	nodeType := reflect.TypeOf(a.Node).String()
	res := &viewNode{
		Type: nodeType,
	}
	for _, c := range a.Children {
		cpos := fset.Position(c.Node.Pos()).Offset
		cend := fset.Position(c.Node.End()).Offset
		if cpos < pos {
			// Some AST nodes overlap each other.
			// Ugly hack to make it work.
			cpos = pos
		}
		if pos < cpos {
			res.Children = append(res.Children, &viewNode{
				Type: nodeType,
				Text: code[pos:cpos],
			})
		}
		res.Children = append(res.Children, toViewNode(code, fset, c, cpos, cend))
		pos = cend
	}
	if pos < end {
		res.Children = append(res.Children, &viewNode{
			Type: nodeType,
			Text: code[pos:end],
		})
	}
	return res
}
func comments(fset *token.FileSet, f *ast.File, pos *token.Position) string {
	for _, v := range f.Comments {
		end := fset.Position(v.End())
		if end.Filename == pos.Filename && end.Line == pos.Line-1 {
			var lines []string
			for _, c := range v.List {
				text := c.Text
				if strings.HasPrefix(text, "//") {
					text = text[2:]
				} else if strings.HasPrefix(text, "/*") {
					// Strip the two-character markers on both ends.
					text = text[2 : len(text)-2]
				}
				text = strings.TrimSpace(text)
				// Only comments whose text still begins with a slash after the
				// marker is stripped (e.g. "/// note") are extracted.
				if text == "" || text == "/" || text[0] != '/' {
					continue
				}
				text = strings.TrimSpace(text[1:])
				lines = append(lines, text)
			}
			if len(lines) > 0 {
				return strings.Join(lines, "\n")
			}
		}
	}
	return ""
}
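// A usage sketch for comments above: a triple-slash comment on the line
// directly before a string literal is returned as its translator note. The
// T() marker call in src is illustrative only; the file is parsed, not run.
// (assumes imports: "fmt", "go/ast", "go/parser", "go/token", "log")
func exampleComments() {
	src := `package main

/// greeting shown on startup
var greeting = T("Hello")
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		if lit, ok := n.(*ast.BasicLit); ok && lit.Kind == token.STRING {
			pos := fset.Position(lit.Pos())
			fmt.Printf("%s -> %q\n", lit.Value, comments(fset, f, &pos))
			// "Hello" -> "greeting shown on startup"
		}
		return true
	})
}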
func FmtPos(fset *token.FileSet, start token.Pos) string {
	if start == token.NoPos {
		return "-"
	}
	startP := fset.Position(start)
	return PositionRange(startP, startP)
}
func (r *describeStmtResult) JSON(fset *token.FileSet) []byte {
	return toJSON(&serial.Describe{
		Desc:   r.description,
		Pos:    fset.Position(r.node.Pos()).String(),
		Detail: "unknown",
	})
}
func PrintHugeParams(fset *token.FileSet, info *types.Info, files []*ast.File) {
	checkTuple := func(descr string, tuple *types.Tuple) {
		for i := 0; i < tuple.Len(); i++ {
			v := tuple.At(i)
			if sz := sizeof(v.Type()); sz > int64(*bytesFlag) {
				fmt.Printf("%s: %q %s: %s = %d bytes\n",
					fset.Position(v.Pos()), v.Name(), descr, v.Type(), sz)
			}
		}
	}
	checkSig := func(sig *types.Signature) {
		checkTuple("parameter", sig.Params())
		checkTuple("result", sig.Results())
	}
	for _, file := range files {
		ast.Inspect(file, func(n ast.Node) bool {
			switch n := n.(type) {
			case *ast.FuncDecl:
				checkSig(info.Defs[n.Name].Type().(*types.Signature))
			case *ast.FuncLit:
				checkSig(info.Types[n.Type].Type.(*types.Signature))
			}
			return true
		})
	}
}
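// PrintHugeParams needs a populated *types.Info; the sketch below shows one
// plausible way to produce it with go/types. sizeof and bytesFlag are
// package-level state assumed by the function above and are not defined here;
// exampleCheckHugeParams is illustrative only.
// (assumes imports: "go/ast", "go/importer", "go/parser", "go/token", "go/types", "log")
func exampleCheckHugeParams(src string) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "input.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	info := &types.Info{
		Defs:  make(map[*ast.Ident]types.Object),      // filled for *ast.FuncDecl names
		Types: make(map[ast.Expr]types.TypeAndValue),  // filled for *ast.FuncLit types
	}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
		log.Fatal(err)
	}
	PrintHugeParams(fset, info, []*ast.File{f})
}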
func replacementModifications(fileSet *token.FileSet, oldPos, oldEnd, newEnd token.Pos, lineLengths []int, areaRemoved []Range) (lineNum, numLinesToCut, newLength int) {
	oldEndLine := fileSet.Position(oldEnd).Line
	oldPosLine := fileSet.Position(oldPos).Line
	numCutLines := oldEndLine - oldPosLine
	newLineLen := areaRemoved[0].begin +
		(lineLengths[oldEndLine-1] - areaRemoved[len(areaRemoved)-1].end) +
		int(newEnd-oldPos)
	return oldPosLine - 1, numCutLines, newLineLen
}
func posLink_urlFunc(node ast.Node, fset *token.FileSet) string {
	var relpath string
	var line int
	var low, high int // selection

	if p := node.Pos(); p.IsValid() {
		pos := fset.Position(p)
		relpath = pos.Filename
		line = pos.Line
		low = pos.Offset
	}
	if p := node.End(); p.IsValid() {
		high = fset.Position(p).Offset
	}

	var buf bytes.Buffer
	template.HTMLEscape(&buf, []byte(relpath))

	// Selection ranges are of the form "s=low:high".
	if low < high {
		fmt.Fprintf(&buf, "?s=%d:%d", low, high) // no need for URL escaping
		// If we have a selection, position the page
		// such that the selection is a bit below the top.
		line -= 10
		if line < 1 {
			line = 1
		}
	}
	// Line IDs in HTML-printed source are of the
	// form "L%d" where %d stands for the line number.
	if line > 0 {
		fmt.Fprintf(&buf, "#L%d", line) // no need for URL escaping
	}

	return buf.String()
}
// ResolveQualifiedIdents resolves the selectors of qualified
// identifiers by associating the correct ast.Object with them.
// TODO(gri): Eventually, this functionality should be subsumed
// by Check.
func ResolveQualifiedIdents(fset *token.FileSet, pkg *ast.Package) error {
	var errors scanner.ErrorList

	findObj := func(pkg *ast.Object, name *ast.Ident) *ast.Object {
		scope := pkg.Data.(*ast.Scope)
		obj := scope.Lookup(name.Name)
		if obj == nil {
			errors.Add(fset.Position(name.Pos()), fmt.Sprintf("no %s in package %s", name.Name, pkg.Name))
		}
		return obj
	}

	ast.Inspect(pkg, func(n ast.Node) bool {
		if s, ok := n.(*ast.SelectorExpr); ok {
			if x, ok := s.X.(*ast.Ident); ok && x.Obj != nil && x.Obj.Kind == ast.Pkg {
				// Find the selector in the respective package.
				s.Sel.Obj = findObj(x.Obj, s.Sel)
			}
			return false
		}
		return true
	})

	return errors.Err()
}
func output_tag(pkgName *ast.Ident, fset *token.FileSet, name *ast.Ident, kind byte, hasRecv bool) {
	position := fset.Position(name.NamePos)
	switch kind {
	case PKG:
		tags = append(tags, fmt.Sprintf("%s\t%s\t%d;\"\t%c", name.Name, position.Filename, position.Line, kind))
	case FUNC:
		var fname string
		if hasRecv || isPrivate(name.Name) {
			fname = name.Name
		} else {
			fname = pkgName.Name + "." + name.Name
		}
		tags = append(tags, fmt.Sprintf("%s\t%s\t%d;\"\t%c", fname, position.Filename, position.Line, kind))
	default:
		var vname string
		if isPrivate(name.Name) {
			vname = name.Name
		} else {
			vname = pkgName.Name + "." + name.Name
		}
		tags = append(tags, fmt.Sprintf("%s\t%s\t%d;\"\t%c", vname, position.Filename, position.Line, kind))
	}
}
func findCommentsForTranslation(fset *token.FileSet, f *ast.File, posCall token.Position) string {
	com := ""
	for _, cg := range f.Comments {
		// Search for all comments on the previous line, walking each
		// group backwards from its last comment.
		for i := len(cg.List) - 1; i >= 0; i-- {
			c := cg.List[i]
			posComment := fset.Position(c.End())
			if posCall.Line == posComment.Line+1 {
				posCall = posComment
				com = fmt.Sprintf("%s\n%s", c.Text, com)
			}
		}
	}

	// Only return the comment if it has the expected prefix.
	formattedComment := formatComment(com)
	needle := fmt.Sprintf("#. %s", opts.AddCommentsTag)
	if !strings.HasPrefix(formattedComment, needle) {
		formattedComment = ""
	}
	return formattedComment
}
// Scan app source code for calls to X.Y(), where X is of type *Validation.
//
// Recognize these scenarios:
// - "Y" = "Validation" and is a member of the receiver.
//   (The common case for inline validation)
// - "X" is passed in to the func as a parameter.
//   (For structs implementing Validated)
//
// The line number to which a validation call is attributed is that of the
// surrounding ExprStmt. This is so that it matches what runtime.Callers()
// reports.
//
// The end result is that we can set the default validation key for each call
// to be the same as the local variable.
func getValidationKeys(fset *token.FileSet, funcDecl *ast.FuncDecl) map[int]string {
	var (
		lineKeys     = make(map[int]string)
		lastExprLine = 0
		// Check the func parameters and the receiver's members for the *rev.Validation type.
		validationParam = getValidationParameter(funcDecl)
	)
	ast.Inspect(funcDecl.Body, func(node ast.Node) bool {
		if expr, ok := node.(*ast.ExprStmt); ok {
			lastExprLine = fset.Position(expr.End()).Line
			return true
		}
		// e.g. c.Validation.Required(arg) or v.Required(arg)
		callExpr, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		// e.g. c.Validation.Required or v.Required
		funcSelector, ok := callExpr.Fun.(*ast.SelectorExpr)
		if !ok {
			return true
		}
		switch x := funcSelector.X.(type) {
		case *ast.SelectorExpr: // e.g. c.Validation
			if x.Sel.Name != "Validation" {
				return true
			}
		case *ast.Ident: // e.g. v
			if validationParam == nil || x.Obj != validationParam {
				return true
			}
		default:
			return true
		}
		if len(callExpr.Args) == 0 {
			return true
		}
		// If the argument is a binary expression, take the first expression.
		// (e.g. c.Validation.Required(myName != ""))
		arg := callExpr.Args[0]
		if binExpr, ok := arg.(*ast.BinaryExpr); ok {
			arg = binExpr.X
		}
		lineKeys[lastExprLine] = ExprName(arg)
		return true
	})
	return lineKeys
}
// extractInfos returns all the info we need from the AST at the source level.
func extractInfos(f *token.FileSet, pos, end token.Pos) (fname string, offset, length int64) {
	beg := f.Position(pos)
	fname = beg.Filename
	offset = int64(beg.Offset)
	length = int64(f.Position(end).Offset) - offset
	return fname, offset, length
}
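// A small usage sketch: convert a declaration's Pos/End into the
// filename/offset/length triple. exampleExtract is illustrative only.
// (assumes imports: "fmt", "go/parser", "go/token", "log")
func exampleExtract() {
	src := "package p\n\nfunc f() {}\n"
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	decl := file.Decls[0]
	fname, offset, length := extractInfos(fset, decl.Pos(), decl.End())
	fmt.Printf("%s: %d bytes at offset %d\n", fname, length, offset) // p.go: 11 bytes at offset 11
}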