// ParseTrace parses the gocov trace file at path and returns the packages
// recorded in it. Any panic raised during parsing is recovered and returned
// as an error.
func ParseTrace(path string) (pkgs []*gocov.Package, err error) {
    defer func() {
        if e := recover(); e != nil {
            if e, ok := e.(error); ok {
                err = e
                return
            }
            err = fmt.Errorf("%s", e)
        }
    }()
    finfo, err := os.Stat(path)
    if err != nil {
        return nil, err
    }
    src, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, err
    }
    fset := token.NewFileSet()
    f := fset.AddFile(path, fset.Base(), int(finfo.Size()))
    s := &scanner.Scanner{}
    s.Init(f, src, errorHandler, 0)
    p := &parser{
        FileSet: fset,
        Scanner: s,
        tok:     token.Token(-1),
        objects: make(map[int]gocov.Object),
        context: &gocov.Context{},
    }
    p.parse()
    return p.packages, nil
}
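// A minimal usage sketch for ParseTrace, assuming the fmt and log packages
// are imported. The input file name "gocov.out" and the Name field on
// gocov.Package are assumptions for illustration, not taken from the code
// above; the function name is hypothetical.
func exampleParseTrace() {
    pkgs, err := ParseTrace("gocov.out") // hypothetical trace file
    if err != nil {
        log.Fatal(err)
    }
    for _, pkg := range pkgs {
        fmt.Println(pkg.Name) // gocov.Package is assumed to expose Name
    }
}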
func (p *parser) parseBasicLit(n *parse.Node) *ast.BasicLit {
    return &ast.BasicLit{
        ValuePos: token.Pos(n.Pos()),
        Kind:     token.Token(n.ID()),
        Value:    string(n.Value()),
    }
}
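// For reference, a small standard-library-only sketch (imports: go/ast,
// go/token, go/printer, os) showing the kind of node parseBasicLit builds and
// how it prints back to source text; zero positions are acceptable to
// go/printer. The function name is illustrative.
func printBasicLit() {
    lit := &ast.BasicLit{
        Kind:  token.INT,
        Value: "42",
    }
    printer.Fprint(os.Stdout, token.NewFileSet(), lit) // prints: 42
}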
// Scan returns the position, kind, and literal value of the next token,
// skipping tokens marked tSkip and stopping at the first illegal token.
func (s *Scanner) Scan() (token.Pos, token.Token, string) {
    if s.Token() != nil && s.Token().ID == int(token.EOF) {
        return s.file.Pos(s.Pos()), token.EOF, ""
    }
    var t *scan.Token
    defer func() {
        if t.ID != int(token.ILLEGAL) {
            s.endOfLinePos = s.Pos() + 1
            s.lastIsPreSemi = isPreSemi(t.ID)
        }
        if s.lastIsPreSemi {
            s.commentAfterPreSemi = false
        }
        //fmt.Println("scan return:", t.ID, strconv.Quote(string(t.Value)), s.lastIsPreSemi)
    }()
    for {
        t = s.scan()
        if t.ID == int(token.ILLEGAL) {
            //fmt.Println("scan error", s.Error()) // DEBUG
            break
        } else if t.ID == tSkip {
            continue
        }
        break
    }
    return s.file.Pos(t.Pos), token.Token(t.ID), string(t.Value)
}
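// The Scan signature above follows the standard go/scanner convention of
// (position, token, literal), so a caller loop looks like the sketch below.
// Construction of the Scanner is omitted because it is specific to this
// package; the function name is hypothetical and fmt is assumed imported.
func dumpTokens(s *Scanner) {
    for {
        pos, tok, lit := s.Scan()
        fmt.Println(pos, tok, lit)
        if tok == token.EOF {
            break
        }
    }
}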
func (p *parser) parseAsignment(n *parse.Node) ast.Stmt {
    return &ast.AssignStmt{
        Lhs: p.parseExprList(n.Child(0)),
        Tok: token.Token(n.Child(1).Child(0).ID()),
        Rhs: p.parseExprList(n.Child(2)),
    }
}
// isPreSemi reports whether a token of the given ID may end a line before an
// automatically inserted semicolon: identifiers, literals, one of
// break/continue/fallthrough/return, ++/--, or a closing ), ], }.
func isPreSemi(tok int) bool {
    switch token.Token(tok) {
    case token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR,
        token.STRING, token.BREAK, token.CONTINUE, token.FALLTHROUGH,
        token.RETURN, token.INC, token.DEC, token.RPAREN,
        token.RBRACK, token.RBRACE:
        return true
    }
    return false
}
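// The token classes above mirror Go's automatic semicolon insertion rule,
// which can be observed with the standard library go/scanner (imports:
// go/scanner, go/token, fmt). A quick check, with an illustrative function
// name, scanning "x++\n":
func showAutoSemi() {
    src := []byte("x++\n")
    fset := token.NewFileSet()
    var s scanner.Scanner // the standard go/scanner Scanner
    s.Init(fset.AddFile("demo.go", fset.Base(), len(src)), src, nil, 0)
    for {
        _, tok, _ := s.Scan()
        if tok == token.EOF {
            break
        }
        fmt.Println(tok) // prints IDENT, ++, then ; inserted at the newline
    }
}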
func (p *parser) parseUnaryExpr(n *parse.Node) ast.Expr {
    if n.Child(0).Is(primaryExpr) {
        return p.parsePrimaryExpr(n.Child(0))
    }
    return &ast.UnaryExpr{
        OpPos: token.Pos(n.Child(0).Child(0).Pos()),
        Op:    token.Token(n.Child(0).Child(0).ID()),
        X:     p.parseUnaryExpr(n.Child(1)),
    }
}
func TestAssignOp(t *testing.T) {
    // there are fewer than 256 tokens
    for i := 0; i < 256; i++ {
        tok := token.Token(i)
        got := assignOp(tok)
        want := assignOps[tok]
        if got != want {
            t.Errorf("for assignOp(%s): got %s; want %s", tok, got, want)
        }
    }
}
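// assignOp itself is not shown here; a sketch consistent with the test above,
// relying on the fact that go/token declares the assignment operators
// ADD_ASSIGN..AND_NOT_ASSIGN in the same order as the binary operators
// ADD..AND_NOT, so a fixed offset converts one to the other. For every other
// token it returns token.ILLEGAL, which is the zero Token and therefore also
// what the test's assignOps map lookup yields.
func assignOp(op token.Token) token.Token {
    if token.ADD_ASSIGN <= op && op <= token.AND_NOT_ASSIGN {
        return op + (token.ADD - token.ADD_ASSIGN)
    }
    return token.ILLEGAL
}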
func (p *parser) parseVarDecl(n *parse.Node) ast.Decl {
    var specs []ast.Spec
    lParen, rParen := declListEach(n.Child(1), func(item *parse.Node) {
        specs = append(specs, p.parseVarSpec(item))
    })
    return &ast.GenDecl{
        TokPos: token.Pos(n.Child(0).Pos()),
        Tok:    token.Token(n.Child(0).ID()),
        Lparen: token.Pos(lParen),
        Specs:  specs,
        Rparen: token.Pos(rParen),
    }
}
func (p *parser) parseShortVarDecl(n *parse.Node) ast.Stmt {
    idents := p.parseIdentList(n.Child(0))
    exprs := make([]ast.Expr, len(idents))
    for i := range exprs {
        exprs[i] = idents[i]
    }
    stmt := &ast.AssignStmt{
        Lhs:    exprs,
        TokPos: token.Pos(n.Child(1).Pos()),
        Tok:    token.Token(n.Child(1).ID()),
        Rhs:    p.parseExprList(n.Child(2)),
    }
    p.declareShortVar(stmt, exprs)
    return stmt
}
func (p *parser) parseExpr(n *parse.Node) ast.Expr {
    if n == nil {
        return nil // ??
    }
    switch n.ChildCount() {
    case 1:
        return p.parseUnaryExpr(n.Child(0))
    case 3:
        op := n.Child(1).Child(0).Child(0)
        return &ast.BinaryExpr{
            X:     p.parseExpr(n.Child(0)),
            OpPos: token.Pos(op.Pos()),
            Op:    token.Token(op.ID()),
            Y:     p.parseUnaryExpr(n.Child(2)),
        }
    }
    return nil
}
func modify(t *scan.Token) {
    // modify tokens
    switch token.Token(t.ID) {
    case tLineComment:
        t.ID = int(token.COMMENT)
        if t.Value[len(t.Value)-1] == '\n' {
            t.Value = t.Value[:len(t.Value)-1]
        }
        t.Value = stripCR(t.Value)
    case tGeneralCommentSL:
        t.ID = int(token.COMMENT)
        t.Value = stripCR(t.Value)
    case tGeneralCommentML:
        t.ID = int(token.COMMENT)
        t.Value = stripCR(t.Value)
    case tRawStringLit:
        t.ID = int(token.STRING)
        t.Value = stripCR(t.Value)
    case tInterpretedStringLit:
        t.ID = int(token.STRING)
    }
}
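// stripCR is referenced above but not shown; a sketch of the conventional
// implementation (the same normalization the standard go/scanner applies to
// comments and raw string literals): drop every carriage return so that \r\n
// line endings inside the token value become plain \n. The exact signature in
// this package is an assumption.
func stripCR(b []byte) []byte {
    c := make([]byte, len(b))
    i := 0
    for _, ch := range b {
        if ch != '\r' {
            c[i] = ch
            i++
        }
    }
    return c[:i]
}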
func (p *parser) parseRangeStmt(n *parse.Node) *ast.RangeStmt {
    kv := n.Child(0).Child(0)
    tok := n.Child(0).Child(1)
    rangeStmt := &ast.RangeStmt{
        TokPos: token.Pos(tok.Pos()),
        Tok:    token.Token(tok.ID()),
        X:      p.parseExpr(n.Child(2)),
    }
    if kv.Is(exprList) {
        es := p.parseExprList(kv)
        rangeStmt.Key = es[0]
        if len(es) > 1 {
            rangeStmt.Value = es[1]
        }
    } else {
        es := p.parseIdentList(kv)
        rangeStmt.Key = es[0]
        if len(es) > 1 {
            rangeStmt.Value = es[1]
        }
    }
    return rangeStmt
}
// Scan returns the position, kind, and literal text of the next token,
// inserting semicolons where Go's automatic semicolon rules require them
// (unless dontInsertSemis is set) and registering line offsets with s.file.
func (s *Scanner) Scan() (token.Pos, token.Token, string) {
    for s.tokScanner.Scan() {
        var val []byte
        t := s.tokScanner.Token()
        //fmt.Println(token.Token(t.ID), t, string(s.src[t.Lo:t.Hi]))
        switch t.ID {
        case tWhitespace:
            s.semiPos = t.Hi + 1
            continue
        case tNewline:
            if s.preSemi {
                s.preSemi = false
                if s.mode&dontInsertSemis == 0 {
                    t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
                    break
                }
            }
            s.file.AddLine(t.Hi)
            s.lineStart = t.Hi
            continue
        case tIdentifier, tInt, tFloat, tImag, tRune, tString, tReturn, tBreak, tContinue, tFallthrough:
            s.preSemi, s.semiPos = true, t.Hi+1
            val = s.src[t.Lo:t.Hi]
        case tRightParen, tRightBrack, tRightBrace, tInc, tDec:
            s.preSemi, s.semiPos = true, t.Hi+1
        case tLineComment, tLineCommentEOF, tLineCommentInfo:
            if s.preSemi {
                s.preSemi = false
                if s.mode&dontInsertSemis == 0 {
                    s.tokScanner.SetPos(t.Lo)
                    t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
                    break
                }
            }
            val = s.src[t.Lo:t.Hi]
            if t.ID == tLineCommentInfo && t.Lo == s.lineStart {
                s.interpretLineComment(val, t.Hi)
            }
            if val[len(val)-1] == '\n' {
                s.file.AddLine(t.Hi)
                s.lineStart = t.Hi
                val = val[:len(val)-1]
            }
            if s.mode&ScanComments == 0 {
                continue
            }
            if t.ID == tLineCommentEOF && t.Hi < len(s.src) {
                t, val = s.handleError(t.Lo, t.Hi)
                break
            }
            t.ID = tComment
            val = stripCR(val)
        case tGeneralCommentML:
            if s.preSemi {
                s.preSemi = false
                if s.mode&dontInsertSemis == 0 {
                    s.tokScanner.SetPos(t.Lo)
                    t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
                    break
                }
            }
            val = s.src[t.Lo:t.Hi]
            for i, c := range val {
                if c == '\n' {
                    s.file.AddLine(t.Lo + i + 1)
                }
            }
            if s.mode&ScanComments == 0 {
                continue
            }
            t.ID = tComment
            val = stripCR(val)
        case tGeneralCommentSL:
            if s.preSemi {
                s.preSemi = false
                if s.mode&dontInsertSemis == 0 {
                    t = t.Copy()
                    for s.tokScanner.Scan() {
                        nt := s.tokScanner.Token()
                        switch nt.ID {
                        case tWhitespace, tGeneralCommentSL:
                            continue
                        case tEOF, tNewline, tLineComment, tLineCommentEOF, tLineCommentInfo, tGeneralCommentML, eIncompleteComment:
                            s.tokScanner.SetPos(t.Lo)
                            t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
                            goto returnSemi
                        default:
                            s.tokScanner.SetPos(t.Hi)
                            goto returnComment
                        }
                    }
                returnSemi:
                    break
                }
            }
        returnComment:
            if s.mode&ScanComments == 0 {
                continue
            }
            t.ID = tComment
            val = stripCR(s.src[t.Lo:t.Hi])
        case tInterpretedStringLit:
            s.preSemi, s.semiPos = true, t.Hi+1
            t.ID = tString
            val = s.src[t.Lo:t.Hi]
        case tRawStringLit:
            s.preSemi, s.semiPos = true, t.Hi+1
            t.ID = tString
            val = s.src[t.Lo:t.Hi]
            for i, c := range val {
                if c == '\n' {
                    s.file.AddLine(t.Lo + i + 1)
                }
            }
            val = stripCR(val)
        case tEOF:
            if s.preSemi {
                s.preSemi = false
                if s.mode&dontInsertSemis == 0 {
                    s.tokScanner.SetPos(t.Lo)
                    t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
                }
            }
        case tSemiColon:
            s.preSemi = false
            val = s.src[t.Lo:t.Hi]
        case eIncompleteComment:
            if s.preSemi {
                s.preSemi = false
                if s.mode&dontInsertSemis == 0 {
                    s.tokScanner.SetPos(t.Lo)
                    t.ID, t.Lo, val = tSemiColon, s.semiPos, newlineValue
                    break
                }
            }
            t.ID = tComment
            val = s.src[t.Lo:t.Hi]
            s.error(t.Lo, "comment not terminated")
        case eOctalLit:
            t.ID = tInt
            val = s.src[t.Lo:t.Hi]
            s.error(t.Lo, "illegal octal number")
        case eHexLit:
            t.ID = tInt
            val = s.src[t.Lo:t.Hi]
            s.error(t.Lo, "illegal hexadecimal number")
        case eIllegal:
            t, val = s.handleError(t.Lo, t.Hi)
        default:
            s.preSemi = false
            if t.ID < firstOp || t.ID > lastOp {
                val = s.src[t.Lo:t.Hi]
            }
        }
        return token.Pos(s.fileBase + t.Lo), token.Token(t.ID), string(val)
    }
    return token.Pos(s.fileBase + len(s.src)), token.EOF, ""
}
                p = i + 1 // Skip the newline character
            } else if i == ln {
                t := Token{t: tok, Line: current.Num, Text: text[p:], Kind: kind, Name: name}
                current.Add(t)
            }
        }
    }
    return lines, nil
}

const (
    space      = " "
    tab        = "    " // 4 space tab
    tokenSpace = token.Token(token.COMMENT + 1)
)

var literals = map[string]struct{}{
    "bool": {}, "close": {}, "error": {}, "iota": {}, "int": {}, "int8": {},
    "int16": {}, "int32": {}, "int64": {}, "uint": {}, "uint8": {}, "uint16": {},
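// A side note on tokenSpace above (assumption: it is only used as an internal
// marker and never passed back to go/token APIs): token.COMMENT+1 lands on an
// unexported go/token sentinel value that the scanner never emits, so it
// cannot collide with a real scanned token, and it has no canonical name, so
// it prints in the generic token(N) form. The helper name is illustrative and
// fmt is assumed imported.
func showTokenSpace() {
    fmt.Println(token.Token(token.COMMENT + 1)) // prints: token(3)
}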
func (s *Scanner) scan() *scan.Token {
    if s.commentQueue.count() > 0 {
        return s.commentQueue.pop()
    }
    s.gombiScanner.Scan()
    t := s.Token()
    //fmt.Println("scanning:", t.ID, strconv.Quote(string(t.Value)), s.lastIsPreSemi)

    // add line
    switch token.Token(t.ID) {
    case tNewline:
        s.file.AddLine(t.Pos + 1)
    case tLineComment, tGeneralCommentML, tRawStringLit:
        s.addLineFromValue(t.Pos, t.Value)
    }

    if s.lastIsPreSemi {
        switch token.Token(t.ID) {
        case tLineComment, tGeneralCommentSL, tGeneralCommentML:
            s.commentAfterPreSemi = true
        }
    }

    // skip whitespace
    if t.ID == int(tWhitespace) {
        if s.lastIsPreSemi && !s.commentAfterPreSemi {
            s.endOfLinePos = s.Pos() + 1
        }
        return skipToken
    }

    // suppress value for operations
    if t.ID != int(token.SEMICOLON) && firstOp <= t.ID && t.ID <= lastOp {
        t.Value = nil
        return t
    }

    // insert semi
    switch token.Token(t.ID) {
    case tNewline:
        if semi := s.insertSemi(); semi != nil {
            return semi
        }
        return skipToken
    case token.EOF:
        if semi := s.insertSemi(); semi != nil {
            return semi
        }
        return t
    case tLineComment, tGeneralCommentML:
        if semi := s.insertSemi(); semi != nil {
            modify(t)
            s.commentQueue.push(t)
            return semi
        }
    case tGeneralCommentSL:
        if semi := s.insertSemi(); semi != nil {
            for {
                modify(t)
                s.commentQueue.push(t)
                s.gombiScanner.Scan()
                t = s.Token()
                //fmt.Println("SL scanning:", t.ID, strconv.Quote(string(t.Value)), s.lastIsPreSemi)
                switch t.ID {
                case int(token.EOF), tNewline, tLineComment, tGeneralCommentML:
                    modify(t)
                    s.commentQueue.push(t)
                    return semi
                case tGeneralCommentSL:
                    modify(t)
                    s.commentQueue.push(t)
                case tWhitespace:
                default:
                    modify(t)
                    s.commentQueue.push(t)
                    return skipToken
                }
            }
        }
    }

    // skip comments
    if s.mode&ScanComments == 0 {
        switch token.Token(t.ID) {
        case tLineComment, tGeneralCommentSL, tGeneralCommentML:
            return skipToken
        }
    }

    modify(t)
    return t
}