// distance returns the column difference between from and to if both // are on the same line; if they are on different lines (or unknown) // the result is infinity. func (p *printer) distance(from0 token.Pos, to token.Position) int { from := p.posFor(from0) if from.IsValid() && to.IsValid() && from.Line == to.Line { return to.Column - from.Column } return infinity }
// parseCallOrConversion parses the parenthesized argument list of a call
// (or type conversion) expression; fun is the operand being called.
// The current token must be '('. The resulting CallExpr records the
// parenthesis positions and the position of an optional trailing "...".
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}

	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list vector.Vector
	var ellipsis token.Position
	// An "..." may only follow the final argument, so a valid
	// ellipsis position also terminates the loop.
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list.Push(p.parseExpr())
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if p.tok != token.COMMA {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expect(token.RPAREN)

	return &ast.CallExpr{fun, lparen, makeExprList(&list), ellipsis, rparen}
}
// writeItem writes data at position pos. data is the text corresponding to
// a single lexical token, but may also be comment text. pos is the actual
// (or at least very accurately estimated) position of the data in the original
// source text. If tags are present and GenHTML is set, the tags are written
// before and after the data. writeItem updates p.last to the position
// immediately following the data.
//
func (p *printer) writeItem(pos token.Position, data []byte, tag HTMLTag) {
	fileChanged := false
	if pos.IsValid() {
		// continue with previous position if we don't have a valid pos
		if p.last.IsValid() && p.last.Filename != pos.Filename {
			// the file has changed - reset state
			// (used when printing merged ASTs of different files
			// e.g., the result of ast.MergePackageFiles)
			p.indent = 0
			p.mode = 0
			p.buffer = p.buffer[0:0]
			fileChanged = true
		}
		p.pos = pos
	}
	if debug {
		// do not update p.pos - use write0
		_, filename := path.Split(pos.Filename)
		p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column)))
	}
	if p.Mode&GenHTML != 0 {
		// write line tag if on a new line
		// TODO(gri): should write line tags on each line at the start
		//            will be more useful (e.g. to show line numbers)
		if p.Styler != nil && (pos.Line != p.lastTaggedLine || fileChanged) {
			p.writeTaggedItem(p.Styler.LineTag(pos.Line))
			p.lastTaggedLine = pos.Line
		}
		p.writeTaggedItem(data, tag)
	} else {
		p.write(data)
	}
	p.last = p.pos
}
func error(pos token.Position, msg string, args ...interface{}) { nerrors++ if pos.IsValid() { fmt.Fprintf(os.Stderr, "%s: ", pos) } fmt.Fprintf(os.Stderr, msg, args) fmt.Fprintf(os.Stderr, "\n") }
// Obtain a (single) token position before the next comment. // Use this function to correct a token position such that the // token is placed before the next comment (which may be a line // comment introducing a newline and possibly introducing a // semicolon). Use moveCommentsAfter() to move a comment past // more than a single token. beforeComment() is preferable if // if can be used as it produces better results. // // Remove this after transitioning to new semicolon syntax and // some reasonable grace period (12/11/09). func (p *printer) beforeComment(pos token.Position) token.Position { if p.comment != nil { p := p.comment.List[0].Position if !pos.IsValid() || pos.Offset > p.Offset { return p } } return pos }
// writeString writes the string s to p.output and updates p.pos, p.out,
// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
// to protect s from being interpreted by the tabwriter.
//
// Note: writeString is only used to write Go tokens, literals, and
// comments, all of which must be written literally. Thus, it is correct
// to always set isLit = true. However, setting it explicitly only when
// needed (i.e., when we don't know that s contains no tabs or line breaks)
// avoids processing extra escape characters and reduces run time of the
// printer benchmark by up to 10%.
//
func (p *printer) writeString(pos token.Position, s string, isLit bool) {
	if p.out.Column == 1 {
		// at line start: emit //line directive and indentation first
		p.atLineBegin(pos)
	}

	if pos.IsValid() {
		// update p.pos (if pos is invalid, continue with existing p.pos)
		// Note: Must do this after handling line beginnings because
		// atLineBegin updates p.pos if there's indentation, but p.pos
		// is the position of s.
		p.pos = pos
	}

	if isLit {
		// Protect s such that is passes through the tabwriter
		// unchanged. Note that valid Go programs cannot contain
		// tabwriter.Escape bytes since they do not appear in legal
		// UTF-8 sequences.
		p.output = append(p.output, tabwriter.Escape)
	}

	if debug {
		p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos!
	}
	p.output = append(p.output, s...)

	// update positions
	nlines := 0
	var li int // index of last newline; valid if nlines > 0
	for i := 0; i < len(s); i++ {
		// Go tokens cannot contain '\f' - no need to look for it
		if s[i] == '\n' {
			nlines++
			li = i
		}
	}
	p.pos.Offset += len(s)
	if nlines > 0 {
		p.pos.Line += nlines
		p.out.Line += nlines
		// column restarts after the last newline (1-based)
		c := len(s) - li
		p.pos.Column = c
		p.out.Column = c
	} else {
		p.pos.Column += len(s)
		p.out.Column += len(s)
	}

	if isLit {
		p.output = append(p.output, tabwriter.Escape)
	}

	p.last = p.pos
}
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source: concatenate all token literals, separated by whitespace
	var src string
	for _, e := range tokens {
		src += e.lit + whitespace
	}
	src_linecount := newlineCount(src) + 1
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &testErrorHandler{t}, ScanComments)
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	for {
		pos, tok, litb := s.Scan()
		// past the last expected token we expect EOF
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
		}
		lit := string(litb)
		if tok == token.EOF {
			lit = "<EOF>"
			epos.Line = src_linecount
			epos.Column = 1
		}
		checkPos(t, lit, pos, epos)
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
		}
		if e.tok.IsLiteral() && lit != e.lit {
			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
		}
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}
		// advance the expected position past this literal and the separator
		epos.Offset += len(lit) + len(whitespace)
		epos.Line += newlineCount(lit) + whitespace_linecount
		if tok == token.COMMENT && litb[1] == '/' {
			// correct for unaccounted '\n' in //-style comment
			epos.Offset++
			epos.Line++
		}
		index++
		if tok == token.EOF {
			break
		}
	}
	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source: concatenate all token literals, separated by whitespace
	var src string
	for _, e := range tokens {
		src += e.lit + whitespace
	}
	src_linecount := newlineCount(src)
	whitespace_linecount := newlineCount(whitespace)

	// verify scan via the callback-based Tokenize API
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	nerrors := Tokenize("", []byte(src), &testErrorHandler{t}, ScanComments,
		func(pos token.Position, tok token.Token, litb []byte) bool {
			// past the last expected token we expect EOF
			e := elt{token.EOF, "", special}
			if index < len(tokens) {
				e = tokens[index]
			}
			lit := string(litb)
			if tok == token.EOF {
				lit = "<EOF>"
				epos.Line = src_linecount
				epos.Column = 1
			}
			checkPos(t, lit, pos, epos)
			if tok != e.tok {
				t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
			}
			if e.tok.IsLiteral() && lit != e.lit {
				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
			}
			if tokenclass(tok) != e.class {
				t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
			}
			// advance the expected position past this literal and the separator
			epos.Offset += len(lit) + len(whitespace)
			epos.Line += newlineCount(lit) + whitespace_linecount
			if tok == token.COMMENT && litb[1] == '/' {
				// correct for unaccounted '\n' in //-style comment
				epos.Offset++
				epos.Line++
			}
			index++
			return tok != token.EOF // keep scanning until EOF
		})
	if nerrors != 0 {
		t.Errorf("found %d errors", nerrors)
	}
}
// FIXME: arrange to put in ast func PositionRange(start token.Position, end token.Position) string { s := "" if start.IsValid() { s = start.Filename + ":" + PositionRangeSansFile(start, end) } else if end.IsValid() { s = "-" if end.Filename != "" { s += end.Filename + ":" } s += fmt.Sprintf("%d:%d", end.Line, end.Column) } if s == "" { s = "-" } return s }
// writeItem writes data at position pos. data is the text corresponding to
// a single lexical token, but may also be comment text. pos is the actual
// (or at least very accurately estimated) position of the data in the original
// source text. writeItem updates p.last to the position immediately following
// the data.
//
func (p *printer) writeItem(pos token.Position, data string, isLit bool) {
	if pos.IsValid() {
		// continue with previous position if we don't have a valid pos
		if p.last.IsValid() && p.last.Filename != pos.Filename {
			// the file has changed - reset state
			// (used when printing merged ASTs of different files
			// e.g., the result of ast.MergePackageFiles)
			p.indent = 0
			p.mode = 0
			p.wsbuf = p.wsbuf[0:0]
		}
		p.pos = pos
	}
	if debug {
		// do not update p.pos - use write0
		fmt.Fprintf(&p.output, "/*%s*/", pos)
	}
	p.writeString(data, isLit)
	p.last = p.pos
}
// writeItem writes data at position pos. data is the text corresponding to
// a single lexical token, but may also be comment text. pos is the actual
// (or at least very accurately estimated) position of the data in the original
// source text. writeItem updates p.last to the position immediately following
// the data.
//
func (p *printer) writeItem(pos token.Position, data string) {
	if pos.IsValid() {
		// continue with previous position if we don't have a valid pos
		if p.last.IsValid() && p.last.Filename != pos.Filename {
			// the file has changed - reset state
			// (used when printing merged ASTs of different files
			// e.g., the result of ast.MergePackageFiles)
			p.indent = 0
			p.mode = 0
			p.wsbuf = p.wsbuf[0:0]
		}
		p.pos = pos
	}
	if debug {
		// do not update p.pos - use write0
		_, filename := filepath.Split(pos.Filename)
		p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column)))
	}
	p.write([]byte(data))
	p.last = p.pos
}
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source
	src_linecount := newlineCount(string(source))
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, &testErrorHandler{t}, ScanComments|dontInsertSemis)
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	for {
		pos, tok, lit := s.Scan()
		if lit == "" {
			// no literal value for non-literal tokens
			lit = tok.String()
		}
		// past the last expected token we expect EOF
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
		}
		if tok == token.EOF {
			lit = "<EOF>"
			epos.Line = src_linecount
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
		}
		if e.tok.IsLiteral() {
			// no CRs in raw string literals
			elit := e.lit
			if elit[0] == '`' {
				elit = string(stripCR([]byte(elit)))
				epos.Offset += len(e.lit) - len(lit) // correct position
			}
			if lit != elit {
				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
			}
		}
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}
		// advance the expected position past this literal and the separator
		epos.Offset += len(lit) + len(whitespace)
		epos.Line += newlineCount(lit) + whitespace_linecount
		if tok == token.COMMENT && lit[1] == '/' {
			// correct for unaccounted '\n' in //-style comment
			epos.Offset++
			epos.Line++
		}
		index++
		if tok == token.EOF {
			break
		}
	}
	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
// atLineBegin emits a //line comment if necessary and prints indentation.
func (p *printer) atLineBegin(pos token.Position) {
	// write a //line comment if necessary
	if p.Config.Mode&SourcePos != 0 && pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) {
		p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation
		p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...)
		p.output = append(p.output, tabwriter.Escape)
		// p.out must match the //line comment
		p.out.Filename = pos.Filename
		p.out.Line = pos.Line
	}

	// write indentation
	// use "hard" htabs - indentation columns
	// must not be discarded by the tabwriter
	for i := 0; i < p.indent; i++ {
		p.output = append(p.output, '\t')
	}

	// update positions
	i := p.indent
	p.pos.Offset += i
	p.pos.Column += i
	p.out.Column += i
}
// parsePattern parses a lexer rule at pos. src holds the rule text; stack
// tracks named sub-pattern expansion (cycle detection - TODO confirm against
// pat implementation). It returns the consumed pattern text, the translated
// regular expression, the action text following the pattern (if any), and
// whether the pattern is anchored at beginning/end of line. Internal parse
// errors are raised via panic and reported by the deferred recover handler.
func parsePattern(pos token.Position, src string, stack map[string]bool) (pattern, re, action string, bol, eol bool) {
	p := &pat{src: src, re: bytes.NewBuffer(nil), stack: stack}
	defer func() {
		if e := recover(); e != nil {
			// point the reported column at the spot where parsing stopped
			pos.Column += p.pos
			logErr(fmt.Sprintf(`%s - "%s^%s" - %s`, pos, src[:p.pos], src[p.pos:], e.(error)))
		}
	}()
	p.parseExpr(0)
	pattern, re = src[:p.pos], p.re.String()
	bol, eol = p.bol, p.eol
	switch b := p.current(); b {
	case 0:
		// end of input: pattern with no action
		return
	case ' ', '\t':
		// whitespace separates the pattern from its action
		p.move()
		action = src[p.pos:]
		return
	}
	panic(errors.New("syntax error"))
}
func PositionRangeSansFile(start token.Position, end token.Position) string { s := "" if start.IsValid() { s += fmt.Sprintf("%d:%d", start.Line, start.Column) if start.Filename == end.Filename && end.IsValid() { // this is what we expect if start.Line == end.Line { if start.Column != end.Column { s += fmt.Sprintf("-%d", end.Column) } } else { s += fmt.Sprintf("-%d:%d", end.Line, end.Column) } } } else if end.IsValid() { s = "-" s += fmt.Sprintf("%d:%d", end.Line, end.Column) } if s == "" { s = "-" } return s }
// Print a list of expressions. If the list spans multiple
// source lines, the original line breaks are respected between
// expressions. Sets multiLine to true if the list spans multiple
// lines.
func (p *printer) exprList(prev token.Position, list []ast.Expr, mode exprListMode, multiLine *bool) {
	if len(list) == 0 {
		return
	}

	if mode&blankStart != 0 {
		p.print(blank)
	}

	// TODO(gri): endLine may be incorrect as it is really the beginning
	//            of the last list entry. There may be only one, very long
	//            entry in which case line == endLine.
	line := list[0].Pos().Line
	endLine := list[len(list)-1].Pos().Line

	if prev.IsValid() && prev.Line == line && line == endLine {
		// all list entries on a single line
		for i, x := range list {
			if i > 0 {
				if mode&commaSep != 0 {
					p.print(token.COMMA)
				}
				p.print(blank)
			}
			p.expr(x, multiLine)
		}
		return
	}

	// list entries span multiple lines;
	// use source code positions to guide line breaks

	// don't add extra indentation if noIndent is set;
	// i.e., pretend that the first line is already indented
	ws := ignore
	if mode&noIndent == 0 {
		ws = indent
	}
	if prev.IsValid() && prev.Line < line && p.linebreak(line, 1, 2, ws, true) {
		ws = ignore
		*multiLine = true
	}

	for i, x := range list {
		prev := line
		line = x.Pos().Line
		if i > 0 {
			if mode&commaSep != 0 {
				p.print(token.COMMA)
			}
			if prev < line {
				// respect the original line break between entries
				if p.linebreak(line, 1, 2, ws, true) {
					ws = ignore
					*multiLine = true
				}
			} else {
				p.print(blank)
			}
		}
		p.expr(x, multiLine)
	}

	if mode&commaTerm != 0 {
		p.print(token.COMMA)
		if ws == ignore && mode&noIndent == 0 {
			// should always be indented here since we have a multi-line
			// expression list - be conservative and check anyway
			p.print(unindent)
		}
		p.print(formfeed) // terminating comma needs a line break to look good
	} else if ws == ignore && mode&noIndent == 0 {
		p.print(unindent)
	}
}
// NewCommentMap creates a new comment map by associating comment groups
// of the comments list with the nodes of the AST specified by node.
//
// A comment group g is associated with a node n if:
//
//   - g starts on the same line as n ends
//   - g starts on the line immediately following n, and there is
//     at least one empty line after g and before the next node
//   - g starts before n and is not associated to the node before n
//     via the previous rules
//
// NewCommentMap tries to associate a comment group to the "largest"
// node possible: For instance, if the comment is a line comment
// trailing an assignment, the comment is associated with the entire
// assignment rather than just the last operand in the assignment.
//
func NewCommentMap(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap {
	if len(comments) == 0 {
		return nil // no comments to map
	}

	cmap := make(CommentMap)

	// set up comment reader r
	tmp := make([]*CommentGroup, len(comments))
	copy(tmp, comments) // don't change incoming comments
	sortComments(tmp)
	r := commentListReader{fset: fset, list: tmp} // !r.eol() because len(comments) > 0
	r.next()

	// create node list in lexical order
	nodes := nodeList(node)
	nodes = append(nodes, nil) // append sentinel

	// set up iteration variables
	var (
		p     Node           // previous node
		pend  token.Position // end of p
		pg    Node           // previous node group (enclosing nodes of "importance")
		pgend token.Position // end of pg
		stack nodeStack      // stack of node groups
	)

	for _, q := range nodes {
		var qpos token.Position
		if q != nil {
			qpos = fset.Position(q.Pos()) // current node position
		} else {
			// set fake sentinel position to infinity so that
			// all comments get processed before the sentinel
			const infinity = 1 << 30
			qpos.Offset = infinity
			qpos.Line = infinity
		}

		// process comments before current node
		for r.end.Offset <= qpos.Offset {
			// determine recent node group
			if top := stack.pop(r.comment.Pos()); top != nil {
				pg = top
				pgend = fset.Position(pg.End())
			}
			// Try to associate a comment first with a node group
			// (i.e., a node of "importance" such as a declaration);
			// if that fails, try to associate it with the most recent
			// node.
			// TODO(gri) try to simplify the logic below
			var assoc Node
			switch {
			case pg != nil && (pgend.Line == r.pos.Line || pgend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line):
				// 1) comment starts on same line as previous node group ends, or
				// 2) comment starts on the line immediately after the
				//    previous node group and there is an empty line before
				//    the current node
				// => associate comment with previous node group
				assoc = pg
			case p != nil && (pend.Line == r.pos.Line || pend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line || q == nil):
				// same rules apply as above for p rather than pg,
				// but also associate with p if we are at the end (q == nil)
				assoc = p
			default:
				// otherwise, associate comment with current node
				if q == nil {
					// we can only reach here if there was no p
					// which would imply that there were no nodes
					panic("internal error: no comments should be associated with sentinel")
				}
				assoc = q
			}
			cmap.addComment(assoc, r.comment)
			if r.eol() {
				// all comments have been mapped
				return cmap
			}
			r.next()
		}

		// update previous node
		p = q
		pend = fset.Position(p.End())

		// update previous node group if we see an "important" node
		switch q.(type) {
		case *File, *Field, Decl, Spec, Stmt:
			stack.push(q)
		}
	}

	return cmap
}
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	whitespace_linecount := newlineCount(whitespace)

	// error handler: any scanner error fails the test
	eh := func(_ token.Position, msg string) {
		t.Errorf("error handler called (msg = %s)", msg)
	}

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)

	// set up expected position
	epos := token.Position{
		Filename: "",
		Offset:   0,
		Line:     1,
		Column:   1,
	}

	index := 0
	for {
		pos, tok, lit := s.Scan()

		// check position
		if tok == token.EOF {
			// correction for EOF
			epos.Line = newlineCount(string(source))
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)

		// check token
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
			index++
		}
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
		}

		// check token class
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}

		// check literal
		elit := ""
		switch e.tok {
		case token.COMMENT:
			// no CRs in comments
			elit = string(stripCR([]byte(e.lit)))
			//-style comment literal doesn't contain newline
			if elit[1] == '/' {
				elit = elit[0 : len(elit)-1]
			}
		case token.IDENT:
			elit = e.lit
		case token.SEMICOLON:
			elit = ";"
		default:
			if e.tok.IsLiteral() {
				// no CRs in raw string literals
				elit = e.lit
				if elit[0] == '`' {
					elit = string(stripCR([]byte(elit)))
				}
			} else if e.tok.IsKeyword() {
				elit = e.lit
			}
		}
		if lit != elit {
			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
		}

		if tok == token.EOF {
			break
		}

		// update position: advance past this literal and the separator
		epos.Offset += len(e.lit) + len(whitespace)
		epos.Line += newlineCount(e.lit) + whitespace_linecount
	}

	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
// writeCommentPrefix writes the whitespace before a comment.
// If there is any pending whitespace, it consumes as much of
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
// a group of comments (or nil), and tok is the next token.
//
func (p *printer) writeCommentPrefix(pos, next token.Position, prev, comment *ast.Comment, tok token.Token) {
	if len(p.output) == 0 {
		// the comment is the first item to be printed - don't write any whitespace
		return
	}

	if pos.IsValid() && pos.Filename != p.last.Filename {
		// comment in a different file - separate with newlines
		p.writeByte('\f', maxNewlines)
		return
	}

	if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
		// comment on the same line as last item:
		// separate with at least one separator
		hasSep := false
		if prev == nil {
			// first comment of a comment group
			j := 0
			for i, ch := range p.wsbuf {
				switch ch {
				case blank:
					// ignore any blanks before a comment
					p.wsbuf[i] = ignore
					continue
				case vtab:
					// respect existing tabs - important
					// for proper formatting of commented structs
					hasSep = true
					continue
				case indent:
					// apply pending indentation
					continue
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// make sure there is at least one separator
		if !hasSep {
			sep := byte('\t')
			if pos.Line == next.Line {
				// next item is on the same line as the comment
				// (which must be a /*-style comment): separate
				// with a blank instead of a tab
				sep = ' '
			}
			p.writeByte(sep, 1)
		}
	} else {
		// comment on a different line:
		// separate with at least one line break
		droppedLinebreak := false
		j := 0
		for i, ch := range p.wsbuf {
			switch ch {
			case blank, vtab:
				// ignore any horizontal whitespace before line breaks
				p.wsbuf[i] = ignore
				continue
			case indent:
				// apply pending indentation
				continue
			case unindent:
				// if this is not the last unindent, apply it
				// as it is (likely) belonging to the last
				// construct (e.g., a multi-line expression list)
				// and is not part of closing a block
				if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent {
					continue
				}
				// if the next token is not a closing }, apply the unindent
				// if it appears that the comment is aligned with the
				// token; otherwise assume the unindent is part of a
				// closing block and stop (this scenario appears with
				// comments before a case label where the comments
				// apply to the next case instead of the current one)
				if tok != token.RBRACE && pos.Column == next.Column {
					continue
				}
			case newline, formfeed:
				p.wsbuf[i] = ignore
				droppedLinebreak = prev == nil // record only if first comment of a group
			}
			j = i
			break
		}
		p.writeWhitespace(j)

		// determine number of linebreaks before the comment
		n := 0
		if pos.IsValid() && p.last.IsValid() {
			n = pos.Line - p.last.Line
			if n < 0 { // should never happen
				n = 0
			}
		}

		// at the package scope level only (p.indent == 0),
		// add an extra newline if we dropped one before:
		// this preserves a blank line before documentation
		// comments at the package scope level (issue 2570)
		if p.indent == 0 && droppedLinebreak {
			n++
		}

		// make sure there is at least one line break
		// if the previous comment was a line comment
		if n == 0 && prev != nil && prev.Text[1] == '/' {
			n = 1
		}

		if n > 0 {
			// use formfeeds to break columns before a comment;
			// this is analogous to using formfeeds to separate
			// individual lines of /*-style comments
			p.writeByte('\f', nlimit(n))
		}
	}
}
func (s *byteScanner) error(pos token.Position, msg string) { s.ErrorCount++ if !s.Quiet { fmt.Fprintf(os.Stderr, "robotstxt from %s: %s\n", pos.String(), msg) } }
// writeCommentPrefix writes the whitespace before a comment.
// If there is any pending whitespace, it consumes as much of
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
// a group of comments (or nil), and isKeyword indicates if the
// next item is a keyword.
//
func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, isKeyword bool) {
	if p.written == 0 {
		// the comment is the first item to be printed - don't write any whitespace
		return
	}

	if pos.IsValid() && pos.Filename != p.last.Filename {
		// comment in a different file - separate with newlines (writeNewlines will limit the number)
		p.writeNewlines(10, true)
		return
	}

	if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
		// comment on the same line as last item:
		// separate with at least one separator
		hasSep := false
		if prev == nil {
			// first comment of a comment group
			j := 0
			for i, ch := range p.wsbuf {
				switch ch {
				case blank:
					// ignore any blanks before a comment
					p.wsbuf[i] = ignore
					continue
				case vtab:
					// respect existing tabs - important
					// for proper formatting of commented structs
					hasSep = true
					continue
				case indent:
					// apply pending indentation
					continue
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// make sure there is at least one separator
		if !hasSep {
			if pos.Line == next.Line {
				// next item is on the same line as the comment
				// (which must be a /*-style comment): separate
				// with a blank instead of a tab
				p.write([]byte{' '})
			} else {
				p.write(htab)
			}
		}
	} else {
		// comment on a different line:
		// separate with at least one line break
		if prev == nil {
			// first comment of a comment group
			j := 0
			for i, ch := range p.wsbuf {
				switch ch {
				case blank, vtab:
					// ignore any horizontal whitespace before line breaks
					p.wsbuf[i] = ignore
					continue
				case indent:
					// apply pending indentation
					continue
				case unindent:
					// if the next token is a keyword, apply the outdent
					// if it appears that the comment is aligned with the
					// keyword; otherwise assume the outdent is part of a
					// closing block and stop (this scenario appears with
					// comments before a case label where the comments
					// apply to the next case instead of the current one)
					if isKeyword && pos.Column == next.Column {
						continue
					}
				case newline, formfeed:
					// TODO(gri): may want to keep formfeed info in some cases
					p.wsbuf[i] = ignore
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// use formfeeds to break columns before a comment;
		// this is analogous to using formfeeds to separate
		// individual lines of /*-style comments - but make
		// sure there is at least one line break if the previous
		// comment was a line comment
		n := pos.Line - p.last.Line // if !pos.IsValid(), pos.Line == 0, and n will be 0
		if n <= 0 && prev != nil && prev.Text[1] == '/' {
			n = 1
		}
		p.writeNewlines(n, true)
	}
}
func slashify(posn token.Position) token.Position { posn.Filename = filepath.ToSlash(posn.Filename) // for MS Windows portability return posn }
func MutationID(pos token.Position) string { pos.Filename = filepath.Base(pos.Filename) return pos.String() }
// Print a list of expressions. If the list spans multiple
// source lines, the original line breaks are respected between
// expressions. Sets multiLine to true if the list spans multiple
// lines.
func (p *printer) exprList(prev token.Position, list []ast.Expr, depth int, mode exprListMode, multiLine *bool, next token.Position) {
	if len(list) == 0 {
		return
	}

	if mode&blankStart != 0 {
		p.print(blank)
	}

	line := list[0].Pos().Line
	endLine := next.Line
	if endLine == 0 {
		// TODO(gri): endLine may be incorrect as it is really the beginning
		//            of the last list entry. There may be only one, very long
		//            entry in which case line == endLine.
		endLine = list[len(list)-1].Pos().Line
	}

	if prev.IsValid() && prev.Line == line && line == endLine {
		// all list entries on a single line
		for i, x := range list {
			if i > 0 {
				if mode&plusSep != 0 {
					p.print(blank, token.ADD)
				}
				if mode&commaSep != 0 {
					p.print(token.COMMA)
				}
				p.print(blank)
			}
			p.expr0(x, depth, multiLine)
		}
		if mode&blankEnd != 0 {
			p.print(blank)
		}
		return
	}

	// list entries span multiple lines;
	// use source code positions to guide line breaks

	// don't add extra indentation if noIndent is set;
	// i.e., pretend that the first line is already indented
	ws := ignore
	if mode&noIndent == 0 {
		ws = indent
	}
	if prev.IsValid() && prev.Line < line && p.linebreak(line, 1, 2, ws, true) {
		ws = ignore
		*multiLine = true
	}

	for i, x := range list {
		prev := line
		line = x.Pos().Line
		if i > 0 {
			if mode&plusSep != 0 {
				p.print(blank, p.beforeComment(noPos), token.ADD)
			}
			if mode&commaSep != 0 {
				p.print(token.COMMA)
			}
			if prev < line && prev > 0 && line > 0 {
				// respect the original line break between entries
				if p.linebreak(line, 1, 2, ws, true) {
					ws = ignore
					*multiLine = true
				}
			} else {
				p.print(blank)
			}
		}
		p.expr0(x, depth, multiLine)
	}

	if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line {
		// print a terminating comma if the next token is on a new line
		p.print(token.COMMA)
		if ws == ignore && mode&noIndent == 0 {
			// unindent if we indented
			p.print(unindent)
		}
		p.print(formfeed) // terminating comma needs a line break to look good
		return
	}

	if mode&blankEnd != 0 {
		p.print(blank)
	}

	if ws == ignore && mode&noIndent == 0 {
		// unindent if we indented
		p.print(unindent)
	}
}
// Print a list of expressions. If the list spans multiple
// source lines, the original line breaks are respected between
// expressions. Sets multiLine to true if the list spans multiple
// lines.
//
// TODO(gri) Consider rewriting this to be independent of []ast.Expr
//           so that we can use the algorithm for any kind of list
//           (e.g., pass list via a channel over which to range).
func (p *printer) exprList(prev token.Position, list []ast.Expr, depth int, mode exprListMode, multiLine *bool, next token.Position) {
	if len(list) == 0 {
		return
	}

	if mode&blankStart != 0 {
		p.print(blank)
	}

	line := list[0].Pos().Line
	endLine := next.Line
	if endLine == 0 {
		// TODO(gri): endLine may be incorrect as it is really the beginning
		//            of the last list entry. There may be only one, very long
		//            entry in which case line == endLine.
		endLine = list[len(list)-1].Pos().Line
	}

	if prev.IsValid() && prev.Line == line && line == endLine {
		// all list entries on a single line
		for i, x := range list {
			if i > 0 {
				if mode&commaSep != 0 {
					p.print(token.COMMA)
				}
				p.print(blank)
			}
			p.expr0(x, depth, multiLine)
		}
		if mode&blankEnd != 0 {
			p.print(blank)
		}
		return
	}

	// list entries span multiple lines;
	// use source code positions to guide line breaks

	// don't add extra indentation if noIndent is set;
	// i.e., pretend that the first line is already indented
	ws := ignore
	if mode&noIndent == 0 {
		ws = indent
	}

	// the first linebreak is always a formfeed since this section must not
	// depend on any previous formatting
	prevBreak := -1 // index of last expression that was followed by a linebreak
	linebreakMin := 1
	if mode&periodSep != 0 {
		// Make fragments like
		//
		// a.Bar(1,
		//   2).Foo
		//
		// format correctly (a linebreak shouldn't be added before Foo) when
		// doing period-separated expr lists by setting minimum linebreak to 0
		// lines for them.
		linebreakMin = 0
	}
	if prev.IsValid() && prev.Line < line && p.linebreak(line, linebreakMin, ws, true) {
		ws = ignore
		*multiLine = true
		prevBreak = 0
	}

	// initialize expression/key size: a zero value indicates expr/key doesn't fit on a single line
	size := 0

	// print all list elements
	for i, x := range list {
		prevLine := line
		line = x.Pos().Line

		// determine if the next linebreak, if any, needs to use formfeed:
		// in general, use the entire node size to make the decision; for
		// key:value expressions, use the key size
		// TODO(gri) for a better result, should probably incorporate both
		//           the key and the node size into the decision process
		useFF := true

		// determine size
		prevSize := size
		const infinity = 1e6 // larger than any source line
		size = p.nodeSize(x, infinity)
		pair, isPair := x.(*ast.KeyValueExpr)
		if size <= infinity {
			// x fits on a single line
			if isPair {
				size = p.nodeSize(pair.Key, infinity) // size <= infinity
			}
		} else {
			size = 0
		}

		// if the previous line and the current line had single-
		// line-expressions and the key sizes are small or the
		// ratio between the key sizes does not exceed a
		// threshold, align columns and do not use formfeed
		if prevSize > 0 && size > 0 {
			const smallSize = 20
			if prevSize <= smallSize && size <= smallSize {
				useFF = false
			} else {
				const r = 4 // threshold
				ratio := float(size) / float(prevSize)
				useFF = ratio <= 1/r || r <= ratio
			}
		}

		if i > 0 {
			if mode&commaSep != 0 {
				p.print(token.COMMA)
			}
			if mode&periodSep != 0 {
				p.print(token.PERIOD)
			}
			if prevLine < line && prevLine > 0 && line > 0 {
				// lines are broken using newlines so comments remain aligned
				// unless forceFF is set or there are multiple expressions on
				// the same line in which case formfeed is used
				// broken with a formfeed
				if p.linebreak(line, linebreakMin, ws, useFF || prevBreak+1 < i) {
					ws = ignore
					*multiLine = true
					prevBreak = i
				}
			} else if mode&periodSep == 0 {
				p.print(blank)
			}
			// period-separated list elements don't need a blank
		}

		if isPair && size > 0 && len(list) > 1 {
			// we have a key:value expression that fits onto one line and
			// is in a list with more than one entry: use a column for the
			// key such that consecutive entries can align if possible
			p.expr(pair.Key, multiLine)
			p.print(pair.Colon, token.COLON, vtab)
			p.expr(pair.Value, multiLine)
		} else {
			p.expr0(x, depth, multiLine)
		}
	}

	if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line {
		// print a terminating comma if the next token is on a new line
		p.print(token.COMMA)
		if ws == ignore && mode&noIndent == 0 {
			// unindent if we indented
			p.print(unindent)
		}
		p.print(formfeed) // terminating comma needs a line break to look good
		return
	}

	if mode&blankEnd != 0 {
		p.print(blank)
	}

	if ws == ignore && mode&noIndent == 0 {
		// unindent if we indented
		p.print(unindent)
	}
}
// writeCommentPrefix writes the whitespace before a comment.
// If there is any pending whitespace, it consumes as much of
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
// a group of comments (or nil), comment is the comment about to
// be written, and isKeyword indicates if the next item is a keyword.
//
func (p *printer) writeCommentPrefix(pos, next token.Position, prev, comment *ast.Comment, isKeyword bool) {
	if p.output.Len() == 0 {
		// the comment is the first item to be printed - don't write any whitespace
		return
	}

	if pos.IsValid() && pos.Filename != p.last.Filename {
		// comment in a different file - separate with newlines
		p.writeByteN('\f', maxNewlines)
		return
	}

	// prev.Text[1] != '/' means the previous comment in the group was
	// a /*-style comment; only then can this comment share its line.
	if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
		// comment on the same line as last item:
		// separate with at least one separator
		hasSep := false
		if prev == nil {
			// first comment of a comment group:
			// consume pending horizontal whitespace up to (excluding)
			// the first entry that must survive; j indexes that entry
			j := 0
			for i, ch := range p.wsbuf {
				switch ch {
				case blank:
					// ignore any blanks before a comment
					p.wsbuf[i] = ignore
					continue
				case vtab:
					// respect existing tabs - important
					// for proper formatting of commented structs
					hasSep = true
					continue
				case indent:
					// apply pending indentation
					continue
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// make sure there is at least one separator
		if !hasSep {
			sep := byte('\t')
			if pos.Line == next.Line {
				// next item is on the same line as the comment
				// (which must be a /*-style comment): separate
				// with a blank instead of a tab
				sep = ' '
			}
			p.writeByte(sep)
		}

	} else {
		// comment on a different line:
		// separate with at least one line break
		droppedLinebreak := false
		if prev == nil {
			// first comment of a comment group:
			// drop horizontal whitespace and record any line break we
			// consume so it can be re-added below at package scope
			j := 0
			for i, ch := range p.wsbuf {
				switch ch {
				case blank, vtab:
					// ignore any horizontal whitespace before line breaks
					p.wsbuf[i] = ignore
					continue
				case indent:
					// apply pending indentation
					continue
				case unindent:
					// if the next token is a keyword, apply the outdent
					// if it appears that the comment is aligned with the
					// keyword; otherwise assume the outdent is part of a
					// closing block and stop (this scenario appears with
					// comments before a case label where the comments
					// apply to the next case instead of the current one)
					if isKeyword && pos.Column == next.Column {
						continue
					}
				case newline, formfeed:
					// TODO(gri): may want to keep formfeed info in some cases
					p.wsbuf[i] = ignore
					droppedLinebreak = true
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}

		// determine number of linebreaks before the comment
		n := 0
		if pos.IsValid() && p.last.IsValid() {
			n = pos.Line - p.last.Line
			if n < 0 { // should never happen
				n = 0
			}
		}

		// at the package scope level only (p.indent == 0),
		// add an extra newline if we dropped one before:
		// this preserves a blank line before documentation
		// comments at the package scope level (issue 2570)
		if p.indent == 0 && droppedLinebreak {
			n++
		}

		// make sure there is at least one line break
		// if the previous comment was a line comment
		if n == 0 && prev != nil && prev.Text[1] == '/' {
			n = 1
		}

		if n > 0 {
			// turn off indent if we're about to print a line directive
			indent := p.indent
			if strings.HasPrefix(comment.Text, linePrefix) {
				p.indent = 0
			}
			// use formfeeds to break columns before a comment;
			// this is analogous to using formfeeds to separate
			// individual lines of /*-style comments
			p.writeByteN('\f', nlimit(n))
			p.indent = indent // restore indent
		}
	}
}
// distance returns the column difference between from and to if both // are on the same line; if they are on different lines (or unknown) // the result is infinity. func distance(from, to token.Position) int { if from.IsValid() && to.IsValid() && from.Line == to.Line { return to.Column - from.Column } return infinity }
// writeString writes the string s to p.output and updates p.pos, p.out,
// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
// to protect s from being interpreted by the tabwriter. sType selects the
// HTML wrapper (token/literal/builtin/comment) used when p.HTML is set.
//
// Note: writeString is only used to write Go tokens, literals, and
// comments, all of which must be written literally. Thus, it is correct
// to always set isLit = true. However, setting it explicitly only when
// needed (i.e., when we don't know that s contains no tabs or line breaks)
// avoids processing extra escape characters and reduces run time of the
// printer benchmark by up to 10%.
//
func (p *printer) writeString(pos token.Position, s string, isLit bool, sType int) {
	if p.out.Column == 1 {
		p.atLineBegin(pos)
	}

	if pos.IsValid() {
		// update p.pos (if pos is invalid, continue with existing p.pos)
		// Note: Must do this after handling line beginnings because
		// atLineBegin updates p.pos if there's indentation, but p.pos
		// is the position of s.
		p.pos = pos
	}

	if isLit {
		// Protect s such that is passes through the tabwriter
		// unchanged. Note that valid Go programs cannot contain
		// tabwriter.Escape bytes since they do not appear in legal
		// UTF-8 sequences.
		p.output = append(p.output, tabwriter.Escape)
	}

	if debug {
		p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos!
	}
	if p.HTML {
		// HTML mode: escape s and surround it with the tag bytes
		// selected for its token type (nil wrap means no tags).
		es := html.EscapeString(s)
		var wrap []byte
		switch sType {
		case typeLiteral:
			wrap = literalWrap
		case typeToken:
			if len(s) > 2 {
				// Don't color small tokens like * { or [
				wrap = tokenWrap
			}
		case typeBuiltin:
			wrap = builtinWrap
		case typeComment:
			wrap = commentWrap
			if p.Linker != nil {
				// let the Linker turn references in comment text into links
				es = p.Linker.Linkify(es, p.comment)
			}
			// Disabled for now, cause output problems. Handled via RE for now.
			/*
				case typeConst:
					wrap = constWrap
				case typeVar:
					wrap = varWrap
			*/
		}
		if wrap != nil {
			p.output = append(p.output, wrap...)
			// NOTE(review): while the typeConst/typeVar switch cases above
			// are commented out, wrap stays nil for those types and this
			// branch is unreachable; if re-enabled, es would be emitted
			// twice here (once with closeWrap, once with endWrap) — verify
			// that is the intended output shape.
			if sType == typeConst || sType == typeVar {
				p.output = append(p.output, es...)
				p.output = append(p.output, closeWrap...)
			}
			p.output = append(p.output, es...)
			p.output = append(p.output, endWrap...)
		} else {
			p.output = append(p.output, es...)
		}
	} else {
		p.output = append(p.output, s...)
	}

	// update positions
	nlines := 0
	var li int // index of last newline; valid if nlines > 0
	for i := 0; i < len(s); i++ {
		// Go tokens cannot contain '\f' - no need to look for it
		if s[i] == '\n' {
			nlines++
			li = i
		}
	}
	p.pos.Offset += len(s)
	if nlines > 0 {
		p.pos.Line += nlines
		p.out.Line += nlines
		// column (1-based) restarts after the last newline in s
		c := len(s) - li
		p.pos.Column = c
		p.out.Column = c
	} else {
		p.pos.Column += len(s)
		p.out.Column += len(s)
	}

	if isLit {
		p.output = append(p.output, tabwriter.Escape)
	}

	p.last = p.pos
}