func (p *printer) writeComment(comment *ast.Comment) {
	text := comment.Text

	// shortcut common case of //-style comments
	if text[1] == '/' {
		p.writeItem(p.fset.Position(comment.Pos()), p.escape(text))
		return
	}

	// for /*-style comments, print line by line and let the
	// write function take care of the proper indentation
	lines := split([]byte(text))
	stripCommonPrefix(lines)

	// write comment lines, separated by formfeed,
	// without a line break after the last line
	linebreak := formfeeds[0:1]
	pos := p.fset.Position(comment.Pos())
	for i, line := range lines {
		if i > 0 {
			p.write(linebreak)
			pos = p.pos
		}
		if len(line) > 0 {
			p.writeItem(pos, p.escape(string(line)))
		}
	}
}
// intersperseComments consumes all comments that appear before the next token
// tok and prints it together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
// the comments and whitespace. The intersperseComments result indicates if a
// newline was written or if a formfeed was dropped from the whitespace buffer.
//
func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
	var last *ast.Comment
	for p.commentBefore(next) {
		for _, c := range p.comment.List {
			p.writeCommentPrefix(p.posFor(c.Pos()), next, last, c, tok)
			p.writeComment(c)
			last = c
		}
		p.nextComment()
	}

	if last != nil {
		// if the last comment is a /*-style comment and the next item
		// follows on the same line but is not a comma or a "closing"
		// token, add an extra blank for separation
		if last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line && tok != token.COMMA &&
			tok != token.RPAREN && tok != token.RBRACK && tok != token.RBRACE {
			p.writeByte(' ', 1)
		}
		// ensure that there is a line break after a //-style comment,
		// before a closing '}' unless explicitly disabled, or at eof
		needsLinebreak :=
			last.Text[1] == '/' ||
				tok == token.RBRACE && p.mode&noExtraLinebreak == 0 ||
				tok == token.EOF
		return p.writeCommentSuffix(needsLinebreak)
	}

	// no comment was written - we should never reach here since
	// intersperseComments should not be called in that case
	p.internalError("intersperseComments called without pending comments")
	return
}
// intersperseComments consumes all comments that appear before the next token
// tok and prints it together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
// the comments and whitespace. intersperseComments returns true if a pending
// formfeed was dropped from the whitespace buffer.
//
func (p *printer) intersperseComments(next token.Position, tok token.Token) (droppedFF bool) {
	var last *ast.Comment
	for ; p.commentBefore(next); p.cindex++ {
		for _, c := range p.comments[p.cindex].List {
			p.writeCommentPrefix(p.fset.Position(c.Pos()), next, last, tok.IsKeyword())
			p.writeComment(c)
			last = c
		}
	}

	if last != nil {
		if last.Text[1] == '*' && p.fset.Position(last.Pos()).Line == next.Line {
			// the last comment is a /*-style comment and the next item
			// follows on the same line: separate with an extra blank
			p.write([]byte{' '})
		}
		// ensure that there is a line break after a //-style comment,
		// before a closing '}' unless explicitly disabled, or at eof
		needsLinebreak :=
			last.Text[1] == '/' ||
				tok == token.RBRACE && p.mode&noExtraLinebreak == 0 ||
				tok == token.EOF
		return p.writeCommentSuffix(needsLinebreak)
	}

	// no comment was written - we should never reach here since
	// intersperseComments should not be called in that case
	p.internalError("intersperseComments called without pending comments")
	return false
}
// intersperseComments consumes all comments that appear before the next token
// tok and prints it together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
// the comments and whitespace. intersperseComments returns true if a pending
// formfeed was dropped from the whitespace buffer.
//
func (p *printer) intersperseComments(next token.Position, tok token.Token) (droppedFF bool) {
	var last *ast.Comment
	for ; p.commentBefore(next); p.cindex++ {
		for _, c := range p.comments[p.cindex].List {
			p.writeCommentPrefix(c.Pos(), next, last == nil, tok.IsKeyword())
			p.writeComment(c)
			last = c
		}
	}

	if last != nil {
		if last.Text[1] == '*' && last.Pos().Line == next.Line {
			// the last comment is a /*-style comment and the next item
			// follows on the same line: separate with an extra blank
			p.write([]byte{' '})
		}
		// ensure that there is a newline after a //-style comment
		// or if we are before a closing '}' or at the end of a file
		return p.writeCommentSuffix(last.Text[1] == '/' || tok == token.RBRACE || tok == token.EOF)
	}

	// no comment was written - we should never reach here since
	// intersperseComments should not be called in that case
	p.internalError("intersperseComments called without pending comments")
	return false
}
func TestRSTComment(t *testing.T) {
	tests := []struct {
		comment  string
		expected bool
	}{
		{"//+rst", true},
		{"// +rst", true},
		{"// +rst\n", true},
		{"/*+rst*/", true},
		{"/* +rst */", true},
		{"/* +rst */", true},
		{"/*\n+rst */", true},
		{"/* +rst\nfoo */", true},
		{"/* foo\n+rst */", false},
		{"// rst", false},
	}

	for i, test := range tests {
		var c ast.Comment
		c.Text = test.comment
		cgrp := &ast.CommentGroup{List: []*ast.Comment{&c}}
		_, ok := rstComment(cgrp)
		if ok != test.expected {
			t.Fatalf("Iteration %d: Expected %t, got %t: %q", i, test.expected, ok, test.comment)
		}
	}
}
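The rstComment implementation exercised by this test is not shown here. For context only, the standalone sketch below demonstrates the standard go/ast CommentGroup.Text helper, which strips comment markers and is a common building block for marker checks of this kind; it is not how rstComment itself is implemented.

package main

import (
	"fmt"
	"go/ast"
)

func main() {
	// CommentGroup.Text drops the comment markers ("//", "/*", "*/"),
	// the first space of a line comment, and trailing blank lines.
	cg := &ast.CommentGroup{List: []*ast.Comment{{Text: "// +rst"}}}
	fmt.Printf("%q\n", cg.Text()) // prints "+rst\n"
}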
func ascg(txt string) *ast.CommentGroup {
	var cg ast.CommentGroup
	for _, line := range strings.Split(txt, "\n") {
		var cmt ast.Comment
		cmt.Text = "// " + line
		cg.List = append(cg.List, &cmt)
	}
	return &cg
}
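A brief usage sketch of the helper above. The demoASCG function and its input are illustrative only (not part of the original test file), and an fmt import is assumed to be available in the same package.

// demoASCG is a hypothetical caller showing what ascg produces: each input
// line becomes one //-style *ast.Comment in the returned group.
func demoASCG() {
	cg := ascg("first line\nsecond line")
	for _, c := range cg.List {
		fmt.Println(c.Text) // prints "// first line", then "// second line"
	}
}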
func (p *printer) writeComment(comment *ast.Comment) {
	text := comment.Text
	pos := p.posFor(comment.Pos())

	const linePrefix = "//line "
	if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) {
		// possibly a line directive
		ldir := strings.TrimSpace(text[len(linePrefix):])
		if i := strings.LastIndex(ldir, ":"); i >= 0 {
			if line, err := strconv.Atoi(ldir[i+1:]); err == nil && line > 0 {
				// The line directive we are about to print changed
				// the Filename and Line number used for subsequent
				// tokens. We have to update our AST-space position
				// accordingly and suspend indentation temporarily.
				indent := p.indent
				p.indent = 0
				defer func() {
					p.pos.Filename = ldir[:i]
					p.pos.Line = line
					p.pos.Column = 1
					p.indent = indent
				}()
			}
		}
	}

	// shortcut common case of //-style comments
	if text[1] == '/' {
		p.writeString(pos, text, true)
		return
	}

	// for /*-style comments, print line by line and let the
	// write function take care of the proper indentation
	lines := split(text)
	stripCommonPrefix(lines)

	// write comment lines, separated by formfeed,
	// without a line break after the last line
	for i, line := range lines {
		if i > 0 {
			p.writeByte('\f', 1)
			pos = p.pos
		}
		if len(line) > 0 {
			p.writeString(pos, line, true)
		}
	}
}
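The directive parsing above can be exercised in isolation. The sketch below mirrors only the string handling for a "//line file:N" comment; the filename and line number are made-up examples, and none of the printer's position bookkeeping is reproduced.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	const linePrefix = "//line "
	text := "//line /tmp/gen.go:42" // hypothetical directive; any "//line <file>:<line>" comment in column 1 qualifies
	ldir := strings.TrimSpace(text[len(linePrefix):])
	if i := strings.LastIndex(ldir, ":"); i >= 0 {
		if line, err := strconv.Atoi(ldir[i+1:]); err == nil && line > 0 {
			// split into the filename part and the numeric line part
			fmt.Println(ldir[:i], line) // /tmp/gen.go 42
		}
	}
}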
func (p *printer) writeComment(comment *ast.Comment) {
	text := comment.Text

	if strings.HasPrefix(text, linePrefix) {
		pos := strings.TrimSpace(text[len(linePrefix):])
		i := strings.LastIndex(pos, ":")
		if i >= 0 {
			// The line directive we are about to print changed
			// the Filename and Line number used by go/token
			// as it was reading the input originally.
			// In order to match the original input, we have to
			// update our own idea of the file and line number
			// accordingly, after printing the directive.
			file := pos[:i]
			line, _ := strconv.Atoi(pos[i+1:])
			defer func() {
				p.pos.Filename = file
				p.pos.Line = line
				p.pos.Column = 1
			}()
		}
	}

	// shortcut common case of //-style comments
	if text[1] == '/' {
		p.writeItem(p.posFor(comment.Pos()), text, true)
		return
	}

	// for /*-style comments, print line by line and let the
	// write function take care of the proper indentation
	lines := split(text)
	stripCommonPrefix(lines)

	// write comment lines, separated by formfeed,
	// without a line break after the last line
	pos := p.posFor(comment.Pos())
	for i, line := range lines {
		if i > 0 {
			p.writeByte('\f')
			pos = p.pos
		}
		if len(line) > 0 {
			p.writeItem(pos, line, true)
		}
	}
}
// intersperseComments consumes all comments that appear before the next token
// tok and prints it together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
// the comments and whitespace. The intersperseComments result indicates if a
// newline was written or if a formfeed was dropped from the whitespace buffer.
//
func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
	var last *ast.Comment
	for p.commentBefore(next) {
		for _, c := range p.comment.List {
			p.writeCommentPrefix(p.posFor(c.Pos()), next, last, c, tok)
			p.writeComment(c)
			last = c
		}
		p.nextComment()
	}

	if last != nil {
		// If the last comment is a /*-style comment and the next item
		// follows on the same line but is not a comma, and not a "closing"
		// token immediately following its corresponding "opening" token,
		// add an extra separator unless explicitly disabled. Use a blank
		// as separator unless we have pending linebreaks and they are not
		// disabled, in which case we want a linebreak (issue 15137).
		needsLinebreak := false
		if p.mode&noExtraBlank == 0 &&
			last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line &&
			tok != token.COMMA &&
			(tok != token.RPAREN || p.prevOpen == token.LPAREN) &&
			(tok != token.RBRACK || p.prevOpen == token.LBRACK) {
			if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 {
				needsLinebreak = true
			} else {
				p.writeByte(' ', 1)
			}
		}
		// Ensure that there is a line break after a //-style comment,
		// before EOF, and before a closing '}' unless explicitly disabled.
		if last.Text[1] == '/' ||
			tok == token.EOF ||
			tok == token.RBRACE && p.mode&noExtraLinebreak == 0 {
			needsLinebreak = true
		}
		return p.writeCommentSuffix(needsLinebreak)
	}

	// no comment was written - we should never reach here since
	// intersperseComments should not be called in that case
	p.internalError("intersperseComments called without pending comments")
	return
}
// Rewrite rewrites files to the local path.
func (ctx *Context) rewrite() error {
	if !ctx.rewriteImports {
		return nil
	}
	if ctx.dirty {
		ctx.loadPackage()
	}
	ctx.dirty = true

	fileImports := make(map[string]map[string]*File) // map[ImportPath]map[FilePath]File
	for _, pkg := range ctx.Package {
		for _, f := range pkg.Files {
			for _, imp := range f.Imports {
				fileList := fileImports[imp]
				if fileList == nil {
					fileList = make(map[string]*File, 1)
					fileImports[imp] = fileList
				}
				fileList[f.Path] = f
			}
		}
	}
	filePaths := make(map[string]*File, len(ctx.RewriteRule))
	for from := range ctx.RewriteRule {
		for _, f := range fileImports[from] {
			filePaths[f.Path] = f
		}
	}

	/*
		RULE: co2/internal/co3/pk3 -> co1/internal/co3/pk3

		i co1/internal/co2/pk2 [co2/pk2] < ["co1/pk1"]
		i co1/internal/co3/pk3 [co3/pk3] < ["co1/pk1"]
		e co2/internal/co3/pk3 [co3/pk3] < ["co1/internal/co2/pk2"]
		l co1/pk1 < []
		s strings < ["co1/internal/co3/pk3" "co2/internal/co3/pk3"]

		Rewrite the package "co1/internal/co2/pk2" because it references a package with a rewrite.from package.
	*/
	ctx.updatePackageReferences()
	for from := range ctx.RewriteRule {
		pkg := ctx.Package[from]
		if pkg == nil {
			continue
		}
		for _, ref := range pkg.referenced {
			for _, f := range ref.Files {
				dprintf("REF RW %s\n", f.Path)
				filePaths[f.Path] = f
			}
		}
	}

	defer func() {
		ctx.RewriteRule = make(map[string]string, 3)
	}()

	if len(ctx.RewriteRule) == 0 {
		return nil
	}

	goprint := &printer.Config{
		Mode:     printer.TabIndent | printer.UseSpaces,
		Tabwidth: 8,
	}
	for _, fileInfo := range filePaths {
		if pathos.FileHasPrefix(fileInfo.Path, ctx.RootDir) == false {
			continue
		}

		// Read the file into AST, modify the AST.
		fileset := token.NewFileSet()
		f, err := parser.ParseFile(fileset, fileInfo.Path, nil, parser.ParseComments)
		if err != nil {
			return err
		}

		dprintf("RW:: File: %s\n", fileInfo.Path)

		for _, impNode := range f.Imports {
			imp, err := strconv.Unquote(impNode.Path.Value)
			if err != nil {
				return err
			}
			for from, to := range ctx.RewriteRule {
				if imp != from {
					continue
				}
				impNode.Path.Value = strconv.Quote(to)
				for i, metaImport := range fileInfo.Imports {
					if from == metaImport {
						dprintf("\tImport: %s -> %s\n", from, to)
						fileInfo.Imports[i] = to
					}
				}
				break
			}
		}

		// Remove import comment.
		st := fileInfo.Package.Status
		if st == StatusVendor || st == StatusUnused {
			var ic *ast.Comment
			if f.Name != nil {
				pos := f.Name.Pos()
			big:
				// Find the next comment after the package name.
				for _, cblock := range f.Comments {
					for _, c := range cblock.List {
						if c.Pos() > pos {
							ic = c
							break big
						}
					}
				}
			}
			if ic != nil {
				// If it starts with the import text, assume it is the import comment and remove.
				if index := strings.Index(ic.Text, " import "); index > 0 && index < 5 {
					ic.Text = strings.Repeat(" ", len(ic.Text))
				}
			}
		}

		// Don't sort or modify the imports to minimize diffs.

		// Write the AST back to disk.
		fi, err := os.Stat(fileInfo.Path)
		if err != nil {
			return err
		}
		w, err := safefile.Create(fileInfo.Path, fi.Mode())
		if err != nil {
			return err
		}
		err = goprint.Fprint(w, fileset, f)
		if err != nil {
			w.Close()
			return err
		}
		err = w.Commit()
		if err != nil {
			return err
		}
	}
	return nil
}
func (p *printer) writeComment(comment *ast.Comment) {
	text := comment.Text
	pos := p.posFor(comment.Pos())

	const linePrefix = "//line "
	if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) {
		// possibly a line directive
		ldir := strings.TrimSpace(text[len(linePrefix):])
		if i := strings.LastIndex(ldir, ":"); i >= 0 {
			if line, err := strconv.Atoi(ldir[i+1:]); err == nil && line > 0 {
				// The line directive we are about to print changed
				// the Filename and Line number used for subsequent
				// tokens. We have to update our AST-space position
				// accordingly and suspend indentation temporarily.
				indent := p.indent
				p.indent = 0
				defer func() {
					p.pos.Filename = ldir[:i]
					p.pos.Line = line
					p.pos.Column = 1
					p.indent = indent
				}()
			}
		}
	}

	// shortcut common case of //-style comments
	if text[1] == '/' {
		p.writeString(pos, trimRight(text), true)
		return
	}

	// for /*-style comments, print line by line and let the
	// write function take care of the proper indentation
	lines := strings.Split(text, "\n")

	// The comment started in the first column but is going
	// to be indented. For an idempotent result, add indentation
	// to all lines such that they look like they were indented
	// before - this will make sure the common prefix computation
	// is the same independent of how many times formatting is
	// applied (was issue 1835).
	if pos.IsValid() && pos.Column == 1 && p.indent > 0 {
		for i, line := range lines[1:] {
			lines[1+i] = "   " + line
		}
	}

	stripCommonPrefix(lines)

	// write comment lines, separated by formfeed,
	// without a line break after the last line
	for i, line := range lines {
		if i > 0 {
			p.writeByte('\f', 1)
			pos = p.pos
		}
		if len(line) > 0 {
			p.writeString(pos, trimRight(line), true)
		}
	}
}
func (rp *routesParser) Parse(gofile *ast.File, target interface{}) error {
	tgt := target.(*spec.Paths)
	for _, comsec := range gofile.Comments {
		// check if this is a route comment section
		var method, path, id string
		var tags []string
		var remaining *ast.CommentGroup
		var justMatched bool

		for _, cmt := range comsec.List {
			for _, line := range strings.Split(cmt.Text, "\n") {
				matches := rxRoute.FindStringSubmatch(line)
				if len(matches) > 3 && len(matches[3]) > 0 {
					method, path, id = matches[1], matches[2], matches[len(matches)-1]
					tags = rxSpace.Split(matches[3], -1)
					justMatched = true
				} else if method != "" {
					if remaining == nil {
						remaining = new(ast.CommentGroup)
					}
					if !justMatched || strings.TrimSpace(rxStripComments.ReplaceAllString(line, "")) != "" {
						cc := new(ast.Comment)
						cc.Slash = cmt.Slash
						cc.Text = line
						remaining.List = append(remaining.List, cc)
						justMatched = false
					}
				}
			}
		}

		if method == "" {
			continue // it's not, next!
		}

		pthObj := tgt.Paths[path]
		op := rp.operations[id]
		if op == nil {
			op = new(spec.Operation)
			op.ID = id
		}

		switch strings.ToUpper(method) {
		case "GET":
			if pthObj.Get != nil {
				if id == pthObj.Get.ID {
					op = pthObj.Get
				} else {
					pthObj.Get = op
				}
			} else {
				pthObj.Get = op
			}
		case "POST":
			if pthObj.Post != nil {
				if id == pthObj.Post.ID {
					op = pthObj.Post
				} else {
					pthObj.Post = op
				}
			} else {
				pthObj.Post = op
			}
		case "PUT":
			if pthObj.Put != nil {
				if id == pthObj.Put.ID {
					op = pthObj.Put
				} else {
					pthObj.Put = op
				}
			} else {
				pthObj.Put = op
			}
		case "PATCH":
			if pthObj.Patch != nil {
				if id == pthObj.Patch.ID {
					op = pthObj.Patch
				} else {
					pthObj.Patch = op
				}
			} else {
				pthObj.Patch = op
			}
		case "HEAD":
			if pthObj.Head != nil {
				if id == pthObj.Head.ID {
					op = pthObj.Head
				} else {
					pthObj.Head = op
				}
			} else {
				pthObj.Head = op
			}
		case "DELETE":
			if pthObj.Delete != nil {
				if id == pthObj.Delete.ID {
					op = pthObj.Delete
				} else {
					pthObj.Delete = op
				}
			} else {
				pthObj.Delete = op
			}
		case "OPTIONS":
			if pthObj.Options != nil {
				if id == pthObj.Options.ID {
					op = pthObj.Options
				} else {
					pthObj.Options = op
				}
			} else {
				pthObj.Options = op
			}
		}
		op.Tags = tags

		sp := new(sectionedParser)
		sp.setTitle = func(lines []string) { op.Summary = joinDropLast(lines) }
		sp.setDescription = func(lines []string) { op.Description = joinDropLast(lines) }
		sr := newSetResponses(rp.definitions, rp.responses, opResponsesSetter(op))
		sp.taggers = []tagParser{
			newMultiLineTagParser("Consumes", newMultilineDropEmptyParser(rxConsumes, opConsumesSetter(op))),
			newMultiLineTagParser("Produces", newMultilineDropEmptyParser(rxProduces, opProducesSetter(op))),
			newSingleLineTagParser("Schemes", newSetSchemes(opSchemeSetter(op))),
			newMultiLineTagParser("Security", newSetSecurityDefinitions(rxSecuritySchemes, opSecurityDefsSetter(op))),
			newMultiLineTagParser("Responses", sr),
		}

		if err := sp.Parse(remaining); err != nil {
			return fmt.Errorf("operation (%s): %v", op.ID, err)
		}

		if tgt.Paths == nil {
			tgt.Paths = make(map[string]spec.PathItem)
		}
		tgt.Paths[path] = pthObj
	}

	return nil
}
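The rxRoute pattern consumed above is defined elsewhere in the package and is not shown here. As a rough illustration only, the standalone sketch below uses a simplified, hypothetical stand-in regex to pull method, path, tags, and operation id out of a go-swagger style route annotation.

package main

import (
	"fmt"
	"regexp"
)

// rxRouteSketch is a simplified, hypothetical stand-in for rxRoute; the real
// pattern is more permissive about spacing and optional parts.
var rxRouteSketch = regexp.MustCompile(`swagger:route\s+(\S+)\s+(\S+)\s+(.+?)\s+(\S+)\s*$`)

func main() {
	line := "// swagger:route GET /pets pets listPets"
	if m := rxRouteSketch.FindStringSubmatch(line); m != nil {
		method, path, tags, id := m[1], m[2], m[3], m[4]
		fmt.Println(method, path, tags, id) // GET /pets pets listPets
	}
}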