Example #1
// distance returns the column difference between from and to if both
// are on the same line; if they are on different lines (or unknown)
// the result is infinity.
func (p *printer) distance(from0 token.Pos, to token.Position) int {
	from := p.fset.Position(from0)
	if from.IsValid() && to.IsValid() && from.Line == to.Line {
		return to.Column - from.Column
	}
	return infinity
}
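To try distance outside the printer, here is a minimal, hypothetical standalone variant built on the public go/token API (columnDistance and the file contents are illustrative only; math.MaxInt stands in for the printer's internal infinity constant):

package main

import (
	"fmt"
	"go/token"
	"math"
)

// columnDistance returns the column difference between two positions that
// lie on the same line; for different lines or invalid positions it
// returns math.MaxInt.
func columnDistance(fset *token.FileSet, from, to token.Pos) int {
	f := fset.Position(from)
	t := fset.Position(to)
	if f.IsValid() && t.IsValid() && f.Line == t.Line {
		return t.Column - f.Column
	}
	return math.MaxInt
}

func main() {
	fset := token.NewFileSet()
	src := "x := 1\ny := 2\n"
	file := fset.AddFile("example.go", fset.Base(), len(src))
	file.SetLinesForContent([]byte(src))

	// offsets 0 and 5 are both on line 1; offset 7 starts line 2
	fmt.Println(columnDistance(fset, file.Pos(0), file.Pos(5))) // 5
	fmt.Println(columnDistance(fset, file.Pos(0), file.Pos(7))) // math.MaxInt
}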
Example #2
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source
	var src string
	for _, e := range tokens {
		src += e.lit + whitespace
	}
	src_linecount := newlineCount(src)
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	var s scanner.Scanner
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &testErrorHandler{t}, scanner.ScanComments)
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	for {
		pos, tok, litb := s.Scan()
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
		}
		lit := string(litb)
		if tok == token.EOF {
			lit = "<EOF>"
			epos.Line = src_linecount
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
		}
		if e.tok.IsLiteral() && lit != e.lit {
			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
		}
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}
		epos.Offset += len(lit) + len(whitespace)
		epos.Line += newlineCount(lit) + whitespace_linecount
		if tok == token.COMMENT && litb[1] == '/' {
			// correct for the unaccounted '\n' in //-style comments
			epos.Offset++
			epos.Line++
		}
		index++
		if tok == token.EOF {
			break
		}
	}
	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
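The test above uses an older Scan signature that returned the literal as a []byte. A minimal sketch of driving the scanner through the current go/scanner API, where the literal comes back as a string (the source text and output formatting are arbitrary choices for illustration):

package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	src := []byte("package main // a comment\n\nvar answer = 42\n")

	fset := token.NewFileSet()
	file := fset.AddFile("example.go", fset.Base(), len(src))

	var s scanner.Scanner
	// a nil error handler means errors are only counted in s.ErrorCount
	s.Init(file, src, nil, scanner.ScanComments)

	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%-16s %-10s %q\n", fset.Position(pos), tok, lit)
	}
	if s.ErrorCount > 0 {
		fmt.Println("errors:", s.ErrorCount)
	}
}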
Example #3
// writeItem writes data at position pos. data is the text corresponding to
// a single lexical token, but may also be comment text. pos is the actual
// (or at least very accurately estimated) position of the data in the original
// source text. writeItem updates p.last to the position immediately following
// the data.
//
func (p *printer) writeItem(pos token.Position, data []byte) {
	if pos.IsValid() {
		// p.pos is only updated for a valid pos; otherwise we continue
		// with the previous position
		if p.last.IsValid() && p.last.Filename != pos.Filename {
			// the file has changed - reset state
			// (used when printing merged ASTs of different files
			// e.g., the result of ast.MergePackageFiles)
			p.indent = 0
			p.mode = 0
			p.buffer = p.buffer[0:0]
		}
		p.pos = pos
	}
	if debug {
		// do not update p.pos - use write0
		_, filename := filepath.Split(pos.Filename)
		p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column)))
	}
	p.write(data)
	p.last = p.pos
}
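As an illustration of the bookkeeping described above, here is a hypothetical, heavily stripped-down itemWriter that only mimics the pattern of resetting per-file state when the filename changes and recording the position of the last item written; it is not the printer's actual implementation:

package main

import (
	"fmt"
	"go/token"
)

// itemWriter tracks the position of the last item written and drops
// per-file state (here just an indent level) when a new file begins.
type itemWriter struct {
	last   token.Position
	indent int
	out    []byte
}

func (w *itemWriter) writeItem(pos token.Position, data string) {
	if pos.IsValid() && w.last.IsValid() && w.last.Filename != pos.Filename {
		// a different file - reset state that only applies per file
		w.indent = 0
	}
	w.out = append(w.out, data...)
	w.last = pos // the real printer records the position following the data
}

func main() {
	var w itemWriter
	w.writeItem(token.Position{Filename: "a.go", Offset: 0, Line: 1, Column: 1}, "package a\n")
	w.writeItem(token.Position{Filename: "b.go", Offset: 0, Line: 1, Column: 1}, "package b\n")
	fmt.Printf("%s", w.out)
}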
Example #4
// writeCommentPrefix writes the whitespace before a comment.
// If there is any pending whitespace, it consumes as much of
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
// a group of comments (or nil), and isKeyword indicates if the
// next item is a keyword.
//
func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, isKeyword bool) {
	if p.written == 0 {
		// the comment is the first item to be printed - don't write any whitespace
		return
	}

	if pos.IsValid() && pos.Filename != p.last.Filename {
		// comment in a different file - separate with newlines (writeNewlines will limit the number)
		p.writeNewlines(10, true)
		return
	}

	if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
		// comment on the same line as last item:
		// separate with at least one separator
		hasSep := false
		if prev == nil {
			// first comment of a comment group
			j := 0
			for i, ch := range p.buffer {
				switch ch {
				case blank:
					// ignore any blanks before a comment
					p.buffer[i] = ignore
					continue
				case vtab:
					// respect existing tabs - important
					// for proper formatting of commented structs
					hasSep = true
					continue
				case indent:
					// apply pending indentation
					continue
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// make sure there is at least one separator
		if !hasSep {
			if pos.Line == next.Line {
				// next item is on the same line as the comment
				// (which must be a /*-style comment): separate
				// with a blank instead of a tab
				p.write([]byte{' '})
			} else {
				p.write(htab)
			}
		}

	} else {
		// comment on a different line:
		// separate with at least one line break
		if prev == nil {
			// first comment of a comment group
			j := 0
			for i, ch := range p.buffer {
				switch ch {
				case blank, vtab:
					// ignore any horizontal whitespace before line breaks
					p.buffer[i] = ignore
					continue
				case indent:
					// apply pending indentation
					continue
				case unindent:
					// if the next token is a keyword, apply the outdent
					// if it appears that the comment is aligned with the
					// keyword; otherwise assume the outdent is part of a
					// closing block and stop (this scenario appears with
					// comments before a case label where the comments
					// apply to the next case instead of the current one)
					if isKeyword && pos.Column == next.Column {
						continue
					}
				case newline, formfeed:
					// TODO(gri): may want to keep formfeed info in some cases
					p.buffer[i] = ignore
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// use formfeeds to break columns before a comment;
		// this is analogous to using formfeeds to separate
		// individual lines of /*-style comments - but make
		// sure there is at least one line break if the previous
		// comment was a line comment
		n := pos.Line - p.last.Line // if !pos.IsValid(), pos.Line == 0, and n will be non-positive
		if n <= 0 && prev != nil && prev.Text[1] == '/' {
			n = 1
		}
		p.writeNewlines(n, true)
	}
}
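The practical effect of this whitespace handling is easiest to see through go/format, which drives go/printer: blanks before a same-line comment are normalized and the tabs that align comments on struct fields are preserved. A small sketch with an arbitrary input:

package main

import (
	"fmt"
	"go/format"
	"log"
)

func main() {
	// the comments after the struct fields are deliberately misaligned
	src := []byte("package demo\n\ntype T struct {\n\tA int    // first field\n\tLongerName int // second field\n}\n")

	out, err := format.Source(src)
	if err != nil {
		log.Fatal(err)
	}
	// after formatting, the field comments line up in a single column
	fmt.Printf("%s", out)
}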