Example #1
0
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source
	src_linecount := newlineCount(string(source))
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, &testErrorHandler{t}, ScanComments|dontInsertSemis)
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	for {
		pos, tok, lit := s.Scan()
		if lit == "" {
			// no literal value for non-literal tokens
			lit = tok.String()
		}
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
		}
		if tok == token.EOF {
			lit = "<EOF>"
			epos.Line = src_linecount
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
		}
		if e.tok.IsLiteral() {
			// no CRs in raw string literals
			elit := e.lit
			if elit[0] == '`' {
				elit = string(stripCR([]byte(elit)))
				epos.Offset += len(e.lit) - len(lit) // correct position
			}
			if lit != elit {
				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
			}
		}
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}
		epos.Offset += len(lit) + len(whitespace)
		epos.Line += newlineCount(lit) + whitespace_linecount
		if tok == token.COMMENT && lit[1] == '/' {
		// correct for the unaccounted '\n' in a //-style comment
			epos.Offset++
			epos.Line++
		}
		index++
		if tok == token.EOF {
			break
		}
	}
	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
Example #2
0
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source
	var src string
	for _, e := range tokens {
		src += e.lit + whitespace
	}
	src_linecount := newlineCount(src) + 1
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &testErrorHandler{t}, ScanComments)
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	for {
		pos, tok, litb := s.Scan()
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
		}
		lit := string(litb)
		if tok == token.EOF {
			lit = "<EOF>"
			epos.Line = src_linecount
			epos.Column = 1
		}
		checkPos(t, lit, pos, epos)
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
		}
		if e.tok.IsLiteral() && lit != e.lit {
			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
		}
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}
		epos.Offset += len(lit) + len(whitespace)
		epos.Line += newlineCount(lit) + whitespace_linecount
		if tok == token.COMMENT && litb[1] == '/' {
		// correct for the unaccounted '\n' in a //-style comment
			epos.Offset++
			epos.Line++
		}
		index++
		if tok == token.EOF {
			break
		}
	}
	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
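
Both variants above lean on helpers defined elsewhere in scanner_test.go. A minimal sketch of what they presumably look like, reconstructed only from how they are used here (the exact definitions, including the constant names, are assumptions):

// Sketch of the expected-token element and token-class helpers these tests
// reference; reconstructed from usage, so names and details are assumed.
type elt struct {
	tok   token.Token
	lit   string
	class int
}

const (
	special = iota // tokens such as COMMENT and EOF
	literal
	operator
	keyword
)

func tokenclass(tok token.Token) int {
	switch {
	case tok.IsLiteral():
		return literal
	case tok.IsOperator():
		return operator
	case tok.IsKeyword():
		return keyword
	}
	return special
}

// newlineCount reports how many '\n' bytes s contains.
func newlineCount(s string) int {
	n := 0
	for i := 0; i < len(s); i++ {
		if s[i] == '\n' {
			n++
		}
	}
	return n
}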
Example #3
0
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source
	var src string
	for _, e := range tokens {
		src += e.lit + whitespace
	}
	src_linecount := newlineCount(src)
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
	nerrors := Tokenize("", []byte(src), &testErrorHandler{t}, ScanComments,
		func(pos token.Position, tok token.Token, litb []byte) bool {
			e := elt{token.EOF, "", special}
			if index < len(tokens) {
				e = tokens[index]
			}
			lit := string(litb)
			if tok == token.EOF {
				lit = "<EOF>"
				epos.Line = src_linecount
				epos.Column = 1
			}
			checkPos(t, lit, pos, epos)
			if tok != e.tok {
				t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
			}
			if e.tok.IsLiteral() && lit != e.lit {
				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
			}
			if tokenclass(tok) != e.class {
				t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
			}
			epos.Offset += len(lit) + len(whitespace)
			epos.Line += newlineCount(lit) + whitespace_linecount
			if tok == token.COMMENT && litb[1] == '/' {
			// correct for the unaccounted '\n' in a //-style comment
				epos.Offset++
				epos.Line++
			}
			index++
			return tok != token.EOF
		})
	if nerrors != 0 {
		t.Errorf("found %d errors", nerrors)
	}
}
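
Example #3 drives the scanner through a Tokenize callback instead of an explicit loop. Based purely on the call site above, such a driver could be sketched as follows; the function body and the pre-file-set Init signature it assumes are reconstructions, not the current go/scanner API:

// Hypothetical reconstruction of a Tokenize-style driver: it scans src,
// hands every token to f, stops once f returns false, and reports the
// number of scan errors. Signatures are assumed from the call site above.
func tokenize(filename string, src []byte, err ErrorHandler, mode uint,
	f func(pos token.Position, tok token.Token, lit []byte) bool) int {
	var s Scanner
	s.Init(filename, src, err, mode) // assumed pre-file-set Init signature
	for f(s.Scan()) {
		// all checking happens inside the callback
	}
	return s.ErrorCount
}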
Example #4
0
func parsePattern(pos token.Position, src string, stack map[string]bool) (pattern, re, action string, bol, eol bool) {
	p := &pat{src: src, re: bytes.NewBuffer(nil), stack: stack}

	defer func() {
		if e := recover(); e != nil {
			pos.Column += p.pos
			logErr(fmt.Sprintf(`%s - "%s^%s" - %s`, pos, src[:p.pos], src[p.pos:], e.(error)))
		}
	}()

	p.parseExpr(0)
	pattern, re = src[:p.pos], p.re.String()
	bol, eol = p.bol, p.eol
	switch b := p.current(); b {
	case 0:
		return
	case ' ', '\t':
		p.move()
		action = src[p.pos:]
		return
	}
	panic(errors.New("syntax error"))
}
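
parsePattern depends on a pat parser state whose definition is not shown; parseExpr does the real translation work and is not sketched here. Inferring from the fields and methods used above, the state plausibly looks like this (all of it is an assumption for illustration):

// Assumed shape of the pattern-parser state used by parsePattern; the field
// set and the cursor helpers are inferred from the usage above.
type pat struct {
	src      string          // pattern text being parsed
	pos      int             // byte offset of the parse cursor in src
	re       *bytes.Buffer   // translated regular expression being built
	stack    map[string]bool // guards against recursive named definitions
	bol, eol bool            // ^ / $ anchoring detected by parseExpr
}

// current returns the byte under the cursor, or 0 at end of input.
func (p *pat) current() byte {
	if p.pos < len(p.src) {
		return p.src[p.pos]
	}
	return 0
}

// move advances the cursor by one byte.
func (p *pat) move() { p.pos++ }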
Example #5
0
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	whitespace_linecount := newlineCount(whitespace)

	// error handler
	eh := func(_ token.Position, msg string) {
		t.Errorf("error handler called (msg = %s)", msg)
	}

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)

	// set up expected position
	epos := token.Position{
		Filename: "",
		Offset:   0,
		Line:     1,
		Column:   1,
	}

	index := 0
	for {
		pos, tok, lit := s.Scan()

		// check position
		if tok == token.EOF {
			// correction for EOF
			epos.Line = newlineCount(string(source))
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)

		// check token
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
			index++
		}
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
		}

		// check token class
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}

		// check literal
		elit := ""
		switch e.tok {
		case token.COMMENT:
			// no CRs in comments
			elit = string(stripCR([]byte(e.lit)))
			// a //-style comment literal doesn't contain the trailing newline
			if elit[1] == '/' {
				elit = elit[0 : len(elit)-1]
			}
		case token.IDENT:
			elit = e.lit
		case token.SEMICOLON:
			elit = ";"
		default:
			if e.tok.IsLiteral() {
				// no CRs in raw string literals
				elit = e.lit
				if elit[0] == '`' {
					elit = string(stripCR([]byte(elit)))
				}
			} else if e.tok.IsKeyword() {
				elit = e.lit
			}
		}
		if lit != elit {
			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
		}

		if tok == token.EOF {
			break
		}

		// update position
		epos.Offset += len(e.lit) + len(whitespace)
		epos.Line += newlineCount(e.lit) + whitespace_linecount

	}

	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
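
All of the TestScan variants walk the same package-level tokens table, with whitespace inserted between entries when the source is built. A few representative entries, assuming the usual layout of that table (the actual contents live elsewhere in scanner_test.go):

// Illustrative entries only; the real table is much longer and its exact
// contents are an assumption here.
const whitespace = "  \t  \n\n\n" // to separate tokens

var tokens = []elt{
	// Special tokens
	{token.COMMENT, "/* a comment */", special},
	{token.COMMENT, "// a comment \n", special},

	// Identifiers and basic type literals
	{token.IDENT, "foobar", literal},
	{token.INT, "1234567890", literal},
	{token.STRING, "`foobar`", literal},

	// Operators and delimiters
	{token.ADD, "+", operator},
	{token.LPAREN, "(", operator},

	// Keywords
	{token.FUNC, "func", keyword},
	{token.RETURN, "return", keyword},
}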