Example #1
File: lexer.go Project: kdar/health
	tokFieldRepeatSeparator
	tokSubComponentSeparator
	tokField
	//tokEscaped
)

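// Positions of the separator characters (field, component, field-repeat,
// escape, sub-component).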
const (
	fieldSeparatorPos = iota
	componentSeparatorPos
	fieldRepeatSeparatorPos
	escapeCharacterPos
	subComponentSeparatorPos
)

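// Byte sets used by the lexer.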
var (
	bytesUpperChars     = rangeutil.RangeToBytes("A-Z0-9")
	bytesSegmentGarbage = []byte{'\r', '\n'}
)

// tokenTypeAsString converts a token type to a string.
func tokenTypeAsString(t lexer.TokenType) string {
	var typeString string

	switch t {
	case tokEOF:
		typeString = "tokEOF"
	case tokNIL:
		typeString = "tokNIL"
	case tokError:
		typeString = "tokError"
	case tokSegmentName:
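A minimal sketch (not part of the original project) of how the *Pos constants above could index a slice of separator bytes taken from a message header; the separator values used here are illustrative assumptions.

package main

import "fmt"

const (
	fieldSeparatorPos = iota
	componentSeparatorPos
	fieldRepeatSeparatorPos
	escapeCharacterPos
	subComponentSeparatorPos
)

func main() {
	// Hypothetical separator set, in positional order:
	// field, component, field-repeat, escape, sub-component.
	separators := []byte{'|', '^', '~', '\\', '&'}
	fmt.Printf("component separator: %c\n", separators[componentSeparatorPos])
	fmt.Printf("escape character:    %c\n", separators[escapeCharacterPos])
}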
Example #2
	T_CHAR_CARRIAGE_RETURN
	T_CHAR_TAB
	T_CHAR_LOWER_U
	T_CHAR_HEX_WORD
	T_CHAR_CONTROL
)

// Single-character tokens
var singleChars = []byte{'{', '}', '[', ']', ':', ','}

var singleTokens = []lexer.TokenType{T_OPEN_BRACE, T_CLOSE_BRACE, T_OPEN_BRACKET, T_CLOSE_BRACKET, T_COLON, T_COMMA}

// Multi-character tokens
var bytesWhitespace = []byte{' ', '\t', '\r', '\n'}

var bytesDigits = rangeutil.RangeToBytes("0-9")

var bytes1to9 = rangeutil.RangeToBytes("1-9")

var bytesAlpha = rangeutil.RangeToBytes("a-zA-Z")

var bytesAlphaNum = rangeutil.RangeToBytes("0-9a-zA-Z")

var bytesHex = rangeutil.RangeToBytes("0-9a-fA-F")

var bytesNonText = rangeutil.RangeToBytes("\u0000-\u001f\\\"")

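// Escape characters and their corresponding token types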
var escapeChars = []byte{'"', '\\', '/', 'b', 'f', 'n', 'r', 't'}

var escapeTokens = []lexer.TokenType{T_CHAR_QUOTE, T_CHAR_BACK_SLASH, T_CHAR_SLASH, T_CHAR_BACK_SPACE, T_CHAR_FORM_FEED, T_CHAR_LINE_FEED, T_CHAR_CARRIAGE_RETURN, T_CHAR_TAB}
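
The singleChars/singleTokens and escapeChars/escapeTokens pairs are parallel slices: the byte at index i corresponds to the token type at index i. A minimal, self-contained sketch of that lookup pattern follows; the TokenType type, the constant values, and the escapeTokenFor helper are stand-ins introduced here for illustration, not part of the original lexer package.

package main

import (
	"bytes"
	"fmt"
)

// TokenType stands in for lexer.TokenType.
type TokenType int

// Stand-in constants; the real values come from the lexer's token enumeration.
const (
	T_CHAR_QUOTE TokenType = iota
	T_CHAR_BACK_SLASH
	T_CHAR_SLASH
	T_CHAR_BACK_SPACE
	T_CHAR_FORM_FEED
	T_CHAR_LINE_FEED
	T_CHAR_CARRIAGE_RETURN
	T_CHAR_TAB
)

var escapeChars = []byte{'"', '\\', '/', 'b', 'f', 'n', 'r', 't'}

var escapeTokens = []TokenType{T_CHAR_QUOTE, T_CHAR_BACK_SLASH, T_CHAR_SLASH, T_CHAR_BACK_SPACE, T_CHAR_FORM_FEED, T_CHAR_LINE_FEED, T_CHAR_CARRIAGE_RETURN, T_CHAR_TAB}

// escapeTokenFor (hypothetical helper) maps the byte following a backslash to
// its token type via the parallel slices, reporting false if it is not a
// recognized single-character escape.
func escapeTokenFor(c byte) (TokenType, bool) {
	if i := bytes.IndexByte(escapeChars, c); i >= 0 {
		return escapeTokens[i], true
	}
	return 0, false
}

func main() {
	if tok, ok := escapeTokenFor('n'); ok {
		fmt.Println("token index:", tok) // prints 5 (T_CHAR_LINE_FEED)
	}
}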