Example 1
// serveQuery parses an incoming query and, if valid, executes the query.
func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *influxdb.User) {
	q := r.URL.Query()
	p := influxql.NewParser(strings.NewReader(q.Get("q")))
	db := q.Get("db")
	pretty := q.Get("pretty") == "true"

	// Parse query from query string.
	query, err := p.ParseQuery()
	if err != nil {
		httpError(w, "error parsing query: "+err.Error(), http.StatusBadRequest)
		return
	}

	// If authentication is enabled and there are no users yet, make sure
	// the first statement is creating a new cluster admin.
	if h.requireAuthentication && h.server.UserCount() == 0 {
		// Guard against an empty statement list before inspecting the first statement.
		if len(query.Statements) == 0 {
			httpError(w, "must create cluster admin", http.StatusUnauthorized)
			return
		}
		stmt, ok := query.Statements[0].(*influxql.CreateUserStatement)
		if !ok || stmt.Privilege == nil || *stmt.Privilege != influxql.AllPrivileges {
			httpError(w, "must create cluster admin", http.StatusUnauthorized)
			return
		}
	}

	// Execute query. One result will be returned for each statement.
	results := h.server.ExecuteQuery(query, db, user)

	// Send results to client.
	httpResults(w, results, pretty)
}
Example 2
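// emptyTestServer returns an httptest.Server that emulates a minimal InfluxDB HTTP API:
// it parses the "q" parameter of /query requests with influxql, answers a couple of SHOW
// statements with canned JSON, and acknowledges /write requests with a 200.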
func emptyTestServer() *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Influxdb-Version", SERVER_VERSION)

		switch r.URL.Path {
		case "/query":
			values := r.URL.Query()
			parser := influxql.NewParser(bytes.NewBufferString(values.Get("q")))
			q, err := parser.ParseQuery()
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				return
			}
			stmt := q.Statements[0]

			switch stmt.(type) {
			case *influxql.ShowDatabasesStatement:
				io.WriteString(w, `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db"]]}]}]}`)
			case *influxql.ShowDiagnosticsStatement:
				io.WriteString(w, `{"results":[{}]}`)
			}
		case "/write":
			w.WriteHeader(http.StatusOK)
		}
	}))
}
Example 3
// MustParseExpr parses an expression. Panic on error.
func MustParseExpr(s string) influxql.Expr {
	expr, err := influxql.NewParser(strings.NewReader(s)).ParseExpr()
	if err != nil {
		panic(err.Error())
	}
	return expr
}

// mustParseQuery parses an InfluxQL query. Panic on error.
func mustParseQuery(s string) *influxql.Query {
	q, err := influxql.NewParser(strings.NewReader(s)).ParseQuery()
	if err != nil {
		panic(err.Error())
	}
	return q
}
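A minimal usage sketch (not from the original source): it shows how panic-on-error helpers like these are typically called inside a test, so fixtures can be parsed inline without per-case error handling. The test name and query strings are illustrative, and the snippet assumes it lives in the same test package as the helpers above.
// TestMustParseHelpers_Usage is a hypothetical example, not part of the original code.
func TestMustParseHelpers_Usage(t *testing.T) {
	// MustParseExpr panics on bad input, so a typo in a test fixture fails loudly.
	cond := MustParseExpr(`value > 10`)
	if _, ok := cond.(*influxql.BinaryExpr); !ok {
		t.Fatalf("expected a binary expression, got %T", cond)
	}

	// mustParseQuery is handy when a test needs the full multi-statement query AST.
	q := mustParseQuery(`SELECT value FROM cpu; SELECT usage FROM mem`)
	if len(q.Statements) != 2 {
		t.Fatalf("unexpected statement count: %d", len(q.Statements))
	}
}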
Example 5
// mustParseSelectStatement parses a select statement. Panic on error.
func mustParseSelectStatement(s string) *influxql.SelectStatement {
	stmt, err := influxql.NewParser(strings.NewReader(s)).ParseStatement()
	if err != nil {
		panic(err)
	}
	return stmt.(*influxql.SelectStatement)
}
Example 6
// Ensure the parser can parse an empty query.
func TestParser_ParseQuery_Empty(t *testing.T) {
	q, err := influxql.NewParser(strings.NewReader(``)).ParseQuery()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if len(q.Statements) != 0 {
		t.Fatalf("unexpected statement count: %d", len(q.Statements))
	}
}
Example 7
func TestSelectStatement_IsSimpleDerivative(t *testing.T) {
	var tests = []struct {
		stmt       string
		derivative bool
	}{
		// No derivatives
		{
			stmt:       `SELECT value FROM cpu`,
			derivative: false,
		},

		// Query derivative
		{
			stmt:       `SELECT derivative(value) FROM cpu`,
			derivative: true,
		},

		// Query non-negative derivative
		{
			stmt:       `SELECT non_negative_derivative(value) FROM cpu`,
			derivative: true,
		},

		// Aggregate query without a derivative
		{
			stmt:       `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`,
			derivative: false,
		},

		// Derivative of an aggregate is not a simple derivative
		{
			stmt:       `SELECT non_negative_derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`,
			derivative: false,
		},

		// Invalid derivative function name
		{
			stmt:       `SELECT typoDerivative(value) FROM cpu where time < now()`,
			derivative: false,
		},
	}

	for i, tt := range tests {
		// Parse statement.
		t.Logf("index: %d, statement: %s", i, tt.stmt)
		stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()
		if err != nil {
			t.Fatalf("invalid statement: %q: %s", tt.stmt, err)
		}

		// Test derivative detection.
		if d := stmt.(*influxql.SelectStatement).IsSimpleDerivative(); tt.derivative != d {
			t.Errorf("%d. %q: unexpected derivative detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.derivative, d)
			continue
		}
	}
}
Example 8
// Ensure the parser can parse a multi-statement query.
func TestParser_ParseQuery(t *testing.T) {
	s := `SELECT a FROM b; SELECT c FROM d`
	q, err := influxql.NewParser(strings.NewReader(s)).ParseQuery()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	} else if len(q.Statements) != 2 {
		t.Fatalf("unexpected statement count: %d", len(q.Statements))
	}
}
Example 9
func BenchmarkParserParseStatement(b *testing.B) {
	b.ReportAllocs()
	s := `SELECT field FROM "series" WHERE value > 10`
	for i := 0; i < b.N; i++ {
		if stmt, err := influxql.NewParser(strings.NewReader(s)).ParseStatement(); err != nil {
			b.Fatalf("unexpected error: %s", err)
		} else if stmt == nil {
			b.Fatalf("expected statement: %s", stmt)
		}
	}
	b.SetBytes(int64(len(s)))
}
Example 10
func TestSelectStatement_HasCountDistinct(t *testing.T) {
	var tests = []struct {
		stmt  string
		count bool
	}{
		// No counts
		{
			stmt:  `SELECT value FROM cpu`,
			count: false,
		},

		// Count without distinct
		{
			stmt:  `SELECT count(value) FROM cpu`,
			count: false,
		},

		// Count distinct
		{
			stmt:  `SELECT count(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`,
			count: true,
		},

		// Invalid count function name
		{
			stmt:  `SELECT typoCount(value) FROM cpu`,
			count: false,
		},

		// Invalid count function name with distinct
		{
			stmt:  `SELECT typoCount(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`,
			count: false,
		},
	}

	for i, tt := range tests {
		// Parse statement.
		t.Logf("index: %d, statement: %s", i, tt.stmt)
		stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()
		if err != nil {
			t.Fatalf("invalid statement: %q: %s", tt.stmt, err)
		}

		// Test count detection.
		if c := stmt.(*influxql.SelectStatement).HasCountDistinct(); tt.count != c {
			t.Errorf("%d. %q: unexpected count detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.count, c)
			continue
		}
	}
}
Example 11
// Ensure that the String() value of a statement is parseable
func TestParseString(t *testing.T) {
	var tests = []struct {
		stmt string
	}{
		{
			stmt: `SELECT "cpu load" FROM myseries`,
		},
		{
			stmt: `SELECT "cpu load" FROM "my series"`,
		},
		{
			stmt: `SELECT "cpu\"load" FROM myseries`,
		},
		{
			stmt: `SELECT "cpu'load" FROM myseries`,
		},
		{
			stmt: `SELECT "cpu load" FROM "my\"series"`,
		},
		{
			stmt: `SELECT * FROM myseries`,
		},
	}

	for _, tt := range tests {
		// Parse statement.
		stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()
		if err != nil {
			t.Fatalf("invalid statement: %q: %s", tt.stmt, err)
		}

		_, err = influxql.NewParser(strings.NewReader(stmt.String())).ParseStatement()
		if err != nil {
			t.Fatalf("failed to parse string: %v\norig: %v\ngot: %v", err, tt.stmt, stmt.String())
		}
	}
}
Example 12
// Ensure the SELECT statement can extract GROUP BY interval.
func TestSelectStatement_GroupByInterval(t *testing.T) {
	q := "SELECT sum(value) from foo  where time < now() GROUP BY time(10m)"
	stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		t.Fatalf("invalid statement: %q: %s", stmt, err)
	}

	s := stmt.(*influxql.SelectStatement)
	d, err := s.GroupByInterval()
	if err != nil {
		t.Fatalf("error parsing group by interval: %s", err.Error())
	}
	if d != 10*time.Minute {
		t.Fatalf("group by interval not equal:\nexp=%s\ngot=%s", 10*time.Minute, d)
	}
}
Example 13
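// CreateMapper parses query into a SELECT statement and returns a Mapper over the given
// shard, or nil if the shard has not been created on this store yet.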
func (s *Store) CreateMapper(shardID uint64, query string, chunkSize int) (Mapper, error) {
	q, err := influxql.NewParser(strings.NewReader(query)).ParseStatement()
	if err != nil {
		return nil, err
	}
	stmt, ok := q.(*influxql.SelectStatement)
	if !ok {
		return nil, fmt.Errorf("query is not a SELECT statement: %s", err.Error())
	}

	shard := s.Shard(shardID)
	if shard == nil {
		// This can happen if the shard has been assigned, but hasn't actually been created yet.
		return nil, nil
	}

	return NewLocalMapper(shard, stmt, chunkSize), nil
}
Example 14
// serveQuery parses an incoming query and, if valid, executes the query.
func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *influxdb.User) {
	q := r.URL.Query()
	p := influxql.NewParser(strings.NewReader(q.Get("q")))
	db := q.Get("db")
	pretty := q.Get("pretty") == "true"

	// Parse query from query string.
	query, err := p.ParseQuery()
	if err != nil {
		httpError(w, "error parsing query: "+err.Error(), pretty, http.StatusBadRequest)
		return
	}

	// Execute query. One result will be returned for each statement.
	results := h.server.ExecuteQuery(query, db, user)

	// Send results to client.
	httpResults(w, results, pretty)
}
Example 15
// NewContinuousQuery returns a ContinuousQuery object with a parsed influxql.CreateContinuousQueryStatement
func NewContinuousQuery(database string, cqi *meta.ContinuousQueryInfo) (*ContinuousQuery, error) {
	stmt, err := influxql.NewParser(strings.NewReader(cqi.Query)).ParseStatement()
	if err != nil {
		return nil, err
	}

	q, ok := stmt.(*influxql.CreateContinuousQueryStatement)
	if !ok || q.Source.Target == nil || q.Source.Target.Measurement == nil {
		return nil, errors.New("query isn't a valid continuous query")
	}

	cquery := &ContinuousQuery{
		Database: database,
		Info:     cqi,
		q:        q.Source,
	}

	return cquery, nil
}
Example 16
// Ensure that we can detect whether a WHERE clause contains only time constraints
func TestOnlyTimeExpr(t *testing.T) {
	var tests = []struct {
		stmt string
		exp  bool
	}{
		{
			stmt: `SELECT value FROM myseries WHERE value > 1`,
			exp:  false,
		},
		{
			stmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z'`,
			exp:  true,
		},
		{
			stmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:05Z'`,
			exp:  true,
		},
		{
			stmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z' AND asdf = 'bar'`,
			exp:  false,
		},
		{
			stmt: `SELECT value FROM foo WHERE asdf = 'jkl' AND (time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:05Z')`,
			exp:  false,
		},
	}

	for i, tt := range tests {
		// Parse statement.
		stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()
		if err != nil {
			t.Fatalf("invalid statement: %q: %s", tt.stmt, err)
		}
		if influxql.OnlyTimeExpr(stmt.(*influxql.SelectStatement).Condition) != tt.exp {
			t.Fatalf("%d. expected statement to return only time dimension to be %t: %s", i, tt.exp, tt.stmt)
		}
	}
}
Example 17
// serveQuery parses an incoming query and, if valid, executes the query.
func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *meta.UserInfo) {
	h.statMap.Add(statQueryRequest, 1)

	q := r.URL.Query()
	pretty := q.Get("pretty") == "true"

	qp := strings.TrimSpace(q.Get("q"))
	if qp == "" {
		httpError(w, `missing required parameter "q"`, pretty, http.StatusBadRequest)
		return
	}

	epoch := strings.TrimSpace(q.Get("epoch"))

	p := influxql.NewParser(strings.NewReader(qp))
	db := q.Get("db")

	// Parse query from query string.
	query, err := p.ParseQuery()
	if err != nil {
		httpError(w, "error parsing query: "+err.Error(), pretty, http.StatusBadRequest)
		return
	}

	// Sanitize statements with passwords.
	for _, s := range query.Statements {
		switch stmt := s.(type) {
		case *influxql.CreateUserStatement:
			sanitize(r, stmt.Password)
		case *influxql.SetPasswordUserStatement:
			sanitize(r, stmt.Password)
		}
	}

	// Check authorization.
	if h.requireAuthentication {
		err = h.QueryExecutor.Authorize(user, query, db)
		if err != nil {
			httpError(w, "error authorizing query: "+err.Error(), pretty, http.StatusUnauthorized)
			return
		}
	}

	// Parse chunk size. Use default if not provided or unparsable.
	chunked := (q.Get("chunked") == "true")
	chunkSize := DefaultChunkSize
	if chunked {
		if n, err := strconv.ParseInt(q.Get("chunk_size"), 10, 64); err == nil {
			chunkSize = int(n)
		}
	}

	// Make sure we signal the query to abort if the client disconnects
	closing := make(chan struct{})
	if notifier, ok := w.(http.CloseNotifier); ok {
		notify := notifier.CloseNotify()
		go func() {
			<-notify
			close(closing)
		}()
	}

	// Execute query.
	w.Header().Add("content-type", "application/json")
	results, err := h.QueryExecutor.ExecuteQuery(query, db, chunkSize, closing)

	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// if we're not chunking, this is the in-memory buffer for all results before sending to the client
	resp := Response{Results: make([]*influxql.Result, 0)}

	// Status header is OK once this point is reached.
	w.WriteHeader(http.StatusOK)

	// pull all results from the channel
	for r := range results {
		// Ignore nil results.
		if r == nil {
			continue
		}

		// if requested, convert result timestamps to epoch
		if epoch != "" {
			convertToEpoch(r, epoch)
		}

		// Write out result immediately if chunked.
		if chunked {
			n, _ := w.Write(MarshalJSON(Response{
				Results: []*influxql.Result{r},
			}, pretty))
			h.statMap.Add(statQueryRequestBytesTransmitted, int64(n))
			w.(http.Flusher).Flush()
			continue
		}

		// It's not chunked so buffer results in memory.
		// Results for statements need to be combined together.
		// We need to check if this new result is for the same statement as
		// the last result, or for the next statement
		l := len(resp.Results)
		if l == 0 {
			resp.Results = append(resp.Results, r)
		} else if resp.Results[l-1].StatementID == r.StatementID {
			cr := resp.Results[l-1]
			rowsMerged := 0
			if len(cr.Series) > 0 {
				lastSeries := cr.Series[len(cr.Series)-1]

				for _, row := range r.Series {
					if !lastSeries.SameSeries(row) {
						// Next row is for a different series than last.
						break
					}
					// Values are for the same series, so append them.
					lastSeries.Values = append(lastSeries.Values, row.Values...)
					rowsMerged++
				}
			}

			// Append remaining rows as new rows.
			r.Series = r.Series[rowsMerged:]
			cr.Series = append(cr.Series, r.Series...)
		} else {
			resp.Results = append(resp.Results, r)
		}
	}

	// If it's not chunked we buffered everything in memory, so write it out
	if !chunked {
		n, _ := w.Write(MarshalJSON(resp, pretty))
		h.statMap.Add(statQueryRequestBytesTransmitted, int64(n))
	}
}
Example 18
// Ensure the parser can parse expressions into an AST.
func TestParser_ParseExpr(t *testing.T) {
	var tests = []struct {
		s    string
		expr influxql.Expr
		err  string
	}{
		// Primitives
		{s: `100`, expr: &influxql.NumberLiteral{Val: 100}},
		{s: `'foo bar'`, expr: &influxql.StringLiteral{Val: "foo bar"}},
		{s: `true`, expr: &influxql.BooleanLiteral{Val: true}},
		{s: `false`, expr: &influxql.BooleanLiteral{Val: false}},
		{s: `my_ident`, expr: &influxql.VarRef{Val: "my_ident"}},
		{s: `'2000-01-01 00:00:00'`, expr: &influxql.TimeLiteral{Val: mustParseTime("2000-01-01T00:00:00Z")}},
		{s: `'2000-01-01 00:00:00.232'`, expr: &influxql.TimeLiteral{Val: mustParseTime("2000-01-01T00:00:00.232Z")}},
		{s: `'2000-01-32 00:00:00'`, err: `unable to parse datetime at line 1, char 1`},
		{s: `'2000-01-01'`, expr: &influxql.TimeLiteral{Val: mustParseTime("2000-01-01T00:00:00Z")}},
		{s: `'2000-01-99'`, err: `unable to parse date at line 1, char 1`},

		// Simple binary expression
		{
			s: `1 + 2`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.ADD,
				LHS: &influxql.NumberLiteral{Val: 1},
				RHS: &influxql.NumberLiteral{Val: 2},
			},
		},

		// Binary expression with LHS precedence
		{
			s: `1 * 2 + 3`,
			expr: &influxql.BinaryExpr{
				Op: influxql.ADD,
				LHS: &influxql.BinaryExpr{
					Op:  influxql.MUL,
					LHS: &influxql.NumberLiteral{Val: 1},
					RHS: &influxql.NumberLiteral{Val: 2},
				},
				RHS: &influxql.NumberLiteral{Val: 3},
			},
		},

		// Binary expression with RHS precedence
		{
			s: `1 + 2 * 3`,
			expr: &influxql.BinaryExpr{
				Op:  influxql.ADD,
				LHS: &influxql.NumberLiteral{Val: 1},
				RHS: &influxql.BinaryExpr{
					Op:  influxql.MUL,
					LHS: &influxql.NumberLiteral{Val: 2},
					RHS: &influxql.NumberLiteral{Val: 3},
				},
			},
		},

		// Binary expression with LHS paren group.
		{
			s: `(1 + 2) * 3`,
			expr: &influxql.BinaryExpr{
				Op: influxql.MUL,
				LHS: &influxql.ParenExpr{
					Expr: &influxql.BinaryExpr{
						Op:  influxql.ADD,
						LHS: &influxql.NumberLiteral{Val: 1},
						RHS: &influxql.NumberLiteral{Val: 2},
					},
				},
				RHS: &influxql.NumberLiteral{Val: 3},
			},
		},

		// Binary expression with no precedence, tests left associativity.
		{
			s: `1 * 2 * 3`,
			expr: &influxql.BinaryExpr{
				Op: influxql.MUL,
				LHS: &influxql.BinaryExpr{
					Op:  influxql.MUL,
					LHS: &influxql.NumberLiteral{Val: 1},
					RHS: &influxql.NumberLiteral{Val: 2},
				},
				RHS: &influxql.NumberLiteral{Val: 3},
			},
		},

		// Binary expression with regex.
		{
			s: "region =~ /us.*/",
			expr: &influxql.BinaryExpr{
				Op:  influxql.EQREGEX,
				LHS: &influxql.VarRef{Val: "region"},
				RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`us.*`)},
			},
		},

		// Complex binary expression.
		{
			s: `value + 3 < 30 AND 1 + 2 OR true`,
			expr: &influxql.BinaryExpr{
				Op: influxql.OR,
				LHS: &influxql.BinaryExpr{
					Op: influxql.AND,
					LHS: &influxql.BinaryExpr{
						Op: influxql.LT,
						LHS: &influxql.BinaryExpr{
							Op:  influxql.ADD,
							LHS: &influxql.VarRef{Val: "value"},
							RHS: &influxql.NumberLiteral{Val: 3},
						},
						RHS: &influxql.NumberLiteral{Val: 30},
					},
					RHS: &influxql.BinaryExpr{
						Op:  influxql.ADD,
						LHS: &influxql.NumberLiteral{Val: 1},
						RHS: &influxql.NumberLiteral{Val: 2},
					},
				},
				RHS: &influxql.BooleanLiteral{Val: true},
			},
		},

		// Function call (empty)
		{
			s: `my_func()`,
			expr: &influxql.Call{
				Name: "my_func",
			},
		},

		// Function call (multi-arg)
		{
			s: `my_func(1, 2 + 3)`,
			expr: &influxql.Call{
				Name: "my_func",
				Args: []influxql.Expr{
					&influxql.NumberLiteral{Val: 1},
					&influxql.BinaryExpr{
						Op:  influxql.ADD,
						LHS: &influxql.NumberLiteral{Val: 2},
						RHS: &influxql.NumberLiteral{Val: 3},
					},
				},
			},
		},
	}

	for i, tt := range tests {
		expr, err := influxql.NewParser(strings.NewReader(tt.s)).ParseExpr()
		if !reflect.DeepEqual(tt.err, errstring(err)) {
			t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.s, tt.err, err)
		} else if tt.err == "" && !reflect.DeepEqual(tt.expr, expr) {
			t.Errorf("%d. %q\n\nexpr mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.expr, expr)
		}
	}
}
Example 19
// Ensure the parser can parse strings into Statement ASTs.
func TestParser_ParseStatement(t *testing.T) {
	var tests = []struct {
		s    string
		stmt influxql.Statement
		err  string
	}{
		// SELECT * statement
		{
			s: `SELECT * FROM myseries`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{
					{Expr: &influxql.Wildcard{}},
				},
				Source: &influxql.Measurement{Name: "myseries"},
			},
		},

		// SELECT statement
		{
			s: `SELECT field1, field2 ,field3 AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' GROUP BY 10h ORDER BY ASC LIMIT 20 OFFSET 10;`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{
					{Expr: &influxql.VarRef{Val: "field1"}},
					{Expr: &influxql.VarRef{Val: "field2"}},
					{Expr: &influxql.VarRef{Val: "field3"}, Alias: "field_x"},
				},
				Source: &influxql.Measurement{Name: "myseries"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
				Dimensions: []*influxql.Dimension{
					{Expr: &influxql.DurationLiteral{Val: 10 * time.Hour}},
				},
				SortFields: []*influxql.SortField{
					{Ascending: true},
				},
				Limit:  20,
				Offset: 10,
			},
		},

		// SELECT statement with JOIN
		{
			s: `SELECT field1 FROM join(aa,"bb", cc) JOIN cc`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "field1"}}},
				Source: &influxql.Join{
					Measurements: []*influxql.Measurement{
						{Name: "aa"},
						{Name: `"bb"`},
						{Name: "cc"},
					},
				},
			},
		},

		// SELECT statement with MERGE
		{
			s: `SELECT field1 FROM merge(aa,b.b)`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "field1"}}},
				Source: &influxql.Merge{
					Measurements: []*influxql.Measurement{
						{Name: "aa"},
						{Name: "b.b"},
					},
				},
			},
		},

		// SELECT statement (lowercase)
		{
			s: `select my_field from myseries`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "my_field"}}},
				Source: &influxql.Measurement{Name: "myseries"},
			},
		},

		// SELECT statement with multiple ORDER BY fields
		{
			s: `SELECT field1 FROM myseries ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "field1"}}},
				Source: &influxql.Measurement{Name: "myseries"},
				SortFields: []*influxql.SortField{
					{Ascending: true},
					{Name: "field1"},
					{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SELECT statement with SLIMIT and SOFFSET
		{
			s: `SELECT field1 FROM myseries SLIMIT 10 SOFFSET 5`,
			stmt: &influxql.SelectStatement{
				Fields:  []*influxql.Field{{Expr: &influxql.VarRef{Val: "field1"}}},
				Source:  &influxql.Measurement{Name: "myseries"},
				SLimit:  10,
				SOffset: 5,
			},
		},

		// SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/
		{
			s: `SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
				Source: &influxql.Measurement{Name: "cpu"},
				Condition: &influxql.BinaryExpr{
					Op: influxql.AND,
					LHS: &influxql.BinaryExpr{
						Op:  influxql.EQ,
						LHS: &influxql.VarRef{Val: "host"},
						RHS: &influxql.StringLiteral{Val: "serverC"},
					},
					RHS: &influxql.BinaryExpr{
						Op:  influxql.EQREGEX,
						LHS: &influxql.VarRef{Val: "region"},
						RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(".*west.*")},
					},
				},
			},
		},

		// SELECT statement with fill
		{
			s: `SELECT mean(value) FROM cpu GROUP BY time(5m) fill(1)`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{{
					Expr: &influxql.Call{
						Name: "mean",
						Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
				Source: &influxql.Measurement{Name: "cpu"},
				Dimensions: []*influxql.Dimension{
					{Expr: &influxql.Call{
						Name: "time",
						Args: []influxql.Expr{
							&influxql.DurationLiteral{Val: 5 * time.Minute},
						},
					}},
				},
				Fill:      influxql.NumberFill,
				FillValue: float64(1),
			},
		},

		// SELECT statement with previous fill
		{
			s: `SELECT mean(value) FROM cpu GROUP BY time(5m) fill(previous)`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{{
					Expr: &influxql.Call{
						Name: "mean",
						Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
				Source: &influxql.Measurement{Name: "cpu"},
				Dimensions: []*influxql.Dimension{
					{Expr: &influxql.Call{
						Name: "time",
						Args: []influxql.Expr{
							&influxql.DurationLiteral{Val: 5 * time.Minute},
						},
					}},
				},
				Fill: influxql.PreviousFill,
			},
		},

		// DELETE statement
		{
			s: `DELETE FROM myseries WHERE host = 'hosta.influxdb.org'`,
			stmt: &influxql.DeleteStatement{
				Source: &influxql.Measurement{Name: "myseries"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
			},
		},

		// SHOW SERVERS
		{
			s:    `SHOW SERVERS`,
			stmt: &influxql.ShowServersStatement{},
		},

		// SHOW DATABASES
		{
			s:    `SHOW DATABASES`,
			stmt: &influxql.ShowDatabasesStatement{},
		},

		// SHOW SERIES statement
		{
			s:    `SHOW SERIES`,
			stmt: &influxql.ShowSeriesStatement{},
		},

		// SHOW SERIES with OFFSET 0
		{
			s:    `SHOW SERIES OFFSET 0`,
			stmt: &influxql.ShowSeriesStatement{Offset: 0},
		},

		// SHOW SERIES with LIMIT 2 OFFSET 0
		{
			s:    `SHOW SERIES LIMIT 2 OFFSET 0`,
			stmt: &influxql.ShowSeriesStatement{Offset: 0, Limit: 2},
		},

		// SHOW SERIES WHERE with ORDER BY and LIMIT
		{
			s: `SHOW SERIES WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowSeriesStatement{
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					{Ascending: true},
					{Name: "field1"},
					{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SHOW MEASUREMENTS WHERE with ORDER BY and LIMIT
		{
			s: `SHOW MEASUREMENTS WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowMeasurementsStatement{
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					{Ascending: true},
					{Name: "field1"},
					{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SHOW RETENTION POLICIES
		{
			s: `SHOW RETENTION POLICIES mydb`,
			stmt: &influxql.ShowRetentionPoliciesStatement{
				Database: "mydb",
			},
		},

		// SHOW TAG KEYS
		{
			s: `SHOW TAG KEYS FROM src`,
			stmt: &influxql.ShowTagKeysStatement{
				Source: &influxql.Measurement{Name: "src"},
			},
		},

		// SHOW TAG KEYS
		{
			s: `SHOW TAG KEYS FROM src WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowTagKeysStatement{
				Source: &influxql.Measurement{Name: "src"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					{Ascending: true},
					{Name: "field1"},
					{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SHOW TAG VALUES FROM ... WITH KEY = ...
		{
			s: `SHOW TAG VALUES FROM src WITH KEY = region WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowTagValuesStatement{
				Source:  &influxql.Measurement{Name: "src"},
				TagKeys: []string{"region"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					{Ascending: true},
					{Name: "field1"},
					{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SHOW TAG VALUES FROM ... WITH KEY IN...
		{
			s: `SHOW TAG VALUES FROM cpu WITH KEY IN (region, host) WHERE region = 'uswest'`,
			stmt: &influxql.ShowTagValuesStatement{
				Source:  &influxql.Measurement{Name: "cpu"},
				TagKeys: []string{"region", "host"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
			},
		},

		// SHOW TAG VALUES ... AND TAG KEY =
		{
			s: `SHOW TAG VALUES FROM cpu WITH KEY IN (region,service,host)WHERE region = 'uswest'`,
			stmt: &influxql.ShowTagValuesStatement{
				Source:  &influxql.Measurement{Name: "cpu"},
				TagKeys: []string{"region", "service", "host"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
			},
		},

		// SHOW TAG VALUES WITH KEY = ...
		{
			s: `SHOW TAG VALUES WITH KEY = host WHERE region = 'uswest'`,
			stmt: &influxql.ShowTagValuesStatement{
				TagKeys: []string{"host"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
			},
		},

		// SHOW TAG VALUES WITH KEY = "..."
		{
			s: `SHOW TAG VALUES WITH KEY = "host" WHERE region = 'uswest'`,
			stmt: &influxql.ShowTagValuesStatement{
				TagKeys: []string{`"host"`},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
			},
		},

		// SHOW USERS
		{
			s:    `SHOW USERS`,
			stmt: &influxql.ShowUsersStatement{},
		},

		// SHOW FIELD KEYS
		{
			s: `SHOW FIELD KEYS FROM src ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowFieldKeysStatement{
				Source: &influxql.Measurement{Name: "src"},
				SortFields: []*influxql.SortField{
					{Ascending: true},
					{Name: "field1"},
					{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// DROP SERIES statement
		{
			s:    `DROP SERIES 1`,
			stmt: &influxql.DropSeriesStatement{SeriesID: 1},
		},
		{
			s:    `DROP SERIES FROM src`,
			stmt: &influxql.DropSeriesStatement{Source: &influxql.Measurement{Name: "src"}},
		},
		{
			s: `DROP SERIES WHERE host = 'hosta.influxdb.org'`,
			stmt: &influxql.DropSeriesStatement{
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
			},
		},
		{
			s: `DROP SERIES FROM src WHERE host = 'hosta.influxdb.org'`,
			stmt: &influxql.DropSeriesStatement{
				Source: &influxql.Measurement{Name: "src"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
			},
		},

		// SHOW CONTINUOUS QUERIES statement
		{
			s:    `SHOW CONTINUOUS QUERIES`,
			stmt: &influxql.ShowContinuousQueriesStatement{},
		},

		// CREATE CONTINUOUS QUERY ... INTO <measurement>
		{
			s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT count() INTO measure1 FROM myseries GROUP BY time(5m) END`,
			stmt: &influxql.CreateContinuousQueryStatement{
				Name:     "myquery",
				Database: "testdb",
				Source: &influxql.SelectStatement{
					Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count"}}},
					Target: &influxql.Target{Measurement: "measure1"},
					Source: &influxql.Measurement{Name: "myseries"},
					Dimensions: []*influxql.Dimension{
						{
							Expr: &influxql.Call{
								Name: "time",
								Args: []influxql.Expr{
									&influxql.DurationLiteral{Val: 5 * time.Minute},
								},
							},
						},
					},
				},
			},
		},

		// CREATE CONTINUOUS QUERY ... INTO <retention-policy>.<measurement>
		{
			s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT count() INTO "1h.policy1"."cpu.load" FROM myseries GROUP BY time(5m) END`,
			stmt: &influxql.CreateContinuousQueryStatement{
				Name:     "myquery",
				Database: "testdb",
				Source: &influxql.SelectStatement{
					Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count"}}},
					Target: &influxql.Target{
						Measurement: `"1h.policy1"."cpu.load"`,
					},
					Source: &influxql.Measurement{Name: "myseries"},
					Dimensions: []*influxql.Dimension{
						{
							Expr: &influxql.Call{
								Name: "time",
								Args: []influxql.Expr{
									&influxql.DurationLiteral{Val: 5 * time.Minute},
								},
							},
						},
					},
				},
			},
		},

		// CREATE CONTINUOUS QUERY for non-aggregate SELECT stmts
		{
			s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT value INTO "policy1"."value" FROM myseries END`,
			stmt: &influxql.CreateContinuousQueryStatement{
				Name:     "myquery",
				Database: "testdb",
				Source: &influxql.SelectStatement{
					Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "value"}}},
					Target: &influxql.Target{
						Measurement: `"policy1"."value"`,
					},
					Source: &influxql.Measurement{Name: "myseries"},
				},
			},
		},

		// CREATE CONTINUOUS QUERY for non-aggregate SELECT stmts with multiple values
		{
			s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT transmit_rx, transmit_tx INTO "policy1"."network" FROM myseries END`,
			stmt: &influxql.CreateContinuousQueryStatement{
				Name:     "myquery",
				Database: "testdb",
				Source: &influxql.SelectStatement{
					Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "transmit_rx"}},
						{Expr: &influxql.Call{Name: "transmit_tx"}}},
					Target: &influxql.Target{
						Measurement: `"policy1"."network"`,
					},
					Source: &influxql.Measurement{Name: "myseries"},
				},
			},
		},

		// CREATE DATABASE statement
		{
			s: `CREATE DATABASE testdb`,
			stmt: &influxql.CreateDatabaseStatement{
				Name: "testdb",
			},
		},

		// CREATE USER statement
		{
			s: `CREATE USER testuser WITH PASSWORD 'pwd1337'`,
			stmt: &influxql.CreateUserStatement{
				Name:     "testuser",
				Password: "******",
			},
		},

		// CREATE USER ... WITH ALL PRIVILEGES
		{
			s: `CREATE USER testuser WITH PASSWORD 'pwd1337' WITH ALL PRIVILEGES`,
			stmt: &influxql.CreateUserStatement{
				Name:      "testuser",
				Password:  "******",
				Privilege: influxql.NewPrivilege(influxql.AllPrivileges),
			},
		},

		// DROP CONTINUOUS QUERY statement
		{
			s:    `DROP CONTINUOUS QUERY myquery`,
			stmt: &influxql.DropContinuousQueryStatement{Name: "myquery"},
		},

		// DROP DATABASE statement
		{
			s:    `DROP DATABASE testdb`,
			stmt: &influxql.DropDatabaseStatement{Name: "testdb"},
		},

		// DROP MEASUREMENT statement
		{
			s:    `DROP MEASUREMENT cpu`,
			stmt: &influxql.DropMeasurementStatement{Name: "cpu"},
		},

		// DROP RETENTION POLICY
		{
			s: `DROP RETENTION POLICY "1h.cpu" ON mydb`,
			stmt: &influxql.DropRetentionPolicyStatement{
				Name:     `"1h.cpu"`,
				Database: `mydb`,
			},
		},

		// DROP USER statement
		{
			s:    `DROP USER jdoe`,
			stmt: &influxql.DropUserStatement{Name: "jdoe"},
		},

		// GRANT READ
		{
			s: `GRANT READ ON testdb TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.ReadPrivilege,
				On:        "testdb",
				User:      "******",
			},
		},

		// GRANT WRITE
		{
			s: `GRANT WRITE ON testdb TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.WritePrivilege,
				On:        "testdb",
				User:      "******",
			},
		},

		// GRANT ALL
		{
			s: `GRANT ALL ON testdb TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.AllPrivileges,
				On:        "testdb",
				User:      "******",
			},
		},

		// GRANT ALL PRIVILEGES
		{
			s: `GRANT ALL PRIVILEGES ON testdb TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.AllPrivileges,
				On:        "testdb",
				User:      "******",
			},
		},

		// GRANT cluster admin
		{
			s: `GRANT ALL PRIVILEGES TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.AllPrivileges,
				User:      "******",
			},
		},

		// REVOKE READ
		{
			s: `REVOKE READ on testdb FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.ReadPrivilege,
				On:        "testdb",
				User:      "******",
			},
		},

		// REVOKE WRITE
		{
			s: `REVOKE WRITE ON testdb FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.WritePrivilege,
				On:        "testdb",
				User:      "******",
			},
		},

		// REVOKE ALL
		{
			s: `REVOKE ALL ON testdb FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.AllPrivileges,
				On:        "testdb",
				User:      "******",
			},
		},

		// REVOKE ALL PRIVILEGES
		{
			s: `REVOKE ALL PRIVILEGES ON testdb FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.AllPrivileges,
				On:        "testdb",
				User:      "******",
			},
		},

		// REVOKE cluster admin
		{
			s: `REVOKE ALL FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.AllPrivileges,
				User:      "******",
			},
		},

		// CREATE RETENTION POLICY
		{
			s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2`,
			stmt: &influxql.CreateRetentionPolicyStatement{
				Name:        "policy1",
				Database:    "testdb",
				Duration:    time.Hour,
				Replication: 2,
			},
		},

		// CREATE RETENTION POLICY with infinite retention
		{
			s: `CREATE RETENTION POLICY policy1 ON testdb DURATION INF REPLICATION 2`,
			stmt: &influxql.CreateRetentionPolicyStatement{
				Name:        "policy1",
				Database:    "testdb",
				Duration:    0,
				Replication: 2,
			},
		},

		// CREATE RETENTION POLICY ... DEFAULT
		{
			s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 2m REPLICATION 4 DEFAULT`,
			stmt: &influxql.CreateRetentionPolicyStatement{
				Name:        "policy1",
				Database:    "testdb",
				Duration:    2 * time.Minute,
				Replication: 4,
				Default:     true,
			},
		},

		// ALTER RETENTION POLICY
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb DURATION 1m REPLICATION 4 DEFAULT`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, 4, true),
		},

		// ALTER RETENTION POLICY with options in reverse order
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4 DURATION 1m`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, 4, true),
		},

		// ALTER RETENTION POLICY with infinite retention
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4 DURATION INF`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", 0, 4, true),
		},

		// ALTER RETENTION POLICY without optional DURATION
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, 4, true),
		},

		// ALTER RETENTION POLICY without optional REPLICATION
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, -1, true),
		},

		// ALTER RETENTION POLICY without optional DEFAULT
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 4`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, 4, false),
		},

		// Errors
		{s: ``, err: `found EOF, expected SELECT at line 1, char 1`},
		{s: `SELECT`, err: `found EOF, expected identifier, string, number, bool at line 1, char 8`},
		{s: `blah blah`, err: `found blah, expected SELECT at line 1, char 1`},
		{s: `SELECT field1 X`, err: `found X, expected FROM at line 1, char 15`},
		{s: `SELECT field1 FROM "series" WHERE X +;`, err: `found ;, expected identifier, string, number, bool at line 1, char 38`},
		{s: `SELECT field1 FROM myseries GROUP`, err: `found EOF, expected BY at line 1, char 35`},
		{s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected number at line 1, char 35`},
		{s: `SELECT field1 FROM myseries LIMIT 10.5`, err: `fractional parts not allowed in LIMIT at line 1, char 35`},
		{s: `SELECT field1 FROM myseries OFFSET`, err: `found EOF, expected number at line 1, char 36`},
		{s: `SELECT field1 FROM myseries OFFSET 10.5`, err: `fractional parts not allowed in OFFSET at line 1, char 36`},
		{s: `SELECT field1 FROM myseries ORDER`, err: `found EOF, expected BY at line 1, char 35`},
		{s: `SELECT field1 FROM myseries ORDER BY /`, err: `found /, expected identifier, ASC, or DESC at line 1, char 38`},
		{s: `SELECT field1 FROM myseries ORDER BY 1`, err: `found 1, expected identifier, ASC, or DESC at line 1, char 38`},
		{s: `SELECT field1 AS`, err: `found EOF, expected identifier at line 1, char 18`},
		{s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`},
		{s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse number at line 1, char 8`},
		{s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`},
		{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`},
		{s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`},
		{s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},
		{s: `DROP MEASUREMENT`, err: `found EOF, expected identifier at line 1, char 18`},
		{s: `DROP SERIES`, err: `found EOF, expected number at line 1, char 13`},
		{s: `DROP SERIES FROM`, err: `found EOF, expected identifier at line 1, char 18`},
		{s: `DROP SERIES FROM src WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},
		{s: `SHOW CONTINUOUS`, err: `found EOF, expected QUERIES at line 1, char 17`},
		{s: `SHOW RETENTION`, err: `found EOF, expected POLICIES at line 1, char 16`},
		{s: `SHOW RETENTION POLICIES`, err: `found EOF, expected identifier at line 1, char 25`},
		{s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, FIELD, MEASUREMENTS, RETENTION, SERIES, SERVERS, TAG, USERS at line 1, char 6`},
		{s: `DROP CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 17`},
		{s: `DROP CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 23`},
		{s: `CREATE CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 19`},
		{s: `CREATE CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 25`},
		{s: `DROP FOO`, err: `found FOO, expected SERIES, CONTINUOUS, MEASUREMENT at line 1, char 6`},
		{s: `DROP DATABASE`, err: `found EOF, expected identifier at line 1, char 15`},
		{s: `DROP RETENTION`, err: `found EOF, expected POLICY at line 1, char 16`},
		{s: `DROP RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 23`},
		{s: `DROP RETENTION POLICY "1h.cpu"`, err: `found EOF, expected ON at line 1, char 32`},
		{s: `DROP RETENTION POLICY "1h.cpu" ON`, err: `found EOF, expected identifier at line 1, char 35`},
		{s: `DROP USER`, err: `found EOF, expected identifier at line 1, char 11`},
		{s: `CREATE USER testuser`, err: `found EOF, expected WITH at line 1, char 22`},
		{s: `CREATE USER testuser WITH`, err: `found EOF, expected PASSWORD at line 1, char 27`},
		{s: `CREATE USER testuser WITH PASSWORD`, err: `found EOF, expected string at line 1, char 36`},
		{s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH`, err: `found EOF, expected ALL at line 1, char 47`},
		{s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH ALL`, err: `found EOF, expected PRIVILEGES at line 1, char 51`},
		{s: `GRANT`, err: `found EOF, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},
		{s: `GRANT BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},
		{s: `GRANT READ`, err: `found EOF, expected ON at line 1, char 12`},
		{s: `GRANT READ TO jdoe`, err: `found TO, expected ON at line 1, char 12`},
		{s: `GRANT READ ON`, err: `found EOF, expected identifier at line 1, char 15`},
		{s: `GRANT READ ON testdb`, err: `found EOF, expected TO at line 1, char 22`},
		{s: `GRANT READ ON testdb TO`, err: `found EOF, expected identifier at line 1, char 25`},
		{s: `GRANT`, err: `found EOF, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},
		{s: `REVOKE BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 8`},
		{s: `REVOKE READ`, err: `found EOF, expected ON at line 1, char 13`},
		{s: `REVOKE READ TO jdoe`, err: `found TO, expected ON at line 1, char 13`},
		{s: `REVOKE READ ON`, err: `found EOF, expected identifier at line 1, char 16`},
		{s: `REVOKE READ ON testdb`, err: `found EOF, expected FROM at line 1, char 23`},
		{s: `REVOKE READ ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 28`},
		{s: `CREATE RETENTION`, err: `found EOF, expected POLICY at line 1, char 18`},
		{s: `CREATE RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 25`},
		{s: `CREATE RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 33`},
		{s: `CREATE RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 36`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION at line 1, char 43`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION`, err: `found EOF, expected duration at line 1, char 52`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION bad`, err: `found bad, expected duration at line 1, char 52`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h`, err: `found EOF, expected REPLICATION at line 1, char 54`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION`, err: `found EOF, expected number at line 1, char 67`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 3.14`, err: `number must be an integer at line 1, char 67`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 0`, err: `invalid value 0: must be 1 <= n <= 2147483647 at line 1, char 67`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION bad`, err: `found bad, expected number at line 1, char 67`},
		{s: `ALTER`, err: `found EOF, expected RETENTION at line 1, char 7`},
		{s: `ALTER RETENTION`, err: `found EOF, expected POLICY at line 1, char 17`},
		{s: `ALTER RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 24`},
		{s: `ALTER RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 32`},
		{s: `ALTER RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 35`},
		{s: `ALTER RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION, RETENTION, DEFAULT at line 1, char 42`},
	}

	for i, tt := range tests {
		stmt, err := influxql.NewParser(strings.NewReader(tt.s)).ParseStatement()

		if !reflect.DeepEqual(tt.err, errstring(err)) {
			t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.s, tt.err, err)
		} else if st, ok := stmt.(*influxql.CreateContinuousQueryStatement); ok { // if it's a CQ, there is a non-exported field that gets memoized during parsing that needs to be set
			if st != nil && st.Source != nil {
				tt.stmt.(*influxql.CreateContinuousQueryStatement).Source.GroupByInterval()
			}
		} else if tt.err == "" && !reflect.DeepEqual(tt.stmt, stmt) {
			t.Logf("exp=%s\ngot=%s\n", mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt))
			t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt)
		}
	}
}
Example 20
// Ensure the parser can return an error from a malformed statement.
func TestParser_ParseQuery_ParseError(t *testing.T) {
	_, err := influxql.NewParser(strings.NewReader(`SELECT`)).ParseQuery()
	if err == nil || err.Error() != `found EOF, expected identifier, string, number, bool at line 1, char 8` {
		t.Fatalf("unexpected error: %s", err)
	}
}
Example 21
// Ensure the SELECT statement can have its start and end time set
func TestSelectStatement_SetTimeRange(t *testing.T) {
	q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)"
	stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		t.Fatalf("invalid statement: %q: %s", stmt, err)
	}

	s := stmt.(*influxql.SelectStatement)
	min, max := influxql.TimeRange(s.Condition)
	start := time.Now().Add(-20 * time.Hour).Round(time.Second).UTC()
	end := time.Now().Add(10 * time.Hour).Round(time.Second).UTC()
	s.SetTimeRange(start, end)
	min, max = influxql.TimeRange(s.Condition)

	if min != start {
		t.Fatalf("start time wasn't set properly.\n  exp: %s\n  got: %s", start, min)
	}
	// the end range is actually one nanosecond before the given one since end is exclusive
	end = end.Add(-time.Nanosecond)
	if max != end {
		t.Fatalf("end time wasn't set properly.\n  exp: %s\n  got: %s", end, max)
	}

	// ensure we can set a time on a select that already has one set
	start = time.Now().Add(-20 * time.Hour).Round(time.Second).UTC()
	end = time.Now().Add(10 * time.Hour).Round(time.Second).UTC()
	q = fmt.Sprintf("SELECT sum(value) from foo WHERE time >= %ds and time <= %ds GROUP BY time(10m)", start.Unix(), end.Unix())
	stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		t.Fatalf("invalid statement: %q: %s", stmt, err)
	}

	s = stmt.(*influxql.SelectStatement)
	min, max = influxql.TimeRange(s.Condition)
	if start != min || end != max {
		t.Fatalf("start and end times weren't equal:\n  exp: %s\n  got: %s\n  exp: %s\n  got:%s\n", start, min, end, max)
	}

	// update and ensure it saves it
	start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC()
	end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC()
	s.SetTimeRange(start, end)
	min, max = influxql.TimeRange(s.Condition)

	// TODO: right now the SetTimeRange can't override the start time if it's more recent than what they're trying to set it to.
	//       shouldn't matter for our purposes with continuous queries, but fix this later

	if min != start {
		t.Fatalf("start time wasn't set properly.\n  exp: %s\n  got: %s", start, min)
	}
	// the end range is actually one nanosecond before the given one since end is exclusive
	end = end.Add(-time.Nanosecond)
	if max != end {
		t.Fatalf("end time wasn't set properly.\n  exp: %s\n  got: %s", end, max)
	}

	// ensure that when we set a time range other where clause conditions are still there
	q = "SELECT sum(value) from foo WHERE foo = 'bar' and time < now() GROUP BY time(10m)"
	stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		t.Fatalf("invalid statement: %q: %s", stmt, err)
	}

	s = stmt.(*influxql.SelectStatement)

	// update and ensure it saves it
	start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC()
	end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC()
	s.SetTimeRange(start, end)
	min, max = influxql.TimeRange(s.Condition)

	if min != start {
		t.Fatalf("start time wasn't set properly.\n  exp: %s\n  got: %s", start, min)
	}
	// the end range is actually one nanosecond before the given one since end is exclusive
	end = end.Add(-time.Nanosecond)
	if max != end {
		t.Fatalf("end time wasn't set properly.\n  exp: %s\n  got: %s", end, max)
	}

	// ensure the where clause is there
	hasWhere := false
	influxql.WalkFunc(s.Condition, func(n influxql.Node) {
		if ex, ok := n.(*influxql.BinaryExpr); ok {
			if lhs, ok := ex.LHS.(*influxql.VarRef); ok {
				if lhs.Val == "foo" {
					if rhs, ok := ex.RHS.(*influxql.StringLiteral); ok {
						if rhs.Val == "bar" {
							hasWhere = true
						}
					}
				}
			}
		}
	})
	if !hasWhere {
		t.Fatal("set time range cleared out the where clause")
	}
}
Example 22
func TestSelectStatement_HasWildcard(t *testing.T) {
	var tests = []struct {
		stmt     string
		wildcard bool
	}{
		// No wildcards
		{
			stmt:     `SELECT value FROM cpu`,
			wildcard: false,
		},

		// Query wildcard
		{
			stmt:     `SELECT * FROM cpu`,
			wildcard: true,
		},

		// No GROUP BY wildcards
		{
			stmt:     `SELECT value FROM cpu GROUP BY host`,
			wildcard: false,
		},

		// No GROUP BY wildcards, time only
		{
			stmt:     `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`,
			wildcard: false,
		},

		// GROUP BY wildcard
		{
			stmt:     `SELECT value FROM cpu GROUP BY *`,
			wildcard: true,
		},

		// GROUP BY wildcard with time
		{
			stmt:     `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`,
			wildcard: true,
		},

		// GROUP BY wildcard with explicit
		{
			stmt:     `SELECT value FROM cpu GROUP BY *,host`,
			wildcard: true,
		},

		// GROUP BY multiple wildcards
		{
			stmt:     `SELECT value FROM cpu GROUP BY *,*`,
			wildcard: true,
		},

		// Combo
		{
			stmt:     `SELECT * FROM cpu GROUP BY *`,
			wildcard: true,
		},
	}

	for i, tt := range tests {
		// Parse statement.
		t.Logf("index: %d, statement: %s", i, tt.stmt)
		stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()
		if err != nil {
			t.Fatalf("invalid statement: %q: %s", tt.stmt, err)
		}

		// Test wildcard detection.
		if w := stmt.(*influxql.SelectStatement).HasWildcard(); tt.wildcard != w {
			t.Errorf("%d. %q: unexpected wildcard detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.wildcard, w)
			continue
		}
	}
}
Example 23
// Ensure the SELECT statement can extract substatements.
func TestSelectStatement_Substatement(t *testing.T) {
	var tests = []struct {
		stmt string
		expr *influxql.VarRef
		sub  string
		err  string
	}{
		// 0. Single series
		{
			stmt: `SELECT value FROM myseries WHERE value > 1`,
			expr: &influxql.VarRef{Val: "value"},
			sub:  `SELECT value FROM myseries WHERE value > 1.000`,
		},

		// 1. Simple join
		{
			stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb`,
			expr: &influxql.VarRef{Val: "aa.value"},
			sub:  `SELECT "aa.value" FROM aa`,
		},

		// 2. Simple merge
		{
			stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb`,
			expr: &influxql.VarRef{Val: "bb.value"},
			sub:  `SELECT "bb.value" FROM bb`,
		},

		// 3. Join with condition
		{
			stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb WHERE aa.host = 'servera' AND bb.host = 'serverb'`,
			expr: &influxql.VarRef{Val: "bb.value"},
			sub:  `SELECT "bb.value" FROM bb WHERE "bb.host" = 'serverb'`,
		},

		// 4. Join with complex condition
		{
			stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb WHERE aa.host = 'servera' AND (bb.host = 'serverb' OR bb.host = 'serverc') AND 1 = 2`,
			expr: &influxql.VarRef{Val: "bb.value"},
			sub:  `SELECT "bb.value" FROM bb WHERE ("bb.host" = 'serverb' OR "bb.host" = 'serverc') AND 1.000 = 2.000`,
		},

		// 5. 4 with different condition order
		{
			stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb WHERE ((bb.host = 'serverb' OR bb.host = 'serverc') AND aa.host = 'servera') AND 1 = 2`,
			expr: &influxql.VarRef{Val: "bb.value"},
			sub:  `SELECT "bb.value" FROM bb WHERE (("bb.host" = 'serverb' OR "bb.host" = 'serverc')) AND 1.000 = 2.000`,
		},
	}

	for i, tt := range tests {
		// Parse statement.
		stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()
		if err != nil {
			t.Fatalf("invalid statement: %q: %s", tt.stmt, err)
		}

		// Extract substatement.
		sub, err := stmt.(*influxql.SelectStatement).Substatement(tt.expr)
		if err != nil {
			t.Errorf("%d. %q: unexpected error: %s", i, tt.stmt, err)
			continue
		}
		if substr := sub.String(); tt.sub != substr {
			t.Errorf("%d. %q: unexpected substatement:\n\nexp=%s\n\ngot=%s\n\n", i, tt.stmt, tt.sub, substr)
			continue
		}
	}
}
Example 24
// Test SELECT statement wildcard rewrite.
func TestSelectStatement_RewriteWildcards(t *testing.T) {
	var fields = influxql.Fields{
		&influxql.Field{Expr: &influxql.VarRef{Val: "value1"}},
		&influxql.Field{Expr: &influxql.VarRef{Val: "value2"}},
	}
	var dimensions = influxql.Dimensions{
		&influxql.Dimension{Expr: &influxql.VarRef{Val: "host"}},
		&influxql.Dimension{Expr: &influxql.VarRef{Val: "region"}},
	}

	var tests = []struct {
		stmt    string
		rewrite string
	}{
		// No wildcards
		{
			stmt:    `SELECT value FROM cpu`,
			rewrite: `SELECT value FROM cpu`,
		},

		// Query wildcard
		{
			stmt:    `SELECT * FROM cpu`,
			rewrite: `SELECT value1, value2 FROM cpu GROUP BY host, region`,
		},

		// Parser fundamentally prohibits multiple query sources

		// Query wildcard with explicit
		// {
		// 	stmt:    `SELECT *,value1 FROM cpu`,
		// 	rewrite: `SELECT value1, value2, value1 FROM cpu`,
		// },

		// Query multiple wildcards
		// {
		// 	stmt:    `SELECT *,* FROM cpu`,
		// 	rewrite: `SELECT value1,value2,value1,value2 FROM cpu`,
		// },

		// No GROUP BY wildcards
		{
			stmt:    `SELECT value FROM cpu GROUP BY host`,
			rewrite: `SELECT value FROM cpu GROUP BY host`,
		},

		// No GROUP BY wildcards, time only
		{
			stmt:    `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`,
			rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY time(5ms)`,
		},

		// GROUP BY wildcard
		{
			stmt:    `SELECT value FROM cpu GROUP BY *`,
			rewrite: `SELECT value FROM cpu GROUP BY host, region`,
		},

		// GROUP BY wildcard with time
		{
			stmt:    `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`,
			rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m)`,
		},

		// GROUP BY wildcard with fill
		{
			stmt:    `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m) fill(0)`,
			rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m) fill(0)`,
		},

		// GROUP BY wildcard with explicit
		{
			stmt:    `SELECT value FROM cpu GROUP BY *,host`,
			rewrite: `SELECT value FROM cpu GROUP BY host, region, host`,
		},

		// GROUP BY multiple wildcards
		{
			stmt:    `SELECT value FROM cpu GROUP BY *,*`,
			rewrite: `SELECT value FROM cpu GROUP BY host, region, host, region`,
		},

		// Combo
		{
			stmt:    `SELECT * FROM cpu GROUP BY *`,
			rewrite: `SELECT value1, value2 FROM cpu GROUP BY host, region`,
		},
	}

	for i, tt := range tests {
		t.Logf("index: %d, statement: %s", i, tt.stmt)
		// Parse statement.
		stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()
		if err != nil {
			t.Fatalf("invalid statement: %q: %s", tt.stmt, err)
		}

		// Rewrite statement.
		rw := stmt.(*influxql.SelectStatement).RewriteWildcards(fields, dimensions)
		if rw == nil {
			t.Errorf("%d. %q: unexpected nil statement", i, tt.stmt)
			continue
		}
		if rw := rw.String(); tt.rewrite != rw {
			t.Errorf("%d. %q: unexpected rewrite:\n\nexp=%s\n\ngot=%s\n\n", i, tt.stmt, tt.rewrite, rw)
			continue
		}
	}
}
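RewriteWildcards can likewise be invoked directly on a parsed statement. The following standalone sketch mirrors the "Combo" case above; it is an illustration with an assumed import path, not code from the original file.

package main

import (
	"fmt"
	"strings"

	"github.com/influxdb/influxdb/influxql" // assumed import path
)

func main() {
	// Parse a statement that uses wildcards in both the field list and GROUP BY.
	stmt, err := influxql.NewParser(strings.NewReader(`SELECT * FROM cpu GROUP BY *`)).ParseStatement()
	if err != nil {
		panic(err)
	}

	// The known fields and tag dimensions the wildcards should expand to.
	fields := influxql.Fields{
		&influxql.Field{Expr: &influxql.VarRef{Val: "value1"}},
		&influxql.Field{Expr: &influxql.VarRef{Val: "value2"}},
	}
	dimensions := influxql.Dimensions{
		&influxql.Dimension{Expr: &influxql.VarRef{Val: "host"}},
		&influxql.Dimension{Expr: &influxql.VarRef{Val: "region"}},
	}

	// Expected per the "Combo" case: SELECT value1, value2 FROM cpu GROUP BY host, region
	rw := stmt.(*influxql.SelectStatement).RewriteWildcards(fields, dimensions)
	fmt.Println(rw.String())
}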
Example no. 25
// Ensure that the String() value of a statement is parseable
func TestParseString(t *testing.T) {
	var tests = []struct {
		stmt string
	}{
		{
			stmt: `SELECT "cpu load" FROM myseries`,
		},
		{
			stmt: `SELECT "cpu load" FROM "my series"`,
		},
		{
			stmt: `SELECT "cpu\"load" FROM myseries`,
		},
		{
			stmt: `SELECT "cpu'load" FROM myseries`,
		},
		{
			stmt: `SELECT "cpu load" FROM "my\"series"`,
		},
		{
			stmt: `SELECT "field with spaces" FROM "\"ugly\" db"."\"ugly\" rp"."\"ugly\" measurement"`,
		},
		{
			stmt: `SELECT * FROM myseries`,
		},
		{
			stmt: `DROP DATABASE "!"`,
		},
		{
			stmt: `DROP RETENTION POLICY "my rp" ON "a database"`,
		},
		{
			stmt: `CREATE RETENTION POLICY "my rp" ON "a database" DURATION 1d REPLICATION 1`,
		},
		{
			stmt: `ALTER RETENTION POLICY "my rp" ON "a database" DEFAULT`,
		},
		{
			stmt: `SHOW RETENTION POLICIES ON "a database"`,
		},
		{
			stmt: `SHOW TAG VALUES WITH KEY IN ("a long name", short)`,
		},
		{
			stmt: `DROP CONTINUOUS QUERY "my query" ON "my database"`,
		},
		// See issues https://github.com/influxdb/influxdb/issues/1647
		// and https://github.com/influxdb/influxdb/issues/4404
		//{
		//	stmt: `DELETE FROM "my db"."my rp"."my measurement"`,
		//},
		{
			stmt: `DROP SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp"`,
		},
		{
			stmt: `CREATE SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp" DESTINATIONS ALL 'my host', 'my other host'`,
		},
		{
			stmt: `SHOW MEASUREMENTS WITH MEASUREMENT =~ /foo/`,
		},
		{
			stmt: `SHOW MEASUREMENTS WITH MEASUREMENT = "and/or"`,
		},
		{
			stmt: `DROP USER "user with spaces"`,
		},
		{
			stmt: `GRANT ALL PRIVILEGES ON "db with spaces" TO "user with spaces"`,
		},
		{
			stmt: `GRANT ALL PRIVILEGES TO "user with spaces"`,
		},
		{
			stmt: `SHOW GRANTS FOR "user with spaces"`,
		},
		{
			stmt: `REVOKE ALL PRIVILEGES ON "db with spaces" FROM "user with spaces"`,
		},
		{
			stmt: `REVOKE ALL PRIVILEGES FROM "user with spaces"`,
		},
		{
			stmt: `CREATE DATABASE "db with spaces"`,
		},
	}

	for _, tt := range tests {
		// Parse statement.
		stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()
		if err != nil {
			t.Fatalf("invalid statement: %q: %s", tt.stmt, err)
		}

		stmtCopy, err := influxql.NewParser(strings.NewReader(stmt.String())).ParseStatement()
		if err != nil {
			t.Fatalf("failed to parse string: %v\norig: %v\ngot: %v", err, tt.stmt, stmt.String())
		}

		if !reflect.DeepEqual(stmt, stmtCopy) {
			t.Fatalf("statement changed after stringifying and re-parsing:\noriginal : %v\nre-parsed: %v\n", tt.stmt, stmtCopy.String())
		}
	}
}
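The round-trip idiom the test relies on (parse, stringify, re-parse, compare ASTs) can also be used as a standalone check. A minimal sketch, assuming the same influxql import path:

package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/influxdb/influxdb/influxql" // assumed import path
)

func main() {
	// Parse a statement with quoted identifiers.
	stmt, err := influxql.NewParser(strings.NewReader(`SELECT "cpu load" FROM "my series"`)).ParseStatement()
	if err != nil {
		panic(err)
	}

	// Re-parse the stringified form and confirm the ASTs match.
	reparsed, err := influxql.NewParser(strings.NewReader(stmt.String())).ParseStatement()
	if err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(stmt, reparsed)) // true when String() output round-trips cleanly
}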
Example no. 26
// serveQuery parses an incoming query and, if valid, executes the query.
func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *meta.UserInfo) {
	q := r.URL.Query()
	pretty := q.Get("pretty") == "true"

	qp := strings.TrimSpace(q.Get("q"))
	if qp == "" {
		httpError(w, `missing required parameter "q"`, pretty, http.StatusBadRequest)
		return
	}

	epoch := strings.TrimSpace(q.Get("epoch"))

	p := influxql.NewParser(strings.NewReader(qp))
	db := q.Get("db")

	// Parse query from query string.
	query, err := p.ParseQuery()
	if err != nil {
		httpError(w, "error parsing query: "+err.Error(), pretty, http.StatusBadRequest)
		return
	}

	// Parse chunk size. Use default if not provided or unparsable.
	chunked := (q.Get("chunked") == "true")
	chunkSize := DefaultChunkSize
	if chunked {
		if n, err := strconv.ParseInt(q.Get("chunk_size"), 10, 64); err == nil {
			chunkSize = int(n)
		}
	}

	// Execute query.
	w.Header().Add("content-type", "application/json")
	results, err := h.QueryExecutor.ExecuteQuery(query, db, chunkSize)

	if _, ok := err.(meta.AuthError); ok {
		w.WriteHeader(http.StatusUnauthorized)
		return
	} else if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// If we're not chunking, this is the in-memory buffer for all results before they are sent to the client.
	resp := Response{Results: make([]*influxql.Result, 0)}
	statusWritten := false

	// pull all results from the channel
	for r := range results {
		// write the status header based on the first result returned in the channel
		if !statusWritten {
			status := http.StatusOK

			if r != nil && r.Err != nil {
				if _, ok := r.Err.(meta.AuthError); ok {
					status = http.StatusUnauthorized
				}
			}

			w.WriteHeader(status)
			statusWritten = true
		}

		// Ignore nil results.
		if r == nil {
			continue
		}

		// if requested, convert result timestamps to epoch
		if epoch != "" {
			convertToEpoch(r, epoch)
		}

		// Write out result immediately if chunked.
		if chunked {
			w.Write(MarshalJSON(Response{
				Results: []*influxql.Result{r},
			}, pretty))
			w.(http.Flusher).Flush()
			continue
		}

		// It's not chunked, so buffer results in memory.
		// Results for the same statement need to be combined, so check whether
		// this new result belongs to the previous statement or starts the next one.
		l := len(resp.Results)
		if l == 0 {
			resp.Results = append(resp.Results, r)
		} else if resp.Results[l-1].StatementID == r.StatementID {
			cr := resp.Results[l-1]
			cr.Series = append(cr.Series, r.Series...)
		} else {
			resp.Results = append(resp.Results, r)
		}
	}

	// If it's not chunked, everything was buffered in memory, so write it out now.
	if !chunked {
		w.Write(MarshalJSON(resp, pretty))
	}
}
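For context, the handler above is driven entirely by URL parameters: q, db, pretty, epoch, chunked and chunk_size. The client-side sketch below shows one way to call such an endpoint with plain net/http; the host, database name and epoch unit are placeholders, not values taken from the original code.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("q", `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY time(1m)`)
	params.Set("db", "mydb")          // placeholder database name
	params.Set("epoch", "ms")         // assumed epoch unit handled by convertToEpoch
	params.Set("chunked", "true")     // stream each result as it arrives
	params.Set("chunk_size", "10000") // parsed by the handler with strconv.ParseInt

	// Placeholder address; the handler above is mounted at /query by the server.
	resp, err := http.Get("http://localhost:8086/query?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}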
Example no. 27
// Ensure the parser can parse strings into Statement ASTs.
func TestParser_ParseStatement(t *testing.T) {
	var tests = []struct {
		s    string
		stmt influxql.Statement
		err  string
	}{
		// SELECT * statement
		{
			s: `SELECT * FROM myseries`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{
					&influxql.Field{Expr: &influxql.Wildcard{}},
				},
				Source: &influxql.Measurement{Name: "myseries"},
			},
		},

		// SELECT statement
		{
			s: `SELECT field1, field2 ,field3 AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' GROUP BY 10h ORDER BY ASC LIMIT 20 OFFSET 10;`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{
					&influxql.Field{Expr: &influxql.VarRef{Val: "field1"}},
					&influxql.Field{Expr: &influxql.VarRef{Val: "field2"}},
					&influxql.Field{Expr: &influxql.VarRef{Val: "field3"}, Alias: "field_x"},
				},
				Source: &influxql.Measurement{Name: "myseries"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
				Dimensions: []*influxql.Dimension{
					&influxql.Dimension{Expr: &influxql.DurationLiteral{Val: 10 * time.Hour}},
				},
				SortFields: []*influxql.SortField{
					&influxql.SortField{Ascending: true},
				},
				Limit:  20,
				Offset: 10,
			},
		},

		// SELECT statement with JOIN
		{
			s: `SELECT field1 FROM join(aa,"bb", cc)`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{&influxql.Field{Expr: &influxql.VarRef{Val: "field1"}}},
				Source: &influxql.Join{
					Measurements: []*influxql.Measurement{
						{Name: "aa"},
						{Name: `"bb"`},
						{Name: "cc"},
					},
				},
			},
		},

		// SELECT statement with MERGE
		{
			s: `SELECT field1 FROM merge(aa,b.b)`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{&influxql.Field{Expr: &influxql.VarRef{Val: "field1"}}},
				Source: &influxql.Merge{
					Measurements: []*influxql.Measurement{
						{Name: "aa"},
						{Name: "b.b"},
					},
				},
			},
		},

		// SELECT statement (lowercase)
		{
			s: `select my_field from myseries`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{&influxql.Field{Expr: &influxql.VarRef{Val: "my_field"}}},
				Source: &influxql.Measurement{Name: "myseries"},
			},
		},

		// SELECT statement with multiple ORDER BY fields
		{
			s: `SELECT field1 FROM myseries ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.SelectStatement{
				Fields: []*influxql.Field{&influxql.Field{Expr: &influxql.VarRef{Val: "field1"}}},
				Source: &influxql.Measurement{Name: "myseries"},
				SortFields: []*influxql.SortField{
					&influxql.SortField{Ascending: true},
					&influxql.SortField{Name: "field1"},
					&influxql.SortField{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// DELETE statement
		{
			s: `DELETE FROM myseries WHERE host = 'hosta.influxdb.org'`,
			stmt: &influxql.DeleteStatement{
				Source: &influxql.Measurement{Name: "myseries"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "host"},
					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
				},
			},
		},

		// SHOW DATABASES
		{
			s:    `SHOW DATABASES`,
			stmt: &influxql.ShowDatabasesStatement{},
		},

		// SHOW SERIES statement
		{
			s:    `SHOW SERIES`,
			stmt: &influxql.ShowSeriesStatement{},
		},

		// SHOW SERIES WHERE with ORDER BY and LIMIT
		{
			s: `SHOW SERIES WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowSeriesStatement{
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					&influxql.SortField{Ascending: true},
					&influxql.SortField{Name: "field1"},
					&influxql.SortField{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SHOW MEASUREMENTS WHERE with ORDER BY and LIMIT
		{
			s: `SHOW MEASUREMENTS WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowMeasurementsStatement{
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					&influxql.SortField{Ascending: true},
					&influxql.SortField{Name: "field1"},
					&influxql.SortField{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SHOW RETENTION POLICIES
		{
			s: `SHOW RETENTION POLICIES mydb`,
			stmt: &influxql.ShowRetentionPoliciesStatement{
				Database: "mydb",
			},
		},

		// SHOW TAG KEYS
		{
			s: `SHOW TAG KEYS FROM src`,
			stmt: &influxql.ShowTagKeysStatement{
				Source: &influxql.Measurement{Name: "src"},
			},
		},

		// SHOW TAG KEYS
		{
			s: `SHOW TAG KEYS FROM src WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowTagKeysStatement{
				Source: &influxql.Measurement{Name: "src"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					&influxql.SortField{Ascending: true},
					&influxql.SortField{Name: "field1"},
					&influxql.SortField{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SHOW TAG VALUES
		{
			s: `SHOW TAG VALUES FROM src WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowTagValuesStatement{
				Source: &influxql.Measurement{Name: "src"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					&influxql.SortField{Ascending: true},
					&influxql.SortField{Name: "field1"},
					&influxql.SortField{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SHOW TAG VALUES ... TAG KEY =
		{
			s: `SHOW TAG VALUES FROM cpu WHERE TAG KEY = 'host' AND region = 'uswest'`,
			stmt: &influxql.ShowTagValuesStatement{
				Source: &influxql.Measurement{Name: "cpu"},
				Condition: &influxql.BinaryExpr{
					Op: influxql.AND,
					LHS: &influxql.BinaryExpr{
						Op:  influxql.EQ,
						LHS: &influxql.TagKeyIdent{},
						RHS: &influxql.StringLiteral{Val: "host"},
					},
					RHS: &influxql.BinaryExpr{
						Op:  influxql.EQ,
						LHS: &influxql.VarRef{Val: "region"},
						RHS: &influxql.StringLiteral{Val: "uswest"},
					},
				},
			},
		},

		// SHOW TAG VALUES ... AND TAG KEY =
		{
			s: `SHOW TAG VALUES FROM cpu WHERE region = 'uswest' AND TAG KEY = 'host'`,
			stmt: &influxql.ShowTagValuesStatement{
				Source: &influxql.Measurement{Name: "cpu"},
				Condition: &influxql.BinaryExpr{
					Op: influxql.AND,
					LHS: &influxql.BinaryExpr{
						Op:  influxql.EQ,
						LHS: &influxql.VarRef{Val: "region"},
						RHS: &influxql.StringLiteral{Val: "uswest"},
					},
					RHS: &influxql.BinaryExpr{
						Op:  influxql.EQ,
						LHS: &influxql.TagKeyIdent{},
						RHS: &influxql.StringLiteral{Val: "host"},
					},
				},
			},
		},

		// SHOW TAG VALUES ... AND ... = TAG KEY
		{
			s: `SHOW TAG VALUES FROM cpu WHERE region = 'uswest' AND 'host' = TAG KEY`,
			stmt: &influxql.ShowTagValuesStatement{
				Source: &influxql.Measurement{Name: "cpu"},
				Condition: &influxql.BinaryExpr{
					Op: influxql.AND,
					LHS: &influxql.BinaryExpr{
						Op:  influxql.EQ,
						LHS: &influxql.VarRef{Val: "region"},
						RHS: &influxql.StringLiteral{Val: "uswest"},
					},
					RHS: &influxql.BinaryExpr{
						Op:  influxql.EQ,
						LHS: &influxql.StringLiteral{Val: "host"},
						RHS: &influxql.TagKeyIdent{},
					},
				},
			},
		},

		// SHOW USERS
		{
			s:    `SHOW USERS`,
			stmt: &influxql.ShowUsersStatement{},
		},

		// SHOW FIELD KEYS
		{
			s: `SHOW FIELD KEYS FROM src WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowFieldKeysStatement{
				Source: &influxql.Measurement{Name: "src"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					&influxql.SortField{Ascending: true},
					&influxql.SortField{Name: "field1"},
					&influxql.SortField{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// SHOW FIELD VALUES
		{
			s: `SHOW FIELD VALUES FROM src WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &influxql.ShowFieldValuesStatement{
				Source: &influxql.Measurement{Name: "src"},
				Condition: &influxql.BinaryExpr{
					Op:  influxql.EQ,
					LHS: &influxql.VarRef{Val: "region"},
					RHS: &influxql.StringLiteral{Val: "uswest"},
				},
				SortFields: []*influxql.SortField{
					&influxql.SortField{Ascending: true},
					&influxql.SortField{Name: "field1"},
					&influxql.SortField{Name: "field2"},
				},
				Limit: 10,
			},
		},

		// DROP SERIES statement
		{
			s:    `DROP SERIES myseries`,
			stmt: &influxql.DropSeriesStatement{Name: "myseries"},
		},

		// SHOW CONTINUOUS QUERIES statement
		{
			s:    `SHOW CONTINUOUS QUERIES`,
			stmt: &influxql.ShowContinuousQueriesStatement{},
		},

		// CREATE CONTINUOUS QUERY ... INTO <measurement>
		{
			s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT count() INTO measure1 FROM myseries END`,
			stmt: &influxql.CreateContinuousQueryStatement{
				Name:     "myquery",
				Database: "testdb",
				Source: &influxql.SelectStatement{
					Fields: []*influxql.Field{&influxql.Field{Expr: &influxql.Call{Name: "count"}}},
					Target: &influxql.Target{Measurement: "measure1"},
					Source: &influxql.Measurement{Name: "myseries"},
				},
			},
		},

		// CREATE CONTINUOUS QUERY ... INTO <retention-policy>.<measurement>
		{
			s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT count() INTO "1h.policy1"."cpu.load" FROM myseries END`,
			stmt: &influxql.CreateContinuousQueryStatement{
				Name:     "myquery",
				Database: "testdb",
				Source: &influxql.SelectStatement{
					Fields: []*influxql.Field{&influxql.Field{Expr: &influxql.Call{Name: "count"}}},
					Target: &influxql.Target{
						Measurement: `"1h.policy1"."cpu.load"`,
					},
					Source: &influxql.Measurement{Name: "myseries"},
				},
			},
		},

		// CREATE DATABASE statement
		{
			s: `CREATE DATABASE testdb`,
			stmt: &influxql.CreateDatabaseStatement{
				Name: "testdb",
			},
		},

		// CREATE USER statement
		{
			s: `CREATE USER testuser WITH PASSWORD 'pwd1337'`,
			stmt: &influxql.CreateUserStatement{
				Name:     "testuser",
				Password: "******",
			},
		},

		// CREATE USER ... WITH ALL PRIVILEGES
		{
			s: `CREATE USER testuser WITH PASSWORD 'pwd1337' WITH ALL PRIVILEGES`,
			stmt: &influxql.CreateUserStatement{
				Name:      "testuser",
				Password:  "pwd1337",
				Privilege: influxql.NewPrivilege(influxql.AllPrivileges),
			},
		},

		// DROP CONTINUOUS QUERY statement
		{
			s:    `DROP CONTINUOUS QUERY myquery`,
			stmt: &influxql.DropContinuousQueryStatement{Name: "myquery"},
		},

		// DROP DATABASE statement
		{
			s:    `DROP DATABASE testdb`,
			stmt: &influxql.DropDatabaseStatement{Name: "testdb"},
		},

		// DROP RETENTION POLICY
		{
			s: `DROP RETENTION POLICY "1h.cpu" ON mydb`,
			stmt: &influxql.DropRetentionPolicyStatement{
				Name:     `"1h.cpu"`,
				Database: `mydb`,
			},
		},

		// DROP USER statement
		{
			s:    `DROP USER jdoe`,
			stmt: &influxql.DropUserStatement{Name: "jdoe"},
		},

		// GRANT READ
		{
			s: `GRANT READ ON testdb TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.ReadPrivilege,
				On:        "testdb",
				User:      "jdoe",
			},
		},

		// GRANT WRITE
		{
			s: `GRANT WRITE ON testdb TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.WritePrivilege,
				On:        "testdb",
				User:      "jdoe",
			},
		},

		// GRANT ALL
		{
			s: `GRANT ALL ON testdb TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.AllPrivileges,
				On:        "testdb",
				User:      "jdoe",
			},
		},

		// GRANT ALL PRIVILEGES
		{
			s: `GRANT ALL PRIVILEGES ON testdb TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.AllPrivileges,
				On:        "testdb",
				User:      "jdoe",
			},
		},

		// GRANT cluster admin
		{
			s: `GRANT ALL PRIVILEGES TO jdoe`,
			stmt: &influxql.GrantStatement{
				Privilege: influxql.AllPrivileges,
				User:      "jdoe",
			},
		},

		// REVOKE READ
		{
			s: `REVOKE READ on testdb FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.ReadPrivilege,
				On:        "testdb",
				User:      "jdoe",
			},
		},

		// REVOKE WRITE
		{
			s: `REVOKE WRITE ON testdb FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.WritePrivilege,
				On:        "testdb",
				User:      "jdoe",
			},
		},

		// REVOKE ALL
		{
			s: `REVOKE ALL ON testdb FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.AllPrivileges,
				On:        "testdb",
				User:      "jdoe",
			},
		},

		// REVOKE ALL PRIVILEGES
		{
			s: `REVOKE ALL PRIVILEGES ON testdb FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.AllPrivileges,
				On:        "testdb",
				User:      "jdoe",
			},
		},

		// REVOKE cluster admin
		{
			s: `REVOKE ALL FROM jdoe`,
			stmt: &influxql.RevokeStatement{
				Privilege: influxql.AllPrivileges,
				User:      "jdoe",
			},
		},

		// CREATE RETENTION POLICY
		{
			s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2`,
			stmt: &influxql.CreateRetentionPolicyStatement{
				Name:        "policy1",
				Database:    "testdb",
				Duration:    time.Hour,
				Replication: 2,
			},
		},

		// CREATE RETENTION POLICY ... DEFAULT
		{
			s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 2m REPLICATION 4 DEFAULT`,
			stmt: &influxql.CreateRetentionPolicyStatement{
				Name:        "policy1",
				Database:    "testdb",
				Duration:    2 * time.Minute,
				Replication: 4,
				Default:     true,
			},
		},

		// ALTER RETENTION POLICY
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb DURATION 1m REPLICATION 4 DEFAULT`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, 4, true),
		},

		// ALTER RETENTION POLICY with options in reverse order
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4 DURATION 1m`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", time.Minute, 4, true),
		},

		// ALTER RETENTION POLICY without optional DURATION
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, 4, true),
		},

		// ALTER RETENTION POLICY without optional REPLICATION
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, -1, true),
		},

		// ALTER RETENTION POLICY without optional DEFAULT
		{
			s:    `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 4`,
			stmt: newAlterRetentionPolicyStatement("policy1", "testdb", -1, 4, false),
		},

		// Errors
		{s: ``, err: `found EOF, expected SELECT at line 1, char 1`},
		{s: `SELECT`, err: `found EOF, expected identifier, string, number, bool at line 1, char 8`},
		{s: `blah blah`, err: `found blah, expected SELECT at line 1, char 1`},
		{s: `SELECT field1 X`, err: `found X, expected FROM at line 1, char 15`},
		{s: `SELECT field1 FROM "series" WHERE X +;`, err: `found ;, expected identifier, string, number, bool at line 1, char 38`},
		{s: `SELECT field1 FROM myseries GROUP`, err: `found EOF, expected BY at line 1, char 35`},
		{s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected number at line 1, char 35`},
		{s: `SELECT field1 FROM myseries LIMIT 10.5`, err: `fractional parts not allowed in LIMIT at line 1, char 35`},
		{s: `SELECT field1 FROM myseries LIMIT 0`, err: `LIMIT must be > 0 at line 1, char 35`},
		{s: `SELECT field1 FROM myseries OFFSET`, err: `found EOF, expected number at line 1, char 36`},
		{s: `SELECT field1 FROM myseries OFFSET 10.5`, err: `fractional parts not allowed in OFFSET at line 1, char 36`},
		{s: `SELECT field1 FROM myseries OFFSET 0`, err: `OFFSET must be > 0 at line 1, char 36`},
		{s: `SELECT field1 FROM myseries ORDER`, err: `found EOF, expected BY at line 1, char 35`},
		{s: `SELECT field1 FROM myseries ORDER BY /`, err: `found /, expected identifier, ASC, or DESC at line 1, char 38`},
		{s: `SELECT field1 FROM myseries ORDER BY 1`, err: `found 1, expected identifier, ASC, or DESC at line 1, char 38`},
		{s: `SELECT field1 AS`, err: `found EOF, expected identifier at line 1, char 18`},
		{s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`},
		{s: `SELECT field1 FROM myseries GROUP BY *`, err: `found *, expected identifier, string, number, bool at line 1, char 38`},
		{s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse number at line 1, char 8`},
		{s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`},
		{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`},
		{s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`},
		{s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},
		{s: `DROP SERIES`, err: `found EOF, expected identifier at line 1, char 13`},
		{s: `SHOW CONTINUOUS`, err: `found EOF, expected QUERIES at line 1, char 17`},
		{s: `SHOW RETENTION`, err: `found EOF, expected POLICIES at line 1, char 16`},
		{s: `SHOW RETENTION POLICIES`, err: `found EOF, expected identifier at line 1, char 25`},
		{s: `SHOW FOO`, err: `found FOO, expected SERIES, CONTINUOUS, MEASUREMENTS, TAG, FIELD, RETENTION at line 1, char 6`},
		{s: `DROP CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 17`},
		{s: `DROP CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 23`},
		{s: `DROP FOO`, err: `found FOO, expected SERIES, CONTINUOUS at line 1, char 6`},
		{s: `DROP DATABASE`, err: `found EOF, expected identifier at line 1, char 15`},
		{s: `DROP RETENTION`, err: `found EOF, expected POLICY at line 1, char 16`},
		{s: `DROP RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 23`},
		{s: `DROP RETENTION POLICY "1h.cpu"`, err: `found EOF, expected ON at line 1, char 32`},
		{s: `DROP RETENTION POLICY "1h.cpu" ON`, err: `found EOF, expected identifier at line 1, char 35`},
		{s: `DROP USER`, err: `found EOF, expected identifier at line 1, char 11`},
		{s: `CREATE USER testuser`, err: `found EOF, expected WITH at line 1, char 22`},
		{s: `CREATE USER testuser WITH`, err: `found EOF, expected PASSWORD at line 1, char 27`},
		{s: `CREATE USER testuser WITH PASSWORD`, err: `found EOF, expected string at line 1, char 36`},
		{s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH`, err: `found EOF, expected ALL at line 1, char 47`},
		{s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH ALL`, err: `found EOF, expected PRIVILEGES at line 1, char 51`},
		{s: `GRANT`, err: `found EOF, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},
		{s: `GRANT BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},
		{s: `GRANT READ`, err: `found EOF, expected ON at line 1, char 12`},
		{s: `GRANT READ TO jdoe`, err: `found TO, expected ON at line 1, char 12`},
		{s: `GRANT READ ON`, err: `found EOF, expected identifier at line 1, char 15`},
		{s: `GRANT READ ON testdb`, err: `found EOF, expected TO at line 1, char 22`},
		{s: `GRANT READ ON testdb TO`, err: `found EOF, expected identifier at line 1, char 25`},
		{s: `REVOKE BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 8`},
		{s: `REVOKE READ`, err: `found EOF, expected ON at line 1, char 13`},
		{s: `REVOKE READ TO jdoe`, err: `found TO, expected ON at line 1, char 13`},
		{s: `REVOKE READ ON`, err: `found EOF, expected identifier at line 1, char 16`},
		{s: `REVOKE READ ON testdb`, err: `found EOF, expected FROM at line 1, char 23`},
		{s: `REVOKE READ ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 28`},
		{s: `CREATE RETENTION`, err: `found EOF, expected POLICY at line 1, char 18`},
		{s: `CREATE RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 25`},
		{s: `CREATE RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 33`},
		{s: `CREATE RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 36`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION at line 1, char 43`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION`, err: `found EOF, expected duration at line 1, char 52`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION bad`, err: `found bad, expected duration at line 1, char 52`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h`, err: `found EOF, expected REPLICATION at line 1, char 54`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION`, err: `found EOF, expected number at line 1, char 67`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 3.14`, err: `number must be an integer at line 1, char 67`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 0`, err: `invalid value 0: must be 1 <= n <= 2147483647 at line 1, char 67`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION bad`, err: `found bad, expected number at line 1, char 67`},
		{s: `ALTER`, err: `found EOF, expected RETENTION at line 1, char 7`},
		{s: `ALTER RETENTION`, err: `found EOF, expected POLICY at line 1, char 17`},
		{s: `ALTER RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 24`},
		{s: `ALTER RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 32`},
		{s: `ALTER RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 35`},
		{s: `ALTER RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION, RETENTION, DEFAULT at line 1, char 42`},
	}

	for i, tt := range tests {
		stmt, err := influxql.NewParser(strings.NewReader(tt.s)).ParseStatement()
		if !reflect.DeepEqual(tt.err, errstring(err)) {
			t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.s, tt.err, err)
		} else if tt.err == "" && !reflect.DeepEqual(tt.stmt, stmt) {
			t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt)
		}
	}
}
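The loop above relies on two helpers defined elsewhere in the test file: errstring and newAlterRetentionPolicyStatement. Plausible definitions are sketched below; they would sit in the same test package as the cases above, and the pointer-typed Duration and Replication fields are assumptions about the influxql AST, not code from the original source.

package influxql_test

import (
	"time"

	"github.com/influxdb/influxdb/influxql" // assumed import path
)

// errstring returns the error's message, or an empty string for a nil error,
// so that string comparison against tt.err works uniformly. This is a common
// test-helper pattern; the original definition is not shown in this excerpt.
func errstring(err error) string {
	if err != nil {
		return err.Error()
	}
	return ""
}

// newAlterRetentionPolicyStatement builds the expected AST node, treating a
// negative duration or replication factor as "not specified". The pointer
// field assignments below are an assumption about the struct layout.
func newAlterRetentionPolicyStatement(name, database string, d time.Duration, replication int, dfault bool) *influxql.AlterRetentionPolicyStatement {
	stmt := &influxql.AlterRetentionPolicyStatement{
		Name:     name,
		Database: database,
		Default:  dfault,
	}
	if d > -1 {
		stmt.Duration = &d
	}
	if replication > -1 {
		stmt.Replication = &replication
	}
	return stmt
}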