Example #1
// Ensure the time range of an expression can be extracted.
func TestTimeRange(t *testing.T) {
	for i, tt := range []struct {
		expr          string
		min, max, err string
	}{
		// LHS VarRef
		{expr: `time > '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00.000000001Z`, max: `0001-01-01T00:00:00Z`},
		{expr: `time >= '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},
		{expr: `time < '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `1999-12-31T23:59:59.999999999Z`},
		{expr: `time <= '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`},

		// RHS VarRef
		{expr: `'2000-01-01 00:00:00' > time`, min: `0001-01-01T00:00:00Z`, max: `1999-12-31T23:59:59.999999999Z`},
		{expr: `'2000-01-01 00:00:00' >= time`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`},
		{expr: `'2000-01-01 00:00:00' < time`, min: `2000-01-01T00:00:00.000000001Z`, max: `0001-01-01T00:00:00Z`},
		{expr: `'2000-01-01 00:00:00' <= time`, min: `2000-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},

		// Number literals
		{expr: `time < 10`, min: `0001-01-01T00:00:00Z`, max: `1970-01-01T00:00:00.000000009Z`},
		{expr: `time < 10i`, min: `0001-01-01T00:00:00Z`, max: `1970-01-01T00:00:00.000000009Z`},

		// Equality
		{expr: `time = '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T00:00:00.000000001Z`},

		// Multiple time expressions.
		{expr: `time >= '2000-01-01 00:00:00' AND time < '2000-01-02 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T23:59:59.999999999Z`},

		// Min/max crossover
		{expr: `time >= '2000-01-01 00:00:00' AND time <= '1999-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `1999-01-01T00:00:00Z`},

		// Absolute time
		{expr: `time = 1388534400s`, min: `2014-01-01T00:00:00Z`, max: `2014-01-01T00:00:00.000000001Z`},

		// Non-comparative expressions.
		{expr: `time`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},
		{expr: `time + 2`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},
		{expr: `time - '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},
		{expr: `time AND '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},

		// Invalid time expressions.
		{expr: `time > "2000-01-01 00:00:00"`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `invalid operation: time and *influxql.VarRef are not compatible`},
	} {
		// Extract time range.
		expr := MustParseExpr(tt.expr)
		min, max, err := influxql.TimeRange(expr)

		// Compare with expected min/max.
		if min := min.Format(time.RFC3339Nano); tt.min != min {
			t.Errorf("%d. %s: unexpected min:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.min, min)
			continue
		}
		if max := max.Format(time.RFC3339Nano); tt.max != max {
			t.Errorf("%d. %s: unexpected max:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.max, max)
			continue
		}
		if (err != nil && err.Error() != tt.err) || (err == nil && tt.err != "") {
			t.Errorf("%d. %s: unexpected error:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.err, err)
		}
	}
}
Example #2
// MustTimeRange extracts the time range from an expression. It panics on error.
func MustTimeRange(expr influxql.Expr) (min, max time.Time) {
	min, max, err := influxql.TimeRange(expr)
	if err != nil {
		panic(err)
	}
	return min, max
}
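As a minimal sketch (assuming the MustParseExpr helper from Example #1 and the MustTimeRange helper above are both in scope), the two helpers might be combined to assert a fixed window without repeating the error handling. The test name is hypothetical; the expected values mirror the multi-expression case in Example #1.

func TestTimeRange_MustHelpers(t *testing.T) {
	// Hypothetical test built from the helpers above; not part of the original examples.
	expr := MustParseExpr(`time >= '2000-01-01 00:00:00' AND time < '2000-01-02 00:00:00'`)
	min, max := MustTimeRange(expr)

	if got := min.Format(time.RFC3339Nano); got != `2000-01-01T00:00:00Z` {
		t.Fatalf("unexpected min: got=%s", got)
	}
	if got := max.Format(time.RFC3339Nano); got != `2000-01-01T23:59:59.999999999Z` {
		t.Fatalf("unexpected max: got=%s", got)
	}
}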
Example #3
// PlanSelect creates an execution plan for the given SelectStatement and returns an Executor.
func (q *QueryExecutor) PlanSelect(stmt *influxql.SelectStatement, chunkSize int) (Executor, error) {
	// It is important to "stamp" this time so that every evaluation of `now()` in the statement uses EXACTLY the same `now`.
	now := time.Now().UTC()
	opt := influxql.SelectOptions{}

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})
	opt.MinTime, opt.MaxTime = influxql.TimeRange(stmt.Condition)
	if opt.MaxTime.IsZero() {
		opt.MaxTime = now
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Expand regex sources to their actual source names.
	sources, err := q.Store.ExpandSources(stmt.Sources)
	if err != nil {
		return nil, err
	}
	stmt.Sources = sources

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Select only the shards that overlap the requested time range.
	shardIDs, err := q.MetaClient.ShardIDsByTimeRange(stmt.Sources, opt.MinTime, opt.MaxTime)
	if err != nil {
		return nil, err
	}
	shards := q.Store.Shards(shardIDs)

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteWildcards(Shards(shards))
	if err != nil {
		return nil, err
	}
	stmt = tmp

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, Shards(shards), &opt)
	if err != nil {
		return nil, err
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending())
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime

	// Wrap emitter in an adapter to conform to the Executor interface.
	return (*emitterExecutor)(em), nil
}
Example #4
func TestContinuousQueryService_GroupByOffset(t *testing.T) {
	s := NewTestService(t)
	mc := NewMetaClient(t)
	mc.CreateDatabase("db", "")
	mc.CreateContinuousQuery("db", "cq", `CREATE CONTINUOUS QUERY cq ON db BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(1m, 30s) END`)
	s.MetaClient = mc

	// Set RunInterval high so we can trigger runs manually via the Run method.
	s.RunInterval = 10 * time.Minute

	done := make(chan struct{})
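	// expected holds the time range each generated query should cover; the
	// ExecuteStatement callback below verifies it and signals completion on done.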
	var expected struct {
		min time.Time
		max time.Time
	}

	// Set a callback for ExecuteStatement.
	s.QueryExecutor.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			s := stmt.(*influxql.SelectStatement)
			min, max, err := influxql.TimeRange(s.Condition)
			if err != nil {
				t.Errorf("unexpected error parsing time range: %s", err)
			} else if !expected.min.Equal(min) || !expected.max.Equal(max) {
				t.Errorf("mismatched time range: got=(%s, %s) exp=(%s, %s)", min, max, expected.min, expected.max)
			}
			done <- struct{}{}
			ctx.Results <- &influxql.Result{}
			return nil
		},
	}

	s.Open()
	defer s.Close()

	// Set the 'now' time to the start of a 10 minute interval with a 30 second offset.
	// Then trigger a run. This should trigger two queries (one for the current time
	// interval, one for the previous).
	now := time.Now().UTC().Truncate(10 * time.Minute).Add(30 * time.Second)
	expected.min = now.Add(-time.Minute)
	expected.max = now.Add(-1)
	s.RunCh <- &RunRequest{Now: now}

	if err := wait(done, 100*time.Millisecond); err != nil {
		t.Fatal(err)
	}
}
Example #5
func (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) ([]influxql.Iterator, *influxql.SelectStatement, error) {
	// It is important to "stamp" this time so that every evaluation of `now()` in the statement uses EXACTLY the same `now`.
	now := time.Now().UTC()
	opt := influxql.SelectOptions{
		InterruptCh: ctx.InterruptCh,
		NodeID:      ctx.ExecutionOptions.NodeID,
		MaxSeriesN:  e.MaxSelectSeriesN,
	}

	// Replace instances of "now()" with the current time, and check the resultant times.
	nowValuer := influxql.NowValuer{Now: now}
	stmt.Condition = influxql.Reduce(stmt.Condition, &nowValuer)
	// Replace instances of "now()" with the current time in the dimensions.
	for _, d := range stmt.Dimensions {
		d.Expr = influxql.Reduce(d.Expr, &nowValuer)
	}

	var err error
	opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition)
	if err != nil {
		return nil, stmt, err
	}

	if opt.MaxTime.IsZero() {
		// If we're executing a meta query, where the user cannot specify a
		// time condition, expand the default max time to the maximum possible
		// value so that data whose points all lie in the future is still
		// returned.
		if influxql.Sources(stmt.Sources).HasSystemSource() {
			opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC()
		} else {
			if interval, err := stmt.GroupByInterval(); err != nil {
				return nil, stmt, err
			} else if interval > 0 {
				opt.MaxTime = now
			} else {
				opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC()
			}
		}
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, influxql.MinTime).UTC()
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Rewrite any regex conditions that could make use of the index.
	stmt.RewriteRegexConditions()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return nil, stmt, err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return nil, stmt, err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteFields(ic)
	if err != nil {
		return nil, stmt, err
	}
	stmt = tmp

	if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery {
		interval, err := stmt.GroupByInterval()
		if err != nil {
			return nil, stmt, err
		}

		if interval > 0 {
			// Determine the start and end time matched to the interval (may not match the actual times).
			min := opt.MinTime.Truncate(interval)
			max := opt.MaxTime.Truncate(interval).Add(interval)

			// Determine the number of buckets by finding the time span and dividing by the interval.
			buckets := int64(max.Sub(min)) / int64(interval)
			if int(buckets) > e.MaxSelectBucketsN {
				return nil, stmt, fmt.Errorf("max-select-buckets limit exceeded: (%d/%d)", buckets, e.MaxSelectBucketsN)
			}
		}
	}

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return nil, stmt, err
	}

	if e.MaxSelectPointN > 0 {
		monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN)
		ctx.Query.Monitor(monitor)
	}
	return itrs, stmt, nil
}
func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error {
	// Handle SHOW TAG VALUES separately so it can be optimized.
	// https://github.com/influxdata/influxdb/issues/6233
	if source, ok := stmt.Sources[0].(*influxql.Measurement); ok && source.Name == "_tags" {
		// Use the optimized version only if we have direct access to the database.
		if store, ok := e.TSDBStore.(LocalTSDBStore); ok {
			return e.executeShowTagValues(stmt, ctx, store)
		}
	}

	// It is important to "stamp" this time so that every evaluation of `now()` in the statement uses EXACTLY the same `now`.
	now := time.Now().UTC()
	opt := influxql.SelectOptions{InterruptCh: ctx.InterruptCh}

	// Replace instances of "now()" with the current time, and check the resultant times.
	nowValuer := influxql.NowValuer{Now: now}
	stmt.Condition = influxql.Reduce(stmt.Condition, &nowValuer)
	// Replace instances of "now()" with the current time in the dimensions.
	for _, d := range stmt.Dimensions {
		d.Expr = influxql.Reduce(d.Expr, &nowValuer)
	}

	var err error
	opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition)
	if err != nil {
		return err
	}

	if opt.MaxTime.IsZero() {
		// If we're executing a meta query, where the user cannot specify a
		// time condition, expand the default max time to the maximum possible
		// value so that data whose points all lie in the future is still
		// returned.
		if influxql.Sources(stmt.Sources).HasSystemSource() {
			opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC()
		} else {
			opt.MaxTime = now
		}
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteFields(ic)
	if err != nil {
		return err
	}
	stmt = tmp

	if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery {
		interval, err := stmt.GroupByInterval()
		if err != nil {
			return err
		}

		if interval > 0 {
			// Determine the start and end time matched to the interval (may not match the actual times).
			min := opt.MinTime.Truncate(interval)
			max := opt.MaxTime.Truncate(interval).Add(interval)

			// Determine the number of buckets by finding the time span and dividing by the interval.
			buckets := int64(max.Sub(min)) / int64(interval)
			if int(buckets) > e.MaxSelectBucketsN {
				return fmt.Errorf("max select bucket count exceeded: %d buckets", buckets)
			}
		}
	}

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return err
	}

	if e.MaxSelectPointN > 0 {
		monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN)
		ctx.Query.Monitor(monitor)
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize)
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Calculate initial stats across all iterators.
	stats := influxql.Iterators(itrs).Stats()
	if e.MaxSelectSeriesN > 0 && stats.SeriesN > e.MaxSelectSeriesN {
		return fmt.Errorf("max select series count exceeded: %d series", stats.SeriesN)
	}

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool

	var pointsWriter *BufferedPointsWriter
	if stmt.Target != nil {
		pointsWriter = NewBufferedPointsWriter(e.PointsWriter, stmt.Target.Measurement.Database, stmt.Target.Measurement.RetentionPolicy, 10000)
	}

	for {
		row, err := em.Emit()
		if err != nil {
			return err
		} else if row == nil {
			// Check if the query was interrupted while emitting.
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			default:
			}
			break
		}

		// Write points back into the system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(pointsWriter, stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		result := &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      []*models.Row{row},
		}

		// Send results or exit if closing.
		select {
		case <-ctx.InterruptCh:
			return influxql.ErrQueryInterrupted
		case ctx.Results <- result:
		}

		emitted = true
	}

	// Flush remaining points and emit the write count if this is an INTO statement.
	if stmt.Target != nil {
		if err := pointsWriter.Flush(); err != nil {
			return err
		}

		var messages []*influxql.Message
		if ctx.ReadOnly {
			messages = append(messages, influxql.ReadOnlyWarning(stmt.String()))
		}

		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Messages:    messages,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      make([]*models.Row, 0),
		}
	}

	return nil
}
Example #7
func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error {
	// It is important to "stamp" this time so that every evaluation of `now()` in the statement uses EXACTLY the same `now`.
	now := time.Now().UTC()
	opt := influxql.SelectOptions{InterruptCh: ctx.InterruptCh}

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})
	var err error
	opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition)
	if err != nil {
		return err
	}

	if opt.MaxTime.IsZero() {
		opt.MaxTime = now
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteWildcards(ic)
	if err != nil {
		return err
	}
	stmt = tmp

	if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery {
		interval, err := stmt.GroupByInterval()
		if err != nil {
			return err
		}

		if interval > 0 {
			// Determine the start and end time matched to the interval (may not match the actual times).
			min := opt.MinTime.Truncate(interval)
			max := opt.MaxTime.Truncate(interval).Add(interval)

			// Determine the number of buckets by finding the time span and dividing by the interval.
			buckets := int64(max.Sub(min)) / int64(interval)
			if int(buckets) > e.MaxSelectBucketsN {
				return fmt.Errorf("max select bucket count exceeded: %d buckets", buckets)
			}
		}
	}

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return err
	}

	if e.MaxSelectPointN > 0 {
		monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN)
		ctx.Query.Monitor(monitor)
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize)
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Calculate initial stats across all iterators.
	stats := influxql.Iterators(itrs).Stats()
	if e.MaxSelectSeriesN > 0 && stats.SeriesN > e.MaxSelectSeriesN {
		return fmt.Errorf("max select series count exceeded: %d series", stats.SeriesN)
	}

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool
	for {
		row := em.Emit()
		if row == nil {
			// Check if the query was interrupted while emitting.
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			default:
			}
			break
		}

		result := &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      []*models.Row{row},
		}

		// Write points back into the system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		// Send results or exit if closing.
		select {
		case <-ctx.InterruptCh:
			return influxql.ErrQueryInterrupted
		case ctx.Results <- result:
		}

		emitted = true
	}

	// Emit the write count if this is an INTO statement.
	if stmt.Target != nil {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      make([]*models.Row, 0),
		}
	}

	return nil
}
Example #8
// Ensure the SELECT statement can have its start and end time set.
func TestSelectStatement_SetTimeRange(t *testing.T) {
	q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)"
	stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		t.Fatalf("invalid statement: %q: %s", stmt, err)
	}

	s := stmt.(*influxql.SelectStatement)
	min, max := influxql.TimeRange(s.Condition)
	start := time.Now().Add(-20 * time.Hour).Round(time.Second).UTC()
	end := time.Now().Add(10 * time.Hour).Round(time.Second).UTC()
	s.SetTimeRange(start, end)
	min, max = influxql.TimeRange(s.Condition)

	if min != start {
		t.Fatalf("start time wasn't set properly.\n  exp: %s\n  got: %s", start, min)
	}
	// the end range is actually one nanosecond before the given one since end is exclusive
	end = end.Add(-time.Nanosecond)
	if max != end {
		t.Fatalf("end time wasn't set properly.\n  exp: %s\n  got: %s", end, max)
	}

	// ensure we can set a time on a select that already has one set
	start = time.Now().Add(-20 * time.Hour).Round(time.Second).UTC()
	end = time.Now().Add(10 * time.Hour).Round(time.Second).UTC()
	q = fmt.Sprintf("SELECT sum(value) from foo WHERE time >= %ds and time <= %ds GROUP BY time(10m)", start.Unix(), end.Unix())
	stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		t.Fatalf("invalid statement: %q: %s", stmt, err)
	}

	s = stmt.(*influxql.SelectStatement)
	min, max = influxql.TimeRange(s.Condition)
	if start != min || end != max {
		t.Fatalf("start and end times weren't equal:\n  exp: %s\n  got: %s\n  exp: %s\n  got:%s\n", start, min, end, max)
	}

	// update and ensure it saves it
	start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC()
	end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC()
	s.SetTimeRange(start, end)
	min, max = influxql.TimeRange(s.Condition)

	// TODO: right now the SetTimeRange can't override the start time if it's more recent than what they're trying to set it to.
	//       shouldn't matter for our purposes with continuous queries, but fix this later

	if min != start {
		t.Fatalf("start time wasn't set properly.\n  exp: %s\n  got: %s", start, min)
	}
	// the end range is actually one nanosecond before the given one since end is exclusive
	end = end.Add(-time.Nanosecond)
	if max != end {
		t.Fatalf("end time wasn't set properly.\n  exp: %s\n  got: %s", end, max)
	}

	// ensure that when we set a time range other where clause conditions are still there
	q = "SELECT sum(value) from foo WHERE foo = 'bar' and time < now() GROUP BY time(10m)"
	stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		t.Fatalf("invalid statement: %q: %s", stmt, err)
	}

	s = stmt.(*influxql.SelectStatement)

	// update and ensure it saves it
	start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC()
	end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC()
	s.SetTimeRange(start, end)
	min, max = influxql.TimeRange(s.Condition)

	if min != start {
		t.Fatalf("start time wasn't set properly.\n  exp: %s\n  got: %s", start, min)
	}
	// the end range is actually one nanosecond before the given one since end is exclusive
	end = end.Add(-time.Nanosecond)
	if max != end {
		t.Fatalf("end time wasn't set properly.\n  exp: %s\n  got: %s", end, max)
	}

	// ensure the where clause is there
	hasWhere := false
	influxql.WalkFunc(s.Condition, func(n influxql.Node) {
		if ex, ok := n.(*influxql.BinaryExpr); ok {
			if lhs, ok := ex.LHS.(*influxql.VarRef); ok {
				if lhs.Val == "foo" {
					if rhs, ok := ex.RHS.(*influxql.StringLiteral); ok {
						if rhs.Val == "bar" {
							hasWhere = true
						}
					}
				}
			}
		}
	})
	if !hasWhere {
		t.Fatal("set time range cleared out the where clause")
	}
}
Example #9
func (e *QueryExecutor) executeSelectStatement(stmt *influxql.SelectStatement, chunkSize, statementID int, results chan *influxql.Result, closing <-chan struct{}) error {
	// It is important to "stamp" this time so that every evaluation of `now()` in the statement uses EXACTLY the same `now`.
	now := time.Now().UTC()
	opt := influxql.SelectOptions{}

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})
	opt.MinTime, opt.MaxTime = influxql.TimeRange(stmt.Condition)
	if opt.MaxTime.IsZero() {
		opt.MaxTime = now
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteWildcards(ic)
	if err != nil {
		return err
	}
	stmt = tmp

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return err
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending())
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool
	for {
		row := em.Emit()
		if row == nil {
			break
		}

		result := &influxql.Result{
			StatementID: statementID,
			Series:      []*models.Row{row},
		}

		// Write points back into the system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		// Send results or exit if closing.
		select {
		case <-closing:
			return nil
		case results <- result:
		}

		emitted = true
	}

	// Emit the write count if this is an INTO statement.
	if stmt.Target != nil {
		results <- &influxql.Result{
			StatementID: statementID,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		results <- &influxql.Result{
			StatementID: statementID,
			Series:      make([]*models.Row, 0),
		}
	}

	return nil
}
Example #10
func TestContinuousQueryService_ResampleOptions(t *testing.T) {
	s := NewTestService(t)
	mc := NewMetaClient(t)
	mc.CreateDatabase("db", "")
	mc.CreateContinuousQuery("db", "cq", `CREATE CONTINUOUS QUERY cq ON db RESAMPLE EVERY 10s FOR 2m BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(1m) END`)
	s.MetaClient = mc

	db := s.MetaClient.Database("db")

	cq, err := NewContinuousQuery(db.Name, &db.ContinuousQueries[0])
	if err != nil {
		t.Fatal(err)
	} else if cq.Resample.Every != 10*time.Second {
		t.Errorf("expected resample every to be 10s, got %s", influxql.FormatDuration(cq.Resample.Every))
	} else if cq.Resample.For != 2*time.Minute {
		t.Errorf("expected resample for 2m, got %s", influxql.FormatDuration(cq.Resample.For))
	}

	// Set RunInterval high so we can trigger runs manually via the Run method.
	s.RunInterval = 10 * time.Minute

	done := make(chan struct{})
	var expected struct {
		min time.Time
		max time.Time
	}

	// Set a callback for ExecuteStatement.
	s.QueryExecutor.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			s := stmt.(*influxql.SelectStatement)
			min, max, err := influxql.TimeRange(s.Condition)
			if err != nil {
				t.Errorf("unexpected error parsing time range: %s", err)
			} else if !expected.min.Equal(min) || !expected.max.Equal(max) {
				t.Errorf("mismatched time range: got=(%s, %s) exp=(%s, %s)", min, max, expected.min, expected.max)
			}
			done <- struct{}{}
			ctx.Results <- &influxql.Result{}
			return nil
		},
	}

	s.Open()
	defer s.Close()

	// Set the 'now' time to the start of a 10 minute interval. Then trigger a run.
	// This should trigger two queries (one for the current time interval, one for the previous).
	now := time.Now().UTC().Truncate(10 * time.Minute)
	expected.min = now.Add(-2 * time.Minute)
	expected.max = now.Add(-1)
	s.RunCh <- &RunRequest{Now: now}

	if err := wait(done, 100*time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// Trigger another run 10 seconds later. Another two queries should happen,
	// but they will cover a different time range.
	expected.min = expected.min.Add(time.Minute)
	expected.max = expected.max.Add(time.Minute)
	s.RunCh <- &RunRequest{Now: now.Add(10 * time.Second)}

	if err := wait(done, 100*time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// Reset the time period and send the initial request at 5 seconds after the
	// 10 minute mark. There should be exactly one call since the current interval is too
	// young and only one interval matches the FOR duration.
	expected.min = now.Add(-time.Minute)
	expected.max = now.Add(-1)
	s.Run("", "", now.Add(5*time.Second))

	if err := wait(done, 100*time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// Send a message 10 minutes later and ensure that the system plays catchup.
	expected.max = now.Add(10*time.Minute - 1)
	s.RunCh <- &RunRequest{Now: now.Add(10 * time.Minute)}

	if err := wait(done, 100*time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// No overflow should be sent.
	if err := wait(done, 100*time.Millisecond); err == nil {
		t.Error("too many queries executed")
	}
}
Example #11
func TestContinuousQueryService_EveryHigherThanInterval(t *testing.T) {
	s := NewTestService(t)
	ms := NewMetaClient(t)
	ms.CreateDatabase("db", "")
	ms.CreateContinuousQuery("db", "cq", `CREATE CONTINUOUS QUERY cq ON db RESAMPLE EVERY 1m BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(30s) END`)
	s.MetaClient = ms

	// Set RunInterval high so we can trigger runs manually via the Run method.
	s.RunInterval = 10 * time.Minute

	done := make(chan struct{})
	var expected struct {
		min time.Time
		max time.Time
	}

	// Set a callback for ExecuteStatement.
	s.QueryExecutor.StatementExecutor = &StatementExecutor{
		ExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {
			s := stmt.(*influxql.SelectStatement)
			min, max, err := influxql.TimeRange(s.Condition)
			if err != nil {
				t.Errorf("unexpected error parsing time range: %s", err)
			} else if !expected.min.Equal(min) || !expected.max.Equal(max) {
				t.Errorf("mismatched time range: got=(%s, %s) exp=(%s, %s)", min, max, expected.min, expected.max)
			}
			done <- struct{}{}
			ctx.Results <- &influxql.Result{}
			return nil
		},
	}

	s.Open()
	defer s.Close()

	// Set the 'now' time to the start of a 10 minute interval. Then trigger a run.
	// This should trigger two queries (one for the current time interval, one for the previous)
	// since the default FOR interval should be EVERY, not the GROUP BY interval.
	now := time.Now().Truncate(10 * time.Minute)
	expected.min = now.Add(-time.Minute)
	expected.max = now.Add(-1)
	s.RunCh <- &RunRequest{Now: now}

	if err := wait(done, 100*time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// Trigger 30 seconds later. Nothing should run.
	s.RunCh <- &RunRequest{Now: now.Add(30 * time.Second)}

	if err := wait(done, 100*time.Millisecond); err == nil {
		t.Fatal("too many queries")
	}

	// Run again 1 minute later. Another two queries should run.
	expected.min = now
	expected.max = now.Add(time.Minute - 1)
	s.RunCh <- &RunRequest{Now: now.Add(time.Minute)}

	if err := wait(done, 100*time.Millisecond); err != nil {
		t.Fatal(err)
	}

	// No overflow should be sent.
	if err := wait(done, 100*time.Millisecond); err == nil {
		t.Error("too many queries executed")
	}
}