func expandExprWithValues(expr influxql.Expr, keys []string, tagExprs []tagExpr, uniques [][]string, index int) []tagSetExpr {
	// If we have no more keys left then execute the reduction and return.
	if index == len(keys) {
		// Create a map of tag key/values.
		m := make(map[string]*string, len(keys))
		for i, key := range keys {
			if tagExprs[i].op == influxql.EQ {
				m[key] = &tagExprs[i].values[0]
			} else {
				m[key] = nil
			}
		}

		// TODO: Rewrite full expressions instead of VarRef replacement.

		// Reduce using the current tag key/value set.
		// Ignore it if it reduces down to "false".
		e := influxql.Reduce(expr, &tagValuer{tags: m})
		if e, ok := e.(*influxql.BooleanLiteral); ok && e.Val == false {
			return nil
		}

		return []tagSetExpr{{values: copyTagExprs(tagExprs), expr: e}}
	}

	// Otherwise expand for each possible equality value of the key.
	var exprs []tagSetExpr
	for _, v := range uniques[index] {
		exprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], []string{v}, influxql.EQ}), uniques, index+1)...)
	}
	exprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], uniques[index], influxql.NEQ}), uniques, index+1)...)

	return exprs
}
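// For intuition: over keys {region, host} with uniques {{"us"}, {"a", "b"}},
// the recursion above yields (len(uniques[i])+1) choices per key -- one EQ
// branch per known value plus one NEQ "catch-all" branch -- so 2 * 3 = 6 tag
// sets in total, minus any whose reduced expression folds to "false". Below is
// a minimal, self-contained sketch of that cartesian expansion; the combo type
// and expand function are hypothetical illustrations, not part of the package.
package main

import "fmt"

// combo pairs a tag key with either a single EQ value or the NEQ catch-all
// covering every known value of that key.
type combo struct {
	key    string
	values []string
	op     string // "=" or "!="
}

// expand mirrors the recursion in expandExprWithValues, minus the Reduce
// step: at each key it branches into every EQ value plus one NEQ catch-all.
func expand(keys []string, uniques [][]string, acc []combo, index int) [][]combo {
	if index == len(keys) {
		// Copy acc: its backing array is shared across sibling branches.
		out := make([]combo, len(acc))
		copy(out, acc)
		return [][]combo{out}
	}
	var sets [][]combo
	for _, v := range uniques[index] {
		sets = append(sets, expand(keys, uniques, append(acc, combo{keys[index], []string{v}, "="}), index+1)...)
	}
	sets = append(sets, expand(keys, uniques, append(acc, combo{keys[index], uniques[index], "!="}), index+1)...)
	return sets
}

func main() {
	for _, set := range expand([]string{"region", "host"}, [][]string{{"us"}, {"a", "b"}}, nil, 0) {
		fmt.Println(set) // 6 tag sets: {region=us | region!=us} x {host=a | host=b | host!={a,b}}
	}
}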
// PlanSelect creates an execution plan for the given SelectStatement and returns an Executor.
func (q *QueryExecutor) PlanSelect(stmt *influxql.SelectStatement, chunkSize int) (Executor, error) {
	// It is important to "stamp" this time once so that every evaluation of now() in the statement sees exactly the same instant.
	now := time.Now().UTC()

	opt := influxql.SelectOptions{}

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})
	opt.MinTime, opt.MaxTime = influxql.TimeRange(stmt.Condition)
	if opt.MaxTime.IsZero() {
		opt.MaxTime = now
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Expand regex sources to their actual source names.
	sources, err := q.Store.ExpandSources(stmt.Sources)
	if err != nil {
		return nil, err
	}
	stmt.Sources = sources

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Filter to only the shards that contain the date range.
	shardIDs, err := q.MetaClient.ShardIDsByTimeRange(stmt.Sources, opt.MinTime, opt.MaxTime)
	if err != nil {
		return nil, err
	}
	shards := q.Store.Shards(shardIDs)

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteWildcards(Shards(shards))
	if err != nil {
		return nil, err
	}
	stmt = tmp

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, Shards(shards), &opt)
	if err != nil {
		return nil, err
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending())
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime

	// Wrap emitter in an adapter to conform to the Executor interface.
	return (*emitterExecutor)(em), nil
}
func (e *StatementExecutor) executeDeleteSeriesStatement(stmt *influxql.DeleteSeriesStatement, database string) error {
	if dbi := e.MetaClient.Database(database); dbi == nil {
		return influxql.ErrDatabaseNotFound(database)
	}

	// Convert "now()" to current time.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: time.Now().UTC()})

	// Locally delete the series.
	return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition)
}
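// The Reduce/NowValuer pairing used above (and throughout the planner) is
// worth seeing in isolation: it folds now() arithmetic into a concrete time
// literal so the condition can be evaluated against one fixed instant. A
// minimal sketch, assuming the influxql package's exported ParseExpr, Reduce,
// and NowValuer; adjust the import path to the influxdb vintage you build
// against.
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	cond, err := influxql.ParseExpr("time > now() - 1h")
	if err != nil {
		panic(err)
	}

	// Pin now() to a fixed instant and fold the arithmetic.
	now := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
	reduced := influxql.Reduce(cond, &influxql.NowValuer{Now: now})

	fmt.Println(reduced.String()) // time > '1999-12-31T23:00:00Z'
}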
// Ensure an expression can be reduced.
func TestReduce(t *testing.T) {
	now := mustParseTime("2000-01-01T00:00:00Z")

	for i, tt := range []struct {
		in   string
		out  string
		data Valuer
	}{
		// Number literals.
		{in: `1 + 2`, out: `3`},
		{in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: `(foo * 2) + 16.500`},
		{in: `foo(bar(2 + 3), 4)`, out: `foo(bar(5), 4)`},
		{in: `4 / 0`, out: `0.000`},
		{in: `1 / 2`, out: `0.500`},
		{in: `4 = 4`, out: `true`},
		{in: `4 <> 4`, out: `false`},
		{in: `6 > 4`, out: `true`},
		{in: `4 >= 4`, out: `true`},
		{in: `4 < 6`, out: `true`},
		{in: `4 <= 4`, out: `true`},
		{in: `4 AND 5`, out: `4 AND 5`},

		// Boolean literals.
		{in: `true AND false`, out: `false`},
		{in: `true OR false`, out: `true`},
		{in: `true OR (foo = bar AND 1 > 2)`, out: `true`},
		{in: `(foo = bar AND 1 > 2) OR true`, out: `true`},
		{in: `false OR (foo = bar AND 1 > 2)`, out: `false`},
		{in: `(foo = bar AND 1 > 2) OR false`, out: `false`},
		{in: `true = false`, out: `false`},
		{in: `true <> false`, out: `true`},
		{in: `true + false`, out: `true + false`},

		// Time literals with now().
		{in: `now() + 2h`, out: `'2000-01-01T02:00:00Z'`, data: map[string]interface{}{"now()": now}},
		{in: `now() / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`, data: map[string]interface{}{"now()": now}},
		{in: `4µ + now()`, out: `'2000-01-01T00:00:00.000004Z'`, data: map[string]interface{}{"now()": now}},
		{in: `now() + 2000000000`, out: `'2000-01-01T00:00:02Z'`, data: map[string]interface{}{"now()": now}},
		{in: `2000000000 + now()`, out: `'2000-01-01T00:00:02Z'`, data: map[string]interface{}{"now()": now}},
		{in: `now() - 2000000000`, out: `'1999-12-31T23:59:58Z'`, data: map[string]interface{}{"now()": now}},
		{in: `now() = now()`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() <> now()`, out: `false`, data: map[string]interface{}{"now()": now}},
		{in: `now() < now() + 1h`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() <= now() + 1h`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() >= now() - 1h`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() > now() - 1h`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() - (now() - 60s)`, out: `1m`, data: map[string]interface{}{"now()": now}},
		{in: `now() AND now()`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, data: map[string]interface{}{"now()": now}},
		{in: `now()`, out: `now()`},
		{in: `946684800000000000 + 2h`, out: `'2000-01-01T02:00:00Z'`},

		// Time literals.
		{in: `'2000-01-01T00:00:00Z' + 2h`, out: `'2000-01-01T02:00:00Z'`},
		{in: `'2000-01-01T00:00:00Z' / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`},
		{in: `4µ + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00.000004Z'`},
		{in: `'2000-01-01T00:00:00Z' + 2000000000`, out: `'2000-01-01T00:00:02Z'`},
		{in: `2000000000 + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:02Z'`},
		{in: `'2000-01-01T00:00:00Z' - 2000000000`, out: `'1999-12-31T23:59:58Z'`},
		{in: `'2000-01-01T00:00:00Z' = '2000-01-01T00:00:00Z'`, out: `true`},
		{in: `'2000-01-01T00:00:00.000000000Z' = '2000-01-01T00:00:00Z'`, out: `true`},
		{in: `'2000-01-01T00:00:00Z' <> '2000-01-01T00:00:00Z'`, out: `false`},
		{in: `'2000-01-01T00:00:00.000000000Z' <> '2000-01-01T00:00:00Z'`, out: `false`},
		{in: `'2000-01-01T00:00:00Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`},
		{in: `'2000-01-01T00:00:00.000000000Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`},
		{in: `'2000-01-01T00:00:00Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`},
		{in: `'2000-01-01T00:00:00.000000000Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`},
		{in: `'2000-01-01T00:00:00Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`},
		{in: `'2000-01-01T00:00:00.000000000Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`},
		{in: `'2000-01-01T00:00:00Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`},
		{in: `'2000-01-01T00:00:00.000000000Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`},
		{in: `'2000-01-01T00:00:00Z' - ('2000-01-01T00:00:00Z' - 60s)`, out: `1m`},
		{in: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`},

		// Duration literals.
		{in: `10m + 1h - 60s`, out: `69m`},
		{in: `(10m / 2) * 5`, out: `25m`},
		{in: `60s = 1m`, out: `true`},
		{in: `60s <> 1m`, out: `false`},
		{in: `60s < 1h`, out: `true`},
		{in: `60s <= 1h`, out: `true`},
		{in: `60s > 12s`, out: `true`},
		{in: `60s >= 1m`, out: `true`},
		{in: `60s AND 1m`, out: `1m AND 1m`},
		{in: `60m / 0`, out: `0s`},
		{in: `60m + 50`, out: `1h + 50`},

		// String literals.
		{in: `'foo' + 'bar'`, out: `'foobar'`},

		// Variable references.
		{in: `foo`, out: `'bar'`, data: map[string]interface{}{"foo": "bar"}},
		{in: `foo = 'bar'`, out: `true`, data: map[string]interface{}{"foo": "bar"}},
		{in: `foo = 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}},
		{in: `foo <> 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}},
	} {
		// Fold expression.
		expr := influxql.Reduce(MustParseExpr(tt.in), tt.data)

		// Compare with expected output.
		if out := expr.String(); tt.out != out {
			t.Errorf("%d. %s: unexpected expr:\n\nexp=%s\n\ngot=%s\n\n", i, tt.in, tt.out, out)
			continue
		}
	}
}
// mergeSeriesFilters merges two sets of filter expressions and culls series IDs.
func mergeSeriesFilters(op influxql.Token, ids SeriesIDs, lfilters, rfilters FilterExprs) (SeriesIDs, FilterExprs) {
	// Create a map to hold the final set of series filter expressions.
	filters := make(map[uint64]influxql.Expr)

	// Resulting list of series IDs
	var series SeriesIDs

	// Combining logic:
	// +==========+==========+==========+=======================+=======================+
	// | operator |   LHS    |   RHS    |   intermediate expr   |    reduced filter     |
	// +==========+==========+==========+=======================+=======================+
	// |          | <nil>    | <r-expr> | false OR <r-expr>     | <r-expr>              |
	// |          |----------+----------+-----------------------+-----------------------+
	// | OR       | <l-expr> | <nil>    | <l-expr> OR false     | <l-expr>              |
	// |          |----------+----------+-----------------------+-----------------------+
	// |          | <nil>    | <nil>    | false OR false        | false                 |
	// |          |----------+----------+-----------------------+-----------------------+
	// |          | <l-expr> | <r-expr> | <l-expr> OR <r-expr>  | <l-expr> OR <r-expr>  |
	// +----------+----------+----------+-----------------------+-----------------------+
	// |          | <nil>    | <r-expr> | false AND <r-expr>    | false*                |
	// |          |----------+----------+-----------------------+-----------------------+
	// | AND      | <l-expr> | <nil>    | <l-expr> AND false    | false                 |
	// |          |----------+----------+-----------------------+-----------------------+
	// |          | <nil>    | <nil>    | false AND false       | false                 |
	// |          |----------+----------+-----------------------+-----------------------+
	// |          | <l-expr> | <r-expr> | <l-expr> AND <r-expr> | <l-expr> AND <r-expr> |
	// +----------+----------+----------+-----------------------+-----------------------+
	// *literal false filters and series IDs should be excluded from the results

	for _, id := range ids {
		// Get LHS and RHS filter expressions for this series ID.
		lfilter, rfilter := lfilters[id], rfilters[id]

		// Set filter to false if either LHS or RHS expression was nil.
		if lfilter == nil {
			lfilter = &influxql.BooleanLiteral{Val: false}
		}
		if rfilter == nil {
			rfilter = &influxql.BooleanLiteral{Val: false}
		}

		// Create the intermediate filter expression for this series ID.
		be := &influxql.BinaryExpr{
			Op:  op,
			LHS: lfilter,
			RHS: rfilter,
		}

		// Reduce the intermediate expression.
		expr := influxql.Reduce(be, nil)

		// If the expression reduced to false, exclude this series ID and filter.
		if b, ok := expr.(*influxql.BooleanLiteral); ok && !b.Val {
			continue
		}

		// Store the series ID and merged filter in the final results.
		if expr != nil {
			filters[id] = expr
		}
		series = append(series, id)
	}
	return series, filters
}
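// The table's rows fall out of constant folding: a missing filter becomes a
// literal "false", and Reduce then collapses the OR/AND against it. A small
// illustration of the "<l-expr> OR false" and "<l-expr> AND false" rows; the
// expected outputs are those documented in the table above, and the import
// path carries the same caveat as the earlier sketch.
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	lfilter, _ := influxql.ParseExpr(`host = 'a'`) // LHS filter for some series
	var rfilter influxql.Expr = &influxql.BooleanLiteral{Val: false} // stand-in for a nil RHS filter

	orExpr := influxql.Reduce(&influxql.BinaryExpr{Op: influxql.OR, LHS: lfilter, RHS: rfilter}, nil)
	fmt.Println(orExpr.String()) // per the table: host = 'a' (series keeps its LHS filter)

	andExpr := influxql.Reduce(&influxql.BinaryExpr{Op: influxql.AND, LHS: lfilter, RHS: rfilter}, nil)
	fmt.Println(andExpr.String()) // per the table: false (series would be culled)
}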
func (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) ([]influxql.Iterator, *influxql.SelectStatement, error) {
	// It is important to "stamp" this time once so that every evaluation of now() in the statement sees exactly the same instant.
	now := time.Now().UTC()

	opt := influxql.SelectOptions{
		InterruptCh: ctx.InterruptCh,
		NodeID:      ctx.ExecutionOptions.NodeID,
		MaxSeriesN:  e.MaxSelectSeriesN,
	}

	// Replace instances of "now()" with the current time, and check the resultant times.
	nowValuer := influxql.NowValuer{Now: now}
	stmt.Condition = influxql.Reduce(stmt.Condition, &nowValuer)

	// Replace instances of "now()" with the current time in the dimensions.
	for _, d := range stmt.Dimensions {
		d.Expr = influxql.Reduce(d.Expr, &nowValuer)
	}

	var err error
	opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition)
	if err != nil {
		return nil, stmt, err
	}

	if opt.MaxTime.IsZero() {
		// In the case that we're executing a meta query where the user cannot
		// specify a time condition, expand the default max time to the maximum
		// possible value, to ensure that data where all points are in the
		// future is still returned.
		if influxql.Sources(stmt.Sources).HasSystemSource() {
			opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC()
		} else {
			if interval, err := stmt.GroupByInterval(); err != nil {
				return nil, stmt, err
			} else if interval > 0 {
				opt.MaxTime = now
			} else {
				opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC()
			}
		}
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, influxql.MinTime).UTC()
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Rewrite any regex conditions that could make use of the index.
	stmt.RewriteRegexConditions()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return nil, stmt, err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return nil, stmt, err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteFields(ic)
	if err != nil {
		return nil, stmt, err
	}
	stmt = tmp

	if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery {
		interval, err := stmt.GroupByInterval()
		if err != nil {
			return nil, stmt, err
		}

		if interval > 0 {
			// Determine the start and end time matched to the interval (may not match the actual times).
			min := opt.MinTime.Truncate(interval)
			max := opt.MaxTime.Truncate(interval).Add(interval)

			// Determine the number of buckets by finding the time span and dividing by the interval.
			buckets := int64(max.Sub(min)) / int64(interval)
			if int(buckets) > e.MaxSelectBucketsN {
				return nil, stmt, fmt.Errorf("max-select-buckets limit exceeded: (%d/%d)", buckets, e.MaxSelectBucketsN)
			}
		}
	}

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return nil, stmt, err
	}

	if e.MaxSelectPointN > 0 {
		monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN)
		ctx.Query.Monitor(monitor)
	}
	return itrs, stmt, nil
}
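// The bucket guard above is plain interval arithmetic: truncate both bounds
// to the GROUP BY interval, widen the upper bound by one interval, and divide
// the span. A worked example with hypothetical numbers (24h of data at
// GROUP BY time(10m)), using only the standard library:
package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 10 * time.Minute
	minTime := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
	maxTime := time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC)

	// Same computation as createIterators.
	min := minTime.Truncate(interval)
	max := maxTime.Truncate(interval).Add(interval)
	buckets := int64(max.Sub(min)) / int64(interval)

	fmt.Println(buckets) // 145: 144 full 10m buckets plus the widened end bucket
}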
func (s *Store) TagValues(database string, cond influxql.Expr) ([]TagValues, error) {
	if cond == nil {
		return nil, errors.New("a condition is required")
	}

	dbi := s.DatabaseIndex(database)
	if dbi == nil {
		return nil, nil
	}

	measurementExpr := influxql.CloneExpr(cond)
	measurementExpr = influxql.Reduce(influxql.RewriteExpr(measurementExpr, func(e influxql.Expr) influxql.Expr {
		switch e := e.(type) {
		case *influxql.BinaryExpr:
			switch e.Op {
			case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
				tag, ok := e.LHS.(*influxql.VarRef)
				if !ok || tag.Val != "_name" {
					return nil
				}
			}
		}
		return e
	}), nil)

	mms, ok, err := dbi.MeasurementsByExpr(measurementExpr)
	if err != nil {
		return nil, err
	} else if !ok {
		mms = dbi.Measurements()
		sort.Sort(mms)
	}

	// If there are no measurements, return immediately.
	if len(mms) == 0 {
		return nil, nil
	}

	filterExpr := influxql.CloneExpr(cond)
	filterExpr = influxql.Reduce(influxql.RewriteExpr(filterExpr, func(e influxql.Expr) influxql.Expr {
		switch e := e.(type) {
		case *influxql.BinaryExpr:
			switch e.Op {
			case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
				tag, ok := e.LHS.(*influxql.VarRef)
				if !ok || strings.HasPrefix(tag.Val, "_") {
					return nil
				}
			}
		}
		return e
	}), nil)

	tagValues := make([]TagValues, len(mms))
	for i, mm := range mms {
		tagValues[i].Measurement = mm.Name

		ids, err := mm.SeriesIDsAllOrByExpr(filterExpr)
		if err != nil {
			return nil, err
		}
		ss := mm.SeriesByIDSlice(ids)

		// Determine a list of keys from condition.
		keySet, ok, err := mm.TagKeysByExpr(cond)
		if err != nil {
			return nil, err
		}

		// Loop over all keys for each series.
		m := make(map[KeyValue]struct{}, len(ss))
		for _, series := range ss {
			for key, value := range series.Tags {
				if !ok {
					// nop
				} else if _, exists := keySet[key]; !exists {
					continue
				}
				m[KeyValue{key, value}] = struct{}{}
			}
		}

		// Return an empty slice if there are no key/value matches.
		if len(m) == 0 {
			continue
		}

		// Sort key/value set.
		a := make([]KeyValue, 0, len(m))
		for kv := range m {
			a = append(a, kv)
		}
		sort.Sort(KeyValues(a))
		tagValues[i].Values = a
	}

	return tagValues, nil
}
func (e *StatementExecutor) executeShowTagValues(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext, store LocalTSDBStore) error {
	if stmt.Condition == nil {
		return errors.New("a condition is required")
	}

	source := stmt.Sources[0].(*influxql.Measurement)
	index := store.DatabaseIndex(source.Database)
	if index == nil {
		ctx.Results <- &influxql.Result{StatementID: ctx.StatementID, Series: make([]*models.Row, 0)}
		return nil
	}

	measurementExpr := influxql.CloneExpr(stmt.Condition)
	measurementExpr = influxql.Reduce(influxql.RewriteExpr(measurementExpr, func(e influxql.Expr) influxql.Expr {
		switch e := e.(type) {
		case *influxql.BinaryExpr:
			switch e.Op {
			case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
				tag, ok := e.LHS.(*influxql.VarRef)
				if !ok || tag.Val != "_name" {
					return nil
				}
			}
		}
		return e
	}), nil)

	mms, ok, err := index.MeasurementsByExpr(measurementExpr)
	if err != nil {
		return err
	} else if !ok {
		mms = index.Measurements()
		sort.Sort(mms)
	}

	// If there are no measurements, return immediately.
	if len(mms) == 0 {
		ctx.Results <- &influxql.Result{StatementID: ctx.StatementID, Series: make([]*models.Row, 0)}
		return nil
	}

	filterExpr := influxql.CloneExpr(stmt.Condition)
	filterExpr = influxql.Reduce(influxql.RewriteExpr(filterExpr, func(e influxql.Expr) influxql.Expr {
		switch e := e.(type) {
		case *influxql.BinaryExpr:
			switch e.Op {
			case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
				tag, ok := e.LHS.(*influxql.VarRef)
				if !ok || strings.HasPrefix(tag.Val, "_") {
					return nil
				}
			}
		}
		return e
	}), nil)

	var emitted bool
	columns := stmt.ColumnNames()
	for _, mm := range mms {
		ids, err := mm.SeriesIDsAllOrByExpr(filterExpr)
		if err != nil {
			return err
		}
		ss := mm.SeriesByIDSlice(ids)

		// Determine a list of keys from condition.
		keySet, ok, err := mm.TagKeysByExpr(stmt.Condition)
		if err != nil {
			return err
		}

		// Loop over all keys for each series.
		m := make(map[keyValue]struct{}, len(ss))
		for _, series := range ss {
			for key, value := range series.Tags {
				if !ok {
					// nop
				} else if _, exists := keySet[key]; !exists {
					continue
				}
				m[keyValue{key, value}] = struct{}{}
			}
		}

		// Move to next series if no key/values match.
		if len(m) == 0 {
			continue
		}

		// Sort key/value set.
		a := make([]keyValue, 0, len(m))
		for kv := range m {
			a = append(a, kv)
		}
		sort.Sort(keyValues(a))

		// Convert to result values.
		slab := make([]interface{}, len(a)*2)
		values := make([][]interface{}, len(a))
		for i, elem := range a {
			slab[i*2], slab[i*2+1] = elem.key, elem.value
			values[i] = slab[i*2 : i*2+2]
		}

		// Send result to client.
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series: []*models.Row{{
				Name:    mm.Name,
				Columns: columns,
				Values:  values,
			}},
		}
		emitted = true
	}

	// Always emit at least one row.
	if !emitted {
		ctx.Results <- &influxql.Result{StatementID: ctx.StatementID, Series: make([]*models.Row, 0)}
	}
	return nil
}
func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error {
	// Handle SHOW TAG VALUES separately so it can be optimized.
	// https://github.com/influxdata/influxdb/issues/6233
	if source, ok := stmt.Sources[0].(*influxql.Measurement); ok && source.Name == "_tags" {
		// Use the optimized version only if we have direct access to the database.
		if store, ok := e.TSDBStore.(LocalTSDBStore); ok {
			return e.executeShowTagValues(stmt, ctx, store)
		}
	}

	// It is important to "stamp" this time once so that every evaluation of now() in the statement sees exactly the same instant.
	now := time.Now().UTC()

	opt := influxql.SelectOptions{InterruptCh: ctx.InterruptCh}

	// Replace instances of "now()" with the current time, and check the resultant times.
	nowValuer := influxql.NowValuer{Now: now}
	stmt.Condition = influxql.Reduce(stmt.Condition, &nowValuer)

	// Replace instances of "now()" with the current time in the dimensions.
	for _, d := range stmt.Dimensions {
		d.Expr = influxql.Reduce(d.Expr, &nowValuer)
	}

	var err error
	opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition)
	if err != nil {
		return err
	}

	if opt.MaxTime.IsZero() {
		// In the case that we're executing a meta query where the user cannot
		// specify a time condition, expand the default max time to the maximum
		// possible value, to ensure that data where all points are in the
		// future is still returned.
		if influxql.Sources(stmt.Sources).HasSystemSource() {
			opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC()
		} else {
			opt.MaxTime = now
		}
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteFields(ic)
	if err != nil {
		return err
	}
	stmt = tmp

	if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery {
		interval, err := stmt.GroupByInterval()
		if err != nil {
			return err
		}

		if interval > 0 {
			// Determine the start and end time matched to the interval (may not match the actual times).
			min := opt.MinTime.Truncate(interval)
			max := opt.MaxTime.Truncate(interval).Add(interval)

			// Determine the number of buckets by finding the time span and dividing by the interval.
			buckets := int64(max.Sub(min)) / int64(interval)
			if int(buckets) > e.MaxSelectBucketsN {
				return fmt.Errorf("max select bucket count exceeded: %d buckets", buckets)
			}
		}
	}

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return err
	}

	if e.MaxSelectPointN > 0 {
		monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN)
		ctx.Query.Monitor(monitor)
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize)
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Calculate initial stats across all iterators.
	stats := influxql.Iterators(itrs).Stats()
	if e.MaxSelectSeriesN > 0 && stats.SeriesN > e.MaxSelectSeriesN {
		return fmt.Errorf("max select series count exceeded: %d series", stats.SeriesN)
	}

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool

	var pointsWriter *BufferedPointsWriter
	if stmt.Target != nil {
		pointsWriter = NewBufferedPointsWriter(e.PointsWriter, stmt.Target.Measurement.Database, stmt.Target.Measurement.RetentionPolicy, 10000)
	}

	for {
		row, err := em.Emit()
		if err != nil {
			return err
		} else if row == nil {
			// Check if the query was interrupted while emitting.
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			default:
			}
			break
		}

		// Write points back into system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(pointsWriter, stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		result := &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      []*models.Row{row},
		}

		// Send results or exit if closing.
		select {
		case <-ctx.InterruptCh:
			return influxql.ErrQueryInterrupted
		case ctx.Results <- result:
		}

		emitted = true
	}

	// Flush remaining points and emit write count if an INTO statement.
	if stmt.Target != nil {
		if err := pointsWriter.Flush(); err != nil {
			return err
		}

		var messages []*influxql.Message
		if ctx.ReadOnly {
			messages = append(messages, influxql.ReadOnlyWarning(stmt.String()))
		}

		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Messages:    messages,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      make([]*models.Row, 0),
		}
	}
	return nil
}
// NewTagValuesIterator returns a new instance of TagValuesIterator.
func NewTagValuesIterator(sh *Shard, opt influxql.IteratorOptions) (influxql.Iterator, error) {
	if opt.Condition == nil {
		return nil, errors.New("a condition is required")
	}

	measurementExpr := influxql.CloneExpr(opt.Condition)
	measurementExpr = influxql.Reduce(influxql.RewriteExpr(measurementExpr, func(e influxql.Expr) influxql.Expr {
		switch e := e.(type) {
		case *influxql.BinaryExpr:
			switch e.Op {
			case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
				tag, ok := e.LHS.(*influxql.VarRef)
				if !ok || tag.Val != "_name" {
					return nil
				}
			}
		}
		return e
	}), nil)

	mms, ok, err := sh.index.measurementsByExpr(measurementExpr)
	if err != nil {
		return nil, err
	} else if !ok {
		mms = sh.index.Measurements()
		sort.Sort(mms)
	}

	// If there are no measurements, return immediately.
	if len(mms) == 0 {
		return &tagValuesIterator{}, nil
	}

	filterExpr := influxql.CloneExpr(opt.Condition)
	filterExpr = influxql.Reduce(influxql.RewriteExpr(filterExpr, func(e influxql.Expr) influxql.Expr {
		switch e := e.(type) {
		case *influxql.BinaryExpr:
			switch e.Op {
			case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
				tag, ok := e.LHS.(*influxql.VarRef)
				if !ok || strings.HasPrefix(tag.Val, "_") {
					return nil
				}
			}
		}
		return e
	}), nil)

	var series []*Series
	keys := newStringSet()
	for _, mm := range mms {
		ss, ok, err := mm.TagKeysByExpr(opt.Condition)
		if err != nil {
			return nil, err
		} else if !ok {
			keys.add(mm.TagKeys()...)
		} else {
			keys = keys.union(ss)
		}

		ids, err := mm.seriesIDsAllOrByExpr(filterExpr)
		if err != nil {
			return nil, err
		}

		for _, id := range ids {
			series = append(series, mm.SeriesByID(id))
		}
	}

	return &tagValuesIterator{
		series: series,
		keys:   keys.list(),
		fields: influxql.VarRefs(opt.Aux).Strings(),
	}, nil
}
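// The functions above share the same condition-splitting trick: the first
// RewriteExpr pass keeps only "_name" comparisons (measurement selection) and
// the second drops every comparison on "_"-prefixed system tags (series
// filtering). Returning nil from the callback deletes that node, RewriteExpr
// replaces a binary expression with its surviving side, and the outer Reduce
// then sees a well-formed expression. A sketch of the filter-side pass, with
// the same import-path caveat as the earlier sketches:
package main

import (
	"fmt"
	"strings"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	cond, _ := influxql.ParseExpr(`_name = 'cpu' AND host = 'a'`)

	filterExpr := influxql.Reduce(influxql.RewriteExpr(influxql.CloneExpr(cond), func(e influxql.Expr) influxql.Expr {
		if be, ok := e.(*influxql.BinaryExpr); ok {
			switch be.Op {
			case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
				if tag, ok := be.LHS.(*influxql.VarRef); !ok || strings.HasPrefix(tag.Val, "_") {
					return nil // drop comparisons on system tags
				}
			}
		}
		return e
	}), nil)

	fmt.Println(filterExpr.String()) // host = 'a'
}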
func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error {
	// It is important to "stamp" this time once so that every evaluation of now() in the statement sees exactly the same instant.
	now := time.Now().UTC()

	opt := influxql.SelectOptions{InterruptCh: ctx.InterruptCh}

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})

	var err error
	opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition)
	if err != nil {
		return err
	}
	if opt.MaxTime.IsZero() {
		opt.MaxTime = now
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteWildcards(ic)
	if err != nil {
		return err
	}
	stmt = tmp

	if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery {
		interval, err := stmt.GroupByInterval()
		if err != nil {
			return err
		}

		if interval > 0 {
			// Determine the start and end time matched to the interval (may not match the actual times).
			min := opt.MinTime.Truncate(interval)
			max := opt.MaxTime.Truncate(interval).Add(interval)

			// Determine the number of buckets by finding the time span and dividing by the interval.
			buckets := int64(max.Sub(min)) / int64(interval)
			if int(buckets) > e.MaxSelectBucketsN {
				return fmt.Errorf("max select bucket count exceeded: %d buckets", buckets)
			}
		}
	}

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return err
	}

	if e.MaxSelectPointN > 0 {
		monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN)
		ctx.Query.Monitor(monitor)
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize)
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Calculate initial stats across all iterators.
	stats := influxql.Iterators(itrs).Stats()
	if e.MaxSelectSeriesN > 0 && stats.SeriesN > e.MaxSelectSeriesN {
		return fmt.Errorf("max select series count exceeded: %d series", stats.SeriesN)
	}

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool
	for {
		row := em.Emit()
		if row == nil {
			// Check if the query was interrupted while emitting.
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			default:
			}
			break
		}

		result := &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      []*models.Row{row},
		}

		// Write points back into system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		// Send results or exit if closing.
		select {
		case <-ctx.InterruptCh:
			return influxql.ErrQueryInterrupted
		case ctx.Results <- result:
		}

		emitted = true
	}

	// Emit write count if an INTO statement.
	if stmt.Target != nil {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      make([]*models.Row, 0),
		}
	}
	return nil
}
func (e *QueryExecutor) executeSelectStatement(stmt *influxql.SelectStatement, chunkSize, statementID int, results chan *influxql.Result, closing <-chan struct{}) error {
	// It is important to "stamp" this time once so that every evaluation of now() in the statement sees exactly the same instant.
	now := time.Now().UTC()

	opt := influxql.SelectOptions{}

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})
	opt.MinTime, opt.MaxTime = influxql.TimeRange(stmt.Condition)
	if opt.MaxTime.IsZero() {
		opt.MaxTime = now
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteWildcards(ic)
	if err != nil {
		return err
	}
	stmt = tmp

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return err
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending())
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool
	for {
		row := em.Emit()
		if row == nil {
			break
		}

		result := &influxql.Result{
			StatementID: statementID,
			Series:      []*models.Row{row},
		}

		// Write points back into system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		// Send results or exit if closing.
		select {
		case <-closing:
			return nil
		case results <- result:
		}

		emitted = true
	}

	// Emit write count if an INTO statement.
	if stmt.Target != nil {
		results <- &influxql.Result{
			StatementID: statementID,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		results <- &influxql.Result{
			StatementID: statementID,
			Series:      make([]*models.Row, 0),
		}
	}
	return nil
}