func expandExprWithValues(expr influxql.Expr, keys []string, tagExprs []tagExpr, uniques [][]string, index int) []tagSetExpr {
	// If we have no more keys left then execute the reduction and return.
	if index == len(keys) {
		// Create a map of tag key/values.
		m := make(map[string]*string, len(keys))
		for i, key := range keys {
			if tagExprs[i].op == influxql.EQ {
				m[key] = &tagExprs[i].values[0]
			} else {
				m[key] = nil
			}
		}

		// TODO: Rewrite full expressions instead of VarRef replacement.

		// Reduce using the current tag key/value set.
		// Ignore the result if it reduces down to "false".
		e := influxql.Reduce(expr, &tagValuer{tags: m})
		if e, ok := e.(*influxql.BooleanLiteral); ok && !e.Val {
			return nil
		}

		return []tagSetExpr{{values: copyTagExprs(tagExprs), expr: e}}
	}

	// Otherwise expand for each possible equality value of the key.
	var exprs []tagSetExpr
	for _, v := range uniques[index] {
		exprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], []string{v}, influxql.EQ}), uniques, index+1)...)
	}
	exprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], uniques[index], influxql.NEQ}), uniques, index+1)...)

	return exprs
}

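// expandExprWithValues keeps a tag set only when the WHERE clause can still
// hold under that tag assignment. A minimal sketch of the pruning step using
// only the public influxql API; the unexported tagValuer is approximated here
// by substituting the tag value into the expression up front, and examplePrune
// is a hypothetical helper, not part of this package.
func examplePrune() {
	// With region bound to 'west', the condition `region = 'east' AND value > 10`
	// becomes the expression below, which can never hold.
	expr, err := influxql.ParseExpr(`'west' = 'east' AND value > 10`)
	if err != nil {
		panic(err)
	}

	// Reduce folds 'west' = 'east' to false and short-circuits the AND, so the
	// whole filter becomes the literal false and the tag set is dropped, just
	// as expandExprWithValues returns nil above.
	if b, ok := influxql.Reduce(expr, nil).(*influxql.BooleanLiteral); ok && !b.Val {
		fmt.Println("tag set pruned")
	}
}
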
// plan creates an execution plan for the given SelectStatement and returns an Executor.
func (q *QueryExecutor) plan(stmt *influxql.SelectStatement, chunkSize int) (Executor, error) {
	shards := map[uint64]meta.ShardInfo{} // Shards requiring mappers.

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: time.Now().UTC()})
	tmin, tmax := influxql.TimeRange(stmt.Condition)
	if tmax.IsZero() {
		tmax = time.Now()
	}
	if tmin.IsZero() {
		tmin = time.Unix(0, 0)
	}

	for _, src := range stmt.Sources {
		mm, ok := src.(*influxql.Measurement)
		if !ok {
			return nil, fmt.Errorf("invalid source type: %#v", src)
		}

		// Build the set of target shards. Using shard IDs as keys ensures each shard ID
		// occurs only once.
		shardGroups, err := q.MetaStore.ShardGroupsByTimeRange(mm.Database, mm.RetentionPolicy, tmin, tmax)
		if err != nil {
			return nil, err
		}
		for _, g := range shardGroups {
			for _, sh := range g.Shards {
				shards[sh.ID] = sh
			}
		}
	}

	// Build the Mappers, one per shard.
	mappers := []Mapper{}
	for _, sh := range shards {
		m, err := q.ShardMapper.CreateMapper(sh, stmt.String(), chunkSize)
		if err != nil {
			return nil, err
		}
		if m == nil {
			// No data for this shard, skip it.
			continue
		}
		mappers = append(mappers, m)
	}

	var executor Executor
	if len(mappers) > 0 {
		// All Mappers are of the same type, so check the first to determine the correct Executor type.
		if _, ok := mappers[0].(*RawMapper); ok {
			executor = NewRawExecutor(stmt, mappers, chunkSize)
		} else {
			executor = NewAggregateExecutor(stmt, mappers)
		}
	} else {
		// With no mappers, the Executor type doesn't matter.
		executor = NewRawExecutor(stmt, nil, chunkSize)
	}

	return executor, nil
}

// PlanSelect creates an execution plan for the given SelectStatement and returns an Executor.
func (q *QueryExecutor) PlanSelect(stmt *influxql.SelectStatement, chunkSize int) (Executor, error) {
	shards := map[uint64]meta.ShardInfo{} // Shards requiring mappers.

	// It is important to "stamp" this time once, so that every evaluation of
	// `now()` in the statement resolves to EXACTLY the same instant.
	now := time.Now().UTC()

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})
	tmin, tmax := influxql.TimeRange(stmt.Condition)
	if tmax.IsZero() {
		tmax = now
	}
	if tmin.IsZero() {
		tmin = time.Unix(0, 0)
	}

	for _, src := range stmt.Sources {
		mm, ok := src.(*influxql.Measurement)
		if !ok {
			return nil, fmt.Errorf("invalid source type: %#v", src)
		}

		// Build the set of target shards. Using shard IDs as keys ensures each shard ID
		// occurs only once.
		shardGroups, err := q.MetaStore.ShardGroupsByTimeRange(mm.Database, mm.RetentionPolicy, tmin, tmax)
		if err != nil {
			return nil, err
		}
		for _, g := range shardGroups {
			for _, sh := range g.Shards {
				shards[sh.ID] = sh
			}
		}
	}

	// Build the Mappers, one per shard.
	mappers := []Mapper{}
	for _, sh := range shards {
		m, err := q.ShardMapper.CreateMapper(sh, stmt, chunkSize)
		if err != nil {
			return nil, err
		}
		if m == nil {
			// No data for this shard, skip it.
			continue
		}
		mappers = append(mappers, m)
	}

	executor := NewSelectExecutor(stmt, mappers, chunkSize)
	return executor, nil
}

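// The time-bounding prologue shared by the planners here, extracted as a
// condensed, hypothetical helper (exampleTimeBounds is not part of this
// package; it only uses the public influxql API). Reducing with a single
// stamped `now` before calling TimeRange guarantees consistent bounds.
func exampleTimeBounds(cond influxql.Expr) (tmin, tmax time.Time) {
	now := time.Now().UTC()
	cond = influxql.Reduce(cond, &influxql.NowValuer{Now: now})
	tmin, tmax = influxql.TimeRange(cond)
	if tmax.IsZero() {
		tmax = now // An unbounded upper end defaults to "now".
	}
	if tmin.IsZero() {
		tmin = time.Unix(0, 0) // An unbounded lower end defaults to the epoch.
	}
	return tmin, tmax
}
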
// mergeSeriesFilters merges two sets of filter expressions and culls series IDs.
func mergeSeriesFilters(op influxql.Token, ids SeriesIDs, lfilters, rfilters map[uint64]influxql.Expr) (SeriesIDs, map[uint64]influxql.Expr) {
	// Create a map to hold the final set of series filter expressions.
	filters := make(map[uint64]influxql.Expr)
	// Resulting list of series IDs.
	var series SeriesIDs

	// Combining logic:
	// +==========+==========+==========+=======================+=======================+
	// | operator | LHS      | RHS      | intermediate expr     | reduced filter        |
	// +==========+==========+==========+=======================+=======================+
	// |          | <nil>    | <r-expr> | true OR <r-expr>      | true                  |
	// |          |----------+----------+-----------------------+-----------------------+
	// | OR       | <l-expr> | <nil>    | <l-expr> OR true      | true                  |
	// |          |----------+----------+-----------------------+-----------------------+
	// |          | <nil>    | <nil>    | true OR true          | true                  |
	// |          |----------+----------+-----------------------+-----------------------+
	// |          | <l-expr> | <r-expr> | <l-expr> OR <r-expr>  | <l-expr> OR <r-expr>  |
	// +----------+----------+----------+-----------------------+-----------------------+
	// |          | <nil>    | <r-expr> | false AND <r-expr>    | false*                |
	// |          |----------+----------+-----------------------+-----------------------+
	// | AND      | <l-expr> | <nil>    | <l-expr> AND false    | false*                |
	// |          |----------+----------+-----------------------+-----------------------+
	// |          | <nil>    | <nil>    | false AND false       | false*                |
	// |          |----------+----------+-----------------------+-----------------------+
	// |          | <l-expr> | <r-expr> | <l-expr> AND <r-expr> | <l-expr> AND <r-expr> |
	// +----------+----------+----------+-----------------------+-----------------------+
	// *literal false filters and their series IDs are excluded from the results

	def := false
	if op == influxql.OR {
		def = true
	}

	for _, id := range ids {
		// Get LHS and RHS filter expressions for this series ID.
		lfilter, rfilter := lfilters[id], rfilters[id]

		// Set default filters if either LHS or RHS expressions were nil.
		if lfilter == nil {
			lfilter = &influxql.BooleanLiteral{Val: def}
		}
		if rfilter == nil {
			rfilter = &influxql.BooleanLiteral{Val: def}
		}

		// Create the intermediate filter expression for this series ID.
		be := &influxql.BinaryExpr{
			Op:  op,
			LHS: lfilter,
			RHS: rfilter,
		}

		// Reduce the intermediate expression.
		expr := influxql.Reduce(be, nil)

		// If the expression reduced to false, exclude this series ID and filter.
		if b, ok := expr.(*influxql.BooleanLiteral); ok && !b.Val {
			continue
		}

		// Store the series ID and merged filter in the final results.
		filters[id] = expr
		series = append(series, id)
	}

	return series, filters
}

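// One concrete row of the table above, as a hypothetical standalone check
// (exampleMergeReduction is not part of this package; it only uses the public
// influxql API): under AND, a nil LHS defaults to false, the intermediate
// expression "false AND <r-expr>" reduces to the literal false, and the
// series would be culled.
func exampleMergeReduction() {
	rfilter, err := influxql.ParseExpr(`host = 'a'`)
	if err != nil {
		panic(err)
	}

	// A nil LHS under AND defaults to the false literal, per the table.
	be := &influxql.BinaryExpr{
		Op:  influxql.AND,
		LHS: &influxql.BooleanLiteral{Val: false},
		RHS: rfilter,
	}

	// Reduce short-circuits the AND; the series ID and filter are dropped.
	if b, ok := influxql.Reduce(be, nil).(*influxql.BooleanLiteral); ok && !b.Val {
		fmt.Println("series excluded") // the false* rows in the table
	}
}
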
// PlanSelect creates an execution plan for the given SelectStatement and returns an Executor.
func (q *QueryExecutor) PlanSelect(stmt *influxql.SelectStatement, chunkSize int) (Executor, error) {
	var shardIDs []uint64
	shards := map[uint64]meta.ShardInfo{} // Shards requiring mappers.

	// It is important to "stamp" this time once, so that every evaluation of
	// `now()` in the statement resolves to EXACTLY the same instant.
	now := time.Now().UTC()

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})
	tmin, tmax := influxql.TimeRange(stmt.Condition)
	if tmax.IsZero() {
		tmax = now
	}
	if tmin.IsZero() {
		tmin = time.Unix(0, 0)
	}

	for _, src := range stmt.Sources {
		mm, ok := src.(*influxql.Measurement)
		if !ok {
			return nil, fmt.Errorf("invalid source type: %#v", src)
		}

		// Build the set of target shards. Using shard IDs as keys ensures each shard ID
		// occurs only once.
		shardGroups, err := q.MetaClient.ShardGroupsByTimeRange(mm.Database, mm.RetentionPolicy, tmin, tmax)
		if err != nil {
			return nil, err
		}
		for _, g := range shardGroups {
			for _, sh := range g.Shards {
				if _, ok := shards[sh.ID]; !ok {
					shards[sh.ID] = sh
					shardIDs = append(shardIDs, sh.ID)
				}
			}
		}
	}

	// Sort shard IDs to make testing deterministic.
	sort.Sort(uint64Slice(shardIDs))

	// Build the Mappers, one per shard.
	mappers := []Mapper{}
	for _, shardID := range shardIDs {
		sh := shards[shardID]

		m, err := q.ShardMapper.CreateMapper(sh, stmt, chunkSize)
		if err != nil {
			return nil, err
		}
		if m == nil {
			// No data for this shard, skip it.
			continue
		}
		mappers = append(mappers, m)
	}

	// Certain operations on the SELECT statement can be performed by the AggregateExecutor without
	// assistance from the Mappers. This allows the AggregateExecutor to prepare aggregation functions
	// and mathematical functions.
	stmt.RewriteDistinct()

	if (stmt.IsRawQuery && !stmt.HasDistinct()) || stmt.IsSimpleDerivative() {
		return NewRawExecutor(stmt, mappers, chunkSize), nil
	}
	return NewAggregateExecutor(stmt, mappers), nil
}

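// Which branch a statement takes depends on its shape. A quick illustration,
// assuming influxql.ParseStatement from the public API; exampleExecutorChoice
// is a hypothetical helper, not part of this package.
func exampleExecutorChoice() {
	stmt, err := influxql.ParseStatement(`SELECT mean(value) FROM cpu`)
	if err != nil {
		panic(err)
	}

	// An aggregate call means IsRawQuery is false, so the statement would be
	// handed to the AggregateExecutor rather than the RawExecutor.
	sel := stmt.(*influxql.SelectStatement)
	fmt.Println(sel.IsRawQuery) // false
}
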
// Ensure an expression can be reduced.
func TestReduce(t *testing.T) {
	now := mustParseTime("2000-01-01T00:00:00Z")

	for i, tt := range []struct {
		in   string
		out  string
		data Valuer
	}{
		// Number literals.
		{in: `1 + 2`, out: `3.000`},
		{in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: `(foo * 2.000) + 16.500`},
		{in: `foo(bar(2 + 3), 4)`, out: `foo(bar(5.000), 4.000)`},
		{in: `4 / 0`, out: `0.000`},
		{in: `4 = 4`, out: `true`},
		{in: `4 <> 4`, out: `false`},
		{in: `6 > 4`, out: `true`},
		{in: `4 >= 4`, out: `true`},
		{in: `4 < 6`, out: `true`},
		{in: `4 <= 4`, out: `true`},
		{in: `4 AND 5`, out: `4.000 AND 5.000`},

		// Boolean literals.
		{in: `true AND false`, out: `false`},
		{in: `true OR false`, out: `true`},
		{in: `true OR (foo = bar AND 1 > 2)`, out: `true`},
		{in: `(foo = bar AND 1 > 2) OR true`, out: `true`},
		{in: `false OR (foo = bar AND 1 > 2)`, out: `false`},
		{in: `(foo = bar AND 1 > 2) OR false`, out: `false`},
		{in: `true = false`, out: `false`},
		{in: `true <> false`, out: `true`},
		{in: `true + false`, out: `true + false`},

		// Time literals.
		{in: `now() + 2h`, out: `'2000-01-01T02:00:00Z'`, data: map[string]interface{}{"now()": now}},
		{in: `now() / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`, data: map[string]interface{}{"now()": now}},
		{in: `4µ + now()`, out: `'2000-01-01T00:00:00.000004Z'`, data: map[string]interface{}{"now()": now}},
		{in: `now() = now()`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() <> now()`, out: `false`, data: map[string]interface{}{"now()": now}},
		{in: `now() < now() + 1h`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() <= now() + 1h`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() >= now() - 1h`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() > now() - 1h`, out: `true`, data: map[string]interface{}{"now()": now}},
		{in: `now() - (now() - 60s)`, out: `1m`, data: map[string]interface{}{"now()": now}},
		{in: `now() AND now()`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, data: map[string]interface{}{"now()": now}},
		{in: `now()`, out: `now()`},

		// Duration literals.
		{in: `10m + 1h - 60s`, out: `69m`},
		{in: `(10m / 2) * 5`, out: `25m`},
		{in: `60s = 1m`, out: `true`},
		{in: `60s <> 1m`, out: `false`},
		{in: `60s < 1h`, out: `true`},
		{in: `60s <= 1h`, out: `true`},
		{in: `60s > 12s`, out: `true`},
		{in: `60s >= 1m`, out: `true`},
		{in: `60s AND 1m`, out: `1m AND 1m`},
		{in: `60m / 0`, out: `0s`},
		{in: `60m + 50`, out: `1h + 50.000`},

		// String literals.
		{in: `'foo' + 'bar'`, out: `'foobar'`},

		// Variable references.
		{in: `foo`, out: `'bar'`, data: map[string]interface{}{"foo": "bar"}},
		{in: `foo = 'bar'`, out: `true`, data: map[string]interface{}{"foo": "bar"}},
		{in: `foo = 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}},
		{in: `foo <> 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}},
	} {
		// Fold expression.
		expr := influxql.Reduce(MustParseExpr(tt.in), tt.data)

		// Compare with expected output.
		if out := expr.String(); tt.out != out {
			t.Errorf("%d. %s: unexpected expr:\n\nexp=%s\n\ngot=%s\n\n", i, tt.in, tt.out, out)
			continue
		}
	}
}

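// The data column above is a map-backed implementation of influxql.Valuer,
// presumably defined alongside this test; a minimal sketch of what it needs
// to look like for the table to compile.
type Valuer map[string]interface{}

// Value implements influxql.Valuer by looking the key up in the map; a nil
// Valuer simply reports every key as absent.
func (o Valuer) Value(key string) (v interface{}, ok bool) {
	v, ok = o[key]
	return
}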