func (s *renderNode) initWhere(where *parser.Where) (*filterNode, error) { if where == nil { return nil, nil } f := &filterNode{p: s.planner, source: s.source} f.ivarHelper = parser.MakeIndexedVarHelper(f, len(s.sourceInfo[0].sourceColumns)) var err error f.filter, err = s.planner.analyzeExpr(where.Expr, s.sourceInfo, f.ivarHelper, parser.TypeBool, true, "WHERE") if err != nil { return nil, err } // Make sure there are no aggregation/window functions in the filter // (after subqueries have been expanded). if err := s.planner.parser.AssertNoAggregationOrWindowing( f.filter, "WHERE", s.planner.session.SearchPath, ); err != nil { return nil, err } // Insert the newly created filterNode between the renderNode and // its original FROM source. f.source = s.source s.source.plan = f return f, nil }
// Initializes the column structures. func (n *scanNode) initDescDefaults(scanVisibility scanVisibility) { n.scanVisibility = scanVisibility n.index = &n.desc.PrimaryIndex n.cols = make([]sqlbase.ColumnDescriptor, 0, len(n.desc.Columns)+len(n.desc.Mutations)) switch scanVisibility { case publicColumns: n.cols = append(n.cols, n.desc.Columns...) case publicAndNonPublicColumns: n.cols = append(n.cols, n.desc.Columns...) for _, mutation := range n.desc.Mutations { if c := mutation.GetColumn(); c != nil { col := *c // Even if the column is non-nullable it can be null in the // middle of a schema change. col.Nullable = true n.cols = append(n.cols, col) } } } n.resultColumns = makeResultColumns(n.cols) n.colIdxMap = make(map[sqlbase.ColumnID]int, len(n.cols)) for i, c := range n.cols { n.colIdxMap[c.ID] = i } n.valNeededForCol = make([]bool, len(n.cols)) for i := range n.cols { n.valNeededForCol[i] = true } n.row = make([]parser.Datum, len(n.cols)) n.filterVars = parser.MakeIndexedVarHelper(n, len(n.cols)) }
// init loads and type-checks the CHECK constraint expressions of
// tableDesc into the checkHelper so they can later be evaluated
// against individual rows. It is a no-op for tables without CHECK
// constraints.
func (c *checkHelper) init(
	p *planner, tn *parser.TableName, tableDesc *sqlbase.TableDescriptor,
) error {
	if len(tableDesc.Checks) == 0 {
		return nil
	}

	c.cols = tableDesc.Columns
	c.sourceInfo = newSourceInfoForSingleTable(*tn, makeResultColumns(tableDesc.Columns))

	// Parse all check expressions in one batch.
	c.exprs = make([]parser.TypedExpr, len(tableDesc.Checks))
	exprStrings := make([]string, len(tableDesc.Checks))
	for i, check := range tableDesc.Checks {
		exprStrings[i] = check.Expr
	}
	exprs, err := parser.ParseExprsTraditional(exprStrings)
	if err != nil {
		return err
	}

	// Type-check each expression as a boolean against the table's
	// columns; all expressions share one indexed-var helper.
	ivarHelper := parser.MakeIndexedVarHelper(c, len(c.cols))
	for i, raw := range exprs {
		typedExpr, err := p.analyzeExpr(raw, multiSourceInfo{c.sourceInfo},
			ivarHelper, parser.TypeBool, false, "")
		if err != nil {
			return err
		}
		c.exprs[i] = typedExpr
	}
	c.ivars = ivarHelper.GetIndexedVars()
	// Scratch row reused for each evaluation; one slot per table column.
	c.curSourceRow = make(parser.DTuple, len(c.cols))
	return nil
}
// propagateOrWrapFilters triggers filter propagation on the given
// node, and creates a new filterNode if there is any remaining filter
// after the propagation.
func (p *planner) propagateOrWrapFilters(
	plan planNode, info *dataSourceInfo, filter parser.TypedExpr,
) (planNode, error) {
	newPlan, remainingFilter, err := p.propagateFilters(plan, info, filter)
	if err != nil {
		return plan, err
	}

	// If there is no remaining filter, simply return the new plan.
	if isFilterTrue(remainingFilter) {
		return newPlan, nil
	}

	// Otherwise, wrap it using a new filterNode.
	if info == nil {
		info = newSourceInfoForSingleTable(anonymousTable, newPlan.Columns())
	}
	f := &filterNode{
		p:      p,
		source: planDataSource{plan: newPlan, info: info},
	}
	f.ivarHelper = parser.MakeIndexedVarHelper(f, len(info.sourceColumns))
	// Re-anchor the remaining filter's indexed vars onto the new
	// filterNode's helper.
	f.filter = f.ivarHelper.Rebind(remainingFilter, false /* helper is fresh, no reset needed */, false)
	return f, nil
}
// newReturningHelper creates a new returningHelper for use by an
// insert/update node.
func (p *planner) newReturningHelper(
	r parser.ReturningExprs,
	desiredTypes []parser.Type,
	alias string,
	tablecols []sqlbase.ColumnDescriptor,
) (*returningHelper, error) {
	rh := &returningHelper{
		p: p,
	}
	// An empty RETURNING clause needs no columns or expressions.
	if len(r) == 0 {
		return rh, nil
	}

	// Aggregate and window functions are not valid in RETURNING.
	for _, e := range r {
		if err := p.parser.AssertNoAggregationOrWindowing(
			e.Expr, "RETURNING", p.session.SearchPath,
		); err != nil {
			return nil, err
		}
	}

	// The RETURNING expressions resolve against the target table's
	// columns, under the statement's table alias.
	rh.columns = make(ResultColumns, 0, len(r))
	aliasTableName := parser.TableName{TableName: parser.Name(alias)}
	rh.source = newSourceInfoForSingleTable(aliasTableName, makeResultColumns(tablecols))
	rh.exprs = make([]parser.TypedExpr, 0, len(r))
	ivarHelper := parser.MakeIndexedVarHelper(rh, len(tablecols))
	for i, target := range r {
		// Pre-normalize VarNames at the top level so that checkRenderStar can see stars.
		if err := target.NormalizeTopLevelVarName(); err != nil {
			return nil, err
		}

		// A star target expands into one expression/column per table column.
		if isStar, cols, typedExprs, err := checkRenderStar(target, rh.source, ivarHelper); err != nil {
			return nil, err
		} else if isStar {
			rh.exprs = append(rh.exprs, typedExprs...)
			rh.columns = append(rh.columns, cols...)
			continue
		}

		// When generating an output column name it should exactly match the original
		// expression, so determine the output column name before we perform any
		// manipulations to the expression.
		outputName := getRenderColName(target)

		// Use the caller's desired type when one was provided for this position.
		desired := parser.TypeAny
		if len(desiredTypes) > i {
			desired = desiredTypes[i]
		}

		typedExpr, err := rh.p.analyzeExpr(target.Expr, multiSourceInfo{rh.source}, ivarHelper, desired, false, "")
		if err != nil {
			return nil, err
		}
		rh.exprs = append(rh.exprs, typedExpr)
		rh.columns = append(rh.columns, ResultColumn{Name: outputName, Typ: typedExpr.ResolvedType()})
	}
	return rh, nil
}
func makeSelectNode(t *testing.T) *selectNode { desc := testTableDesc() sel := testInitDummySelectNode(desc) if err := desc.AllocateIDs(); err != nil { t.Fatal(err) } numColumns := len(sel.sourceInfo[0].sourceColumns) sel.ivarHelper = parser.MakeIndexedVarHelper(sel, numColumns) sel.curSourceRow = make(parser.DTuple, numColumns) return sel }
func (eh *exprHelper) init( expr Expression, types []sqlbase.ColumnType_Kind, evalCtx *parser.EvalContext, ) error { if expr.Expr == "" { return nil } eh.types = types eh.evalCtx = evalCtx eh.vars = parser.MakeIndexedVarHelper(eh, len(types)) var err error eh.expr, err = processExpression(expr, &eh.vars) return err }
func testInitDummySelectNode(desc *sqlbase.TableDescriptor) *renderNode { p := makeTestPlanner() scan := &scanNode{p: p} scan.desc = *desc scan.initDescDefaults(publicColumns) sel := &renderNode{planner: p} sel.source.plan = scan testName := parser.TableName{TableName: parser.Name(desc.Name), DatabaseName: parser.Name("test")} sel.source.info = newSourceInfoForSingleTable(testName, scan.Columns()) sel.sourceInfo = multiSourceInfo{sel.source.info} sel.ivarHelper = parser.MakeIndexedVarHelper(sel, len(scan.Columns())) return sel }
// newAggregator creates an aggregator processor for the given spec,
// reading rows from input and emitting results to output.
func newAggregator(
	ctx *FlowCtx, spec *AggregatorSpec, input RowSource, output RowReceiver,
) (*aggregator, error) {
	ag := &aggregator{
		input:       input,
		output:      output,
		ctx:         log.WithLogTag(ctx.Context, "Agg", nil),
		rows:        &RowBuffer{},
		buckets:     make(map[string]struct{}),
		inputCols:   make(columns, len(spec.Exprs)),
		outputTypes: make([]*sqlbase.ColumnType, len(spec.Exprs)),
		groupCols:   make(columns, len(spec.GroupCols)),
	}

	// Resolve the input column index and type for each aggregation
	// expression in the spec.
	inputTypes := make([]*sqlbase.ColumnType, len(spec.Exprs))
	for i, expr := range spec.Exprs {
		ag.inputCols[i] = expr.ColIdx
		inputTypes[i] = spec.Types[expr.ColIdx]
	}
	copy(ag.groupCols, spec.GroupCols)

	// Loop over the select expressions and extract any aggregate functions --
	// non-aggregation functions are replaced with parser.NewIdentAggregate,
	// (which just returns the last value added to them for a bucket) to provide
	// grouped-by values for each bucket. ag.funcs is updated to contain all
	// the functions which need to be fed values.
	eh := &exprHelper{types: inputTypes}
	eh.vars = parser.MakeIndexedVarHelper(eh, len(eh.types))
	for i, expr := range spec.Exprs {
		fn, retType, err := ag.extractFunc(expr, eh)
		if err != nil {
			return nil, err
		}
		ag.funcs = append(ag.funcs, fn)

		// The aggregate function extracted is an identity function, the return
		// type of this therefore being the i-th input type.
		if retType == nil {
			ag.outputTypes[i] = inputTypes[i]
		} else {
			typ := sqlbase.DatumTypeToColumnType(retType)
			ag.outputTypes[i] = &typ
		}
	}

	return ag, nil
}
// init readies the exprHelper for evaluating expr over rows whose
// columns have the given types. An empty expression string leaves the
// helper unconfigured. Unlike plain compilation, this variant rejects
// expressions that contain aggregate functions.
func (eh *exprHelper) init(
	expr Expression, types []sqlbase.ColumnType_Kind, evalCtx *parser.EvalContext,
) error {
	if expr.Expr == "" {
		return nil
	}
	eh.types = types
	eh.evalCtx = evalCtx
	eh.vars = parser.MakeIndexedVarHelper(eh, len(types))
	var err error
	eh.expr, err = processExpression(expr, &eh.vars)
	if err != nil {
		return err
	}
	// Aggregates are not valid in this context; reject them explicitly.
	var p parser.Parser
	if p.AggregateInExpr(eh.expr, evalCtx.SearchPath) {
		return errors.Errorf("expression '%s' has aggregate", eh.expr)
	}
	return nil
}
func TestProcessExpression(t *testing.T) { defer leaktest.AfterTest(t)() e := Expression{Expr: "@1 * (@2 + @3) + @1"} h := parser.MakeIndexedVarHelper(testVarContainer{}, 4) expr, err := processExpression(e, &h) if err != nil { t.Fatal(err) } if !h.IndexedVarUsed(0) || !h.IndexedVarUsed(1) || !h.IndexedVarUsed(2) || h.IndexedVarUsed(3) { t.Errorf("invalid IndexedVarUsed results %t %t %t %t (expected false false false true)", h.IndexedVarUsed(0), h.IndexedVarUsed(1), h.IndexedVarUsed(2), h.IndexedVarUsed(3)) } str := expr.String() expectedStr := "(var0 * (var1 + var2)) + var0" if str != expectedStr { t.Errorf("invalid expression string '%s', expected '%s'", str, expectedStr) } }
// newReturningHelper creates a new returningHelper for use by an // insert/update node. func (p *planner) newReturningHelper( r parser.ReturningExprs, desiredTypes []parser.Type, alias string, tablecols []sqlbase.ColumnDescriptor, ) (*returningHelper, error) { rh := &returningHelper{ p: p, } if len(r) == 0 { return rh, nil } for _, e := range r { if err := p.parser.AssertNoAggregationOrWindowing( e.Expr, "RETURNING", p.session.SearchPath, ); err != nil { return nil, err } } rh.columns = make(ResultColumns, 0, len(r)) aliasTableName := parser.TableName{TableName: parser.Name(alias)} rh.source = newSourceInfoForSingleTable(aliasTableName, makeResultColumns(tablecols)) rh.exprs = make([]parser.TypedExpr, 0, len(r)) ivarHelper := parser.MakeIndexedVarHelper(rh, len(tablecols)) for _, target := range r { cols, typedExprs, _, err := p.computeRender(target, parser.TypeAny, rh.source, ivarHelper, true) if err != nil { return nil, err } rh.columns = append(rh.columns, cols...) rh.exprs = append(rh.exprs, typedExprs...) } return rh, nil }
// SelectClause selects rows from a single table. Select is the workhorse of the
// SQL statements. In the slowest and most general case, select must perform
// full table scans across multiple tables and sort and join the resulting rows
// on arbitrary columns. Full table scans can be avoided when indexes can be
// used to satisfy the where-clause. scanVisibility controls which columns are
// visible to the select.
//
// NB: This is passed directly to planNode only when there is no ORDER BY,
// LIMIT, or parenthesis in the parsed SELECT. See `sql/parser.Select` and
// `sql/parser.SelectStatement`.
//
// Privileges: SELECT on table
//   Notes: postgres requires SELECT. Also requires UPDATE on "FOR UPDATE".
//          mysql requires SELECT.
func (p *planner) SelectClause(
	parsed *parser.SelectClause,
	orderBy parser.OrderBy,
	limit *parser.Limit,
	desiredTypes []parser.Type,
	scanVisibility scanVisibility,
) (planNode, error) {
	s := &selectNode{planner: p}

	if err := s.initFrom(parsed, scanVisibility); err != nil {
		return nil, err
	}

	// The indexed-var helper spans the columns of the first FROM source.
	s.ivarHelper = parser.MakeIndexedVarHelper(s, len(s.sourceInfo[0].sourceColumns))

	if err := s.initTargets(parsed.Exprs, desiredTypes); err != nil {
		return nil, err
	}

	if err := s.initWhere(parsed.Where); err != nil {
		return nil, err
	}

	// NB: orderBy, window, and groupBy are passed and can modify the selectNode,
	// but must do so in that order.
	sort, err := p.orderBy(orderBy, s)
	if err != nil {
		return nil, err
	}
	window, err := p.window(parsed, s)
	if err != nil {
		return nil, err
	}
	group, err := p.groupBy(parsed, s)
	if err != nil {
		return nil, err
	}

	if s.filter != nil && group != nil {
		// Allow the group-by to add an implicit "IS NOT NULL" filter.
		s.filter = group.isNotNullFilter(s.filter)
	}

	limitPlan, err := p.Limit(limit)
	if err != nil {
		return nil, err
	}
	distinctPlan := p.Distinct(parsed)

	// Assemble the full plan tree above the selectNode and link the
	// limit/distinct stages back to the top node.
	result := &selectTopNode{
		source:   s,
		group:    group,
		window:   window,
		sort:     sort,
		distinct: distinctPlan,
		limit:    limitPlan,
	}
	s.top = result
	limitPlan.setTop(result)
	distinctPlan.setTop(result)

	return result, nil
}
// makeUpsertHelper constructs the helper that evaluates the SET
// expressions of an upsert. It flattens tuple assignments, substitutes
// DEFAULT placeholders, and type-checks each expression against both
// the target table's columns and the special "excluded" pseudo-table
// built from the insert columns. It also records whether every
// expression is a plain reference to the corresponding excluded column
// (the identity case).
func (p *planner) makeUpsertHelper(
	tn *parser.TableName,
	tableDesc *sqlbase.TableDescriptor,
	insertCols []sqlbase.ColumnDescriptor,
	updateCols []sqlbase.ColumnDescriptor,
	updateExprs parser.UpdateExprs,
	upsertConflictIndex *sqlbase.IndexDescriptor,
) (*upsertHelper, error) {
	defaultExprs, err := makeDefaultExprs(updateCols, &p.parser, &p.evalCtx)
	if err != nil {
		return nil, err
	}

	// Flatten tuple assignments (SET (a, b) = (x, y)) into one
	// expression per updated column; i walks updateCols in lockstep
	// with the untupled expressions.
	untupledExprs := make(parser.Exprs, 0, len(updateExprs))
	i := 0
	for _, updateExpr := range updateExprs {
		if updateExpr.Tuple {
			if t, ok := updateExpr.Expr.(*parser.Tuple); ok {
				for _, e := range t.Exprs {
					typ := updateCols[i].Type.ToDatumType()
					e := fillDefault(e, typ, i, defaultExprs)
					untupledExprs = append(untupledExprs, e)
					i++
				}
			}
		} else {
			typ := updateCols[i].Type.ToDatumType()
			e := fillDefault(updateExpr.Expr, typ, i, defaultExprs)
			untupledExprs = append(untupledExprs, e)
			i++
		}
	}

	sourceInfo := newSourceInfoForSingleTable(*tn, makeResultColumns(tableDesc.Columns))
	excludedSourceInfo := newSourceInfoForSingleTable(upsertExcludedTable, makeResultColumns(insertCols))

	helper := &upsertHelper{
		p:                  p,
		sourceInfo:         sourceInfo,
		excludedSourceInfo: excludedSourceInfo,
	}

	// Type-check every expression against both column sets; the
	// indexed-var helper spans the table's columns followed by the
	// excluded pseudo-table's columns.
	var evalExprs []parser.TypedExpr
	ivarHelper := parser.MakeIndexedVarHelper(helper, len(sourceInfo.sourceColumns)+len(excludedSourceInfo.sourceColumns))
	sources := multiSourceInfo{sourceInfo, excludedSourceInfo}
	for _, expr := range untupledExprs {
		normExpr, err := p.analyzeExpr(expr, sources, ivarHelper, parser.NoTypePreference, false, "")
		if err != nil {
			return nil, err
		}
		evalExprs = append(evalExprs, normExpr)
	}
	helper.evalExprs = evalExprs

	// Detect the identity case: every expression is exactly
	// excluded.<col> for its target update column.
	helper.allExprsIdentity = true
	for i, expr := range evalExprs {
		// analyzeExpr above has normalized all direct column names to ColumnItems.
		c, ok := expr.(*parser.ColumnItem)
		if !ok {
			helper.allExprsIdentity = false
			break
		}
		if len(c.Selector) > 0 ||
			!c.TableName.TableName.Equal(upsertExcludedTable.TableName) ||
			c.ColumnName.Normalize() != parser.ReNormalizeName(updateCols[i].Name) {
			helper.allExprsIdentity = false
			break
		}
	}

	return helper, nil
}
// makeEqualityPredicate constructs a joinPredicate object for joins. The join // condition includes equality between numMergedEqualityColumns columns, // specified by leftColNames and rightColNames. func makeEqualityPredicate( left, right *dataSourceInfo, leftColNames, rightColNames parser.NameList, numMergedEqualityColumns int, concatInfos *dataSourceInfo, ) (resPred *joinPredicate, info *dataSourceInfo, err error) { if len(leftColNames) != len(rightColNames) { panic(fmt.Errorf("left columns' length %q doesn't match right columns' length %q in EqualityPredicate", len(leftColNames), len(rightColNames))) } if len(leftColNames) < numMergedEqualityColumns { panic(fmt.Errorf("cannot merge %d columns, only %d columns to compare", numMergedEqualityColumns, len(leftColNames))) } // Prepare the arrays populated below. cmpOps := make([]func(*parser.EvalContext, parser.Datum, parser.Datum) (parser.DBool, error), len(leftColNames)) leftEqualityIndices := make([]int, len(leftColNames)) rightEqualityIndices := make([]int, len(rightColNames)) // usedLeft represents the list of indices that participate in the // equality predicate. They are collected in order to determine // below which columns remain after the equality; this is used // only when merging result columns. var usedLeft, usedRight []int var columns ResultColumns if numMergedEqualityColumns > 0 { usedLeft = make([]int, len(left.sourceColumns)) for i := range usedLeft { usedLeft[i] = invalidColIdx } usedRight = make([]int, len(right.sourceColumns)) for i := range usedRight { usedRight[i] = invalidColIdx } nResultColumns := len(left.sourceColumns) + len(right.sourceColumns) - numMergedEqualityColumns columns = make(ResultColumns, 0, nResultColumns) } // Find out which columns are involved in EqualityPredicate. for i := range leftColNames { leftColName := leftColNames[i].Normalize() rightColName := rightColNames[i].Normalize() // Find the column name on the left. 
leftIdx, leftType, err := pickUsingColumn(left.sourceColumns, leftColName, "left") if err != nil { return nil, nil, err } // Find the column name on the right. rightIdx, rightType, err := pickUsingColumn(right.sourceColumns, rightColName, "right") if err != nil { return nil, nil, err } // Remember the indices. leftEqualityIndices[i] = leftIdx rightEqualityIndices[i] = rightIdx // Memoize the comparison function. fn, found := parser.FindEqualComparisonFunction(leftType, rightType) if !found { return nil, nil, fmt.Errorf("JOIN/USING types %s for left column %s and %s for right column %s cannot be matched", leftType, leftColName, rightType, rightColName) } cmpOps[i] = fn if i < numMergedEqualityColumns { usedLeft[leftIdx] = i usedRight[rightIdx] = i // Merged columns come first in the results. columns = append(columns, left.sourceColumns[leftIdx]) } } // Now, prepare/complete the metadata for the result columns. // The structure of the join data source results is like this: // - first, all the equality/USING columns; // - then all the left columns, // - then all the right columns, // The duplicate columns appended after the equality/USING columns // are hidden so that they are invisible to star expansion, but // not omitted so that they can still be selected separately. // Finish collecting the column definitions from the left and // right data sources. for i, c := range left.sourceColumns { if usedLeft != nil && usedLeft[i] != invalidColIdx { c.hidden = true } columns = append(columns, c) } for i, c := range right.sourceColumns { if usedRight != nil && usedRight[i] != invalidColIdx { c.hidden = true } columns = append(columns, c) } // Compute the mappings from table aliases to column sets from // both sides into a new alias-columnset mapping for the result // rows. We need to be extra careful about the aliases // for the anonymous table, which needs to be merged. 
aliases := make(sourceAliases, 0, len(left.sourceAliases)+len(right.sourceAliases)) collectAliases := func(sourceAliases sourceAliases, offset int) { for _, alias := range sourceAliases { if alias.name == anonymousTable { continue } newRange := make([]int, len(alias.columnRange)) for i, colIdx := range alias.columnRange { newRange[i] = colIdx + offset } aliases = append(aliases, sourceAlias{name: alias.name, columnRange: newRange}) } } collectAliases(left.sourceAliases, numMergedEqualityColumns) collectAliases(right.sourceAliases, numMergedEqualityColumns+len(left.sourceColumns)) anonymousAlias := sourceAlias{name: anonymousTable, columnRange: nil} var hiddenLeftNames, hiddenRightNames []string // All the merged columns at the beginning belong to the // anonymous data source. for i := 0; i < numMergedEqualityColumns; i++ { anonymousAlias.columnRange = append(anonymousAlias.columnRange, i) hiddenLeftNames = append(hiddenLeftNames, parser.ReNormalizeName(left.sourceColumns[i].Name)) hiddenRightNames = append(hiddenRightNames, parser.ReNormalizeName(right.sourceColumns[i].Name)) } // Now collect the other table-less columns into the anonymous data // source, but hide (skip) those that are already merged. 
collectAnonymousAliases := func( sourceAliases sourceAliases, hiddenNames []string, cols ResultColumns, offset int, ) { for _, alias := range sourceAliases { if alias.name != anonymousTable { continue } for _, colIdx := range alias.columnRange { isHidden := false for _, hiddenName := range hiddenNames { if parser.ReNormalizeName(cols[colIdx].Name) == hiddenName { isHidden = true break } } if !isHidden { anonymousAlias.columnRange = append(anonymousAlias.columnRange, colIdx+offset) } } } } collectAnonymousAliases(left.sourceAliases, hiddenLeftNames, left.sourceColumns, numMergedEqualityColumns) collectAnonymousAliases(right.sourceAliases, hiddenRightNames, right.sourceColumns, numMergedEqualityColumns+len(left.sourceColumns)) if anonymousAlias.columnRange != nil { aliases = append(aliases, anonymousAlias) } info = &dataSourceInfo{ sourceColumns: columns, sourceAliases: aliases, } pred := &joinPredicate{ numLeftCols: len(left.sourceColumns), numRightCols: len(right.sourceColumns), leftColNames: leftColNames, rightColNames: rightColNames, numMergedEqualityColumns: numMergedEqualityColumns, cmpFunctions: cmpOps, leftEqualityIndices: leftEqualityIndices, rightEqualityIndices: rightEqualityIndices, info: info, } // We must initialize the indexed var helper in all cases, even when // there is no on condition, so that getNeededColumns() does not get // confused. pred.iVarHelper = parser.MakeIndexedVarHelper(pred, len(columns)) return pred, info, nil }