// Run is part of the processor interface.
func (ev *evaluator) Run(wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}

	ctx, span := tracing.ChildSpan(ev.ctx, "evaluator")
	defer tracing.FinishSpan(span)

	if log.V(2) {
		log.Infof(ctx, "starting evaluator process")
		defer log.Infof(ctx, "exiting evaluator")
	}

	first := true
	for {
		row, err := ev.input.NextRow()
		if err != nil || row == nil {
			ev.output.Close(err)
			return
		}

		if first {
			first = false

			types := make([]*sqlbase.ColumnType, len(row))
			for i := range types {
				types[i] = &row[i].Type
			}
			for i, expr := range ev.specExprs {
				err := ev.exprs[i].init(expr, types, ev.flowCtx.evalCtx)
				if err != nil {
					ev.output.Close(err)
					return
				}
				ev.exprTypes[i] = sqlbase.DatumTypeToColumnType(ev.exprs[i].expr.ResolvedType())
			}
		}

		outRow, err := ev.eval(row)
		if err != nil {
			ev.output.Close(err)
			return
		}

		if log.V(3) {
			log.Infof(ctx, "pushing %s\n", outRow)
		}

		// Push the row to the output RowReceiver; stop if it doesn't need more
		// rows.
		if !ev.output.PushRow(outRow) {
			if log.V(2) {
				log.Infof(ctx, "no more rows required")
			}
			ev.output.Close(nil)
			return
		}
	}
}
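// For context (not part of the original code): the flow that owns a processor
// typically starts it in its own goroutine and waits on a WaitGroup, which is
// why Run also tolerates a nil *sync.WaitGroup for synchronous callers. A
// minimal sketch, assuming ev and any peer processors were already
// constructed; startEvaluatorSketch is a hypothetical helper for illustration.
func startEvaluatorSketch(ev *evaluator) {
	var wg sync.WaitGroup
	wg.Add(1)
	go ev.Run(&wg)
	// ... other processors in the flow would be started the same way ...
	wg.Wait()
}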
func newAggregator(
	ctx *FlowCtx, spec *AggregatorSpec, input RowSource, output RowReceiver,
) (*aggregator, error) {
	ag := &aggregator{
		input:       input,
		output:      output,
		ctx:         log.WithLogTag(ctx.Context, "Agg", nil),
		rows:        &RowBuffer{},
		buckets:     make(map[string]struct{}),
		inputCols:   make(columns, len(spec.Exprs)),
		outputTypes: make([]*sqlbase.ColumnType, len(spec.Exprs)),
		groupCols:   make(columns, len(spec.GroupCols)),
	}

	inputTypes := make([]*sqlbase.ColumnType, len(spec.Exprs))
	for i, expr := range spec.Exprs {
		ag.inputCols[i] = expr.ColIdx
		inputTypes[i] = spec.Types[expr.ColIdx]
	}
	copy(ag.groupCols, spec.GroupCols)

	// Loop over the select expressions and extract any aggregate functions --
	// non-aggregation functions are replaced with parser.NewIdentAggregate
	// (which just returns the last value added to it for a bucket) to provide
	// the grouped-by values for each bucket. ag.funcs is updated to contain all
	// the functions which need to be fed values.
	eh := &exprHelper{types: inputTypes}
	eh.vars = parser.MakeIndexedVarHelper(eh, len(eh.types))
	for i, expr := range spec.Exprs {
		fn, retType, err := ag.extractFunc(expr, eh)
		if err != nil {
			return nil, err
		}
		ag.funcs = append(ag.funcs, fn)

		// If the extracted aggregate is the identity function, it has no return
		// type of its own, so the output type is the i-th input type.
		if retType == nil {
			ag.outputTypes[i] = inputTypes[i]
		} else {
			typ := sqlbase.DatumTypeToColumnType(retType)
			ag.outputTypes[i] = &typ
		}
	}

	return ag, nil
}
// getTypesForPlanResult returns the types of the elements in the result
// streams of a plan that corresponds to a given planNode.
func (dsp *distSQLPlanner) getTypesForPlanResult(
	planToStreamColMap []int, node planNode,
) []*sqlbase.ColumnType {
	numCols := 0
	for _, streamCol := range planToStreamColMap {
		if numCols <= streamCol {
			numCols = streamCol + 1
		}
	}
	nodeColumns := node.Columns()
	types := make([]*sqlbase.ColumnType, numCols)
	for nodeCol, streamCol := range planToStreamColMap {
		if streamCol != -1 {
			typ := sqlbase.DatumTypeToColumnType(nodeColumns[nodeCol].Typ)
			types[streamCol] = &typ
		}
	}
	return types
}
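// Illustrative sketch, not from the original: planToStreamColMap maps plan
// (node) columns to result stream columns, with -1 meaning the plan column is
// not emitted into the stream. planResultTypesSketch is a hypothetical helper
// that only shows how the mapping is interpreted, assuming a distSQLPlanner
// and a planNode are available.
func planResultTypesSketch(dsp *distSQLPlanner, node planNode) []*sqlbase.ColumnType {
	// Plan columns 0 and 2 become stream columns 0 and 1; plan column 1 is dropped.
	planToStreamColMap := []int{0, -1, 1}
	types := dsp.getTypesForPlanResult(planToStreamColMap, node)
	// len(types) == 2; types[0] and types[1] are taken from node.Columns()[0]
	// and node.Columns()[2] respectively.
	return types
}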
// GetAggregateInfo returns the aggregate constructor and the return type for
// the given aggregate function when applied on the given type.
func GetAggregateInfo(
	fn AggregatorSpec_Func, inputType sqlbase.ColumnType,
) (aggregateConstructor func() parser.AggregateFunc, returnType sqlbase.ColumnType, err error) {
	if fn == AggregatorSpec_IDENT {
		return parser.NewIdentAggregate, inputType, nil
	}

	inputDatumType := inputType.ToDatumType()
	builtins := parser.Aggregates[strings.ToLower(fn.String())]
	for _, b := range builtins {
		for _, t := range b.Types.Types() {
			if inputDatumType.Equivalent(t) {
				// Found!
				return b.AggregateFunc, sqlbase.DatumTypeToColumnType(b.ReturnType), nil
			}
		}
	}
	return nil, sqlbase.ColumnType{}, errors.Errorf(
		"no builtin aggregate for %s on %s", fn, inputType.Kind,
	)
}
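// Illustrative sketch, not from the original: resolving a built-in aggregate
// for an INT input column. AggregatorSpec_SUM and sqlbase.ColumnType_INT are
// assumed enum values (only AggregatorSpec_IDENT appears in the code above),
// and sumAggregateSketch is a hypothetical helper.
func sumAggregateSketch() (parser.AggregateFunc, error) {
	intType := sqlbase.ColumnType{Kind: sqlbase.ColumnType_INT}
	construct, retType, err := GetAggregateInfo(AggregatorSpec_SUM, intType)
	if err != nil {
		return nil, err
	}
	_ = retType // the column type that SUM produces for an INT input
	// Each bucket gets its own AggregateFunc instance from the constructor.
	return construct(), nil
}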