func (p *planner) processColumns(tableDesc *sqlbase.TableDescriptor, node parser.UnresolvedNames) ([]sqlbase.ColumnDescriptor, error) { if node == nil { // VisibleColumns is used here to prevent INSERT INTO <table> VALUES (...) // (as opposed to INSERT INTO <table> (...) VALUES (...)) from writing // hidden columns. At present, the only hidden column is the implicit rowid // primary key column. return tableDesc.VisibleColumns(), nil } cols := make([]sqlbase.ColumnDescriptor, len(node)) colIDSet := make(map[sqlbase.ColumnID]struct{}, len(node)) for i, n := range node { c, err := n.NormalizeUnqualifiedColumnItem() if err != nil { return nil, err } if len(c.Selector) > 0 { return nil, util.UnimplementedWithIssueErrorf(8318, "compound types not supported yet: %q", n) } col, err := tableDesc.FindActiveColumnByName(c.ColumnName) if err != nil { return nil, err } if _, ok := colIDSet[col.ID]; ok { return nil, fmt.Errorf("multiple assignments to the same column %q", n) } colIDSet[col.ID] = struct{}{} cols[i] = col } return cols, nil }
// initFrom initializes the table node, given the parsed select expression func (s *selectNode) initFrom( p *planner, parsed *parser.SelectClause, scanVisibility scanVisibility, ) error { from := parsed.From var colAlias parser.NameList var err error switch len(from) { case 0: s.source.plan = &emptyNode{results: true} case 1: ate, ok := from[0].(*parser.AliasedTableExpr) if !ok { return util.UnimplementedWithIssueErrorf(2970, "unsupported FROM type %T", from[0]) } switch expr := ate.Expr.(type) { case *parser.QualifiedName: // Usual case: a table. scan := p.Scan() s.source.info.alias, err = scan.initTable(p, expr, ate.Hints, scanVisibility) if err != nil { return err } s.source.plan = scan case *parser.Subquery: // We have a subquery (this includes a simple "VALUES"). if ate.As.Alias == "" { return fmt.Errorf("subquery in FROM must have an alias") } s.source.plan, err = p.newPlan(expr.Select, nil, false) if err != nil { return err } default: panic(fmt.Sprintf("unexpected SimpleTableExpr type: %T", expr)) } if ate.As.Alias != "" { // If an alias was specified, use that. s.source.info.alias = string(ate.As.Alias) } colAlias = ate.As.Cols default: return util.UnimplementedWithIssueErrorf(2970, "JOINs and SELECTs from multiple tables "+ "are not yet supported: %s", from) } s.source.info.columns = s.source.plan.Columns() if len(colAlias) > 0 { // Make a copy of the slice since we are about to modify the contents. s.source.info.columns = append([]ResultColumn(nil), s.source.info.columns...) // The column aliases can only refer to explicit columns. for colIdx, aliasIdx := 0, 0; aliasIdx < len(colAlias); colIdx++ { if colIdx >= len(s.source.info.columns) { return util.Errorf( "table \"%s\" has %d columns available but %d columns specified", s.source.info.alias, aliasIdx, len(colAlias)) } if s.source.info.columns[colIdx].hidden { continue } s.source.info.columns[colIdx].Name = string(colAlias[aliasIdx]) aliasIdx++ } } return nil }
// findColumn looks up the column specified by a VarName. The normalized VarName
// is returned.
//
// On success it returns the data source that provides the column and the
// column's index within that source; on failure colIdx is invalidColIdx and
// err describes the problem (unsupported selector, unknown database/table,
// ambiguous reference, or column not found). As a side effect, a discovered
// database name is written back into c.TableName.
func (sources multiSourceInfo) findColumn(
	c *parser.ColumnItem,
) (info *dataSourceInfo, colIdx int, err error) {
	if len(c.Selector) > 0 {
		return nil, invalidColIdx, util.UnimplementedWithIssueErrorf(8318, "compound types not supported yet: %q", c)
	}

	colName := sqlbase.NormalizeName(c.ColumnName)
	var tableName parser.TableName
	if c.TableName.Table() != "" {
		// The reference is qualified (table.column): validate the table/database
		// part against the available sources before searching.
		tableName = sqlbase.NormalizeTableName(c.TableName)

		tn, err := sources.checkDatabaseName(tableName)
		if err != nil {
			return nil, invalidColIdx, err
		}
		tableName = tn

		// Propagate the discovered database name back to the original VarName.
		// (to clarify the output of e.g. EXPLAIN)
		c.TableName.DatabaseName = tableName.DatabaseName
	}

	colIdx = invalidColIdx
	for _, src := range sources {
		// findCol checks column idx of src against colName, threading the
		// (info, colIdx) accumulator through so that a second match across any
		// source yields an ambiguity error. Note: the parameters deliberately
		// shadow the outer src/info/colIdx.
		findCol := func(src, info *dataSourceInfo, colIdx int, idx int) (*dataSourceInfo, int, error) {
			col := src.sourceColumns[idx]
			if sqlbase.ReNormalizeName(col.Name) == colName {
				if colIdx != invalidColIdx {
					// A previous iteration already matched: ambiguous.
					return nil, invalidColIdx, fmt.Errorf("column reference %q is ambiguous", c)
				}
				info = src
				colIdx = idx
			}
			return info, colIdx, nil
		}

		if tableName.Table() == "" {
			// Unqualified column name: search every column of every source.
			for idx := 0; idx < len(src.sourceColumns); idx++ {
				info, colIdx, err = findCol(src, info, colIdx, idx)
				if err != nil {
					return info, colIdx, err
				}
			}
		} else {
			// Qualified column name: only search the columns registered under
			// the given table alias.
			colRange, ok := src.sourceAliases[tableName]
			if !ok {
				// The data source "src" has no column for table tableName.
				// Try again with the next one.
				continue
			}
			for _, idx := range colRange {
				info, colIdx, err = findCol(src, info, colIdx, idx)
				if err != nil {
					return info, colIdx, err
				}
			}
		}
	}

	if colIdx == invalidColIdx {
		return nil, invalidColIdx, fmt.Errorf("column name %q not found", c)
	}

	return info, colIdx, nil
}
// execStmtInOpenTxn executes one statement in the context // of the planner's transaction (which is assumed to exist). // It handles statements that affect the transaction state (BEGIN, COMMIT) // and delegates everything else to `execStmt`. // It binds placeholders. // // The current transaction might be committed/rolled back when this returns. // It might also have transitioned to the aborted or RestartWait state. // // Args: // implicitTxn: set if the current transaction was implicitly // created by the system (i.e. the client sent the statement outside of // a transaction). // COMMIT/ROLLBACK statements are rejected if set. Also, the transaction // might be auto-committed in this function. // firstInTxn: set for the first statement in a transaction. Used // so that nested BEGIN statements are caught. // stmtTimestamp: Used as the statement_timestamp(). // // Returns: // - a Result // - an error, if any. In case of error, the result returned also reflects this error. func (e *Executor) execStmtInOpenTxn( stmt parser.Statement, planMaker *planner, implicitTxn bool, firstInTxn bool, txnState *txnState, ) (Result, error) { if txnState.State != Open { panic("execStmtInOpenTxn called outside of an open txn") } if planMaker.txn == nil { panic("execStmtInOpenTxn called with the a txn not set on the planner") } planMaker.evalCtx.SetTxnTimestamp(txnState.sqlTimestamp) planMaker.evalCtx.SetStmtTimestamp(e.ctx.Clock.PhysicalTime()) // TODO(cdo): Figure out how to not double count on retries. e.updateStmtCounts(stmt) switch s := stmt.(type) { case *parser.BeginTransaction: if !firstInTxn { txnState.updateStateAndCleanupOnErr(errTransactionInProgress, e) return Result{Err: errTransactionInProgress}, errTransactionInProgress } case *parser.CommitTransaction: if implicitTxn { return e.noTransactionHelper(txnState) } // CommitTransaction is executed fully here; there's no planNode for it // and the planner is not involved at all. 
res, err := commitSQLTransaction(txnState, planMaker, commit, e) return res, err case *parser.ReleaseSavepoint: if implicitTxn { return e.noTransactionHelper(txnState) } if err := parser.ValidateRestartCheckpoint(s.Savepoint); err != nil { return Result{Err: err}, err } // ReleaseSavepoint is executed fully here; there's no planNode for it // and the planner is not involved at all. res, err := commitSQLTransaction(txnState, planMaker, release, e) return res, err case *parser.RollbackTransaction: if implicitTxn { return e.noTransactionHelper(txnState) } // RollbackTransaction is executed fully here; there's no planNode for it // and the planner is not involved at all. // Notice that we don't return any errors on rollback. return rollbackSQLTransaction(txnState, planMaker), nil case *parser.SetTransaction: if implicitTxn { return e.noTransactionHelper(txnState) } case *parser.Savepoint: if implicitTxn { return e.noTransactionHelper(txnState) } if err := parser.ValidateRestartCheckpoint(s.Name); err != nil { return Result{Err: err}, err } // We want to disallow SAVEPOINTs to be issued after a transaction has // started running, but such enforcement is problematic in the // presence of transaction retries (since the transaction proto is // necessarily reused). To work around this, we keep track of the // transaction's retrying state and special-case SAVEPOINT when it is // set. // // TODO(andrei): the check for retrying is a hack - we erroneously // allow SAVEPOINT to be issued at any time during a retry, not just // in the beginning. We should figure out how to track whether we // started using the transaction during a retry. if txnState.txn.Proto.IsInitialized() && !txnState.retrying { err := fmt.Errorf("SAVEPOINT %s needs to be the first statement in a transaction", parser.RestartSavepointName) txnState.updateStateAndCleanupOnErr(err, e) return Result{Err: err}, err } // Note that Savepoint doesn't have a corresponding plan node. 
// This here is all the execution there is. txnState.retryIntent = true return Result{}, nil case *parser.RollbackToSavepoint: err := parser.ValidateRestartCheckpoint(s.Savepoint) if err == nil { // Can't restart if we didn't get an error first, which would've put the // txn in a different state. err = errNotRetriable } txnState.updateStateAndCleanupOnErr(err, e) return Result{Err: err}, err case *parser.Prepare: err := util.UnimplementedWithIssueErrorf(7568, "Prepared statements are supported only via the Postgres wire protocol") txnState.updateStateAndCleanupOnErr(err, e) return Result{Err: err}, err case *parser.Execute: err := util.UnimplementedWithIssueErrorf(7568, "Executing prepared statements is supported only via the Postgres wire protocol") txnState.updateStateAndCleanupOnErr(err, e) return Result{Err: err}, err case *parser.Deallocate: if s.Name == "" { planMaker.session.PreparedStatements.DeleteAll() } else { if found := planMaker.session.PreparedStatements.Delete(string(s.Name)); !found { err := fmt.Errorf("prepared statement %s does not exist", s.Name) txnState.updateStateAndCleanupOnErr(err, e) return Result{Err: err}, err } } return Result{PGTag: s.StatementTag()}, nil } if txnState.tr != nil { txnState.tr.LazyLog(stmt, true /* sensitive */) } result, err := e.execStmt(stmt, planMaker, implicitTxn /* autoCommit */) if err != nil { if traceSQL { log.Tracef(txnState.txn.Context, "ERROR: %v", err) } if txnState.tr != nil { txnState.tr.LazyPrintf("ERROR: %v", err) } txnState.updateStateAndCleanupOnErr(err, e) result = Result{Err: err} } else if txnState.tr != nil { tResult := &traceResult{tag: result.PGTag, count: -1} switch result.Type { case parser.RowsAffected: tResult.count = result.RowsAffected case parser.Rows: tResult.count = len(result.Rows) } txnState.tr.LazyLog(tResult, false) if traceSQL { log.Tracef(txnState.txn.Context, "%s done", tResult) } } return result, err }
// MakeTableDesc creates a table descriptor from a CreateTable statement.
// parentID is the ID of the database the table belongs to. The returned
// descriptor is not allocated an ID and is not validated here.
func MakeTableDesc(p *parser.CreateTable, parentID ID) (TableDescriptor, error) {
	desc := TableDescriptor{}
	if err := p.Table.NormalizeTableName(""); err != nil {
		return desc, err
	}
	desc.Name = p.Table.Table()
	desc.ParentID = parentID
	desc.FormatVersion = FamilyFormatVersion
	// We don't use version 0.
	desc.Version = 1

	// Collects the columns of a PRIMARY KEY constraint so they can be marked
	// non-nullable after all defs are processed.
	var primaryIndexColumnSet map[parser.Name]struct{}
	for _, def := range p.Defs {
		switch d := def.(type) {
		case *parser.ColumnTableDef:
			col, idx, err := MakeColumnDefDescs(d)
			if err != nil {
				return desc, err
			}
			desc.AddColumn(*col)
			// idx is non-nil when the column definition carries an index
			// (e.g. UNIQUE or PRIMARY KEY on the column itself).
			if idx != nil {
				if err := desc.AddIndex(*idx, d.PrimaryKey); err != nil {
					return desc, err
				}
			}
		case *parser.IndexTableDef:
			idx := IndexDescriptor{
				Name:             string(d.Name),
				StoreColumnNames: d.Storing,
			}
			if err := idx.FillColumns(d.Columns); err != nil {
				return desc, err
			}
			if err := desc.AddIndex(idx, false); err != nil {
				return desc, err
			}
			if d.Interleave != nil {
				return desc, util.UnimplementedWithIssueErrorf(2972, "interleaving is not yet supported")
			}
		case *parser.UniqueConstraintTableDef:
			idx := IndexDescriptor{
				Name:             string(d.Name),
				Unique:           true,
				StoreColumnNames: d.Storing,
			}
			if err := idx.FillColumns(d.Columns); err != nil {
				return desc, err
			}
			if err := desc.AddIndex(idx, d.PrimaryKey); err != nil {
				return desc, err
			}
			if d.PrimaryKey {
				// Remember the PK columns; they are forced NOT NULL below.
				primaryIndexColumnSet = make(map[parser.Name]struct{})
				for _, c := range d.Columns {
					primaryIndexColumnSet[c.Column] = struct{}{}
				}
			}
			if d.Interleave != nil {
				return desc, util.UnimplementedWithIssueErrorf(2972, "interleaving is not yet supported")
			}
		case *parser.CheckConstraintTableDef:
			// CHECK expressions seem to vary across databases. Wikipedia's entry on
			// Check_constraint (https://en.wikipedia.org/wiki/Check_constraint) says
			// that if the constraint refers to a single column only, it is possible to
			// specify the constraint as part of the column definition. Postgres allows
			// specifying them anywhere about any columns, but it moves all constraints to
			// the table level (i.e., columns never have a check constraint themselves). We
			// will adhere to the stricter definition.

			// preFn validates each column reference in the CHECK expression and
			// replaces it with a dummy datum of the column's type, so the
			// expression can be type-checked below without real row data.
			preFn := func(expr parser.Expr) (err error, recurse bool, newExpr parser.Expr) {
				qname, ok := expr.(*parser.QualifiedName)
				if !ok {
					// Not a qname, don't do anything to this node.
					return nil, true, expr
				}
				if err := qname.NormalizeColumnName(); err != nil {
					return err, false, nil
				}
				if qname.IsStar() {
					return fmt.Errorf("* not allowed in constraint %q", d.Expr.String()), false, nil
				}
				col, err := desc.FindActiveColumnByName(qname.Column())
				if err != nil {
					return fmt.Errorf("column %q not found for constraint %q", qname.String(), d.Expr.String()), false, nil
				}
				// Convert to a dummy datum of the correct type.
				return nil, false, col.Type.ToDatumType()
			}
			expr, err := parser.SimpleVisit(d.Expr, preFn)
			if err != nil {
				return desc, err
			}
			// The rewritten expression must be a variable-free boolean.
			if err := SanitizeVarFreeExpr(expr, parser.TypeBool, "CHECK"); err != nil {
				return desc, err
			}
			var p parser.Parser
			if p.AggregateInExpr(expr) {
				return desc, fmt.Errorf("Aggregate functions are not allowed in CHECK expressions")
			}
			// Store the original (un-rewritten) expression text.
			check := &TableDescriptor_CheckConstraint{Expr: d.Expr.String()}
			if len(d.Name) > 0 {
				check.Name = string(d.Name)
			}
			desc.Checks = append(desc.Checks, check)
		case *parser.FamilyTableDef:
			names := make([]string, len(d.Columns))
			for i, col := range d.Columns {
				names[i] = string(col.Column)
			}
			fam := ColumnFamilyDescriptor{
				Name:        string(d.Name),
				ColumnNames: names,
			}
			desc.AddFamily(fam)
		default:
			return desc, errors.Errorf("unsupported table def: %T", def)
		}
	}

	if primaryIndexColumnSet != nil {
		// Primary index columns are not nullable.
		for i := range desc.Columns {
			if _, ok := primaryIndexColumnSet[parser.Name(desc.Columns[i].Name)]; ok {
				desc.Columns[i].Nullable = false
			}
		}
	}
	return desc, nil
}