func (s *Server) processSelect(node parser.SelectStatement) (rows []sqlwire.Result_Row, _ error) {
	switch nt := node.(type) {
	// case *parser.Select:
	// case *parser.Union:
	// TODO(vivek): return s.query(nt.stmt, nil)
	case parser.Values:
		for _, tuple := range nt {
			data, err := parser.EvalExpr(tuple, nil)
			if err != nil {
				return rows, err
			}
			dTuple, ok := data.(parser.DTuple)
			if !ok {
				// A one-element DTuple is currently turned into whatever its
				// underlying element is, so we have to massage here.
				// See #1741.
				dTuple = parser.DTuple([]parser.Datum{data})
			}
			var vals []sqlwire.Datum
			for _, val := range dTuple {
				switch vt := val.(type) {
				case parser.DBool:
					vals = append(vals, sqlwire.Datum{BoolVal: (*bool)(&vt)})
				case parser.DInt:
					vals = append(vals, sqlwire.Datum{IntVal: (*int64)(&vt)})
				case parser.DFloat:
					vals = append(vals, sqlwire.Datum{FloatVal: (*float64)(&vt)})
				case parser.DString:
					vals = append(vals, sqlwire.Datum{StringVal: (*string)(&vt)})
				case parser.DNull:
					vals = append(vals, sqlwire.Datum{})
				default:
					return rows, util.Errorf("unsupported node: %T", val)
				}
			}
			rows = append(rows, sqlwire.Result_Row{Values: vals})
		}
		return rows, nil
	}
	return nil, util.Errorf("TODO(pmattis): unsupported node: %T", node)
}
// Delete deletes rows from a table.
// Privileges: DELETE and SELECT on table. We currently always use a SELECT statement.
//   Notes: postgres requires DELETE. Also requires SELECT for "USING" and "WHERE" with tables.
//          mysql requires DELETE. Also requires SELECT if a table is used in the "WHERE" clause.
func (p *planner) Delete(n *parser.Delete) (planNode, *roachpb.Error) {
	tableDesc, pErr := p.getAliasedTableLease(n.Table)
	if pErr != nil {
		return nil, pErr
	}

	if err := p.checkPrivilege(tableDesc, privilege.DELETE); err != nil {
		return nil, roachpb.NewError(err)
	}

	// TODO(tamird,pmattis): avoid going through Select to avoid encoding
	// and decoding keys.
	rows, pErr := p.Select(&parser.Select{
		Exprs: tableDesc.allColumnsSelector(),
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if pErr != nil {
		return nil, pErr
	}

	if p.prepareOnly {
		return nil, nil
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex, err := makeColIDtoRowIndex(rows, tableDesc)
	if err != nil {
		return nil, roachpb.NewError(err)
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	b := client.Batch{}
	result := &valuesNode{}
	for rows.Next() {
		rowVals := rows.Values()
		result.rows = append(result.rows, parser.DTuple(nil))

		primaryIndexKey, _, err := encodeIndexKey(
			&primaryIndex, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, roachpb.NewError(err)
		}

		// Delete the secondary indexes.
		indexes := tableDesc.Indexes
		// Also include all the indexes under mutation; mutation state is
		// irrelevant for deletions.
		for _, m := range tableDesc.Mutations {
			if index := m.GetIndex(); index != nil {
				indexes = append(indexes, *index)
			}
		}

		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, roachpb.NewError(err)
		}

		for _, secondaryIndexEntry := range secondaryIndexEntries {
			if log.V(2) {
				log.Infof("Del %s", secondaryIndexEntry.key)
			}
			b.Del(secondaryIndexEntry.key)
		}

		// Delete the row.
		rowStartKey := roachpb.Key(primaryIndexKey)
		rowEndKey := rowStartKey.PrefixEnd()
		if log.V(2) {
			log.Infof("DelRange %s - %s", rowStartKey, rowEndKey)
		}
		b.DelRange(rowStartKey, rowEndKey)
	}

	if pErr := rows.PErr(); pErr != nil {
		return nil, pErr
	}

	if isSystemConfigID(tableDesc.GetID()) {
		// Mark transaction as operating on the system DB.
		p.txn.SetSystemConfigTrigger()
	}

	if pErr := p.txn.Run(&b); pErr != nil {
		return nil, pErr
	}

	return result, nil
}
// Delete deletes rows from a table.
// Privileges: DELETE and SELECT on table. We currently always use a SELECT statement.
//   Notes: postgres requires DELETE. Also requires SELECT for "USING" and "WHERE" with tables.
//          mysql requires DELETE. Also requires SELECT if a table is used in the "WHERE" clause.
func (p *planner) Delete(n *parser.Delete) (planNode, error) {
	tableDesc, err := p.getAliasedTableDesc(n.Table, false /* !allowCache */)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.DELETE); err != nil {
		return nil, err
	}

	// TODO(tamird,pmattis): avoid going through Select to avoid encoding
	// and decoding keys.
	rows, err := p.Select(&parser.Select{
		Exprs: parser.SelectExprs{parser.StarSelectExpr()},
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex, err := makeColIDtoRowIndex(rows, tableDesc)
	if err != nil {
		return nil, err
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	b := client.Batch{}
	result := &valuesNode{}
	for rows.Next() {
		rowVals := rows.Values()
		result.rows = append(result.rows, parser.DTuple(nil))

		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}

		// Delete the secondary indexes.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, tableDesc.Indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		for _, secondaryIndexEntry := range secondaryIndexEntries {
			if log.V(2) {
				log.Infof("Del %s", prettyKey(secondaryIndexEntry.key, 0))
			}
			b.Del(secondaryIndexEntry.key)
		}

		// Delete the row.
		rowStartKey := roachpb.Key(primaryIndexKey)
		rowEndKey := rowStartKey.PrefixEnd()
		if log.V(2) {
			log.Infof("DelRange %s - %s", prettyKey(rowStartKey, 0), prettyKey(rowEndKey, 0))
		}
		b.DelRange(rowStartKey, rowEndKey)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	if IsSystemID(tableDesc.GetID()) {
		// Mark transaction as operating on the system DB.
		p.txn.SetSystemDBTrigger()
	}

	if err := p.txn.Run(&b); err != nil {
		return nil, err
	}

	return result, nil
}
// Insert inserts rows into the database.
// Privileges: INSERT on table
//   Notes: postgres requires INSERT. No "on duplicate key update" option.
//          mysql requires INSERT. Also requires UPDATE on "ON DUPLICATE KEY UPDATE".
func (p *planner) Insert(n *parser.Insert) (planNode, error) {
	// TODO(marcb): We can't use the cached descriptor here because a recent
	// update of the schema (e.g. the addition of an index) might not be
	// reflected in the cached version (yet). Perhaps schema modification
	// routines such as CREATE INDEX should not return until the schema change
	// has been pushed everywhere.
	tableDesc, err := p.getTableLease(n.Table)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.INSERT); err != nil {
		return nil, err
	}

	// Determine which columns we're inserting into.
	cols, err := p.processColumns(tableDesc, n.Columns)
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, c := range cols {
		colIDtoRowIndex[c.ID] = i
	}

	// Add any column not already present that has a DEFAULT expression.
	for _, col := range tableDesc.Columns {
		if _, ok := colIDtoRowIndex[col.ID]; ok {
			continue
		}
		if col.DefaultExpr != nil {
			colIDtoRowIndex[col.ID] = len(cols)
			cols = append(cols, col)
		}
	}

	// Verify we have at least the columns that are part of the primary key.
	primaryKeyCols := map[ColumnID]struct{}{}
	for i, id := range tableDesc.PrimaryIndex.ColumnIDs {
		if _, ok := colIDtoRowIndex[id]; !ok {
			return nil, fmt.Errorf("missing %q primary key column", tableDesc.PrimaryIndex.ColumnNames[i])
		}
		primaryKeyCols[id] = struct{}{}
	}

	// Construct the default expressions. The returned slice will be nil if no
	// column in the table has a default expression.
	defaultExprs, err := p.makeDefaultExprs(cols)
	if err != nil {
		return nil, err
	}

	// Replace any DEFAULT markers with the corresponding default expressions.
	if n.Rows, err = p.fillDefaults(defaultExprs, cols, n.Rows); err != nil {
		return nil, err
	}

	// Transform the values into a rows object. This expands SELECT statements or
	// generates rows from the values contained within the query.
	rows, err := p.makePlan(n.Rows)
	if err != nil {
		return nil, err
	}

	if expressions, columns := len(rows.Columns()), len(cols); expressions > columns {
		return nil, fmt.Errorf("INSERT has more expressions than target columns: %d/%d", expressions, columns)
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	marshalled := make([]interface{}, len(cols))

	b := client.Batch{}
	result := &valuesNode{}
	for rows.Next() {
		rowVals := rows.Values()
		result.rows = append(result.rows, parser.DTuple(nil))

		// The values for the row may be shorter than the number of columns being
		// inserted into. Generate default values for those columns using the
		// default expressions.
		for i := len(rowVals); i < len(cols); i++ {
			if defaultExprs == nil {
				rowVals = append(rowVals, parser.DNull)
				continue
			}
			d, err := defaultExprs[i].Eval(p.evalCtx)
			if err != nil {
				return nil, err
			}
			rowVals = append(rowVals, d)
		}

		// Check to see if NULL is being inserted into any non-nullable column.
		for _, col := range tableDesc.Columns {
			if !col.Nullable {
				if i, ok := colIDtoRowIndex[col.ID]; !ok || rowVals[i] == parser.DNull {
					return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name)
				}
			}
		}

		// Check that the row value types match the column types. This needs to
		// happen before index encoding because certain datum types (i.e. tuple)
		// cannot be used as index values.
		for i, val := range rowVals {
			// Make sure the value can be written to the column before proceeding.
			var err error
			if marshalled[i], err = marshalColumnValue(cols[i], val); err != nil {
				return nil, err
			}
		}

		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}

		// Write the secondary indexes.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, tableDesc.Indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		for _, secondaryIndexEntry := range secondaryIndexEntries {
			if log.V(2) {
				log.Infof("CPut %s -> %v", prettyKey(secondaryIndexEntry.key, 0), secondaryIndexEntry.value)
			}
			b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
		}

		// Write the row sentinel.
		if log.V(2) {
			log.Infof("CPut %s -> NULL", prettyKey(primaryIndexKey, 0))
		}
		b.CPut(primaryIndexKey, nil, nil)

		// Write the row columns.
		for i, val := range rowVals {
			col := cols[i]
			if _, ok := primaryKeyCols[col.ID]; ok {
				// Skip primary key columns as their values are encoded in the row
				// sentinel key which is guaranteed to exist for as long as the row
				// exists.
				continue
			}

			if marshalled[i] != nil {
				// We only output non-NULL values. Non-existent column keys are
				// considered NULL during scanning and the row sentinel ensures we know
				// the row exists.
				key := MakeColumnKey(col.ID, primaryIndexKey)
				if log.V(2) {
					log.Infof("CPut %s -> %v", prettyKey(key, 0), val)
				}
				b.CPut(key, marshalled[i], nil)
			}
		}
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	if IsSystemID(tableDesc.GetID()) {
		// Mark transaction as operating on the system DB.
		p.txn.SetSystemDBTrigger()
	}

	if err := p.txn.Run(&b); err != nil {
		return nil, convertBatchError(tableDesc, b, err)
	}

	return result, nil
}
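The comments inside Insert above describe how a single inserted row fans out into KV writes: a row sentinel key for the primary index, one key per non-NULL, non-primary-key column, and one entry per secondary index. The following is a minimal, self-contained sketch of that layout only; the table/index/column IDs, key strings, and pretty-printed format are hypothetical and stand in for the real encodeIndexKey/MakeColumnKey encoding used above.

package main

import "fmt"

func main() {
	// Hypothetical IDs for a table with primary key "id" and a secondary
	// index on "name".
	const tableID, primaryIndexID, nameIndexID = 51, 1, 2
	pk := 42       // value of primary key column "id"
	name := "carl" // value of indexed column "name"
	nameColID := 2 // column ID of "name"

	// /<tableID>/<primaryIndexID>/<pk> marks that the row exists.
	sentinel := fmt.Sprintf("/%d/%d/%d", tableID, primaryIndexID, pk)
	// /<tableID>/<primaryIndexID>/<pk>/<colID> stores a non-NULL column value.
	nameKey := fmt.Sprintf("%s/%d", sentinel, nameColID)
	// /<tableID>/<secondaryIndexID>/<indexed value>/<pk> is the index entry.
	secondary := fmt.Sprintf("/%d/%d/%q/%d", tableID, nameIndexID, name, pk)

	fmt.Println("CPut", sentinel, "-> NULL") // row sentinel
	fmt.Println("CPut", nameKey, "->", name) // column value
	fmt.Println("CPut", secondary, "-> NULL") // secondary index entry
}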
// Update updates columns for a selection of rows from a table.
// Privileges: UPDATE and SELECT on table. We currently always use a select statement.
//   Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table.
//          mysql requires UPDATE. Also requires SELECT with WHERE clause with table.
func (p *planner) Update(n *parser.Update) (planNode, *roachpb.Error) {
	tableDesc, pErr := p.getAliasedTableLease(n.Table)
	if pErr != nil {
		return nil, pErr
	}

	if err := p.checkPrivilege(tableDesc, privilege.UPDATE); err != nil {
		return nil, roachpb.NewError(err)
	}

	// Determine which columns we're inserting into.
	var names parser.QualifiedNames
	for _, expr := range n.Exprs {
		var epErr *roachpb.Error
		expr.Expr, epErr = p.expandSubqueries(expr.Expr, len(expr.Names))
		if epErr != nil {
			return nil, epErr
		}
		if expr.Tuple {
			// TODO(pmattis): The distinction between Tuple and DTuple here is
			// irritating. We'll see a DTuple if the expression was a subquery that
			// has been evaluated. We'll see a Tuple in other cases.
			n := 0
			switch t := expr.Expr.(type) {
			case parser.Tuple:
				n = len(t)
			case parser.DTuple:
				n = len(t)
			default:
				return nil, roachpb.NewErrorf("unsupported tuple assignment: %T", expr.Expr)
			}
			if len(expr.Names) != n {
				return nil, roachpb.NewUErrorf("number of columns (%d) does not match number of values (%d)",
					len(expr.Names), n)
			}
		}
		names = append(names, expr.Names...)
	}
	cols, err := p.processColumns(tableDesc, names)
	if err != nil {
		return nil, roachpb.NewError(err)
	}

	// Set of columns being updated
	colIDSet := map[ColumnID]struct{}{}
	for _, c := range cols {
		colIDSet[c.ID] = struct{}{}
	}
	// Don't allow updating any column that is part of the primary key.
	for i, id := range tableDesc.PrimaryIndex.ColumnIDs {
		if _, ok := colIDSet[id]; ok {
			return nil, roachpb.NewUErrorf("primary key column %q cannot be updated", tableDesc.PrimaryIndex.ColumnNames[i])
		}
	}

	defaultExprs, err := p.makeDefaultExprs(cols)
	if err != nil {
		return nil, roachpb.NewError(err)
	}

	// Generate the list of select targets. We need to select all of the columns
	// plus we select all of the update expressions in case those expressions
	// reference columns (e.g. "UPDATE t SET v = v + 1"). Note that we flatten
	// expressions for tuple assignments just as we flattened the column names
	// above. So "UPDATE t SET (a, b) = (1, 2)" translates into select targets of
	// "*, 1, 2", not "*, (1, 2)".
	targets := tableDesc.allColumnsSelector()
	i := 0
	for _, expr := range n.Exprs {
		if expr.Tuple {
			switch t := expr.Expr.(type) {
			case parser.Tuple:
				for _, e := range t {
					e = fillDefault(e, i, defaultExprs)
					targets = append(targets, parser.SelectExpr{Expr: e})
					i++
				}
			case parser.DTuple:
				for _, e := range t {
					targets = append(targets, parser.SelectExpr{Expr: e})
					i++
				}
			}
		} else {
			e := fillDefault(expr.Expr, i, defaultExprs)
			targets = append(targets, parser.SelectExpr{Expr: e})
			i++
		}
	}

	// Query the rows that need updating.
	rows, pErr := p.Select(&parser.Select{
		Exprs: targets,
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if pErr != nil {
		return nil, pErr
	}

	// ValArgs have their types populated in the above Select if they are part
	// of an expression ("SET a = 2 + $1") in the type check step where those
	// types are inferred. For the simpler case ("SET a = $1"), populate them
	// using marshalColumnValue. This step also verifies that the expression
	// types match the column types.
	if p.prepareOnly {
		i := 0
		f := func(expr parser.Expr) *roachpb.Error {
			idx := i
			i++
			// DefaultVal doesn't implement TypeCheck
			if _, ok := expr.(parser.DefaultVal); ok {
				return nil
			}
			d, err := expr.TypeCheck(p.evalCtx.Args)
			if err != nil {
				return roachpb.NewError(err)
			}
			if _, err := marshalColumnValue(cols[idx], d, p.evalCtx.Args); err != nil {
				return roachpb.NewError(err)
			}
			return nil
		}
		for _, expr := range n.Exprs {
			if expr.Tuple {
				switch t := expr.Expr.(type) {
				case parser.Tuple:
					for _, e := range t {
						if err := f(e); err != nil {
							return nil, err
						}
					}
				case parser.DTuple:
					for _, e := range t {
						if err := f(e); err != nil {
							return nil, err
						}
					}
				}
			} else {
				if err := f(expr.Expr); err != nil {
					return nil, err
				}
			}
		}
		return nil, nil
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, col := range tableDesc.Columns {
		colIDtoRowIndex[col.ID] = i
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	// Secondary indexes needing updating.
	needsUpdate := func(index IndexDescriptor) bool {
		for _, id := range index.ColumnIDs {
			if _, ok := colIDSet[id]; ok {
				return true
			}
		}
		return false
	}

	indexes := make([]IndexDescriptor, 0, len(tableDesc.Indexes)+len(tableDesc.Mutations))
	var deleteOnlyIndex map[int]struct{}

	for _, index := range tableDesc.Indexes {
		if needsUpdate(index) {
			indexes = append(indexes, index)
		}
	}

	for _, m := range tableDesc.Mutations {
		if index := m.GetIndex(); index != nil {
			if needsUpdate(*index) {
				indexes = append(indexes, *index)

				switch m.State {
				case DescriptorMutation_DELETE_ONLY:
					if deleteOnlyIndex == nil {
						// Allocate at most once.
						deleteOnlyIndex = make(map[int]struct{}, len(tableDesc.Mutations))
					}
					deleteOnlyIndex[len(indexes)-1] = struct{}{}

				case DescriptorMutation_WRITE_ONLY:
				}
			}
		}
	}

	marshalled := make([]interface{}, len(cols))

	b := client.Batch{}
	result := &valuesNode{}
	for rows.Next() {
		rowVals := rows.Values()
		result.rows = append(result.rows, parser.DTuple(nil))

		primaryIndexKey, _, err := encodeIndexKey(
			&primaryIndex, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, roachpb.NewError(err)
		}

		// Compute the current secondary index key:value pairs for this row.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, roachpb.NewError(err)
		}

		// Our updated value expressions occur immediately after the plain
		// columns in the output.
		newVals := rowVals[len(tableDesc.Columns):]

		// Update the row values.
		for i, col := range cols {
			val := newVals[i]
			if !col.Nullable && val == parser.DNull {
				return nil, roachpb.NewUErrorf("null value in column %q violates not-null constraint", col.Name)
			}
			rowVals[colIDtoRowIndex[col.ID]] = val
		}

		// Check that the new value types match the column types. This needs to
		// happen before index encoding because certain datum types (i.e. tuple)
		// cannot be used as index values.
		for i, val := range newVals {
			var mErr error
			if marshalled[i], mErr = marshalColumnValue(cols[i], val, p.evalCtx.Args); mErr != nil {
				return nil, roachpb.NewError(mErr)
			}
		}

		// Compute the new secondary index key:value pairs for this row.
		newSecondaryIndexEntries, eErr := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if eErr != nil {
			return nil, roachpb.NewError(eErr)
		}

		// Update secondary indexes.
		for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
			secondaryIndexEntry := secondaryIndexEntries[i]
			if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) {
				// Do not update Indexes in the DELETE_ONLY state.
				if _, ok := deleteOnlyIndex[i]; !ok {
					if log.V(2) {
						log.Infof("CPut %s -> %v", newSecondaryIndexEntry.key, newSecondaryIndexEntry.value)
					}
					b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil)
				}
				if log.V(2) {
					log.Infof("Del %s", secondaryIndexEntry.key)
				}
				b.Del(secondaryIndexEntry.key)
			}
		}

		// Add the new values.
		for i, val := range newVals {
			col := cols[i]
			key := keys.MakeColumnKey(primaryIndexKey, uint32(col.ID))
			if marshalled[i] != nil {
				// We only output non-NULL values. Non-existent column keys are
				// considered NULL during scanning and the row sentinel ensures we know
				// the row exists.
				if log.V(2) {
					log.Infof("Put %s -> %v", key, val)
				}
				b.Put(key, marshalled[i])
			} else {
				// The column might have already existed but is being set to NULL, so
				// delete it.
				if log.V(2) {
					log.Infof("Del %s", key)
				}
				b.Del(key)
			}
		}
	}

	if pErr := rows.PErr(); pErr != nil {
		return nil, pErr
	}

	if pErr := p.txn.Run(&b); pErr != nil {
		return nil, convertBatchError(tableDesc, b, pErr)
	}

	return result, nil
}
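The secondary-index loop in Update above only rewrites an index entry when its encoded key actually changes: the new key is written with CPut and the old key deleted; unchanged keys are left alone. A standalone sketch of just that comparison follows; the indexEntry type and the literal key strings are hypothetical and stand in for the output of encodeSecondaryIndexes.

package main

import (
	"bytes"
	"fmt"
)

// indexEntry mimics the key/value pairs compared in Update above.
type indexEntry struct {
	key, value []byte
}

func main() {
	// Old and new entries for the same row; the indexed value changed.
	oldEntries := []indexEntry{{key: []byte(`/51/2/"carl"/42`)}}
	newEntries := []indexEntry{{key: []byte(`/51/2/"carla"/42`)}}

	for i, newEntry := range newEntries {
		oldEntry := oldEntries[i]
		if !bytes.Equal(newEntry.key, oldEntry.key) {
			// The indexed column changed: write the new entry, remove the old one.
			fmt.Printf("CPut %s -> %v\n", newEntry.key, newEntry.value)
			fmt.Printf("Del  %s\n", oldEntry.key)
		}
	}
}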
// Insert inserts rows into the database.
// Privileges: INSERT on table
//   Notes: postgres requires INSERT. No "on duplicate key update" option.
//          mysql requires INSERT. Also requires UPDATE on "ON DUPLICATE KEY UPDATE".
func (p *planner) Insert(n *parser.Insert, autoCommit bool) (planNode, *roachpb.Error) {
	// TODO(marcb): We can't use the cached descriptor here because a recent
	// update of the schema (e.g. the addition of an index) might not be
	// reflected in the cached version (yet). Perhaps schema modification
	// routines such as CREATE INDEX should not return until the schema change
	// has been pushed everywhere.
	tableDesc, pErr := p.getTableLease(n.Table)
	if pErr != nil {
		return nil, pErr
	}

	if err := p.checkPrivilege(&tableDesc, privilege.INSERT); err != nil {
		return nil, roachpb.NewError(err)
	}

	var cols []ColumnDescriptor
	// Determine which columns we're inserting into.
	if n.DefaultValues() {
		cols = tableDesc.Columns
	} else {
		var err error
		if cols, err = p.processColumns(&tableDesc, n.Columns); err != nil {
			return nil, roachpb.NewError(err)
		}
	}
	// Number of columns expecting an input. This doesn't include the
	// columns receiving a default value.
	numInputColumns := len(cols)

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, c := range cols {
		colIDtoRowIndex[c.ID] = i
	}

	// Add the column if it has a DEFAULT expression.
	addIfDefault := func(col ColumnDescriptor) {
		if col.DefaultExpr != nil {
			if _, ok := colIDtoRowIndex[col.ID]; !ok {
				colIDtoRowIndex[col.ID] = len(cols)
				cols = append(cols, col)
			}
		}
	}

	// Add any column that has a DEFAULT expression.
	for _, col := range tableDesc.Columns {
		addIfDefault(col)
	}

	// Also add any column in a mutation that is WRITE_ONLY and has
	// a DEFAULT expression.
	for _, m := range tableDesc.Mutations {
		if m.State != DescriptorMutation_WRITE_ONLY {
			continue
		}
		if col := m.GetColumn(); col != nil {
			addIfDefault(*col)
		}
	}

	// Verify we have at least the columns that are part of the primary key.
	primaryKeyCols := map[ColumnID]struct{}{}
	for i, id := range tableDesc.PrimaryIndex.ColumnIDs {
		if _, ok := colIDtoRowIndex[id]; !ok {
			return nil, roachpb.NewUErrorf("missing %q primary key column", tableDesc.PrimaryIndex.ColumnNames[i])
		}
		primaryKeyCols[id] = struct{}{}
	}

	// Construct the default expressions. The returned slice will be nil if no
	// column in the table has a default expression.
	defaultExprs, err := p.makeDefaultExprs(cols)
	if err != nil {
		return nil, roachpb.NewError(err)
	}

	// Replace any DEFAULT markers with the corresponding default expressions.
	n.Rows = p.fillDefaults(defaultExprs, cols, n)

	// Transform the values into a rows object. This expands SELECT statements or
	// generates rows from the values contained within the query.
	rows, pErr := p.makePlan(n.Rows, false)
	if pErr != nil {
		return nil, pErr
	}

	if expressions := len(rows.Columns()); expressions > numInputColumns {
		return nil, roachpb.NewUErrorf("INSERT has more expressions than target columns: %d/%d", expressions, numInputColumns)
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	marshalled := make([]interface{}, len(cols))

	b := p.txn.NewBatch()
	result := &valuesNode{}
	var qvals qvalMap
	if n.Returning != nil {
		result.columns = make([]ResultColumn, len(n.Returning))
		table := tableInfo{
			columns: makeResultColumns(cols, 0),
		}
		qvals = make(qvalMap)
		for i, c := range n.Returning {
			expr, err := resolveQNames(&table, qvals, c.Expr)
			if err != nil {
				return nil, roachpb.NewError(err)
			}
			n.Returning[i].Expr = expr
			typ, err := expr.TypeCheck(p.evalCtx.Args)
			if err != nil {
				return nil, roachpb.NewError(err)
			}
			name := string(c.As)
			if name == "" {
				name = expr.String()
			}
			result.columns[i] = ResultColumn{
				Name: name,
				Typ:  typ,
			}
		}
	}
	for rows.Next() {
		rowVals := rows.Values()
		result.rows = append(result.rows, parser.DTuple(nil))

		// The values for the row may be shorter than the number of columns being
		// inserted into. Generate default values for those columns using the
		// default expressions.
		for i := len(rowVals); i < len(cols); i++ {
			if defaultExprs == nil {
				rowVals = append(rowVals, parser.DNull)
				continue
			}
			d, err := defaultExprs[i].Eval(p.evalCtx)
			if err != nil {
				return nil, roachpb.NewError(err)
			}
			rowVals = append(rowVals, d)
		}

		// Check to see if NULL is being inserted into any non-nullable column.
		for _, col := range tableDesc.Columns {
			if !col.Nullable {
				if i, ok := colIDtoRowIndex[col.ID]; !ok || rowVals[i] == parser.DNull {
					return nil, roachpb.NewUErrorf("null value in column %q violates not-null constraint", col.Name)
				}
			}
		}

		// Check that the row value types match the column types. This needs to
		// happen before index encoding because certain datum types (i.e. tuple)
		// cannot be used as index values.
		for i, val := range rowVals {
			// Make sure the value can be written to the column before proceeding.
			var mErr error
			if marshalled[i], mErr = marshalColumnValue(cols[i], val, p.evalCtx.Args); mErr != nil {
				return nil, roachpb.NewError(mErr)
			}
		}

		if p.prepareOnly {
			continue
		}

		primaryIndexKey, _, eErr := encodeIndexKey(
			&primaryIndex, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if eErr != nil {
			return nil, roachpb.NewError(eErr)
		}

		// Write the secondary indexes.
		indexes := tableDesc.Indexes
		// Also include the secondary indexes in mutation state WRITE_ONLY.
		for _, m := range tableDesc.Mutations {
			if m.State == DescriptorMutation_WRITE_ONLY {
				if index := m.GetIndex(); index != nil {
					indexes = append(indexes, *index)
				}
			}
		}
		secondaryIndexEntries, eErr := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if eErr != nil {
			return nil, roachpb.NewError(eErr)
		}

		for _, secondaryIndexEntry := range secondaryIndexEntries {
			if log.V(2) {
				log.Infof("CPut %s -> %v", secondaryIndexEntry.key, secondaryIndexEntry.value)
			}
			b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
		}

		// Write the row sentinel.
		sentinelKey := keys.MakeNonColumnKey(primaryIndexKey)
		if log.V(2) {
			log.Infof("CPut %s -> NULL", roachpb.Key(sentinelKey))
		}
		// This is subtle: An interface{}(nil) deletes the value, so we pass in
		// []byte{} as a non-nil value.
		b.CPut(sentinelKey, []byte{}, nil)

		// Write the row columns.
		for i, val := range rowVals {
			col := cols[i]

			if _, ok := primaryKeyCols[col.ID]; ok {
				// Skip primary key columns as their values are encoded in the row
				// sentinel key which is guaranteed to exist for as long as the row
				// exists.
				continue
			}

			if marshalled[i] != nil {
				// We only output non-NULL values. Non-existent column keys are
				// considered NULL during scanning and the row sentinel ensures we know
				// the row exists.
				key := keys.MakeColumnKey(primaryIndexKey, uint32(col.ID))
				if log.V(2) {
					log.Infof("CPut %s -> %v", roachpb.Key(key), val)
				}

				b.CPut(key, marshalled[i], nil)
			}
		}

		if n.Returning == nil {
			continue
		}
		qvals.populateQVals(rowVals)
		resrow := make(parser.DTuple, len(n.Returning))
		for i, c := range n.Returning {
			d, err := c.Expr.Eval(p.evalCtx)
			if err != nil {
				return nil, roachpb.NewError(err)
			}
			resrow[i] = d
		}
		result.rows[len(result.rows)-1] = resrow
	}

	if pErr := rows.PErr(); pErr != nil {
		return nil, pErr
	}

	if p.prepareOnly {
		return nil, nil
	}

	if isSystemConfigID(tableDesc.GetID()) {
		// Mark transaction as operating on the system DB.
		p.txn.SetSystemConfigTrigger()
	}

	if autoCommit {
		// An auto-txn can commit the transaction with the batch. This is an
		// optimization to avoid an extra round-trip to the transaction
		// coordinator.
		pErr = p.txn.CommitInBatch(b)
	} else {
		pErr = p.txn.Run(b)
	}
	if pErr != nil {
		return nil, convertBatchError(&tableDesc, *b, pErr)
	}

	return result, nil
}
// Delete deletes rows from a table.
// Privileges: DELETE and SELECT on table. We currently always use a SELECT statement.
//   Notes: postgres requires DELETE. Also requires SELECT for "USING" and "WHERE" with tables.
//          mysql requires DELETE. Also requires SELECT if a table is used in the "WHERE" clause.
func (p *planner) Delete(n *parser.Delete) (planNode, error) {
	tableDesc, err := p.getAliasedTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.DELETE); err != nil {
		return nil, err
	}

	// TODO(tamird,pmattis): avoid going through Select to avoid encoding
	// and decoding keys. Also, avoiding Select may provide more
	// convenient access to index keys which we are not currently
	// deleting.
	rows, err := p.Select(&parser.Select{
		Exprs: parser.SelectExprs{parser.StarSelectExpr()},
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, name := range rows.Columns() {
		c, err := tableDesc.FindColumnByName(name)
		if err != nil {
			return nil, err
		}
		colIDtoRowIndex[c.ID] = i
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	b := client.Batch{}
	result := &valuesNode{}
	for rows.Next() {
		rowVals := rows.Values()
		result.rows = append(result.rows, parser.DTuple(nil))

		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}

		// Delete the secondary indexes.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, tableDesc.Indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		for _, secondaryIndexEntry := range secondaryIndexEntries {
			if log.V(2) {
				log.Infof("Del %s", prettyKey(secondaryIndexEntry.key, 0))
			}
			b.Del(secondaryIndexEntry.key)
		}

		// Delete the row.
		rowStartKey := proto.Key(primaryIndexKey)
		rowEndKey := rowStartKey.PrefixEnd()
		if log.V(2) {
			log.Infof("DelRange %s - %s", prettyKey(rowStartKey, 0), prettyKey(rowEndKey, 0))
		}
		b.DelRange(rowStartKey, rowEndKey)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	if err := p.txn.Run(&b); err != nil {
		return nil, err
	}

	return result, nil
}
// Update updates columns for a selection of rows from a table.
// Privileges: UPDATE and SELECT on table. We currently always use a select statement.
//   Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table.
//          mysql requires UPDATE. Also requires SELECT with WHERE clause with table.
func (p *planner) Update(n *parser.Update) (planNode, error) {
	tableDesc, err := p.getAliasedTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.UPDATE); err != nil {
		return nil, err
	}

	// Determine which columns we're inserting into.
	var names parser.QualifiedNames
	for _, expr := range n.Exprs {
		var err error
		expr.Expr, err = p.expandSubqueries(expr.Expr, len(expr.Names))
		if err != nil {
			return nil, err
		}
		if expr.Tuple {
			// TODO(pmattis): The distinction between Tuple and DTuple here is
			// irritating. We'll see a DTuple if the expression was a subquery that
			// has been evaluated. We'll see a Tuple in other cases.
			n := 0
			switch t := expr.Expr.(type) {
			case parser.Tuple:
				n = len(t)
			case parser.DTuple:
				n = len(t)
			default:
				return nil, util.Errorf("unsupported tuple assignment: %T", expr.Expr)
			}
			if len(expr.Names) != n {
				return nil, fmt.Errorf("number of columns (%d) does not match number of values (%d)",
					len(expr.Names), n)
			}
		}
		names = append(names, expr.Names...)
	}
	cols, err := p.processColumns(tableDesc, names)
	if err != nil {
		return nil, err
	}

	// Set of columns being updated
	colIDSet := map[ColumnID]struct{}{}
	for _, c := range cols {
		colIDSet[c.ID] = struct{}{}
	}
	// Don't allow updating any column that is part of the primary key.
	for i, id := range tableDesc.PrimaryIndex.ColumnIDs {
		if _, ok := colIDSet[id]; ok {
			return nil, fmt.Errorf("primary key column %q cannot be updated", tableDesc.PrimaryIndex.ColumnNames[i])
		}
	}

	defaultExprs, err := p.makeDefaultExprs(cols)
	if err != nil {
		return nil, err
	}

	// Generate the list of select targets. We need to select all of the columns
	// plus we select all of the update expressions in case those expressions
	// reference columns (e.g. "UPDATE t SET v = v + 1"). Note that we flatten
	// expressions for tuple assignments just as we flattened the column names
	// above. So "UPDATE t SET (a, b) = (1, 2)" translates into select targets of
	// "*, 1, 2", not "*, (1, 2)".
	targets := make(parser.SelectExprs, 0, len(n.Exprs)+1)
	targets = append(targets, parser.StarSelectExpr())
	for _, expr := range n.Exprs {
		if expr.Tuple {
			switch t := expr.Expr.(type) {
			case parser.Tuple:
				for i, e := range t {
					e, err := fillDefault(e, i, defaultExprs)
					if err != nil {
						return nil, err
					}
					targets = append(targets, parser.SelectExpr{Expr: e})
				}
			case parser.DTuple:
				for _, e := range t {
					targets = append(targets, parser.SelectExpr{Expr: e})
				}
			}
		} else {
			e, err := fillDefault(expr.Expr, 0, defaultExprs)
			if err != nil {
				return nil, err
			}
			targets = append(targets, parser.SelectExpr{Expr: e})
		}
	}

	// Query the rows that need updating.
	rows, err := p.Select(&parser.Select{
		Exprs: targets,
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, col := range tableDesc.Columns {
		colIDtoRowIndex[col.ID] = i
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	// Secondary indexes needing updating.
	var indexes []IndexDescriptor
	for _, index := range tableDesc.Indexes {
		for _, id := range index.ColumnIDs {
			if _, ok := colIDSet[id]; ok {
				indexes = append(indexes, index)
				break
			}
		}
	}

	marshalled := make([]interface{}, len(cols))

	b := client.Batch{}
	result := &valuesNode{}
	for rows.Next() {
		rowVals := rows.Values()
		result.rows = append(result.rows, parser.DTuple(nil))

		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}

		// Compute the current secondary index key:value pairs for this row.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		// Our updated value expressions occur immediately after the plain
		// columns in the output.
		newVals := rowVals[len(tableDesc.Columns):]

		// Update the row values.
		for i, col := range cols {
			val := newVals[i]
			if !col.Nullable && val == parser.DNull {
				return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name)
			}
			rowVals[colIDtoRowIndex[col.ID]] = val
		}

		// Check that the new value types match the column types. This needs to
		// happen before index encoding because certain datum types (i.e. tuple)
		// cannot be used as index values.
		for i, val := range newVals {
			var err error
			if marshalled[i], err = marshalColumnValue(cols[i], val); err != nil {
				return nil, err
			}
		}

		// Compute the new secondary index key:value pairs for this row.
		newSecondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		// Update secondary indexes.
		for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
			secondaryIndexEntry := secondaryIndexEntries[i]
			if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) {
				if log.V(2) {
					log.Infof("CPut %s -> %v", prettyKey(newSecondaryIndexEntry.key, 0), newSecondaryIndexEntry.value)
				}
				b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil)
				if log.V(2) {
					log.Infof("Del %s", prettyKey(secondaryIndexEntry.key, 0))
				}
				b.Del(secondaryIndexEntry.key)
			}
		}

		// Add the new values.
		for i, val := range newVals {
			col := cols[i]
			key := MakeColumnKey(col.ID, primaryIndexKey)
			if marshalled[i] != nil {
				// We only output non-NULL values. Non-existent column keys are
				// considered NULL during scanning and the row sentinel ensures we know
				// the row exists.
				if log.V(2) {
					log.Infof("Put %s -> %v", prettyKey(key, 0), val)
				}
				b.Put(key, marshalled[i])
			} else {
				// The column might have already existed but is being set to NULL, so
				// delete it.
				if log.V(2) {
					log.Infof("Del %s", prettyKey(key, 0))
				}
				b.Del(key)
			}
		}
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	if err := p.txn.Run(&b); err != nil {
		return nil, convertBatchError(tableDesc, b, err)
	}

	return result, nil
}