func runPut(cmd *cobra.Command, args []string) {
	if len(args) == 0 || len(args)%2 == 1 {
		cmd.Usage()
		return
	}

	count := len(args) / 2
	keys := make([]string, 0, count)
	values := make([]string, 0, count)
	for i := 0; i < len(args); i += 2 {
		keys = append(keys, unquoteArg(args[i], true /* disallow system keys */))
		values = append(values, unquoteArg(args[i+1], false))
	}

	kvDB := makeDBClient()
	if kvDB == nil {
		return
	}

	var b client.Batch
	for i := 0; i < count; i++ {
		b.Put(keys[i], values[i])
	}
	if err := kvDB.Run(&b); err != nil {
		fmt.Fprintf(osStderr, "put failed: %s\n", err)
		osExit(1)
		return
	}
}
// Future home of the asynchronous schema changer that picks up
// queued schema changes and processes them.
//
// applyMutations applies the queued mutations for a table.
func (p *planner) applyMutations(tableDesc *TableDescriptor) error {
	if len(tableDesc.Mutations) == 0 {
		return nil
	}
	newTableDesc := proto.Clone(tableDesc).(*TableDescriptor)
	p.applyUpVersion(newTableDesc)
	// Make all mutations active.
	for _, mutation := range newTableDesc.Mutations {
		newTableDesc.makeMutationComplete(mutation)
	}
	newTableDesc.Mutations = nil
	if err := newTableDesc.Validate(); err != nil {
		return err
	}

	b := client.Batch{}
	if err := p.backfillBatch(&b, tableDesc, newTableDesc); err != nil {
		return err
	}

	b.Put(MakeDescMetadataKey(newTableDesc.GetID()), wrapDescriptor(newTableDesc))

	if err := p.txn.Run(&b); err != nil {
		return convertBatchError(newTableDesc, b, err)
	}
	p.notifyCompletedSchemaChange(newTableDesc.ID)
	return nil
}
// Future home of the asynchronous schema changer that picks up
// queued schema changes and processes them.
//
// applyMutations applies the queued mutations for a table.
// TODO(vivek): Eliminate the need to pass in tableName.
func (p *planner) applyMutations(tableDesc *TableDescriptor, tableName *parser.QualifiedName) error {
	if len(tableDesc.Mutations) == 0 {
		return nil
	}
	newTableDesc := proto.Clone(tableDesc).(*TableDescriptor)
	// Make all mutations active.
	for _, mutation := range newTableDesc.Mutations {
		newTableDesc.makeMutationComplete(mutation)
	}
	newTableDesc.Mutations = nil
	if err := newTableDesc.Validate(); err != nil {
		return err
	}

	b := client.Batch{}
	if err := p.backfillBatch(&b, tableName, tableDesc, newTableDesc); err != nil {
		return err
	}
	// TODO(pmattis): This is a hack. Remove when schema change operations work
	// properly.
	p.hackNoteSchemaChange(newTableDesc)

	b.Put(MakeDescMetadataKey(newTableDesc.GetID()), wrapDescriptor(newTableDesc))

	if err := p.txn.Run(&b); err != nil {
		return convertBatchError(newTableDesc, b, err)
	}
	return nil
}
// TestAuthentication tests authentication for the KV endpoint.
func TestAuthentication(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()

	var b1 client.Batch
	b1.Put("a", "b")

	// Create a node user client and call Run() on it which lets us build our own
	// request, specifying the user.
	db1 := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.NodeUser)
	if err := db1.Run(&b1); err != nil {
		t.Fatal(err)
	}

	var b2 client.Batch
	b2.Put("c", "d")

	// Try again, but this time with certs for a non-node user (even the root
	// user has no KV permissions).
	db2 := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.RootUser)
	if err := db2.Run(&b2); !testutils.IsError(err, "is not allowed") {
		t.Fatal(err)
	}
}
func runPut(cmd *cobra.Command, args []string) {
	if len(args) == 0 || len(args)%2 == 1 {
		cmd.Usage()
		return
	}

	var b client.Batch
	for i := 0; i < len(args); i += 2 {
		b.Put(
			unquoteArg(args[i], true /* disallow system keys */),
			unquoteArg(args[i+1], false),
		)
	}

	kvDB := makeDBClient()
	if kvDB == nil {
		return
	}

	if err := kvDB.Run(&b); err != nil {
		fmt.Fprintf(osStderr, "put failed: %s\n", err)
		osExit(1)
		return
	}
}
// RenameDatabase renames the database.
// Privileges: security.RootUser user.
// Notes: postgres requires superuser, db owner, or "CREATEDB".
// mysql >= 5.1.23 does not allow database renames.
func (p *planner) RenameDatabase(n *parser.RenameDatabase) (planNode, error) {
	if n.Name == "" || n.NewName == "" {
		return nil, errEmptyDatabaseName
	}

	if p.session.User != security.RootUser {
		return nil, fmt.Errorf("only %s is allowed to rename databases", security.RootUser)
	}

	dbDesc, err := p.getDatabaseDesc(string(n.Name))
	if err != nil {
		return nil, err
	}
	if dbDesc == nil {
		return nil, databaseDoesNotExistError(string(n.Name))
	}

	if n.Name == n.NewName {
		// Noop.
		return &emptyNode{}, nil
	}

	// Now update the nameMetadataKey and the descriptor.
	descKey := sqlbase.MakeDescMetadataKey(dbDesc.GetID())
	dbDesc.SetName(string(n.NewName))

	if err := dbDesc.Validate(); err != nil {
		return nil, err
	}

	newKey := databaseKey{string(n.NewName)}.Key()
	oldKey := databaseKey{string(n.Name)}.Key()
	descID := dbDesc.GetID()
	descDesc := sqlbase.WrapDescriptor(dbDesc)

	b := client.Batch{}
	b.CPut(newKey, descID, nil)
	b.Put(descKey, descDesc)
	b.Del(oldKey)

	if err := p.txn.Run(&b); err != nil {
		if _, ok := err.(*roachpb.ConditionFailedError); ok {
			return nil, fmt.Errorf("the new database name %q already exists", string(n.NewName))
		}
		return nil, err
	}

	p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
		if err := expectDescriptorID(systemConfig, newKey, descID); err != nil {
			return err
		}
		if err := expectDescriptor(systemConfig, descKey, descDesc); err != nil {
			return err
		}
		return expectDeleted(systemConfig, oldKey)
	})

	return &emptyNode{}, nil
}
// flush writes all dirty nodes and the tree to the transaction.
func (tc *treeContext) flush(b *client.Batch) error {
	if tc.dirty {
		b.Put(keys.RangeTreeRoot, tc.tree)
	}
	for _, cachedNode := range tc.nodes {
		if cachedNode.dirty {
			b.Put(keys.RangeTreeNodeKey(cachedNode.node.Key), cachedNode.node)
		}
	}
	return nil
}
// CreateIndex creates an index.
// Privileges: CREATE on table.
// notes: postgres requires CREATE on the table.
// mysql requires INDEX on the table.
func (p *planner) CreateIndex(n *parser.CreateIndex) (planNode, error) {
	tableDesc, err := p.getTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	if _, err := tableDesc.FindIndexByName(string(n.Name)); err == nil {
		if n.IfNotExists {
			// Noop.
			return &valuesNode{}, nil
		}
		return nil, fmt.Errorf("index %q already exists", string(n.Name))
	}

	if err := p.checkPrivilege(tableDesc, privilege.CREATE); err != nil {
		return nil, err
	}

	indexDesc := IndexDescriptor{
		Name:             string(n.Name),
		Unique:           n.Unique,
		ColumnNames:      n.Columns,
		StoreColumnNames: n.Storing,
	}

	newTableDesc := proto.Clone(tableDesc).(*TableDescriptor)

	if err := newTableDesc.AddIndex(indexDesc, false); err != nil {
		return nil, err
	}

	if err := newTableDesc.AllocateIDs(); err != nil {
		return nil, err
	}

	b := client.Batch{}
	if err := p.backfillBatch(&b, n.Table, tableDesc, newTableDesc); err != nil {
		return nil, err
	}

	// TODO(pmattis): This is a hack. Remove when schema change operations work
	// properly.
	p.hackNoteSchemaChange(newTableDesc)

	b.Put(MakeDescMetadataKey(newTableDesc.GetID()), wrapDescriptor(newTableDesc))

	if err := p.txn.Run(&b); err != nil {
		return nil, convertBatchError(newTableDesc, b, err)
	}

	return &valuesNode{}, nil
}
// flush writes all dirty nodes and the tree to the transaction.
func (tc *treeContext) flush(b *client.Batch) {
	if tc.dirty {
		b.Put(keys.RangeTreeRoot, tc.tree)
	}
	for key, cachedNode := range tc.nodes {
		if cachedNode.dirty {
			if cachedNode.node == nil {
				b.Del(keys.RangeTreeNodeKey(roachpb.RKey(key)))
			} else {
				b.Put(keys.RangeTreeNodeKey(roachpb.RKey(key)), cachedNode.node)
			}
		}
	}
}
// DropIndex drops an index.
// Privileges: CREATE on table.
// Notes: postgres allows only the index owner to DROP an index.
// mysql requires the INDEX privilege on the table.
func (p *planner) DropIndex(n *parser.DropIndex) (planNode, error) {
	b := client.Batch{}
	for _, indexQualifiedName := range n.Names {
		if err := indexQualifiedName.NormalizeTableName(p.session.Database); err != nil {
			return nil, err
		}
		tableDesc, err := p.getTableDesc(indexQualifiedName)
		if err != nil {
			return nil, err
		}
		if err := p.checkPrivilege(tableDesc, privilege.CREATE); err != nil {
			return nil, err
		}
		newTableDesc := proto.Clone(tableDesc).(*TableDescriptor)
		idxName := indexQualifiedName.Index()
		i, err := newTableDesc.FindIndexByName(idxName)
		if err != nil {
			if n.IfExists {
				// Noop.
				return &valuesNode{}, nil
			}
			// Index does not exist, but we want it to: error out.
			return nil, err
		}
		newTableDesc.Indexes = append(newTableDesc.Indexes[:i], newTableDesc.Indexes[i+1:]...)
		if err := p.backfillBatch(&b, indexQualifiedName, tableDesc, newTableDesc); err != nil {
			return nil, err
		}
		if err := newTableDesc.Validate(); err != nil {
			return nil, err
		}
		descKey := MakeDescMetadataKey(newTableDesc.GetID())
		b.Put(descKey, wrapDescriptor(newTableDesc))
	}
	if err := p.txn.Run(&b); err != nil {
		return nil, err
	}
	return &valuesNode{}, nil
}
// RenameDatabase renames the database.
// Privileges: "root" user.
// Notes: postgres requires superuser, db owner, or "CREATEDB".
// mysql >= 5.1.23 does not allow database renames.
func (p *planner) RenameDatabase(n *parser.RenameDatabase) (planNode, error) {
	if n.Name == "" || n.NewName == "" {
		return nil, errEmptyDatabaseName
	}

	if p.user != security.RootUser {
		return nil, fmt.Errorf("only %s is allowed to rename databases", security.RootUser)
	}

	dbDesc, err := p.getDatabaseDesc(string(n.Name))
	if err != nil {
		return nil, err
	}

	if n.Name == n.NewName {
		// Noop.
		return &valuesNode{}, nil
	}

	// Now update the nameMetadataKey and the descriptor.
	descKey := MakeDescMetadataKey(dbDesc.GetID())
	dbDesc.SetName(string(n.NewName))

	if err := dbDesc.Validate(); err != nil {
		return nil, err
	}

	b := client.Batch{}
	b.CPut(databaseKey{string(n.NewName)}.Key(), dbDesc.GetID(), nil)
	b.Put(descKey, dbDesc)
	b.Del(databaseKey{string(n.Name)}.Key())

	// Mark transaction as operating on the system DB.
	p.txn.SetSystemDBTrigger()

	if err := p.txn.Run(&b); err != nil {
		if _, ok := err.(*proto.ConditionFailedError); ok {
			return nil, fmt.Errorf("the new database name %q already exists", string(n.NewName))
		}
		return nil, err
	}

	return &valuesNode{}, nil
}
func runPut(cmd *cobra.Command, args []string) {
	if len(args) == 0 || len(args)%2 == 1 {
		mustUsage(cmd)
		return
	}

	var b client.Batch
	for i := 0; i < len(args); i += 2 {
		b.Put(
			unquoteArg(args[i], true /* disallow system keys */),
			unquoteArg(args[i+1], false),
		)
	}

	kvDB, stopper := makeDBClient()
	defer stopper.Stop()

	if err := kvDB.Run(&b); err != nil {
		panicf("put failed: %s", err)
	}
}
// Update updates columns for a selection of rows from a table. // Privileges: UPDATE and SELECT on table. We currently always use a select statement. // Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table. // mysql requires UPDATE. Also requires SELECT with WHERE clause with table. func (p *planner) Update(n *parser.Update) (planNode, error) { tableDesc, err := p.getAliasedTableDesc(n.Table) if err != nil { return nil, err } if err := p.checkPrivilege(tableDesc, privilege.UPDATE); err != nil { return nil, err } // Determine which columns we're inserting into. var names parser.QualifiedNames for _, expr := range n.Exprs { names = append(names, expr.Name) } cols, err := p.processColumns(tableDesc, names) if err != nil { return nil, err } // Set of columns being updated colIDSet := map[ColumnID]struct{}{} for _, c := range cols { colIDSet[c.ID] = struct{}{} } // Don't allow updating any column that is part of the primary key. for i, id := range tableDesc.PrimaryIndex.ColumnIDs { if _, ok := colIDSet[id]; ok { return nil, fmt.Errorf("primary key column %q cannot be updated", tableDesc.PrimaryIndex.ColumnNames[i]) } } // Generate the list of select targets. We need to select all of the columns // plus we select all of the update expressions in case those expressions // reference columns (e.g. "UPDATE t SET v = v + 1"). targets := make(parser.SelectExprs, 0, len(n.Exprs)+1) targets = append(targets, parser.StarSelectExpr()) for _, expr := range n.Exprs { targets = append(targets, parser.SelectExpr{Expr: expr.Expr}) } // Query the rows that need updating. rows, err := p.Select(&parser.Select{ Exprs: targets, From: parser.TableExprs{n.Table}, Where: n.Where, }) if err != nil { return nil, err } // Construct a map from column ID to the index the value appears at within a // row. colIDtoRowIndex := map[ColumnID]int{} for i, col := range tableDesc.Columns { colIDtoRowIndex[col.ID] = i } primaryIndex := tableDesc.PrimaryIndex primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID) // Secondary indexes needing updating. var indexes []IndexDescriptor for _, index := range tableDesc.Indexes { for _, id := range index.ColumnIDs { if _, ok := colIDSet[id]; ok { indexes = append(indexes, index) break } } } // Update all the rows. var b client.Batch for rows.Next() { rowVals := rows.Values() primaryIndexKey, _, err := encodeIndexKey( primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix) if err != nil { return nil, err } // Compute the current secondary index key:value pairs for this row. secondaryIndexEntries, err := encodeSecondaryIndexes( tableDesc.ID, indexes, colIDtoRowIndex, rowVals) if err != nil { return nil, err } // Our updated value expressions occur immediately after the plain // columns in the output. newVals := rowVals[len(tableDesc.Columns):] // Update the row values. for i, col := range cols { val := newVals[i] if !col.Nullable && val == parser.DNull { return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name) } rowVals[colIDtoRowIndex[col.ID]] = val } // Compute the new secondary index key:value pairs for this row. newSecondaryIndexEntries, err := encodeSecondaryIndexes( tableDesc.ID, indexes, colIDtoRowIndex, rowVals) if err != nil { return nil, err } // Update secondary indexes. 
for i, newSecondaryIndexEntry := range newSecondaryIndexEntries { secondaryIndexEntry := secondaryIndexEntries[i] if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) { if log.V(2) { log.Infof("CPut %q -> %v", newSecondaryIndexEntry.key, newSecondaryIndexEntry.value) } b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil) if log.V(2) { log.Infof("Del %q", secondaryIndexEntry.key) } b.Del(secondaryIndexEntry.key) } } // Add the new values. for i, val := range newVals { col := cols[i] primitive, err := convertDatum(col, val) if err != nil { return nil, err } key := MakeColumnKey(col.ID, primaryIndexKey) if primitive != nil { // We only output non-NULL values. Non-existent column keys are // considered NULL during scanning and the row sentinel ensures we know // the row exists. if log.V(2) { log.Infof("Put %q -> %v", key, val) } b.Put(key, primitive) } else { // The column might have already existed but is being set to NULL, so // delete it. if log.V(2) { log.Infof("Del %q", key) } b.Del(key) } } } if err := rows.Err(); err != nil { return nil, err } if err := p.txn.Run(&b); err != nil { return nil, convertBatchError(tableDesc, b, err) } // TODO(tamird/pmattis): return the number of affected rows. return &valuesNode{}, nil }
// updateRow adds to the batch the kv operations necessary to update a table row // with the given values. // // The row corresponding to oldValues is updated with the ones in updateValues. // Note that updateValues only contains the ones that are changing. // // The return value is only good until the next call to UpdateRow. func (ru *rowUpdater) updateRow( b *client.Batch, oldValues []parser.Datum, updateValues []parser.Datum, ) ([]parser.Datum, error) { if len(oldValues) != len(ru.fetchCols) { return nil, util.Errorf("got %d values but expected %d", len(oldValues), len(ru.fetchCols)) } if len(updateValues) != len(ru.updateCols) { return nil, util.Errorf("got %d values but expected %d", len(updateValues), len(ru.updateCols)) } primaryIndexKey, secondaryIndexEntries, err := ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, oldValues) if err != nil { return nil, err } // Check that the new value types match the column types. This needs to // happen before index encoding because certain datum types (i.e. tuple) // cannot be used as index values. for i, val := range updateValues { if ru.marshalled[i], err = sqlbase.MarshalColumnValue(ru.updateCols[i], val); err != nil { return nil, err } } // Update the row values. copy(ru.newValues, oldValues) for i, updateCol := range ru.updateCols { ru.newValues[ru.fetchColIDtoRowIndex[updateCol.ID]] = updateValues[i] } newPrimaryIndexKey := primaryIndexKey rowPrimaryKeyChanged := false var newSecondaryIndexEntries []sqlbase.IndexEntry if ru.primaryKeyColChange { newPrimaryIndexKey, newSecondaryIndexEntries, err = ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, ru.newValues) if err != nil { return nil, err } rowPrimaryKeyChanged = !bytes.Equal(primaryIndexKey, newPrimaryIndexKey) } else { newSecondaryIndexEntries, err = sqlbase.EncodeSecondaryIndexes( ru.helper.tableDesc.ID, ru.helper.indexes, ru.fetchColIDtoRowIndex, ru.newValues) if err != nil { return nil, err } } if rowPrimaryKeyChanged { err := ru.rd.deleteRow(b, oldValues) if err != nil { return nil, err } err = ru.ri.insertRow(b, ru.newValues) return ru.newValues, err } // Update secondary indexes. for i, newSecondaryIndexEntry := range newSecondaryIndexEntries { secondaryIndexEntry := secondaryIndexEntries[i] secondaryKeyChanged := !bytes.Equal(newSecondaryIndexEntry.Key, secondaryIndexEntry.Key) if secondaryKeyChanged { if log.V(2) { log.Infof("Del %s", secondaryIndexEntry.Key) } b.Del(secondaryIndexEntry.Key) // Do not update Indexes in the DELETE_ONLY state. if _, ok := ru.deleteOnlyIndex[i]; !ok { if log.V(2) { log.Infof("CPut %s -> %v", newSecondaryIndexEntry.Key, newSecondaryIndexEntry.Value) } b.CPut(newSecondaryIndexEntry.Key, newSecondaryIndexEntry.Value, nil) } } } // Add the new values. for i, val := range updateValues { col := ru.updateCols[i] if ru.helper.columnInPK(col.ID) { // Skip primary key columns as their values are encoded in the row // sentinel key which is guaranteed to exist for as long as the row // exists. continue } ru.key = keys.MakeColumnKey(newPrimaryIndexKey, uint32(col.ID)) if ru.marshalled[i].RawBytes != nil { // We only output non-NULL values. Non-existent column keys are // considered NULL during scanning and the row sentinel ensures we know // the row exists. if log.V(2) { log.Infof("Put %s -> %v", ru.key, val) } b.Put(&ru.key, &ru.marshalled[i]) } else { // The column might have already existed but is being set to NULL, so // delete it. if log.V(2) { log.Infof("Del %s", ru.key) } b.Del(&ru.key) } ru.key = nil } return ru.newValues, nil }
// RenameTable renames the table.
// Privileges: WRITE on database.
// Notes: postgres requires the table owner.
// mysql requires ALTER, DROP on the original table, and CREATE, INSERT
// on the new table (and does not copy privileges over).
func (p *planner) RenameTable(n *parser.RenameTable) (planNode, error) {
	if n.NewName == "" {
		return nil, errEmptyTableName
	}
	if err := n.Name.NormalizeTableName(p.session.Database); err != nil {
		return nil, err
	}
	if n.Name.Table() == string(n.NewName) {
		// Noop.
		return &valuesNode{}, nil
	}

	dbDesc, err := p.getDatabaseDesc(n.Name.Database())
	if err != nil {
		return nil, err
	}

	tbKey := tableKey{dbDesc.ID, string(n.Name.Table())}.Key()

	// Check if table exists.
	gr, err := p.txn.Get(tbKey)
	if err != nil {
		return nil, err
	}
	if !gr.Exists() {
		if n.IfExists {
			// Noop.
			return &valuesNode{}, nil
		}
		// Key does not exist, but we want it to: error out.
		return nil, fmt.Errorf("table %q does not exist", n.Name.Table())
	}

	if err := p.checkPrivilege(dbDesc, privilege.WRITE); err != nil {
		return nil, err
	}

	tableDesc, err := p.getTableDesc(n.Name)
	if err != nil {
		return nil, err
	}

	tableDesc.SetName(string(n.NewName))
	newTbKey := tableKey{dbDesc.ID, string(n.NewName)}.Key()
	descKey := MakeDescMetadataKey(tableDesc.GetID())

	b := client.Batch{}
	b.Put(descKey, tableDesc)
	b.CPut(newTbKey, descKey, nil)
	b.Del(tbKey)

	if err := p.txn.Run(&b); err != nil {
		if _, ok := err.(*proto.ConditionFailedError); ok {
			return nil, fmt.Errorf("table name %q already exists", n.NewName)
		}
		return nil, err
	}

	return &valuesNode{}, nil
}
// Insert inserts rows into the database.
func (p *planner) Insert(n *parser.Insert) (planNode, error) {
	desc, err := p.getTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	// Determine which columns we're inserting into.
	cols, err := p.processColumns(desc, n.Columns)
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colMap := map[uint32]int{}
	for i, c := range cols {
		colMap[c.ID] = i
	}

	// Verify we have at least the columns that are part of the primary key.
	for i, id := range desc.Indexes[0].ColumnIDs {
		if _, ok := colMap[id]; !ok {
			return nil, fmt.Errorf("missing \"%s\" primary key column", desc.Indexes[0].ColumnNames[i])
		}
	}

	// Transform the values into a rows object. This expands SELECT statements or
	// generates rows from the values contained within the query.
	rows, err := p.makePlan(n.Rows)
	if err != nil {
		return nil, err
	}

	b := client.Batch{}
	for rows.Next() {
		values := rows.Values()
		if len(values) != len(cols) {
			return nil, fmt.Errorf("invalid values for columns: %d != %d", len(values), len(cols))
		}

		indexKey := encodeIndexKeyPrefix(desc.ID, desc.Indexes[0].ID)
		primaryKey, err := encodeIndexKey(desc.Indexes[0], colMap, values, indexKey)
		if err != nil {
			return nil, err
		}

		for i, val := range values {
			key := encodeColumnKey(cols[i], primaryKey)
			if log.V(2) {
				log.Infof("Put %q -> %v", key, val)
			}
			// TODO(pmattis): Need to convert the value type to the column type.
			switch t := val.(type) {
			case parser.DBool:
				b.Put(key, bool(t))
			case parser.DInt:
				b.Put(key, int64(t))
			case parser.DFloat:
				b.Put(key, float64(t))
			case parser.DString:
				b.Put(key, string(t))
			}
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}

	if err := p.db.Run(&b); err != nil {
		return nil, err
	}

	// TODO(tamird/pmattis): return the number of affected rows
	return &valuesNode{}, nil
}
// AlterTable creates a table. // Privileges: CREATE on table. // notes: postgres requires CREATE on the table. // mysql requires ALTER, CREATE, INSERT on the table. func (p *planner) AlterTable(n *parser.AlterTable) (planNode, error) { if err := n.Table.NormalizeTableName(p.session.Database); err != nil { return nil, err } dbDesc, err := p.getDatabaseDesc(n.Table.Database()) if err != nil { return nil, err } // Check if table exists. tbKey := tableKey{dbDesc.ID, n.Table.Table()}.Key() gr, err := p.txn.Get(tbKey) if err != nil { return nil, err } if !gr.Exists() { if n.IfExists { // Noop. return &valuesNode{}, nil } // Key does not exist, but we want it to: error out. return nil, fmt.Errorf("table %q does not exist", n.Table.Table()) } tableDesc, err := p.getTableDesc(n.Table) if err != nil { return nil, err } if err := p.checkPrivilege(tableDesc, privilege.CREATE); err != nil { return nil, err } newTableDesc := proto.Clone(tableDesc).(*TableDescriptor) b := client.Batch{} for _, cmd := range n.Cmds { switch t := cmd.(type) { case *parser.AlterTableAddColumn: d := t.ColumnDef col, idx, err := makeColumnDefDescs(d) if err != nil { return nil, err } newTableDesc.AddColumn(*col) if idx != nil { if err := newTableDesc.AddIndex(*idx, d.PrimaryKey); err != nil { return nil, err } } case *parser.AlterTableAddConstraint: switch d := t.ConstraintDef.(type) { case *parser.UniqueConstraintTableDef: idx := IndexDescriptor{ Name: string(d.Name), Unique: true, ColumnNames: d.Columns, StoreColumnNames: d.Storing, } if err := newTableDesc.AddIndex(idx, d.PrimaryKey); err != nil { return nil, err } default: return nil, util.Errorf("unsupported constraint: %T", t.ConstraintDef) } case *parser.AlterTableDropColumn: i, err := newTableDesc.FindColumnByName(t.Column) if err != nil { if t.IfExists { // Noop. continue } return nil, err } col := newTableDesc.Columns[i] if newTableDesc.PrimaryIndex.containsColumnID(col.ID) { return nil, fmt.Errorf("column %q is referenced by the primary key", col.Name) } for _, idx := range newTableDesc.Indexes { if idx.containsColumnID(col.ID) { return nil, fmt.Errorf("column %q is referenced by existing index %q", col.Name, idx.Name) } } newTableDesc.Columns = append(newTableDesc.Columns[:i], newTableDesc.Columns[i+1:]...) case *parser.AlterTableDropConstraint: i, err := newTableDesc.FindIndexByName(t.Constraint) if err != nil { if t.IfExists { // Noop. continue } return nil, err } newTableDesc.Indexes = append(newTableDesc.Indexes[:i], newTableDesc.Indexes[i+1:]...) default: return nil, util.Errorf("unsupported alter cmd: %T", cmd) } } if err := newTableDesc.AllocateIDs(); err != nil { return nil, err } if err := p.backfillBatch(&b, n.Table, tableDesc, newTableDesc); err != nil { return nil, err } // TODO(pmattis): This is a hack. Remove when schema change operations work // properly. p.hackNoteSchemaChange(newTableDesc) b.Put(MakeDescMetadataKey(newTableDesc.GetID()), wrapDescriptor(newTableDesc)) if err := p.txn.Run(&b); err != nil { return nil, convertBatchError(newTableDesc, b, err) } return &valuesNode{}, nil }
// RenameTable renames the table. // Privileges: DROP on source table, CREATE on destination database. // Notes: postgres requires the table owner. // mysql requires ALTER, DROP on the original table, and CREATE, INSERT // on the new table (and does not copy privileges over). func (p *planner) RenameTable(n *parser.RenameTable) (planNode, error) { if err := n.NewName.NormalizeTableName(p.session.Database); err != nil { return nil, err } if n.NewName.Table() == "" { return nil, errEmptyTableName } if err := n.Name.NormalizeTableName(p.session.Database); err != nil { return nil, err } dbDesc, err := p.getDatabaseDesc(n.Name.Database()) if err != nil { return nil, err } if dbDesc == nil { return nil, sqlbase.NewUndefinedDatabaseError(n.Name.Database()) } tbKey := tableKey{dbDesc.ID, n.Name.Table()}.Key() // Check if table exists. gr, err := p.txn.Get(tbKey) if err != nil { return nil, err } if !gr.Exists() { if n.IfExists { // Noop. return &emptyNode{}, nil } // Key does not exist, but we want it to: error out. return nil, fmt.Errorf("table %q does not exist", n.Name.Table()) } targetDbDesc, err := p.getDatabaseDesc(n.NewName.Database()) if err != nil { return nil, err } if targetDbDesc == nil { return nil, sqlbase.NewUndefinedDatabaseError(n.NewName.Database()) } if err := p.checkPrivilege(targetDbDesc, privilege.CREATE); err != nil { return nil, err } if n.Name.Database() == n.NewName.Database() && n.Name.Table() == n.NewName.Table() { // Noop. return &emptyNode{}, nil } tableDesc, err := p.getTableDesc(n.Name) if err != nil { return nil, err } if tableDesc == nil || tableDesc.State != sqlbase.TableDescriptor_PUBLIC { return nil, sqlbase.NewUndefinedTableError(n.Name.String()) } if err := p.checkPrivilege(tableDesc, privilege.DROP); err != nil { return nil, err } tableDesc.SetName(n.NewName.Table()) tableDesc.ParentID = targetDbDesc.ID descKey := sqlbase.MakeDescMetadataKey(tableDesc.GetID()) newTbKey := tableKey{targetDbDesc.ID, n.NewName.Table()}.Key() if err := tableDesc.Validate(); err != nil { return nil, err } descID := tableDesc.GetID() descDesc := sqlbase.WrapDescriptor(tableDesc) if err := tableDesc.SetUpVersion(); err != nil { return nil, err } renameDetails := sqlbase.TableDescriptor_RenameInfo{ OldParentID: uint32(dbDesc.ID), OldName: n.Name.Table()} tableDesc.Renames = append(tableDesc.Renames, renameDetails) if err := p.writeTableDesc(tableDesc); err != nil { return nil, err } // We update the descriptor to the new name, but also leave the mapping of the // old name to the id, so that the name is not reused until the schema changer // has made sure it's not in use any more. b := client.Batch{} b.Put(descKey, descDesc) b.CPut(newTbKey, descID, nil) if err := p.txn.Run(&b); err != nil { if _, ok := err.(*roachpb.ConditionFailedError); ok { return nil, fmt.Errorf("table name %q already exists", n.NewName.Table()) } return nil, err } p.notifySchemaChange(tableDesc.ID, sqlbase.InvalidMutationID) p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error { if err := expectDescriptorID(systemConfig, newTbKey, descID); err != nil { return err } if err := expectDescriptor(systemConfig, descKey, descDesc); err != nil { return err } return nil }) return &emptyNode{}, nil }
// insertPutFn is used by insertRow when conflicts should be ignored.
// logValue is used for pretty printing.
func insertPutFn(b *client.Batch, key *roachpb.Key, value interface{}, logValue interface{}) {
	if log.V(2) {
		log.InfofDepth(1, "Put %s -> %v", *key, logValue)
	}
	b.Put(key, value)
}
// RenameTable renames the table. // Privileges: DROP on source table, CREATE on destination database. // Notes: postgres requires the table owner. // mysql requires ALTER, DROP on the original table, and CREATE, INSERT // on the new table (and does not copy privileges over). func (p *planner) RenameTable(n *parser.RenameTable) (planNode, *roachpb.Error) { if err := n.NewName.NormalizeTableName(p.session.Database); err != nil { return nil, roachpb.NewError(err) } if n.NewName.Table() == "" { return nil, roachpb.NewError(errEmptyTableName) } if err := n.Name.NormalizeTableName(p.session.Database); err != nil { return nil, roachpb.NewError(err) } dbDesc, pErr := p.getDatabaseDesc(n.Name.Database()) if pErr != nil { return nil, pErr } tbKey := tableKey{dbDesc.ID, n.Name.Table()}.Key() // Check if table exists. gr, pErr := p.txn.Get(tbKey) if pErr != nil { return nil, pErr } if !gr.Exists() { if n.IfExists { // Noop. return &valuesNode{}, nil } // Key does not exist, but we want it to: error out. return nil, roachpb.NewUErrorf("table %q does not exist", n.Name.Table()) } targetDbDesc, pErr := p.getDatabaseDesc(n.NewName.Database()) if pErr != nil { return nil, pErr } if pErr := p.checkPrivilege(targetDbDesc, privilege.CREATE); pErr != nil { return nil, pErr } if n.Name.Database() == n.NewName.Database() && n.Name.Table() == n.NewName.Table() { // Noop. return &valuesNode{}, nil } tableDesc, pErr := p.getTableDesc(n.Name) if pErr != nil { return nil, pErr } if pErr := p.checkPrivilege(tableDesc, privilege.DROP); pErr != nil { return nil, pErr } tableDesc.SetName(n.NewName.Table()) tableDesc.ParentID = targetDbDesc.ID descKey := MakeDescMetadataKey(tableDesc.GetID()) newTbKey := tableKey{targetDbDesc.ID, n.NewName.Table()}.Key() if err := tableDesc.Validate(); err != nil { return nil, roachpb.NewError(err) } descID := tableDesc.GetID() descDesc := wrapDescriptor(tableDesc) b := client.Batch{} b.Put(descKey, descDesc) b.CPut(newTbKey, descID, nil) b.Del(tbKey) if pErr := p.txn.Run(&b); pErr != nil { if _, ok := pErr.GoError().(*roachpb.ConditionFailedError); ok { return nil, roachpb.NewUErrorf("table name %q already exists", n.NewName.Table()) } return nil, pErr } p.testingVerifyMetadata = func(systemConfig config.SystemConfig) error { if err := expectDescriptorID(systemConfig, newTbKey, descID); err != nil { return err } if err := expectDescriptor(systemConfig, descKey, descDesc); err != nil { return err } return expectDeleted(systemConfig, tbKey) } return &valuesNode{}, nil }
func putMeta(b *client.Batch, key proto.Key, desc *proto.RangeDescriptor) {
	b.Put(key, desc)
}
// Update updates columns for a selection of rows from a table. // Privileges: UPDATE and SELECT on table. We currently always use a select statement. // Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table. // mysql requires UPDATE. Also requires SELECT with WHERE clause with table. func (p *planner) Update(n *parser.Update) (planNode, *roachpb.Error) { tableDesc, pErr := p.getAliasedTableLease(n.Table) if pErr != nil { return nil, pErr } if err := p.checkPrivilege(tableDesc, privilege.UPDATE); err != nil { return nil, roachpb.NewError(err) } // Determine which columns we're inserting into. var names parser.QualifiedNames for _, expr := range n.Exprs { var epErr *roachpb.Error expr.Expr, epErr = p.expandSubqueries(expr.Expr, len(expr.Names)) if epErr != nil { return nil, epErr } if expr.Tuple { // TODO(pmattis): The distinction between Tuple and DTuple here is // irritating. We'll see a DTuple if the expression was a subquery that // has been evaluated. We'll see a Tuple in other cases. n := 0 switch t := expr.Expr.(type) { case parser.Tuple: n = len(t) case parser.DTuple: n = len(t) default: return nil, roachpb.NewErrorf("unsupported tuple assignment: %T", expr.Expr) } if len(expr.Names) != n { return nil, roachpb.NewUErrorf("number of columns (%d) does not match number of values (%d)", len(expr.Names), n) } } names = append(names, expr.Names...) } cols, err := p.processColumns(tableDesc, names) if err != nil { return nil, roachpb.NewError(err) } // Set of columns being updated colIDSet := map[ColumnID]struct{}{} for _, c := range cols { colIDSet[c.ID] = struct{}{} } // Don't allow updating any column that is part of the primary key. for i, id := range tableDesc.PrimaryIndex.ColumnIDs { if _, ok := colIDSet[id]; ok { return nil, roachpb.NewUErrorf("primary key column %q cannot be updated", tableDesc.PrimaryIndex.ColumnNames[i]) } } defaultExprs, err := p.makeDefaultExprs(cols) if err != nil { return nil, roachpb.NewError(err) } // Generate the list of select targets. We need to select all of the columns // plus we select all of the update expressions in case those expressions // reference columns (e.g. "UPDATE t SET v = v + 1"). Note that we flatten // expressions for tuple assignments just as we flattened the column names // above. So "UPDATE t SET (a, b) = (1, 2)" translates into select targets of // "*, 1, 2", not "*, (1, 2)". targets := tableDesc.allColumnsSelector() i := 0 for _, expr := range n.Exprs { if expr.Tuple { switch t := expr.Expr.(type) { case parser.Tuple: for _, e := range t { e = fillDefault(e, i, defaultExprs) targets = append(targets, parser.SelectExpr{Expr: e}) i++ } case parser.DTuple: for _, e := range t { targets = append(targets, parser.SelectExpr{Expr: e}) i++ } } } else { e := fillDefault(expr.Expr, i, defaultExprs) targets = append(targets, parser.SelectExpr{Expr: e}) i++ } } // Query the rows that need updating. rows, pErr := p.Select(&parser.Select{ Exprs: targets, From: parser.TableExprs{n.Table}, Where: n.Where, }) if pErr != nil { return nil, pErr } // ValArgs have their types populated in the above Select if they are part // of an expression ("SET a = 2 + $1") in the type check step where those // types are inferred. For the simpler case ("SET a = $1"), populate them // using marshalColumnValue. This step also verifies that the expression // types match the column types. 
if p.prepareOnly { i := 0 f := func(expr parser.Expr) *roachpb.Error { idx := i i++ // DefaultVal doesn't implement TypeCheck if _, ok := expr.(parser.DefaultVal); ok { return nil } d, err := expr.TypeCheck(p.evalCtx.Args) if err != nil { return roachpb.NewError(err) } if _, err := marshalColumnValue(cols[idx], d, p.evalCtx.Args); err != nil { return roachpb.NewError(err) } return nil } for _, expr := range n.Exprs { if expr.Tuple { switch t := expr.Expr.(type) { case parser.Tuple: for _, e := range t { if err := f(e); err != nil { return nil, err } } case parser.DTuple: for _, e := range t { if err := f(e); err != nil { return nil, err } } } } else { if err := f(expr.Expr); err != nil { return nil, err } } } return nil, nil } // Construct a map from column ID to the index the value appears at within a // row. colIDtoRowIndex := map[ColumnID]int{} for i, col := range tableDesc.Columns { colIDtoRowIndex[col.ID] = i } primaryIndex := tableDesc.PrimaryIndex primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID) // Secondary indexes needing updating. needsUpdate := func(index IndexDescriptor) bool { for _, id := range index.ColumnIDs { if _, ok := colIDSet[id]; ok { return true } } return false } indexes := make([]IndexDescriptor, 0, len(tableDesc.Indexes)+len(tableDesc.Mutations)) var deleteOnlyIndex map[int]struct{} for _, index := range tableDesc.Indexes { if needsUpdate(index) { indexes = append(indexes, index) } } for _, m := range tableDesc.Mutations { if index := m.GetIndex(); index != nil { if needsUpdate(*index) { indexes = append(indexes, *index) switch m.State { case DescriptorMutation_DELETE_ONLY: if deleteOnlyIndex == nil { // Allocate at most once. deleteOnlyIndex = make(map[int]struct{}, len(tableDesc.Mutations)) } deleteOnlyIndex[len(indexes)-1] = struct{}{} case DescriptorMutation_WRITE_ONLY: } } } } marshalled := make([]interface{}, len(cols)) b := client.Batch{} result := &valuesNode{} for rows.Next() { rowVals := rows.Values() result.rows = append(result.rows, parser.DTuple(nil)) primaryIndexKey, _, err := encodeIndexKey( &primaryIndex, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix) if err != nil { return nil, roachpb.NewError(err) } // Compute the current secondary index key:value pairs for this row. secondaryIndexEntries, err := encodeSecondaryIndexes( tableDesc.ID, indexes, colIDtoRowIndex, rowVals) if err != nil { return nil, roachpb.NewError(err) } // Our updated value expressions occur immediately after the plain // columns in the output. newVals := rowVals[len(tableDesc.Columns):] // Update the row values. for i, col := range cols { val := newVals[i] if !col.Nullable && val == parser.DNull { return nil, roachpb.NewUErrorf("null value in column %q violates not-null constraint", col.Name) } rowVals[colIDtoRowIndex[col.ID]] = val } // Check that the new value types match the column types. This needs to // happen before index encoding because certain datum types (i.e. tuple) // cannot be used as index values. for i, val := range newVals { var mErr error if marshalled[i], mErr = marshalColumnValue(cols[i], val, p.evalCtx.Args); mErr != nil { return nil, roachpb.NewError(mErr) } } // Compute the new secondary index key:value pairs for this row. newSecondaryIndexEntries, eErr := encodeSecondaryIndexes( tableDesc.ID, indexes, colIDtoRowIndex, rowVals) if eErr != nil { return nil, roachpb.NewError(eErr) } // Update secondary indexes. 
for i, newSecondaryIndexEntry := range newSecondaryIndexEntries { secondaryIndexEntry := secondaryIndexEntries[i] if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) { // Do not update Indexes in the DELETE_ONLY state. if _, ok := deleteOnlyIndex[i]; !ok { if log.V(2) { log.Infof("CPut %s -> %v", newSecondaryIndexEntry.key, newSecondaryIndexEntry.value) } b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil) } if log.V(2) { log.Infof("Del %s", secondaryIndexEntry.key) } b.Del(secondaryIndexEntry.key) } } // Add the new values. for i, val := range newVals { col := cols[i] key := keys.MakeColumnKey(primaryIndexKey, uint32(col.ID)) if marshalled[i] != nil { // We only output non-NULL values. Non-existent column keys are // considered NULL during scanning and the row sentinel ensures we know // the row exists. if log.V(2) { log.Infof("Put %s -> %v", key, val) } b.Put(key, marshalled[i]) } else { // The column might have already existed but is being set to NULL, so // delete it. if log.V(2) { log.Infof("Del %s", key) } b.Del(key) } } } if pErr := rows.PErr(); pErr != nil { return nil, pErr } if pErr := p.txn.Run(&b); pErr != nil { return nil, convertBatchError(tableDesc, b, pErr) } return result, nil }
// CreateIndex creates an index. // Privileges: CREATE on table. // notes: postgres requires CREATE on the table. // mysql requires ALTER, CREATE, INSERT on the table. func (p *planner) CreateIndex(n *parser.CreateIndex) (planNode, error) { tableDesc, err := p.getTableDesc(n.Table) if err != nil { return nil, err } if _, err := tableDesc.FindIndexByName(string(n.Name)); err == nil { if n.IfNotExists { // Noop. return &valuesNode{}, nil } return nil, fmt.Errorf("index %q already exists", string(n.Name)) } if err := p.checkPrivilege(tableDesc, privilege.CREATE); err != nil { return nil, err } index := IndexDescriptor{ Name: string(n.Name), Unique: n.Unique, ColumnNames: n.Columns, } tableDesc.Indexes = append(tableDesc.Indexes, index) if err := tableDesc.AllocateIDs(); err != nil { return nil, err } // `index` changed on us when we called `tableDesc.AllocateIDs()`. index = tableDesc.Indexes[len(tableDesc.Indexes)-1] // Get all the rows affected. // TODO(vivek): Avoid going through Select. // TODO(tamird): Support partial indexes? row, err := p.Select(&parser.Select{ Exprs: parser.SelectExprs{parser.StarSelectExpr()}, From: parser.TableExprs{&parser.AliasedTableExpr{Expr: n.Table}}, }) if err != nil { return nil, err } // Construct a map from column ID to the index the value appears at within a // row. colIDtoRowIndex := map[ColumnID]int{} for i, name := range row.Columns() { c, err := tableDesc.FindColumnByName(name) if err != nil { return nil, err } colIDtoRowIndex[c.ID] = i } // TODO(tamird): This will fall down in production use. We need to do // something better (see #2036). In particular, this implementation // has the following problems: // - Very large tables will generate an enormous batch here. This // isn't really a problem in itself except that it will exacerbate // the other issue: // - Any non-quiescent table that this runs against will end up with // an inconsistent index. This is because as inserts/updates continue // to roll in behind this operation's read front, the written index // will become incomplete/stale before it's written. var b client.Batch b.Put(MakeDescMetadataKey(tableDesc.GetID()), tableDesc) for row.Next() { rowVals := row.Values() secondaryIndexEntries, err := encodeSecondaryIndexes( tableDesc.ID, []IndexDescriptor{index}, colIDtoRowIndex, rowVals) if err != nil { return nil, err } for _, secondaryIndexEntry := range secondaryIndexEntries { if log.V(2) { log.Infof("CPut %q -> %v", secondaryIndexEntry.key, secondaryIndexEntry.value) } b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil) } } if err := row.Err(); err != nil { return nil, err } if err := p.txn.Run(&b); err != nil { if tErr, ok := err.(*proto.ConditionFailedError); ok { return nil, fmt.Errorf("duplicate key value %q violates unique constraint %s", tErr.ActualValue.Bytes, "TODO(tamird)") } return nil, err } return &valuesNode{}, nil }
// Update updates columns for a selection of rows from a table. // Privileges: WRITE and READ on table. We currently always use a select statement. // Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table. // mysql requires UPDATE. Also requires SELECT with WHERE clause with table. func (p *planner) Update(n *parser.Update) (planNode, error) { tableDesc, err := p.getAliasedTableDesc(n.Table) if err != nil { return nil, err } if !tableDesc.HasPrivilege(p.user, parser.PrivilegeWrite) { return nil, fmt.Errorf("user %s does not have %s privilege on table %s", p.user, parser.PrivilegeWrite, tableDesc.Name) } // Determine which columns we're inserting into. var names parser.QualifiedNames for _, expr := range n.Exprs { names = append(names, expr.Name) } cols, err := p.processColumns(tableDesc, names) if err != nil { return nil, err } // Set of columns being updated colIDSet := map[structured.ColumnID]struct{}{} for _, c := range cols { colIDSet[c.ID] = struct{}{} } // Don't allow updating any column that is part of the primary key. for i, id := range tableDesc.PrimaryIndex.ColumnIDs { if _, ok := colIDSet[id]; ok { return nil, fmt.Errorf("primary key column %q cannot be updated", tableDesc.PrimaryIndex.ColumnNames[i]) } } // Query the rows that need updating. // TODO(vivek): Avoid going through Select. row, err := p.Select(&parser.Select{ Exprs: parser.SelectExprs{parser.StarSelectExpr}, From: parser.TableExprs{n.Table}, Where: n.Where, }) if err != nil { return nil, err } // Construct a map from column ID to the index the value appears at within a // row. colIDtoRowIndex := map[structured.ColumnID]int{} for i, name := range row.Columns() { c, err := tableDesc.FindColumnByName(name) if err != nil { return nil, err } colIDtoRowIndex[c.ID] = i } primaryIndex := tableDesc.PrimaryIndex primaryIndexKeyPrefix := structured.MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID) // Evaluate all the column value expressions. vals := make([]parser.Datum, 0, 10) for _, expr := range n.Exprs { val, err := parser.EvalExpr(expr.Expr) if err != nil { return nil, err } vals = append(vals, val) } // Secondary indexes needing updating. var indexes []structured.IndexDescriptor for _, index := range tableDesc.Indexes { for _, id := range index.ColumnIDs { if _, ok := colIDSet[id]; ok { indexes = append(indexes, index) break } } } // Update all the rows. b := client.Batch{} for row.Next() { rowVals := row.Values() primaryIndexKeySuffix, _, err := encodeIndexKey(primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, nil) if err != nil { return nil, err } primaryIndexKey := bytes.Join([][]byte{primaryIndexKeyPrefix, primaryIndexKeySuffix}, nil) // Compute the current secondary index key:value pairs for this row. secondaryIndexEntries, err := encodeSecondaryIndexes(tableDesc.ID, indexes, colIDtoRowIndex, rowVals, primaryIndexKeySuffix) if err != nil { return nil, err } // Compute the new secondary index key:value pairs for this row. // // Update the row values. for i, col := range cols { val := vals[i] if !col.Nullable && val == parser.DNull { return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name) } rowVals[colIDtoRowIndex[col.ID]] = val } newSecondaryIndexEntries, err := encodeSecondaryIndexes(tableDesc.ID, indexes, colIDtoRowIndex, rowVals, primaryIndexKeySuffix) if err != nil { return nil, err } // Update secondary indexes. 
for i, newSecondaryIndexEntry := range newSecondaryIndexEntries { secondaryIndexEntry := secondaryIndexEntries[i] if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) { if log.V(2) { log.Infof("CPut %q -> %v", newSecondaryIndexEntry.key, newSecondaryIndexEntry.value) } b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil) if log.V(2) { log.Infof("Del %q", secondaryIndexEntry.key) } b.Del(secondaryIndexEntry.key) } } // Add the new values. for i, val := range vals { col := cols[i] primitive, err := convertDatum(col, val) if err != nil { return nil, err } key := structured.MakeColumnKey(col.ID, primaryIndexKey) if primitive != nil { // We only output non-NULL values. Non-existent column keys are // considered NULL during scanning and the row sentinel ensures we know // the row exists. if log.V(2) { log.Infof("Put %q -> %v", key, val) } b.Put(key, primitive) } else { // The column might have already existed but is being set to NULL, so // delete it. if log.V(2) { log.Infof("Del %q", key) } b.Del(key) } } } if err := row.Err(); err != nil { return nil, err } if err := p.txn.Run(&b); err != nil { if tErr, ok := err.(*proto.ConditionFailedError); ok { return nil, fmt.Errorf("duplicate key value %q violates unique constraint %s", tErr.ActualValue.Bytes, "TODO(tamird)") } return nil, err } // TODO(tamird/pmattis): return the number of affected rows. return &valuesNode{}, nil }
// Update updates columns for a selection of rows from a table. // Privileges: UPDATE and SELECT on table. We currently always use a select statement. // Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table. // mysql requires UPDATE. Also requires SELECT with WHERE clause with table. func (p *planner) Update(n *parser.Update) (planNode, error) { tableDesc, err := p.getAliasedTableDesc(n.Table) if err != nil { return nil, err } if err := p.checkPrivilege(tableDesc, privilege.UPDATE); err != nil { return nil, err } // Determine which columns we're inserting into. var names parser.QualifiedNames for _, expr := range n.Exprs { var err error expr.Expr, err = p.expandSubqueries(expr.Expr, len(expr.Names)) if err != nil { return nil, err } if expr.Tuple { // TODO(pmattis): The distinction between Tuple and DTuple here is // irritating. We'll see a DTuple if the expression was a subquery that // has been evaluated. We'll see a Tuple in other cases. n := 0 switch t := expr.Expr.(type) { case parser.Tuple: n = len(t) case parser.DTuple: n = len(t) default: return nil, util.Errorf("unsupported tuple assignment: %T", expr.Expr) } if len(expr.Names) != n { return nil, fmt.Errorf("number of columns (%d) does not match number of values (%d)", len(expr.Names), n) } } names = append(names, expr.Names...) } cols, err := p.processColumns(tableDesc, names) if err != nil { return nil, err } // Set of columns being updated colIDSet := map[ColumnID]struct{}{} for _, c := range cols { colIDSet[c.ID] = struct{}{} } // Don't allow updating any column that is part of the primary key. for i, id := range tableDesc.PrimaryIndex.ColumnIDs { if _, ok := colIDSet[id]; ok { return nil, fmt.Errorf("primary key column %q cannot be updated", tableDesc.PrimaryIndex.ColumnNames[i]) } } defaultExprs, err := p.makeDefaultExprs(cols) if err != nil { return nil, err } // Generate the list of select targets. We need to select all of the columns // plus we select all of the update expressions in case those expressions // reference columns (e.g. "UPDATE t SET v = v + 1"). Note that we flatten // expressions for tuple assignments just as we flattened the column names // above. So "UPDATE t SET (a, b) = (1, 2)" translates into select targets of // "*, 1, 2", not "*, (1, 2)". targets := make(parser.SelectExprs, 0, len(n.Exprs)+1) targets = append(targets, parser.StarSelectExpr()) for _, expr := range n.Exprs { if expr.Tuple { switch t := expr.Expr.(type) { case parser.Tuple: for i, e := range t { e, err := fillDefault(e, i, defaultExprs) if err != nil { return nil, err } targets = append(targets, parser.SelectExpr{Expr: e}) } case parser.DTuple: for _, e := range t { targets = append(targets, parser.SelectExpr{Expr: e}) } } } else { e, err := fillDefault(expr.Expr, 0, defaultExprs) if err != nil { return nil, err } targets = append(targets, parser.SelectExpr{Expr: e}) } } // Query the rows that need updating. rows, err := p.Select(&parser.Select{ Exprs: targets, From: parser.TableExprs{n.Table}, Where: n.Where, }) if err != nil { return nil, err } // Construct a map from column ID to the index the value appears at within a // row. colIDtoRowIndex := map[ColumnID]int{} for i, col := range tableDesc.Columns { colIDtoRowIndex[col.ID] = i } primaryIndex := tableDesc.PrimaryIndex primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID) // Secondary indexes needing updating. 
var indexes []IndexDescriptor for _, index := range tableDesc.Indexes { for _, id := range index.ColumnIDs { if _, ok := colIDSet[id]; ok { indexes = append(indexes, index) break } } } marshalled := make([]interface{}, len(cols)) // Update all the rows. var b client.Batch for rows.Next() { rowVals := rows.Values() primaryIndexKey, _, err := encodeIndexKey( primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix) if err != nil { return nil, err } // Compute the current secondary index key:value pairs for this row. secondaryIndexEntries, err := encodeSecondaryIndexes( tableDesc.ID, indexes, colIDtoRowIndex, rowVals) if err != nil { return nil, err } // Our updated value expressions occur immediately after the plain // columns in the output. newVals := rowVals[len(tableDesc.Columns):] // Update the row values. for i, col := range cols { val := newVals[i] if !col.Nullable && val == parser.DNull { return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name) } rowVals[colIDtoRowIndex[col.ID]] = val } // Check that the new value types match the column types. This needs to // happen before index encoding because certain datum types (i.e. tuple) // cannot be used as index values. for i, val := range newVals { var err error if marshalled[i], err = marshalColumnValue(cols[i], val); err != nil { return nil, err } } // Compute the new secondary index key:value pairs for this row. newSecondaryIndexEntries, err := encodeSecondaryIndexes( tableDesc.ID, indexes, colIDtoRowIndex, rowVals) if err != nil { return nil, err } // Update secondary indexes. for i, newSecondaryIndexEntry := range newSecondaryIndexEntries { secondaryIndexEntry := secondaryIndexEntries[i] if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) { if log.V(2) { log.Infof("CPut %q -> %v", newSecondaryIndexEntry.key, newSecondaryIndexEntry.value) } b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil) if log.V(2) { log.Infof("Del %q", secondaryIndexEntry.key) } b.Del(secondaryIndexEntry.key) } } // Add the new values. for i, val := range newVals { col := cols[i] key := MakeColumnKey(col.ID, primaryIndexKey) if marshalled[i] != nil { // We only output non-NULL values. Non-existent column keys are // considered NULL during scanning and the row sentinel ensures we know // the row exists. if log.V(2) { log.Infof("Put %q -> %v", key, val) } b.Put(key, marshalled[i]) } else { // The column might have already existed but is being set to NULL, so // delete it. if log.V(2) { log.Infof("Del %q", key) } b.Del(key) } } } if err := rows.Err(); err != nil { return nil, err } if err := p.txn.Run(&b); err != nil { return nil, convertBatchError(tableDesc, b, err) } // TODO(tamird/pmattis): return the number of affected rows. return &valuesNode{}, nil }
// RenameTable renames the table.
// Privileges: DROP on source table, CREATE on destination database.
// Notes: postgres requires the table owner.
// mysql requires ALTER, DROP on the original table, and CREATE, INSERT
// on the new table (and does not copy privileges over).
func (p *planner) RenameTable(n *parser.RenameTable) (planNode, error) {
	if err := n.NewName.NormalizeTableName(p.session.Database); err != nil {
		return nil, err
	}

	if n.NewName.Table() == "" {
		return nil, errEmptyTableName
	}

	if err := n.Name.NormalizeTableName(p.session.Database); err != nil {
		return nil, err
	}

	dbDesc, err := p.getDatabaseDesc(n.Name.Database())
	if err != nil {
		return nil, err
	}

	tbKey := tableKey{dbDesc.ID, n.Name.Table()}.Key()

	// Check if table exists.
	gr, err := p.txn.Get(tbKey)
	if err != nil {
		return nil, err
	}
	if !gr.Exists() {
		if n.IfExists {
			// Noop.
			return &valuesNode{}, nil
		}
		// Key does not exist, but we want it to: error out.
		return nil, fmt.Errorf("table %q does not exist", n.Name.Table())
	}

	targetDbDesc, err := p.getDatabaseDesc(n.NewName.Database())
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(targetDbDesc, privilege.CREATE); err != nil {
		return nil, err
	}

	if n.Name.Database() == n.NewName.Database() && n.Name.Table() == n.NewName.Table() {
		// Noop.
		return &valuesNode{}, nil
	}

	tableDesc, err := p.getTableDesc(n.Name)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.DROP); err != nil {
		return nil, err
	}

	tableDesc.SetName(n.NewName.Table())
	tableDesc.ParentID = targetDbDesc.ID

	newTbKey := tableKey{targetDbDesc.ID, n.NewName.Table()}.Key()
	descKey := MakeDescMetadataKey(tableDesc.GetID())

	if err := tableDesc.Validate(); err != nil {
		return nil, err
	}

	b := client.Batch{}
	b.Put(descKey, tableDesc)
	b.CPut(newTbKey, tableDesc.GetID(), nil)
	b.Del(tbKey)

	// Mark transaction as operating on the system DB.
	p.txn.SetSystemDBTrigger()

	if err := p.txn.Run(&b); err != nil {
		if _, ok := err.(*proto.ConditionFailedError); ok {
			return nil, fmt.Errorf("table name %q already exists", n.NewName.Table())
		}
		return nil, err
	}

	return &valuesNode{}, nil
}
func putMeta(b *client.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) {
	b.Put(key, desc)
}