func (u *updateNode) Next() (bool, error) { next, err := u.run.rows.Next() if !next { if err == nil { // We're done. Finish the batch. err = u.tw.finalize() } return false, err } if u.run.explain == explainDebug { return true, nil } tracing.AnnotateTrace() oldValues := u.run.rows.Values() // Our updated value expressions occur immediately after the plain // columns in the output. updateValues := oldValues[len(u.tw.ru.fetchCols):] oldValues = oldValues[:len(u.tw.ru.fetchCols)] u.checkHelper.loadRow(u.tw.ru.fetchColIDtoRowIndex, oldValues, false) u.checkHelper.loadRow(u.updateColsIdx, updateValues, true) if err := u.checkHelper.check(&u.p.evalCtx); err != nil { return false, err } // Ensure that the values honor the specified column widths. for i := range updateValues { if err := sqlbase.CheckValueWidth(u.tw.ru.updateCols[i], updateValues[i]); err != nil { return false, err } } // Update the row values. for i, col := range u.tw.ru.updateCols { val := updateValues[i] if !col.Nullable && val == parser.DNull { return false, sqlbase.NewNonNullViolationError(col.Name) } } newValues, err := u.tw.row(append(oldValues, updateValues...)) if err != nil { return false, err } resultRow, err := u.rh.cookResultRow(newValues) if err != nil { return false, err } u.run.resultRow = resultRow return true, nil }
func (n *insertNode) Next() (bool, error) { ctx := context.TODO() if next, err := n.run.rows.Next(); !next { if err == nil { // We're done. Finish the batch. err = n.tw.finalize(ctx) } return false, err } if n.run.explain == explainDebug { return true, nil } rowVals := n.run.rows.Values() // The values for the row may be shorter than the number of columns being // inserted into. Generate default values for those columns using the // default expressions. for i := len(rowVals); i < len(n.insertCols); i++ { if n.defaultExprs == nil { rowVals = append(rowVals, parser.DNull) continue } d, err := n.defaultExprs[i].Eval(&n.p.evalCtx) if err != nil { return false, err } rowVals = append(rowVals, d) } // Check to see if NULL is being inserted into any non-nullable column. for _, col := range n.tableDesc.Columns { if !col.Nullable { if i, ok := n.insertColIDtoRowIndex[col.ID]; !ok || rowVals[i] == parser.DNull { return false, sqlbase.NewNonNullViolationError(col.Name) } } } // Ensure that the values honor the specified column widths. for i := range rowVals { if err := sqlbase.CheckValueWidth(n.insertCols[i], rowVals[i]); err != nil { return false, err } } n.checkHelper.loadRow(n.insertColIDtoRowIndex, rowVals, false) if err := n.checkHelper.check(&n.p.evalCtx); err != nil { return false, err } _, err := n.tw.row(ctx, rowVals) if err != nil { return false, err } for i, val := range rowVals { if n.run.rowTemplate != nil { n.run.rowTemplate[n.run.rowIdxToRetIdx[i]] = val } } resultRow, err := n.rh.cookResultRow(n.run.rowTemplate) if err != nil { return false, err } n.run.resultRow = resultRow return true, nil }
func (sc *SchemaChanger) truncateAndBackfillColumnsChunk( added []sqlbase.ColumnDescriptor, dropped []sqlbase.ColumnDescriptor, defaultExprs []parser.TypedExpr, evalCtx *parser.EvalContext, sp sqlbase.Span, ) (roachpb.Key, bool, error) { var curIndexKey roachpb.Key done := false err := sc.db.Txn(func(txn *client.Txn) error { tableDesc, err := getTableDescFromID(txn, sc.tableID) if err != nil { return err } // Short circuit the backfill if the table has been deleted. if tableDesc.Deleted() { done = true return nil } updateCols := append(added, dropped...) fkTables := TablesNeededForFKs(*tableDesc, CheckUpdates) for k := range fkTables { if fkTables[k], err = getTableDescFromID(txn, k); err != nil { return err } } // TODO(dan): Tighten up the bound on the requestedCols parameter to // makeRowUpdater. requestedCols := make([]sqlbase.ColumnDescriptor, 0, len(tableDesc.Columns)+len(added)) requestedCols = append(requestedCols, tableDesc.Columns...) requestedCols = append(requestedCols, added...) ru, err := makeRowUpdater( txn, tableDesc, fkTables, updateCols, requestedCols, rowUpdaterOnlyColumns, ) if err != nil { return err } // TODO(dan): This check is an unfortunate bleeding of the internals of // rowUpdater. Extract the sql row to k/v mapping logic out into something // usable here. if !ru.isColumnOnlyUpdate() { panic("only column data should be modified, but the rowUpdater is configured otherwise") } // Run a scan across the table using the primary key. Running // the scan and applying the changes in many transactions is // fine because the schema change is in the correct state to // handle intermediate OLTP commands which delete and add // values during the scan. 
var rf sqlbase.RowFetcher colIDtoRowIndex := colIDtoRowIndexFromCols(tableDesc.Columns) valNeededForCol := make([]bool, len(tableDesc.Columns)) for i := range valNeededForCol { _, valNeededForCol[i] = ru.fetchColIDtoRowIndex[tableDesc.Columns[i].ID] } err = rf.Init(tableDesc, colIDtoRowIndex, &tableDesc.PrimaryIndex, false, false, tableDesc.Columns, valNeededForCol) if err != nil { return err } // StartScan uses 0 as a sentinal for the default limit of entries scanned. if err := rf.StartScan(txn, sqlbase.Spans{sp}, 0); err != nil { return err } indexKeyPrefix := sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID) oldValues := make(parser.DTuple, len(ru.fetchCols)) updateValues := make(parser.DTuple, len(updateCols)) writeBatch := &client.Batch{} var i int for ; i < ColumnTruncateAndBackfillChunkSize; i++ { row, err := rf.NextRow() if err != nil { return err } if row == nil { break // Done } curIndexKey, _, err = sqlbase.EncodeIndexKey( tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, row, indexKeyPrefix) for j, col := range added { if defaultExprs == nil || defaultExprs[j] == nil { updateValues[j] = parser.DNull } else { updateValues[j], err = defaultExprs[j].Eval(evalCtx) if err != nil { return err } } if !col.Nullable && updateValues[j].Compare(parser.DNull) == 0 { return sqlbase.NewNonNullViolationError(col.Name) } } for j := range dropped { updateValues[j+len(added)] = parser.DNull } copy(oldValues, row) for j := len(row); j < len(oldValues); j++ { oldValues[j] = parser.DNull } if _, err := ru.updateRow(writeBatch, oldValues, updateValues); err != nil { return err } } if i < ColumnTruncateAndBackfillChunkSize { done = true } if err := txn.Run(writeBatch); err != nil { return convertBackfillError(tableDesc, writeBatch) } return nil }) return curIndexKey.PrefixEnd(), done, err }
// truncateAndBackfillColumnsChunk scans one chunk of the table's primary
// key space and, for each row found, deletes the k/v entries of dropped
// columns and writes default values for added columns directly into a KV
// batch. It returns the key from which to resume the backfill, whether the
// backfill is complete, and any error. If nonNullableColumn is non-empty,
// finding any row at all is reported as a NOT NULL violation.
func (sc *SchemaChanger) truncateAndBackfillColumnsChunk(
	added []sqlbase.ColumnDescriptor,
	dropped []sqlbase.ColumnDescriptor,
	nonNullableColumn string,
	defaultExprs []parser.TypedExpr,
	evalCtx parser.EvalContext,
	sp sqlbase.Span,
) (roachpb.Key, bool, error) {
	var curSentinel roachpb.Key
	done := false
	err := sc.db.Txn(func(txn *client.Txn) error {
		tableDesc, err := getTableDescFromID(txn, sc.tableID)
		if err != nil {
			return err
		}
		// Short circuit the backfill if the table has been deleted.
		if tableDesc.Deleted() {
			done = true
			return nil
		}

		// Run a scan across the table using the primary key. Running
		// the scan and applying the changes in many transactions is
		// fine because the schema change is in the correct state to
		// handle intermediate OLTP commands which delete and add
		// values during the scan.
		b := &client.Batch{}
		b.Scan(sp.Start, sp.End, ColumnTruncateAndBackfillChunkSize)
		if err := txn.Run(b); err != nil {
			return err
		}

		// Use a different batch to truncate/backfill columns.
		writeBatch := &client.Batch{}
		marshalled := make([]roachpb.Value, len(defaultExprs))
		// Assume done until the scan proves there are still rows to process.
		done = true
		for _, result := range b.Results {
			var sentinelKey roachpb.Key
			for _, kv := range result.Rows {
				// Still processing table.
				done = false
				if nonNullableColumn != "" {
					return sqlbase.NewNonNullViolationError(nonNullableColumn)
				}
				// A new sentinel prefix marks the start of the next table row;
				// all per-row mutations below happen once per row.
				if sentinelKey == nil || !bytes.HasPrefix(kv.Key, sentinelKey) {
					// Sentinel keys have a 0 suffix indicating 0 bytes of
					// column ID. Strip off that suffix to determine the
					// prefix shared with the other keys for the row.
					sentinelKey = sqlbase.StripColumnIDLength(kv.Key)
					// Store away key for the next table row as the point from
					// which to start from.
					curSentinel = sentinelKey

					// Delete the entire dropped columns. This used to use SQL
					// UPDATE in the past to update the dropped column to
					// NULL; but a column in the process of being dropped is
					// placed in the table descriptor mutations, and a SQL
					// UPDATE of a column in mutations will fail.
					for _, columnDesc := range dropped {
						// Delete the dropped column.
						colKey := keys.MakeColumnKey(sentinelKey, uint32(columnDesc.ID))
						if log.V(2) {
							log.Infof("Del %s", colKey)
						}
						writeBatch.Del(colKey)
					}

					// Add the new columns and backfill the values.
					for i, expr := range defaultExprs {
						if expr == nil {
							// No default expression: nothing to write for this column.
							continue
						}
						col := added[i]
						colKey := keys.MakeColumnKey(sentinelKey, uint32(col.ID))
						d, err := expr.Eval(evalCtx)
						if err != nil {
							return err
						}
						// NOTE(review): marshalled[i] is overwritten on every row,
						// and writeBatch.Put below stores a pointer to it, so all
						// queued Puts for this column alias the same roachpb.Value.
						// This appears safe only if the default evaluates to the
						// same value for every row in the chunk — confirm before
						// relying on row-varying defaults here.
						marshalled[i], err = sqlbase.MarshalColumnValue(col, d)
						if err != nil {
							return err
						}
						if log.V(2) {
							log.Infof("Put %s -> %v", colKey, d)
						}
						// Insert default value into the column. If this row
						// was recently added the default value might have
						// already been populated, because the
						// ColumnDescriptor is in the WRITE_ONLY state.
						// Reinserting the default value is not a big deal.
						//
						// Note: a column in the WRITE_ONLY state cannot be
						// populated directly through SQL. A SQL INSERT cannot
						// directly reference the column, and the INSERT
						// populates the column with the default value.
						writeBatch.Put(colKey, &marshalled[i])
					}
				}
			}
		}
		if err := txn.Run(writeBatch); err != nil {
			return convertBackfillError(tableDesc, writeBatch)
		}
		return nil
	})
	return curSentinel.PrefixEnd(), done, err
}