Example 1
func (u *updateNode) Next() (bool, error) {
	next, err := u.run.rows.Next()
	if !next {
		if err == nil {
			// We're done. Finish the batch.
			err = u.tw.finalize(u.p.ctx())
		}
		return false, err
	}

	if u.run.explain == explainDebug {
		return true, nil
	}

	tracing.AnnotateTrace()

	oldValues := u.run.rows.Values()

	// Our updated value expressions occur immediately after the plain
	// columns in the output.
	updateValues := oldValues[len(u.tw.ru.fetchCols):]
	oldValues = oldValues[:len(u.tw.ru.fetchCols)]

	u.checkHelper.loadRow(u.tw.ru.fetchColIDtoRowIndex, oldValues, false)
	u.checkHelper.loadRow(u.updateColsIdx, updateValues, true)
	if err := u.checkHelper.check(&u.p.evalCtx); err != nil {
		return false, err
	}

	// Ensure that the values honor the specified column widths.
	for i := range updateValues {
		if err := sqlbase.CheckValueWidth(u.tw.ru.updateCols[i], updateValues[i]); err != nil {
			return false, err
		}
	}

	// Verify that the updated values satisfy the non-null constraints of
	// their target columns before writing the row.
	for i, col := range u.tw.ru.updateCols {
		val := updateValues[i]
		if !col.Nullable && val == parser.DNull {
			return false, sqlbase.NewNonNullViolationError(col.Name)
		}
	}

	newValues, err := u.tw.row(u.p.ctx(), append(oldValues, updateValues...))
	if err != nil {
		return false, err
	}

	resultRow, err := u.rh.cookResultRow(newValues)
	if err != nil {
		return false, err
	}
	u.run.resultRow = resultRow

	return true, nil
}
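
For context, a Next()-style node like this is driven by an outer loop that keeps calling Next() until it reports no more rows or an error. The following is a minimal, self-contained sketch of that pattern, assuming a hypothetical rowSource interface and sliceSource type; neither is part of the code above.

package main

import "fmt"

// rowSource is a hypothetical stand-in for a plan node such as updateNode:
// Next advances to the next row and reports whether one is available.
type rowSource interface {
	Next() (bool, error)
	Values() []string
}

// sliceSource is a toy implementation backed by a slice of rows.
type sliceSource struct {
	rows [][]string
	idx  int
}

func (s *sliceSource) Next() (bool, error) {
	if s.idx >= len(s.rows) {
		return false, nil // done; a real node would finalize its batch here
	}
	s.idx++
	return true, nil
}

func (s *sliceSource) Values() []string { return s.rows[s.idx-1] }

// drain pulls every row out of a source, mirroring how an executor
// loops over Next() until it returns false or an error.
func drain(src rowSource) error {
	for {
		next, err := src.Next()
		if err != nil {
			return err
		}
		if !next {
			return nil
		}
		fmt.Println(src.Values())
	}
}

func main() {
	src := &sliceSource{rows: [][]string{{"a", "1"}, {"b", "2"}}}
	if err := drain(src); err != nil {
		fmt.Println("error:", err)
	}
}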
Example 2
// GenerateInsertRow prepares a row tuple for insertion. It fills in default
// expressions, verifies non-nullable columns, and checks column widths.
func GenerateInsertRow(
	defaultExprs []parser.TypedExpr,
	insertColIDtoRowIndex map[sqlbase.ColumnID]int,
	insertCols []sqlbase.ColumnDescriptor,
	evalCtx parser.EvalContext,
	tableDesc *sqlbase.TableDescriptor,
	rowVals parser.DTuple,
) (parser.DTuple, error) {
	// The values for the row may be shorter than the number of columns being
	// inserted into. Generate default values for those columns using the
	// default expressions.

	if len(rowVals) < len(insertCols) {
		// It's not cool to append to the slice returned by a node; make a copy.
		oldVals := rowVals
		rowVals = make(parser.DTuple, len(insertCols))
		copy(rowVals, oldVals)

		for i := len(oldVals); i < len(insertCols); i++ {
			if defaultExprs == nil {
				rowVals[i] = parser.DNull
				continue
			}
			d, err := defaultExprs[i].Eval(&evalCtx)
			if err != nil {
				return nil, err
			}
			rowVals[i] = d
		}
	}

	// Check to see if NULL is being inserted into any non-nullable column.
	for _, col := range tableDesc.Columns {
		if !col.Nullable {
			if i, ok := insertColIDtoRowIndex[col.ID]; !ok || rowVals[i] == parser.DNull {
				return nil, sqlbase.NewNonNullViolationError(col.Name)
			}
		}
	}

	// Ensure that the values honor the specified column widths.
	for i := range rowVals {
		if err := sqlbase.CheckValueWidth(insertCols[i], rowVals[i]); err != nil {
			return nil, err
		}
	}
	return rowVals, nil
}
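
The fill-defaults / reject-NULL logic above can be illustrated independently of the sqlbase types. Below is a minimal sketch under simplified assumptions: a hypothetical column struct stands in for sqlbase.ColumnDescriptor, defaults are plain strings rather than typed expressions, and NULL is modeled as a nil pointer.

package main

import (
	"errors"
	"fmt"
)

// column is a hypothetical, simplified column descriptor.
type column struct {
	Name     string
	Nullable bool
	Default  *string // nil means "no default expression"
}

// generateInsertRow mirrors the shape of GenerateInsertRow above:
// pad short rows with defaults (or NULL), then reject NULLs in
// non-nullable columns.
func generateInsertRow(cols []column, vals []*string) ([]*string, error) {
	if len(vals) < len(cols) {
		// Copy instead of appending to the caller's slice.
		padded := make([]*string, len(cols))
		copy(padded, vals)
		for i := len(vals); i < len(cols); i++ {
			padded[i] = cols[i].Default // stays nil (NULL) without a default
		}
		vals = padded
	}
	for i, col := range cols {
		if !col.Nullable && vals[i] == nil {
			return nil, errors.New("null value in non-nullable column " + col.Name)
		}
	}
	return vals, nil
}

func main() {
	def := "0"
	cols := []column{
		{Name: "id", Nullable: false},
		{Name: "score", Nullable: false, Default: &def},
	}
	id := "42"
	row, err := generateInsertRow(cols, []*string{&id})
	fmt.Println(len(row), err) // 2 <nil>: the default filled the missing column
}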
Example 3
// truncateAndBackfillColumnsChunk returns the next key, a done flag, and an
// error. The next key and done flag are invalid if the error is non-nil; the
// next key is invalid if done is true.
func (sc *SchemaChanger) truncateAndBackfillColumnsChunk(
	added []sqlbase.ColumnDescriptor,
	dropped []sqlbase.ColumnDescriptor,
	defaultExprs []parser.TypedExpr,
	sp roachpb.Span,
	updateValues parser.DTuple,
	nonNullViolationColumnName string,
	chunkSize int64,
	mutationIdx int,
	lastCheckpoint *time.Time,
) (roachpb.Key, bool, error) {
	done := false
	var nextKey roachpb.Key
	err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		if sc.testingKnobs.RunBeforeBackfillChunk != nil {
			if err := sc.testingKnobs.RunBeforeBackfillChunk(sp); err != nil {
				return err
			}
		}
		if sc.testingKnobs.RunAfterBackfillChunk != nil {
			defer sc.testingKnobs.RunAfterBackfillChunk()
		}

		tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
		if err != nil {
			return err
		}
		// Short circuit the backfill if the table has been deleted.
		if done = tableDesc.Dropped(); done {
			return nil
		}

		updateCols := append(added, dropped...)
		fkTables := tablesNeededForFKs(*tableDesc, CheckUpdates)
		for k := range fkTables {
			table, err := sqlbase.GetTableDescFromID(txn, k)
			if err != nil {
				return err
			}
			fkTables[k] = tableLookup{table: table}
		}
		// TODO(dan): Tighten up the bound on the requestedCols parameter to
		// makeRowUpdater.
		requestedCols := make([]sqlbase.ColumnDescriptor, 0, len(tableDesc.Columns)+len(added))
		requestedCols = append(requestedCols, tableDesc.Columns...)
		requestedCols = append(requestedCols, added...)
		ru, err := makeRowUpdater(
			txn, tableDesc, fkTables, updateCols, requestedCols, rowUpdaterOnlyColumns,
		)
		if err != nil {
			return err
		}

		// TODO(dan): This check is an unfortunate bleeding of the internals of
		// rowUpdater. Extract the sql row to k/v mapping logic out into something
		// usable here.
		if !ru.isColumnOnlyUpdate() {
			panic("only column data should be modified, but the rowUpdater is configured otherwise")
		}

		// Run a scan across the table using the primary key. Running
		// the scan and applying the changes in many transactions is
		// fine because the schema change is in the correct state to
		// handle intermediate OLTP commands which delete and add
		// values during the scan.
		var rf sqlbase.RowFetcher
		colIDtoRowIndex := colIDtoRowIndexFromCols(tableDesc.Columns)
		valNeededForCol := make([]bool, len(tableDesc.Columns))
		for i := range valNeededForCol {
			_, valNeededForCol[i] = ru.fetchColIDtoRowIndex[tableDesc.Columns[i].ID]
		}
		if err := rf.Init(
			tableDesc, colIDtoRowIndex, &tableDesc.PrimaryIndex, false, false,
			tableDesc.Columns, valNeededForCol,
		); err != nil {
			return err
		}
		if err := rf.StartScan(
			txn, roachpb.Spans{sp}, true /* limit batches */, chunkSize,
		); err != nil {
			return err
		}

		oldValues := make(parser.DTuple, len(ru.fetchCols))
		writeBatch := txn.NewBatch()
		rowLength := 0
		var lastRowSeen parser.DTuple
		i := int64(0)
		for ; i < chunkSize; i++ {
			row, err := rf.NextRow()
			if err != nil {
				return err
			}
			if row == nil {
				break
			}
			lastRowSeen = row
			if nonNullViolationColumnName != "" {
				return sqlbase.NewNonNullViolationError(nonNullViolationColumnName)
			}

			copy(oldValues, row)
			// Update oldValues with NULL values where values weren't found;
			// only update when necessary.
			if rowLength != len(row) {
				rowLength = len(row)
				for j := rowLength; j < len(oldValues); j++ {
					oldValues[j] = parser.DNull
				}
			}
			if _, err := ru.updateRow(txn.Context, writeBatch, oldValues, updateValues); err != nil {
				return err
			}
		}
		if err := txn.Run(writeBatch); err != nil {
			return convertBackfillError(tableDesc, writeBatch)
		}
		if done = i < chunkSize; done {
			return nil
		}
		curIndexKey, _, err := sqlbase.EncodeIndexKey(
			tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, lastRowSeen,
			sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
		if err != nil {
			return err
		}
		resume := roachpb.Span{Key: roachpb.Key(curIndexKey).PrefixEnd(), EndKey: sp.EndKey}
		if err := sc.maybeWriteResumeSpan(txn, tableDesc, resume, mutationIdx, lastCheckpoint); err != nil {
			return err
		}
		nextKey = resume.Key
		return nil
	})
	return nextKey, done, err
}
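
The chunking and resume-key pattern in this function can be sketched in isolation: process at most chunkSize rows, stop early when the input runs dry, and otherwise report where the next chunk should start. The processChunk function below is hypothetical and uses plain integers in place of table rows and index keys.

package main

import "fmt"

// processChunk is a simplified, hypothetical version of the chunking logic
// above: consume up to chunkSize keys starting at start, and return the
// position to resume from plus a done flag once the input is exhausted.
func processChunk(keys []int, start, chunkSize int) (nextStart int, done bool) {
	i := 0
	for ; i < chunkSize; i++ {
		if start+i >= len(keys) {
			break // no more rows in this span
		}
		// A real backfill would rewrite the row here and add it to a batch.
		_ = keys[start+i]
	}
	if i < chunkSize {
		return 0, true // ran out of rows before filling the chunk: we're done
	}
	// Resume just past the last key processed in this chunk.
	return start + i, false
}

func main() {
	keys := make([]int, 10)
	start, done := 0, false
	for !done {
		start, done = processChunk(keys, start, 4)
		fmt.Println("next start:", start, "done:", done)
	}
}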
Example 4
func (n *insertNode) Next() (bool, error) {
	ctx := n.editNodeBase.p.ctx()
	if next, err := n.run.rows.Next(); !next {
		if err == nil {
			// We're done. Finish the batch.
			err = n.tw.finalize(ctx)
		}
		return false, err
	}

	if n.run.explain == explainDebug {
		return true, nil
	}

	rowVals := n.run.rows.Values()

	// The values for the row may be shorter than the number of columns being
	// inserted into. Generate default values for those columns using the
	// default expressions.

	if len(rowVals) < len(n.insertCols) {
		// It's not cool to append to the slice returned by a node; make a copy.
		oldVals := rowVals
		rowVals = make(parser.DTuple, len(n.insertCols))
		copy(rowVals, oldVals)

		for i := len(oldVals); i < len(n.insertCols); i++ {
			if n.defaultExprs == nil {
				rowVals[i] = parser.DNull
				continue
			}
			d, err := n.defaultExprs[i].Eval(&n.p.evalCtx)
			if err != nil {
				return false, err
			}
			rowVals[i] = d
		}
	}

	// Check to see if NULL is being inserted into any non-nullable column.
	for _, col := range n.tableDesc.Columns {
		if !col.Nullable {
			if i, ok := n.insertColIDtoRowIndex[col.ID]; !ok || rowVals[i] == parser.DNull {
				return false, sqlbase.NewNonNullViolationError(col.Name)
			}
		}
	}

	// Ensure that the values honor the specified column widths.
	for i := range rowVals {
		if err := sqlbase.CheckValueWidth(n.insertCols[i], rowVals[i]); err != nil {
			return false, err
		}
	}

	n.checkHelper.loadRow(n.insertColIDtoRowIndex, rowVals, false)
	if err := n.checkHelper.check(&n.p.evalCtx); err != nil {
		return false, err
	}

	_, err := n.tw.row(ctx, rowVals)
	if err != nil {
		return false, err
	}

	if n.run.rowTemplate != nil {
		for i, val := range rowVals {
			n.run.rowTemplate[n.run.rowIdxToRetIdx[i]] = val
		}
	}

	resultRow, err := n.rh.cookResultRow(n.run.rowTemplate)
	if err != nil {
		return false, err
	}
	n.run.resultRow = resultRow

	return true, nil
}
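
The rowTemplate handling near the end remaps each inserted value into a wider result row through an index mapping (rowIdxToRetIdx in the code above). Here is a minimal sketch of that remapping, with hypothetical names and plain strings in place of datums.

package main

import "fmt"

// fillTemplate scatters the inserted values into a wider result row using
// an index-to-index mapping, mirroring the rowTemplate loop above.
func fillTemplate(template []string, rowIdxToRetIdx []int, vals []string) {
	for i, val := range vals {
		template[rowIdxToRetIdx[i]] = val
	}
}

func main() {
	// The result row has three slots; the two inserted values land in
	// slots 2 and 0, leaving the remaining slot at its default.
	template := []string{"NULL", "NULL", "NULL"}
	fillTemplate(template, []int{2, 0}, []string{"a", "b"})
	fmt.Println(template) // [b NULL a]
}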