Example #1
func (td *tableDeleter) deleteAllRowsScan(
	ctx context.Context, resume roachpb.Span, limit int64,
) (roachpb.Span, error) {
	if resume.Key == nil {
		tablePrefix := sqlbase.MakeIndexKeyPrefix(
			td.rd.helper.tableDesc, td.rd.helper.tableDesc.PrimaryIndex.ID)
		resume = roachpb.Span{Key: roachpb.Key(tablePrefix), EndKey: roachpb.Key(tablePrefix).PrefixEnd()}
	}
	valNeededForCol := make([]bool, len(td.rd.helper.tableDesc.Columns))
	for _, idx := range td.rd.fetchColIDtoRowIndex {
		valNeededForCol[idx] = true
	}

	var rf sqlbase.RowFetcher
	err := rf.Init(
		td.rd.helper.tableDesc, td.rd.fetchColIDtoRowIndex, &td.rd.helper.tableDesc.PrimaryIndex,
		false /* reverse */, false /* isSecondaryIndex */, td.rd.fetchCols, valNeededForCol)
	if err != nil {
		return resume, err
	}
	if err := rf.StartScan(td.txn, roachpb.Spans{resume}, true /* limit batches */, 0 /* no limit hint */); err != nil {
		return resume, err
	}

	for i := int64(0); i < limit; i++ {
		row, err := rf.NextRowDecoded()
		if err != nil {
			return resume, err
		}
		if row == nil {
			// Done deleting all rows.
			resume = roachpb.Span{}
			break
		}
		_, err = td.row(ctx, row)
		if err != nil {
			return resume, err
		}
	}
	if resume.Key != nil {
		// Update the resume start key for the next iteration.
		resume.Key = rf.Key()
	}
	return resume, td.finalize(ctx)
}
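The resume span is the hand-off point between calls: when the function exhausts its row budget it returns the span to restart from, and an empty span means the table is fully drained. A minimal driver sketch, assuming a hypothetical helper inside the same package (the name deleteAllRowsInChunks and the chunkSize parameter are illustrative, not part of the project):

// deleteAllRowsInChunks is a hypothetical driver that repeatedly invokes
// deleteAllRowsScan, deleting at most chunkSize rows per call, until the
// returned resume span is empty.
func deleteAllRowsInChunks(ctx context.Context, td *tableDeleter, chunkSize int64) error {
	var resume roachpb.Span // zero value: scan from the start of the table
	for {
		var err error
		resume, err = td.deleteAllRowsScan(ctx, resume, chunkSize)
		if err != nil {
			return err
		}
		if resume.Key == nil {
			// An empty resume span signals that every row has been deleted.
			return nil
		}
	}
}
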
Example #2
File: backfill.go Project: knz/cockroach
// truncateAndBackfillColumnsChunk returns the next key, a done flag, and an
// error. The next key and the done flag are invalid if error != nil; the next
// key is also invalid if done is true.
func (sc *SchemaChanger) truncateAndBackfillColumnsChunk(
	added []sqlbase.ColumnDescriptor,
	dropped []sqlbase.ColumnDescriptor,
	defaultExprs []parser.TypedExpr,
	sp roachpb.Span,
	updateValues parser.DTuple,
	nonNullViolationColumnName string,
	chunkSize int64,
	mutationIdx int,
	lastCheckpoint *time.Time,
) (roachpb.Key, bool, error) {
	done := false
	var nextKey roachpb.Key
	err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		if sc.testingKnobs.RunBeforeBackfillChunk != nil {
			if err := sc.testingKnobs.RunBeforeBackfillChunk(sp); err != nil {
				return err
			}
		}
		if sc.testingKnobs.RunAfterBackfillChunk != nil {
			defer sc.testingKnobs.RunAfterBackfillChunk()
		}

		tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
		if err != nil {
			return err
		}
		// Short circuit the backfill if the table has been deleted.
		if done = tableDesc.Dropped(); done {
			return nil
		}

		updateCols := append(added, dropped...)
		fkTables := tablesNeededForFKs(*tableDesc, CheckUpdates)
		for k := range fkTables {
			table, err := sqlbase.GetTableDescFromID(txn, k)
			if err != nil {
				return err
			}
			fkTables[k] = tableLookup{table: table}
		}
		// TODO(dan): Tighten up the bound on the requestedCols parameter to
		// makeRowUpdater.
		requestedCols := make([]sqlbase.ColumnDescriptor, 0, len(tableDesc.Columns)+len(added))
		requestedCols = append(requestedCols, tableDesc.Columns...)
		requestedCols = append(requestedCols, added...)
		ru, err := makeRowUpdater(
			txn, tableDesc, fkTables, updateCols, requestedCols, rowUpdaterOnlyColumns,
		)
		if err != nil {
			return err
		}

		// TODO(dan): This check is an unfortunate bleeding of the internals of
		// rowUpdater. Extract the sql row to k/v mapping logic out into something
		// usable here.
		if !ru.isColumnOnlyUpdate() {
			panic("only column data should be modified, but the rowUpdater is configured otherwise")
		}

		// Run a scan across the table using the primary key. Running
		// the scan and applying the changes in many transactions is
		// fine because the schema change is in the correct state to
		// handle intermediate OLTP commands which delete and add
		// values during the scan.
		var rf sqlbase.RowFetcher
		colIDtoRowIndex := colIDtoRowIndexFromCols(tableDesc.Columns)
		valNeededForCol := make([]bool, len(tableDesc.Columns))
		for i := range valNeededForCol {
			_, valNeededForCol[i] = ru.fetchColIDtoRowIndex[tableDesc.Columns[i].ID]
		}
		if err := rf.Init(
			tableDesc, colIDtoRowIndex, &tableDesc.PrimaryIndex, false /* reverse */,
			false /* isSecondaryIndex */, tableDesc.Columns, valNeededForCol,
		); err != nil {
			return err
		}
		if err := rf.StartScan(
			txn, roachpb.Spans{sp}, true /* limit batches */, chunkSize,
		); err != nil {
			return err
		}

		oldValues := make(parser.DTuple, len(ru.fetchCols))
		writeBatch := txn.NewBatch()
		rowLength := 0
		var lastRowSeen parser.DTuple
		i := int64(0)
		for ; i < chunkSize; i++ {
			row, err := rf.NextRow()
			if err != nil {
				return err
			}
			if row == nil {
				break
			}
			lastRowSeen = row
			if nonNullViolationColumnName != "" {
				return sqlbase.NewNonNullViolationError(nonNullViolationColumnName)
			}

			copy(oldValues, row)
			// Update oldValues with NULL values where values weren't found;
			// only update when necessary.
			if rowLength != len(row) {
				rowLength = len(row)
				for j := rowLength; j < len(oldValues); j++ {
					oldValues[j] = parser.DNull
				}
			}
			if _, err := ru.updateRow(txn.Context, writeBatch, oldValues, updateValues); err != nil {
				return err
			}
		}
		if err := txn.Run(writeBatch); err != nil {
			return convertBackfillError(tableDesc, writeBatch)
		}
		if done = i < chunkSize; done {
			return nil
		}
		curIndexKey, _, err := sqlbase.EncodeIndexKey(
			tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, lastRowSeen,
			sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
		if err != nil {
			return err
		}
		resume := roachpb.Span{Key: roachpb.Key(curIndexKey).PrefixEnd(), EndKey: sp.EndKey}
		if err := sc.maybeWriteResumeSpan(txn, tableDesc, resume, mutationIdx, lastCheckpoint); err != nil {
			return err
		}
		nextKey = resume.Key
		return nil
	})
	return nextKey, done, err
}
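The (nextKey, done, error) contract makes the chunking protocol explicit: each call processes at most chunkSize rows of sp inside its own transaction, and the caller narrows the span to nextKey before the next call. A rough driver sketch, assuming the same package; the method name backfillColumnsInChunks and its simplified argument list are hypothetical:

// backfillColumnsInChunks is a hypothetical driver that walks the span sp in
// chunkSize pieces, resuming each call at the key returned by the previous one.
func (sc *SchemaChanger) backfillColumnsInChunks(
	added, dropped []sqlbase.ColumnDescriptor,
	defaultExprs []parser.TypedExpr,
	sp roachpb.Span,
	updateValues parser.DTuple,
	chunkSize int64,
	mutationIdx int,
) error {
	var lastCheckpoint time.Time
	for {
		nextKey, done, err := sc.truncateAndBackfillColumnsChunk(
			added, dropped, defaultExprs, sp, updateValues,
			"" /* nonNullViolationColumnName */, chunkSize, mutationIdx, &lastCheckpoint,
		)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		// Resume the next chunk where the previous one left off.
		sp.Key = nextKey
	}
}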