Example #1
func makeFKDeleteHelper(
	txn *client.Txn,
	table sqlbase.TableDescriptor,
	otherTables tableLookupsByID,
	colMap map[sqlbase.ColumnID]int,
) (fkDeleteHelper, error) {
	var fks fkDeleteHelper
	for _, idx := range table.AllNonDropIndexes() {
		for _, ref := range idx.ReferencedBy {
			if otherTables[ref.Table].isAdding {
				// We can assume that a table being added but not yet public is empty,
				// and thus does not need to be checked for FK violations.
				continue
			}
			fk, err := makeBaseFKHelper(txn, otherTables, idx, ref, colMap)
			if err == errSkipUnusedFK {
				continue
			}
			if err != nil {
				return fks, err
			}
			if fks == nil {
				fks = make(fkDeleteHelper)
			}
			fks[idx.ID] = append(fks[idx.ID], fk)
		}
	}
	return fks, nil
}
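The helper map above is allocated lazily: tables with no foreign-key references get back a nil fkDeleteHelper and pay no allocation. A minimal, self-contained sketch of that pattern (types and names here are invented for illustration):

package main

import "fmt"

// helperByIndex stands in for fkDeleteHelper: a map from an index ID to the
// helpers built for that index.
type helperByIndex map[int][]string

// collect only allocates the map once the first helper is appended, mirroring
// the `if fks == nil { fks = make(fkDeleteHelper) }` step above.
func collect(refsByIndex map[int][]string) helperByIndex {
	var helpers helperByIndex
	for idx, refs := range refsByIndex {
		for _, ref := range refs {
			if helpers == nil {
				helpers = make(helperByIndex)
			}
			helpers[idx] = append(helpers[idx], "fk-check:"+ref)
		}
	}
	return helpers
}

func main() {
	fmt.Println(collect(nil) == nil) // true: no references, no allocation
	fmt.Println(collect(map[int][]string{1: {"orders", "items"}}))
}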
Example #2
// writeTableDesc implements the SchemaAccessor interface.
func (p *planner) writeTableDesc(tableDesc *sqlbase.TableDescriptor) error {
	if isVirtualDescriptor(tableDesc) {
		panic(fmt.Sprintf("Virtual Descriptors cannot be stored, found: %v", tableDesc))
	}
	return p.txn.Put(sqlbase.MakeDescMetadataKey(tableDesc.GetID()),
		sqlbase.WrapDescriptor(tableDesc))
}
Example #3
// GetKeysForTableDescriptor retrieves the KV keys corresponding
// to the zone, name and descriptor of a table.
func GetKeysForTableDescriptor(
	tableDesc *sqlbase.TableDescriptor,
) (zoneKey roachpb.Key, nameKey roachpb.Key, descKey roachpb.Key) {
	zoneKey = sqlbase.MakeZoneKey(tableDesc.ID)
	nameKey = sqlbase.MakeNameMetadataKey(tableDesc.ParentID, tableDesc.GetName())
	descKey = sqlbase.MakeDescMetadataKey(tableDesc.ID)
	return
}
Example #4
// makeRowDeleter creates a rowDeleter for the given table.
//
// The returned rowDeleter contains a fetchCols field that defines the
// expectation of which values are passed as values to deleteRow. Any column
// passed in requestedCols will be included in fetchCols.
func makeRowDeleter(
	txn *client.Txn,
	tableDesc *sqlbase.TableDescriptor,
	fkTables tableLookupsByID,
	requestedCols []sqlbase.ColumnDescriptor,
	checkFKs bool,
) (rowDeleter, error) {
	indexes := tableDesc.Indexes
	for _, m := range tableDesc.Mutations {
		if index := m.GetIndex(); index != nil {
			indexes = append(indexes, *index)
		}
	}

	fetchCols := requestedCols[:len(requestedCols):len(requestedCols)]
	fetchColIDtoRowIndex := colIDtoRowIndexFromCols(fetchCols)

	maybeAddCol := func(colID sqlbase.ColumnID) error {
		if _, ok := fetchColIDtoRowIndex[colID]; !ok {
			col, err := tableDesc.FindColumnByID(colID)
			if err != nil {
				return err
			}
			fetchColIDtoRowIndex[col.ID] = len(fetchCols)
			fetchCols = append(fetchCols, *col)
		}
		return nil
	}
	for _, colID := range tableDesc.PrimaryIndex.ColumnIDs {
		if err := maybeAddCol(colID); err != nil {
			return rowDeleter{}, err
		}
	}
	for _, index := range indexes {
		for _, colID := range index.ColumnIDs {
			if err := maybeAddCol(colID); err != nil {
				return rowDeleter{}, err
			}
		}
	}

	rd := rowDeleter{
		helper:               rowHelper{tableDesc: tableDesc, indexes: indexes},
		fetchCols:            fetchCols,
		fetchColIDtoRowIndex: fetchColIDtoRowIndex,
	}
	if checkFKs {
		var err error
		if rd.fks, err = makeFKDeleteHelper(txn, *tableDesc, fkTables, fetchColIDtoRowIndex); err != nil {
			return rowDeleter{}, err
		}
	}

	return rd, nil
}
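fetchCols above is built with a full slice expression, requestedCols[:len(requestedCols):len(requestedCols)], so that the later appends in maybeAddCol cannot write into memory the caller still owns. A small standalone sketch of the difference, assuming nothing beyond standard Go:

package main

import "fmt"

func main() {
	requested := []string{"a", "b", "c", "d"}

	// A plain reslice keeps the original capacity, so append overwrites "c".
	clobbered := append(requested[:2], "X")
	fmt.Println(requested, clobbered) // [a b X d] [a b X]

	requested = []string{"a", "b", "c", "d"}
	// A full slice expression caps the capacity at 2: append must copy, and
	// the caller's slice is left untouched.
	safe := append(requested[:2:2], "Y")
	fmt.Println(requested, safe) // [a b c d] [a b Y]
}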
Example #5
func filterTableState(tableDesc *sqlbase.TableDescriptor) error {
	switch {
	case tableDesc.Dropped():
		return errTableDropped
	case tableDesc.Adding():
		return errTableAdding
	case tableDesc.State != sqlbase.TableDescriptor_PUBLIC:
		return errors.Errorf("table in unknown state: %s", tableDesc.State.String())
	}
	return nil
}
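filterTableState reports table state through sentinel error values that callers compare against. A self-contained sketch of that style, with the states and error values invented for illustration:

package main

import (
	"errors"
	"fmt"
)

var (
	errTableDropped = errors.New("table is being dropped")
	errTableAdding  = errors.New("table is being added")
)

type tableState int

const (
	statePublic tableState = iota
	stateAdding
	stateDropped
)

func filterState(s tableState) error {
	switch {
	case s == stateDropped:
		return errTableDropped
	case s == stateAdding:
		return errTableAdding
	case s != statePublic:
		return fmt.Errorf("table in unknown state: %d", s)
	}
	return nil
}

func main() {
	// Callers branch on the sentinel values to decide how to react.
	switch err := filterState(stateAdding); err {
	case nil:
		fmt.Println("table is usable")
	case errTableAdding:
		fmt.Println("not yet public, retry later:", err)
	default:
		fmt.Println("fatal:", err)
	}
}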
Example #6
func convertBatchError(tableDesc *sqlbase.TableDescriptor, b *client.Batch) error {
	origPErr := b.MustPErr()
	if origPErr.Index == nil {
		return origPErr.GoError()
	}
	index := origPErr.Index.Index
	if index >= int32(len(b.Results)) {
		panic(fmt.Sprintf("index %d outside of results: %+v", index, b.Results))
	}
	result := b.Results[index]
	var alloc sqlbase.DatumAlloc
	if _, ok := origPErr.GetDetail().(*roachpb.ConditionFailedError); ok {
		for _, row := range result.Rows {
			// TODO(dan): There's too much internal knowledge of the sql table
			// encoding here (and this callsite is the only reason
			// DecodeIndexKeyPrefix is exported). Refactor this bit out.
			indexID, key, err := sqlbase.DecodeIndexKeyPrefix(&alloc, tableDesc, row.Key)
			if err != nil {
				return err
			}
			index, err := tableDesc.FindIndexByID(indexID)
			if err != nil {
				return err
			}
			vals, err := sqlbase.MakeEncodedKeyVals(tableDesc, index.ColumnIDs)
			if err != nil {
				return err
			}
			dirs := make([]encoding.Direction, 0, len(index.ColumnIDs))
			for _, dir := range index.ColumnDirections {
				convertedDir, err := dir.ToEncodingDirection()
				if err != nil {
					return err
				}
				dirs = append(dirs, convertedDir)
			}
			if _, err := sqlbase.DecodeKeyVals(&alloc, vals, dirs, key); err != nil {
				return err
			}
			decodedVals := make([]parser.Datum, len(vals))
			var da sqlbase.DatumAlloc
			for i, val := range vals {
				err := val.EnsureDecoded(&da)
				if err != nil {
					return err
				}
				decodedVals[i] = val.Datum
			}
			return sqlbase.NewUniquenessConstraintViolationError(index, decodedVals)
		}
	}
	return origPErr.GoError()
}
Example #7
func forEachIndexInTable(
	table *sqlbase.TableDescriptor, fn func(*sqlbase.IndexDescriptor) error,
) error {
	if table.IsPhysicalTable() {
		if err := fn(&table.PrimaryIndex); err != nil {
			return err
		}
	}
	for i := range table.Indexes {
		if err := fn(&table.Indexes[i]); err != nil {
			return err
		}
	}
	return nil
}
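forEachIndexInTable follows the usual visitor shape: every element is handed to fn and the walk stops at the first error. A minimal standalone sketch of the same shape:

package main

import (
	"errors"
	"fmt"
)

// forEach visits each name in order and stops as soon as the callback reports
// an error, mirroring the early returns in forEachIndexInTable above.
func forEach(names []string, fn func(string) error) error {
	for i := range names {
		if err := fn(names[i]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := forEach([]string{"primary", "idx_users_email", "idx_bad"}, func(name string) error {
		if name == "idx_bad" {
			return errors.New("validation failed on " + name)
		}
		fmt.Println("visited", name)
		return nil
	})
	fmt.Println("walk ended with:", err)
}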
Example #8
// deleteIndexMutationsWithReversedColumns deletes index mutations with a
// different mutationID than the schema changer and a reference to one of the
// reversed columns.
func (sc *SchemaChanger) deleteIndexMutationsWithReversedColumns(
	desc *sqlbase.TableDescriptor, columns map[string]struct{},
) {
	newMutations := make([]sqlbase.DescriptorMutation, 0, len(desc.Mutations))
	for _, mutation := range desc.Mutations {
		if mutation.MutationID != sc.mutationID {
			if idx := mutation.GetIndex(); idx != nil {
				deleteMutation := false
				for _, name := range idx.ColumnNames {
					if _, ok := columns[name]; ok {
						// Such an index mutation has to be with direction ADD and
						// in the DELETE_ONLY state. Live indexes referencing live
						// columns cannot be deleted and thus never have direction
						// DROP. All mutations with the ADD direction start off in
						// the DELETE_ONLY state.
						if mutation.Direction != sqlbase.DescriptorMutation_ADD ||
							mutation.State != sqlbase.DescriptorMutation_DELETE_ONLY {
							panic(fmt.Sprintf("mutation in bad state: %+v", mutation))
						}
						log.Warningf(context.TODO(), "delete schema change mutation: %+v", mutation)
						deleteMutation = true
						break
					}
				}
				if deleteMutation {
					continue
				}
			}
		}
		newMutations = append(newMutations, mutation)
	}
	// Reset mutations.
	desc.Mutations = newMutations
}
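The function above filters by rebuilding: survivors are appended to a fresh slice pre-sized to the original length and then swapped in with a single assignment. A small self-contained sketch of that step, with the mutation fields reduced to placeholders:

package main

import "fmt"

type mutation struct {
	mutationID int
	column     string
}

// dropMatching keeps every mutation except those from other mutation IDs that
// touch a reversed column, echoing deleteIndexMutationsWithReversedColumns.
func dropMatching(muts []mutation, keepID int, reversed map[string]struct{}) []mutation {
	newMutations := make([]mutation, 0, len(muts))
	for _, m := range muts {
		if m.mutationID != keepID {
			if _, ok := reversed[m.column]; ok {
				continue // drop this mutation
			}
		}
		newMutations = append(newMutations, m)
	}
	return newMutations
}

func main() {
	muts := []mutation{{1, "a"}, {2, "b"}, {2, "c"}}
	fmt.Println(dropMatching(muts, 1, map[string]struct{}{"b": {}})) // [{1 a} {2 c}]
}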
Example #9
func convertBackfillError(tableDesc *sqlbase.TableDescriptor, b *client.Batch) error {
	// A backfill on a new schema element has failed and the batch contains
	// information useful in printing a sensible error. However,
	// convertBatchError() will only work correctly if the schema elements are
	// "live" in the tableDesc. Apply the mutations belonging to the same
	// mutationID to make all the mutations live in tableDesc. Note: this
	// tableDesc is not written to the k:v store.
	mutationID := tableDesc.Mutations[0].MutationID
	for _, mutation := range tableDesc.Mutations {
		if mutation.MutationID != mutationID {
			// Mutations are applied in a FIFO order. Only apply the first set
			// of mutations if they have the mutation ID we're looking for.
			break
		}
		tableDesc.MakeMutationComplete(mutation)
	}
	return convertBatchError(tableDesc, b)
}
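The loop above relies on tableDesc.Mutations being ordered FIFO by mutation ID: it applies only the leading run that shares Mutations[0]'s ID and breaks at the first different ID. A standalone sketch of that scan, with simplified types:

package main

import "fmt"

type mutation struct {
	mutationID int
	name       string
}

// firstBatch returns the leading run of mutations that share the first
// mutation's ID; later batches are left untouched.
func firstBatch(muts []mutation) []string {
	if len(muts) == 0 {
		return nil
	}
	first := muts[0].mutationID
	var applied []string
	for _, m := range muts {
		if m.mutationID != first {
			break
		}
		applied = append(applied, m.name)
	}
	return applied
}

func main() {
	muts := []mutation{{7, "add column v"}, {7, "add index v_idx"}, {8, "drop column w"}}
	fmt.Println(firstBatch(muts)) // only the mutations with ID 7
}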
Example #10
func makeColIDtoRowIndex(
	row planNode, desc *sqlbase.TableDescriptor,
) (map[sqlbase.ColumnID]int, error) {
	columns := row.Columns()
	colIDtoRowIndex := make(map[sqlbase.ColumnID]int, len(columns))
	for i, column := range columns {
		s, idx, err := desc.FindColumnByNormalizedName(parser.ReNormalizeName(column.Name))
		if err != nil {
			return nil, err
		}
		switch s {
		case sqlbase.DescriptorActive:
			colIDtoRowIndex[desc.Columns[idx].ID] = i
		case sqlbase.DescriptorIncomplete:
			colIDtoRowIndex[desc.Mutations[idx].GetColumn().ID] = i
		default:
			panic("unreachable")
		}
	}
	return colIDtoRowIndex, nil
}
Example #11
// tablesNeededForFKs calculates the IDs of the additional TableDescriptors that
// will be needed for FK checking delete and/or insert operations on `table`.
//
// NB: the returned map's values are *not* set -- higher level calling code, eg
// in planner, should fill the map's values by acquiring leases. This function
// is essentially just returning a slice of IDs, but the empty map can be filled
// in place and reused, avoiding a second allocation.
func tablesNeededForFKs(table sqlbase.TableDescriptor, usage FKCheck) tableLookupsByID {
	var ret tableLookupsByID
	for _, idx := range table.AllNonDropIndexes() {
		if usage != CheckDeletes && idx.ForeignKey.IsSet() {
			if ret == nil {
				ret = make(tableLookupsByID)
			}
			ret[idx.ForeignKey.Table] = tableLookup{}
		}
		if usage != CheckInserts {
			for _, ref := range idx.ReferencedBy {
				if ret == nil {
					ret = make(tableLookupsByID)
				}
				ret[ref.Table] = tableLookup{}
			}
		}
	}
	return ret
}
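The NB above describes a contract where the function returns only the keys and the caller fills the values in place. A minimal sketch of that contract, with the lease replaced by a placeholder string:

package main

import "fmt"

// tablesNeeded returns the set of table IDs that must be resolved; the values
// are deliberately left empty for the caller to fill.
func tablesNeeded() map[int]string {
	return map[int]string{51: "", 52: ""}
}

func main() {
	needed := tablesNeeded()
	for id := range needed {
		// The caller acquires whatever resource is required (a lease in the
		// real code) and stores it in the same map, avoiding a second
		// allocation.
		needed[id] = fmt.Sprintf("lease(%d)", id)
	}
	fmt.Println(needed)
}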
Example #12
func (sc *SchemaChanger) maybeWriteResumeSpan(
	txn *client.Txn,
	tableDesc *sqlbase.TableDescriptor,
	resume roachpb.Span,
	mutationIdx int,
	lastCheckpoint *time.Time,
) error {
	checkpointInterval := checkpointInterval
	if sc.testingKnobs.WriteCheckpointInterval > 0 {
		checkpointInterval = sc.testingKnobs.WriteCheckpointInterval
	}
	if timeutil.Since(*lastCheckpoint) < checkpointInterval {
		return nil
	}
	tableDesc.Mutations[mutationIdx].ResumeSpan = resume
	txn.SetSystemConfigTrigger()
	if err := txn.Put(sqlbase.MakeDescMetadataKey(tableDesc.GetID()),
		sqlbase.WrapDescriptor(tableDesc)); err != nil {
		return err
	}
	*lastCheckpoint = timeutil.Now()
	return nil
}
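maybeWriteResumeSpan throttles checkpoints by elapsed time: the write is skipped unless the interval has passed, and the caller-owned timestamp is advanced through the pointer. A self-contained sketch of that pattern, with the descriptor write replaced by a callback:

package main

import (
	"fmt"
	"time"
)

// checkpointInterval stands in for the package-level interval consulted above.
const checkpointInterval = 50 * time.Millisecond

// maybeCheckpoint skips the expensive write unless enough time has passed and
// advances the caller-owned timestamp through the pointer on success.
func maybeCheckpoint(lastCheckpoint *time.Time, write func() error) error {
	if time.Since(*lastCheckpoint) < checkpointInterval {
		return nil
	}
	if err := write(); err != nil {
		return err
	}
	*lastCheckpoint = time.Now()
	return nil
}

func main() {
	var last time.Time
	for i := 0; i < 3; i++ {
		_ = maybeCheckpoint(&last, func() error {
			fmt.Println("checkpoint written on iteration", i)
			return nil
		})
		time.Sleep(20 * time.Millisecond)
	}
	// Only the first iteration writes; the next two fall inside the interval.
}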
Example #13
func (p *planner) processColumns(
	tableDesc *sqlbase.TableDescriptor, node parser.UnresolvedNames,
) ([]sqlbase.ColumnDescriptor, error) {
	if node == nil {
		// VisibleColumns is used here to prevent INSERT INTO <table> VALUES (...)
		// (as opposed to INSERT INTO <table> (...) VALUES (...)) from writing
		// hidden columns. At present, the only hidden column is the implicit rowid
		// primary key column.
		return tableDesc.VisibleColumns(), nil
	}

	cols := make([]sqlbase.ColumnDescriptor, len(node))
	colIDSet := make(map[sqlbase.ColumnID]struct{}, len(node))
	for i, n := range node {
		c, err := n.NormalizeUnqualifiedColumnItem()
		if err != nil {
			return nil, err
		}

		if len(c.Selector) > 0 {
			return nil, util.UnimplementedWithIssueErrorf(8318, "compound types not supported yet: %q", n)
		}

		col, err := tableDesc.FindActiveColumnByName(c.ColumnName)
		if err != nil {
			return nil, err
		}

		if _, ok := colIDSet[col.ID]; ok {
			return nil, fmt.Errorf("multiple assignments to the same column %q", n)
		}
		colIDSet[col.ID] = struct{}{}
		cols[i] = col
	}

	return cols, nil
}
Example #14
func makeFKInsertHelper(
	txn *client.Txn,
	table sqlbase.TableDescriptor,
	otherTables tableLookupsByID,
	colMap map[sqlbase.ColumnID]int,
) (fkInsertHelper, error) {
	var fks fkInsertHelper
	for _, idx := range table.AllNonDropIndexes() {
		if idx.ForeignKey.IsSet() {
			fk, err := makeBaseFKHelper(txn, otherTables, idx, idx.ForeignKey, colMap)
			if err == errSkipUnusedFK {
				continue
			}
			if err != nil {
				return fks, err
			}
			if fks == nil {
				fks = make(fkInsertHelper)
			}
			fks[idx.ID] = append(fks[idx.ID], fk)
		}
	}
	return fks, nil
}
Example #15
// runBackfill runs the backfill for the schema changer.
func (sc *SchemaChanger) runBackfill(lease *sqlbase.TableDescriptor_SchemaChangeLease) error {
	if err := sc.ExtendLease(lease); err != nil {
		return err
	}

	// Mutations are applied in a FIFO order. Only apply the first set of
	// mutations. Collect the elements that are part of the mutation.
	var droppedColumnDescs []sqlbase.ColumnDescriptor
	var droppedIndexDescs []sqlbase.IndexDescriptor
	var addedColumnDescs []sqlbase.ColumnDescriptor
	var addedIndexDescs []sqlbase.IndexDescriptor
	// Indexes within the Mutations slice for checkpointing.
	mutationSentinel := -1
	columnMutationIdx := mutationSentinel
	addedIndexMutationIdx := mutationSentinel
	droppedIndexMutationIdx := mutationSentinel

	var tableDesc *sqlbase.TableDescriptor
	if err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		var err error
		tableDesc, err = sqlbase.GetTableDescFromID(txn, sc.tableID)
		return err
	}); err != nil {
		return err
	}
	// Short circuit the backfill if the table has been deleted.
	if tableDesc.Dropped() {
		return nil
	}
	version := tableDesc.Version
	for i, m := range tableDesc.Mutations {
		if m.MutationID != sc.mutationID {
			break
		}
		switch m.Direction {
		case sqlbase.DescriptorMutation_ADD:
			switch t := m.Descriptor_.(type) {
			case *sqlbase.DescriptorMutation_Column:
				addedColumnDescs = append(addedColumnDescs, *t.Column)
				if columnMutationIdx == mutationSentinel {
					columnMutationIdx = i
				}
			case *sqlbase.DescriptorMutation_Index:
				addedIndexDescs = append(addedIndexDescs, *t.Index)
				if addedIndexMutationIdx == mutationSentinel {
					addedIndexMutationIdx = i
				}
			default:
				return errors.Errorf("unsupported mutation: %+v", m)
			}

		case sqlbase.DescriptorMutation_DROP:
			switch t := m.Descriptor_.(type) {
			case *sqlbase.DescriptorMutation_Column:
				droppedColumnDescs = append(droppedColumnDescs, *t.Column)
				if columnMutationIdx == mutationSentinel {
					columnMutationIdx = i
				}
			case *sqlbase.DescriptorMutation_Index:
				droppedIndexDescs = append(droppedIndexDescs, *t.Index)
				if droppedIndexMutationIdx == mutationSentinel {
					droppedIndexMutationIdx = i
				}
			default:
				return errors.Errorf("unsupported mutation: %+v", m)
			}
		}
	}

	// First drop indexes, then add/drop columns, and only then add indexes.

	// Drop indexes.
	if err := sc.truncateIndexes(
		lease, version, droppedIndexDescs, droppedIndexMutationIdx,
	); err != nil {
		return err
	}

	// Add and drop columns.
	if err := sc.truncateAndBackfillColumns(
		lease, version, addedColumnDescs, droppedColumnDescs, columnMutationIdx,
	); err != nil {
		return err
	}

	// Add new indexes.
	if err := sc.backfillIndexes(
		lease, version, addedIndexDescs, addedIndexMutationIdx,
	); err != nil {
		return err
	}

	return nil
}
Example #16
// makeRowUpdater creates a rowUpdater for the given table.
//
// updateCols are the columns being updated and correspond to the updateValues
// that will be passed to updateRow.
//
// The returned rowUpdater contains a fetchCols field that defines the
// expectation of which values are passed as oldValues to updateRow. Any column
// passed in requestedCols will be included in fetchCols.
func makeRowUpdater(
	txn *client.Txn,
	tableDesc *sqlbase.TableDescriptor,
	fkTables tableLookupsByID,
	updateCols []sqlbase.ColumnDescriptor,
	requestedCols []sqlbase.ColumnDescriptor,
	updateType rowUpdaterType,
) (rowUpdater, error) {
	updateColIDtoRowIndex := colIDtoRowIndexFromCols(updateCols)

	primaryIndexCols := make(map[sqlbase.ColumnID]struct{}, len(tableDesc.PrimaryIndex.ColumnIDs))
	for _, colID := range tableDesc.PrimaryIndex.ColumnIDs {
		primaryIndexCols[colID] = struct{}{}
	}

	var primaryKeyColChange bool
	for _, c := range updateCols {
		if _, ok := primaryIndexCols[c.ID]; ok {
			primaryKeyColChange = true
			break
		}
	}

	// Secondary indexes needing updating.
	needsUpdate := func(index sqlbase.IndexDescriptor) bool {
		if updateType == rowUpdaterOnlyColumns {
			// Only update columns.
			return false
		}
		// If the primary key changed, we need to update all of them.
		if primaryKeyColChange {
			return true
		}
		for _, id := range index.ColumnIDs {
			if _, ok := updateColIDtoRowIndex[id]; ok {
				return true
			}
		}
		return false
	}

	indexes := make([]sqlbase.IndexDescriptor, 0, len(tableDesc.Indexes)+len(tableDesc.Mutations))
	for _, index := range tableDesc.Indexes {
		if needsUpdate(index) {
			indexes = append(indexes, index)
		}
	}

	var deleteOnlyIndex map[int]struct{}
	for _, m := range tableDesc.Mutations {
		if index := m.GetIndex(); index != nil {
			if needsUpdate(*index) {
				indexes = append(indexes, *index)

				switch m.State {
				case sqlbase.DescriptorMutation_DELETE_ONLY:
					if deleteOnlyIndex == nil {
						// Allocate at most once.
						deleteOnlyIndex = make(map[int]struct{}, len(tableDesc.Mutations))
					}
					deleteOnlyIndex[len(indexes)-1] = struct{}{}

				case sqlbase.DescriptorMutation_WRITE_ONLY:
				}
			}
		}
	}

	ru := rowUpdater{
		helper:                rowHelper{tableDesc: tableDesc, indexes: indexes},
		updateCols:            updateCols,
		updateColIDtoRowIndex: updateColIDtoRowIndex,
		deleteOnlyIndex:       deleteOnlyIndex,
		primaryKeyColChange:   primaryKeyColChange,
		marshalled:            make([]roachpb.Value, len(updateCols)),
		newValues:             make([]parser.Datum, len(tableDesc.Columns)+len(tableDesc.Mutations)),
	}

	if primaryKeyColChange {
		// These fields are only used when the primary key is changing.
		var err error
		// When changing the primary key, we delete the old values and reinsert
		// them, so request them all.
		if ru.rd, err = makeRowDeleter(txn, tableDesc, fkTables, tableDesc.Columns, skipFKs); err != nil {
			return rowUpdater{}, err
		}
		ru.fetchCols = ru.rd.fetchCols
		ru.fetchColIDtoRowIndex = colIDtoRowIndexFromCols(ru.fetchCols)
		if ru.ri, err = MakeRowInserter(txn, tableDesc, fkTables, tableDesc.Columns, skipFKs); err != nil {
			return rowUpdater{}, err
		}
	} else {
		ru.fetchCols = requestedCols[:len(requestedCols):len(requestedCols)]
		ru.fetchColIDtoRowIndex = colIDtoRowIndexFromCols(ru.fetchCols)

		maybeAddCol := func(colID sqlbase.ColumnID) error {
			if _, ok := ru.fetchColIDtoRowIndex[colID]; !ok {
				col, err := tableDesc.FindColumnByID(colID)
				if err != nil {
					return err
				}
				ru.fetchColIDtoRowIndex[col.ID] = len(ru.fetchCols)
				ru.fetchCols = append(ru.fetchCols, *col)
			}
			return nil
		}
		for _, colID := range tableDesc.PrimaryIndex.ColumnIDs {
			if err := maybeAddCol(colID); err != nil {
				return rowUpdater{}, err
			}
		}
		for _, fam := range tableDesc.Families {
			familyBeingUpdated := false
			for _, colID := range fam.ColumnIDs {
				if _, ok := ru.updateColIDtoRowIndex[colID]; ok {
					familyBeingUpdated = true
					break
				}
			}
			if familyBeingUpdated {
				for _, colID := range fam.ColumnIDs {
					if err := maybeAddCol(colID); err != nil {
						return rowUpdater{}, err
					}
				}
			}
		}
		for _, index := range indexes {
			for _, colID := range index.ColumnIDs {
				if err := maybeAddCol(colID); err != nil {
					return rowUpdater{}, err
				}
			}
		}
	}

	var err error
	if ru.fks, err = makeFKUpdateHelper(txn, *tableDesc, fkTables, ru.fetchColIDtoRowIndex); err != nil {
		return rowUpdater{}, err
	}
	return ru, nil
}
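The needsUpdate closure above decides which secondary indexes get rewritten: all of them when a primary-key column changes, otherwise only those covering an updated column. A minimal standalone sketch of that decision, with column IDs as plain ints:

package main

import "fmt"

// needsUpdate mirrors the closure above: rewrite an index if the primary key
// changed or the index covers a column that is being updated.
func needsUpdate(indexCols []int, updated map[int]bool, primaryKeyColChange bool) bool {
	if primaryKeyColChange {
		return true
	}
	for _, id := range indexCols {
		if updated[id] {
			return true
		}
	}
	return false
}

func main() {
	updated := map[int]bool{3: true}
	fmt.Println(needsUpdate([]int{1, 2}, updated, false)) // false: untouched index
	fmt.Println(needsUpdate([]int{2, 3}, updated, false)) // true: column 3 is updated
	fmt.Println(needsUpdate([]int{1, 2}, updated, true))  // true: PK change updates all
}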
Example #17
// restoreTable restores the given table into the given database. If the name
// conflicts with an existing table, the one being restored is rekeyed with a
// new ID and the old data is deleted.
func restoreTable(
	ctx context.Context,
	db client.DB,
	database sqlbase.DatabaseDescriptor,
	table *sqlbase.TableDescriptor,
	ranges []sqlbase.BackupRangeDescriptor,
) error {
	if log.V(1) {
		log.Infof(ctx, "Restoring Table %q", table.Name)
	}

	var newTableID sqlbase.ID
	if err := db.Txn(ctx, func(txn *client.Txn) error {
		// Make sure there's a database with a name that matches the original.
		if _, err := getDescriptorID(txn, tableKey{name: database.Name}); err != nil {
			return errors.Wrapf(err, "a database named %q needs to exist to restore table %q",
				database.Name, table.Name)
		}

		// Assign a new ID for the table. TODO(dan): For now, we're always
		// generating a new ID, but varints get longer as they get bigger and so
		// our keys will, too. We should someday figure out how to overwrite an
		// existing table and steal its ID.
		var err error
		newTableID, err = GenerateUniqueDescID(txn)
		return err
	}); err != nil {
		return err
	}

	// Create the iteration keys before we give the table its new ID.
	tableStartKeyOld := roachpb.Key(sqlbase.MakeIndexKeyPrefix(table, table.PrimaryIndex.ID))
	tableEndKeyOld := tableStartKeyOld.PrefixEnd()

	// This loop makes restoring multiple tables O(N*M), where N is the number
	// of tables and M is the number of ranges. We could reduce this using an
	// interval tree if necessary.
	var wg sync.WaitGroup
	result := struct {
		syncutil.Mutex
		firstErr error
		numErrs  int
	}{}
	for _, rangeDesc := range ranges {
		if len(rangeDesc.Path) == 0 {
			// Empty path means empty range.
			continue
		}

		intersectBegin, intersectEnd := IntersectHalfOpen(
			rangeDesc.StartKey, rangeDesc.EndKey, tableStartKeyOld, tableEndKeyOld)
		if intersectBegin != nil && intersectEnd != nil {
			// Write the data under the new ID.
			// TODO(dan): There's no SQL descriptors that point at this yet, so it
			// should be possible to remove it from the one txn this is all currently
			// run under. If we do that, make sure this data gets cleaned up on errors.
			wg.Add(1)
			go func(desc sqlbase.BackupRangeDescriptor) {
				for r := retry.StartWithCtx(ctx, base.DefaultRetryOptions()); r.Next(); {
					err := db.Txn(ctx, func(txn *client.Txn) error {
						return Ingest(ctx, txn, desc.Path, desc.CRC, intersectBegin, intersectEnd, newTableID)
					})
					if _, ok := err.(*client.AutoCommitError); ok {
						log.Errorf(ctx, "auto commit error during ingest: %s", err)
						// TODO(dan): Ingest currently does not rely on the
						// range being empty, but the plan is that it will. When
						// that change happens, this will have to delete any
						// partially ingested data or something.
						continue
					}

					if err != nil {
						log.Errorf(ctx, "%T %s", err, err)
						result.Lock()
						defer result.Unlock()
						if result.firstErr == nil {
							result.firstErr = err
						}
						result.numErrs++
					}
					break
				}
				wg.Done()
			}(rangeDesc)
		}
	}
	wg.Wait()
	// All concurrent accesses have finished, we don't need the lock anymore.
	if result.firstErr != nil {
		// This leaves the data that did get imported in case the user wants to
		// retry.
		// TODO(dan): Build tooling to allow a user to restart a failed restore.
		return errors.Wrapf(result.firstErr, "ingest encountered %d errors", result.numErrs)
	}

	table.ID = newTableID
	return db.Txn(ctx, func(txn *client.Txn) error {
		// Pass the descriptors by value to keep this idempotent.
		return restoreTableDesc(ctx, txn, database, *table)
	})
}
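restoreTable fans work out to one goroutine per intersecting range and collects at most the first error (plus an error count) under a mutex, checking the result only after wg.Wait(). A self-contained sketch of that pattern, with the ingest step faked:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	result := struct {
		sync.Mutex
		firstErr error
		numErrs  int
	}{}

	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if i%2 == 1 { // pretend the odd workers fail
				result.Lock()
				defer result.Unlock()
				if result.firstErr == nil {
					result.firstErr = fmt.Errorf("worker %d failed", i)
				}
				result.numErrs++
			}
		}(i)
	}
	wg.Wait()
	// All goroutines have finished, so the lock is no longer needed.
	if result.firstErr != nil {
		fmt.Printf("encountered %d errors, first: %v\n", result.numErrs, result.firstErr)
	}
}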
Example #18
// RenameTable renames the table or view.
// Privileges: DROP on source table/view, CREATE on destination database.
//   Notes: postgres requires the table owner.
//          mysql requires ALTER, DROP on the original table, and CREATE, INSERT
//          on the new table (and does not copy privileges over).
func (p *planner) RenameTable(n *parser.RenameTable) (planNode, error) {
	oldTn, err := n.Name.NormalizeWithDatabaseName(p.session.Database)
	if err != nil {
		return nil, err
	}
	newTn, err := n.NewName.NormalizeWithDatabaseName(p.session.Database)
	if err != nil {
		return nil, err
	}

	dbDesc, err := p.mustGetDatabaseDesc(oldTn.Database())
	if err != nil {
		return nil, err
	}

	// Check if source table or view exists.
	// Note that Postgres's behavior here is a little lenient - it'll let you
	// modify views by running ALTER TABLE, but won't let you modify tables
	// by running ALTER VIEW. Our behavior is strict for now, but can be
	// made more lenient down the road if needed.
	var tableDesc *sqlbase.TableDescriptor
	if n.IsView {
		tableDesc, err = p.getViewDesc(oldTn)
		if err != nil {
			return nil, err
		}
		if tableDesc == nil {
			if n.IfExists {
				// Noop.
				return &emptyNode{}, nil
			}
			// Key does not exist, but we want it to: error out.
			return nil, sqlbase.NewUndefinedViewError(oldTn.String())
		}
		if tableDesc.State != sqlbase.TableDescriptor_PUBLIC {
			return nil, sqlbase.NewUndefinedViewError(oldTn.String())
		}
	} else {
		tableDesc, err = p.getTableDesc(oldTn)
		if err != nil {
			return nil, err
		}
		if tableDesc == nil {
			if n.IfExists {
				// Noop.
				return &emptyNode{}, nil
			}
			// Key does not exist, but we want it to: error out.
			return nil, sqlbase.NewUndefinedTableError(oldTn.String())
		}
		if tableDesc.State != sqlbase.TableDescriptor_PUBLIC {
			return nil, sqlbase.NewUndefinedTableError(oldTn.String())
		}
	}

	if err := p.checkPrivilege(tableDesc, privilege.DROP); err != nil {
		return nil, err
	}

	// Check if any views depend on this table/view. Because our views
	// are currently just stored as strings, they explicitly specify the name
	// of everything they depend on. Rather than trying to rewrite the view's
	// query with the new name, we simply disallow such renames for now.
	if len(tableDesc.DependedOnBy) > 0 {
		return nil, p.dependentViewRenameError(
			tableDesc.TypeName(), oldTn.String(), tableDesc.ParentID, tableDesc.DependedOnBy[0].ID)
	}

	// Check if target database exists.
	targetDbDesc, err := p.mustGetDatabaseDesc(newTn.Database())
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(targetDbDesc, privilege.CREATE); err != nil {
		return nil, err
	}

	// oldTn and newTn are already normalized, so we can compare directly here.
	if oldTn.Database() == newTn.Database() && oldTn.Table() == newTn.Table() {
		// Noop.
		return &emptyNode{}, nil
	}

	tableDesc.SetName(newTn.Table())
	tableDesc.ParentID = targetDbDesc.ID

	descKey := sqlbase.MakeDescMetadataKey(tableDesc.GetID())
	newTbKey := tableKey{targetDbDesc.ID, newTn.Table()}.Key()

	if err := tableDesc.Validate(p.txn); err != nil {
		return nil, err
	}

	descID := tableDesc.GetID()
	descDesc := sqlbase.WrapDescriptor(tableDesc)

	if err := tableDesc.SetUpVersion(); err != nil {
		return nil, err
	}
	renameDetails := sqlbase.TableDescriptor_RenameInfo{
		OldParentID: dbDesc.ID,
		OldName:     oldTn.Table()}
	tableDesc.Renames = append(tableDesc.Renames, renameDetails)
	if err := p.writeTableDesc(tableDesc); err != nil {
		return nil, err
	}

	// We update the descriptor to the new name, but also leave the mapping of the
	// old name to the id, so that the name is not reused until the schema changer
	// has made sure it's not in use any more.
	b := &client.Batch{}
	b.Put(descKey, descDesc)
	b.CPut(newTbKey, descID, nil)

	if err := p.txn.Run(b); err != nil {
		if _, ok := err.(*roachpb.ConditionFailedError); ok {
			return nil, sqlbase.NewRelationAlreadyExistsError(newTn.Table())
		}
		return nil, err
	}
	p.notifySchemaChange(tableDesc.ID, sqlbase.InvalidMutationID)

	p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
		if err := expectDescriptorID(systemConfig, newTbKey, descID); err != nil {
			return err
		}
		if err := expectDescriptor(systemConfig, descKey, descDesc); err != nil {
			return err
		}
		return nil
	})

	return &emptyNode{}, nil
}