Example #1
// AcquireLease acquires a schema change lease on the table if
// an unexpired lease doesn't exist. It returns the lease.
func (sc *SchemaChanger) AcquireLease() (sqlbase.TableDescriptor_SchemaChangeLease, error) {
	var lease sqlbase.TableDescriptor_SchemaChangeLease
	err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		txn.SetSystemConfigTrigger()
		tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
		if err != nil {
			return err
		}

		// Pad the expiration time by a second to account for clock uncertainty
		// across nodes.
		// It is perfectly valid for two or more goroutines to hold a valid
		// lease and execute a schema change in parallel, because schema
		// changes are executed using transactions that run sequentially.
		// This just reduces the probability of a write collision.
		expirationTimeUncertainty := time.Second

		if tableDesc.Lease != nil {
			if time.Unix(0, tableDesc.Lease.ExpirationTime).Add(expirationTimeUncertainty).After(timeutil.Now()) {
				return errExistingSchemaChangeLease
			}
			log.Infof(txn.Context, "Overriding existing expired lease %v", tableDesc.Lease)
		}
		lease = sc.createSchemaChangeLease()
		tableDesc.Lease = &lease
		return txn.Put(sqlbase.MakeDescMetadataKey(tableDesc.ID), sqlbase.WrapDescriptor(tableDesc))
	})
	return lease, err
}
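Usage note: AcquireLease above is only half of the lifecycle. A minimal sketch of how a caller might drive it is shown below; ExtendLease and runBackfill appear later in this listing (examples #12 and #14), while ReleaseLease is an assumed counterpart that is not shown here.

// Sketch only: acquire the lease, keep it fresh during the work, and release
// it afterwards. ReleaseLease is assumed and not part of this listing.
func runSchemaChangeWithLease(sc *SchemaChanger) error {
	lease, err := sc.AcquireLease()
	if err != nil {
		return err
	}
	defer func() {
		// Assumed API: release the lease even if the backfill fails.
		if err := sc.ReleaseLease(lease); err != nil {
			log.Warningf(context.TODO(), "error releasing schema change lease: %v", err)
		}
	}()
	// runBackfill (example #14) extends the lease as it goes; pass a pointer so
	// extensions propagate back to this copy.
	return sc.runBackfill(&lease)
}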
Example #2
// getTableSpan returns a span stored at a checkpoint idx, or in the absence
// of a checkpoint, the span over all keys within a table.
func (sc *SchemaChanger) getTableSpan(mutationIdx int) (roachpb.Span, error) {
	var tableDesc *sqlbase.TableDescriptor
	if err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		var err error
		tableDesc, err = sqlbase.GetTableDescFromID(txn, sc.tableID)
		return err
	}); err != nil {
		return roachpb.Span{}, err
	}
	if len(tableDesc.Mutations) <= mutationIdx {
		return roachpb.Span{},
			errors.Errorf("cannot find idx %d among %d mutations", mutationIdx, len(tableDesc.Mutations))
	}
	if mutationID := tableDesc.Mutations[mutationIdx].MutationID; mutationID != sc.mutationID {
		return roachpb.Span{},
			errors.Errorf("mutation index pointing to the wrong schema change, %d vs expected %d", mutationID, sc.mutationID)
	}
	resumeSpan := tableDesc.Mutations[mutationIdx].ResumeSpan
	if resumeSpan.Key != nil {
		return resumeSpan, nil
	}
	prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
	return roachpb.Span{
		Key:    prefix,
		EndKey: prefix.PrefixEnd(),
	}, nil
}
Example #3
func (sc *SchemaChanger) maybeWriteResumeSpan(
	txn *client.Txn,
	version sqlbase.DescriptorVersion,
	resume roachpb.Span,
	mutationIdx int,
	lastCheckpoint *time.Time,
) error {
	checkpointInterval := checkpointInterval
	if sc.testingKnobs.WriteCheckpointInterval > 0 {
		checkpointInterval = sc.testingKnobs.WriteCheckpointInterval
	}
	if timeutil.Since(*lastCheckpoint) < checkpointInterval {
		return nil
	}
	tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
	if err != nil {
		return err
	}
	if tableDesc.Version != version {
		return errVersionMismatch
	}
	tableDesc.Mutations[mutationIdx].ResumeSpan = resume
	txn.SetSystemConfigTrigger()
	if err := txn.Put(sqlbase.MakeDescMetadataKey(tableDesc.GetID()),
		sqlbase.WrapDescriptor(tableDesc)); err != nil {
		return err
	}
	*lastCheckpoint = timeutil.Now()
	return nil
}
Example #4
// indexDefFromDescriptor creates an index definition (`CREATE INDEX ... ON (...)`) from
// an index descriptor by reconstructing a CreateIndex parser node and calling its
// String method.
func indexDefFromDescriptor(
	p *planner,
	db *sqlbase.DatabaseDescriptor,
	table *sqlbase.TableDescriptor,
	index *sqlbase.IndexDescriptor,
) (string, error) {
	indexDef := parser.CreateIndex{
		Name: parser.Name(index.Name),
		Table: parser.NormalizableTableName{
			TableNameReference: &parser.TableName{
				DatabaseName: parser.Name(db.Name),
				TableName:    parser.Name(table.Name),
			},
		},
		Unique:  index.Unique,
		Columns: make(parser.IndexElemList, len(index.ColumnNames)),
		Storing: make(parser.NameList, len(index.StoreColumnNames)),
	}
	for i, name := range index.ColumnNames {
		elem := parser.IndexElem{
			Column:    parser.Name(name),
			Direction: parser.Ascending,
		}
		if index.ColumnDirections[i] == sqlbase.IndexDescriptor_DESC {
			elem.Direction = parser.Descending
		}
		indexDef.Columns[i] = elem
	}
	for i, name := range index.StoreColumnNames {
		indexDef.Storing[i] = parser.Name(name)
	}
	if len(index.Interleave.Ancestors) > 0 {
		intl := index.Interleave
		parentTable, err := sqlbase.GetTableDescFromID(p.txn, intl.Ancestors[len(intl.Ancestors)-1].TableID)
		if err != nil {
			return "", err
		}
		var sharedPrefixLen int
		for _, ancestor := range intl.Ancestors {
			sharedPrefixLen += int(ancestor.SharedPrefixLen)
		}
		fields := index.ColumnNames[:sharedPrefixLen]
		intlDef := &parser.InterleaveDef{
			Parent: parser.NormalizableTableName{
				TableNameReference: &parser.TableName{
					TableName: parser.Name(parentTable.Name),
				},
			},
			Fields: make(parser.NameList, len(fields)),
		}
		for i, field := range fields {
			intlDef.Fields[i] = parser.Name(field)
		}
		indexDef.Interleave = intlDef
	}
	return indexDef.String(), nil
}
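As a rough illustration of the result (exact formatting and quoting come from the parser's String methods, so this is indicative only), a unique index named idx on db.t over columns a ascending and b descending, storing c and interleaved into parent on a, would render along the lines of:

CREATE UNIQUE INDEX idx ON db.t (a ASC, b DESC) STORING (c) INTERLEAVE IN PARENT parent (a)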
Example #5
func (sc *SchemaChanger) findTableWithLease(
	txn *client.Txn, lease sqlbase.TableDescriptor_SchemaChangeLease,
) (*sqlbase.TableDescriptor, error) {
	tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
	if err != nil {
		return nil, err
	}
	if tableDesc.Lease == nil {
		return nil, errors.Errorf("no lease present for tableID: %d", sc.tableID)
	}
	if *tableDesc.Lease != lease {
		return nil, errors.Errorf("table: %d has lease: %v, expected: %v", sc.tableID, tableDesc.Lease, lease)
	}
	return tableDesc, nil
}
Example #6
// getTableLeaseByID is a by-ID variant of getTableLease (i.e. uses same cache).
func (p *planner) getTableLeaseByID(tableID sqlbase.ID) (*sqlbase.TableDescriptor, error) {
	if log.V(2) {
		log.Infof(p.ctx(), "planner acquiring lease on table ID %d", tableID)
	}

	if testDisableTableLeases {
		table, err := sqlbase.GetTableDescFromID(p.txn, tableID)
		if err != nil {
			return nil, err
		}
		if err := filterTableState(table); err != nil {
			return nil, err
		}
		return table, nil
	}

	// First, look to see if we already have a lease for this table -- including
	// leases acquired via `getTableLease`.
	var lease *LeaseState
	for _, l := range p.leases {
		if l.ID == tableID {
			lease = l
			if log.V(2) {
				log.Infof(p.ctx(), "found lease in planner cache for table %d", tableID)
			}
			break
		}
	}

	// If we didn't find a lease or the lease is about to expire, acquire one.
	if lease == nil || p.removeLeaseIfExpiring(lease) {
		var err error
		lease, err = p.leaseMgr.Acquire(p.txn, tableID, 0)
		if err != nil {
			if err == sqlbase.ErrDescriptorNotFound {
				// Transform the descriptor error into an error that references the
				// table's ID.
				return nil, sqlbase.NewUndefinedTableError(fmt.Sprintf("<id=%d>", tableID))
			}
			return nil, err
		}
		p.leases = append(p.leases, lease)
		// If the lease we just acquired expires before the txn's deadline, reduce
		// the deadline.
		p.txn.UpdateDeadlineMaybe(hlc.Timestamp{WallTime: lease.Expiration().UnixNano()})
	}
	return &lease.TableDescriptor, nil
}
Example #7
// showCreateInterleave returns an INTERLEAVE IN PARENT clause for the specified
// index, if applicable.
func (p *planner) showCreateInterleave(idx *sqlbase.IndexDescriptor) (string, error) {
	if len(idx.Interleave.Ancestors) == 0 {
		return "", nil
	}
	intl := idx.Interleave
	parentTable, err := sqlbase.GetTableDescFromID(p.txn, intl.Ancestors[len(intl.Ancestors)-1].TableID)
	if err != nil {
		return "", err
	}
	var sharedPrefixLen int
	for _, ancestor := range intl.Ancestors {
		sharedPrefixLen += int(ancestor.SharedPrefixLen)
	}
	interleavedColumnNames := quoteNames(idx.ColumnNames[:sharedPrefixLen]...)
	s := fmt.Sprintf(" INTERLEAVE IN PARENT %s (%s)", parentTable.Name, interleavedColumnNames)
	return s, nil
}
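For illustration, with a single ancestor table named parent and a shared prefix covering columns a and b, the returned clause looks roughly like the following (column quoting as produced by quoteNames is elided):

INTERLEAVE IN PARENT parent (a, b)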
Example #8
// TODO(a-robinson): Support renaming objects depended on by views once we have
// a better encoding for view queries (#10083).
func (p *planner) dependentViewRenameError(
	typeName, objName string, parentID, viewID sqlbase.ID,
) error {
	viewDesc, err := sqlbase.GetTableDescFromID(p.txn, viewID)
	if err != nil {
		return err
	}
	viewName := viewDesc.Name
	if viewDesc.ParentID != parentID {
		var err error
		viewName, err = p.getQualifiedTableName(viewDesc)
		if err != nil {
			log.Warningf(p.ctx(), "unable to retrieve name of view %d: %v", viewID, err)
			return sqlbase.NewDependentObjectError("cannot rename %s %q because a view depends on it",
				typeName, objName)
		}
	}
	return sqlbase.NewDependentObjectError("cannot rename %s %q because view %q depends on it",
		typeName, objName, viewName)
}
Example #9
// IsDone returns true if the work scheduled for the schema changer
// is complete.
func (sc *SchemaChanger) IsDone() (bool, error) {
	var done bool
	err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		done = true
		tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
		if err != nil {
			return err
		}
		if sc.mutationID == sqlbase.InvalidMutationID {
			if tableDesc.UpVersion {
				done = false
			}
		} else {
			for _, mutation := range tableDesc.Mutations {
				if mutation.MutationID == sc.mutationID {
					done = false
					break
				}
			}
		}
		return nil
	})
	return done, err
}
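A minimal sketch of how a caller might wait on this, assuming nothing beyond what is shown above; the fixed one-second sleep is purely illustrative (a real scheduler would presumably react to gossip or use a backoff policy).

// Sketch only: poll IsDone until the scheduled schema change work completes.
func waitForSchemaChange(sc *SchemaChanger) error {
	for {
		done, err := sc.IsDone()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(time.Second) // illustrative polling interval only
	}
}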
Example #10
func (p *planner) validateForeignKey(
	ctx context.Context, srcTable *sqlbase.TableDescriptor, srcIdx *sqlbase.IndexDescriptor,
) error {
	targetTable, err := sqlbase.GetTableDescFromID(p.txn, srcIdx.ForeignKey.Table)
	if err != nil {
		return err
	}
	targetIdx, err := targetTable.FindIndexByID(srcIdx.ForeignKey.Index)
	if err != nil {
		return err
	}

	srcName, err := p.getQualifiedTableName(srcTable)
	if err != nil {
		return err
	}

	targetName, err := p.getQualifiedTableName(targetTable)
	if err != nil {
		return err
	}

	escape := func(s string) string {
		return parser.Name(s).String()
	}

	prefix := len(srcIdx.ColumnNames)
	if p := len(targetIdx.ColumnNames); p < prefix {
		prefix = p
	}

	srcCols, targetCols := make([]string, prefix), make([]string, prefix)
	join, where := make([]string, prefix), make([]string, prefix)

	for i := 0; i < prefix; i++ {
		srcCols[i] = fmt.Sprintf("s.%s", escape(srcIdx.ColumnNames[i]))
		targetCols[i] = fmt.Sprintf("t.%s", escape(targetIdx.ColumnNames[i]))
		join[i] = fmt.Sprintf("(%s = %s OR (%s IS NULL AND %s IS NULL))",
			srcCols[i], targetCols[i], srcCols[i], targetCols[i])
		where[i] = fmt.Sprintf("(%s IS NOT NULL AND %s IS NULL)", srcCols[i], targetCols[i])
	}

	query := fmt.Sprintf(
		`SELECT %s FROM %s@%s AS s LEFT OUTER JOIN %s@%s AS t ON %s WHERE %s LIMIT 1`,
		strings.Join(srcCols, ", "),
		srcName, escape(srcIdx.Name), targetName, escape(targetIdx.Name),
		strings.Join(join, " AND "),
		strings.Join(where, " OR "),
	)

	log.Infof(ctx, "Validating FK %q (%q [%v] -> %q [%v]) with query %q",
		srcIdx.ForeignKey.Name,
		srcTable.Name, srcCols, targetTable.Name, targetCols,
		query,
	)

	values, err := p.queryRows(query)
	if err != nil {
		return err
	}

	if len(values) > 0 {
		var pairs bytes.Buffer
		for i := range values[0] {
			if i > 0 {
				pairs.WriteString(", ")
			}
			pairs.WriteString(fmt.Sprintf("%s=%v", srcIdx.ColumnNames[i], values[0][i]))
		}
		return errors.Errorf("foreign key violation: %q row %s has no match in %q",
			srcTable.Name, pairs.String(), targetTable.Name)
	}
	return nil
}
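To make the generated statement concrete: for a hypothetical two-column foreign key (a, b) on src, using index fk_idx, that references columns (x, y) covered by index primary on tgt, the query built above takes this shape (identifier quoting elided, line breaks added for readability):

SELECT s.a, s.b
FROM src@fk_idx AS s LEFT OUTER JOIN tgt@primary AS t
  ON (s.a = t.x OR (s.a IS NULL AND t.x IS NULL)) AND (s.b = t.y OR (s.b IS NULL AND t.y IS NULL))
WHERE (s.a IS NOT NULL AND t.x IS NULL) OR (s.b IS NOT NULL AND t.y IS NULL)
LIMIT 1

Any row it returns is a source row with at least one non-NULL referencing column that found no match on the target side, which is exactly what the function reports as a foreign key violation.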
Example #11
// backfillIndexesChunk returns the next key, a done flag, and an error. The
// next key and done flag are invalid if the error is non-nil; the next key is
// invalid if done is true.
func (sc *SchemaChanger) backfillIndexesChunk(
	added []sqlbase.IndexDescriptor,
	sp roachpb.Span,
	chunkSize int64,
	mutationIdx int,
	lastCheckpoint *time.Time,
) (roachpb.Key, bool, error) {
	var nextKey roachpb.Key
	done := false
	secondaryIndexEntries := make([]sqlbase.IndexEntry, len(added))
	err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		if sc.testingKnobs.RunBeforeBackfillChunk != nil {
			if err := sc.testingKnobs.RunBeforeBackfillChunk(sp); err != nil {
				return err
			}
		}
		if sc.testingKnobs.RunAfterBackfillChunk != nil {
			defer sc.testingKnobs.RunAfterBackfillChunk()
		}

		tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
		if err != nil {
			return err
		}
		// Short circuit the backfill if the table has been deleted.
		if done = tableDesc.Dropped(); done {
			return nil
		}

		// Get the next set of rows.
		// TODO(tamird): Support partial indexes?
		//
		// Use a scanNode with SELECT to pass in a sqlbase.TableDescriptor
		// to the SELECT without needing to go through table name
		// resolution, because we want to run schema changes from a gossip
		// feed of table IDs. Running the scan and applying the changes in
		// many transactions is fine because the schema change is in the
		// correct state to handle intermediate OLTP commands which delete
		// and add values during the scan.
		planner := makePlanner("backfill")
		planner.setTxn(txn)
		scan := planner.Scan()
		scan.desc = *tableDesc
		scan.spans = []roachpb.Span{sp}
		scan.SetLimitHint(chunkSize, false)
		scan.initDescDefaults(publicAndNonPublicColumns)
		rows, err := selectIndex(scan, nil, false)
		if err != nil {
			return err
		}

		if err := rows.Start(); err != nil {
			return err
		}

		// Construct a map from column ID to the index the value appears at
		// within a row.
		colIDtoRowIndex, err := makeColIDtoRowIndex(rows, tableDesc)
		if err != nil {
			return err
		}
		b := &client.Batch{}
		numRows := int64(0)
		for ; numRows < chunkSize; numRows++ {
			if next, err := rows.Next(); !next {
				if err != nil {
					return err
				}
				break
			}
			rowVals := rows.Values()

			err := sqlbase.EncodeSecondaryIndexes(
				tableDesc, added, colIDtoRowIndex,
				rowVals, secondaryIndexEntries)
			if err != nil {
				return err
			}
			for _, secondaryIndexEntry := range secondaryIndexEntries {
				if log.V(2) {
					log.Infof(txn.Context, "InitPut %s -> %v", secondaryIndexEntry.Key,
						secondaryIndexEntry.Value)
				}
				b.InitPut(secondaryIndexEntry.Key, &secondaryIndexEntry.Value)
			}
		}
		// Write the new index values.
		if err := txn.Run(b); err != nil {
			return convertBackfillError(tableDesc, b)
		}
		// Have we processed all the table rows?
		if done = numRows < chunkSize; done {
			return nil
		}
		// Keep track of the next key.
		resume := roachpb.Span{Key: scan.fetcher.Key(), EndKey: sp.EndKey}
		if err := sc.maybeWriteResumeSpan(txn, tableDesc, resume, mutationIdx, lastCheckpoint); err != nil {
			return err
		}
		nextKey = resume.Key
		return nil
	})
	return nextKey, done, err
}
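The chunk API above implies a driving loop along the following lines. The real backfillIndexes helper is not part of this listing, so this is only a sketch under assumptions; the lease extension mirrors truncateIndexes in example #12.

// Sketch only: drive backfillIndexesChunk until it reports done, starting from
// the checkpointed span and advancing the start key after each chunk.
func (sc *SchemaChanger) backfillIndexesSketch(
	lease *sqlbase.TableDescriptor_SchemaChangeLease,
	added []sqlbase.IndexDescriptor,
	chunkSize int64,
	mutationIdx int,
) error {
	sp, err := sc.getTableSpan(mutationIdx)
	if err != nil {
		return err
	}
	lastCheckpoint := timeutil.Now()
	for done := false; !done; {
		// Keep the schema change lease alive across a long backfill.
		l, err := sc.ExtendLease(*lease)
		if err != nil {
			return err
		}
		*lease = l
		var nextKey roachpb.Key
		nextKey, done, err = sc.backfillIndexesChunk(added, sp, chunkSize, mutationIdx, &lastCheckpoint)
		if err != nil {
			return err
		}
		// Resume the next chunk where this one left off.
		sp.Key = nextKey
	}
	return nil
}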
Example #12
func (sc *SchemaChanger) truncateIndexes(
	lease *sqlbase.TableDescriptor_SchemaChangeLease,
	dropped []sqlbase.IndexDescriptor,
	mutationIdx int,
) error {
	chunkSize := sc.getChunkSize(indexTruncateChunkSize)
	if sc.testingKnobs.BackfillChunkSize > 0 {
		chunkSize = sc.testingKnobs.BackfillChunkSize
	}
	for _, desc := range dropped {
		var resume roachpb.Span
		lastCheckpoint := timeutil.Now()
		for row, done := int64(0), false; !done; row += chunkSize {
			// First extend the schema change lease.
			l, err := sc.ExtendLease(*lease)
			if err != nil {
				return err
			}
			*lease = l

			resumeAt := resume
			if log.V(2) {
				log.Infof(context.TODO(), "drop index (%d, %d) at row: %d, span: %s",
					sc.tableID, sc.mutationID, row, resume)
			}
			if err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
				if sc.testingKnobs.RunBeforeBackfillChunk != nil {
					if err := sc.testingKnobs.RunBeforeBackfillChunk(resume); err != nil {
						return err
					}
				}
				if sc.testingKnobs.RunAfterBackfillChunk != nil {
					defer sc.testingKnobs.RunAfterBackfillChunk()
				}

				tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
				if err != nil {
					return err
				}
				// Short circuit the truncation if the table has been deleted.
				if done = tableDesc.Dropped(); done {
					return nil
				}

				rd, err := makeRowDeleter(txn, tableDesc, nil, nil, false)
				if err != nil {
					return err
				}
				td := tableDeleter{rd: rd}
				if err := td.init(txn); err != nil {
					return err
				}
				resume, err = td.deleteIndex(
					txn.Context, &desc, resumeAt, chunkSize,
				)
				if err != nil {
					return err
				}
				if err := sc.maybeWriteResumeSpan(txn, tableDesc, resume, mutationIdx, &lastCheckpoint); err != nil {
					return err
				}
				done = resume.Key == nil
				return nil
			}); err != nil {
				return err
			}
		}
	}
	return nil
}
Example #13
// truncateAndBackfillColumnsChunk returns the next key, a done flag, and an
// error. The next key and done flag are invalid if the error is non-nil; the
// next key is invalid if done is true.
func (sc *SchemaChanger) truncateAndBackfillColumnsChunk(
	added []sqlbase.ColumnDescriptor,
	dropped []sqlbase.ColumnDescriptor,
	defaultExprs []parser.TypedExpr,
	sp roachpb.Span,
	updateValues parser.DTuple,
	nonNullViolationColumnName string,
	chunkSize int64,
	mutationIdx int,
	lastCheckpoint *time.Time,
) (roachpb.Key, bool, error) {
	done := false
	var nextKey roachpb.Key
	err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		if sc.testingKnobs.RunBeforeBackfillChunk != nil {
			if err := sc.testingKnobs.RunBeforeBackfillChunk(sp); err != nil {
				return err
			}
		}
		if sc.testingKnobs.RunAfterBackfillChunk != nil {
			defer sc.testingKnobs.RunAfterBackfillChunk()
		}

		tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
		if err != nil {
			return err
		}
		// Short circuit the backfill if the table has been deleted.
		if done = tableDesc.Dropped(); done {
			return nil
		}

		updateCols := append(added, dropped...)
		fkTables := tablesNeededForFKs(*tableDesc, CheckUpdates)
		for k := range fkTables {
			table, err := sqlbase.GetTableDescFromID(txn, k)
			if err != nil {
				return err
			}
			fkTables[k] = tableLookup{table: table}
		}
		// TODO(dan): Tighten up the bound on the requestedCols parameter to
		// makeRowUpdater.
		requestedCols := make([]sqlbase.ColumnDescriptor, 0, len(tableDesc.Columns)+len(added))
		requestedCols = append(requestedCols, tableDesc.Columns...)
		requestedCols = append(requestedCols, added...)
		ru, err := makeRowUpdater(
			txn, tableDesc, fkTables, updateCols, requestedCols, rowUpdaterOnlyColumns,
		)
		if err != nil {
			return err
		}

		// TODO(dan): This check is an unfortunate bleeding of the internals of
		// rowUpdater. Extract the sql row to k/v mapping logic out into something
		// usable here.
		if !ru.isColumnOnlyUpdate() {
			panic("only column data should be modified, but the rowUpdater is configured otherwise")
		}

		// Run a scan across the table using the primary key. Running
		// the scan and applying the changes in many transactions is
		// fine because the schema change is in the correct state to
		// handle intermediate OLTP commands which delete and add
		// values during the scan.
		var rf sqlbase.RowFetcher
		colIDtoRowIndex := colIDtoRowIndexFromCols(tableDesc.Columns)
		valNeededForCol := make([]bool, len(tableDesc.Columns))
		for i := range valNeededForCol {
			_, valNeededForCol[i] = ru.fetchColIDtoRowIndex[tableDesc.Columns[i].ID]
		}
		if err := rf.Init(
			tableDesc, colIDtoRowIndex, &tableDesc.PrimaryIndex, false, false,
			tableDesc.Columns, valNeededForCol,
		); err != nil {
			return err
		}
		if err := rf.StartScan(
			txn, roachpb.Spans{sp}, true /* limit batches */, chunkSize,
		); err != nil {
			return err
		}

		oldValues := make(parser.DTuple, len(ru.fetchCols))
		writeBatch := txn.NewBatch()
		rowLength := 0
		var lastRowSeen parser.DTuple
		i := int64(0)
		for ; i < chunkSize; i++ {
			row, err := rf.NextRow()
			if err != nil {
				return err
			}
			if row == nil {
				break
			}
			lastRowSeen = row
			if nonNullViolationColumnName != "" {
				return sqlbase.NewNonNullViolationError(nonNullViolationColumnName)
			}

			copy(oldValues, row)
			// Update oldValues with NULL values where values weren't found;
			// only update when necessary.
			if rowLength != len(row) {
				rowLength = len(row)
				for j := rowLength; j < len(oldValues); j++ {
					oldValues[j] = parser.DNull
				}
			}
			if _, err := ru.updateRow(txn.Context, writeBatch, oldValues, updateValues); err != nil {
				return err
			}
		}
		if err := txn.Run(writeBatch); err != nil {
			return convertBackfillError(tableDesc, writeBatch)
		}
		if done = i < chunkSize; done {
			return nil
		}
		curIndexKey, _, err := sqlbase.EncodeIndexKey(
			tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, lastRowSeen,
			sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
		if err != nil {
			return err
		}
		resume := roachpb.Span{Key: roachpb.Key(curIndexKey).PrefixEnd(), EndKey: sp.EndKey}
		if err := sc.maybeWriteResumeSpan(txn, tableDesc, resume, mutationIdx, lastCheckpoint); err != nil {
			return err
		}
		nextKey = resume.Key
		return nil
	})
	return nextKey, done, err
}
Example #14
// runBackfill runs the backfill for the schema changer.
func (sc *SchemaChanger) runBackfill(lease *sqlbase.TableDescriptor_SchemaChangeLease) error {
	l, err := sc.ExtendLease(*lease)
	if err != nil {
		return err
	}
	*lease = l

	// Mutations are applied in a FIFO order. Only apply the first set of
	// mutations. Collect the elements that are part of the mutation.
	var droppedColumnDescs []sqlbase.ColumnDescriptor
	var droppedIndexDescs []sqlbase.IndexDescriptor
	var addedColumnDescs []sqlbase.ColumnDescriptor
	var addedIndexDescs []sqlbase.IndexDescriptor
	// Indexes within the Mutations slice for checkpointing.
	mutationSentinel := -1
	columnMutationIdx := mutationSentinel
	addedIndexMutationIdx := mutationSentinel
	droppedIndexMutationIdx := mutationSentinel

	var tableDesc *sqlbase.TableDescriptor
	if err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		tableDesc, err = sqlbase.GetTableDescFromID(txn, sc.tableID)
		return err
	}); err != nil {
		return err
	}

	for i, m := range tableDesc.Mutations {
		if m.MutationID != sc.mutationID {
			break
		}
		switch m.Direction {
		case sqlbase.DescriptorMutation_ADD:
			switch t := m.Descriptor_.(type) {
			case *sqlbase.DescriptorMutation_Column:
				addedColumnDescs = append(addedColumnDescs, *t.Column)
				if columnMutationIdx == mutationSentinel {
					columnMutationIdx = i
				}
			case *sqlbase.DescriptorMutation_Index:
				addedIndexDescs = append(addedIndexDescs, *t.Index)
				if addedIndexMutationIdx == mutationSentinel {
					addedIndexMutationIdx = i
				}
			default:
				return errors.Errorf("unsupported mutation: %+v", m)
			}

		case sqlbase.DescriptorMutation_DROP:
			switch t := m.Descriptor_.(type) {
			case *sqlbase.DescriptorMutation_Column:
				droppedColumnDescs = append(droppedColumnDescs, *t.Column)
				if columnMutationIdx == mutationSentinel {
					columnMutationIdx = i
				}
			case *sqlbase.DescriptorMutation_Index:
				droppedIndexDescs = append(droppedIndexDescs, *t.Index)
				if droppedIndexMutationIdx == mutationSentinel {
					droppedIndexMutationIdx = i
				}
			default:
				return errors.Errorf("unsupported mutation: %+v", m)
			}
		}
	}

	// First drop indexes, then add/drop columns, and only then add indexes.

	// Drop indexes.
	if err := sc.truncateIndexes(lease, droppedIndexDescs, droppedIndexMutationIdx); err != nil {
		return err
	}

	// Add and drop columns.
	if err := sc.truncateAndBackfillColumns(
		lease, addedColumnDescs, droppedColumnDescs, columnMutationIdx,
	); err != nil {
		return err
	}

	// Add new indexes.
	if err := sc.backfillIndexes(lease, addedIndexDescs, addedIndexMutationIdx); err != nil {
		return err
	}

	return nil
}
Example #15
// RenameDatabase renames the database.
// Privileges: security.RootUser, DROP on source database.
//   Notes: postgres requires superuser, db owner, or "CREATEDB".
//          mysql >= 5.1.23 does not allow database renames.
func (p *planner) RenameDatabase(n *parser.RenameDatabase) (planNode, error) {
	if n.Name == "" || n.NewName == "" {
		return nil, errEmptyDatabaseName
	}

	if p.session.User != security.RootUser {
		return nil, fmt.Errorf("only %s is allowed to rename databases", security.RootUser)
	}

	dbDesc, err := p.mustGetDatabaseDesc(string(n.Name))
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(dbDesc, privilege.DROP); err != nil {
		return nil, err
	}

	if n.Name == n.NewName {
		// Noop.
		return &emptyNode{}, nil
	}

	// Check if any views depend on tables in the database. Because our views
	// are currently just stored as strings, they explicitly specify the database
	// name. Rather than trying to rewrite them with the changed DB name, we
	// simply disallow such renames for now.
	tbNames, err := p.getTableNames(dbDesc)
	if err != nil {
		return nil, err
	}
	for i := range tbNames {
		tbDesc, err := p.getTableOrViewDesc(&tbNames[i])
		if err != nil {
			return nil, err
		}
		if tbDesc == nil {
			continue
		}
		if len(tbDesc.DependedOnBy) > 0 {
			viewDesc, err := sqlbase.GetTableDescFromID(p.txn, tbDesc.DependedOnBy[0].ID)
			if err != nil {
				return nil, err
			}
			viewName := viewDesc.Name
			if dbDesc.ID != viewDesc.ParentID {
				var err error
				viewName, err = p.getQualifiedTableName(viewDesc)
				if err != nil {
					log.Warningf(p.ctx(), "Unable to retrieve fully-qualified name of view %d: %v",
						viewDesc.ID, err)
					return nil, sqlbase.NewDependentObjectError(
						"cannot rename database because a view depends on table %q", tbDesc.Name)
				}
			}
			return nil, sqlbase.NewDependentObjectError(
				"cannot rename database because view %q depends on table %q", viewName, tbDesc.Name)
		}
	}

	if err := p.renameDatabase(dbDesc, string(n.NewName)); err != nil {
		return nil, err
	}
	return &emptyNode{}, nil
}