Example #1
// deleteRow adds to the batch the kv operations necessary to delete a table row
// with the given values.
func (rd *rowDeleter) deleteRow(b *client.Batch, values []parser.Datum) error {
	if err := rd.fks.checkAll(values); err != nil {
		return err
	}

	primaryIndexKey, secondaryIndexEntries, err := rd.helper.encodeIndexes(rd.fetchColIDtoRowIndex, values)
	if err != nil {
		return err
	}

	for _, secondaryIndexEntry := range secondaryIndexEntries {
		if log.V(2) {
			log.Infof("Del %s", secondaryIndexEntry.Key)
		}
		b.Del(secondaryIndexEntry.Key)
	}

	// Delete the row.
	rd.startKey = roachpb.Key(primaryIndexKey)
	rd.endKey = roachpb.Key(encoding.EncodeNotNullDescending(primaryIndexKey))
	if log.V(2) {
		log.Infof("DelRange %s - %s", rd.startKey, rd.endKey)
	}
	b.DelRange(&rd.startKey, &rd.endKey, false)
	rd.startKey, rd.endKey = nil, nil

	return nil
}
Example #2
// RenameDatabase renames the database.
// Privileges: security.RootUser user.
//   Notes: postgres requires superuser, db owner, or "CREATEDB".
//          mysql >= 5.1.23 does not allow database renames.
func (p *planner) RenameDatabase(n *parser.RenameDatabase) (planNode, error) {
	if n.Name == "" || n.NewName == "" {
		return nil, errEmptyDatabaseName
	}

	if p.session.User != security.RootUser {
		return nil, fmt.Errorf("only %s is allowed to rename databases", security.RootUser)
	}

	dbDesc, err := p.getDatabaseDesc(string(n.Name))
	if err != nil {
		return nil, err
	}
	if dbDesc == nil {
		return nil, sqlbase.NewUndefinedDatabaseError(string(n.Name))
	}

	if n.Name == n.NewName {
		// Noop.
		return &emptyNode{}, nil
	}

	// Now update the nameMetadataKey and the descriptor.
	descKey := sqlbase.MakeDescMetadataKey(dbDesc.GetID())
	dbDesc.SetName(string(n.NewName))

	if err := dbDesc.Validate(); err != nil {
		return nil, err
	}

	newKey := databaseKey{string(n.NewName)}.Key()
	oldKey := databaseKey{string(n.Name)}.Key()
	descID := dbDesc.GetID()
	descDesc := sqlbase.WrapDescriptor(dbDesc)

	b := client.Batch{}
	b.CPut(newKey, descID, nil)
	b.Put(descKey, descDesc)
	b.Del(oldKey)

	if err := p.txn.Run(&b); err != nil {
		if _, ok := err.(*roachpb.ConditionFailedError); ok {
			return nil, fmt.Errorf("the new database name %q already exists", string(n.NewName))
		}
		return nil, err
	}

	p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
		if err := expectDescriptorID(systemConfig, newKey, descID); err != nil {
			return err
		}
		if err := expectDescriptor(systemConfig, descKey, descDesc); err != nil {
			return err
		}
		return expectDeleted(systemConfig, oldKey)
	})

	return &emptyNode{}, nil
}
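The rename above is a check-and-swap packed into a single batch: CPut reserves the new name key (and fails with roachpb.ConditionFailedError if it is already taken), the updated descriptor is rewritten under its existing key, and the old name key is deleted. The following is a minimal sketch of that shape, parameterized over the values the example computes; the function name and error wording are illustrative only, and the client, roachpb, and fmt packages are assumed to be the same ones the snippet above uses.

// renameSketch is a hypothetical distillation of the batch built by
// RenameDatabase above: reserve the new name, rewrite the descriptor,
// and drop the old name mapping, all in one transactional batch.
func renameSketch(
	txn *client.Txn, oldKey, newKey, descKey roachpb.Key, descID, desc interface{},
) error {
	b := client.Batch{}
	b.CPut(newKey, descID, nil) // fails if newKey already exists
	b.Put(descKey, desc)        // store the updated descriptor under its key
	b.Del(oldKey)               // remove the old name mapping
	if err := txn.Run(&b); err != nil {
		if _, ok := err.(*roachpb.ConditionFailedError); ok {
			return fmt.Errorf("name for key %s already exists", newKey)
		}
		return err
	}
	return nil
}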
Example #3
// flush writes all dirty nodes and the tree to the transaction.
func (tc *treeContext) flush(b *client.Batch) {
	if tc.dirty {
		b.Put(keys.RangeTreeRoot, tc.tree)
	}
	for key, cachedNode := range tc.nodes {
		if cachedNode.dirty {
			if cachedNode.node == nil {
				b.Del(keys.RangeTreeNodeKey(roachpb.RKey(key)))
			} else {
				b.Put(keys.RangeTreeNodeKey(roachpb.RKey(key)), cachedNode.node)
			}
		}
	}
}
Example #4
// deleteIndexRow adds to the batch the kv operations necessary to delete a
// table row from the given index.
func (rd *rowDeleter) deleteIndexRow(
	ctx context.Context, b *client.Batch, idx *sqlbase.IndexDescriptor, values []parser.Datum,
) error {
	if err := rd.fks.checkAll(values); err != nil {
		return err
	}
	secondaryIndexEntry, err := sqlbase.EncodeSecondaryIndex(
		rd.helper.tableDesc, idx, rd.fetchColIDtoRowIndex, values)
	if err != nil {
		return err
	}
	if log.V(2) {
		log.Infof(ctx, "Del %s", secondaryIndexEntry.Key)
	}
	b.Del(secondaryIndexEntry.Key)
	return nil
}
Example #5
// runDel deletes the keys given as command-line arguments in a single batch.
func runDel(cmd *cobra.Command, args []string) {
	if len(args) == 0 {
		mustUsage(cmd)
		return
	}

	var b client.Batch
	for _, arg := range args {
		b.Del(unquoteArg(arg, true /* disallow system keys */))
	}

	kvDB, stopper := makeDBClient()
	defer stopper.Stop()

	if err := kvDB.Run(&b); err != nil {
		panicf("delete failed: %s", err)
	}
}
Example #6
// delMeta queues a deletion of the given meta key; the range descriptor
// argument is unused here.
func delMeta(b *client.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) {
	b.Del(key)
}
Example #7
// updateRow adds to the batch the kv operations necessary to update a table row
// with the given values.
//
// The row corresponding to oldValues is updated with the ones in updateValues.
// Note that updateValues only contains the ones that are changing.
//
// The return value is only good until the next call to UpdateRow.
func (ru *rowUpdater) updateRow(
	b *client.Batch,
	oldValues []parser.Datum,
	updateValues []parser.Datum,
) ([]parser.Datum, error) {
	if len(oldValues) != len(ru.fetchCols) {
		return nil, errors.Errorf("got %d values but expected %d", len(oldValues), len(ru.fetchCols))
	}
	if len(updateValues) != len(ru.updateCols) {
		return nil, errors.Errorf("got %d values but expected %d", len(updateValues), len(ru.updateCols))
	}

	primaryIndexKey, secondaryIndexEntries, err := ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, oldValues)
	if err != nil {
		return nil, err
	}

	// The secondary index entries returned by rowHelper.encodeIndexes are only
	// valid until the next call to encodeIndexes. We need to copy them so that
	// we can compare against the new secondary index entries.
	secondaryIndexEntries = append(ru.indexEntriesBuf[:0], secondaryIndexEntries...)
	ru.indexEntriesBuf = secondaryIndexEntries

	// Check that the new value types match the column types. This needs to
	// happen before index encoding because certain datum types (i.e. tuple)
	// cannot be used as index values.
	for i, val := range updateValues {
		if ru.marshalled[i], err = sqlbase.MarshalColumnValue(ru.updateCols[i], val); err != nil {
			return nil, err
		}
	}

	// Update the row values.
	copy(ru.newValues, oldValues)
	for i, updateCol := range ru.updateCols {
		ru.newValues[ru.fetchColIDtoRowIndex[updateCol.ID]] = updateValues[i]
	}

	rowPrimaryKeyChanged := false
	var newSecondaryIndexEntries []sqlbase.IndexEntry
	if ru.primaryKeyColChange {
		var newPrimaryIndexKey []byte
		newPrimaryIndexKey, newSecondaryIndexEntries, err =
			ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, ru.newValues)
		if err != nil {
			return nil, err
		}
		rowPrimaryKeyChanged = !bytes.Equal(primaryIndexKey, newPrimaryIndexKey)
	} else {
		newSecondaryIndexEntries, err =
			ru.helper.encodeSecondaryIndexes(ru.fetchColIDtoRowIndex, ru.newValues)
		if err != nil {
			return nil, err
		}
	}

	if rowPrimaryKeyChanged {
		if err := ru.fks.checkIdx(ru.helper.tableDesc.PrimaryIndex.ID, oldValues, ru.newValues); err != nil {
			return nil, err
		}
		for i := range newSecondaryIndexEntries {
			if !bytes.Equal(newSecondaryIndexEntries[i].Key, secondaryIndexEntries[i].Key) {
				if err := ru.fks.checkIdx(ru.helper.indexes[i].ID, oldValues, ru.newValues); err != nil {
					return nil, err
				}
			}
		}

		if err := ru.rd.deleteRow(b, oldValues); err != nil {
			return nil, err
		}
		if err := ru.ri.insertRow(b, ru.newValues, false); err != nil {
			return nil, err
		}
		return ru.newValues, nil
	}

	// Add the new values.
	// TODO(dan): This has gotten very similar to the loop in insertRow, see if
	// they can be DRY'd. Ideally, this would also work for
	// truncateAndBackfillColumnsChunk, which is currently abusing rowUpdater.
	for i, family := range ru.helper.tableDesc.Families {
		update := false
		for _, colID := range family.ColumnIDs {
			if _, ok := ru.updateColIDtoRowIndex[colID]; ok {
				update = true
				break
			}
		}
		if !update {
			continue
		}

		if i > 0 {
			// HACK: MakeFamilyKey appends to its argument, so on every loop iteration
			// after the first, trim primaryIndexKey so nothing gets overwritten.
			// TODO(dan): Instead of this, use something like engine.ChunkAllocator.
			primaryIndexKey = primaryIndexKey[:len(primaryIndexKey):len(primaryIndexKey)]
		}

		if len(family.ColumnIDs) == 1 && family.ColumnIDs[0] == family.DefaultColumnID {
			// Storage optimization to store DefaultColumnID directly as a value. Also
			// backwards compatible with the original BaseFormatVersion.

			idx, ok := ru.updateColIDtoRowIndex[family.DefaultColumnID]
			if !ok {
				continue
			}

			ru.key = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID))
			if log.V(2) {
				log.Infof("Put %s -> %v", ru.key, ru.marshalled[idx].PrettyPrint())
			}
			b.Put(&ru.key, &ru.marshalled[idx])
			ru.key = nil

			continue
		}

		ru.key = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID))
		ru.valueBuf = ru.valueBuf[:0]

		var lastColID sqlbase.ColumnID
		familySortedColumnIDs, ok := ru.helper.sortedColumnFamily(family.ID)
		if !ok {
			panic("invalid family sorted column id map")
		}
		for _, colID := range familySortedColumnIDs {
			if ru.helper.columnInPK(colID) {
				if family.ID != 0 {
					return nil, errors.Errorf("primary index column %d must be in family 0, was %d", colID, family.ID)
				}
				// Skip primary key columns as their values are encoded in the key of
				// each family. Family 0 is guaranteed to exist and acts as a sentinel.
				continue
			}

			idx, ok := ru.fetchColIDtoRowIndex[colID]
			if !ok {
				return nil, errors.Errorf("column %d was expected to be fetched, but wasn't", colID)
			}
			col := ru.fetchCols[idx]

			if ru.newValues[idx].Compare(parser.DNull) == 0 {
				continue
			}

			if lastColID > col.ID {
				panic(fmt.Errorf("cannot write column id %d after %d", col.ID, lastColID))
			}
			colIDDiff := col.ID - lastColID
			lastColID = col.ID
			ru.valueBuf, err = sqlbase.EncodeTableValue(ru.valueBuf, colIDDiff, ru.newValues[idx])
			if err != nil {
				return nil, err
			}
		}

		if family.ID != 0 && len(ru.valueBuf) == 0 {
			// The family might have already existed but every column in it is being
			// set to NULL, so delete it.
			if log.V(2) {
				log.Infof("Del %s", ru.key)
			}

			b.Del(&ru.key)
		} else {
			ru.value.SetTuple(ru.valueBuf)
			if log.V(2) {
				log.Infof("Put %s -> %v", ru.key, ru.value.PrettyPrint())
			}
			b.Put(&ru.key, &ru.value)
		}

		ru.key = nil
	}

	// Update secondary indexes.
	for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
		secondaryIndexEntry := secondaryIndexEntries[i]
		secondaryKeyChanged := !bytes.Equal(newSecondaryIndexEntry.Key, secondaryIndexEntry.Key)
		if secondaryKeyChanged {
			if err := ru.fks.checkIdx(ru.helper.indexes[i].ID, oldValues, ru.newValues); err != nil {
				return nil, err
			}

			if log.V(2) {
				log.Infof("Del %s", secondaryIndexEntry.Key)
			}
			b.Del(secondaryIndexEntry.Key)
			// Do not update Indexes in the DELETE_ONLY state.
			if _, ok := ru.deleteOnlyIndex[i]; !ok {
				if log.V(2) {
					log.Infof("CPut %s -> %v", newSecondaryIndexEntry.Key, newSecondaryIndexEntry.Value.PrettyPrint())
				}
				b.CPut(newSecondaryIndexEntry.Key, &newSecondaryIndexEntry.Value, nil)
			}
		}
	}

	return ru.newValues, nil
}
Example #8
// Execute the entire schema change in steps. startBackfillNotification is
// called before the backfill starts; it can be nil.
func (sc SchemaChanger) exec(
	startBackfillNotification func() error,
	oldNameNotInUseNotification func(),
) error {
	// Acquire lease.
	lease, err := sc.AcquireLease()
	if err != nil {
		return err
	}
	needRelease := true
	// Always try to release lease.
	defer func(l *sqlbase.TableDescriptor_SchemaChangeLease) {
		// If the schema changer deleted the descriptor, there's no longer a lease to be
		// released.
		if !needRelease {
			return
		}
		if err := sc.ReleaseLease(*l); err != nil {
			log.Warning(err)
		}
	}(&lease)

	// Increment the version and unset tableDescriptor.UpVersion.
	desc, err := sc.MaybeIncrementVersion()
	if err != nil {
		return err
	}

	if desc.GetTable().Deleted() {
		lease, err = sc.ExtendLease(lease)
		if err != nil {
			return err
		}
		// Wait for everybody to see the version with the deleted bit set. When
		// this returns, nobody has any leases on the table, nor can get new leases,
		// so the table will no longer be modified.
		if err := sc.waitToUpdateLeases(); err != nil {
			return err
		}

		// Truncate the table and delete the descriptor.
		if err := sc.truncateAndDropTable(&lease, desc.GetTable()); err != nil {
			return err
		}
		needRelease = false
		return nil
	}

	if desc.GetTable().Renamed() {
		lease, err = sc.ExtendLease(lease)
		if err != nil {
			return err
		}
		// Wait for everyone to see the version with the new name. When this
		// returns, no new transactions will be using the old name for the table, so
		// the old name can now be re-used (by CREATE).
		if err := sc.waitToUpdateLeases(); err != nil {
			return err
		}

		if oldNameNotInUseNotification != nil {
			oldNameNotInUseNotification()
		}
		// Free up the old name(s).
		err := sc.db.Txn(func(txn *client.Txn) error {
			b := client.Batch{}
			for _, renameDetails := range desc.GetTable().Renames {
				tbKey := tableKey{
					sqlbase.ID(renameDetails.OldParentID), renameDetails.OldName}.Key()
				b.Del(tbKey)
			}
			if err := txn.Run(&b); err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return err
		}

		// Clean up - clear the descriptor's state.
		_, err = sc.leaseMgr.Publish(sc.tableID, func(desc *sqlbase.TableDescriptor) error {
			desc.Renames = nil
			return nil
		}, nil)
		if err != nil {
			return err
		}
	}

	// Wait for the schema change to propagate to all nodes after this function
	// returns, so that the new schema is live everywhere. This is not needed for
	// correctness but is done to make the UI experience/tests predictable.
	defer func() {
		if err := sc.waitToUpdateLeases(); err != nil {
			log.Warning(err)
		}
	}()

	if sc.mutationID == sqlbase.InvalidMutationID {
		// Nothing more to do.
		return nil
	}

	// Another transaction might set the up_version bit again,
	// but we're no longer responsible for taking care of that.

	// Run through mutation state machine and backfill.
	err = sc.runStateMachineAndBackfill(&lease, startBackfillNotification)

	// Purge the mutations if the application of the mutations failed due to
	// an integrity constraint violation. All other errors are transient
	// errors that are resolved by retrying the backfill.
	if sqlbase.IsIntegrityConstraintError(err) {
		log.Warningf("reversing schema change due to irrecoverable error: %s", err)
		if errReverse := sc.reverseMutations(err); errReverse != nil {
			// Although the backfill did hit an integrity constraint violation
			// and made a decision to reverse the mutations,
			// reverseMutations() failed. If exec() is called again the entire
			// schema change will be retried.
			return errReverse
		}

		// After this point the schema change has been reversed and any retry
		// of the schema change will act upon the reversed schema change.
		if errPurge := sc.runStateMachineAndBackfill(
			&lease, startBackfillNotification,
		); errPurge != nil {
			// Don't return this error because we do want the caller to know
			// that an integrity constraint was violated with the original
			// schema change. The reversed schema change will be
			// retried via the async schema change manager.
			log.Warningf("error purging mutation: %s, after error: %s", errPurge, err)
		}
	}

	return err
}
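Across all eight examples the execution model is the same: mutations (Del, DelRange, Put, CPut) are only queued on the client.Batch, and nothing reaches the KV store until the batch is run, either directly against the DB as in example #5 or inside a transaction as in examples #2 and #8. Below is a minimal sketch of the transactional variant, assuming the same client and roachpb packages as above; the function name and keys are placeholders.

// batchDeleteSketch queues a Del for every key and sends them together when
// txn.Run is called; within the transaction the deletions apply atomically.
func batchDeleteSketch(db *client.DB, toDelete []roachpb.Key) error {
	return db.Txn(func(txn *client.Txn) error {
		b := client.Batch{}
		for _, k := range toDelete {
			b.Del(k) // queued only; not sent yet
		}
		return txn.Run(&b) // all queued operations are sent in one go
	})
}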