Example #1
// insertCPutFn is used by insertRow when conflicts should be respected.
// The value is pretty-printed when V(2) logging is enabled.
func insertCPutFn(ctx context.Context, b *client.Batch, key *roachpb.Key, value *roachpb.Value) {
	// TODO(dan): We want to do this V(2) log everywhere in sql. Consider making a
	// client.Batch wrapper instead of inlining it everywhere.
	if log.V(2) {
		log.InfofDepth(ctx, 1, "CPut %s -> %s", *key, value.PrettyPrint())
	}
	b.CPut(key, value, nil)
}
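For context, here is a minimal sketch of the pattern this callback fits into; the putFn type and insertPutFnSketch below are illustrative names and not part of the original source. Passing nil as CPut's expected value makes the write fail if the key already holds a value, which is how conflicts are respected; an unconditional variant would use Put instead.

// putFn is an assumed callback shape matching insertCPutFn's signature; the
// insert path can then choose between conditional and unconditional writes.
type putFn func(ctx context.Context, b *client.Batch, key *roachpb.Key, value *roachpb.Value)

// insertPutFnSketch is a hypothetical unconditional counterpart: where
// insertCPutFn's b.CPut(key, value, nil) fails if the key already exists,
// a plain b.Put overwrites whatever is there.
func insertPutFnSketch(ctx context.Context, b *client.Batch, key *roachpb.Key, value *roachpb.Value) {
	if log.V(2) {
		log.InfofDepth(ctx, 1, "Put %s -> %s", *key, value.PrettyPrint())
	}
	b.Put(key, value)
}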
Example #2
// updateRow adds to the batch the kv operations necessary to update a table row
// with the given values.
//
// The row corresponding to oldValues is updated with the values in updateValues.
// Note that updateValues contains only the columns that are changing.
//
// The return value is only valid until the next call to updateRow.
func (ru *rowUpdater) updateRow(
	ctx context.Context, b *client.Batch, oldValues []parser.Datum, updateValues []parser.Datum,
) ([]parser.Datum, error) {
	if len(oldValues) != len(ru.fetchCols) {
		return nil, errors.Errorf("got %d values but expected %d", len(oldValues), len(ru.fetchCols))
	}
	if len(updateValues) != len(ru.updateCols) {
		return nil, errors.Errorf("got %d values but expected %d", len(updateValues), len(ru.updateCols))
	}

	primaryIndexKey, secondaryIndexEntries, err := ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, oldValues)
	if err != nil {
		return nil, err
	}

	// The secondary index entries returned by rowHelper.encodeIndexes are only
	// valid until the next call to encodeIndexes. We need to copy them so that
	// we can compare against the new secondary index entries.
	secondaryIndexEntries = append(ru.indexEntriesBuf[:0], secondaryIndexEntries...)
	ru.indexEntriesBuf = secondaryIndexEntries

	// Check that the new value types match the column types. This needs to
	// happen before index encoding because certain datum types (e.g. tuple)
	// cannot be used as index values.
	for i, val := range updateValues {
		if ru.marshalled[i], err = sqlbase.MarshalColumnValue(ru.updateCols[i], val); err != nil {
			return nil, err
		}
	}

	// Update the row values.
	copy(ru.newValues, oldValues)
	for i, updateCol := range ru.updateCols {
		ru.newValues[ru.fetchColIDtoRowIndex[updateCol.ID]] = updateValues[i]
	}

	rowPrimaryKeyChanged := false
	var newSecondaryIndexEntries []sqlbase.IndexEntry
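	// If a primary key column may have changed, the primary index key has to be
	// re-encoded as well; otherwise only the secondary index entries are
	// re-encoded.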
	if ru.primaryKeyColChange {
		var newPrimaryIndexKey []byte
		newPrimaryIndexKey, newSecondaryIndexEntries, err =
			ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, ru.newValues)
		if err != nil {
			return nil, err
		}
		rowPrimaryKeyChanged = !bytes.Equal(primaryIndexKey, newPrimaryIndexKey)
	} else {
		newSecondaryIndexEntries, err =
			ru.helper.encodeSecondaryIndexes(ru.fetchColIDtoRowIndex, ru.newValues)
		if err != nil {
			return nil, err
		}
	}

	if rowPrimaryKeyChanged {
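		// A changed primary key is handled as a delete of the old row followed by
		// an insert of the new one, after re-checking foreign keys for the primary
		// index and for any secondary index whose key changed.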
		if err := ru.fks.checkIdx(ru.helper.tableDesc.PrimaryIndex.ID, oldValues, ru.newValues); err != nil {
			return nil, err
		}
		for i := range newSecondaryIndexEntries {
			if !bytes.Equal(newSecondaryIndexEntries[i].Key, secondaryIndexEntries[i].Key) {
				if err := ru.fks.checkIdx(ru.helper.indexes[i].ID, oldValues, ru.newValues); err != nil {
					return nil, err
				}
			}
		}

		if err := ru.rd.deleteRow(ctx, b, oldValues); err != nil {
			return nil, err
		}
		if err := ru.ri.InsertRow(ctx, b, ru.newValues, false); err != nil {
			return nil, err
		}
		return ru.newValues, nil
	}

	// Add the new values.
	// TODO(dan): This has gotten very similar to the loop in insertRow; see if
	// they can be DRY'd. Ideally, this would also work for
	// truncateAndBackfillColumnsChunk, which is currently abusing rowUpdater.
	for i, family := range ru.helper.tableDesc.Families {
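		// Skip column families that contain none of the updated columns.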
		update := false
		for _, colID := range family.ColumnIDs {
			if _, ok := ru.updateColIDtoRowIndex[colID]; ok {
				update = true
				break
			}
		}
		if !update {
			continue
		}

		if i > 0 {
			// HACK: MakeFamilyKey appends to its argument, so on every loop iteration
			// after the first, trim primaryIndexKey so nothing gets overwritten.
			// TODO(dan): Instead of this, use something like engine.ChunkAllocator.
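			// The full slice expression sets cap == len, so MakeFamilyKey's append
			// below must allocate a new backing array instead of writing into
			// primaryIndexKey's spare capacity.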
			primaryIndexKey = primaryIndexKey[:len(primaryIndexKey):len(primaryIndexKey)]
		}

		if len(family.ColumnIDs) == 1 && family.ColumnIDs[0] == family.DefaultColumnID {
			// Storage optimization to store DefaultColumnID directly as a value. Also
			// backwards compatible with the original BaseFormatVersion.

			idx, ok := ru.updateColIDtoRowIndex[family.DefaultColumnID]
			if !ok {
				continue
			}

			ru.key = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID))
			if log.V(2) {
				log.Infof(ctx, "Put %s -> %v", ru.key, ru.marshalled[idx].PrettyPrint())
			}
			b.Put(&ru.key, &ru.marshalled[idx])
			ru.key = nil

			continue
		}

		ru.key = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID))
		ru.valueBuf = ru.valueBuf[:0]

		var lastColID sqlbase.ColumnID
		familySortedColumnIDs, ok := ru.helper.sortedColumnFamily(family.ID)
		if !ok {
			panic("invalid family sorted column id map")
		}
		for _, colID := range familySortedColumnIDs {
			if ru.helper.columnInPK(colID) {
				if family.ID != 0 {
					return nil, errors.Errorf("primary index column %d must be in family 0, was %d", colID, family.ID)
				}
				// Skip primary key columns as their values are encoded in the key of
				// each family. Family 0 is guaranteed to exist and acts as a sentinel.
				continue
			}

			idx, ok := ru.fetchColIDtoRowIndex[colID]
			if !ok {
				return nil, errors.Errorf("column %d was expected to be fetched, but wasn't", colID)
			}
			col := ru.fetchCols[idx]

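			// NULL values are omitted: a column that is absent from the family's
			// tuple encoding decodes as NULL.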
			if ru.newValues[idx].Compare(parser.DNull) == 0 {
				continue
			}

			if lastColID > col.ID {
				panic(fmt.Errorf("cannot write column id %d after %d", col.ID, lastColID))
			}
			colIDDiff := col.ID - lastColID
			lastColID = col.ID
			ru.valueBuf, err = sqlbase.EncodeTableValue(ru.valueBuf, colIDDiff, ru.newValues[idx])
			if err != nil {
				return nil, err
			}
		}

		if family.ID != 0 && len(ru.valueBuf) == 0 {
			// The family might have already existed but every column in it is being
			// set to NULL, so delete it.
			if log.V(2) {
				log.Infof(ctx, "Del %s", ru.key)
			}

			b.Del(&ru.key)
		} else {
			ru.value.SetTuple(ru.valueBuf)
			if log.V(2) {
				log.Infof(ctx, "Put %s -> %v", ru.key, ru.value.PrettyPrint())
			}
			b.Put(&ru.key, &ru.value)
		}

		ru.key = nil
	}

	// Update secondary indexes.
	for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
		secondaryIndexEntry := secondaryIndexEntries[i]
		secondaryKeyChanged := !bytes.Equal(newSecondaryIndexEntry.Key, secondaryIndexEntry.Key)
		if secondaryKeyChanged {
			if err := ru.fks.checkIdx(ru.helper.indexes[i].ID, oldValues, ru.newValues); err != nil {
				return nil, err
			}

			if log.V(2) {
				log.Infof(ctx, "Del %s", secondaryIndexEntry.Key)
			}
			b.Del(secondaryIndexEntry.Key)
			// Do not update indexes in the DELETE_ONLY state.
			if _, ok := ru.deleteOnlyIndex[i]; !ok {
				if log.V(2) {
					log.Infof(ctx, "CPut %s -> %v", newSecondaryIndexEntry.Key, newSecondaryIndexEntry.Value.PrettyPrint())
				}
				b.CPut(newSecondaryIndexEntry.Key, &newSecondaryIndexEntry.Value, nil)
			}
		}
	}

	return ru.newValues, nil
}
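Two buffer idioms in updateRow are worth noting, and the following self-contained sketch (illustrative, not from the original source) demonstrates both: append(buf[:0], ...) copies entries into a reusable buffer, and the full slice expression s[:len(s):len(s)] caps capacity so a later append must allocate rather than overwrite adjacent bytes in a shared backing array.

package main

import "fmt"

func main() {
	// Reuse-and-copy, as with ru.indexEntriesBuf: once the buffer has grown
	// large enough, subsequent copies reuse its backing array.
	var buf []string
	entries := []string{"idx1/key", "idx2/key"}
	buf = append(buf[:0], entries...)
	fmt.Println(buf) // [idx1/key idx2/key]

	// Capacity-capped slice, as with primaryIndexKey before MakeFamilyKey:
	// appending to capped allocates a new array instead of clobbering the
	// remaining bytes of key.
	key := []byte("/Table/51/1/5")
	capped := key[:9:9] // "/Table/51", cap == len == 9
	capped = append(capped, "/2"...)
	fmt.Println(string(key), string(capped)) // /Table/51/1/5 /Table/51/2
}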