Code example #1
File: rowwriter.go Project: BramGruneir/cockroach
// deleteRow adds to the batch the kv operations necessary to delete a table row
// with the given values.
func (rd *rowDeleter) deleteRow(ctx context.Context, b *client.Batch, values []parser.Datum) error {
	if err := rd.fks.checkAll(values); err != nil {
		return err
	}

	primaryIndexKey, secondaryIndexEntries, err := rd.helper.encodeIndexes(rd.fetchColIDtoRowIndex, values)
	if err != nil {
		return err
	}

	for _, secondaryIndexEntry := range secondaryIndexEntries {
		if log.V(2) {
			log.Infof(ctx, "Del %s", secondaryIndexEntry.Key)
		}
		b.Del(secondaryIndexEntry.Key)
	}

	// Delete the row.
	rd.startKey = roachpb.Key(primaryIndexKey)
	rd.endKey = roachpb.Key(encoding.EncodeNotNullDescending(primaryIndexKey))
	if log.V(2) {
		log.Infof(ctx, "DelRange %s - %s", rd.startKey, rd.endKey)
	}
	b.DelRange(&rd.startKey, &rd.endKey, false)
	rd.startKey, rd.endKey = nil, nil

	return nil
}
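
deleteRow only stages KV operations into the batch; nothing is written until the batch is run, typically inside a transaction. Below is a minimal usage sketch, assuming a rowDeleter rd has already been constructed for the table and that values matches its fetch columns (the helper name deleteOneRow is illustrative, not part of the project):

func deleteOneRow(ctx context.Context, db *client.DB, rd *rowDeleter, values []parser.Datum) error {
	return db.Txn(ctx, func(txn *client.Txn) error {
		var b client.Batch
		// Stage the Del/DelRange operations for this row into the batch.
		if err := rd.deleteRow(ctx, &b, values); err != nil {
			return err
		}
		// Execute the staged operations inside the transaction.
		return txn.Run(&b)
	})
}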
Code example #2
File: rowwriter.go Project: knz/cockroach
// insertCPutFn is used by insertRow when conflicts should be respected.
// The key/value pair is pretty-printed when V(2) logging is enabled.
func insertCPutFn(ctx context.Context, b *client.Batch, key *roachpb.Key, value *roachpb.Value) {
	// TODO(dan): We want to do this V(2) log everywhere in sql. Consider making
	// a client.Batch wrapper instead of inlining it everywhere.
	if log.V(2) {
		log.InfofDepth(ctx, 1, "CPut %s -> %s", *key, value.PrettyPrint())
	}
	b.CPut(key, value, nil)
}
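
The nil passed as CPut's expected value means the write succeeds only if the key does not already exist; an existing key makes the batch fail with a roachpb.ConditionFailedError when it is run, which is how duplicate primary and unique index keys are detected (see convertBatchError in the next example). A tiny illustrative contrast, not taken from the project:

func writeBlind(b *client.Batch, key roachpb.Key, value roachpb.Value) {
	b.Put(&key, &value) // overwrites any existing value
}

func writeIfAbsent(b *client.Batch, key roachpb.Key, value roachpb.Value) {
	b.CPut(&key, &value, nil) // fails at Run time if the key already has a value
}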
Code example #3
File: errors.go Project: BramGruneir/cockroach
func convertBatchError(tableDesc *sqlbase.TableDescriptor, b *client.Batch) error {
	origPErr := b.MustPErr()
	if origPErr.Index == nil {
		return origPErr.GoError()
	}
	index := origPErr.Index.Index
	if index >= int32(len(b.Results)) {
		panic(fmt.Sprintf("index %d outside of results: %+v", index, b.Results))
	}
	result := b.Results[index]
	var alloc sqlbase.DatumAlloc
	if _, ok := origPErr.GetDetail().(*roachpb.ConditionFailedError); ok {
		for _, row := range result.Rows {
			// TODO(dan): There's too much internal knowledge of the sql table
			// encoding here (and this callsite is the only reason
			// DecodeIndexKeyPrefix is exported). Refactor this bit out.
			indexID, key, err := sqlbase.DecodeIndexKeyPrefix(&alloc, tableDesc, row.Key)
			if err != nil {
				return err
			}
			index, err := tableDesc.FindIndexByID(indexID)
			if err != nil {
				return err
			}
			vals, err := sqlbase.MakeEncodedKeyVals(tableDesc, index.ColumnIDs)
			if err != nil {
				return err
			}
			dirs := make([]encoding.Direction, 0, len(index.ColumnIDs))
			for _, dir := range index.ColumnDirections {
				convertedDir, err := dir.ToEncodingDirection()
				if err != nil {
					return err
				}
				dirs = append(dirs, convertedDir)
			}
			if _, err := sqlbase.DecodeKeyVals(&alloc, vals, dirs, key); err != nil {
				return err
			}
			decodedVals := make([]parser.Datum, len(vals))
			var da sqlbase.DatumAlloc
			for i, val := range vals {
				err := val.EnsureDecoded(&da)
				if err != nil {
					return err
				}
				decodedVals[i] = val.Datum
			}
			return sqlbase.NewUniquenessConstraintViolationError(index, decodedVals)
		}
	}
	return origPErr.GoError()
}
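
convertBatchError is meant to be called after running a batch has failed: it needs the batch itself so that MustPErr and the per-result rows are available. A sketch of how the call might be wired up (the helper runAndConvert is an assumption; the project's actual call sites live in its SQL table-writer code):

func runAndConvert(tableDesc *sqlbase.TableDescriptor, txn *client.Txn, b *client.Batch) error {
	if err := txn.Run(b); err != nil {
		// The batch now carries the structured error, so a ConditionFailedError
		// from a CPut can be turned into a uniqueness-violation error.
		return convertBatchError(tableDesc, b)
	}
	return nil
}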
Code example #4
File: rowwriter.go Project: BramGruneir/cockroach
// deleteIndexRow adds to the batch the kv operations necessary to delete a
// table row from the given index.
func (rd *rowDeleter) deleteIndexRow(
	ctx context.Context, b *client.Batch, idx *sqlbase.IndexDescriptor, values []parser.Datum,
) error {
	if err := rd.fks.checkAll(values); err != nil {
		return err
	}
	secondaryIndexEntry, err := sqlbase.EncodeSecondaryIndex(
		rd.helper.tableDesc, idx, rd.fetchColIDtoRowIndex, values)
	if err != nil {
		return err
	}
	if log.V(2) {
		log.Infof(ctx, "Del %s", secondaryIndexEntry.Key)
	}
	b.Del(secondaryIndexEntry.Key)
	return nil
}
Code example #5
File: addressing.go Project: knz/cockroach
// delMeta removes the addressing record stored under the given meta key; desc
// is unused here, but it keeps the signature symmetric with putMeta below.
func delMeta(b *client.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) {
	b.Del(key)
}
Code example #6
File: addressing.go Project: knz/cockroach
// putMeta writes desc as the addressing record under the given meta key.
func putMeta(b *client.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) {
	b.Put(key, desc)
}
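
delMeta and putMeta deliberately share a signature, so range-addressing code can apply either one while walking the meta records for a descriptor. Illustrative only (the helper below is hypothetical): when an addressing record moves from one meta key to another, the two can be combined in a single batch.

func moveMeta(b *client.Batch, oldKey, newKey roachpb.Key, desc *roachpb.RangeDescriptor) {
	delMeta(b, oldKey, desc) // drop the stale addressing entry
	putMeta(b, newKey, desc) // write the descriptor under its new meta key
}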
Code example #7
File: db_test.go Project: hvaara/cockroach
// TestTxnDelRangeIntentResolutionCounts ensures that intents left behind by a
// DelRange with a MaxSpanRequestKeys limit are resolved correctly, and that
// resolution uses the minimal span of keys.
func TestTxnDelRangeIntentResolutionCounts(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var intentResolutionCount int64
	params := base.TestServerArgs{
		Knobs: base.TestingKnobs{
			Store: &storage.StoreTestingKnobs{
				NumKeysEvaluatedForRangeIntentResolution: &intentResolutionCount,
				TestingCommandFilter: func(filterArgs storagebase.FilterArgs) *roachpb.Error {
					req, ok := filterArgs.Req.(*roachpb.ResolveIntentRequest)
					if ok {
						key := req.Header().Key.String()
						// Check if the intent is from the range being
						// scanned below.
						if key >= "a" && key < "d" {
							t.Errorf("resolving intent on key %s", key)
						}
					}
					return nil
				},
			},
		},
	}
	s, _, db := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	for _, abortTxn := range []bool{false, true} {
		spanSize := int64(10)
		prefixes := []string{"a", "b", "c"}
		for i := int64(0); i < spanSize; i++ {
			for _, prefix := range prefixes {
				if err := db.Put(context.TODO(), fmt.Sprintf("%s%d", prefix, i), "v"); err != nil {
					t.Fatal(err)
				}
			}
		}
		totalNumKeys := int64(len(prefixes)) * spanSize

		atomic.StoreInt64(&intentResolutionCount, 0)
		limit := totalNumKeys / 2
		if err := db.Txn(context.TODO(), func(txn *client.Txn) error {
			var b client.Batch
			// Fully deleted.
			b.DelRange("a", "b", false)
			// Partially deleted.
			b.DelRange("b", "c", false)
			// Not deleted.
			b.DelRange("c", "d", false)
			b.Header.MaxSpanRequestKeys = limit
			if err := txn.Run(&b); err != nil {
				return err
			}
			if abortTxn {
				return errors.New("aborting txn")
			}
			return nil
		}); err != nil && !abortTxn {
			t.Fatal(err)
		}

		// Ensure that the correct number of keys were evaluated for intents.
		if numKeys := atomic.LoadInt64(&intentResolutionCount); numKeys != limit {
			t.Fatalf("abortTxn: %v, resolved %d keys, expected %d", abortTxn, numKeys, limit)
		}

		// Ensure no intents are left behind. Scan the entire span to resolve
		// any intents that were left behind; intents are resolved internally
		// through ResolveIntentRequest(s).
		kvs, err := db.Scan(context.TODO(), "a", "d", totalNumKeys)
		if err != nil {
			t.Fatal(err)
		}
		expectedNumKeys := totalNumKeys / 2
		if abortTxn {
			expectedNumKeys = totalNumKeys
		}
		if int64(len(kvs)) != expectedNumKeys {
			t.Errorf("abortTxn: %v, %d keys left behind, expected=%d",
				abortTxn, len(kvs), expectedNumKeys)
		}
	}
}
Code example #8
File: rowwriter.go Project: BramGruneir/cockroach
// updateRow adds to the batch the kv operations necessary to update a table row
// with the given values.
//
// The row corresponding to oldValues is updated with the ones in updateValues.
// Note that updateValues only contains the ones that are changing.
//
// The return value is only good until the next call to updateRow.
func (ru *rowUpdater) updateRow(
	ctx context.Context, b *client.Batch, oldValues []parser.Datum, updateValues []parser.Datum,
) ([]parser.Datum, error) {
	if len(oldValues) != len(ru.fetchCols) {
		return nil, errors.Errorf("got %d values but expected %d", len(oldValues), len(ru.fetchCols))
	}
	if len(updateValues) != len(ru.updateCols) {
		return nil, errors.Errorf("got %d values but expected %d", len(updateValues), len(ru.updateCols))
	}

	primaryIndexKey, secondaryIndexEntries, err := ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, oldValues)
	if err != nil {
		return nil, err
	}

	// The secondary index entries returned by rowHelper.encodeIndexes are only
	// valid until the next call to encodeIndexes. We need to copy them so that
	// we can compare against the new secondary index entries.
	secondaryIndexEntries = append(ru.indexEntriesBuf[:0], secondaryIndexEntries...)
	ru.indexEntriesBuf = secondaryIndexEntries

	// Check that the new value types match the column types. This needs to
	// happen before index encoding because certain datum types (i.e. tuple)
	// cannot be used as index values.
	for i, val := range updateValues {
		if ru.marshalled[i], err = sqlbase.MarshalColumnValue(ru.updateCols[i], val); err != nil {
			return nil, err
		}
	}

	// Update the row values.
	copy(ru.newValues, oldValues)
	for i, updateCol := range ru.updateCols {
		ru.newValues[ru.fetchColIDtoRowIndex[updateCol.ID]] = updateValues[i]
	}

	rowPrimaryKeyChanged := false
	var newSecondaryIndexEntries []sqlbase.IndexEntry
	if ru.primaryKeyColChange {
		var newPrimaryIndexKey []byte
		newPrimaryIndexKey, newSecondaryIndexEntries, err =
			ru.helper.encodeIndexes(ru.fetchColIDtoRowIndex, ru.newValues)
		if err != nil {
			return nil, err
		}
		rowPrimaryKeyChanged = !bytes.Equal(primaryIndexKey, newPrimaryIndexKey)
	} else {
		newSecondaryIndexEntries, err =
			ru.helper.encodeSecondaryIndexes(ru.fetchColIDtoRowIndex, ru.newValues)
		if err != nil {
			return nil, err
		}
	}

	if rowPrimaryKeyChanged {
		if err := ru.fks.checkIdx(ru.helper.tableDesc.PrimaryIndex.ID, oldValues, ru.newValues); err != nil {
			return nil, err
		}
		for i := range newSecondaryIndexEntries {
			if !bytes.Equal(newSecondaryIndexEntries[i].Key, secondaryIndexEntries[i].Key) {
				if err := ru.fks.checkIdx(ru.helper.indexes[i].ID, oldValues, ru.newValues); err != nil {
					return nil, err
				}
			}
		}

		if err := ru.rd.deleteRow(ctx, b, oldValues); err != nil {
			return nil, err
		}
		if err := ru.ri.InsertRow(ctx, b, ru.newValues, false); err != nil {
			return nil, err
		}
		return ru.newValues, nil
	}

	// Add the new values.
	// TODO(dan): This has gotten very similar to the loop in insertRow, see if
	// they can be DRY'd. Ideally, this would also work for
	// truncateAndBackfillColumnsChunk, which is currently abusing rowUpdater.
	for i, family := range ru.helper.tableDesc.Families {
		update := false
		for _, colID := range family.ColumnIDs {
			if _, ok := ru.updateColIDtoRowIndex[colID]; ok {
				update = true
				break
			}
		}
		if !update {
			continue
		}

		if i > 0 {
			// HACK: MakeFamilyKey appends to its argument, so on every loop iteration
			// after the first, trim primaryIndexKey so nothing gets overwritten.
			// TODO(dan): Instead of this, use something like engine.ChunkAllocator.
			primaryIndexKey = primaryIndexKey[:len(primaryIndexKey):len(primaryIndexKey)]
		}

		if len(family.ColumnIDs) == 1 && family.ColumnIDs[0] == family.DefaultColumnID {
			// Storage optimization to store DefaultColumnID directly as a value. Also
			// backwards compatible with the original BaseFormatVersion.

			idx, ok := ru.updateColIDtoRowIndex[family.DefaultColumnID]
			if !ok {
				continue
			}

			ru.key = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID))
			if log.V(2) {
				log.Infof(ctx, "Put %s -> %v", ru.key, ru.marshalled[idx].PrettyPrint())
			}
			b.Put(&ru.key, &ru.marshalled[idx])
			ru.key = nil

			continue
		}

		ru.key = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID))
		ru.valueBuf = ru.valueBuf[:0]

		var lastColID sqlbase.ColumnID
		familySortedColumnIDs, ok := ru.helper.sortedColumnFamily(family.ID)
		if !ok {
			panic("invalid family sorted column id map")
		}
		for _, colID := range familySortedColumnIDs {
			if ru.helper.columnInPK(colID) {
				if family.ID != 0 {
					return nil, errors.Errorf("primary index column %d must be in family 0, was %d", colID, family.ID)
				}
				// Skip primary key columns as their values are encoded in the key of
				// each family. Family 0 is guaranteed to exist and acts as a sentinel.
				continue
			}

			idx, ok := ru.fetchColIDtoRowIndex[colID]
			if !ok {
				return nil, errors.Errorf("column %d was expected to be fetched, but wasn't", colID)
			}
			col := ru.fetchCols[idx]

			if ru.newValues[idx].Compare(parser.DNull) == 0 {
				continue
			}

			if lastColID > col.ID {
				panic(fmt.Errorf("cannot write column id %d after %d", col.ID, lastColID))
			}
			colIDDiff := col.ID - lastColID
			lastColID = col.ID
			ru.valueBuf, err = sqlbase.EncodeTableValue(ru.valueBuf, colIDDiff, ru.newValues[idx])
			if err != nil {
				return nil, err
			}
		}

		if family.ID != 0 && len(ru.valueBuf) == 0 {
			// The family might have already existed but every column in it is being
			// set to NULL, so delete it.
			if log.V(2) {
				log.Infof(ctx, "Del %s", ru.key)
			}

			b.Del(&ru.key)
		} else {
			ru.value.SetTuple(ru.valueBuf)
			if log.V(2) {
				log.Infof(ctx, "Put %s -> %v", ru.key, ru.value.PrettyPrint())
			}
			b.Put(&ru.key, &ru.value)
		}

		ru.key = nil
	}

	// Update secondary indexes.
	for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
		secondaryIndexEntry := secondaryIndexEntries[i]
		secondaryKeyChanged := !bytes.Equal(newSecondaryIndexEntry.Key, secondaryIndexEntry.Key)
		if secondaryKeyChanged {
			if err := ru.fks.checkIdx(ru.helper.indexes[i].ID, oldValues, ru.newValues); err != nil {
				return nil, err
			}

			if log.V(2) {
				log.Infof(ctx, "Del %s", secondaryIndexEntry.Key)
			}
			b.Del(secondaryIndexEntry.Key)
			// Do not update Indexes in the DELETE_ONLY state.
			if _, ok := ru.deleteOnlyIndex[i]; !ok {
				if log.V(2) {
					log.Infof(ctx, "CPut %s -> %v", newSecondaryIndexEntry.Key, newSecondaryIndexEntry.Value.PrettyPrint())
				}
				b.CPut(newSecondaryIndexEntry.Key, &newSecondaryIndexEntry.Value, nil)
			}
		}
	}

	return ru.newValues, nil
}
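
As with deleteRow, updateRow only stages KV operations; the caller runs the batch and keeps the returned row values, which stay valid only until the next call. A minimal usage sketch, assuming a rowUpdater ru built elsewhere for the table and oldValues fetched for the row being changed (the helper name is illustrative):

func updateOneRow(
	ctx context.Context, db *client.DB, ru *rowUpdater, oldValues, updateValues []parser.Datum,
) ([]parser.Datum, error) {
	var newValues []parser.Datum
	err := db.Txn(ctx, func(txn *client.Txn) error {
		var b client.Batch
		var err error
		// Stage the Put/CPut/Del operations that turn the old row into the new one.
		if newValues, err = ru.updateRow(ctx, &b, oldValues, updateValues); err != nil {
			return err
		}
		return txn.Run(&b)
	})
	return newValues, err
}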
Code example #9
File: rowwriter.go Project: knz/cockroach
// insertPutFn is used by insertRow when conflicts should be ignored.
// The key/value pair is pretty-printed when V(2) logging is enabled.
func insertPutFn(ctx context.Context, b *client.Batch, key *roachpb.Key, value *roachpb.Value) {
	if log.V(2) {
		log.InfofDepth(ctx, 1, "Put %s -> %s", *key, value.PrettyPrint())
	}
	b.Put(key, value)
}
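
insertPutFn and insertCPutFn share one signature, so insert code can choose the write strategy once and reuse it for every key it emits. The putFn type and chooser below are illustrative assumptions rather than the project's actual declarations:

type putFn func(ctx context.Context, b *client.Batch, key *roachpb.Key, value *roachpb.Value)

func choosePutFn(ignoreConflicts bool) putFn {
	if ignoreConflicts {
		return insertPutFn // blind write: any existing value is overwritten
	}
	return insertCPutFn // conditional write: fails if the key already exists
}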