Example #1
// upsertRowPKs returns the primary keys of any rows with potential upsert
// conflicts.
func (tu *tableUpserter) upsertRowPKs() ([]roachpb.Key, error) {
	upsertRowPKs := make([]roachpb.Key, len(tu.insertRows))

	if tu.conflictIndex.ID == tu.tableDesc.PrimaryIndex.ID {
		// If the conflict index is the primary index, we can compute them directly.
		// In this case, the slice will be filled, but not all rows will have
		// conflicts.
		for i, insertRow := range tu.insertRows {
			upsertRowPK, _, err := sqlbase.EncodeIndexKey(
				tu.tableDesc, &tu.conflictIndex, tu.ri.insertColIDtoRowIndex, insertRow, tu.indexKeyPrefix)
			if err != nil {
				return nil, err
			}
			upsertRowPKs[i] = upsertRowPK
		}
		return upsertRowPKs, nil
	}

	// Otherwise, compute the keys for the conflict index and look them up. The
	// primary keys can be constructed from the entries that come back. In this
	// case, some spots in the slice will be nil (indicating no conflict) and the
	// others will be conflicting rows.
	b := tu.txn.NewBatch()
	for _, insertRow := range tu.insertRows {
		entry, err := sqlbase.EncodeSecondaryIndex(
			tu.tableDesc, &tu.conflictIndex, tu.ri.insertColIDtoRowIndex, insertRow)
		if err != nil {
			return nil, err
		}
		if log.V(2) {
			log.Infof("Get %s\n", entry.Key)
		}
		b.Get(entry.Key)
	}

	if err := tu.txn.Run(b); err != nil {
		return nil, err
	}
	for i, result := range b.Results {
		// if len(result.Rows) == 0, then no conflict for this row, so leave
		// upsertRowPKs[i] as nil.
		if len(result.Rows) == 1 {
			if result.Rows[0].Value == nil {
				upsertRowPKs[i] = nil
			} else {
				upsertRowPK, err := sqlbase.ExtractIndexKey(&tu.a, tu.tableDesc, result.Rows[0])
				if err != nil {
					return nil, err
				}
				upsertRowPKs[i] = upsertRowPK
			}
		} else if len(result.Rows) > 1 {
			panic(fmt.Errorf(
				"Expected <= 1 but got %d conflicts for row %s", len(result.Rows), tu.insertRows[i]))
		}
	}

	return upsertRowPKs, nil
}
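The secondary-index branch above follows a general pattern: issue one point lookup per candidate row, then walk the results positionally so that slot i of the output corresponds to row i of the input, with nil marking "no conflict". Below is a minimal, self-contained sketch of that pattern; kvStore and lookupConflicts are illustrative stand-ins, not part of the real API, and a plain map takes the place of the transactional batch.

package main

import "fmt"

// kvStore stands in for the transactional KV batch used above: each key maps
// to an index entry from which a primary key can be recovered.
type kvStore map[string]string

// lookupConflicts returns a slice aligned with candidateKeys: entry i holds
// the primary key found for row i, or "" when row i has no conflict.
func lookupConflicts(store kvStore, candidateKeys []string) []string {
	conflicts := make([]string, len(candidateKeys))
	for i, key := range candidateKeys {
		if pk, ok := store[key]; ok {
			// Conflict: remember which existing row this insert collides with.
			conflicts[i] = pk
		}
		// Otherwise leave conflicts[i] empty, meaning "no conflict" for row i.
	}
	return conflicts
}

func main() {
	store := kvStore{"idx/a": "pk/1", "idx/c": "pk/3"}
	fmt.Println(lookupConflicts(store, []string{"idx/a", "idx/b", "idx/c"}))
	// Prints: [pk/1  pk/3] -- the empty middle slot means no conflict.
}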
Example #2
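// check looks up the given values in the referenced index and returns the
// matching row, if any.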
// TODO(dt): Batch checks of many rows.
func (f baseFKHelper) check(values parser.DTuple) (parser.DTuple, error) {
	keyBytes, _, err := sqlbase.EncodeIndexKey(f.searchIdx, f.ids, values, f.searchPrefix)
	if err != nil {
		return nil, err
	}
	key := roachpb.Key(keyBytes)

	spans := sqlbase.Spans{sqlbase.Span{Start: key, End: key.PrefixEnd()}}
	if err := f.rf.StartScan(f.txn, spans, 1); err != nil {
		return nil, err
	}
	return f.rf.NextRow()
}
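The existence check above builds the index key for the referenced values and scans the span [key, key.PrefixEnd()) with a limit of one row: anything returned means the referenced row exists. A rough stand-alone illustration of the same limit-1 prefix check over a sorted in-memory key list follows; prefixExists and the key strings are made up for the sketch.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// prefixExists reports whether any key in the sorted slice starts with prefix,
// mirroring a limit-1 scan over [prefix, prefix.PrefixEnd()).
func prefixExists(sortedKeys []string, prefix string) bool {
	// Binary-search for the first key >= prefix, then check only that key;
	// a limit-1 scan never needs to look further.
	i := sort.SearchStrings(sortedKeys, prefix)
	return i < len(sortedKeys) && strings.HasPrefix(sortedKeys[i], prefix)
}

func main() {
	keys := []string{"fk/10/x", "fk/12/y", "fk/15/z"}
	fmt.Println(prefixExists(keys, "fk/12")) // true
	fmt.Println(prefixExists(keys, "fk/13")) // false
}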
Example #3
// fetchExisting returns any existing rows in the table that conflict with the
// ones in tu.insertRows. The returned slice is the same length as tu.insertRows
// and a nil entry indicates no conflict.
func (tu *tableUpserter) fetchExisting() ([]parser.DTuple, error) {
	primaryKeys, err := tu.upsertRowPKs()
	if err != nil {
		return nil, err
	}

	pkSpans := make(sqlbase.Spans, 0, len(primaryKeys))
	rowIdxForPrimaryKey := make(map[string]int, len(primaryKeys))
	for i, primaryKey := range primaryKeys {
		if primaryKey != nil {
			pkSpans = append(pkSpans, sqlbase.Span{Start: primaryKey, End: primaryKey.PrefixEnd()})
			if _, ok := rowIdxForPrimaryKey[string(primaryKey)]; ok {
				return nil, fmt.Errorf("UPSERT/ON CONFLICT DO UPDATE command cannot affect row a second time")
			}
			rowIdxForPrimaryKey[string(primaryKey)] = i
		}
	}
	if len(pkSpans) == 0 {
		// Every key was empty, so there's nothing to fetch.
		return make([]parser.DTuple, len(primaryKeys)), nil
	}

	if err := tu.fetcher.StartScan(tu.txn, pkSpans, int64(len(pkSpans))); err != nil {
		return nil, err
	}

	rows := make([]parser.DTuple, len(primaryKeys))
	for {
		row, err := tu.fetcher.NextRow()
		if err != nil {
			return nil, err
		}
		if row == nil {
			break // Done
		}

		rowPrimaryKey, _, err := sqlbase.EncodeIndexKey(
			tu.tableDesc, &tu.tableDesc.PrimaryIndex, tu.fetchColIDtoRowIndex, row, tu.indexKeyPrefix)
		if err != nil {
			return nil, err
		}

		// The rows returned by rowFetcher are invalidated after the call to
		// NextRow, so we have to copy them to save them.
		rowCopy := make(parser.DTuple, len(row))
		copy(rowCopy, row)
		rows[rowIdxForPrimaryKey[string(rowPrimaryKey)]] = rowCopy
	}
	return rows, nil
}
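The bookkeeping in fetchExisting boils down to two uses of one map: it rejects a statement that names the same primary key twice, and it records which input row each fetched row belongs to so the result slice stays aligned with tu.insertRows. A small self-contained sketch of that bookkeeping, with string stand-ins for encoded keys and rows (placeFetched and its types are illustrative only):

package main

import (
	"errors"
	"fmt"
)

// placeFetched re-orders fetched rows so that result[i] corresponds to input
// row i, using the row's primary key as the join key. A repeated primary key
// among the inputs is rejected, mirroring the "cannot affect row a second
// time" check above.
func placeFetched(inputPKs []string, fetched map[string]string) ([]string, error) {
	rowIdxForPK := make(map[string]int, len(inputPKs))
	for i, pk := range inputPKs {
		if pk == "" {
			continue // no conflict for this input row
		}
		if _, ok := rowIdxForPK[pk]; ok {
			return nil, errors.New("UPSERT cannot affect row a second time")
		}
		rowIdxForPK[pk] = i
	}

	result := make([]string, len(inputPKs))
	for pk, row := range fetched {
		if i, ok := rowIdxForPK[pk]; ok {
			result[i] = row // slot the existing row next to the input that hit it
		}
	}
	return result, nil
}

func main() {
	rows, err := placeFetched(
		[]string{"pk/3", "", "pk/1"},
		map[string]string{"pk/1": "rowA", "pk/3": "rowB"},
	)
	fmt.Println(rows, err) // [rowB  rowA] <nil>
}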
Example #4
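// row buffers a single row to be upserted, precomputing and saving its
// primary key alongside it.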
func (tu *tableUpserter) row(row parser.DTuple) (parser.DTuple, error) {
	tu.upsertRows = append(tu.upsertRows, row)

	// TODO(dan): The presence of OnConflict currently implies the short form
	// (primary index and update the values being inserted). Implement the long
	// form.
	upsertRowPK, _, err := sqlbase.EncodeIndexKey(
		&tu.tableDesc.PrimaryIndex, tu.ri.insertColIDtoRowIndex, row, tu.indexKeyPrefix)
	if err != nil {
		return nil, err
	}
	tu.upsertRowPKs = append(tu.upsertRowPKs, upsertRowPK)

	// TODO(dan): If len(tu.upsertRows) > some threshold, call flush().
	return nil, nil
}
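The TODO above points at a buffer-and-flush scheme: accumulate rows until some threshold, then hand the whole batch to flush(). The original leaves the threshold unspecified; the sketch below only illustrates the shape of that scheme with an arbitrary illustrative threshold, and rowBuffer/flushFn are invented names, not the real implementation.

package main

import "fmt"

const flushThreshold = 2 // illustrative only; the real threshold is left as a TODO above

// rowBuffer accumulates rows and flushes them once the buffer reaches the
// threshold. flushFn stands in for the upserter's flush step.
type rowBuffer struct {
	rows    []string
	flushFn func([]string) error
}

func (b *rowBuffer) add(row string) error {
	b.rows = append(b.rows, row)
	if len(b.rows) >= flushThreshold {
		if err := b.flushFn(b.rows); err != nil {
			return err
		}
		b.rows = b.rows[:0] // reuse the backing array for the next batch
	}
	return nil
}

func main() {
	b := &rowBuffer{flushFn: func(rows []string) error {
		fmt.Println("flushing", rows)
		return nil
	}}
	for _, r := range []string{"r1", "r2", "r3"} {
		_ = b.add(r)
	}
	fmt.Println("left in buffer:", b.rows)
}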
Example #5
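// row adds a row to the upsert. On the fast path it is written directly into
// the batch; otherwise it is buffered until flush.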
func (tu *tableUpserter) row(row parser.DTuple) (parser.DTuple, error) {
	if tu.fastPathBatch != nil {
		primaryKey, _, err := sqlbase.EncodeIndexKey(
			tu.tableDesc, &tu.tableDesc.PrimaryIndex, tu.ri.insertColIDtoRowIndex, row, tu.indexKeyPrefix)
		if err != nil {
			return nil, err
		}
		if _, ok := tu.fastPathKeys[string(primaryKey)]; ok {
			return nil, fmt.Errorf("UPSERT/ON CONFLICT DO UPDATE command cannot affect row a second time")
		}
		tu.fastPathKeys[string(primaryKey)] = struct{}{}
		err = tu.ri.insertRow(tu.fastPathBatch, row, true)
		return nil, err
	}

	tu.insertRows = append(tu.insertRows, row)
	// TODO(dan): If len(tu.insertRows) > some threshold, call flush().
	return nil, nil
}
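The fast path guards against a statement touching the same row twice with a set keyed by the encoded primary key. The same guard in isolation looks like the sketch below; seenKeys and addOnce are illustrative names, and plain strings stand in for encoded keys.

package main

import "fmt"

// seenKeys rejects the second occurrence of a primary key within one
// statement, like tu.fastPathKeys above. Empty struct values cost no memory.
type seenKeys map[string]struct{}

func (s seenKeys) addOnce(pk string) error {
	if _, ok := s[pk]; ok {
		return fmt.Errorf("UPSERT/ON CONFLICT DO UPDATE command cannot affect row a second time")
	}
	s[pk] = struct{}{}
	return nil
}

func main() {
	s := make(seenKeys)
	fmt.Println(s.addOnce("pk/7")) // <nil>
	fmt.Println(s.addOnce("pk/7")) // error: cannot affect row a second time
}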
Example #6
// encodeIndexes encodes the primary and secondary index keys. The
// secondaryIndexEntries are only valid until the next call to encodeIndexes or
// encodeSecondaryIndexes.
func (rh *rowHelper) encodeIndexes(
	colIDtoRowIndex map[sqlbase.ColumnID]int, values []parser.Datum,
) (
	primaryIndexKey []byte,
	secondaryIndexEntries []sqlbase.IndexEntry,
	err error,
) {
	if rh.primaryIndexKeyPrefix == nil {
		rh.primaryIndexKeyPrefix = sqlbase.MakeIndexKeyPrefix(rh.tableDesc,
			rh.tableDesc.PrimaryIndex.ID)
	}
	primaryIndexKey, _, err = sqlbase.EncodeIndexKey(
		rh.tableDesc, &rh.tableDesc.PrimaryIndex, colIDtoRowIndex, values, rh.primaryIndexKeyPrefix)
	if err != nil {
		return nil, nil, err
	}
	secondaryIndexEntries, err = rh.encodeSecondaryIndexes(colIDtoRowIndex, values)
	if err != nil {
		return nil, nil, err
	}
	return primaryIndexKey, secondaryIndexEntries, nil
}
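encodeIndexes computes the primary index key prefix lazily and caches it on the rowHelper so repeated encodes reuse the same slice. A minimal sketch of that memoization, with prefixCache and buildPrefix as invented stand-ins for the rowHelper fields and sqlbase.MakeIndexKeyPrefix:

package main

import "fmt"

// prefixCache lazily computes and memoizes a key prefix that is expensive to
// build but identical for every row of the same index.
type prefixCache struct {
	tableID, indexID uint32
	prefix           []byte
}

// buildPrefix is a placeholder for the real prefix construction.
func buildPrefix(tableID, indexID uint32) []byte {
	return []byte(fmt.Sprintf("/Table/%d/%d", tableID, indexID))
}

func (c *prefixCache) indexKeyPrefix() []byte {
	if c.prefix == nil {
		// Computed at most once; every subsequent encode reuses the slice.
		c.prefix = buildPrefix(c.tableID, c.indexID)
	}
	return c.prefix
}

func main() {
	c := &prefixCache{tableID: 51, indexID: 1}
	fmt.Printf("%s\n", c.indexKeyPrefix()) // /Table/51/1
	fmt.Printf("%s\n", c.indexKeyPrefix()) // same cached slice
}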
Example #7
func (sc *SchemaChanger) truncateAndBackfillColumnsChunk(
	added []sqlbase.ColumnDescriptor,
	dropped []sqlbase.ColumnDescriptor,
	defaultExprs []parser.TypedExpr,
	evalCtx *parser.EvalContext,
	sp sqlbase.Span,
) (roachpb.Key, bool, error) {
	var curIndexKey roachpb.Key
	done := false
	err := sc.db.Txn(func(txn *client.Txn) error {
		tableDesc, err := getTableDescFromID(txn, sc.tableID)
		if err != nil {
			return err
		}
		// Short circuit the backfill if the table has been deleted.
		if tableDesc.Deleted() {
			done = true
			return nil
		}

		updateCols := append(added, dropped...)
		fkTables := TablesNeededForFKs(*tableDesc, CheckUpdates)
		for k := range fkTables {
			if fkTables[k], err = getTableDescFromID(txn, k); err != nil {
				return err
			}
		}
		// TODO(dan): Tighten up the bound on the requestedCols parameter to
		// makeRowUpdater.
		requestedCols := make([]sqlbase.ColumnDescriptor, 0, len(tableDesc.Columns)+len(added))
		requestedCols = append(requestedCols, tableDesc.Columns...)
		requestedCols = append(requestedCols, added...)
		ru, err := makeRowUpdater(
			txn, tableDesc, fkTables, updateCols, requestedCols, rowUpdaterOnlyColumns,
		)
		if err != nil {
			return err
		}

		// TODO(dan): This check is an unfortunate bleeding of the internals of
		// rowUpdater. Extract the sql row to k/v mapping logic out into something
		// usable here.
		if !ru.isColumnOnlyUpdate() {
			panic("only column data should be modified, but the rowUpdater is configured otherwise")
		}

		// Run a scan across the table using the primary key. Running
		// the scan and applying the changes in many transactions is
		// fine because the schema change is in the correct state to
		// handle intermediate OLTP commands which delete and add
		// values during the scan.
		var rf sqlbase.RowFetcher
		colIDtoRowIndex := colIDtoRowIndexFromCols(tableDesc.Columns)
		valNeededForCol := make([]bool, len(tableDesc.Columns))
		for i := range valNeededForCol {
			_, valNeededForCol[i] = ru.fetchColIDtoRowIndex[tableDesc.Columns[i].ID]
		}
		err = rf.Init(tableDesc, colIDtoRowIndex, &tableDesc.PrimaryIndex, false, false,
			tableDesc.Columns, valNeededForCol)
		if err != nil {
			return err
		}
		// StartScan uses 0 as a sentinel for the default limit of entries scanned.
		if err := rf.StartScan(txn, sqlbase.Spans{sp}, 0); err != nil {
			return err
		}

		indexKeyPrefix := sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID)
		oldValues := make(parser.DTuple, len(ru.fetchCols))
		updateValues := make(parser.DTuple, len(updateCols))

		writeBatch := &client.Batch{}
		var i int
		for ; i < ColumnTruncateAndBackfillChunkSize; i++ {
			row, err := rf.NextRow()
			if err != nil {
				return err
			}
			if row == nil {
				break // Done
			}

			curIndexKey, _, err = sqlbase.EncodeIndexKey(
				tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, row, indexKeyPrefix)
			if err != nil {
				return err
			}

			for j, col := range added {
				if defaultExprs == nil || defaultExprs[j] == nil {
					updateValues[j] = parser.DNull
				} else {
					updateValues[j], err = defaultExprs[j].Eval(evalCtx)
					if err != nil {
						return err
					}
				}
				if !col.Nullable && updateValues[j].Compare(parser.DNull) == 0 {
					return sqlbase.NewNonNullViolationError(col.Name)
				}
			}
			for j := range dropped {
				updateValues[j+len(added)] = parser.DNull
			}

			copy(oldValues, row)
			for j := len(row); j < len(oldValues); j++ {
				oldValues[j] = parser.DNull
			}
			if _, err := ru.updateRow(writeBatch, oldValues, updateValues); err != nil {
				return err
			}
		}
		if i < ColumnTruncateAndBackfillChunkSize {
			done = true
		}

		if err := txn.Run(writeBatch); err != nil {
			return convertBackfillError(tableDesc, writeBatch)
		}
		return nil
	})
	return curIndexKey.PrefixEnd(), done, err
}
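The chunking logic above is the part worth isolating: each transaction processes at most ColumnTruncateAndBackfillChunkSize rows, a short chunk signals that the scan is exhausted, and the caller resumes from the last key's PrefixEnd. A stripped-down sketch of that loop, assuming a plain slice in place of the row fetcher and invented names (backfillChunk, chunkSize):

package main

import "fmt"

const chunkSize = 3 // illustrative stand-in for ColumnTruncateAndBackfillChunkSize

// backfillChunk processes at most chunkSize rows starting at startIdx and
// reports the resume position plus whether the backfill is finished, echoing
// the "i < ColumnTruncateAndBackfillChunkSize implies done" logic above.
func backfillChunk(rows []string, startIdx int) (next int, done bool) {
	i := 0
	for ; i < chunkSize && startIdx+i < len(rows); i++ {
		_ = rows[startIdx+i] // stand-in for updating one row in the write batch
	}
	// A short chunk means the scan ran out of rows: the backfill is complete.
	return startIdx + i, i < chunkSize
}

func main() {
	rows := []string{"r1", "r2", "r3", "r4", "r5"}
	for idx, done := 0, false; !done; {
		idx, done = backfillChunk(rows, idx)
		fmt.Println("resume at", idx, "done:", done)
	}
}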
Example #8
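// flush fetches the existing rows that conflict with the buffered upserts and
// issues the corresponding inserts and updates in a single batch.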
func (tu *tableUpserter) flush() error {
	pkSpans := make(sqlbase.Spans, len(tu.upsertRowPKs))
	for i, upsertRowPK := range tu.upsertRowPKs {
		pkSpans[i] = sqlbase.Span{Start: upsertRowPK, End: upsertRowPK.PrefixEnd()}
	}

	if err := tu.fetcher.StartScan(tu.txn, pkSpans, int64(len(pkSpans))); err != nil {
		return err
	}

	var upsertRowPKIdx int
	existingRows := make([]parser.DTuple, len(tu.upsertRowPKs))
	for {
		row, err := tu.fetcher.NextRow()
		if err != nil {
			return err
		}
		if row == nil {
			break
		}

		// TODO(dan): Can we do this without encoding an index key for every row we
		// get back?
		rowPK, _, err := sqlbase.EncodeIndexKey(
			&tu.tableDesc.PrimaryIndex, tu.ri.insertColIDtoRowIndex, row, tu.indexKeyPrefix)
		if err != nil {
			return err
		}

		for ; upsertRowPKIdx < len(tu.upsertRowPKs); upsertRowPKIdx++ {
			if bytes.Equal(tu.upsertRowPKs[upsertRowPKIdx], rowPK) {
				existingRows[upsertRowPKIdx] = row
				break
			}
		}
	}

	b := tu.txn.NewBatch()
	for i, upsertRow := range tu.upsertRows {
		existingRow := existingRows[i]
		if existingRow == nil {
			err := tu.ri.insertRow(b, upsertRow)
			if err != nil {
				return err
			}
		} else {
			for uCol, uIdx := range tu.updateColIDtoRowIndex {
				tu.updateRow[uIdx] = upsertRow[tu.ri.insertColIDtoRowIndex[uCol]]
			}
			_, err := tu.ru.updateRow(b, existingRow, tu.updateRow)
			if err != nil {
				return err
			}
		}
	}
	tu.upsertRows = nil
	tu.upsertRowPKs = nil

	if err := tu.txn.Run(b); err != nil {
		for _, r := range b.Results {
			if r.PErr != nil {
				return convertBatchError(tu.tableDesc, *b, r.PErr)
			}
		}
		return err
	}
	return nil
}
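Matching the fetched rows back to the buffered primary keys relies on both sequences being in the same key order, so a single forward cursor with bytes.Equal is enough. A self-contained sketch of that merge step, with simplified row and key types (matchExisting is an invented name):

package main

import (
	"bytes"
	"fmt"
)

// matchExisting lines up fetched rows with the upsert keys that produced them.
// Both slices are assumed to be in the same key order, so one forward cursor
// suffices, as in flush above.
func matchExisting(upsertPKs, fetchedPKs [][]byte, fetchedRows []string) []string {
	existing := make([]string, len(upsertPKs))
	idx := 0
	for i, pk := range fetchedPKs {
		// Advance the cursor until it points at the upsert key for this row.
		for ; idx < len(upsertPKs); idx++ {
			if bytes.Equal(upsertPKs[idx], pk) {
				existing[idx] = fetchedRows[i]
				break
			}
		}
	}
	return existing
}

func main() {
	upsertPKs := [][]byte{[]byte("pk/1"), []byte("pk/2"), []byte("pk/3")}
	existing := matchExisting(upsertPKs,
		[][]byte{[]byte("pk/1"), []byte("pk/3")},
		[]string{"rowA", "rowC"})
	fmt.Printf("%q\n", existing) // ["rowA" "" "rowC"]
}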
Example #9
func (n *indexJoinNode) Next() bool {
	// Loop looking up the next row. We either are going to pull a row from the
	// table or a batch of rows from the index. If we pull a batch of rows from
	// the index we perform another iteration of the loop looking for rows in the
	// table. This outer loop is necessary because a batch of rows from the index
	// might all be filtered when the resulting rows are read from the table.
	for tableLookup := (len(n.table.spans) > 0); true; tableLookup = true {
		// First, try to pull a row from the table.
		if tableLookup && n.table.Next() {
			if n.explain == explainDebug {
				n.debugVals = n.table.DebugValues()
			}
			return true
		}
		if n.err = n.table.Err(); n.err != nil {
			return false
		}

		// The table is out of rows. Pull primary keys from the index.
		n.table.scanInitialized = false
		n.table.spans = n.table.spans[:0]

		for len(n.table.spans) < joinBatchSize {
			if !n.index.Next() {
				// The index is out of rows or an error occurred.
				if n.err = n.index.Err(); n.err != nil {
					return false
				}
				if len(n.table.spans) == 0 {
					// The index is out of rows.
					return false
				}
				break
			}

			if n.explain == explainDebug {
				n.debugVals = n.index.DebugValues()
				if n.debugVals.output != debugValueRow {
					return true
				}
			}

			vals := n.index.Values()
			primaryIndexKey, _, err := sqlbase.EncodeIndexKey(
				n.table.index, n.colIDtoRowIndex, vals, n.primaryKeyPrefix)
			n.err = err
			if n.err != nil {
				return false
			}
			key := roachpb.Key(primaryIndexKey)
			n.table.spans = append(n.table.spans, sqlbase.Span{
				Start: key,
				End:   key.PrefixEnd(),
			})

			if n.explain == explainDebug {
				// In debug mode, return the index information as a "partial" row.
				n.debugVals.output = debugValuePartial
				return true
			}
		}

		if log.V(3) {
			log.Infof("table scan: %s", sqlbase.PrettySpans(n.table.spans, 0))
		}
	}
	return false
}
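The inner loop batches up to joinBatchSize primary keys from the index before each table lookup; the outer loop exists because an entire batch can be filtered away when the table rows are read, in which case the join must refill from the index. The batching on its own can be sketched as below, with plain strings standing in for encoded primary keys and batchKeys as an invented helper:

package main

import "fmt"

const joinBatchSize = 2 // illustrative; the real constant lives in the sql package

// batchKeys groups primary keys produced by an index scan into batches of at
// most joinBatchSize, the way indexJoinNode fills n.table.spans before each
// table lookup.
func batchKeys(indexKeys []string) [][]string {
	var batches [][]string
	for len(indexKeys) > 0 {
		n := joinBatchSize
		if n > len(indexKeys) {
			n = len(indexKeys)
		}
		// Each batch becomes one set of spans handed to the table scan; a batch
		// can yield zero rows after filtering, which is why Next() loops.
		batches = append(batches, indexKeys[:n])
		indexKeys = indexKeys[n:]
	}
	return batches
}

func main() {
	fmt.Println(batchKeys([]string{"pk/1", "pk/2", "pk/3", "pk/4", "pk/5"}))
	// [[pk/1 pk/2] [pk/3 pk/4] [pk/5]]
}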