Code Example #1
File: backfill.go Project: nporsche/cockroach
func (p *planner) makeBackfillBatch(tableName *parser.QualifiedName, tableDesc *TableDescriptor, indexDescs ...IndexDescriptor) (client.Batch, error) {
	b := client.Batch{}
	// Get all the rows affected.
	// TODO(vivek): Avoid going through Select.
	// TODO(tamird): Support partial indexes?
	row, err := p.Select(&parser.Select{
		Exprs: parser.SelectExprs{parser.StarSelectExpr()},
		From:  parser.TableExprs{&parser.AliasedTableExpr{Expr: tableName}},
	})
	if err != nil {
		return b, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, name := range row.Columns() {
		c, err := tableDesc.FindColumnByName(name)
		if err != nil {
			return b, err
		}
		colIDtoRowIndex[c.ID] = i
	}

	// TODO(tamird): This will fall down in production use. We need to do
	// something better (see #2036). In particular, this implementation
	// has the following problems:
	// - Very large tables will generate an enormous batch here. This
	// isn't really a problem in itself except that it will exacerbate
	// the other issue:
	// - Any non-quiescent table that this runs against will end up with
	// an inconsistent index. This is because as inserts/updates continue
	// to roll in behind this operation's read front, the written index
	// will become incomplete/stale before it's written.

	for row.Next() {
		rowVals := row.Values()

		for _, indexDesc := range indexDescs {
			secondaryIndexEntries, err := encodeSecondaryIndexes(
				tableDesc.ID, []IndexDescriptor{indexDesc}, colIDtoRowIndex, rowVals)
			if err != nil {
				return b, err
			}

			for _, secondaryIndexEntry := range secondaryIndexEntries {
				if log.V(2) {
					log.Infof("CPut %s -> %v", prettyKey(secondaryIndexEntry.key, 0),
						secondaryIndexEntry.value)
				}
				b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
			}
		}
	}

	return b, row.Err()
}
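
The caller of makeBackfillBatch is not shown in this listing. A minimal sketch of how the returned batch might be consumed, assuming (as the other examples here do) that the planner runs batches inside its transaction; tableName, tableDesc, and newIndexDesc are assumed to be in scope:

// Hypothetical caller (assumption; not part of the original file).
b, err := p.makeBackfillBatch(tableName, tableDesc, newIndexDesc)
if err != nil {
	return err
}
if err := p.txn.Run(&b); err != nil {
	return err
}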
Code Example #2
File: backfill.go Project: mbertschler/cockroach
func (p *planner) backfillBatch(b *client.Batch, tableName *parser.QualifiedName, oldTableDesc, newTableDesc *TableDescriptor) error {
	table := &parser.AliasedTableExpr{Expr: tableName}
	var droppedColumnDescs []ColumnDescriptor
	var droppedIndexDescs []IndexDescriptor
	var newIndexDescs []IndexDescriptor
	for _, m := range oldTableDesc.Mutations {
		switch m.Direction {
		case DescriptorMutation_ADD:
			switch t := m.Descriptor_.(type) {
			case *DescriptorMutation_Column:
				// TODO(vivek): Add column to new columns and use it
				// to fill in default values.

			case *DescriptorMutation_Index:
				newIndexDescs = append(newIndexDescs, *t.Index)
			}

		case DescriptorMutation_DROP:
			switch t := m.Descriptor_.(type) {
			case *DescriptorMutation_Column:
				droppedColumnDescs = append(droppedColumnDescs, *t.Column)

			case *DescriptorMutation_Index:
				droppedIndexDescs = append(droppedIndexDescs, *t.Index)
			}
		}
	}

	if len(droppedColumnDescs) > 0 {
		var updateExprs parser.UpdateExprs
		for _, droppedColumnDesc := range droppedColumnDescs {
			updateExprs = append(updateExprs, &parser.UpdateExpr{
				Names: parser.QualifiedNames{&parser.QualifiedName{Base: parser.Name(droppedColumnDesc.Name)}},
				Expr:  parser.DNull,
			})
		}

		// Run `UPDATE <table> SET col1 = NULL, col2 = NULL, ...` to clear
		// the data stored in the columns being dropped.
		if _, err := p.Update(&parser.Update{
			Table: table,
			Exprs: updateExprs,
		}); err != nil {
			return err
		}
	}

	for _, indexDescriptor := range droppedIndexDescs {
		indexPrefix := MakeIndexKeyPrefix(newTableDesc.ID, indexDescriptor.ID)

		// Delete the index.
		indexStartKey := roachpb.Key(indexPrefix)
		indexEndKey := indexStartKey.PrefixEnd()
		if log.V(2) {
			log.Infof("DelRange %s - %s", prettyKey(indexStartKey, 0), prettyKey(indexEndKey, 0))
		}
		b.DelRange(indexStartKey, indexEndKey)
	}

	if len(newIndexDescs) > 0 {
		// Get all the rows affected.
		// TODO(vivek): Avoid going through Select.
		// TODO(tamird): Support partial indexes?
		rows, err := p.Select(&parser.Select{
			Exprs: parser.SelectExprs{parser.StarSelectExpr()},
			From:  parser.TableExprs{table},
		})
		if err != nil {
			return err
		}

		// Construct a map from column ID to the index the value appears at within a
		// row.
		colIDtoRowIndex, err := makeColIDtoRowIndex(rows, oldTableDesc)
		if err != nil {
			return err
		}

		// TODO(tamird): This will fall down in production use. We need to do
		// something better (see #2036). In particular, this implementation
		// has the following problems:
		// - Very large tables will generate an enormous batch here. This
		// isn't really a problem in itself except that it will exacerbate
		// the other issue:
		// - Any non-quiescent table that this runs against will end up with
		// an inconsistent index. This is because as inserts/updates continue
		// to roll in behind this operation's read front, the written index
		// will become incomplete/stale before it's written.

		for rows.Next() {
			rowVals := rows.Values()

			for _, newIndexDesc := range newIndexDescs {
				secondaryIndexEntries, err := encodeSecondaryIndexes(
					oldTableDesc.ID, []IndexDescriptor{newIndexDesc}, colIDtoRowIndex, rowVals)
				if err != nil {
					return err
				}

				for _, secondaryIndexEntry := range secondaryIndexEntries {
					if log.V(2) {
						log.Infof("CPut %s -> %v", prettyKey(secondaryIndexEntry.key, 0),
							secondaryIndexEntry.value)
					}
					b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
				}
			}
		}

		return rows.Err()
	}

	return nil
}
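
Examples #2 and #3 call makeColIDtoRowIndex, whose definition is not included here. Its presumed shape, inferred from the equivalent inlined loops in examples #1, #5, #7, and #9 (an assumption, not the original source):

// Presumed helper (assumption): map each column ID to the position its
// value occupies in a row produced by the SELECT.
func makeColIDtoRowIndex(row planNode, desc *TableDescriptor) (map[ColumnID]int, error) {
	colIDtoRowIndex := map[ColumnID]int{}
	for i, name := range row.Columns() {
		c, err := desc.FindColumnByName(name)
		if err != nil {
			return nil, err
		}
		colIDtoRowIndex[c.ID] = i
	}
	return colIDtoRowIndex, nil
}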
Code Example #3
File: delete.go Project: BobbWu/cockroach
// Delete deletes rows from a table.
// Privileges: DELETE and SELECT on table. We currently always use a SELECT statement.
//   Notes: postgres requires DELETE. Also requires SELECT for "USING" and "WHERE" with tables.
//          mysql requires DELETE. Also requires SELECT if a table is used in the "WHERE" clause.
func (p *planner) Delete(n *parser.Delete) (planNode, error) {
	tableDesc, err := p.getAliasedTableLease(n.Table)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.DELETE); err != nil {
		return nil, err
	}

	// TODO(tamird,pmattis): avoid going through Select to avoid encoding
	// and decoding keys.
	rows, err := p.Select(&parser.Select{
		Exprs: parser.SelectExprs{parser.StarSelectExpr()},
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex, err := makeColIDtoRowIndex(rows, tableDesc)
	if err != nil {
		return nil, err
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	b := client.Batch{}
	result := &valuesNode{}
	for rows.Next() {
		rowVals := rows.Values()
		result.rows = append(result.rows, parser.DTuple(nil))

		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}

		// Delete the secondary indexes.
		indexes := tableDesc.Indexes
		// Also include indexes under mutation.
		for _, m := range tableDesc.Mutations {
			if index := m.GetIndex(); index != nil {
				indexes = append(indexes, *index)
			}
		}
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		for _, secondaryIndexEntry := range secondaryIndexEntries {
			if log.V(2) {
				log.Infof("Del %s", prettyKey(secondaryIndexEntry.key, 0))
			}
			b.Del(secondaryIndexEntry.key)
		}

		// Delete the row.
		rowStartKey := roachpb.Key(primaryIndexKey)
		rowEndKey := rowStartKey.PrefixEnd()
		if log.V(2) {
			log.Infof("DelRange %s - %s", prettyKey(rowStartKey, 0), prettyKey(rowEndKey, 0))
		}
		b.DelRange(rowStartKey, rowEndKey)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	if IsSystemID(tableDesc.GetID()) {
		// Mark transaction as operating on the system DB.
		p.txn.SetSystemDBTrigger()
	}
	if err := p.txn.Run(&b); err != nil {
		return nil, err
	}

	return result, nil
}
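
Note that this Delete appends an empty DTuple to result.rows for every deleted row; presumably the length of result.rows is how the affected-row count is surfaced, addressing the "return the number of affected rows" TODO seen in the older variants below. The pattern, condensed (assumption about intent):

// Sketch (assumption): one empty tuple per affected row, so the caller can
// derive the row count from the returned valuesNode.
result := &valuesNode{}
for rows.Next() {
	result.rows = append(result.rows, parser.DTuple(nil))
	// ... deletion keys for this row are added to the batch here ...
}
// len(result.rows) == number of rows deleted (presumed)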
Code Example #4
File: update.go Project: husttom/cockroach
// Update updates columns for a selection of rows from a table.
// Privileges: UPDATE and SELECT on table. We currently always use a select statement.
//   Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table.
//          mysql requires UPDATE. Also requires SELECT with WHERE clause with table.
func (p *planner) Update(n *parser.Update) (planNode, error) {
	tableDesc, err := p.getAliasedTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.UPDATE); err != nil {
		return nil, err
	}

	// Determine which columns we're inserting into.
	var names parser.QualifiedNames
	for _, expr := range n.Exprs {
		names = append(names, expr.Name)
	}
	cols, err := p.processColumns(tableDesc, names)
	if err != nil {
		return nil, err
	}

	// Set of columns being updated
	colIDSet := map[ColumnID]struct{}{}
	for _, c := range cols {
		colIDSet[c.ID] = struct{}{}
	}
	// Don't allow updating any column that is part of the primary key.
	for i, id := range tableDesc.PrimaryIndex.ColumnIDs {
		if _, ok := colIDSet[id]; ok {
			return nil, fmt.Errorf("primary key column %q cannot be updated", tableDesc.PrimaryIndex.ColumnNames[i])
		}
	}

	// Generate the list of select targets. We need to select all of the columns
	// plus we select all of the update expressions in case those expressions
	// reference columns (e.g. "UPDATE t SET v = v + 1").
	targets := make(parser.SelectExprs, 0, len(n.Exprs)+1)
	targets = append(targets, parser.StarSelectExpr())
	for _, expr := range n.Exprs {
		targets = append(targets, parser.SelectExpr{Expr: expr.Expr})
	}

	// Query the rows that need updating.
	rows, err := p.Select(&parser.Select{
		Exprs: targets,
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, col := range tableDesc.Columns {
		colIDtoRowIndex[col.ID] = i
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	// Secondary indexes needing updating.
	var indexes []IndexDescriptor
	for _, index := range tableDesc.Indexes {
		for _, id := range index.ColumnIDs {
			if _, ok := colIDSet[id]; ok {
				indexes = append(indexes, index)
				break
			}
		}
	}

	// Update all the rows.
	var b client.Batch
	for rows.Next() {
		rowVals := rows.Values()
		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}
		// Compute the current secondary index key:value pairs for this row.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		// Our updated value expressions occur immediately after the plain
		// columns in the output.
		newVals := rowVals[len(tableDesc.Columns):]
		// Update the row values.
		for i, col := range cols {
			val := newVals[i]
			if !col.Nullable && val == parser.DNull {
				return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name)
			}
			rowVals[colIDtoRowIndex[col.ID]] = val
		}
		// Compute the new secondary index key:value pairs for this row.
		newSecondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}
		// Update secondary indexes.
		for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
			secondaryIndexEntry := secondaryIndexEntries[i]
			if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) {
				if log.V(2) {
					log.Infof("CPut %q -> %v", newSecondaryIndexEntry.key, newSecondaryIndexEntry.value)
				}
				b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil)
				if log.V(2) {
					log.Infof("Del %q", secondaryIndexEntry.key)
				}
				b.Del(secondaryIndexEntry.key)
			}
		}

		// Add the new values.
		for i, val := range newVals {
			col := cols[i]

			primitive, err := convertDatum(col, val)
			if err != nil {
				return nil, err
			}

			key := MakeColumnKey(col.ID, primaryIndexKey)
			if primitive != nil {
				// We only output non-NULL values. Non-existent column keys are
				// considered NULL during scanning and the row sentinel ensures we know
				// the row exists.
				if log.V(2) {
					log.Infof("Put %q -> %v", key, val)
				}

				b.Put(key, primitive)
			} else {
				// The column might have already existed but is being set to NULL, so
				// delete it.
				if log.V(2) {
					log.Infof("Del %q", key)
				}

				b.Del(key)
			}
		}
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}
	if err := p.txn.Run(&b); err != nil {
		return nil, convertBatchError(tableDesc, b, err)
	}

	// TODO(tamird/pmattis): return the number of affected rows.
	return &valuesNode{}, nil
}
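
The per-column writes above rely on MakeColumnKey placing each column's value under the row's primary index key. A sketch of that layout and the Put/Del decision, isolated from the loop above (the layout is an assumption inferred from the calls shown; col, primitive, primaryIndexKey, and b are assumed in scope):

// Key layout sketch (assumption): each non-NULL column value lives at a key
// that extends the row's primary index key with the column ID, conceptually
//
//   <primaryIndexKeyPrefix>/<primary key values>          -> row sentinel
//   <primaryIndexKeyPrefix>/<primary key values>/<colID>  -> column value
//
// Setting a column to NULL therefore deletes its column key (absent keys read
// back as NULL), and DelRange over the primary-key prefix, as in the Delete
// examples, removes the whole row.
key := MakeColumnKey(col.ID, primaryIndexKey)
if primitive != nil {
	b.Put(key, primitive)
} else {
	b.Del(key)
}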
Code Example #5
File: delete.go Project: alunarbeach/cockroach
// Delete deletes rows from a table.
// Privileges: DELETE and SELECT on table. We currently always use a SELECT statement.
//   Notes: postgres requires DELETE. Also requires SELECT for "USING" and "WHERE" with tables.
//          mysql requires DELETE. Also requires SELECT if a table is used in the "WHERE" clause.
func (p *planner) Delete(n *parser.Delete) (planNode, error) {
	tableDesc, err := p.getAliasedTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.DELETE); err != nil {
		return nil, err
	}

	// TODO(tamird,pmattis): avoid going through Select to avoid encoding
	// and decoding keys. Also, avoiding Select may provide more
	// convenient access to index keys which we are not currently
	// deleting.
	node, err := p.Select(&parser.Select{
		Exprs: parser.SelectExprs{parser.StarSelectExpr()},
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, name := range node.Columns() {
		c, err := tableDesc.FindColumnByName(name)
		if err != nil {
			return nil, err
		}
		colIDtoRowIndex[c.ID] = i
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	b := client.Batch{}

	for node.Next() {
		values := node.Values()

		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, values, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}

		// Delete the secondary indexes.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, tableDesc.Indexes, colIDtoRowIndex, values)
		if err != nil {
			return nil, err
		}

		for _, secondaryIndexEntry := range secondaryIndexEntries {
			if log.V(2) {
				log.Infof("Del %q", secondaryIndexEntry.key)
			}
			b.Del(secondaryIndexEntry.key)
		}

		// Delete the row.
		rowStartKey := proto.Key(primaryIndexKey)
		rowEndKey := rowStartKey.PrefixEnd()
		if log.V(2) {
			log.Infof("DelRange %q - %q", rowStartKey, rowEndKey)
		}
		b.DelRange(rowStartKey, rowEndKey)
	}

	if err := node.Err(); err != nil {
		return nil, err
	}

	if err := p.txn.Run(&b).GoError(); err != nil {
		return nil, err
	}

	// TODO(tamird/pmattis): return the number of affected rows
	return &valuesNode{}, nil
}
Code Example #6
File: update.go Project: kumarh1982/cockroach
// Update updates columns for a selection of rows from a table.
// Privileges: UPDATE and SELECT on table. We currently always use a select statement.
//   Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table.
//          mysql requires UPDATE. Also requires SELECT with WHERE clause with table.
func (p *planner) Update(n *parser.Update) (planNode, error) {
	tableDesc, err := p.getAliasedTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.UPDATE); err != nil {
		return nil, err
	}

	// Determine which columns we're inserting into.
	var names parser.QualifiedNames
	for _, expr := range n.Exprs {
		var err error
		expr.Expr, err = p.expandSubqueries(expr.Expr, len(expr.Names))
		if err != nil {
			return nil, err
		}

		if expr.Tuple {
			// TODO(pmattis): The distinction between Tuple and DTuple here is
			// irritating. We'll see a DTuple if the expression was a subquery that
			// has been evaluated. We'll see a Tuple in other cases.
			n := 0
			switch t := expr.Expr.(type) {
			case parser.Tuple:
				n = len(t)
			case parser.DTuple:
				n = len(t)
			default:
				return nil, util.Errorf("unsupported tuple assignment: %T", expr.Expr)
			}
			if len(expr.Names) != n {
				return nil, fmt.Errorf("number of columns (%d) does not match number of values (%d)",
					len(expr.Names), n)
			}
		}
		names = append(names, expr.Names...)
	}
	cols, err := p.processColumns(tableDesc, names)
	if err != nil {
		return nil, err
	}

	// Set of columns being updated
	colIDSet := map[ColumnID]struct{}{}
	for _, c := range cols {
		colIDSet[c.ID] = struct{}{}
	}
	// Don't allow updating any column that is part of the primary key.
	for i, id := range tableDesc.PrimaryIndex.ColumnIDs {
		if _, ok := colIDSet[id]; ok {
			return nil, fmt.Errorf("primary key column %q cannot be updated", tableDesc.PrimaryIndex.ColumnNames[i])
		}
	}

	defaultExprs, err := p.makeDefaultExprs(cols)
	if err != nil {
		return nil, err
	}

	// Generate the list of select targets. We need to select all of the columns
	// plus we select all of the update expressions in case those expressions
	// reference columns (e.g. "UPDATE t SET v = v + 1"). Note that we flatten
	// expressions for tuple assignments just as we flattened the column names
	// above. So "UPDATE t SET (a, b) = (1, 2)" translates into select targets of
	// "*, 1, 2", not "*, (1, 2)".
	targets := make(parser.SelectExprs, 0, len(n.Exprs)+1)
	targets = append(targets, parser.StarSelectExpr())
	for _, expr := range n.Exprs {
		if expr.Tuple {
			switch t := expr.Expr.(type) {
			case parser.Tuple:
				for i, e := range t {
					e, err := fillDefault(e, i, defaultExprs)
					if err != nil {
						return nil, err
					}
					targets = append(targets, parser.SelectExpr{Expr: e})
				}
			case parser.DTuple:
				for _, e := range t {
					targets = append(targets, parser.SelectExpr{Expr: e})
				}
			}
		} else {
			e, err := fillDefault(expr.Expr, 0, defaultExprs)
			if err != nil {
				return nil, err
			}
			targets = append(targets, parser.SelectExpr{Expr: e})
		}
	}

	// Query the rows that need updating.
	rows, err := p.Select(&parser.Select{
		Exprs: targets,
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, col := range tableDesc.Columns {
		colIDtoRowIndex[col.ID] = i
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	// Secondary indexes needing updating.
	var indexes []IndexDescriptor
	for _, index := range tableDesc.Indexes {
		for _, id := range index.ColumnIDs {
			if _, ok := colIDSet[id]; ok {
				indexes = append(indexes, index)
				break
			}
		}
	}

	marshalled := make([]interface{}, len(cols))

	// Update all the rows.
	var b client.Batch
	for rows.Next() {
		rowVals := rows.Values()
		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}
		// Compute the current secondary index key:value pairs for this row.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		// Our updated value expressions occur immediately after the plain
		// columns in the output.
		newVals := rowVals[len(tableDesc.Columns):]
		// Update the row values.
		for i, col := range cols {
			val := newVals[i]
			if !col.Nullable && val == parser.DNull {
				return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name)
			}
			rowVals[colIDtoRowIndex[col.ID]] = val
		}

		// Check that the new value types match the column types. This needs to
		// happen before index encoding because certain datum types (i.e. tuple)
		// cannot be used as index values.
		for i, val := range newVals {
			var err error
			if marshalled[i], err = marshalColumnValue(cols[i], val); err != nil {
				return nil, err
			}
		}

		// Compute the new secondary index key:value pairs for this row.
		newSecondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		// Update secondary indexes.
		for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
			secondaryIndexEntry := secondaryIndexEntries[i]
			if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) {
				if log.V(2) {
					log.Infof("CPut %q -> %v", newSecondaryIndexEntry.key, newSecondaryIndexEntry.value)
				}
				b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil)
				if log.V(2) {
					log.Infof("Del %q", secondaryIndexEntry.key)
				}
				b.Del(secondaryIndexEntry.key)
			}
		}

		// Add the new values.
		for i, val := range newVals {
			col := cols[i]

			key := MakeColumnKey(col.ID, primaryIndexKey)
			if marshalled[i] != nil {
				// We only output non-NULL values. Non-existent column keys are
				// considered NULL during scanning and the row sentinel ensures we know
				// the row exists.
				if log.V(2) {
					log.Infof("Put %q -> %v", key, val)
				}

				b.Put(key, marshalled[i])
			} else {
				// The column might have already existed but is being set to NULL, so
				// delete it.
				if log.V(2) {
					log.Infof("Del %q", key)
				}

				b.Del(key)
			}
		}
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}
	if err := p.txn.Run(&b); err != nil {
		return nil, convertBatchError(tableDesc, b, err)
	}

	// TODO(tamird/pmattis): return the number of affected rows.
	return &valuesNode{}, nil
}
Code Example #7
File: update.go Project: Eric-Gaudiello/cockroach
// Update updates columns for a selection of rows from a table.
// Privileges: UPDATE and SELECT on table. We currently always use a select statement.
//   Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table.
//          mysql requires UPDATE. Also requires SELECT with WHERE clause with table.
func (p *planner) Update(n *parser.Update) (planNode, error) {
	tableDesc, err := p.getAliasedTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.UPDATE); err != nil {
		return nil, err
	}

	// Determine which columns we're inserting into.
	var names parser.QualifiedNames
	for _, expr := range n.Exprs {
		names = append(names, expr.Name)
	}
	cols, err := p.processColumns(tableDesc, names)
	if err != nil {
		return nil, err
	}

	// Set of columns being updated
	colIDSet := map[ColumnID]struct{}{}
	for _, c := range cols {
		colIDSet[c.ID] = struct{}{}
	}
	// Don't allow updating any column that is part of the primary key.
	for i, id := range tableDesc.PrimaryIndex.ColumnIDs {
		if _, ok := colIDSet[id]; ok {
			return nil, fmt.Errorf("primary key column %q cannot be updated", tableDesc.PrimaryIndex.ColumnNames[i])
		}
	}

	// Query the rows that need updating.
	// TODO(vivek): Avoid going through Select.
	row, err := p.Select(&parser.Select{
		Exprs: parser.SelectExprs{parser.StarSelectExpr()},
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, name := range row.Columns() {
		c, err := tableDesc.FindColumnByName(name)
		if err != nil {
			return nil, err
		}
		colIDtoRowIndex[c.ID] = i
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	// Evaluate all the column value expressions.
	vals := make([]parser.Datum, 0, 10)
	for _, expr := range n.Exprs {
		val, err := parser.EvalExpr(expr.Expr)
		if err != nil {
			return nil, err
		}
		vals = append(vals, val)
	}

	// Secondary indexes needing updating.
	var indexes []IndexDescriptor
	for _, index := range tableDesc.Indexes {
		for _, id := range index.ColumnIDs {
			if _, ok := colIDSet[id]; ok {
				indexes = append(indexes, index)
				break
			}
		}
	}

	// Update all the rows.
	b := client.Batch{}
	for row.Next() {
		rowVals := row.Values()
		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}
		// Compute the current secondary index key:value pairs for this row.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}
		// Compute the new secondary index key:value pairs for this row.
		//
		// Update the row values.
		for i, col := range cols {
			val := vals[i]

			if !col.Nullable && val == parser.DNull {
				return nil, fmt.Errorf("null value in column %q violates not-null constraint", col.Name)
			}
			rowVals[colIDtoRowIndex[col.ID]] = val
		}
		newSecondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}
		// Update secondary indexes.
		for i, newSecondaryIndexEntry := range newSecondaryIndexEntries {
			secondaryIndexEntry := secondaryIndexEntries[i]
			if !bytes.Equal(newSecondaryIndexEntry.key, secondaryIndexEntry.key) {
				if log.V(2) {
					log.Infof("CPut %q -> %v", newSecondaryIndexEntry.key, newSecondaryIndexEntry.value)
				}
				b.CPut(newSecondaryIndexEntry.key, newSecondaryIndexEntry.value, nil)
				if log.V(2) {
					log.Infof("Del %q", secondaryIndexEntry.key)
				}
				b.Del(secondaryIndexEntry.key)
			}
		}

		// Add the new values.
		for i, val := range vals {
			col := cols[i]

			primitive, err := convertDatum(col, val)
			if err != nil {
				return nil, err
			}

			key := MakeColumnKey(col.ID, primaryIndexKey)
			if primitive != nil {
				// We only output non-NULL values. Non-existent column keys are
				// considered NULL during scanning and the row sentinel ensures we know
				// the row exists.
				if log.V(2) {
					log.Infof("Put %q -> %v", key, val)
				}

				b.Put(key, primitive)
			} else {
				// The column might have already existed but is being set to NULL, so
				// delete it.
				if log.V(2) {
					log.Infof("Del %q", key)
				}

				b.Del(key)
			}
		}
	}

	if err := row.Err(); err != nil {
		return nil, err
	}

	if err := p.txn.Run(&b); err != nil {
		if tErr, ok := err.(*proto.ConditionFailedError); ok {
			return nil, util.Errorf("duplicate key value %q violates unique constraint %s", tErr.ActualValue.Bytes, "TODO(tamird)")
		}
		return nil, err
	}

	// TODO(tamird/pmattis): return the number of affected rows.
	return &valuesNode{}, nil
}
Code Example #8
File: create.go Project: DevendranGitHub/cockroach
// CreateIndex creates an index.
// Privileges: CREATE on table.
//   notes: postgres requires CREATE on the table.
//          mysql requires ALTER, CREATE, INSERT on the table.
func (p *planner) CreateIndex(n *parser.CreateIndex) (planNode, error) {
	tableDesc, err := p.getTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	if _, err := tableDesc.FindIndexByName(string(n.Name)); err == nil {
		if n.IfNotExists {
			// Noop.
			return &valuesNode{}, nil
		}
		return nil, fmt.Errorf("index %q already exists", string(n.Name))
	}

	if err := p.checkPrivilege(tableDesc, privilege.CREATE); err != nil {
		return nil, err
	}

	index := IndexDescriptor{
		Name:        string(n.Name),
		Unique:      n.Unique,
		ColumnNames: n.Columns,
	}
	tableDesc.Indexes = append(tableDesc.Indexes, index)

	if err := tableDesc.AllocateIDs(); err != nil {
		return nil, err
	}

	// `index` changed on us when we called `tableDesc.AllocateIDs()`.
	index = tableDesc.Indexes[len(tableDesc.Indexes)-1]

	// Get all the rows affected.
	// TODO(vivek): Avoid going through Select.
	// TODO(tamird): Support partial indexes?
	row, err := p.Select(&parser.Select{
		Exprs: parser.SelectExprs{parser.StarSelectExpr()},
		From:  parser.TableExprs{&parser.AliasedTableExpr{Expr: n.Table}},
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, name := range row.Columns() {
		c, err := tableDesc.FindColumnByName(name)
		if err != nil {
			return nil, err
		}
		colIDtoRowIndex[c.ID] = i
	}

	// TODO(tamird): This will fall down in production use. We need to do
	// something better (see #2036). In particular, this implementation
	// has the following problems:
	// - Very large tables will generate an enormous batch here. This
	// isn't really a problem in itself except that it will exacerbate
	// the other issue:
	// - Any non-quiescent table that this runs against will end up with
	// an inconsistent index. This is because as inserts/updates continue
	// to roll in behind this operation's read front, the written index
	// will become incomplete/stale before it's written.
	var b client.Batch
	b.Put(MakeDescMetadataKey(tableDesc.GetID()), tableDesc)

	for row.Next() {
		rowVals := row.Values()

		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, []IndexDescriptor{index}, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		for _, secondaryIndexEntry := range secondaryIndexEntries {
			if log.V(2) {
				log.Infof("CPut %q -> %v", secondaryIndexEntry.key, secondaryIndexEntry.value)
			}
			b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
		}
	}

	if err := row.Err(); err != nil {
		return nil, err
	}

	if err := p.txn.Run(&b); err != nil {
		if tErr, ok := err.(*proto.ConditionFailedError); ok {
			return nil, fmt.Errorf("duplicate key value %q violates unique constraint %s", tErr.ActualValue.Bytes, "TODO(tamird)")
		}
		return nil, err
	}

	return &valuesNode{}, nil
}
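
CreateIndex combines the descriptor update and the index backfill in a single batch, and the nil expected value passed to CPut is what turns a duplicate entry in a unique index into the ConditionFailedError handled above: the write only succeeds if the key does not yet exist. Condensed sketch of that pairing (same calls as above; secondaryIndexEntry is assumed in scope):

// Sketch (assumption about intent; calls taken from CreateIndex above).
var b client.Batch
b.Put(MakeDescMetadataKey(tableDesc.GetID()), tableDesc)
b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil /* key must not already exist */)
if err := p.txn.Run(&b); err != nil {
	// A *proto.ConditionFailedError here indicates a duplicate key value.
	return err
}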
Code Example #9
File: delete.go Project: nporsche/cockroach
// Delete deletes rows from a table.
// Privileges: DELETE and SELECT on table. We currently always use a SELECT statement.
//   Notes: postgres requires DELETE. Also requires SELECT for "USING" and "WHERE" with tables.
//          mysql requires DELETE. Also requires SELECT if a table is used in the "WHERE" clause.
func (p *planner) Delete(n *parser.Delete) (planNode, error) {
	tableDesc, err := p.getAliasedTableDesc(n.Table, false /* !allowCache */)
	if err != nil {
		return nil, err
	}

	if err := p.checkPrivilege(tableDesc, privilege.DELETE); err != nil {
		return nil, err
	}

	// TODO(tamird,pmattis): avoid going through Select to avoid encoding
	// and decoding keys. Also, avoiding Select may provide more
	// convenient access to index keys which we are not currently
	// deleting.
	rows, err := p.Select(&parser.Select{
		Exprs: parser.SelectExprs{parser.StarSelectExpr()},
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Construct a map from column ID to the index the value appears at within a
	// row.
	colIDtoRowIndex := map[ColumnID]int{}
	for i, name := range rows.Columns() {
		c, err := tableDesc.FindColumnByName(name)
		if err != nil {
			return nil, err
		}
		colIDtoRowIndex[c.ID] = i
	}

	primaryIndex := tableDesc.PrimaryIndex
	primaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc.ID, primaryIndex.ID)

	b := client.Batch{}
	result := &valuesNode{}
	for rows.Next() {
		rowVals := rows.Values()
		result.rows = append(result.rows, parser.DTuple(nil))

		primaryIndexKey, _, err := encodeIndexKey(
			primaryIndex.ColumnIDs, colIDtoRowIndex, rowVals, primaryIndexKeyPrefix)
		if err != nil {
			return nil, err
		}

		// Delete the secondary indexes.
		secondaryIndexEntries, err := encodeSecondaryIndexes(
			tableDesc.ID, tableDesc.Indexes, colIDtoRowIndex, rowVals)
		if err != nil {
			return nil, err
		}

		for _, secondaryIndexEntry := range secondaryIndexEntries {
			if log.V(2) {
				log.Infof("Del %s", prettyKey(secondaryIndexEntry.key, 0))
			}
			b.Del(secondaryIndexEntry.key)
		}

		// Delete the row.
		rowStartKey := roachpb.Key(primaryIndexKey)
		rowEndKey := rowStartKey.PrefixEnd()
		if log.V(2) {
			log.Infof("DelRange %s - %s", prettyKey(rowStartKey, 0), prettyKey(rowEndKey, 0))
		}
		b.DelRange(rowStartKey, rowEndKey)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	if IsSystemID(tableDesc.GetID()) {
		// Mark transaction as operating on the system DB.
		p.txn.SetSystemDBTrigger()
	}
	if err := p.txn.Run(&b); err != nil {
		return nil, err
	}

	return result, nil
}