Example #1
// sendRPC sends one or more RPCs to replicas from the supplied
// roachpb.Replica slice. Returns an RPC error if the request could
// not be sent. Note that the reply may contain a higher level error
// and must be checked in addition to the RPC error.
//
// The replicas are assumed to be ordered by preference, with closer
// ones (i.e. expected lowest latency) first.
func (ds *DistSender) sendRPC(
	ctx context.Context, rangeID roachpb.RangeID, replicas ReplicaSlice, ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, error) {
	if len(replicas) == 0 {
		return nil, roachpb.NewSendError(
			fmt.Sprintf("no replica node addresses available via gossip for range %d", rangeID))
	}

	// TODO(pmattis): This needs to be tested. If it isn't set we'll
	// still route the request appropriately by key, but won't receive
	// RangeNotFoundErrors.
	ba.RangeID = rangeID

	// Set RPC opts with stipulation that one of N RPCs must succeed.
	rpcOpts := SendOptions{
		ctx:              ctx,
		SendNextTimeout:  ds.sendNextTimeout,
		transportFactory: ds.transportFactory,
	}
	tracing.AnnotateTrace()
	defer tracing.AnnotateTrace()

	reply, err := ds.sendToReplicas(rpcOpts, rangeID, replicas, ba, ds.rpcContext)
	if err != nil {
		return nil, err
	}
	return reply, nil
}
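This example, like the ones that follow, uses the same idiom: call tracing.AnnotateTrace() on entry and defer a second call so the trace is annotated again on the way out. A minimal sketch of that idiom (the function name is hypothetical, not taken from the repository):

func doTracedWork(ctx context.Context) error {
	tracing.AnnotateTrace()       // annotate the active trace on entry
	defer tracing.AnnotateTrace() // annotate it again when the function returns
	// ... the actual work would go here ...
	return nil
}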
Example #2
// sendRPC sends one or more RPCs to replicas from the supplied
// roachpb.Replica slice. Returns an RPC error if the request could
// not be sent. Note that the reply may contain a higher level error
// and must be checked in addition to the RPC error.
//
// The replicas are assumed to be ordered by preference, with closer
// ones (i.e. expected lowest latency) first.
func (ds *DistSender) sendRPC(
	ctx context.Context, rangeID roachpb.RangeID, replicas ReplicaSlice, ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, error) {
	if len(replicas) == 0 {
		return nil, roachpb.NewSendError(
			fmt.Sprintf("no replica node addresses available via gossip for range %d", rangeID))
	}

	// TODO(pmattis): This needs to be tested. If it isn't set we'll
	// still route the request appropriately by key, but won't receive
	// RangeNotFoundErrors.
	ba.RangeID = rangeID

	// A given RPC may generate retries to multiple replicas, but as soon as we
	// get a response from one we want to cancel those other RPCs.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Set RPC opts with stipulation that one of N RPCs must succeed.
	rpcOpts := SendOptions{
		SendNextTimeout:  ds.sendNextTimeout,
		transportFactory: ds.transportFactory,
		metrics:          &ds.metrics,
	}
	tracing.AnnotateTrace()
	defer tracing.AnnotateTrace()

	reply, err := ds.sendToReplicas(ctx, rpcOpts, rangeID, replicas, ba, ds.rpcContext)
	if err != nil {
		return nil, err
	}
	return reply, nil
}
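Relative to the first example, this version wraps the context with context.WithCancel so that once one replica responds, the remaining in-flight RPCs are cancelled. An illustrative-only sketch of that fan-out-and-cancel idea in plain Go (the helper and its parameters are hypothetical, not part of the repository):

func firstReply(
	ctx context.Context, replicas []string, call func(context.Context, string) (string, error),
) (string, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // stops the remaining calls once the first reply arrives

	type result struct {
		reply string
		err   error
	}
	ch := make(chan result, len(replicas)) // buffered so late senders never block
	for _, r := range replicas {
		go func(r string) {
			reply, err := call(ctx, r)
			ch <- result{reply, err}
		}(r)
	}
	res := <-ch // first response wins
	return res.reply, res.err
}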
Example #3
// Run executes the operations queued up within a batch. Before executing any
// of the operations the batch is first checked to see if there were any errors
// during its construction (e.g. failure to marshal a proto message).
//
// The operations within a batch are run in parallel and the order is
// non-deterministic. It is unspecified behavior to modify and retrieve the
// same key within a batch.
//
// Upon completion, Batch.Results will contain the results for each
// operation. The order of the results matches the order the operations were
// added to the batch.
func (txn *Txn) Run(b *Batch) error {
	tracing.AnnotateTrace()
	defer tracing.AnnotateTrace()

	if err := b.prepare(); err != nil {
		return err
	}
	return sendAndFill(txn.send, b)
}
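The doc comment above spells out the Batch contract: operations are queued, Run executes them, and b.Results holds one result per operation in insertion order. A rough usage sketch, assuming this era's client package exposes Txn.NewBatch, Batch.Put, Batch.Get, and Batch.Results (the surrounding helper is hypothetical):

func putThenGet(txn *Txn) error {
	b := txn.NewBatch()
	b.Put("k1", "v1") // will populate b.Results[0]
	b.Get("k2")       // will populate b.Results[1]
	if err := txn.Run(b); err != nil {
		return err
	}
	// Results follow insertion order, so the Get's rows live in Results[1].
	_ = b.Results[1].Rows
	return nil
}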
Example #4
func (n *scanNode) Next() (bool, error) {
	tracing.AnnotateTrace()
	if !n.scanInitialized {
		if err := n.initScan(); err != nil {
			return false, err
		}
	}

	if n.explain == explainDebug {
		return n.debugNext()
	}

	// We fetch one row at a time until we find one that passes the filter.
	for {
		var err error
		n.row, err = n.fetcher.NextRowDecoded()
		if err != nil || n.row == nil {
			return false, err
		}
		passesFilter, err := sqlbase.RunFilter(n.filter, &n.p.evalCtx)
		if err != nil {
			return false, err
		}
		if passesFilter {
			return true, nil
		}
	}
}
Example #5
func (u *updateNode) Next() (bool, error) {
	next, err := u.run.rows.Next()
	if !next {
		if err == nil {
			// We're done. Finish the batch.
			err = u.tw.finalize(u.p.ctx())
		}
		return false, err
	}

	if u.run.explain == explainDebug {
		return true, nil
	}

	tracing.AnnotateTrace()

	oldValues := u.run.rows.Values()

	// Our updated value expressions occur immediately after the plain
	// columns in the output.
	updateValues := oldValues[len(u.tw.ru.fetchCols):]
	oldValues = oldValues[:len(u.tw.ru.fetchCols)]

	u.checkHelper.loadRow(u.tw.ru.fetchColIDtoRowIndex, oldValues, false)
	u.checkHelper.loadRow(u.updateColsIdx, updateValues, true)
	if err := u.checkHelper.check(&u.p.evalCtx); err != nil {
		return false, err
	}

	// Ensure that the values honor the specified column widths.
	for i := range updateValues {
		if err := sqlbase.CheckValueWidth(u.tw.ru.updateCols[i], updateValues[i]); err != nil {
			return false, err
		}
	}

	// Verify that non-nullable columns are not being set to NULL.
	for i, col := range u.tw.ru.updateCols {
		val := updateValues[i]
		if !col.Nullable && val == parser.DNull {
			return false, sqlbase.NewNonNullViolationError(col.Name)
		}
	}

	newValues, err := u.tw.row(u.p.ctx(), append(oldValues, updateValues...))
	if err != nil {
		return false, err
	}

	resultRow, err := u.rh.cookResultRow(newValues)
	if err != nil {
		return false, err
	}
	u.run.resultRow = resultRow

	return true, nil
}
Example #6
func (c *v3Conn) executeStatements(
	ctx context.Context,
	stmts string,
	pinfo *parser.PlaceholderInfo,
	formatCodes []formatCode,
	sendDescription bool,
	limit int,
) error {
	tracing.AnnotateTrace()
	// Note: sql.Executor gets its Context from c.session.context, which
	// has been bound by v3Conn.setupSession().
	results := c.executor.ExecuteStatements(c.session, stmts, pinfo)
	defer results.Close()

	tracing.AnnotateTrace()
	if results.Empty {
		// Skip executor and just send EmptyQueryResponse.
		c.writeBuf.initMsg(serverMsgEmptyQuery)
		return c.writeBuf.finishMsg(c.wr)
	}
	return c.sendResponse(ctx, results.ResultList, formatCodes, sendDescription, limit)
}
Example #7
func (db *DB) prepareToSend(ba *roachpb.BatchRequest) *roachpb.Error {
	if ba.ReadConsistency == roachpb.INCONSISTENT {
		for _, ru := range ba.Requests {
			req := ru.GetInner()
			if req.Method() != roachpb.Get && req.Method() != roachpb.Scan &&
				req.Method() != roachpb.ReverseScan {
				return roachpb.NewErrorf("method %s not allowed with INCONSISTENT batch", req.Method())
			}
		}
	}

	if db.ctx.UserPriority != 1 {
		ba.UserPriority = db.ctx.UserPriority
	}

	tracing.AnnotateTrace()
	return nil
}
Example #8
// Update updates columns for a selection of rows from a table.
// Privileges: UPDATE and SELECT on table. We currently always use a select statement.
//   Notes: postgres requires UPDATE. Requires SELECT with WHERE clause with table.
//          mysql requires UPDATE. Also requires SELECT with WHERE clause with table.
// TODO(guanqun): need to support CHECK in UPDATE
func (p *planner) Update(
	n *parser.Update, desiredTypes []parser.Type, autoCommit bool,
) (planNode, error) {
	tracing.AnnotateTrace()

	tn, err := p.getAliasedTableName(n.Table)
	if err != nil {
		return nil, err
	}

	en, err := p.makeEditNode(tn, autoCommit, privilege.UPDATE)
	if err != nil {
		return nil, err
	}

	exprs := make([]*parser.UpdateExpr, len(n.Exprs))
	for i, expr := range n.Exprs {
		// Replace the sub-query nodes.
		newExpr, err := p.replaceSubqueries(expr.Expr, len(expr.Names))
		if err != nil {
			return nil, err
		}
		exprs[i] = &parser.UpdateExpr{Tuple: expr.Tuple, Expr: newExpr, Names: expr.Names}
	}

	// Determine which columns we're updating.
	names, err := p.namesForExprs(exprs)
	if err != nil {
		return nil, err
	}

	updateCols, err := p.processColumns(en.tableDesc, names)
	if err != nil {
		return nil, err
	}

	defaultExprs, err := makeDefaultExprs(updateCols, &p.parser, &p.evalCtx)
	if err != nil {
		return nil, err
	}

	var requestedCols []sqlbase.ColumnDescriptor
	if len(n.Returning) > 0 || len(en.tableDesc.Checks) > 0 {
		// TODO(dan): This could be made tighter, just the rows needed for RETURNING
		// exprs.
		requestedCols = en.tableDesc.Columns
	}

	fkTables := tablesNeededForFKs(*en.tableDesc, CheckUpdates)
	if err := p.fillFKTableMap(fkTables); err != nil {
		return nil, err
	}
	ru, err := makeRowUpdater(p.txn, en.tableDesc, fkTables, updateCols, requestedCols, rowUpdaterDefault)
	if err != nil {
		return nil, err
	}
	tw := tableUpdater{ru: ru, autoCommit: autoCommit}

	tracing.AnnotateTrace()

	// Generate the list of select targets. We need to select all of the columns
	// plus we select all of the update expressions in case those expressions
	// reference columns (e.g. "UPDATE t SET v = v + 1"). Note that we flatten
	// expressions for tuple assignments just as we flattened the column names
	// above. So "UPDATE t SET (a, b) = (1, 2)" translates into select targets of
	// "*, 1, 2", not "*, (1, 2)".
	targets := sqlbase.ColumnsSelectors(ru.fetchCols)
	i := 0
	// Remember the index where the targets for exprs start.
	exprTargetIdx := len(targets)
	desiredTypesFromSelect := make([]parser.Type, len(targets), len(targets)+len(exprs))
	for i := range targets {
		desiredTypesFromSelect[i] = parser.TypeAny
	}
	for _, expr := range exprs {
		if expr.Tuple {
			switch t := expr.Expr.(type) {
			case (*parser.Tuple):
				for _, e := range t.Exprs {
					typ := updateCols[i].Type.ToDatumType()
					e := fillDefault(e, typ, i, defaultExprs)
					targets = append(targets, parser.SelectExpr{Expr: e})
					desiredTypesFromSelect = append(desiredTypesFromSelect, typ)
					i++
				}
			default:
				return nil, fmt.Errorf("cannot use this expression to assign multiple columns: %s", expr.Expr)
			}
		} else {
			typ := updateCols[i].Type.ToDatumType()
			e := fillDefault(expr.Expr, typ, i, defaultExprs)
			targets = append(targets, parser.SelectExpr{Expr: e})
			desiredTypesFromSelect = append(desiredTypesFromSelect, typ)
			i++
		}
	}

	rows, err := p.SelectClause(&parser.SelectClause{
		Exprs: targets,
		From:  &parser.From{Tables: []parser.TableExpr{n.Table}},
		Where: n.Where,
	}, nil, nil, desiredTypesFromSelect, publicAndNonPublicColumns)
	if err != nil {
		return nil, err
	}

	// Placeholders have their types populated in the above Select if they are part
	// of an expression ("SET a = 2 + $1") in the type check step where those
	// types are inferred. For the simpler case ("SET a = $1"), populate them
	// using checkColumnType. This step also verifies that the expression
	// types match the column types.
	sel := rows.(*selectTopNode).source.(*selectNode)
	for i, target := range sel.render[exprTargetIdx:] {
		// DefaultVal doesn't implement TypeCheck
		if _, ok := target.(parser.DefaultVal); ok {
			continue
		}
		// TODO(nvanbenschoten) isn't this TypeCheck redundant with the call to SelectClause?
		typedTarget, err := parser.TypeCheck(target, &p.semaCtx, updateCols[i].Type.ToDatumType())
		if err != nil {
			return nil, err
		}
		err = sqlbase.CheckColumnType(updateCols[i], typedTarget.ResolvedType(), p.semaCtx.Placeholders)
		if err != nil {
			return nil, err
		}
	}

	updateColsIdx := make(map[sqlbase.ColumnID]int, len(ru.updateCols))
	for i, col := range ru.updateCols {
		updateColsIdx[col.ID] = i
	}

	un := &updateNode{
		n:             n,
		editNodeBase:  en,
		updateCols:    ru.updateCols,
		updateColsIdx: updateColsIdx,
		tw:            tw,
	}
	if err := un.checkHelper.init(p, tn, en.tableDesc); err != nil {
		return nil, err
	}
	if err := un.run.initEditNode(&un.editNodeBase, rows, n.Returning, desiredTypes); err != nil {
		return nil, err
	}
	return un, nil
}
Example #9
// Send implements the batch.Sender interface. It subdivides the Batch
// into batches admissible for sending (preventing certain illegal
// mixtures of requests), executes each individual part (which may
// span multiple ranges), and recombines the response.
//
// When the request spans ranges, it is split by range and a partial
// subset of the batch request is sent to affected ranges in parallel.
//
// The first write in a transaction may not arrive before writes to
// other ranges. This is relevant in the case of a BeginTransaction
// request. Intents written to other ranges before the transaction
// record is created will cause the transaction to abort early.
func (ds *DistSender) Send(
	ctx context.Context, ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
	tracing.AnnotateTrace()

	if pErr := ds.initAndVerifyBatch(ctx, &ba); pErr != nil {
		return nil, pErr
	}

	ctx = ds.AnnotateCtx(ctx)
	ctx, cleanup := tracing.EnsureContext(ctx, ds.AmbientContext.Tracer)
	defer cleanup()

	var rplChunks []*roachpb.BatchResponse
	parts := ba.Split(false /* don't split ET */)
	if len(parts) > 1 && ba.MaxSpanRequestKeys != 0 {
		// We already verified above that the batch contains only scan requests of the same type.
		// Such a batch should never need splitting.
		panic("batch with MaxSpanRequestKeys needs splitting")
	}
	for len(parts) > 0 {
		part := parts[0]
		ba.Requests = part
		// The minimal key range encompassing all requests contained within.
		// Local addressing has already been resolved.
		// TODO(tschottdorf): consider rudimentary validation of the batch here
		// (for example, non-range requests with EndKey, or empty key ranges).
		rs, err := keys.Range(ba)
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		rpl, pErr := ds.divideAndSendBatchToRanges(ctx, ba, rs, true /* isFirst */)

		if pErr == errNo1PCTxn {
			// If we tried to send a single round-trip EndTransaction but
			// it looks like it's going to hit multiple ranges, split it
			// here and try again.
			if len(parts) != 1 {
				panic("EndTransaction not in last chunk of batch")
			}
			parts = ba.Split(true /* split ET */)
			if len(parts) != 2 {
				panic("split of final EndTransaction chunk resulted in != 2 parts")
			}
			continue
		}
		if pErr != nil {
			return nil, pErr
		}
		// Propagate transaction from last reply to next request. The final
		// update is taken and put into the response's main header.
		ba.UpdateTxn(rpl.Txn)
		rplChunks = append(rplChunks, rpl)
		parts = parts[1:]
	}

	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
		reply.CollectedSpans = append(reply.CollectedSpans, rpl.CollectedSpans...)
	}
	reply.BatchResponse_Header = rplChunks[len(rplChunks)-1].BatchResponse_Header
	return reply, nil
}
Example #10
// newPlan constructs a planNode from a statement. This is used
// recursively by the various node constructors.
func (p *planner) newPlan(
	stmt parser.Statement, desiredTypes []parser.Type, autoCommit bool,
) (planNode, error) {
	tracing.AnnotateTrace()

	// This will set the system DB trigger for transactions containing
	// DDL statements that have no effect, such as
	// `BEGIN; INSERT INTO ...; CREATE TABLE IF NOT EXISTS ...; COMMIT;`
	// where the table already exists. This will generate some false
	// refreshes, but that's expected to be quite rare in practice.
	if stmt.StatementType() == parser.DDL {
		p.txn.SetSystemConfigTrigger()
	}

	switch n := stmt.(type) {
	case *parser.AlterTable:
		return p.AlterTable(n)
	case *parser.BeginTransaction:
		return p.BeginTransaction(n)
	case CopyDataBlock:
		return p.CopyData(n, autoCommit)
	case *parser.CopyFrom:
		return p.CopyFrom(n, autoCommit)
	case *parser.CreateDatabase:
		return p.CreateDatabase(n)
	case *parser.CreateIndex:
		return p.CreateIndex(n)
	case *parser.CreateTable:
		return p.CreateTable(n)
	case *parser.CreateUser:
		return p.CreateUser(n)
	case *parser.CreateView:
		return p.CreateView(n)
	case *parser.Delete:
		return p.Delete(n, desiredTypes, autoCommit)
	case *parser.DropDatabase:
		return p.DropDatabase(n)
	case *parser.DropIndex:
		return p.DropIndex(n)
	case *parser.DropTable:
		return p.DropTable(n)
	case *parser.DropView:
		return p.DropView(n)
	case *parser.Explain:
		return p.Explain(n, autoCommit)
	case *parser.Grant:
		return p.Grant(n)
	case *parser.Help:
		return p.Help(n)
	case *parser.Insert:
		return p.Insert(n, desiredTypes, autoCommit)
	case *parser.ParenSelect:
		return p.newPlan(n.Select, desiredTypes, autoCommit)
	case *parser.RenameColumn:
		return p.RenameColumn(n)
	case *parser.RenameDatabase:
		return p.RenameDatabase(n)
	case *parser.RenameIndex:
		return p.RenameIndex(n)
	case *parser.RenameTable:
		return p.RenameTable(n)
	case *parser.Revoke:
		return p.Revoke(n)
	case *parser.Select:
		return p.Select(n, desiredTypes, autoCommit)
	case *parser.SelectClause:
		return p.SelectClause(n, nil, nil, desiredTypes, publicColumns)
	case *parser.Set:
		return p.Set(n)
	case *parser.SetTimeZone:
		return p.SetTimeZone(n)
	case *parser.SetTransaction:
		return p.SetTransaction(n)
	case *parser.SetDefaultIsolation:
		return p.SetDefaultIsolation(n)
	case *parser.Show:
		return p.Show(n)
	case *parser.ShowColumns:
		return p.ShowColumns(n)
	case *parser.ShowConstraints:
		return p.ShowConstraints(n)
	case *parser.ShowCreateTable:
		return p.ShowCreateTable(n)
	case *parser.ShowCreateView:
		return p.ShowCreateView(n)
	case *parser.ShowDatabases:
		return p.ShowDatabases(n)
	case *parser.ShowGrants:
		return p.ShowGrants(n)
	case *parser.ShowIndex:
		return p.ShowIndex(n)
	case *parser.ShowTables:
		return p.ShowTables(n)
	case *parser.ShowUsers:
		return p.ShowUsers(n)
	case *parser.Split:
		return p.Split(n)
	case *parser.Truncate:
		return p.Truncate(n)
	case *parser.UnionClause:
		return p.UnionClause(n, desiredTypes, autoCommit)
	case *parser.Update:
		return p.Update(n, desiredTypes, autoCommit)
	case *parser.ValuesClause:
		return p.ValuesClause(n, desiredTypes)
	default:
		return nil, errors.Errorf("unknown statement type: %T", stmt)
	}
}
Example #11
// newPlan constructs a planNode from a statement. This is used
// recursively by the various node constructors.
func (p *planner) newPlan(
	stmt parser.Statement, desiredTypes []parser.Type, autoCommit bool,
) (planNode, error) {
	tracing.AnnotateTrace()

	// This will set the system DB trigger for transactions containing
	// DDL statements that have no effect, such as
	// `BEGIN; INSERT INTO ...; CREATE TABLE IF NOT EXISTS ...; COMMIT;`
	// where the table already exists. This will generate some false
	// refreshes, but that's expected to be quite rare in practice.
	if stmt.StatementType() == parser.DDL {
		p.txn.SetSystemConfigTrigger()
	}

	// TODO(dan): This iteration makes the plan dispatch no longer constant
	// time. We could fix that with a map of `reflect.Type` but including
	// reflection in such a primary codepath is unfortunate. Instead, the
	// upcoming IR work will provide unique numeric type tags, which will
	// elegantly solve this.
	for _, planHook := range planHooks {
		if fn, header, err := planHook(p.ctx(), stmt, p.execCfg); err != nil {
			return nil, err
		} else if fn != nil {
			return &hookFnNode{f: fn, header: header}, nil
		}
	}

	switch n := stmt.(type) {
	case *parser.AlterTable:
		return p.AlterTable(n)
	case *parser.BeginTransaction:
		return p.BeginTransaction(n)
	case CopyDataBlock:
		return p.CopyData(n, autoCommit)
	case *parser.CopyFrom:
		return p.CopyFrom(n, autoCommit)
	case *parser.CreateDatabase:
		return p.CreateDatabase(n)
	case *parser.CreateIndex:
		return p.CreateIndex(n)
	case *parser.CreateTable:
		return p.CreateTable(n)
	case *parser.CreateUser:
		return p.CreateUser(n)
	case *parser.CreateView:
		return p.CreateView(n)
	case *parser.Delete:
		return p.Delete(n, desiredTypes, autoCommit)
	case *parser.DropDatabase:
		return p.DropDatabase(n)
	case *parser.DropIndex:
		return p.DropIndex(n)
	case *parser.DropTable:
		return p.DropTable(n)
	case *parser.DropView:
		return p.DropView(n)
	case *parser.Explain:
		return p.Explain(n, autoCommit)
	case *parser.Grant:
		return p.Grant(n)
	case *parser.Help:
		return p.Help(n)
	case *parser.Insert:
		return p.Insert(n, desiredTypes, autoCommit)
	case *parser.ParenSelect:
		return p.newPlan(n.Select, desiredTypes, autoCommit)
	case *parser.RenameColumn:
		return p.RenameColumn(n)
	case *parser.RenameDatabase:
		return p.RenameDatabase(n)
	case *parser.RenameIndex:
		return p.RenameIndex(n)
	case *parser.RenameTable:
		return p.RenameTable(n)
	case *parser.Revoke:
		return p.Revoke(n)
	case *parser.Select:
		return p.Select(n, desiredTypes, autoCommit)
	case *parser.SelectClause:
		return p.SelectClause(n, nil, nil, desiredTypes, publicColumns)
	case *parser.Set:
		return p.Set(n)
	case *parser.SetTimeZone:
		return p.SetTimeZone(n)
	case *parser.SetTransaction:
		return p.SetTransaction(n)
	case *parser.SetDefaultIsolation:
		return p.SetDefaultIsolation(n)
	case *parser.Show:
		return p.Show(n)
	case *parser.ShowColumns:
		return p.ShowColumns(n)
	case *parser.ShowConstraints:
		return p.ShowConstraints(n)
	case *parser.ShowCreateTable:
		return p.ShowCreateTable(n)
	case *parser.ShowCreateView:
		return p.ShowCreateView(n)
	case *parser.ShowDatabases:
		return p.ShowDatabases(n)
	case *parser.ShowGrants:
		return p.ShowGrants(n)
	case *parser.ShowIndex:
		return p.ShowIndex(n)
	case *parser.ShowTables:
		return p.ShowTables(n)
	case *parser.ShowUsers:
		return p.ShowUsers(n)
	case *parser.Split:
		return p.Split(n)
	case *parser.Truncate:
		return p.Truncate(n)
	case *parser.UnionClause:
		return p.UnionClause(n, desiredTypes, autoCommit)
	case *parser.Update:
		return p.Update(n, desiredTypes, autoCommit)
	case *parser.ValuesClause:
		return p.ValuesClause(n, desiredTypes)
	default:
		return nil, errors.Errorf("unknown statement type: %T", stmt)
	}
}