Example 1
// Initializes a scanNode with a table descriptor.
func (n *scanNode) initTable(
	p *planner,
	desc *sqlbase.TableDescriptor,
	indexHints *parser.IndexHints,
	scanVisibility scanVisibility,
) error {
	n.desc = *desc

	if !p.skipSelectPrivilegeChecks {
		if err := p.checkPrivilege(&n.desc, privilege.SELECT); err != nil {
			return err
		}
	}

	if indexHints != nil && indexHints.Index != "" {
		indexName := indexHints.Index.Normalize()
		if indexName == parser.ReNormalizeName(n.desc.PrimaryIndex.Name) {
			n.specifiedIndex = &n.desc.PrimaryIndex
		} else {
			for i := range n.desc.Indexes {
				if indexName == parser.ReNormalizeName(n.desc.Indexes[i].Name) {
					n.specifiedIndex = &n.desc.Indexes[i]
					break
				}
			}
			if n.specifiedIndex == nil {
				return fmt.Errorf("index \"%s\" not found", indexName)
			}
		}
	}
	n.noIndexJoin = (indexHints != nil && indexHints.NoIndexJoin)
	n.initDescDefaults(scanVisibility)
	return nil
}
Example 2
// commonColumns returns the names of columns common to the
// right and left sides, for use by NATURAL JOIN.
func commonColumns(left, right *dataSourceInfo) parser.NameList {
	var res parser.NameList
	for _, cLeft := range left.sourceColumns {
		if cLeft.hidden {
			continue
		}
		for _, cRight := range right.sourceColumns {
			if cRight.hidden {
				continue
			}

			if parser.ReNormalizeName(cLeft.Name) == parser.ReNormalizeName(cRight.Name) {
				res = append(res, parser.Name(cLeft.Name))
			}
		}
	}
	return res
}
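
To make the matching logic above concrete, here is a minimal, self-contained sketch. The column struct and the lower-casing in it are stand-ins for CockroachDB's internal ResultColumns and parser.ReNormalizeName, so treat it as an illustration rather than the real types.

package main

import (
	"fmt"
	"strings"
)

type column struct {
	name   string
	hidden bool
}

// commonColumns returns the names of visible columns shared by both sides,
// comparing names after a simplified normalization (lower-casing).
func commonColumns(left, right []column) []string {
	var res []string
	for _, cl := range left {
		if cl.hidden {
			continue
		}
		for _, cr := range right {
			if cr.hidden {
				continue
			}
			if strings.ToLower(cl.name) == strings.ToLower(cr.name) {
				res = append(res, cl.name)
			}
		}
	}
	return res
}

func main() {
	left := []column{{name: "id"}, {name: "Name"}, {name: "rowid", hidden: true}}
	right := []column{{name: "ID"}, {name: "name"}, {name: "rowid", hidden: true}}
	fmt.Println(commonColumns(left, right)) // [id Name]
}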
Example 3
// upsertExprsAndIndex returns the upsert conflict index and the (possibly
// synthetic) SET expressions used when a row conflicts.
func upsertExprsAndIndex(
	tableDesc *sqlbase.TableDescriptor,
	onConflict parser.OnConflict,
	insertCols []sqlbase.ColumnDescriptor,
) (parser.UpdateExprs, *sqlbase.IndexDescriptor, error) {
	if onConflict.IsUpsertAlias() {
		// The UPSERT syntactic sugar is the same as the longhand specifying the
		// primary index as the conflict index and SET expressions for the columns
		// in insertCols minus any columns in the conflict index. Example:
		// `UPSERT INTO abc VALUES (1, 2, 3)` is syntactic sugar for
		// `INSERT INTO abc VALUES (1, 2, 3) ON CONFLICT (a) DO UPDATE SET b = excluded.b, c = excluded.c`.
		conflictIndex := &tableDesc.PrimaryIndex
		indexColSet := make(map[sqlbase.ColumnID]struct{}, len(conflictIndex.ColumnIDs))
		for _, colID := range conflictIndex.ColumnIDs {
			indexColSet[colID] = struct{}{}
		}
		updateExprs := make(parser.UpdateExprs, 0, len(insertCols))
		for _, c := range insertCols {
			if _, ok := indexColSet[c.ID]; !ok {
				names := parser.UnresolvedNames{
					parser.UnresolvedName{parser.Name(c.Name)},
				}
				expr := &parser.ColumnItem{
					TableName:  upsertExcludedTable,
					ColumnName: parser.Name(c.Name),
				}
				updateExprs = append(updateExprs, &parser.UpdateExpr{Names: names, Expr: expr})
			}
		}
		return updateExprs, conflictIndex, nil
	}

	indexMatch := func(index sqlbase.IndexDescriptor) bool {
		if !index.Unique {
			return false
		}
		if len(index.ColumnNames) != len(onConflict.Columns) {
			return false
		}
		for i, colName := range index.ColumnNames {
			if parser.ReNormalizeName(colName) != onConflict.Columns[i].Normalize() {
				return false
			}
		}
		return true
	}

	if indexMatch(tableDesc.PrimaryIndex) {
		return onConflict.Exprs, &tableDesc.PrimaryIndex, nil
	}
	for _, index := range tableDesc.Indexes {
		if indexMatch(index) {
			return onConflict.Exprs, &index, nil
		}
	}
	return nil, nil, fmt.Errorf("there is no unique or exclusion constraint matching the ON CONFLICT specification")
}
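
The desugaring described in the comment above can be illustrated with a small, self-contained sketch. The helper below is hypothetical (plain strings instead of parser.UpdateExprs and sqlbase descriptors), but it follows the same rule: every inserted column outside the conflict index becomes a `col = excluded.col` SET expression.

package main

import "fmt"

// upsertSetExprs mirrors the desugaring above: every inserted column that is
// not part of the conflict (primary) index becomes a "col = excluded.col"
// SET expression.
func upsertSetExprs(insertCols, conflictCols []string) []string {
	inConflict := make(map[string]struct{}, len(conflictCols))
	for _, c := range conflictCols {
		inConflict[c] = struct{}{}
	}
	var exprs []string
	for _, c := range insertCols {
		if _, ok := inConflict[c]; !ok {
			exprs = append(exprs, fmt.Sprintf("%s = excluded.%s", c, c))
		}
	}
	return exprs
}

func main() {
	// UPSERT INTO abc VALUES (1, 2, 3), with primary key (a), expands to
	// ON CONFLICT (a) DO UPDATE SET b = excluded.b, c = excluded.c.
	fmt.Println(upsertSetExprs([]string{"a", "b", "c"}, []string{"a"}))
}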
Example 4
// MakeNameMetadataKey returns the key for the name. Pass name == "" in order
// to generate the prefix key to use to scan over all of the names for the
// specified parentID.
func MakeNameMetadataKey(parentID ID, name string) roachpb.Key {
	normName := parser.ReNormalizeName(name)
	k := keys.MakeTablePrefix(uint32(NamespaceTable.ID))
	k = encoding.EncodeUvarintAscending(k, uint64(NamespaceTable.PrimaryIndex.ID))
	k = encoding.EncodeUvarintAscending(k, uint64(parentID))
	if name != "" {
		k = encoding.EncodeBytesAscending(k, []byte(normName))
		k = keys.MakeFamilyKey(k, uint32(NamespaceTable.Columns[2].ID))
	}
	return k
}
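
As a rough sketch of the key layout produced above, the following self-contained program mimics the component order (namespace table prefix, primary index ID, parent ID, then optionally the normalized name and a family ID). The plain varints, the lower-casing, and the IDs here are simplified stand-ins for util/encoding, parser.ReNormalizeName, and the real system table IDs, not the actual on-disk format.

package main

import (
	"encoding/binary"
	"fmt"
	"strings"
)

// makeNameKey mimics the component order of MakeNameMetadataKey above:
// namespace table ID, primary index ID, parent ID, then (for non-empty names)
// the normalized name and a column family ID.
func makeNameKey(namespaceTableID, primaryIndexID, parentID uint64, name string) []byte {
	k := binary.AppendUvarint(nil, namespaceTableID)
	k = binary.AppendUvarint(k, primaryIndexID)
	k = binary.AppendUvarint(k, parentID)
	if name != "" {
		k = append(k, strings.ToLower(name)...)
		k = binary.AppendUvarint(k, 2) // illustrative family/column ID
	}
	return k
}

func main() {
	// Empty name: a prefix key for scanning all names under parent ID 50.
	fmt.Printf("prefix: %x\n", makeNameKey(2, 1, 50, ""))
	// Non-empty name: a point key for a single table name.
	fmt.Printf("full:   %x\n", makeNameKey(2, 1, 50, "MyTable"))
}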
Example 5
// findUnaliasedColumn looks up the column specified by a VarName, not
// taking column renames into account (but table renames will be taken
// into account). That is, given a table "blah" with single column "y",
// findUnaliasedColumn("y") returns a valid index even in the context
// of:
//     SELECT * FROM blah as foo(x)
// If the VarName specifies a table name, only columns that have that
// name as their source alias are considered. If the VarName does not
// specify a table name, all columns in the data source are
// considered.  If no column is found, invalidColIdx is returned with
// no error.
func (p *planDataSource) findUnaliasedColumn(c *parser.ColumnItem) (colIdx int, err error) {
	colName := c.ColumnName.Normalize()
	tableName := c.TableName.NormalizedTableName()

	if tableName.Table() != "" {
		tn, err := p.info.checkDatabaseName(tableName)
		if err != nil {
			return invalidColIdx, nil
		}
		tableName = tn
	}

	colIdx = invalidColIdx
	planColumns := p.plan.Columns()

	selCol := func(colIdx int, idx int) (int, error) {
		col := planColumns[idx]
		if parser.ReNormalizeName(col.Name) == colName {
			if colIdx != invalidColIdx {
				return invalidColIdx, fmt.Errorf("column reference %q is ambiguous", c)
			}
			colIdx = idx
		}
		return colIdx, nil
	}

	if tableName.Table() == "" {
		for idx := 0; idx < len(p.info.sourceColumns); idx++ {
			colIdx, err = selCol(colIdx, idx)
			if err != nil {
				return colIdx, err
			}
		}
	} else {
		colRange, ok := p.info.sourceAliases[tableName]
		if !ok {
			// A table name is specified, but there is no column with this
			// table name.
			return invalidColIdx, nil
		}
		for _, idx := range colRange {
			colIdx, err = selCol(colIdx, idx)
			if err != nil {
				return colIdx, err
			}
		}
	}

	return colIdx, nil
}
Example 6
// pickUsingColumn searches for a column whose name matches colName.
// The column index and type are returned if found, otherwise an error
// is reported.
func pickUsingColumn(cols ResultColumns, colName string, context string) (int, parser.Type, error) {
	idx := invalidColIdx
	for j, col := range cols {
		if col.hidden {
			continue
		}
		if parser.ReNormalizeName(col.Name) == colName {
			idx = j
		}
	}
	if idx == invalidColIdx {
		return idx, nil, fmt.Errorf("column \"%s\" specified in USING clause does not exist in %s table", colName, context)
	}
	return idx, cols[idx].Typ, nil
}
Example 7
// makeColIDtoRowIndex returns a mapping from the table's column IDs to the
// position of the corresponding column in the given row.
func makeColIDtoRowIndex(
	row planNode, desc *sqlbase.TableDescriptor,
) (map[sqlbase.ColumnID]int, error) {
	columns := row.Columns()
	colIDtoRowIndex := make(map[sqlbase.ColumnID]int, len(columns))
	for i, column := range columns {
		s, idx, err := desc.FindColumnByNormalizedName(parser.ReNormalizeName(column.Name))
		if err != nil {
			return nil, err
		}
		switch s {
		case sqlbase.DescriptorActive:
			colIDtoRowIndex[desc.Columns[idx].ID] = i
		case sqlbase.DescriptorIncomplete:
			colIDtoRowIndex[desc.Mutations[idx].GetColumn().ID] = i
		default:
			panic("unreachable")
		}
	}
	return colIDtoRowIndex, nil
}
Example 8
// getTableLease implements the SchemaAccessor interface.
func (p *planner) getTableLease(tn *parser.TableName) (*sqlbase.TableDescriptor, error) {
	if log.V(2) {
		log.Infof(p.ctx(), "planner acquiring lease on table '%s'", tn)
	}

	isSystemDB := tn.Database() == sqlbase.SystemDB.Name
	isVirtualDB := p.session.virtualSchemas.isVirtualDatabase(tn.Database())
	if isSystemDB || isVirtualDB || testDisableTableLeases {
		// We don't go through the normal lease mechanism for:
		// - system tables. The system.lease and system.descriptor tables, in
		//   particular, are problematic because they are themselves used for
		//   acquiring leases, creating a chicken-and-egg problem.
		// - virtual tables. These tables' descriptors are not persisted,
		//   so they cannot be leased. Instead, we simply return the static
		//   descriptor and rely on the immutability privileges set on the
		//   descriptors to cause upper layers to reject mutation statements.
		tbl, err := p.mustGetTableDesc(tn)
		if err != nil {
			return nil, err
		}
		if err := filterTableState(tbl); err != nil {
			return nil, err
		}
		return tbl, nil
	}

	dbID, err := p.getDatabaseID(tn.Database())
	if err != nil {
		return nil, err
	}

	// First, look to see if we already have a lease for this table.
	// This ensures that, once a SQL transaction resolved name N to id X, it will
	// continue to use N to refer to X even if N is renamed during the
	// transaction.
	var lease *LeaseState
	for _, l := range p.leases {
		if parser.ReNormalizeName(l.Name) == tn.TableName.Normalize() &&
			l.ParentID == dbID {
			lease = l
			if log.V(2) {
				log.Infof(p.ctx(), "found lease in planner cache for table '%s'", tn)
			}
			break
		}
	}

	// If we didn't find a lease or the lease is about to expire, acquire one.
	if lease == nil || p.removeLeaseIfExpiring(lease) {
		var err error
		lease, err = p.leaseMgr.AcquireByName(p.txn, dbID, tn.Table())
		if err != nil {
			if err == sqlbase.ErrDescriptorNotFound {
				// Transform the descriptor error into an error that references the
				// table's name.
				return nil, sqlbase.NewUndefinedTableError(tn.String())
			}
			return nil, err
		}
		p.leases = append(p.leases, lease)
		if log.V(2) {
			log.Infof(p.ctx(), "added lease on table '%s' to planner cache", tn)
		}
		// If the lease we just acquired expires before the txn's deadline, reduce
		// the deadline.
		p.txn.UpdateDeadlineMaybe(hlc.Timestamp{WallTime: lease.Expiration().UnixNano()})
	}
	return &lease.TableDescriptor, nil
}
Example 9
// orderBy constructs a sortNode based on the ORDER BY clause.
//
// In the general case (SELECT/UNION/VALUES), we can sort by a column index or a
// column name.
//
// However, for a SELECT, we can also sort by the pre-alias column name (SELECT
// a AS b ORDER BY b) as well as expressions (SELECT a, b ORDER BY a+b). In
// this case, construction of the sortNode might adjust the number of render
// targets in the selectNode if any ordering expressions are specified.
//
// TODO(dan): SQL also allows sorting a VALUES or UNION by an expression.
// Support this. It will reduce some of the special casing below, but requires a
// generalization of how to add derived columns to a SelectStatement.
func (p *planner) orderBy(orderBy parser.OrderBy, n planNode) (*sortNode, error) {
	if orderBy == nil {
		return nil, nil
	}

	// Multiple tests below use selectNode as a special case.
	// So factor the cast.
	s, _ := n.(*selectNode)

	// We grab a copy of columns here because we might add new render targets
	// below. This is the set of columns requested by the query.
	columns := n.Columns()
	numOriginalCols := len(columns)
	if s != nil {
		numOriginalCols = s.numOriginalCols
	}
	var ordering sqlbase.ColumnOrdering

	for _, o := range orderBy {
		direction := encoding.Ascending
		if o.Direction == parser.Descending {
			direction = encoding.Descending
		}

		index := -1

		// Unwrap parenthesized expressions like "((a))" to "a".
		expr := parser.StripParens(o.Expr)

		// The logical data source for ORDER BY is the list of render
		// expressions for a SELECT, as specified in the input SQL text
		// (or an entire UNION or VALUES clause).  Alas, SQL has some
		// historical baggage from SQL92 and there are some special cases:
		//
		// SQL92 rules:
		//
		// 1) if the expression is the aliased (AS) name of a render
		//    expression in a SELECT clause, then use that
		//    render as sort key.
		//    e.g. SELECT a AS b, b AS c ORDER BY b
		//    this sorts on the first render.
		//
		// 2) column ordinals. If a simple integer literal is used,
		//    optionally enclosed within parentheses but *not subject to
		//    any arithmetic*, then this refers to one of the columns of
		//    the data source. Then use the render expression at that
		//    ordinal position as sort key.
		//
		// SQL99 rules:
		//
		// 3) otherwise, if the expression is already in the render list,
		//    then use that render as sort key.
		//    e.g. SELECT b AS c ORDER BY b
		//    this sorts on the first render.
		//    (this is an optimization)
		//
		// 4) if the sort key is not dependent on the data source (no
		//    IndexedVar) then simply do not sort. (this is an optimization)
		//
		// 5) otherwise, add a new render with the ORDER BY expression
		//    and use that as sort key.
		//    e.g. SELECT a FROM t ORDER by b
		//    e.g. SELECT a, b FROM t ORDER by a+b

		// First, deal with render aliases.
		if vBase, ok := expr.(parser.VarName); index == -1 && ok {
			v, err := vBase.NormalizeVarName()
			if err != nil {
				return nil, err
			}

			if c, ok := v.(*parser.ColumnItem); ok && c.TableName.Table() == "" {
				// Look for an output column that matches the name. This
				// handles cases like:
				//
				//   SELECT a AS b FROM t ORDER BY b
				target := c.ColumnName.Normalize()
				for j, col := range columns {
					if parser.ReNormalizeName(col.Name) == target {
						if index != -1 {
							// There is more than one render alias that matches the ORDER BY
							// clause. Here, SQL92 is specific as to what should be done:
							// if the underlying expression is known (we're on a selectNode)
							// and it is equivalent, then just accept that and ignore the ambiguity.
							// This plays nice with `SELECT b, * FROM t ORDER BY b`. Otherwise,
							// reject with an ambiguity error.
							if s == nil || !s.equivalentRenders(j, index) {
								return nil, errors.Errorf("ORDER BY \"%s\" is ambiguous", target)
							}
						}
						index = j
					}
				}
			}
		}

		// Then, deal with column ordinals.
		if index == -1 {
			col, err := p.colIndex(numOriginalCols, expr, "ORDER BY")
			if err != nil {
				return nil, err
			}
			if col != -1 {
				index = col
			}
		}

		if s != nil {
			// Try to optimize constant sorts. For this we need to resolve
			// names to IndexedVars then see if there were any row-dependent
			// expressions found in the sort expression.
			sortExpr := expr
			if index != -1 {
				// We found a render above, so fetch it. This is needed
				// because if the sort expression is a reference to a render
				// alias, resolveNames below would be incorrect.
				sortExpr = s.render[index]
			}
			_, hasRowDependentValues, err := p.resolveNames(sortExpr, s.sourceInfo, s.ivarHelper)
			if err != nil {
				return nil, err
			}
			if !hasRowDependentValues {
				// No sorting needed!
				continue
			}
		}

		// Finally, if we haven't found anything so far, we really
		// need a new render.
		// TODO(knz/dan) currently this is only possible for selectNode.
		// If we are dealing with a UNION or something else we would need
		// to fabricate an intermediate selectNode to add the new render.
		if index == -1 && s != nil {
			cols, exprs, hasStar, err := p.computeRender(parser.SelectExpr{Expr: expr}, parser.TypeAny,
				s.source.info, s.ivarHelper, true)
			if err != nil {
				return nil, err
			}
			s.isStar = s.isStar || hasStar

			if len(cols) == 0 {
				// Nothing was expanded! No order here.
				continue
			}

			colIdxs := s.addOrMergeRenders(cols, exprs, true)
			for i := 0; i < len(colIdxs)-1; i++ {
				// If more than one column was expanded, turn them into sort columns too.
				// Except the last one, which will be added below.
				ordering = append(ordering,
					sqlbase.ColumnOrderInfo{ColIdx: colIdxs[i], Direction: direction})
			}
			index = colIdxs[len(colIdxs)-1]
		}

		if index == -1 {
			return nil, errors.Errorf("column %s does not exist", expr)
		}
		ordering = append(ordering,
			sqlbase.ColumnOrderInfo{ColIdx: index, Direction: direction})
	}

	if ordering == nil {
		// All the sort expressions are constant. Simply drop the sort node.
		return nil, nil
	}
	return &sortNode{ctx: p.ctx(), p: p, columns: columns, ordering: ordering}, nil
}
Example 10
func (p *planner) makeUpsertHelper(
	tn *parser.TableName,
	tableDesc *sqlbase.TableDescriptor,
	insertCols []sqlbase.ColumnDescriptor,
	updateCols []sqlbase.ColumnDescriptor,
	updateExprs parser.UpdateExprs,
	upsertConflictIndex *sqlbase.IndexDescriptor,
) (*upsertHelper, error) {
	defaultExprs, err := makeDefaultExprs(updateCols, &p.parser, &p.evalCtx)
	if err != nil {
		return nil, err
	}

	untupledExprs := make(parser.Exprs, 0, len(updateExprs))
	i := 0
	for _, updateExpr := range updateExprs {
		if updateExpr.Tuple {
			if t, ok := updateExpr.Expr.(*parser.Tuple); ok {
				for _, e := range t.Exprs {
					typ := updateCols[i].Type.ToDatumType()
					e := fillDefault(e, typ, i, defaultExprs)
					untupledExprs = append(untupledExprs, e)
					i++
				}
			}
		} else {
			typ := updateCols[i].Type.ToDatumType()
			e := fillDefault(updateExpr.Expr, typ, i, defaultExprs)
			untupledExprs = append(untupledExprs, e)
			i++
		}
	}

	sourceInfo := newSourceInfoForSingleTable(*tn, makeResultColumns(tableDesc.Columns))
	excludedSourceInfo := newSourceInfoForSingleTable(upsertExcludedTable, makeResultColumns(insertCols))

	helper := &upsertHelper{
		p:                  p,
		sourceInfo:         sourceInfo,
		excludedSourceInfo: excludedSourceInfo,
	}

	var evalExprs []parser.TypedExpr
	ivarHelper := parser.MakeIndexedVarHelper(helper, len(sourceInfo.sourceColumns)+len(excludedSourceInfo.sourceColumns))
	sources := multiSourceInfo{sourceInfo, excludedSourceInfo}
	for _, expr := range untupledExprs {
		normExpr, err := p.analyzeExpr(expr, sources, ivarHelper, parser.NoTypePreference, false, "")
		if err != nil {
			return nil, err
		}
		evalExprs = append(evalExprs, normExpr)
	}
	helper.evalExprs = evalExprs

	helper.allExprsIdentity = true
	for i, expr := range evalExprs {
		// analyzeExpr above has normalized all direct column names to ColumnItems.
		c, ok := expr.(*parser.ColumnItem)
		if !ok {
			helper.allExprsIdentity = false
			break
		}
		if len(c.Selector) > 0 ||
			!c.TableName.TableName.Equal(upsertExcludedTable.TableName) ||
			c.ColumnName.Normalize() != parser.ReNormalizeName(updateCols[i].Name) {
			helper.allExprsIdentity = false
			break
		}
	}

	return helper, nil
}
Example 11
// Set sets session variables.
// Privileges: None.
//   Notes: postgres/mysql do not require privileges for session variables (some exceptions).
func (p *planner) Set(n *parser.Set) (planNode, error) {
	if n.Name == nil {
		// A client has sent the reserved internal syntax SET ROW ...
		// Reject it.
		return nil, errors.New("invalid statement: SET ROW")
	}

	// By using VarName.String() here, any variables that are keywords will
	// be double quoted.
	name := strings.ToUpper(n.Name.String())
	typedValues := make([]parser.TypedExpr, len(n.Values))
	for i, expr := range n.Values {
		typedValue, err := parser.TypeCheck(expr, nil, parser.TypeString)
		if err != nil {
			return nil, err
		}
		typedValues[i] = typedValue
	}
	switch name {
	case `DATABASE`:
		dbName, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		if len(dbName) != 0 {
			// Verify database descriptor exists.
			if _, err := p.mustGetDatabaseDesc(dbName); err != nil {
				return nil, err
			}
		}
		p.session.Database = dbName
		p.evalCtx.Database = dbName

	case `SYNTAX`:
		s, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		switch parser.Name(s).Normalize() {
		case parser.ReNormalizeName(parser.Modern.String()):
			p.session.Syntax = int32(parser.Modern)
		case parser.ReNormalizeName(parser.Traditional.String()):
			p.session.Syntax = int32(parser.Traditional)
		default:
			return nil, fmt.Errorf("%s: \"%s\" is not in (%q, %q)", name, s, parser.Modern, parser.Traditional)
		}

	case `EXTRA_FLOAT_DIGITS`:
		// These settings are sent by the JDBC driver but we silently ignore them.

	case `APPLICATION_NAME`:
		// These settings are sent by the clients to improve query logging on the server,
		// but we silently ignore them.

	case `DEFAULT_TRANSACTION_ISOLATION`:
		// It's unfortunate that clients want us to support both SET
		// SESSION CHARACTERISTICS AS TRANSACTION ..., which takes the
		// isolation level as keywords/identifiers (e.g. JDBC), and SET
		// DEFAULT_TRANSACTION_ISOLATION TO '...', which takes an
		// expression (e.g. psycopg2). But that's how it is.  Just ensure
		// this code keeps in sync with SetDefaultIsolation() below.
		s, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		switch strings.ToUpper(s) {
		case `READ UNCOMMITTED`, `READ COMMITTED`, `SNAPSHOT`:
			p.session.DefaultIsolationLevel = enginepb.SNAPSHOT
		case `REPEATABLE READ`, `SERIALIZABLE`:
			p.session.DefaultIsolationLevel = enginepb.SERIALIZABLE
		default:
			return nil, fmt.Errorf("%s: unknown isolation level: %q", name, s)
		}

	case `DIST_SQL`:
		s, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		switch parser.Name(s).Normalize() {
		case parser.ReNormalizeName("sync"):
			p.session.DistSQLMode = distSQLSync
		case parser.ReNormalizeName("async"):
			p.session.DistSQLMode = distSQLAsync
		default:
			return nil, fmt.Errorf("%s: \"%s\" not supported", name, s)
		}

	default:
		return nil, fmt.Errorf("unknown variable: %q", name)
	}
	return &emptyNode{}, nil
}
Example 12
// findColumn looks up the column specified by a ColumnItem. The
// function returns the index of the source in the multiSourceInfo
// array and the column index for the column array of that
// source. Returns invalid indices and an error if the source is not
// found or the name is ambiguous.
func (sources multiSourceInfo) findColumn(c *parser.ColumnItem) (srcIdx int, colIdx int, err error) {
	if len(c.Selector) > 0 {
		return invalidSrcIdx, invalidColIdx, util.UnimplementedWithIssueErrorf(8318, "compound types not supported yet: %q", c)
	}

	colName := c.ColumnName.Normalize()
	var tableName parser.TableName
	if c.TableName.Table() != "" {
		tableName = c.TableName.NormalizedTableName()

		tn, err := sources.checkDatabaseName(tableName)
		if err != nil {
			return invalidSrcIdx, invalidColIdx, err
		}
		tableName = tn

		// Propagate the discovered database name back to the original VarName.
		// (to clarify the output of e.g. EXPLAIN)
		c.TableName.DatabaseName = tableName.DatabaseName
	}

	colIdx = invalidColIdx
	for iSrc, src := range sources {
		findColHelper := func(src *dataSourceInfo, iSrc, srcIdx, colIdx int, idx int) (int, int, error) {
			col := src.sourceColumns[idx]
			if parser.ReNormalizeName(col.Name) == colName {
				if colIdx != invalidColIdx {
					return invalidSrcIdx, invalidColIdx, fmt.Errorf("column reference %q is ambiguous", c)
				}
				srcIdx = iSrc
				colIdx = idx
			}
			return srcIdx, colIdx, nil
		}

		if tableName.Table() == "" {
			for idx := 0; idx < len(src.sourceColumns); idx++ {
				srcIdx, colIdx, err = findColHelper(src, iSrc, srcIdx, colIdx, idx)
				if err != nil {
					return srcIdx, colIdx, err
				}
			}
		} else {
			colRange, ok := src.sourceAliases[tableName]
			if !ok {
				// The data source "src" has no column for table tableName.
				// Try again with the next one.
				continue
			}
			for _, idx := range colRange {
				srcIdx, colIdx, err = findColHelper(src, iSrc, srcIdx, colIdx, idx)
				if err != nil {
					return srcIdx, colIdx, err
				}
			}
		}
	}

	if colIdx == invalidColIdx {
		return invalidSrcIdx, invalidColIdx, fmt.Errorf("column name %q not found", c)
	}

	return srcIdx, colIdx, nil
}
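
A stripped-down sketch of the lookup-and-ambiguity logic above, using hypothetical source/column types and lower-casing in place of dataSourceInfo and parser.ReNormalizeName:

package main

import (
	"fmt"
	"strings"
)

type source struct {
	name    string
	columns []string
}

// findColumn scans every source for a normalized-name match and fails if the
// name matches more than one column, as in the ambiguity check above.
func findColumn(sources []source, colName string) (srcIdx, colIdx int, err error) {
	srcIdx, colIdx = -1, -1
	want := strings.ToLower(colName)
	for i, src := range sources {
		for j, col := range src.columns {
			if strings.ToLower(col) == want {
				if colIdx != -1 {
					return -1, -1, fmt.Errorf("column reference %q is ambiguous", colName)
				}
				srcIdx, colIdx = i, j
			}
		}
	}
	if colIdx == -1 {
		return -1, -1, fmt.Errorf("column name %q not found", colName)
	}
	return srcIdx, colIdx, nil
}

func main() {
	srcs := []source{
		{name: "t1", columns: []string{"a", "b"}},
		{name: "t2", columns: []string{"b", "c"}},
	}
	fmt.Println(findColumn(srcs, "a")) // 0 0 <nil>
	fmt.Println(findColumn(srcs, "b")) // -1 -1 column reference "b" is ambiguous
}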
Example 13
func nameMatchesLease(lease *LeaseState, dbID sqlbase.ID, tableName string) bool {
	return lease.ParentID == dbID &&
		parser.ReNormalizeName(lease.Name) == parser.ReNormalizeName(tableName)
}
Example 14
func makeTableNameCacheKey(dbID sqlbase.ID, tableName string) tableNameCacheKey {
	return tableNameCacheKey{dbID, parser.ReNormalizeName(tableName)}
}
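
The last two examples rely on comparing or keying by normalized names. Here is a tiny, self-contained sketch of that pattern, with lower-casing standing in (as an assumption) for parser.ReNormalizeName:

package main

import (
	"fmt"
	"strings"
)

// tableKey is a simplified version of tableNameCacheKey: the lookup key pairs
// the database ID with a normalized table name so that differently-cased
// spellings of an unquoted identifier resolve to the same entry.
type tableKey struct {
	dbID uint32
	name string
}

func makeTableKey(dbID uint32, tableName string) tableKey {
	return tableKey{dbID: dbID, name: strings.ToLower(tableName)}
}

func main() {
	cache := map[tableKey]string{}
	cache[makeTableKey(1, "Users")] = "descriptor for users"
	// A differently-cased lookup hits the same cache entry.
	fmt.Println(cache[makeTableKey(1, "USERS")]) // descriptor for users
}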
Example 15
// makeEqualityPredicate constructs a joinPredicate object for joins. The join
// condition includes equality between numMergedEqualityColumns columns,
// specified by leftColNames and rightColNames.
func makeEqualityPredicate(
	left, right *dataSourceInfo,
	leftColNames, rightColNames parser.NameList,
	numMergedEqualityColumns int,
	concatInfos *dataSourceInfo,
) (resPred *joinPredicate, info *dataSourceInfo, err error) {
	if len(leftColNames) != len(rightColNames) {
		panic(fmt.Errorf("left columns' length %d doesn't match right columns' length %d in EqualityPredicate",
			len(leftColNames), len(rightColNames)))
	}
	if len(leftColNames) < numMergedEqualityColumns {
		panic(fmt.Errorf("cannot merge %d columns, only %d columns to compare", numMergedEqualityColumns, len(leftColNames)))
	}

	// Prepare the arrays populated below.
	cmpOps := make([]func(*parser.EvalContext, parser.Datum, parser.Datum) (parser.DBool, error), len(leftColNames))
	leftEqualityIndices := make([]int, len(leftColNames))
	rightEqualityIndices := make([]int, len(rightColNames))

	// usedLeft represents the list of indices that participate in the
	// equality predicate. They are collected in order to determine
	// below which columns remain after the equality; this is used
	// only when merging result columns.
	var usedLeft, usedRight []int
	var columns ResultColumns
	if numMergedEqualityColumns > 0 {
		usedLeft = make([]int, len(left.sourceColumns))
		for i := range usedLeft {
			usedLeft[i] = invalidColIdx
		}
		usedRight = make([]int, len(right.sourceColumns))
		for i := range usedRight {
			usedRight[i] = invalidColIdx
		}
		nResultColumns := len(left.sourceColumns) + len(right.sourceColumns) - numMergedEqualityColumns
		columns = make(ResultColumns, 0, nResultColumns)
	}

	// Find out which columns are involved in EqualityPredicate.
	for i := range leftColNames {
		leftColName := leftColNames[i].Normalize()
		rightColName := rightColNames[i].Normalize()

		// Find the column name on the left.
		leftIdx, leftType, err := pickUsingColumn(left.sourceColumns, leftColName, "left")
		if err != nil {
			return nil, nil, err
		}

		// Find the column name on the right.
		rightIdx, rightType, err := pickUsingColumn(right.sourceColumns, rightColName, "right")
		if err != nil {
			return nil, nil, err
		}

		// Remember the indices.
		leftEqualityIndices[i] = leftIdx
		rightEqualityIndices[i] = rightIdx

		// Memoize the comparison function.
		fn, found := parser.FindEqualComparisonFunction(leftType, rightType)
		if !found {
			return nil, nil, fmt.Errorf("JOIN/USING types %s for left column %s and %s for right column %s cannot be matched",
				leftType, leftColName, rightType, rightColName)
		}
		cmpOps[i] = fn

		if i < numMergedEqualityColumns {
			usedLeft[leftIdx] = i
			usedRight[rightIdx] = i

			// Merged columns come first in the results.
			columns = append(columns, left.sourceColumns[leftIdx])
		}
	}

	// Now, prepare/complete the metadata for the result columns.
	// The structure of the join data source results is like this:
	// - first, all the equality/USING columns;
	// - then all the left columns,
	// - then all the right columns,
	// The duplicate columns appended after the equality/USING columns
	// are hidden so that they are invisible to star expansion, but
	// not omitted so that they can still be selected separately.

	// Finish collecting the column definitions from the left and
	// right data sources.
	for i, c := range left.sourceColumns {
		if usedLeft != nil && usedLeft[i] != invalidColIdx {
			c.hidden = true
		}
		columns = append(columns, c)
	}
	for i, c := range right.sourceColumns {
		if usedRight != nil && usedRight[i] != invalidColIdx {
			c.hidden = true
		}
		columns = append(columns, c)
	}

	// Compute the mappings from table aliases to column sets from
	// both sides into a new alias-columnset mapping for the result
	// rows. We need to be extra careful about the aliases
	// for the anonymous table, which needs to be merged.
	aliases := make(sourceAliases, 0, len(left.sourceAliases)+len(right.sourceAliases))

	collectAliases := func(sourceAliases sourceAliases, offset int) {
		for _, alias := range sourceAliases {
			if alias.name == anonymousTable {
				continue
			}
			newRange := make([]int, len(alias.columnRange))
			for i, colIdx := range alias.columnRange {
				newRange[i] = colIdx + offset
			}
			aliases = append(aliases, sourceAlias{name: alias.name, columnRange: newRange})
		}
	}
	collectAliases(left.sourceAliases, numMergedEqualityColumns)
	collectAliases(right.sourceAliases, numMergedEqualityColumns+len(left.sourceColumns))

	anonymousAlias := sourceAlias{name: anonymousTable, columnRange: nil}
	var hiddenLeftNames, hiddenRightNames []string

	// All the merged columns at the beginning belong to the
	// anonymous data source.
	for i := 0; i < numMergedEqualityColumns; i++ {
		anonymousAlias.columnRange = append(anonymousAlias.columnRange, i)
		hiddenLeftNames = append(hiddenLeftNames, parser.ReNormalizeName(left.sourceColumns[i].Name))
		hiddenRightNames = append(hiddenRightNames, parser.ReNormalizeName(right.sourceColumns[i].Name))
	}

	// Now collect the other table-less columns into the anonymous data
	// source, but hide (skip) those that are already merged.
	collectAnonymousAliases := func(
		sourceAliases sourceAliases, hiddenNames []string, cols ResultColumns, offset int,
	) {
		for _, alias := range sourceAliases {
			if alias.name != anonymousTable {
				continue
			}
			for _, colIdx := range alias.columnRange {
				isHidden := false
				for _, hiddenName := range hiddenNames {
					if parser.ReNormalizeName(cols[colIdx].Name) == hiddenName {
						isHidden = true
						break
					}
				}
				if !isHidden {
					anonymousAlias.columnRange = append(anonymousAlias.columnRange, colIdx+offset)
				}
			}
		}
	}
	collectAnonymousAliases(left.sourceAliases, hiddenLeftNames, left.sourceColumns,
		numMergedEqualityColumns)
	collectAnonymousAliases(right.sourceAliases, hiddenRightNames, right.sourceColumns,
		numMergedEqualityColumns+len(left.sourceColumns))

	if anonymousAlias.columnRange != nil {
		aliases = append(aliases, anonymousAlias)
	}

	info = &dataSourceInfo{
		sourceColumns: columns,
		sourceAliases: aliases,
	}

	pred := &joinPredicate{
		numLeftCols:              len(left.sourceColumns),
		numRightCols:             len(right.sourceColumns),
		leftColNames:             leftColNames,
		rightColNames:            rightColNames,
		numMergedEqualityColumns: numMergedEqualityColumns,
		cmpFunctions:             cmpOps,
		leftEqualityIndices:      leftEqualityIndices,
		rightEqualityIndices:     rightEqualityIndices,
		info:                     info,
	}
	// We must initialize the indexed var helper in all cases, even when
	// there is no on condition, so that getNeededColumns() does not get
	// confused.
	pred.iVarHelper = parser.MakeIndexedVarHelper(pred, len(columns))
	return pred, info, nil
}
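
The result-column layout described in the comments above (merged/USING columns first, then the left columns, then the right columns, with the duplicated copies hidden) can be sketched as follows. For simplicity this assumes the USING columns are the leading columns on each side, which the real code does not require:

package main

import "fmt"

type col struct {
	name   string
	hidden bool
}

// joinResultColumns lays out JOIN ... USING result columns as described
// above: the merged columns first, then all left columns, then all right
// columns, with the duplicated copies of the merged columns marked hidden.
func joinResultColumns(left, right []col, merged int) []col {
	out := make([]col, 0, merged+len(left)+len(right))
	out = append(out, left[:merged]...) // merged/USING columns come first
	for i, c := range left {
		c.hidden = c.hidden || i < merged
		out = append(out, c)
	}
	for i, c := range right {
		c.hidden = c.hidden || i < merged
		out = append(out, c)
	}
	return out
}

func main() {
	left := []col{{name: "k"}, {name: "a"}}
	right := []col{{name: "k"}, {name: "b"}}
	for _, c := range joinResultColumns(left, right, 1) {
		fmt.Printf("%-2s hidden=%v\n", c.name, c.hidden)
	}
	// Prints: k (visible), k (hidden), a (visible), k (hidden), b (visible).
}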
Example 16
// orderBy constructs a sortNode based on the ORDER BY clause.
//
// In the general case (SELECT/UNION/VALUES), we can sort by a column index or a
// column name.
//
// However, for a SELECT, we can also sort by the pre-alias column name (SELECT
// a AS b ORDER BY b) as well as expressions (SELECT a, b ORDER BY a+b). In
// this case, construction of the sortNode might adjust the number of render
// targets in the selectNode if any ordering expressions are specified.
//
// TODO(dan): SQL also allows sorting a VALUES or UNION by an expression.
// Support this. It will reduce some of the special casing below, but requires a
// generalization of how to add derived columns to a SelectStatement.
func (p *planner) orderBy(orderBy parser.OrderBy, n planNode) (*sortNode, error) {
	if orderBy == nil {
		return nil, nil
	}

	// Multiple tests below use selectNode as a special case.
	// So factor the cast.
	s, _ := n.(*selectNode)

	// We grab a copy of columns here because we might add new render targets
	// below. This is the set of columns requested by the query.
	columns := n.Columns()
	numOriginalCols := len(columns)
	if s != nil {
		numOriginalCols = s.numOriginalCols
	}
	var ordering sqlbase.ColumnOrdering

	for _, o := range orderBy {
		direction := encoding.Ascending
		if o.Direction == parser.Descending {
			direction = encoding.Descending
		}

		index := -1

		// Unwrap parenthesized expressions like "((a))" to "a".
		expr := parser.StripParens(o.Expr)

		// The logical data source for ORDER BY is the list of render
		// expressions for a SELECT, as specified in the input SQL text
		// (or an entire UNION or VALUES clause).  Alas, SQL has some
		// historical baggage and there are some special cases:
		//
		// 1) column ordinals. If a simple integer literal is used,
		//    optionally enclosed within parentheses but *not subject to
		//    any arithmetic*, then this refers to one of the columns of
		//    the data source. Then use the render expression at that
		//    ordinal position as sort key.
		//
		// 2) if the expression is the aliased (AS) name of a render
		//    expression in a SELECT clause, then use that
		//    render as sort key.
		//    e.g. SELECT a AS b, b AS c ORDER BY b
		//    this sorts on the first render.
		//
		// 3) otherwise, if the expression is already in the render list,
		//    then use that render as sort key.
		//    e.g. SELECT b AS c ORDER BY b
		//    this sorts on the first render.
		//    (this is an optimization)
		//
		// 4) if the sort key is not dependent on the data source (no
		//    IndexedVar) then simply do not sort. (this is an optimization)
		//
		// 5) otherwise, add a new render with the ORDER BY expression
		//    and use that as sort key.
		//    e.g. SELECT a FROM t ORDER by b
		//    e.g. SELECT a, b FROM t ORDER by a+b
		//
		// So first, deal with column ordinals.
		col, err := p.colIndex(numOriginalCols, expr, "ORDER BY")
		if err != nil {
			return nil, err
		}
		if col != -1 {
			index = col
		}

		// Now, deal with render aliases.
		if vBase, ok := expr.(parser.VarName); index == -1 && ok {
			v, err := vBase.NormalizeVarName()
			if err != nil {
				return nil, err
			}

			if c, ok := v.(*parser.ColumnItem); ok && c.TableName.Table() == "" {
				// Look for an output column that matches the name. This
				// handles cases like:
				//
				//   SELECT a AS b FROM t ORDER BY b
				target := c.ColumnName.Normalize()
				for j, col := range columns {
					if parser.ReNormalizeName(col.Name) == target {
						index = j
						break
					}
				}
			}
		}

		if s != nil {
			// Try to optimize constant sorts. For this we need to resolve
			// names to IndexedVars then see if there were any vars
			// substituted.
			sortExpr := expr
			if index != -1 {
				// We found a render above, so fetch it. This is needed
				// because if the sort expression is a reference to a render
				// alias, resolveNames below would be incorrect.
				sortExpr = s.render[index]
			}
			resolved, hasVars, err := p.resolveNames(sortExpr, s.sourceInfo, s.ivarHelper)
			if err != nil {
				return nil, err
			}
			if !hasVars {
				// No sorting needed!
				continue
			}

			// Now, try to find an equivalent render. We use the syntax
			// representation as approximation of equivalence.
			// We also use the expression after name resolution so
			// that comparison occurs after replacing ordinal references
			// to IndexedVars.
			if index == -1 {
				exprStr := parser.AsStringWithFlags(resolved, parser.FmtSymbolicVars)
				for j, render := range s.render {
					if parser.AsStringWithFlags(render, parser.FmtSymbolicVars) == exprStr {
						index = j
						break
					}
				}
			}
		}

		// Finally, if we haven't found anything so far, we really
		// need a new render.
		// TODO(knz/dan) currently this is only possible for selectNode.
		// If we are dealing with a UNION or something else we would need
		// to fabricate an intermediate selectNode to add the new render.
		if index == -1 && s != nil {
			// We need to add a new render, but let's be careful: if there's
			// a star in there, we're really adding multiple columns. These
			// all become sort columns! And if no columns are expanded, then
			// no columns become sort keys.
			nextRenderIdx := len(s.render)

			// TODO(knz) the optimizations above which reuse existing
			// renders and skip ordering for constant expressions are not
			// applied by addRender(). In other words, "ORDER BY foo.*" will
			// not be optimized as well as "ORDER BY foo.x, foo.y".  We
			// could do this either here or as a separate later
			// optimization.
			if err := s.addRender(parser.SelectExpr{Expr: expr}, nil); err != nil {
				return nil, err
			}

			for extraIdx := nextRenderIdx; extraIdx < len(s.render)-1; extraIdx++ {
				// If more than one column was expanded, turn them into sort columns too.
				// Except the last one, which will be added below.
				ordering = append(ordering,
					sqlbase.ColumnOrderInfo{ColIdx: extraIdx, Direction: direction})
			}
			if len(s.render) == nextRenderIdx {
				// Nothing was expanded! So there is no order here.
				continue
			}
			index = len(s.render) - 1
		}

		if index == -1 {
			return nil, errors.Errorf("column %s does not exist", expr)
		}
		ordering = append(ordering,
			sqlbase.ColumnOrderInfo{ColIdx: index, Direction: direction})
	}

	if ordering == nil {
		// All the sort expressions are constant. Simply drop the sort node.
		return nil, nil
	}
	return &sortNode{ctx: p.ctx(), p: p, columns: columns, ordering: ordering}, nil
}
Example 17
// orderBy constructs a sortNode based on the ORDER BY clause.
//
// In the general case (SELECT/UNION/VALUES), we can sort by a column index or a
// column name.
//
// However, for a SELECT, we can also sort by the pre-alias column name (SELECT
// a AS b ORDER BY b) as well as expressions (SELECT a, b ORDER BY a+b). In
// this case, construction of the sortNode might adjust the number of render
// targets in the selectNode if any ordering expressions are specified.
//
// TODO(dan): SQL also allows sorting a VALUES or UNION by an expression.
// Support this. It will reduce some of the special casing below, but requires a
// generalization of how to add derived columns to a SelectStatement.
func (p *planner) orderBy(orderBy parser.OrderBy, n planNode) (*sortNode, error) {
	if orderBy == nil {
		return nil, nil
	}

	// We grab a copy of columns here because we might add new render targets
	// below. This is the set of columns requested by the query.
	columns := n.Columns()
	numOriginalCols := len(columns)
	if s, ok := n.(*selectNode); ok {
		numOriginalCols = s.numOriginalCols
	}
	var ordering sqlbase.ColumnOrdering

	for _, o := range orderBy {
		index := -1

		// Unwrap parenthesized expressions like "((a))" to "a".
		expr := parser.StripParens(o.Expr)

		if vBase, ok := expr.(parser.VarName); ok {
			v, err := vBase.NormalizeVarName()
			if err != nil {
				return nil, err
			}

			var c *parser.ColumnItem
			switch t := v.(type) {
			case *parser.ColumnItem:
				c = t
			default:
				return nil, fmt.Errorf("invalid syntax for ORDER BY: %s", v)
			}

			if c.TableName.Table() == "" {
				// Look for an output column that matches the name. This
				// handles cases like:
				//
				//   SELECT a AS b FROM t ORDER BY b
				target := c.ColumnName.Normalize()
				for j, col := range columns {
					if parser.ReNormalizeName(col.Name) == target {
						index = j
						break
					}
				}
			}

			if s, ok := n.(*selectNode); ok && index == -1 {
				// No output column matched the name, so look for an existing
				// render target that matches the column name. This handles cases like:
				//
				//   SELECT a AS b FROM t ORDER BY a
				colIdx, err := s.source.findUnaliasedColumn(c)
				if err != nil {
					return nil, err
				}
				if colIdx != invalidColIdx {
					for j, r := range s.render {
						if ivar, ok := r.(*parser.IndexedVar); ok {
							s.ivarHelper.AssertSameContainer(ivar)
							if ivar.Idx == colIdx {
								index = j
								break
							}
						}
					}
				}
			}
		}

		if index == -1 {
			// The order by expression matched neither an output column nor an
			// existing render target.
			if col, err := colIndex(numOriginalCols, expr); err != nil {
				return nil, err
			} else if col >= 0 {
				index = col
			} else if s, ok := n.(*selectNode); ok {
				// TODO(dan): Once we support VALUES (1), (2) ORDER BY 3*4, this type
				// check goes away.

				// Add a new render expression to use for ordering. This
				// handles cases where the expression is either not a name or
				// is a name that is otherwise not referenced by the query:
				//
				//   SELECT a FROM t ORDER by b
				//   SELECT a, b FROM t ORDER by a+b
				if err := s.addRender(parser.SelectExpr{Expr: expr}, nil); err != nil {
					return nil, err
				}
				index = len(s.columns) - 1
			} else {
				return nil, errors.Errorf("column %s does not exist", expr)
			}
		}
		direction := encoding.Ascending
		if o.Direction == parser.Descending {
			direction = encoding.Descending
		}
		ordering = append(ordering, sqlbase.ColumnOrderInfo{ColIdx: index, Direction: direction})
	}

	return &sortNode{ctx: p.ctx(), p: p, columns: columns, ordering: ordering}, nil
}
Example 18
// Set sets session variables.
// Privileges: None.
//   Notes: postgres/mysql do not require privileges for session variables (some exceptions).
func (p *planner) Set(n *parser.Set) (planNode, error) {
	if n.Name == nil {
		// A client has sent the reserved internal syntax SET ROW ...
		// Reject it.
		return nil, errors.New("invalid statement: SET ROW")
	}

	// By using VarName.String() here, any variables that are keywords will
	// be double quoted.
	name := strings.ToUpper(n.Name.String())
	typedValues := make([]parser.TypedExpr, len(n.Values))
	for i, expr := range n.Values {
		typedValue, err := parser.TypeCheck(expr, nil, parser.TypeString)
		if err != nil {
			return nil, err
		}
		typedValues[i] = typedValue
	}
	switch name {
	case `DATABASE`:
		dbName, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		if len(dbName) != 0 {
			// Verify database descriptor exists.
			if _, err := p.mustGetDatabaseDesc(dbName); err != nil {
				return nil, err
			}
		}
		p.session.Database = dbName
		p.evalCtx.Database = dbName

	case `SYNTAX`:
		s, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		switch parser.Name(s).Normalize() {
		case parser.ReNormalizeName(parser.Modern.String()):
			p.session.Syntax = int32(parser.Modern)
		case parser.ReNormalizeName(parser.Traditional.String()):
			p.session.Syntax = int32(parser.Traditional)
		default:
			return nil, fmt.Errorf("%s: \"%s\" is not in (%q, %q)", name, s, parser.Modern, parser.Traditional)
		}

	case `DEFAULT_TRANSACTION_ISOLATION`:
		// It's unfortunate that clients want us to support both SET
		// SESSION CHARACTERISTICS AS TRANSACTION ..., which takes the
		// isolation level as keywords/identifiers (e.g. JDBC), and SET
		// DEFAULT_TRANSACTION_ISOLATION TO '...', which takes an
		// expression (e.g. psycopg2). But that's how it is.  Just ensure
		// this code keeps in sync with SetDefaultIsolation() below.
		s, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		switch strings.ToUpper(s) {
		case `READ UNCOMMITTED`, `READ COMMITTED`, `SNAPSHOT`:
			p.session.DefaultIsolationLevel = enginepb.SNAPSHOT
		case `REPEATABLE READ`, `SERIALIZABLE`:
			p.session.DefaultIsolationLevel = enginepb.SERIALIZABLE
		default:
			return nil, fmt.Errorf("%s: unknown isolation level: %q", name, s)
		}

	case `DIST_SQL`:
		s, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		switch parser.Name(s).Normalize() {
		case parser.ReNormalizeName("off"):
			p.session.DistSQLMode = distSQLOff
		case parser.ReNormalizeName("on"):
			p.session.DistSQLMode = distSQLOn
		case parser.ReNormalizeName("always"):
			p.session.DistSQLMode = distSQLAlways
		default:
			return nil, fmt.Errorf("%s: \"%s\" not supported", name, s)
		}

	// These settings are sent by various client drivers. We don't support
	// changing them, so we either silently ignore them or return an error
	// when a client requests a value we cannot honor.
	case `EXTRA_FLOAT_DIGITS`:
	// See https://www.postgresql.org/docs/9.6/static/runtime-config-client.html
	case `APPLICATION_NAME`:
	// Set by clients to improve query logging.
	// See https://www.postgresql.org/docs/9.6/static/runtime-config-logging.html#GUC-APPLICATION-NAME
	case `CLIENT_ENCODING`:
		// See https://www.postgresql.org/docs/9.6/static/multibyte.html
		s, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		if strings.ToUpper(s) != "UTF8" {
			return nil, fmt.Errorf("non-UTF8 encoding %s not supported", s)
		}
	case `SEARCH_PATH`:
	// Controls the schema search order. We don't really support this as we
	// don't have first-class support for schemas.
	// TODO(jordan) can we hook this up to EvalContext.SearchPath without
	// breaking things?
	// See https://www.postgresql.org/docs/9.6/static/runtime-config-client.html
	case `STANDARD_CONFORMING_STRINGS`:
		// If true, escape backslash literals in strings. We do this by default,
		// and we do not support the opposite behavior.
		// See https://www.postgresql.org/docs/9.1/static/runtime-config-compatible.html#GUC-STANDARD-CONFORMING-STRINGS
		s, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		if parser.Name(s).Normalize() != parser.ReNormalizeName("on") {
			return nil, fmt.Errorf("%s: \"%s\" not supported", name, s)
		}
	case `CLIENT_MIN_MESSAGES`:
	// Controls returned message verbosity. We don't support this.
	// See https://www.postgresql.org/docs/9.6/static/runtime-config-compatible.html

	default:
		return nil, fmt.Errorf("unknown variable: %q", name)
	}
	return &emptyNode{}, nil
}