// runLsZones lists the names of all zone configurations in the cluster,
// one per line; the default zone is always printed first as ".default".
// Any positional arguments trigger usage output and a nil return.
func runLsZones(cmd *cobra.Command, args []string) error {
	if len(args) > 0 {
		mustUsage(cmd)
		return nil
	}
	conn := makeSQLClient()
	defer conn.Close()
	zones, err := queryZones(conn)
	if err != nil {
		return err
	}
	if len(zones) == 0 {
		fmt.Printf("No zones found\n")
		return nil
	}
	// TODO(pmattis): This is inefficient. Instead of querying for all of the
	// descriptors in the system, we could query for only those identified by
	// zones. We'd also need to do a second query to retrieve all of the database
	// descriptors referred to by table descriptors.
	descs, err := queryDescriptors(conn)
	if err != nil {
		return err
	}
	// Loop over the zones and determine the name for each based on the name of
	// the corresponding descriptor.
	var output []string
	for id := range zones {
		if id == 0 {
			// We handle the default zone below.
			continue
		}
		desc, ok := descs[id]
		if !ok {
			// Zone references a descriptor we did not fetch; skip it.
			continue
		}
		var name string
		if tableDesc := desc.GetTable(); tableDesc != nil {
			// Table zones are printed qualified as "<db>.<table>".
			dbDesc, ok := descs[tableDesc.ParentID]
			if !ok {
				continue
			}
			name = parser.Name(dbDesc.GetName()).String() + "."
		}
		name += parser.Name(desc.GetName()).String()
		output = append(output, name)
	}
	sort.Strings(output)
	// Ensure the default zone is always printed first.
	if _, ok := zones[0]; ok {
		fmt.Println(".default")
	}
	for _, o := range output {
		fmt.Println(o)
	}
	return nil
}
// getTableNames implements the SchemaAccessor interface.
// It returns the qualified names of all tables in dbDesc's database.
func (p *planner) getTableNames(dbDesc *sqlbase.DatabaseDescriptor) (parser.TableNames, error) {
	// Virtual schemas are not materialized in the namespace table; answer
	// from the in-memory schema entry instead.
	if e, ok := getVirtualSchemaEntry(dbDesc.Name); ok {
		return e.tableNames(), nil
	}
	// Scan all name metadata keys under this database's ID.
	prefix := sqlbase.MakeNameMetadataKey(dbDesc.ID, "")
	sr, err := p.txn.Scan(prefix, prefix.PrefixEnd(), 0)
	if err != nil {
		return nil, err
	}
	var tableNames parser.TableNames
	for _, row := range sr {
		// The table name is encoded as the key suffix after the prefix.
		_, tableName, err := encoding.DecodeUnsafeStringAscending(
			bytes.TrimPrefix(row.Key, prefix), nil)
		if err != nil {
			return nil, err
		}
		tn := parser.TableName{
			DatabaseName: parser.Name(dbDesc.Name),
			TableName:    parser.Name(tableName),
		}
		tableNames = append(tableNames, tn)
	}
	return tableNames, nil
}
// upsertExprsAndIndex returns the upsert conflict index and the (possibly // synthetic) SET expressions used when a row conflicts. func upsertExprsAndIndex( tableDesc *sqlbase.TableDescriptor, onConflict parser.OnConflict, insertCols []sqlbase.ColumnDescriptor, ) (parser.UpdateExprs, *sqlbase.IndexDescriptor, error) { if onConflict.IsUpsertAlias() { // The UPSERT syntactic sugar is the same as the longhand specifying the // primary index as the conflict index and SET expressions for the columns // in insertCols minus any columns in the conflict index. Example: // `UPSERT INTO abc VALUES (1, 2, 3)` is syntactic sugar for // `INSERT INTO abc VALUES (1, 2, 3) ON CONFLICT a DO UPDATE SET b = 2, c = 3`. conflictIndex := &tableDesc.PrimaryIndex indexColSet := make(map[sqlbase.ColumnID]struct{}, len(conflictIndex.ColumnIDs)) for _, colID := range conflictIndex.ColumnIDs { indexColSet[colID] = struct{}{} } updateExprs := make(parser.UpdateExprs, 0, len(insertCols)) for _, c := range insertCols { if _, ok := indexColSet[c.ID]; !ok { names := parser.UnresolvedNames{ parser.UnresolvedName{parser.Name(c.Name)}, } expr := &parser.ColumnItem{ TableName: upsertExcludedTable, ColumnName: parser.Name(c.Name), } updateExprs = append(updateExprs, &parser.UpdateExpr{Names: names, Expr: expr}) } } return updateExprs, conflictIndex, nil } indexMatch := func(index sqlbase.IndexDescriptor) bool { if !index.Unique { return false } if len(index.ColumnNames) != len(onConflict.Columns) { return false } for i, colName := range index.ColumnNames { if sqlbase.ReNormalizeName(colName) != sqlbase.NormalizeName(onConflict.Columns[i]) { return false } } return true } if indexMatch(tableDesc.PrimaryIndex) { return onConflict.Exprs, &tableDesc.PrimaryIndex, nil } for _, index := range tableDesc.Indexes { if indexMatch(index) { return onConflict.Exprs, &index, nil } } return nil, nil, fmt.Errorf("there is no unique or exclusion constraint matching the ON CONFLICT specification") }
func (e virtualSchemaEntry) tableNames() parser.TableNames { var res parser.TableNames for _, tableName := range e.orderedTableNames { tn := parser.TableName{ DatabaseName: parser.Name(e.desc.Name), TableName: parser.Name(tableName), } res = append(res, tn) } return res }
func testInitDummySelectNode(desc *sqlbase.TableDescriptor) *selectNode { scan := &scanNode{} scan.desc = *desc scan.initDescDefaults(publicColumns) sel := &selectNode{} sel.qvals = make(qvalMap) sel.source.plan = scan testName := parser.TableName{TableName: parser.Name(desc.Name), DatabaseName: parser.Name("test")} sel.source.info = newSourceInfoForSingleTable(testName, scan.Columns()) sel.sourceInfo = multiSourceInfo{sel.source.info} return sel }
// getTableNames returns the qualified names of all tables in dbDesc's
// database, read from the name metadata keys within the planner's txn.
func (p *planner) getTableNames(dbDesc *DatabaseDescriptor) (parser.QualifiedNames, *roachpb.Error) {
	prefix := MakeNameMetadataKey(dbDesc.ID, "")
	sr, pErr := p.txn.Scan(prefix, prefix.PrefixEnd(), 0)
	if pErr != nil {
		return nil, pErr
	}
	var qualifiedNames parser.QualifiedNames
	for _, row := range sr {
		// The table name is encoded as the key suffix after the prefix.
		_, tableName, err := encoding.DecodeStringAscending(
			bytes.TrimPrefix(row.Key, prefix), nil)
		if err != nil {
			return nil, roachpb.NewError(err)
		}
		qname := &parser.QualifiedName{
			Base:     parser.Name(dbDesc.Name),
			Indirect: parser.Indirection{parser.NameIndirection(tableName)},
		}
		// Normalize eagerly so callers can use the name directly.
		if err := qname.NormalizeTableName(""); err != nil {
			return nil, roachpb.NewError(err)
		}
		qualifiedNames = append(qualifiedNames, qname)
	}
	return qualifiedNames, nil
}
// findColumn looks up the column described by a QualifiedName. The qname will be normalized.
// It returns a columnRef with colIdx == invalidColIdx and an error when no
// matching column exists in the (single) table in scope.
func (qt qvalResolver) findColumn(qname *parser.QualifiedName) (columnRef, error) {
	ref := columnRef{colIdx: invalidColIdx}
	if err := qname.NormalizeColumnName(); err != nil {
		return ref, err
	}

	// We can't resolve stars to a single column.
	if qname.IsStar() {
		err := fmt.Errorf("qualified name \"%s\" not found", qname)
		return ref, err
	}

	// TODO(radu): when we support multiple FROMs, we will find the node with the correct alias; if
	// no alias is given, we will search for the column in all FROMs and make sure there is only
	// one. For now we just check that the name matches (if given).
	if qname.Base == "" {
		// Unqualified column: assume the single table in scope.
		qname.Base = parser.Name(qt.table.alias)
	}
	if equalName(qt.table.alias, string(qname.Base)) {
		colName := qname.Column()
		// Linear search over the table's columns for a name match.
		for idx, col := range qt.table.columns {
			if equalName(col.Name, colName) {
				ref.table = qt.table
				ref.colIdx = idx
				return ref, nil
			}
		}
	}

	err := fmt.Errorf("qualified name \"%s\" not found", qname)
	return ref, err
}
// DatabaseDetails is an endpoint that returns grants and a list of table names
// for the specified database.
func (s *adminServer) DatabaseDetails(_ context.Context, req *DatabaseDetailsRequest) (*DatabaseDetailsResponse, error) {
	var session sql.Session
	user := s.getUser(req)

	// Placeholders don't work with SHOW statements, so we need to manually
	// escape the database name.
	//
	// TODO(cdo): Use placeholders when they're supported by SHOW.
	escDBName := parser.Name(req.Database).String()
	query := fmt.Sprintf("SHOW GRANTS ON DATABASE %s; SHOW TABLES FROM %s;", escDBName, escDBName)
	r := s.sqlExecutor.ExecuteStatements(user, &session, query, nil)
	// A missing database surfaces as gRPC NotFound rather than a 500.
	if pErr := s.firstNotFoundError(r.ResultList); pErr != nil {
		return nil, grpc.Errorf(codes.NotFound, "%s", pErr)
	}
	if err := s.checkQueryResults(r.ResultList, 2); err != nil {
		return nil, s.serverError(err)
	}

	// Marshal grants.
	var resp DatabaseDetailsResponse
	{
		const (
			userCol       = "User"
			privilegesCol = "Privileges"
		)
		scanner := newResultScanner(r.ResultList[0].Columns)
		for _, row := range r.ResultList[0].Rows {
			// Marshal grant, splitting comma-separated privileges into a proper slice.
			var grant DatabaseDetailsResponse_Grant
			var privileges string
			if err := scanner.Scan(row, userCol, &grant.User); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, privilegesCol, &privileges); err != nil {
				return nil, err
			}
			grant.Privileges = strings.Split(privileges, ",")
			resp.Grants = append(resp.Grants, &grant)
		}
	}

	// Marshal table names.
	{
		const tableCol = "Table"
		scanner := newResultScanner(r.ResultList[1].Columns)
		// Sanity check: SHOW TABLES is expected to return exactly one column.
		if a, e := len(r.ResultList[1].Columns), 1; a != e {
			return nil, s.serverErrorf("show tables columns mismatch: %d != expected %d", a, e)
		}
		for _, row := range r.ResultList[1].Rows {
			var tableName string
			if err := scanner.Scan(row, tableCol, &tableName); err != nil {
				return nil, err
			}
			resp.TableNames = append(resp.TableNames, tableName)
		}
	}

	return &resp, nil
}
// ShowTables returns all the tables.
// Privileges: None.
//   Notes: postgres does not have a SHOW TABLES statement.
//          mysql only returns tables you have privileges on.
func (p *planner) ShowTables(n *parser.ShowTables) (planNode, error) {
	// TODO(pmattis): This could be implemented as:
	//
	//   SELECT name FROM system.namespace
	//     WHERE parentID = (SELECT id FROM system.namespace
	//                       WHERE parentID = 0 AND name = <database>)
	//
	// Default to the session's current database when none is named.
	if n.Name == nil {
		if p.session.Database == "" {
			return nil, errNoDatabase
		}
		n.Name = &parser.QualifiedName{Base: parser.Name(p.session.Database)}
	}
	dbDesc, err := p.getDatabaseDesc(string(n.Name.Base))
	if err != nil {
		return nil, err
	}
	tableNames, err := p.getTableNames(dbDesc)
	if err != nil {
		return nil, err
	}
	// Emit one row per table name.
	v := &valuesNode{columns: []string{"Table"}}
	for _, name := range tableNames {
		v.rows = append(v.rows, []parser.Datum{parser.DString(name.Table())})
	}
	return v, nil
}
// quoteName quotes based on Traditional syntax and adds commas between names. func quoteNames(names ...string) string { nameList := make(parser.NameList, len(names)) for i, n := range names { nameList[i] = parser.Name(n) } return parser.AsString(nameList) }
func queryNamespace(conn *sqlConn, parentID sqlbase.ID, name string) (sqlbase.ID, error) { rows, err := makeQuery( `SELECT id FROM system.namespace WHERE parentID = $1 AND name = $2`, parentID, sqlbase.NormalizeName(parser.Name(name)))(conn) if err != nil { return 0, err } defer func() { _ = rows.Close() }() if err != nil { return 0, fmt.Errorf("%s not found: %v", name, err) } if len(rows.Columns()) != 1 { return 0, fmt.Errorf("unexpected result columns: %d", len(rows.Columns())) } vals := make([]driver.Value, 1) if err := rows.Next(vals); err != nil { return 0, err } switch t := vals[0].(type) { case int64: return sqlbase.ID(t), nil default: return 0, fmt.Errorf("unexpected result type: %T", vals[0]) } }
// GetTableSpan gets the key span for a SQL table, including any indices. func (ie InternalExecutor) GetTableSpan(user string, txn *client.Txn, dbName, tableName string) (roachpb.Span, error) { // Lookup the table ID. p := makeInternalPlanner(txn, user) p.leaseMgr = ie.LeaseManager tn := parser.TableName{DatabaseName: parser.Name(dbName), TableName: parser.Name(tableName)} tableID, err := getTableID(p, &tn) if err != nil { return roachpb.Span{}, err } // Determine table data span. tablePrefix := keys.MakeTablePrefix(uint32(tableID)) tableStartKey := roachpb.Key(tablePrefix) tableEndKey := tableStartKey.PrefixEnd() return roachpb.Span{Key: tableStartKey, EndKey: tableEndKey}, nil }
// Format pretty-prints the orderingInfo to a stream.
// If columns is not nil, column names are printed instead of column indexes.
func (ord orderingInfo) Format(buf *bytes.Buffer, columns []ResultColumn) {
	sep := ""
	// Print the exact match columns. We sort them to ensure
	// a deterministic output order.
	cols := make([]int, 0, len(ord.exactMatchCols))
	for i := range ord.exactMatchCols {
		cols = append(cols, i)
	}
	sort.Ints(cols)
	for _, i := range cols {
		buf.WriteString(sep)
		sep = ","
		// Exact-match columns are marked with a leading '='.
		buf.WriteByte('=')
		if columns == nil || i >= len(columns) {
			// No name available for this index; print the index itself.
			fmt.Fprintf(buf, "%d", i)
		} else {
			parser.Name(columns[i].Name).Format(buf, parser.FmtSimple)
		}
	}
	// Print the ordering columns and for each their sort order.
	for _, o := range ord.ordering {
		buf.WriteString(sep)
		sep = ","
		// '+' for ascending, '-' for descending.
		prefix := byte('+')
		if o.Direction == encoding.Descending {
			prefix = '-'
		}
		buf.WriteByte(prefix)
		if columns == nil || o.ColIdx >= len(columns) {
			fmt.Fprintf(buf, "%d", o.ColIdx)
		} else {
			parser.Name(columns[o.ColIdx].Name).Format(buf, parser.FmtSimple)
		}
	}
	if ord.unique {
		buf.WriteString(sep)
		buf.WriteString("unique")
	}
}
// makeReturningHelper builds a returningHelper that evaluates the RETURNING
// expressions r against rows of the table aliased as alias with columns
// tablecols. An empty r yields a zero-valued helper that returns no rows.
// Aggregate functions are rejected.
func (p *planner) makeReturningHelper(
	r parser.ReturningExprs,
	desiredTypes []parser.Datum,
	alias string,
	tablecols []sqlbase.ColumnDescriptor,
) (returningHelper, error) {
	rh := returningHelper{
		p: p,
	}
	if len(r) == 0 {
		return rh, nil
	}

	// Aggregates make no sense over individual mutated rows; reject early.
	for _, e := range r {
		if p.parser.AggregateInExpr(e.Expr) {
			return rh, fmt.Errorf("aggregate functions are not allowed in RETURNING")
		}
	}

	rh.columns = make([]ResultColumn, 0, len(r))
	aliasTableName := parser.TableName{TableName: parser.Name(alias)}
	rh.source = newSourceInfoForSingleTable(aliasTableName, makeResultColumns(tablecols))
	rh.qvals = make(qvalMap)
	rh.exprs = make([]parser.TypedExpr, 0, len(r))
	for i, target := range r {
		// Pre-normalize VarNames at the top level so that checkRenderStar can see stars.
		if err := target.NormalizeTopLevelVarName(); err != nil {
			return returningHelper{}, err
		}

		// A '*' target expands into one expression/column per table column.
		if isStar, cols, typedExprs, err := checkRenderStar(target, rh.source, rh.qvals); err != nil {
			return returningHelper{}, err
		} else if isStar {
			rh.exprs = append(rh.exprs, typedExprs...)
			rh.columns = append(rh.columns, cols...)
			continue
		}

		// When generating an output column name it should exactly match the original
		// expression, so determine the output column name before we perform any
		// manipulations to the expression.
		outputName := getRenderColName(target)

		desired := parser.NoTypePreference
		if len(desiredTypes) > i {
			desired = desiredTypes[i]
		}

		typedExpr, err := rh.p.analyzeExpr(target.Expr, multiSourceInfo{rh.source}, rh.qvals, desired, false, "")
		if err != nil {
			return returningHelper{}, err
		}
		rh.exprs = append(rh.exprs, typedExpr)
		rh.columns = append(rh.columns, ResultColumn{Name: outputName, Typ: typedExpr.ReturnType()})
	}
	return rh, nil
}
// ColumnsSelectors generates Select expressions for cols. func ColumnsSelectors(cols []ColumnDescriptor) parser.SelectExprs { exprs := make(parser.SelectExprs, len(cols)) colItems := make([]parser.ColumnItem, len(cols)) for i, col := range cols { colItems[i].ColumnName = parser.Name(col.Name) exprs[i].Expr = &colItems[i] } return exprs }
func (desc *TableDescriptor) allColumnsSelector() parser.SelectExprs { exprs := make(parser.SelectExprs, len(desc.Columns)) qnames := make([]parser.QualifiedName, len(desc.Columns)) for i, col := range desc.Columns { qnames[i].Base = parser.Name(col.Name) exprs[i].Expr = &qnames[i] } return exprs }
// ColumnsSelectors generates Select expressions for cols. func ColumnsSelectors(cols []ColumnDescriptor) parser.SelectExprs { exprs := make(parser.SelectExprs, len(cols)) qnames := make([]parser.QualifiedName, len(cols)) for i, col := range cols { qnames[i].Base = parser.Name(col.Name) exprs[i].Expr = &qnames[i] } return exprs }
// Select selects rows from a single table.
// At most one FROM table is supported; with no FROM, only table-free
// render expressions are valid.
func (p *planner) Select(n *parser.Select) (planNode, error) {
	var desc *structured.TableDescriptor

	switch len(n.From) {
	case 0:
		// desc remains nil.
	case 1:
		var err error
		desc, err = p.getAliasedTableDesc(n.From[0])
		if err != nil {
			return nil, err
		}
	default:
		return nil, util.Errorf("TODO(pmattis): unsupported FROM: %s", n.From)
	}

	// Loop over the select expressions and expand them into the expressions
	// we're going to use to generate the returned column set and the names for
	// those columns.
	exprs := make([]parser.Expr, 0, len(n.Exprs))
	columns := make([]string, 0, len(n.Exprs))
	for _, e := range n.Exprs {
		switch t := e.(type) {
		case *parser.StarExpr:
			if desc == nil {
				return nil, fmt.Errorf("* with no tables specified is not valid")
			}
			// Expand '*' into one render expression per table column.
			for _, col := range desc.Columns {
				columns = append(columns, col.Name)
				exprs = append(exprs, &parser.QualifiedName{Base: parser.Name(col.Name)})
			}
		case *parser.NonStarExpr:
			exprs = append(exprs, t.Expr)
			if t.As != "" {
				columns = append(columns, string(t.As))
			} else {
				// TODO(pmattis): Should verify at this point that any referenced
				// columns are represented in the tables being selected from.
				columns = append(columns, t.Expr.String())
			}
		}
	}

	s := &scanNode{
		db:      p.db,
		desc:    desc,
		columns: columns,
		render:  exprs,
	}
	if n.Where != nil {
		s.filter = n.Where.Expr
	}
	return s, nil
}
// Delete deletes rows from a table.
// It selects the affected rows, then issues a DelRange per row covering the
// row's primary-key prefix.
func (p *planner) Delete(n *parser.Delete) (planNode, error) {
	tableDesc, err := p.getAliasedTableDesc(n.Table)
	if err != nil {
		return nil, err
	}

	// TODO(tamird,pmattis): avoid going through Select to avoid encoding
	// and decoding keys. Also, avoiding Select may provide more
	// convenient access to index keys which we are not currently
	// deleting.
	node, err := p.Select(&parser.Select{
		Exprs: parser.SelectExprs{
			&parser.StarExpr{TableName: parser.Name(tableDesc.Name)},
		},
		From:  parser.TableExprs{n.Table},
		Where: n.Where,
	})
	if err != nil {
		return nil, err
	}

	// Map each column ID to its position within a returned row so that the
	// key encoder below can locate the primary-key values.
	colMap := map[uint32]int{}
	for i, name := range node.Columns() {
		c, err := tableDesc.FindColumnByName(name)
		if err != nil {
			return nil, err
		}
		colMap[c.ID] = i
	}

	// NOTE(review): Indexes[0] is assumed to be the primary index here —
	// TODO confirm against this version's descriptor layout.
	index := tableDesc.Indexes[0]
	indexKey := encodeIndexKeyPrefix(tableDesc.ID, index.ID)

	b := client.Batch{}
	for node.Next() {
		if err := node.Err(); err != nil {
			return nil, err
		}
		// TODO(tamird/pmattis): delete the secondary indexes too
		primaryKey, err := encodeIndexKey(index, colMap, node.Values(), indexKey)
		if err != nil {
			return nil, err
		}
		// Delete everything under the row's key prefix.
		rowStartKey := proto.Key(primaryKey)
		b.DelRange(rowStartKey, rowStartKey.PrefixEnd())
	}

	if err := p.db.Run(&b); err != nil {
		return nil, err
	}

	// TODO(tamird/pmattis): return the number of affected rows
	return &valuesNode{}, nil
}
// Set sets session variables.
// Privileges: None.
//   Notes: postgres/mysql do not require privileges for session variables (some exceptions).
func (p *planner) Set(n *parser.Set) (planNode, error) {
	if n.Name == nil {
		// A client has sent the reserved internal syntax SET ROW ...
		// Reject it.
		return nil, errors.New("invalid statement: SET ROW")
	}

	// By using VarName.String() here any variables that are keywords will
	// be double quoted.
	name := strings.ToUpper(n.Name.String())

	// Type-check every value up front; all supported variables take strings.
	typedValues := make([]parser.TypedExpr, len(n.Values))
	for i, expr := range n.Values {
		typedValue, err := parser.TypeCheck(expr, nil, parser.TypeString)
		if err != nil {
			return nil, err
		}
		typedValues[i] = typedValue
	}

	switch name {
	case `DATABASE`:
		dbName, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		if len(dbName) != 0 {
			// Verify database descriptor exists.
			if _, err := p.mustGetDatabaseDesc(dbName); err != nil {
				return nil, err
			}
		}
		p.session.Database = dbName

	case `SYNTAX`:
		s, err := p.getStringVal(name, typedValues)
		if err != nil {
			return nil, err
		}
		// Compare the normalized value against the normalized syntax names.
		switch sqlbase.NormalizeName(parser.Name(s)) {
		case sqlbase.ReNormalizeName(parser.Modern.String()):
			p.session.Syntax = int32(parser.Modern)
		case sqlbase.ReNormalizeName(parser.Traditional.String()):
			p.session.Syntax = int32(parser.Traditional)
		default:
			return nil, fmt.Errorf("%s: \"%s\" is not in (%q, %q)", name, s, parser.Modern, parser.Traditional)
		}

	case `EXTRA_FLOAT_DIGITS`:
		// These settings are sent by the JDBC driver but we silently ignore them.

	default:
		return nil, fmt.Errorf("unknown variable: %q", name)
	}

	return &emptyNode{}, nil
}
func (v *qnameVisitor) getDesc(qname *parser.QualifiedName) *TableDescriptor { if v.desc == nil { return nil } if qname.Base == "" { qname.Base = parser.Name(v.desc.Alias) return v.desc } if equalName(v.desc.Alias, string(qname.Base)) { return v.desc } return nil }
func (p *planner) normalizeTableName(qname *parser.QualifiedName) error { if qname == nil || qname.Base == "" { return fmt.Errorf("empty table name: %s", qname) } if len(qname.Indirect) == 0 { if p.session.Database == "" { return fmt.Errorf("no database specified") } qname.Indirect = append(qname.Indirect, parser.NameIndirection(qname.Base)) qname.Base = parser.Name(p.session.Database) } return nil }
// ExplainPlan reports this node for EXPLAIN output: a name, a description
// listing the source columns, and the source plan plus any subquery plans
// as children. When there is nothing to show and verbosity is off, it
// delegates directly to the source plan.
func (s *selectNode) ExplainPlan(v bool) (name, description string, children []planNode) {
	subplans := []planNode{s.source.plan}
	subplans = s.planner.collectSubqueryPlans(s.filter, subplans)
	for _, e := range s.render {
		subplans = s.planner.collectSubqueryPlans(e, subplans)
	}
	if len(subplans) == 1 && !v {
		// No subqueries and non-verbose: this node adds nothing; show the
		// source plan instead.
		return s.source.plan.ExplainPlan(v)
	}
	var buf bytes.Buffer
	buf.WriteString("from (")
	for i, col := range s.source.info.sourceColumns {
		if i > 0 {
			buf.WriteString(", ")
		}
		if col.hidden {
			// Hidden columns are marked with a leading '*'.
			buf.WriteByte('*')
		}
		parser.Name(s.source.info.findTableAlias(i)).Format(&buf, parser.FmtSimple)
		buf.WriteByte('.')
		parser.Name(col.Name).Format(&buf, parser.FmtSimple)
	}
	buf.WriteByte(')')
	name = "render/filter"
	if s.explain != explainNone {
		name = fmt.Sprintf("%s(%s)", name, explainStrings[s.explain])
	}
	return name, buf.String(), subplans
}
func (p *planner) getTableNames(dbDesc *structured.DatabaseDescriptor) (parser.QualifiedNames, error) { prefix := structured.MakeNameMetadataKey(dbDesc.ID, "") sr, err := p.db.Scan(prefix, prefix.PrefixEnd(), 0) if err != nil { return nil, err } var qualifiedNames parser.QualifiedNames for _, row := range sr { tableName := string(bytes.TrimPrefix(row.Key, prefix)) qualifiedNames = append(qualifiedNames, &parser.QualifiedName{ Base: parser.Name(dbDesc.Name), Indirect: parser.Indirection{parser.NameIndirection(tableName)}, }) } return qualifiedNames, nil }
// handleDatabaseDetails is an endpoint that returns grants and a list of tables for the specified
// database.
func (s *adminServer) handleDatabaseDetails(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	var session sql.Session
	dbname, err := s.extractDatabase(ps)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// TODO(cdo): Use real placeholders for the database name when we've extended our SQL grammar
	// to allow that.
	escDBName := parser.Name(dbname).String()
	query := fmt.Sprintf("SHOW GRANTS ON DATABASE %s; SHOW TABLES FROM %s;", escDBName, escDBName)
	resp := s.sqlExecutor.ExecuteStatements(security.RootUser, &session, query, nil)

	// Map "does not exist" errors to 404 and everything else to 500.
	for _, res := range resp.ResultList {
		if res.PErr != nil {
			if strings.HasSuffix(res.PErr.String(), "does not exist") {
				http.Error(w, res.PErr.String(), http.StatusNotFound)
				return
			}
			s.internalServerErrorf(w, "%s", res.PErr.String())
			return
		}
	}

	// Put the results of the queries in JSON-friendly objects. For grants, we split the comma-
	// separated lists of privileges into proper slices.
	const privilegesKey = "Privileges"
	grants := sqlResultToMaps(resp.ResultList[0])
	for _, grant := range grants {
		privileges := string(grant[privilegesKey].(parser.DString))
		grant[privilegesKey] = strings.Split(privileges, ",")
	}
	tables := firstColumnToSlice(resp.ResultList[1])
	result := map[string]interface{}{
		"Grants": grants,
		"Tables": tables,
	}
	respondAsJSON(w, r, result)
}
// GetTableSpan gets the key span for a SQL table, including any indices. func (ie InternalExecutor) GetTableSpan(user string, txn *client.Txn, dbName, tableName string) (roachpb.Span, error) { // Lookup the table ID. p := makePlanner() p.setTxn(txn) p.session.User = user p.leaseMgr = ie.LeaseManager qname := &parser.QualifiedName{Base: parser.Name(tableName)} if err := qname.NormalizeTableName(dbName); err != nil { return roachpb.Span{}, err } tableID, err := p.getTableID(qname) if err != nil { return roachpb.Span{}, err } // Determine table data span. tablePrefix := keys.MakeTablePrefix(uint32(tableID)) tableStartKey := roachpb.Key(tablePrefix) tableEndKey := tableStartKey.PrefixEnd() return roachpb.Span{Key: tableStartKey, EndKey: tableEndKey}, nil }
// getVirtualDataSource attempts to find a virtual table with the // given name. func (p *planner) getVirtualDataSource(tn *parser.TableName) (planDataSource, bool, error) { virtual, err := getVirtualTableEntry(tn) if err != nil { return planDataSource{}, false, err } if virtual.desc != nil { v, err := virtual.getValuesNode(p) if err != nil { return planDataSource{}, false, err } sourceName := parser.TableName{ TableName: parser.Name(virtual.desc.Name), DatabaseName: tn.DatabaseName, } return planDataSource{ info: newSourceInfoForSingleTable(sourceName, v.Columns()), plan: v, }, true, nil } return planDataSource{}, false, nil }
// ShowTables returns all the tables.
// Privileges: None.
//   Notes: postgres does not have a SHOW TABLES statement.
//          mysql only returns tables you have privileges on.
func (p *planner) ShowTables(n *parser.ShowTables) (planNode, error) {
	// Default to the session's current database when none is named.
	if n.Name == nil {
		if p.session.Database == "" {
			return nil, errNoDatabase
		}
		n.Name = &parser.QualifiedName{Base: parser.Name(p.session.Database)}
	}
	dbDesc, err := p.getDatabaseDesc(n.Name.String())
	if err != nil {
		return nil, err
	}
	tableNames, err := p.getTableNames(dbDesc)
	if err != nil {
		return nil, err
	}
	// Emit one row per table name.
	v := &valuesNode{columns: []string{"Table"}}
	for _, name := range tableNames {
		v.rows = append(v.rows, []parser.Datum{parser.DString(name.Table())})
	}
	return v, nil
}
// ShowTables returns all the tables.
// Table names are read directly from the name metadata keys under the
// database's ID.
func (p *planner) ShowTables(n *parser.ShowTables) (planNode, error) {
	// Default to the session's current database when none is named.
	if n.Name == nil {
		if p.session.Database == "" {
			return nil, errNoDatabase
		}
		n.Name = &parser.QualifiedName{Base: parser.Name(p.session.Database)}
	}
	dbDesc, err := p.getDatabaseDesc(n.Name.String())
	if err != nil {
		return nil, err
	}
	// Scan the name metadata keys; each key suffix is a table name.
	prefix := keys.MakeNameMetadataKey(dbDesc.ID, "")
	sr, err := p.db.Scan(prefix, prefix.PrefixEnd(), 0)
	if err != nil {
		return nil, err
	}
	v := &valuesNode{columns: []string{"Table"}}
	for _, row := range sr {
		name := string(bytes.TrimPrefix(row.Key, prefix))
		v.rows = append(v.rows, []parser.Datum{parser.DString(name)})
	}
	return v, nil
}
// backfillBatch adds to b the KV operations needed to apply the schema
// mutations pending on oldTableDesc: clearing dropped columns via UPDATE,
// deleting dropped indexes by range, and populating newly added indexes by
// scanning the existing rows.
func (p *planner) backfillBatch(b *client.Batch, tableName *parser.QualifiedName, oldTableDesc, newTableDesc *TableDescriptor) error {
	table := &parser.AliasedTableExpr{Expr: tableName}

	// Partition the pending mutations by kind and direction.
	var droppedColumnDescs []ColumnDescriptor
	var droppedIndexDescs []IndexDescriptor
	var newIndexDescs []IndexDescriptor
	for _, m := range oldTableDesc.Mutations {
		switch m.Direction {
		case DescriptorMutation_ADD:
			switch t := m.Descriptor_.(type) {
			case *DescriptorMutation_Column:
				// TODO(vivek): Add column to new columns and use it
				// to fill in default values.
			case *DescriptorMutation_Index:
				newIndexDescs = append(newIndexDescs, *t.Index)
			}
		case DescriptorMutation_DROP:
			switch t := m.Descriptor_.(type) {
			case *DescriptorMutation_Column:
				droppedColumnDescs = append(droppedColumnDescs, *t.Column)
			case *DescriptorMutation_Index:
				droppedIndexDescs = append(droppedIndexDescs, *t.Index)
			}
		}
	}

	if len(droppedColumnDescs) > 0 {
		var updateExprs parser.UpdateExprs
		for _, droppedColumnDesc := range droppedColumnDescs {
			updateExprs = append(updateExprs, &parser.UpdateExpr{
				Names: parser.QualifiedNames{&parser.QualifiedName{Base: parser.Name(droppedColumnDesc.Name)}},
				Expr:  parser.DNull,
			})
		}
		// Run `UPDATE <table> SET col1 = NULL, col2 = NULL, ...` to clear
		// the data stored in the columns being dropped.
		if _, err := p.Update(&parser.Update{
			Table: table,
			Exprs: updateExprs,
		}); err != nil {
			return err
		}
	}

	for _, indexDescriptor := range droppedIndexDescs {
		indexPrefix := MakeIndexKeyPrefix(newTableDesc.ID, indexDescriptor.ID)
		// Delete the index.
		indexStartKey := roachpb.Key(indexPrefix)
		indexEndKey := indexStartKey.PrefixEnd()
		if log.V(2) {
			log.Infof("DelRange %s - %s", prettyKey(indexStartKey, 0), prettyKey(indexEndKey, 0))
		}
		b.DelRange(indexStartKey, indexEndKey)
	}

	if len(newIndexDescs) > 0 {
		// Get all the rows affected.
		// TODO(vivek): Avoid going through Select.
		// TODO(tamird): Support partial indexes?
		rows, err := p.Select(&parser.Select{
			Exprs: parser.SelectExprs{parser.StarSelectExpr()},
			From:  parser.TableExprs{table},
		})
		if err != nil {
			return err
		}

		// Construct a map from column ID to the index the value appears at within a
		// row.
		colIDtoRowIndex, err := makeColIDtoRowIndex(rows, oldTableDesc)
		if err != nil {
			return err
		}

		// TODO(tamird): This will fall down in production use. We need to do
		// something better (see #2036). In particular, this implementation
		// has the following problems:
		// - Very large tables will generate an enormous batch here. This
		//   isn't really a problem in itself except that it will exacerbate
		//   the other issue:
		// - Any non-quiescent table that this runs against will end up with
		//   an inconsistent index. This is because as inserts/updates continue
		//   to roll in behind this operation's read front, the written index
		//   will become incomplete/stale before it's written.
		for rows.Next() {
			rowVals := rows.Values()
			for _, newIndexDesc := range newIndexDescs {
				secondaryIndexEntries, err := encodeSecondaryIndexes(
					oldTableDesc.ID, []IndexDescriptor{newIndexDesc}, colIDtoRowIndex, rowVals)
				if err != nil {
					return err
				}
				for _, secondaryIndexEntry := range secondaryIndexEntries {
					if log.V(2) {
						log.Infof("CPut %s -> %v", prettyKey(secondaryIndexEntry.key, 0), secondaryIndexEntry.value)
					}
					// CPut with nil expected value: fail if an entry already
					// exists at this key.
					b.CPut(secondaryIndexEntry.key, secondaryIndexEntry.value, nil)
				}
			}
		}
		return rows.Err()
	}

	return nil
}