// getTableSpan returns a span stored at a checkpoint idx, or in the absence
// of a checkpoint, the span over all keys within a table.
func (sc *SchemaChanger) getTableSpan(mutationIdx int) (roachpb.Span, error) {
	var tableDesc *sqlbase.TableDescriptor
	if err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		var err error
		tableDesc, err = sqlbase.GetTableDescFromID(txn, sc.tableID)
		return err
	}); err != nil {
		return roachpb.Span{}, err
	}
	// Note: this must be <= rather than < so that mutationIdx == len(Mutations)
	// is rejected instead of panicking on the index expression below.
	if len(tableDesc.Mutations) <= mutationIdx {
		return roachpb.Span{}, errors.Errorf(
			"cannot find idx %d among %d mutations", mutationIdx, len(tableDesc.Mutations))
	}
	if mutationID := tableDesc.Mutations[mutationIdx].MutationID; mutationID != sc.mutationID {
		return roachpb.Span{}, errors.Errorf(
			"mutation index pointing to the wrong schema change, %d vs expected %d",
			mutationID, sc.mutationID)
	}
	resumeSpan := tableDesc.Mutations[mutationIdx].ResumeSpan
	if resumeSpan.Key != nil {
		return resumeSpan, nil
	}
	prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
	return roachpb.Span{
		Key:    prefix,
		EndKey: prefix.PrefixEnd(),
	}, nil
}

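// The fallback span above covers every key of the table because the primary
// index prefix is a prefix of every row key, and PrefixEnd yields the first
// key past all keys sharing that prefix. Below is a minimal, self-contained
// sketch of that idea over plain byte slices; prefixEnd here is a toy
// stand-in for roachpb.Key.PrefixEnd, and the key strings are invented.
package main

import (
	"bytes"
	"fmt"
)

// prefixEnd returns the smallest key that sorts after every key starting
// with prefix: increment the last non-0xff byte and truncate after it.
func prefixEnd(prefix []byte) []byte {
	end := append([]byte(nil), prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] != 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return nil // all bytes were 0xff; no finite upper bound exists
}

func main() {
	prefix := []byte("/Table/51/1") // pretend index key prefix
	end := prefixEnd(prefix)
	inside := []byte("/Table/51/1/somerow")
	outside := []byte("/Table/51/2/somerow")
	fmt.Println(bytes.Compare(inside, prefix) >= 0 && bytes.Compare(inside, end) < 0)   // true
	fmt.Println(bytes.Compare(outside, prefix) >= 0 && bytes.Compare(outside, end) < 0) // false
}
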
// createTableReaders generates a plan consisting of table reader processors,
// one for each node that has spans that we are reading.
// overrideResultColumns is optional.
func (dsp *distSQLPlanner) createTableReaders(
	planCtx *planningCtx, n *scanNode, overrideResultColumns []uint32,
) (physicalPlan, error) {
	spec, err := initTableReaderSpec(n)
	if err != nil {
		return physicalPlan{}, err
	}
	if overrideResultColumns != nil {
		spec.OutputColumns = overrideResultColumns
	} else {
		spec.OutputColumns = getOutputColumnsFromScanNode(n)
	}
	planToStreamColMap := make([]int, len(n.resultColumns))
	for i := range planToStreamColMap {
		planToStreamColMap[i] = -1
	}
	for i, col := range spec.OutputColumns {
		planToStreamColMap[col] = i
	}
	ordering := dsp.convertOrdering(n.ordering.ordering, planToStreamColMap)

	spans := n.spans
	if len(n.spans) == 0 {
		// If no spans were specified, retrieve all of the keys that start with
		// our index key prefix.
		start := roachpb.Key(sqlbase.MakeIndexKeyPrefix(&n.desc, n.index.ID))
		spans = roachpb.Spans{{Key: start, EndKey: start.PrefixEnd()}}
	}

	spanPartitions, err := dsp.partitionSpans(planCtx, spans)
	if err != nil {
		return physicalPlan{}, err
	}

	var p physicalPlan
	for _, sp := range spanPartitions {
		proc := processor{
			node: sp.node,
		}

		tr := &distsql.TableReaderSpec{}
		*tr = spec
		tr.Spans = make([]distsql.TableReaderSpan, len(sp.spans))
		for i := range sp.spans {
			tr.Spans[i].Span = sp.spans[i]
		}

		proc.spec.Core.SetValue(tr)
		proc.spec.Output = make([]distsql.OutputRouterSpec, 1)
		proc.spec.Output[0].Type = distsql.OutputRouterSpec_PASS_THROUGH

		pIdx := p.addProcessor(proc)
		p.resultRouters = append(p.resultRouters, pIdx)
		p.planToStreamColMap = planToStreamColMap
		p.ordering = ordering
	}
	return p, nil
}

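// A minimal sketch (not from the original source) of the planToStreamColMap
// bookkeeping above: the slice translates plan-column indices to
// stream-column indices, with -1 for plan columns the stream does not carry.
// The column count and output set below are invented for illustration.
package main

import "fmt"

func main() {
	const numPlanColumns = 4
	outputColumns := []uint32{2, 0} // the stream carries plan cols 2, then 0

	planToStreamColMap := make([]int, numPlanColumns)
	for i := range planToStreamColMap {
		planToStreamColMap[i] = -1 // default: column absent from the stream
	}
	for streamIdx, planCol := range outputColumns {
		planToStreamColMap[planCol] = streamIdx
	}
	fmt.Println(planToStreamColMap) // [1 -1 0 -1]
}
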
func (tu *tableUpserter) init(txn *client.Txn) error {
	tu.txn = txn
	tu.tableDesc = tu.ri.helper.tableDesc
	tu.indexKeyPrefix = sqlbase.MakeIndexKeyPrefix(tu.tableDesc, tu.tableDesc.PrimaryIndex.ID)

	allColsIdentityExpr := len(tu.ri.insertCols) == len(tu.tableDesc.Columns) &&
		tu.evaler != nil && tu.evaler.isIdentityEvaler()
	if len(tu.tableDesc.Indexes) == 0 && allColsIdentityExpr {
		tu.fastPathBatch = tu.txn.NewBatch()
		tu.fastPathKeys = make(map[string]struct{})
		return nil
	}

	// TODO(dan): This could be made tighter, just the rows needed for the ON
	// CONFLICT exprs.
	requestedCols := tu.tableDesc.Columns

	if len(tu.updateCols) == 0 {
		tu.fetchCols = requestedCols
		tu.fetchColIDtoRowIndex = colIDtoRowIndexFromCols(requestedCols)
	} else {
		var err error
		tu.ru, err = makeRowUpdater(
			txn, tu.tableDesc, tu.fkTables, tu.updateCols, requestedCols, rowUpdaterDefault,
		)
		if err != nil {
			return err
		}
		// tu.ru.fetchCols can also contain columns undergoing mutation.
		tu.fetchCols = tu.ru.fetchCols
		tu.fetchColIDtoRowIndex = tu.ru.fetchColIDtoRowIndex

		tu.updateColIDtoRowIndex = make(map[sqlbase.ColumnID]int)
		for i, updateCol := range tu.ru.updateCols {
			tu.updateColIDtoRowIndex[updateCol.ID] = i
		}
	}

	valNeededForCol := make([]bool, len(tu.fetchCols))
	for i, col := range tu.fetchCols {
		if _, ok := tu.fetchColIDtoRowIndex[col.ID]; ok {
			valNeededForCol[i] = true
		}
	}
	return tu.fetcher.Init(
		tu.tableDesc, tu.fetchColIDtoRowIndex, &tu.tableDesc.PrimaryIndex, false, false,
		tu.fetchCols, valNeededForCol)
}

func makeBaseFKHelper(
	txn *client.Txn,
	otherTables tableLookupsByID,
	writeIdx sqlbase.IndexDescriptor,
	ref sqlbase.ForeignKeyReference,
	colMap map[sqlbase.ColumnID]int,
) (baseFKHelper, error) {
	b := baseFKHelper{txn: txn, writeIdx: writeIdx, searchTable: otherTables[ref.Table].table}
	if b.searchTable == nil {
		return b, errors.Errorf("referenced table %d not in provided table map %+v",
			ref.Table, otherTables)
	}
	b.searchPrefix = sqlbase.MakeIndexKeyPrefix(b.searchTable, ref.Index)
	searchIdx, err := b.searchTable.FindIndexByID(ref.Index)
	if err != nil {
		return b, err
	}
	b.prefixLen = len(searchIdx.ColumnIDs)
	if len(writeIdx.ColumnIDs) < b.prefixLen {
		b.prefixLen = len(writeIdx.ColumnIDs)
	}
	b.searchIdx = searchIdx
	ids := colIDtoRowIndexFromCols(b.searchTable.Columns)
	needed := make([]bool, len(ids))
	for _, i := range searchIdx.ColumnIDs {
		needed[ids[i]] = true
	}
	isSecondary := b.searchTable.PrimaryIndex.ID != searchIdx.ID
	err = b.rf.Init(b.searchTable, ids, searchIdx, false, isSecondary, b.searchTable.Columns, needed)
	if err != nil {
		return b, err
	}

	b.ids = make(map[sqlbase.ColumnID]int, len(writeIdx.ColumnIDs))
	nulls := true
	for i, writeColID := range writeIdx.ColumnIDs[:b.prefixLen] {
		if found, ok := colMap[writeColID]; ok {
			b.ids[searchIdx.ColumnIDs[i]] = found
			nulls = false
		} else if !nulls {
			return b, errors.Errorf("missing value for column %q in multi-part foreign key",
				writeIdx.ColumnNames[i])
		}
	}
	if nulls {
		return b, errSkipUnusedFK
	}
	return b, nil
}

func (td *tableDeleter) deleteAllRowsScan(
	ctx context.Context, resume roachpb.Span, limit int64,
) (roachpb.Span, error) {
	if resume.Key == nil {
		tablePrefix := sqlbase.MakeIndexKeyPrefix(
			td.rd.helper.tableDesc, td.rd.helper.tableDesc.PrimaryIndex.ID)
		resume = roachpb.Span{
			Key:    roachpb.Key(tablePrefix),
			EndKey: roachpb.Key(tablePrefix).PrefixEnd(),
		}
	}
	valNeededForCol := make([]bool, len(td.rd.helper.tableDesc.Columns))
	for _, idx := range td.rd.fetchColIDtoRowIndex {
		valNeededForCol[idx] = true
	}

	var rf sqlbase.RowFetcher
	err := rf.Init(
		td.rd.helper.tableDesc, td.rd.fetchColIDtoRowIndex, &td.rd.helper.tableDesc.PrimaryIndex,
		false, false, td.rd.fetchCols, valNeededForCol)
	if err != nil {
		return resume, err
	}
	if err := rf.StartScan(td.txn, roachpb.Spans{resume}, true /* limit batches */, 0); err != nil {
		return resume, err
	}

	for i := int64(0); i < limit; i++ {
		row, err := rf.NextRowDecoded()
		if err != nil {
			return resume, err
		}
		if row == nil {
			// Done deleting all rows.
			resume = roachpb.Span{}
			break
		}
		_, err = td.row(ctx, row)
		if err != nil {
			return resume, err
		}
	}
	if resume.Key != nil {
		// Update the resume start key for the next iteration.
		resume.Key = rf.Key()
	}
	return resume, td.finalize(ctx)
}

// encodeIndexes encodes the primary and secondary index keys. The
// secondaryIndexEntries are only valid until the next call to encodeIndexes or
// encodeSecondaryIndexes.
func (rh *rowHelper) encodeIndexes(
	colIDtoRowIndex map[sqlbase.ColumnID]int, values []parser.Datum,
) (primaryIndexKey []byte, secondaryIndexEntries []sqlbase.IndexEntry, err error) {
	if rh.primaryIndexKeyPrefix == nil {
		rh.primaryIndexKeyPrefix = sqlbase.MakeIndexKeyPrefix(rh.tableDesc,
			rh.tableDesc.PrimaryIndex.ID)
	}
	primaryIndexKey, _, err = sqlbase.EncodeIndexKey(
		rh.tableDesc, &rh.tableDesc.PrimaryIndex, colIDtoRowIndex, values, rh.primaryIndexKeyPrefix)
	if err != nil {
		return nil, nil, err
	}
	secondaryIndexEntries, err = rh.encodeSecondaryIndexes(colIDtoRowIndex, values)
	if err != nil {
		return nil, nil, err
	}
	return primaryIndexKey, secondaryIndexEntries, nil
}

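// A hedged sketch of the shape of the keys that MakeIndexKeyPrefix and
// EncodeIndexKey produce: an index key is an encoding of (tableID, indexID)
// followed by order-preserving encodings of the indexed column values.
// CockroachDB's util/encoding package uses variable-width order-preserving
// varints; this toy substitutes fixed-width big-endian uint64s, which also
// preserve order, so it is illustrative only.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// appendUint64 appends a big-endian encoding of v; byte-wise comparison of
// the results matches numeric comparison of the inputs.
func appendUint64(b []byte, v uint64) []byte {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], v)
	return append(b, buf[:]...)
}

// toyIndexKeyPrefix mimics the role of MakeIndexKeyPrefix(tableID, indexID).
func toyIndexKeyPrefix(tableID, indexID uint64) []byte {
	return appendUint64(appendUint64(nil, tableID), indexID)
}

func main() {
	prefix := toyIndexKeyPrefix(51, 1)
	key2 := appendUint64(append([]byte(nil), prefix...), 2)   // row with PK 2
	key10 := appendUint64(append([]byte(nil), prefix...), 10) // row with PK 10
	// Encoded key order matches primary-key value order (2 < 10), which is
	// what lets a scan over [prefix, prefixEnd) return rows in PK order.
	fmt.Println(bytes.Compare(key2, key10) < 0) // true
}
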
// initScan sets up the rowFetcher and starts a scan.
func (n *scanNode) initScan() error {
	if len(n.spans) == 0 {
		// If no spans were specified, retrieve all of the keys that start with
		// our index key prefix. This isn't needed for the fetcher, but it is
		// for other external users of n.spans.
		start := roachpb.Key(sqlbase.MakeIndexKeyPrefix(&n.desc, n.index.ID))
		n.spans = append(n.spans, roachpb.Span{Key: start, EndKey: start.PrefixEnd()})
	}

	limitHint := n.limitHint
	if limitHint != 0 && n.limitSoft {
		// Read a multiple of the limit if the limit is "soft".
		limitHint *= 2
	}

	if err := n.fetcher.StartScan(n.p.txn, n.spans, !n.disableBatchLimits, limitHint); err != nil {
		return err
	}

	n.scanInitialized = true
	return nil
}

func TestDropIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const chunkSize = 200
	params, _ := createTestServerParams()
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{
			BackfillChunkSize: chunkSize,
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	numRows := 2*chunkSize + 1
	createKVTable(t, sqlDB, numRows)

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	status, i, err := tableDesc.FindIndexByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if status != sqlbase.DescriptorActive {
		t.Fatal("Index 'foo' is not active.")
	}
	indexPrefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.Indexes[i].ID))

	checkKeyCount(t, kvDB, indexPrefix, numRows)
	if _, err := sqlDB.Exec(`DROP INDEX t.kv@foo`); err != nil {
		t.Fatal(err)
	}
	checkKeyCount(t, kvDB, indexPrefix, 0)

	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	if _, _, err := tableDesc.FindIndexByName("foo"); err == nil {
		t.Fatalf("table descriptor still contains index after index is dropped")
	}
}

func (td *tableDeleter) deleteIndexFast(
	ctx context.Context, idx *sqlbase.IndexDescriptor, resume roachpb.Span, limit int64,
) (roachpb.Span, error) {
	if resume.Key == nil {
		indexPrefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td.rd.helper.tableDesc, idx.ID))
		resume = roachpb.Span{
			Key:    indexPrefix,
			EndKey: indexPrefix.PrefixEnd(),
		}
	}

	if log.V(2) {
		log.Infof(ctx, "DelRange %s - %s", resume.Key, resume.EndKey)
	}
	td.b.DelRange(resume.Key, resume.EndKey, false /* returnKeys */)
	td.b.Header.MaxSpanRequestKeys = limit
	if err := td.finalize(ctx); err != nil {
		return resume, err
	}
	if l := len(td.b.Results); l != 1 {
		panic(fmt.Sprintf("%d results returned, expected 1", l))
	}
	return td.b.Results[0].ResumeSpan, nil
}

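// Both deleteIndexFast above and deleteAllRowsScan earlier share the same
// contract: do up to `limit` work starting at `resume`, and report completion
// with an empty span. A minimal sketch of a caller driving that contract;
// the names Span, chunkFn, and runChunks are inventions of this sketch, not
// part of the original API.
package main

import "fmt"

// Span mirrors the shape of roachpb.Span for the purposes of this sketch.
type Span struct{ Key, EndKey []byte }

// chunkFn is the assumed contract: consume up to limit keys starting at
// resume, returning the span to resume from, or a zero Span when finished.
type chunkFn func(resume Span, limit int64) (Span, error)

func runChunks(f chunkFn, limit int64) error {
	var resume Span
	for {
		next, err := f(resume, limit)
		if err != nil {
			return err
		}
		if next.Key == nil {
			return nil // empty resume span: every chunk has been processed
		}
		resume = next
	}
}

func main() {
	remaining := int64(5) // pretend the index holds five keys
	err := runChunks(func(resume Span, limit int64) (Span, error) {
		if remaining <= limit {
			remaining = 0
			return Span{}, nil // done
		}
		remaining -= limit
		return Span{Key: []byte("next"), EndKey: []byte("end")}, nil
	}, 2)
	fmt.Println("all chunks deleted:", err == nil)
}
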
// restoreTable inserts the given TableDescriptor. If the name conflicts with
// an existing table, the one being restored is rekeyed with a new ID and the
// old data is deleted.
func restoreTable(
	ctx context.Context,
	db client.DB,
	database sqlbase.DatabaseDescriptor,
	table *sqlbase.TableDescriptor,
	ranges []sqlbase.BackupRangeDescriptor,
) error {
	if log.V(1) {
		log.Infof(ctx, "Restoring Table %q", table.Name)
	}

	var newTableID sqlbase.ID
	if err := db.Txn(ctx, func(txn *client.Txn) error {
		// Make sure there's a database with a name that matches the original.
		if _, err := getDescriptorID(txn, tableKey{name: database.Name}); err != nil {
			return errors.Wrapf(err, "a database named %q needs to exist to restore table %q",
				database.Name, table.Name)
		}

		// Assign a new ID for the table. TODO(dan): For now, we're always
		// generating a new ID, but varints get longer as they get bigger and so
		// our keys will, too. We should someday figure out how to overwrite an
		// existing table and steal its ID.
		var err error
		newTableID, err = GenerateUniqueDescID(txn)
		return err
	}); err != nil {
		return err
	}

	// Create the iteration keys before we give the table its new ID.
	tableStartKeyOld := roachpb.Key(sqlbase.MakeIndexKeyPrefix(table, table.PrimaryIndex.ID))
	tableEndKeyOld := tableStartKeyOld.PrefixEnd()

	// This loop makes restoring multiple tables O(N*M), where N is the number
	// of tables and M is the number of ranges. We could reduce this using an
	// interval tree if necessary.
	var wg sync.WaitGroup
	result := struct {
		syncutil.Mutex
		firstErr error
		numErrs  int
	}{}
	for _, rangeDesc := range ranges {
		if len(rangeDesc.Path) == 0 {
			// Empty path means empty range.
			continue
		}

		intersectBegin, intersectEnd := IntersectHalfOpen(
			rangeDesc.StartKey, rangeDesc.EndKey, tableStartKeyOld, tableEndKeyOld)
		if intersectBegin != nil && intersectEnd != nil {
			// Write the data under the new ID.
			// TODO(dan): There's no SQL descriptors that point at this yet, so it
			// should be possible to remove it from the one txn this is all currently
			// run under. If we do that, make sure this data gets cleaned up on errors.
			wg.Add(1)
			go func(desc sqlbase.BackupRangeDescriptor) {
				for r := retry.StartWithCtx(ctx, base.DefaultRetryOptions()); r.Next(); {
					err := db.Txn(ctx, func(txn *client.Txn) error {
						return Ingest(ctx, txn, desc.Path, desc.CRC, intersectBegin,
							intersectEnd, newTableID)
					})
					if _, ok := err.(*client.AutoCommitError); ok {
						log.Errorf(ctx, "auto commit error during ingest: %s", err)
						// TODO(dan): Ingest currently does not rely on the
						// range being empty, but the plan is that it will. When
						// that change happens, this will have to delete any
						// partially ingested data or something.
						continue
					}
					if err != nil {
						log.Errorf(ctx, "%T %s", err, err)
						result.Lock()
						defer result.Unlock()
						// Record only the first error encountered.
						if result.firstErr == nil {
							result.firstErr = err
						}
						result.numErrs++
					}
					break
				}
				wg.Done()
			}(rangeDesc)
		}
	}
	wg.Wait()

	// All concurrent accesses have finished, we don't need the lock anymore.
	if result.firstErr != nil {
		// This leaves the data that did get imported in case the user wants to
		// retry.
		// TODO(dan): Build tooling to allow a user to restart a failed restore.
		return errors.Wrapf(result.firstErr, "ingest encountered %d errors", result.numErrs)
	}

	table.ID = newTableID
	return db.Txn(ctx, func(txn *client.Txn) error {
		// Pass the descriptors by value to keep this idempotent.
		return restoreTableDesc(ctx, txn, database, *table)
	})
}

func TestClusterFlow(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numRows = 100

	args := base.TestClusterArgs{ReplicationMode: base.ReplicationManual}
	tc := serverutils.StartTestCluster(t, 3, args)
	defer tc.Stopper().Stop()

	sumDigitsFn := func(row int) parser.Datum {
		sum := 0
		for row > 0 {
			sum += row % 10
			row /= 10
		}
		return parser.NewDInt(parser.DInt(sum))
	}

	sqlutils.CreateTable(t, tc.ServerConn(0), "t",
		"num INT PRIMARY KEY, digitsum INT, numstr STRING, INDEX s (digitsum)",
		numRows,
		sqlutils.ToRowFn(sqlutils.RowIdxFn, sumDigitsFn, sqlutils.RowEnglishFn))

	kvDB := tc.Server(0).KVClient().(*client.DB)
	desc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(desc, desc.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	// Set up table readers on three hosts feeding data into a join reader on
	// the third host. This is a basic test for the distributed flow
	// infrastructure, including local and remote streams.
	//
	// Note that the ranges won't necessarily be local to the table readers, but
	// that doesn't matter for the purposes of this test.

	// Start a span (useful to look at spans using LightStep).
	sp, err := tracing.JoinOrNew(tracing.NewTracer(), nil, "cluster test")
	if err != nil {
		t.Fatal(err)
	}
	ctx := opentracing.ContextWithSpan(context.Background(), sp)
	defer sp.Finish()

	tr1 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(0, 8)},
	}

	tr2 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(8, 12)},
	}

	tr3 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(12, 100)},
	}

	jr := JoinReaderSpec{
		Table:         *desc,
		OutputColumns: []uint32{2},
	}

	txn := client.NewTxn(ctx, *kvDB)
	fid := FlowID{uuid.MakeV4()}

	req1 := &SetupFlowRequest{Txn: txn.Proto}
	req1.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr1},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{StreamID: 0, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req2 := &SetupFlowRequest{Txn: txn.Proto}
	req2.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr2},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{StreamID: 1, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req3 := &SetupFlowRequest{Txn: txn.Proto}
	req3.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{
			{
				Core: ProcessorCoreUnion{TableReader: &tr3},
				Output: []OutputRouterSpec{{
					Type: OutputRouterSpec_MIRROR,
					Streams: []StreamEndpointSpec{
						{StreamID: StreamID(2)},
					},
				}},
			},
			{
				Input: []InputSyncSpec{{
					Type:     InputSyncSpec_ORDERED,
					Ordering: Ordering{Columns: []Ordering_Column{{1, Ordering_Column_ASC}}},
					Streams: []StreamEndpointSpec{
						{StreamID: 0, Mailbox: &MailboxSpec{}},
						{StreamID: 1, Mailbox: &MailboxSpec{}},
						{StreamID: StreamID(2)},
					},
				}},
				Core: ProcessorCoreUnion{JoinReader: &jr},
				Output: []OutputRouterSpec{{
					Type:    OutputRouterSpec_MIRROR,
					Streams: []StreamEndpointSpec{{Mailbox: &MailboxSpec{SimpleResponse: true}}},
				}},
			},
		},
	}

	if err := SetFlowRequestTrace(ctx, req1); err != nil {
		t.Fatal(err)
	}
	if err := SetFlowRequestTrace(ctx, req2); err != nil {
		t.Fatal(err)
	}
	if err := SetFlowRequestTrace(ctx, req3); err != nil {
		t.Fatal(err)
	}

	var clients []DistSQLClient
	for i := 0; i < 3; i++ {
		s := tc.Server(i)
		conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
		if err != nil {
			t.Fatal(err)
		}
		clients = append(clients, NewDistSQLClient(conn))
	}

	if log.V(1) {
		log.Infof(ctx, "Setting up flow on 0")
	}
	if resp, err := clients[0].SetupFlow(ctx, req1); err != nil {
		t.Fatal(err)
	} else if resp.Error != nil {
		t.Fatal(resp.Error)
	}

	if log.V(1) {
		log.Infof(ctx, "Setting up flow on 1")
	}
	if resp, err := clients[1].SetupFlow(ctx, req2); err != nil {
		t.Fatal(err)
	} else if resp.Error != nil {
		t.Fatal(resp.Error)
	}

	if log.V(1) {
		log.Infof(ctx, "Running flow on 2")
	}
	stream, err := clients[2].RunSimpleFlow(ctx, req3)
	if err != nil {
		t.Fatal(err)
	}

	var decoder StreamDecoder
	var rows sqlbase.EncDatumRows
	for {
		msg, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			t.Fatal(err)
		}
		err = decoder.AddMessage(msg)
		if err != nil {
			t.Fatal(err)
		}
		rows = testGetDecodedRows(t, &decoder, rows)
	}
	if done, trailerErr := decoder.IsDone(); !done {
		t.Fatal("stream not done")
	} else if trailerErr != nil {
		t.Fatal("error in the stream trailer:", trailerErr)
	}

	// The result should be all the numbers in string form, ordered by the
	// digit sum (and then by number).
	var results []string
	for sum := 1; sum <= 50; sum++ {
		for i := 1; i <= numRows; i++ {
			if int(*sumDigitsFn(i).(*parser.DInt)) == sum {
				results = append(results, fmt.Sprintf("['%s']", sqlutils.IntToEnglish(i)))
			}
		}
	}
	expected := strings.Join(results, " ")
	expected = "[" + expected + "]"
	if rowStr := rows.String(); rowStr != expected {
		t.Errorf("Result: %s\n Expected: %s\n", rowStr, expected)
	}
}

// makeIndexJoin builds an index join node.
// This destroys the original table scan node argument and reuses its
// storage to construct a new index scan node. A new table scan node
// is created separately as a member of the resulting index join node.
// The new index scan node is also returned alongside the new index join
// node.
func (p *planner) makeIndexJoin(
	origScan *scanNode, exactPrefix int,
) (resultPlan *indexJoinNode, indexScan *scanNode) {
	// Reuse the input argument's scanNode and its initialized parameters
	// as a starting point to build the new indexScan node.
	indexScan = origScan

	// Create a new scanNode that will be used with the primary index.
	table := p.Scan()
	table.desc = origScan.desc
	table.initDescDefaults(publicColumns)
	table.initOrdering(0)
	table.disableBatchLimit()

	colIDtoRowIndex := map[sqlbase.ColumnID]int{}
	for _, colID := range table.desc.PrimaryIndex.ColumnIDs {
		idx, ok := indexScan.colIdxMap[colID]
		if !ok {
			panic(fmt.Sprintf("Unknown column %d in PrimaryIndex!", colID))
		}
		colIDtoRowIndex[colID] = idx
	}
	for _, colID := range indexScan.index.ColumnIDs {
		idx, ok := indexScan.colIdxMap[colID]
		if !ok {
			panic(fmt.Sprintf("Unknown column %d in index!", colID))
		}
		colIDtoRowIndex[colID] = idx
	}

	// Transfer needed columns set to the table node.
	table.setNeededColumns(origScan.valNeededForCol)

	// For the index node, we need values for columns that are part of the index.
	// TODO(radu): we could reduce this further - we only need the PK columns plus
	// whatever filters may be used by the filter below.
	valNeededIndex := make([]bool, len(origScan.valNeededForCol))
	for _, idx := range colIDtoRowIndex {
		valNeededIndex[idx] = true
	}
	indexScan.setNeededColumns(valNeededIndex)

	if origScan.filter != nil {
		// Transfer the filter to the table node. We must first convert the
		// IndexedVars associated with indexNode.
		convFunc := func(expr parser.VariableExpr) (ok bool, newExpr parser.VariableExpr) {
			iv := expr.(*parser.IndexedVar)
			return true, table.filterVars.IndexedVar(iv.Idx)
		}
		table.filter = exprConvertVars(origScan.filter, convFunc)

		// Now we split the filter by extracting the part that can be evaluated
		// using just the index columns.
		splitFunc := func(expr parser.VariableExpr) (ok bool, newExpr parser.VariableExpr) {
			colIdx := expr.(*parser.IndexedVar).Idx
			if !indexScan.valNeededForCol[colIdx] {
				return false, nil
			}
			return true, indexScan.filterVars.IndexedVar(colIdx)
		}
		indexScan.filter, table.filter = splitFilter(table.filter, splitFunc)
	}
	indexScan.initOrdering(exactPrefix)

	primaryKeyPrefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(&table.desc, table.index.ID))

	return &indexJoinNode{
		index:            indexScan,
		table:            table,
		primaryKeyPrefix: primaryKeyPrefix,
		colIDtoRowIndex:  colIDtoRowIndex,
	}, indexScan
}

// truncateAndBackfillColumnsChunk returns the next key, a done flag, and an
// error. The next key and the done flag are invalid if error != nil. The
// next key is invalid if done is true.
func (sc *SchemaChanger) truncateAndBackfillColumnsChunk(
	added []sqlbase.ColumnDescriptor,
	dropped []sqlbase.ColumnDescriptor,
	defaultExprs []parser.TypedExpr,
	sp roachpb.Span,
	updateValues parser.DTuple,
	nonNullViolationColumnName string,
	chunkSize int64,
	mutationIdx int,
	lastCheckpoint *time.Time,
) (roachpb.Key, bool, error) {
	done := false
	var nextKey roachpb.Key
	err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		if sc.testingKnobs.RunBeforeBackfillChunk != nil {
			if err := sc.testingKnobs.RunBeforeBackfillChunk(sp); err != nil {
				return err
			}
		}
		if sc.testingKnobs.RunAfterBackfillChunk != nil {
			defer sc.testingKnobs.RunAfterBackfillChunk()
		}

		tableDesc, err := sqlbase.GetTableDescFromID(txn, sc.tableID)
		if err != nil {
			return err
		}
		// Short circuit the backfill if the table has been deleted.
		if done = tableDesc.Dropped(); done {
			return nil
		}

		updateCols := append(added, dropped...)
		fkTables := tablesNeededForFKs(*tableDesc, CheckUpdates)
		for k := range fkTables {
			table, err := sqlbase.GetTableDescFromID(txn, k)
			if err != nil {
				return err
			}
			fkTables[k] = tableLookup{table: table}
		}
		// TODO(dan): Tighten up the bound on the requestedCols parameter to
		// makeRowUpdater.
		requestedCols := make([]sqlbase.ColumnDescriptor, 0, len(tableDesc.Columns)+len(added))
		requestedCols = append(requestedCols, tableDesc.Columns...)
		requestedCols = append(requestedCols, added...)
		ru, err := makeRowUpdater(
			txn, tableDesc, fkTables, updateCols, requestedCols, rowUpdaterOnlyColumns,
		)
		if err != nil {
			return err
		}

		// TODO(dan): This check is an unfortunate bleeding of the internals of
		// rowUpdater. Extract the sql row to k/v mapping logic out into something
		// usable here.
		if !ru.isColumnOnlyUpdate() {
			panic("only column data should be modified, but the rowUpdater is configured otherwise")
		}

		// Run a scan across the table using the primary key. Running
		// the scan and applying the changes in many transactions is
		// fine because the schema change is in the correct state to
		// handle intermediate OLTP commands which delete and add
		// values during the scan.
		var rf sqlbase.RowFetcher
		colIDtoRowIndex := colIDtoRowIndexFromCols(tableDesc.Columns)
		valNeededForCol := make([]bool, len(tableDesc.Columns))
		for i := range valNeededForCol {
			_, valNeededForCol[i] = ru.fetchColIDtoRowIndex[tableDesc.Columns[i].ID]
		}
		if err := rf.Init(
			tableDesc, colIDtoRowIndex, &tableDesc.PrimaryIndex, false, false,
			tableDesc.Columns, valNeededForCol,
		); err != nil {
			return err
		}
		if err := rf.StartScan(
			txn, roachpb.Spans{sp}, true /* limit batches */, chunkSize,
		); err != nil {
			return err
		}

		oldValues := make(parser.DTuple, len(ru.fetchCols))
		writeBatch := txn.NewBatch()
		rowLength := 0
		var lastRowSeen parser.DTuple
		i := int64(0)
		for ; i < chunkSize; i++ {
			row, err := rf.NextRow()
			if err != nil {
				return err
			}
			if row == nil {
				break
			}
			lastRowSeen = row
			if nonNullViolationColumnName != "" {
				return sqlbase.NewNonNullViolationError(nonNullViolationColumnName)
			}
			copy(oldValues, row)
			// Update oldValues with NULL values where values weren't found;
			// only update when necessary.
			if rowLength != len(row) {
				rowLength = len(row)
				for j := rowLength; j < len(oldValues); j++ {
					oldValues[j] = parser.DNull
				}
			}
			if _, err := ru.updateRow(txn.Context, writeBatch, oldValues, updateValues); err != nil {
				return err
			}
		}
		if err := txn.Run(writeBatch); err != nil {
			return convertBackfillError(tableDesc, writeBatch)
		}
		if done = i < chunkSize; done {
			return nil
		}
		curIndexKey, _, err := sqlbase.EncodeIndexKey(
			tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, lastRowSeen,
			sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
		if err != nil {
			return err
		}
		resume := roachpb.Span{Key: roachpb.Key(curIndexKey).PrefixEnd(), EndKey: sp.EndKey}
		if err := sc.maybeWriteResumeSpan(txn, tableDesc, resume, mutationIdx, lastCheckpoint); err != nil {
			return err
		}
		nextKey = resume.Key
		return nil
	})
	return nextKey, done, err
}

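// The resume span above starts at PrefixEnd of the last processed row's
// primary-key encoding, so the next chunk begins strictly after every key
// belonging to that row (a row's keys extend its primary-key prefix). A toy
// demonstration with invented byte-slice keys; prefixEnd stands in for
// roachpb.Key.PrefixEnd:
package main

import (
	"bytes"
	"fmt"
)

func prefixEnd(k []byte) []byte {
	end := append([]byte(nil), k...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] != 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return nil
}

func main() {
	lastRowKey := []byte("/Table/51/1/pk7")
	// A column-family key of the same row extends the row's key.
	familyKey := append(append([]byte(nil), lastRowKey...), "/2"...)
	resumeKey := prefixEnd(lastRowKey)
	// The resume key sorts after every key of the last processed row, so the
	// row is never scanned twice across chunks.
	fmt.Println(bytes.Compare(familyKey, resumeKey) < 0) // true
}
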
// mainLoop runs the join reader's main loop and returns any error.
// It does not close the output.
func (jr *joinReader) mainLoop() error {
	primaryKeyPrefix := sqlbase.MakeIndexKeyPrefix(&jr.desc, jr.index.ID)

	var alloc sqlbase.DatumAlloc
	spans := make(roachpb.Spans, 0, joinReaderBatchSize)

	ctx, span := tracing.ChildSpan(jr.ctx, "join reader")
	defer tracing.FinishSpan(span)

	txn := jr.flowCtx.setupTxn(ctx)

	log.VEventf(ctx, 1, "starting (filter: %s)", &jr.filter)
	if log.V(1) {
		defer log.Infof(ctx, "exiting")
	}

	for {
		// TODO(radu): figure out how to send smaller batches if the source has
		// a soft limit (perhaps send the batch out if we don't get a result
		// within a certain amount of time).
		for spans = spans[:0]; len(spans) < joinReaderBatchSize; {
			row, err := jr.input.NextRow()
			if err != nil {
				return err
			}
			if row == nil {
				if len(spans) == 0 {
					return nil
				}
				break
			}
			key, err := jr.generateKey(row, &alloc, primaryKeyPrefix)
			if err != nil {
				return err
			}
			spans = append(spans, roachpb.Span{
				Key:    key,
				EndKey: key.PrefixEnd(),
			})
		}

		err := jr.fetcher.StartScan(txn, spans, false /* no batch limits */, 0)
		if err != nil {
			log.Errorf(ctx, "scan error: %s", err)
			return err
		}

		// TODO(radu): we are consuming all results from a fetch before starting
		// the next batch. We could start the next batch early while we are
		// outputting rows.
		for {
			outRow, err := jr.nextRow()
			if err != nil {
				return err
			}
			if outRow == nil {
				// Done.
				break
			}
			if log.V(3) {
				log.Infof(ctx, "pushing row %s", outRow)
			}
			// Push the row to the output RowReceiver; stop if they don't need more
			// rows.
			if !jr.output.PushRow(outRow) {
				log.VEventf(ctx, 1, "no more rows required")
				return nil
			}
		}

		if len(spans) != joinReaderBatchSize {
			// This was the last batch.
			return nil
		}
	}
}

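// A small sketch of the batching pattern in mainLoop above: point lookups
// are buffered until the batch is full, then issued as a single scan to
// amortize the KV round trip. The batch size and keys below are invented;
// flush stands in for StartScan over the accumulated spans.
package main

import "fmt"

func main() {
	const batchSize = 3 // stands in for joinReaderBatchSize
	input := []string{"k1", "k2", "k3", "k4", "k5"}

	var batch []string
	flush := func() {
		if len(batch) > 0 {
			fmt.Println("one scan over batch:", batch)
			batch = batch[:0]
		}
	}
	for _, key := range input {
		batch = append(batch, key)
		if len(batch) == batchSize {
			flush()
		}
	}
	flush() // final, possibly partial batch (mainLoop's "last batch" case)
}
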
func TestTableReader(t *testing.T) {
	defer leaktest.AfterTest(t)()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// Create a table where each row is:
	//
	//  |    a     |    b     |         sum         |          s          |
	//  |-----------------------------------------------------------------|
	//  | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) |

	aFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row / 10))
	}
	bFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row % 10))
	}
	sumFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row/10 + row%10))
	}

	sqlutils.CreateTable(t, sqlDB, "t",
		"a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)",
		99,
		sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn))

	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	testCases := []struct {
		spec     TableReaderSpec
		expected string
	}{
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "@3 < 5 AND @2 != 3"}, // sum < 5 && b != 3
				OutputColumns: []uint32{0, 1},
			},
			expected: "[[0 1] [0 2] [0 4] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2] [3 0] [3 1] [4 0]]",
		},
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "@3 < 5 AND @2 != 3"},
				OutputColumns: []uint32{3}, // s
				HardLimit:     4,
			},
			expected: "[['one'] ['two'] ['four'] ['one-zero']]",
		},
		{
			spec: TableReaderSpec{
				IndexIdx:      1,
				Reverse:       true,
				Spans:         []TableReaderSpan{makeIndexSpan(4, 6)},
				Filter:        Expression{Expr: "@1 < 3"}, // a < 3
				OutputColumns: []uint32{0, 1},
				SoftLimit:     1,
			},
			expected: "[[2 5] [1 5] [0 5] [2 4] [1 4] [0 4]]",
		},
	}

	for _, c := range testCases {
		ts := c.spec
		ts.Table = *td

		flowCtx := FlowCtx{
			Context:  context.Background(),
			evalCtx:  &parser.EvalContext{},
			txnProto: &roachpb.Transaction{},
			clientDB: kvDB,
		}

		out := &RowBuffer{}
		tr, err := newTableReader(&flowCtx, &ts, out)
		if err != nil {
			t.Fatal(err)
		}
		tr.Run(nil)
		if out.err != nil {
			t.Fatal(out.err)
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}
		if result := out.rows.String(); result != c.expected {
			t.Errorf("invalid results: %s, expected %s", result, c.expected)
		}
	}
}

func TestMakeSpans(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testData := []struct {
		expr         string
		columns      string
		expectedAsc  string
		expectedDesc string
	}{
		{`a = 1`, `a`, `/1-/2`, `/1-/0`},
		{`a != 1`, `a`, `/#-`, `-/#`},
		{`a > 1`, `a`, `/2-`, `-/1`},
		{`a >= 1`, `a`, `/1-`, `-/0`},
		{`a < 1`, `a`, `/#-/1`, `/0-/#`},
		{`a <= 1`, `a`, `/#-/2`, `/1-/#`},
		{`a IS NULL`, `a`, `-/#`, `/NULL-`},
		{`a IS NOT NULL`, `a`, `/#-`, `-/#`},

		{`a IN (1,2,3)`, `a`, `/1-/4`, `/3-/0`},
		{`a IN (1,3,5)`, `a`, `/1-/2 /3-/4 /5-/6`, `/5-/4 /3-/2 /1-/0`},
		{`a IN (1,2,3) AND b = 1`, `a,b`,
			`/1/1-/1/2 /2/1-/2/2 /3/1-/3/2`, `/3/1-/3/0 /2/1-/2/0 /1/1-/1/0`},
		{`a = 1 AND b IN (1,2,3)`, `a,b`, `/1/1-/1/4`, `/1/3-/1/0`},
		{`a = 1 AND b IN (1,3,5)`, `a,b`,
			`/1/1-/1/2 /1/3-/1/4 /1/5-/1/6`, `/1/5-/1/4 /1/3-/1/2 /1/1-/1/0`},
		{`a >= 1 AND b IN (1,2,3)`, `a,b`, `/1-`, `-/0`},
		{`a <= 1 AND b IN (1,2,3)`, `a,b`, `/#-/2`, `/1-/#`},
		{`(a, b) IN ((1, 2), (3, 4))`, `a,b`, `/1/2-/1/3 /3/4-/3/5`, `/3/4-/3/3 /1/2-/1/1`},
		{`(b, a) IN ((1, 2), (3, 4))`, `a,b`, `/2/1-/2/2 /4/3-/4/4`, `/4/3-/4/2 /2/1-/2/0`},
		{`(a, b) IN ((1, 2), (3, 4))`, `b`, `/2-/3 /4-/5`, `/4-/3 /2-/1`},

		{`a = 1 AND b = 1`, `a,b`, `/1/1-/1/2`, `/1/1-/1/0`},
		{`a = 1 AND b != 1`, `a,b`, `/1/#-/2`, `/1-/1/#`},
		{`a = 1 AND b > 1`, `a,b`, `/1/2-/2`, `/1-/1/1`},
		{`a = 1 AND b >= 1`, `a,b`, `/1/1-/2`, `/1-/1/0`},
		{`a = 1 AND b < 1`, `a,b`, `/1/#-/1/1`, `/1/0-/1/#`},
		{`a = 1 AND b <= 1`, `a,b`, `/1/#-/1/2`, `/1/1-/1/#`},
		{`a = 1 AND b IS NULL`, `a,b`, `/1-/1/#`, `/1/NULL-/0`},
		{`a = 1 AND b IS NOT NULL`, `a,b`, `/1/#-/2`, `/1-/1/#`},

		{`a != 1 AND b = 1`, `a,b`, `/#-`, `-/#`},
		{`a != 1 AND b != 1`, `a,b`, `/#-`, `-/#`},
		{`a != 1 AND b > 1`, `a,b`, `/#-`, `-/#`},
		{`a != 1 AND b >= 1`, `a,b`, `/#-`, `-/#`},
		{`a != 1 AND b < 1`, `a,b`, `/#-`, `-/#`},
		{`a != 1 AND b <= 1`, `a,b`, `/#-`, `-/#`},
		{`a != 1 AND b IS NULL`, `a,b`, `/#-`, `-/#`},
		{`a != 1 AND b IS NOT NULL`, `a,b`, `/#-`, `-/#`},

		{`a > 1 AND b = 1`, `a,b`, `/2/1-`, `-/2/0`},
		{`a > 1 AND b != 1`, `a,b`, `/2/#-`, `-/2/#`},
		{`a > 1 AND b > 1`, `a,b`, `/2/2-`, `-/2/1`},
		{`a > 1 AND b >= 1`, `a,b`, `/2/1-`, `-/2/0`},
		{`a > 1 AND b < 1`, `a,b`, `/2-`, `-/1`},
		{`a > 1 AND b <= 1`, `a,b`, `/2-`, `-/1`},
		{`a > 1 AND b IS NULL`, `a,b`, `/2-`, `-/1`},
		{`a > 1 AND b IS NOT NULL`, `a,b`, `/2/#-`, `-/2/#`},

		{`a >= 1 AND b = 1`, `a,b`, `/1/1-`, `-/1/0`},
		{`a >= 1 AND b != 1`, `a,b`, `/1/#-`, `-/1/#`},
		{`a >= 1 AND b > 1`, `a,b`, `/1/2-`, `-/1/1`},
		{`a >= 1 AND b >= 1`, `a,b`, `/1/1-`, `-/1/0`},
		{`a >= 1 AND b < 1`, `a,b`, `/1-`, `-/0`},
		{`a >= 1 AND b <= 1`, `a,b`, `/1-`, `-/0`},
		{`a >= 1 AND b IS NULL`, `a,b`, `/1-`, `-/0`},
		{`a >= 1 AND b IS NOT NULL`, `a,b`, `/1/#-`, `-/1/#`},

		{`a < 1 AND b = 1`, `a,b`, `/#-/0/2`, `/0/1-/#`},
		{`a < 1 AND b != 1`, `a,b`, `/#-/1`, `/0-/#`},
		{`a < 1 AND b > 1`, `a,b`, `/#-/1`, `/0-/#`},
		{`a < 1 AND b >= 1`, `a,b`, `/#-/1`, `/0-/#`},
		{`a < 1 AND b < 1`, `a,b`, `/#-/0/1`, `/0/0-/#`},
		{`a < 1 AND b <= 1`, `a,b`, `/#-/0/2`, `/0/1-/#`},
		{`a < 1 AND b IS NULL`, `a,b`, `/#-/0/#`, `/0/NULL-/#`},
		{`a < 1 AND b IS NOT NULL`, `a,b`, `/#-/1`, `/0-/#`},

		{`a <= 1 AND b = 1`, `a,b`, `/#-/1/2`, `/1/1-/#`},
		{`a <= 1 AND b != 1`, `a,b`, `/#-/2`, `/1-/#`},
		{`a <= 1 AND b > 1`, `a,b`, `/#-/2`, `/1-/#`},
		{`a <= 1 AND b >= 1`, `a,b`, `/#-/2`, `/1-/#`},
		{`a <= 1 AND b < 1`, `a,b`, `/#-/1/1`, `/1/0-/#`},
		{`a <= 1 AND b <= 1`, `a,b`, `/#-/1/2`, `/1/1-/#`},
		{`a <= 1 AND b IS NULL`, `a,b`, `/#-/1/#`, `/1/NULL-/#`},
		{`a <= 1 AND b IS NOT NULL`, `a,b`, `/#-/2`, `/1-/#`},

		{`a IN (1) AND b = 1`, `a,b`, `/1/1-/1/2`, `/1/1-/1/0`},
		{`a IN (1) AND b != 1`, `a,b`, `/1/#-/2`, `/1-/1/#`},
		{`a IN (1) AND b > 1`, `a,b`, `/1/2-/2`, `/1-/1/1`},
		{`a IN (1) AND b >= 1`, `a,b`, `/1/1-/2`, `/1-/1/0`},
		{`a IN (1) AND b < 1`, `a,b`, `/1/#-/1/1`, `/1/0-/1/#`},
		{`a IN (1) AND b <= 1`, `a,b`, `/1/#-/1/2`, `/1/1-/1/#`},
		{`a IN (1) AND b IS NULL`, `a,b`, `/1-/1/#`, `/1/NULL-/0`},
		{`a IN (1) AND b IS NOT NULL`, `a,b`, `/1/#-/2`, `/1-/1/#`},

		{`(a, b) = (1, 2)`, `a`, `/1-/2`, `/1-/0`},
		{`(a, b) = (1, 2)`, `a,b`, `/1/2-/1/3`, `/1/2-/1/1`},

		{`a > 1 OR a >= 5`, `a`, `/2-`, `-/1`},
		{`a < 5 OR a >= 1`, `a`, `/#-`, `-/#`},
		{`a < 1 OR a >= 5`, `a`, `/#-/1 /5-`, `-/4 /0-/#`},
		{`a = 1 OR a > 8`, `a`, `/1-/2 /9-`, `-/8 /1-/0`},
		{`a = 8 OR a > 1`, `a`, `/2-`, `-/1`},
		{`a < 1 OR a = 5 OR a > 8`, `a`, `/#-/1 /5-/6 /9-`, `-/8 /5-/4 /0-/#`},
		{`a < 8 OR a = 8 OR a > 8`, `a`, `/#-`, `-/#`},

		{`(a = 1 AND b = 5) OR (a = 3 AND b = 7)`, `a`, `/1-/2 /3-/4`, `/3-/2 /1-/0`},
		{`(a = 1 AND b = 5) OR (a = 3 AND b = 7)`, `b`, `/5-/6 /7-/8`, `/7-/6 /5-/4`},
		{`(a = 1 AND b = 5) OR (a = 3 AND b = 7)`, `a,b`,
			`/1/5-/1/6 /3/7-/3/8`, `/3/7-/3/6 /1/5-/1/4`},

		{`(a = 1 AND b < 5) OR (a = 3 AND b > 7)`, `a`, `/1-/2 /3-/4`, `/3-/2 /1-/0`},
		{`(a = 1 AND b < 5) OR (a = 3 AND b > 7)`, `b`, `/#-/5 /8-`, `-/7 /4-/#`},
		{`(a = 1 AND b < 5) OR (a = 3 AND b > 7)`, `a,b`,
			`/1/#-/1/5 /3/8-/4`, `/3-/3/7 /1/4-/1/#`},

		{`(a = 1 AND b > 5) OR (a = 3 AND b > 7)`, `a`, `/1-/2 /3-/4`, `/3-/2 /1-/0`},
		{`(a = 1 AND b > 5) OR (a = 3 AND b > 7)`, `b`, `/6-`, `-/5`},
		{`(a = 1 AND b > 5) OR (a = 3 AND b > 7)`, `a,b`,
			`/1/6-/2 /3/8-/4`, `/3-/3/7 /1-/1/5`},

		{`(a = 1 AND b > 5) OR (a = 3 AND b < 7)`, `a`, `/1-/2 /3-/4`, `/3-/2 /1-/0`},
		{`(a = 1 AND b > 5) OR (a = 3 AND b < 7)`, `b`, `/#-`, `-/#`},
		{`(a = 1 AND b > 5) OR (a = 3 AND b < 7)`, `a,b`,
			`/1/6-/2 /3/#-/3/7`, `/3/6-/3/# /1-/1/5`},

		{`(a < 1 AND b < 5) OR (a > 3 AND b > 7)`, `a`, `/#-/1 /4-`, `-/3 /0-/#`},
		{`(a < 1 AND b < 5) OR (a > 3 AND b > 7)`, `b`, `/#-/5 /8-`, `-/7 /4-/#`},
		{`(a < 1 AND b < 5) OR (a > 3 AND b > 7)`, `a,b`,
			`/#-/0/5 /4/8-`, `-/4/7 /0/4-/#`},

		{`(a > 3 AND b < 5) OR (a < 1 AND b > 7)`, `a`, `/#-/1 /4-`, `-/3 /0-/#`},
		{`(a > 3 AND b < 5) OR (a < 1 AND b > 7)`, `b`, `/#-/5 /8-`, `-/7 /4-/#`},
		{`(a > 3 AND b < 5) OR (a < 1 AND b > 7)`, `a,b`, `/#-/1 /4-`, `-/3 /0-/#`},

		{`(a > 1 AND b < 5) OR (a < 3 AND b > 7)`, `a`, `/#-`, `-/#`},
		{`(a > 1 AND b < 5) OR (a < 3 AND b > 7)`, `b`, `/#-/5 /8-`, `-/7 /4-/#`},
		{`(a > 1 AND b < 5) OR (a < 3 AND b > 7)`, `a,b`, `/#-`, `-/#`},

		{`(a = 5) OR (a, b) IN ((1, 1), (3, 3))`, `a`,
			`/1-/2 /3-/4 /5-/6`, `/5-/4 /3-/2 /1-/0`},
		{`(a = 5) OR (a, b) IN ((1, 1), (3, 3))`, `b`, `-`, `-`},
		{`(a = 5) OR (a, b) IN ((1, 1), (3, 3))`, `a,b`,
			`/1/1-/1/2 /3/3-/3/4 /5-/6`, `/5-/4 /3/3-/3/2 /1/1-/1/0`},

		// When encoding an end constraint for a maximal datum, we use
		// bytes.PrefixEnd() to go beyond the normal encodings of that datatype.
		{fmt.Sprintf(`a = %d`, math.MaxInt64), `a`,
			`/9223372036854775807-/<varint 9223372036854775808 overflows int64>`,
			`/9223372036854775807-/9223372036854775806`},
		{fmt.Sprintf(`a = %d`, math.MinInt64), `a`,
			`/-9223372036854775808-/-9223372036854775807`,
			`/-9223372036854775808-/<varint 9223372036854775808 overflows int64>`},

		{`(a, b) >= (1, 4)`, `a,b`, `/1/4-`, `-/1/3`},
		{`(a, b) > (1, 4)`, `a,b`, `/1/5-`, `-/1/4`},
		{`(a, b) < (1, 4)`, `a,b`, `/#-/1/4`, `/1/3-/#`},
		{`(a, b) <= (1, 4)`, `a,b`, `/#-/1/5`, `/1/4-/#`},
		{`(a, b) = (1, 4)`, `a,b`, `/1/4-/1/5`, `/1/4-/1/3`},
		{`(a, b) != (1, 4)`, `a,b`, `/#-`, `-/#`},
	}
	for _, d := range testData {
		for _, dir := range []encoding.Direction{encoding.Ascending, encoding.Descending} {
			var expected string
			if dir == encoding.Ascending {
				expected = d.expectedAsc
			} else {
				expected = d.expectedDesc
			}
			t.Run(d.expr+"~"+expected, func(t *testing.T) {
				sel := makeSelectNode(t)
				columns := strings.Split(d.columns, ",")
				dirs := make([]encoding.Direction, 0, len(columns))
				for range columns {
					dirs = append(dirs, dir)
				}
				desc, index := makeTestIndex(t, columns, dirs)
				constraints, _ := makeConstraints(t, d.expr, desc, index, sel)
				spans := makeSpans(constraints, desc, index)
				s := sqlbase.PrettySpans(spans, 2)
				s = keys.MassagePrettyPrintedSpanForTest(s, indexToDirs(index))
				if expected != s {
					t.Errorf("[index direction: %d] %s: expected %s, but found %s",
						dir, d.expr, expected, s)
				}
			})
		}
	}

	// Test indexes with mixed directions (some cols Asc, some cols Desc) and
	// other edge cases.
	testData2 := []struct {
		expr     string
		columns  string
		expected string
	}{
		{`a = 1 AND b = 5`, `a,b-,c`, `/1/5-/1/4`},
		{`a = 7 AND b IN (1,2,3) AND c = false`, `a,b-,c`,
			`/7/3/0-/7/3/1 /7/2/0-/7/2/1 /7/1/0-/7/1/1`},
		// Test different directions for the columns inside a tuple.
		{`(a,b,j) IN ((1,2,3), (4,5,6))`, `a-,b,j-`, `/4/5/6-/4/5/5 /1/2/3-/1/2/2`},
		{`k = b'\xff'`, `k`, `/"\xff"-/"\xff\x00"`},
		// Test that limits on bytes work correctly: when encoding a descending
		// limit for bytes, we need to go outside the bytes encoding.
		// "\xaa" is encoded as [bytesDescMarker, ^0xaa, <term escape sequence>]
		{`k = b'\xaa'`, `k-`,
			fmt.Sprintf("raw:%c%c\xff\xfe-%c%c\xff\xff",
				encoding.BytesDescMarker, ^byte(0xaa), encoding.BytesDescMarker, ^byte(0xaa))},

		// Ensure tuples with differing index directions aren't constrained.
		// TODO(mjibson): fix this, see #6346
		{`(a, b) >= (1, 4)`, `a-,b`, `-`},
		{`(a, b) >= (1, 4)`, `a,b-`, `-`},
	}
	for _, d := range testData2 {
		t.Run(d.expr+"~"+d.expected, func(t *testing.T) {
			sel := makeSelectNode(t)
			desc, index := makeTestIndexFromStr(t, d.columns)
			constraints, _ := makeConstraints(t, d.expr, desc, index, sel)
			spans := makeSpans(constraints, desc, index)
			var got string
			raw := false
			if strings.HasPrefix(d.expected, "raw:") {
				raw = true
				span := spans[0]
				d.expected = d.expected[4:]
				// Trim the index prefix from the span.
				prefix := string(sqlbase.MakeIndexKeyPrefix(desc, index.ID))
				got = strings.TrimPrefix(string(span.Key), prefix) + "-" +
					strings.TrimPrefix(string(span.EndKey), prefix)
			} else {
				got = keys.MassagePrettyPrintedSpanForTest(sqlbase.PrettySpans(spans, 2),
					indexToDirs(index))
			}
			if d.expected != got {
				if !raw {
					t.Errorf("%s: expected %s, but found %s", d.expr, d.expected, got)
				} else {
					t.Errorf("%s: expected %# x, but found %# x", d.expr, []byte(d.expected), got)
				}
			}
		})
	}
}

// makeSpansForIndexConstraints constructs the spans for an index given an
// instance of indexConstraints. The resulting spans are non-overlapping (by
// virtue of the input constraints being disjoint).
func makeSpansForIndexConstraints(
	constraints indexConstraints, tableDesc *sqlbase.TableDescriptor, index *sqlbase.IndexDescriptor,
) roachpb.Spans {
	prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, index.ID))
	// We have one constraint per column, so each contributes something
	// to the start and/or the end key of the span.
	// But we also have (...) IN <tuple> constraints that span multiple columns.
	// These constraints split each span, and that's how we can end up with
	// multiple spans.
	resultSpans := roachpb.Spans{{
		Key:    append(roachpb.Key(nil), prefix...),
		EndKey: append(roachpb.Key(nil), prefix...),
	}}

	colIdx := 0
	for i, c := range constraints {
		// We perform special processing on the last end constraint to account for
		// the exclusive nature of the scan end key.
		lastEnd := (c.end != nil) &&
			(i+1 == len(constraints) || constraints[i+1].end == nil)

		// IN is handled separately.
		if (c.start != nil && c.start.Operator == parser.In) ||
			(c.end != nil && c.end.Operator == parser.In) {
			resultSpans = applyInConstraint(resultSpans, c, colIdx, index, lastEnd)
		} else {
			dir, err := index.ColumnDirections[colIdx].ToEncodingDirection()
			if err != nil {
				panic(err)
			}
			if c.start != nil {
				if dir == encoding.Ascending {
					encodeStartConstraintAscending(resultSpans, c.start)
				} else {
					encodeStartConstraintDescending(resultSpans, c.start)
				}
			}
			if c.end != nil {
				if dir == encoding.Ascending {
					encodeEndConstraintAscending(resultSpans, c.end, lastEnd)
				} else {
					encodeEndConstraintDescending(resultSpans, c.end, lastEnd)
				}
			}
		}
		colIdx += c.numColumns()
	}

	// If we had no end constraints, make it so that we scan the whole index.
	if len(constraints) == 0 || constraints[0].end == nil {
		for i := range resultSpans {
			resultSpans[i].EndKey = resultSpans[i].EndKey.PrefixEnd()
		}
	}

	// Remove any spans which are empty. This can happen for constraints such as
	// "a > 1 AND a < 2" which we do not simplify to false but which is treated
	// as "a >= 2 AND a < 2" for span generation.
	n := 0
	for _, s := range resultSpans {
		if bytes.Compare(s.Key, s.EndKey) < 0 {
			resultSpans[n] = s
			n++
		}
	}
	return resultSpans[:n]
}

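// The final loop in makeSpansForIndexConstraints is Go's standard in-place
// filter idiom: keep a write index n, copy forward only the elements that
// pass the predicate, then truncate. A standalone sketch with toy spans:
package main

import "fmt"

type span struct{ key, endKey string }

func main() {
	spans := []span{{"a", "b"}, {"c", "c"}, {"d", "e"}} // {"c","c"} is empty
	n := 0
	for _, s := range spans {
		if s.key < s.endKey { // drop spans where Key >= EndKey
			spans[n] = s
			n++
		}
	}
	spans = spans[:n]
	fmt.Println(spans) // [{a b} {d e}]
}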