// deleteAllRowsScan scans the primary index and deletes up to limit rows,
// starting at the resume span (or at the beginning of the table if the
// resume span is empty). It returns the span to resume from on the next
// call, or an empty span once all rows have been deleted.
func (td *tableDeleter) deleteAllRowsScan(
	ctx context.Context, resume roachpb.Span, limit int64,
) (roachpb.Span, error) {
	if resume.Key == nil {
		tablePrefix := sqlbase.MakeIndexKeyPrefix(
			td.rd.helper.tableDesc, td.rd.helper.tableDesc.PrimaryIndex.ID)
		resume = roachpb.Span{
			Key:    roachpb.Key(tablePrefix),
			EndKey: roachpb.Key(tablePrefix).PrefixEnd(),
		}
	}

	valNeededForCol := make([]bool, len(td.rd.helper.tableDesc.Columns))
	for _, idx := range td.rd.fetchColIDtoRowIndex {
		valNeededForCol[idx] = true
	}

	var rf sqlbase.RowFetcher
	err := rf.Init(
		td.rd.helper.tableDesc,
		td.rd.fetchColIDtoRowIndex,
		&td.rd.helper.tableDesc.PrimaryIndex,
		false, /* reverse */
		false, /* isSecondaryIndex */
		td.rd.fetchCols,
		valNeededForCol,
	)
	if err != nil {
		return resume, err
	}
	if err := rf.StartScan(td.txn, roachpb.Spans{resume}, true /* limit batches */, 0); err != nil {
		return resume, err
	}

	for i := int64(0); i < limit; i++ {
		row, err := rf.NextRowDecoded()
		if err != nil {
			return resume, err
		}
		if row == nil {
			// Done deleting all rows.
			resume = roachpb.Span{}
			break
		}
		_, err = td.row(ctx, row)
		if err != nil {
			return resume, err
		}
	}
	if resume.Key != nil {
		// Update the resume start key for the next iteration.
		resume.Key = rf.Key()
	}
	return resume, td.finalize(ctx)
}
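// A minimal sketch (not part of the actual schema-change code path) of how
// a caller might drive deleteAllRowsScan in chunks: keep passing the
// returned resume span back in until it comes back empty. The helper name
// and chunk size are illustrative assumptions.
func deleteAllRowsInChunks(ctx context.Context, td *tableDeleter) error {
	const chunkSize = 1000 // hypothetical batch size
	var resume roachpb.Span // zero value: start at the beginning of the table
	for {
		var err error
		resume, err = td.deleteAllRowsScan(ctx, resume, chunkSize)
		if err != nil {
			return err
		}
		if resume.Key == nil {
			// An empty resume span means the scan ran out of rows.
			return nil
		}
	}
}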
// partitionSpans finds out which nodes are owners for ranges touching the
// given spans, and splits the spans according to owning nodes. The result is
// a set of spanPartitions (one for each relevant node), which form a
// partitioning of the spans (i.e. they are non-overlapping and their union is
// exactly the original set of spans).
func (dsp *distSQLPlanner) partitionSpans(
	planCtx *planningCtx, spans roachpb.Spans,
) ([]spanPartition, error) {
	if len(spans) == 0 {
		panic("no spans")
	}
	ctx := planCtx.ctx
	splits := make([]spanPartition, 0, 1)
	// nodeMap maps a nodeID to an index inside the splits array.
	nodeMap := make(map[roachpb.NodeID]int)
	it := planCtx.spanIter
	for _, span := range spans {
		var rspan roachpb.RSpan
		var err error
		if rspan.Key, err = keys.Addr(span.Key); err != nil {
			return nil, err
		}
		if rspan.EndKey, err = keys.Addr(span.EndKey); err != nil {
			return nil, err
		}

		var lastNodeID roachpb.NodeID
		for it.Seek(ctx, span, kv.Ascending); ; it.Next(ctx) {
			if !it.Valid() {
				return nil, it.Error()
			}
			replInfo, err := it.ReplicaInfo(ctx)
			if err != nil {
				return nil, err
			}
			desc := it.Desc()

			// Trim the range's span to the part that overlaps the query span.
			var trimmedSpan roachpb.Span
			if rspan.Key.Less(desc.StartKey) {
				trimmedSpan.Key = desc.StartKey.AsRawKey()
			} else {
				trimmedSpan.Key = span.Key
			}
			if desc.EndKey.Less(rspan.EndKey) {
				trimmedSpan.EndKey = desc.EndKey.AsRawKey()
			} else {
				trimmedSpan.EndKey = span.EndKey
			}

			nodeID := replInfo.NodeDesc.NodeID
			idx, ok := nodeMap[nodeID]
			if !ok {
				idx = len(splits)
				splits = append(splits, spanPartition{node: nodeID})
				nodeMap[nodeID] = idx
				if _, ok := planCtx.nodeAddresses[nodeID]; !ok {
					planCtx.nodeAddresses[nodeID] = replInfo.NodeDesc.Address.String()
				}
			}
			split := &splits[idx]

			if lastNodeID == nodeID {
				// Two consecutive ranges on the same node, merge the spans.
				if !split.spans[len(split.spans)-1].EndKey.Equal(trimmedSpan.Key) {
					log.Fatalf(ctx, "expected consecutive span pieces %v %v", split.spans, trimmedSpan)
				}
				split.spans[len(split.spans)-1].EndKey = trimmedSpan.EndKey
			} else {
				split.spans = append(split.spans, trimmedSpan)
			}

			lastNodeID = nodeID
			if !it.NeedAnother() {
				break
			}
		}
	}
	return splits, nil
}
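// A minimal sketch of how a planner might consume partitionSpans; the
// surrounding planning plumbing is assumed, and only the names used above
// are real. Each spanPartition would typically back one table reader
// scheduled on the owning node.
func sketchPartitionUsage(dsp *distSQLPlanner, planCtx *planningCtx, spans roachpb.Spans) error {
	partitions, err := dsp.partitionSpans(planCtx, spans)
	if err != nil {
		return err
	}
	for _, p := range partitions {
		// Within a partition, consecutive pieces owned by the same node have
		// already been merged, so p.spans is as coarse as possible.
		log.Infof(planCtx.ctx, "node %d scans %v", p.node, p.spans)
	}
	return nil
}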
func TestTableReader(t *testing.T) {
	defer leaktest.AfterTest(t)()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// Create a table where each row is:
	//
	//  | a        | b        | sum                 | s                   |
	//  |----------|----------|---------------------|---------------------|
	//  | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) |

	aFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row / 10))
	}
	bFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row % 10))
	}
	sumFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row/10 + row%10))
	}

	sqlutils.CreateTable(t, sqlDB, "t",
		"a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)",
		99,
		sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn))

	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	testCases := []struct {
		spec     TableReaderSpec
		expected string
	}{
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "@3 < 5 AND @2 != 3"}, // sum < 5 && b != 3
				OutputColumns: []uint32{0, 1},                         // a, b
			},
			expected: "[[0 1] [0 2] [0 4] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2] [3 0] [3 1] [4 0]]",
		},
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "@3 < 5 AND @2 != 3"},
				OutputColumns: []uint32{3}, // s
				HardLimit:     4,
			},
			expected: "[['one'] ['two'] ['four'] ['one-zero']]",
		},
		{
			spec: TableReaderSpec{
				IndexIdx:      1,
				Reverse:       true,
				Spans:         []TableReaderSpan{makeIndexSpan(4, 6)},
				Filter:        Expression{Expr: "@1 < 3"}, // a < 3
				OutputColumns: []uint32{0, 1},
				SoftLimit:     1,
			},
			expected: "[[2 5] [1 5] [0 5] [2 4] [1 4] [0 4]]",
		},
	}

	for _, c := range testCases {
		ts := c.spec
		ts.Table = *td

		flowCtx := FlowCtx{
			Context:  context.Background(),
			evalCtx:  &parser.EvalContext{},
			txnProto: &roachpb.Transaction{},
			clientDB: kvDB,
		}

		out := &RowBuffer{}
		tr, err := newTableReader(&flowCtx, &ts, out)
		if err != nil {
			t.Fatal(err)
		}
		tr.Run(nil)
		if out.err != nil {
			t.Fatal(out.err)
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}
		if result := out.rows.String(); result != c.expected {
			t.Errorf("invalid results: %s, expected %s", result, c.expected)
		}
	}
}
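// A minimal sketch of what makeIndexSpan above builds, with the slice
// aliasing spelled out: both endpoints are copies of the index prefix with a
// varint-encoded value appended, so [start, end) covers all entries of index
// bs whose first column falls in that range. The helper name and the values
// 4 and 6 are illustrative.
func sketchIndexSpan(td *sqlbase.TableDescriptor) roachpb.Span {
	prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.Indexes[0].ID))
	var span roachpb.Span
	// Copy the prefix before appending so the two endpoint keys don't share
	// a backing array.
	span.Key = encoding.EncodeVarintAscending(append([]byte(nil), prefix...), 4)
	span.EndKey = encoding.EncodeVarintAscending(append([]byte(nil), prefix...), 6)
	return span
}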
func TestClusterFlow(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numRows = 100

	args := base.TestClusterArgs{ReplicationMode: base.ReplicationManual}
	tc := serverutils.StartTestCluster(t, 3, args)
	defer tc.Stopper().Stop()

	sumDigitsFn := func(row int) parser.Datum {
		sum := 0
		for row > 0 {
			sum += row % 10
			row /= 10
		}
		return parser.NewDInt(parser.DInt(sum))
	}

	sqlutils.CreateTable(t, tc.ServerConn(0), "t",
		"num INT PRIMARY KEY, digitsum INT, numstr STRING, INDEX s (digitsum)",
		numRows,
		sqlutils.ToRowFn(sqlutils.RowIdxFn, sumDigitsFn, sqlutils.RowEnglishFn))

	kvDB := tc.Server(0).KVClient().(*client.DB)
	desc := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(desc, desc.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	// Set up table readers on three hosts feeding data into a join reader on
	// the third host. This is a basic test for the distributed flow
	// infrastructure, including local and remote streams.
	//
	// Note that the ranges won't necessarily be local to the table readers, but
	// that doesn't matter for the purposes of this test.

	// Start a span (useful to look at spans using Lightstep).
	sp, err := tracing.JoinOrNew(tracing.NewTracer(), nil, "cluster test")
	if err != nil {
		t.Fatal(err)
	}
	ctx := opentracing.ContextWithSpan(context.Background(), sp)
	defer sp.Finish()

	tr1 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(0, 8)},
	}

	tr2 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(8, 12)},
	}

	tr3 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(12, 100)},
	}

	jr := JoinReaderSpec{
		Table:         *desc,
		OutputColumns: []uint32{2},
	}

	txn := client.NewTxn(ctx, *kvDB)
	fid := FlowID{uuid.MakeV4()}

	req1 := &SetupFlowRequest{Txn: txn.Proto}
	req1.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr1},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{StreamID: 0, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req2 := &SetupFlowRequest{Txn: txn.Proto}
	req2.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr2},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{StreamID: 1, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req3 := &SetupFlowRequest{Txn: txn.Proto}
	req3.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{
			{
				Core: ProcessorCoreUnion{TableReader: &tr3},
				Output: []OutputRouterSpec{{
					Type: OutputRouterSpec_MIRROR,
					Streams: []StreamEndpointSpec{
						{StreamID: StreamID(2)},
					},
				}},
			},
			{
				Input: []InputSyncSpec{{
					Type:     InputSyncSpec_ORDERED,
					Ordering: Ordering{Columns: []Ordering_Column{{1, Ordering_Column_ASC}}},
					Streams: []StreamEndpointSpec{
						{StreamID: 0, Mailbox: &MailboxSpec{}},
						{StreamID: 1, Mailbox: &MailboxSpec{}},
						{StreamID: StreamID(2)},
					},
				}},
				Core: ProcessorCoreUnion{JoinReader: &jr},
				Output: []OutputRouterSpec{{
					Type:    OutputRouterSpec_MIRROR,
					Streams: []StreamEndpointSpec{{Mailbox: &MailboxSpec{SimpleResponse: true}}},
				}},
			},
		},
	}

	if err := SetFlowRequestTrace(ctx, req1); err != nil {
		t.Fatal(err)
	}
	if err := SetFlowRequestTrace(ctx, req2); err != nil {
		t.Fatal(err)
	}
	if err := SetFlowRequestTrace(ctx, req3); err != nil {
		t.Fatal(err)
	}

	var clients []DistSQLClient
	for i := 0; i < 3; i++ {
		s := tc.Server(i)
		conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
		if err != nil {
			t.Fatal(err)
		}
		clients = append(clients, NewDistSQLClient(conn))
	}

	if log.V(1) {
		log.Infof(ctx, "Setting up flow on 0")
	}
	if resp, err := clients[0].SetupFlow(ctx, req1); err != nil {
		t.Fatal(err)
	} else if resp.Error != nil {
		t.Fatal(resp.Error)
	}

	if log.V(1) {
		log.Infof(ctx, "Setting up flow on 1")
	}
	if resp, err := clients[1].SetupFlow(ctx, req2); err != nil {
		t.Fatal(err)
	} else if resp.Error != nil {
		t.Fatal(resp.Error)
	}

	if log.V(1) {
		log.Infof(ctx, "Running flow on 2")
	}
	stream, err := clients[2].RunSimpleFlow(ctx, req3)
	if err != nil {
		t.Fatal(err)
	}

	var decoder StreamDecoder
	var rows sqlbase.EncDatumRows
	for {
		msg, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			t.Fatal(err)
		}
		err = decoder.AddMessage(msg)
		if err != nil {
			t.Fatal(err)
		}
		rows = testGetDecodedRows(t, &decoder, rows)
	}
	if done, trailerErr := decoder.IsDone(); !done {
		t.Fatal("stream not done")
	} else if trailerErr != nil {
		t.Fatal("error in the stream trailer:", trailerErr)
	}

	// The result should be all the numbers in string form, ordered by the
	// digit sum (and then by number).
	var results []string
	for sum := 1; sum <= 50; sum++ {
		for i := 1; i <= numRows; i++ {
			if int(*sumDigitsFn(i).(*parser.DInt)) == sum {
				results = append(results, fmt.Sprintf("['%s']", sqlutils.IntToEnglish(i)))
			}
		}
	}
	expected := strings.Join(results, " ")
	expected = "[" + expected + "]"
	if rowStr := rows.String(); rowStr != expected {
		t.Errorf("Result: %s\n Expected: %s\n", rowStr, expected)
	}
}
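// A minimal standalone sketch of the ordering the test above expects:
// numbers sorted by digit sum, then by value, mirroring the
// expectation-building loop. The helper names are illustrative, not part of
// the test.
func digitSum(n int) int {
	sum := 0
	for n > 0 {
		sum += n % 10
		n /= 10
	}
	return sum
}

func expectedNumberOrder(numRows int) []int {
	// Find the largest digit sum that actually occurs.
	maxSum := 0
	for i := 1; i <= numRows; i++ {
		if s := digitSum(i); s > maxSum {
			maxSum = s
		}
	}
	// Emit the numbers grouped by increasing digit sum; within a group the
	// ascending scan preserves numeric order.
	var out []int
	for sum := 1; sum <= maxSum; sum++ {
		for i := 1; i <= numRows; i++ {
			if digitSum(i) == sum {
				out = append(out, i)
			}
		}
	}
	return out
}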