func (b *Batch) scan(s, e interface{}, isReverse bool) { begin, err := marshalKey(s) if err != nil { b.initResult(0, 0, notRaw, err) return } end, err := marshalKey(e) if err != nil { b.initResult(0, 0, notRaw, err) return } if !isReverse { b.appendReqs(roachpb.NewScan(begin, end)) } else { b.appendReqs(roachpb.NewReverseScan(begin, end)) } b.initResult(1, 0, notRaw, nil) }
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	db := setupMultipleRanges(t, s, "b")

	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := [2]string{"a", "b"}
	// Record the server clock reading taken just after each write; ts[0] is
	// later used to position a manual clock between the two writes.
	ts := [2]hlc.Timestamp{}
	for i, key := range keys {
		b := &client.Batch{}
		b.Put(key, "value")
		if err := db.Run(context.TODO(), b); err != nil {
			t.Fatal(err)
		}
		ts[i] = s.Clock().Now()
		// NOTE(review): %d applied to an hlc.Timestamp struct looks like the
		// wrong verb (%s or %v seems intended) — confirm against hlc's
		// formatting behavior.
		log.Infof(context.TODO(), "%d: %s %d", i, key, ts[i])
		if i == 0 {
			util.SucceedsSoon(t, func() error {
				// Enforce that when we write the second key, it's written
				// with a strictly higher timestamp. We're dropping logical
				// ticks and the clock may just have been pushed into the
				// future, so that's necessary. See #3122.
				if ts[0].WallTime >= s.Clock().Now().WallTime {
					return errors.New("time stands still")
				}
				return nil
			})
		}
	}

	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to the timestamp of
	// just above the first key to verify it's used to read only key "a".
	for i, request := range []roachpb.Request{
		roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c")),
		roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c")),
	} {
		// A manual clock frozen just past the first write: an inconsistent
		// read at this time should see key "a" but not key "b".
		manual := hlc.NewManualClock(ts[0].WallTime + 1)
		clock := hlc.NewClock(manual.UnixNano)
		ds := kv.NewDistSender(
			kv.DistSenderConfig{Clock: clock, RPCContext: s.RPCContext()},
			s.(*server.TestServer).Gossip(),
		)
		reply, err := client.SendWrappedWith(context.Background(), ds, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, request)
		if err != nil {
			t.Fatal(err)
		}

		// Both scan directions carry their rows in a Rows field; extract
		// whichever response type came back.
		var rows []roachpb.KeyValue
		switch r := reply.(type) {
		case *roachpb.ScanResponse:
			rows = r.Rows
		case *roachpb.ReverseScanResponse:
			rows = r.Rows
		default:
			t.Fatalf("unexpected response %T: %v", reply, reply)
		}

		// Exactly one row — key "a" — should be visible at the chosen
		// read timestamp.
		if l := len(rows); l != 1 {
			t.Fatalf("%d: expected 1 row; got %d\n%s", i, l, rows)
		}
		if key := string(rows[0].Key); keys[0] != key {
			t.Errorf("expected key %q; got %q", keys[0], key)
		}
	}
}
// TestMultiRangeScanWithMaxResults tests that commands which access multiple // ranges with MaxResults parameter are carried out properly. func TestMultiRangeScanWithMaxResults(t *testing.T) { defer leaktest.AfterTest(t)() testCases := []struct { splitKeys []roachpb.Key keys []roachpb.Key }{ {[]roachpb.Key{roachpb.Key("m")}, []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}}, {[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")}, []roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"), roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}}, } for i, tc := range testCases { s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() ts := s.(*TestServer) retryOpts := base.DefaultRetryOptions() retryOpts.Closer = ts.stopper.ShouldQuiesce() ds := kv.NewDistSender(kv.DistSenderConfig{ Clock: s.Clock(), RPCContext: s.RPCContext(), RPCRetryOptions: &retryOpts, }, ts.Gossip()) ambient := log.AmbientContext{Tracer: tracing.NewTracer()} tds := kv.NewTxnCoordSender( ambient, ds, ts.Clock(), ts.Cfg.Linearizable, ts.stopper, kv.MakeTxnMetrics(metric.TestSampleInterval), ) for _, sk := range tc.splitKeys { if err := ts.node.storeCfg.DB.AdminSplit(context.TODO(), sk); err != nil { t.Fatal(err) } } for _, k := range tc.keys { put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k)) if _, err := client.SendWrapped(context.Background(), tds, put); err != nil { t.Fatal(err) } } // Try every possible ScanRequest startKey. for start := 0; start < len(tc.keys); start++ { // Try every possible maxResults, from 1 to beyond the size of key array. 
for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ { scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next()) reply, err := client.SendWrappedWith( context.Background(), tds, roachpb.Header{MaxSpanRequestKeys: int64(maxResults)}, scan, ) if err != nil { t.Fatal(err) } rows := reply.(*roachpb.ScanResponse).Rows if start+maxResults <= len(tc.keys) && len(rows) != maxResults { t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows)) } else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 { t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows)) } } } } }
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)
	retryOpts := base.DefaultRetryOptions()
	// Tie retries to server shutdown so they don't spin after Stop.
	retryOpts.Closer = ts.stopper.ShouldQuiesce()
	ds := kv.NewDistSender(kv.DistSenderConfig{
		Clock:           s.Clock(),
		RPCContext:      s.RPCContext(),
		RPCRetryOptions: &retryOpts,
	}, ts.Gossip())
	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	tds := kv.NewTxnCoordSender(
		ambient,
		ds,
		s.Clock(),
		ts.Cfg.Linearizable,
		ts.stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval),
	)

	// Split at "m" so writes "a" and "z" land in different ranges.
	if err := ts.node.storeCfg.DB.AdminSplit(context.TODO(), "m"); err != nil {
		t.Fatal(err)
	}
	writes := []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}
	// A Get with an EndKey is malformed (Get is point-lookup only); sending
	// one must produce an error.
	get := &roachpb.GetRequest{
		Span: roachpb.Span{Key: writes[0]},
	}
	get.EndKey = writes[len(writes)-1]
	if _, err := client.SendWrapped(context.Background(), tds, get); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	// delTS is never assigned, so the DeleteRange below is sent with a
	// zero header timestamp. NOTE(review): presumably the server then
	// assigns its own timestamp — confirm; if a specific delete timestamp
	// was intended, this is a dropped assignment.
	var delTS hlc.Timestamp
	for i, k := range writes {
		put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
		if _, err := client.SendWrapped(context.Background(), tds, put); err != nil {
			t.Fatal(err)
		}
		// After each write, a cross-range scan must see all keys written
		// so far.
		scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next())
		reply, err := client.SendWrapped(context.Background(), tds, scan)
		if err != nil {
			t.Fatal(err)
		}
		sr := reply.(*roachpb.ScanResponse)
		if sr.Txn != nil {
			// This was the other way around at some point in the past.
			// Same below for Delete, etc.
			t.Errorf("expected no transaction in response header")
		}
		if rows := sr.Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}

	// DeleteRange across both ranges, asking for the deleted keys back.
	del := &roachpb.DeleteRangeRequest{
		Span: roachpb.Span{
			Key:    writes[0],
			EndKey: roachpb.Key(writes[len(writes)-1]).Next(),
		},
		ReturnKeys: true,
	}
	reply, err := client.SendWrappedWith(context.Background(), tds, roachpb.Header{Timestamp: delTS}, del)
	if err != nil {
		t.Fatal(err)
	}
	dr := reply.(*roachpb.DeleteRangeResponse)
	if dr.Txn != nil {
		t.Errorf("expected no transaction in response header")
	}
	// NOTE(review): the first %d verb is applied to writes ([]roachpb.Key),
	// which looks like the wrong verb (%v intended?) — the message would
	// render oddly on failure. Can't change the string in a doc-only pass.
	if !reflect.DeepEqual(dr.Keys, writes) {
		t.Errorf("expected %d keys to be deleted, but got %d instead", writes, dr.Keys)
	}

	// A post-delete scan under an explicit transaction must return no rows
	// and must echo the caller's transaction back in the response header.
	scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next())
	txn := &roachpb.Transaction{Name: "MyTxn"}
	reply, err = client.SendWrappedWith(context.Background(), tds, roachpb.Header{Txn: txn}, scan)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*roachpb.ScanResponse)
	if txn := sr.Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := sr.Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}