// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setupMultipleRanges(t, "b")
	defer s.Stop()

	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := []string{"a", "b"}
	ts := []time.Time{}
	b := &client.Batch{}
	for _, key := range keys {
		b.Put(key, "value")
	}
	if err := db.Run(b); err != nil {
		t.Fatal(err)
	}
	for i := range keys {
		ts = append(ts, b.Results[i].Rows[0].Timestamp())
		log.Infof("%d: %s", i, b.Results[i].Rows[0].Timestamp())
	}

	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to just below the
	// timestamp of the second key ("b") to verify that only key "a" is
	// returned.
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())

	// Scan.
	sa := proto.NewScan(proto.Key("a"), proto.Key("c"), 0).(*proto.ScanRequest)
	sa.ReadConsistency = proto.INCONSISTENT
	reply, err := batchutil.SendWrapped(ds, sa)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*proto.ScanResponse)
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}

	// ReverseScan.
	rsa := proto.NewReverseScan(proto.Key("a"), proto.Key("c"), 0).(*proto.ReverseScanRequest)
	rsa.ReadConsistency = proto.INCONSISTENT
	reply, err = batchutil.SendWrapped(ds, rsa)
	if err != nil {
		t.Fatal(err)
	}
	rsr := reply.(*proto.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
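
// The helper below is an illustrative sketch, not part of the original test
// suite: it reduces the manual-clock trick above to plain wall-time
// arithmetic (a simplification of HLC timestamps). A value is visible to a
// read iff it was written at or below the reading clock's timestamp, which
// is why pinning the clock to ts[1].UnixNano()-1 hides key "b" while
// leaving key "a" visible. The function name is hypothetical.
func visibleAtWallTime(writeNanos, readNanos int64) bool {
	// A write at exactly the read timestamp is still visible.
	return writeNanos <= readNanos
}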
// TestSendRPCRetry verifies that when sendRPC fails on the first address,
// the retry succeeds on the second address and the second reply is
// returned to the caller.
func TestSendRPCRetry(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	if err := g.SetNodeDescriptor(&proto.NodeDescriptor{NodeID: 1}); err != nil {
		t.Fatal(err)
	}
	// Fill the RangeDescriptor with 2 replicas.
	var descriptor = proto.RangeDescriptor{
		RangeID:  1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("z"),
	}
	for i := 1; i <= 2; i++ {
		addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
		nd := &proto.NodeDescriptor{
			NodeID:  proto.NodeID(i),
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}
		if err := g.AddInfoProto(gossip.MakeNodeIDKey(proto.NodeID(i)), nd, time.Hour); err != nil {
			t.Fatal(err)
		}
		descriptor.Replicas = append(descriptor.Replicas, proto.Replica{
			NodeID:  proto.NodeID(i),
			StoreID: proto.StoreID(i),
		})
	}
	// Define our rpcSend stub which returns success on the second address.
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		if method == "Node.Batch" {
			// The reply from the first address failed; discard it.
			_ = getReply()
			// The reply from the second address succeeds.
			batchReply := getReply().(*proto.BatchResponse)
			reply := &proto.ScanResponse{}
			batchReply.Add(reply)
			reply.Rows = append([]proto.KeyValue{}, proto.KeyValue{Key: proto.Key("b"), Value: proto.Value{}})
			return []gogoproto.Message{batchReply}, nil
		}
		return nil, util.Errorf("unexpected method %v", method)
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{descriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	scan := proto.NewScan(proto.Key("a"), proto.Key("d"), 1)
	sr, err := batchutil.SendWrapped(ds, scan)
	if err != nil {
		t.Fatal(err)
	}
	if l := len(sr.(*proto.ScanResponse).Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
}
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with the MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		splitKeys []proto.Key
		keys      []proto.Key
	}{
		{[]proto.Key{proto.Key("m")},
			[]proto.Key{proto.Key("a"), proto.Key("z")}},
		{[]proto.Key{proto.Key("h"), proto.Key("q")},
			[]proto.Key{proto.Key("b"), proto.Key("f"), proto.Key("k"),
				proto.Key("r"), proto.Key("w"), proto.Key("y")}},
	}

	for i, tc := range testCases {
		s := StartTestServer(t)
		defer s.Stop()
		ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
		tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, nil, s.stopper)

		for _, sk := range tc.splitKeys {
			if err := s.node.ctx.DB.AdminSplit(sk); err != nil {
				t.Fatal(err)
			}
		}

		var reply proto.Response
		for _, k := range tc.keys {
			put := proto.NewPut(k, proto.Value{Bytes: k})
			var err error
			reply, err = batchutil.SendWrapped(tds, put)
			if err != nil {
				t.Fatal(err)
			}
		}

		// Try every possible ScanRequest startKey.
		for start := 0; start < len(tc.keys); start++ {
			// Try every possible maxResults, from 1 to beyond the size of the key array.
			for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
				scan := proto.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(), int64(maxResults))
				scan.Header().Timestamp = reply.Header().Timestamp
				reply, err := batchutil.SendWrapped(tds, scan)
				if err != nil {
					t.Fatal(err)
				}
				rows := reply.(*proto.ScanResponse).Rows
				if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
					t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
				} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
					t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
				}
			}
		}
	}
}
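
// The helper below is an illustrative sketch (hypothetical, not part of the
// original test) of the arithmetic the assertions above encode: a scan
// starting at index start of the sorted key slice with limit maxResults
// returns min(maxResults, numKeys-start) rows, which collapses the two
// branches of the if/else into one expression.
func expectedScanRows(numKeys, start, maxResults int) int {
	if remaining := numKeys - start; maxResults > remaining {
		return remaining
	}
	return maxResults
}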
// TestRetryOnWrongReplicaError sets up a DistSender on a minimal gossip
// network and a mock of rpc.Send, and verifies that the DistSender correctly
// retries upon encountering a stale entry in its range descriptor cache.
func TestRetryOnWrongReplicaError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()

	// Updated below, after it has first been returned.
	badStartKey := proto.Key("m")
	newRangeDescriptor := testRangeDescriptor
	goodStartKey := newRangeDescriptor.StartKey
	newRangeDescriptor.StartKey = badStartKey
	descStale := true

	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		ba := getArgs(testAddress).(*proto.BatchRequest)
		if _, ok := ba.GetArg(proto.RangeLookup); ok {
			if !descStale && bytes.HasPrefix(ba.Key, keys.Meta2Prefix) {
				t.Errorf("unexpected extra lookup for non-stale replica descriptor at %s", ba.Key)
			}
			br := getReply().(*proto.BatchResponse)
			r := &proto.RangeLookupResponse{}
			r.Ranges = append(r.Ranges, newRangeDescriptor)
			br.Add(r)
			// If we just returned the stale descriptor, set up returning the
			// good one next time.
			if bytes.HasPrefix(ba.Key, keys.Meta2Prefix) {
				if newRangeDescriptor.StartKey.Equal(badStartKey) {
					newRangeDescriptor.StartKey = goodStartKey
				} else {
					descStale = false
				}
			}
			return []gogoproto.Message{br}, nil
		}
		// Return a stale-descriptor error for the Scan until the good
		// descriptor has been handed out by a range descriptor lookup.
		if !newRangeDescriptor.StartKey.Equal(goodStartKey) {
			return nil, &proto.RangeKeyMismatchError{RequestStartKey: ba.Key, RequestEndKey: ba.EndKey}
		}
		return []gogoproto.Message{ba.CreateReply().(*proto.BatchResponse)}, nil
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
	}
	ds := NewDistSender(ctx, g)
	scan := proto.NewScan(proto.Key("a"), proto.Key("d"), 0)
	if _, err := batchutil.SendWrapped(ds, scan); err != nil {
		t.Errorf("scan encountered error: %s", err)
	}
}
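
// The helper below is an illustrative sketch of the pattern this test
// exercises; the function and its parameters are hypothetical and do not
// reproduce the real DistSender internals. On a RangeKeyMismatchError the
// sender evicts the cached descriptor and performs a fresh lookup, so a
// single stale cache entry costs exactly one extra round trip (the test
// stub guarantees the refreshed lookup eventually returns a good
// descriptor, so the loop terminates).
func sendWithDescriptorRetry(
	lookup func(evictStale bool) proto.RangeDescriptor,
	send func(proto.RangeDescriptor) error,
) error {
	evictStale := false
	for {
		desc := lookup(evictStale)
		err := send(desc)
		if _, ok := err.(*proto.RangeKeyMismatchError); ok {
			// The descriptor we used was stale; refresh it and try again.
			evictStale = true
			continue
		}
		return err
	}
}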
// scan constructs a Scan or ReverseScan request over the span bounded by
// the marshaled begin and end keys, limited to maxRows rows, and appends it
// to the batch.
func (b *Batch) scan(s, e interface{}, maxRows int64, isReverse bool) {
	begin, err := marshalKey(s)
	if err != nil {
		b.initResult(0, 0, err)
		return
	}
	end, err := marshalKey(e)
	if err != nil {
		b.initResult(0, 0, err)
		return
	}
	if !isReverse {
		b.reqs = append(b.reqs, proto.NewScan(proto.Key(begin), proto.Key(end), maxRows))
	} else {
		b.reqs = append(b.reqs, proto.NewReverseScan(proto.Key(begin), proto.Key(end), maxRows))
	}
	b.initResult(1, 0, nil)
}
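
// A minimal usage sketch, assuming the exported Scan/ReverseScan wrappers on
// Batch delegate to the scan helper above (the wrapper signatures are
// inferred from context rather than quoted from the API, and this example
// function is not part of the original file).
func exampleBatchScan(db *DB) ([]KeyValue, error) {
	b := &Batch{}
	b.Scan("a", "c", 10)        // forward scan over ["a", "c"), at most 10 rows
	b.ReverseScan("a", "c", 10) // reverse scan over the same span
	if err := db.Run(b); err != nil {
		return nil, err
	}
	// Each request contributes one Result; Rows holds the returned pairs.
	return b.Results[0].Rows, nil
}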
// TestMultiRangeMergeStaleDescriptor simulates the situation in which the
// DistSender executes a multi-range scan which encounters the stale descriptor
// of a range which has since incorporated its right neighbor by means of a
// merge. It is verified that the DistSender scans the correct key range
// exactly once.
func TestMultiRangeMergeStaleDescriptor(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	// Assume we have two ranges, [a-b) and [b-KeyMax).
	merged := false
	// The stale first range descriptor which is unaware of the merge.
	var firstRange = proto.RangeDescriptor{
		RangeID:  1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("b"),
		Replicas: []proto.Replica{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// The merged descriptor, which will be looked up after having processed
	// the stale range [a,b).
	var mergedRange = proto.RangeDescriptor{
		RangeID:  1,
		StartKey: proto.Key("a"),
		EndKey:   proto.KeyMax,
		Replicas: []proto.Replica{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// Assume we have two key-value pairs, a=1 and c=2.
	existingKVs := []proto.KeyValue{
		{Key: proto.Key("a"), Value: proto.Value{Bytes: []byte("1")}},
		{Key: proto.Key("c"), Value: proto.Value{Bytes: []byte("2")}},
	}
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message, _ *rpc.Context) ([]gogoproto.Message, error) {
		if method != "Node.Batch" {
			t.Fatalf("unexpected method: %s", method)
		}
		header := getArgs(testAddress).(proto.Request).Header()
		batchReply := getReply().(*proto.BatchResponse)
		reply := &proto.ScanResponse{}
		batchReply.Add(reply)
		results := []proto.KeyValue{}
		for _, curKV := range existingKVs {
			// Return the pairs within the half-open span [Key, EndKey).
			if header.Key.Less(curKV.Key.Next()) && curKV.Key.Less(header.EndKey) {
				results = append(results, curKV)
			}
		}
		reply.Rows = results
		return []gogoproto.Message{batchReply}, nil
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(key proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			if !merged {
				// Assume a range merge operation happened.
				merged = true
				return []proto.RangeDescriptor{firstRange}, nil
			}
			return []proto.RangeDescriptor{mergedRange}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	scan := proto.NewScan(proto.Key("a"), proto.Key("d"), 10).(*proto.ScanRequest)
	// Set the Txn info to avoid an OpRequiresTxnError.
	scan.Txn = &proto.Transaction{}
	reply, err := batchutil.SendWrapped(ds, scan)
	if err != nil {
		t.Fatalf("scan encountered error: %s", err)
	}
	sr := reply.(*proto.ScanResponse)
	if !reflect.DeepEqual(existingKVs, sr.Rows) {
		t.Fatalf("expected %v, got %v", existingKVs, sr.Rows)
	}
}
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := StartTestServer(t)
	defer s.Stop()
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
	tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, nil, s.stopper)

	if err := s.node.ctx.DB.AdminSplit("m"); err != nil {
		t.Fatal(err)
	}
	writes := []proto.Key{proto.Key("a"), proto.Key("z")}
	get := &proto.GetRequest{
		RequestHeader: proto.RequestHeader{Key: writes[0]},
	}
	get.EndKey = writes[len(writes)-1]
	if _, err := batchutil.SendWrapped(tds, get); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	var delTS proto.Timestamp
	for i, k := range writes {
		put := proto.NewPut(k, proto.Value{Bytes: k})
		reply, err := batchutil.SendWrapped(tds, put)
		if err != nil {
			t.Fatal(err)
		}
		scan := proto.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*proto.ScanRequest)
		// The Put timestamps may have been pushed by the timestamp cache,
		// so scan at the latest Put timestamp to make sure we see the
		// written values.
		delTS = reply.(*proto.PutResponse).Timestamp
		scan.Timestamp = delTS
		reply, err = batchutil.SendWrapped(tds, scan)
		if err != nil {
			t.Fatal(err)
		}
		sr := reply.(*proto.ScanResponse)
		if sr.Txn != nil {
			// This was the other way around at some point in the past.
			// Same below for Delete, etc.
			t.Errorf("expected no transaction in response header")
		}
		if rows := sr.Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}
	del := &proto.DeleteRangeRequest{
		RequestHeader: proto.RequestHeader{
			Key:       writes[0],
			EndKey:    writes[len(writes)-1].Next(),
			Timestamp: delTS,
		},
	}
	reply, err := batchutil.SendWrapped(tds, del)
	if err != nil {
		t.Fatal(err)
	}
	dr := reply.(*proto.DeleteRangeResponse)
	if dr.Txn != nil {
		t.Errorf("expected no transaction in response header")
	}
	if n := dr.NumDeleted; n != int64(len(writes)) {
		t.Errorf("expected %d keys to be deleted, but got %d instead", len(writes), n)
	}
	scan := proto.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*proto.ScanRequest)
	scan.Timestamp = dr.Timestamp
	scan.Txn = &proto.Transaction{Name: "MyTxn"}
	reply, err = batchutil.SendWrapped(tds, scan)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*proto.ScanResponse)
	if txn := sr.Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := sr.Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}
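
// The helper below is an illustrative sketch (hypothetical, not part of the
// original test) of why the code above uses writes[len(writes)-1].Next() as
// the Scan/DeleteRange end key: key spans are half-open, [Key, EndKey), and
// Key.Next() yields the smallest key strictly greater than its receiver (by
// appending a zero byte), so the inclusive last written key becomes an
// exclusive end key that still covers it.
func inclusiveEnd(last proto.Key) proto.Key {
	// e.g. "z" -> "z\x00", so a scan of ["a", "z\x00") includes "z".
	return last.Next()
}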