// TestRejectFutureCommand verifies that leaders reject commands that
// would cause a large time jump.
func TestRejectFutureCommand(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const maxOffset = 100 * time.Millisecond
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(maxOffset)
	mtc := multiTestContext{
		clock: clock,
	}
	mtc.Start(t, 1)
	defer mtc.Stop()

	// First do a write. The first write will advance the clock by MaxOffset
	// because of the read cache's low water mark.
	pArgs := putArgs([]byte("b"), []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	if now := clock.Now(); now.WallTime != int64(maxOffset) {
		t.Fatalf("expected clock to advance to 100ms; got %s", now)
	}
	// The logical clock has advanced past the physical clock; increment
	// the "physical" clock to catch up.
	manual.Increment(int64(maxOffset))

	startTime := manual.UnixNano()

	// Commands with a future timestamp that is within the MaxOffset
	// bound will be accepted and will cause the clock to advance.
	for i := int64(0); i < 3; i++ {
		incArgs := incrementArgs([]byte("a"), 5)
		ts := roachpb.ZeroTimestamp.Add(startTime+((i+1)*30)*int64(time.Millisecond), 0)
		if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
			t.Fatal(err)
		}
	}
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Fatalf("expected clock to advance to 190ms; got %s", now)
	}

	// Once the accumulated offset reaches MaxOffset, commands will be rejected.
	incArgs := incrementArgs([]byte("a"), 11)
	ts := roachpb.ZeroTimestamp.Add(startTime+int64(maxOffset)+1, 0)
	if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err == nil {
		t.Fatalf("expected clock offset error but got nil")
	}

	// The clock remained at 190ms and the final command was not executed.
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Errorf("expected clock to advance to 190ms; got %s", now)
	}
	val, _, err := engine.MVCCGet(mtc.engines[0], roachpb.Key("a"), clock.Now(), true, nil)
	if err != nil {
		t.Fatal(err)
	}
	if v := mustGetInt(val); v != 15 {
		t.Errorf("expected 15, got %v", v)
	}
}
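// The request-constructor helpers used throughout this section (putArgs,
// incrementArgs, getArgs) and the mustGetInt decoder are defined elsewhere
// in the test package. What follows is a minimal sketch of plausible shapes,
// inferred from the call sites above; the actual helpers may differ (for
// instance, older snippets below populate roachpb.RequestHeader where newer
// ones use roachpb.Span).

// putArgs returns a PutRequest for the given key and raw value.
// Hypothetical reconstruction, not the verified original.
func putArgs(key roachpb.Key, value []byte) roachpb.PutRequest {
	return roachpb.PutRequest{
		Span:  roachpb.Span{Key: key},
		Value: roachpb.MakeValueFromBytes(value),
	}
}

// incrementArgs returns an IncrementRequest that adds inc to the value at key.
// Hypothetical reconstruction, not the verified original.
func incrementArgs(key roachpb.Key, inc int64) roachpb.IncrementRequest {
	return roachpb.IncrementRequest{
		Span:      roachpb.Span{Key: key},
		Increment: inc,
	}
}

// getArgs returns a GetRequest for the given key.
// Hypothetical reconstruction, not the verified original.
func getArgs(key roachpb.Key) roachpb.GetRequest {
	return roachpb.GetRequest{
		Span: roachpb.Span{Key: key},
	}
}

// mustGetInt decodes v as an int64, panicking on a decoding error. A nil
// value reads as zero, which the replica-removal tests below rely on.
// Hypothetical reconstruction, not the verified original.
func mustGetInt(v *roachpb.Value) int64 {
	if v == nil {
		return 0
	}
	i, err := v.GetInt()
	if err != nil {
		panic(err)
	}
	return i
}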
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setupMultipleRanges(t, "b")
	defer s.Stop()

	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := []string{"a", "b"}
	ts := []time.Time{}
	for i, key := range keys {
		b := &client.Batch{}
		b.Put(key, "value")
		if err := db.Run(b); err != nil {
			t.Fatal(err)
		}
		ts = append(ts, b.Results[0].Rows[0].Timestamp())
		log.Infof("%d: %s", i, b.Results[0].Rows[0].Timestamp())
	}

	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to just before the second
	// key's timestamp to verify that only key "a" is read.
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())

	// Scan.
	sa := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ScanRequest)
	reply, err := client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, sa)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*roachpb.ScanResponse)

	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}

	// ReverseScan.
	rsa := roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ReverseScanRequest)
	reply, err = client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, rsa)
	if err != nil {
		t.Fatal(err)
	}
	rsr := reply.(*roachpb.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
// TestReplicateAfterSplit verifies that a new replica whose start key
// is not KeyMin replicating to a fresh store can apply snapshots correctly.
func TestReplicateAfterSplit(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	key := roachpb.Key("z")

	store0 := mtc.stores[0]
	// Make the split.
	splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, err := client.SendWrapped(rg1(store0), nil, &splitArgs); err != nil {
		t.Fatal(err)
	}

	rangeID2 := store0.LookupReplica(roachpb.RKey(key), nil).RangeID
	if rangeID2 == rangeID {
		t.Errorf("got same range id after split")
	}
	// Issue an increment for later check.
	incArgs := incrementArgs(key, 11)
	if _, err := client.SendWrappedWith(rg1(store0), nil, roachpb.Header{
		RangeID: rangeID2,
	}, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Now add the second replica.
	mtc.replicateRange(rangeID2, 0, 1)

	if mtc.stores[1].LookupReplica(roachpb.RKey(key), nil).GetMaxBytes() == 0 {
		t.Error("Range MaxBytes is not set after snapshot applied")
	}
	// Once it catches up, the effects of increment commands can be seen.
	if err := util.IsTrueWithin(func() bool {
		getArgs := getArgs(key)
		// Reading on non-leader replica should use inconsistent read.
		reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			RangeID:         rangeID2,
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs)
		if err != nil {
			return false
		}
		getResp := reply.(*roachpb.GetResponse)
		if log.V(1) {
			log.Infof("read value %d", mustGetInt(getResp.Value))
		}
		return mustGetInt(getResp.Value) == 11
	}, replicaReadTimeout); err != nil {
		t.Fatal(err)
	}
}
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()

	testCases := []struct {
		args    roachpb.Request
		writing bool
		ok      bool
	}{
		{roachpb.NewGet(roachpb.Key("a")), true, false},
		{roachpb.NewGet(roachpb.Key("a")), false, true},
		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), false, false}, // transactional write before begin
		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), true, false},  // must have switched coordinators
	}
	for i, tc := range testCases {
		txn := roachpb.NewTransaction("test", roachpb.Key("a"), 1, roachpb.SERIALIZABLE,
			s.Clock.Now(), s.Clock.MaxOffset().Nanoseconds())
		txn.Writing = tc.writing
		reply, pErr := client.SendWrappedWith(sender, nil, roachpb.Header{
			Txn: txn,
		}, tc.args)
		if pErr == nil != tc.ok {
			t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
				i, tc.args, tc.writing, tc.ok, pErr)
		}
		if pErr != nil {
			continue
		}

		txn = reply.Header().Txn
		// The transaction should come back rw if it started rw or if we just
		// wrote.
		isWrite := roachpb.IsTransactionWrite(tc.args)
		if (tc.writing || isWrite) != txn.Writing {
			t.Errorf("%d: unexpected writing state: %s", i, txn)
		}
		if !isWrite {
			continue
		}
		// Abort for clean shutdown.
		if _, pErr := client.SendWrappedWith(sender, nil, roachpb.Header{
			Txn: txn,
		}, &roachpb.EndTransactionRequest{
			Commit: false,
		}); pErr != nil {
			t.Fatal(pErr)
		}
	}
}
// TestTxnMultipleCoord checks that a coordinator uses the Writing flag to
// enforce that only one coordinator can be used for transactional writes.
func TestTxnMultipleCoord(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	for i, tc := range []struct {
		args    roachpb.Request
		writing bool
		ok      bool
	}{
		{roachpb.NewGet(roachpb.Key("a")), true, true},
		{roachpb.NewGet(roachpb.Key("a")), false, true},
		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), false, true},
		{roachpb.NewPut(roachpb.Key("a"), roachpb.Value{}), true, false},
	} {
		txn := newTxn(s.Clock, roachpb.Key("a"))
		txn.Writing = tc.writing
		reply, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
			Txn: txn,
		}, tc.args)
		if err == nil != tc.ok {
			t.Errorf("%d: %T (writing=%t): success_expected=%t, but got: %v",
				i, tc.args, tc.writing, tc.ok, err)
		}
		if err != nil {
			continue
		}

		txn = reply.Header().Txn
		// The transaction should come back rw if it started rw or if we just
		// wrote.
		isWrite := roachpb.IsTransactionWrite(tc.args)
		if (tc.writing || isWrite) != txn.Writing {
			t.Errorf("%d: unexpected writing state: %s", i, txn)
		}
		if !isWrite {
			continue
		}
		// Abort for clean shutdown.
		if _, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
			Txn: txn,
		}, &roachpb.EndTransactionRequest{
			Commit: false,
		}); err != nil {
			t.Fatal(err)
		}
	}
}
// TestReplicateAfterSplit verifies that a new replica whose start key
// is not KeyMin replicating to a fresh store can apply snapshots correctly.
func TestReplicateAfterSplit(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	key := roachpb.Key("z")

	store0 := mtc.stores[0]
	// Make the split.
	splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, err := client.SendWrapped(rg1(store0), nil, &splitArgs); err != nil {
		t.Fatal(err)
	}

	rangeID2 := store0.LookupReplica(roachpb.RKey(key), nil).RangeID
	if rangeID2 == rangeID {
		t.Errorf("got same range id after split")
	}
	// Issue an increment for later check.
	incArgs := incrementArgs(key, 11)
	if _, err := client.SendWrappedWith(rg1(store0), nil, roachpb.Header{
		RangeID: rangeID2,
	}, &incArgs); err != nil {
		t.Fatal(err)
	}
	// Now add the second replica.
	mtc.replicateRange(rangeID2, 1)

	if mtc.stores[1].LookupReplica(roachpb.RKey(key), nil).GetMaxBytes() == 0 {
		t.Error("Range MaxBytes is not set after snapshot applied")
	}
	// Once it catches up, the effects of increment commands can be seen.
	util.SucceedsWithin(t, replicaReadTimeout, func() error {
		getArgs := getArgs(key)
		// Reading on non-leader replica should use inconsistent read.
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			RangeID:         rangeID2,
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return util.Errorf("failed to read data: %s", err)
		} else if e, v := int64(11), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
// TestTxnCoordSenderCleanupOnAborted verifies that if a txn receives a
// TransactionAbortedError, the coordinator cleans up the transaction.
func TestTxnCoordSenderCleanupOnAborted(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Create a transaction with intent at "a".
	key := roachpb.Key("a")
	txn := newTxn(s.Clock, key)
	txn.Priority = 1
	put, h := createPutRequest(key, []byte("value"), txn)
	if reply, err := client.SendWrappedWith(s.Sender, nil, h, put); err != nil {
		t.Fatal(err)
	} else {
		txn = reply.Header().Txn
	}

	// Push the transaction to abort it.
	txn2 := newTxn(s.Clock, key)
	txn2.Priority = 2
	pushArgs := &roachpb.PushTxnRequest{
		RequestHeader: roachpb.RequestHeader{
			Key: txn.Key,
		},
		Now:       s.Clock.Now(),
		PusherTxn: *txn2,
		PusheeTxn: *txn,
		PushType:  roachpb.ABORT_TXN,
	}
	if _, err := client.SendWrapped(s.Sender, nil, pushArgs); err != nil {
		t.Fatal(err)
	}

	// Now end the transaction and verify we've cleaned up, even though
	// the EndTransaction failed.
	etArgs := &roachpb.EndTransactionRequest{
		Commit: true,
	}
	_, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
		Txn: txn,
	}, etArgs)
	switch err.(type) {
	case *roachpb.TransactionAbortedError:
		// Expected
	default:
		t.Fatalf("expected transaction aborted error; got %s", err)
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
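// newTxn and createPutRequest are also assumed test-package helpers. A
// plausible sketch, inferred from the call sites in this section; the
// returned header type matches the roachpb.BatchRequest_Header vintage used
// above, and roachpb.NewTransaction is assumed available (older snippets may
// build the Transaction literal directly). createDeleteRangeRequest, used
// later, would follow the same pattern with a DeleteRangeRequest.

// newTxn returns a fresh serializable transaction anchored at baseKey.
// Hypothetical reconstruction, not the verified original.
func newTxn(clock *hlc.Clock, baseKey roachpb.Key) *roachpb.Transaction {
	return roachpb.NewTransaction("test", baseKey, 1, roachpb.SERIALIZABLE,
		clock.Now(), clock.MaxOffset().Nanoseconds())
}

// createPutRequest returns a put on key plus the header carrying the
// transaction, ready for client.SendWrappedWith.
// Hypothetical reconstruction, not the verified original.
func createPutRequest(key roachpb.Key, value []byte, txn *roachpb.Transaction) (*roachpb.PutRequest, roachpb.BatchRequest_Header) {
	return &roachpb.PutRequest{
		RequestHeader: roachpb.RequestHeader{Key: key},
		Value:         roachpb.MakeValueFromBytes(value),
	}, roachpb.BatchRequest_Header{Txn: txn}
}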
// TestTxnCoordSenderBeginTransactionMinPriority verifies that when starting
// a new transaction, a non-zero priority is treated as a minimum value.
func TestTxnCoordSenderBeginTransactionMinPriority(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	reply, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
		UserPriority: proto.Int32(-10), // negative user priority is translated into positive priority
		Txn: &roachpb.Transaction{
			Name:      "test txn",
			Isolation: roachpb.SNAPSHOT,
			Priority:  11,
		},
	}, &roachpb.PutRequest{
		RequestHeader: roachpb.RequestHeader{
			Key: roachpb.Key("key"),
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	if prio := reply.(*roachpb.PutResponse).Txn.Priority; prio != 11 {
		t.Errorf("expected txn priority 11; got %d", prio)
	}
}
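// teardownHeartbeats, deferred by several tests here, is another assumed
// test-package helper that stops outstanding heartbeat goroutines before
// shutdown. A sketch under the assumption that each tracked transaction's
// metadata carries a txnEnd channel whose close signals its heartbeat loop;
// the real helper may differ.
func teardownHeartbeats(tc *TxnCoordSender) {
	tc.Lock()
	defer tc.Unlock()
	for _, txnMeta := range tc.txns {
		if txnMeta.txnEnd != nil {
			// Closing the channel asks the heartbeat goroutine to exit.
			close(txnMeta.txnEnd)
			txnMeta.txnEnd = nil
		}
	}
}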
// TestTxnCoordSenderBeginTransaction verifies that a command sent with a
// not-nil Txn with empty ID gets a new transaction initialized.
func TestTxnCoordSenderBeginTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	key := roachpb.Key("key")
	reply, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
		UserPriority: proto.Int32(-10), // negative user priority is translated into positive priority
		Txn: &roachpb.Transaction{
			Name:      "test txn",
			Isolation: roachpb.SNAPSHOT,
		},
	}, &roachpb.PutRequest{
		RequestHeader: roachpb.RequestHeader{
			Key: key,
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	pr := reply.(*roachpb.PutResponse)
	if pr.Txn.Name != "test txn" {
		t.Errorf("expected txn name to be %q; got %q", "test txn", pr.Txn.Name)
	}
	if pr.Txn.Priority != 10 {
		t.Errorf("expected txn priority 10; got %d", pr.Txn.Priority)
	}
	if !bytes.Equal(pr.Txn.Key, key) {
		t.Errorf("expected txn Key to match %q != %q", key, pr.Txn.Key)
	}
	if pr.Txn.Isolation != roachpb.SNAPSHOT {
		t.Errorf("expected txn isolation to be SNAPSHOT; got %s", pr.Txn.Isolation)
	}
}
// process synchronously invokes admin split for each proposed split key.
func (sq *splitQueue) process(now roachpb.Timestamp, rng *Replica, sysCfg config.SystemConfig) error {
	ctx := rng.context(context.TODO())

	// First handle case of splitting due to zone config maps.
	desc := rng.Desc()
	splitKeys := sysCfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)
	if len(splitKeys) > 0 {
		log.Infof("splitting %s at keys %v", rng, splitKeys)
		for _, splitKey := range splitKeys {
			if err := sq.db.AdminSplit(splitKey.AsRawKey()); err != nil {
				return util.Errorf("unable to split %s at key %q: %s", rng, splitKey, err)
			}
		}
		return nil
	}

	// Next handle case of splitting due to size.
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return err
	}
	// FIXME: why is this implementation not the same as the one above?
	// (Possibly because no explicit split key is known here: leaving the
	// request's SplitKey unset lets the range choose its own split point,
	// which the client-level AdminSplit call above cannot express.)
	if float64(rng.stats.GetSize())/float64(zone.RangeMaxBytes) > 1 {
		log.Infof("splitting %s size=%d max=%d", rng, rng.stats.GetSize(), zone.RangeMaxBytes)
		if _, pErr := client.SendWrappedWith(rng, ctx, roachpb.Header{
			Timestamp: now,
		}, &roachpb.AdminSplitRequest{
			Span: roachpb.Span{Key: desc.StartKey.AsRawKey()},
		}); pErr != nil {
			return pErr.GoError()
		}
	}
	return nil
}
// TestLeaderRemoveSelf verifies that a leader can remove itself
// without panicking and future access to the range returns a
// RangeNotFoundError (not RaftGroupDeletedError, and even before
// the ReplicaGCQueue has run).
func TestLeaderRemoveSelf(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Disable the replica GC queue. This verifies that the replica is
	// considered removed even before the gc queue has run, and also
	// helps avoid a deadlock at shutdown.
	mtc.stores[0].DisableReplicaGCQueue(true)
	raftID := roachpb.RangeID(1)
	mtc.replicateRange(raftID, 1)
	// Remove the replica from the first store.
	mtc.unreplicateRange(raftID, 0)
	getArgs := getArgs([]byte("a"))

	// Force the read command to request a new lease.
	clock := mtc.clocks[0]
	header := roachpb.Header{}
	header.Timestamp = clock.Update(clock.Now().Add(int64(storage.DefaultLeaderLeaseDuration), 0))

	// Expect a RangeNotFoundError.
	_, pErr := client.SendWrappedWith(rg1(mtc.stores[0]), nil, header, &getArgs)
	if _, ok := pErr.GoError().(*roachpb.RangeNotFoundError); !ok {
		t.Fatalf("expected RangeNotFoundError, got %v", pErr)
	}
}
// TestTxnCoordSenderGC verifies that the coordinator cleans up extant
// transactions after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	txn := newTxn(s.Clock, roachpb.Key("a"))
	put, h := createPutRequest(roachpb.Key("a"), []byte("value"), txn)
	if _, err := client.SendWrappedWith(s.Sender, nil, h, put); err != nil {
		t.Fatal(err)
	}

	// Now, advance clock past the default client timeout.
	// Locking the TxnCoordSender to prevent a data race.
	s.Sender.Lock()
	s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
	s.Sender.Unlock()

	if err := util.IsTrueWithin(func() bool {
		// Locking the TxnCoordSender to prevent a data race.
		s.Sender.Lock()
		_, ok := s.Sender.txns[string(txn.ID)]
		s.Sender.Unlock()
		return !ok
	}, 50*time.Millisecond); err != nil {
		t.Error("expected garbage collection")
	}
}
// fillRange writes keys with the given prefix and associated values
// until bytes bytes have been written or the given range has split.
func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms engine.MVCCStats
		if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		key = keys.MakeNonColumnKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		_, err := client.SendWrappedWith(store, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs)
		// When the split occurs in the background, our writes may start failing.
		// We know we can stop writing when this happens.
		if _, ok := err.(*roachpb.RangeKeyMismatchError); ok {
			return
		} else if err != nil {
			t.Fatal(err)
		}
	}
}
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}

	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA, roachpb.ReplicaDescriptor{
		NodeID:  mtc.stores[1].Ident.NodeID,
		StoreID: mtc.stores[1].Ident.StoreID,
	}, rng.Desc()); err != nil {
		t.Fatal(err)
	}
	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := roachpb.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}
	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
		meta1 := keys.Addr(keys.RangeMetaKey(meta2))
		for _, key := range []roachpb.RKey{meta2, meta1} {
			metaDesc := roachpb.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key.AsRawKey())
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})

	// Verify that the same data is available on the replica.
	util.SucceedsWithin(t, replicaReadTimeout, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return util.Errorf("failed to read data: %s", err)
		} else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
// TestTxnCoordSenderKeyRanges verifies that multiple requests to the same or
// overlapping key ranges cause the coordinator to keep track of only
// the minimum number of ranges.
func TestTxnCoordSenderKeyRanges(t *testing.T) {
	defer leaktest.AfterTest(t)
	ranges := []struct {
		start, end roachpb.Key
	}{
		{roachpb.Key("a"), roachpb.Key(nil)},
		{roachpb.Key("a"), roachpb.Key(nil)},
		{roachpb.Key("aa"), roachpb.Key(nil)},
		{roachpb.Key("b"), roachpb.Key(nil)},
		{roachpb.Key("aa"), roachpb.Key("c")},
		{roachpb.Key("b"), roachpb.Key("c")},
	}

	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn := newTxn(s.Clock, roachpb.Key("a"))
	for _, rng := range ranges {
		if rng.end != nil {
			delRangeReq, h := createDeleteRangeRequest(rng.start, rng.end, txn)
			if _, err := client.SendWrappedWith(s.Sender, nil, h, delRangeReq); err != nil {
				t.Fatal(err)
			}
		} else {
			putReq, h := createPutRequest(rng.start, []byte("value"), txn)
			if _, err := client.SendWrappedWith(s.Sender, nil, h, putReq); err != nil {
				t.Fatal(err)
			}
		}
		txn.Writing = true // required for all but the first request
	}

	// Verify that the transaction metadata contains only two entries in its
	// "keys" interval cache: the key "a" and the range "aa"-"c".
	txnMeta, ok := s.Sender.txns[string(txn.ID)]
	if !ok {
		t.Fatalf("expected a transaction to be created on coordinator")
	}
	if txnMeta.keys.Len() != 2 {
		t.Errorf("expected 2 entries in keys interval cache; got %v", txnMeta.keys)
	}
}
// TestTxnCoordSenderEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestTxnCoordSenderEndTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	txn := newTxn(s.Clock, roachpb.Key("a"))
	key := roachpb.Key("a")
	put, h := createPutRequest(key, []byte("value"), txn)
	reply, err := client.SendWrappedWith(s.Sender, nil, h, put)
	if err != nil {
		t.Fatal(err)
	}
	pReply := reply.(*roachpb.PutResponse)
	if _, err := client.SendWrappedWith(s.Sender, nil, roachpb.BatchRequest_Header{
		Txn: pReply.Header().Txn,
	}, &roachpb.EndTransactionRequest{Commit: true}); err != nil {
		t.Fatal(err)
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
// TestRangeCommandClockUpdate verifies that followers update their
// clocks when executing a command, even if the leader's clock is far
// in the future.
func TestRangeCommandClockUpdate(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const numNodes = 3
	var manuals []*hlc.ManualClock
	var clocks []*hlc.Clock
	for i := 0; i < numNodes; i++ {
		manuals = append(manuals, hlc.NewManualClock(1))
		clocks = append(clocks, hlc.NewClock(manuals[i].UnixNano))
		clocks[i].SetMaxOffset(100 * time.Millisecond)
	}
	mtc := multiTestContext{
		clocks: clocks,
	}
	mtc.Start(t, numNodes)
	defer mtc.Stop()
	mtc.replicateRange(1, 1, 2)

	// Advance the leader's clock ahead of the followers (by more than
	// MaxOffset but less than the leader lease) and execute a command.
	manuals[0].Increment(int64(500 * time.Millisecond))
	incArgs := incrementArgs([]byte("a"), 5)
	ts := clocks[0].Now()
	if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
		t.Fatal(err)
	}

	// Wait for that command to execute on all the followers.
	util.SucceedsSoon(t, func() error {
		values := []int64{}
		for _, eng := range mtc.engines {
			val, _, err := engine.MVCCGet(eng, roachpb.Key("a"), clocks[0].Now(), true, nil)
			if err != nil {
				return err
			}
			values = append(values, mustGetInt(val))
		}
		if !reflect.DeepEqual(values, []int64{5, 5, 5}) {
			return util.Errorf("expected (5, 5, 5), got %v", values)
		}
		return nil
	})

	// Verify that all the followers have accepted the clock update from
	// node 0 even though it comes from outside the usual max offset.
	now := clocks[0].Now()
	for i, clock := range clocks {
		// Only compare the WallTimes: it's normal for clock 0 to be a few logical ticks ahead.
		if clock.Now().WallTime < now.WallTime {
			t.Errorf("clock %d is behind clock 0: %s vs %s", i, clock.Now(), now)
		}
	}
}
// TestTxnCoordSenderMultipleTxns verifies correct operation with
// multiple outstanding transactions.
func TestTxnCoordSenderMultipleTxns(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn1 := newTxn(s.Clock, roachpb.Key("a"))
	txn2 := newTxn(s.Clock, roachpb.Key("b"))

	put1, h := createPutRequest(roachpb.Key("a"), []byte("value"), txn1)
	if _, err := client.SendWrappedWith(s.Sender, nil, h, put1); err != nil {
		t.Fatal(err)
	}
	put2, h := createPutRequest(roachpb.Key("b"), []byte("value"), txn2)
	if _, err := client.SendWrappedWith(s.Sender, nil, h, put2); err != nil {
		t.Fatal(err)
	}

	if len(s.Sender.txns) != 2 {
		t.Errorf("expected length of transactions map to be 2; got %d", len(s.Sender.txns))
	}
}
// TestTxnCoordSenderAddRequest verifies that adding a request creates
// transaction metadata, and that adding multiple requests with the same
// transaction ID updates the last update timestamp.
func TestTxnCoordSenderAddRequest(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn := newTxn(s.Clock, roachpb.Key("a"))
	put, h := createPutRequest(roachpb.Key("a"), []byte("value"), txn)

	// Put request will create a new transaction.
	reply, err := client.SendWrappedWith(s.Sender, nil, h, put)
	if err != nil {
		t.Fatal(err)
	}
	txnMeta, ok := s.Sender.txns[string(txn.ID)]
	if !ok {
		t.Fatal("expected a transaction to be created on coordinator")
	}
	if !reply.Header().Txn.Writing {
		t.Fatal("response Txn is not marked as writing")
	}
	ts := atomic.LoadInt64(&txnMeta.lastUpdateNanos)

	// Advance time and send another put request. Lock the coordinator
	// to prevent a data race.
	s.Sender.Lock()
	s.Manual.Set(1)
	s.Sender.Unlock()
	h.Txn.Writing = true
	if _, err := client.SendWrappedWith(s.Sender, nil, h, put); err != nil {
		t.Fatal(err)
	}
	if len(s.Sender.txns) != 1 {
		t.Errorf("expected length of transactions map to be 1; got %d", len(s.Sender.txns))
	}
	txnMeta = s.Sender.txns[string(txn.ID)]
	if lu := atomic.LoadInt64(&txnMeta.lastUpdateNanos); ts >= lu || lu != s.Manual.UnixNano() {
		t.Errorf("expected last update time to advance; got %d", lu)
	}
}
// TestOwnNodeCertain verifies that a transaction sent through the DistSender
// has the local node's observed timestamp populated from the transaction's
// OrigTimestamp.
func TestOwnNodeCertain(t *testing.T) {
	defer leaktest.AfterTest(t)()
	g, s := makeTestGossip(t)
	defer s()
	const expNodeID = 42
	nd := &roachpb.NodeDescriptor{
		NodeID:  expNodeID,
		Address: util.MakeUnresolvedAddr("tcp", "foobar:1234"),
	}
	g.ResetNodeID(nd.NodeID)
	if err := g.SetNodeDescriptor(nd); err != nil {
		t.Fatal(err)
	}
	if err := g.AddInfoProto(gossip.MakeNodeIDKey(expNodeID), nd, time.Hour); err != nil {
		t.Fatal(err)
	}

	act := make(map[roachpb.NodeID]roachpb.Timestamp)
	var testFn rpcSendFn = func(_ SendOptions, _ ReplicaSlice, ba roachpb.BatchRequest, _ *rpc.Context) (*roachpb.BatchResponse, error) {
		for k, v := range ba.Txn.ObservedTimestamps {
			act[k] = v
		}
		return ba.CreateReply(), nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(_ roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
			return []roachpb.RangeDescriptor{testRangeDescriptor}, nil
		}),
	}
	expTS := roachpb.ZeroTimestamp.Add(1, 2)
	ds := NewDistSender(ctx, g)
	v := roachpb.MakeValueFromString("value")
	put := roachpb.NewPut(roachpb.Key("a"), v)
	if _, err := client.SendWrappedWith(ds, nil, roachpb.Header{
		// MaxTimestamp is set very high so that all uncertainty updates have
		// effect.
		Txn: &roachpb.Transaction{OrigTimestamp: expTS, MaxTimestamp: roachpb.MaxTimestamp},
	}, put); err != nil {
		t.Fatalf("put encountered error: %s", err)
	}
	exp := map[roachpb.NodeID]roachpb.Timestamp{
		expNodeID: expTS,
	}
	if !reflect.DeepEqual(exp, act) {
		t.Fatalf("wanted %v, got %v", exp, act)
	}
}
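// mockRangeDescriptorDB, used above and in the merge test at the end of this
// section, is assumed to be a function type that satisfies the DistSender's
// descriptor-lookup interface by delegating to itself. A sketch of a
// plausible shape, inferred from the conversions at the call sites; the
// actual type (and the interface it implements) may carry additional
// methods, such as a first-range lookup.
type mockRangeDescriptorDB func(roachpb.RKey, bool, bool) ([]roachpb.RangeDescriptor, *roachpb.Error)

// RangeLookup delegates the lookup to the wrapped function; the two bool
// parameters mirror the considerIntents/useReverseScan flags seen at the
// call sites above.
func (mdb mockRangeDescriptorDB) RangeLookup(key roachpb.RKey, considerIntents, useReverseScan bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
	return mdb(key, considerIntents, useReverseScan)
}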
// getTxn sends a HeartbeatTxn request against the transaction's record and
// returns the updated transaction info.
func getTxn(coord *TxnCoordSender, txn *roachpb.Transaction) (bool, *roachpb.Transaction, *roachpb.Error) {
	hb := &roachpb.HeartbeatTxnRequest{
		Span: roachpb.Span{
			Key: txn.Key,
		},
	}
	reply, pErr := client.SendWrappedWith(coord, nil, roachpb.Header{
		Txn: txn,
	}, hb)
	if pErr != nil {
		return false, nil, pErr
	}
	return true, reply.(*roachpb.HeartbeatTxnResponse).Txn, nil
}
// writeRandomDataToRange writes 100 random keys under keyPrefix to the
// given range.
func writeRandomDataToRange(t testing.TB, store *storage.Store, rangeID roachpb.RangeID, keyPrefix []byte) {
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := append([]byte(nil), keyPrefix...)
		key = append(key, randutil.RandBytes(src, int(src.Int31n(1<<7)))...)
		key = keys.MakeNonColumnKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			t.Fatal(pErr)
		}
	}
}
func TestStoreMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()

	store0 := mtc.stores[0]

	// Perform a split, which has special metrics handling.
	splitArgs := adminSplitArgs(roachpb.KeyMin, roachpb.Key("m"))
	if _, err := client.SendWrapped(rg1(store0), nil, &splitArgs); err != nil {
		t.Fatal(err)
	}

	// Verify range count is as expected.
	checkCounter(t, store0, "ranges", 2)

	// Verify all stats on store0 after split.
	verifyStats(t, store0)

	// Replicate the "right" range to the other stores.
	replica := store0.LookupReplica(roachpb.RKey("z"), nil)
	mtc.replicateRange(replica.RangeID, 1, 2)

	// Add some data to the "right" range.
	incArgs := incrementArgs([]byte("z"), 5)
	if _, err := client.SendWrappedWith(store0, nil, roachpb.Header{
		RangeID: replica.RangeID,
	}, &incArgs); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(roachpb.Key("z"), []int64{5, 5, 5})

	// Verify all stats on store0 after addition.
	verifyStats(t, store0)

	// Unreplicate range from the first store.
	mtc.unreplicateRange(replica.RangeID, 0)

	// Force GC scan on store 0 in order to fully remove the range.
	mtc.stores[0].ForceReplicaGCScanAndProcess()
	mtc.waitForValues(roachpb.Key("z"), []int64{0, 5, 5})

	// Verify range count is as expected.
	checkCounter(t, store0, "ranges", 1)

	// Verify all stats on store0 after range is removed.
	verifyStats(t, store0)
}
// TestOwnNodeCertain verifies that a transaction sent through the DistSender
// has its own node recorded in the CertainNodes list.
func TestOwnNodeCertain(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	const expNodeID = 42
	nd := &roachpb.NodeDescriptor{
		NodeID:  expNodeID,
		Address: util.MakeUnresolvedAddr("tcp", "foobar:1234"),
	}
	g.SetNodeID(nd.NodeID)
	if err := g.SetNodeDescriptor(nd); err != nil {
		t.Fatal(err)
	}
	if err := g.AddInfoProto(gossip.MakeNodeIDKey(expNodeID), nd, time.Hour); err != nil {
		t.Fatal(err)
	}

	var act roachpb.NodeList
	var testFn rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr, getArgs func(addr net.Addr) proto.Message, _ func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
		ba := getArgs(nil).(*roachpb.BatchRequest)
		for _, nodeID := range ba.Txn.CertainNodes.Nodes {
			act.Add(roachpb.NodeID(nodeID))
		}
		return []proto.Message{ba.CreateReply()}, nil
	}

	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(_ roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
			return []roachpb.RangeDescriptor{testRangeDescriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	v := roachpb.MakeValueFromString("value")
	put := roachpb.NewPut(roachpb.Key("a"), v)
	if _, err := client.SendWrappedWith(ds, nil, roachpb.Header{
		Txn: &roachpb.Transaction{},
	}, put); err != nil {
		t.Fatalf("put encountered error: %s", err)
	}
	if expNodes := []roachpb.NodeID{expNodeID}; !reflect.DeepEqual(act.Nodes, expNodes) {
		t.Fatalf("got %v, expected %v", act.Nodes, expNodes)
	}
}
// fillRange writes keys with the given prefix and associated values
// until bytes bytes have been written.
func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms engine.MVCCStats
		if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); err != nil {
			t.Fatal(err)
		}
	}
}
// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	initialTxn := newTxn(s.Clock, roachpb.Key("a"))
	put, h := createPutRequest(roachpb.Key("a"), []byte("value"), initialTxn)
	if reply, err := client.SendWrappedWith(s.Sender, nil, h, put); err != nil {
		t.Fatal(err)
	} else {
		*initialTxn = *reply.Header().Txn
	}

	// Verify 3 heartbeats.
	var heartbeatTS roachpb.Timestamp
	for i := 0; i < 3; i++ {
		if err := util.IsTrueWithin(func() bool {
			ok, txn, err := getTxn(s.Sender, initialTxn)
			if !ok || err != nil {
				return false
			}
			// Advance clock by 1ns.
			// Locking the TxnCoordSender to prevent a data race.
			s.Sender.Lock()
			s.Manual.Increment(1)
			s.Sender.Unlock()
			if heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return true
			}
			return false
		}, 50*time.Millisecond); err != nil {
			t.Error("expected initial heartbeat within 50ms")
		}
	}
}
// BenchmarkReplicaSnapshot measures the cost of generating a replica
// snapshot over roughly 32 MiB of written data.
func BenchmarkReplicaSnapshot(b *testing.B) {
	defer tracing.Disable()()
	defer config.TestingDisableTableSplits()()
	store, stopper, _ := createTestStore(b)
	// We want to manually control the size of the raft log.
	store.DisableRaftLogQueue(true)
	defer stopper.Stop()

	const rangeID = 1
	const keySize = 1 << 7   // 128 B
	const valSize = 1 << 10  // 1 KiB
	const snapSize = 1 << 25 // 32 MiB

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		b.Fatal(err)
	}

	src := rand.New(rand.NewSource(0))
	for i := 0; i < snapSize/(keySize+valSize); i++ {
		key := keys.MakeNonColumnKey(randutil.RandBytes(src, keySize))
		val := randutil.RandBytes(src, valSize)
		pArgs := putArgs(key, val)
		if _, pErr := client.SendWrappedWith(rep, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs); pErr != nil {
			b.Fatal(pErr)
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := rep.GetSnapshot(); err != nil {
			b.Fatal(err)
		}
	}
}
// TestStoreRangeMergeMetadataCleanup tests that all metadata of a
// subsumed range is cleaned up on merge.
func TestStoreRangeMergeMetadataCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	scan := func(f func(roachpb.KeyValue) (bool, error)) {
		if _, err := engine.MVCCIterate(store.Engine(), roachpb.KeyMin, roachpb.KeyMax, roachpb.ZeroTimestamp, true, nil, false, f); err != nil {
			t.Fatal(err)
		}
	}
	content := roachpb.Key("testing!")

	// Write some values left of the proposed split key.
	pArgs := putArgs([]byte("aaa"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Collect all the keys.
	preKeys := make(map[string]struct{})
	scan(func(kv roachpb.KeyValue) (bool, error) {
		preKeys[string(kv.Key)] = struct{}{}
		return false, nil
	})

	// Split the range.
	_, bDesc, err := createSplitRanges(store)
	if err != nil {
		t.Fatal(err)
	}

	// Write some values right of the split key.
	pArgs = putArgs([]byte("ccc"), content)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: bDesc.RangeID,
	}, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}

	// Collect all the keys again.
	postKeys := make(map[string]struct{})
	scan(func(kv roachpb.KeyValue) (bool, error) {
		postKeys[string(kv.Key)] = struct{}{}
		return false, nil
	})

	// Compute the new keys.
	for k := range preKeys {
		delete(postKeys, k)
	}

	// Keep only the subsumed range's local keys.
	localRangeKeyPrefix := string(keys.MakeRangeIDPrefix(bDesc.RangeID))
	for k := range postKeys {
		if !strings.HasPrefix(k, localRangeKeyPrefix) {
			delete(postKeys, k)
		}
	}

	if numKeys := len(postKeys); numKeys > 0 {
		var buf bytes.Buffer
		fmt.Fprintf(&buf, "%d keys were not cleaned up:\n", numKeys)
		for k := range postKeys {
			fmt.Fprintf(&buf, "%q\n", k)
		}
		t.Fatal(buf.String())
	}
}
// TestStoreRangeMergeWithData attempts to merge two colocated ranges,
// each containing data.
func TestStoreRangeMergeWithData(t *testing.T) {
	defer leaktest.AfterTest(t)
	content := roachpb.Key("testing!")

	store, stopper := createTestStore(t)
	defer stopper.Stop()

	aDesc, bDesc, err := createSplitRanges(store)
	if err != nil {
		t.Fatal(err)
	}

	// Write some values left and right of the proposed split key.
	pArgs := putArgs([]byte("aaa"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("ccc"), content)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: bDesc.RangeID,
	}, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Confirm the values are there.
	gArgs := getArgs([]byte("aaa"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	gArgs = getArgs([]byte("ccc"))
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: bDesc.RangeID,
	}, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}

	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}

	// Verify no intents remain on the range descriptor keys.
	for _, key := range []roachpb.Key{keys.RangeDescriptorKey(aDesc.StartKey), keys.RangeDescriptorKey(bDesc.StartKey)} {
		if _, _, err := engine.MVCCGet(store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Verify the merge by looking up keys from both ranges.
	rangeA := store.LookupReplica([]byte("a"), nil)
	rangeB := store.LookupReplica([]byte("c"), nil)
	rangeADesc := rangeA.Desc()
	rangeBDesc := rangeB.Desc()

	if !reflect.DeepEqual(rangeA, rangeB) {
		t.Fatalf("ranges were not merged %+v=%+v", rangeADesc, rangeBDesc)
	}
	if !bytes.Equal(rangeADesc.StartKey, roachpb.RKeyMin) {
		t.Fatalf("the start key is not equal to KeyMin %q=%q", rangeADesc.StartKey, roachpb.RKeyMin)
	}
	if !bytes.Equal(rangeADesc.EndKey, roachpb.RKeyMax) {
		t.Fatalf("the end key is not equal to KeyMax %q=%q", rangeADesc.EndKey, roachpb.RKeyMax)
	}

	// Try to get the values after the merge.
	gArgs = getArgs([]byte("aaa"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	gArgs = getArgs([]byte("ccc"))
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rangeB.RangeID,
	}, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}

	// Put new values after the merge on both sides.
	pArgs = putArgs([]byte("aaaa"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("cccc"), content)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rangeB.RangeID,
	}, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Try to get the newly placed values.
	gArgs = getArgs([]byte("aaaa"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	gArgs = getArgs([]byte("cccc"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
}
// TestMultiRangeMergeStaleDescriptor simulates the situation in which the
// DistSender executes a multi-range scan which encounters the stale descriptor
// of a range which has since incorporated its right neighbor by means of a
// merge. It is verified that the DistSender scans the correct key range exactly
// once.
func TestMultiRangeMergeStaleDescriptor(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	// Assume we have two ranges, [a-b) and [b-KeyMax).
	merged := false
	// The stale first range descriptor which is unaware of the merge.
	var firstRange = roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKey("a"),
		EndKey:   roachpb.RKey("b"),
		Replicas: []roachpb.ReplicaDescriptor{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// The merged descriptor, which will be looked up after having processed
	// the stale range [a,b).
	var mergedRange = roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKey("a"),
		EndKey:   roachpb.RKeyMax,
		Replicas: []roachpb.ReplicaDescriptor{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// Assume we have two key-value pairs, a=1 and c=2.
	existingKVs := []roachpb.KeyValue{
		{Key: roachpb.Key("a"), Value: roachpb.MakeValueFromString("1")},
		{Key: roachpb.Key("c"), Value: roachpb.MakeValueFromString("2")},
	}
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
		if method != "Node.Batch" {
			t.Fatalf("unexpected method: %s", method)
		}
		ba := getArgs(testAddress).(*roachpb.BatchRequest)
		rs := keys.Range(*ba)
		batchReply := getReply().(*roachpb.BatchResponse)
		reply := &roachpb.ScanResponse{}
		batchReply.Add(reply)
		results := []roachpb.KeyValue{}
		for _, curKV := range existingKVs {
			if rs.Key.Less(keys.Addr(curKV.Key).Next()) && keys.Addr(curKV.Key).Less(rs.EndKey) {
				results = append(results, curKV)
			}
		}
		reply.Rows = results
		return []proto.Message{batchReply}, nil
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(key roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
			if !merged {
				// Assume a range merge operation happened.
				merged = true
				return []roachpb.RangeDescriptor{firstRange}, nil
			}
			return []roachpb.RangeDescriptor{mergedRange}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), 10).(*roachpb.ScanRequest)
	// Set the Txn info to avoid an OpRequiresTxnError.
	reply, err := client.SendWrappedWith(ds, nil, roachpb.Header{
		Txn: &roachpb.Transaction{},
	}, scan)
	if err != nil {
		t.Fatalf("scan encountered error: %s", err)
	}
	sr := reply.(*roachpb.ScanResponse)
	if !reflect.DeepEqual(existingKVs, sr.Rows) {
		t.Fatalf("expected %v, got %v", existingKVs, sr.Rows)
	}
}