// TestStoreRangeMergeStats starts by splitting a range, then writing random data
// to both sides of the split. It then merges the ranges and verifies the merged
// range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Disable the split queue so the merge target isn't re-split underneath us.
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, manual := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()

	// Split the range.
	aDesc, bDesc, err := createSplitRanges(store)
	if err != nil {
		t.Fatal(err)
	}

	// Write some values left and right of the proposed split key.
	writeRandomDataToRange(t, store, aDesc.RangeID, []byte("aaa"))
	writeRandomDataToRange(t, store, bDesc.RangeID, []byte("ccc"))

	// Get the range stats for both ranges now that we have data.
	// A snapshot gives a consistent view of the engine for both reads.
	var msA, msB enginepb.MVCCStats
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	if err := engine.MVCCGetRangeStats(context.Background(), snap, aDesc.RangeID, &msA); err != nil {
		t.Fatal(err)
	}
	if err := engine.MVCCGetRangeStats(context.Background(), snap, bDesc.RangeID, &msB); err != nil {
		t.Fatal(err)
	}

	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, aDesc, msA, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range A's stats before split: %v", err)
	}
	if err := verifyRecomputedStats(snap, bDesc, msB, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range B's stats before split: %v", err)
	}

	// Advance the manual clock so the merge carries a later timestamp than
	// the writes above.
	manual.Increment(100)

	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}
	rngMerged := store.LookupReplica(aDesc.StartKey, nil)

	// Get the range stats for the merged range and verify.
	snap = store.Engine().NewSnapshot()
	defer snap.Close()
	var msMerged enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rngMerged.RangeID, &msMerged); err != nil {
		t.Fatal(err)
	}

	// Merged stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, rngMerged.Desc(), msMerged, manual.UnixNano()); err != nil {
		t.Errorf("failed to verify range's stats after merge: %v", err)
	}
}
// TestRangeStatsMerge verifies that MergeMVCCStats ages existing stats
// forward to the incoming stats' timestamp and accumulates the incoming
// values, both in the engine-persisted copy and the in-memory copy.
func TestRangeStatsMerge(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{
		bootstrapMode: bootstrapRangeOnly,
	}
	tc.Start(t)
	defer tc.Stop()

	// A fully-populated stats object stamped at 1s.
	initialMS := engine.MVCCStats{
		LiveBytes:       1,
		KeyBytes:        2,
		ValBytes:        2,
		IntentBytes:     1,
		LiveCount:       1,
		KeyCount:        1,
		ValCount:        1,
		IntentCount:     1,
		IntentAge:       1,
		GCBytesAge:      1,
		LastUpdateNanos: 1 * 1E9,
	}

	ms := initialMS
	ms.AgeTo(10 * 1E9)
	if err := tc.rng.stats.MergeMVCCStats(tc.engine, ms); err != nil {
		t.Fatal(err)
	}
	// Expect those stats to be forwarded to 10s and added to an empty stats
	// object (the latter of which is a noop). Everything will be equal but
	// the intent and gc bytes age, which will have increased.
	expMS := ms
	expMS.AgeTo(10 * 1E9)

	// NOTE(review): this reads the persisted stats into initialMS but the
	// comparison below is between ms and expMS — looks like the fetched
	// value is only reused as the base for the second merge; confirm intent.
	if err := engine.MVCCGetRangeStats(context.Background(), tc.engine, 1, &initialMS); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(ms, expMS) {
		t.Errorf("expected:\n%+v\ngot:\n%+v\n", expMS, ms)
	}

	// Merge again, but with 10 more s and an incoming stat which has been
	// created at 20s. This needs to age the existing stat and add the new one.
	ms = initialMS
	ms.LastUpdateNanos = 20 * 1E9
	if err := tc.rng.stats.MergeMVCCStats(tc.engine, ms); err != nil {
		t.Fatal(err)
	}
	expMS.Add(ms)
	if err := engine.MVCCGetRangeStats(context.Background(), tc.engine, 1, &ms); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(ms, expMS) {
		t.Errorf("expected %+v; got %+v", expMS, ms)
	}
	// The cached in-memory copy must match the persisted copy as well.
	if !reflect.DeepEqual(tc.rng.stats.mvccStats, expMS) {
		t.Errorf("expected %+v; got %+v", expMS, tc.rng.stats.mvccStats)
	}
}
func loadMVCCStats(reader engine.Reader, rangeID roachpb.RangeID) (enginepb.MVCCStats, error) { var ms enginepb.MVCCStats if err := engine.MVCCGetRangeStats(context.Background(), reader, rangeID, &ms); err != nil { return enginepb.MVCCStats{}, err } return ms, nil }
// newRangeStats creates a new instance of rangeStats using the // provided engine and range. In particular, the values of last update // nanos and intent count are pulled from the engine and cached in the // struct for efficient processing (i.e. each new merge does not // require the values to be read from the engine). func newRangeStats(rangeID roachpb.RangeID, e engine.Reader) (*rangeStats, error) { rs := &rangeStats{rangeID: rangeID} if err := engine.MVCCGetRangeStats(context.Background(), e, rangeID, &rs.mvccStats); err != nil { return nil, err } return rs, nil }
// fillRange writes keys with the given prefix and associated values // until bytes bytes have been written or the given range has split. func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) { src := rand.New(rand.NewSource(0)) for { var ms engine.MVCCStats if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil { t.Fatal(err) } keyBytes, valBytes := ms.KeyBytes, ms.ValBytes if keyBytes+valBytes >= bytes { return } key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...) key = keys.MakeNonColumnKey(key) val := randutil.RandBytes(src, int(src.Int31n(1<<8))) pArgs := putArgs(key, val) _, err := client.SendWrappedWith(store, nil, roachpb.Header{ RangeID: rangeID, }, &pArgs) // When the split occurs in the background, our writes may start failing. // We know we can stop writing when this happens. if _, ok := err.(*roachpb.RangeKeyMismatchError); ok { return } else if err != nil { t.Fatal(err) } } }
// newRangeStats creates a new instance of rangeStats using the // provided engine and range. In particular, the values of last update // nanos and intent count are pulled from the engine and cached in the // struct for efficient processing (i.e. each new merge does not // require the values to be read from the engine). func newRangeStats(rangeID roachpb.RangeID, e engine.Engine) (*rangeStats, error) { rs := &rangeStats{rangeID: rangeID} if err := engine.MVCCGetRangeStats(e, rangeID, &rs.MVCCStats); err != nil { return nil, err } return rs, nil }
func verifyRangeStats(eng engine.Engine, rangeID roachpb.RangeID, expMS engine.MVCCStats) error { var ms engine.MVCCStats if err := engine.MVCCGetRangeStats(eng, rangeID, &ms); err != nil { return err } // Clear system counts as these are expected to vary. ms.SysBytes, ms.SysCount = 0, 0 if !reflect.DeepEqual(expMS, ms) { return util.Errorf("expected stats %+v; got %+v", expMS, ms) } return nil }
// fillRange writes keys with the given prefix and associated values // until bytes bytes have been written. func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) { src := rand.New(rand.NewSource(0)) for { var ms engine.MVCCStats if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil { t.Fatal(err) } keyBytes, valBytes := ms.KeyBytes, ms.ValBytes if keyBytes+valBytes >= bytes { return } key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...) val := randutil.RandBytes(src, int(src.Int31n(1<<8))) pArgs := putArgs(key, val, rangeID, store.StoreID()) if _, err := client.SendWrapped(store, nil, &pArgs); err != nil { t.Fatal(err) } } }
// fillRange writes keys with the given prefix and associated values // until bytes bytes have been written. func fillRange(store *storage.Store, rangeID proto.RangeID, prefix proto.Key, bytes int64, t *testing.T) { src := rand.New(rand.NewSource(0)) for { var ms engine.MVCCStats if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil { t.Fatal(err) } keyBytes, valBytes := ms.KeyBytes, ms.ValBytes if keyBytes+valBytes >= bytes { return } key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...) val := randutil.RandBytes(src, int(src.Int31n(1<<8))) pArgs := putArgs(key, val, rangeID, store.StoreID()) pArgs.Timestamp = store.Clock().Now() if _, err := store.ExecuteCmd(context.Background(), &pArgs); err != nil { t.Fatal(err) } } }
// TestStoreRangeSplitStats starts by splitting the system keys from user-space
// keys and verifying that the user space side of the split (which is empty),
// has all zeros for stats. It then writes random data to the user space side,
// splits it halfway and verifies the two splits have stats exactly equaling
// the pre-split.
func TestStoreRangeSplitStats(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	// Split the range after the last table data key.
	keyPrefix := keys.MakeTablePrefix(keys.MaxReservedDescID + 1)
	keyPrefix = keys.MakeNonColumnKey(keyPrefix)
	args := adminSplitArgs(roachpb.KeyMin, keyPrefix)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}
	// Verify empty range has empty stats.
	rng := store.LookupReplica(keyPrefix, nil)
	// NOTE that this value is expected to change over time, depending on what
	// we store in the sys-local keyspace. Update it accordingly for this test.
	if err := verifyRangeStats(store.Engine(), rng.Desc().RangeID, engine.MVCCStats{}); err != nil {
		t.Fatal(err)
	}

	// Write random data. A fixed seed keeps the test deterministic.
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := append([]byte(nil), keyPrefix...)
		key = append(key, randutil.RandBytes(src, int(src.Int31n(1<<7)))...)
		key = keys.MakeNonColumnKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
			RangeID: rng.Desc().RangeID,
		}, &pArgs); err != nil {
			t.Fatal(err)
		}
	}
	// Get the range stats now that we have data.
	var ms engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rng.Desc().RangeID, &ms); err != nil {
		t.Fatal(err)
	}

	// Split the range at approximate halfway point ("Z" in string "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz").
	midKey := append([]byte(nil), keyPrefix...)
	midKey = append(midKey, []byte("Z")...)
	midKey = keys.MakeNonColumnKey(midKey)
	args = adminSplitArgs(keyPrefix, midKey)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rng.Desc().RangeID,
	}, &args); err != nil {
		t.Fatal(err)
	}

	// Read stats of both post-split halves.
	var msLeft, msRight engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rng.Desc().RangeID, &msLeft); err != nil {
		t.Fatal(err)
	}
	rngRight := store.LookupReplica(midKey, nil)
	if err := engine.MVCCGetRangeStats(store.Engine(), rngRight.Desc().RangeID, &msRight); err != nil {
		t.Fatal(err)
	}

	// The stats should be exactly equal when added.
	expMS := engine.MVCCStats{
		LiveBytes:   msLeft.LiveBytes + msRight.LiveBytes,
		KeyBytes:    msLeft.KeyBytes + msRight.KeyBytes,
		ValBytes:    msLeft.ValBytes + msRight.ValBytes,
		IntentBytes: msLeft.IntentBytes + msRight.IntentBytes,
		LiveCount:   msLeft.LiveCount + msRight.LiveCount,
		KeyCount:    msLeft.KeyCount + msRight.KeyCount,
		ValCount:    msLeft.ValCount + msRight.ValCount,
		IntentCount: msLeft.IntentCount + msRight.IntentCount,
	}
	// Zero system counters before comparing; they are not expected to be
	// preserved exactly across the split.
	ms.SysBytes, ms.SysCount = 0, 0
	if !reflect.DeepEqual(expMS, ms) {
		t.Errorf("expected left and right ranges to equal original: %+v + %+v != %+v", msLeft, msRight, ms)
	}
}
// TestStoreRangeSplitStats starts by splitting the system keys from user-space
// keys and verifying that the user space side of the split (which is empty),
// has all zeros for stats. It then writes random data to the user space side,
// splits it halfway and verifies the two splits have stats exactly equaling
// the pre-split.
func TestStoreRangeSplitStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Disable the split queue so only our explicit splits happen.
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, manual := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()

	// Split the range after the last table data key.
	keyPrefix := keys.MakeTablePrefix(keys.MaxReservedDescID + 1)
	keyPrefix = keys.MakeRowSentinelKey(keyPrefix)
	args := adminSplitArgs(roachpb.KeyMin, keyPrefix)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
		t.Fatal(pErr)
	}
	// Verify empty range has empty stats.
	rng := store.LookupReplica(keyPrefix, nil)
	// NOTE that this value is expected to change over time, depending on what
	// we store in the sys-local keyspace. Update it accordingly for this test.
	empty := enginepb.MVCCStats{LastUpdateNanos: manual.UnixNano()}
	if err := verifyRangeStats(store.Engine(), rng.RangeID, empty); err != nil {
		t.Fatal(err)
	}

	// Write random data.
	midKey := writeRandomDataToRange(t, store, rng.RangeID, keyPrefix)

	// Get the range stats now that we have data.
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	var ms enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID, &ms); err != nil {
		t.Fatal(err)
	}
	if err := verifyRecomputedStats(snap, rng.Desc(), ms, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range's stats before split: %v", err)
	}
	// The replica's cached stats must match what is on disk.
	if inMemMS := rng.GetMVCCStats(); inMemMS != ms {
		t.Fatalf("in-memory and on-disk diverged:\n%+v\n!=\n%+v", inMemMS, ms)
	}

	// Advance the clock so the split gets a fresh timestamp.
	manual.Increment(100)

	// Split the range at approximate halfway point.
	args = adminSplitArgs(keyPrefix, midKey)
	if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rng.RangeID,
	}, &args); pErr != nil {
		t.Fatal(pErr)
	}

	// Read stats of both post-split halves from a fresh snapshot.
	snap = store.Engine().NewSnapshot()
	defer snap.Close()
	var msLeft, msRight enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID, &msLeft); err != nil {
		t.Fatal(err)
	}
	rngRight := store.LookupReplica(midKey, nil)
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rngRight.RangeID, &msRight); err != nil {
		t.Fatal(err)
	}

	// The stats should be exactly equal when added.
	expMS := enginepb.MVCCStats{
		LiveBytes:   msLeft.LiveBytes + msRight.LiveBytes,
		KeyBytes:    msLeft.KeyBytes + msRight.KeyBytes,
		ValBytes:    msLeft.ValBytes + msRight.ValBytes,
		IntentBytes: msLeft.IntentBytes + msRight.IntentBytes,
		LiveCount:   msLeft.LiveCount + msRight.LiveCount,
		KeyCount:    msLeft.KeyCount + msRight.KeyCount,
		ValCount:    msLeft.ValCount + msRight.ValCount,
		IntentCount: msLeft.IntentCount + msRight.IntentCount,
	}
	// Zero the fields that are not expected to survive the split exactly.
	ms.SysBytes, ms.SysCount = 0, 0
	ms.LastUpdateNanos = 0
	if expMS != ms {
		t.Errorf("expected left plus right ranges to equal original, but\n %+v\n+\n %+v\n!=\n %+v", msLeft, msRight, ms)
	}

	// Stats should both have the new timestamp.
	now := manual.UnixNano()
	if lTs := msLeft.LastUpdateNanos; lTs != now {
		t.Errorf("expected left range stats to have new timestamp, want %d, got %d", now, lTs)
	}
	if rTs := msRight.LastUpdateNanos; rTs != now {
		t.Errorf("expected right range stats to have new timestamp, want %d, got %d", now, rTs)
	}

	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, rng.Desc(), msLeft, now); err != nil {
		t.Fatalf("failed to verify left range's stats after split: %v", err)
	}
	if err := verifyRecomputedStats(snap, rngRight.Desc(), msRight, now); err != nil {
		t.Fatalf("failed to verify right range's stats after split: %v", err)
	}
}
// TestStoreRangeSplitStatsWithMerges starts by splitting the system keys from
// user-space keys and verifying that the user space side of the split (which is empty),
// has all zeros for stats. It then issues a number of Merge requests to the user
// space side, simulating TimeSeries data. Finally, the test splits the user space
// side halfway and verifies the stats on either side of the split are equal to a
// recomputation.
//
// Note that unlike TestStoreRangeSplitStats, we do not check if the two halves of the
// split's stats are equal to the pre-split stats when added, because this will not be
// true of ranges populated with Merge requests. The reason for this is that Merge
// requests' impact on MVCCStats are only estimated. See updateStatsOnMerge.
func TestStoreRangeSplitStatsWithMerges(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Disable the split queue so only our explicit split happens.
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, manual := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()

	// Split the range after the last table data key.
	keyPrefix := keys.MakeTablePrefix(keys.MaxReservedDescID + 1)
	keyPrefix = keys.MakeRowSentinelKey(keyPrefix)
	args := adminSplitArgs(roachpb.KeyMin, keyPrefix)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
		t.Fatal(pErr)
	}
	// Verify empty range has empty stats.
	rng := store.LookupReplica(keyPrefix, nil)
	// NOTE that this value is expected to change over time, depending on what
	// we store in the sys-local keyspace. Update it accordingly for this test.
	empty := enginepb.MVCCStats{LastUpdateNanos: manual.UnixNano()}
	if err := verifyRangeStats(store.Engine(), rng.RangeID, empty); err != nil {
		t.Fatal(err)
	}

	// Write random TimeSeries data.
	midKey := writeRandomTimeSeriesDataToRange(t, store, rng.RangeID, keyPrefix)
	// Advance the clock so the split gets a fresh timestamp.
	manual.Increment(100)

	// Split the range at approximate halfway point.
	args = adminSplitArgs(keyPrefix, midKey)
	if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rng.RangeID,
	}, &args); pErr != nil {
		t.Fatal(pErr)
	}

	// Read stats of both post-split halves from a consistent snapshot.
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	var msLeft, msRight enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID, &msLeft); err != nil {
		t.Fatal(err)
	}
	rngRight := store.LookupReplica(midKey, nil)
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rngRight.RangeID, &msRight); err != nil {
		t.Fatal(err)
	}

	// Stats should both have the new timestamp.
	now := manual.UnixNano()
	if lTs := msLeft.LastUpdateNanos; lTs != now {
		t.Errorf("expected left range stats to have new timestamp, want %d, got %d", now, lTs)
	}
	if rTs := msRight.LastUpdateNanos; rTs != now {
		t.Errorf("expected right range stats to have new timestamp, want %d, got %d", now, rTs)
	}

	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, rng.Desc(), msLeft, now); err != nil {
		t.Fatalf("failed to verify left range's stats after split: %v", err)
	}
	if err := verifyRecomputedStats(snap, rngRight.Desc(), msRight, now); err != nil {
		t.Fatalf("failed to verify right range's stats after split: %v", err)
	}
}
// TestStoreRangeSplit executes a split of a range and verifies that the
// resulting ranges respond to the right key ranges and that their stats
// and sequence cache have been properly accounted for.
func TestStoreRangeSplitIdempotency(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	content := roachpb.Key("asdvb")

	// First, write some values left and right of the proposed split key.
	pArgs := putArgs([]byte("c"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("x"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Increments are a good way of testing the sequence cache. Up here, we
	// address them to the original range, then later to the one that contains
	// the key.
	txn := roachpb.NewTransaction("test", []byte("c"), 10, roachpb.SERIALIZABLE, store.Clock().Now(), 0)
	lIncArgs := incrementArgs([]byte("apoptosis"), 100)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: txn,
	}, &lIncArgs); err != nil {
		t.Fatal(err)
	}
	rIncArgs := incrementArgs([]byte("wobble"), 10)
	// Bump the sequence so the second increment is a distinct command in the
	// same transaction.
	txn.Sequence++
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: txn,
	}, &rIncArgs); err != nil {
		t.Fatal(err)
	}

	// Get the original stats for key and value bytes.
	var ms engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
		t.Fatal(err)
	}
	keyBytes, valBytes := ms.KeyBytes, ms.ValBytes

	// Split the range.
	args := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}

	// Verify no intents remains on range descriptor keys.
	for _, key := range []roachpb.Key{keys.RangeDescriptorKey(roachpb.RKeyMin), keys.RangeDescriptorKey(keys.Addr(splitKey))} {
		if _, _, err := engine.MVCCGet(store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
			t.Fatal(err)
		}
	}

	// The two replicas must exactly partition the keyspace at splitKey.
	rng := store.LookupReplica(roachpb.RKeyMin, nil)
	newRng := store.LookupReplica([]byte("m"), nil)
	if !bytes.Equal(newRng.Desc().StartKey, splitKey) || !bytes.Equal(splitKey, rng.Desc().EndKey) {
		t.Errorf("ranges mismatched, wanted %q=%q=%q", newRng.Desc().StartKey, splitKey, rng.Desc().EndKey)
	}
	if !bytes.Equal(newRng.Desc().EndKey, roachpb.RKeyMax) || !bytes.Equal(rng.Desc().StartKey, roachpb.RKeyMin) {
		t.Errorf("new ranges do not cover KeyMin-KeyMax, but only %q-%q", rng.Desc().StartKey, newRng.Desc().EndKey)
	}

	// Try to get values from both left and right of where the split happened.
	gArgs := getArgs([]byte("c"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	gArgs = getArgs([]byte("x"))
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: newRng.Desc().RangeID,
	}, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}

	// Send out an increment request copied from above (same txn/sequence)
	// which remains in the old range. The replay must be rejected.
	_, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: txn,
	}, &lIncArgs)
	if _, ok := err.(*roachpb.TransactionRetryError); !ok {
		t.Fatalf("unexpected sequence cache miss: %v", err)
	}

	// Send out the same increment copied from above (same txn/sequence), but
	// now to the newly created range (which should hold that key).
	_, err = client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: newRng.Desc().RangeID,
		Txn:     txn,
	}, &rIncArgs)
	if _, ok := err.(*roachpb.TransactionRetryError); !ok {
		t.Fatalf("unexpected sequence cache miss: %v", err)
	}

	// Compare stats of split ranges to ensure they are non zero and
	// exceed the original range when summed.
	var left, right engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &left); err != nil {
		t.Fatal(err)
	}
	lKeyBytes, lValBytes := left.KeyBytes, left.ValBytes
	if err := engine.MVCCGetRangeStats(store.Engine(), newRng.Desc().RangeID, &right); err != nil {
		t.Fatal(err)
	}
	rKeyBytes, rValBytes := right.KeyBytes, right.ValBytes
	if lKeyBytes == 0 || rKeyBytes == 0 {
		t.Errorf("expected non-zero key bytes; got %d, %d", lKeyBytes, rKeyBytes)
	}
	if lValBytes == 0 || rValBytes == 0 {
		t.Errorf("expected non-zero val bytes; got %d, %d", lValBytes, rValBytes)
	}
	if lKeyBytes+rKeyBytes <= keyBytes {
		t.Errorf("left + right key bytes don't match; %d + %d <= %d", lKeyBytes, rKeyBytes, keyBytes)
	}
	if lValBytes+rValBytes <= valBytes {
		t.Errorf("left + right val bytes don't match; %d + %d <= %d", lValBytes, rValBytes, valBytes)
	}
}
// TestStoreRangeSplitStats starts by splitting the system keys from user-space
// keys and verifying that the user space side of the split (which is empty),
// has all zeros for stats. It then writes random data to the user space side,
// splits it halfway and verifies the two splits have stats exactly equaling
// the pre-split.
func TestStoreRangeSplitStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Disable automatic table splits so only our explicit splits happen.
	defer config.TestingDisableTableSplits()()
	store, stopper, manual := createTestStore(t)
	defer stopper.Stop()

	// Split the range after the last table data key.
	keyPrefix := keys.MakeTablePrefix(keys.MaxReservedDescID + 1)
	keyPrefix = keys.MakeNonColumnKey(keyPrefix)
	args := adminSplitArgs(roachpb.KeyMin, keyPrefix)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
		t.Fatal(pErr)
	}
	// Verify empty range has empty stats.
	rng := store.LookupReplica(keyPrefix, nil)
	// NOTE that this value is expected to change over time, depending on what
	// we store in the sys-local keyspace. Update it accordingly for this test.
	if err := verifyRangeStats(store.Engine(), rng.RangeID, engine.MVCCStats{LastUpdateNanos: manual.UnixNano()}); err != nil {
		t.Fatal(err)
	}

	// Write random data.
	writeRandomDataToRange(t, store, rng.RangeID, keyPrefix)

	// Get the range stats now that we have data.
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	var ms engine.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID, &ms); err != nil {
		t.Fatal(err)
	}
	if err := verifyRecomputedStats(snap, rng.Desc(), ms, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range's stats before split: %v", err)
	}

	// Advance the clock so the split gets a fresh timestamp.
	manual.Increment(100)

	// Split the range at approximate halfway point ("Z" in string "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz").
	midKey := append([]byte(nil), keyPrefix...)
	midKey = append(midKey, []byte("Z")...)
	midKey = keys.MakeNonColumnKey(midKey)
	args = adminSplitArgs(keyPrefix, midKey)
	if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rng.RangeID,
	}, &args); pErr != nil {
		t.Fatal(pErr)
	}

	// Read stats of both post-split halves from a fresh snapshot.
	snap = store.Engine().NewSnapshot()
	defer snap.Close()
	var msLeft, msRight engine.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID, &msLeft); err != nil {
		t.Fatal(err)
	}
	rngRight := store.LookupReplica(midKey, nil)
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rngRight.RangeID, &msRight); err != nil {
		t.Fatal(err)
	}

	// The stats should be exactly equal when added.
	expMS := engine.MVCCStats{
		LiveBytes:   msLeft.LiveBytes + msRight.LiveBytes,
		KeyBytes:    msLeft.KeyBytes + msRight.KeyBytes,
		ValBytes:    msLeft.ValBytes + msRight.ValBytes,
		IntentBytes: msLeft.IntentBytes + msRight.IntentBytes,
		LiveCount:   msLeft.LiveCount + msRight.LiveCount,
		KeyCount:    msLeft.KeyCount + msRight.KeyCount,
		ValCount:    msLeft.ValCount + msRight.ValCount,
		IntentCount: msLeft.IntentCount + msRight.IntentCount,
	}
	// Zero the fields that are not expected to survive the split exactly.
	ms.SysBytes, ms.SysCount = 0, 0
	ms.LastUpdateNanos = 0
	if expMS != ms {
		t.Errorf("expected left and right ranges to equal original: %+v + %+v != %+v", msLeft, msRight, ms)
	}

	// Stats should both have the new timestamp.
	now := manual.UnixNano()
	if lTs := msLeft.LastUpdateNanos; lTs != now {
		t.Errorf("expected left range stats to have new timestamp, want %d, got %d", now, lTs)
	}
	if rTs := msRight.LastUpdateNanos; rTs != now {
		t.Errorf("expected right range stats to have new timestamp, want %d, got %d", now, rTs)
	}

	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, rng.Desc(), msLeft, now); err != nil {
		t.Fatalf("failed to verify left range's stats after split: %v", err)
	}
	if err := verifyRecomputedStats(snap, rngRight.Desc(), msRight, now); err != nil {
		t.Fatalf("failed to verify right range's stats after split: %v", err)
	}
}
// TestRangeStatsMerge verifies that MergeMVCCStats persists merged stats with
// the supplied timestamp and, on a subsequent merge, ages the existing stats
// forward before adding the incoming values.
func TestRangeStatsMerge(t *testing.T) {
	defer leaktest.AfterTest(t)
	tc := testContext{
		bootstrapMode: bootstrapRangeOnly,
	}
	tc.Start(t)
	defer tc.Stop()

	// A fully-populated stats object stamped at 1s.
	ms := engine.MVCCStats{
		LiveBytes:       1,
		KeyBytes:        2,
		ValBytes:        2,
		IntentBytes:     1,
		LiveCount:       1,
		KeyCount:        1,
		ValCount:        1,
		IntentCount:     1,
		IntentAge:       1,
		GCBytesAge:      1,
		LastUpdateNanos: 1 * 1E9,
	}
	if err := tc.rng.stats.MergeMVCCStats(tc.engine, &ms, 10*1E9); err != nil {
		t.Fatal(err)
	}
	// Same values, but the update timestamp has been forwarded to 10s.
	expMS := engine.MVCCStats{
		LiveBytes:       1,
		KeyBytes:        2,
		ValBytes:        2,
		IntentBytes:     1,
		LiveCount:       1,
		KeyCount:        1,
		ValCount:        1,
		IntentCount:     1,
		IntentAge:       1,
		GCBytesAge:      1,
		LastUpdateNanos: 10 * 1E9,
	}
	if err := engine.MVCCGetRangeStats(tc.engine, 1, &ms); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(ms, expMS) {
		t.Errorf("expected %+v; got %+v", expMS, ms)
	}

	// Merge again, but 10 seconds later (at 20s). The existing stats are aged
	// by those 10 seconds (growing the intent and GC byte ages) and then the
	// incoming values are added.
	if err := tc.rng.stats.MergeMVCCStats(tc.engine, &ms, 20*1E9); err != nil {
		t.Fatal(err)
	}
	expMS = engine.MVCCStats{
		LiveBytes:       2,
		KeyBytes:        4,
		ValBytes:        4,
		IntentBytes:     2,
		LiveCount:       2,
		KeyCount:        2,
		ValCount:        2,
		IntentCount:     2,
		IntentAge:       12,
		GCBytesAge:      32,
		LastUpdateNanos: 20 * 1E9,
	}
	if err := engine.MVCCGetRangeStats(tc.engine, 1, &ms); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(ms, expMS) {
		t.Errorf("expected %+v; got %+v", expMS, ms)
	}
	// The cached in-memory copy must match the persisted copy as well.
	if !reflect.DeepEqual(tc.rng.stats.MVCCStats, expMS) {
		t.Errorf("expected %+v; got %+v", expMS, tc.rng.stats.MVCCStats)
	}
}
// TestStoreRangeSplit executes a split of a range and verifies that the
// resulting ranges respond to the right key ranges and that their stats
// and response caches have been properly accounted for.
func TestStoreRangeSplit(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.RKey("m")
	content := roachpb.Key("asdvb")

	// First, write some values left and right of the proposed split key.
	pArgs := putArgs([]byte("c"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("x"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Increments are a good way of testing the response cache. Up here, we
	// address them to the original range, then later to the one that contains
	// the key.
	lCmdID := roachpb.ClientCmdID{WallTime: 123, Random: 423}
	lIncArgs := incrementArgs([]byte("apoptosis"), 100)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		CmdID: lCmdID,
	}, &lIncArgs); err != nil {
		t.Fatal(err)
	}
	rIncArgs := incrementArgs([]byte("wobble"), 10)
	rCmdID := roachpb.ClientCmdID{WallTime: 12, Random: 42}
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		CmdID: rCmdID,
	}, &rIncArgs); err != nil {
		t.Fatal(err)
	}

	// Get the original stats for key and value bytes.
	var ms engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
		t.Fatal(err)
	}
	keyBytes, valBytes := ms.KeyBytes, ms.ValBytes

	// Split the range.
	args := adminSplitArgs(roachpb.RKeyMin, splitKey)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}

	// Verify no intents remains on range descriptor keys.
	for _, key := range []roachpb.Key{keys.RangeDescriptorKey(roachpb.RKeyMin), keys.RangeDescriptorKey(splitKey)} {
		if _, _, err := engine.MVCCGet(store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
			t.Fatal(err)
		}
	}

	// The two replicas must exactly partition the keyspace at splitKey.
	rng := store.LookupReplica(roachpb.RKeyMin, nil)
	newRng := store.LookupReplica([]byte("m"), nil)
	if !bytes.Equal(newRng.Desc().StartKey, splitKey) || !bytes.Equal(splitKey, rng.Desc().EndKey) {
		t.Errorf("ranges mismatched, wanted %q=%q=%q", newRng.Desc().StartKey, splitKey, rng.Desc().EndKey)
	}
	if !bytes.Equal(newRng.Desc().EndKey, roachpb.RKeyMax) || !bytes.Equal(rng.Desc().StartKey, roachpb.RKeyMin) {
		t.Errorf("new ranges do not cover KeyMin-KeyMax, but only %q-%q", rng.Desc().StartKey, newRng.Desc().EndKey)
	}

	// Try to get values from both left and right of where the split happened.
	gArgs := getArgs([]byte("c"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if gReply := reply.(*roachpb.GetResponse); !bytes.Equal(gReply.Value.GetRawBytes(), content) {
		t.Fatalf("actual value %q did not match expected value %q", gReply.Value.GetRawBytes(), content)
	}
	gArgs = getArgs([]byte("x"))
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: newRng.Desc().RangeID,
	}, &gArgs); err != nil {
		t.Fatal(err)
	} else if gReply := reply.(*roachpb.GetResponse); !bytes.Equal(gReply.Value.GetRawBytes(), content) {
		t.Fatalf("actual value %q did not match expected value %q", gReply.Value.GetRawBytes(), content)
	}

	// Send out an increment request copied from above (same ClientCmdID) which
	// remains in the old range. The cached response must be replayed.
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		CmdID: lCmdID,
	}, &lIncArgs); err != nil {
		t.Fatal(err)
	} else if lIncReply := reply.(*roachpb.IncrementResponse); lIncReply.NewValue != 100 {
		t.Errorf("response cache broken in old range, expected %d but got %d", lIncArgs.Increment, lIncReply.NewValue)
	}

	// Send out the same increment copied from above (same ClientCmdID), but
	// now to the newly created range (which should hold that key).
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: newRng.Desc().RangeID,
		CmdID:   rCmdID,
	}, &rIncArgs); err != nil {
		t.Fatal(err)
	} else if rIncReply := reply.(*roachpb.IncrementResponse); rIncReply.NewValue != 10 {
		t.Errorf("response cache not copied correctly to new range, expected %d but got %d", rIncArgs.Increment, rIncReply.NewValue)
	}

	// Compare stats of split ranges to ensure they are non zero and
	// exceed the original range when summed.
	var left, right engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &left); err != nil {
		t.Fatal(err)
	}
	lKeyBytes, lValBytes := left.KeyBytes, left.ValBytes
	if err := engine.MVCCGetRangeStats(store.Engine(), newRng.Desc().RangeID, &right); err != nil {
		t.Fatal(err)
	}
	rKeyBytes, rValBytes := right.KeyBytes, right.ValBytes
	if lKeyBytes == 0 || rKeyBytes == 0 {
		t.Errorf("expected non-zero key bytes; got %d, %d", lKeyBytes, rKeyBytes)
	}
	if lValBytes == 0 || rValBytes == 0 {
		t.Errorf("expected non-zero val bytes; got %d, %d", lValBytes, rValBytes)
	}
	if lKeyBytes+rKeyBytes <= keyBytes {
		t.Errorf("left + right key bytes don't match; %d + %d <= %d", lKeyBytes, rKeyBytes, keyBytes)
	}
	if lValBytes+rValBytes <= valBytes {
		t.Errorf("left + right val bytes don't match; %d + %d <= %d", lValBytes, rValBytes, valBytes)
	}
}
// TestStoreRangeSplit executes a split of a range and verifies that the // resulting ranges respond to the right key ranges and that their stats // and response caches have been properly accounted for. func TestStoreRangeSplit(t *testing.T) { defer leaktest.AfterTest(t) store, stopper := createTestStore(t) defer stopper.Stop() raftID := proto.RaftID(1) splitKey := proto.Key("m") content := proto.Key("asdvb") // First, write some values left and right of the proposed split key. pArgs, pReply := putArgs([]byte("c"), content, raftID, store.StoreID()) if err := store.ExecuteCmd(context.Background(), proto.Call{Args: pArgs, Reply: pReply}); err != nil { t.Fatal(err) } pArgs, pReply = putArgs([]byte("x"), content, raftID, store.StoreID()) if err := store.ExecuteCmd(context.Background(), proto.Call{Args: pArgs, Reply: pReply}); err != nil { t.Fatal(err) } // Increments are a good way of testing the response cache. Up here, we // address them to the original range, then later to the one that contains // the key. lIncArgs, lIncReply := incrementArgs([]byte("apoptosis"), 100, raftID, store.StoreID()) lIncArgs.CmdID = proto.ClientCmdID{WallTime: 123, Random: 423} if err := store.ExecuteCmd(context.Background(), proto.Call{Args: lIncArgs, Reply: lIncReply}); err != nil { t.Fatal(err) } rIncArgs, rIncReply := incrementArgs([]byte("wobble"), 10, raftID, store.StoreID()) rIncArgs.CmdID = proto.ClientCmdID{WallTime: 12, Random: 42} if err := store.ExecuteCmd(context.Background(), proto.Call{Args: rIncArgs, Reply: rIncReply}); err != nil { t.Fatal(err) } // Get the original stats for key and value bytes. var ms engine.MVCCStats if err := engine.MVCCGetRangeStats(store.Engine(), raftID, &ms); err != nil { t.Fatal(err) } keyBytes, valBytes := ms.KeyBytes, ms.ValBytes // Split the range. 
args, reply := adminSplitArgs(proto.KeyMin, splitKey, 1, store.StoreID()) if err := store.ExecuteCmd(context.Background(), proto.Call{Args: args, Reply: reply}); err != nil { t.Fatal(err) } // Verify no intents remains on range descriptor keys. for _, key := range []proto.Key{keys.RangeDescriptorKey(proto.KeyMin), keys.RangeDescriptorKey(splitKey)} { if _, _, err := engine.MVCCGet(store.Engine(), key, store.Clock().Now(), true, nil); err != nil { t.Fatal(err) } } rng := store.LookupRange(proto.KeyMin, nil) newRng := store.LookupRange([]byte("m"), nil) if !bytes.Equal(newRng.Desc().StartKey, splitKey) || !bytes.Equal(splitKey, rng.Desc().EndKey) { t.Errorf("ranges mismatched, wanted %q=%q=%q", newRng.Desc().StartKey, splitKey, rng.Desc().EndKey) } if !bytes.Equal(newRng.Desc().EndKey, proto.KeyMax) || !bytes.Equal(rng.Desc().StartKey, proto.KeyMin) { t.Errorf("new ranges do not cover KeyMin-KeyMax, but only %q-%q", rng.Desc().StartKey, newRng.Desc().EndKey) } // Try to get values from both left and right of where the split happened. gArgs, gReply := getArgs([]byte("c"), raftID, store.StoreID()) if err := store.ExecuteCmd(context.Background(), proto.Call{Args: gArgs, Reply: gReply}); err != nil || !bytes.Equal(gReply.Value.Bytes, content) { t.Fatal(err) } gArgs, gReply = getArgs([]byte("x"), newRng.Desc().RaftID, store.StoreID()) if err := store.ExecuteCmd(context.Background(), proto.Call{Args: gArgs, Reply: gReply}); err != nil || !bytes.Equal(gReply.Value.Bytes, content) { t.Fatal(err) } // Send out an increment request copied from above (same ClientCmdID) which // remains in the old range. 
lIncReply = &proto.IncrementResponse{} if err := store.ExecuteCmd(context.Background(), proto.Call{Args: lIncArgs, Reply: lIncReply}); err != nil { t.Fatal(err) } if lIncReply.NewValue != 100 { t.Errorf("response cache broken in old range, expected %d but got %d", lIncArgs.Increment, lIncReply.NewValue) } // Send out the same increment copied from above (same ClientCmdID), but // now to the newly created range (which should hold that key). rIncArgs.RequestHeader.RaftID = newRng.Desc().RaftID rIncReply = &proto.IncrementResponse{} if err := store.ExecuteCmd(context.Background(), proto.Call{Args: rIncArgs, Reply: rIncReply}); err != nil { t.Fatal(err) } if rIncReply.NewValue != 10 { t.Errorf("response cache not copied correctly to new range, expected %d but got %d", rIncArgs.Increment, rIncReply.NewValue) } // Compare stats of split ranges to ensure they are non ero and // exceed the original range when summed. var left, right engine.MVCCStats if err := engine.MVCCGetRangeStats(store.Engine(), raftID, &left); err != nil { t.Fatal(err) } lKeyBytes, lValBytes := left.KeyBytes, left.ValBytes if err := engine.MVCCGetRangeStats(store.Engine(), newRng.Desc().RaftID, &right); err != nil { t.Fatal(err) } rKeyBytes, rValBytes := right.KeyBytes, right.ValBytes if lKeyBytes == 0 || rKeyBytes == 0 { t.Errorf("expected non-zero key bytes; got %d, %d", lKeyBytes, rKeyBytes) } if lValBytes == 0 || rValBytes == 0 { t.Errorf("expected non-zero val bytes; got %d, %d", lValBytes, rValBytes) } if lKeyBytes+rKeyBytes <= keyBytes { t.Errorf("left + right key bytes don't match; %d + %d <= %d", lKeyBytes, rKeyBytes, keyBytes) } if lValBytes+rValBytes <= valBytes { t.Errorf("left + right val bytes don't match; %d + %d <= %d", lValBytes, rValBytes, valBytes) } }
// TestStoreRangeSplitStats starts by splitting the system keys from user-space
// keys and verifying that the user space side of the split (which is empty),
// has all zeros for stats. It then writes random data to the user space side,
// splits it halfway and verifies the two splits have stats exactly equaling
// the pre-split.
func TestStoreRangeSplitStats(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	// Split the range at the first user key.
	args, reply := adminSplitArgs(proto.KeyMin, proto.Key("\x01"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: args, Reply: reply}); err != nil {
		t.Fatal(err)
	}
	// Verify empty range has empty stats.
	rng := store.LookupRange(proto.Key("\x01"), nil)
	// NOTE that this value is expected to change over time, depending on what
	// we store in the sys-local keyspace. Update it accordingly for this test.
	if err := verifyRangeStats(store.Engine(), rng.Desc().RaftID, engine.MVCCStats{}); err != nil {
		t.Fatal(err)
	}

	// Write random data: 100 keys/values of random (bounded) length, written
	// with a fixed seed so the test is deterministic across runs.
	src := rand.New(rand.NewSource(0))
	for i := 0; i < 100; i++ {
		key := randutil.RandBytes(src, int(src.Int31n(1<<7)))
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs, pReply := putArgs(key, val, rng.Desc().RaftID, store.StoreID())
		pArgs.Timestamp = store.Clock().Now()
		if err := store.ExecuteCmd(context.Background(), proto.Call{Args: pArgs, Reply: pReply}); err != nil {
			t.Fatal(err)
		}
	}
	// Get the range stats now that we have data; this snapshot is the
	// baseline the post-split halves must sum back up to.
	var ms engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rng.Desc().RaftID, &ms); err != nil {
		t.Fatal(err)
	}

	// Split the range at approximate halfway point: "Z" sorts roughly in the
	// middle of the keyspace populated above (uppercase before lowercase).
	args, reply = adminSplitArgs(proto.Key("\x01"), proto.Key("Z"), rng.Desc().RaftID, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: args, Reply: reply}); err != nil {
		t.Fatal(err)
	}
	// Collect stats from both halves: the left half keeps the original
	// RaftID; the right half is looked up by a key at/after the split point.
	var msLeft, msRight engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rng.Desc().RaftID, &msLeft); err != nil {
		t.Fatal(err)
	}
	rngRight := store.LookupRange(proto.Key("Z"), nil)
	if err := engine.MVCCGetRangeStats(store.Engine(), rngRight.Desc().RaftID, &msRight); err != nil {
		t.Fatal(err)
	}

	// The stats should be exactly equal when added.
	expMS := engine.MVCCStats{
		LiveBytes:   msLeft.LiveBytes + msRight.LiveBytes,
		KeyBytes:    msLeft.KeyBytes + msRight.KeyBytes,
		ValBytes:    msLeft.ValBytes + msRight.ValBytes,
		IntentBytes: msLeft.IntentBytes + msRight.IntentBytes,
		LiveCount:   msLeft.LiveCount + msRight.LiveCount,
		KeyCount:    msLeft.KeyCount + msRight.KeyCount,
		ValCount:    msLeft.ValCount + msRight.ValCount,
		IntentCount: msLeft.IntentCount + msRight.IntentCount,
	}
	// expMS leaves SysBytes/SysCount at zero, so clear them on the baseline
	// too before the deep comparison — only user-data stats are compared.
	// NOTE(review): presumably the split itself changes sys-local bookkeeping,
	// making the sys counters incomparable across the split; verify.
	ms.SysBytes, ms.SysCount = 0, 0
	if !reflect.DeepEqual(expMS, ms) {
		t.Errorf("expected left and right ranges to equal original: %+v + %+v != %+v", msLeft, msRight, ms)
	}
}