func TestRangeIterForward(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	cfg := DistSenderConfig{
		RangeDescriptorDB: alphaRangeDescriptorDB,
	}
	ctx := context.Background()
	g := makeGossip(t, stopper)
	ds := NewDistSender(cfg, g)

	ri := NewRangeIterator(ds, false /* reverse */)
	i := 0
	for ri.Seek(ctx, roachpb.RKey(roachpb.KeyMin)); ri.Valid(); ri.Next(ctx) {
		if !reflect.DeepEqual(alphaRangeDescriptors[i], ri.Desc()) {
			t.Fatalf("%d: expected %v; got %v", i, alphaRangeDescriptors[i], ri.Desc())
		}
		i++
		if !ri.NeedAnother(roachpb.RSpan{
			EndKey: roachpb.RKey([]byte("z")),
		}) {
			break
		}
	}
}
func TestRangeIterReverse(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	g, clock := makeGossip(t, stopper)
	ds := NewDistSender(DistSenderConfig{
		Clock:             clock,
		RangeDescriptorDB: alphaRangeDescriptorDB,
	}, g)

	ctx := context.Background()
	ri := NewRangeIterator(ds, true /* reverse */)
	i := len(alphaRangeDescriptors) - 1
	for ri.Seek(ctx, roachpb.RKey([]byte{'z'})); ri.Valid(); ri.Next(ctx) {
		if !reflect.DeepEqual(alphaRangeDescriptors[i], ri.Desc()) {
			t.Fatalf("%d: expected %v; got %v", i, alphaRangeDescriptors[i], ri.Desc())
		}
		i--
		if !ri.NeedAnother(roachpb.RSpan{
			Key: roachpb.RKey(roachpb.KeyMin),
		}) {
			break
		}
	}
}
// Addr returns the address for the key, used to look up the range containing
// the key. In the normal case, this is simply the key's value. However, for
// local keys, such as transaction records or range-spanning binary tree node
// pointers, the address is the inner encoded key, with the local key prefix
// and the suffix and optional detail removed. This address unwrapping is
// performed repeatedly in the case of doubly-local keys. In this way, local
// keys address to the same range as non-local keys, but are stored separately
// so that they don't collide with user-space or global system keys.
//
// However, not all local keys are addressable in the global map. Only range
// local keys incorporating a range key (start key or transaction key) are
// addressable (e.g. range metadata and txn records). Range local keys
// incorporating the Range ID are not (e.g. abort cache entries, and range
// stats).
func Addr(k roachpb.Key) (roachpb.RKey, error) {
	if !IsLocal(k) {
		return roachpb.RKey(k), nil
	}

	for {
		if bytes.HasPrefix(k, localStorePrefix) {
			return nil, errors.Errorf("store-local key %q is not addressable", k)
		}
		if bytes.HasPrefix(k, LocalRangeIDPrefix) {
			return nil, errors.Errorf("local range ID key %q is not addressable", k)
		}
		if !bytes.HasPrefix(k, LocalRangePrefix) {
			return nil, errors.Errorf("local key %q malformed; should contain prefix %q", k, LocalRangePrefix)
		}
		k = k[len(LocalRangePrefix):]
		var err error
		// Decode the encoded key, throw away the suffix and detail.
		if _, k, err = encoding.DecodeBytesAscending(k, nil); err != nil {
			return nil, err
		}
		if !bytes.HasPrefix(k, localPrefix) {
			break
		}
	}
	return roachpb.RKey(k), nil
}
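// The sketch below is illustrative only and not part of the original source;
// the helper name addrExampleSketch is made up for this example. It shows the
// two behaviors described above: a range-local key such as a range descriptor
// key addresses back to the key it embeds, while a range-ID-local key (here
// the Raft applied index key) is rejected as not addressable. This mirrors the
// expectations in TestKeyAddress and TestKeyAddressError further below.
func addrExampleSketch() {
	descKey := RangeDescriptorKey(roachpb.RKey("foo"))
	if addr, err := Addr(descKey); err == nil {
		fmt.Printf("%q -> %q\n", descKey, addr) // addresses to "foo"
	}
	if _, err := Addr(RaftAppliedIndexKey(1)); err != nil {
		fmt.Printf("not addressable: %v\n", err) // "local range ID key ... is not addressable"
	}
}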
func TestRangeIterForward(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	g, clock := makeGossip(t, stopper)
	ds := NewDistSender(DistSenderConfig{
		Clock:             clock,
		RangeDescriptorDB: alphaRangeDescriptorDB,
	}, g)

	ctx := context.Background()
	ri := NewRangeIterator(ds)
	i := 0
	span := roachpb.RSpan{
		Key:    roachpb.RKey(roachpb.KeyMin),
		EndKey: roachpb.RKey([]byte("z")),
	}
	for ri.Seek(ctx, span.Key, Ascending); ri.Valid(); ri.Next(ctx) {
		if !reflect.DeepEqual(alphaRangeDescriptors[i], ri.Desc()) {
			t.Fatalf("%d: expected %v; got %v", i, alphaRangeDescriptors[i], ri.Desc())
		}
		i++
		if !ri.NeedAnother(span) {
			break
		}
	}
}
func TestRangeIterSeekReverse(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	g, clock := makeGossip(t, stopper)
	ds := NewDistSender(DistSenderConfig{
		Clock:             clock,
		RangeDescriptorDB: alphaRangeDescriptorDB,
	}, g)

	ctx := context.Background()
	ri := NewRangeIterator(ds)
	i := len(alphaRangeDescriptors) - 1
	for ri.Seek(ctx, roachpb.RKey([]byte{'z'}), Descending); ri.Valid(); {
		if !reflect.DeepEqual(alphaRangeDescriptors[i], ri.Desc()) {
			t.Fatalf("%d: expected %v; got %v", i, alphaRangeDescriptors[i], ri.Desc())
		}
		i -= 2 // Skip every other range.
		nextByte := ri.Desc().StartKey[0] - 1
		if nextByte <= byte('a') {
			break
		}
		seekKey := roachpb.RKey([]byte{nextByte})
		ri.Seek(ctx, seekKey, Descending)
		if !ri.Key().Equal(seekKey) {
			t.Errorf("expected iterator key %s; got %s", seekKey, ri.Key())
		}
	}
}
func TestRangeIterSeekForward(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	g, clock := makeGossip(t, stopper)
	ds := NewDistSender(DistSenderConfig{
		Clock:             clock,
		RangeDescriptorDB: alphaRangeDescriptorDB,
	}, g)

	ctx := context.Background()
	ri := NewRangeIterator(ds, false /* reverse */)
	i := 0
	for ri.Seek(ctx, roachpb.RKey(roachpb.KeyMin)); ri.Valid(); {
		if !reflect.DeepEqual(alphaRangeDescriptors[i], ri.Desc()) {
			t.Fatalf("%d: expected %v; got %v", i, alphaRangeDescriptors[i], ri.Desc())
		}
		i += 2 // Skip even ranges.
		nextByte := ri.Desc().EndKey[0] + 1
		if nextByte >= byte('z') {
			break
		}
		seekKey := roachpb.RKey([]byte{nextByte})
		ri.Seek(ctx, seekKey)
		if !ri.Key().Equal(seekKey) {
			t.Errorf("expected iterator key %s; got %s", seekKey, ri.Key())
		}
	}
}
func TestComputeStatsForKeySpan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := &multiTestContext{}
	defer mtc.Stop()
	mtc.Start(t, 3)

	// Create a number of ranges using splits.
	splitKeys := []string{"a", "c", "e", "g", "i"}
	for _, k := range splitKeys {
		key := []byte(k)
		repl := mtc.stores[0].LookupReplica(key, roachpb.RKeyMin)
		args := adminSplitArgs(key, key)
		header := roachpb.Header{
			RangeID: repl.RangeID,
		}
		if _, err := client.SendWrappedWith(context.Background(), mtc.stores[0], header, args); err != nil {
			t.Fatal(err)
		}
	}

	// Wait for splits to finish.
	testutils.SucceedsSoon(t, func() error {
		repl := mtc.stores[0].LookupReplica(roachpb.RKey("z"), nil)
		if actualRSpan := repl.Desc().RSpan(); !actualRSpan.Key.Equal(roachpb.RKey("i")) {
			return errors.Errorf("expected range %s to begin at key 'i'", repl)
		}
		return nil
	})

	// Create some keys across the ranges.
	incKeys := []string{"b", "bb", "bbb", "d", "dd", "h"}
	for _, k := range incKeys {
		if _, err := mtc.dbs[0].Inc(context.TODO(), []byte(k), 5); err != nil {
			t.Fatal(err)
		}
	}

	// Verify stats across different spans.
	for _, tcase := range []struct {
		startKey       string
		endKey         string
		expectedRanges int
		expectedKeys   int64
	}{
		{"a", "i", 4, 6},
		{"a", "c", 1, 3},
		{"b", "e", 2, 5},
		{"e", "i", 2, 1},
	} {
		start, end := tcase.startKey, tcase.endKey
		stats, count := mtc.stores[0].ComputeStatsForKeySpan(
			roachpb.RKey(start), roachpb.RKey(end))
		if a, e := count, tcase.expectedRanges; a != e {
			t.Errorf("Expected %d ranges in span [%s - %s], found %d", e, start, end, a)
		}
		if a, e := stats.LiveCount, tcase.expectedKeys; a != e {
			t.Errorf("Expected %d keys in span [%s - %s], found %d", e, start, end, a)
		}
	}
}
// newTestRangeSet creates a new range set containing count ranges.
func newTestRangeSet(count int, t *testing.T) *testRangeSet {
	rs := &testRangeSet{replicasByKey: btree.New(64 /* degree */)}
	for i := 0; i < count; i++ {
		desc := &roachpb.RangeDescriptor{
			RangeID:  roachpb.RangeID(i),
			StartKey: roachpb.RKey(fmt.Sprintf("%03d", i)),
			EndKey:   roachpb.RKey(fmt.Sprintf("%03d", i+1)),
		}
		// Initialize the range stat so the scanner can use it.
		repl := &Replica{
			RangeID: desc.RangeID,
		}
		repl.mu.TimedMutex = syncutil.MakeTimedMutex(defaultMuLogger)
		repl.cmdQMu.TimedMutex = syncutil.MakeTimedMutex(defaultMuLogger)
		repl.mu.state.Stats = enginepb.MVCCStats{
			KeyBytes:  1,
			ValBytes:  2,
			KeyCount:  1,
			LiveCount: 1,
		}
		if err := repl.setDesc(desc); err != nil {
			t.Fatal(err)
		}
		if exRngItem := rs.replicasByKey.ReplaceOrInsert(repl); exRngItem != nil {
			t.Fatalf("failed to insert range %s", repl)
		}
	}
	return rs
}
// TestUpdateRangeAddressingSplitMeta1 verifies that it's an error to
// attempt to update range addressing records that would allow a split
// of meta1 records.
func TestUpdateRangeAddressingSplitMeta1(t *testing.T) {
	defer leaktest.AfterTest(t)()
	left := &roachpb.RangeDescriptor{StartKey: roachpb.RKeyMin, EndKey: meta1Key(roachpb.RKey("a"))}
	right := &roachpb.RangeDescriptor{StartKey: meta1Key(roachpb.RKey("a")), EndKey: roachpb.RKeyMax}
	if err := splitRangeAddressing(&client.Batch{}, left, right); err == nil {
		t.Error("expected failure trying to update addressing records for meta1 split")
	}
}
// TestBatchPrevNext tests batch.{Prev,Next}.
func TestBatchPrevNext(t *testing.T) {
	defer leaktest.AfterTest(t)()
	loc := func(s string) string {
		return string(keys.RangeDescriptorKey(roachpb.RKey(s)))
	}
	span := func(strs ...string) []roachpb.Span {
		var r []roachpb.Span
		for i, str := range strs {
			if i%2 == 0 {
				r = append(r, roachpb.Span{Key: roachpb.Key(str)})
			} else {
				r[len(r)-1].EndKey = roachpb.Key(str)
			}
		}
		return r
	}
	max, min := string(roachpb.RKeyMax), string(roachpb.RKeyMin)
	abc := span("a", "", "b", "", "c", "")
	testCases := []struct {
		spans             []roachpb.Span
		key, expFW, expBW string
	}{
		{spans: span("a", "c", "b", ""), key: "b", expFW: "b", expBW: "b"},
		{spans: span("a", "c", "b", ""), key: "a", expFW: "a", expBW: "a"},
		{spans: span("a", "c", "d", ""), key: "c", expFW: "d", expBW: "c"},
		{spans: span("a", "c\x00", "d", ""), key: "c", expFW: "c", expBW: "c"},
		{spans: abc, key: "b", expFW: "b", expBW: "b"},
		{spans: abc, key: "b\x00", expFW: "c", expBW: "b\x00"},
		{spans: abc, key: "bb", expFW: "c", expBW: "b"},
		{spans: span(), key: "whatevs", expFW: max, expBW: min},
		{spans: span(loc("a"), loc("c")), key: "c", expFW: "c", expBW: "c"},
		{spans: span(loc("a"), loc("c")), key: "c\x00", expFW: max, expBW: "c\x00"},
	}

	for i, test := range testCases {
		var ba roachpb.BatchRequest
		for _, span := range test.spans {
			args := &roachpb.ScanRequest{}
			args.Key, args.EndKey = span.Key, span.EndKey
			ba.Add(args)
		}
		if next, err := next(ba, roachpb.RKey(test.key)); err != nil {
			t.Errorf("%d: %v", i, err)
		} else if !bytes.Equal(next, roachpb.Key(test.expFW)) {
			t.Errorf("%d: next: expected %q, got %q", i, test.expFW, next)
		}
		if prev, err := prev(ba, roachpb.RKey(test.key)); err != nil {
			t.Errorf("%d: %v", i, err)
		} else if !bytes.Equal(prev, roachpb.Key(test.expBW)) {
			t.Errorf("%d: prev: expected %q, got %q", i, test.expBW, prev)
		}
	}
}
func TestGCQueueLastProcessedTimestamps(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	stopper := stop.NewStopper()
	defer stopper.Stop()
	tc.Start(t, stopper)

	// Create two last processed times both at the range start key and
	// also at some mid-point key in order to simulate a merge.
	lastProcessedVals := []struct {
		key   roachpb.Key
		expGC bool
	}{
		{keys.QueueLastProcessedKey(roachpb.RKeyMin, "timeSeriesMaintenance"), false},
		{keys.QueueLastProcessedKey(roachpb.RKeyMin, "replica consistency checker"), false},
		{keys.QueueLastProcessedKey(roachpb.RKey("a"), "timeSeriesMaintenance"), true},
		{keys.QueueLastProcessedKey(roachpb.RKey("b"), "replica consistency checker"), true},
	}

	ts := tc.Clock().Now()
	for _, lpv := range lastProcessedVals {
		if err := engine.MVCCPutProto(context.Background(), tc.engine, nil, lpv.key, hlc.ZeroTimestamp, nil, &ts); err != nil {
			t.Fatal(err)
		}
	}

	cfg, ok := tc.gossip.GetSystemConfig()
	if !ok {
		t.Fatal("config not set")
	}

	// Process through a scan queue.
	gcQ := newGCQueue(tc.store, tc.gossip)
	if err := gcQ.process(context.Background(), tc.repl, cfg); err != nil {
		t.Fatal(err)
	}

	// Verify GC.
	testutils.SucceedsSoon(t, func() error {
		for _, lpv := range lastProcessedVals {
			ok, err := engine.MVCCGetProto(context.Background(), tc.engine, lpv.key, hlc.ZeroTimestamp, true, nil, &ts)
			if err != nil {
				return err
			}
			if ok == lpv.expGC {
				return errors.Errorf("expected GC of %s: %t; got %t", lpv.key, lpv.expGC, ok)
			}
		}
		return nil
	})
}
// TransactionKey returns a transaction record key based on the provided
// base key and transaction ID. The base key is encoded in order to
// guarantee that all transaction records for a range sort together.
func TransactionKey(key roachpb.Key, txnID *uuid.UUID) roachpb.Key {
	rk, err := Addr(key)
	if err != nil {
		panic(err)
	}
	return MakeRangeKey(rk, localTransactionSuffix, roachpb.RKey(txnID.GetBytes()))
}
func TestBatchPrevNextWithNoop(t *testing.T) {
	defer leaktest.AfterTest(t)()
	leftKey := roachpb.Key("a")
	middleKey := roachpb.RKey("b")
	rightKey := roachpb.Key("c")

	var ba roachpb.BatchRequest
	ba.Add(&roachpb.GetRequest{Span: roachpb.Span{Key: leftKey}})
	ba.Add(&roachpb.NoopRequest{})
	ba.Add(&roachpb.GetRequest{Span: roachpb.Span{Key: rightKey}})

	t.Run("prev", func(t *testing.T) {
		rk, err := prev(ba, middleKey)
		if err != nil {
			t.Fatal(err)
		}
		if !rk.Equal(leftKey) {
			t.Errorf("got %s, expected %s", rk, leftKey)
		}
	})
	t.Run("next", func(t *testing.T) {
		rk, err := next(ba, middleKey)
		if err != nil {
			t.Fatal(err)
		}
		if !rk.Equal(rightKey) {
			t.Errorf("got %s, expected %s", rk, rightKey)
		}
	})
}
func TestObjectIDForKey(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		key     roachpb.RKey
		success bool
		id      uint32
	}{
		// Before the structured span.
		{roachpb.RKeyMin, false, 0},

		// Boundaries of structured span.
		{roachpb.RKeyMax, false, 0},

		// Valid, even if there are things after the ID.
		{testutils.MakeKey(keys.MakeTablePrefix(42), roachpb.RKey("\xff")), true, 42},
		{keys.MakeTablePrefix(0), true, 0},
		{keys.MakeTablePrefix(999), true, 999},
	}

	for tcNum, tc := range testCases {
		id, success := config.ObjectIDForKey(tc.key)
		if success != tc.success {
			t.Errorf("#%d: expected success=%t", tcNum, tc.success)
			continue
		}
		if id != tc.id {
			t.Errorf("#%d: expected id=%d, got %d", tcNum, tc.id, id)
		}
	}
}
func localRangeIDKeyParse(input string) (remainder string, key roachpb.Key) {
	var rangeID int64
	var err error
	input = mustShiftSlash(input)
	if endPos := strings.Index(input, "/"); endPos > 0 {
		rangeID, err = strconv.ParseInt(input[:endPos], 10, 64)
		if err != nil {
			panic(err)
		}
		input = input[endPos:]
	} else {
		panic(errors.Errorf("illegal RangeID: %q", input))
	}
	input = mustShiftSlash(input)
	var infix string
	infix, input = mustShift(input)
	var replicated bool
	switch {
	case bytes.Equal(localRangeIDUnreplicatedInfix, []byte(infix)):
	case bytes.Equal(localRangeIDReplicatedInfix, []byte(infix)):
		replicated = true
	default:
		panic(errors.Errorf("invalid infix: %q", infix))
	}
	input = mustShiftSlash(input)
	// Get the suffix.
	var suffix roachpb.RKey
	for _, s := range rangeIDSuffixDict {
		if strings.HasPrefix(input, s.name) {
			input = input[len(s.name):]
			if s.psFunc != nil {
				remainder, key = s.psFunc(roachpb.RangeID(rangeID), input)
				return
			}
			suffix = roachpb.RKey(s.suffix)
			break
		}
	}
	maker := MakeRangeIDUnreplicatedKey
	if replicated {
		maker = MakeRangeIDReplicatedKey
	}
	if suffix != nil {
		if input != "" {
			panic(&errUglifyUnsupported{errors.New("nontrivial detail")})
		}
		var detail roachpb.RKey
		// TODO(tschottdorf): can't do this, init cycle:
		// detail, err := UglyPrint(input)
		// if err != nil {
		// 	return "", nil, err
		// }
		remainder = ""
		key = maker(roachpb.RangeID(rangeID), suffix, detail)
		return
	}
	panic(&errUglifyUnsupported{errors.New("unhandled general range key")})
}
func TestUserKey(t *testing.T) {
	testCases := []struct {
		key, expKey roachpb.RKey
	}{
		{
			key:    roachpb.RKeyMin,
			expKey: roachpb.RKey(Meta1Prefix),
		},
		{
			key:    roachpb.RKey("\x02\x04zonefoo"),
			expKey: roachpb.RKey("\x03\x04zonefoo"),
		},
		{
			key:    roachpb.RKey("\x03foo"),
			expKey: roachpb.RKey("foo"),
		},
		{
			key:    roachpb.RKey("foo"),
			expKey: roachpb.RKey("foo"),
		},
	}
	for i, test := range testCases {
		result := UserKey(test.key)
		if !bytes.Equal(result, test.expKey) {
			t.Errorf("%d: user key for %q doesn't match expected %q (got %q)", i, test.key, test.expKey, result)
		}
	}
}
func init() {
	lastKey := roachpb.RKey(keys.MinKey)
	for i, b := 0, byte('a'); b <= byte('z'); i, b = i+1, b+1 {
		key := roachpb.RKey([]byte{b})
		alphaRangeDescriptors = append(alphaRangeDescriptors, &roachpb.RangeDescriptor{
			RangeID:  roachpb.RangeID(i + 2),
			StartKey: lastKey,
			EndKey:   key,
			Replicas: []roachpb.ReplicaDescriptor{
				{
					NodeID:  1,
					StoreID: 1,
				},
			},
		})
		lastKey = key
	}
}
func TestKeyAddress(t *testing.T) {
	testCases := []struct {
		key        roachpb.Key
		expAddress roachpb.RKey
	}{
		{roachpb.Key{}, roachpb.RKeyMin},
		{roachpb.Key("123"), roachpb.RKey("123")},
		{RangeDescriptorKey(roachpb.RKey("foo")), roachpb.RKey("foo")},
		{TransactionKey(roachpb.Key("baz"), uuid.MakeV4()), roachpb.RKey("baz")},
		{TransactionKey(roachpb.KeyMax, uuid.MakeV4()), roachpb.RKeyMax},
		{RangeDescriptorKey(roachpb.RKey(TransactionKey(roachpb.Key("doubleBaz"), uuid.MakeV4()))), roachpb.RKey("doubleBaz")},
		{nil, nil},
	}
	for i, test := range testCases {
		if keyAddr, err := Addr(test.key); err != nil {
			t.Errorf("%d: %v", i, err)
		} else if !keyAddr.Equal(test.expAddress) {
			t.Errorf("%d: expected address for key %q doesn't match %q", i, test.key, test.expAddress)
		}
	}
}
// TestRangeSplitMeta executes various splits (including at meta addressing)
// and checks that all created intents are resolved. This includes both intents
// which are resolved synchronously with EndTransaction and via RPC.
func TestRangeSplitMeta(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _ := createTestDB(t)
	defer s.Stop()

	splitKeys := []roachpb.RKey{roachpb.RKey("G"), mustMeta(roachpb.RKey("F")),
		mustMeta(roachpb.RKey("K")), mustMeta(roachpb.RKey("H"))}

	// Execute the consecutive splits.
	for _, splitKey := range splitKeys {
		log.Infof(context.Background(), "starting split at key %q...", splitKey)
		if err := s.DB.AdminSplit(context.TODO(), roachpb.Key(splitKey)); err != nil {
			t.Fatal(err)
		}
		log.Infof(context.Background(), "split at key %q complete", splitKey)
	}

	testutils.SucceedsSoon(t, func() error {
		if _, _, _, err := engine.MVCCScan(context.Background(), s.Eng, keys.LocalMax, roachpb.KeyMax,
			math.MaxInt64, hlc.MaxTimestamp, true, nil); err != nil {
			return errors.Errorf("failed to verify no dangling intents: %s", err)
		}
		return nil
	})
}
// UserKey returns an ordinary key for the given range metadata (meta1, meta2)
// indexing key.
//
// - For RKeyMin, Meta1Prefix is returned.
// - For a meta1 key, a meta2 key is returned.
// - For a meta2 key, an ordinary key is returned.
// - For an ordinary key, the input key is returned.
func UserKey(key roachpb.RKey) roachpb.RKey {
	if len(key) == 0 { // key.Equal(roachpb.RKeyMin)
		return roachpb.RKey(Meta1Prefix)
	}
	var prefix roachpb.Key
	switch key[0] {
	case meta1PrefixByte:
		prefix = Meta2Prefix
		key = key[len(Meta1Prefix):]
	case meta2PrefixByte:
		key = key[len(Meta2Prefix):]
	}
	buf := make(roachpb.RKey, 0, len(prefix)+len(key))
	buf = append(buf, prefix...)
	buf = append(buf, key...)
	return buf
}
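// An illustrative sketch, not part of the original source; the helper name
// userKeySketch is made up for this example. It walks the cases listed in the
// comment above, using the same inputs as TestUserKey above: RKeyMin maps to
// Meta1Prefix, a meta2 key ("\x03..." here) maps to the ordinary key it
// indexes, and an ordinary key is returned unchanged.
func userKeySketch() {
	fmt.Printf("%q\n", UserKey(roachpb.RKeyMin))         // Meta1Prefix
	fmt.Printf("%q\n", UserKey(roachpb.RKey("\x03foo"))) // "foo"
	fmt.Printf("%q\n", UserKey(roachpb.RKey("foo")))     // "foo"
}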
// TestKeySorting is a sanity check to make sure that
// the non-replicated part of a store sorts before the meta.
func TestKeySorting(t *testing.T) {
	// Reminder: Increasing the last byte by one < adding a null byte.
	if !(roachpb.RKey("").Less(roachpb.RKey("\x00")) &&
		roachpb.RKey("\x00").Less(roachpb.RKey("\x01")) &&
		roachpb.RKey("\x01").Less(roachpb.RKey("\x01\x00"))) {
		t.Fatalf("something is seriously wrong with this machine")
	}
	if bytes.Compare(localPrefix, Meta1Prefix) >= 0 {
		t.Fatalf("local key spilling into replicated ranges")
	}
	if !bytes.Equal(roachpb.Key(""), roachpb.Key(nil)) {
		t.Fatalf("equality between keys failed")
	}
}
func TestValidateRangeMetaKey(t *testing.T) {
	testCases := []struct {
		key    []byte
		expErr bool
	}{
		{roachpb.RKeyMin, false},
		{roachpb.RKey("\x00"), true},
		{Meta1Prefix, false},
		{makeKey(Meta1Prefix, roachpb.RKeyMax), false},
		{makeKey(Meta2Prefix, roachpb.RKeyMax), false},
		{makeKey(Meta2Prefix, roachpb.RKeyMax.Next()), true},
	}
	for i, test := range testCases {
		err := validateRangeMetaKey(test.key)
		if (err != nil) != test.expErr {
			t.Errorf("%d: expected error? %t: %s", i, test.expErr, err)
		}
	}
}
func TestContainsTimeSeries(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tsdb := (*DB)(nil)
	for i, tcase := range []struct {
		start    roachpb.RKey
		end      roachpb.RKey
		expected bool
	}{
		{
			roachpb.RKey("a"),
			roachpb.RKey("b"),
			false,
		},
		{
			roachpb.RKeyMin,
			roachpb.RKey(keys.SystemPrefix),
			false,
		},
		{
			roachpb.RKeyMin,
			roachpb.RKeyMax,
			true,
		},
		{
			roachpb.RKeyMin,
			roachpb.RKey(MakeDataKey("metric", "", Resolution10s, 0)),
			true,
		},
		{
			roachpb.RKey(MakeDataKey("metric", "", Resolution10s, 0)),
			roachpb.RKeyMax,
			true,
		},
		{
			roachpb.RKey(MakeDataKey("metric", "", Resolution10s, 0)),
			roachpb.RKey(MakeDataKey("metric.b", "", Resolution10s, 0)),
			true,
		},
	} {
		if actual := tsdb.ContainsTimeSeries(tcase.start, tcase.end); actual != tcase.expected {
			t.Errorf("case %d: was %t, expected %t", i, actual, tcase.expected)
		}
	}
}
func TestKeyAddressError(t *testing.T) {
	testCases := map[string][]roachpb.Key{
		"store-local key .* is not addressable": {
			StoreIdentKey(),
			StoreGossipKey(),
		},
		"local range ID key .* is not addressable": {
			AbortCacheKey(0, uuid.MakeV4()),
			RaftTombstoneKey(0),
			RaftAppliedIndexKey(0),
			RaftTruncatedStateKey(0),
			RangeLeaseKey(0),
			RangeStatsKey(0),
			RaftHardStateKey(0),
			RaftLastIndexKey(0),
			RaftLogPrefix(0),
			RaftLogKey(0, 0),
			RangeLastReplicaGCTimestampKey(0),
			RangeLastVerificationTimestampKeyDeprecated(0),
			RangeDescriptorKey(roachpb.RKey(RangeLastVerificationTimestampKeyDeprecated(0))),
		},
		"local key .* malformed": {
			makeKey(localPrefix, roachpb.Key("z")),
		},
	}
	for regexp, keyList := range testCases {
		for _, key := range keyList {
			if addr, err := Addr(key); err == nil {
				t.Errorf("expected addressing key %q to throw error, but it returned address %q", key, addr)
			} else if !testutils.IsError(err, regexp) {
				t.Errorf("expected addressing key %q to throw error matching %s, but got error %v", key, regexp, err)
			}
		}
	}
}
// TestReplicaDataIterator creates three ranges {"a"-"b" (pre), "b"-"c"
// (main test range), "c"-"d" (post)} and fills each with data. It
// first verifies the contents of the "b"-"c" range. Next, it makes sure
// a replicated-only iterator does not show any unreplicated keys from
// the range. Then, it deletes the range and verifies it's empty. Finally,
// it verifies the pre and post ranges still contain the expected data.
func TestReplicaDataIterator(t *testing.T) {
	defer leaktest.AfterTest(t)()
	cfg := TestStoreConfig(nil)
	// Disable Raft processing for this test as it mucks with low-level details
	// of replica storage in an unsafe way.
	cfg.TestingKnobs.DisableProcessRaft = true
	tc := testContext{
		bootstrapMode: bootstrapRangeOnly,
	}
	tc.StartWithStoreConfig(t, cfg)
	defer tc.Stop()

	// See notes in EmptyRange test method for adjustment to descriptor.
	newDesc := *tc.repl.Desc()
	newDesc.StartKey = roachpb.RKey("b")
	newDesc.EndKey = roachpb.RKey("c")
	if err := tc.repl.setDesc(&newDesc); err != nil {
		t.Fatal(err)
	}
	// Create two more ranges, one before the test range and one after.
	preRng := createReplica(tc.store, 2, roachpb.RKeyMin, roachpb.RKey("b"))
	if err := tc.store.AddReplica(preRng); err != nil {
		t.Fatal(err)
	}
	postRng := createReplica(tc.store, 3, roachpb.RKey("c"), roachpb.RKeyMax)
	if err := tc.store.AddReplica(postRng); err != nil {
		t.Fatal(err)
	}

	// Create range data for all three ranges.
	preKeys := createRangeData(t, preRng)
	curKeys := createRangeData(t, tc.repl)
	postKeys := createRangeData(t, postRng)

	// Verify the contents of the "b"-"c" range.
	iter := NewReplicaDataIterator(tc.repl.Desc(), tc.repl.store.Engine(), false /* !replicatedOnly */)
	defer iter.Close()
	i := 0
	for ; iter.Valid(); iter.Next() {
		if err := iter.Error(); err != nil {
			t.Fatal(err)
		}
		if i >= len(curKeys) {
			t.Fatal("there are more keys in the iteration than expected")
		}
		if key := iter.Key(); !key.Equal(curKeys[i]) {
			k1, ts1 := key.Key, key.Timestamp
			k2, ts2 := curKeys[i].Key, curKeys[i].Timestamp
			t.Errorf("%d: expected %q(%d); got %q(%d)", i, k2, ts2, k1, ts1)
		}
		i++
	}
	if i != len(curKeys) {
		t.Fatal("there are fewer keys in the iteration than expected")
	}

	// Verify that the replicated-only iterator ignores unreplicated keys.
	unreplicatedPrefix := keys.MakeRangeIDUnreplicatedPrefix(tc.repl.RangeID)
	iter = NewReplicaDataIterator(tc.repl.Desc(), tc.repl.store.Engine(), true /* replicatedOnly */)
	defer iter.Close()
	for ; iter.Valid(); iter.Next() {
		if err := iter.Error(); err != nil {
			t.Fatal(err)
		}
		if bytes.HasPrefix(iter.Key().Key, unreplicatedPrefix) {
			t.Fatalf("unexpected unreplicated key: %s", iter.Key().Key)
		}
	}

	// Destroy range and verify that its data has been completely cleared.
	if err := tc.store.removeReplicaImpl(tc.repl, *tc.repl.Desc(), true); err != nil {
		t.Fatal(err)
	}
	iter = NewReplicaDataIterator(tc.repl.Desc(), tc.repl.store.Engine(), false /* !replicatedOnly */)
	defer iter.Close()
	if iter.Valid() {
		// If the range is destroyed, only a tombstone key should be there.
		k1 := iter.Key().Key
		if tombstoneKey := keys.RaftTombstoneKey(tc.repl.RangeID); !bytes.Equal(k1, tombstoneKey) {
			t.Errorf("expected a tombstone key %q, but found %q", tombstoneKey, k1)
		}
		if iter.Next(); iter.Valid() {
			t.Errorf("expected a destroyed replica to have only a tombstone key, but found more")
		}
	} else {
		t.Errorf("expected a tombstone key, but got an empty iteration")
	}

	// Verify the keys in pre & post ranges.
	for j, test := range []struct {
		r    *Replica
		keys []engine.MVCCKey
	}{
		{preRng, preKeys},
		{postRng, postKeys},
	} {
		iter = NewReplicaDataIterator(test.r.Desc(), test.r.store.Engine(), false /* !replicatedOnly */)
		defer iter.Close()
		i = 0
		for ; iter.Valid(); iter.Next() {
			k1, ts1 := iter.Key().Key, iter.Key().Timestamp
			if bytes.HasPrefix(k1, keys.StatusPrefix) {
				// Some data is written into the system prefix by Store.BootstrapRange,
				// but it is not in our expected key list so skip it.
				// TODO(bdarnell): validate this data instead of skipping it.
				continue
			}
			if key := iter.Key(); !key.Equal(test.keys[i]) {
				k2, ts2 := test.keys[i].Key, test.keys[i].Timestamp
				t.Errorf("%d/%d: key mismatch %q(%d) != %q(%d) [%x]", j, i, k1, ts1, k2, ts2, []byte(k2))
			}
			i++
		}
		if i != len(test.keys) {
			t.Fatal("there are fewer keys in the iteration than expected")
		}
	}
}
func TestBatchRange(t *testing.T) {
	testCases := []struct {
		req [][2]string
		exp [2]string
	}{
		{
			// Boring single request.
			req: [][2]string{{"a", "b"}},
			exp: [2]string{"a", "b"},
		},
		{
			// Request with invalid range. It's important that this still
			// results in a valid range.
			req: [][2]string{{"b", "a"}},
			exp: [2]string{"b", "b\x00"},
		},
		{
			// Two overlapping ranges.
			req: [][2]string{{"a", "c"}, {"b", "d"}},
			exp: [2]string{"a", "d"},
		},
		{
			// Two disjoint ranges.
			req: [][2]string{{"a", "b"}, {"c", "d"}},
			exp: [2]string{"a", "d"},
		},
		{
			// Range and disjoint point request.
			req: [][2]string{{"a", "b"}, {"c", ""}},
			exp: [2]string{"a", "c\x00"},
		},
		{
			// Three disjoint point requests.
			req: [][2]string{{"a", ""}, {"b", ""}, {"c", ""}},
			exp: [2]string{"a", "c\x00"},
		},
		{
			// Disjoint range request and point request.
			req: [][2]string{{"a", "b"}, {"b", ""}},
			exp: [2]string{"a", "b\x00"},
		},
		{
			// Range-local point request.
			req: [][2]string{{string(RangeDescriptorKey(roachpb.RKeyMax)), ""}},
			exp: [2]string{"\xff\xff", "\xff\xff\x00"},
		},
		{
			// Range-local to global such that the key ordering flips.
			// Important that we get a valid range back.
			req: [][2]string{{string(RangeDescriptorKey(roachpb.RKeyMax)), "x"}},
			exp: [2]string{"\xff\xff", "\xff\xff\x00"},
		},
		{
			// Range-local to global without order messed up.
			req: [][2]string{{string(RangeDescriptorKey(roachpb.RKey("a"))), "x"}},
			exp: [2]string{"a", "x"},
		},
	}

	for i, c := range testCases {
		var ba roachpb.BatchRequest
		for _, pair := range c.req {
			ba.Add(&roachpb.ScanRequest{Span: roachpb.Span{Key: roachpb.Key(pair[0]), EndKey: roachpb.Key(pair[1])}})
		}
		if rs, err := Range(ba); err != nil {
			t.Errorf("%d: %v", i, err)
		} else if actPair := [2]string{string(rs.Key), string(rs.EndKey)}; !reflect.DeepEqual(actPair, c.exp) {
			t.Errorf("%d: expected [%q,%q), got [%q,%q)", i, c.exp[0], c.exp[1], actPair[0], actPair[1])
		}
	}
}
func TestMetaReverseScanBounds(t *testing.T) {
	testCases := []struct {
		key              []byte
		expStart, expEnd []byte
		expError         string
	}{
		{
			key:      roachpb.RKey{},
			expStart: nil,
			expEnd:   nil,
			expError: "KeyMin and Meta1Prefix can't be used as the key of reverse scan",
		},
		{
			key:      Meta1Prefix,
			expStart: nil,
			expEnd:   nil,
			expError: "KeyMin and Meta1Prefix can't be used as the key of reverse scan",
		},
		{
			key:      Meta2KeyMax.Next(),
			expStart: nil,
			expEnd:   nil,
			expError: "body of meta key range lookup is",
		},
		{
			key:      Meta1KeyMax.Next(),
			expStart: nil,
			expEnd:   nil,
			expError: "body of meta key range lookup is",
		},
		{
			key:      makeKey(Meta2Prefix, roachpb.Key("foo")),
			expStart: Meta2Prefix,
			expEnd:   makeKey(Meta2Prefix, roachpb.Key("foo\x00")),
			expError: "",
		},
		{
			key:      makeKey(Meta1Prefix, roachpb.Key("foo")),
			expStart: Meta1Prefix,
			expEnd:   makeKey(Meta1Prefix, roachpb.Key("foo\x00")),
			expError: "",
		},
		{
			key:      MustAddr(Meta2Prefix),
			expStart: Meta1Prefix,
			expEnd:   Meta2Prefix.Next(),
			expError: "",
		},
		{
			key:      Meta2KeyMax,
			expStart: Meta2Prefix,
			expEnd:   Meta2KeyMax.Next(),
			expError: "",
		},
	}

	for i, test := range testCases {
		resStart, resEnd, err := MetaReverseScanBounds(roachpb.RKey(test.key))
		if err != nil && !testutils.IsError(err, test.expError) {
			t.Errorf("expected error: %s; got %v", test.expError, err)
		} else if err == nil && test.expError != "" {
			t.Errorf("expected error: %s", test.expError)
		}
		if !resStart.Equal(test.expStart) || !resEnd.Equal(test.expEnd) {
			t.Errorf("%d: range bounds %q-%q don't match expected bounds %q-%q for key %q",
				i, resStart, resEnd, test.expStart, test.expEnd, test.key)
		}
	}
}
// QueueLastProcessedKey returns a range-local key for last processed
// timestamps for the named queue. These keys represent per-range last
// processed times.
func QueueLastProcessedKey(key roachpb.RKey, queue string) roachpb.Key {
	return MakeRangeKey(key, LocalQueueLastProcessedSuffix, roachpb.RKey(queue))
}
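// An illustrative sketch, not part of the original source; the helper name
// queueLastProcessedKeySketch is made up for this example. It builds the same
// key that TestGCQueueLastProcessedTimestamps above stores for the range
// starting at "a"; because the result is a range-local key, Addr should map
// it back to that range key, per the Addr commentary earlier.
func queueLastProcessedKeySketch() {
	k := QueueLastProcessedKey(roachpb.RKey("a"), "timeSeriesMaintenance")
	if addr, err := Addr(k); err == nil {
		fmt.Printf("%q -> %q\n", k, addr) // addresses back to "a"
	}
}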
func TestTruncate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	loc := func(s string) string {
		return string(keys.RangeDescriptorKey(roachpb.RKey(s)))
	}
	locPrefix := func(s string) string {
		return string(keys.MakeRangeKeyPrefix(roachpb.RKey(s)))
	}
	testCases := []struct {
		keys     [][2]string
		expKeys  [][2]string
		from, to string
		desc     [2]string // optional, defaults to {from,to}
		err      string
	}{
		{
			// Keys inside of active range.
			keys:    [][2]string{{"a", "q"}, {"c"}, {"b, e"}, {"q"}},
			expKeys: [][2]string{{"a", "q"}, {"c"}, {"b, e"}, {"q"}},
			from:    "a", to: "q\x00",
		},
		{
			// Keys outside of active range.
			keys:    [][2]string{{"a"}, {"a", "b"}, {"q"}, {"q", "z"}},
			expKeys: [][2]string{{}, {}, {}, {}},
			from:    "b", to: "q",
		},
		{
			// Range-local keys inside of active range.
			keys:    [][2]string{{loc("b")}, {loc("c")}},
			expKeys: [][2]string{{loc("b")}, {loc("c")}},
			from:    "b", to: "e",
		},
		{
			// Range-local key outside of active range.
			keys:    [][2]string{{loc("a")}},
			expKeys: [][2]string{{}},
			from:    "b", to: "e",
		},
		{
			// Range-local range contained in active range.
			keys:    [][2]string{{loc("b"), loc("e") + "\x00"}},
			expKeys: [][2]string{{loc("b"), loc("e") + "\x00"}},
			from:    "b", to: "e\x00",
		},
		{
			// Range-local range not contained in active range.
			keys:    [][2]string{{loc("a"), loc("b")}},
			expKeys: [][2]string{{}},
			from:    "c", to: "e",
		},
		{
			// Range-local range not contained in active range.
			keys:    [][2]string{{loc("a"), locPrefix("b")}, {loc("e"), loc("f")}},
			expKeys: [][2]string{{}, {}},
			from:    "b", to: "e",
		},
		{
			// Range-local range partially contained in active range.
			keys:    [][2]string{{loc("a"), loc("b")}},
			expKeys: [][2]string{{loc("a"), locPrefix("b")}},
			from:    "a", to: "b",
		},
		{
			// Range-local range partially contained in active range.
			keys:    [][2]string{{loc("a"), loc("b")}},
			expKeys: [][2]string{{locPrefix("b"), loc("b")}},
			from:    "b", to: "e",
		},
		{
			// Range-local range contained in active range.
			keys:    [][2]string{{locPrefix("b"), loc("b")}},
			expKeys: [][2]string{{locPrefix("b"), loc("b")}},
			from:    "b", to: "c",
		},
		{
			// Mixed range-local vs global key range.
			keys: [][2]string{{loc("c"), "d\x00"}},
			from: "b", to: "e",
			err: "local key mixed with global key",
		},
		{
			// Key range touching and intersecting active range.
			keys:    [][2]string{{"a", "b"}, {"a", "c"}, {"p", "q"}, {"p", "r"}, {"a", "z"}},
			expKeys: [][2]string{{}, {"b", "c"}, {"p", "q"}, {"p", "q"}, {"b", "q"}},
			from:    "b", to: "q",
		},
		// Active key range is intersection of descriptor and [from,to).
		{
			keys:    [][2]string{{"c", "q"}},
			expKeys: [][2]string{{"d", "p"}},
			from:    "a", to: "z",
			desc:    [2]string{"d", "p"},
		},
		{
			keys:    [][2]string{{"c", "q"}},
			expKeys: [][2]string{{"d", "p"}},
			from:    "d", to: "p",
			desc:    [2]string{"a", "z"},
		},
	}

	for i, test := range testCases {
		goldenOriginal := roachpb.BatchRequest{}
		for _, ks := range test.keys {
			if len(ks[1]) > 0 {
				u := uuid.MakeV4()
				goldenOriginal.Add(&roachpb.ResolveIntentRangeRequest{
					Span:      roachpb.Span{Key: roachpb.Key(ks[0]), EndKey: roachpb.Key(ks[1])},
					IntentTxn: enginepb.TxnMeta{ID: &u},
				})
			} else {
				goldenOriginal.Add(&roachpb.GetRequest{
					Span: roachpb.Span{Key: roachpb.Key(ks[0])},
				})
			}
		}

		original := roachpb.BatchRequest{Requests: make([]roachpb.RequestUnion, len(goldenOriginal.Requests))}
		for i, request := range goldenOriginal.Requests {
			original.Requests[i].SetValue(request.GetInner().ShallowCopy())
		}

		desc := &roachpb.RangeDescriptor{
			StartKey: roachpb.RKey(test.desc[0]),
			EndKey:   roachpb.RKey(test.desc[1]),
		}
		if len(desc.StartKey) == 0 {
			desc.StartKey = roachpb.RKey(test.from)
		}
		if len(desc.EndKey) == 0 {
			desc.EndKey = roachpb.RKey(test.to)
		}
		rs := roachpb.RSpan{Key: roachpb.RKey(test.from), EndKey: roachpb.RKey(test.to)}
		rs, err := rs.Intersect(desc)
		if err != nil {
			t.Errorf("%d: intersection failure: %v", i, err)
			continue
		}
		ba, num, err := truncate(original, rs)
		if err != nil || test.err != "" {
			if !testutils.IsError(err, test.err) {
				t.Errorf("%d: %v (expected: %q)", i, err, test.err)
			}
			continue
		}
		var reqs int
		for j, arg := range ba.Requests {
			req := arg.GetInner()
			if _, ok := req.(*roachpb.NoopRequest); ok {
				continue
			}
			if h := req.Header(); !bytes.Equal(h.Key, roachpb.Key(test.expKeys[j][0])) ||
				!bytes.Equal(h.EndKey, roachpb.Key(test.expKeys[j][1])) {
				t.Errorf("%d.%d: range mismatch: actual [%q,%q), wanted [%q,%q)", i, j,
					h.Key, h.EndKey, test.expKeys[j][0], test.expKeys[j][1])
			} else if _, ok := req.(*roachpb.NoopRequest); ok != (len(h.Key) == 0) {
				t.Errorf("%d.%d: expected NoopRequest, got %T", i, j, req)
			} else if len(h.Key) != 0 {
				reqs++
			}
		}
		if reqs != num {
			t.Errorf("%d: counted %d requests, but truncation indicated %d", i, reqs, num)
		}
		if !reflect.DeepEqual(original, goldenOriginal) {
			t.Errorf("%d: truncation mutated original:\nexpected: %s\nactual: %s", i, goldenOriginal, original)
		}
	}
}
func TestStoreMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("TODO(mrtracy): #9204")
	mtc := &multiTestContext{}
	defer mtc.Stop()
	mtc.Start(t, 3)

	// Flush RocksDB memtables, so that RocksDB begins using block-based tables.
	// This is useful, because most of the stats we track don't apply to
	// memtables.
	if err := mtc.stores[0].Engine().Flush(); err != nil {
		t.Fatal(err)
	}
	if err := mtc.stores[1].Engine().Flush(); err != nil {
		t.Fatal(err)
	}

	// Disable the raft log truncation which confuses this test.
	for _, s := range mtc.stores {
		s.SetRaftLogQueueActive(false)
	}

	// Perform a split, which has special metrics handling.
	splitArgs := adminSplitArgs(roachpb.KeyMin, roachpb.Key("m"))
	if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), splitArgs); err != nil {
		t.Fatal(err)
	}

	// Verify range count is as expected.
	checkCounter(t, mtc.stores[0].Metrics().ReplicaCount, 2)

	// Verify all stats on store0 after split.
	verifyStats(t, mtc, 0)

	// Replicate the "right" range to the other stores.
	replica := mtc.stores[0].LookupReplica(roachpb.RKey("z"), nil)
	mtc.replicateRange(replica.RangeID, 1, 2)

	// Verify stats on store1 after replication.
	verifyStats(t, mtc, 1)

	// Add some data to the "right" range.
	dataKey := []byte("z")
	if _, err := mtc.dbs[0].Inc(context.TODO(), dataKey, 5); err != nil {
		t.Fatal(err)
	}
	mtc.waitForValues(roachpb.Key("z"), []int64{5, 5, 5})

	// Verify all stats on store 0 and 1 after addition.
	verifyStats(t, mtc, 0, 1)

	// Create a transaction statement that fails, but will add an entry to the
	// sequence cache. Regression test for #4969.
	if err := mtc.dbs[0].Txn(context.TODO(), func(txn *client.Txn) error {
		b := txn.NewBatch()
		b.CPut(dataKey, 7, 6)
		return txn.Run(b)
	}); err == nil {
		t.Fatal("Expected transaction error, but none received")
	}

	// Verify stats after sequence cache addition.
	verifyStats(t, mtc, 0)
	checkCounter(t, mtc.stores[0].Metrics().ReplicaCount, 2)

	// Unreplicate range from the first store.
	mtc.unreplicateRange(replica.RangeID, 0)

	// Force GC Scan on store 0 in order to fully remove range.
	mtc.stores[1].ForceReplicaGCScanAndProcess()
	mtc.waitForValues(roachpb.Key("z"), []int64{0, 5, 5})

	// Verify range count is as expected.
	checkCounter(t, mtc.stores[0].Metrics().ReplicaCount, 1)
	checkCounter(t, mtc.stores[1].Metrics().ReplicaCount, 1)

	// Verify all stats on store0 and store1 after range is removed.
	verifyStats(t, mtc, 0, 1)

	verifyRocksDBStats(t, mtc.stores[0])
	verifyRocksDBStats(t, mtc.stores[1])
}