// TestVerifyQueueShouldQueue verifies that the shouldQueue method
// correctly indicates that a range should be queued for verification
// if the time since the last verification exceeds the threshold limit.
func TestVerifyQueueShouldQueue(t *testing.T) {
	defer leaktest.AfterTest(t)
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// Put an empty verification timestamp.
	key := keys.RangeLastVerificationTimestampKey(tc.rng.Desc().RangeID)
	if err := engine.MVCCPutProto(tc.rng.rm.Engine(), nil, key, roachpb.ZeroTimestamp, nil, &roachpb.Timestamp{}); err != nil {
		t.Fatal(err)
	}

	testCases := []struct {
		now      roachpb.Timestamp
		shouldQ  bool
		priority float64
	}{
		// Verification interval elapsed.
		{makeTS(verificationInterval.Nanoseconds(), 0), false, 0},
		// Verification interval * 2 elapsed.
		{makeTS(verificationInterval.Nanoseconds()*2, 0), true, 2},
	}

	verifyQ := newVerifyQueue(tc.gossip, nil)

	for i, test := range testCases {
		shouldQ, priority := verifyQ.shouldQueue(test.now, tc.rng, nil /* system config not used */)
		if shouldQ != test.shouldQ {
			t.Errorf("%d: should queue expected %t; got %t", i, test.shouldQ, shouldQ)
		}
		if math.Abs(priority-test.priority) > 0.00001 {
			t.Errorf("%d: priority expected %f; got %f", i, test.priority, priority)
		}
	}
}
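// The test above pins down shouldQueue's behavior without showing its body:
// at exactly one verificationInterval elapsed it returns (false, 0), and at
// two intervals it returns (true, 2). A minimal sketch consistent with those
// expectations, assuming the priority is the elapsed time divided by the
// verification interval; the helper name shouldQueueForVerification and its
// plain-value signature are hypothetical, not the queue's actual method:
func shouldQueueForVerification(lastVerify, now roachpb.Timestamp) (bool, float64) {
	// Score the range by the fraction of the verification interval elapsed
	// since the last verification.
	score := float64(now.WallTime-lastVerify.WallTime) / float64(verificationInterval.Nanoseconds())
	if score > 1 {
		// Overdue: queue with the elapsed-interval ratio as the priority.
		return true, score
	}
	// Not yet due: report zero priority, matching the test's first case.
	return false, 0
}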
// GetLastVerificationTimestamp reads the timestamp at which the range's
// data was last verified.
func (r *Range) GetLastVerificationTimestamp() (proto.Timestamp, error) {
	key := keys.RangeLastVerificationTimestampKey(r.Desc().RaftID)
	timestamp := proto.Timestamp{}
	_, err := engine.MVCCGetProto(r.rm.Engine(), key, proto.ZeroTimestamp, true, nil, &timestamp)
	if err != nil {
		return proto.ZeroTimestamp, err
	}
	return timestamp, nil
}
// createRangeData creates sample range data in all possible areas of
// the key space. Returns a slice of the encoded keys of all created
// data.
func createRangeData(t *testing.T, r *Replica) []engine.MVCCKey {
	ts0 := hlc.ZeroTimestamp
	ts := hlc.Timestamp{WallTime: 1}
	desc := r.Desc()
	keyTSs := []struct {
		key roachpb.Key
		ts  hlc.Timestamp
	}{
		{keys.AbortCacheKey(r.RangeID, testTxnID), ts0},
		{keys.AbortCacheKey(r.RangeID, testTxnID2), ts0},
		{keys.RangeFrozenStatusKey(r.RangeID), ts0},
		{keys.RangeLastGCKey(r.RangeID), ts0},
		{keys.RaftAppliedIndexKey(r.RangeID), ts0},
		{keys.RaftTruncatedStateKey(r.RangeID), ts0},
		{keys.LeaseAppliedIndexKey(r.RangeID), ts0},
		{keys.RangeStatsKey(r.RangeID), ts0},
		{keys.RaftHardStateKey(r.RangeID), ts0},
		{keys.RaftLastIndexKey(r.RangeID), ts0},
		{keys.RaftLogKey(r.RangeID, 1), ts0},
		{keys.RaftLogKey(r.RangeID, 2), ts0},
		{keys.RangeLastReplicaGCTimestampKey(r.RangeID), ts0},
		{keys.RangeLastVerificationTimestampKey(r.RangeID), ts0},
		{keys.RangeDescriptorKey(desc.StartKey), ts},
		{keys.TransactionKey(roachpb.Key(desc.StartKey), uuid.NewV4()), ts0},
		{keys.TransactionKey(roachpb.Key(desc.StartKey.Next()), uuid.NewV4()), ts0},
		{keys.TransactionKey(fakePrevKey(desc.EndKey), uuid.NewV4()), ts0},
		// TODO(bdarnell): KeyMin.Next() results in a key in the reserved system-local space.
		// Once we have resolved https://github.com/cockroachdb/cockroach/issues/437,
		// replace this with something that reliably generates the first valid key in the range.
		//{r.Desc().StartKey.Next(), ts},
		// The following line is similar to StartKey.Next() but adds more to the key to
		// avoid falling into the system-local space.
		{append(append([]byte{}, desc.StartKey...), '\x02'), ts},
		{fakePrevKey(r.Desc().EndKey), ts},
	}

	keys := []engine.MVCCKey{}
	for _, keyTS := range keyTSs {
		if err := engine.MVCCPut(context.Background(), r.store.Engine(), nil, keyTS.key, keyTS.ts, roachpb.MakeValueFromString("value"), nil); err != nil {
			t.Fatal(err)
		}
		keys = append(keys, engine.MVCCKey{Key: keyTS.key, Timestamp: keyTS.ts})
	}
	return keys
}
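// fakePrevKey is used above (and in the older variant below) but is not shown
// in this excerpt. A plausible sketch, assuming all it must do is produce some
// key that sorts strictly before the given one and stays within the range;
// the exact body here is an illustration, not the real helper:
func fakePrevKey(k roachpb.Key) roachpb.Key {
	if len(k) == 0 {
		panic("cannot compute a previous key for an empty key")
	}
	last := len(k) - 1
	if k[last] == 0 {
		// A trailing zero byte: the same key without it sorts immediately before.
		return k[:last]
	}
	// Otherwise decrement the last byte; the result sorts before the input.
	prev := append(roachpb.Key(nil), k...)
	prev[last]--
	return prev
}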
// createRangeData creates sample range data in all possible areas of
// the key space. Returns a slice of the encoded keys of all created
// data.
func createRangeData(r *Replica, t *testing.T) []roachpb.EncodedKey {
	ts0 := roachpb.ZeroTimestamp
	ts := roachpb.Timestamp{WallTime: 1}
	keyTSs := []struct {
		key roachpb.Key
		ts  roachpb.Timestamp
	}{
		{keys.ResponseCacheKey(r.Desc().RangeID, &roachpb.ClientCmdID{WallTime: 1, Random: 1}), ts0},
		{keys.ResponseCacheKey(r.Desc().RangeID, &roachpb.ClientCmdID{WallTime: 2, Random: 2}), ts0},
		{keys.RaftHardStateKey(r.Desc().RangeID), ts0},
		{keys.RaftLogKey(r.Desc().RangeID, 1), ts0},
		{keys.RaftLogKey(r.Desc().RangeID, 2), ts0},
		{keys.RangeGCMetadataKey(r.Desc().RangeID), ts0},
		{keys.RangeLastVerificationTimestampKey(r.Desc().RangeID), ts0},
		{keys.RangeStatsKey(r.Desc().RangeID), ts0},
		{keys.RangeDescriptorKey(r.Desc().StartKey), ts},
		{keys.TransactionKey(roachpb.Key(r.Desc().StartKey), []byte("1234")), ts0},
		{keys.TransactionKey(roachpb.Key(r.Desc().StartKey.Next()), []byte("5678")), ts0},
		{keys.TransactionKey(fakePrevKey(r.Desc().EndKey), []byte("2468")), ts0},
		// TODO(bdarnell): KeyMin.Next() results in a key in the reserved system-local space.
		// Once we have resolved https://github.com/cockroachdb/cockroach/issues/437,
		// replace this with something that reliably generates the first valid key in the range.
		//{r.Desc().StartKey.Next(), ts},
		// The following line is similar to StartKey.Next() but adds more to the key to
		// avoid falling into the system-local space.
		{append(append([]byte{}, r.Desc().StartKey...), '\x01'), ts},
		{fakePrevKey(r.Desc().EndKey), ts},
	}

	keys := []roachpb.EncodedKey{}
	for _, keyTS := range keyTSs {
		if err := engine.MVCCPut(r.store.Engine(), nil, keyTS.key, keyTS.ts, roachpb.MakeValueFromString("value"), nil); err != nil {
			t.Fatal(err)
		}
		keys = append(keys, engine.MVCCEncodeKey(keyTS.key))
		if !keyTS.ts.Equal(ts0) {
			keys = append(keys, engine.MVCCEncodeVersionKey(keyTS.key, keyTS.ts))
		}
	}
	return keys
}
// SetLastVerificationTimestamp writes the timestamp at which the range's
// data was last verified.
func (r *Range) SetLastVerificationTimestamp(timestamp proto.Timestamp) error {
	key := keys.RangeLastVerificationTimestampKey(r.Desc().RaftID)
	return engine.MVCCPutProto(r.rm.Engine(), nil, key, proto.ZeroTimestamp, nil, &timestamp)
}
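// Taken together, Set/GetLastVerificationTimestamp round-trip a
// protobuf-encoded timestamp through the range-local key above. A minimal
// usage sketch; the caller markVerified is hypothetical, standing in for
// whatever runs after a verification pass completes:
func markVerified(r *Range, now proto.Timestamp) error {
	if err := r.SetLastVerificationTimestamp(now); err != nil {
		return err
	}
	// Read it back; Get should return exactly what Set wrote.
	ts, err := r.GetLastVerificationTimestamp()
	if err != nil {
		return err
	}
	if !ts.Equal(now) {
		return util.Errorf("verification timestamp round-trip mismatch: %s != %s", ts, now)
	}
	return nil
}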
// splitTrigger is called on a successful commit of an AdminSplit
// transaction. It copies the response cache for the new range and
// recomputes stats for both the existing, updated range and the new
// range.
func (r *Range) splitTrigger(batch engine.Engine, split *proto.SplitTrigger) error {
	if !bytes.Equal(r.Desc().StartKey, split.UpdatedDesc.StartKey) ||
		!bytes.Equal(r.Desc().EndKey, split.NewDesc.EndKey) {
		return util.Errorf("range does not match splits: (%s-%s) + (%s-%s) != %s",
			split.UpdatedDesc.StartKey, split.UpdatedDesc.EndKey,
			split.NewDesc.StartKey, split.NewDesc.EndKey, r)
	}

	// Copy the GC metadata.
	gcMeta, err := r.GetGCMetadata()
	if err != nil {
		return util.Errorf("unable to fetch GC metadata: %s", err)
	}
	if err := engine.MVCCPutProto(batch, nil, keys.RangeGCMetadataKey(split.NewDesc.RaftID), proto.ZeroTimestamp, nil, gcMeta); err != nil {
		return util.Errorf("unable to copy GC metadata: %s", err)
	}

	// Copy the last verification timestamp.
	verifyTS, err := r.GetLastVerificationTimestamp()
	if err != nil {
		return util.Errorf("unable to fetch last verification timestamp: %s", err)
	}
	if err := engine.MVCCPutProto(batch, nil, keys.RangeLastVerificationTimestampKey(split.NewDesc.RaftID), proto.ZeroTimestamp, nil, &verifyTS); err != nil {
		return util.Errorf("unable to copy last verification timestamp: %s", err)
	}

	// Compute stats for the updated range.
	now := r.rm.Clock().Timestamp()
	iter := newRangeDataIterator(&split.UpdatedDesc, batch)
	ms, err := engine.MVCCComputeStats(iter, now.WallTime)
	iter.Close()
	if err != nil {
		return util.Errorf("unable to compute stats for updated range after split: %s", err)
	}
	if err := r.stats.SetMVCCStats(batch, ms); err != nil {
		return util.Errorf("unable to write MVCC stats: %s", err)
	}

	// Initialize the new range's response cache by copying the original's.
	if err = r.respCache.CopyInto(batch, split.NewDesc.RaftID); err != nil {
		return util.Errorf("unable to copy response cache to new split range: %s", err)
	}

	// Add the new split range to the store. This step atomically
	// updates the EndKey of the updated range and also adds the
	// new range to the store's range map.
	newRng, err := NewRange(&split.NewDesc, r.rm)
	if err != nil {
		return err
	}

	// Compute stats for the new range.
	iter = newRangeDataIterator(&split.NewDesc, batch)
	ms, err = engine.MVCCComputeStats(iter, now.WallTime)
	iter.Close()
	if err != nil {
		return util.Errorf("unable to compute stats for new range after split: %s", err)
	}
	if err = newRng.stats.SetMVCCStats(batch, ms); err != nil {
		return util.Errorf("unable to write MVCC stats: %s", err)
	}

	// Copy the timestamp cache into the new range.
	r.Lock()
	r.tsCache.MergeInto(newRng.tsCache, true /* clear */)
	r.Unlock()

	batch.Defer(func() {
		if err := r.rm.SplitRange(r, newRng); err != nil {
			// Our in-memory state has diverged from the on-disk state.
			log.Fatalf("failed to update Store after split: %s", err)
		}
	})

	return nil
}
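// One observable effect of splitTrigger is that the new range inherits the
// parent's verification timestamp rather than starting from zero, so the
// verify queue does not treat a fresh split as never-verified. A hedged
// sketch of the check a test might make, using only the accessors shown
// above; the helper name is hypothetical and the split plumbing is assumed
// to have already run:
func checkSplitCopiesVerifyTS(rng, newRng *Range) error {
	parentTS, err := rng.GetLastVerificationTimestamp()
	if err != nil {
		return err
	}
	newTS, err := newRng.GetLastVerificationTimestamp()
	if err != nil {
		return err
	}
	if !newTS.Equal(parentTS) {
		return util.Errorf("split did not copy verification timestamp: %s != %s", newTS, parentTS)
	}
	return nil
}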