// CopyInto copies all the cached results from this response cache
// into the destRangeID response cache. Failures decoding individual
// cache entries return an error.
func (rc *ResponseCache) CopyInto(e engine.Engine, destRangeID roachpb.RangeID) error {
	start := engine.MVCCEncodeKey(
		keys.ResponseCacheKey(rc.rangeID, roachpb.KeyMin))
	end := engine.MVCCEncodeKey(
		keys.ResponseCacheKey(rc.rangeID, roachpb.KeyMax))

	return e.Iterate(start, end, func(kv engine.MVCCKeyValue) (bool, error) {
		// Decode the key into a cmd, skipping on error. Otherwise,
		// write it to the corresponding key in the new cache.
		family, err := rc.decodeResponseCacheKey(kv.Key)
		if err != nil {
			return false, util.Errorf("could not decode a response cache key %s: %s",
				roachpb.Key(kv.Key), err)
		}
		key := keys.ResponseCacheKey(destRangeID, family)
		encKey := engine.MVCCEncodeKey(key)
		// Decode the value, update the checksum and re-encode.
		meta := &engine.MVCCMetadata{}
		if err := proto.Unmarshal(kv.Value, meta); err != nil {
			return false, util.Errorf("could not decode response cache value %s [% x]: %s",
				roachpb.Key(kv.Key), kv.Value, err)
		}
		meta.Value.Checksum = nil
		meta.Value.InitChecksum(key)
		_, _, err = engine.PutProto(e, encKey, meta)
		return false, err
	})
}
// InternalTruncateLog discards a prefix of the raft log.
func (r *Range) InternalTruncateLog(batch engine.Engine, ms *engine.MVCCStats, args *proto.InternalTruncateLogRequest, reply *proto.InternalTruncateLogResponse) {
	// args.Index is the first index to keep.
	term, err := r.Term(args.Index - 1)
	if err != nil {
		reply.SetGoError(err)
		return
	}
	start := keys.RaftLogKey(r.Desc().RaftID, 0)
	end := keys.RaftLogKey(r.Desc().RaftID, args.Index)
	err = batch.Iterate(engine.MVCCEncodeKey(start), engine.MVCCEncodeKey(end),
		func(kv proto.RawKeyValue) (bool, error) {
			err := batch.Clear(kv.Key)
			return false, err
		})
	if err != nil {
		reply.SetGoError(err)
		return
	}
	ts := proto.RaftTruncatedState{
		Index: args.Index - 1,
		Term:  term,
	}
	err = engine.MVCCPutProto(batch, ms, keys.RaftTruncatedStateKey(r.Desc().RaftID),
		proto.ZeroTimestamp, nil, &ts)
	reply.SetGoError(err)
}
// CopyFrom copies all the cached results from the originRangeID
// response cache into this one. Note that the cache will not be
// locked while copying is in progress. Failures decoding individual
// cache entries return an error. The copy is done directly using the
// engine instead of interpreting values through MVCC for efficiency.
func (rc *ResponseCache) CopyFrom(e engine.Engine, originRangeID proto.RangeID) error {
	prefix := keys.ResponseCacheKey(originRangeID, nil) // response cache prefix
	start := engine.MVCCEncodeKey(prefix)
	end := engine.MVCCEncodeKey(prefix.PrefixEnd())

	return e.Iterate(start, end, func(kv proto.RawKeyValue) (bool, error) {
		// Decode the key into a cmd, skipping on error. Otherwise,
		// write it to the corresponding key in the new cache.
		cmdID, err := rc.decodeResponseCacheKey(kv.Key)
		if err != nil {
			return false, util.Errorf("could not decode a response cache key %s: %s",
				proto.Key(kv.Key), err)
		}
		key := keys.ResponseCacheKey(rc.rangeID, &cmdID)
		encKey := engine.MVCCEncodeKey(key)
		// Decode the value, update the checksum and re-encode.
		meta := &engine.MVCCMetadata{}
		if err := gogoproto.Unmarshal(kv.Value, meta); err != nil {
			return false, util.Errorf("could not decode response cache value %s [% x]: %s",
				proto.Key(kv.Key), kv.Value, err)
		}
		meta.Value.Checksum = nil
		meta.Value.InitChecksum(key)
		_, _, err = engine.PutProto(e, encKey, meta)
		return false, err
	})
}
// TestGCQueueIntentResolution verifies intent resolution with many
// intents spanning just two transactions.
func TestGCQueueIntentResolution(t *testing.T) {
	defer leaktest.AfterTest(t)
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	const now int64 = 48 * 60 * 60 * 1E9 // 2d past the epoch
	tc.manualClock.Set(now)

	txns := []*proto.Transaction{
		newTransaction("txn1", proto.Key("0-00000"), 1, proto.SERIALIZABLE, tc.clock),
		newTransaction("txn2", proto.Key("1-00000"), 1, proto.SERIALIZABLE, tc.clock),
	}
	intentResolveTS := makeTS(now-intentAgeThreshold.Nanoseconds(), 0)
	txns[0].OrigTimestamp = intentResolveTS
	txns[0].Timestamp = intentResolveTS
	txns[1].OrigTimestamp = intentResolveTS
	txns[1].Timestamp = intentResolveTS

	// Two transactions.
	for i := 0; i < 2; i++ {
		// 5 puts per transaction.
		// TODO(spencerkimball): benchmark with ~50k.
		for j := 0; j < 5; j++ {
			pArgs := putArgs(proto.Key(fmt.Sprintf("%d-%05d", i, j)), []byte("value"),
				tc.rng.Desc().RangeID, tc.store.StoreID())
			pArgs.Timestamp = makeTS(1, 0)
			pArgs.Txn = txns[i]
			if _, err := tc.rng.AddCmd(tc.rng.context(), &pArgs); err != nil {
				t.Fatalf("%d: could not put data: %s", i, err)
			}
		}
	}

	// Process through a scan queue.
	gcQ := newGCQueue()
	if err := gcQ.process(tc.clock.Now(), tc.rng); err != nil {
		t.Fatal(err)
	}

	// Iterate through all values to ensure intents have been fully resolved.
	meta := &engine.MVCCMetadata{}
	err := tc.store.Engine().Iterate(engine.MVCCEncodeKey(proto.KeyMin), engine.MVCCEncodeKey(proto.KeyMax),
		func(kv proto.RawKeyValue) (bool, error) {
			if key, _, isValue := engine.MVCCDecodeKey(kv.Key); !isValue {
				if err := gogoproto.Unmarshal(kv.Value, meta); err != nil {
					t.Fatalf("unable to unmarshal mvcc metadata for key %s", key)
				}
				if meta.Txn != nil {
					t.Fatalf("non-nil Txn after GC for key %s", key)
				}
			}
			return false, nil
		})
	if err != nil {
		t.Fatal(err)
	}
}
func copySeqCache(e engine.Engine, srcID, dstID roachpb.RangeID, keyMin, keyMax engine.MVCCKey) error {
	var scratch [64]byte
	return e.Iterate(keyMin, keyMax, func(kv engine.MVCCKeyValue) (bool, error) {
		// Decode the key into a cmd, skipping on error. Otherwise,
		// write it to the corresponding key in the new cache.
		id, epoch, seq, err := decodeSequenceCacheMVCCKey(kv.Key, scratch[:0])
		if err != nil {
			return false, util.Errorf("could not decode a sequence cache key %s: %s",
				roachpb.Key(kv.Key), err)
		}
		key := keys.SequenceCacheKey(dstID, id, epoch, seq)
		encKey := engine.MVCCEncodeKey(key)
		// Decode the value, update the checksum and re-encode.
		meta := &engine.MVCCMetadata{}
		if err := proto.Unmarshal(kv.Value, meta); err != nil {
			return false, util.Errorf("could not decode sequence cache value %s [% x]: %s",
				roachpb.Key(kv.Key), kv.Value, err)
		}
		meta.Value.Checksum = nil
		meta.Value.InitChecksum(key)
		_, _, err = engine.PutProto(e, encKey, meta)
		return false, err
	})
}
// CopyFrom copies all the cached results from another response cache
// into this one. Note that the cache will not be locked while copying
// is in progress. Failures decoding individual cache entries return an
// error. The copy is done directly using the engine instead of interpreting
// values through MVCC for efficiency.
func (rc *ResponseCache) CopyFrom(e engine.Engine, originRaftID int64) error {
	prefix := engine.ResponseCacheKey(originRaftID, nil) // response cache prefix
	start := engine.MVCCEncodeKey(prefix)
	end := engine.MVCCEncodeKey(prefix.PrefixEnd())

	return e.Iterate(start, end, func(kv proto.RawKeyValue) (bool, error) {
		// Decode the key into a cmd, skipping on error. Otherwise,
		// write it to the corresponding key in the new cache.
		cmdID, err := rc.decodeResponseCacheKey(kv.Key)
		if err != nil {
			return false, util.Errorf("could not decode a response cache key %q: %s", kv.Key, err)
		}
		encKey := engine.MVCCEncodeKey(engine.ResponseCacheKey(rc.raftID, &cmdID))
		return false, rc.engine.Put(encKey, kv.Value)
	})
}
func newRangeDataIterator(r *Range, e engine.Engine) *rangeDataIterator {
	r.RLock()
	startKey := r.Desc().StartKey
	if startKey.Equal(engine.KeyMin) {
		startKey = engine.KeyLocalMax
	}
	endKey := r.Desc().EndKey
	r.RUnlock()
	ri := &rangeDataIterator{
		ranges: []keyRange{
			{
				start: engine.MVCCEncodeKey(engine.MakeKey(engine.KeyLocalRangeIDPrefix,
					encoding.EncodeUvarint(nil, uint64(r.Desc().RaftID)))),
				end: engine.MVCCEncodeKey(engine.MakeKey(engine.KeyLocalRangeIDPrefix,
					encoding.EncodeUvarint(nil, uint64(r.Desc().RaftID+1)))),
			},
			{
				start: engine.MVCCEncodeKey(engine.MakeKey(engine.KeyLocalRangeKeyPrefix,
					encoding.EncodeBytes(nil, startKey))),
				end: engine.MVCCEncodeKey(engine.MakeKey(engine.KeyLocalRangeKeyPrefix,
					encoding.EncodeBytes(nil, endKey))),
			},
			{
				start: engine.MVCCEncodeKey(startKey),
				end:   engine.MVCCEncodeKey(endKey),
			},
		},
		iter: e.NewIterator(),
	}
	ri.iter.Seek(ri.ranges[ri.curIndex].start)
	ri.advance()
	return ri
}
func newReplicaDataIterator(d *roachpb.RangeDescriptor, e engine.Engine) *replicaDataIterator {
	// The first range in the keyspace starts at KeyMin, which includes the node-local
	// space. We need the original StartKey to find the range metadata, but the
	// actual data starts at LocalMax.
	dataStartKey := d.StartKey.AsRawKey()
	if d.StartKey.Equal(roachpb.RKeyMin) {
		dataStartKey = keys.LocalMax
	}
	ri := &replicaDataIterator{
		ranges: []keyRange{
			{
				start: engine.MVCCEncodeKey(keys.MakeRangeIDPrefix(d.RangeID)),
				end:   engine.MVCCEncodeKey(keys.MakeRangeIDPrefix(d.RangeID + 1)),
			},
			{
				start: engine.MVCCEncodeKey(keys.MakeRangeKeyPrefix(d.StartKey)),
				end:   engine.MVCCEncodeKey(keys.MakeRangeKeyPrefix(d.EndKey)),
			},
			{
				start: engine.MVCCEncodeKey(dataStartKey),
				end:   engine.MVCCEncodeKey(d.EndKey.AsRawKey()),
			},
		},
		iter: e.NewIterator(),
	}
	ri.iter.Seek(ri.ranges[ri.curIndex].start)
	ri.advance()
	return ri
}
func newRangeDataIterator(d *proto.RangeDescriptor, e engine.Engine) *rangeDataIterator {
	// The first range in the keyspace starts at KeyMin, which includes the node-local
	// space. We need the original StartKey to find the range metadata, but the
	// actual data starts at LocalMax.
	dataStartKey := d.StartKey
	if d.StartKey.Equal(proto.KeyMin) {
		dataStartKey = keys.LocalMax
	}
	ri := &rangeDataIterator{
		ranges: []keyRange{
			{
				start: engine.MVCCEncodeKey(keys.MakeKey(keys.LocalRangeIDPrefix,
					encoding.EncodeUvarint(nil, uint64(d.RangeID)))),
				end: engine.MVCCEncodeKey(keys.MakeKey(keys.LocalRangeIDPrefix,
					encoding.EncodeUvarint(nil, uint64(d.RangeID+1)))),
			},
			{
				start: engine.MVCCEncodeKey(keys.MakeKey(keys.LocalRangePrefix,
					encoding.EncodeBytes(nil, d.StartKey))),
				end: engine.MVCCEncodeKey(keys.MakeKey(keys.LocalRangePrefix,
					encoding.EncodeBytes(nil, d.EndKey))),
			},
			{
				start: engine.MVCCEncodeKey(dataStartKey),
				end:   engine.MVCCEncodeKey(d.EndKey),
			},
		},
		iter: e.NewIterator(),
	}
	ri.iter.Seek(ri.ranges[ri.curIndex].start)
	ri.advance()
	return ri
}
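// A minimal usage sketch for the data iterator constructors above. The
// iterator's method set (Valid, Next, Key, Close) and the exact key type are
// assumed here rather than shown in this excerpt, so treat this as
// illustrative only: it walks every MVCC-encoded key belonging to the
// replica, visiting the three key ranges in order.
func exampleIterateReplicaData(d *roachpb.RangeDescriptor, e engine.Engine, visit func(engine.MVCCKey)) {
	it := newReplicaDataIterator(d, e) // constructor as defined above
	defer it.Close()                   // Close is assumed to release the underlying engine iterator.
	for ; it.Valid(); it.Next() {
		visit(it.Key()) // each key handed to visit is already MVCC-encoded.
	}
}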
func verifyCleanup(key proto.Key, coord *TxnCoordSender, eng engine.Engine, t *testing.T) {
	if len(coord.txns) != 0 {
		t.Errorf("expected empty transactions map; got %d", len(coord.txns))
	}
	if err := util.IsTrueWithin(func() bool {
		meta := &engine.MVCCMetadata{}
		ok, _, _, err := eng.GetProto(engine.MVCCEncodeKey(key), meta)
		if err != nil {
			t.Errorf("error getting MVCC metadata: %s", err)
		}
		return !ok || meta.Txn == nil
	}, 500*time.Millisecond); err != nil {
		t.Errorf("expected intents to be cleaned up within 500ms")
	}
}
func verifyCleanup(key roachpb.Key, coord *TxnCoordSender, eng engine.Engine, t *testing.T) {
	util.SucceedsWithin(t, 500*time.Millisecond, func() error {
		coord.Lock()
		l := len(coord.txns)
		coord.Unlock()
		if l != 0 {
			return fmt.Errorf("expected empty transactions map; got %d", l)
		}
		meta := &engine.MVCCMetadata{}
		ok, _, _, err := eng.GetProto(engine.MVCCEncodeKey(key), meta)
		if err != nil {
			return fmt.Errorf("error getting MVCC metadata: %s", err)
		}
		if ok && meta.Txn != nil {
			return fmt.Errorf("found unexpected write intent: %s", meta)
		}
		return nil
	})
}
func verifyCleanup(key proto.Key, coord *TxnCoordSender, eng engine.Engine, t *testing.T) {
	util.SucceedsWithin(t, 500*time.Millisecond, func() error {
		coord.Lock()
		l := len(coord.txns)
		coord.Unlock()
		if l != 0 {
			return fmt.Errorf("expected empty transactions map; got %d", l)
		}
		meta := &engine.MVCCMetadata{}
		ok, _, _, err := eng.GetProto(engine.MVCCEncodeKey(key), meta)
		if err != nil {
			return fmt.Errorf("error getting MVCC metadata: %s", err)
		}
		if !ok || meta.Txn == nil {
			return nil
		}
		return errors.New("intents not cleaned up")
	})
}
// createRangeData creates sample range data in all possible areas of
// the key space. Returns a slice of the encoded keys of all created
// data.
func createRangeData(r *Replica, t *testing.T) []roachpb.EncodedKey {
	ts0 := roachpb.ZeroTimestamp
	ts := roachpb.Timestamp{WallTime: 1}
	keyTSs := []struct {
		key roachpb.Key
		ts  roachpb.Timestamp
	}{
		{keys.ResponseCacheKey(r.Desc().RangeID, &roachpb.ClientCmdID{WallTime: 1, Random: 1}), ts0},
		{keys.ResponseCacheKey(r.Desc().RangeID, &roachpb.ClientCmdID{WallTime: 2, Random: 2}), ts0},
		{keys.RaftHardStateKey(r.Desc().RangeID), ts0},
		{keys.RaftLogKey(r.Desc().RangeID, 1), ts0},
		{keys.RaftLogKey(r.Desc().RangeID, 2), ts0},
		{keys.RangeGCMetadataKey(r.Desc().RangeID), ts0},
		{keys.RangeLastVerificationTimestampKey(r.Desc().RangeID), ts0},
		{keys.RangeStatsKey(r.Desc().RangeID), ts0},
		{keys.RangeDescriptorKey(r.Desc().StartKey), ts},
		{keys.TransactionKey(roachpb.Key(r.Desc().StartKey), []byte("1234")), ts0},
		{keys.TransactionKey(roachpb.Key(r.Desc().StartKey.Next()), []byte("5678")), ts0},
		{keys.TransactionKey(fakePrevKey(r.Desc().EndKey), []byte("2468")), ts0},
		// TODO(bdarnell): KeyMin.Next() results in a key in the reserved system-local space.
		// Once we have resolved https://github.com/cockroachdb/cockroach/issues/437,
		// replace this with something that reliably generates the first valid key in the range.
		//{r.Desc().StartKey.Next(), ts},
		// The following line is similar to StartKey.Next() but adds more to the key to
		// avoid falling into the system-local space.
		{append(append([]byte{}, r.Desc().StartKey...), '\x01'), ts},
		{fakePrevKey(r.Desc().EndKey), ts},
	}

	keys := []roachpb.EncodedKey{}
	for _, keyTS := range keyTSs {
		if err := engine.MVCCPut(r.store.Engine(), nil, keyTS.key, keyTS.ts, roachpb.MakeValueFromString("value"), nil); err != nil {
			t.Fatal(err)
		}
		keys = append(keys, engine.MVCCEncodeKey(keyTS.key))
		if !keyTS.ts.Equal(ts0) {
			keys = append(keys, engine.MVCCEncodeVersionKey(keyTS.key, keyTS.ts))
		}
	}
	return keys
}
func makeReplicaKeyRanges(d *roachpb.RangeDescriptor) []keyRange {
	// The first range in the keyspace starts at KeyMin, which includes the
	// node-local space. We need the original StartKey to find the range
	// metadata, but the actual data starts at LocalMax.
	dataStartKey := d.StartKey.AsRawKey()
	if d.StartKey.Equal(roachpb.RKeyMin) {
		dataStartKey = keys.LocalMax
	}
	return []keyRange{
		{
			start: engine.MVCCEncodeKey(keys.MakeRangeIDPrefix(d.RangeID)),
			end:   engine.MVCCEncodeKey(keys.MakeRangeIDPrefix(d.RangeID + 1)),
		},
		{
			start: engine.MVCCEncodeKey(keys.MakeRangeKeyPrefix(d.StartKey)),
			end:   engine.MVCCEncodeKey(keys.MakeRangeKeyPrefix(d.EndKey)),
		},
		{
			start: engine.MVCCEncodeKey(dataStartKey),
			end:   engine.MVCCEncodeKey(d.EndKey.AsRawKey()),
		},
	}
}
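// With makeReplicaKeyRanges factored out as above, the constructor from the
// earlier newReplicaDataIterator snippet can simply reuse it. This is a
// sketch assuming the replicaDataIterator type shown earlier; behavior is
// unchanged, the three key ranges are just built by the helper.
func newReplicaDataIterator(d *roachpb.RangeDescriptor, e engine.Engine) *replicaDataIterator {
	ri := &replicaDataIterator{
		ranges: makeReplicaKeyRanges(d),
		iter:   e.NewIterator(),
	}
	ri.iter.Seek(ri.ranges[ri.curIndex].start)
	ri.advance()
	return ri
}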
// CopyInto copies all the results from this sequence cache into the destRangeID
// sequence cache. Failures decoding individual cache entries return an error.
func (sc *SequenceCache) CopyInto(e engine.Engine, destRangeID roachpb.RangeID) error {
	return copySeqCache(e, sc.rangeID, destRangeID,
		engine.MVCCEncodeKey(sc.min), engine.MVCCEncodeKey(sc.max))
}
// ClearData removes all items stored in the persistent cache. It does not alter
// the inflight map.
func (rc *ResponseCache) ClearData(e engine.Engine) error {
	p := keys.ResponseCacheKey(rc.rangeID, nil) // prefix for all response cache entries with this range ID
	end := p.PrefixEnd()
	_, err := engine.ClearRange(e, engine.MVCCEncodeKey(p), engine.MVCCEncodeKey(end))
	return err
}
// CopyFrom copies all the persisted results from the originRangeID
// sequence cache into this one. Note that the cache will not be
// locked while copying is in progress. Failures decoding individual
// entries return an error. The copy is done directly using the engine
// instead of interpreting values through MVCC for efficiency.
func (sc *SequenceCache) CopyFrom(e engine.Engine, originRangeID roachpb.RangeID) error {
	originMin := engine.MVCCEncodeKey(
		keys.SequenceCacheKey(originRangeID, txnIDMin, math.MaxUint32, math.MaxUint32))
	originMax := engine.MVCCEncodeKey(
		keys.SequenceCacheKey(originRangeID, txnIDMax, 0, 0))
	return copySeqCache(e, originRangeID, sc.rangeID, originMin, originMax)
}
// ClearData removes all persisted items stored in the cache.
func (sc *SequenceCache) ClearData(e engine.Engine) error {
	_, err := engine.ClearRange(e, engine.MVCCEncodeKey(sc.min), engine.MVCCEncodeKey(sc.max))
	return err
}
// ClearData removes all items stored in the persistent cache.
func (rc *ResponseCache) ClearData(e engine.Engine) error {
	from := keys.ResponseCacheKey(rc.rangeID, roachpb.KeyMin)
	to := keys.ResponseCacheKey(rc.rangeID, roachpb.KeyMax)
	_, err := engine.ClearRange(e, engine.MVCCEncodeKey(from), engine.MVCCEncodeKey(to))
	return err
}
// TestGCQueueProcess creates test data in the range over various time
// scales and verifies that the scan queue process properly GCs test data.
func TestGCQueueProcess(t *testing.T) {
	defer leaktest.AfterTest(t)
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	const now int64 = 48 * 60 * 60 * 1E9 // 2d past the epoch
	tc.manualClock.Set(now)

	ts1 := makeTS(now-2*24*60*60*1E9+1, 0)                     // 2d old (add one nanosecond so we're not using zero timestamp)
	ts2 := makeTS(now-25*60*60*1E9, 0)                         // GC will occur at time=25 hours
	ts3 := makeTS(now-(intentAgeThreshold.Nanoseconds()+1), 0) // 2h+1ns old
	ts4 := makeTS(now-(intentAgeThreshold.Nanoseconds()-1), 0) // 2h-1ns old
	ts5 := makeTS(now-1E9, 0)                                  // 1s old
	key1 := proto.Key("a")
	key2 := proto.Key("b")
	key3 := proto.Key("c")
	key4 := proto.Key("d")
	key5 := proto.Key("e")
	key6 := proto.Key("f")
	key7 := proto.Key("g")
	key8 := proto.Key("h")
	key9 := proto.Key("i")

	data := []struct {
		key proto.Key
		ts  proto.Timestamp
		del bool
		txn bool
	}{
		// For key1, we expect first two values to GC.
		{key1, ts1, false, false},
		{key1, ts2, false, false},
		{key1, ts5, false, false},
		// For key2, we expect all values to GC, because most recent is deletion.
		{key2, ts1, false, false},
		{key2, ts2, false, false},
		{key2, ts5, true, false},
		// For key3, we expect just ts1 to GC, because most recent deletion is intent.
		{key3, ts1, false, false},
		{key3, ts2, false, false},
		{key3, ts5, true, true},
		// For key4, expect oldest value to GC.
		{key4, ts1, false, false},
		{key4, ts2, false, false},
		// For key5, expect all values to GC (most recent value deleted).
		{key5, ts1, false, false},
		{key5, ts2, true, false},
		// For key6, expect no values to GC because most recent value is intent.
		{key6, ts1, false, false},
		{key6, ts5, true, true},
		// For key7, expect no values to GC because intent is exactly 2h old.
		{key7, ts2, false, false},
		{key7, ts4, true, true},
		// For key8, expect most recent value to resolve by aborting, which will clean it up.
		{key8, ts2, false, false},
		{key8, ts3, true, true},
		// For key9, resolve naked intent with no remaining values.
		{key9, ts3, true, false},
	}

	for i, datum := range data {
		if datum.del {
			dArgs, dReply := deleteArgs(datum.key, tc.rng.Desc().RaftID, tc.store.StoreID())
			dArgs.Timestamp = datum.ts
			if datum.txn {
				dArgs.Txn = newTransaction("test", datum.key, 1, proto.SERIALIZABLE, tc.clock)
				dArgs.Txn.Timestamp = datum.ts
			}
			if err := tc.rng.AddCmd(tc.rng.context(), proto.Call{Args: dArgs, Reply: dReply}); err != nil {
				t.Fatalf("%d: could not delete data: %s", i, err)
			}
		} else {
			pArgs, pReply := putArgs(datum.key, []byte("value"), tc.rng.Desc().RaftID, tc.store.StoreID())
			pArgs.Timestamp = datum.ts
			if datum.txn {
				pArgs.Txn = newTransaction("test", datum.key, 1, proto.SERIALIZABLE, tc.clock)
				pArgs.Txn.Timestamp = datum.ts
			}
			if err := tc.rng.AddCmd(tc.rng.context(), proto.Call{Args: pArgs, Reply: pReply}); err != nil {
				t.Fatalf("%d: could not put data: %s", i, err)
			}
		}
	}

	// Process through a scan queue.
	gcQ := newGCQueue()
	if err := gcQ.process(tc.clock.Now(), tc.rng); err != nil {
		t.Error(err)
	}

	expKVs := []struct {
		key proto.Key
		ts  proto.Timestamp
	}{
		{key1, proto.ZeroTimestamp},
		{key1, ts5},
		{key3, proto.ZeroTimestamp},
		{key3, ts5},
		{key3, ts2},
		{key4, proto.ZeroTimestamp},
		{key4, ts2},
		{key6, proto.ZeroTimestamp},
		{key6, ts5},
		{key6, ts1},
		{key7, proto.ZeroTimestamp},
		{key7, ts4},
		{key7, ts2},
		{key8, proto.ZeroTimestamp},
		{key8, ts2},
	}

	// Read data directly from engine to avoid intent errors from MVCC.
	kvs, err := engine.Scan(tc.store.Engine(), engine.MVCCEncodeKey(key1),
		engine.MVCCEncodeKey(proto.KeyMax), 0)
	if err != nil {
		t.Fatal(err)
	}
	for i, kv := range kvs {
		if key, ts, isValue := engine.MVCCDecodeKey(kv.Key); isValue {
			if log.V(1) {
				log.Infof("%d: %q, ts=%s", i, key, ts)
			}
		} else {
			if log.V(1) {
				log.Infof("%d: %q meta", i, key)
			}
		}
	}
	if len(kvs) != len(expKVs) {
		t.Fatalf("expected length %d; got %d", len(expKVs), len(kvs))
	}
	for i, kv := range kvs {
		key, ts, isValue := engine.MVCCDecodeKey(kv.Key)
		if !key.Equal(expKVs[i].key) {
			t.Errorf("%d: expected key %q; got %q", i, expKVs[i].key, key)
		}
		if !ts.Equal(expKVs[i].ts) {
			t.Errorf("%d: expected ts=%s; got %s", i, expKVs[i].ts, ts)
		}
		if isValue {
			if log.V(1) {
				log.Infof("%d: %q, ts=%s", i, key, ts)
			}
		} else {
			if log.V(1) {
				log.Infof("%d: %q meta", i, key)
			}
		}
	}

	// Verify the oldest extant intent age.
	gcMeta, err := tc.rng.GetGCMetadata()
	if err != nil {
		t.Fatal(err)
	}
	if gcMeta.LastScanNanos != now {
		t.Errorf("expected last scan nanos=%d; got %d", now, gcMeta.LastScanNanos)
	}
	if *gcMeta.OldestIntentNanos != ts4.WallTime {
		t.Errorf("expected oldest intent nanos=%d; got %d", ts4.WallTime, *gcMeta.OldestIntentNanos)
	}

	// Verify that the last verification timestamp was updated as whole range was scanned.
	ts, err := tc.rng.GetLastVerificationTimestamp()
	if err != nil {
		t.Fatal(err)
	}
	if gcMeta.LastScanNanos != ts.WallTime {
		t.Errorf("expected walltime nanos %d; got %d", gcMeta.LastScanNanos, ts.WallTime)
	}
}
// TestGCQueueIntentResolution verifies intent resolution with many
// intents spanning just two transactions.
func TestGCQueueIntentResolution(t *testing.T) {
	defer leaktest.AfterTest(t)
	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	const now int64 = 48 * 60 * 60 * 1E9 // 2d past the epoch
	tc.manualClock.Set(now)

	txns := []*roachpb.Transaction{
		newTransaction("txn1", roachpb.Key("0-00000"), 1, roachpb.SERIALIZABLE, tc.clock),
		newTransaction("txn2", roachpb.Key("1-00000"), 1, roachpb.SERIALIZABLE, tc.clock),
	}
	intentResolveTS := makeTS(now-intentAgeThreshold.Nanoseconds(), 0)
	txns[0].OrigTimestamp = intentResolveTS
	txns[0].Timestamp = intentResolveTS
	txns[1].OrigTimestamp = intentResolveTS
	txns[1].Timestamp = intentResolveTS

	// Two transactions.
	for i := 0; i < 2; i++ {
		// 5 puts per transaction.
		// TODO(spencerkimball): benchmark with ~50k.
		for j := 0; j < 5; j++ {
			pArgs := putArgs(roachpb.Key(fmt.Sprintf("%d-%05d", i, j)), []byte("value"))
			if _, err := client.SendWrappedWith(tc.Sender(), tc.rng.context(), roachpb.BatchRequest_Header{
				Txn: txns[i],
			}, &pArgs); err != nil {
				t.Fatalf("%d: could not put data: %s", i, err)
			}
		}
	}

	cfg := tc.gossip.GetSystemConfig()
	if cfg == nil {
		t.Fatal("nil config")
	}

	// Process through a scan queue.
	gcQ := newGCQueue(tc.gossip)
	if err := gcQ.process(tc.clock.Now(), tc.rng, cfg); err != nil {
		t.Fatal(err)
	}

	// Iterate through all values to ensure intents have been fully resolved.
	meta := &engine.MVCCMetadata{}
	err := tc.store.Engine().Iterate(engine.MVCCEncodeKey(roachpb.KeyMin), engine.MVCCEncodeKey(roachpb.KeyMax),
		func(kv roachpb.RawKeyValue) (bool, error) {
			if key, _, isValue, err := engine.MVCCDecodeKey(kv.Key); err != nil {
				return false, err
			} else if !isValue {
				if err := proto.Unmarshal(kv.Value, meta); err != nil {
					return false, err
				}
				if meta.Txn != nil {
					return false, util.Errorf("non-nil Txn after GC for key %s", key)
				}
			}
			return false, nil
		})
	if err != nil {
		t.Fatal(err)
	}
}