// createRangeData creates sample range data in all possible areas of
// the key space. Returns a slice of the encoded keys of all created
// data.
func createRangeData(t *testing.T, r *Replica) []engine.MVCCKey {
	ts0 := hlc.ZeroTimestamp
	ts := hlc.Timestamp{WallTime: 1}
	desc := r.Desc()
	keyTSs := []struct {
		key roachpb.Key
		ts  hlc.Timestamp
	}{
		{keys.AbortCacheKey(r.RangeID, testTxnID), ts0},
		{keys.AbortCacheKey(r.RangeID, testTxnID2), ts0},
		{keys.RangeFrozenStatusKey(r.RangeID), ts0},
		{keys.RangeLastGCKey(r.RangeID), ts0},
		{keys.RaftAppliedIndexKey(r.RangeID), ts0},
		{keys.RaftTruncatedStateKey(r.RangeID), ts0},
		{keys.RangeLeaseKey(r.RangeID), ts0},
		{keys.LeaseAppliedIndexKey(r.RangeID), ts0},
		{keys.RangeStatsKey(r.RangeID), ts0},
		{keys.RangeTxnSpanGCThresholdKey(r.RangeID), ts0},
		{keys.RaftHardStateKey(r.RangeID), ts0},
		{keys.RaftLastIndexKey(r.RangeID), ts0},
		{keys.RaftLogKey(r.RangeID, 1), ts0},
		{keys.RaftLogKey(r.RangeID, 2), ts0},
		{keys.RangeLastReplicaGCTimestampKey(r.RangeID), ts0},
		{keys.RangeLastVerificationTimestampKeyDeprecated(r.RangeID), ts0},
		{keys.RangeDescriptorKey(desc.StartKey), ts},
		{keys.TransactionKey(roachpb.Key(desc.StartKey), uuid.MakeV4()), ts0},
		{keys.TransactionKey(roachpb.Key(desc.StartKey.Next()), uuid.MakeV4()), ts0},
		{keys.TransactionKey(fakePrevKey(desc.EndKey), uuid.MakeV4()), ts0},
		// TODO(bdarnell): KeyMin.Next() results in a key in the reserved system-local space.
		// Once we have resolved https://github.com/cockroachdb/cockroach/issues/437,
		// replace this with something that reliably generates the first valid key in the range.
		//{r.Desc().StartKey.Next(), ts},
		// The following line is similar to StartKey.Next() but adds more to the key to
		// avoid falling into the system-local space.
		{append(append([]byte{}, desc.StartKey...), '\x02'), ts},
		{fakePrevKey(r.Desc().EndKey), ts},
	}

	mvccKeys := []engine.MVCCKey{}
	for _, keyTS := range keyTSs {
		if err := engine.MVCCPut(context.Background(), r.store.Engine(), nil, keyTS.key, keyTS.ts, roachpb.MakeValueFromString("value"), nil); err != nil {
			t.Fatal(err)
		}
		mvccKeys = append(mvccKeys, engine.MVCCKey{Key: keyTS.key, Timestamp: keyTS.ts})
	}
	return mvccKeys
}
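
// fakePrevKey is referenced above but not defined in this snippet. The
// following is a plausible sketch, an assumption based on its usage: it
// must return a key that sorts immediately before the given key. It uses
// the standard bytes package (import assumed).
func fakePrevKey(k []byte) roachpb.Key {
	const maxLen = 100
	length := len(k)

	if length == 0 {
		panic("cannot get the prev key of an empty key")
	}
	if length > maxLen {
		panic(fmt.Sprintf("test does not support keys longer than %d bytes: %q", maxLen, k))
	}

	// If the last byte is a 0, drop it: "a\x00" sorts immediately after "a".
	if k[length-1] == 0 {
		return k[0 : length-1]
	}

	// Otherwise, subtract one from the last byte and pad with "\xff"s up to
	// the maximum supported key length.
	return roachpb.Key(bytes.Join([][]byte{
		k[0 : length-1],
		{k[length-1] - 1},
		bytes.Repeat([]byte{0xff}, maxLen-length),
	}, nil))
}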
Example #2
// processTransactionTable scans the transaction table and updates txnMap with
// those transactions which are old and either PENDING or have intents
// registered. In the first case we want to push the transaction so that it is
// aborted, and in the second case we may have to resolve the intents
// successfully before GCing the entry. The transaction records which can be
// GC'ed are returned separately and are not added to txnMap nor intentSpanMap.
func processTransactionTable(
	ctx context.Context,
	snap engine.Reader,
	desc *roachpb.RangeDescriptor,
	txnMap map[uuid.UUID]*roachpb.Transaction,
	cutoff hlc.Timestamp,
	infoMu *lockableGCInfo,
	resolveIntents resolveFunc,
) ([]roachpb.GCRequest_GCKey, error) {
	infoMu.Lock()
	defer infoMu.Unlock()

	var gcKeys []roachpb.GCRequest_GCKey
	handleOne := func(kv roachpb.KeyValue) error {
		var txn roachpb.Transaction
		if err := kv.Value.GetProto(&txn); err != nil {
			return err
		}
		infoMu.TransactionSpanTotal++
		if !txn.LastActive().Less(cutoff) {
			return nil
		}

		txnID := *txn.ID

		// The transaction record should be considered for removal.
		switch txn.Status {
		case roachpb.PENDING:
			// Marked as running, so we need to push it to abort it but won't
			// try to GC it in this cycle (for convenience).
			// TODO(tschottdorf): refactor so that we can GC PENDING entries
			// in the same cycle, but keeping the calls to pushTxn in a central
			// location (keeping it easy to batch them up in the future).
			infoMu.TransactionSpanGCPending++
			txnMap[txnID] = &txn
			return nil
		case roachpb.ABORTED:
			// If we remove this transaction, it effectively still counts as
			// ABORTED (by design). So this can be GC'ed even if we can't
			// resolve the intents.
			// Note: most aborted transactions weren't aborted by their client,
			// but instead by the coordinator; those will not have any intents
			// persisted, though they still might exist in the system.
			infoMu.TransactionSpanGCAborted++
			func() {
				infoMu.Unlock() // intentional
				defer infoMu.Lock()
				if err := resolveIntents(roachpb.AsIntents(txn.Intents, &txn),
					true /* wait */, false /* !poison */); err != nil {
					log.Warningf(ctx, "failed to resolve intents of aborted txn on gc: %s", err)
				}
			}()
		case roachpb.COMMITTED:
			// It's committed, so it doesn't need a push but we can only
			// GC it after its intents are resolved.
			if err := func() error {
				infoMu.Unlock() // intentional
				defer infoMu.Lock()
				return resolveIntents(roachpb.AsIntents(txn.Intents, &txn), true /* wait */, false /* !poison */)
			}(); err != nil {
				log.Warningf(ctx, "unable to resolve intents of committed txn on gc: %s", err)
				// Returning the error here would abort the whole GC run, and
				// we don't want that. Instead, we simply don't GC this entry.
				return nil
			}
			infoMu.TransactionSpanGCCommitted++
		default:
			panic(fmt.Sprintf("invalid transaction state: %s", txn))
		}
		gcKeys = append(gcKeys, roachpb.GCRequest_GCKey{Key: kv.Key}) // zero timestamp
		return nil
	}

	startKey := keys.TransactionKey(desc.StartKey.AsRawKey(), uuid.UUID{})
	endKey := keys.TransactionKey(desc.EndKey.AsRawKey(), uuid.UUID{})

	_, err := engine.MVCCIterate(ctx, snap, startKey, endKey,
		hlc.ZeroTimestamp, true /* consistent */, nil, /* txn */
		false /* !reverse */, func(kv roachpb.KeyValue) (bool, error) {
			return false, handleOne(kv)
		})
	return gcKeys, err
}
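
// For context, a minimal sketch (an assumption, not part of this file) of
// how a caller might fold the returned transaction-record keys into the
// GCRequest that is ultimately sent for the range. The GCRequest/Batch
// plumbing mirrors the pattern in processIntentsAsync below.
func gcTxnRecords(
	ctx context.Context,
	db *client.DB,
	desc *roachpb.RangeDescriptor,
	txnKeys []roachpb.GCRequest_GCKey,
) error {
	var gcArgs roachpb.GCRequest
	gcArgs.Span = roachpb.Span{
		Key:    desc.StartKey.AsRawKey(),
		EndKey: desc.EndKey.AsRawKey(),
	}
	// Transaction record keys carry a zero timestamp: the record itself is
	// removed rather than MVCC versions below some timestamp.
	gcArgs.Keys = append(gcArgs.Keys, txnKeys...)

	b := &client.Batch{}
	b.AddRawRequest(&gcArgs)
	return db.Run(ctx, b)
}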
Example #3
func TestGCQueueTransactionTable(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const now time.Duration = 3 * 24 * time.Hour

	const gcTxnAndAC = now - txnCleanupThreshold
	const gcACOnly = now - abortCacheAgeThreshold
	if gcTxnAndAC >= gcACOnly {
		t.Fatalf("test assumption violated due to changing constants; needs adjustment")
	}

	type spec struct {
		status      roachpb.TransactionStatus
		orig        time.Duration
		hb          time.Duration             // last heartbeat (none if zero)
		newStatus   roachpb.TransactionStatus // -1 for GCed
		failResolve bool                      // do we want to fail resolves in this trial?
		expResolve  bool                      // expect attempt at removing txn-persisted intents?
		expAbortGC  bool                      // expect abort cache entries removed?
	}
	// Describes the state of the transaction table before the test. Note that
	// the test writes an abort cache entry for every case, so many of the
	// entries deleted below wouldn't even exist in practice.
	testCases := map[string]spec{
		// Too young, should not touch.
		"aa": {
			status:    roachpb.PENDING,
			orig:      gcACOnly + 1,
			newStatus: roachpb.PENDING,
		},
		// A little older, so the AbortCache gets cleaned up.
		"ab": {
			status:     roachpb.PENDING,
			orig:       gcTxnAndAC + 1,
			newStatus:  roachpb.PENDING,
			expAbortGC: true,
		},
		// Old and pending, but with a recent heartbeat (so no push is
		// attempted; it would succeed). It's old enough to delete the abort
		// cache entry though.
		"ba": {
			status:     roachpb.PENDING,
			hb:         gcTxnAndAC + 1,
			newStatus:  roachpb.PENDING,
			expAbortGC: true,
		},
		// Not old enough for Txn GC, but old enough to remove the abort cache entry.
		"bb": {
			status:     roachpb.ABORTED,
			orig:       gcACOnly - 1,
			newStatus:  roachpb.ABORTED,
			expAbortGC: true,
		},
		// Old, pending and abandoned. Should push and abort it successfully,
		// but not GC it just yet (this is an artifact of the implementation).
		// The abort cache gets cleaned up though.
		"c": {
			status:     roachpb.PENDING,
			orig:       gcTxnAndAC - 1,
			newStatus:  roachpb.ABORTED,
			expAbortGC: true,
		},
		// Old and aborted, should delete.
		"d": {
			status:     roachpb.ABORTED,
			orig:       gcTxnAndAC - 1,
			newStatus:  -1,
			expResolve: true,
			expAbortGC: true,
		},
		// Committed and fresh, so no action. But the abort cache entry is old
		// enough to be discarded.
		"e": {
			status:     roachpb.COMMITTED,
			orig:       gcTxnAndAC + 1,
			newStatus:  roachpb.COMMITTED,
			expAbortGC: true,
		},
		// Committed and old. It has an intent (like all tests here), which is
		// resolvable and hence we can GC.
		"f": {
			status:     roachpb.COMMITTED,
			orig:       gcTxnAndAC - 1,
			newStatus:  -1,
			expResolve: true,
			expAbortGC: true,
		},
		// Same as the previous one, but we've rigged things so that the intent
		// resolution here will fail and consequently no GC is expected.
		"g": {
			status:      roachpb.COMMITTED,
			orig:        gcTxnAndAC - 1,
			newStatus:   roachpb.COMMITTED,
			failResolve: true,
			expResolve:  true,
			expAbortGC:  true,
		},
	}

	resolved := map[string][]roachpb.Span{}

	tc := testContext{}
	tsc := TestStoreConfig()
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if resArgs, ok := filterArgs.Req.(*roachpb.ResolveIntentRequest); ok {
				id := string(resArgs.IntentTxn.Key)
				resolved[id] = append(resolved[id], roachpb.Span{
					Key:    resArgs.Key,
					EndKey: resArgs.EndKey,
				})
				// We've special-cased one test case ("g") to fail intent
				// resolution. Note that the intent is still counted in `resolved`.
				if testCases[id].failResolve {
					return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
				}
			}
			return nil
		}
	tc.StartWithStoreConfig(t, tsc)
	defer tc.Stop()
	tc.manualClock.Set(int64(now))

	outsideKey := tc.rng.Desc().EndKey.Next().AsRawKey()
	testIntents := []roachpb.Span{{Key: roachpb.Key("intent")}}

	txns := map[string]roachpb.Transaction{}
	for strKey, test := range testCases {
		baseKey := roachpb.Key(strKey)
		txnClock := hlc.NewClock(hlc.NewManualClock(int64(test.orig)).UnixNano)
		txn := newTransaction("txn1", baseKey, 1, enginepb.SERIALIZABLE, txnClock)
		txn.Status = test.status
		txn.Intents = testIntents
		if test.hb > 0 {
			txn.LastHeartbeat = &hlc.Timestamp{WallTime: int64(test.hb)}
		}
		// Set a high Timestamp to make sure it does not matter. Only
		// OrigTimestamp (and heartbeat) are used for GC decisions.
		txn.Timestamp.Forward(hlc.MaxTimestamp)
		txns[strKey] = *txn
		for _, addrKey := range []roachpb.Key{baseKey, outsideKey} {
			key := keys.TransactionKey(addrKey, txn.ID)
			if err := engine.MVCCPutProto(context.Background(), tc.engine, nil, key, hlc.ZeroTimestamp, nil, txn); err != nil {
				t.Fatal(err)
			}
		}
		entry := roachpb.AbortCacheEntry{Key: txn.Key, Timestamp: txn.LastActive()}
		if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
			t.Fatal(err)
		}
	}

	// Run GC.
	gcQ := newGCQueue(tc.store, tc.gossip)
	cfg, ok := tc.gossip.GetSystemConfig()
	if !ok {
		t.Fatal("config not set")
	}

	if err := gcQ.process(context.Background(), tc.clock.Now(), tc.rng, cfg); err != nil {
		t.Fatal(err)
	}

	util.SucceedsSoon(t, func() error {
		for strKey, sp := range testCases {
			txn := &roachpb.Transaction{}
			key := keys.TransactionKey(roachpb.Key(strKey), txns[strKey].ID)
			ok, err := engine.MVCCGetProto(context.Background(), tc.engine, key, hlc.ZeroTimestamp, true, nil, txn)
			if err != nil {
				return err
			}
			if sp.newStatus == -1 {
				// The record is expected to have been GC'ed.
				if ok {
					return fmt.Errorf("%s: expected txn record to be GC'ed, but found %s", strKey, txn)
				}
			} else if sp.newStatus != txn.Status {
				return fmt.Errorf("%s: expected status %s, but found %s", strKey, sp.newStatus, txn.Status)
			}
			var expIntents []roachpb.Span
			if sp.expResolve {
				expIntents = testIntents
			}
			if !reflect.DeepEqual(resolved[strKey], expIntents) {
				return fmt.Errorf("%s: unexpected intent resolutions:\nexpected: %s\nobserved: %s",
					strKey, expIntents, resolved[strKey])
			}
			entry := &roachpb.AbortCacheEntry{}
			abortExists, err := tc.rng.abortCache.Get(context.Background(), tc.store.Engine(), txns[strKey].ID, entry)
			if err != nil {
				t.Fatal(err)
			}
			if abortExists == sp.expAbortGC {
				return fmt.Errorf("%s: expected abort cache gc: %t, found %+v", strKey, sp.expAbortGC, entry)
			}
		}
		return nil
	})

	outsideTxnPrefix := keys.TransactionKey(outsideKey, uuid.EmptyUUID)
	outsideTxnPrefixEnd := keys.TransactionKey(outsideKey.Next(), uuid.EmptyUUID)
	var count int
	if _, err := engine.MVCCIterate(context.Background(), tc.store.Engine(), outsideTxnPrefix, outsideTxnPrefixEnd, hlc.ZeroTimestamp,
		true, nil, false, func(roachpb.KeyValue) (bool, error) {
			count++
			return false, nil
		}); err != nil {
		t.Fatal(err)
	}
	if exp := len(testCases); exp != count {
		t.Fatalf("expected the %d external transaction entries to remain untouched, "+
			"but only %d are left", exp, count)
	}

	batch := tc.engine.NewSnapshot()
	defer batch.Close()
	tc.rng.assertState(batch) // check that in-mem and on-disk state were updated

	tc.rng.mu.Lock()
	txnSpanThreshold := tc.rng.mu.state.TxnSpanGCThreshold
	tc.rng.mu.Unlock()

	// Verify that the new TxnSpanGCThreshold has reached the Replica.
	if expWT := int64(gcTxnAndAC); txnSpanThreshold.WallTime != expWT {
		t.Fatalf("expected TxnSpanGCThreshold.Walltime %d, got timestamp %s",
			expWT, txnSpanThreshold)
	}
}
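
// newTransaction is a test helper referenced above but not shown. A hedged
// sketch of what it plausibly does, assuming the roachpb.NewTransaction
// constructor: build a transaction anchored at baseKey whose timestamps are
// taken from the supplied clock.
func newTransaction(
	name string,
	baseKey roachpb.Key,
	userPriority roachpb.UserPriority,
	isolation enginepb.IsolationType,
	clock *hlc.Clock,
) *roachpb.Transaction {
	var offset int64
	var now hlc.Timestamp
	if clock != nil {
		offset = clock.MaxOffset().Nanoseconds()
		now = clock.Now()
	}
	return roachpb.NewTransaction(name, baseKey, userPriority, isolation, now, offset)
}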
Example #4
// processIntentsAsync asynchronously processes intents which were
// encountered during another command but did not interfere with the
// execution of that command. This occurs in two cases: inconsistent
// reads and EndTransaction (which queues its own external intents for
// processing via this method). The two cases are handled somewhat
// differently and would be better served by different entry points,
// but combining them simplifies the plumbing necessary in Replica.
func (ir *intentResolver) processIntentsAsync(r *Replica, intents []intentsWithArg) {
	now := r.store.Clock().Now()
	ctx := context.TODO()
	stopper := r.store.Stopper()

	for _, item := range intents {
		if item.args.Method() != roachpb.EndTransaction {
			if err := stopper.RunLimitedAsyncTask(
				ctx, ir.sem, true /* wait */, func(ctx context.Context) {
					// Everything here is best effort; give up rather than waiting
					// too long (helps avoid deadlocks during test shutdown,
					// although this is imperfect due to the use of an
					// uninterruptible WaitGroup.Wait in beginCmds).
					ctxWithTimeout, cancel := context.WithTimeout(ctx, base.NetworkTimeout)
					defer cancel()
					h := roachpb.Header{Timestamp: now}
					resolveIntents, pushErr := ir.maybePushTransactions(ctxWithTimeout,
						item.intents, h, roachpb.PUSH_TOUCH, true /* skipInFlight */)

					// resolveIntents with poison=true because we're resolving
					// intents outside of the context of an EndTransaction.
					//
					// Naively, it doesn't seem like we need to poison the abort
					// cache since we're pushing with PUSH_TOUCH - meaning that
					// the primary way our Push leads to aborting intents is that
					// of the transaction having timed out (and thus presumably no
					// client being around any more, though at the time of writing
					// we don't guarantee that). But there are other paths in which
					// the Push comes back successful while the coordinating client
					// may still be active. Examples of this are when:
					//
					// - the transaction was aborted by someone else, but the
					//   coordinating client may still be running.
					// - the transaction entry wasn't written yet, which at the
					//   time of writing has our push abort it, leading to the
					//   same situation as above.
					//
					// Thus, we must poison.
					if err := ir.resolveIntents(ctxWithTimeout, resolveIntents,
						true /* wait */, true /* poison */); err != nil {
						log.Warningf(ctx, "%s: failed to resolve intents: %s", r, err)
						return
					}
					if pushErr != nil {
						log.Warningf(ctx, "%s: failed to push during intent resolution: %s", r, pushErr)
						return
					}
				}); err != nil {
				log.Warningf(ctx, "failed to resolve intents: %s", err)
				return
			}
		} else { // EndTransaction
			if err := stopper.RunLimitedAsyncTask(
				ctx, ir.sem, true /* wait */, func(ctx context.Context) {
					ctxWithTimeout, cancel := context.WithTimeout(ctx, base.NetworkTimeout)
					defer cancel()

					// For EndTransaction, we know the transaction is finalized so
					// we can skip the push and go straight to the resolve.
					//
					// This mechanism assumes that when an EndTransaction fails,
					// the client makes no assumptions about the result. For
					// example, an attempt to explicitly rollback the transaction
					// may succeed (triggering this code path), but the result may
					// not make it back to the client.
					if err := ir.resolveIntents(ctxWithTimeout, item.intents,
						true /* wait */, false /* !poison */); err != nil {
						log.Warningf(ctx, "%s: failed to resolve intents: %s", r, err)
						return
					}

					// We successfully resolved the intents, so we're able to GC from
					// the txn span directly.
					b := &client.Batch{}
					txn := item.intents[0].Txn
					txnKey := keys.TransactionKey(txn.Key, *txn.ID)

					// This is pretty tricky. Transaction keys are range-local and
					// so they are encoded specially. The key range addressed by
					// (txnKey, txnKey.Next()) might be empty (since Next() does
					// not imply monotonicity on the address side). Instead, we
					// send this request to a range determined using the resolved
					// transaction anchor, i.e. if the txn is anchored on
					// /Local/RangeDescriptor/"a"/uuid, the key range below would
					// be ["a", "a\x00"). However, the first range is special again
					// because the above procedure results in KeyMin, but we need
					// at least KeyLocalMax.
					//
					// #7880 will address this by making GCRequest less special and
					// thus obviating the need to cook up an artificial range here.
					var gcArgs roachpb.GCRequest
					{
						key := keys.MustAddr(txn.Key)
						if localMax := keys.MustAddr(keys.LocalMax); key.Less(localMax) {
							key = localMax
						}
						endKey := key.Next()

						gcArgs.Span = roachpb.Span{
							Key:    key.AsRawKey(),
							EndKey: endKey.AsRawKey(),
						}
					}

					gcArgs.Keys = append(gcArgs.Keys, roachpb.GCRequest_GCKey{
						Key: txnKey,
					})
					b.AddRawRequest(&gcArgs)
					if err := ir.store.db.Run(ctx, b); err != nil {
						log.Warningf(ctx, "could not GC completed transaction anchored at %s: %s",
							roachpb.Key(txn.Key), err)
						return
					}
				}); err != nil {
				log.Warningf(ctx, "failed to resolve intents: %s", err)
				return
			}
		}
	}
}
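
// A hedged sketch of the calling side (an assumption; intentsWithArg's field
// names are inferred from their use above): after a command finishes, any
// intents it encountered but did not conflict with are handed off for
// asynchronous resolution.
func (r *Replica) handleSkippedIntents(args roachpb.Request, intents []roachpb.Intent) {
	if len(intents) == 0 {
		return
	}
	r.store.intentResolver.processIntentsAsync(r, []intentsWithArg{
		{args: args, intents: intents},
	})
}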