Example No. 1
// loadTxnSpanGCThreshold loads the transaction span GC threshold for the
// given range.
func loadTxnSpanGCThreshold(
	ctx context.Context, reader engine.Reader, rangeID roachpb.RangeID,
) (hlc.Timestamp, error) {
	var t hlc.Timestamp
	_, err := engine.MVCCGetProto(ctx, reader, keys.RangeTxnSpanGCThresholdKey(rangeID),
		hlc.ZeroTimestamp, true, nil, &t)
	return t, err
}
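Every loader on this page follows the same shape: call engine.MVCCGetProto with a reader, a key, hlc.ZeroTimestamp (these are unversioned metadata keys), a consistency flag and an optional transaction, and use the returned bool to tell "not found" apart from a read error. A minimal generic sketch of that pattern follows; loadRangeProto is a hypothetical helper written only for illustration, and msg is assumed to satisfy the protobuf message interface used by the engine package.

// loadRangeProto is a hypothetical helper showing the shared pattern: read a
// proto stored at an unversioned key, returning found=false (and leaving msg
// at its zero value) when nothing has been written yet.
func loadRangeProto(
	ctx context.Context, reader engine.Reader, key roachpb.Key, msg proto.Message,
) (bool, error) {
	return engine.MVCCGetProto(ctx, reader, key,
		hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, msg)
}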
Example No. 2
// Get looks up an abort cache entry recorded for this transaction ID.
// Returns whether an abort record was found and any error.
func (sc *AbortCache) Get(
	ctx context.Context, e engine.Reader, txnID uuid.UUID, entry *roachpb.AbortCacheEntry,
) (bool, error) {

	// Pull response from disk and read into reply if available.
	key := keys.AbortCacheKey(sc.rangeID, txnID)
	ok, err := engine.MVCCGetProto(ctx, e, key, hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, entry)
	return ok, err
}
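A caller-side sketch for Get, hedged: checkAborted below is hypothetical and exists only to show how the returned bool gates use of the entry; the log package is the one used elsewhere on this page.

// checkAborted is a hypothetical caller sketch: it reports whether txnID has
// an abort cache entry on this range, logging the entry when one is found.
func (sc *AbortCache) checkAborted(
	ctx context.Context, e engine.Reader, txnID uuid.UUID,
) (bool, error) {
	var entry roachpb.AbortCacheEntry
	aborted, err := sc.Get(ctx, e, txnID, &entry)
	if err != nil || !aborted {
		return false, err
	}
	// entry.Key and entry.Timestamp are only meaningful when aborted is true.
	log.Infof(ctx, "abort cache entry for %s: key=%s ts=%s", txnID, entry.Key, entry.Timestamp)
	return true, nil
}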
Example No. 3
// loadLease loads the range lease record for the given range.
func loadLease(
	ctx context.Context, reader engine.Reader, rangeID roachpb.RangeID,
) (roachpb.Lease, error) {
	var lease roachpb.Lease
	_, err := engine.MVCCGetProto(ctx, reader,
		keys.RangeLeaseKey(rangeID), hlc.ZeroTimestamp,
		true, nil, &lease,
	)
	return lease, err
}
Example No. 4
// loadTruncatedState loads the Raft truncated state for the given range.
func loadTruncatedState(
	ctx context.Context, reader engine.Reader, rangeID roachpb.RangeID,
) (roachpb.RaftTruncatedState, error) {
	var truncState roachpb.RaftTruncatedState
	if _, err := engine.MVCCGetProto(ctx, reader,
		keys.RaftTruncatedStateKey(rangeID), hlc.ZeroTimestamp, true,
		nil, &truncState); err != nil {
		return roachpb.RaftTruncatedState{}, err
	}
	return truncState, nil
}
Example No. 5
// loadHardState loads the Raft HardState for the given range, returning an
// empty HardState if none has been persisted.
func loadHardState(
	ctx context.Context, reader engine.Reader, rangeID roachpb.RangeID,
) (raftpb.HardState, error) {
	var hs raftpb.HardState
	found, err := engine.MVCCGetProto(ctx, reader,
		keys.RaftHardStateKey(rangeID), hlc.ZeroTimestamp, true, nil, &hs)

	if !found || err != nil {
		return raftpb.HardState{}, err
	}
	return hs, nil
}
Example No. 6
// TestGCQueueLastProcessedTimestamps verifies that queue last-processed
// entries which are not anchored at the range's start key are removed by
// the GC queue, while the entries at the start key are preserved.
func TestGCQueueLastProcessedTimestamps(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	stopper := stop.NewStopper()
	defer stopper.Stop()
	tc.Start(t, stopper)

	// Create two last processed times both at the range start key and
	// also at some mid-point key in order to simulate a merge.
	lastProcessedVals := []struct {
		key   roachpb.Key
		expGC bool
	}{
		{keys.QueueLastProcessedKey(roachpb.RKeyMin, "timeSeriesMaintenance"), false},
		{keys.QueueLastProcessedKey(roachpb.RKeyMin, "replica consistency checker"), false},
		{keys.QueueLastProcessedKey(roachpb.RKey("a"), "timeSeriesMaintenance"), true},
		{keys.QueueLastProcessedKey(roachpb.RKey("b"), "replica consistency checker"), true},
	}

	ts := tc.Clock().Now()
	for _, lpv := range lastProcessedVals {
		if err := engine.MVCCPutProto(context.Background(), tc.engine, nil, lpv.key, hlc.ZeroTimestamp, nil, &ts); err != nil {
			t.Fatal(err)
		}
	}

	cfg, ok := tc.gossip.GetSystemConfig()
	if !ok {
		t.Fatal("config not set")
	}

	// Process the replica through the GC queue.
	gcQ := newGCQueue(tc.store, tc.gossip)
	if err := gcQ.process(context.Background(), tc.repl, cfg); err != nil {
		t.Fatal(err)
	}

	// Verify GC.
	testutils.SucceedsSoon(t, func() error {
		for _, lpv := range lastProcessedVals {
			ok, err := engine.MVCCGetProto(context.Background(), tc.engine, lpv.key, hlc.ZeroTimestamp, true, nil, &ts)
			if err != nil {
				return err
			}
			if ok == lpv.expGC {
				return errors.Errorf("expected GC of %s: %t; got %t", lpv.key, lpv.expGC, ok)
			}
		}
		return nil
	})
}
Example No. 7
// loadReplicaDestroyedError loads the replica destroyed error for the specified
// range. If there is no error, nil is returned.
func loadReplicaDestroyedError(
	ctx context.Context, reader engine.Reader, rangeID roachpb.RangeID,
) (*roachpb.Error, error) {
	var v roachpb.Error
	found, err := engine.MVCCGetProto(ctx, reader,
		keys.RangeReplicaDestroyedErrorKey(rangeID),
		hlc.ZeroTimestamp, true /* consistent */, nil, &v)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, nil
	}
	return &v, nil
}
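A possible caller-side check, as a sketch rather than code from the source: checkNotDestroyed is hypothetical and simply distinguishes a read failure from a persisted destroyed-replica marker.

// checkNotDestroyed is a hypothetical caller sketch: a read failure is
// returned as-is, while a persisted *roachpb.Error means the replica has been
// destroyed and further use should be rejected.
func checkNotDestroyed(
	ctx context.Context, reader engine.Reader, rangeID roachpb.RangeID,
) error {
	pErr, err := loadReplicaDestroyedError(ctx, reader, rangeID)
	if err != nil {
		return err
	}
	if pErr != nil {
		return errors.Errorf("replica of range %d is destroyed: %s", rangeID, pErr)
	}
	return nil
}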
Example No. 8
// ReadBootstrapInfo implements the gossip.Storage interface. It reads
// gossip bootstrap info from every known store and uses the most recent
// copy to initialize the bootstrap info argument. An error is returned
// on any problem reading a store's data, excluding the case in which no
// data has been persisted yet.
func (ls *Stores) ReadBootstrapInfo(bi *gossip.BootstrapInfo) error {
	ls.mu.RLock()
	defer ls.mu.RUnlock()
	latestTS := hlc.ZeroTimestamp

	ctx := ls.AnnotateCtx(context.TODO())

	// Find the most recent bootstrap info.
	for _, s := range ls.storeMap {
		var storeBI gossip.BootstrapInfo
		ok, err := engine.MVCCGetProto(ctx, s.engine, keys.StoreGossipKey(), hlc.ZeroTimestamp, true, nil, &storeBI)
		if err != nil {
			return err
		}
		if ok && latestTS.Less(storeBI.Timestamp) {
			latestTS = storeBI.Timestamp
			*bi = storeBI
		}
	}
	log.Infof(ctx, "read %d node addresses from persistent storage", len(bi.Addresses))
	return ls.updateBootstrapInfo(bi)
}
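The write side is not shown on this page. The sketch below is a hedged guess at the counterpart: the method name writeBootstrapInfo and the clock field on Stores are assumptions, and locking is omitted; keys.StoreGossipKey and engine.MVCCPutProto are the same helpers used in the other examples here.

// writeBootstrapInfo is a hypothetical sketch of the inverse of
// ReadBootstrapInfo: stamp the info with the current time and persist it to
// every known store so that the freshest copy wins on the next read.
func (ls *Stores) writeBootstrapInfo(ctx context.Context, bi *gossip.BootstrapInfo) error {
	bi.Timestamp = ls.clock.Now() // assumes Stores carries an *hlc.Clock
	for _, s := range ls.storeMap {
		if err := engine.MVCCPutProto(ctx, s.engine, nil /* ms */, keys.StoreGossipKey(),
			hlc.ZeroTimestamp, nil /* txn */, bi); err != nil {
			return err
		}
	}
	log.Infof(ctx, "wrote %d node addresses to persistent storage", len(bi.Addresses))
	return nil
}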
Example No. 9
// TestGCQueueTransactionTable verifies that the GC queue removes or pushes
// transaction records according to their status and age, resolves the
// intents of transactions it garbage collects, and cleans up sufficiently
// old abort cache entries.
func TestGCQueueTransactionTable(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const now time.Duration = 3 * 24 * time.Hour

	const gcTxnAndAC = now - txnCleanupThreshold
	const gcACOnly = now - abortCacheAgeThreshold
	if gcTxnAndAC >= gcACOnly {
		t.Fatalf("test assumption violated due to changing constants; needs adjustment")
	}

	type spec struct {
		status      roachpb.TransactionStatus
		orig        time.Duration
		hb          time.Duration             // last heartbeat (none if ZeroTimestamp)
		newStatus   roachpb.TransactionStatus // -1 for GCed
		failResolve bool                      // do we want to fail resolves in this trial?
		expResolve  bool                      // expect attempt at removing txn-persisted intents?
		expAbortGC  bool                      // expect abort cache entries removed?
	}
	// Describes the state of the Txn table before the test.
	// Many of the abort cache entries deleted by this test wouldn't even
	// exist in practice, so don't be confused by that.
	testCases := map[string]spec{
		// Too young, should not touch.
		"aa": {
			status:    roachpb.PENDING,
			orig:      gcACOnly + 1,
			newStatus: roachpb.PENDING,
		},
		// A little older, so the AbortCache gets cleaned up.
		"ab": {
			status:     roachpb.PENDING,
			orig:       gcTxnAndAC + 1,
			newStatus:  roachpb.PENDING,
			expAbortGC: true,
		},
		// Old and pending, but still heartbeat (so no Push attempted; it would succeed).
		// It's old enough to delete the abort cache entry though.
		"ba": {
			status:     roachpb.PENDING,
			hb:         gcTxnAndAC + 1,
			newStatus:  roachpb.PENDING,
			expAbortGC: true,
		},
		// Not old enough for Txn GC, but old enough to remove the abort cache entry.
		"bb": {
			status:     roachpb.ABORTED,
			orig:       gcACOnly - 1,
			newStatus:  roachpb.ABORTED,
			expAbortGC: true,
		},
		// Old, pending and abandoned. Should push and abort it successfully,
		// but not GC it just yet (this is an artifact of the implementation).
		// The abort cache gets cleaned up though.
		"c": {
			status:     roachpb.PENDING,
			orig:       gcTxnAndAC - 1,
			newStatus:  roachpb.ABORTED,
			expAbortGC: true,
		},
		// Old and aborted, should delete.
		"d": {
			status:     roachpb.ABORTED,
			orig:       gcTxnAndAC - 1,
			newStatus:  -1,
			expResolve: true,
			expAbortGC: true,
		},
		// Committed and fresh, so no action. But the abort cache entry is old
		// enough to be discarded.
		"e": {
			status:     roachpb.COMMITTED,
			orig:       gcTxnAndAC + 1,
			newStatus:  roachpb.COMMITTED,
			expAbortGC: true,
		},
		// Committed and old. It has an intent (like all tests here), which is
		// resolvable and hence we can GC.
		"f": {
			status:     roachpb.COMMITTED,
			orig:       gcTxnAndAC - 1,
			newStatus:  -1,
			expResolve: true,
			expAbortGC: true,
		},
		// Same as the previous one, but we've rigged things so that the intent
		// resolution here will fail and consequently no GC is expected.
		"g": {
			status:      roachpb.COMMITTED,
			orig:        gcTxnAndAC - 1,
			newStatus:   roachpb.COMMITTED,
			failResolve: true,
			expResolve:  true,
			expAbortGC:  true,
		},
	}

	resolved := map[string][]roachpb.Span{}

	tc := testContext{}
	tsc := TestStoreConfig()
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if resArgs, ok := filterArgs.Req.(*roachpb.ResolveIntentRequest); ok {
				id := string(resArgs.IntentTxn.Key)
				resolved[id] = append(resolved[id], roachpb.Span{
					Key:    resArgs.Key,
					EndKey: resArgs.EndKey,
				})
				// We've special cased one test case. Note that the intent is still
				// counted in `resolved`.
				if testCases[id].failResolve {
					return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
				}
			}
			return nil
		}
	tc.StartWithStoreConfig(t, tsc)
	defer tc.Stop()
	tc.manualClock.Set(int64(now))

	outsideKey := tc.rng.Desc().EndKey.Next().AsRawKey()
	testIntents := []roachpb.Span{{Key: roachpb.Key("intent")}}

	txns := map[string]roachpb.Transaction{}
	for strKey, test := range testCases {
		baseKey := roachpb.Key(strKey)
		txnClock := hlc.NewClock(hlc.NewManualClock(int64(test.orig)).UnixNano)
		txn := newTransaction("txn1", baseKey, 1, enginepb.SERIALIZABLE, txnClock)
		txn.Status = test.status
		txn.Intents = testIntents
		if test.hb > 0 {
			txn.LastHeartbeat = &hlc.Timestamp{WallTime: int64(test.hb)}
		}
		// Set a high Timestamp to make sure it does not matter. Only
		// OrigTimestamp (and heartbeat) are used for GC decisions.
		txn.Timestamp.Forward(hlc.MaxTimestamp)
		txns[strKey] = *txn
		for _, addrKey := range []roachpb.Key{baseKey, outsideKey} {
			key := keys.TransactionKey(addrKey, txn.ID)
			if err := engine.MVCCPutProto(context.Background(), tc.engine, nil, key, hlc.ZeroTimestamp, nil, txn); err != nil {
				t.Fatal(err)
			}
		}
		entry := roachpb.AbortCacheEntry{Key: txn.Key, Timestamp: txn.LastActive()}
		if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
			t.Fatal(err)
		}
	}

	// Run GC.
	gcQ := newGCQueue(tc.store, tc.gossip)
	cfg, ok := tc.gossip.GetSystemConfig()
	if !ok {
		t.Fatal("config not set")
	}

	if err := gcQ.process(context.Background(), tc.clock.Now(), tc.rng, cfg); err != nil {
		t.Fatal(err)
	}

	util.SucceedsSoon(t, func() error {
		for strKey, sp := range testCases {
			txn := &roachpb.Transaction{}
			key := keys.TransactionKey(roachpb.Key(strKey), txns[strKey].ID)
			ok, err := engine.MVCCGetProto(context.Background(), tc.engine, key, hlc.ZeroTimestamp, true, nil, txn)
			if err != nil {
				return err
			}
			if expGC := (sp.newStatus == -1); expGC {
				if expGC != !ok {
					return fmt.Errorf("%s: expected gc: %t, but found %s\n%s", strKey, expGC, txn, roachpb.Key(strKey))
				}
			} else if sp.newStatus != txn.Status {
				return fmt.Errorf("%s: expected status %s, but found %s", strKey, sp.newStatus, txn.Status)
			}
			var expIntents []roachpb.Span
			if sp.expResolve {
				expIntents = testIntents
			}
			if !reflect.DeepEqual(resolved[strKey], expIntents) {
				return fmt.Errorf("%s: unexpected intent resolutions:\nexpected: %s\nobserved: %s",
					strKey, expIntents, resolved[strKey])
			}
			entry := &roachpb.AbortCacheEntry{}
			abortExists, err := tc.rng.abortCache.Get(context.Background(), tc.store.Engine(), txns[strKey].ID, entry)
			if err != nil {
				t.Fatal(err)
			}
			if abortExists == sp.expAbortGC {
				return fmt.Errorf("%s: expected abort cache gc: %t, found %+v", strKey, sp.expAbortGC, entry)
			}
		}
		return nil
	})

	outsideTxnPrefix := keys.TransactionKey(outsideKey, uuid.EmptyUUID)
	outsideTxnPrefixEnd := keys.TransactionKey(outsideKey.Next(), uuid.EmptyUUID)
	var count int
	if _, err := engine.MVCCIterate(context.Background(), tc.store.Engine(), outsideTxnPrefix, outsideTxnPrefixEnd, hlc.ZeroTimestamp,
		true, nil, false, func(roachpb.KeyValue) (bool, error) {
			count++
			return false, nil
		}); err != nil {
		t.Fatal(err)
	}
	if exp := len(testCases); exp != count {
		t.Fatalf("expected the %d external transaction entries to remain untouched, "+
			"but only %d are left", exp, count)
	}

	batch := tc.engine.NewSnapshot()
	defer batch.Close()
	tc.rng.assertState(batch) // check that in-mem and on-disk state were updated

	tc.rng.mu.Lock()
	txnSpanThreshold := tc.rng.mu.state.TxnSpanGCThreshold
	tc.rng.mu.Unlock()

	// Verify that the new TxnSpanGCThreshold has reached the Replica.
	if expWT := int64(gcTxnAndAC); txnSpanThreshold.WallTime != expWT {
		t.Fatalf("expected TxnSpanGCThreshold.Walltime %d, got timestamp %s",
			expWT, txnSpanThreshold)
	}
}
Example No. 10
// snapshot creates an OutgoingSnapshot containing a rocksdb snapshot for the given range.
func snapshot(
	ctx context.Context,
	snapType string,
	snap engine.Reader,
	rangeID roachpb.RangeID,
	eCache *raftEntryCache,
	startKey roachpb.RKey,
) (OutgoingSnapshot, error) {
	var desc roachpb.RangeDescriptor
	// We ignore intents on the range descriptor (consistent=false) because we
	// know they cannot be committed yet; operations that modify range
	// descriptors resolve their own intents when they commit.
	ok, err := engine.MVCCGetProto(ctx, snap, keys.RangeDescriptorKey(startKey),
		hlc.MaxTimestamp, false /* !consistent */, nil, &desc)
	if err != nil {
		return OutgoingSnapshot{}, errors.Errorf("failed to get desc: %s", err)
	}
	if !ok {
		return OutgoingSnapshot{}, errors.Errorf("couldn't find range descriptor")
	}

	var snapData roachpb.RaftSnapshotData
	// Store the RangeDescriptor as metadata; it will be retrieved by ApplySnapshot().
	snapData.RangeDescriptor = desc

	// Read the range metadata from the snapshot instead of the members
	// of the Range struct because they might be changed concurrently.
	appliedIndex, _, err := loadAppliedIndex(ctx, snap, rangeID)
	if err != nil {
		return OutgoingSnapshot{}, err
	}

	// Synthesize our raftpb.ConfState from desc.
	var cs raftpb.ConfState
	for _, rep := range desc.Replicas {
		cs.Nodes = append(cs.Nodes, uint64(rep.ReplicaID))
	}

	term, err := term(ctx, snap, rangeID, eCache, appliedIndex)
	if err != nil {
		return OutgoingSnapshot{}, errors.Errorf("failed to fetch term of %d: %s", appliedIndex, err)
	}

	state, err := loadState(ctx, snap, &desc)
	if err != nil {
		return OutgoingSnapshot{}, err
	}

	// Intentionally let this iterator and the snapshot escape so that the
	// streamer can send chunks from it bit by bit.
	iter := NewReplicaDataIterator(&desc, snap, true /* replicatedOnly */)
	snapUUID := uuid.MakeV4()

	log.Infof(ctx, "generated %s snapshot %s at index %d",
		snapType, snapUUID.Short(), appliedIndex)
	return OutgoingSnapshot{
		EngineSnap: snap,
		Iter:       iter,
		State:      state,
		SnapUUID:   snapUUID,
		RaftSnap: raftpb.Snapshot{
			Data: snapUUID.GetBytes(),
			Metadata: raftpb.SnapshotMetadata{
				Index:     appliedIndex,
				Term:      term,
				ConfState: cs,
			},
		},
	}, nil
}