// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	defer e.Close()
	if _, err := bootstrapCluster(
		storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
	); err != nil {
		t.Fatal(err)
	}

	// Set the cluster ID to the empty UUID.
	sIdent := roachpb.StoreIdent{
		ClusterID: uuid.UUID{},
		NodeID:    1,
		StoreID:   1,
	}
	if err := engine.MVCCPutProto(context.Background(), e, nil, keys.StoreIdentKey(),
		hlc.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}

	engines := []engine.Engine{e}
	_, serverAddr, _, node, stopper := createTestNode(util.TestAddr, engines, nil, t)
	stopper.Stop()
	if err := node.start(
		context.Background(), serverAddr, engines, roachpb.Attributes{}, roachpb.Locality{},
	); !testutils.IsError(err, "unidentified store") {
		t.Errorf("unexpected error %v", err)
	}
}
// TestRangeLookupWithOpenTransaction verifies that range lookups are
// done in such a way (e.g. using inconsistent reads) that they
// proceed in the event that a write intent is extant at the meta
// index record being read.
func TestRangeLookupWithOpenTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	db := createTestClient(t, s.Stopper(), s.ServingAddr())

	// Create an intent on the meta1 record by writing directly to the
	// engine.
	key := testutils.MakeKey(keys.Meta1Prefix, roachpb.KeyMax)
	now := s.Clock().Now()
	txn := roachpb.NewTransaction("txn", roachpb.Key("foobar"), 0, enginepb.SERIALIZABLE, now, 0)
	if err := engine.MVCCPutProto(
		context.Background(), s.(*server.TestServer).Engines()[0],
		nil, key, now, txn, &roachpb.RangeDescriptor{}); err != nil {
		t.Fatal(err)
	}

	// Now, with an intent pending, attempt (asynchronously) to read
	// from an arbitrary key. This will cause the distributed sender to
	// do a range lookup, which will encounter the intent. We're
	// verifying here that the range lookup doesn't fail with a write
	// intent error. If it did, it would go into a deadloop attempting
	// to push the transaction, which in turn requires another range
	// lookup, etc, ad nauseam.
	if _, err := db.Get(context.TODO(), "a"); err != nil {
		t.Fatal(err)
	}
}
func setHardState(
	ctx context.Context, batch engine.ReadWriter, rangeID roachpb.RangeID, st raftpb.HardState,
) error {
	return engine.MVCCPutProto(ctx, batch, nil,
		keys.RaftHardStateKey(rangeID), hlc.ZeroTimestamp, nil, &st)
}
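// Hypothetical counterpart sketch (not part of the source): reading the
// persisted HardState back with MVCCGetProto, mirroring the read pattern used
// by the GC queue tests in this section. The function name is illustrative
// only.
func exampleLoadHardState(
	ctx context.Context, reader engine.Reader, rangeID roachpb.RangeID,
) (raftpb.HardState, error) {
	var st raftpb.HardState
	_, err := engine.MVCCGetProto(ctx, reader, keys.RaftHardStateKey(rangeID),
		hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, &st)
	return st, err
}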
// Put writes an entry for the specified transaction ID.
func (sc *AbortCache) Put(
	ctx context.Context,
	e engine.ReadWriter,
	ms *enginepb.MVCCStats,
	txnID uuid.UUID,
	entry *roachpb.AbortCacheEntry,
) error {
	key := keys.AbortCacheKey(sc.rangeID, txnID)
	return engine.MVCCPutProto(ctx, e, ms, key, hlc.ZeroTimestamp, nil /* txn */, entry)
}
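// Illustrative usage sketch (assumed helper, not part of the package):
// writing an abort cache entry for a transaction, mirroring the call made
// from TestGCQueueTransactionTable later in this section. Parameter names
// are placeholders.
func examplePutAbortCacheEntry(
	ctx context.Context, sc *AbortCache, eng engine.ReadWriter, txn *roachpb.Transaction,
) error {
	entry := roachpb.AbortCacheEntry{Key: txn.Key, Timestamp: txn.LastActive()}
	return sc.Put(ctx, eng, nil /* ms */, txn.ID, &entry)
}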
func TestGCQueueLastProcessedTimestamps(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	stopper := stop.NewStopper()
	defer stopper.Stop()
	tc.Start(t, stopper)

	// Create two last processed times both at the range start key and
	// also at some mid-point key in order to simulate a merge.
	lastProcessedVals := []struct {
		key   roachpb.Key
		expGC bool
	}{
		{keys.QueueLastProcessedKey(roachpb.RKeyMin, "timeSeriesMaintenance"), false},
		{keys.QueueLastProcessedKey(roachpb.RKeyMin, "replica consistency checker"), false},
		{keys.QueueLastProcessedKey(roachpb.RKey("a"), "timeSeriesMaintenance"), true},
		{keys.QueueLastProcessedKey(roachpb.RKey("b"), "replica consistency checker"), true},
	}

	ts := tc.Clock().Now()
	for _, lpv := range lastProcessedVals {
		if err := engine.MVCCPutProto(context.Background(), tc.engine, nil, lpv.key,
			hlc.ZeroTimestamp, nil, &ts); err != nil {
			t.Fatal(err)
		}
	}

	cfg, ok := tc.gossip.GetSystemConfig()
	if !ok {
		t.Fatal("config not set")
	}

	// Process through a scan queue.
	gcQ := newGCQueue(tc.store, tc.gossip)
	if err := gcQ.process(context.Background(), tc.repl, cfg); err != nil {
		t.Fatal(err)
	}

	// Verify GC.
	testutils.SucceedsSoon(t, func() error {
		for _, lpv := range lastProcessedVals {
			ok, err := engine.MVCCGetProto(context.Background(), tc.engine, lpv.key,
				hlc.ZeroTimestamp, true, nil, &ts)
			if err != nil {
				return err
			}
			if ok == lpv.expGC {
				return errors.Errorf("expected GC of %s: %t; got %t", lpv.key, lpv.expGC, ok)
			}
		}
		return nil
	})
}
func setGCThreshold(
	ctx context.Context,
	eng engine.ReadWriter,
	ms *enginepb.MVCCStats,
	rangeID roachpb.RangeID,
	threshold *hlc.Timestamp,
) error {
	if threshold == nil {
		return errors.New("cannot persist nil GCThreshold")
	}
	return engine.MVCCPutProto(ctx, eng, ms,
		keys.RangeLastGCKey(rangeID), hlc.ZeroTimestamp, nil, threshold)
}
func setTruncatedState(
	ctx context.Context,
	eng engine.ReadWriter,
	ms *enginepb.MVCCStats,
	rangeID roachpb.RangeID,
	truncState roachpb.RaftTruncatedState,
) error {
	if (truncState == roachpb.RaftTruncatedState{}) {
		return errors.New("cannot persist empty RaftTruncatedState")
	}
	return engine.MVCCPutProto(ctx, eng, ms,
		keys.RaftTruncatedStateKey(rangeID), hlc.ZeroTimestamp, nil, &truncState)
}
func setLease(
	ctx context.Context,
	eng engine.ReadWriter,
	ms *enginepb.MVCCStats,
	rangeID roachpb.RangeID,
	lease *roachpb.Lease,
) error {
	if lease == nil {
		return errors.New("cannot persist nil Lease")
	}
	return engine.MVCCPutProto(
		ctx, eng, ms, keys.RangeLeaseKey(rangeID), hlc.ZeroTimestamp, nil, lease)
}
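// Illustrative sketch (assumed helper, not in the source): the set* functions
// above share one pattern: validate the input, then persist it with
// MVCCPutProto at a range-local key using hlc.ZeroTimestamp and no
// transaction. A caller initializing on-disk state for a range might chain
// them like this; the rangeID and values below are placeholders.
func exampleWriteRangeState(
	ctx context.Context, eng engine.ReadWriter, ms *enginepb.MVCCStats, rangeID roachpb.RangeID,
) error {
	if err := setTruncatedState(ctx, eng, ms, rangeID,
		roachpb.RaftTruncatedState{Index: 10, Term: 5}); err != nil {
		return err
	}
	threshold := hlc.Timestamp{WallTime: 1}
	if err := setGCThreshold(ctx, eng, ms, rangeID, &threshold); err != nil {
		return err
	}
	return setLease(ctx, eng, ms, rangeID, &roachpb.Lease{})
}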
func (ls *Stores) updateBootstrapInfo(bi *gossip.BootstrapInfo) error {
	if bi.Timestamp.Less(ls.biLatestTS) {
		return nil
	}
	ctx := ls.AnnotateCtx(context.TODO())
	// Update the latest timestamp and set cached version.
	ls.biLatestTS = bi.Timestamp
	ls.latestBI = protoutil.Clone(bi).(*gossip.BootstrapInfo)
	// Update all stores.
	for _, s := range ls.storeMap {
		if err := engine.MVCCPutProto(ctx, s.engine, nil, keys.StoreGossipKey(),
			hlc.ZeroTimestamp, nil, bi); err != nil {
			return err
		}
	}
	return nil
}
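// Hypothetical counterpart sketch (not from the source): reading the gossiped
// bootstrap info back from a single store's engine, mirroring the write in
// updateBootstrapInfo above. The function name is illustrative only.
func exampleReadBootstrapInfo(
	ctx context.Context, reader engine.Reader,
) (gossip.BootstrapInfo, bool, error) {
	var bi gossip.BootstrapInfo
	found, err := engine.MVCCGetProto(ctx, reader, keys.StoreGossipKey(),
		hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, &bi)
	return bi, found, err
}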
func TestGCQueueTransactionTable(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const now time.Duration = 3 * 24 * time.Hour

	const gcTxnAndAC = now - txnCleanupThreshold
	const gcACOnly = now - abortCacheAgeThreshold
	if gcTxnAndAC >= gcACOnly {
		t.Fatalf("test assumption violated due to changing constants; needs adjustment")
	}

	type spec struct {
		status      roachpb.TransactionStatus
		orig        time.Duration
		hb          time.Duration             // last heartbeat (none if ZeroTimestamp)
		newStatus   roachpb.TransactionStatus // -1 for GCed
		failResolve bool                      // do we want to fail resolves in this trial?
		expResolve  bool                      // expect attempt at removing txn-persisted intents?
		expAbortGC  bool                      // expect abort cache entries removed?
	}
	// Describes the state of the Txn table before the test.
	// Many of the abort cache entries deleted wouldn't even be there, so don't
	// be confused by that.
	testCases := map[string]spec{
		// Too young, should not touch.
		"aa": {
			status:    roachpb.PENDING,
			orig:      gcACOnly + 1,
			newStatus: roachpb.PENDING,
		},
		// A little older, so the AbortCache gets cleaned up.
		"ab": {
			status:     roachpb.PENDING,
			orig:       gcTxnAndAC + 1,
			newStatus:  roachpb.PENDING,
			expAbortGC: true,
		},
		// Old and pending, but still heartbeat (so no Push attempted; it would succeed).
		// It's old enough to delete the abort cache entry though.
		"ba": {
			status:     roachpb.PENDING,
			hb:         gcTxnAndAC + 1,
			newStatus:  roachpb.PENDING,
			expAbortGC: true,
		},
		// Not old enough for Txn GC, but old enough to remove the abort cache entry.
		"bb": {
			status:     roachpb.ABORTED,
			orig:       gcACOnly - 1,
			newStatus:  roachpb.ABORTED,
			expAbortGC: true,
		},
		// Old, pending and abandoned. Should push and abort it successfully,
		// but not GC it just yet (this is an artifact of the implementation).
		// The abort cache gets cleaned up though.
		"c": {
			status:     roachpb.PENDING,
			orig:       gcTxnAndAC - 1,
			newStatus:  roachpb.ABORTED,
			expAbortGC: true,
		},
		// Old and aborted, should delete.
		"d": {
			status:     roachpb.ABORTED,
			orig:       gcTxnAndAC - 1,
			newStatus:  -1,
			expResolve: true,
			expAbortGC: true,
		},
		// Committed and fresh, so no action. But the abort cache entry is old
		// enough to be discarded.
		"e": {
			status:     roachpb.COMMITTED,
			orig:       gcTxnAndAC + 1,
			newStatus:  roachpb.COMMITTED,
			expAbortGC: true,
		},
		// Committed and old. It has an intent (like all tests here), which is
		// resolvable and hence we can GC.
		"f": {
			status:     roachpb.COMMITTED,
			orig:       gcTxnAndAC - 1,
			newStatus:  -1,
			expResolve: true,
			expAbortGC: true,
		},
		// Same as the previous one, but we've rigged things so that the intent
		// resolution here will fail and consequently no GC is expected.
		"g": {
			status:      roachpb.COMMITTED,
			orig:        gcTxnAndAC - 1,
			newStatus:   roachpb.COMMITTED,
			failResolve: true,
			expResolve:  true,
			expAbortGC:  true,
		},
	}

	resolved := map[string][]roachpb.Span{}

	tc := testContext{}
	tsc := TestStoreConfig()
	tsc.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if resArgs, ok := filterArgs.Req.(*roachpb.ResolveIntentRequest); ok {
				id := string(resArgs.IntentTxn.Key)
				resolved[id] = append(resolved[id], roachpb.Span{
					Key:    resArgs.Key,
					EndKey: resArgs.EndKey,
				})
				// We've special cased one test case. Note that the intent is still
				// counted in `resolved`.
				if testCases[id].failResolve {
					return roachpb.NewErrorWithTxn(errors.Errorf("boom"), filterArgs.Hdr.Txn)
				}
			}
			return nil
		}
	tc.StartWithStoreConfig(t, tsc)
	defer tc.Stop()
	tc.manualClock.Set(int64(now))

	outsideKey := tc.rng.Desc().EndKey.Next().AsRawKey()
	testIntents := []roachpb.Span{{Key: roachpb.Key("intent")}}

	txns := map[string]roachpb.Transaction{}
	for strKey, test := range testCases {
		baseKey := roachpb.Key(strKey)
		txnClock := hlc.NewClock(hlc.NewManualClock(int64(test.orig)).UnixNano)
		txn := newTransaction("txn1", baseKey, 1, enginepb.SERIALIZABLE, txnClock)
		txn.Status = test.status
		txn.Intents = testIntents
		if test.hb > 0 {
			txn.LastHeartbeat = &hlc.Timestamp{WallTime: int64(test.hb)}
		}
		// Set a high Timestamp to make sure it does not matter. Only
		// OrigTimestamp (and heartbeat) are used for GC decisions.
		txn.Timestamp.Forward(hlc.MaxTimestamp)
		txns[strKey] = *txn
		for _, addrKey := range []roachpb.Key{baseKey, outsideKey} {
			key := keys.TransactionKey(addrKey, txn.ID)
			if err := engine.MVCCPutProto(context.Background(), tc.engine, nil, key,
				hlc.ZeroTimestamp, nil, txn); err != nil {
				t.Fatal(err)
			}
		}
		entry := roachpb.AbortCacheEntry{Key: txn.Key, Timestamp: txn.LastActive()}
		if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
			t.Fatal(err)
		}
	}

	// Run GC.
	gcQ := newGCQueue(tc.store, tc.gossip)
	cfg, ok := tc.gossip.GetSystemConfig()
	if !ok {
		t.Fatal("config not set")
	}

	if err := gcQ.process(context.Background(), tc.clock.Now(), tc.rng, cfg); err != nil {
		t.Fatal(err)
	}

	util.SucceedsSoon(t, func() error {
		for strKey, sp := range testCases {
			txn := &roachpb.Transaction{}
			key := keys.TransactionKey(roachpb.Key(strKey), txns[strKey].ID)
			ok, err := engine.MVCCGetProto(context.Background(), tc.engine, key,
				hlc.ZeroTimestamp, true, nil, txn)
			if err != nil {
				return err
			}
			if expGC := (sp.newStatus == -1); expGC {
				if expGC != !ok {
					return fmt.Errorf("%s: expected gc: %t, but found %s\n%s",
						strKey, expGC, txn, roachpb.Key(strKey))
				}
			} else if sp.newStatus != txn.Status {
				return fmt.Errorf("%s: expected status %s, but found %s",
					strKey, sp.newStatus, txn.Status)
			}
			var expIntents []roachpb.Span
			if sp.expResolve {
				expIntents = testIntents
			}
			if !reflect.DeepEqual(resolved[strKey], expIntents) {
				return fmt.Errorf("%s: unexpected intent resolutions:\nexpected: %s\nobserved: %s",
					strKey, expIntents, resolved[strKey])
			}
			entry := &roachpb.AbortCacheEntry{}
			abortExists, err := tc.rng.abortCache.Get(context.Background(), tc.store.Engine(), txns[strKey].ID, entry)
			if err != nil {
				t.Fatal(err)
			}
			if abortExists == sp.expAbortGC {
				return fmt.Errorf("%s: expected abort cache gc: %t, found %+v",
					strKey, sp.expAbortGC, entry)
			}
		}
		return nil
	})

	outsideTxnPrefix := keys.TransactionKey(outsideKey, uuid.EmptyUUID)
	outsideTxnPrefixEnd := keys.TransactionKey(outsideKey.Next(), uuid.EmptyUUID)
	var count int
	if _, err := engine.MVCCIterate(context.Background(), tc.store.Engine(),
		outsideTxnPrefix, outsideTxnPrefixEnd, hlc.ZeroTimestamp, true, nil, false,
		func(roachpb.KeyValue) (bool, error) {
			count++
			return false, nil
		}); err != nil {
		t.Fatal(err)
	}
	if exp := len(testCases); exp != count {
		t.Fatalf("expected the %d external transaction entries to remain untouched, "+
			"but only %d are left", exp, count)
	}

	batch := tc.engine.NewSnapshot()
	defer batch.Close()
	tc.rng.assertState(batch) // check that in-mem and on-disk state were updated

	tc.rng.mu.Lock()
	txnSpanThreshold := tc.rng.mu.state.TxnSpanGCThreshold
	tc.rng.mu.Unlock()

	// Verify that the new TxnSpanGCThreshold has reached the Replica.
	if expWT := int64(gcTxnAndAC); txnSpanThreshold.WallTime != expWT {
		t.Fatalf("expected TxnSpanGCThreshold.Walltime %d, got timestamp %s",
			expWT, txnSpanThreshold)
	}
}
// setReplicaDestroyedError sets an error indicating that the replica has been
// destroyed.
func setReplicaDestroyedError(
	ctx context.Context, eng engine.ReadWriter, rangeID roachpb.RangeID, err *roachpb.Error,
) error {
	return engine.MVCCPutProto(ctx, eng, nil,
		keys.RangeReplicaDestroyedErrorKey(rangeID), hlc.ZeroTimestamp, nil /* txn */, err)
}
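// Hypothetical counterpart sketch (not from the source): checking whether a
// replica-destroyed error was persisted, using the same MVCCGetProto call
// shape as the tests above. The function name is illustrative only.
func exampleLoadReplicaDestroyedError(
	ctx context.Context, reader engine.Reader, rangeID roachpb.RangeID,
) (*roachpb.Error, error) {
	var destroyedErr roachpb.Error
	found, err := engine.MVCCGetProto(ctx, reader, keys.RangeReplicaDestroyedErrorKey(rangeID),
		hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, &destroyedErr)
	if err != nil || !found {
		return nil, err
	}
	return &destroyedErr, nil
}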