Example #1
// TestLeaderRemoveSelf verifies that a leader can remove itself
// without panicking and future access to the range returns a
// RangeNotFoundError (not RaftGroupDeletedError, and even before
// the ReplicaGCQueue has run).
func TestLeaderRemoveSelf(t *testing.T) {
	defer leaktest.AfterTest(t)

	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Disable the replica GC queue. This verifies that the replica is
	// considered removed even before the gc queue has run, and also
	// helps avoid a deadlock at shutdown.
	mtc.stores[0].DisableReplicaGCQueue(true)
	raftID := roachpb.RangeID(1)
	mtc.replicateRange(raftID, 1)
	// Remove the replica from the first store.
	mtc.unreplicateRange(raftID, 0)
	getArgs := getArgs([]byte("a"))

	// Force the read command to request a new lease.
	clock := mtc.clocks[0]
	header := roachpb.Header{}
	header.Timestamp = clock.Update(clock.Now().Add(int64(storage.DefaultLeaderLeaseDuration), 0))

	// Expect a RangeNotFoundError.
	_, pErr := client.SendWrappedWith(rg1(mtc.stores[0]), nil, header, &getArgs)
	if _, ok := pErr.GoError().(*roachpb.RangeNotFoundError); !ok {
		t.Fatalf("expect get RangeNotFoundError, actual get %v ", pErr)
	}
}
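Two setup details carry the assertion above: disabling the replica GC queue guarantees the replica has not been garbage-collected yet, and advancing the clock by DefaultLeaderLeaseDuration forces the read to request a fresh lease instead of reusing an existing one, so the RangeNotFoundError has to come from the removed replica itself rather than from the GC queue having cleaned it up.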
Example #2
// createPutRequest returns a ready-made Put request and header using the
// specified key, value, and transaction.
func createPutRequest(key roachpb.Key, value []byte, txn *roachpb.Transaction) (*roachpb.PutRequest, roachpb.Header) {
	h := roachpb.Header{}
	h.Txn = txn
	return &roachpb.PutRequest{
		Span: roachpb.Span{
			Key: key,
		},
		Value: roachpb.Value{Bytes: value},
	}, h
}
Example #3
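// createDeleteRangeRequest returns a ready-made DeleteRange request and
// header using the specified start key, end key, and transaction.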
func createDeleteRangeRequest(key, endKey roachpb.Key, txn *roachpb.Transaction) (*roachpb.DeleteRangeRequest, roachpb.Header) {
	h := roachpb.Header{}
	h.Txn = txn
	return &roachpb.DeleteRangeRequest{
		Span: roachpb.Span{
			Key:    key,
			EndKey: endKey,
		},
	}, h
}
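For orientation, here is a minimal usage sketch of the two helpers above, driven through the same send path the surrounding tests use. It is not from the original source: the test name is invented, it assumes it sits in the same test package (so createTestStore, rg1, client.SendWrappedWith and leaktest are the helpers already visible in Examples #1 and #4), and it passes a nil transaction for brevity.

// TestPutThenDeleteRangeSketch is a hypothetical usage sketch (not part of
// the original sources): write a key with createPutRequest, then delete a
// span covering it with createDeleteRangeRequest, sending both through the
// first range.
func TestPutThenDeleteRangeSketch(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	// Non-transactional for brevity; pass a real *roachpb.Transaction to run
	// both requests inside a transaction.
	putReq, putH := createPutRequest(roachpb.Key("a"), []byte("value"), nil)
	if _, pErr := client.SendWrappedWith(rg1(store), nil, putH, putReq); pErr != nil {
		t.Fatal(pErr)
	}

	delReq, delH := createDeleteRangeRequest(roachpb.Key("a"), roachpb.Key("b"), nil)
	if _, pErr := client.SendWrappedWith(rg1(store), nil, delH, delReq); pErr != nil {
		t.Fatal(pErr)
	}
}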
Example #4
// TestStoreSplitReadRace prevents regression of #3148. It begins a couple of
// read requests and lets them complete while a split is happening; the reads
// hit the second half of the split. If the split happens non-atomically with
// respect to the reads (and in particular their update of the timestamp
// cache), then some of them may not be reflected in the timestamp cache of the
// new range, in which case this test would fail.
func TestStoreSplitReadRace(t *testing.T) {
	defer leaktest.AfterTest(t)
	defer func() { storage.TestingCommandFilter = nil }()
	splitKey := roachpb.Key("a")
	key := func(i int) roachpb.Key {
		return append(splitKey.Next(), []byte(fmt.Sprintf("%03d", i))...)
	}

	getContinues := make(chan struct{})
	var getStarted sync.WaitGroup
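	// The command filter below stalls every Get whose key starts with
	// splitKey.Next() (i.e. the keys produced by key above) until the
	// EndTransaction carrying the split trigger for splitKey shows up,
	// forcing the reads and the split to race.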
	storage.TestingCommandFilter = func(_ roachpb.StoreID, args roachpb.Request, h roachpb.Header) error {
		if et, ok := args.(*roachpb.EndTransactionRequest); ok {
			st := et.InternalCommitTrigger.GetSplitTrigger()
			if st == nil || !st.UpdatedDesc.EndKey.Equal(splitKey) {
				return nil
			}
			close(getContinues)
		} else if args.Method() == roachpb.Get &&
			bytes.HasPrefix(args.Header().Key, splitKey.Next()) {
			getStarted.Done()
			<-getContinues
		}
		return nil
	}
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	now := store.Clock().Now()
	var wg sync.WaitGroup

	ts := func(i int) roachpb.Timestamp {
		return now.Add(0, int32(1000+i))
	}

	const num = 10

	for i := 0; i < num; i++ {
		wg.Add(1)
		getStarted.Add(1)
		go func(i int) {
			defer wg.Done()
			args := getArgs(key(i))
			var h roachpb.Header
			h.Timestamp = ts(i)
			if _, err := client.SendWrappedWith(rg1(store), nil, h, &args); err != nil {
				t.Fatal(err)
			}
		}(i)
	}

	getStarted.Wait()

	wg.Add(1)
	func() {
		defer wg.Done()
		args := adminSplitArgs(roachpb.KeyMin, splitKey)
		if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
			t.Fatal(err)
		}
	}()

	wg.Wait()

	for i := 0; i < num; i++ {
		var h roachpb.Header
		h.Timestamp = now
		args := putArgs(key(i), []byte("foo"))
		h.RangeID = store.LookupReplica(keys.Addr(args.Key), nil).Desc().RangeID
		reply, err := client.SendWrappedWith(store, nil, h, &args)
		if err != nil {
			t.Fatal(err)
		}
		if reply.Header().Timestamp.Less(ts(i)) {
			t.Fatalf("%d: expected Put to be forced higher than %s by timestamp caches, but wrote at %s", i, ts(i), reply.Header().Timestamp)
		}
	}
}
Example #5
// TestStoreSplitReadRace prevents regression of #3148. It begins a couple of
// read requests and lets them complete while a split is happening; the reads
// hit the second half of the split. If the split happens non-atomically with
// respect to the reads (and in particular their update of the timestamp
// cache), then some of them may not be reflected in the timestamp cache of the
// new range, in which case this test would fail.
func TestStoreSplitReadRace(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer config.TestingDisableTableSplits()()
	splitKey := roachpb.Key("a")
	key := func(i int) roachpb.Key {
		splitCopy := append([]byte(nil), splitKey.Next()...)
		return append(splitCopy, []byte(fmt.Sprintf("%03d", i))...)
	}

	getContinues := make(chan struct{})
	var getStarted sync.WaitGroup
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if et, ok := filterArgs.Req.(*roachpb.EndTransactionRequest); ok {
				st := et.InternalCommitTrigger.GetSplitTrigger()
				if st == nil || !st.UpdatedDesc.EndKey.Equal(splitKey) {
					return nil
				}
				close(getContinues)
			} else if filterArgs.Req.Method() == roachpb.Get &&
				bytes.HasPrefix(filterArgs.Req.Header().Key, splitKey.Next()) {
				getStarted.Done()
				<-getContinues
			}
			return nil
		}
	store, stopper, _ := createTestStoreWithContext(t, &sCtx)
	defer stopper.Stop()

	now := store.Clock().Now()
	var wg sync.WaitGroup

	ts := func(i int) hlc.Timestamp {
		return now.Add(0, int32(1000+i))
	}

	const num = 10

	for i := 0; i < num; i++ {
		wg.Add(1)
		getStarted.Add(1)
		go func(i int) {
			defer wg.Done()
			args := getArgs(key(i))
			var h roachpb.Header
			h.Timestamp = ts(i)
			if _, pErr := client.SendWrappedWith(rg1(store), nil, h, &args); pErr != nil {
				t.Fatal(pErr)
			}
		}(i)
	}

	getStarted.Wait()

	wg.Add(1)
	func() {
		defer wg.Done()
		args := adminSplitArgs(roachpb.KeyMin, splitKey)
		if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
			t.Fatal(pErr)
		}
	}()

	wg.Wait()

	for i := 0; i < num; i++ {
		var h roachpb.Header
		h.Timestamp = now
		args := putArgs(key(i), []byte("foo"))
		keyAddr, err := keys.Addr(args.Key)
		if err != nil {
			t.Fatal(err)
		}
		h.RangeID = store.LookupReplica(keyAddr, nil).RangeID
		_, respH, pErr := storage.SendWrapped(store, context.Background(), h, &args)
		if pErr != nil {
			t.Fatal(pErr)
		}
		if respH.Timestamp.Less(ts(i)) {
			t.Fatalf("%d: expected Put to be forced higher than %s by timestamp caches, but wrote at %s", i, ts(i), respH.Timestamp)
		}
	}
}
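Read together, Examples #4 and #5 are the same test against two revisions of the test harness. The command filter has moved from the storage.TestingCommandFilter package variable into StoreContext.TestingKnobs (taking storagebase.FilterArgs and returning a *roachpb.Error), leaktest.AfterTest now returns a closure that the defer must invoke, errors are carried as *roachpb.Error, keys.Addr returns an error that has to be checked, and the response timestamp comes from the header returned by storage.SendWrapped instead of reply.Header(). The key helper also copies splitKey.Next() into a fresh slice before appending, presumably so the concurrent read goroutines never append into a shared backing array.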