Code example #1
File: testcluster.go Project: hvaara/cockroach
// WaitForStores waits for all of the store descriptors to be gossiped. Servers
// other than the first "bootstrap" their stores asynchronously, but we'd like
// to wait for all of the stores to be initialized before returning the
// TestCluster.
func (tc *TestCluster) WaitForStores(t testing.TB, g *gossip.Gossip) {
	// Register a gossip callback for the store descriptors.
	var storesMu syncutil.Mutex
	stores := map[roachpb.StoreID]struct{}{}
	storesDone := make(chan error)
	storesDoneOnce := storesDone
	unregister := g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix),
		func(_ string, content roachpb.Value) {
			storesMu.Lock()
			defer storesMu.Unlock()
			if storesDoneOnce == nil {
				return
			}

			var desc roachpb.StoreDescriptor
			if err := content.GetProto(&desc); err != nil {
				storesDoneOnce <- err
				return
			}

			stores[desc.StoreID] = struct{}{}
			if len(stores) == len(tc.Servers) {
				close(storesDoneOnce)
				storesDoneOnce = nil
			}
		})
	defer unregister()

	// Wait for the store descriptors to be gossiped.
	for err := range storesDone {
		if err != nil {
			t.Fatal(err)
		}
	}
}
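The detail worth noting in this example is how the callback signals completion exactly once even though gossip may invoke it many times: the done channel is closed under the mutex and the local storesDoneOnce reference is then set to nil, so later invocations become no-ops. Below is a minimal standalone sketch of the same idiom; the names (onEvent, seen, want) are hypothetical, and it uses the standard library's sync.Mutex in place of syncutil.Mutex.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	seen := map[int]struct{}{}
	done := make(chan struct{})
	doneOnce := done
	const want = 3

	// onEvent plays the role of the gossip callback: it may be invoked any
	// number of times, possibly with duplicate IDs, but closes done only once.
	onEvent := func(id int) {
		mu.Lock()
		defer mu.Unlock()
		if doneOnce == nil {
			return // already signalled
		}
		seen[id] = struct{}{}
		if len(seen) == want {
			close(doneOnce)
			doneOnce = nil // never close twice
		}
	}

	for i := 0; i < 10; i++ {
		go onEvent(i % want) // duplicate events are harmless
	}
	<-done
	fmt.Println("all events observed")
}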
Code example #2
File: stopper_test.go Project: knz/cockroach
func TestStopperRunLimitedAsyncTask(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := stop.NewStopper()
	defer s.Stop()

	const maxConcurrency = 5
	const duration = 10 * time.Millisecond
	sem := make(chan struct{}, maxConcurrency)
	var mu syncutil.Mutex
	concurrency := 0
	peakConcurrency := 0
	var wg sync.WaitGroup

	f := func(_ context.Context) {
		mu.Lock()
		concurrency++
		if concurrency > peakConcurrency {
			peakConcurrency = concurrency
		}
		mu.Unlock()
		time.Sleep(duration)
		mu.Lock()
		concurrency--
		mu.Unlock()
		wg.Done()
	}

	for i := 0; i < maxConcurrency*3; i++ {
		wg.Add(1)
		if err := s.RunLimitedAsyncTask(
			context.TODO(), sem, true /* wait */, f,
		); err != nil {
			t.Fatal(err)
		}
	}
	wg.Wait()
	if concurrency != 0 {
		t.Fatalf("expected 0 concurrency at end of test but got %d", concurrency)
	}
	if peakConcurrency != maxConcurrency {
		t.Fatalf("expected peak concurrency %d to equal max concurrency %d",
			peakConcurrency, maxConcurrency)
	}

	sem = make(chan struct{}, 1)
	sem <- struct{}{}
	err := s.RunLimitedAsyncTask(
		context.TODO(), sem, false /* wait */, func(_ context.Context) {
		},
	)
	if err != stop.ErrThrottled {
		t.Fatalf("expected %v; got %v", stop.ErrThrottled, err)
	}
}
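The buffered channel sem acts as a counting semaphore inside RunLimitedAsyncTask, which is what caps the concurrency this test measures. The sketch below shows just that bounded-concurrency idea in isolation, with hypothetical names and the standard library's sync primitives; it only guarantees the peak never exceeds the limit. Asserting that the peak actually reaches the limit after a fixed sleep, as this test does, is timing-dependent, which is what the variant in code example #9 below avoids by signalling tasks explicitly.

package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxConcurrency = 5
	const numTasks = 15
	sem := make(chan struct{}, maxConcurrency)

	var mu sync.Mutex
	concurrency, peak := 0, 0
	var wg sync.WaitGroup

	for i := 0; i < numTasks; i++ {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot; blocks while maxConcurrency tasks are in flight
		go func() {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when the task finishes

			mu.Lock()
			concurrency++
			if concurrency > peak {
				peak = concurrency
			}
			mu.Unlock()

			// ... the task body would run here ...

			mu.Lock()
			concurrency--
			mu.Unlock()
		}()
	}
	wg.Wait()
	fmt.Printf("peak concurrency %d never exceeded the limit of %d\n", peak, maxConcurrency)
}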
Code example #3
File: lease_test.go Project: BramGruneir/cockroach
// Test that we fail to lease a table that was marked for deletion.
func TestCantLeaseDeletedTable(testingT *testing.T) {
	defer leaktest.AfterTest(testingT)()

	var mu syncutil.Mutex
	clearSchemaChangers := false

	params, _ := createTestServerParams()
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
			SyncFilter: func(tscc csql.TestingSchemaChangerCollection) {
				mu.Lock()
				defer mu.Unlock()
				if clearSchemaChangers {
					tscc.ClearSchemaChangers()
				}
			},
			AsyncExecNotification: asyncSchemaChangerDisabled,
		},
	}

	t := newLeaseTest(testingT, params)
	defer t.cleanup()

	sql := `
CREATE DATABASE test;
CREATE TABLE test.t(a INT PRIMARY KEY);
`
	_, err := t.db.Exec(sql)
	if err != nil {
		t.Fatal(err)
	}

	// Block schema changers so that the table we're about to DROP is not actually
	// dropped; it will be left in a "deleted" state.
	mu.Lock()
	clearSchemaChangers = true
	mu.Unlock()

	// DROP the table
	_, err = t.db.Exec(`DROP TABLE test.t`)
	if err != nil {
		t.Fatal(err)
	}

	// Make sure we can't get a lease on the descriptor.
	tableDesc := sqlbase.GetTableDescriptor(t.kvDB, "test", "t")
	// try to acquire at a bogus version to make sure we don't get back a lease we
	// already had.
	_, err = t.acquire(1, tableDesc.ID, tableDesc.Version+1)
	if !testutils.IsError(err, "table is being dropped") {
		t.Fatalf("got a different error than expected: %v", err)
	}
}
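The handshake between the test and the SyncFilter knob is a mutex-guarded flag: the test flips clearSchemaChangers under the mutex, and the knob reads it under the same mutex before acting. A minimal hypothetical sketch of that pattern (syncFilter stands in for the knob, and sync.Mutex replaces syncutil.Mutex):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	clearSchemaChangers := false

	// syncFilter stands in for SchemaChangerTestingKnobs.SyncFilter: it is
	// called by other machinery and consults the flag under the mutex.
	syncFilter := func() {
		mu.Lock()
		defer mu.Unlock()
		if clearSchemaChangers {
			fmt.Println("clearing schema changers")
		}
	}

	syncFilter() // flag still false: no-op

	// The test flips the flag under the same mutex before triggering the
	// operation it wants intercepted.
	mu.Lock()
	clearSchemaChangers = true
	mu.Unlock()

	syncFilter() // flag now true: the filter acts
}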
Code example #4
// TestNodeLivenessCallback verifies that the liveness callback for a
// node is invoked when it changes from state false to true.
func TestNodeLivenessCallback(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := &multiTestContext{}
	defer mtc.Stop()
	mtc.Start(t, 3)

	// Verify liveness of all nodes for all nodes.
	verifyLiveness(t, mtc)
	pauseNodeLivenessHeartbeats(mtc, true)

	var cbMu syncutil.Mutex
	cbs := map[roachpb.NodeID]struct{}{}
	mtc.nodeLivenesses[0].RegisterCallback(func(nodeID roachpb.NodeID) {
		cbMu.Lock()
		defer cbMu.Unlock()
		cbs[nodeID] = struct{}{}
	})

	// Advance clock past the liveness threshold.
	mtc.manualClock.Increment(mtc.nodeLivenesses[0].GetLivenessThreshold().Nanoseconds() + 1)

	// Trigger a manual heartbeat and verify callbacks for each node ID are invoked.
	for _, nl := range mtc.nodeLivenesses {
		l, err := nl.Self()
		if err != nil {
			t.Fatal(err)
		}
		if err := nl.Heartbeat(context.Background(), l); err != nil {
			t.Fatal(err)
		}
	}

	testutils.SucceedsSoon(t, func() error {
		cbMu.Lock()
		defer cbMu.Unlock()
		for _, g := range mtc.gossips {
			nodeID := g.NodeID.Get()
			if _, ok := cbs[nodeID]; !ok {
				return errors.Errorf("expected IsLive callback for node %d", nodeID)
			}
		}
		return nil
	})
}
Code example #5
File: net.go Project: knz/cockroach
// MakeServer constructs a Server that tracks active connections, closing them
// when signalled by stopper.
func MakeServer(stopper *stop.Stopper, tlsConfig *tls.Config, handler http.Handler) Server {
	var mu syncutil.Mutex
	activeConns := make(map[net.Conn]struct{})
	server := Server{
		Server: &http.Server{
			Handler:   handler,
			TLSConfig: tlsConfig,
			ConnState: func(conn net.Conn, state http.ConnState) {
				mu.Lock()
				switch state {
				case http.StateNew:
					activeConns[conn] = struct{}{}
				case http.StateClosed:
					delete(activeConns, conn)
				}
				mu.Unlock()
			},
			ErrorLog: httpLogger,
		},
	}

	// net/http.(*Server).Serve/http2.ConfigureServer are not thread safe with
	// respect to net/http.(*Server).TLSConfig, so we call it synchronously here.
	if err := http2.ConfigureServer(server.Server, nil); err != nil {
		log.Fatal(context.TODO(), err)
	}

	stopper.RunWorker(func() {
		<-stopper.ShouldStop()

		mu.Lock()
		for conn := range activeConns {
			conn.Close()
		}
		mu.Unlock()
	})

	return server
}
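The connection bookkeeping here is independent of the rest of the CockroachDB Server type: a mutex-protected set keyed by net.Conn, filled and drained from the ConnState hook, then force-closed on shutdown. The sketch below is a hypothetical standalone version showing only that wiring (no stopper, TLS, or http2 setup), using sync.Mutex in place of syncutil.Mutex.

package main

import (
	"net"
	"net/http"
	"sync"
)

// newTrackedServer is a hypothetical helper: it returns an *http.Server that
// records live connections and a function that force-closes them all.
func newTrackedServer(handler http.Handler) (*http.Server, func()) {
	var mu sync.Mutex
	activeConns := make(map[net.Conn]struct{})

	srv := &http.Server{
		Handler: handler,
		ConnState: func(conn net.Conn, state http.ConnState) {
			mu.Lock()
			switch state {
			case http.StateNew:
				activeConns[conn] = struct{}{}
			case http.StateClosed:
				delete(activeConns, conn)
			}
			mu.Unlock()
		},
	}

	closeAll := func() {
		mu.Lock()
		for conn := range activeConns {
			conn.Close() // forcibly drop anything still open
		}
		mu.Unlock()
	}
	return srv, closeAll
}

func main() {
	srv, closeAll := newTrackedServer(http.NotFoundHandler())
	defer closeAll() // in a real server this would run on shutdown
	_ = srv          // srv.ListenAndServe() would be started here
}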
Code example #6
func TestRangeTransferLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	cfg := storage.TestStoreConfig(nil)
	var filterMu syncutil.Mutex
	var filter func(filterArgs storagebase.FilterArgs) *roachpb.Error
	cfg.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			filterMu.Lock()
			filterCopy := filter
			filterMu.Unlock()
			if filterCopy != nil {
				return filterCopy(filterArgs)
			}
			return nil
		}
	var waitForTransferBlocked atomic.Value
	waitForTransferBlocked.Store(false)
	transferBlocked := make(chan struct{})
	cfg.TestingKnobs.LeaseTransferBlockedOnExtensionEvent = func(
		_ roachpb.ReplicaDescriptor) {
		if waitForTransferBlocked.Load().(bool) {
			transferBlocked <- struct{}{}
			waitForTransferBlocked.Store(false)
		}
	}
	mtc := &multiTestContext{}
	mtc.storeConfig = &cfg
	mtc.Start(t, 2)
	defer mtc.Stop()

	// First, do a write; we'll use it to determine when the dust has settled.
	leftKey := roachpb.Key("a")
	incArgs := incrementArgs(leftKey, 1)
	if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], &incArgs); pErr != nil {
		t.Fatal(pErr)
	}

	// Get the left range's ID.
	rangeID := mtc.stores[0].LookupReplica(roachpb.RKey("a"), nil).RangeID

	// Replicate the left range onto node 1.
	mtc.replicateRange(rangeID, 1)

	replica0 := mtc.stores[0].LookupReplica(roachpb.RKey("a"), nil)
	replica1 := mtc.stores[1].LookupReplica(roachpb.RKey("a"), nil)
	gArgs := getArgs(leftKey)
	replica0Desc, err := replica0.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}
	// Check that replica0 can serve reads OK.
	if _, pErr := client.SendWrappedWith(
		context.Background(),
		mtc.senders[0],
		roachpb.Header{Replica: replica0Desc},
		&gArgs,
	); pErr != nil {
		t.Fatal(pErr)
	}

	{
		// Transferring the lease to ourself should be a no-op.
		origLeasePtr, _ := replica0.GetLease()
		origLease := *origLeasePtr
		if err := replica0.AdminTransferLease(replica0Desc.StoreID); err != nil {
			t.Fatal(err)
		}
		newLeasePtr, _ := replica0.GetLease()
		if origLeasePtr != newLeasePtr || origLease != *newLeasePtr {
			t.Fatalf("expected %+v, but found %+v", origLeasePtr, newLeasePtr)
		}
	}

	{
		// An invalid target should result in an error.
		const expected = "unable to find store .* in range"
		if err := replica0.AdminTransferLease(1000); !testutils.IsError(err, expected) {
			t.Fatalf("expected %s, but found %v", expected, err)
		}
	}

	// Move the lease to store 1.
	var newHolderDesc roachpb.ReplicaDescriptor
	util.SucceedsSoon(t, func() error {
		var err error
		newHolderDesc, err = replica1.GetReplicaDescriptor()
		return err
	})

	if err := replica0.AdminTransferLease(newHolderDesc.StoreID); err != nil {
		t.Fatal(err)
	}

	// Check that replica0 doesn't serve reads any more.
	replica0Desc, err = replica0.GetReplicaDescriptor()
	if err != nil {
		t.Fatal(err)
	}
	_, pErr := client.SendWrappedWith(
		context.Background(),
		mtc.senders[0],
		roachpb.Header{Replica: replica0Desc},
		&gArgs,
	)
	nlhe, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError)
	if !ok {
		t.Fatalf("expected %T, got %s", &roachpb.NotLeaseHolderError{}, pErr)
	}
	if *(nlhe.LeaseHolder) != newHolderDesc {
		t.Fatalf("expected lease holder %+v, got %+v",
			newHolderDesc, nlhe.LeaseHolder)
	}

	// Check that replica1 now has the lease (or gets it soon).
	util.SucceedsSoon(t, func() error {
		if _, pErr := client.SendWrappedWith(
			context.Background(),
			mtc.senders[1],
			roachpb.Header{Replica: replica0Desc},
			&gArgs,
		); pErr != nil {
			return pErr.GoError()
		}
		return nil
	})

	replica1Lease, _ := replica1.GetLease()

	// Verify the timestamp cache low water. Because we executed a transfer lease
	// request, the low water should be set to the new lease start time which is
	// less than the previous lease's expiration time.
	if lowWater := replica1.GetTimestampCacheLowWater(); lowWater != replica1Lease.Start {
		t.Fatalf("expected timestamp cache low water %s, but found %s",
			replica1Lease.Start, lowWater)
	}

	// Make replica1 extend its lease and transfer the lease immediately after
	// that. Test that the transfer still happens (it'll wait until the extension
	// is done).
	extensionSem := make(chan struct{})
	filterMu.Lock()
	filter = func(filterArgs storagebase.FilterArgs) *roachpb.Error {
		if filterArgs.Sid != mtc.stores[1].Ident.StoreID {
			return nil
		}
		llReq, ok := filterArgs.Req.(*roachpb.RequestLeaseRequest)
		if !ok {
			return nil
		}
		if llReq.Lease.Replica == newHolderDesc {
			// Notify the main thread that the extension is in progress and wait for
			// the signal to proceed.
			filterMu.Lock()
			filter = nil
			filterMu.Unlock()
			extensionSem <- struct{}{}
			<-extensionSem
		}
		return nil
	}
	filterMu.Unlock()
	// Initiate an extension.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		shouldRenewTS := replica1Lease.StartStasis.Add(-1, 0)
		mtc.manualClock.Set(shouldRenewTS.WallTime + 1)
		if _, pErr := client.SendWrappedWith(
			context.Background(),
			mtc.senders[1],
			roachpb.Header{Replica: replica0Desc},
			&gArgs,
		); pErr != nil {
			panic(pErr)
		}
	}()

	<-extensionSem
	waitForTransferBlocked.Store(true)
	// Initiate a transfer.
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Transfer back from replica1 to replica0.
		if err := replica1.AdminTransferLease(replica0Desc.StoreID); err != nil {
			panic(err)
		}
	}()
	// Wait for the transfer to be blocked by the extension.
	<-transferBlocked
	// Now unblock the extension.
	extensionSem <- struct{}{}
	// Check that the transfer to replica1 eventually happens.
	util.SucceedsSoon(t, func() error {
		if _, pErr := client.SendWrappedWith(
			context.Background(),
			mtc.senders[0],
			roachpb.Header{Replica: replica0Desc},
			&gArgs,
		); pErr != nil {
			return pErr.GoError()
		}
		return nil
	})
	filterMu.Lock()
	filter = nil
	filterMu.Unlock()
	wg.Wait()
}
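An easy-to-miss detail in this test is how the command filter swaps itself out: the TestingCommandFilter hook copies the current filter under filterMu and invokes the copy after unlocking, which lets the filter re-acquire the mutex and clear itself without deadlocking. A small hypothetical sketch of that idiom (filterFunc and invoke are made-up names):

package main

import (
	"fmt"
	"sync"
)

type filterFunc func(arg string) error

func main() {
	var mu sync.Mutex
	var filter filterFunc

	// invoke is the stable hook: it snapshots the current filter under the
	// mutex and calls the copy outside the critical section.
	invoke := func(arg string) error {
		mu.Lock()
		f := filter
		mu.Unlock()
		if f != nil {
			return f(arg)
		}
		return nil
	}

	// Install a one-shot filter that uninstalls itself on first use; because
	// invoke released the mutex, re-locking here does not deadlock.
	mu.Lock()
	filter = func(arg string) error {
		mu.Lock()
		filter = nil
		mu.Unlock()
		return fmt.Errorf("intercepted %q", arg)
	}
	mu.Unlock()

	fmt.Println(invoke("first"))  // intercepted "first"
	fmt.Println(invoke("second")) // <nil>: the filter removed itself
}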
Code example #7
File: rename_test.go Project: knz/cockroach
// Test that a SQL txn that resolved a name can keep resolving that name during
// its lifetime even after the table has been renamed.
// Also tests that the name of a renamed table cannot be reused until everybody
// has stopped using it. Otherwise, we'd have different transactions in the
// system using a single name for different tables.
// Also tests that the old name cannot be used by a node that doesn't have a lease
// on the old version even while the name mapping still exists.
func TestTxnCanStillResolveOldName(t *testing.T) {
	defer leaktest.AfterTest(t)()

	var lmKnobs LeaseManagerTestingKnobs
	// renameUnblocked is used to block the rename schema change until the test
	// doesn't need the old name->id mapping to exist anymore.
	renameUnblocked := make(chan interface{})
	serverParams := base.TestServerArgs{
		Knobs: base.TestingKnobs{
			SQLSchemaChanger: &SchemaChangerTestingKnobs{
				RenameOldNameNotInUseNotification: func() {
					<-renameUnblocked
				},
			},
			SQLLeaseManager: &lmKnobs,
		}}
	var mu syncutil.Mutex
	var waitTableID sqlbase.ID
	// renamed is used to block until the node cannot get leases with the original
	// table name. It will be signaled once the table has been renamed and the update
	// about the new name has been processed. Moreover, not only does an update to
// the name need to have been received, but the version of the descriptor needs to
	// have also been incremented in order to guarantee that the node cannot get
	// leases using the old name (an update with the new name but the original
	// version is ignored by the leasing refresh mechanism).
	renamed := make(chan interface{})
	lmKnobs.TestingLeasesRefreshedEvent =
		func(cfg config.SystemConfig) {
			mu.Lock()
			defer mu.Unlock()
			if waitTableID != 0 {
				if isRenamed(waitTableID, "t2", 2, cfg) {
					close(renamed)
					waitTableID = 0
				}
			}
		}
	s, db, kvDB := serverutils.StartServer(t, serverParams)
	defer s.Stopper().Stop()

	sql := `
CREATE DATABASE test;
CREATE TABLE test.t (a INT PRIMARY KEY);
`
	_, err := db.Exec(sql)
	if err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	mu.Lock()
	waitTableID = tableDesc.ID
	mu.Unlock()

	txn, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}

	// Run a command to make the transaction resolve the table name.
	if _, err := txn.Exec("SELECT * FROM test.t"); err != nil {
		t.Fatal(err)
	}

	// Concurrently, rename the table.
	threadDone := make(chan interface{})
	go func() {
		// The ALTER will commit and signal the main thread through `renamed`, but
		// the schema changer will remain blocked by the lease on the "t" version
		// held by the txn started above.
		if _, err := db.Exec("ALTER TABLE test.t RENAME TO test.t2"); err != nil {
			panic(err)
		}
		close(threadDone)
	}()

	// Block until the LeaseManager has processed the gossip update.
	<-renamed

	// Run another command in the transaction and make sure that we can still
	// resolve the table name.
	if _, err := txn.Exec("SELECT * FROM test.t"); err != nil {
		t.Fatal(err)
	}

	// Check that the name cannot be reused while somebody still has a lease on
	// the old one (the mechanism for ensuring this is that the entry for the old
	// name is not deleted from the database until the async schema changer checks
	// that there are no more leases on the old version).
	if _, err := db.Exec("CREATE TABLE test.t (a INT PRIMARY KEY)"); !testutils.IsError(
		err, `relation "t" already exists`) {
		t.Fatal(err)
	}

	if err := txn.Commit(); err != nil {
		t.Fatal(err)
	}

	// Check that the old name is not usable outside of the transaction now
	// that the node doesn't have a lease on it anymore (committing the txn
	// should have released the lease on the version of the descriptor with the
	// old name), even though the name mapping still exists.
	lease := s.LeaseManager().(*LeaseManager).tableNames.get(tableDesc.ID, "t", s.Clock())
	if lease != nil {
		t.Fatalf(`still have lease on "t"`)
	}
	if _, err := db.Exec("SELECT * FROM test.t"); !testutils.IsError(
		err, `table "test.t" does not exist`) {
		t.Fatal(err)
	}
	close(renameUnblocked)

	// Block until the thread doing the rename has finished, so the test can clean
	// up. It needed to wait for the transaction to release its lease.
	<-threadDone
}
Code example #8
File: lease_test.go Project: BramGruneir/cockroach
// Test that once a table is marked as deleted, a lease's refcount dropping to 0
// means the lease is released immediately, as opposed to being released only
// when it expires.
func TestLeasesOnDeletedTableAreReleasedImmediately(t *testing.T) {
	defer leaktest.AfterTest(t)()

	var mu syncutil.Mutex
	clearSchemaChangers := false

	var waitTableID sqlbase.ID
	deleted := make(chan bool)

	params, _ := createTestServerParams()
	params.Knobs = base.TestingKnobs{
		SQLLeaseManager: &csql.LeaseManagerTestingKnobs{
			TestingLeasesRefreshedEvent: func(cfg config.SystemConfig) {
				mu.Lock()
				defer mu.Unlock()
				if waitTableID != 0 {
					if isDeleted(waitTableID, cfg) {
						close(deleted)
						waitTableID = 0
					}
				}
			},
		},
		SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
			SyncFilter: func(tscc csql.TestingSchemaChangerCollection) {
				mu.Lock()
				defer mu.Unlock()
				if clearSchemaChangers {
					tscc.ClearSchemaChangers()
				}
			},
			AsyncExecNotification: asyncSchemaChangerDisabled,
		},
	}
	s, db, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	sql := `
CREATE DATABASE test;
CREATE TABLE test.t(a INT PRIMARY KEY);
`
	_, err := db.Exec(sql)
	if err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	lease1, err := acquire(s.(*server.TestServer), tableDesc.ID, 0)
	if err != nil {
		t.Fatal(err)
	}
	lease2, err := acquire(s.(*server.TestServer), tableDesc.ID, 0)
	if err != nil {
		t.Fatal(err)
	}

	// Block schema changers so that the table we're about to DROP is not actually
	// dropped; it will be left in a "deleted" state.
	// Also install a way to wait for the config update to be processed.
	mu.Lock()
	clearSchemaChangers = true
	waitTableID = tableDesc.ID
	mu.Unlock()

	// DROP the table
	_, err = db.Exec(`DROP TABLE test.t`)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the LeaseManager has processed the gossip update.
	<-deleted

	// We should still be able to acquire, because we have an active lease.
	lease3, err := acquire(s.(*server.TestServer), tableDesc.ID, 0)
	if err != nil {
		t.Fatal(err)
	}

	// Release everything.
	if err := s.LeaseManager().(*csql.LeaseManager).Release(lease1); err != nil {
		t.Fatal(err)
	}
	if err := s.LeaseManager().(*csql.LeaseManager).Release(lease2); err != nil {
		t.Fatal(err)
	}
	if err := s.LeaseManager().(*csql.LeaseManager).Release(lease3); err != nil {
		t.Fatal(err)
	}
	// Now we shouldn't be able to acquire any more.
	_, err = acquire(s.(*server.TestServer), tableDesc.ID, 0)
	if !testutils.IsError(err, "table is being dropped") {
		t.Fatalf("got a different error than expected: %v", err)
	}
}
Code example #9
File: stopper_test.go Project: jmptrader/cockroach
func TestStopperRunLimitedAsyncTask(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := stop.NewStopper()
	defer s.Stop()

	const maxConcurrency = 5
	const numTasks = maxConcurrency * 3
	sem := make(chan struct{}, maxConcurrency)
	taskSignal := make(chan struct{}, maxConcurrency)
	var mu syncutil.Mutex
	concurrency := 0
	peakConcurrency := 0
	var wg sync.WaitGroup

	f := func(_ context.Context) {
		mu.Lock()
		concurrency++
		if concurrency > peakConcurrency {
			peakConcurrency = concurrency
		}
		mu.Unlock()
		<-taskSignal
		mu.Lock()
		concurrency--
		mu.Unlock()
		wg.Done()
	}
	go func() {
		// Loop until the desired peak concurrency has been reached.
		for {
			mu.Lock()
			c := concurrency
			mu.Unlock()
			if c >= maxConcurrency {
				break
			}
			time.Sleep(time.Millisecond)
		}
		// Then let the rest of the async tasks finish quickly.
		for i := 0; i < numTasks; i++ {
			taskSignal <- struct{}{}
		}
	}()

	for i := 0; i < numTasks; i++ {
		wg.Add(1)
		if err := s.RunLimitedAsyncTask(
			context.TODO(), sem, true /* wait */, f,
		); err != nil {
			t.Fatal(err)
		}
	}
	wg.Wait()
	if concurrency != 0 {
		t.Fatalf("expected 0 concurrency at end of test but got %d", concurrency)
	}
	if peakConcurrency != maxConcurrency {
		t.Fatalf("expected peak concurrency %d to equal max concurrency %d",
			peakConcurrency, maxConcurrency)
	}

	sem = make(chan struct{}, 1)
	sem <- struct{}{}
	err := s.RunLimitedAsyncTask(
		context.TODO(), sem, false /* wait */, func(_ context.Context) {
		},
	)
	if err != stop.ErrThrottled {
		t.Fatalf("expected %v; got %v", stop.ErrThrottled, err)
	}
}
Code example #10
File: rsg_test.go Project: knz/cockroach
// testRandomSyntax performs all of the RSG setup and teardown for common
// random syntax testing operations. It takes f, a closure in which the random
// expression should be generated and executed; f returns a boolean indicating
// whether the statement executed successfully. This is used to verify that at
// least one success occurs (otherwise it is likely a bad test).
func testRandomSyntax(
	t *testing.T, setup func(db *gosql.DB) error, f func(db *gosql.DB, r *rsg.RSG) (success bool),
) {
	if *flagRSGTime == 0 {
		t.Skip("enable with '-rsg <duration>'")
	}

	params, _ := createTestServerParams()
	params.UseDatabase = "ident"
	s, db, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if setup != nil {
		err := setup(db)
		if err != nil {
			t.Fatal(err)
		}
	}

	y, err := ioutil.ReadFile(filepath.Join("parser", "sql.y"))
	if err != nil {
		t.Fatal(err)
	}
	r, err := rsg.NewRSG(timeutil.Now().UnixNano(), string(y))
	if err != nil {
		t.Fatal(err)
	}
	// Broadcast channel for all workers.
	done := make(chan bool)
	var wg sync.WaitGroup
	var lock syncutil.Mutex
	var total, success int
	worker := func() {
		defer wg.Done()
		for {
			select {
			case <-done:
				return
			default:
			}
			s := f(db, r)
			lock.Lock()
			total++
			if s {
				success++
			}
			lock.Unlock()
		}
	}
	for i := 0; i < *flagRSGGoRoutines; i++ {
		go worker()
		wg.Add(1)
	}
	time.Sleep(*flagRSGTime)
	close(done)
	wg.Wait()
	t.Logf("%d executions, %d successful", total, success)
	if success == 0 {
		t.Fatal("0 successful executions")
	}
}
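The worker loop is the usual fan-out/aggregate shape: each goroutine polls a broadcast done channel with a non-blocking select and folds its results into mutex-protected counters. A minimal hypothetical sketch, with a trivial stand-in for the real f(db, r) call:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	done := make(chan struct{})
	var wg sync.WaitGroup
	var mu sync.Mutex
	var total, success int

	worker := func() {
		defer wg.Done()
		for {
			select {
			case <-done:
				return // the broadcast: close(done) releases every worker
			default:
			}
			ok := time.Now().UnixNano()%2 == 0 // stand-in for the real work
			mu.Lock()
			total++
			if ok {
				success++
			}
			mu.Unlock()
		}
	}

	for i := 0; i < 4; i++ {
		wg.Add(1)
		go worker()
	}
	time.Sleep(10 * time.Millisecond) // let the workers run for a while
	close(done)
	wg.Wait()
	fmt.Printf("%d executions, %d successful\n", total, success)
}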
Code example #11
File: intent_test.go Project: knz/cockroach
func TestIntentResolution(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testCases := []struct {
		keys   []string
		ranges [][2]string
		exp    []string
	}{
		// Note that the first key (or range, if no keys are present) determines
		// the base key of the Txn. In these examples, it's always the first
		// range, so "a"-"s" is local. Any examples added must stick to that
		// convention and write the first key into "a"-"s".

		{
			keys:   []string{"a", "x", "b", "c", "s"},
			ranges: [][2]string{{"d", "e"}},
			exp:    []string{"s", "x"},
		},
		{
			keys:   []string{"h", "y", "z"},
			ranges: [][2]string{{"g", "z"}},
			exp:    []string{`"s"-"z\x00"`},
		},
		{
			keys:   []string{"q", "s"},
			ranges: [][2]string{{"a", "w"}, {"b", "x"}, {"t", "u"}},
			exp:    []string{`"s"-"x"`},
		},
		{
			keys:   []string{"q", "s", "y", "v"},
			ranges: [][2]string{{"a", "s"}, {"r", "t"}, {"u", "w"}},
			exp:    []string{`"s"-"t"`, `"u"-"w"`, "y"},
		},
	}

	splitKey := []byte("s")
	for i, tc := range testCases {
		// Use deterministic randomness to randomly put the writes in separate
		// batches or commit them with EndTransaction.
		rnd, seed := randutil.NewPseudoRand()
		log.Infof(context.Background(), "%d: using intent test seed %d", i, seed)

		results := map[string]struct{}{}
		func() {
			var storeKnobs storage.StoreTestingKnobs
			var mu syncutil.Mutex
			closer := make(chan struct{}, 2)
			var done bool
			storeKnobs.TestingCommandFilter =
				func(filterArgs storagebase.FilterArgs) *roachpb.Error {
					mu.Lock()
					defer mu.Unlock()
					header := filterArgs.Req.Header()
					// Ignore anything outside of the intent key range of "a" - "z"
					if header.Key.Compare(roachpb.Key("a")) < 0 || header.Key.Compare(roachpb.Key("z")) > 0 {
						return nil
					}
					var entry string
					switch arg := filterArgs.Req.(type) {
					case *roachpb.ResolveIntentRequest:
						if arg.Status == roachpb.COMMITTED {
							entry = string(header.Key)
						}
					case *roachpb.ResolveIntentRangeRequest:
						if arg.Status == roachpb.COMMITTED {
							entry = fmt.Sprintf("%s-%s", header.Key, header.EndKey)
						}
					}
					if entry != "" {
						log.Infof(context.Background(), "got %s", entry)
						results[entry] = struct{}{}
					}
					if len(results) >= len(tc.exp) && !done {
						done = true
						close(closer)
					}
					return nil
				}

			s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{
				Knobs: base.TestingKnobs{Store: &storeKnobs}})
			defer s.Stopper().Stop()
			// Split the Range. This should not have any asynchronous intents.
			if err := kvDB.AdminSplit(context.TODO(), splitKey); err != nil {
				t.Fatal(err)
			}

			if err := kvDB.Txn(context.TODO(), func(txn *client.Txn) error {
				b := txn.NewBatch()
				if tc.keys[0] >= string(splitKey) {
					t.Fatalf("first key %s must be < split key %s", tc.keys[0], splitKey)
				}
				for i, key := range tc.keys {
					// The first write must not go to the batch; it anchors the
					// transaction to the correct range.
					local := i != 0 && rnd.Intn(2) == 0
					log.Infof(context.Background(), "%d: %s: local: %t", i, key, local)
					if local {
						b.Put(key, "test")
					} else if err := txn.Put(key, "test"); err != nil {
						return err
					}
				}

				for _, kr := range tc.ranges {
					local := rnd.Intn(2) == 0
					log.Infof(context.Background(), "%d: [%s,%s): local: %t", i, kr[0], kr[1], local)
					if local {
						b.DelRange(kr[0], kr[1], false)
					} else if err := txn.DelRange(kr[0], kr[1]); err != nil {
						return err
					}
				}

				return txn.CommitInBatch(b)
			}); err != nil {
				t.Fatalf("%d: %s", i, err)
			}
			<-closer // wait for async intents
			// Use Raft to make it likely that any straddling intent
			// resolutions have come in. Don't touch existing data; that could
			// generate unexpected intent resolutions.
			if _, err := kvDB.Scan(context.TODO(), "z\x00", "z\x00\x00", 0); err != nil {
				t.Fatal(err)
			}
		}()
		// Verification. Note that this runs after the system has stopped, so that
		// everything asynchronous has already happened.
		expResult := tc.exp
		sort.Strings(expResult)
		var actResult []string
		for k := range results {
			actResult = append(actResult, k)
		}
		sort.Strings(actResult)
		if !reflect.DeepEqual(actResult, expResult) {
			t.Fatalf("%d: unexpected non-local intents, expected %s: %s", i, expResult, actResult)
		}
	}
}