Example #1
func TestWriteErrorAfterTimeout(t *testing.T) {
	// TODO: Make this test block at different points (e.g. before, during read/write).
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		timedOut := make(chan struct{})
		done := make(chan struct{})
		handler := func(ctx context.Context, call *InboundCall) {
			<-ctx.Done()
			<-timedOut
			_, err := raw.ReadArgs(call)
			assert.Equal(t, ErrTimeout, err, "Read args should fail with timeout")
			response := call.Response()
			assert.Equal(t, ErrTimeout, response.SendSystemError(ErrServerBusy), "SendSystemError should fail")
			close(done)
		}
		ch.Register(HandlerFunc(handler), "call")

		ctx, cancel := NewContext(testutils.Timeout(20 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "call", nil, testutils.RandBytes(100000))
		assert.Equal(t, ErrTimeout, err, "Call should timeout")
		close(timedOut)
		<-done
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #2
func TestFragmentationSlowReader(t *testing.T) {
	startReading, handlerComplete := make(chan struct{}), make(chan struct{})
	handler := func(ctx context.Context, call *InboundCall) {
		<-ctx.Done()
		<-startReading
		_, err := raw.ReadArgs(call)
		assert.Error(t, err, "ReadArgs should fail since frames will be dropped due to slow reading")
		close(handlerComplete)
	}

	// Inbound frame forwarding will time out and cause a warning log.
	opts := testutils.NewOpts().AddLogFilter("Unable to forward frame", 1)
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		ch.Register(HandlerFunc(handler), "echo")

		// Size the args so the payload spans more frames than the exchange can
		// buffer (MexChannelBufferSize), forcing frames to be dropped while the
		// handler is not reading.
		arg2 := testutils.RandBytes(MaxFramePayloadSize * MexChannelBufferSize)
		arg3 := testutils.RandBytes(MaxFramePayloadSize * (MexChannelBufferSize + 1))

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", arg2, arg3)
		assert.Error(t, err, "Call should timeout due to slow reader")

		close(startReading)
		<-handlerComplete
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #3
func TestCloseSemanticsIsolated(t *testing.T) {
	// We defer the check as we want it to run after the SetTimeout clears the timeout.
	defer goroutines.VerifyNoLeaks(t, nil)
	defer testutils.SetTimeout(t, 2*time.Second)()

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	ct := &closeSemanticsTest{t, ctx, true /* isolated */}
	ct.runTest(ctx)
}
Example #4
func TestLargeTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "echo")

		ctx, cancel := NewContext(1000 * time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", testArg2, testArg3)
		assert.NoError(t, err, "Call failed")
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #5
func TestNoLeakedState(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		state1 := ch.IntrospectState(nil)
		for i := 0; i < 100; i++ {
			callWithNewClient(t, hostPort)
		}

		// Wait for all runnable goroutines to end. We expect one extra goroutine for the server.
		goroutines.VerifyNoLeaks(t, &goroutines.VerifyOpts{
			Exclude: "(*Channel).Serve",
		})

		state2 := ch.IntrospectState(nil)
		assert.Equal(t, state1, state2, "State mismatch")
	})
}
Example #6
// TestCloseSendError tests that system errors are not sent once a
// connection is closed, and ensures there are no races such as the error
// frame being added to the channel just as it is closed.
func TestCloseSendError(t *testing.T) {
	var (
		closed  atomic.Uint32
		counter atomic.Uint32
	)

	opts := testutils.NewOpts().DisableLogVerification()
	serverCh := testutils.NewServer(t, opts)
	testutils.RegisterEcho(serverCh, func() {
		if counter.Inc() > 10 {
			// Close the server in a goroutine to possibly trigger more race conditions.
			go func() {
				closed.Inc()
				serverCh.Close()
			}()
		}
	})

	clientCh := testutils.NewClient(t, opts)

	// Create a connection that will be shared.
	require.NoError(t, testutils.Ping(clientCh, serverCh), "Ping from client to server failed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			err := testutils.CallEcho(clientCh, serverCh.PeerInfo().HostPort, serverCh.ServiceName(), nil)
			if err != nil && closed.Load() == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	goroutines.VerifyNoLeaks(t, nil)
}
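Example #10 below is a variant of the same test built on the standard library's sync/atomic instead of the typed counters from go.uber.org/atomic used here. A minimal sketch contrasting the two styles (function and variable names are illustrative):

import (
	stdatomic "sync/atomic"

	"go.uber.org/atomic"
)

// compareAtomicStyles contrasts the two counter APIs used by the two
// versions of TestCloseSendError in this file.
func compareAtomicStyles() {
	// go.uber.org/atomic wraps the value in a type, so every access is
	// forced through an atomic method.
	var typed atomic.Uint32
	typed.Inc() // returns the new value
	_ = typed.Load()

	// sync/atomic operates on a bare uint32; nothing prevents an
	// accidental plain read or write of the same variable.
	var bare uint32
	stdatomic.AddUint32(&bare, 1)
	_ = stdatomic.LoadUint32(&bare)
}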
Example #7
func TestCloseOneSide(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	ch1 := testutils.NewServer(t, &testutils.ChannelOpts{ServiceName: "client"})
	ch2 := testutils.NewServer(t, &testutils.ChannelOpts{ServiceName: "server"})

	connected := make(chan struct{})
	completed := make(chan struct{})
	blockCall := make(chan struct{})
	testutils.RegisterFunc(ch2, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		connected <- struct{}{}
		<-blockCall
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	})

	go func() {
		ch2Peer := ch2.PeerInfo()
		_, _, _, err := raw.Call(ctx, ch1, ch2Peer.HostPort, ch2Peer.ServiceName, "echo", nil, nil)
		assert.NoError(t, err, "Call failed")
		completed <- struct{}{}
	}()

	// Wait for connected before calling Close.
	<-connected
	ch1.Close()

	// Now unblock the call and wait for the call to complete.
	close(blockCall)
	<-completed

	// Once the call completes, the channel should be closed.
	assertStateChangesTo(t, ch1, ChannelClosed)

	// We need to close all open TChannels before verifying blocked goroutines.
	ch2.Close()
	goroutines.VerifyNoLeaks(t, nil)
}
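assertStateChangesTo is a helper from the test suite. A minimal sketch under the assumption that it polls Channel.State until the expected state is reached (the real helper may instead subscribe to state-change events):

// Hypothetical sketch of assertStateChangesTo: poll the channel state
// until it reaches the expected value or a deadline passes.
func assertStateChangesTo(t *testing.T, ch *Channel, want ChannelState) {
	deadline := time.Now().Add(time.Second)
	for ch.State() != want {
		if time.Now().After(deadline) {
			t.Errorf("Channel state is %v, want %v", ch.State(), want)
			return
		}
		runtime.Gosched()
	}
}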
Example #8
func TestCloseSingleChannel(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()
	ch := testutils.NewServer(t, nil)

	var connected sync.WaitGroup
	var completed sync.WaitGroup
	blockCall := make(chan struct{})

	testutils.RegisterFunc(ch, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		connected.Done()
		<-blockCall
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	})

	for i := 0; i < 10; i++ {
		connected.Add(1)
		completed.Add(1)
		go func() {
			peerInfo := ch.PeerInfo()
			_, _, _, err := raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
			assert.NoError(t, err, "Call failed")
			completed.Done()
		}()
	}

	// Wait for all calls to connect before triggering Close (so they do not fail).
	connected.Wait()
	ch.Close()

	// Unblock the calls, and wait for all the calls to complete.
	close(blockCall)
	completed.Wait()

	// Once all calls are complete, the channel should be closed.
	assertStateChangesTo(t, ch, ChannelClosed)
	goroutines.VerifyNoLeaks(t, nil)
}
Example #9
func TestTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		// onError may be called when the "block" call tries to write the call response.
		onError := func(ctx context.Context, err error) {
			assert.Equal(t, ErrTimeout, err, "onError err should be ErrTimeout")
			assert.Equal(t, context.DeadlineExceeded, ctx.Err(), "Context should timeout")
		}
		testHandler := onErrorTestHandler{newTestHandler(t), onError}
		ch.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "block", []byte("Arg2"), []byte("Arg3"))
		assert.Equal(t, ErrTimeout, err)

		// Verify the server-side receives an error from the context.
		assert.Equal(t, context.DeadlineExceeded, <-testHandler.blockErr)
	})
	goroutines.VerifyNoLeaks(t, nil)
}
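newTestHandler and onErrorTestHandler are defined elsewhere in the suite. A plausible shape, assuming raw.Wrap accepts any implementation of a raw handler interface with a Handle method plus an OnError hook:

// Hypothetical sketch of the wrapper used above: embed the base test
// handler and override only the OnError hook, so a test can assert on
// the error the server side observes.
type onErrorTestHandler struct {
	*testHandler
	onError func(ctx context.Context, err error)
}

func (h onErrorTestHandler) OnError(ctx context.Context, err error) {
	h.onError(ctx, err)
}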
Example #10
// TestCloseSendError tests that system errors are not sent once a
// connection is closed, and ensures there are no races such as the error
// frame being added to the channel just as it is closed.
// TODO(prashant): This test is waiting for timeout, but socket close shouldn't wait for timeout.
func TestCloseSendError(t *testing.T) {
	closed := uint32(0)
	counter := uint32(0)

	serverCh := testutils.NewServer(t, nil)
	testutils.RegisterEcho(serverCh, func() {
		if atomic.AddUint32(&counter, 1) > 10 {
			// Close the server in a goroutine to possibly trigger more race conditions.
			go func() {
				atomic.AddUint32(&closed, 1)
				serverCh.Close()
			}()
		}
	})

	clientCh := testutils.NewClient(t, nil)

	// Create a connection that will be shared.
	require.NoError(t, testutils.Ping(clientCh, serverCh), "Ping from client to server failed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			err := testutils.CallEcho(clientCh, serverCh, nil)
			if err != nil && atomic.LoadUint32(&closed) == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	goroutines.VerifyNoLeaks(t, nil)
}
Example #11
func TestWriteArg3AfterTimeout(t *testing.T) {
	// The channel reads and writes during timeouts, causing warning logs.
	opts := testutils.NewOpts().DisableLogVerification()
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		timedOut := make(chan struct{})

		handler := func(ctx context.Context, call *InboundCall) {
			_, err := raw.ReadArgs(call)
			assert.NoError(t, err, "Read args failed")
			response := call.Response()
			assert.NoError(t, NewArgWriter(response.Arg2Writer()).Write(nil), "Write Arg2 failed")
			writer, err := response.Arg3Writer()
			assert.NoError(t, err, "Arg3Writer failed")

			for {
				if _, err := writer.Write(testutils.RandBytes(4096)); err != nil {
					assert.Equal(t, ErrTimeout, err, "Handler should timeout")
					close(timedOut)
					return
				}
				runtime.Gosched()
			}
		}
		ch.Register(HandlerFunc(handler), "call")

		ctx, cancel := NewContext(testutils.Timeout(20 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "call", nil, nil)
		assert.Equal(t, ErrTimeout, err, "Call should timeout")

		// Wait for the handler's write to fail with a timeout.
		select {
		case <-time.After(testutils.Timeout(30 * time.Millisecond)):
			t.Errorf("Handler should have failed due to timeout")
		case <-timedOut:
		}
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #12
func TestCloseAfterTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ch.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(100 * time.Millisecond)
		defer cancel()

		// Make a call and wait for it to time out.
		clientCh := testutils.NewClient(t, nil)
		peerInfo := ch.PeerInfo()
		_, _, _, err := raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "block", nil, nil)
		require.Error(t, err, "Expected call to timeout")

		// The client channel should also close immediately.
		clientCh.Close()
		assertStateChangesTo(t, clientCh, ChannelClosed)
		assert.True(t, clientCh.Closed(), "Channel should be closed")

		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #13
// TestNoGoroutinesLeaked verifies that no tests have leaked goroutines.
func TestNoGoroutinesLeaked(t *testing.T) {
	goroutines.VerifyNoLeaks(t, nil)
}
Example #14
func TestFramesReleased(t *testing.T) {
	CheckStress(t)

	defer testutils.SetTimeout(t, 10*time.Second)()
	const (
		requestsPerGoroutine = 10
		numGoroutines        = 10
	)

	var serverExchanges, clientExchanges string
	pool := NewRecordingFramePool()
	opts := testutils.NewOpts().
		SetServiceName("swap-server").
		SetFramePool(pool).
		AddLogFilter("Couldn't find handler.", numGoroutines*requestsPerGoroutine)
	WithVerifiedServer(t, opts, func(serverCh *Channel, hostPort string) {
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")

		clientOpts := testutils.NewOpts().SetFramePool(pool)
		clientCh := testutils.NewClient(t, clientOpts)
		defer clientCh.Close()

		// Ping to create an active connection that the goroutines can share.
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		require.NoError(t, clientCh.Ping(ctx, hostPort))

		var wg sync.WaitGroup
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				for i := 0; i < requestsPerGoroutine; i++ {
					doPingAndCall(t, clientCh, hostPort)
					doErrorCall(t, clientCh, hostPort)
				}
			}()
		}

		wg.Wait()

		serverExchanges = CheckEmptyExchanges(serverCh)
		clientExchanges = CheckEmptyExchanges(clientCh)
	})

	// Since the test is still running, the timeout goroutine will be running and can be ignored.
	goroutines.VerifyNoLeaks(t, &goroutines.VerifyOpts{
		Exclude: "testutils.SetTimeout",
	})

	if unreleasedCount, isEmpty := pool.CheckEmpty(); isEmpty != "" || unreleasedCount > 0 {
		t.Errorf("Frame pool has %v unreleased frames, errors:\n%v", unreleasedCount, isEmpty)
	}

	// Check the message exchanges and make sure they are all empty.
	if serverExchanges != "" {
		t.Errorf("Found uncleared message exchanges on server:\n%s", serverExchanges)
	}
	if clientExchanges != "" {
		t.Errorf("Found uncleared message exchanges on client:\n%s", clientExchanges)
	}
}
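NewRecordingFramePool, CheckEmptyExchanges, doPingAndCall, and doErrorCall are test helpers defined elsewhere. A minimal sketch of a leak-detecting pool, assuming tchannel's FramePool interface (Get() *Frame / Release(*Frame)); a real recording pool would likely also capture allocation sites for the error report:

// Minimal sketch of a frame pool that records outstanding frames.
type recordingFramePool struct {
	mu          sync.Mutex
	outstanding map[*Frame]struct{}
}

func newRecordingFramePool() *recordingFramePool {
	return &recordingFramePool{outstanding: make(map[*Frame]struct{})}
}

func (p *recordingFramePool) Get() *Frame {
	f := NewFrame(MaxFramePayloadSize)
	p.mu.Lock()
	p.outstanding[f] = struct{}{}
	p.mu.Unlock()
	return f
}

func (p *recordingFramePool) Release(f *Frame) {
	p.mu.Lock()
	delete(p.outstanding, f)
	p.mu.Unlock()
}

// checkEmpty reports how many frames were taken but never released.
func (p *recordingFramePool) checkEmpty() int {
	p.mu.Lock()
	defer p.mu.Unlock()
	return len(p.outstanding)
}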