func assertStateChangesTo(t *testing.T, ch *Channel, state ChannelState) { var lastState ChannelState require.True(t, testutils.WaitFor(time.Second, func() bool { lastState = ch.State() return lastState == state }), "Channel state is %v expected %v", lastState, state) }
func TestIntrospectNumConnections(t *testing.T) { // Disable the relay, since the relay does not maintain a 1:1 mapping betewen // incoming connections vs outgoing connections. opts := testutils.NewOpts().NoRelay() testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) { ctx, cancel := NewContext(time.Second) defer cancel() assert.Equal(t, 0, ts.Server().IntrospectNumConnections(), "Expected no connection on new server") for i := 0; i < 10; i++ { client := ts.NewClient(nil) defer client.Close() require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed") assert.Equal(t, 1, client.IntrospectNumConnections(), "Client should have single connection") assert.Equal(t, i+1, ts.Server().IntrospectNumConnections(), "Incorrect number of server connections") } // Make sure that a closed connection will reduce NumConnections. client := ts.NewClient(nil) require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed") assert.Equal(t, 11, ts.Server().IntrospectNumConnections(), "Number of connections expected to increase") client.Close() require.True(t, testutils.WaitFor(100*time.Millisecond, func() bool { return ts.Server().IntrospectNumConnections() == 10 }), "Closed connection did not get removed, num connections is %v", ts.Server().IntrospectNumConnections()) }) }
func closeAndVerify(b *testing.B, ch *Channel) { ch.Close() isChanClosed := func() bool { return ch.State() == ChannelClosed } if !testutils.WaitFor(time.Second, isChanClosed) { b.Errorf("Timed out waiting for channel to close, state: %v", ch.State()) } }
func TestReuseConnection(t *testing.T) { ctx, cancel := NewContext(time.Second) defer cancel() // Since we're specifically testing that connections between hosts are re-used, // we can't interpose a relay in this test. s1Opts := testutils.NewOpts().SetServiceName("s1").NoRelay() testutils.WithTestServer(t, s1Opts, func(ts *testutils.TestServer) { ch2 := ts.NewServer(&testutils.ChannelOpts{ServiceName: "s2"}) hostPort2 := ch2.PeerInfo().HostPort defer ch2.Close() ts.Register(raw.Wrap(newTestHandler(t)), "echo") ch2.Register(raw.Wrap(newTestHandler(t)), "echo") outbound, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil) require.NoError(t, err) outboundConn, outboundNetConn := OutboundConnection(outbound) // Try to make another call at the same time, should reuse the same connection. outbound2, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil) require.NoError(t, err) outbound2Conn, _ := OutboundConnection(outbound) assert.Equal(t, outboundConn, outbound2Conn) // Wait for the connection to be marked as active in ch2. assert.True(t, testutils.WaitFor(time.Second, func() bool { return ch2.IntrospectState(nil).NumConnections > 0 }), "ch2 does not have any active connections") // When ch2 tries to call the test server, it should reuse the existing // inbound connection the test server. Of course, this only works if the // test server -> ch2 call wasn't relayed. outbound3, err := ch2.BeginCall(ctx, ts.HostPort(), "s1", "echo", nil) require.NoError(t, err) _, outbound3NetConn := OutboundConnection(outbound3) assert.Equal(t, outboundNetConn.RemoteAddr(), outbound3NetConn.LocalAddr()) assert.Equal(t, outboundNetConn.LocalAddr(), outbound3NetConn.RemoteAddr()) // Ensure all calls can complete in parallel. 
var wg sync.WaitGroup for _, call := range []*OutboundCall{outbound, outbound2, outbound3} { wg.Add(1) go func(call *OutboundCall) { defer wg.Done() resp1, resp2, _, err := raw.WriteArgs(call, []byte("arg2"), []byte("arg3")) require.NoError(t, err) assert.Equal(t, resp1, []byte("arg2"), "result does match argument") assert.Equal(t, resp2, []byte("arg3"), "result does match argument") }(call) } wg.Wait() }) }
// TestPeerScoreOnNewConnection verifies that establishing a connection to a
// peer — in either direction — lowers that peer's score in s1's peer list,
// so connected peers are preferred by peer selection.
func TestPeerScoreOnNewConnection(t *testing.T) {
	tests := []struct {
		message string
		// connect triggers the connection under test and returns the peer
		// whose connection will be established.
		connect func(s1, s2 *Channel) *Peer
	}{
		{
			message: "outbound connection",
			connect: func(s1, s2 *Channel) *Peer {
				return s1.Peers().GetOrAdd(s2.PeerInfo().HostPort)
			},
		},
		{
			message: "inbound connection",
			connect: func(s1, s2 *Channel) *Peer {
				return s2.Peers().GetOrAdd(s1.PeerInfo().HostPort)
			},
		},
	}

	// getScore returns the score of the single peer in pl; the test keeps
	// exactly one peer per list, enforced by the require below.
	getScore := func(pl *PeerList) uint64 {
		peers := pl.IntrospectList(nil)
		require.Equal(t, 1, len(peers), "Wrong number of peers")
		return peers[0].Score
	}

	for _, tt := range tests {
		testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
			ctx, cancel := NewContext(time.Second)
			defer cancel()

			s1 := ts.Server()
			s2 := ts.NewServer(nil)

			// Each side adds the other as a peer so both lists have one entry.
			s1.Peers().Add(s2.PeerInfo().HostPort)
			s2.Peers().Add(s1.PeerInfo().HostPort)

			// Capture the score before any connection exists so we can
			// assert that connecting lowers it.
			initialScore := getScore(s1.Peers())
			peer := tt.connect(s1, s2)
			conn, err := peer.GetConnection(ctx)
			require.NoError(t, err, "%v: GetConnection failed", tt.message)

			// When receiving an inbound connection, the outbound connect may return
			// before the inbound has updated the score, so we may need to retry.
			assert.True(t, testutils.WaitFor(time.Second, func() bool {
				connectedScore := getScore(s1.Peers())
				return connectedScore < initialScore
			}), "%v: Expected connected peer score %v to be less than initial score %v",
				tt.message, getScore(s1.Peers()), initialScore)

			// Ping to ensure the connection has been added to peers on both sides.
			require.NoError(t, conn.Ping(ctx), "%v: Ping failed", tt.message)
		})
	}
}
func TestReuseConnection(t *testing.T) { ctx, cancel := NewContext(time.Second) defer cancel() s1Opts := &testutils.ChannelOpts{ServiceName: "s1"} WithVerifiedServer(t, s1Opts, func(ch1 *Channel, hostPort1 string) { s2Opts := &testutils.ChannelOpts{ServiceName: "s2"} WithVerifiedServer(t, s2Opts, func(ch2 *Channel, hostPort2 string) { ch1.Register(raw.Wrap(newTestHandler(t)), "echo") ch2.Register(raw.Wrap(newTestHandler(t)), "echo") // We need the servers to have their peers set before making outgoing calls // for the outgoing calls to contain the correct peerInfo. require.True(t, testutils.WaitFor(time.Second, func() bool { return !ch1.PeerInfo().IsEphemeral() && !ch2.PeerInfo().IsEphemeral() })) outbound, err := ch1.BeginCall(ctx, hostPort2, "s2", "echo", nil) require.NoError(t, err) outboundConn, outboundNetConn := OutboundConnection(outbound) // Try to make another call at the same time, should reuse the same connection. outbound2, err := ch1.BeginCall(ctx, hostPort2, "s2", "echo", nil) require.NoError(t, err) outbound2Conn, _ := OutboundConnection(outbound) assert.Equal(t, outboundConn, outbound2Conn) // When ch2 tries to call ch1, it should reuse the inbound connection from ch1. outbound3, err := ch2.BeginCall(ctx, hostPort1, "s1", "echo", nil) require.NoError(t, err) _, outbound3NetConn := OutboundConnection(outbound3) assert.Equal(t, outboundNetConn.RemoteAddr(), outbound3NetConn.LocalAddr()) assert.Equal(t, outboundNetConn.LocalAddr(), outbound3NetConn.RemoteAddr()) // Ensure all calls can complete in parallel. var wg sync.WaitGroup for _, call := range []*OutboundCall{outbound, outbound2, outbound3} { wg.Add(1) go func(call *OutboundCall) { defer wg.Done() resp1, resp2, _, err := raw.WriteArgs(call, []byte("arg2"), []byte("arg3")) require.NoError(t, err) assert.Equal(t, resp1, []byte("arg2"), "result does match argument") assert.Equal(t, resp2, []byte("arg3"), "result does match argument") }(call) } wg.Wait() }) }) }
func TestMockIgnoresDown(t *testing.T) { mockHB, err := mockhyperbahn.New() require.NoError(t, err, "Failed to set up mock hyperbahm") var ( moe1Called atomic.Bool moe2Called atomic.Bool ) moe1 := newAdvertisedEchoServer(t, "moe", mockHB, func() { moe1Called.Store(true) }) defer moe1.Close() moe2 := newAdvertisedEchoServer(t, "moe", mockHB, func() { moe2Called.Store(true) }) defer moe2.Close() client := newAdvertisedEchoServer(t, "client", mockHB, nil) ctx, cancel := tchannel.NewContext(time.Second) defer cancel() for i := 0; i < 20; i++ { _, _, _, err = raw.CallSC(ctx, client.GetSubChannel("moe"), "echo", nil, nil) assert.NoError(t, err, "Call failed") } require.True(t, moe1Called.Load(), "moe1 not called") require.True(t, moe2Called.Load(), "moe2 not called") // If moe2 is brought down, all calls should now be sent to moe1. moe2.Close() // Wait for the mock HB to have 0 connections to moe ok := testutils.WaitFor(time.Second, func() bool { in, out := mockHB.Channel().Peers().GetOrAdd(moe2.PeerInfo().HostPort).NumConnections() return in+out == 0 }) require.True(t, ok, "Failed waiting for mock HB to have 0 connections") // Make sure that all calls succeed (they should all go to moe2) moe1Called.Store(false) moe2Called.Store(false) for i := 0; i < 20; i++ { _, _, _, err = raw.CallSC(ctx, client.GetSubChannel("moe"), "echo", nil, nil) assert.NoError(t, err, "Call failed") } require.True(t, moe1Called.Load(), "moe1 not called") require.False(t, moe2Called.Load(), "moe2 should not be called after Close") }
// Trigger a race between receiving a new call and a connection closing
// by closing the relay while a lot of background calls are being made.
func TestRaceCloseWithNewCall(t *testing.T) {
	opts := serviceNameOpts("s1").SetRelayOnly().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2").DisableLogVerification())
		testutils.RegisterEcho(s1, nil)

		// signal to start closing the relay.
		var (
			closeRelay  sync.WaitGroup // each caller signals once it has made a few calls
			stopCalling atomic.Int32   // non-zero tells callers to stop (go.uber.org/atomic style — has Inc; confirm import)
			callers     sync.WaitGroup // tracks the calling goroutines themselves
		)

		for i := 0; i < 5; i++ {
			callers.Add(1)
			closeRelay.Add(1)
			go func() {
				defer callers.Done()
				calls := 0
				for stopCalling.Load() == 0 {
					testutils.CallEcho(s2, ts.HostPort(), "s1", nil)
					calls++
					// After 5 calls, tell the main goroutine this caller is
					// warmed up; calls continue while the relay closes.
					if calls == 5 {
						closeRelay.Done()
					}
				}
			}()
		}

		// Wait until every caller has made at least 5 calls, so closing the
		// relay races with in-flight traffic.
		closeRelay.Wait()

		// Close the relay, wait for it to close.
		ts.Relay().Close()
		closed := testutils.WaitFor(time.Second, func() bool {
			return ts.Relay().State() == ChannelClosed
		})
		assert.True(t, closed, "Relay did not close within timeout")

		// Now stop all calls, and wait for the calling goroutine to end.
		stopCalling.Inc()
		callers.Wait()
	})
}