func TestReuseConnection(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Since we're specifically testing that connections between hosts are re-used,
	// we can't interpose a relay in this test.
	s1Opts := testutils.NewOpts().SetServiceName("s1").NoRelay()

	testutils.WithTestServer(t, s1Opts, func(ts *testutils.TestServer) {
		ch2 := ts.NewServer(&testutils.ChannelOpts{ServiceName: "s2"})
		hostPort2 := ch2.PeerInfo().HostPort
		defer ch2.Close()

		ts.Register(raw.Wrap(newTestHandler(t)), "echo")
		ch2.Register(raw.Wrap(newTestHandler(t)), "echo")

		outbound, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil)
		require.NoError(t, err)
		outboundConn, outboundNetConn := OutboundConnection(outbound)

		// Try to make another call at the same time, should reuse the same connection.
		outbound2, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil)
		require.NoError(t, err)
		outbound2Conn, _ := OutboundConnection(outbound2)
		assert.Equal(t, outboundConn, outbound2Conn)

		// Wait for the connection to be marked as active in ch2.
		assert.True(t, testutils.WaitFor(time.Second, func() bool {
			return ch2.IntrospectState(nil).NumConnections > 0
		}), "ch2 does not have any active connections")

		// When ch2 tries to call the test server, it should reuse the existing
		// inbound connection from the test server. Of course, this only works if the
		// test server -> ch2 call wasn't relayed.
		outbound3, err := ch2.BeginCall(ctx, ts.HostPort(), "s1", "echo", nil)
		require.NoError(t, err)
		_, outbound3NetConn := OutboundConnection(outbound3)
		assert.Equal(t, outboundNetConn.RemoteAddr(), outbound3NetConn.LocalAddr())
		assert.Equal(t, outboundNetConn.LocalAddr(), outbound3NetConn.RemoteAddr())

		// Ensure all calls can complete in parallel.
		var wg sync.WaitGroup
		for _, call := range []*OutboundCall{outbound, outbound2, outbound3} {
			wg.Add(1)
			go func(call *OutboundCall) {
				defer wg.Done()
				resp1, resp2, _, err := raw.WriteArgs(call, []byte("arg2"), []byte("arg3"))
				require.NoError(t, err)
				assert.Equal(t, resp1, []byte("arg2"), "result does not match argument")
				assert.Equal(t, resp2, []byte("arg3"), "result does not match argument")
			}(call)
		}
		wg.Wait()
	})
}

// Register the different endpoints of the test subject.
func register(ch *tchannel.Channel) {
	ch.Register(raw.Wrap(echoRawHandler{}), "echo/raw")
	ch.Register(raw.Wrap(handlerTimeoutRawHandler{}), "handlertimeout/raw")
	json.Register(ch, json.Handlers{"echo": echoJSONHandler}, onError)

	tserver := thrift.NewServer(ch)
	tserver.Register(echo.NewTChanEchoServer(&echoThriftHandler{}))
	tserver.Register(gauntlet_tchannel.NewTChanThriftTestServer(&thriftTestHandler{}))
	tserver.Register(gauntlet_tchannel.NewTChanSecondServiceServer(&secondServiceHandler{}))
}

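// The sketch below is an illustration, not part of the original file: it shows
// how a client might exercise the "echo/raw" endpoint registered above, using
// the same raw.Call pattern that appears throughout these tests. The function
// name and parameters are hypothetical.
func callEchoRaw(ctx context.Context, client *tchannel.Channel, hostPort, svcName string) error {
	// raw.Call sends arg2/arg3 to the named method and returns the echoed
	// responses; only the error is propagated here.
	_, _, _, err := raw.Call(ctx, client, hostPort, svcName, "echo/raw", []byte("Arg2"), []byte("Arg3"))
	return err
}
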
func TestReuseConnection(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	s1Opts := &testutils.ChannelOpts{ServiceName: "s1"}
	WithVerifiedServer(t, s1Opts, func(ch1 *Channel, hostPort1 string) {
		s2Opts := &testutils.ChannelOpts{ServiceName: "s2"}
		WithVerifiedServer(t, s2Opts, func(ch2 *Channel, hostPort2 string) {
			ch1.Register(raw.Wrap(newTestHandler(t)), "echo")
			ch2.Register(raw.Wrap(newTestHandler(t)), "echo")

			// We need the servers to have their peers set before making outgoing calls
			// for the outgoing calls to contain the correct peerInfo.
			require.True(t, testutils.WaitFor(time.Second, func() bool {
				return !ch1.PeerInfo().IsEphemeral() && !ch2.PeerInfo().IsEphemeral()
			}))

			outbound, err := ch1.BeginCall(ctx, hostPort2, "s2", "echo", nil)
			require.NoError(t, err)
			outboundConn, outboundNetConn := OutboundConnection(outbound)

			// Try to make another call at the same time, should reuse the same connection.
			outbound2, err := ch1.BeginCall(ctx, hostPort2, "s2", "echo", nil)
			require.NoError(t, err)
			outbound2Conn, _ := OutboundConnection(outbound2)
			assert.Equal(t, outboundConn, outbound2Conn)

			// When ch2 tries to call ch1, it should reuse the inbound connection from ch1.
			outbound3, err := ch2.BeginCall(ctx, hostPort1, "s1", "echo", nil)
			require.NoError(t, err)
			_, outbound3NetConn := OutboundConnection(outbound3)
			assert.Equal(t, outboundNetConn.RemoteAddr(), outbound3NetConn.LocalAddr())
			assert.Equal(t, outboundNetConn.LocalAddr(), outbound3NetConn.RemoteAddr())

			// Ensure all calls can complete in parallel.
			var wg sync.WaitGroup
			for _, call := range []*OutboundCall{outbound, outbound2, outbound3} {
				wg.Add(1)
				go func(call *OutboundCall) {
					defer wg.Done()
					resp1, resp2, _, err := raw.WriteArgs(call, []byte("arg2"), []byte("arg3"))
					require.NoError(t, err)
					assert.Equal(t, resp1, []byte("arg2"), "result does not match argument")
					assert.Equal(t, resp2, []byte("arg3"), "result does not match argument")
				}(call)
			}
			wg.Wait()
		})
	})
}

func TestRoundTrip(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		handler := newTestHandler(t)
		ts.Register(raw.Wrap(handler), "echo")

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		call, err := ts.Server().BeginCall(ctx, ts.HostPort(), ts.ServiceName(), "echo", &CallOptions{Format: JSON})
		require.NoError(t, err)
		assert.NotEmpty(t, call.RemotePeer().HostPort)

		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(testArg2))
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(testArg3))

		var respArg2 []byte
		require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&respArg2))
		assert.Equal(t, testArg2, []byte(respArg2))

		var respArg3 []byte
		require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&respArg3))
		assert.Equal(t, testArg3, []byte(respArg3))

		assert.Equal(t, JSON, handler.format)
		assert.Equal(t, ts.ServiceName(), handler.caller)
		assert.Equal(t, JSON, call.Response().Format(), "response Format should match request Format")
	})
}

func Benchmark_TChannel_YARPCToTChannel(b *testing.B) {
	serverCh, err := tchannel.NewChannel("server", nil)
	require.NoError(b, err, "failed to build server TChannel")
	defer serverCh.Close()

	serverCh.Register(traw.Wrap(tchannelEcho{t: b}), "echo")
	require.NoError(b, serverCh.ListenAndServe(":0"), "failed to start up TChannel")

	clientCh, err := tchannel.NewChannel("client", nil)
	require.NoError(b, err, "failed to build client TChannel")

	clientCfg := yarpc.Config{
		Name: "client",
		Outbounds: yarpc.Outbounds{
			"server": {
				Unary: ytchannel.NewOutbound(clientCh, ytchannel.HostPort(serverCh.PeerInfo().HostPort)),
			},
		},
	}

	withDispatcher(b, clientCfg, func(client yarpc.Dispatcher) {
		b.ResetTimer()
		runYARPCClient(b, raw.New(client.Channel("server")))
	})
}

func TestRequestStateRetry(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	server := testutils.NewServer(t, nil)
	defer server.Close()
	server.Register(raw.Wrap(newTestHandler(t)), "echo")

	client := testutils.NewClient(t, nil)
	defer client.Close()

	counter := 0
	sc := client.GetSubChannel(server.PeerInfo().ServiceName)
	err := client.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
		defer func() { counter++ }()

		assert.Equal(t, counter, len(rs.SelectedPeers), "SelectedPeers should not be reused")

		if counter < 4 {
			client.Peers().Add(testutils.GetClosedHostPort(t))
		} else {
			client.Peers().Add(server.PeerInfo().HostPort)
		}

		_, err := raw.CallV2(ctx, sc, raw.CArgs{
			Operation:   "echo",
			CallOptions: &CallOptions{RequestState: rs},
		})
		return err
	})
	assert.NoError(t, err, "RunWithRetry should succeed")
	assert.Equal(t, 5, counter, "RunWithRetry should retry 5 times")
}

func TestCloseAfterTimeout(t *testing.T) {
	// Disable log verification since connections are closed after a timeout
	// and the relay might still be reading/writing to the connection.
	// TODO: Ideally, we only disable log verification on the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ts.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(100 * time.Millisecond)
		defer cancel()

		// Make a call, wait for it to timeout.
		clientCh := ts.NewClient(nil)
		_, _, _, err := raw.Call(ctx, clientCh, ts.HostPort(), ts.ServiceName(), "block", nil, nil)
		require.Equal(t, ErrTimeout, err, "Expected call to timeout")

		// The client channel should also close immediately.
		clientCh.Close()
		assertStateChangesTo(t, clientCh, ChannelClosed)
		assert.True(t, clientCh.Closed(), "Channel should be closed")

		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
}

// NewServer returns a new Server that can receive Thrift calls or raw calls.
func NewServer(optFns ...Option) Server {
	opts := getOptions(optFns)
	if opts.external {
		return newExternalServer(opts)
	}

	ch, err := tchannel.NewChannel(opts.svcName, &tchannel.ChannelOptions{
		Logger: tchannel.NewLevelLogger(tchannel.NewLogger(os.Stderr), tchannel.LogLevelWarn),
	})
	if err != nil {
		panic("failed to create channel: " + err.Error())
	}
	if err := ch.ListenAndServe("127.0.0.1:0"); err != nil {
		panic("failed to listen on port 0: " + err.Error())
	}

	s := &internalServer{
		ch:   ch,
		opts: opts,
	}

	tServer := thrift.NewServer(ch)
	tServer.Register(gen.NewTChanSecondServiceServer(handler{calls: &s.thriftCalls}))
	ch.Register(raw.Wrap(rawHandler{calls: &s.rawCalls}), "echo")

	if len(opts.advertiseHosts) > 0 {
		if err := s.Advertise(opts.advertiseHosts); err != nil {
			panic("failed to advertise: " + err.Error())
		}
	}

	return s
}

func (pt *peerSelectionTest) setupClient(t testing.TB) {
	pt.client, _ = pt.NewService(t, "client", "client")
	pt.client.Register(raw.Wrap(newTestHandler(pt.t)), "echo")
	for _, server := range pt.servers {
		pt.client.Peers().Add(server.PeerInfo().HostPort)
	}
}

func TestDirtyFrameRequests(t *testing.T) {
	argSizes := []int{50000, 100000, 150000}
	WithVerifiedServer(t, &testutils.ChannelOpts{
		ServiceName: "swap-server",
		DefaultConnectionOptions: ConnectionOptions{
			FramePool: dirtyFramePool{},
		},
	}, func(serverCh *Channel, hostPort string) {
		peerInfo := serverCh.PeerInfo()
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")

		for _, arg2Size := range argSizes {
			for _, arg3Size := range argSizes {
				ctx, cancel := NewContext(time.Second)
				defer cancel()

				arg2, arg3 := testutils.RandBytes(arg2Size), testutils.RandBytes(arg3Size)
				res2, res3, _, err := raw.Call(ctx, serverCh, hostPort, peerInfo.ServiceName, "swap", arg2, arg3)
				if assert.NoError(t, err, "Call failed") {
					assert.Equal(t, arg2, res3, "Result arg3 wrong")
					assert.Equal(t, arg3, res2, "Result arg2 wrong")
				}
			}
		}
	})
}

func TestCloseAfterTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ch.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(10 * time.Millisecond)
		defer cancel()

		// Make a call, wait for it to timeout.
		clientCh, err := testutils.NewClient(nil)
		require.NoError(t, err, "NewClient failed")
		peerInfo := ch.PeerInfo()
		_, _, _, err = raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "block", nil, nil)
		require.Error(t, err, "Expected call to timeout")

		// The client channel should also close immediately.
		clientCh.Close()
		runtime.Gosched()
		assert.Equal(t, ChannelClosed, clientCh.State())
		assert.True(t, clientCh.Closed(), "Channel should be closed")

		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
	VerifyNoBlockedGoroutines(t)
}

func TestTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		// onError may be called when the block call tries to write the call response.
		onError := func(ctx context.Context, err error) {
			assert.Equal(t, ErrTimeout, err, "onError err should be ErrTimeout")
			assert.Equal(t, context.DeadlineExceeded, ctx.Err(), "Context should timeout")
		}
		testHandler := onErrorTestHandler{newTestHandler(t), onError}
		ts.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "block", []byte("Arg2"), []byte("Arg3"))
		assert.Equal(t, ErrTimeout, err)

		// Verify the server-side receives an error from the context.
		select {
		case err := <-testHandler.blockErr:
			assert.Equal(t, context.DeadlineExceeded, err, "Server should have received timeout")
		case <-time.After(time.Second):
			t.Errorf("Server did not receive call, may need higher timeout")
		}

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "block").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}

func TestFragmentation(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")

		arg2 := make([]byte, MaxFramePayloadSize*2)
		for i := 0; i < len(arg2); i++ {
			arg2[i] = byte('a' + (i % 10))
		}

		arg3 := make([]byte, MaxFramePayloadSize*3)
		for i := 0; i < len(arg3); i++ {
			arg3[i] = byte('A' + (i % 10))
		}

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		respArg2, respArg3, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", arg2, arg3)
		require.NoError(t, err)
		assert.Equal(t, arg2, respArg2)
		assert.Equal(t, arg3, respArg3)

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "echo").Succeeded().End()
		ts.AssertRelayStats(calls)
	})
}

func TestRoundTrip(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		handler := newTestHandler(t)
		ch.Register(raw.Wrap(handler), "echo")

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		call, err := ch.BeginCall(ctx, hostPort, testServiceName, "echo", &CallOptions{Format: JSON})
		require.NoError(t, err)

		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(testArg2))
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(testArg3))

		var respArg2 []byte
		require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&respArg2))
		assert.Equal(t, testArg2, []byte(respArg2))

		var respArg3 []byte
		require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&respArg3))
		assert.Equal(t, testArg3, []byte(respArg3))

		assert.Equal(t, JSON, handler.format)
		assert.Equal(t, testServiceName, handler.caller)
		assert.Equal(t, JSON, call.Response().Format(), "response Format should match request Format")
	})
}

func TestDirtyFrameRequests(t *testing.T) {
	argSizes := []int{25000, 50000, 75000}

	// Create the largest required random cache.
	testutils.RandBytes(argSizes[len(argSizes)-1])

	opts := testutils.NewOpts().
		SetServiceName("swap-server").
		SetFramePool(dirtyFramePool{})
	WithVerifiedServer(t, opts, func(serverCh *Channel, hostPort string) {
		peerInfo := serverCh.PeerInfo()
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")

		for _, argSize := range argSizes {
			ctx, cancel := NewContext(time.Second)
			defer cancel()

			arg2, arg3 := testutils.RandBytes(argSize), testutils.RandBytes(argSize)
			res2, res3, _, err := raw.Call(ctx, serverCh, hostPort, peerInfo.ServiceName, "swap", arg2, arg3)
			if assert.NoError(t, err, "Call failed") {
				assert.Equal(t, arg2, res3, "Result arg3 wrong")
				assert.Equal(t, arg3, res2, "Result arg2 wrong")
			}
		}
	})
}

func TestLargeRequest(t *testing.T) {
	CheckStress(t)

	const (
		KB = 1024
		MB = 1024 * KB
		GB = 1024 * MB

		maxRequestSize = 1 * GB
	)

	WithVerifiedServer(t, nil, func(serverCh *Channel, hostPort string) {
		serverCh.Register(raw.Wrap(newTestHandler(t)), "echo")

		for reqSize := 2; reqSize <= maxRequestSize; reqSize *= 2 {
			log.Printf("reqSize = %v", reqSize)
			arg3 := testutils.RandBytes(reqSize)
			arg2 := testutils.RandBytes(reqSize / 2)

			clientCh := testutils.NewClient(t, nil)
			ctx, cancel := NewContext(time.Second * 30)
			rArg2, rArg3, _, err := raw.Call(ctx, clientCh, hostPort, serverCh.PeerInfo().ServiceName, "echo", arg2, arg3)
			require.NoError(t, err, "Call failed")

			if !bytes.Equal(arg2, rArg2) {
				t.Errorf("echo arg2 mismatch")
			}
			if !bytes.Equal(arg3, rArg3) {
				t.Errorf("echo arg3 mismatch")
			}
			cancel()
		}
	})
}

func TestStatsCalls(t *testing.T) {
	defer testutils.SetTimeout(t, time.Second)()

	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)
	clientNow, clientNowFn := testutils.NowStub(initialTime)
	serverNow, serverNowFn := testutils.NowStub(initialTime)
	// Stub the clocks so the client observes 100ms of latency per call,
	// while the server observes 50ms.
	clientNowFn(100 * time.Millisecond)
	serverNowFn(50 * time.Millisecond)

	clientStats := newRecordingStatsReporter()
	serverStats := newRecordingStatsReporter()
	serverOpts := testutils.NewOpts().
		SetStatsReporter(serverStats).
		SetTimeNow(serverNow)
	WithVerifiedServer(t, serverOpts, func(serverCh *Channel, hostPort string) {
		handler := raw.Wrap(newTestHandler(t))
		serverCh.Register(handler, "echo")
		serverCh.Register(handler, "app-error")

		ch := testutils.NewClient(t, testutils.NewOpts().
			SetStatsReporter(clientStats).
			SetTimeNow(clientNow))
		defer ch.Close()

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		outboundTags := tagsForOutboundCall(serverCh, ch, "echo")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags := tagsForInboundCall(serverCh, ch, "echo")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.success", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)

		_, _, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "app-error", nil, nil)
		require.NoError(t, err)
		require.True(t, resp.ApplicationError(), "expected application error")

		outboundTags = tagsForOutboundCall(serverCh, ch, "app-error")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.per-attempt.app-errors", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.app-errors", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags = tagsForInboundCall(serverCh, ch, "app-error")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.app-errors", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)
	})

	clientStats.Validate(t)
	serverStats.Validate(t)
}

func setupServer(b *testing.B) (ch *Channel, svcName, svcHostPort string) {
	serverCh := testutils.NewServer(b, nil)
	handler := &benchmarkHandler{}
	serverCh.Register(raw.Wrap(handler), "echo")

	peerInfo := serverCh.PeerInfo()
	return serverCh, peerInfo.ServiceName, peerInfo.HostPort
}

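// A minimal sketch (assumed, not from the original suite) of how setupServer
// might be consumed by a benchmark; the benchmark name and body are
// hypothetical, while testutils.NewClient, NewContext, and raw.Call follow
// the usage shown elsewhere in these tests.
func Benchmark_EchoWithSetupServer(b *testing.B) {
	server, svcName, svcHostPort := setupServer(b)
	defer server.Close()

	clientCh := testutils.NewClient(b, nil)
	defer clientCh.Close()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Each iteration makes one echo call against the benchmark server.
		ctx, cancel := NewContext(time.Second)
		if _, _, _, err := raw.Call(ctx, clientCh, svcHostPort, svcName, "echo", []byte("Arg2"), []byte("Arg3")); err != nil {
			b.Fatalf("Call failed: %v", err)
		}
		cancel()
	}
}
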
// setupServers will create numPeers servers, and register handlers on them.
func (pt *peerSelectionTest) setupServers(t testing.TB) {
	pt.servers = make([]*Channel, pt.numPeers)

	// Set up numPeers servers.
	for i := 0; i < pt.numPeers; i++ {
		pt.servers[i], _ = pt.NewService(t, "server", fmt.Sprintf("server-%v", i))
		pt.servers[i].Register(raw.Wrap(newTestHandler(pt.t)), "echo")
	}
}

func TestNoTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "Echo")

		ctx := context.Background()
		_, _, _, err := raw.Call(ctx, ch, hostPort, "svc", "Echo", []byte("Headers"), []byte("Body"))
		require.NotNil(t, err)
		assert.Equal(t, ErrTimeoutRequired, err)
	})
}

func main() {
	tchan, err := tchannel.NewChannel("go-echo-server", nil)
	if err != nil {
		log.Fatalf("Failed to create channel: %v", err)
	}

	listenIP, err := tchannel.ListenIP()
	if err != nil {
		log.Fatalf("Failed to get IP to listen on: %v", err)
	}

	l, err := net.Listen("tcp", listenIP.String()+":61543")
	if err != nil {
		log.Fatalf("Could not listen: %v", err)
	}
	log.Printf("Listening on %v", l.Addr())

	sc := tchan.GetSubChannel("go-echo-2")
	tchan.Register(raw.Wrap(handler{""}), "echo")
	sc.Register(raw.Wrap(handler{"subchannel:"}), "echo")
	if err := tchan.Serve(l); err != nil {
		log.Fatalf("Serve failed: %v", err)
	}

	if len(os.Args[1:]) == 0 {
		log.Fatalf("You must provide Hyperbahn nodes as arguments")
	}

	// Advertise the service with Hyperbahn.
	config := hyperbahn.Configuration{InitialNodes: os.Args[1:]}
	client, err := hyperbahn.NewClient(tchan, config, &hyperbahn.ClientOptions{
		Handler: eventHandler{},
		Timeout: time.Second,
	})
	if err != nil {
		log.Fatalf("hyperbahn.NewClient failed: %v", err)
	}
	if err := client.Advertise(sc); err != nil {
		log.Fatalf("Advertise failed: %v", err)
	}

	// Server will keep running till Ctrl-C.
	select {}
}

func TestNoTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "Echo")

		ctx := context.Background()
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), "svc", "Echo", []byte("Headers"), []byte("Body"))
		assert.Equal(t, ErrTimeoutRequired, err)

		ts.AssertRelayStats(relaytest.NewMockStats())
	})
}

func TestServerBusy(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "busy")

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "busy", []byte("Arg2"), []byte("Arg3"))
		require.NotNil(t, err)
		assert.Equal(t, ErrCodeBusy, GetSystemErrorCode(err), "err: %v", err)
	})
}

func TestLargeTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "echo")

		ctx, cancel := NewContext(1000 * time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", testArg2, testArg3)
		assert.NoError(t, err, "Call failed")
	})
	goroutines.VerifyNoLeaks(t, nil)
}

func TestLargeTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")

		ctx, cancel := NewContext(1000 * time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", testArg2, testArg3)
		assert.NoError(t, err, "Call failed")

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "echo").Succeeded().End()
		ts.AssertRelayStats(calls)
	})
}

func Benchmark_TChannel_TChannelToTChannel(b *testing.B) {
	serverCh, err := tchannel.NewChannel("server", nil)
	require.NoError(b, err, "failed to build server TChannel")
	defer serverCh.Close()

	serverCh.Register(traw.Wrap(tchannelEcho{t: b}), "echo")
	require.NoError(b, serverCh.ListenAndServe(":0"), "failed to start up TChannel")

	clientCh, err := tchannel.NewChannel("client", nil)
	require.NoError(b, err, "failed to build client TChannel")
	defer clientCh.Close()

	b.ResetTimer()
	runTChannelClient(b, clientCh, serverCh.PeerInfo().HostPort)
}

func TestRequestStateRetry(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")

		closedHostPorts := make([]string, 4)
		for i := range closedHostPorts {
			hostPort, close := testutils.GetAcceptCloseHostPort(t)
			defer close()
			closedHostPorts[i] = hostPort
		}

		// Since we close connections remotely, there will be some warnings that we can ignore.
		opts := testutils.NewOpts().DisableLogVerification()
		client := ts.NewClient(opts)
		defer client.Close()

		counter := 0
		sc := client.GetSubChannel(ts.Server().ServiceName())
		err := client.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
			defer func() { counter++ }()

			expectedPeers := counter
			if expectedPeers > 0 {
				// An entry is also added for each host.
				expectedPeers++
			}
			assert.Equal(t, expectedPeers, len(rs.SelectedPeers), "SelectedPeers should not be reused")

			if counter < 4 {
				client.Peers().Add(closedHostPorts[counter])
			} else {
				client.Peers().Add(ts.HostPort())
			}

			_, err := raw.CallV2(ctx, sc, raw.CArgs{
				Method:      "echo",
				CallOptions: &CallOptions{RequestState: rs},
			})
			return err
		})
		assert.NoError(t, err, "RunWithRetry should succeed")
		assert.Equal(t, 5, counter, "RunWithRetry should retry 5 times")
	})
}

func TestCancelled(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")
		ctx, cancel := NewContext(time.Second)

		// Make a call first to make sure we have a connection.
		// We want to test the BeginCall path.
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", []byte("Headers"), []byte("Body"))
		assert.NoError(t, err, "Call failed")

		// Now cancel the context.
		cancel()
		_, _, _, err = raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", []byte("Headers"), []byte("Body"))
		assert.Equal(t, context.Canceled, err, "Unexpected error when making call with canceled context")
	})
}

func TestDefaultFormat(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		handler := newTestHandler(t)
		ts.Register(raw.Wrap(handler), "echo")

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		arg2, arg3, resp, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", testArg2, testArg3)
		require.Nil(t, err)

		require.Equal(t, testArg2, arg2)
		require.Equal(t, testArg3, arg3)
		require.Equal(t, Raw, handler.format)
		assert.Equal(t, Raw, resp.Format(), "response Format should match request Format")
	})
}

func TestDefaultFormat(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		handler := newTestHandler(t)
		ch.Register(raw.Wrap(handler), "echo")

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		arg2, arg3, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", testArg2, testArg3)
		require.Nil(t, err)

		require.Equal(t, testArg2, arg2)
		require.Equal(t, testArg3, arg3)
		require.Equal(t, Raw, handler.format)
		assert.Equal(t, Raw, resp.Format(), "response Format should match request Format")
	})
}