func TestContextBuilder(t *testing.T) { ctx, cancel := tchannel.NewContextBuilder(time.Second).SetShardKey("shard").Build() defer cancel() var called bool testutils.WithServer(nil, func(ch *tchannel.Channel, hostPort string) { peerInfo := ch.PeerInfo() testutils.RegisterFunc(t, ch, "SecondService::Echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) { call := tchannel.CurrentCall(ctx) assert.Equal(t, peerInfo.ServiceName, call.CallerName(), "unexpected caller name") assert.Equal(t, "shard", call.ShardKey(), "unexpected shard key") assert.Equal(t, tchannel.Thrift, args.Format) called = true return nil, errors.New("err") }) client := NewClient(ch, ch.PeerInfo().ServiceName, &ClientOptions{ HostPort: peerInfo.HostPort, }) secondClient := gen.NewTChanSecondServiceClient(client) secondClient.Echo(ctx, "asd") assert.True(t, called, "test not called") }) }
// TestCloseSendError tests that system errors are not attempted to be sent when
// a connection is closed, and ensures there's no race conditions such as the error
// frame being added to the channel just as it is closed.
// TODO(prashant): This test is waiting for timeout, but socket close shouldn't wait for timeout.
func TestCloseSendError(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	serverCh, err := testutils.NewServer(nil)
	require.NoError(t, err, "NewServer failed")

	// closed is set (atomically) just before the server Close; call errors
	// after that point are expected and are not reported as test failures.
	closed := uint32(0)
	// counter counts calls the server handler has processed.
	counter := uint32(0)
	testutils.RegisterFunc(t, serverCh, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		atomic.AddUint32(&counter, 1)
		return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
	})

	clientCh, err := testutils.NewClient(nil)
	require.NoError(t, err, "NewClient failed")

	// Make a call to create a connection that will be shared.
	peerInfo := serverCh.PeerInfo()
	_, _, _, err = raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
	require.NoError(t, err, "Call should succeed")

	// Fire off many concurrent calls with jittered start times so some of
	// them race with the server Close below.
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			_, _, _, err := raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
			// A call may legitimately fail once Close has started.
			if err != nil && atomic.LoadUint32(&closed) == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for the server to have processed some number of these calls.
	for {
		if atomic.LoadUint32(&counter) >= 10 {
			break
		}
		runtime.Gosched()
	}

	// Set the flag before Close so racing goroutines tolerate errors.
	atomic.AddUint32(&closed, 1)
	serverCh.Close()

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	VerifyNoBlockedGoroutines(t)
}
func TestCloseOneSide(t *testing.T) { ctx, cancel := NewContext(time.Second) defer cancel() ch1, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: "client"}) ch2, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: "server"}) require.NoError(t, err, "NewServer 1 failed") require.NoError(t, err, "NewServer 2 failed") connected := make(chan struct{}) completed := make(chan struct{}) blockCall := make(chan struct{}) testutils.RegisterFunc(t, ch2, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) { connected <- struct{}{} <-blockCall return &raw.Res{ Arg2: args.Arg2, Arg3: args.Arg3, }, nil }) go func() { ch2Peer := ch2.PeerInfo() _, _, _, err := raw.Call(ctx, ch1, ch2Peer.HostPort, ch2Peer.ServiceName, "echo", nil, nil) assert.NoError(t, err, "Call failed") completed <- struct{}{} }() // Wait for connected before calling Close. <-connected ch1.Close() // Now unblock the call and wait for the call to complete. close(blockCall) <-completed // Once the call completes, the channel should be closed. runtime.Gosched() assert.Equal(t, ChannelClosed, ch1.State()) // We need to close all open TChannels before verifying blocked goroutines. ch2.Close() VerifyNoBlockedGoroutines(t) }
// TestCloseSingleChannel verifies that Close on a channel with pending calls
// to itself waits for all of those calls to complete before the channel
// reaches ChannelClosed.
func TestCloseSingleChannel(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()
	ch, err := testutils.NewServer(nil)
	require.NoError(t, err, "NewServer failed")

	// connected counts calls that have reached the handler; completed counts
	// calls whose responses have been received by the callers.
	var connected sync.WaitGroup
	var completed sync.WaitGroup
	blockCall := make(chan struct{})

	testutils.RegisterFunc(t, ch, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		connected.Done()
		// Hold every call open until the test closes blockCall.
		<-blockCall
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	})

	for i := 0; i < 10; i++ {
		connected.Add(1)
		completed.Add(1)
		go func() {
			peerInfo := ch.PeerInfo()
			_, _, _, err := raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
			assert.NoError(t, err, "Call failed")
			completed.Done()
		}()
	}

	// Wait for all calls to connect before triggering the Close (so they do not fail).
	connected.Wait()
	ch.Close()

	// Unblock the calls, and wait for all the calls to complete.
	close(blockCall)
	completed.Wait()

	// Once all calls are complete, the channel should be closed.
	runtime.Gosched()
	assert.Equal(t, ChannelClosed, ch.State())
	VerifyNoBlockedGoroutines(t)
}
// TestActiveCallReq verifies that an incoming call whose exchange ID collides
// with an already-active call is rejected with an "already active" error.
func TestActiveCallReq(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Note: This test leaks a message exchange due to the modification of IDs in the relay.
	require.NoError(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		gotCall := make(chan struct{})
		unblock := make(chan struct{})

		// Handler signals receipt, then blocks so the first call stays active.
		testutils.RegisterFunc(t, ch, "blocked", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			gotCall <- struct{}{}
			<-unblock
			return &raw.Res{}, nil
		})

		// Rewrite outgoing frames with ID 2 to ID 3 so a later call's frames
		// arrive under an exchange ID the server presumably still considers
		// active (NOTE(review): depends on the channel's ID assignment order).
		relayFunc := func(outgoing bool, frame *Frame) *Frame {
			if outgoing && frame.Header.ID == 2 {
				frame.Header.ID = 3
			}
			return frame
		}

		relayHostPort, closeRelay := testutils.FrameRelay(t, hostPort, relayFunc)
		defer closeRelay()

		go func() {
			// This call will block until we close unblock.
			raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		}()

		// Wait for the first call to be received by the server
		<-gotCall

		// Make a new call, which should fail
		_, _, _, err := raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		assert.Error(t, err, "Expect error")
		assert.True(t, strings.Contains(err.Error(), "already active"),
			"expected already active error, got %v", err)

		close(unblock)
	}))
}
func TestShardKeyPropagates(t *testing.T) { WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) { peerInfo := ch.PeerInfo() testutils.RegisterFunc(t, ch, "test", func(ctx context.Context, args *raw.Args) (*raw.Res, error) { return &raw.Res{ Arg3: []byte(CurrentCall(ctx).ShardKey()), }, nil }) ctx, cancel := NewContextBuilder(time.Second).Build() defer cancel() _, arg3, _, err := raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "test", nil, nil) assert.NoError(t, err, "Call failed") assert.Equal(t, arg3, []byte("")) ctx, cancel = NewContextBuilder(time.Second). SetShardKey("shard").Build() defer cancel() _, arg3, _, err = raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "test", nil, nil) assert.NoError(t, err, "Call failed") assert.Equal(t, string(arg3), "shard") }) }
// TestCloseSemantics exercises the channel close state machine: a closing
// channel rejects new incoming calls, still allows outgoing calls while it
// has pending incoming calls, moves to ChannelInboundClosed once inbound
// traffic drains, and reaches ChannelClosed when all exchanges complete.
func TestCloseSemantics(t *testing.T) {
	defer testutils.SetTimeout(t, 2*time.Second)()
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// makeServer creates a server with a "stream" handler that blocks until
	// the returned channel is signalled, and a "call" handler that returns
	// immediately.
	makeServer := func(name string) (*Channel, chan struct{}) {
		ch, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: name})
		require.NoError(t, err)
		c := make(chan struct{})
		testutils.RegisterFunc(t, ch, "stream", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			<-c
			return &raw.Res{}, nil
		})
		testutils.RegisterFunc(t, ch, "call", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})
		return ch, c
	}

	// withNewClient runs f with a fresh client channel, closing it afterwards.
	withNewClient := func(f func(ch *Channel)) {
		ch, err := testutils.NewClient(&testutils.ChannelOpts{ServiceName: "client"})
		require.NoError(t, err)
		f(ch)
		ch.Close()
	}

	// call makes a single "call" request from one channel to another.
	call := func(from *Channel, to *Channel) error {
		toPeer := to.PeerInfo()
		_, _, _, err := raw.Call(ctx, from, toPeer.HostPort, toPeer.ServiceName, "call", nil, nil)
		return err
	}

	// callStream starts a "stream" call and reads the response in a
	// background goroutine; the returned channel is signalled when the
	// response has been fully read.
	callStream := func(from *Channel, to *Channel) <-chan struct{} {
		c := make(chan struct{})

		toPeer := to.PeerInfo()
		call, err := from.BeginCall(ctx, toPeer.HostPort, toPeer.ServiceName, "stream", nil)
		require.NoError(t, err)
		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(nil), "write arg2")
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(nil), "write arg3")

		go func() {
			var d []byte
			require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&d), "read arg2 from %v to %v", from.PeerInfo(), to.PeerInfo())
			require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&d), "read arg3")
			c <- struct{}{}
		}()

		return c
	}

	s1, s1C := makeServer("s1")
	s2, s2C := makeServer("s2")

	// Make a call from s1 -> s2, and s2 -> s1
	call1 := callStream(s1, s2)
	call2 := callStream(s2, s1)

	// s1 and s2 are both open, so calls to it should be successful.
	withNewClient(func(ch *Channel) {
		require.NoError(t, call(ch, s1))
		require.NoError(t, call(ch, s2))
	})
	require.NoError(t, call(s1, s2))
	require.NoError(t, call(s2, s1))

	// Close s1, should no longer be able to call it.
	s1.Close()
	assert.Equal(t, ChannelStartClose, s1.State())
	withNewClient(func(ch *Channel) {
		assert.Error(t, call(ch, s1), "closed channel should not accept incoming calls")
		require.NoError(t, call(ch, s2),
			"closed channel with pending incoming calls should allow outgoing calls")
	})

	// Even an existing connection (e.g. from s2) should fail.
	assert.Equal(t, ErrChannelClosed, call(s2, s1), "closed channel should not accept incoming calls")
	require.NoError(t, call(s1, s2),
		"closed channel with pending incoming calls should allow outgoing calls")

	// Once the incoming connection is drained, outgoing calls should fail.
	s1C <- struct{}{}
	<-call2
	assert.Equal(t, ChannelInboundClosed, s1.State())
	require.Error(t, call(s1, s2),
		"closed channel with no pending incoming calls should not allow outgoing calls")

	// Now the channel should be completely closed as there are no pending connections.
	s2C <- struct{}{}
	<-call1
	assert.Equal(t, ChannelClosed, s1.State())

	// Close s2 so we don't leave any goroutines running.
	s2.Close()
	VerifyNoBlockedGoroutines(t)
}