Example #1
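// makeServer starts a server channel with the given service name and registers
// two handlers: "stream", which blocks until the returned chan is signalled,
// and "call", which responds immediately.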
func (t *closeSemanticsTest) makeServer(name string) (*Channel, chan struct{}) {
	ch := testutils.NewServer(t.T, &testutils.ChannelOpts{ServiceName: name})

	c := make(chan struct{})
	testutils.RegisterFunc(ch, "stream", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		<-c
		return &raw.Res{}, nil
	})
	testutils.RegisterFunc(ch, "call", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		return &raw.Res{}, nil
	})
	return ch, c
}
Example #2
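// TestContextBuilder verifies that call options set via the context builder,
// such as the shard key, are visible to the server handler through CurrentCall.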
func TestContextBuilder(t *testing.T) {
	ctx, cancel := tchannel.NewContextBuilder(time.Second).SetShardKey("shard").Build()
	defer cancel()

	var called bool
	testutils.WithServer(t, nil, func(ch *tchannel.Channel, hostPort string) {
		peerInfo := ch.PeerInfo()

		testutils.RegisterFunc(ch, "SecondService::Echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			call := tchannel.CurrentCall(ctx)
			assert.Equal(t, peerInfo.ServiceName, call.CallerName(), "unexpected caller name")
			assert.Equal(t, "shard", call.ShardKey(), "unexpected shard key")
			assert.Equal(t, tchannel.Thrift, args.Format)
			called = true
			return nil, errors.New("err")
		})

		client := NewClient(ch, ch.PeerInfo().ServiceName, &ClientOptions{
			HostPort: peerInfo.HostPort,
		})
		secondClient := gen.NewTChanSecondServiceClient(client)
		secondClient.Echo(ctx, "asd")
		assert.True(t, called, "test not called")
	})
}
Example #3
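// TestRaceExchangesWithClose races channel close against new connections: while
// one call is held open on the server, many short-lived clients ping and call
// the server as it is being closed.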
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup

	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()

	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()

		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})

		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})

		client := ts.NewClient(opts)
		defer client.Close()

		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()

		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall

		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()

				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}

		// Now try to close the channel; it should block since there are active exchanges.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")

		close(completeCall)
		<-callDone
	})

	// Wait for all calls to complete
	wg.Wait()
}
Example #4
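// TestTraceSamplingRate verifies that the fraction of calls with tracing
// enabled falls within the expected bounds for a variety of sample rates.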
func TestTraceSamplingRate(t *testing.T) {
	rand.Seed(10)

	tests := []struct {
		sampleRate  float64 // if this is < 0, then the value is not set.
		count       int
		expectedMin int
		expectedMax int
	}{
		{1.0, 100, 100, 100},
		{0.5, 100, 40, 60},
		{0.1, 100, 5, 15},
		{0, 100, 0, 0},
		{-1, 100, 100, 100}, // default of 1.0 should be used.
	}

	for _, tt := range tests {
		var reportedTraces int
		testTraceReporter := TraceReporterFunc(func(_ TraceData) {
			reportedTraces++
		})

		WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
			var tracedCalls int
			testutils.RegisterFunc(ch, "t", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				if CurrentSpan(ctx).TracingEnabled() {
					tracedCalls++
				}

				return &raw.Res{}, nil
			})

			opts := testutils.NewOpts().SetTraceReporter(testTraceReporter)
			if tt.sampleRate >= 0 {
				opts.SetTraceSampleRate(tt.sampleRate)
			}

			client := testutils.NewClient(t, opts)
			defer client.Close()

			for i := 0; i < tt.count; i++ {
				ctx, cancel := NewContext(time.Second)
				defer cancel()

				_, _, _, err := raw.Call(ctx, client, hostPort, ch.PeerInfo().ServiceName, "t", nil, nil)
				require.NoError(t, err, "raw.Call failed")
			}

			assert.Equal(t, reportedTraces, tracedCalls,
				"Number of traces reported doesn't match calls with tracing enabled")
			assert.True(t, tracedCalls >= tt.expectedMin,
				"Number of trace enabled calls (%v) expected to be at least %v", tracedCalls, tt.expectedMin)
			assert.True(t, tracedCalls <= tt.expectedMax,
				"Number of trace enabled calls (%v) expected to be at most %v", tracedCalls, tt.expectedMax)
		})
	}
}
Example #5
// TestCloseSendError tests that we don't attempt to send system errors once a
// connection is closed, and that there are no race conditions such as the error
// frame being added to the channel just as it is closed.
// TODO(prashant): This test is waiting for timeout, but socket close shouldn't wait for timeout.
func TestCloseSendError(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	serverCh, err := testutils.NewServer(nil)
	require.NoError(t, err, "NewServer failed")

	closed := uint32(0)
	counter := uint32(0)
	testutils.RegisterFunc(t, serverCh, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		atomic.AddUint32(&counter, 1)
		return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
	})

	clientCh, err := testutils.NewClient(nil)
	require.NoError(t, err, "NewClient failed")

	// Make a call to create a connection that will be shared.
	peerInfo := serverCh.PeerInfo()
	_, _, _, err = raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
	require.NoError(t, err, "Call should succeed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			_, _, _, err := raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
			if err != nil && atomic.LoadUint32(&closed) == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for the server to have processed some number of these calls.
	for {
		if atomic.LoadUint32(&counter) >= 10 {
			break
		}
		runtime.Gosched()
	}

	atomic.AddUint32(&closed, 1)
	serverCh.Close()

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	VerifyNoBlockedGoroutines(t)
}
Example #6
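// TestReadTimeout repeatedly makes calls whose handler cancels the context and
// returns ErrTimeout, exercising the read path when a call times out.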
func TestReadTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		for i := 0; i < 100; i++ {
			ctx, cancel := NewContext(time.Second)
			handler := func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				defer cancel()
				return nil, ErrTimeout
			}
			testutils.RegisterFunc(t, ch, "call", handler)
			raw.Call(ctx, ch, hostPort, ch.PeerInfo().ServiceName, "call", nil, nil)
		}
	})
}
Example #7
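// TestRemotePeer verifies that CurrentCall(ctx).RemotePeer() reports the
// expected host:port, ephemeral flag, and process name for both an ephemeral
// client and a listening server.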
func TestRemotePeer(t *testing.T) {
	tests := []struct {
		name       string
		remote     *Channel
		expectedFn func(state *RuntimeState, serverHP string) PeerInfo
	}{
		{
			name:   "ephemeral client",
			remote: testutils.NewClient(t, nil),
			expectedFn: func(state *RuntimeState, serverHP string) PeerInfo {
				hostPort := state.RootPeers[serverHP].OutboundConnections[0].LocalHostPort
				return PeerInfo{
					HostPort:    hostPort,
					IsEphemeral: true,
					ProcessName: state.LocalPeer.ProcessName,
				}
			},
		},
		{
			name:   "listening server",
			remote: testutils.NewServer(t, nil),
			expectedFn: func(state *RuntimeState, _ string) PeerInfo {
				return PeerInfo{
					HostPort:    state.LocalPeer.HostPort,
					IsEphemeral: false,
					ProcessName: state.LocalPeer.ProcessName,
				}
			},
		},
	}

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	for _, tt := range tests {
		WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
			defer tt.remote.Close()

			gotPeer := make(chan PeerInfo, 1)
			testutils.RegisterFunc(ch, "test", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				gotPeer <- CurrentCall(ctx).RemotePeer()
				return &raw.Res{}, nil
			})

			_, _, _, err := raw.Call(ctx, tt.remote, hostPort, ch.ServiceName(), "test", nil, nil)
			assert.NoError(t, err, "%v: Call failed", tt.name)
			expected := tt.expectedFn(tt.remote.IntrospectState(nil), hostPort)
			assert.Equal(t, expected, <-gotPeer, "%v: RemotePeer mismatch", tt.name)
		})
	}
}
Example #8
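// TestActiveCallReq uses a frame relay to rewrite a call's message ID so it
// collides with an exchange that is already active, and expects the second
// call to fail with an "already active" error.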
func TestActiveCallReq(t *testing.T) {
	t.Skip("Test skipped due to unreliable way to test for protocol errors")

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Note: This test cannot use log verification as the duplicate ID causes a log.
	// It does not use a verified server, as it leaks a message exchange due to the
	// modification of IDs in the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithServer(t, opts, func(ch *Channel, hostPort string) {
		gotCall := make(chan struct{})
		unblock := make(chan struct{})

		testutils.RegisterFunc(ch, "blocked", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			gotCall <- struct{}{}
			<-unblock
			return &raw.Res{}, nil
		})

		relayFunc := func(outgoing bool, frame *Frame) *Frame {
			if outgoing && frame.Header.ID == 3 {
				frame.Header.ID = 2
			}
			return frame
		}

		relayHostPort, closeRelay := testutils.FrameRelay(t, hostPort, relayFunc)
		defer closeRelay()

		firstComplete := make(chan struct{})
		go func() {
			// This call will block until we close unblock.
			raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
			close(firstComplete)
		}()

		// Wait for the first call to be received by the server
		<-gotCall

		// Make a new call, which should fail
		_, _, _, err := raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		assert.Error(t, err, "Expect error")
		assert.True(t, strings.Contains(err.Error(), "already active"),
			"expected already active error, got %v", err)

		close(unblock)
		<-firstComplete
	})
}
Example #9
func TestInboundConnection(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		testutils.RegisterFunc(t, ch, "test", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			c, _ := InboundConnection(CurrentCall(ctx))
			assert.Equal(t, hostPort, c.RemotePeerInfo().HostPort, "Unexpected host port")
			return &raw.Res{}, nil
		})

		_, _, _, err := raw.Call(ctx, ch, hostPort, ch.PeerInfo().ServiceName, "test", nil, nil)
		require.NoError(t, err, "Call failed")
	})
}
Example #10
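// TestChildCallsNotSampled verifies that a child call inherits the tracing
// decision of the incoming call rather than being re-sampled against the
// intermediate server's low sample rate.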
func TestChildCallsNotSampled(t *testing.T) {
	var traceEnabledCalls int

	s1 := testutils.NewServer(t, testutils.NewOpts().SetTraceSampleRate(0.0001))
	defer s1.Close()
	s2 := testutils.NewServer(t, nil)
	defer s2.Close()

	testutils.RegisterFunc(s1, "s1", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		_, _, _, err := raw.Call(ctx, s1, s2.PeerInfo().HostPort, s2.ServiceName(), "s2", nil, nil)
		require.NoError(t, err, "raw.Call from s1 to s2 failed")
		return &raw.Res{}, nil
	})

	testutils.RegisterFunc(s2, "s2", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		if CurrentSpan(ctx).TracingEnabled() {
			traceEnabledCalls++
		}
		return &raw.Res{}, nil
	})

	client := testutils.NewClient(t, nil)
	defer client.Close()

	const numCalls = 100
	for i := 0; i < numCalls; i++ {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, client, s1.PeerInfo().HostPort, s1.ServiceName(), "s1", nil, nil)
		require.NoError(t, err, "raw.Call to s1 failed")
	}

	// Even though s1 has sampling enabled, it should not affect incoming calls.
	assert.Equal(t, numCalls, traceEnabledCalls, "Trace sampling should not affect inbound calls")
}
Example #11
// Ensure that any connections created in the relay path send the ephemeral
// host:port.
func TestRelayOutgoingConnectionsEphemeral(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s2 := ts.NewServer(serviceNameOpts("s2"))
		testutils.RegisterFunc(s2, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			assert.True(t, CurrentCall(ctx).RemotePeer().IsEphemeral,
				"Connections created for the relay should send ephemeral host:port header")

			return &raw.Res{
				Arg2: args.Arg2,
				Arg3: args.Arg3,
			}, nil
		})

		require.NoError(t, testutils.CallEcho(ts.Server(), ts.HostPort(), "s2", nil), "CallEcho failed")
	})
}
Example #12
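// TestCloseSingleChannel has a channel call itself, closes it while the calls
// are blocked in the handler, and verifies that the channel only reaches
// ChannelClosed once every call has completed.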
func TestCloseSingleChannel(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	ch, err := testutils.NewServer(nil)
	require.NoError(t, err, "NewServer failed")

	var connected sync.WaitGroup
	var completed sync.WaitGroup
	blockCall := make(chan struct{})

	testutils.RegisterFunc(t, ch, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		connected.Done()
		<-blockCall
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	})

	for i := 0; i < 10; i++ {
		connected.Add(1)
		completed.Add(1)
		go func() {
			peerInfo := ch.PeerInfo()
			_, _, _, err := raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
			assert.NoError(t, err, "Call failed")
			completed.Done()
		}()
	}

	// Wait for all calls to connect before triggering the Close (so they do not fail).
	connected.Wait()
	ch.Close()

	// Unblock the calls, and wait for all the calls to complete.
	close(blockCall)
	completed.Wait()

	// Once all calls are complete, the channel should be closed.
	runtime.Gosched()
	assert.Equal(t, ChannelClosed, ch.State())
	VerifyNoBlockedGoroutines(t)
}
Example #13
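// TestCloseOneSide closes the client channel while its call to the server is
// still in flight, and verifies the client only reaches ChannelClosed once
// that call completes.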
func TestCloseOneSide(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	ch1, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: "client"})
	require.NoError(t, err, "NewServer 1 failed")
	ch2, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: "server"})
	require.NoError(t, err, "NewServer 2 failed")

	connected := make(chan struct{})
	completed := make(chan struct{})
	blockCall := make(chan struct{})
	testutils.RegisterFunc(t, ch2, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		connected <- struct{}{}
		<-blockCall
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	})

	go func() {
		ch2Peer := ch2.PeerInfo()
		_, _, _, err := raw.Call(ctx, ch1, ch2Peer.HostPort, ch2Peer.ServiceName, "echo", nil, nil)
		assert.NoError(t, err, "Call failed")
		completed <- struct{}{}
	}()

	// Wait for connected before calling Close.
	<-connected
	ch1.Close()

	// Now unblock the call and wait for the call to complete.
	close(blockCall)
	<-completed

	// Once the call completes, the channel should be closed.
	runtime.Gosched()
	assert.Equal(t, ChannelClosed, ch1.State())

	// We need to close all open TChannels before verifying blocked goroutines.
	ch2.Close()
	VerifyNoBlockedGoroutines(t)
}
Example #14
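// TestActiveCallReq rewrites an outgoing frame's message ID so it collides
// with an in-flight exchange, and expects the colliding call to fail with an
// "already active" error.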
func TestActiveCallReq(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Note: This test leaks a message exchange due to the modification of IDs in the relay.
	require.NoError(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		gotCall := make(chan struct{})
		unblock := make(chan struct{})

		testutils.RegisterFunc(t, ch, "blocked", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			gotCall <- struct{}{}
			<-unblock
			return &raw.Res{}, nil
		})

		relayFunc := func(outgoing bool, frame *Frame) *Frame {
			if outgoing && frame.Header.ID == 2 {
				frame.Header.ID = 3
			}
			return frame
		}

		relayHostPort, closeRelay := testutils.FrameRelay(t, hostPort, relayFunc)
		defer closeRelay()

		go func() {
			// This call will block until we close unblock.
			raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		}()

		// Wait for the first call to be received by the server
		<-gotCall

		// Make a new call, which should fail
		_, _, _, err := raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		assert.Error(t, err, "Expect error")
		assert.True(t, strings.Contains(err.Error(), "already active"),
			"expected already active error, got %v", err)

		close(unblock)
	}))
}
Example #15
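// TestReadTimeout cancels each call from inside the handler and expects the
// client side to fail with context.Canceled, filtering the log noise from the
// resulting connection errors.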
func TestReadTimeout(t *testing.T) {
	// The error frame may fail to send since the connection closes before the
	// handler sends it, or the handler's write may fail when the other side
	// closes the connection.
	opts := testutils.NewOpts().
		AddLogFilter("Couldn't send outbound error frame", 1).
		// TODO: Make the log message more specific by checking that the site is "read frames".
		AddLogFilter("Connection error", 1)
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		for i := 0; i < 10; i++ {
			ctx, cancel := NewContext(time.Second)
			handler := func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				defer cancel()
				return nil, ErrTimeout
			}
			testutils.RegisterFunc(ch, "call", handler)
			_, _, _, err := raw.Call(ctx, ch, hostPort, ch.PeerInfo().ServiceName, "call", nil, nil)
			assert.Equal(t, context.Canceled, err, "Call should fail due to cancel")
		}
	})
}
Example #16
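// TestRoutingDelegatePropagates verifies that the routing delegate set on the
// context is visible to the server via CurrentCall(ctx).RoutingDelegate().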
func TestRoutingDelegatePropagates(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		peerInfo := ch.PeerInfo()
		testutils.RegisterFunc(ch, "test", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{
				Arg3: []byte(CurrentCall(ctx).RoutingDelegate()),
			}, nil
		})

		ctx, cancel := NewContextBuilder(time.Second).Build()
		defer cancel()
		_, arg3, _, err := raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "test", nil, nil)
		assert.NoError(t, err, "Call failed")
		assert.Equal(t, "", string(arg3), "Expected no routing delegate header")

		ctx, cancel = NewContextBuilder(time.Second).SetRoutingDelegate("xpr").Build()
		defer cancel()
		_, arg3, _, err = raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "test", nil, nil)
		assert.NoError(t, err, "Call failed")
		assert.Equal(t, "xpr", string(arg3), "Expected routing delegate header to be set")
	})
}
Example #17
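// TestShardKeyPropagates verifies that the shard key set on the context is
// visible to the server via CurrentCall(ctx).ShardKey().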
func TestShardKeyPropagates(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		peerInfo := ch.PeerInfo()
		testutils.RegisterFunc(t, ch, "test", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{
				Arg3: []byte(CurrentCall(ctx).ShardKey()),
			}, nil
		})

		ctx, cancel := NewContextBuilder(time.Second).Build()
		defer cancel()
		_, arg3, _, err := raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "test", nil, nil)
		assert.NoError(t, err, "Call failed")
		assert.Equal(t, []byte(""), arg3)

		ctx, cancel = NewContextBuilder(time.Second).
			SetShardKey("shard").Build()
		defer cancel()
		_, arg3, _, err = raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "test", nil, nil)
		assert.NoError(t, err, "Call failed")
		assert.Equal(t, "shard", string(arg3))
	})
}
Example #18
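// TestCloseSemantics drives a channel through the close states: StartClose
// while inbound calls are pending, InboundClosed once they drain, and Closed
// when all exchanges have completed.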
func TestCloseSemantics(t *testing.T) {
	// We defer the check as we want it to run after the SetTimeout clears the timeout.
	defer VerifyNoBlockedGoroutines(t)
	defer testutils.SetTimeout(t, 2*time.Second)()
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	makeServer := func(name string) (*Channel, chan struct{}) {
		ch, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: name})
		require.NoError(t, err)
		c := make(chan struct{})
		testutils.RegisterFunc(t, ch, "stream", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			<-c
			return &raw.Res{}, nil
		})
		testutils.RegisterFunc(t, ch, "call", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})
		return ch, c
	}

	withNewClient := func(f func(ch *Channel)) {
		ch, err := testutils.NewClient(&testutils.ChannelOpts{ServiceName: "client"})
		require.NoError(t, err)
		f(ch)
		ch.Close()
	}

	call := func(from *Channel, to *Channel) error {
		toPeer := to.PeerInfo()
		_, _, _, err := raw.Call(ctx, from, toPeer.HostPort, toPeer.ServiceName, "call", nil, nil)
		return err
	}

	callStream := func(from *Channel, to *Channel) <-chan struct{} {
		c := make(chan struct{})

		toPeer := to.PeerInfo()
		call, err := from.BeginCall(ctx, toPeer.HostPort, toPeer.ServiceName, "stream", nil)
		require.NoError(t, err)
		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(nil), "write arg2")
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(nil), "write arg3")

		go func() {
			var d []byte
			require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&d), "read arg2 from %v to %v", from.PeerInfo(), to.PeerInfo())
			require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&d), "read arg3")
			c <- struct{}{}
		}()

		return c
	}

	s1, s1C := makeServer("s1")
	s2, s2C := makeServer("s2")

	// Make a call from s1 -> s2, and s2 -> s1
	call1 := callStream(s1, s2)
	call2 := callStream(s2, s1)

	// s1 and s2 are both open, so calls to them should succeed.
	withNewClient(func(ch *Channel) {
		require.NoError(t, call(ch, s1))
		require.NoError(t, call(ch, s2))
	})
	require.NoError(t, call(s1, s2))
	require.NoError(t, call(s2, s1))

	// Close s1, should no longer be able to call it.
	s1.Close()
	assert.Equal(t, ChannelStartClose, s1.State())
	withNewClient(func(ch *Channel) {
		assert.Error(t, call(ch, s1), "closed channel should not accept incoming calls")
		require.NoError(t, call(ch, s2),
			"closed channel with pending incoming calls should allow outgoing calls")
	})

	// Even an existing connection (e.g. from s2) should fail.
	assert.Equal(t, ErrChannelClosed, call(s2, s1), "closed channel should not accept incoming calls")

	require.NoError(t, call(s1, s2),
		"closed channel with pending incoming calls should allow outgoing calls")

	// Once the incoming connection is drained, outgoing calls should fail.
	s1C <- struct{}{}
	<-call2
	runtime.Gosched()
	assert.Equal(t, ChannelInboundClosed, s1.State())
	require.Error(t, call(s1, s2),
		"closed channel with no pending incoming calls should not allow outgoing calls")

	// Now the channel should be completely closed as there are no pending connections.
	s2C <- struct{}{}
	<-call1
	runtime.Gosched()
	assert.Equal(t, ChannelClosed, s1.State())

	// Close s2 so we don't leave any goroutines running.
	s2.Close()
}
Example #19
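// TestStatsWithRetries verifies the stats emitted for retried calls:
// per-attempt latencies, retry counters tagged with the retry count, and the
// overall call latency.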
func TestStatsWithRetries(t *testing.T) {
	defer testutils.SetTimeout(t, 2*time.Second)()
	a := testutils.DurationArray

	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)
	nowStub, nowFn := testutils.NowStub(initialTime)

	clientStats := newRecordingStatsReporter()
	ch := testutils.NewClient(t, testutils.NewOpts().
		SetStatsReporter(clientStats).
		SetTimeNow(nowStub))
	defer ch.Close()

	nowFn(10 * time.Millisecond)
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// TODO why do we need this??
	opts := testutils.NewOpts().NoRelay()
	WithVerifiedServer(t, opts, func(serverCh *Channel, hostPort string) {
		respErr := make(chan error, 1)
		testutils.RegisterFunc(serverCh, "req", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, <-respErr
		})
		ch.Peers().Add(serverCh.PeerInfo().HostPort)

		// timeNow is called at:
		// RunWithRetry start, per-attempt start, per-attempt end.
		// Each attempt takes 2 * step.
		tests := []struct {
			expectErr           error
			numFailures         int
			numAttempts         int
			overallLatency      time.Duration
			perAttemptLatencies []time.Duration
		}{
			{
				numFailures:         0,
				numAttempts:         1,
				perAttemptLatencies: a(10 * time.Millisecond),
				overallLatency:      20 * time.Millisecond,
			},
			{
				numFailures:         1,
				numAttempts:         2,
				perAttemptLatencies: a(10*time.Millisecond, 10*time.Millisecond),
				overallLatency:      40 * time.Millisecond,
			},
			{
				numFailures:         4,
				numAttempts:         5,
				perAttemptLatencies: a(10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond),
				overallLatency:      100 * time.Millisecond,
			},
			{
				numFailures:         5,
				numAttempts:         5,
				expectErr:           ErrServerBusy,
				perAttemptLatencies: a(10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond),
				overallLatency:      100 * time.Millisecond,
			},
		}

		for _, tt := range tests {
			clientStats.Reset()
			err := ch.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
				if rs.Attempt > tt.numFailures {
					respErr <- nil
				} else {
					respErr <- ErrServerBusy
				}

				sc := ch.GetSubChannel(serverCh.ServiceName())
				_, err := raw.CallV2(ctx, sc, raw.CArgs{
					Method:      "req",
					CallOptions: &CallOptions{RequestState: rs},
				})
				return err
			})
			assert.Equal(t, tt.expectErr, err, "RunWithRetry unexpected error")

			outboundTags := tagsForOutboundCall(serverCh, ch, "req")
			if tt.expectErr == nil {
				clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
			}
			clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, int64(tt.numAttempts))
			for i, latency := range tt.perAttemptLatencies {
				clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, latency)
				if i > 0 {
					tags := tagsForOutboundCall(serverCh, ch, "req")
					tags["retry-count"] = fmt.Sprint(i)
					clientStats.Expected.IncCounter("outbound.calls.retries", tags, 1)
				}
			}
			clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, tt.overallLatency)
			clientStats.Validate(t)
		}
	})
}