Example #1
func TestTimeoutCallsThenClose(t *testing.T) {
	// Test needs at least 2 CPUs to trigger race conditions.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	opts := serviceNameOpts("s1").SetRelayOnly().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2").DisableLogVerification())

		unblockEcho := make(chan struct{})
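		// The echo handler registered below blocks until unblockEcho is closed,
		// keeping its calls in flight.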
		testutils.RegisterEcho(s1, func() {
			<-unblockEcho
		})

		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()

		var callers sync.WaitGroup
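		// Fire off concurrent calls to the blocked handler, ignoring errors;
		// some calls may time out before the handler is unblocked.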
		for i := 0; i < 100; i++ {
			callers.Add(1)
			go func() {
				defer callers.Done()
				raw.Call(ctx, s2, ts.HostPort(), "s1", "echo", nil, nil)
			}()
		}

		close(unblockEcho)

		// Wait for all the callers to end
		callers.Wait()
	})
}
Example #2
func TestNetDialTimeout(t *testing.T) {
	// timeoutHostPort uses a blackholed address (RFC 6890) with a port
	// reserved for documentation. This address should always cause a timeout.
	const timeoutHostPort = "192.18.0.254:44444"
	timeoutPeriod := testutils.Timeout(50 * time.Millisecond)

	client := testutils.NewClient(t, nil)
	defer client.Close()

	started := time.Now()
	ctx, cancel := NewContext(timeoutPeriod)
	defer cancel()

	err := client.Ping(ctx, timeoutHostPort)
	if !assert.Error(t, err, "Ping to blackhole address should fail") {
		return
	}

	if strings.Contains(err.Error(), "network is unreachable") {
		t.Skipf("Skipping test, as network interface may not be available")
	}

	d := time.Since(started)
	assert.Equal(t, ErrTimeout, err, "Ping expected to fail with timeout")
	assert.True(t, d >= timeoutPeriod, "Timeout should take at least %v, took %v", timeoutPeriod, d)
}
Example #3
func TestTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		// onError may be called when the block call tries to write the call response.
		onError := func(ctx context.Context, err error) {
			assert.Equal(t, ErrTimeout, err, "onError err should be ErrTimeout")
			assert.Equal(t, context.DeadlineExceeded, ctx.Err(), "Context should timeout")
		}
		testHandler := onErrorTestHandler{newTestHandler(t), onError}
		ts.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "block", []byte("Arg2"), []byte("Arg3"))
		assert.Equal(t, ErrTimeout, err)

		// Verify the server-side receives an error from the context.
		select {
		case err := <-testHandler.blockErr:
			assert.Equal(t, context.DeadlineExceeded, err, "Server should have received timeout")
		case <-time.After(time.Second):
			t.Errorf("Server did not receive call, may need higher timeout")
		}

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "block").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example #4
func TestWriteErrorAfterTimeout(t *testing.T) {
	// TODO: Make this test block at different points (e.g. before, during read/write).
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		timedOut := make(chan struct{})
		done := make(chan struct{})
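		// The handler waits for its context to expire and for the client to observe
		// the timeout, then verifies that reading args and replying both fail.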
		handler := func(ctx context.Context, call *InboundCall) {
			<-ctx.Done()
			<-timedOut
			_, err := raw.ReadArgs(call)
			assert.Equal(t, ErrTimeout, err, "Read args should fail with timeout")
			response := call.Response()
			assert.Equal(t, ErrTimeout, response.SendSystemError(ErrServerBusy), "SendSystemError should fail")
			close(done)
		}
		ts.Register(HandlerFunc(handler), "call")

		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, testutils.RandBytes(100000))
		assert.Equal(t, ErrTimeout, err, "Call should timeout")
		close(timedOut)

		select {
		case <-done:
		case <-time.After(time.Second):
			t.Errorf("Handler not called, timeout may be too low")
		}

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "call").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example #5
func TestFragmentationSlowReader(t *testing.T) {
	startReading, handlerComplete := make(chan struct{}), make(chan struct{})
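	// The handler delays reading args until the context has expired and the test
	// signals startReading, so incoming frames are dropped due to the slow read.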
	handler := func(ctx context.Context, call *InboundCall) {
		<-ctx.Done()
		<-startReading
		_, err := raw.ReadArgs(call)
		assert.Error(t, err, "ReadArgs should fail since frames will be dropped due to slow reading")
		close(handlerComplete)
	}

	// Inbound forward will time out and cause a warning log.
	opts := testutils.NewOpts().AddLogFilter("Unable to forward frame", 1)
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		ch.Register(HandlerFunc(handler), "echo")

		arg2 := testutils.RandBytes(MaxFramePayloadSize * MexChannelBufferSize)
		arg3 := testutils.RandBytes(MaxFramePayloadSize * (MexChannelBufferSize + 1))

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", arg2, arg3)
		assert.Error(t, err, "Call should timeout due to slow reader")

		close(startReading)
		<-handlerComplete
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #6
func TestWriteErrorAfterTimeout(t *testing.T) {
	// TODO: Make this test block at different points (e.g. before, during read/write).
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		timedOut := make(chan struct{})
		done := make(chan struct{})
		handler := func(ctx context.Context, call *InboundCall) {
			<-ctx.Done()
			<-timedOut
			_, err := raw.ReadArgs(call)
			assert.Equal(t, ErrTimeout, err, "Read args should fail with timeout")
			response := call.Response()
			assert.Equal(t, ErrTimeout, response.SendSystemError(ErrServerBusy), "SendSystemError should fail")
			close(done)
		}
		ch.Register(HandlerFunc(handler), "call")

		ctx, cancel := NewContext(testutils.Timeout(20 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "call", nil, testutils.RandBytes(100000))
		assert.Equal(t, ErrTimeout, err, "Call should timeout")
		close(timedOut)
		<-done
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #7
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup

	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()

	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()

		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
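		// "dummy" returns immediately; it only exists so the racing clients below
		// have a method to call.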
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})

		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})

		client := ts.NewClient(opts)
		defer client.Close()

		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()

		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall

		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()

				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}

		// Now try to close the channel; it should block since there are active exchanges.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")

		close(completeCall)
		<-callDone
	})

	// Wait for all calls to complete
	wg.Wait()
}
Example #8
func TestWriteArg3AfterTimeout(t *testing.T) {
	// The channel reads and writes during timeouts, causing warning logs.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		timedOut := make(chan struct{})

		handler := func(ctx context.Context, call *InboundCall) {
			_, err := raw.ReadArgs(call)
			assert.NoError(t, err, "Read args failed")
			response := call.Response()
			assert.NoError(t, NewArgWriter(response.Arg2Writer()).Write(nil), "Write Arg2 failed")
			writer, err := response.Arg3Writer()
			assert.NoError(t, err, "Arg3Writer failed")

			for {
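				// Keep writing arg3 until the deadline passes and the write fails
				// with ErrTimeout.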
				if _, err := writer.Write(testutils.RandBytes(4096)); err != nil {
					assert.Equal(t, ErrTimeout, err, "Handler should timeout")
					close(timedOut)
					return
				}
				runtime.Gosched()
			}
		}
		ts.Register(HandlerFunc(handler), "call")

		ctx, cancel := NewContext(testutils.Timeout(50 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, nil)
		assert.Equal(t, ErrTimeout, err, "Call should timeout")

		// Wait for the handler's write to fail with a timeout.
		select {
		case <-time.After(testutils.Timeout(30 * time.Millisecond)):
			t.Errorf("Handler should have failed due to timeout")
		case <-timedOut:
		}

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "call").Failed("timeout").Succeeded().End()
		ts.AssertRelayStats(calls)
	})
}
Example #9
func TestConnectTimeout(t *testing.T) {
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		// Set up a relay that will delay the initial init req.
		testComplete := make(chan struct{})

		relayFunc := func(outgoing bool, f *Frame) *Frame {
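			// Delay every frame (including the init req) well past the connect
			// timeout, unless the test has already completed.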
			select {
			case <-time.After(testutils.Timeout(200 * time.Millisecond)):
				return f
			case <-testComplete:
				// TODO: We should be able to forward the frame and have this test not fail.
				// Currently, it fails since the sequence of events is:
				// Server receives a TCP connection
				// Channel.Close() is called on the server
				// Server's TCP connection receives an init req
				// Since we don't currently track pending connections, the open TCP connection is not closed, and
				// we process the init req. This leaves an open connection at the end of the test.
				return nil
			}
		}
		relay, shutdown := testutils.FrameRelay(t, ts.HostPort(), relayFunc)
		defer shutdown()

		// Make a call with a long timeout, but short connect timeout.
		// We expect the call to fail almost immediately with ErrTimeout.
		ctx, cancel := NewContextBuilder(2 * time.Second).
			SetConnectTimeout(testutils.Timeout(100 * time.Millisecond)).
			Build()
		defer cancel()

		client := ts.NewClient(opts)
		err := client.Ping(ctx, relay)
		assert.Equal(t, ErrTimeout, err, "Ping should timeout due to timeout relay")

		// Note: we do not defer this, as we need to close(testComplete) before
		// we call shutdown, since shutdown waits for the relay to close and the
		// relay is stuck inside our custom relay function.
		close(testComplete)
	})
}
Example #10
func TestFragmentationSlowReader(t *testing.T) {
	// Inbound forward will time out and cause a warning log.
	opts := testutils.NewOpts().
		AddLogFilter("Unable to forward frame", 1).
		AddLogFilter("Connection error", 1)

	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		startReading, handlerComplete := make(chan struct{}), make(chan struct{})
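		// The handler waits for the test's signal and for the context to expire
		// before reading args, so frames are dropped due to the slow read.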
		handler := func(ctx context.Context, call *InboundCall) {
			<-startReading
			<-ctx.Done()
			_, err := raw.ReadArgs(call)
			assert.Error(t, err, "ReadArgs should fail since frames will be dropped due to slow reading")
			close(handlerComplete)
		}

		ts.Register(HandlerFunc(handler), "echo")

		arg2 := testutils.RandBytes(MaxFramePayloadSize * MexChannelBufferSize)
		arg3 := testutils.RandBytes(MaxFramePayloadSize * (MexChannelBufferSize + 1))

		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", arg2, arg3)
		assert.Error(t, err, "Call should timeout due to slow reader")

		close(startReading)
		select {
		case <-handlerComplete:
		case <-time.After(testutils.Timeout(70 * time.Millisecond)):
			t.Errorf("Handler not called, context timeout may be too low")
		}

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "echo").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example #11
func TestResponseClosedBeforeRequest(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(streamPartialHandler(t, false /* report errors */), "echoStream")

		ctx, cancel := NewContext(testutils.Timeout(time.Second))
		defer cancel()

		helper := streamHelper{t}
		ch := ts.NewClient(nil)
		responseClosed := make(chan struct{})
		writerDone := make(chan struct{})

		arg3Writer, arg3Reader := helper.startCall(ctx, ch, ts.HostPort(), ts.Server().ServiceName())
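		// The writer goroutine streams a few bytes, asks the server to close the
		// response, then verifies that further writes fail once the response ends.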
		go func() {
			defer close(writerDone)

			for i := 0; i < 10; i++ {
				assert.NoError(t, writeFlushBytes(arg3Writer, []byte{1}), "Write failed")
			}

			// Ignore the error from writeFlushBytes here since, once we flush, the
			// remote side could receive and close the response before we've created
			// a new fragment (see fragmentingWriter.Flush). This could result
			// in the Flush returning a "mex is already shutdown" error.
			writeFlushBytes(arg3Writer, []byte{streamRequestClose})

			// Wait until our reader gets the EOF.
			<-responseClosed

			// Now our writes should fail, since the stream has been shut down.
			err := writeFlushBytes(arg3Writer, []byte{1})
			if assert.Error(t, err, "Req write should fail since response stream ended") {
				assert.Contains(t, err.Error(), "mex has been shutdown")
			}
		}()

		for i := 0; i < 10; i++ {
			arg3 := make([]byte, 1)
			n, err := arg3Reader.Read(arg3)
			assert.Equal(t, 1, n, "Read did not return the correct number of bytes")
			assert.NoError(t, err, "Read failed")
		}

		eofBuf := make([]byte, 1)
		_, err := arg3Reader.Read(eofBuf)
		assert.Equal(t, io.EOF, err, "Response should EOF after request close")
		assert.NoError(t, arg3Reader.Close(), "Close should succeed")
		close(responseClosed)
		<-writerDone
	})
}
Example #12
func TestThriftTimeout(t *testing.T) {
	withSetup(t, func(ctx Context, args testArgs) {
		handler := make(chan struct{})

		args.s2.On("Echo", ctxArg(), "asd").Return("asd", nil).Run(func(args mock.Arguments) {
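			// Sleep past the client's deadline so the call times out before the
			// handler returns.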
			time.Sleep(testutils.Timeout(15 * time.Millisecond))
			close(handler)
		})

		ctx, cancel := NewContext(testutils.Timeout(10 * time.Millisecond))
		defer cancel()

		_, err := args.c2.Echo(ctx, "asd")
		assert.Equal(t, tchannel.ErrTimeout, err, "Expect call to time out")

		// Wait for the handler to return; otherwise the test ends before the server gets an error.
		select {
		case <-handler:
		case <-time.After(time.Second):
			t.Errorf("Echo handler did not run")
		}
	})
}
Example #13
func TestStreamCancelled(t *testing.T) {
	// Since the cancel message is unimplemented, the relay does not know that the
	// call was cancelled, and will block closing until the timeout.
	opts := testutils.NewOpts().NoRelay()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		ts.Register(streamPartialHandler(t, false /* report errors */), "echoStream")

		ctx, cancel := NewContext(testutils.Timeout(time.Second))
		defer cancel()

		helper := streamHelper{t}
		ch := ts.NewClient(nil)
		cancelContext := make(chan struct{})

		arg3Writer, arg3Reader := helper.startCall(ctx, ch, ts.HostPort(), ts.ServiceName())
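		// Stream a few bytes, then cancel the context and verify that subsequent
		// flushes and closes on the writer fail.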
		go func() {
			for i := 0; i < 10; i++ {
				assert.NoError(t, writeFlushBytes(arg3Writer, []byte{1}), "Write failed")
			}

			// Once the context is cancelled, our reads and writes should fail.
			<-cancelContext
			cancel()

			_, err := arg3Writer.Write([]byte{1})
			// The write will succeed since it's buffered.
			assert.NoError(t, err, "Write after fail should be buffered")
			assert.Error(t, arg3Writer.Flush(), "writer.Flush should fail after cancel")
			assert.Error(t, arg3Writer.Close(), "writer.Close should fail after cancel")
		}()

		for i := 0; i < 10; i++ {
			arg3 := make([]byte, 1)
			n, err := arg3Reader.Read(arg3)
			assert.Equal(t, 1, n, "Read did not return the correct number of bytes")
			assert.NoError(t, err, "Read failed")
		}

		close(cancelContext)

		n, err := io.Copy(ioutil.Discard, arg3Reader)
		assert.EqualValues(t, 0, n, "Read should not read any bytes after cancel")
		assert.Error(t, err, "Read should fail after cancel")
		assert.Error(t, arg3Reader.Close(), "reader.Close should fail after cancel")
	})
}
Example #14
func TestTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		// onError may be called when the block call tries to write the call response.
		onError := func(ctx context.Context, err error) {
			assert.Equal(t, ErrTimeout, err, "onError err should be ErrTimeout")
			assert.Equal(t, context.DeadlineExceeded, ctx.Err(), "Context should timeout")
		}
		testHandler := onErrorTestHandler{newTestHandler(t), onError}
		ch.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "block", []byte("Arg2"), []byte("Arg3"))
		assert.Equal(t, ErrTimeout, err)

		// Verify the server-side receives an error from the context.
		assert.Equal(t, context.DeadlineExceeded, <-testHandler.blockErr)
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #15
func TestWriteTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ch := ts.Server()
		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		call, err := ch.BeginCall(ctx, ts.HostPort(), ch.ServiceName(), "call", nil)
		require.NoError(t, err, "Call failed")

		writer, err := call.Arg2Writer()
		require.NoError(t, err, "Arg2Writer failed")

		_, err = writer.Write([]byte{1})
		require.NoError(t, err, "Write initial bytes failed")
		<-ctx.Done()

		_, err = io.Copy(writer, testreader.Looper([]byte{1}))
		assert.Equal(t, ErrTimeout, err, "Write should fail with timeout")

		ts.AssertRelayStats(relaytest.NewMockStats())
	})
}
Example #16
func waitForChannelClose(t *testing.T, ch *Channel) bool {
	// TODO: remove standalone use (outside testutils.TestServer).
	started := time.Now()

	var state ChannelState
	for i := 0; i < 50; i++ {
		if state = ch.State(); state == ChannelClosed {
			return true
		}

		runtime.Gosched()
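		// Spin for the first few iterations, then back off with increasing sleeps.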
		if i < 5 {
			continue
		}

		sleepFor := time.Duration(i) * 100 * time.Microsecond
		time.Sleep(testutils.Timeout(sleepFor))
	}

	// Channel is not closing, fail the test.
	sinceStart := time.Since(started)
	t.Errorf("Channel did not close after %v, last state: %v", sinceStart, state)
	return false
}
Example #17
func (s *PropagationTestSuite) runTestCase(
	t *testing.T,
	tracer tracerChoice,
	ch *tchannel.Channel,
	test PropagationTestCase,
	call TracingCall,
) {
	descr := fmt.Sprintf("test %+v with tracer %+v", test, tracer)
	ch.Logger().Debugf("Starting tracing test %s", descr)

	tracer.resetSpans()

	span := ch.Tracer().StartSpan("client")
	span.SetBaggageItem(BaggageKey, BaggageValue)
	ctx := opentracing.ContextWithSpan(context.Background(), span)

	ctxBuilder := tchannel.NewContextBuilder(5 * time.Second).SetParentContext(ctx)
	if test.TracingDisabled {
		ctxBuilder.DisableTracing()
	}
	ctx, cancel := ctxBuilder.Build()
	defer cancel()

	req := &TracingRequest{ForwardCount: test.ForwardCount}
	ch.Logger().Infof("Sending tracing request %+v", req)
	response, err := call(ctx, req)
	require.NoError(t, err)
	ch.Logger().Infof("Received tracing response %+v", response)

	// Spans are finished in inbound.doneSending() or outbound.doneReading(),
	// which are called on different go-routines and may execute *after* the
	// response has been received by the client. Give them a chance to run.
	for i := 0; i < 1000; i++ {
		if spanCount := tracer.spansRecorded(); spanCount == test.ExpectedSpanCount {
			break
		}
		time.Sleep(testutils.Timeout(time.Millisecond))
	}
	spanCount := tracer.spansRecorded()
	ch.Logger().Debugf("end span count: %d", spanCount)

	// finish span after taking count of recorded spans, as we're only interested
	// in the count of spans created by RPC calls.
	span.Finish()

	root := new(TracingResponse).ObserveSpan(ctx)

	if !tracer.isFake {
		assert.Equal(t, uint64(0), root.ParentID)
		assert.NotEqual(t, uint64(0), root.TraceID)
	}

	assert.Equal(t, test.ExpectedSpanCount, spanCount, "Wrong span count; %s", descr)

	for r, cnt := response, 0; r != nil || cnt <= test.ForwardCount; r, cnt = r.Child, cnt+1 {
		require.NotNil(t, r, "Expecting response for forward=%d; %s", cnt, descr)
		if !tracer.isFake {
			if tracer.zipkinCompatible || s.Encoding.HeadersSupported {
				assert.Equal(t, root.TraceID, r.TraceID, "traceID should be the same; %s", descr)
			}
			assert.Equal(t, test.ExpectedBaggage, r.Luggage, "baggage should propagate; %s", descr)
		}
	}
	ch.Logger().Debugf("Finished tracing test %s", descr)
}
Example #18
func TestTracingSpanAttributes(t *testing.T) {
	tracer := mocktracer.New()

	opts := &testutils.ChannelOpts{
		ChannelOptions: ChannelOptions{Tracer: tracer},
		DisableRelay:   true,
	}
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		// Register JSON handler
		jsonHandler := &JSONHandler{TraceHandler: testtracing.TraceHandler{Ch: ch}, t: t}
		json.Register(ch, json.Handlers{"call": jsonHandler.callJSON}, jsonHandler.onError)

		span := ch.Tracer().StartSpan("client")
		span.SetBaggageItem(testtracing.BaggageKey, testtracing.BaggageValue)
		ctx := opentracing.ContextWithSpan(context.Background(), span)
		root := new(testtracing.TracingResponse).ObserveSpan(ctx)

		ctx, cancel := NewContextBuilder(2 * time.Second).SetParentContext(ctx).Build()
		defer cancel()

		peer := ch.Peers().GetOrAdd(ch.PeerInfo().HostPort)
		var response testtracing.TracingResponse
		require.NoError(t, json.CallPeer(json.Wrap(ctx), peer, ch.PeerInfo().ServiceName,
			"call", &testtracing.TracingRequest{}, &response))

		// Spans are finished in inbound.doneSending() or outbound.doneReading(),
		// which are called on different go-routines and may execute *after* the
		// response has been received by the client. Give them a chance to finish.
		for i := 0; i < 1000; i++ {
			if spanCount := len(testtracing.MockTracerSampledSpans(tracer)); spanCount == 2 {
				break
			}
			time.Sleep(testutils.Timeout(time.Millisecond))
		}
		spans := testtracing.MockTracerSampledSpans(tracer)
		spanCount := len(spans)
		ch.Logger().Debugf("end span count: %d", spanCount)

		// finish span after taking count of recorded spans
		span.Finish()

		require.Equal(t, 2, spanCount, "Wrong span count")
		assert.Equal(t, root.TraceID, response.TraceID, "Trace ID must match root span")
		assert.Equal(t, testtracing.BaggageValue, response.Luggage, "Baggage must match")

		var parent, child *mocktracer.MockSpan
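		// Identify the client (parent) and server (child) spans by their span.kind tag.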
		for _, s := range spans {
			if s.Tag("span.kind") == ext.SpanKindRPCClientEnum {
				parent = s
				ch.Logger().Debugf("Found parent span: %+v", s)
			} else if s.Tag("span.kind") == ext.SpanKindRPCServerEnum {
				child = s
				ch.Logger().Debugf("Found child span: %+v", s)
			}
		}

		require.NotNil(t, parent)
		require.NotNil(t, child)

		traceID := func(s opentracing.Span) int {
			return s.Context().(mocktracer.MockSpanContext).TraceID
		}
		spanID := func(s *mocktracer.MockSpan) int {
			return s.Context().(mocktracer.MockSpanContext).SpanID
		}
		sampled := func(s *mocktracer.MockSpan) bool {
			return s.Context().(mocktracer.MockSpanContext).Sampled
		}

		require.Equal(t, traceID(span), traceID(parent), "parent must be found")
		require.Equal(t, traceID(span), traceID(child), "child must be found")
		assert.Equal(t, traceID(parent), traceID(child))
		assert.Equal(t, spanID(parent), child.ParentID)
		assert.True(t, sampled(parent), "should be sampled")
		assert.True(t, sampled(child), "should be sampled")
		assert.Equal(t, "call", parent.OperationName)
		assert.Equal(t, "call", child.OperationName)
		assert.Equal(t, "testService", parent.Tag("peer.service"))
		assert.Equal(t, "testService", child.Tag("peer.service"))
		assert.Equal(t, "json", parent.Tag("as"))
		assert.Equal(t, "json", child.Tag("as"))
		assert.NotNil(t, parent.Tag("peer.ipv4"))
		assert.NotNil(t, child.Tag("peer.ipv4"))
		assert.NotNil(t, parent.Tag("peer.port"))
		assert.NotNil(t, child.Tag("peer.port"))
	})
}