Example #1
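// TestRelayConnection checks that the relay passes the remote host:port and
// process-prefix matches to the RelayHosts callback, and that an error returned
// from the callback is propagated to the caller.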
func TestRelayConnection(t *testing.T) {
	var errTest = errors.New("test")
	var wantHostPort string
	getHost := func(call relay.CallFrame, conn relay.Conn) (relay.Peer, error) {
		matches := conn.RemoteProcessPrefixMatches()
		assert.Equal(t, []bool{true, true, true, false}, matches, "Unexpected prefix matches.")
		assert.Equal(t, wantHostPort, conn.RemoteHostPort(), "Unexpected RemoteHostPort")
		return relay.Peer{}, errTest
	}

	// Note: we cannot use WithTestServer since we override the RelayHosts.
	opts := testutils.NewOpts().
		SetServiceName("relay").
		SetRelayHosts(hostsFunc(getHost)).
		SetProcessPrefixes("nod", "nodejs-hyperbahn", "", "hyperbahn")
	relay := testutils.NewServer(t, opts)
	defer relay.Close()

	// Create a client that is listening so we can set the expected host:port.
	clientOpts := testutils.NewOpts().SetProcessName("nodejs-hyperbahn")
	client := testutils.NewServer(t, clientOpts)
	wantHostPort = client.PeerInfo().HostPort
	defer client.Close()

	err := testutils.CallEcho(client, relay.PeerInfo().HostPort, relay.ServiceName(), nil)
	require.Error(t, err, "Expected CallEcho to fail")
	assert.Contains(t, err.Error(), errTest.Error(), "Unexpected error")
}
Example #2
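// TestStatsCalls checks the counters and latency timers reported to the client
// and server stats reporters for a successful call and an application error.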
func TestStatsCalls(t *testing.T) {
	defer testutils.SetTimeout(t, time.Second)()

	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)
	clientNow, clientNowFn := testutils.NowStub(initialTime)
	serverNow, serverNowFn := testutils.NowStub(initialTime)
	clientNowFn(100 * time.Millisecond)
	serverNowFn(50 * time.Millisecond)

	clientStats := newRecordingStatsReporter()
	serverStats := newRecordingStatsReporter()
	serverOpts := testutils.NewOpts().
		SetStatsReporter(serverStats).
		SetTimeNow(serverNow)
	WithVerifiedServer(t, serverOpts, func(serverCh *Channel, hostPort string) {
		handler := raw.Wrap(newTestHandler(t))
		serverCh.Register(handler, "echo")
		serverCh.Register(handler, "app-error")

		ch := testutils.NewClient(t, testutils.NewOpts().
			SetStatsReporter(clientStats).
			SetTimeNow(clientNow))
		defer ch.Close()

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		outboundTags := tagsForOutboundCall(serverCh, ch, "echo")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags := tagsForInboundCall(serverCh, ch, "echo")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.success", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)

		_, _, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "app-error", nil, nil)
		require.NoError(t, err)
		require.True(t, resp.ApplicationError(), "expected application error")

		outboundTags = tagsForOutboundCall(serverCh, ch, "app-error")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.per-attempt.app-errors", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.app-errors", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags = tagsForInboundCall(serverCh, ch, "app-error")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.app-errors", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)
	})

	clientStats.Validate(t)
	serverStats.Validate(t)
}
Example #3
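// BenchmarkBothSerial measures the latency of serial Echo calls made through
// the Thrift client against the benchmark server.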
func BenchmarkBothSerial(b *testing.B) {
	serverAddr, err := setupBenchServer()
	require.NoError(b, err, "setupBenchServer failed")

	opts := testutils.NewOpts().SetFramePool(tchannel.NewSyncFramePool())
	clientCh := testutils.NewClient(b, opts)
	for _, addr := range serverAddr {
		clientCh.Peers().Add(addr)
	}

	thriftClient := thrift.NewClient(clientCh, "bench-server", nil)
	client := gen.NewTChanSecondServiceClient(thriftClient)
	ctx, cancel := thrift.NewContext(10 * time.Millisecond)
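	// Warm up the connection with an initial call (the error is ignored).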
	client.Echo(ctx, "make connection")
	cancel()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ctx, cancel := thrift.NewContext(10 * time.Millisecond)
		defer cancel()

		_, err := client.Echo(ctx, "hello world")
		if err != nil {
			b.Errorf("Echo failed: %v", err)
		}
	}
}
Example #4
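// TestRequestSubChannel checks that Thrift servers registered on the main
// channel and on subchannels each handle calls addressed to their own service name.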
func TestRequestSubChannel(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	tchan := testutils.NewServer(t, testutils.NewOpts().SetServiceName("svc1"))
	defer tchan.Close()

	clientCh := testutils.NewClient(t, nil)
	defer clientCh.Close()
	clientCh.Peers().Add(tchan.PeerInfo().HostPort)

	tests := []tchannel.Registrar{tchan, tchan.GetSubChannel("svc2"), tchan.GetSubChannel("svc3")}
	for _, ch := range tests {
		mockHandler := new(mocks.TChanSecondService)
		server := NewServer(ch)
		server.Register(gen.NewTChanSecondServiceServer(mockHandler))

		client := NewClient(clientCh, ch.ServiceName(), nil)
		secondClient := gen.NewTChanSecondServiceClient(client)

		echoArg := ch.ServiceName()
		echoRes := echoArg + "-echo"
		mockHandler.On("Echo", ctxArg(), echoArg).Return(echoRes, nil)
		res, err := secondClient.Echo(ctx, echoArg)
		assert.NoError(t, err, "Echo failed")
		assert.Equal(t, echoRes, res)
	}
}
Example #5
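// TestIntrospectNumConnections checks that IntrospectNumConnections tracks
// connections as clients connect and close.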
func TestIntrospectNumConnections(t *testing.T) {
	// Disable the relay, since the relay does not maintain a 1:1 mapping between
	// incoming and outgoing connections.
	opts := testutils.NewOpts().NoRelay()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		assert.Equal(t, 0, ts.Server().IntrospectNumConnections(), "Expected no connection on new server")

		for i := 0; i < 10; i++ {
			client := ts.NewClient(nil)
			defer client.Close()

			require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed")
			assert.Equal(t, 1, client.IntrospectNumConnections(), "Client should have single connection")
			assert.Equal(t, i+1, ts.Server().IntrospectNumConnections(), "Incorrect number of server connections")
		}

		// Make sure that a closed connection will reduce NumConnections.
		client := ts.NewClient(nil)
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed")
		assert.Equal(t, 11, ts.Server().IntrospectNumConnections(), "Number of connections expected to increase")

		client.Close()
		require.True(t, testutils.WaitFor(100*time.Millisecond, func() bool {
			return ts.Server().IntrospectNumConnections() == 10
		}), "Closed connection did not get removed, num connections is %v", ts.Server().IntrospectNumConnections())
	})
}
Example #6
// TestRelayConcurrentCalls makes many concurrent calls and ensures that
// we don't try to reuse any frames once they've been released.
func TestRelayConcurrentCalls(t *testing.T) {
	pool := NewProtectMemFramePool()
	opts := testutils.NewOpts().SetRelayOnly().SetFramePool(pool)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := benchmark.NewServer(
			benchmark.WithNoLibrary(),
			benchmark.WithServiceName("s1"),
		)
		defer server.Close()
		ts.RelayHosts().Add("s1", server.HostPort())

		client := benchmark.NewClient([]string{ts.HostPort()},
			benchmark.WithNoDurations(),
			benchmark.WithNoLibrary(),
			benchmark.WithNumClients(20),
			benchmark.WithServiceName("s1"),
			benchmark.WithTimeout(time.Minute),
		)
		defer client.Close()
		require.NoError(t, client.Warmup(), "Client warmup failed")

		_, err := client.RawCall(1000)
		assert.NoError(t, err, "RawCalls failed")
	})
}
Example #7
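// TestDirtyFrameRequests checks that calls round-trip correctly even when the
// frame pool returns dirty (non-zeroed) frames.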
func TestDirtyFrameRequests(t *testing.T) {
	argSizes := []int{25000, 50000, 75000}

	// Create the largest required random cache.
	testutils.RandBytes(argSizes[len(argSizes)-1])

	opts := testutils.NewOpts().
		SetServiceName("swap-server").
		SetFramePool(dirtyFramePool{})
	WithVerifiedServer(t, opts, func(serverCh *Channel, hostPort string) {
		peerInfo := serverCh.PeerInfo()
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")

		for _, argSize := range argSizes {
			ctx, cancel := NewContext(time.Second)
			defer cancel()

			arg2, arg3 := testutils.RandBytes(argSize), testutils.RandBytes(argSize)
			res2, res3, _, err := raw.Call(ctx, serverCh, hostPort, peerInfo.ServiceName, "swap", arg2, arg3)
			if assert.NoError(t, err, "Call failed") {
				assert.Equal(t, arg2, res3, "Result arg3 wrong")
				assert.Equal(t, arg3, res2, "Result arg2 wrong")
			}
		}
	})
}
Example #8
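// TestRelayHandleLargeLocalCall checks that a fragmented call addressed to the
// relay itself fails with a bad request, since the relay cannot receive fragmented calls.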
func TestRelayHandleLargeLocalCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly().
		SetRelayLocal("relay").
		AddLogFilter("Received fragmented callReq", 1).
		// Expect 4 callReqContinues for the 256 KB payload that we cannot relay.
		AddLogFilter("Failed to relay frame.", 4)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		client := ts.NewClient(nil)
		testutils.RegisterEcho(ts.Relay(), nil)

		// This large call should fail with a bad request.
		err := testutils.CallEcho(client, ts.HostPort(), "relay", &raw.Args{
			Arg2: testutils.RandBytes(128 * 1024),
			Arg3: testutils.RandBytes(128 * 1024),
		})
		if assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err), "Expected BadRequest for large call to relay") {
			assert.Contains(t, err.Error(), "cannot receive fragmented calls")
		}

		// We may get an error before the call is finished flushing.
		// Do a ping to ensure everything has been flushed.
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping failed")
	})
}
Example #9
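// runRetryTest sets up a retryTest fixture with a Hyperbahn client that ignores
// advertise failures, runs f against it, and asserts the mock's expectations.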
func runRetryTest(t *testing.T, f func(r *retryTest)) {
	r := &retryTest{}
	defer testutils.SetTimeout(t, time.Second)()
	r.setup()
	defer testutils.ResetSleepStub(&timeSleep)

	withSetup(t, func(hypCh *tchannel.Channel, hostPort string) {
		json.Register(hypCh, json.Handlers{"ad": r.adHandler}, nil)

		// Advertise failures cause warning log messages.
		opts := testutils.NewOpts().
			SetServiceName("my-client").
			AddLogFilter("Hyperbahn client registration failed", 10)
		serverCh := testutils.NewServer(t, opts)
		defer serverCh.Close()

		var err error
		r.ch = serverCh
		r.client, err = NewClient(serverCh, configFor(hostPort), &ClientOptions{
			Handler:      r,
			FailStrategy: FailStrategyIgnore,
		})
		require.NoError(t, err, "NewClient")
		defer r.client.Close()
		f(r)
		r.mock.AssertExpectations(t)
	})
}
Example #10
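// TestInboundEphemeralPeerRemoved checks that an ephemeral client's entry is
// removed from the server's root peer list once the client closes.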
func TestInboundEphemeralPeerRemoved(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// No relay, since we look for the exact host:port in peer lists.
	opts := testutils.NewOpts().NoRelay()
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		client := testutils.NewClient(t, nil)
		assert.NoError(t, client.Ping(ctx, hostPort), "Ping to server failed")

		// Server should have a host:port in the root peers for the client.
		var clientHP string
		peers := ch.RootPeers().Copy()
		for k := range peers {
			clientHP = k
		}

		waitTillInboundEmpty(t, ch, clientHP, func() {
			client.Close()
		})
		assert.Equal(t, ChannelClosed, client.State(), "Client should be closed")

		_, ok := ch.RootPeers().Get(clientHP)
		assert.False(t, ok, "server's root peers should remove peer for client on close")
	})
}
Example #11
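// TestReadTimeout checks that calls cancelled from within the handler fail with
// context.Canceled and are recorded as timeouts in the relay stats.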
func TestReadTimeout(t *testing.T) {
	// The error frame may fail to send since the connection closes before the handler sends it,
	// or the handler's write may fail because the other side has already closed the connection.
	opts := testutils.NewOpts().
		AddLogFilter("Couldn't send outbound error frame", 1).
		AddLogFilter("Connection error", 1, "site", "read frames").
		AddLogFilter("Connection error", 1, "site", "write frames").
		AddLogFilter("simpleHandler OnError", 1,
			"error", "failed to send error frame, connection state connectionClosed")

	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		sn := ts.ServiceName()
		calls := relaytest.NewMockStats()

		for i := 0; i < 10; i++ {
			ctx, cancel := NewContext(time.Second)
			handler := func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				defer cancel()
				return nil, ErrTimeout
			}
			ts.RegisterFunc("call", handler)

			_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, nil)
			assert.Equal(t, err, context.Canceled, "Call should fail due to cancel")
			calls.Add(sn, sn, "call").Failed("timeout").End()
		}

		ts.AssertRelayStats(calls)
	})
}
Example #12
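// TestFragmentationSlowReader checks that a fragmented call times out when the
// handler reads its arguments too slowly, without leaking goroutines.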
func TestFragmentationSlowReader(t *testing.T) {
	startReading, handlerComplete := make(chan struct{}), make(chan struct{})
	handler := func(ctx context.Context, call *InboundCall) {
		<-ctx.Done()
		<-startReading
		_, err := raw.ReadArgs(call)
		assert.Error(t, err, "ReadArgs should fail since frames will be dropped due to slow reading")
		close(handlerComplete)
	}

	// Inbound forward will time out and cause a warning log.
	opts := testutils.NewOpts().AddLogFilter("Unable to forward frame", 1)
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		ch.Register(HandlerFunc(handler), "echo")

		arg2 := testutils.RandBytes(MaxFramePayloadSize * MexChannelBufferSize)
		arg3 := testutils.RandBytes(MaxFramePayloadSize * (MexChannelBufferSize + 1))

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", arg2, arg3)
		assert.Error(t, err, "Call should timeout due to slow reader")

		close(startReading)
		<-handlerComplete
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #13
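// setupBenchServer starts the benchmark Thrift server and returns the host:ports
// clients should connect to (the server itself, or the Hyperbahn nodes when advertising).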
func setupBenchServer() ([]string, error) {
	ch, err := testutils.NewServerChannel(testutils.NewOpts().
		SetServiceName(benchServerName).
		SetFramePool(tchannel.NewSyncFramePool()))
	if err != nil {
		return nil, err
	}
	fmt.Println(benchServerName, "started on", ch.PeerInfo().HostPort)

	server := thrift.NewServer(ch)
	server.Register(gen.NewTChanSecondServiceServer(benchSecondHandler{}))

	if !*useHyperbahn {
		return []string{ch.PeerInfo().HostPort}, nil
	}

	// Set up a Hyperbahn client and advertise it.
	nodes := strings.Split(*hyperbahnNodes, ",")
	config := hyperbahn.Configuration{InitialNodes: nodes}
	hc, err := hyperbahn.NewClient(ch, config, nil)
	if err != nil {
		return nil, err
	}
	if err := hc.Advertise(); err != nil {
		return nil, err
	}

	return nodes, nil
}
Example #14
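// TestCloseAfterTimeout checks that a client channel can be closed immediately
// after one of its calls has timed out.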
func TestCloseAfterTimeout(t *testing.T) {
	// Disable log verification since connections are closed after a timeout
	// and the relay might still be reading/writing to the connection.
	// TODO: Ideally, we only disable log verification on the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ts.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(100 * time.Millisecond)
		defer cancel()

		// Make a call and wait for it to time out.
		clientCh := ts.NewClient(nil)
		_, _, _, err := raw.Call(ctx, clientCh, ts.HostPort(), ts.ServiceName(), "block", nil, nil)
		require.Equal(t, ErrTimeout, err, "Expected call to timeout")

		// The client channel should also close immediately.
		clientCh.Close()
		assertStateChangesTo(t, clientCh, ChannelClosed)
		assert.True(t, clientCh.Closed(), "Channel should be closed")

		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
}
Example #15
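// TestRaceExchangesWithClose races new connections and calls against channel
// Close and checks that Close blocks while an exchange is still active.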
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup

	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()

	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()

		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})

		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})

		client := ts.NewClient(opts)
		defer client.Close()

		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()

		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall

		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()

				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}

		// Now try to close the channel; it should block since there are active exchanges.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")

		close(completeCall)
		<-callDone
	})

	// Wait for all calls to complete
	wg.Wait()
}
Example #16
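// TestRelayMakeOutgoingCall checks that the relay channel itself can make
// outgoing calls of various payload sizes.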
func TestRelayMakeOutgoingCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		svr1 := ts.Relay()
		svr2 := ts.NewServer(testutils.NewOpts().SetServiceName("svc2"))
		testutils.RegisterEcho(svr2, nil)

		sizes := []int{128, 1024, 128 * 1024}
		for _, size := range sizes {
			err := testutils.CallEcho(svr1, ts.HostPort(), "svc2", &raw.Args{
				Arg2: testutils.RandBytes(size),
				Arg3: testutils.RandBytes(size),
			})
			assert.NoError(t, err, "Echo with size %v failed", size)
		}
	})
}
Example #17
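// TestPeersIncomingConnection checks that incoming connections are only added to
// the root peer list, and verifies the inbound/outbound connection counts on both sides.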
func TestPeersIncomingConnection(t *testing.T) {
	newService := func(svcName string) (*Channel, string) {
		ch, _, hostPort := NewServer(t, &testutils.ChannelOpts{ServiceName: svcName})
		return ch, hostPort
	}

	opts := testutils.NewOpts().NoRelay()
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		doPing := func(ch *Channel) {
			ctx, cancel := NewContext(time.Second)
			defer cancel()
			assert.NoError(t, ch.Ping(ctx, hostPort), "Ping failed")
		}

		hyperbahnSC := ch.GetSubChannel("hyperbahn")
		ringpopSC := ch.GetSubChannel("ringpop", Isolated)

		hyperbahn, hyperbahnHostPort := newService("hyperbahn")
		defer hyperbahn.Close()
		ringpop, ringpopHostPort := newService("ringpop")
		defer ringpop.Close()

		doPing(hyperbahn)
		doPing(ringpop)

		// The root peer list should contain all incoming connections.
		rootPeers := ch.RootPeers().Copy()
		assert.NotNil(t, rootPeers[hyperbahnHostPort], "missing hyperbahn peer")
		assert.NotNil(t, rootPeers[ringpopHostPort], "missing ringpop peer")

		for _, sc := range []Registrar{ch, hyperbahnSC, ringpopSC} {
			_, err := sc.Peers().Get(nil)
			assert.Equal(t, ErrNoPeers, err,
				"incoming connections should not be added to non-root peer list")
		}

		// verify number of peers/connections on the client side
		serverState := ch.IntrospectState(nil).RootPeers
		serverHostPort := ch.PeerInfo().HostPort

		assert.Equal(t, len(serverState), 2, "Incorrect peer count")
		for _, client := range []*Channel{ringpop, hyperbahn} {
			clientPeerState := client.IntrospectState(nil).RootPeers
			clientHostPort := client.PeerInfo().HostPort
			assert.Equal(t, len(clientPeerState), 1, "Incorrect peer count")
			assert.Equal(t, len(clientPeerState[serverHostPort].OutboundConnections), 1, "Incorrect outbound connection count")
			assert.Equal(t, len(clientPeerState[serverHostPort].InboundConnections), 0, "Incorrect inbound connection count")

			assert.Equal(t, len(serverState[clientHostPort].InboundConnections), 1, "Incorrect inbound connection count")
			assert.Equal(t, len(serverState[clientHostPort].OutboundConnections), 0, "Incorrect outbound connection count")
		}

		// In the future, when connections send a service name, we should be able to
		// check that a new connection containing a service name for an isolated
		// subchannel is only added to the isolated subchannel's peers, while all
		// other incoming connections are added to the shared peer list.
	})
}
Example #18
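// TestTraceSamplingRate checks that the number of calls with tracing enabled
// matches the configured trace sample rate.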
func TestTraceSamplingRate(t *testing.T) {
	rand.Seed(10)

	tests := []struct {
		sampleRate  float64 // if this is < 0, then the value is not set.
		count       int
		expectedMin int
		expectedMax int
	}{
		{1.0, 100, 100, 100},
		{0.5, 100, 40, 60},
		{0.1, 100, 5, 15},
		{0, 100, 0, 0},
		{-1, 100, 100, 100}, // default of 1.0 should be used.
	}

	for _, tt := range tests {
		var reportedTraces int
		testTraceReporter := TraceReporterFunc(func(_ TraceData) {
			reportedTraces++
		})

		WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
			var tracedCalls int
			testutils.RegisterFunc(ch, "t", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				if CurrentSpan(ctx).TracingEnabled() {
					tracedCalls++
				}

				return &raw.Res{}, nil
			})

			opts := testutils.NewOpts().SetTraceReporter(testTraceReporter)
			if tt.sampleRate >= 0 {
				opts.SetTraceSampleRate(tt.sampleRate)
			}

			client := testutils.NewClient(t, opts)
			defer client.Close()

			for i := 0; i < tt.count; i++ {
				ctx, cancel := NewContext(time.Second)
				defer cancel()

				_, _, _, err := raw.Call(ctx, client, hostPort, ch.PeerInfo().ServiceName, "t", nil, nil)
				require.NoError(t, err, "raw.Call failed")
			}

			assert.Equal(t, reportedTraces, tracedCalls,
				"Number of traces report doesn't match calls with tracing enabled")
			assert.True(t, tracedCalls >= tt.expectedMin,
				"Number of trace enabled calls (%v) expected to be greater than %v", tracedCalls, tt.expectedMin)
			assert.True(t, tracedCalls <= tt.expectedMax,
				"Number of trace enabled calls (%v) expected to be less than %v", tracedCalls, tt.expectedMax)
		})
	}
}
Example #19
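// TestRelayUsesRootPeers checks that relayed calls do not add entries to the
// relay's non-root peer list.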
func TestRelayUsesRootPeers(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testutils.RegisterEcho(ts.Server(), nil)
		client := testutils.NewClient(t, nil)
		err := testutils.CallEcho(client, ts.HostPort(), ts.ServiceName(), nil)
		assert.NoError(t, err, "Echo failed")
		assert.Len(t, ts.Relay().Peers().Copy(), 0, "Peers should not be modified by relay")
	})
}
Example #20
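// WithRegistry starts a TChannel inbound backed by the given registry, sets up
// a matching outbound, and runs f against that outbound.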
func (tt tchannelTransport) WithRegistry(r transport.Registry, f func(transport.UnaryOutbound)) {
	serverOpts := testutils.NewOpts().SetServiceName(testService)
	clientOpts := testutils.NewOpts().SetServiceName(testCaller)
	testutils.WithServer(tt.t, serverOpts, func(ch *tchannel.Channel, hostPort string) {
		i := tch.NewInbound(ch)
		require.NoError(tt.t, i.Start(transport.ServiceDetail{Name: testService, Registry: r}, transport.NoDeps), "failed to start")

		defer i.Stop()
		// ^ the server is already listening so this will just set up the
		// handler.

		client := testutils.NewClient(tt.t, clientOpts)
		o := tch.NewOutbound(client, tch.HostPort(hostPort))
		require.NoError(tt.t, o.Start(transport.NoDeps), "failed to start outbound")
		defer o.Stop()

		f(o)
	})
}
Example #21
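// TestReuseConnection checks that calls in both directions between two channels
// reuse a single connection.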
func TestReuseConnection(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Since we're specifically testing that connections between hosts are re-used,
	// we can't interpose a relay in this test.
	s1Opts := testutils.NewOpts().SetServiceName("s1").NoRelay()

	testutils.WithTestServer(t, s1Opts, func(ts *testutils.TestServer) {
		ch2 := ts.NewServer(&testutils.ChannelOpts{ServiceName: "s2"})
		hostPort2 := ch2.PeerInfo().HostPort
		defer ch2.Close()

		ts.Register(raw.Wrap(newTestHandler(t)), "echo")
		ch2.Register(raw.Wrap(newTestHandler(t)), "echo")

		outbound, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil)
		require.NoError(t, err)
		outboundConn, outboundNetConn := OutboundConnection(outbound)

		// Try to make another call at the same time; it should reuse the same connection.
		outbound2, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil)
		require.NoError(t, err)
		outbound2Conn, _ := OutboundConnection(outbound2)
		assert.Equal(t, outboundConn, outbound2Conn)

		// Wait for the connection to be marked as active in ch2.
		assert.True(t, testutils.WaitFor(time.Second, func() bool {
			return ch2.IntrospectState(nil).NumConnections > 0
		}), "ch2 does not have any active connections")

		// When ch2 tries to call the test server, it should reuse the existing
		// inbound connection to the test server. Of course, this only works if the
		// test server -> ch2 call wasn't relayed.
		outbound3, err := ch2.BeginCall(ctx, ts.HostPort(), "s1", "echo", nil)
		require.NoError(t, err)
		_, outbound3NetConn := OutboundConnection(outbound3)
		assert.Equal(t, outboundNetConn.RemoteAddr(), outbound3NetConn.LocalAddr())
		assert.Equal(t, outboundNetConn.LocalAddr(), outbound3NetConn.RemoteAddr())

		// Ensure all calls can complete in parallel.
		var wg sync.WaitGroup
		for _, call := range []*OutboundCall{outbound, outbound2, outbound3} {
			wg.Add(1)
			go func(call *OutboundCall) {
				defer wg.Done()
				resp1, resp2, _, err := raw.WriteArgs(call, []byte("arg2"), []byte("arg3"))
				require.NoError(t, err)
				assert.Equal(t, resp1, []byte("arg2"), "result does not match argument")
				assert.Equal(t, resp2, []byte("arg3"), "result does not match argument")
			}(call)
		}
		wg.Wait()
	})
}
Example #22
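// TestBadRequest checks that calling an unknown service and method fails with
// ErrCodeBadRequest.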
func TestBadRequest(t *testing.T) {
	// ch will log an error when it receives a request for an unknown handler.
	opts := testutils.NewOpts().AddLogFilter("Couldn't find handler.", 1)
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, "Nowhere", "Noone", []byte("Headers"), []byte("Body"))
		require.NotNil(t, err)
		assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err))
	})
}
Example #23
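// TestRemotePeer checks the PeerInfo reported by CurrentCall(ctx).RemotePeer()
// for both ephemeral clients and listening servers.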
func TestRemotePeer(t *testing.T) {
	tests := []struct {
		name       string
		remote     func(*testutils.TestServer) *Channel
		expectedFn func(*RuntimeState, *testutils.TestServer) PeerInfo
	}{
		{
			name:   "ephemeral client",
			remote: func(ts *testutils.TestServer) *Channel { return ts.NewClient(nil) },
			expectedFn: func(state *RuntimeState, ts *testutils.TestServer) PeerInfo {
				return PeerInfo{
					HostPort:    state.RootPeers[ts.HostPort()].OutboundConnections[0].LocalHostPort,
					IsEphemeral: true,
					ProcessName: state.LocalPeer.ProcessName,
				}
			},
		},
		{
			name:   "listening server",
			remote: func(ts *testutils.TestServer) *Channel { return ts.NewServer(nil) },
			expectedFn: func(state *RuntimeState, ts *testutils.TestServer) PeerInfo {
				return PeerInfo{
					HostPort:    state.LocalPeer.HostPort,
					IsEphemeral: false,
					ProcessName: state.LocalPeer.ProcessName,
				}
			},
		},
	}

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	for _, tt := range tests {
		opts := testutils.NewOpts().SetServiceName("fake-service").NoRelay()
		testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
			remote := tt.remote(ts)
			defer remote.Close()

			gotPeer := make(chan PeerInfo, 1)
			ts.RegisterFunc("test", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				gotPeer <- CurrentCall(ctx).RemotePeer()
				return &raw.Res{}, nil
			})

			_, _, _, err := raw.Call(ctx, remote, ts.HostPort(), ts.Server().ServiceName(), "test", nil, nil)
			assert.NoError(t, err, "%v: Call failed", tt.name)
			expected := tt.expectedFn(remote.IntrospectState(nil), ts)
			assert.Equal(t, expected, <-gotPeer, "%v: RemotePeer mismatch", tt.name)
		})
	}
}
Example #24
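// TestPeerRemovedFromRootPeers checks whether a client's host:port remains in
// the server's root peer list after the client closes, depending on whether the
// peer was explicitly added or removed.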
func TestPeerRemovedFromRootPeers(t *testing.T) {
	tests := []struct {
		addHostPort    bool
		removeHostPort bool
		expectFound    bool
	}{
		{
			addHostPort: true,
			expectFound: true,
		},
		{
			addHostPort:    true,
			removeHostPort: true,
			expectFound:    false,
		},
		{
			addHostPort: false,
			expectFound: false,
		},
	}

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	for _, tt := range tests {
		opts := testutils.NewOpts().NoRelay()
		WithVerifiedServer(t, opts, func(server *Channel, hostPort string) {
			ch := testutils.NewServer(t, nil)
			clientHP := ch.PeerInfo().HostPort

			if tt.addHostPort {
				server.Peers().Add(clientHP)
			}

			assert.NoError(t, ch.Ping(ctx, hostPort), "Ping failed")

			if tt.removeHostPort {
				require.NoError(t, server.Peers().Remove(clientHP), "Failed to remove peer")
			}

			waitTillInboundEmpty(t, server, clientHP, func() {
				ch.Close()
			})

			rootPeers := server.RootPeers()
			_, found := rootPeers.Get(clientHP)
			assert.Equal(t, tt.expectFound, found, "Peer found mismatch, addHostPort: %v", tt.addHostPort)
		})
	}
}
Example #25
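// TestActiveCallReq checks that a call frame reusing an ID that is already
// active on the connection is rejected.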
func TestActiveCallReq(t *testing.T) {
	t.Skip("Test skipped due to unreliable way to test for protocol errors")

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Note: This test cannot use log verification as the duplicate ID causes a log.
	// It does not use a verified server, as it leaks a message exchange due to the
	// modification of IDs in the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithServer(t, opts, func(ch *Channel, hostPort string) {
		gotCall := make(chan struct{})
		unblock := make(chan struct{})

		testutils.RegisterFunc(ch, "blocked", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			gotCall <- struct{}{}
			<-unblock
			return &raw.Res{}, nil
		})

		relayFunc := func(outgoing bool, frame *Frame) *Frame {
			if outgoing && frame.Header.ID == 3 {
				frame.Header.ID = 2
			}
			return frame
		}

		relayHostPort, closeRelay := testutils.FrameRelay(t, hostPort, relayFunc)
		defer closeRelay()

		firstComplete := make(chan struct{})
		go func() {
			// This call will block until we close unblock.
			raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
			close(firstComplete)
		}()

		// Wait for the first call to be received by the server
		<-gotCall

		// Make a new call, which should fail
		_, _, _, err := raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		assert.Error(t, err, "Expect error")
		assert.True(t, strings.Contains(err.Error(), "already active"),
			"expected already active error, got %v", err)

		close(unblock)
		<-firstComplete
	})
}
Example #26
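// TestRequestStateRetry checks that RunWithRetry selects fresh peers on each
// attempt and succeeds once a healthy peer is added.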
func TestRequestStateRetry(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")

		closedHostPorts := make([]string, 4)
		for i := range closedHostPorts {
			hostPort, close := testutils.GetAcceptCloseHostPort(t)
			defer close()
			closedHostPorts[i] = hostPort
		}

		// Since we close connections remotely, there will be some warnings that we can ignore.
		opts := testutils.NewOpts().DisableLogVerification()
		client := ts.NewClient(opts)
		defer client.Close()
		counter := 0

		sc := client.GetSubChannel(ts.Server().ServiceName())
		err := client.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
			defer func() { counter++ }()

			expectedPeers := counter
			if expectedPeers > 0 {
				// An entry is also added for each host.
				expectedPeers++
			}

			assert.Equal(t, expectedPeers, len(rs.SelectedPeers), "SelectedPeers should not be reused")

			if counter < 4 {
				client.Peers().Add(closedHostPorts[counter])
			} else {
				client.Peers().Add(ts.HostPort())
			}

			_, err := raw.CallV2(ctx, sc, raw.CArgs{
				Method:      "echo",
				CallOptions: &CallOptions{RequestState: rs},
			})
			return err
		})
		assert.NoError(t, err, "RunWithRetry should succeed")
		assert.Equal(t, 5, counter, "RunWithRetry should retry 5 times")
	})
}
Example #27
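// withNewServerClient creates a server channel and a client channel with the
// given service name and passes both to f.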
func withNewServerClient(svcName string, f func(server, client *tchannel.Channel)) {
	opts := testutils.NewOpts().SetServiceName(svcName)
	server, err := testutils.NewServerChannel(opts)
	if err != nil {
		panic(err)
	}
	defer server.Close()

	client, err := testutils.NewClientChannel(opts)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	f(server, client)
}
Example #28
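// TestBadRequest checks that calling an unknown method fails with
// ErrCodeBadRequest and is recorded as a bad-request failure in the relay stats.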
func TestBadRequest(t *testing.T) {
	// ch will log an error when it receives a request for an unknown handler.
	opts := testutils.NewOpts().AddLogFilter("Couldn't find handler.", 1)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "Noone", []byte("Headers"), []byte("Body"))
		require.NotNil(t, err)
		assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err))

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "Noone").Failed("bad-request").End()
		ts.AssertRelayStats(calls)
	})
}
Example #29
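// setupTChan starts a TChannel server whose "http" method forwards requests to
// the given ServeMux, and returns its host:port along with a shutdown function.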
func setupTChan(t *testing.T, mux *http.ServeMux) (string, func()) {
	ch := testutils.NewServer(t, testutils.NewOpts().SetServiceName("test"))
	handler := func(ctx context.Context, call *tchannel.InboundCall) {
		req, err := ReadRequest(call)
		if !assert.NoError(t, err, "ReadRequest failed") {
			return
		}

		// Make the HTTP call using the default mux.
		writer, finish := ResponseWriter(call.Response())
		mux.ServeHTTP(writer, req)
		finish()
	}
	ch.Register(tchannel.HandlerFunc(handler), "http")
	return ch.PeerInfo().HostPort, func() { ch.Close() }
}
Example #30
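// SetupServer starts a test TChannel server, optionally registering a Thrift
// health handler, and returns the channel and its host:port.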
func SetupServer(t *testing.T, fn thrift.HealthFunc) (*tchannel.Channel, string) {
	_, cancel := tchannel.NewContext(time.Second * 10)
	defer cancel()

	opts := testutils.NewOpts().
		SetServiceName("testing").
		DisableLogVerification()
	tchan := testutils.NewServer(t, opts)

	if fn != nil {
		server := thrift.NewServer(tchan)
		server.RegisterHealthHandler(fn)
	}

	return tchan, tchan.PeerInfo().HostPort
}