Example #1
func TestReadTimeout(t *testing.T) {
	// The error frame may fail to send since the connection may close before the handler sends it,
	// or the handler's send may race with the other side closing the connection.
	opts := testutils.NewOpts().
		AddLogFilter("Couldn't send outbound error frame", 1).
		AddLogFilter("Connection error", 1, "site", "read frames").
		AddLogFilter("Connection error", 1, "site", "write frames").
		AddLogFilter("simpleHandler OnError", 1,
			"error", "failed to send error frame, connection state connectionClosed")

	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		sn := ts.ServiceName()
		calls := relaytest.NewMockStats()

		for i := 0; i < 10; i++ {
			ctx, cancel := NewContext(time.Second)
			handler := func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				defer cancel()
				return nil, ErrTimeout
			}
			ts.RegisterFunc("call", handler)

			_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, nil)
			assert.Equal(t, context.Canceled, err, "Call should fail due to cancel")
			calls.Add(sn, sn, "call").Failed("timeout").End()
		}

		ts.AssertRelayStats(calls)
	})
}
Example #2
func TestFragmentation(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")

		arg2 := make([]byte, MaxFramePayloadSize*2)
		for i := 0; i < len(arg2); i++ {
			arg2[i] = byte('a' + (i % 10))
		}

		arg3 := make([]byte, MaxFramePayloadSize*3)
		for i := 0; i < len(arg3); i++ {
			arg3[i] = byte('A' + (i % 10))
		}

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		respArg2, respArg3, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", arg2, arg3)
		require.NoError(t, err)
		assert.Equal(t, arg2, respArg2)
		assert.Equal(t, arg3, respArg3)

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "echo").Succeeded().End()
		ts.AssertRelayStats(calls)
	})
}
Example #3
func TestTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		// onError may be called when the block call tries to write the call response.
		onError := func(ctx context.Context, err error) {
			assert.Equal(t, ErrTimeout, err, "onError err should be ErrTimeout")
			assert.Equal(t, context.DeadlineExceeded, ctx.Err(), "Context should timeout")
		}
		testHandler := onErrorTestHandler{newTestHandler(t), onError}
		ts.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "block", []byte("Arg2"), []byte("Arg3"))
		assert.Equal(t, ErrTimeout, err)

		// Verify the server-side receives an error from the context.
		select {
		case err := <-testHandler.blockErr:
			assert.Equal(t, context.DeadlineExceeded, err, "Server should have received timeout")
		case <-time.After(time.Second):
			t.Errorf("Server did not receive call, may need higher timeout")
		}

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "block").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example #4
func TestRoundTrip(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		handler := newTestHandler(t)
		ts.Register(raw.Wrap(handler), "echo")

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		call, err := ts.Server().BeginCall(ctx, ts.HostPort(), ts.ServiceName(), "echo", &CallOptions{Format: JSON})
		require.NoError(t, err)
		assert.NotEmpty(t, call.RemotePeer().HostPort)

		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(testArg2))
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(testArg3))

		var respArg2 []byte
		require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&respArg2))
		assert.Equal(t, testArg2, []byte(respArg2))

		var respArg3 []byte
		require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&respArg3))
		assert.Equal(t, testArg3, []byte(respArg3))

		assert.Equal(t, JSON, handler.format)
		assert.Equal(t, ts.ServiceName(), handler.caller)
		assert.Equal(t, JSON, call.Response().Format(), "response Format should match request Format")
	})
}
Example #5
// The purpose of this test is to ensure introspection doesn't cause any panics
// and that we have coverage of the introspection code.
func TestIntrospection(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		client := testutils.NewClient(t, nil)
		defer client.Close()

		ctx, cancel := json.NewContext(time.Second)
		defer cancel()

		var resp map[string]interface{}
		peer := client.Peers().GetOrAdd(ts.HostPort())
		err := json.CallPeer(ctx, peer, ts.ServiceName(), "_gometa_introspect", map[string]interface{}{
			"includeExchanges":  true,
			"includeEmptyPeers": true,
			"includeTombstones": true,
		}, &resp)
		require.NoError(t, err, "Call _gometa_introspect failed")

		err = json.CallPeer(ctx, peer, ts.ServiceName(), "_gometa_runtime", map[string]interface{}{
			"includeGoStacks": true,
		}, &resp)
		require.NoError(t, err, "Call _gometa_runtime failed")

		if !ts.HasRelay() {
			// Try making the call on the "tchannel" service which is where meta handlers
			// are registered. This will only work when we call it directly as the relay
			// will not forward the tchannel service.
			err = json.CallPeer(ctx, peer, "tchannel", "_gometa_runtime", map[string]interface{}{
				"includeGoStacks": true,
			}, &resp)
			require.NoError(t, err, "Call _gometa_runtime failed")
		}
	})
}
Example #6
func TestRelayIDClash(t *testing.T) {
	opts := serviceNameOpts("s1").SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2"))

		unblock := make(chan struct{})
		testutils.RegisterEcho(s1, func() {
			<-unblock
		})
		testutils.RegisterEcho(s2, nil)

		var wg sync.WaitGroup
		for i := 0; i < 10; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				testutils.AssertEcho(t, s2, ts.HostPort(), s1.ServiceName())
			}()
		}

		for i := 0; i < 5; i++ {
			testutils.AssertEcho(t, s1, ts.HostPort(), s2.ServiceName())
		}

		close(unblock)
		wg.Wait()
	})
}
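Several of these examples (this one, #8, and #13) use the same gating idiom: the server handler blocks on a channel so the test controls exactly when in-flight calls complete. A minimal standalone sketch of that idiom, using only sync from the standard library (channelGatingSketch and its names are illustrative, not library code):

func channelGatingSketch() {
	unblock := make(chan struct{})
	handler := func() {
		<-unblock // every call parks here until the test is ready
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			handler() // stands in for the blocked echo call
		}()
	}

	// ...assertions can run here while all ten calls are held in-flight...

	close(unblock) // releases every parked goroutine at once
	wg.Wait()      // then wait for them to finish
}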
Example #7
func TestIntrospectNumConnections(t *testing.T) {
	// Disable the relay, since the relay does not maintain a 1:1 mapping between
	// incoming and outgoing connections.
	opts := testutils.NewOpts().NoRelay()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		assert.Equal(t, 0, ts.Server().IntrospectNumConnections(), "Expected no connection on new server")

		for i := 0; i < 10; i++ {
			client := ts.NewClient(nil)
			defer client.Close()

			require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed")
			assert.Equal(t, 1, client.IntrospectNumConnections(), "Client should have single connection")
			assert.Equal(t, i+1, ts.Server().IntrospectNumConnections(), "Incorrect number of server connections")
		}

		// Make sure that a closed connection will reduce NumConnections.
		client := ts.NewClient(nil)
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed")
		assert.Equal(t, 11, ts.Server().IntrospectNumConnections(), "Number of connections expected to increase")

		client.Close()
		require.True(t, testutils.WaitFor(100*time.Millisecond, func() bool {
			return ts.Server().IntrospectNumConnections() == 10
		}), "Closed connection did not get removed, num connections is %v", ts.Server().IntrospectNumConnections())
	})
}
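testutils.WaitFor(timeout, f), as used above, polls f until it returns true or the timeout elapses, and reports whether the condition was ever met. A rough stdlib-only sketch of what such a helper does (waitFor and the 1ms poll interval are illustrative, not the library's actual implementation; needs only the time package):

func waitFor(timeout time.Duration, f func() bool) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if f() {
			return true
		}
		time.Sleep(time.Millisecond) // small poll interval between checks
	}
	return f() // one final check at the deadline
}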
Example #8
func TestTimeoutCallsThenClose(t *testing.T) {
	// Test needs at least 2 CPUs to trigger race conditions.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	opts := serviceNameOpts("s1").SetRelayOnly().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2").DisableLogVerification())

		unblockEcho := make(chan struct{})
		testutils.RegisterEcho(s1, func() {
			<-unblockEcho
		})

		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()

		var callers sync.WaitGroup
		for i := 0; i < 100; i++ {
			callers.Add(1)
			go func() {
				defer callers.Done()
				raw.Call(ctx, s2, ts.HostPort(), "s1", "echo", nil, nil)
			}()
		}

		close(unblockEcho)

		// Wait for all the callers to end
		callers.Wait()
	})
}
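The one-liner defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) works because runtime.GOMAXPROCS sets the new value and returns the previous one. Spelled out at the top of a test, the save-and-restore looks like this:

	prev := runtime.GOMAXPROCS(2)  // raise to 2 Ps for the test, remembering the old value
	defer runtime.GOMAXPROCS(prev) // restore the original setting when the test returns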
Example #9
// TestRelayConcurrentCalls makes many concurrent calls and ensures that
// we don't try to reuse any frames once they've been released.
func TestRelayConcurrentCalls(t *testing.T) {
	pool := NewProtectMemFramePool()
	opts := testutils.NewOpts().SetRelayOnly().SetFramePool(pool)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := benchmark.NewServer(
			benchmark.WithNoLibrary(),
			benchmark.WithServiceName("s1"),
		)
		defer server.Close()
		ts.RelayHosts().Add("s1", server.HostPort())

		client := benchmark.NewClient([]string{ts.HostPort()},
			benchmark.WithNoDurations(),
			benchmark.WithNoLibrary(),
			benchmark.WithNumClients(20),
			benchmark.WithServiceName("s1"),
			benchmark.WithTimeout(time.Minute),
		)
		defer client.Close()
		require.NoError(t, client.Warmup(), "Client warmup failed")

		_, err := client.RawCall(1000)
		assert.NoError(t, err, "RawCalls failed")
	})
}
Example #10
func TestRelayHandleLargeLocalCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly().
		SetRelayLocal("relay").
		AddLogFilter("Received fragmented callReq", 1).
		// Expect 4 callReqContinues for the 256 KB payload that we cannot relay.
		AddLogFilter("Failed to relay frame.", 4)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		client := ts.NewClient(nil)
		testutils.RegisterEcho(ts.Relay(), nil)

		// This large call should fail with a bad request.
		err := testutils.CallEcho(client, ts.HostPort(), "relay", &raw.Args{
			Arg2: testutils.RandBytes(128 * 1024),
			Arg3: testutils.RandBytes(128 * 1024),
		})
		if assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err), "Expected BadRequest for large call to relay") {
			assert.Contains(t, err.Error(), "cannot receive fragmented calls")
		}

		// We may get an error before the call is finished flushing.
		// Do a ping to ensure everything has been flushed.
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping failed")
	})
}
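The log-filter count of 4 follows from frame-size arithmetic: assuming a maximum frame payload of roughly 64 KiB (an approximation, not the exact MaxFramePayloadSize constant), the 2 × 128 KiB arguments above spill into about four callReqContinue frames. A back-of-the-envelope check:

package main

import "fmt"

func main() {
	const approxMaxFramePayload = 64 * 1024   // assumption; the real MaxFramePayloadSize is slightly smaller
	body := 2 * 128 * 1024                    // Arg2 + Arg3 sizes from the call above
	fmt.Println(body / approxMaxFramePayload) // 4, matching the expected callReqContinue count
}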
Example #11
func TestWriteErrorAfterTimeout(t *testing.T) {
	// TODO: Make this test block at different points (e.g. before, during read/write).
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		timedOut := make(chan struct{})
		done := make(chan struct{})
		handler := func(ctx context.Context, call *InboundCall) {
			<-ctx.Done()
			<-timedOut
			_, err := raw.ReadArgs(call)
			assert.Equal(t, ErrTimeout, err, "Read args should fail with timeout")
			response := call.Response()
			assert.Equal(t, ErrTimeout, response.SendSystemError(ErrServerBusy), "SendSystemError should fail")
			close(done)
		}
		ts.Register(HandlerFunc(handler), "call")

		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, testutils.RandBytes(100000))
		assert.Equal(t, ErrTimeout, err, "Call should timeout")
		close(timedOut)

		select {
		case <-done:
		case <-time.After(time.Second):
			t.Errorf("Handler not called, timeout may be too low")
		}

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "call").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example #12
func TestCloseAfterTimeout(t *testing.T) {
	// Disable log verification since connections are closed after a timeout
	// and the relay might still be reading/writing to the connection.
	// TODO: Ideally, we only disable log verification on the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ts.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(100 * time.Millisecond)
		defer cancel()

		// Make a call, wait for it to timeout.
		clientCh := ts.NewClient(nil)
		_, _, _, err := raw.Call(ctx, clientCh, ts.HostPort(), ts.ServiceName(), "block", nil, nil)
		require.Equal(t, ErrTimeout, err, "Expected call to timeout")

		// The client channel should also close immediately.
		clientCh.Close()
		assertStateChangesTo(t, clientCh, ChannelClosed)
		assert.True(t, clientCh.Closed(), "Channel should be closed")

		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
}
Example #13
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup

	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()

	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()

		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})

		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})

		client := ts.NewClient(opts)
		defer client.Close()

		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()

		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall

		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()

				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}

		// Now try to close the channel; it should block since there are active exchanges.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")

		close(completeCall)
		<-callDone
	})

	// Wait for all calls to complete
	wg.Wait()
}
Example #14
func withRelayedEcho(t testing.TB, f func(relay, server, client *Channel, ts *testutils.TestServer)) {
	opts := serviceNameOpts("test").SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testutils.RegisterEcho(ts.Server(), nil)
		client := ts.NewClient(serviceNameOpts("client"))
		client.Peers().Add(ts.HostPort())
		f(ts.Relay(), ts.Server(), client, ts)
	})
}
Example #15
func TestPing(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		clientCh := ts.NewClient(nil)
		defer clientCh.Close()
		require.NoError(t, clientCh.Ping(ctx, ts.HostPort()))
	})
}
Example #16
func TestLargeMethod(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		largeMethod := testutils.RandBytes(16*1024 + 1)
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), string(largeMethod), nil, nil)
		assert.Equal(t, ErrMethodTooLarge, err)
	})
}
Example #17
func TestRelayUsesRootPeers(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testutils.RegisterEcho(ts.Server(), nil)
		client := testutils.NewClient(t, nil)
		err := testutils.CallEcho(client, ts.HostPort(), ts.ServiceName(), nil)
		assert.NoError(t, err, "Echo failed")
		assert.Len(t, ts.Relay().Peers().Copy(), 0, "Peers should not be modified by relay")
	})
}
Example #18
func TestReuseConnection(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Since we're specifically testing that connections between hosts are re-used,
	// we can't interpose a relay in this test.
	s1Opts := testutils.NewOpts().SetServiceName("s1").NoRelay()

	testutils.WithTestServer(t, s1Opts, func(ts *testutils.TestServer) {
		ch2 := ts.NewServer(&testutils.ChannelOpts{ServiceName: "s2"})
		hostPort2 := ch2.PeerInfo().HostPort
		defer ch2.Close()

		ts.Register(raw.Wrap(newTestHandler(t)), "echo")
		ch2.Register(raw.Wrap(newTestHandler(t)), "echo")

		outbound, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil)
		require.NoError(t, err)
		outboundConn, outboundNetConn := OutboundConnection(outbound)

		// Try to make another call at the same time; it should reuse the same connection.
		outbound2, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil)
		require.NoError(t, err)
		outbound2Conn, _ := OutboundConnection(outbound2)
		assert.Equal(t, outboundConn, outbound2Conn)

		// Wait for the connection to be marked as active in ch2.
		assert.True(t, testutils.WaitFor(time.Second, func() bool {
			return ch2.IntrospectState(nil).NumConnections > 0
		}), "ch2 does not have any active connections")

		// When ch2 tries to call the test server, it should reuse the existing
		// inbound connection to the test server. Of course, this only works if the
		// test server -> ch2 call wasn't relayed.
		outbound3, err := ch2.BeginCall(ctx, ts.HostPort(), "s1", "echo", nil)
		require.NoError(t, err)
		_, outbound3NetConn := OutboundConnection(outbound3)
		assert.Equal(t, outboundNetConn.RemoteAddr(), outbound3NetConn.LocalAddr())
		assert.Equal(t, outboundNetConn.LocalAddr(), outbound3NetConn.RemoteAddr())

		// Ensure all calls can complete in parallel.
		var wg sync.WaitGroup
		for _, call := range []*OutboundCall{outbound, outbound2, outbound3} {
			wg.Add(1)
			go func(call *OutboundCall) {
				defer wg.Done()
				resp1, resp2, _, err := raw.WriteArgs(call, []byte("arg2"), []byte("arg3"))
				require.NoError(t, err)
				assert.Equal(t, []byte("arg2"), resp1, "result should match argument")
				assert.Equal(t, []byte("arg3"), resp2, "result should match argument")
			}(call)
		}
		wg.Wait()
	})
}
Example #19
func TestPeerScoreOnNewConnection(t *testing.T) {
	tests := []struct {
		message string
		connect func(s1, s2 *Channel) *Peer
	}{
		{
			message: "outbound connection",
			connect: func(s1, s2 *Channel) *Peer {
				return s1.Peers().GetOrAdd(s2.PeerInfo().HostPort)
			},
		},
		{
			message: "inbound connection",
			connect: func(s1, s2 *Channel) *Peer {
				return s2.Peers().GetOrAdd(s1.PeerInfo().HostPort)
			},
		},
	}

	getScore := func(pl *PeerList) uint64 {
		peers := pl.IntrospectList(nil)
		require.Equal(t, 1, len(peers), "Wrong number of peers")
		return peers[0].Score
	}

	for _, tt := range tests {
		testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
			ctx, cancel := NewContext(time.Second)
			defer cancel()

			s1 := ts.Server()
			s2 := ts.NewServer(nil)

			s1.Peers().Add(s2.PeerInfo().HostPort)
			s2.Peers().Add(s1.PeerInfo().HostPort)

			initialScore := getScore(s1.Peers())
			peer := tt.connect(s1, s2)
			conn, err := peer.GetConnection(ctx)
			require.NoError(t, err, "%v: GetConnection failed", tt.message)

			// When receiving an inbound connection, the outbound connect may return
			// before the inbound has updated the score, so we may need to retry.
			assert.True(t, testutils.WaitFor(time.Second, func() bool {
				connectedScore := getScore(s1.Peers())
				return connectedScore < initialScore
			}), "%v: Expected connected peer score %v to be less than initial score %v",
				tt.message, getScore(s1.Peers()), initialScore)

			// Ping to ensure the connection has been added to peers on both sides.
			require.NoError(t, conn.Ping(ctx), "%v: Ping failed", tt.message)
		})
	}
}
Example #20
func TestNoTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "Echo")

		ctx := context.Background()
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), "svc", "Echo", []byte("Headers"), []byte("Body"))
		assert.Equal(t, ErrTimeoutRequired, err)

		ts.AssertRelayStats(relaytest.NewMockStats())
	})
}
Example #21
func TestNoServiceNaming(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), "", "Echo", []byte("Headers"), []byte("Body"))
		assert.Equal(t, ErrNoServiceName, err)

		ts.AssertRelayStats(relaytest.NewMockStats())
	})
}
Example #22
func TestRemotePeer(t *testing.T) {
	tests := []struct {
		name       string
		remote     func(*testutils.TestServer) *Channel
		expectedFn func(*RuntimeState, *testutils.TestServer) PeerInfo
	}{
		{
			name:   "ephemeral client",
			remote: func(ts *testutils.TestServer) *Channel { return ts.NewClient(nil) },
			expectedFn: func(state *RuntimeState, ts *testutils.TestServer) PeerInfo {
				return PeerInfo{
					HostPort:    state.RootPeers[ts.HostPort()].OutboundConnections[0].LocalHostPort,
					IsEphemeral: true,
					ProcessName: state.LocalPeer.ProcessName,
				}
			},
		},
		{
			name:   "listening server",
			remote: func(ts *testutils.TestServer) *Channel { return ts.NewServer(nil) },
			expectedFn: func(state *RuntimeState, ts *testutils.TestServer) PeerInfo {
				return PeerInfo{
					HostPort:    state.LocalPeer.HostPort,
					IsEphemeral: false,
					ProcessName: state.LocalPeer.ProcessName,
				}
			},
		},
	}

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	for _, tt := range tests {
		opts := testutils.NewOpts().SetServiceName("fake-service").NoRelay()
		testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
			remote := tt.remote(ts)
			defer remote.Close()

			gotPeer := make(chan PeerInfo, 1)
			ts.RegisterFunc("test", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				gotPeer <- CurrentCall(ctx).RemotePeer()
				return &raw.Res{}, nil
			})

			_, _, _, err := raw.Call(ctx, remote, ts.HostPort(), ts.Server().ServiceName(), "test", nil, nil)
			assert.NoError(t, err, "%v: Call failed", tt.name)
			expected := tt.expectedFn(remote.IntrospectState(nil), ts)
			assert.Equal(t, expected, <-gotPeer, "%v: RemotePeer mismatch", tt.name)
		})
	}
}
Example #23
func TestResponseClosedBeforeRequest(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(streamPartialHandler(t, false /* report errors */), "echoStream")

		ctx, cancel := NewContext(testutils.Timeout(time.Second))
		defer cancel()

		helper := streamHelper{t}
		ch := ts.NewClient(nil)
		responseClosed := make(chan struct{})
		writerDone := make(chan struct{})

		arg3Writer, arg3Reader := helper.startCall(ctx, ch, ts.HostPort(), ts.Server().ServiceName())
		go func() {
			defer close(writerDone)

			for i := 0; i < 10; i++ {
				assert.NoError(t, writeFlushBytes(arg3Writer, []byte{1}), "Write failed")
			}

			// Ignore the error of writeFlushBytes here since once we flush, the
			// remote side could receive and close the response before we've created
			// a new fragment (see fragmentingWriter.Flush). This could result
			// in the Flush returning a "mex is already shutdown" error.
			writeFlushBytes(arg3Writer, []byte{streamRequestClose})

			// Wait until our reader gets the EOF.
			<-responseClosed

			// Now our writes should fail, since the stream is shut down.
			err := writeFlushBytes(arg3Writer, []byte{1})
			if assert.Error(t, err, "Req write should fail since response stream ended") {
				assert.Contains(t, err.Error(), "mex has been shutdown")
			}
		}()

		for i := 0; i < 10; i++ {
			arg3 := make([]byte, 1)
			n, err := arg3Reader.Read(arg3)
			assert.Equal(t, 1, n, "Read did not return the correct number of bytes")
			assert.NoError(t, err, "Read failed")
		}

		eofBuf := make([]byte, 1)
		_, err := arg3Reader.Read(eofBuf)
		assert.Equal(t, io.EOF, err, "Response should EOF after request close")
		assert.NoError(t, arg3Reader.Close(), "Close should succeed")
		close(responseClosed)
		<-writerDone
	})
}
Example #24
func TestLargeTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")

		ctx, cancel := NewContext(1000 * time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", testArg2, testArg3)
		assert.NoError(t, err, "Call failed")

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "echo").Succeeded().End()
		ts.AssertRelayStats(calls)
	})
}
Example #25
func TestRequestStateRetry(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")

		closedHostPorts := make([]string, 4)
		for i := range closedHostPorts {
			hostPort, close := testutils.GetAcceptCloseHostPort(t)
			defer close()
			closedHostPorts[i] = hostPort
		}

		// Since we close connections remotely, there will be some warnings that we can ignore.
		opts := testutils.NewOpts().DisableLogVerification()
		client := ts.NewClient(opts)
		defer client.Close()
		counter := 0

		sc := client.GetSubChannel(ts.Server().ServiceName())
		err := client.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
			defer func() { counter++ }()

			expectedPeers := counter
			if expectedPeers > 0 {
				// An entry is also added for each host.
				expectedPeers++
			}

			assert.Equal(t, expectedPeers, len(rs.SelectedPeers), "SelectedPeers should not be reused")

			if counter < 4 {
				client.Peers().Add(closedHostPorts[counter])
			} else {
				client.Peers().Add(ts.HostPort())
			}

			_, err := raw.CallV2(ctx, sc, raw.CArgs{
				Method:      "echo",
				CallOptions: &CallOptions{RequestState: rs},
			})
			return err
		})
		assert.NoError(t, err, "RunWithRetry should succeed")
		assert.Equal(t, 5, counter, "RunWithRetry should retry 5 times")
	})
}
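RunWithRetry invokes the closure above repeatedly until it succeeds or the retry budget is exhausted; the RequestState it passes carries information such as previously selected peers across attempts. A simplified sketch of that control flow, using only the standard library (runWithRetry and maxAttempts are hypothetical names, not tchannel-go's implementation, which also tracks per-attempt state and retriability):

func runWithRetry(ctx context.Context, maxAttempts int, f func(ctx context.Context) error) error {
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		if err = f(ctx); err == nil {
			return nil // success, stop retrying
		}
		// A fuller implementation would also check whether err is retriable
		// and whether ctx has already expired before trying again.
	}
	return err // last error after exhausting the budget
}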
Example #26
func TestGracefulClose(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ch2 := ts.NewServer(nil)
		hp2 := ch2.PeerInfo().HostPort
		defer ch2.Close()

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		assert.NoError(t, ts.Server().Ping(ctx, hp2), "Ping from ch1 -> ch2 failed")
		assert.NoError(t, ch2.Ping(ctx, ts.HostPort()), "Ping from ch2 -> ch1 failed")

		// No stats for pings.
		ts.AssertRelayStats(relaytest.NewMockStats())
	})
}
Example #27
func TestBadRequest(t *testing.T) {
	// ch will log an error when it receives a request for an unknown handler.
	opts := testutils.NewOpts().AddLogFilter("Couldn't find handler.", 1)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "Noone", []byte("Headers"), []byte("Body"))
		require.NotNil(t, err)
		assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err))

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "Noone").Failed("bad-request").End()
		ts.AssertRelayStats(calls)
	})
}
Example #28
func TestCancelled(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")
		ctx, cancel := NewContext(time.Second)

		// Make a call first to make sure we have a connection.
		// We want to test the BeginCall path.
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", []byte("Headers"), []byte("Body"))
		assert.NoError(t, err, "Call failed")

		// Now cancel the context.
		cancel()
		_, _, _, err = raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", []byte("Headers"), []byte("Body"))
		assert.Equal(t, context.Canceled, err, "Unexpected error when making call with canceled context")
	})
}
Example #29
// Ensure that any connections created in the relay path send the ephemeral
// host:port.
func TestRelayOutgoingConnectionsEphemeral(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s2 := ts.NewServer(serviceNameOpts("s2"))
		testutils.RegisterFunc(s2, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			assert.True(t, CurrentCall(ctx).RemotePeer().IsEphemeral,
				"Connections created for the relay should send ephemeral host:port header")

			return &raw.Res{
				Arg2: args.Arg2,
				Arg3: args.Arg3,
			}, nil
		})

		require.NoError(t, testutils.CallEcho(ts.Server(), ts.HostPort(), "s2", nil), "CallEcho failed")
	})
}
Example #30
func TestRelayMakeOutgoingCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		svr1 := ts.Relay()
		svr2 := ts.NewServer(testutils.NewOpts().SetServiceName("svc2"))
		testutils.RegisterEcho(svr2, nil)

		sizes := []int{128, 1024, 128 * 1024}
		for _, size := range sizes {
			err := testutils.CallEcho(svr1, ts.HostPort(), "svc2", &raw.Args{
				Arg2: testutils.RandBytes(size),
				Arg3: testutils.RandBytes(size),
			})
			assert.NoError(t, err, "Echo with size %v failed", size)
		}
	})
}