Example 1
func TestRelayConnection(t *testing.T) {
	var errTest = errors.New("test")
	var wantHostPort string
	getHost := func(call relay.CallFrame, conn relay.Conn) (relay.Peer, error) {
		matches := conn.RemoteProcessPrefixMatches()
		assert.Equal(t, []bool{true, true, true, false}, matches, "Unexpected prefix matches.")
		assert.Equal(t, wantHostPort, conn.RemoteHostPort(), "Unexpected RemoteHostPort")
		return relay.Peer{}, errTest
	}

	// Note: we cannot use WithTestServer since we override the RelayHosts.
	opts := testutils.NewOpts().
		SetServiceName("relay").
		SetRelayHosts(hostsFunc(getHost)).
		SetProcessPrefixes("nod", "nodejs-hyperbahn", "", "hyperbahn")
	relay := testutils.NewServer(t, opts)
	defer relay.Close()

	// Create a client that is listening so we can set the expected host:port.
	clientOpts := testutils.NewOpts().SetProcessName("nodejs-hyperbahn")
	client := testutils.NewServer(t, clientOpts)
	wantHostPort = client.PeerInfo().HostPort
	defer client.Close()

	err := testutils.CallEcho(client, relay.PeerInfo().HostPort, relay.ServiceName(), nil)
	require.Error(t, err, "Expected CallEcho to fail")
	assert.Contains(t, err.Error(), errTest.Error(), "Unexpected error")
}
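The hostsFunc adapter that wraps getHost above is not defined in this example. A minimal sketch, assuming the relay-hosts interface expected by SetRelayHosts consists of a single Get method with the same signature as getHost (the interface shape is an assumption inferred from the call site, not confirmed by this snippet):

// hostsFunc adapts a plain function to the relay-hosts interface assumed
// by SetRelayHosts; the single-method Get shape is an assumption.
type hostsFunc func(relay.CallFrame, relay.Conn) (relay.Peer, error)

// Get satisfies the assumed interface by delegating to the wrapped function.
func (f hostsFunc) Get(call relay.CallFrame, conn relay.Conn) (relay.Peer, error) {
	return f(call, conn)
}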
Example 2
func TestRelayHandleLargeLocalCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly().
		SetRelayLocal("relay").
		AddLogFilter("Received fragmented callReq", 1).
		// Expect 4 callReqContinues for the 256KB payload that we cannot relay.
		AddLogFilter("Failed to relay frame.", 4)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		client := ts.NewClient(nil)
		testutils.RegisterEcho(ts.Relay(), nil)

		// This large call should fail with a bad request.
		err := testutils.CallEcho(client, ts.HostPort(), "relay", &raw.Args{
			Arg2: testutils.RandBytes(128 * 1024),
			Arg3: testutils.RandBytes(128 * 1024),
		})
		if assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err), "Expected BadRequest for large call to relay") {
			assert.Contains(t, err.Error(), "cannot receive fragmented calls")
		}

		// We may get an error before the call is finished flushing.
		// Do a ping to ensure everything has been flushed.
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping failed")
	})
}
Example 3
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup

	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()

	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()

		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})

		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})

		client := ts.NewClient(opts)
		defer client.Close()

		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()

		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall

		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()

				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}

		// Now try to close the channel; it should block since there are active exchanges.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")

		close(completeCall)
		<-callDone
	})

	// Wait for all calls to complete
	wg.Wait()
}
Example 4
func TestRelayUsesRootPeers(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testutils.RegisterEcho(ts.Server(), nil)
		client := testutils.NewClient(t, nil)
		err := testutils.CallEcho(client, ts.HostPort(), ts.ServiceName(), nil)
		assert.NoError(t, err, "Echo failed")
		assert.Len(t, ts.Relay().Peers().Copy(), 0, "Peers should not be modified by relay")
	})
}
Example 5
func TestRelayHandleLocalCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly().
		SetRelayLocal("relay", "tchannel", "test").
		// We make a call to "test" for an unknown method.
		AddLogFilter("Couldn't find handler.", 1)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s2 := ts.NewServer(serviceNameOpts("s2"))
		testutils.RegisterEcho(s2, nil)

		client := ts.NewClient(nil)
		testutils.AssertEcho(t, client, ts.HostPort(), "s2")

		testutils.RegisterEcho(ts.Relay(), nil)
		testutils.AssertEcho(t, client, ts.HostPort(), "relay")

		// Should get a bad request for "test" since the channel does not handle it.
		err := testutils.CallEcho(client, ts.HostPort(), "test", nil)
		assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err), "Expected BadRequest for test")

		// But an unknown service causes a Declined error.
		err = testutils.CallEcho(client, ts.HostPort(), "unknown", nil)
		assert.Equal(t, ErrCodeDeclined, GetSystemErrorCode(err), "Expected Declined for unknown")
	})
}
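The serviceNameOpts helper used here (and in later examples) is not shown. A plausible sketch, assuming it does nothing more than set the service name on fresh test options:

// serviceNameOpts builds test channel options for the given service name.
// This definition is an assumption inferred from how the helper is called.
func serviceNameOpts(s string) *testutils.ChannelOpts {
	return testutils.NewOpts().SetServiceName(s)
}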
Example 6
func TestRelayMakeOutgoingCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		svr1 := ts.Relay()
		svr2 := ts.NewServer(testutils.NewOpts().SetServiceName("svc2"))
		testutils.RegisterEcho(svr2, nil)

		sizes := []int{128, 1024, 128 * 1024}
		for _, size := range sizes {
			err := testutils.CallEcho(svr1, ts.HostPort(), "svc2", &raw.Args{
				Arg2: testutils.RandBytes(size),
				Arg3: testutils.RandBytes(size),
			})
			assert.NoError(t, err, "Echo with size %v failed", size)
		}
	})
}
Example 7
// Ensure that any connections created in the relay path send the ephemeral
// host:port.
func TestRelayOutgoingConnectionsEphemeral(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s2 := ts.NewServer(serviceNameOpts("s2"))
		testutils.RegisterFunc(s2, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			assert.True(t, CurrentCall(ctx).RemotePeer().IsEphemeral,
				"Connections created for the relay should send ephemeral host:port header")

			return &raw.Res{
				Arg2: args.Arg2,
				Arg3: args.Arg3,
			}, nil
		})

		require.NoError(t, testutils.CallEcho(ts.Server(), ts.HostPort(), "s2", nil), "CallEcho failed")
	})
}
Example 8
// Trigger a race between receiving a new call and a connection closing
// by closing the relay while a lot of background calls are being made.
func TestRaceCloseWithNewCall(t *testing.T) {
	opts := serviceNameOpts("s1").SetRelayOnly().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2").DisableLogVerification())
		testutils.RegisterEcho(s1, nil)

		// Signal to start closing the relay.
		var (
			closeRelay  sync.WaitGroup
			stopCalling atomic.Int32
			callers     sync.WaitGroup
		)

		for i := 0; i < 5; i++ {
			callers.Add(1)
			closeRelay.Add(1)

			go func() {
				defer callers.Done()

				calls := 0
				for stopCalling.Load() == 0 {
					testutils.CallEcho(s2, ts.HostPort(), "s1", nil)
					calls++
					if calls == 5 {
						closeRelay.Done()
					}
				}
			}()
		}

		closeRelay.Wait()

		// Close the relay, wait for it to close.
		ts.Relay().Close()
		closed := testutils.WaitFor(time.Second, func() bool {
			return ts.Relay().State() == ChannelClosed
		})
		assert.True(t, closed, "Relay did not close within timeout")

		// Now stop all calls, and wait for the calling goroutine to end.
		stopCalling.Inc()
		callers.Wait()
	})
}
Example 9
// TestCloseSendError tests that system errors are not sent once a connection
// is closed, and ensures there are no race conditions such as the error frame
// being added to the channel just as it is closed.
func TestCloseSendError(t *testing.T) {
	var (
		closed  atomic.Uint32
		counter atomic.Uint32
	)

	opts := testutils.NewOpts().DisableLogVerification()
	serverCh := testutils.NewServer(t, opts)
	testutils.RegisterEcho(serverCh, func() {
		if counter.Inc() > 10 {
			// Close the server in a goroutine to possibly trigger more race conditions.
			go func() {
				closed.Inc()
				serverCh.Close()
			}()
		}
	})

	clientCh := testutils.NewClient(t, opts)

	// Create a connection that will be shared.
	require.NoError(t, testutils.Ping(clientCh, serverCh), "Ping from client to server failed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			err := testutils.CallEcho(clientCh, serverCh.PeerInfo().HostPort, serverCh.ServiceName(), nil)
			if err != nil && closed.Load() == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	goroutines.VerifyNoLeaks(t, nil)
}
Example 10
// TestCloseSendError tests that system errors are not sent once a connection
// is closed, and ensures there are no race conditions such as the error frame
// being added to the channel just as it is closed.
// TODO(prashant): This test waits for a timeout, but a socket close shouldn't wait for the timeout.
func TestCloseSendError(t *testing.T) {
	closed := uint32(0)
	counter := uint32(0)

	serverCh := testutils.NewServer(t, nil)
	testutils.RegisterEcho(serverCh, func() {
		if atomic.AddUint32(&counter, 1) > 10 {
			// Close the server in a goroutine to possibly trigger more race conditions.
			go func() {
				atomic.AddUint32(&closed, 1)
				serverCh.Close()
			}()
		}
	})

	clientCh := testutils.NewClient(t, nil)

	// Create a connection that will be shared.
	require.NoError(t, testutils.Ping(clientCh, serverCh), "Ping from client to server failed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			err := testutils.CallEcho(clientCh, serverCh.PeerInfo().HostPort, serverCh.ServiceName(), nil)
			if err != nil && atomic.LoadUint32(&closed) == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	goroutines.VerifyNoLeaks(t, nil)
}
Example 11
func TestErrorFrameEndsRelay(t *testing.T) {
	// TestServer validates that there are no relay items left after the given func.
	opts := serviceNameOpts("svc").SetRelayOnly().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		client := ts.NewClient(nil)

		err := testutils.CallEcho(client, ts.HostPort(), "svc", nil)
		if !assert.Error(t, err, "Expected error due to unknown method") {
			return
		}

		se, ok := err.(SystemError)
		if !assert.True(t, ok, "err should be a SystemError, got %T", err) {
			return
		}

		assert.Equal(t, ErrCodeBadRequest, se.Code(), "Expected BadRequest error")

		calls := relaytest.NewMockStats()
		calls.Add(client.PeerInfo().ServiceName, "svc", "echo").Failed("bad-request").End()
		ts.AssertRelayStats(calls)
	})
}
Example 12
func TestRelayErrorsOnGetPeer(t *testing.T) {
	busyErr := NewSystemError(ErrCodeBusy, "busy")
	tests := []struct {
		desc      string
		addPeer   func(*testutils.SimpleRelayHosts)
		statsKey  string
		statsPeer relay.Peer
		wantErr   error
	}{
		{
			desc:     "No peer mappings, return empty Peer",
			addPeer:  func(_ *testutils.SimpleRelayHosts) {},
			statsKey: "relay-declined",
			wantErr:  NewSystemError(ErrCodeDeclined, `invalid peer for "svc"`),
		},
		{
			desc: "System error getting peer",
			addPeer: func(rh *testutils.SimpleRelayHosts) {
				rh.AddError("svc", busyErr)
			},
			statsKey: "relay-busy",
			wantErr:  busyErr,
		},
		{
			desc: "Unknown error getting peer",
			addPeer: func(rh *testutils.SimpleRelayHosts) {
				rh.AddError("svc", errors.New("unknown"))
			},
			statsKey: "relay-declined",
			wantErr:  NewSystemError(ErrCodeDeclined, "unknown"),
		},
		{
			desc: "No host:port on peer",
			addPeer: func(rh *testutils.SimpleRelayHosts) {
				rh.AddPeer("svc", "", "pool", "zone")
			},
			statsKey:  "relay-declined",
			statsPeer: relay.Peer{Zone: "zone", Pool: "pool"},
			wantErr:   NewSystemError(ErrCodeDeclined, `invalid peer for "svc"`),
		},
	}

	for _, tt := range tests {
		opts := testutils.NewOpts().SetRelayOnly()
		testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
			tt.addPeer(ts.RelayHosts())

			client := ts.NewClient(nil)
			err := testutils.CallEcho(client, ts.HostPort(), "svc", nil)
			if !assert.Error(t, err, "Call to unknown service should fail") {
				return
			}

			assert.Equal(t, tt.wantErr, err, "%v: unexpected error", tt.desc)

			calls := relaytest.NewMockStats()
			calls.Add(client.PeerInfo().ServiceName, "svc", "echo").
				SetPeer(tt.statsPeer).
				Failed(tt.statsKey).End()
			ts.AssertRelayStats(calls)
		})
	}
}