Example #1
func TestRelayIDClash(t *testing.T) {
	opts := serviceNameOpts("s1").SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2"))

		unblock := make(chan struct{})
		testutils.RegisterEcho(s1, func() {
			<-unblock
		})
		testutils.RegisterEcho(s2, nil)

		var wg sync.WaitGroup
		for i := 0; i < 10; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				testutils.AssertEcho(t, s2, ts.HostPort(), s1.ServiceName())
			}()
		}

		for i := 0; i < 5; i++ {
			testutils.AssertEcho(t, s1, ts.HostPort(), s2.ServiceName())
		}

		close(unblock)
		wg.Wait()
	})
}
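Note: several of these tests call a serviceNameOpts helper that is not defined in this excerpt. A minimal sketch, assuming it does nothing more than set the service name on fresh options:

// serviceNameOpts builds channel options for the given service name.
// This is a sketch of the undefined helper, not the verified source.
func serviceNameOpts(s string) *testutils.ChannelOpts {
	return testutils.NewOpts().SetServiceName(s)
}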
Example #2
func TestRelayHandleLargeLocalCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly().
		SetRelayLocal("relay").
		AddLogFilter("Received fragmented callReq", 1).
		// Expect 4 callReqContinues for the 256KB payload that we cannot relay.
		AddLogFilter("Failed to relay frame.", 4)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		client := ts.NewClient(nil)
		testutils.RegisterEcho(ts.Relay(), nil)

		// This large call should fail with a bad request.
		err := testutils.CallEcho(client, ts.HostPort(), "relay", &raw.Args{
			Arg2: testutils.RandBytes(128 * 1024),
			Arg3: testutils.RandBytes(128 * 1024),
		})
		if assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err), "Expected BadRequest for large call to relay") {
			assert.Contains(t, err.Error(), "cannot receive fragmented calls")
		}

		// We may get an error before the call is finished flushing.
		// Do a ping to ensure everything has been flushed.
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping failed")
	})
}
Example #3
func TestTimeoutCallsThenClose(t *testing.T) {
	// Test needs at least 2 CPUs to trigger race conditions.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	opts := serviceNameOpts("s1").SetRelayOnly().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2").DisableLogVerification())

		unblockEcho := make(chan struct{})
		testutils.RegisterEcho(s1, func() {
			<-unblockEcho
		})

		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()

		var callers sync.WaitGroup
		for i := 0; i < 100; i++ {
			callers.Add(1)
			go func() {
				defer callers.Done()
				raw.Call(ctx, s2, ts.HostPort(), "s1", "echo", nil, nil)
			}()
		}

		close(unblockEcho)

		// Wait for all the callers to end
		callers.Wait()
	})
}
Example #4
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup

	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()

	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()

		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})

		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})

		client := ts.NewClient(opts)
		defer client.Close()

		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()

		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall

		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()

				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}

		// Now close the channel; with active exchanges it should move to StartClose rather than closing immediately.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")

		close(completeCall)
		<-callDone
	})

	// Wait for all calls to complete
	wg.Wait()
}
Example #5
func withRelayedEcho(t testing.TB, f func(relay, server, client *Channel, ts *testutils.TestServer)) {
	opts := serviceNameOpts("test").SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testutils.RegisterEcho(ts.Server(), nil)
		client := ts.NewClient(serviceNameOpts("client"))
		client.Peers().Add(ts.HostPort())
		f(ts.Relay(), ts.Server(), client, ts)
	})
}
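A usage sketch for this helper; the test name and body below are illustrative, not taken from the source:

// TestRelayedEchoSketch is a hypothetical caller of withRelayedEcho that
// issues a single relayed echo call from the pre-wired client.
func TestRelayedEchoSketch(t *testing.T) {
	withRelayedEcho(t, func(relay, server, client *Channel, ts *testutils.TestServer) {
		testutils.AssertEcho(t, client, ts.HostPort(), server.ServiceName())
	})
}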
Example #6
func TestRelayUsesRootPeers(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testutils.RegisterEcho(ts.Server(), nil)
		client := testutils.NewClient(t, nil)
		err := testutils.CallEcho(client, ts.HostPort(), ts.ServiceName(), nil)
		assert.NoError(t, err, "Echo failed")
		assert.Len(t, ts.Relay().Peers().Copy(), 0, "Peers should not be modified by relay")
	})
}
Example #7
func newAdvertisedEchoServer(t *testing.T, name string, mockHB *mockhyperbahn.Mock, f func()) *tchannel.Channel {
	server := testutils.NewServer(t, &testutils.ChannelOpts{
		ServiceName: name,
	})
	testutils.RegisterEcho(server, f)

	hbClient, err := hyperbahn.NewClient(server, mockHB.Configuration(), nil)
	require.NoError(t, err, "Failed to set up Hyperbahn client")
	require.NoError(t, hbClient.Advertise(), "Advertise failed")

	return server
}
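A usage sketch for this helper. It assumes mockhyperbahn.New constructs the mock passed in above and that the mock has a Close method; both are assumptions, not verified against the source:

// TestAdvertisedEchoSketch is a hypothetical caller that stands up a mock
// Hyperbahn and advertises a single echo server against it.
func TestAdvertisedEchoSketch(t *testing.T) {
	mockHB, err := mockhyperbahn.New()
	require.NoError(t, err, "Failed to create mock Hyperbahn")
	defer mockHB.Close()

	server := newAdvertisedEchoServer(t, "svc", mockHB, nil /* no echo hook */)
	defer server.Close()
}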
Example #8
func TestRelayHandleLocalCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly().
		SetRelayLocal("relay", "tchannel", "test").
		// We make a call to "test" for an unknown method.
		AddLogFilter("Couldn't find handler.", 1)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s2 := ts.NewServer(serviceNameOpts("s2"))
		testutils.RegisterEcho(s2, nil)

		client := ts.NewClient(nil)
		testutils.AssertEcho(t, client, ts.HostPort(), "s2")

		testutils.RegisterEcho(ts.Relay(), nil)
		testutils.AssertEcho(t, client, ts.HostPort(), "relay")

		// Should get a bad request for "test" since the channel does not handle it.
		err := testutils.CallEcho(client, ts.HostPort(), "test", nil)
		assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err), "Expected BadRequest for test")

		// But an unknown service results in a Declined error.
		err = testutils.CallEcho(client, ts.HostPort(), "unknown", nil)
		assert.Equal(t, ErrCodeDeclined, GetSystemErrorCode(err), "Expected Declined for unknown")
	})
}
Example #9
// Trigger a race between receiving a new call and a connection closing
// by closing the relay while a lot of background calls are being made.
func TestRaceCloseWithNewCall(t *testing.T) {
	opts := serviceNameOpts("s1").SetRelayOnly().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2").DisableLogVerification())
		testutils.RegisterEcho(s1, nil)

		// closeRelay signals when to start closing the relay.
		var (
			closeRelay  sync.WaitGroup
			stopCalling atomic.Int32
			callers     sync.WaitGroup
		)

		for i := 0; i < 5; i++ {
			callers.Add(1)
			closeRelay.Add(1)

			go func() {
				defer callers.Done()

				calls := 0
				for stopCalling.Load() == 0 {
					testutils.CallEcho(s2, ts.HostPort(), "s1", nil)
					calls++
					if calls == 5 {
						closeRelay.Done()
					}
				}
			}()
		}

		closeRelay.Wait()

		// Close the relay and wait for it to close.
		ts.Relay().Close()
		closed := testutils.WaitFor(time.Second, func() bool {
			return ts.Relay().State() == ChannelClosed
		})
		assert.True(t, closed, "Relay did not close within timeout")

		// Now stop all calls, and wait for the calling goroutines to end.
		stopCalling.Inc()
		callers.Wait()
	})
}
Example #10
func TestRelayMakeOutgoingCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		svr1 := ts.Relay()
		svr2 := ts.NewServer(testutils.NewOpts().SetServiceName("svc2"))
		testutils.RegisterEcho(svr2, nil)

		sizes := []int{128, 1024, 128 * 1024}
		for _, size := range sizes {
			err := testutils.CallEcho(svr1, ts.HostPort(), "svc2", &raw.Args{
				Arg2: testutils.RandBytes(size),
				Arg3: testutils.RandBytes(size),
			})
			assert.NoError(t, err, "Echo with size %v failed", size)
		}
	})
}
Example #11
// TestCloseSendError verifies that we don't try to send system errors once a
// connection is closed, and that there are no race conditions, such as an
// error frame being added to the channel just as it is closed.
func TestCloseSendError(t *testing.T) {
	var (
		closed  atomic.Uint32
		counter atomic.Uint32
	)

	opts := testutils.NewOpts().DisableLogVerification()
	serverCh := testutils.NewServer(t, opts)
	testutils.RegisterEcho(serverCh, func() {
		if counter.Inc() > 10 {
			// Close the server in a goroutine to possibly trigger more race conditions.
			go func() {
				closed.Inc()
				serverCh.Close()
			}()
		}
	})

	clientCh := testutils.NewClient(t, opts)

	// Create a connection that will be shared.
	require.NoError(t, testutils.Ping(clientCh, serverCh), "Ping from client to server failed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			err := testutils.CallEcho(clientCh, serverCh.PeerInfo().HostPort, serverCh.ServiceName(), nil)
			if err != nil && closed.Load() == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	goroutines.VerifyNoLeaks(t, nil)
}
Example #12
func getRawCallFrames(timeout time.Duration, svcName string, reqSize int) frames {
	var fs frames
	modifier := func(fromClient bool, f *tchannel.Frame) *tchannel.Frame {
		buf := &bytes.Buffer{}
		if err := f.WriteOut(buf); err != nil {
			panic(err)
		}

		if fromClient {
			fs.outgoing = append(fs.outgoing, buf.Bytes())
		} else {
			fs.incoming = append(fs.incoming, buf.Bytes())
		}

		return f
	}

	withNewServerClient(svcName, func(server, client *tchannel.Channel) {
		testutils.RegisterEcho(server, nil)

		relay, err := NewTCPFrameRelay([]string{server.PeerInfo().HostPort}, modifier)
		if err != nil {
			panic(err)
		}
		defer relay.Close()

		args := &raw.Args{
			Arg2: getRequestBytes(reqSize),
			Arg3: getRequestBytes(reqSize),
		}

		ctx, cancel := tchannel.NewContext(timeout)
		defer cancel()

		if _, _, _, err := raw.Call(ctx, client, relay.HostPort(), svcName, "echo", args.Arg2, args.Arg3); err != nil {
			panic(err)
		}
	})

	return fs
}
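The frames type is not defined in this excerpt. A sketch inferred from the appends in the modifier above, plus an illustrative call:

// frames collects raw frame bytes by direction, as appended to in the
// modifier above. The struct shape is inferred, not copied from the source.
type frames struct {
	incoming [][]byte // frames sent from the server back to the client
	outgoing [][]byte // frames sent from the client to the server
}

// Illustrative use: capture the frames for a 64-byte echo call.
// fs := getRawCallFrames(time.Second, "svc", 64)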
Example #13
// TestCloseSendError verifies that we don't try to send system errors once a
// connection is closed, and that there are no race conditions, such as an
// error frame being added to the channel just as it is closed.
// TODO(prashant): This test is waiting for timeout, but socket close shouldn't wait for timeout.
func TestCloseSendError(t *testing.T) {
	closed := uint32(0)
	counter := uint32(0)

	serverCh := testutils.NewServer(t, nil)
	testutils.RegisterEcho(serverCh, func() {
		if atomic.AddUint32(&counter, 1) > 10 {
			// Close the server in a goroutine to possibly trigger more race conditions.
			go func() {
				atomic.AddUint32(&closed, 1)
				serverCh.Close()
			}()
		}
	})

	clientCh := testutils.NewClient(t, nil)

	// Create a connection that will be shared.
	require.NoError(t, testutils.Ping(clientCh, serverCh), "Ping from client to server failed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			err := testutils.CallEcho(clientCh, serverCh.PeerInfo().HostPort, serverCh.ServiceName(), nil)
			if err != nil && atomic.LoadUint32(&closed) == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	goroutines.VerifyNoLeaks(t, nil)
}
Example #14
func TestRelayHandlesClosedPeers(t *testing.T) {
	opts := serviceNameOpts("test").SetRelayOnly().
		// Disable logs as we close connections, which can cause errors in many places.
		DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(300 * time.Millisecond)
		defer cancel()

		testutils.RegisterEcho(ts.Server(), nil)
		client := ts.NewClient(serviceNameOpts("client"))
		client.Peers().Add(ts.HostPort())

		sc := client.GetSubChannel("test")
		_, _, _, err := raw.CallSC(ctx, sc, "echo", []byte("fake-header"), []byte("fake-body"))
		require.NoError(t, err, "Relayed call failed.")

		ts.Server().Close()
		require.NotPanics(t, func() {
			raw.CallSC(ctx, sc, "echo", []byte("fake-header"), []byte("fake-body"))
		})
	})
}
Example #15
func TestRelayConnectionCloseDrainsRelayItems(t *testing.T) {
	opts := serviceNameOpts("s1").SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2"))

		s2HP := s2.PeerInfo().HostPort
		testutils.RegisterEcho(s1, func() {
			// When s1 gets called, it calls Close on the connection from the relay to s2.
			conn, err := ts.Relay().Peers().GetOrAdd(s2HP).GetConnection(ctx)
			require.NoError(t, err, "Unexpected failure getting connection between s1 and relay")
			conn.Close()
		})

		testutils.AssertEcho(t, s2, ts.HostPort(), "s1")

		calls := relaytest.NewMockStats()
		calls.Add("s2", "s1", "echo").Succeeded().End()
		ts.AssertRelayStats(calls)
	})
}
Example #16
func TestWriteAfterConnectionError(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Closing network connections can lead to warnings in many places.
	// TODO: Relay is disabled due to https://github.com/uber/tchannel-go/issues/390
	// Enabling relay causes the test to be flaky.
	opts := testutils.NewOpts().DisableLogVerification().NoRelay()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testutils.RegisterEcho(ts.Server(), nil)
		server := ts.Server()

		call, err := server.BeginCall(ctx, ts.HostPort(), server.ServiceName(), "echo", nil)
		require.NoError(t, err, "Call failed")

		w, err := call.Arg2Writer()
		require.NoError(t, err, "Arg2Writer failed")
		require.NoError(t, writeFlushStr(w, "initial"), "write initial failed")

		// Now close the underlying network connection; writes should fail.
		_, conn := OutboundConnection(call)
		conn.Close()

		// Writes should start failing pretty soon.
		var writeErr error
		for i := 0; i < 100; i++ {
			if writeErr = writeFlushStr(w, "f"); writeErr != nil {
				break
			}
			time.Sleep(time.Millisecond)
		}
		if assert.Error(t, writeErr, "Writes should fail after a connection is closed") {
			assert.Equal(t, ErrCodeNetwork, GetSystemErrorCode(writeErr), "write should fail due to network error")
		}
	})
}
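The writeFlushStr helper is not shown in this excerpt. A minimal sketch, assuming tchannel's ArgWriter interface (an io.WriteCloser with Flush):

// writeFlushStr writes a string to an arg writer and flushes it so the
// bytes are pushed to the connection immediately. A sketch of the helper
// used above, not the verified source.
func writeFlushStr(w ArgWriter, s string) error {
	if _, err := io.WriteString(w, s); err != nil {
		return err
	}
	return w.Flush()
}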
Example #17
func TestTraceReportingEnabled(t *testing.T) {
	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)

	var state struct {
		signal chan struct{}

		call TraceData
		span Span
	}
	testTraceReporter := TraceReporterFunc(func(data TraceData) {
		defer close(state.signal)

		span := data.Span
		data.Span = Span{}
		state.call = data
		state.span = span
	})

	traceReporterOpts := testutils.NewOpts().SetTraceReporter(testTraceReporter)
	tests := []struct {
		name       string
		serverOpts *testutils.ChannelOpts
		clientOpts *testutils.ChannelOpts
		expected   []Annotation
		fromServer bool
	}{
		{
			name:       "inbound",
			serverOpts: traceReporterOpts,
			expected: []Annotation{
				{Key: "sr", Timestamp: initialTime.Add(2 * time.Second)},
				{Key: "ss", Timestamp: initialTime.Add(3 * time.Second)},
			},
			fromServer: true,
		},
		{
			name:       "outbound",
			clientOpts: traceReporterOpts,
			expected: []Annotation{
				{Key: "cs", Timestamp: initialTime.Add(time.Second)},
				{Key: "cr", Timestamp: initialTime.Add(6 * time.Second)},
			},
		},
	}

	for _, tt := range tests {
		state.signal = make(chan struct{})

		serverNow, serverNowFn := testutils.NowStub(initialTime.Add(time.Second))
		clientNow, clientNowFn := testutils.NowStub(initialTime)
		serverNowFn(time.Second)
		clientNowFn(time.Second)

		tt.serverOpts = testutils.DefaultOpts(tt.serverOpts).SetTimeNow(serverNow)
		tt.clientOpts = testutils.DefaultOpts(tt.clientOpts).SetTimeNow(clientNow)

		WithVerifiedServer(t, tt.serverOpts, func(ch *Channel, hostPort string) {
			testutils.RegisterEcho(ch, func() {
				clientNowFn(5 * time.Second)
			})

			clientCh := testutils.NewClient(t, tt.clientOpts)
			defer clientCh.Close()
			ctx, cancel := NewContext(time.Second)
			defer cancel()

			_, _, _, err := raw.Call(ctx, clientCh, hostPort, ch.PeerInfo().ServiceName, "echo", nil, []byte("arg3"))
			require.NoError(t, err, "raw.Call failed")

			binaryAnnotations := []BinaryAnnotation{
				{"cn", clientCh.PeerInfo().ServiceName},
				{"as", Raw.String()},
			}
			target := TraceEndpoint{
				HostPort:    hostPort,
				ServiceName: ch.ServiceName(),
			}
			source := target
			if !tt.fromServer {
				source = TraceEndpoint{
					HostPort:    "0.0.0.0:0",
					ServiceName: clientCh.ServiceName(),
				}
			}

			select {
			case <-state.signal:
			case <-time.After(time.Second):
				t.Fatalf("Did not receive trace report within timeout")
			}

			expected := TraceData{Annotations: tt.expected, BinaryAnnotations: binaryAnnotations, Source: source, Target: target, Method: "echo"}
			assert.Equal(t, expected, state.call, "%v: Report args mismatch", tt.name)
			curSpan := CurrentSpan(ctx)
			assert.Equal(t, NewSpan(curSpan.TraceID(), 0, curSpan.TraceID()), state.span, "Span mismatch")
		})
	}
}