Example #1
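// TestStatsCalls verifies that the client and server stats reporters record
// the expected counters and latency timers for a successful call and for an
// application-error call, using stubbed clocks so latencies are deterministic.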
func TestStatsCalls(t *testing.T) {
	defer testutils.SetTimeout(t, time.Second)()

	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)
	clientNow, clientNowFn := testutils.NowStub(initialTime)
	serverNow, serverNowFn := testutils.NowStub(initialTime)
	clientNowFn(100 * time.Millisecond)
	serverNowFn(50 * time.Millisecond)

	clientStats := newRecordingStatsReporter()
	serverStats := newRecordingStatsReporter()
	serverOpts := testutils.NewOpts().
		SetStatsReporter(serverStats).
		SetTimeNow(serverNow)
	WithVerifiedServer(t, serverOpts, func(serverCh *Channel, hostPort string) {
		handler := raw.Wrap(newTestHandler(t))
		serverCh.Register(handler, "echo")
		serverCh.Register(handler, "app-error")

		ch := testutils.NewClient(t, testutils.NewOpts().
			SetStatsReporter(clientStats).
			SetTimeNow(clientNow))
		defer ch.Close()

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		outboundTags := tagsForOutboundCall(serverCh, ch, "echo")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags := tagsForInboundCall(serverCh, ch, "echo")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.success", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)

		_, _, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "app-error", nil, nil)
		require.NoError(t, err)
		require.True(t, resp.ApplicationError(), "expected application error")

		outboundTags = tagsForOutboundCall(serverCh, ch, "app-error")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.per-attempt.app-errors", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.app-errors", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags = tagsForInboundCall(serverCh, ch, "app-error")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.app-errors", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)
	})

	clientStats.Validate(t)
	serverStats.Validate(t)
}
Example #2
// TestCloseSendError tests that we don't attempt to send system errors once a
// connection is closed, and that there is no race between an error frame being
// added to the channel and the channel closing.
// TODO(prashant): This test is waiting for timeout, but socket close shouldn't wait for timeout.
func TestCloseSendError(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	serverCh, err := testutils.NewServer(nil)
	require.NoError(t, err, "NewServer failed")

	closed := uint32(0)
	counter := uint32(0)
	testutils.RegisterFunc(t, serverCh, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		atomic.AddUint32(&counter, 1)
		return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
	})

	clientCh, err := testutils.NewClient(nil)
	require.NoError(t, err, "NewClient failed")

	// Make a call to create a connection that will be shared.
	peerInfo := serverCh.PeerInfo()
	_, _, _, err = raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
	require.NoError(t, err, "Call should succeed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			_, _, _, err := raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
			if err != nil && atomic.LoadUint32(&closed) == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for the server to have processed some number of these calls.
	for {
		if atomic.LoadUint32(&counter) >= 10 {
			break
		}
		runtime.Gosched()
	}

	atomic.AddUint32(&closed, 1)
	serverCh.Close()

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	VerifyNoBlockedGoroutines(t)
}
Example #3
func TestActiveCallReq(t *testing.T) {
	t.Skip("Test skipped due to unreliable way to test for protocol errors")

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Note: This test cannot use log verification as the duplicate ID causes a log.
	// It does not use a verified server, as it leaks a message exchange due to the
	// modification of IDs in the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithServer(t, opts, func(ch *Channel, hostPort string) {
		gotCall := make(chan struct{})
		unblock := make(chan struct{})

		testutils.RegisterFunc(ch, "blocked", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			gotCall <- struct{}{}
			<-unblock
			return &raw.Res{}, nil
		})

		relayFunc := func(outgoing bool, frame *Frame) *Frame {
			if outgoing && frame.Header.ID == 3 {
				frame.Header.ID = 2
			}
			return frame
		}

		relayHostPort, closeRelay := testutils.FrameRelay(t, hostPort, relayFunc)
		defer closeRelay()

		firstComplete := make(chan struct{})
		go func() {
			// This call will block until we close unblock.
			raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
			close(firstComplete)
		}()

		// Wait for the first call to be received by the server
		<-gotCall

		// Make a new call, which should fail
		_, _, _, err := raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		assert.Error(t, err, "Expect error")
		assert.True(t, strings.Contains(err.Error(), "already active"),
			"expected already active error, got %v", err)

		close(unblock)
		<-firstComplete
	})
}
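Example #4
// BenchmarkCallsConcurrent measures concurrent raw.Call throughput using five
// worker goroutines, each with its own client channel, against a shared server.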
func BenchmarkCallsConcurrent(b *testing.B) {
	const numWorkers = 5

	serverCh, svcName, svcHostPort := setupServer(b)
	defer serverCh.Close()

	var wg sync.WaitGroup
	inCh := make(chan struct{})
	for i := 0; i < numWorkers; i++ {
		go func() {
			clientCh := testutils.NewClient(b, nil)
			defer clientCh.Close()

			for range inCh {
				ctx, cancel := NewContext(time.Second)

				_, _, _, err := raw.Call(ctx, clientCh, svcHostPort, svcName, "echo", []byte("data111"), []byte("data222"))
				assert.NoError(b, err)

				cancel()
				wg.Done()
			}
		}()
	}

	for i := 0; i < b.N; i++ {
		wg.Add(1)
		inCh <- struct{}{}
	}

	wg.Wait()
	close(inCh)
}
Example #5
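// TestCancelled verifies that a call made with an already-canceled context
// fails with context.Canceled.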
func TestCancelled(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")
		ctx, cancel := NewContext(time.Second)

		// Make a call first to make sure we have a connection.
		// We want to test the BeginCall path.
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", []byte("Headers"), []byte("Body"))
		assert.NoError(t, err, "Call failed")

		// Now cancel the context.
		cancel()
		_, _, _, err = raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", []byte("Headers"), []byte("Body"))
		assert.Equal(t, context.Canceled, err, "Unexpected error when making call with canceled context")
	})
}
Example #6
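// TestCloseAfterTimeout verifies that a client channel can be closed
// immediately after one of its calls has timed out, without leaking goroutines.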
func TestCloseAfterTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ch.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(10 * time.Millisecond)
		defer cancel()

		// Make a call, wait for it to timeout.
		clientCh, err := testutils.NewClient(nil)
		require.NoError(t, err, "NewClient failed")
		peerInfo := ch.PeerInfo()
		_, _, _, err = raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "block", nil, nil)
		require.Error(t, err, "Expected call to timeout")

		// The client channel should also close immediately.
		clientCh.Close()
		runtime.Gosched()
		assert.Equal(t, ChannelClosed, clientCh.State())
		assert.True(t, clientCh.Closed(), "Channel should be closed")

		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
	VerifyNoBlockedGoroutines(t)
}
Example #7
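// makeCall issues a raw call to the "test" method on the given host:port and
// service, returning only the error.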
func makeCall(ch *Channel, hostPort, service string) error {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	_, _, _, err := raw.Call(ctx, ch, hostPort, service, "test", nil, nil)
	return err
}
Example #8
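// makeCall issues a raw call to the "test" method on the given test server,
// returning only the error.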
func makeCall(client *Channel, server *testutils.TestServer) error {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	_, _, _, err := raw.Call(ctx, client, server.HostPort(), server.ServiceName(), "test", nil, nil)
	return err
}
Example #9
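// TestLargeRequest echoes payloads that double in size up to 1 GB to stress
// fragmentation of large arguments; it is gated by CheckStress so it only runs
// during stress testing.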
func TestLargeRequest(t *testing.T) {
	CheckStress(t)

	const (
		KB = 1024
		MB = 1024 * KB
		GB = 1024 * MB

		maxRequestSize = 1 * GB
	)

	WithVerifiedServer(t, nil, func(serverCh *Channel, hostPort string) {
		serverCh.Register(raw.Wrap(newTestHandler(t)), "echo")

		for reqSize := 2; reqSize <= maxRequestSize; reqSize *= 2 {
			log.Printf("reqSize = %v", reqSize)
			arg3 := testutils.RandBytes(reqSize)
			arg2 := testutils.RandBytes(reqSize / 2)

			clientCh := testutils.NewClient(t, nil)
			ctx, cancel := NewContext(time.Second * 30)
			rArg2, rArg3, _, err := raw.Call(ctx, clientCh, hostPort, serverCh.PeerInfo().ServiceName, "echo", arg2, arg3)
			require.NoError(t, err, "Call failed")

			if !bytes.Equal(arg2, rArg2) {
				t.Errorf("echo arg2 mismatch")
			}
			if !bytes.Equal(arg3, rArg3) {
				t.Errorf("echo arg3 mismatch")
			}
			cancel()
		}
	})
}
Example #10
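// TestReadTimeout verifies that a call fails with context.Canceled when the
// handler cancels the caller's context, and that the relay records the call
// as a timeout.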
func TestReadTimeout(t *testing.T) {
	// The error frame may fail to send if the connection closes before the handler
	// sends it, or the handler's write may fail when the other side closes the conn.
	opts := testutils.NewOpts().
		AddLogFilter("Couldn't send outbound error frame", 1).
		AddLogFilter("Connection error", 1, "site", "read frames").
		AddLogFilter("Connection error", 1, "site", "write frames").
		AddLogFilter("simpleHandler OnError", 1,
			"error", "failed to send error frame, connection state connectionClosed")

	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		sn := ts.ServiceName()
		calls := relaytest.NewMockStats()

		for i := 0; i < 10; i++ {
			ctx, cancel := NewContext(time.Second)
			handler := func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				defer cancel()
				return nil, ErrTimeout
			}
			ts.RegisterFunc("call", handler)

			_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, nil)
			assert.Equal(t, context.Canceled, err, "Call should fail due to cancel")
			calls.Add(sn, sn, "call").Failed("timeout").End()
		}

		ts.AssertRelayStats(calls)
	})
}
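Example #11
// TestDirtyFrameRequests verifies that argument data survives a frame pool
// that returns dirty (non-zeroed) frames, by swapping args of various sizes.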
func TestDirtyFrameRequests(t *testing.T) {
	argSizes := []int{50000, 100000, 150000}
	WithVerifiedServer(t, &testutils.ChannelOpts{
		ServiceName: "swap-server",
		DefaultConnectionOptions: ConnectionOptions{
			FramePool: dirtyFramePool{},
		},
	}, func(serverCh *Channel, hostPort string) {
		peerInfo := serverCh.PeerInfo()
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")

		for _, arg2Size := range argSizes {
			for _, arg3Size := range argSizes {
				ctx, cancel := NewContext(time.Second)
				defer cancel()

				arg2, arg3 := testutils.RandBytes(arg2Size), testutils.RandBytes(arg3Size)
				res2, res3, _, err := raw.Call(ctx, serverCh, hostPort, peerInfo.ServiceName, "swap", arg2, arg3)
				if assert.NoError(t, err, "Call failed") {
					assert.Equal(t, arg2, res3, "Result arg3 wrong")
					assert.Equal(t, arg3, res2, "Result arg2 wrong")
				}
			}
		}
	})
}
Example #12
func TestWriteErrorAfterTimeout(t *testing.T) {
	// TODO: Make this test block at different points (e.g. before, during read/write).
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		timedOut := make(chan struct{})
		done := make(chan struct{})
		handler := func(ctx context.Context, call *InboundCall) {
			<-ctx.Done()
			<-timedOut
			_, err := raw.ReadArgs(call)
			assert.Equal(t, ErrTimeout, err, "Read args should fail with timeout")
			response := call.Response()
			assert.Equal(t, ErrTimeout, response.SendSystemError(ErrServerBusy), "SendSystemError should fail")
			close(done)
		}
		ts.Register(HandlerFunc(handler), "call")

		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, testutils.RandBytes(100000))
		assert.Equal(t, ErrTimeout, err, "Call should timeout")
		close(timedOut)

		select {
		case <-done:
		case <-time.After(time.Second):
			t.Errorf("Handler not called, timeout may be too low")
		}

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "call").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example #13
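// TestTimeout verifies that a blocked call fails with ErrTimeout on the
// client, that the server handler observes context.DeadlineExceeded, and that
// the relay records the call as failed with "timeout".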
func TestTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		// onError may be called when the block call tries to write the call response.
		onError := func(ctx context.Context, err error) {
			assert.Equal(t, ErrTimeout, err, "onError err should be ErrTimeout")
			assert.Equal(t, context.DeadlineExceeded, ctx.Err(), "Context should timeout")
		}
		testHandler := onErrorTestHandler{newTestHandler(t), onError}
		ts.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "block", []byte("Arg2"), []byte("Arg3"))
		assert.Equal(t, ErrTimeout, err)

		// Verify the server-side receives an error from the context.
		select {
		case err := <-testHandler.blockErr:
			assert.Equal(t, context.DeadlineExceeded, err, "Server should have received timeout")
		case <-time.After(time.Second):
			t.Errorf("Server did not receive call, may need higher timeout")
		}

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "block").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example #14
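// TestCloseAfterTimeout verifies that a client channel can be closed
// immediately after one of its calls has timed out.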
func TestCloseAfterTimeout(t *testing.T) {
	// Disable log verification since connections are closed after a timeout
	// and the relay might still be reading/writing to the connection.
	// TODO: Ideally, we only disable log verification on the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ts.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(100 * time.Millisecond)
		defer cancel()

		// Make a call, wait for it to timeout.
		clientCh := ts.NewClient(nil)
		_, _, _, err := raw.Call(ctx, clientCh, ts.HostPort(), ts.ServiceName(), "block", nil, nil)
		require.Equal(t, ErrTimeout, err, "Expected call to timeout")

		// The client channel should also close immediately.
		clientCh.Close()
		assertStateChangesTo(t, clientCh, ChannelClosed)
		assert.True(t, clientCh.Closed(), "Channel should be closed")

		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
}
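Example #15
// TestWriteErrorAfterTimeout verifies that after a call times out, the server
// handler's reads fail with ErrTimeout and SendSystemError also fails.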
func TestWriteErrorAfterTimeout(t *testing.T) {
	// TODO: Make this test block at different points (e.g. before, during read/write).
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		timedOut := make(chan struct{})
		done := make(chan struct{})
		handler := func(ctx context.Context, call *InboundCall) {
			<-ctx.Done()
			<-timedOut
			_, err := raw.ReadArgs(call)
			assert.Equal(t, ErrTimeout, err, "Read args should fail with timeout")
			response := call.Response()
			assert.Equal(t, ErrTimeout, response.SendSystemError(ErrServerBusy), "SendSystemError should fail")
			close(done)
		}
		ch.Register(HandlerFunc(handler), "call")

		ctx, cancel := NewContext(testutils.Timeout(20 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "call", nil, testutils.RandBytes(100000))
		assert.Equal(t, ErrTimeout, err, "Call should timeout")
		close(timedOut)
		<-done
	})
	goroutines.VerifyNoLeaks(t, nil)
}
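Example #16
// TestFragmentationSlowReader verifies that a multi-frame call fails when the
// handler delays reading long enough for fragments to be dropped.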
func TestFragmentationSlowReader(t *testing.T) {
	startReading, handlerComplete := make(chan struct{}), make(chan struct{})
	handler := func(ctx context.Context, call *InboundCall) {
		<-startReading
		_, err := raw.ReadArgs(call)
		assert.Error(t, err, "ReadArgs should fail since frames will be dropped due to slow reading")
		close(handlerComplete)
	}

	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ch.Register(HandlerFunc(handler), "echo")

		arg2 := testutils.RandBytes(MaxFramePayloadSize * MexChannelBufferSize)
		arg3 := testutils.RandBytes(MaxFramePayloadSize * (MexChannelBufferSize + 1))

		ctx, cancel := NewContext(10 * time.Millisecond)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", arg2, arg3)
		assert.Error(t, err, "Call should timeout due to slow reader")

		close(startReading)
		<-handlerComplete
	})
	VerifyNoBlockedGoroutines(t)
}
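Example #17
// TestDirtyFrameRequests verifies that argument data survives a frame pool
// that returns dirty (non-zeroed) frames, by swapping args of various sizes.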
func TestDirtyFrameRequests(t *testing.T) {
	argSizes := []int{25000, 50000, 75000}

	// Create the largest required random cache.
	testutils.RandBytes(argSizes[len(argSizes)-1])

	opts := testutils.NewOpts().
		SetServiceName("swap-server").
		SetFramePool(dirtyFramePool{})
	WithVerifiedServer(t, opts, func(serverCh *Channel, hostPort string) {
		peerInfo := serverCh.PeerInfo()
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")

		for _, argSize := range argSizes {
			ctx, cancel := NewContext(time.Second)
			defer cancel()

			arg2, arg3 := testutils.RandBytes(argSize), testutils.RandBytes(argSize)
			res2, res3, _, err := raw.Call(ctx, serverCh, hostPort, peerInfo.ServiceName, "swap", arg2, arg3)
			if assert.NoError(t, err, "Call failed") {
				assert.Equal(t, arg2, res3, "Result arg3 wrong")
				assert.Equal(t, arg3, res2, "Result arg2 wrong")
			}
		}
	})
}
Example #18
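// TestFragmentation echoes arguments larger than a single frame payload to
// exercise fragmentation and reassembly.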
func TestFragmentation(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")

		arg2 := make([]byte, MaxFramePayloadSize*2)
		for i := 0; i < len(arg2); i++ {
			arg2[i] = byte('a' + (i % 10))
		}

		arg3 := make([]byte, MaxFramePayloadSize*3)
		for i := 0; i < len(arg3); i++ {
			arg3[i] = byte('A' + (i % 10))
		}

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		respArg2, respArg3, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", arg2, arg3)
		require.NoError(t, err)
		assert.Equal(t, arg2, respArg2)
		assert.Equal(t, arg3, respArg3)

		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "echo").Succeeded().End()
		ts.AssertRelayStats(calls)
	})
}
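Example #19
// TestFragmentationSlowReader verifies that a multi-frame call times out when
// the handler only starts reading after the context has expired.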
func TestFragmentationSlowReader(t *testing.T) {
	startReading, handlerComplete := make(chan struct{}), make(chan struct{})
	handler := func(ctx context.Context, call *InboundCall) {
		<-ctx.Done()
		<-startReading
		_, err := raw.ReadArgs(call)
		assert.Error(t, err, "ReadArgs should fail since frames will be dropped due to slow reading")
		close(handlerComplete)
	}

	// The inbound forward will time out and cause a warning log.
	opts := testutils.NewOpts().AddLogFilter("Unable to forward frame", 1)
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		ch.Register(HandlerFunc(handler), "echo")

		arg2 := testutils.RandBytes(MaxFramePayloadSize * MexChannelBufferSize)
		arg3 := testutils.RandBytes(MaxFramePayloadSize * (MexChannelBufferSize + 1))

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", arg2, arg3)
		assert.Error(t, err, "Call should timeout due to slow reader")

		close(startReading)
		<-handlerComplete
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example #20
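// TestTimeoutCallsThenClose runs 100 concurrent calls with a short timeout
// against a briefly blocked handler, exercising races between call timeouts
// and channel close.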
func TestTimeoutCallsThenClose(t *testing.T) {
	// Test needs at least 2 CPUs to trigger race conditions.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	opts := serviceNameOpts("s1").SetRelayOnly().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2").DisableLogVerification())

		unblockEcho := make(chan struct{})
		testutils.RegisterEcho(s1, func() {
			<-unblockEcho
		})

		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()

		var callers sync.WaitGroup
		for i := 0; i < 100; i++ {
			callers.Add(1)
			go func() {
				defer callers.Done()
				raw.Call(ctx, s2, ts.HostPort(), "s1", "echo", nil, nil)
			}()
		}

		close(unblockEcho)

		// Wait for all the callers to end
		callers.Wait()
	})
}
Example #21
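// TestRaceExchangesWithClose closes the server while an inbound call is still
// active and new clients are connecting, and verifies that Close blocks in
// ChannelStartClose until the active exchange completes.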
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup

	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()

	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()

		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})

		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})

		client := ts.NewClient(opts)
		defer client.Close()

		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()

		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall

		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()

				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}

		// Now try to close the channel; it should block since there are active exchanges.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")

		close(completeCall)
		<-callDone
	})

	// Wait for all calls to complete
	wg.Wait()
}
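Example #22
// doErrorCall makes a call to a non-existent endpoint and expects a
// bad-request system error.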
func doErrorCall(t *testing.T, clientCh *Channel, hostPort string) {
	ctx, cancel := NewContext(time.Second * 5)
	defer cancel()

	_, _, _, err := raw.Call(ctx, clientCh, hostPort, "swap-server", "non-existent", nil, nil)
	assert.Error(t, err, "Call to non-existent endpoint should fail")
	assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err), "Error code mismatch")
}
Example #23
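// firstCall makes a "rawcall" to the handler's own channel and decodes the
// raw response into a TracingResponse.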
func (h *RawHandler) firstCall(ctx context.Context, req *TracingRequest) (*TracingResponse, error) {
	_, arg3, _, err := raw.Call(ctx, h.Ch, h.Ch.PeerInfo().HostPort, h.Ch.PeerInfo().ServiceName,
		"rawcall", nil, requestToRaw(req))
	if err != nil {
		return nil, err
	}
	return responseFromRaw(h.t, arg3)
}
Example #24
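// TestTraceSamplingRate verifies that the fraction of calls with tracing
// enabled stays within the expected range for each sample rate, and that every
// trace-enabled call is reported.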
func TestTraceSamplingRate(t *testing.T) {
	rand.Seed(10)

	tests := []struct {
		sampleRate  float64 // if this is < 0, then the value is not set.
		count       int
		expectedMin int
		expectedMax int
	}{
		{1.0, 100, 100, 100},
		{0.5, 100, 40, 60},
		{0.1, 100, 5, 15},
		{0, 100, 0, 0},
		{-1, 100, 100, 100}, // default of 1.0 should be used.
	}

	for _, tt := range tests {
		var reportedTraces int
		testTraceReporter := TraceReporterFunc(func(_ TraceData) {
			reportedTraces++
		})

		WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
			var tracedCalls int
			testutils.RegisterFunc(ch, "t", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				if CurrentSpan(ctx).TracingEnabled() {
					tracedCalls++
				}

				return &raw.Res{}, nil
			})

			opts := testutils.NewOpts().SetTraceReporter(testTraceReporter)
			if tt.sampleRate >= 0 {
				opts.SetTraceSampleRate(tt.sampleRate)
			}

			client := testutils.NewClient(t, opts)
			defer client.Close()

			for i := 0; i < tt.count; i++ {
				ctx, cancel := NewContext(time.Second)
				defer cancel()

				_, _, _, err := raw.Call(ctx, client, hostPort, ch.PeerInfo().ServiceName, "t", nil, nil)
				require.NoError(t, err, "raw.Call failed")
			}

			assert.Equal(t, reportedTraces, tracedCalls,
				"Number of traces reported doesn't match calls with tracing enabled")
			assert.True(t, tracedCalls >= tt.expectedMin,
				"Number of trace-enabled calls (%v) expected to be at least %v", tracedCalls, tt.expectedMin)
			assert.True(t, tracedCalls <= tt.expectedMax,
				"Number of trace-enabled calls (%v) expected to be at most %v", tracedCalls, tt.expectedMax)
		})
	}
}
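Example #25
// TestLargeMethod verifies that a method name over 16 KB is rejected with
// ErrMethodTooLarge.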
func TestLargeMethod(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		largeMethod := testutils.RandBytes(16*1024 + 1)
		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, string(largeMethod), nil, nil)
		assert.Equal(t, ErrMethodTooLarge, err)
	})
}
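Example #26
// TestNoTimeout verifies that a call without a context deadline fails with
// ErrTimeoutRequired.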
func TestNoTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "Echo")

		ctx := context.Background()
		_, _, _, err := raw.Call(ctx, ch, hostPort, "svc", "Echo", []byte("Headers"), []byte("Body"))
		require.NotNil(t, err)
		assert.Equal(t, ErrTimeoutRequired, err)
	})
}
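Example #27
// TestBadRequest verifies that a call to an unknown service and method fails
// with a bad-request system error.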
func TestBadRequest(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, "Nowhere", "Noone", []byte("Headers"), []byte("Body"))
		require.NotNil(t, err)
		assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err))
	})
}
Example #28
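// TestLargeMethod verifies that a method name over 16 KB is rejected with
// ErrMethodTooLarge.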
func TestLargeMethod(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		largeMethod := testutils.RandBytes(16*1024 + 1)
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), string(largeMethod), nil, nil)
		assert.Equal(t, ErrMethodTooLarge, err)
	})
}
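Example #29
// TestActiveCallReq rewrites outgoing frame IDs through a relay so that a
// second call reuses an in-flight message exchange ID, and verifies that the
// second call fails as already active.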
func TestActiveCallReq(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Note: This test leaks a message exchange due to the modification of IDs in the relay.
	require.NoError(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		gotCall := make(chan struct{})
		unblock := make(chan struct{})

		testutils.RegisterFunc(t, ch, "blocked", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			gotCall <- struct{}{}
			<-unblock
			return &raw.Res{}, nil
		})

		relayFunc := func(outgoing bool, frame *Frame) *Frame {
			if outgoing && frame.Header.ID == 2 {
				frame.Header.ID = 3
			}
			return frame
		}

		relayHostPort, closeRelay := testutils.FrameRelay(t, hostPort, relayFunc)
		defer closeRelay()

		go func() {
			// This call will block until we close unblock.
			raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		}()

		// Wait for the first call to be received by the server
		<-gotCall

		// Make a new call, which should fail
		_, _, _, err := raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		assert.Error(t, err, "Expect error")
		assert.True(t, strings.Contains(err.Error(), "already active"),
			"expected already active error, got %v", err)

		close(unblock)
	}))
}
Example #30
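// TestNoServiceNaming verifies that a call with an empty service name fails
// with ErrNoServiceName and produces no relay stats.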
func TestNoServiceNaming(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), "", "Echo", []byte("Headers"), []byte("Body"))
		assert.Equal(t, ErrNoServiceName, err)

		ts.AssertRelayStats(relaytest.NewMockStats())
	})
}