Example #1
func TestReuseConnection(t *testing.T) {
	ctx, cancel := NewContext(time.Second * 5)
	defer cancel()

	s1Opts := &testutils.ChannelOpts{ServiceName: "s1"}
	require.Nil(t, testutils.WithServer(s1Opts, func(ch1 *Channel, hostPort1 string) {
		s2Opts := &testutils.ChannelOpts{ServiceName: "s2"}
		require.Nil(t, testutils.WithServer(s2Opts, func(ch2 *Channel, hostPort2 string) {
			ch1.Register(raw.Wrap(newTestHandler(t)), "echo")
			ch2.Register(raw.Wrap(newTestHandler(t)), "echo")

			// Wait for both servers to have their real (non-ephemeral) peer info set
			// before making outgoing calls, so the calls carry the correct peerInfo.
			require.True(t, testutils.WaitFor(time.Second, func() bool {
				return !ch1.PeerInfo().IsEphemeral() && !ch2.PeerInfo().IsEphemeral()
			}))

			outbound, err := ch1.BeginCall(ctx, hostPort2, "s2", "echo", nil)
			require.NoError(t, err)
			outboundConn, outboundNetConn := OutboundConnection(outbound)

			// Try to make another call at the same time, should reuse the same connection.
			outbound2, err := ch1.BeginCall(ctx, hostPort2, "s2", "echo", nil)
			require.NoError(t, err)
			outbound2Conn, _ := OutboundConnection(outbound2)
			assert.Equal(t, outboundConn, outbound2Conn)

			// When ch2 tries to call ch1, it should reuse the inbound connection from ch1.
			outbound3, err := ch2.BeginCall(ctx, hostPort1, "s1", "echo", nil)
			require.NoError(t, err)
			_, outbound3NetConn := OutboundConnection(outbound3)
			assert.Equal(t, outboundNetConn.RemoteAddr(), outbound3NetConn.LocalAddr())
			assert.Equal(t, outboundNetConn.LocalAddr(), outbound3NetConn.RemoteAddr())

			// Ensure all calls can complete in parallel.
			var wg sync.WaitGroup
			for _, call := range []*OutboundCall{outbound, outbound2, outbound3} {
				wg.Add(1)
				go func(call *OutboundCall) {
					defer wg.Done()
					resp1, resp2, _, err := raw.WriteArgs(call, []byte("arg2"), []byte("arg3"))
					require.NoError(t, err)
					assert.Equal(t, []byte("arg2"), resp1, "result does not match argument")
					assert.Equal(t, []byte("arg3"), resp2, "result does not match argument")
				}(call)
			}
			wg.Wait()
		}))
	}))
}
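
The examples in this listing all register their endpoints through raw.Wrap(...), but the wrapped handler type (e.g. newTestHandler) is never shown. The sketch below is a rough, unverified illustration of such a handler: the Handle/OnError method set is inferred from the function signature used in Example #18, and the raw.Args/raw.Res field names are assumptions based on how these examples echo arg2/arg3, not on the library source.

// Hypothetical echo handler compatible with raw.Wrap; the method set and the
// Args/Res field names are assumed (see the note above).
type echoHandler struct {
	t *testing.T
}

// Handle echoes both argument payloads back to the caller unchanged.
func (h echoHandler) Handle(ctx context.Context, args *raw.Args) (*raw.Res, error) {
	return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
}

// OnError fails the test if the framework reports an error outside of Handle.
func (h echoHandler) OnError(ctx context.Context, err error) {
	h.t.Errorf("unexpected error: %v", err)
}

Registration would then mirror the calls above, e.g. ch.Register(raw.Wrap(echoHandler{t: t}), "echo").
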
Example #2
func main() {
	flag.Parse()

	// Sets up a listener for pprof.
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()

	ch, err := tchannel.NewChannel("benchmark", &tchannel.ChannelOptions{
		ProcessName: "benchmark",
	})
	if err != nil {
		log.Fatalf("NewChannel failed: %v", err)
	}

	handler := raw.Wrap(&kvHandler{vals: make(map[string]string)})
	ch.Register(handler, "ping")
	ch.Register(handler, "get")
	ch.Register(handler, "set")

	if err := ch.ListenAndServe(*hostPort); err != nil {
		log.Fatalf("ListenAndServe failed: %v", err)
	}

	// Block forever; the channel keeps serving in the background.
	select {}
}
Example #3
func TestRoundTrip(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		handler := newTestHandler(t)
		ch.Register(raw.Wrap(handler), "echo")

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		call, err := ch.BeginCall(ctx, hostPort, testServiceName, "echo", &CallOptions{Format: JSON})
		require.NoError(t, err)

		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(testArg2))
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(testArg3))

		var respArg2 []byte
		require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&respArg2))
		assert.Equal(t, testArg2, []byte(respArg2))

		var respArg3 []byte
		require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&respArg3))
		assert.Equal(t, testArg3, []byte(respArg3))

		assert.Equal(t, JSON, handler.format)
		assert.Equal(t, testServiceName, handler.caller)
		assert.Equal(t, JSON, call.Response().Format(), "response Format should match request Format")
	}))
}
Example #4
func setupServer(b *testing.B) (ch *Channel, svcName, svcHostPort string) {
	serverCh, err := testutils.NewServer(nil)
	require.Nil(b, err)
	handler := &benchmarkHandler{}
	serverCh.Register(raw.Wrap(handler), "echo")

	peerInfo := serverCh.PeerInfo()
	return serverCh, peerInfo.ServiceName, peerInfo.HostPort
}
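
setupServer returns the server channel plus the service name and host:port a benchmark needs to drive the registered "echo" endpoint. A minimal benchmark built on top of it could look like the sketch below; it assumes, as the other examples do, that the handler simply mirrors its arguments, and it only reuses helpers that already appear in this listing (testutils.NewClient, NewContext, raw.Call).

// Hypothetical benchmark driving setupServer's "echo" endpoint; assumes the
// registered handler echoes its arguments back, as in the other examples.
func BenchmarkRawEcho(b *testing.B) {
	serverCh, svcName, svcHostPort := setupServer(b)
	defer serverCh.Close()

	clientCh, err := testutils.NewClient(nil)
	require.Nil(b, err)
	defer clientCh.Close()

	arg2, arg3 := []byte("bench-arg2"), []byte("bench-arg3")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ctx, cancel := NewContext(time.Second)
		_, _, _, err := raw.Call(ctx, clientCh, svcHostPort, svcName, "echo", arg2, arg3)
		require.NoError(b, err)
		cancel()
	}
}
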
Example #5
func TestNoTimeout(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "Echo")

		ctx := context.Background()
		_, _, _, err := raw.Call(ctx, ch, hostPort, "svc", "Echo", []byte("Headers"), []byte("Body"))
		require.NotNil(t, err)
		assert.Equal(t, ErrTimeoutRequired, err)
	}))
}
Example #6
File: main.go Project: hustxiaoc/tchannel
func main() {
	tchan, err := tchannel.NewChannel("go-echo-server", nil)
	if err != nil {
		log.Fatalf("Failed to create channel: %v", err)
	}

	listenIP, err := tchannel.ListenIP()
	if err != nil {
		log.Fatalf("Failed to get IP to listen on: %v", err)
	}

	l, err := net.Listen("tcp", listenIP.String()+":61543")
	if err != nil {
		log.Fatalf("Could not listen: %v", err)
	}
	log.Printf("Listening on %v", l.Addr())

	sc := tchan.GetSubChannel("go-echo-2")
	tchan.Register(raw.Wrap(handler{""}), "echo")
	sc.Register(raw.Wrap(handler{"subchannel:"}), "echo")
	if err := tchan.Serve(l); err != nil {
		log.Fatalf("Serve failed: %v", err)
	}

	if len(os.Args[1:]) == 0 {
		log.Fatalf("You must provide Hyperbahn nodes as arguments")
	}

	// Advertise the service with Hyperbahn.
	config := hyperbahn.Configuration{InitialNodes: os.Args[1:]}
	client, err := hyperbahn.NewClient(tchan, config, &hyperbahn.ClientOptions{
		Handler: eventHandler{},
		Timeout: time.Second,
	})
	if err != nil {
		log.Fatalf("hyperbahn.NewClient failed: %v", err)
	}
	if err := client.Advertise(sc); err != nil {
		log.Fatalf("Advertise failed: %v", err)
	}

	// Server will keep running till Ctrl-C.
	select {}
}
Example #7
func TestServerBusy(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "busy")

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "busy", []byte("Arg2"), []byte("Arg3"))
		require.NotNil(t, err)
		assert.Equal(t, ErrCodeBusy, GetSystemErrorCode(err), "err: %v", err)
	}))
}
Example #8
func TestTimeout(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "timeout")

		ctx, cancel := NewContext(time.Millisecond * 100)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "timeout", []byte("Arg2"), []byte("Arg3"))

		// TODO(mmihic): Maybe translate this into ErrTimeout (or vice versa)?
		assert.Equal(t, context.DeadlineExceeded, err)
	}))
}
Example #9
func TestDefaultFormat(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		handler := newTestHandler(t)
		ch.Register(raw.Wrap(handler), "echo")

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		arg2, arg3, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", testArg2, testArg3)
		require.Nil(t, err)

		require.Equal(t, testArg2, arg2)
		require.Equal(t, testArg3, arg3)
		require.Equal(t, Raw, handler.format)
		assert.Equal(t, Raw, resp.Format(), "response Format should match request Format")
	}))
}
Example #10
func TestStatsCalls(t *testing.T) {
	serverStats := newRecordingStatsReporter()
	serverOpts := &testutils.ChannelOpts{
		StatsReporter: serverStats,
	}
	require.NoError(t, testutils.WithServer(serverOpts, func(serverCh *Channel, hostPort string) {
		handler := raw.Wrap(newTestHandler(t))
		serverCh.Register(handler, "echo")
		serverCh.Register(handler, "app-error")

		clientStats := newRecordingStatsReporter()
		ch, err := testutils.NewClient(&testutils.ChannelOpts{StatsReporter: clientStats})
		require.NoError(t, err)

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err = raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		_, _, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "app-error", nil, nil)
		require.NoError(t, err)
		require.True(t, resp.ApplicationError(), "expected application error")

		outboundTags := tagsForOutboundCall(serverCh, ch, "echo")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
		outboundTags["target-endpoint"] = "app-error"
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.app-errors", outboundTags, 1)

		inboundTags := tagsForInboundCall(serverCh, ch, "echo")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.success", inboundTags, 1)
		inboundTags["endpoint"] = "app-error"
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.app-errors", inboundTags, 1)

		clientStats.ValidateCounters(t)
		serverStats.ValidateCounters(t)
	}))
}
Example #11
func setupServer(host string, basePort, instanceNum int) error {
	hostPort := fmt.Sprintf("%s:%v", host, basePort+instanceNum)
	ch, err := tchannel.NewChannel("benchmark", &tchannel.ChannelOptions{
		ProcessName: fmt.Sprintf("benchmark-%v", instanceNum),
	})
	if err != nil {
		return fmt.Errorf("NewChannel failed: %v", err)
	}

	handler := raw.Wrap(&kvHandler{vals: make(map[string]string)})
	ch.Register(handler, "ping")
	ch.Register(handler, "get")
	ch.Register(handler, "set")

	if err := ch.ListenAndServe(hostPort); err != nil {
		return fmt.Errorf("ListenAndServe failed: %v", err)
	}

	return nil
}
Example #12
File: server.go Project: jammyluo/tchannel
func main() {
	flag.Parse()

	ch, err := tchannel.NewChannel("test_as_raw", nil)
	if err != nil {
		log.Fatalf("NewChannel failed: %v", err)
	}

	handler := raw.Wrap(rawHandler{})
	ch.Register(handler, "echo")
	ch.Register(handler, "streaming_echo")

	hostPort := fmt.Sprintf("%s:%v", *flagHost, *flagPort)
	if err := ch.ListenAndServe(hostPort); err != nil {
		log.Fatalf("ListenAndServe failed: %v", err)
	}

	fmt.Println("listening on", ch.PeerInfo().HostPort)
	select {}
}
Example #13
func TestLargeRequest(t *testing.T) {
	if !*flagStressTest {
		t.Skip("skipping as stress tests are not enabled")
	}

	const (
		KB = 1024
		MB = 1024 * KB
		GB = 1024 * MB

		maxRequestSize = 1 * GB
	)

	require.NoError(t, testutils.WithServer(nil, func(serverCh *Channel, hostPort string) {
		serverCh.Register(raw.Wrap(newTestHandler(t)), "echo")

		for reqSize := 2; reqSize <= maxRequestSize; reqSize *= 2 {
			log.Printf("reqSize = %v", reqSize)
			arg3 := makeData(reqSize)
			arg2 := arg3[len(arg3)/2:]

			clientCh, err := testutils.NewClient(nil)
			require.NoError(t, err, "new client failed")

			ctx, cancel := NewContext(time.Second * 30)
			rArg2, rArg3, _, err := raw.Call(ctx, clientCh, hostPort, serverCh.PeerInfo().ServiceName, "echo", arg2, arg3)
			require.NoError(t, err, "Call failed")

			if !bytes.Equal(arg2, rArg2) {
				t.Errorf("echo arg2 mismatch")
			}
			if !bytes.Equal(arg3, rArg3) {
				t.Errorf("echo arg3 mismatch")
			}
			cancel()
		}
	}))
}
Example #14
func TestFragmentation(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "echo")

		arg2 := make([]byte, MaxFramePayloadSize*2)
		for i := 0; i < len(arg2); i++ {
			arg2[i] = byte('a' + (i % 10))
		}

		arg3 := make([]byte, MaxFramePayloadSize*3)
		for i := 0; i < len(arg3); i++ {
			arg3[i] = byte('A' + (i % 10))
		}

		ctx, cancel := NewContext(time.Second * 10)
		defer cancel()

		respArg2, respArg3, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", arg2, arg3)
		require.NoError(t, err)
		assert.Equal(t, arg2, respArg2)
		assert.Equal(t, arg3, respArg3)
	}))
}
Example #15
func TestStatsCalls(t *testing.T) {
	statsReporter := newRecordingStatsReporter()
	testOpts := &testutils.ChannelOpts{
		StatsReporter: statsReporter,
	}
	require.NoError(t, testutils.WithServer(testOpts, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "echo")

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		_, _, _, err = raw.Call(ctx, ch, hostPort, testServiceName, "error", nil, nil)
		require.Error(t, err)

		host, err := os.Hostname()
		require.Nil(t, err)

		expectedTags := map[string]string{
			"app":             ch.PeerInfo().ProcessName,
			"host":            host,
			"service":         ch.PeerInfo().ServiceName,
			"target-service":  ch.PeerInfo().ServiceName,
			"target-endpoint": "echo",
		}
		statsReporter.Expected.IncCounter("outbound.calls.send", expectedTags, 1)
		statsReporter.Expected.IncCounter("outbound.calls.successful", expectedTags, 1)
		expectedTags["target-endpoint"] = "error"
		statsReporter.Expected.IncCounter("outbound.calls.send", expectedTags, 1)
		// TODO(prashant): Make the following stat work too.
		// statsReporter.Expected.IncCounter("outbound.calls.app-errors", expectedTags, 1)
		statsReporter.ValidateCounters(t)
	}))
}
Example #16
func TestFramesReleased(t *testing.T) {
	if testing.Short() {
		return
	}

	defer testutils.SetTimeout(t, 10*time.Second)()
	const (
		requestsPerGoroutine = 10
		numGoroutines        = 10
		maxRandArg           = 512 * 1024
	)

	var connections []*Connection
	pool := NewRecordingFramePool()
	require.NoError(t, testutils.WithServer(&testutils.ChannelOpts{
		ServiceName: "swap-server",
		DefaultConnectionOptions: ConnectionOptions{
			FramePool: pool,
		},
	}, func(serverCh *Channel, hostPort string) {
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")

		clientCh, err := NewChannel("swap-client", nil)
		require.NoError(t, err)
		defer clientCh.Close()

		// Create an active connection that can be shared by the goroutines by calling Ping.
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		require.NoError(t, clientCh.Ping(ctx, hostPort))

		var wg sync.WaitGroup
		worker := func() {
			for i := 0; i < requestsPerGoroutine; i++ {
				ctx, cancel := NewContext(time.Second * 5)
				defer cancel()

				require.NoError(t, clientCh.Ping(ctx, hostPort))

				arg2 := testutils.RandBytes(rand.Intn(maxRandArg))
				arg3 := testutils.RandBytes(rand.Intn(maxRandArg))
				resArg2, resArg3, _, err := raw.Call(ctx, clientCh, hostPort, "swap-server", "swap", arg2, arg3)
				if !assert.NoError(t, err, "error during sendRecv") {
					continue
				}

				// We expect the arguments to be swapped.
				if !bytes.Equal(arg3, resArg2) {
					t.Errorf("returned arg2 does not match expected:\n  got %v\n want %v", resArg2, arg3)
				}
				if !bytes.Equal(arg2, resArg3) {
					t.Errorf("returned arg3 does not match expected:\n  got %v\n want %v", resArg3, arg2)
				}
			}
			wg.Done()
		}

		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go worker()
		}

		wg.Wait()

		connections = append(connections, GetConnections(serverCh)...)
		connections = append(connections, GetConnections(clientCh)...)
	}))

	// Wait a few milliseconds for the closing of channels to take effect.
	time.Sleep(10 * time.Millisecond)

	if unreleasedCount, badFrames := pool.CheckEmpty(); badFrames != "" || unreleasedCount > 0 {
		t.Errorf("Frame pool has %v unreleased frames, errors:\n%v", unreleasedCount, badFrames)
	}

	// Check the message exchanges and make sure they are all empty.
	if exchangesLeft := CheckEmptyExchangesConns(connections); exchangesLeft != "" {
		t.Errorf("Found uncleared message exchanges:\n%v", exchangesLeft)
	}
}
Example #17
// TestClose ensures that once a Channel is closed, it cannot be reached.
func TestClose(t *testing.T) {
	if testing.Short() {
		return
	}

	const numHandlers = 5
	handler := &swapper{t}
	var lock sync.RWMutex
	var channels []*channelState

	// Start numHandlers servers, and don't close the connections till they are signalled.
	for i := 0; i < numHandlers; i++ {
		go func() {
			assert.NoError(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
				ch.Register(raw.Wrap(handler), "test")

				chState := &channelState{
					ch:      ch,
					closeCh: make(chan struct{}),
				}

				lock.Lock()
				channels = append(channels, chState)
				lock.Unlock()

				// Wait for a close signal.
				<-chState.closeCh

				// Hold the write lock while marking the channel closed; it is released
				// only after WithServer returns, once the channel has actually closed.
				lock.Lock()
				chState.closed = true
			}))
			lock.Unlock()
		}()
	}

	time.Sleep(time.Millisecond * 100)

	// Start goroutines to make calls until the test has ended.
	testEnded := make(chan struct{})
	for i := 0; i < 10; i++ {
		go func() {
			for {
				select {
				case <-testEnded:
					return
				default:
					// Keep making requests till the test ends.
				}

				// Get 2 random channels and make a call from one to the other.
				chState1 := channels[rand.Intn(len(channels))]
				chState2 := channels[rand.Intn(len(channels))]
				if chState1 == chState2 {
					continue
				}

				// Grab a read lock to make sure channels aren't closed while we call.
				lock.RLock()
				ch1Closed := chState1.closed
				ch2Closed := chState2.closed
				err := makeCall(chState1.ch, chState2.ch.PeerInfo().HostPort, chState2.ch.PeerInfo().ServiceName)
				lock.RUnlock()
				if ch1Closed || ch2Closed {
					assert.Error(t, err, "Call from %v to %v should fail", chState1.ch.PeerInfo(), chState2.ch.PeerInfo())
				} else {
					assert.NoError(t, err)
				}
			}
		}()
	}

	// Signal each channel to close until all of them are shut down.
	for i := 0; i < numHandlers; i++ {
		time.Sleep(time.Duration(rand.Intn(50)) * time.Millisecond)
		channels[i].closeCh <- struct{}{}
	}

	// Stop the calling goroutines now that the test is done.
	close(testEnded)
}
Example #18
func registerFunc(t *testing.T, ch *Channel, name string,
	f func(ctx context.Context, args *raw.Args) (*raw.Res, error)) {
	ch.Register(raw.Wrap(simpleHandler{t, f}), name)
}
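
A usage sketch for registerFunc follows; this is a hypothetical test, and the raw.Res fields (Arg2/Arg3) are assumed from the echo behaviour seen elsewhere in this listing.

// Hypothetical test using registerFunc to register an inline echo endpoint.
func TestEchoViaRegisterFunc(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		registerFunc(t, ch, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			// Echo the request arguments straight back.
			return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
		})

		ctx, cancel := NewContext(time.Second)
		defer cancel()

		arg2, arg3, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Arg2"), []byte("Arg3"))
		require.NoError(t, err)
		assert.Equal(t, []byte("Arg2"), arg2)
		assert.Equal(t, []byte("Arg3"), arg3)
	}))
}
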
Example #19
func TestStatsCalls(t *testing.T) {
	defer testutils.SetTimeout(t, time.Second)()

	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)
	nowFn := testutils.NowStub(GetTimeNow(), initialTime)
	defer testutils.ResetNowStub(GetTimeNow())
	// For each call, time.Now is invoked in this order:
	//   1. the sender records the time it starts sending,
	//   2. the receiver records the time the request is handed to the application,
	//   3. the receiver computes the application handler latency,
	//   4. the sender records the overall call latency.
	// So the expected inbound latency is 1 increment and the outbound latency is 3 increments.

	clientStats := newRecordingStatsReporter()
	serverStats := newRecordingStatsReporter()
	serverOpts := &testutils.ChannelOpts{
		StatsReporter: serverStats,
	}
	require.NoError(t, testutils.WithServer(serverOpts, func(serverCh *Channel, hostPort string) {
		handler := raw.Wrap(newTestHandler(t))
		serverCh.Register(handler, "echo")
		serverCh.Register(handler, "app-error")

		ch, err := testutils.NewClient(&testutils.ChannelOpts{StatsReporter: clientStats})
		require.NoError(t, err)

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		// Set now incrementor to 50ms, so expected Inbound latency is 50ms, outbound is 150ms.
		nowFn(50 * time.Millisecond)
		_, _, _, err = raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		outboundTags := tagsForOutboundCall(serverCh, ch, "echo")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 150*time.Millisecond)
		inboundTags := tagsForInboundCall(serverCh, ch, "echo")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.success", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)

		// Expected inbound latency = 70ms, outbound = 210ms.
		nowFn(70 * time.Millisecond)
		_, _, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "app-error", nil, nil)
		require.NoError(t, err)
		require.True(t, resp.ApplicationError(), "expected application error")

		outboundTags = tagsForOutboundCall(serverCh, ch, "app-error")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.app-errors", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 210*time.Millisecond)
		inboundTags = tagsForInboundCall(serverCh, ch, "app-error")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.app-errors", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 70*time.Millisecond)
	}))

	clientStats.Validate(t)
	serverStats.Validate(t)
}