Example #1
// TestCloseSendError tests that the channel does not try to send system errors
// once a connection is closed, and ensures there are no race conditions, such as
// an error frame being added to the channel just as it is closed.
// TODO(prashant): This test is waiting for timeout, but socket close shouldn't wait for timeout.
func TestCloseSendError(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	serverCh, err := testutils.NewServer(nil)
	require.NoError(t, err, "NewServer failed")

	closed := uint32(0)
	counter := uint32(0)
	registerFunc(t, serverCh, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		atomic.AddUint32(&counter, 1)
		return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
	})

	clientCh, err := testutils.NewClient(nil)
	require.NoError(t, err, "NewClient failed")

	// Make a call to create a connection that will be shared.
	peerInfo := serverCh.PeerInfo()
	_, _, _, err = raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
	require.NoError(t, err, "Call should succeed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			_, _, _, err := raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
			if err != nil && atomic.LoadUint32(&closed) == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for the server to have processed some number of these calls.
	for {
		if atomic.LoadUint32(&counter) >= 10 {
			break
		}
		runtime.Gosched()
	}

	atomic.AddUint32(&closed, 1)
	serverCh.Close()

	// Wait for all the goroutines to end
	wg.Wait()

	clientCh.Close()
	VerifyNoBlockedGoroutines(t)
}
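Example #2
// BenchmarkCallsConcurrent measures the throughput of "echo" calls issued
// concurrently by several client channels against a shared server.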
func BenchmarkCallsConcurrent(b *testing.B) {
	const numWorkers = 5

	serverCh, svcName, svcHostPort := setupServer(b)
	defer serverCh.Close()

	var wg sync.WaitGroup
	inCh := make(chan struct{})
	for i := 0; i < numWorkers; i++ {
		go func() {
			clientCh, err := testutils.NewClient(nil)
			require.NoError(b, err)
			defer clientCh.Close()

			for range inCh {
				ctx, cancel := NewContext(time.Second)

				_, _, _, err = raw.Call(ctx, clientCh, svcHostPort, svcName, "echo", []byte("data111"), []byte("data222"))
				assert.NoError(b, err)

				cancel()
				wg.Done()
			}
		}()
	}

	for i := 0; i < b.N; i++ {
		wg.Add(1)
		inCh <- struct{}{}
	}

	wg.Wait()
	close(inCh)
}
Example #3
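// makeCall issues a single "test" call to the given host:port and service,
// using a one-second timeout.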
func makeCall(ch *Channel, hostPort, service string) error {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	_, _, _, err := raw.Call(ctx, ch, hostPort, service, "test", nil, nil)
	return err
}
Example #4
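// TestLargeOperation verifies that a call whose operation name exceeds the
// allowed size fails with ErrOperationTooLarge.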
func TestLargeOperation(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		largeOperation := testutils.RandBytes(16*1024 + 1)
		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, string(largeOperation), nil, nil)
		assert.Equal(t, ErrOperationTooLarge, err)
	})
}
Example #5
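// TestBadRequest verifies that a call to an unknown service and endpoint fails
// with a bad request system error (ErrCodeBadRequest).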
func TestBadRequest(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, "Nowhere", "Noone", []byte("Headers"), []byte("Body"))
		require.NotNil(t, err)
		assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err))
	}))
}
Example #6
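// TestNoTimeout verifies that a call made with a context that has no deadline
// fails with ErrTimeoutRequired.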
func TestNoTimeout(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "Echo")

		ctx := context.Background()
		_, _, _, err := raw.Call(ctx, ch, hostPort, "svc", "Echo", []byte("Headers"), []byte("Body"))
		require.NotNil(t, err)
		assert.Equal(t, ErrTimeoutRequired, err)
	}))
}
Example #7
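// TestStatsCalls verifies the outbound and inbound call counters reported for
// a successful call and for a call that returns an application error.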
func TestStatsCalls(t *testing.T) {
	serverStats := newRecordingStatsReporter()
	serverOpts := &testutils.ChannelOpts{
		StatsReporter: serverStats,
	}
	require.NoError(t, testutils.WithServer(serverOpts, func(serverCh *Channel, hostPort string) {
		handler := raw.Wrap(newTestHandler(t))
		serverCh.Register(handler, "echo")
		serverCh.Register(handler, "app-error")

		clientStats := newRecordingStatsReporter()
		ch, err := testutils.NewClient(&testutils.ChannelOpts{StatsReporter: clientStats})
		require.NoError(t, err)

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err = raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		_, _, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "app-error", nil, nil)
		require.NoError(t, err)
		require.True(t, resp.ApplicationError(), "expected application error")

		outboundTags := tagsForOutboundCall(serverCh, ch, "echo")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
		outboundTags["target-endpoint"] = "app-error"
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.app-errors", outboundTags, 1)

		inboundTags := tagsForInboundCall(serverCh, ch, "echo")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.success", inboundTags, 1)
		inboundTags["endpoint"] = "app-error"
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.app-errors", inboundTags, 1)

		clientStats.ValidateCounters(t)
		serverStats.ValidateCounters(t)
	}))
}
Example #8
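// TestActiveCallReq uses a frame relay to rewrite message IDs so that a second
// call collides with an in-flight exchange, and expects the colliding call to
// fail with an "already active" error.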
func TestActiveCallReq(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Note: This test leaks a message exchange due to the modification of IDs in the relay.
	require.NoError(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		gotCall := make(chan struct{})
		unblock := make(chan struct{})

		testutils.RegisterFunc(t, ch, "blocked", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			gotCall <- struct{}{}
			<-unblock
			return &raw.Res{}, nil
		})

		relayFunc := func(outgoing bool, frame *Frame) *Frame {
			if outgoing && frame.Header.ID == 2 {
				frame.Header.ID = 3
			}
			return frame
		}

		relayHostPort, closeRelay := testutils.FrameRelay(t, hostPort, relayFunc)
		defer closeRelay()

		go func() {
			// This call will block until we close unblock.
			raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		}()

		// Wait for the first call to be received by the server
		<-gotCall

		// Make a new call, which should fail
		_, _, _, err := raw.Call(ctx, ch, relayHostPort, ch.PeerInfo().ServiceName, "blocked", nil, nil)
		assert.Error(t, err, "Expect error")
		assert.True(t, strings.Contains(err.Error(), "already active"),
			"expected already active error, got %v", err)

		close(unblock)
	}))
}
Example #9
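// TestServerBusy verifies that a busy response from the server surfaces to the
// caller as a system error with code ErrCodeBusy.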
func TestServerBusy(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "busy")

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "busy", []byte("Arg2"), []byte("Arg3"))
		require.NotNil(t, err)
		assert.Equal(t, ErrCodeBusy, GetSystemErrorCode(err), "err: %v", err)
	}))
}
Example #10
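// TestTimeout verifies that a call to the slow "timeout" handler fails with
// context.DeadlineExceeded once the 100ms deadline expires.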
func TestTimeout(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "timeout")

		ctx, cancel := NewContext(time.Millisecond * 100)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "timeout", []byte("Arg2"), []byte("Arg3"))

		// TODO(mmihic): Maybe translate this into ErrTimeout (or vice versa)?
		assert.Equal(t, context.DeadlineExceeded, err)
	}))
}
Example #11
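// TestShardKeyPropagates verifies that a shard key set via the context builder
// is visible to the handler through CurrentCall(ctx).ShardKey(), and that it is
// empty when no shard key is set.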
func TestShardKeyPropagates(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		peerInfo := ch.PeerInfo()
		testutils.RegisterFunc(t, ch, "test", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{
				Arg3: []byte(CurrentCall(ctx).ShardKey()),
			}, nil
		})

		ctx, cancel := NewContextBuilder(time.Second).Build()
		defer cancel()
		_, arg3, _, err := raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "test", nil, nil)
		assert.NoError(t, err, "Call failed")
		assert.Equal(t, arg3, []byte(""))

		ctx, cancel = NewContextBuilder(time.Second).
			SetShardKey("shard").Build()
		defer cancel()
		_, arg3, _, err = raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "test", nil, nil)
		assert.NoError(t, err, "Call failed")
		assert.Equal(t, string(arg3), "shard")
	})
}
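Example #12
// BenchmarkCallsSerial measures the latency of sequential "echo" calls made
// from a single client channel.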
func BenchmarkCallsSerial(b *testing.B) {
	serverCh, svcName, svcHostPort := setupServer(b)
	defer serverCh.Close()

	clientCh, err := testutils.NewClient(nil)
	require.NoError(b, err)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ctx, cancel := NewContext(time.Second)
		_, _, _, err = raw.Call(ctx, clientCh, svcHostPort, svcName, "echo", []byte("data111"), []byte("data222"))
		assert.NoError(b, err)
		cancel()
	}
}
Example #13
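// TestStatsCalls verifies the outbound call counters reported by the client for
// a successful "echo" call and a failed "error" call.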
func TestStatsCalls(t *testing.T) {
	statsReporter := newRecordingStatsReporter()
	testOpts := &testutils.ChannelOpts{
		StatsReporter: statsReporter,
	}
	require.NoError(t, testutils.WithServer(testOpts, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "echo")

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		_, _, _, err = raw.Call(ctx, ch, hostPort, testServiceName, "error", nil, nil)
		require.Error(t, err)

		host, err := os.Hostname()
		require.Nil(t, err)

		expectedTags := map[string]string{
			"app":             ch.PeerInfo().ProcessName,
			"host":            host,
			"service":         ch.PeerInfo().ServiceName,
			"target-service":  ch.PeerInfo().ServiceName,
			"target-endpoint": "echo",
		}
		statsReporter.Expected.IncCounter("outbound.calls.send", expectedTags, 1)
		statsReporter.Expected.IncCounter("outbound.calls.successful", expectedTags, 1)
		expectedTags["target-endpoint"] = "error"
		statsReporter.Expected.IncCounter("outbound.calls.send", expectedTags, 1)
		// TODO(prashant): Make the following stat work too.
		// statsReporter.Expected.IncCounter("outbound.calls.app-errors", expectedTags, 1)
		statsReporter.ValidateCounters(t)
	}))
}
Example #14
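// TestDefaultFormat verifies that calls made without an explicit format default
// to Raw on both the request and the response.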
func TestDefaultFormat(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		handler := newTestHandler(t)
		ch.Register(raw.Wrap(handler), "echo")

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		arg2, arg3, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", testArg2, testArg3)
		require.Nil(t, err)

		require.Equal(t, testArg2, arg2)
		require.Equal(t, testArg3, arg3)
		require.Equal(t, Raw, handler.format)
		assert.Equal(t, Raw, resp.Format(), "response Format should match request Format")
	}))
}
Example #15
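// TestCloseSingleChannel closes a channel while calls to itself are still in
// flight, and verifies it only reaches ChannelClosed once they all complete.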
func TestCloseSingleChannel(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	ch, err := testutils.NewServer(nil)
	require.NoError(t, err, "NewServer failed")

	var connected sync.WaitGroup
	var completed sync.WaitGroup
	blockCall := make(chan struct{})

	registerFunc(t, ch, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		connected.Done()
		<-blockCall
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	})

	for i := 0; i < 10; i++ {
		connected.Add(1)
		completed.Add(1)
		go func() {
			peerInfo := ch.PeerInfo()
			_, _, _, err := raw.Call(ctx, ch, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
			assert.NoError(t, err, "Call failed")
			completed.Done()
		}()
	}

	// Wait for all calls to connect before triggering the Close (so they do not fail).
	connected.Wait()
	ch.Close()

	// Unblock the calls, and wait for all the calls to complete.
	close(blockCall)
	completed.Wait()

	// Once all calls are complete, the channel should be closed.
	runtime.Gosched()
	assert.Equal(t, ChannelClosed, ch.State())
	VerifyNoBlockedGoroutines(t)
}
Example #16
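// TestCloseOneSide closes the calling channel while one of its calls is still
// pending, and verifies it transitions to ChannelClosed once the call completes.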
func TestCloseOneSide(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	ch1, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: "client"})
	require.NoError(t, err, "NewServer 1 failed")
	ch2, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: "server"})
	require.NoError(t, err, "NewServer 2 failed")

	connected := make(chan struct{})
	completed := make(chan struct{})
	blockCall := make(chan struct{})
	registerFunc(t, ch2, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		connected <- struct{}{}
		<-blockCall
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	})

	go func() {
		ch2Peer := ch2.PeerInfo()
		_, _, _, err := raw.Call(ctx, ch1, ch2Peer.HostPort, ch2Peer.ServiceName, "echo", nil, nil)
		assert.NoError(t, err, "Call failed")
		completed <- struct{}{}
	}()

	// Wait for connected before calling Close.
	<-connected
	ch1.Close()

	// Now unblock the call and wait for the call to complete.
	close(blockCall)
	<-completed

	// Once the call completes, the channel should be closed.
	runtime.Gosched()
	assert.Equal(t, ChannelClosed, ch1.State())

	// We need to close all open TChannels before verifying blocked goroutines.
	ch2.Close()
	VerifyNoBlockedGoroutines(t)
}
Example #17
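// TestLargeRequest is a stress test that echoes payloads of doubling size, up
// to 1GB, and verifies the echoed arguments match what was sent.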
func TestLargeRequest(t *testing.T) {
	if !*flagStressTest {
		t.Skip("skipping as stress tests are not enabled")
	}

	const (
		KB = 1024
		MB = 1024 * KB
		GB = 1024 * MB

		maxRequestSize = 1 * GB
	)

	require.NoError(t, testutils.WithServer(nil, func(serverCh *Channel, hostPort string) {
		serverCh.Register(raw.Wrap(newTestHandler(t)), "echo")

		for reqSize := 2; reqSize <= maxRequestSize; reqSize *= 2 {
			log.Printf("reqSize = %v", reqSize)
			arg3 := makeData(reqSize)
			arg2 := arg3[len(arg3)/2:]

			clientCh, err := testutils.NewClient(nil)
			require.NoError(t, err, "new client failed")

			ctx, cancel := NewContext(time.Second * 30)
			rArg2, rArg3, _, err := raw.Call(ctx, clientCh, hostPort, serverCh.PeerInfo().ServiceName, "echo", arg2, arg3)
			require.NoError(t, err, "Call failed")

			if !bytes.Equal(arg2, rArg2) {
				t.Errorf("echo arg2 mismatch")
			}
			if !bytes.Equal(arg3, rArg3) {
				t.Errorf("echo arg3 mismatch")
			}
			cancel()
		}
	}))
}
Example #18
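// TestFragmentation verifies that arguments larger than a single frame payload
// are fragmented and reassembled correctly.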
func TestFragmentation(t *testing.T) {
	require.Nil(t, testutils.WithServer(nil, func(ch *Channel, hostPort string) {
		ch.Register(raw.Wrap(newTestHandler(t)), "echo")

		arg2 := make([]byte, MaxFramePayloadSize*2)
		for i := 0; i < len(arg2); i++ {
			arg2[i] = byte('a' + (i % 10))
		}

		arg3 := make([]byte, MaxFramePayloadSize*3)
		for i := 0; i < len(arg3); i++ {
			arg3[i] = byte('A' + (i % 10))
		}

		ctx, cancel := NewContext(time.Second * 10)
		defer cancel()

		respArg2, respArg3, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", arg2, arg3)
		require.NoError(t, err)
		assert.Equal(t, arg2, respArg2)
		assert.Equal(t, arg3, respArg3)
	}))
}
Example #19
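// TestFramesReleased runs many concurrent "swap" calls through a recording
// frame pool and verifies that every frame is released and no message
// exchanges are left behind.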
func TestFramesReleased(t *testing.T) {
	if testing.Short() {
		return
	}

	defer testutils.SetTimeout(t, 10*time.Second)()
	const (
		requestsPerGoroutine = 10
		numGoroutines        = 10
		maxRandArg           = 512 * 1024
	)

	var connections []*Connection
	pool := NewRecordingFramePool()
	require.NoError(t, testutils.WithServer(&testutils.ChannelOpts{
		ServiceName: "swap-server",
		DefaultConnectionOptions: ConnectionOptions{
			FramePool: pool,
		},
	}, func(serverCh *Channel, hostPort string) {
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")

		clientCh, err := NewChannel("swap-client", nil)
		require.NoError(t, err)
		defer clientCh.Close()

		// Create an active connection that can be shared by the goroutines by calling Ping.
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		require.NoError(t, clientCh.Ping(ctx, hostPort))

		var wg sync.WaitGroup
		worker := func() {
			for i := 0; i < requestsPerGoroutine; i++ {
				ctx, cancel := NewContext(time.Second * 5)
				defer cancel()

				require.NoError(t, clientCh.Ping(ctx, hostPort))

				arg2 := testutils.RandBytes(rand.Intn(maxRandArg))
				arg3 := testutils.RandBytes(rand.Intn(maxRandArg))
				resArg2, resArg3, _, err := raw.Call(ctx, clientCh, hostPort, "swap-server", "swap", arg2, arg3)
				if !assert.NoError(t, err, "error during sendRecv") {
					continue
				}

				// We expect the arguments to be swapped.
				if !bytes.Equal(arg3, resArg2) {
					t.Errorf("returned arg2 does not match expected:\n  got %v\n want %v", resArg2, arg3)
				}
				if !bytes.Equal(arg2, resArg3) {
					t.Errorf("returned arg3 does not match expected:\n  got %v\n want %v", resArg3, arg2)
				}
			}
			wg.Done()
		}

		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go worker()
		}

		wg.Wait()

		connections = append(connections, GetConnections(serverCh)...)
		connections = append(connections, GetConnections(clientCh)...)
	}))

	// Wait a few milliseconds for the closing of channels to take effect.
	time.Sleep(10 * time.Millisecond)

	if unreleasedCount, isEmpty := pool.CheckEmpty(); isEmpty != "" || unreleasedCount > 0 {
		t.Errorf("Frame pool has %v unreleased frames, errors:\n%v", unreleasedCount, isEmpty)
	}

	// Check the message exchanges and make sure they are all empty.
	if exchangesLeft := CheckEmptyExchangesConns(connections); exchangesLeft != "" {
		t.Errorf("Found uncleared message exchanges:\n%v", exchangesLeft)
	}
}
Example #20
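// TestStatsCalls stubs out time.Now so that, in addition to the call counters,
// the inbound and outbound latency timers can be verified for a successful call
// and an application-error call.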
func TestStatsCalls(t *testing.T) {
	defer testutils.SetTimeout(t, time.Second)()

	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)
	nowFn := testutils.NowStub(GetTimeNow(), initialTime)
	defer testutils.ResetNowStub(GetTimeNow())
	// For each call, time.Now is called in this order:
	//   1. the sender records the time it starts sending
	//   2. the receiver records the time the request is handed to the handler
	//   3. the receiver computes the handler latency
	//   4. the sender computes the overall call latency
	// So the expected inbound latency is one increment, and the outbound latency is three increments.

	clientStats := newRecordingStatsReporter()
	serverStats := newRecordingStatsReporter()
	serverOpts := &testutils.ChannelOpts{
		StatsReporter: serverStats,
	}
	require.NoError(t, testutils.WithServer(serverOpts, func(serverCh *Channel, hostPort string) {
		handler := raw.Wrap(newTestHandler(t))
		serverCh.Register(handler, "echo")
		serverCh.Register(handler, "app-error")

		ch, err := testutils.NewClient(&testutils.ChannelOpts{StatsReporter: clientStats})
		require.NoError(t, err)

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		// Set now incrementor to 50ms, so expected Inbound latency is 50ms, outbound is 150ms.
		nowFn(50 * time.Millisecond)
		_, _, _, err = raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		outboundTags := tagsForOutboundCall(serverCh, ch, "echo")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 150*time.Millisecond)
		inboundTags := tagsForInboundCall(serverCh, ch, "echo")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.success", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)

		// Expected inbound latency = 70ms, outbound = 210ms.
		nowFn(70 * time.Millisecond)
		_, _, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "app-error", nil, nil)
		require.NoError(t, err)
		require.True(t, resp.ApplicationError(), "expected application error")

		outboundTags = tagsForOutboundCall(serverCh, ch, "app-error")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.app-errors", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 210*time.Millisecond)
		inboundTags = tagsForInboundCall(serverCh, ch, "app-error")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.app-errors", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 70*time.Millisecond)
	}))

	clientStats.Validate(t)
	serverStats.Validate(t)
}
Example #21
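// setRequest issues a "set" call to the benchmark service, storing value under key.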
func setRequest(ch *tchannel.Channel, key, value string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	_, _, _, err := raw.Call(ctx, ch, *hostPort, "benchmark", "set", []byte(key), []byte(value))
	return err
}
Example #22
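// getRequest issues a "get" call to the benchmark service and returns the value
// stored under key.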
func getRequest(ch *tchannel.Channel, key string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	_, arg3, _, err := raw.Call(ctx, ch, *hostPort, "benchmark", "get", []byte(key), nil)
	return string(arg3), err
}
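Both helpers above dereference a package-level hostPort that these examples never define; it is presumably a command-line flag. A minimal sketch under that assumption (illustrative only, not taken from the source):

// hostPort points the benchmark client at the server. The examples above only
// show its use; this flag definition is an assumption for illustration.
var hostPort = flag.String("hostPort", "127.0.0.1:12345", "host:port of the benchmark service")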
Example #23
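// TestCloseSemantics walks a channel through its close states (ChannelStartClose,
// ChannelInboundClosed, ChannelClosed) and verifies which inbound and outbound
// calls are still allowed at each stage.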
func TestCloseSemantics(t *testing.T) {
	defer testutils.SetTimeout(t, 2*time.Second)()
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	makeServer := func(name string) (*Channel, chan struct{}) {
		ch, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: name})
		require.NoError(t, err)
		c := make(chan struct{})
		registerFunc(t, ch, "stream", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			<-c
			return &raw.Res{}, nil
		})
		registerFunc(t, ch, "call", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})
		return ch, c
	}

	withNewClient := func(f func(ch *Channel)) {
		ch, err := testutils.NewClient(&testutils.ChannelOpts{ServiceName: "client"})
		require.NoError(t, err)
		f(ch)
		ch.Close()
	}

	call := func(from *Channel, to *Channel) error {
		toPeer := to.PeerInfo()
		_, _, _, err := raw.Call(ctx, from, toPeer.HostPort, toPeer.ServiceName, "call", nil, nil)
		return err
	}

	callStream := func(from *Channel, to *Channel) <-chan struct{} {
		c := make(chan struct{})

		toPeer := to.PeerInfo()
		call, err := from.BeginCall(ctx, toPeer.HostPort, toPeer.ServiceName, "stream", nil)
		require.NoError(t, err)
		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(nil), "write arg2")
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(nil), "write arg3")

		go func() {
			var d []byte
			require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&d), "read arg2 from %v to %v", from.PeerInfo(), to.PeerInfo())
			require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&d), "read arg3")
			c <- struct{}{}
		}()

		return c
	}

	s1, s1C := makeServer("s1")
	s2, s2C := makeServer("s2")

	// Make a call from s1 -> s2, and s2 -> s1
	call1 := callStream(s1, s2)
	call2 := callStream(s2, s1)

	// s1 and s2 are both open, so calls to it should be successful.
	withNewClient(func(ch *Channel) {
		require.NoError(t, call(ch, s1))
		require.NoError(t, call(ch, s2))
	})
	require.NoError(t, call(s1, s2))
	require.NoError(t, call(s2, s1))

	// Close s1, should no longer be able to call it.
	s1.Close()
	assert.Equal(t, ChannelStartClose, s1.State())
	withNewClient(func(ch *Channel) {
		assert.Error(t, call(ch, s1), "closed channel should not accept incoming calls")
		require.NoError(t, call(ch, s2),
			"closed channel with pending incoming calls should allow outgoing calls")
	})

	// Even an existing connection (e.g. from s2) should fail.
	assert.Equal(t, ErrChannelClosed, call(s2, s1), "closed channel should not accept incoming calls")

	require.NoError(t, call(s1, s2),
		"closed channel with pending incoming calls should allow outgoing calls")

	// Once the incoming connection is drained, outgoing calls should fail.
	s1C <- struct{}{}
	<-call2
	assert.Equal(t, ChannelInboundClosed, s1.State())
	require.Error(t, call(s1, s2),
		"closed channel with no pending incoming calls should not allow outgoing calls")

	// Now the channel should be completely closed as there are no pending connections.
	s2C <- struct{}{}
	<-call1
	assert.Equal(t, ChannelClosed, s1.State())

	// Close s2 so we don't leave any goroutines running.
	s2.Close()
	VerifyNoBlockedGoroutines(t)
}