Example #1
func BenchmarkBothSerial(b *testing.B) {
	serverAddr, err := setupBenchServer()
	require.NoError(b, err, "setupBenchServer failed")

	opts := testutils.NewOpts().SetFramePool(tchannel.NewSyncFramePool())
	clientCh := testutils.NewClient(b, opts)
	for _, addr := range serverAddr {
		clientCh.Peers().Add(addr)
	}

	thriftClient := thrift.NewClient(clientCh, "bench-server", nil)
	client := gen.NewTChanSecondServiceClient(thriftClient)
	ctx, cancel := thrift.NewContext(10 * time.Millisecond)
	client.Echo(ctx, "make connection")
	cancel()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ctx, cancel := thrift.NewContext(10 * time.Millisecond)

		_, err := client.Echo(ctx, "hello world")
		if err != nil {
			b.Errorf("Echo failed: %v", err)
		}
		// Cancel each iteration's context directly: a defer here would pile
		// up b.N cancel funcs until the benchmark returns.
		cancel()
	}
}
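The benchmark above swaps in tchannel.NewSyncFramePool so frames are recycled through a sync.Pool rather than allocated per call. As a rough illustration of that pooling pattern (the frame type and method names below are invented for the sketch; this is not tchannel's actual implementation):

package main

import (
	"fmt"
	"sync"
)

// frame stands in for tchannel's Frame type; only the pooling pattern matters here.
type frame struct{ buf [16 * 1024]byte }

// syncFramePool recycles frames through a sync.Pool: Get hands out a pooled
// frame (allocating on a miss) and Release returns it for reuse.
type syncFramePool struct{ pool sync.Pool }

func newSyncFramePool() *syncFramePool {
	return &syncFramePool{pool: sync.Pool{
		New: func() interface{} { return &frame{} },
	}}
}

func (p *syncFramePool) Get() *frame      { return p.pool.Get().(*frame) }
func (p *syncFramePool) Release(f *frame) { p.pool.Put(f) }

func main() {
	p := newSyncFramePool()
	f := p.Get()
	p.Release(f) // released frames are handed out again by later Gets
	fmt.Println("frame pooled and released")
}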
Example #2
func TestRequestSubChannel(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	tchan := testutils.NewServer(t, testutils.NewOpts().SetServiceName("svc1"))
	defer tchan.Close()

	clientCh := testutils.NewClient(t, nil)
	defer clientCh.Close()
	clientCh.Peers().Add(tchan.PeerInfo().HostPort)

	tests := []tchannel.Registrar{tchan, tchan.GetSubChannel("svc2"), tchan.GetSubChannel("svc3")}
	for _, ch := range tests {
		mockHandler := new(mocks.TChanSecondService)
		server := NewServer(ch)
		server.Register(gen.NewTChanSecondServiceServer(mockHandler))

		client := NewClient(clientCh, ch.ServiceName(), nil)
		secondClient := gen.NewTChanSecondServiceClient(client)

		echoArg := ch.ServiceName()
		echoRes := echoArg + "-echo"
		mockHandler.On("Echo", ctxArg(), echoArg).Return(echoRes, nil)
		res, err := secondClient.Echo(ctx, echoArg)
		assert.NoError(t, err, "Echo failed")
		assert.Equal(t, echoRes, res)
	}
}
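The mock expectation above uses a ctxArg() helper that isn't shown. A plausible definition, assuming it is built on testify's mock.MatchedBy (the exact predicate is a guess):

package thrift_test

import (
	"github.com/stretchr/testify/mock"
	"github.com/uber/tchannel-go/thrift"
)

// ctxArg matches any non-nil thrift.Context passed to the mocked handler,
// since the test only asserts on the echo argument.
func ctxArg() interface{} {
	return mock.MatchedBy(func(ctx thrift.Context) bool { return ctx != nil })
}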
Example #3
func TestRequestStateRetry(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	server := testutils.NewServer(t, nil)
	defer server.Close()
	server.Register(raw.Wrap(newTestHandler(t)), "echo")

	client := testutils.NewClient(t, nil)
	defer client.Close()

	counter := 0
	sc := client.GetSubChannel(server.PeerInfo().ServiceName)
	err := client.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
		defer func() { counter++ }()

		assert.Equal(t, counter, len(rs.SelectedPeers), "SelectedPeers should not be reused")

		if counter < 4 {
			client.Peers().Add(testutils.GetClosedHostPort(t))
		} else {
			client.Peers().Add(server.PeerInfo().HostPort)
		}

		_, err := raw.CallV2(ctx, sc, raw.CArgs{
			Operation:   "echo",
			CallOptions: &CallOptions{RequestState: rs},
		})
		return err
	})
	assert.NoError(t, err, "RunWithRetry should succeed")
	assert.Equal(t, 5, counter, "RunWithRetry should retry 5 times")
}
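RunWithRetry drives the closure with a shared RequestState that accumulates the peers tried so far, which is what the SelectedPeers assertion above depends on. A simplified, self-contained sketch of that loop shape (names invented; not tchannel's actual implementation):

package main

import (
	"context"
	"errors"
	"fmt"
)

// requestState stands in for tchannel's RequestState: it carries the set of
// peers already tried so each retry can avoid them.
type requestState struct {
	SelectedPeers map[string]struct{}
}

// runWithRetry calls f up to maxAttempts times with the same requestState,
// stopping at the first nil error.
func runWithRetry(ctx context.Context, maxAttempts int, f func(context.Context, *requestState) error) error {
	rs := &requestState{SelectedPeers: make(map[string]struct{})}
	var err error
	for i := 0; i < maxAttempts; i++ {
		if err = f(ctx, rs); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	attempts := 0
	err := runWithRetry(context.Background(), 5, func(context.Context, *requestState) error {
		attempts++
		if attempts < 5 {
			return errors.New("connection refused")
		}
		return nil
	})
	fmt.Printf("attempts=%d err=%v\n", attempts, err)
}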
Example #4
func TestGetPeerNoPeer(t *testing.T) {
	ch := testutils.NewClient(t, nil)
	defer ch.Close()
	peer, err := ch.Peers().Get(nil)
	assert.Equal(t, ErrNoPeers, err, "Empty peer list should return error")
	assert.Nil(t, peer, "should not return peer")
}
Example #5
func TestRetryNetConnect(t *testing.T) {
	e := getTestErrors()
	ch := testutils.NewClient(t, nil)
	defer ch.Close()

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	closedAddr := testutils.GetClosedHostPort(t)
	listenC, err := net.Listen("tcp", ":0")
	require.NoError(t, err, "Listen failed")
	defer listenC.Close()

	counter := 0
	f := func(ctx context.Context, rs *RequestState) error {
		counter++
		if !rs.HasRetries(e.Connection) {
			c, err := net.Dial("tcp", listenC.Addr().String())
			if err == nil {
				c.Close()
			}
			return err
		}

		_, err := net.Dial("tcp", closedAddr)
		return err
	}

	assert.NoError(t, ch.RunWithRetry(ctx, f), "RunWithRetry should succeed")
	assert.Equal(t, 5, counter, "RunWithRetry should have run f 5 times")
}
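The closure above keys off rs.HasRetries(err): while more retries remain for connection errors it dials a dead address, and only on the final attempt does it dial the live listener. A sketch of that predicate's shape (assumed; the real RequestState consults the channel's retry options):

package main

import "fmt"

// requestState tracks the current attempt; hasRetries reports whether failing
// now with a retryable error would still leave another attempt.
type requestState struct {
	attempt     int
	maxAttempts int
}

func (rs *requestState) hasRetries(errRetryable bool) bool {
	return errRetryable && rs.attempt < rs.maxAttempts
}

func main() {
	rs := &requestState{attempt: 1, maxAttempts: 5}
	for ; rs.attempt <= rs.maxAttempts; rs.attempt++ {
		fmt.Printf("attempt %d: more retries available: %v\n", rs.attempt, rs.hasRetries(true))
	}
}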
Example #6
func TestLargeRequest(t *testing.T) {
	CheckStress(t)

	const (
		KB = 1024
		MB = 1024 * KB
		GB = 1024 * MB

		maxRequestSize = 1 * GB
	)

	WithVerifiedServer(t, nil, func(serverCh *Channel, hostPort string) {
		serverCh.Register(raw.Wrap(newTestHandler(t)), "echo")

		for reqSize := 2; reqSize <= maxRequestSize; reqSize *= 2 {
			log.Printf("reqSize = %v", reqSize)
			arg3 := testutils.RandBytes(reqSize)
			arg2 := testutils.RandBytes(reqSize / 2)

			clientCh := testutils.NewClient(t, nil)
			ctx, cancel := NewContext(time.Second * 30)
			rArg2, rArg3, _, err := raw.Call(ctx, clientCh, hostPort, serverCh.PeerInfo().ServiceName, "echo", arg2, arg3)
			require.NoError(t, err, "Call failed")

			if !bytes.Equal(arg2, rArg2) {
				t.Errorf("echo arg2 mismatch")
			}
			if !bytes.Equal(arg3, rArg3) {
				t.Errorf("echo arg3 mismatch")
			}
			cancel()
			clientCh.Close() // avoid leaking a client per request size
		}
	})
}
Example #7
func BenchmarkCallsConcurrent(b *testing.B) {
	const numWorkers = 5

	serverCh, svcName, svcHostPort := setupServer(b)
	defer serverCh.Close()

	var wg sync.WaitGroup
	inCh := make(chan struct{})
	for i := 0; i < numWorkers; i++ {
		go func() {
			clientCh := testutils.NewClient(b, nil)
			defer clientCh.Close()

			for range inCh {
				ctx, cancel := NewContext(time.Second)

				_, _, _, err := raw.Call(ctx, clientCh, svcHostPort, svcName, "echo", []byte("data111"), []byte("data222"))
				assert.NoError(b, err)

				cancel()
				wg.Done()
			}
		}()
	}

	b.ResetTimer() // exclude server and worker setup from the measurement
	for i := 0; i < b.N; i++ {
		wg.Add(1)
		inCh <- struct{}{}
	}

	wg.Wait()
	close(inCh)
}
Example #8
func TestNetDialTimeout(t *testing.T) {
	// timeoutHostPort uses a blackholed address (RFC 6890) with a port
	// reserved for documentation. This address should always cause a timeout.
	const timeoutHostPort = "198.18.0.254:44444"
	timeoutPeriod := testutils.Timeout(50 * time.Millisecond)

	client := testutils.NewClient(t, nil)
	defer client.Close()

	started := time.Now()
	ctx, cancel := NewContext(timeoutPeriod)
	defer cancel()

	err := client.Ping(ctx, timeoutHostPort)
	if !assert.Error(t, err, "Ping to blackhole address should fail") {
		return
	}

	if strings.Contains(err.Error(), "network is unreachable") {
		t.Skipf("Skipping test, as network interface may not be available")
	}

	d := time.Since(started)
	assert.Equal(t, ErrTimeout, err, "Ping expected to fail with timeout")
	assert.True(t, d >= timeoutPeriod, "Timeout should take more than %v, took %v", timeoutPeriod, d)
}
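testutils.Timeout above exists so time-sensitive tests can be stretched on slow CI hosts. A minimal sketch of such a helper, assuming a hypothetical TEST_TIMEOUT_SCALE environment variable (the real helper may use a different mechanism):

package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// scaledTimeout multiplies d by an optional factor from the environment,
// e.g. TEST_TIMEOUT_SCALE=5 on a loaded CI machine.
func scaledTimeout(d time.Duration) time.Duration {
	if s := os.Getenv("TEST_TIMEOUT_SCALE"); s != "" {
		if f, err := strconv.ParseFloat(s, 64); err == nil && f > 0 {
			return time.Duration(float64(d) * f)
		}
	}
	return d
}

func main() {
	fmt.Println(scaledTimeout(50 * time.Millisecond))
}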
Example #9
func TestInboundEphemeralPeerRemoved(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		client := testutils.NewClient(t, nil)
		assert.NoError(t, client.Ping(ctx, hostPort), "Ping to server failed")

		// Server should have a host:port in the root peers for the client.
		var clientHP string
		peers := ch.RootPeers().Copy()
		for k := range peers {
			clientHP = k
		}

		// Close the connection, which should remove the peer from the server channel.
		client.Close()
		runtime.Gosched()
		assert.Equal(t, ChannelClosed, client.State(), "Client should be closed")

		// Wait for the channel to see the connection as closed and update the peer list.
		time.Sleep(time.Millisecond)

		_, ok := ch.RootPeers().Get(clientHP)
		assert.False(t, ok, "server's root peers should remove peer for client on close")
	})
}
Example #10
func TestInboundEphemeralPeerRemoved(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// No relay, since we look for the exact host:port in peer lists.
	opts := testutils.NewOpts().NoRelay()
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		client := testutils.NewClient(t, nil)
		assert.NoError(t, client.Ping(ctx, hostPort), "Ping to server failed")

		// Server should have a host:port in the root peers for the client.
		var clientHP string
		peers := ch.RootPeers().Copy()
		for k := range peers {
			clientHP = k
		}

		waitTillInboundEmpty(t, ch, clientHP, func() {
			client.Close()
		})
		assert.Equal(t, ChannelClosed, client.State(), "Client should be closed")

		_, ok := ch.RootPeers().Get(clientHP)
		assert.False(t, ok, "server's root peers should remove peer for client on close")
	})
}
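This second version replaces the earlier sleep with waitTillInboundEmpty, making the test deterministic. A hedged sketch of what such a helper could look like, polling a caller-supplied counter (the real helper likely inspects the channel's connection state directly):

package tchannel_test

import (
	"testing"
	"time"
)

// waitTillEmpty runs f (e.g. closing the client), then polls count until it
// reports zero inbound connections, failing the test on timeout.
func waitTillEmpty(t *testing.T, count func() int, f func()) {
	f()
	deadline := time.Now().Add(time.Second)
	for time.Now().Before(deadline) {
		if count() == 0 {
			return
		}
		time.Sleep(time.Millisecond)
	}
	t.Errorf("inbound connections did not drain within %v", time.Second)
}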
Example #11
func TestPeerSelectionRanking(t *testing.T) {
	const numPeers = 10
	const numIterations = 1000

	// selected[rank] maps a peer's host:port to the number of times that
	// peer was returned at that rank across iterations.
	selected := make([]map[string]int, numPeers)
	for i := 0; i < numPeers; i++ {
		selected[i] = make(map[string]int)
	}

	for i := 0; i < numIterations; i++ {
		ch := testutils.NewClient(t, nil)
		defer ch.Close()
		ch.SetRandomSeed(int64(i * 100))

		for i := 0; i < numPeers; i++ {
			hp := fmt.Sprintf("127.0.0.1:60%v", i)
			ch.Peers().Add(hp)
		}

		for i := 0; i < numPeers; i++ {
			peer, err := ch.Peers().Get(nil)
			require.NoError(t, err, "Peers.Get failed")
			selected[i][peer.HostPort()]++
		}
	}

	for _, m := range selected {
		testDistribution(t, m, 50, 150)
	}
}
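testDistribution isn't shown; from the call site it plausibly checks that every peer's selection count at a given rank falls inside [min, max]. A sketch under that assumption:

package tchannel_test

import "testing"

// testDistribution verifies that each peer's selection count lies within
// [min, max], i.e. selection at this rank is roughly uniform.
func testDistribution(t *testing.T, counts map[string]int, min, max int) {
	for peer, n := range counts {
		if n < min || n > max {
			t.Errorf("peer %v selected %v times, want between %v and %v", peer, n, min, max)
		}
	}
}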
Example #12
func TestCloseAfterTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ch.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(10 * time.Millisecond)
		defer cancel()

		// Make a call, wait for it to timeout.
		clientCh, err := testutils.NewClient(nil)
		require.NoError(t, err, "NewClient failed")
		peerInfo := ch.PeerInfo()
		_, _, _, err = raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "block", nil, nil)
		require.Error(t, err, "Expected call to timeout")

		// The client channel should also close immediately.
		clientCh.Close()
		runtime.Gosched()
		assert.Equal(t, ChannelClosed, clientCh.State())
		assert.True(t, clientCh.Closed(), "Channel should be closed")

		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
	VerifyNoBlockedGoroutines(t)
}
Example #13
func setupProxy(t *testing.T, tchanAddr string) (string, func()) {
	mux := http.NewServeMux()
	mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// You get /proxy/host:port/rest/of/the/path
		parts := strings.SplitN(r.URL.Path, "/", 4)
		r.URL.Host = parts[2]
		r.URL.Scheme = "http"
		r.URL.Path = parts[3]

		ch := testutils.NewClient(t, nil)
		defer ch.Close() // close the per-request client when the handler returns
		ctx, cancel := tchannel.NewContext(time.Second)
		defer cancel()

		call, err := ch.BeginCall(ctx, tchanAddr, "test", "http", nil)
		require.NoError(t, err, "BeginCall failed")

		require.NoError(t, WriteRequest(call, r), "WriteRequest failed")
		resp, err := ReadResponse(call.Response())
		require.NoError(t, err, "Read response failed")

		for k, vs := range resp.Header {
			for _, v := range vs {
				w.Header().Add(k, v)
			}
		}
		w.WriteHeader(resp.StatusCode)

		_, err = io.Copy(w, resp.Body)
		assert.NoError(t, err, "io.Copy failed")
		err = resp.Body.Close()
		assert.NoError(t, err, "Close Response Body failed")
	}))
	return setupHTTP(t, mux)
}
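setupHTTP, the other half of the proxy harness, isn't shown. Assuming it just serves the mux on an ephemeral port, net/http/httptest provides a minimal stand-in (the real helper may differ):

package tchannel_test

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// setupHTTPSketch serves h on an ephemeral port and returns the base URL plus
// a shutdown func, mirroring the (string, func()) shape returned above.
func setupHTTPSketch(t *testing.T, h http.Handler) (string, func()) {
	srv := httptest.NewServer(h)
	return srv.URL, srv.Close
}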
Example #14
func TestRetrySubContextTimeoutPerAttempt(t *testing.T) {
	e := getTestErrors()
	ctx, cancel := NewContextBuilder(time.Second).
		SetTimeoutPerAttempt(time.Millisecond).Build()
	defer cancel()

	ch := testutils.NewClient(t, nil)
	defer ch.Close()

	var lastDeadline time.Time

	counter := 0
	ch.RunWithRetry(ctx, func(sctx context.Context, _ *RequestState) error {
		counter++

		assert.NotEqual(t, ctx, sctx, "Sub-context should be different")
		deadline, _ := sctx.Deadline()
		assert.True(t, deadline.After(lastDeadline), "Each attempt's deadline should advance past the previous one")
		lastDeadline = deadline

		overallDeadline, _ := ctx.Deadline()
		assert.True(t, overallDeadline.After(deadline), "Overall deadline should cap each per-attempt deadline")

		return e.Busy
	})
	assert.Equal(t, 5, counter, "RunWithRetry did not run f enough times")
}
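SetTimeoutPerAttempt means each retry runs under a sub-context whose deadline is measured from the start of that attempt and capped by the overall deadline, which is exactly what the assertions above probe. A standalone illustration of that nesting using plain context (not tchannel's ContextBuilder internals):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Overall budget of one second; each attempt gets one millisecond.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	for attempt := 0; attempt < 3; attempt++ {
		// A fresh sub-context per attempt: its deadline starts from "now",
		// so each retry's deadline lands later than the previous one, while
		// the parent context caps them all.
		sctx, scancel := context.WithTimeout(ctx, time.Millisecond)
		deadline, _ := sctx.Deadline()
		overall, _ := ctx.Deadline()
		fmt.Printf("attempt %d: sub-deadline %s, before overall: %v\n",
			attempt, deadline.Format("15:04:05.000"), overall.After(deadline))
		scancel()
		time.Sleep(time.Millisecond) // simulate a failed attempt before retrying
	}
}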
Example #15
func TestStopMultiple(t *testing.T) {
	for _, getOutbound := range newOutbounds {
		out := getOutbound(testutils.NewClient(t, &testutils.ChannelOpts{
			ServiceName: "caller",
		}), "localhost:4040")
		// TODO: If we change Start() to establish a connection to the host, this
		// hostport will have to be changed to a real server.

		err := out.Start(transport.NoDeps)
		require.NoError(t, err)

		var wg sync.WaitGroup
		signal := make(chan struct{})

		for i := 0; i < 10; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				<-signal

				err := out.Stop()
				assert.NoError(t, err)
			}()
		}
		close(signal)
		wg.Wait()
	}
}
Example #16
func TestPeerRemoveClosedConnection(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		client := testutils.NewClient(t, nil)
		defer client.Close()

		p := client.Peers().Add(hostPort)

		c1, err := p.Connect(ctx)
		require.NoError(t, err, "Failed to connect")
		require.NoError(t, c1.Ping(ctx), "Ping on first connection failed")

		c2, err := p.Connect(ctx)
		require.NoError(t, err, "Failed to connect")
		require.NoError(t, c2.Ping(ctx), "Ping on second connection failed")

		require.NoError(t, c1.Close(), "Failed to close first connection")
		_, outConns := p.NumConnections()
		assert.Equal(t, 1, outConns, "Expected 1 remaining outgoing connection")

		c, err := p.GetConnection(ctx)
		require.NoError(t, err, "GetConnection failed")
		assert.Equal(t, c2, c, "Expected second active connection")
	})
}
Example #17
// The purpose of this test is to ensure that introspection doesn't cause any
// panics and that we have coverage of the introspection code.
func TestIntrospection(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		client := testutils.NewClient(t, nil)
		defer client.Close()

		ctx, cancel := json.NewContext(time.Second)
		defer cancel()

		var resp map[string]interface{}
		peer := client.Peers().GetOrAdd(ts.HostPort())
		err := json.CallPeer(ctx, peer, ts.ServiceName(), "_gometa_introspect", map[string]interface{}{
			"includeExchanges":  true,
			"includeEmptyPeers": true,
			"includeTombstones": true,
		}, &resp)
		require.NoError(t, err, "Call _gometa_introspect failed")

		err = json.CallPeer(ctx, peer, ts.ServiceName(), "_gometa_runtime", map[string]interface{}{
			"includeGoStacks": true,
		}, &resp)
		require.NoError(t, err, "Call _gometa_runtime failed")

		if !ts.HasRelay() {
			// Try making the call on the "tchannel" service which is where meta handlers
			// are registered. This will only work when we call it directly as the relay
			// will not forward the tchannel service.
			err = json.CallPeer(ctx, peer, "tchannel", "_gometa_runtime", map[string]interface{}{
				"includeGoStacks": true,
			}, &resp)
			require.NoError(t, err, "Call _gometa_runtime failed")
		}
	})
}
Example #18
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup

	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()

	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()

		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})

		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})

		client := ts.NewClient(opts)
		defer client.Close()

		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()

		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall

		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()

				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}

		// Now try to close the channel; it should block since there are active exchanges.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")

		close(completeCall)
		<-callDone
	})

	// Wait for all calls to complete
	wg.Wait()
}
Example #19
func TestGetPeerAvoidPrevSelected(t *testing.T) {
	const (
		peer1 = "1.1.1.1:1"
		peer2 = "2.2.2.2:2"
		peer3 = "3.3.3.3:3"
	)

	ch := testutils.NewClient(t, nil)
	defer ch.Close()
	a, m := testutils.StrArray, testutils.StrMap
	tests := []struct {
		peers        []string
		prevSelected map[string]struct{}
		expected     map[string]struct{}
	}{
		{
			peers:    a(peer1),
			expected: m(peer1),
		},
		{
			peers:        a(peer1, peer2),
			prevSelected: m(peer1),
			expected:     m(peer2),
		},
		{
			peers:        a(peer1, peer2, peer3),
			prevSelected: m(peer1, peer2),
			expected:     m(peer3),
		},
		{
			peers:        a(peer1),
			prevSelected: m(peer1),
			expected:     m(peer1),
		},
		{
			peers:        a(peer1, peer2, peer3),
			prevSelected: m(peer1, peer2, peer3),
			expected:     m(peer1, peer2, peer3),
		},
	}

	for i, tt := range tests {
		peers := ch.GetSubChannel(fmt.Sprintf("test-%d", i), Isolated).Peers()
		for _, p := range tt.peers {
			peers.Add(p)
		}

		gotPeer, err := peers.Get(tt.prevSelected)
		if err != nil {
			t.Errorf("Got unexpected error selecting peer: %v", err)
			continue
		}

		got := gotPeer.HostPort()
		if _, ok := tt.expected[got]; !ok {
			t.Errorf("Got unexpected peer, expected one of %v got %v\n  Peers = %v PrevSelected = %v",
				tt.expected, got, tt.peers, tt.prevSelected)
		}
	}
}
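The table above encodes the selection rule: avoid peers in prevSelected while an unselected peer exists, otherwise fall back to the full list (the last two cases). A compact sketch of that rule in isolation (a sketch only; tchannel's real selection also considers peer scores):

package main

import "fmt"

// pickAvoiding returns the first peer not in prev; if every peer was
// previously selected, any peer becomes acceptable again.
func pickAvoiding(peers []string, prev map[string]struct{}) string {
	for _, p := range peers {
		if _, seen := prev[p]; !seen {
			return p
		}
	}
	return peers[0]
}

func main() {
	prev := map[string]struct{}{"1.1.1.1:1": {}}
	fmt.Println(pickAvoiding([]string{"1.1.1.1:1", "2.2.2.2:2"}, prev)) // 2.2.2.2:2
	fmt.Println(pickAvoiding([]string{"1.1.1.1:1"}, prev))              // 1.1.1.1:1
}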
Example #20
func TestStatsCalls(t *testing.T) {
	defer testutils.SetTimeout(t, time.Second)()

	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)
	clientNow, clientNowFn := testutils.NowStub(initialTime)
	serverNow, serverNowFn := testutils.NowStub(initialTime)
	clientNowFn(100 * time.Millisecond)
	serverNowFn(50 * time.Millisecond)

	clientStats := newRecordingStatsReporter()
	serverStats := newRecordingStatsReporter()
	serverOpts := testutils.NewOpts().
		SetStatsReporter(serverStats).
		SetTimeNow(serverNow)
	WithVerifiedServer(t, serverOpts, func(serverCh *Channel, hostPort string) {
		handler := raw.Wrap(newTestHandler(t))
		serverCh.Register(handler, "echo")
		serverCh.Register(handler, "app-error")

		ch := testutils.NewClient(t, testutils.NewOpts().
			SetStatsReporter(clientStats).
			SetTimeNow(clientNow))
		defer ch.Close()

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		outboundTags := tagsForOutboundCall(serverCh, ch, "echo")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags := tagsForInboundCall(serverCh, ch, "echo")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.success", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)

		_, _, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "app-error", nil, nil)
		require.NoError(t, err)
		require.True(t, resp.ApplicationError(), "expected application error")

		outboundTags = tagsForOutboundCall(serverCh, ch, "app-error")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.per-attempt.app-errors", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.app-errors", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags = tagsForInboundCall(serverCh, ch, "app-error")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.app-errors", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)
	})

	clientStats.Validate(t)
	serverStats.Validate(t)
}
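The latency assertions above work because testutils.NowStub installs a fake clock that advances by a fixed increment on every call (100ms on the client, 50ms on the server). A hedged sketch of such a stub, inferring its (now, setIncrement) return shape from the call site:

package main

import (
	"fmt"
	"sync"
	"time"
)

// nowStub returns a clock func that advances by a configurable increment on
// every call, plus a setter matching the clientNowFn usage above.
func nowStub(start time.Time) (func() time.Time, func(time.Duration)) {
	var (
		mu   sync.Mutex
		cur  = start
		incr time.Duration
	)
	now := func() time.Time {
		mu.Lock()
		defer mu.Unlock()
		cur = cur.Add(incr)
		return cur
	}
	setIncr := func(d time.Duration) {
		mu.Lock()
		defer mu.Unlock()
		incr = d
	}
	return now, setIncr
}

func main() {
	now, setIncr := nowStub(time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC))
	setIncr(100 * time.Millisecond)
	fmt.Println(now()) // 10:10:00.1
	fmt.Println(now()) // 10:10:00.2
}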
Example #21
func TestGetPeerNoPeer(t *testing.T) {
	ch, err := testutils.NewClient(nil)
	require.NoError(t, err, "NewClient failed")
	defer ch.Close()

	peer, err := ch.Peers().Get(nil)
	assert.Equal(t, ErrNoPeers, err, "Empty peer list should return error")
	assert.Nil(t, peer, "should not return peer")
}
Example #22
func TestCloseNewClient(t *testing.T) {
	ch := testutils.NewClient(t, nil)

	// If there are no connections, then the channel should close immediately.
	ch.Close()
	assert.Equal(t, ChannelClosed, ch.State())
	assert.True(t, ch.Closed(), "Channel should be closed")
}
Example #23
func TestGetPeerSinglePeer(t *testing.T) {
	ch := testutils.NewClient(t, nil)
	ch.Peers().Add("1.1.1.1:1234")

	peer, err := ch.Peers().Get(nil)
	assert.NoError(t, err, "peer list should return contained element")
	assert.Equal(t, "1.1.1.1:1234", peer.HostPort(), "returned peer mismatch")
}
Example #24
func callWithNewClient(t *testing.T, hostPort string) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	client := testutils.NewClient(t, nil)
	assert.NoError(t, client.Ping(ctx, hostPort))
	client.Close()
}
Example #25
func withNewSet(t *testing.T, f func(*testing.T, chanSet)) {
	ch := testutils.NewClient(t, nil)
	f(t, chanSet{
		main:     ch,
		sub:      ch.GetSubChannel("hyperbahn"),
		isolated: ch.GetSubChannel("ringpop", tchannel.Isolated),
	})
}
Example #26
func TestTraceSamplingRate(t *testing.T) {
	rand.Seed(10)

	tests := []struct {
		sampleRate  float64 // if this is < 0, then the value is not set.
		count       int
		expectedMin int
		expectedMax int
	}{
		{1.0, 100, 100, 100},
		{0.5, 100, 40, 60},
		{0.1, 100, 5, 15},
		{0, 100, 0, 0},
		{-1, 100, 100, 100}, // default of 1.0 should be used.
	}

	for _, tt := range tests {
		var reportedTraces int
		testTraceReporter := TraceReporterFunc(func(_ TraceData) {
			reportedTraces++
		})

		WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
			var tracedCalls int
			testutils.RegisterFunc(ch, "t", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				if CurrentSpan(ctx).TracingEnabled() {
					tracedCalls++
				}

				return &raw.Res{}, nil
			})

			opts := testutils.NewOpts().SetTraceReporter(testTraceReporter)
			if tt.sampleRate >= 0 {
				opts.SetTraceSampleRate(tt.sampleRate)
			}

			client := testutils.NewClient(t, opts)
			defer client.Close()

			for i := 0; i < tt.count; i++ {
				ctx, cancel := NewContext(time.Second)
				defer cancel()

				_, _, _, err := raw.Call(ctx, client, hostPort, ch.PeerInfo().ServiceName, "t", nil, nil)
				require.NoError(t, err, "raw.Call failed")
			}

			assert.Equal(t, reportedTraces, tracedCalls,
				"Number of traces reported doesn't match the number of calls with tracing enabled")
			assert.True(t, tracedCalls >= tt.expectedMin,
				"Number of trace-enabled calls (%v) expected to be at least %v", tracedCalls, tt.expectedMin)
			assert.True(t, tracedCalls <= tt.expectedMax,
				"Number of trace-enabled calls (%v) expected to be at most %v", tracedCalls, tt.expectedMax)
		})
	}
}
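The sample-rate behavior the test expects reduces to a per-call coin flip against the configured rate, with an unset rate (< 0) falling back to the default of 1.0. A tiny sketch of that decision (tchannel's real sampling may differ in detail):

package main

import (
	"fmt"
	"math/rand"
)

// shouldSample decides tracing for one call: a negative rate means "not
// configured", which falls back to the default of 1.0 (always sample).
func shouldSample(rate float64) bool {
	if rate < 0 {
		rate = 1.0
	}
	return rand.Float64() < rate
}

func main() {
	rand.Seed(10)
	sampled := 0
	for i := 0; i < 100; i++ {
		if shouldSample(0.5) {
			sampled++
		}
	}
	fmt.Printf("sampled %d of 100 calls\n", sampled)
}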
Example #27
func getClient(dst string) (tchannel.TraceReporter, error) {
	tchan, err := testutils.NewClient(nil)
	if err != nil {
		return nil, err
	}

	tchan.Peers().Add(dst)
	return NewZipkinTraceReporter(tchan), nil
}
Example #28
func TestPing(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		clientCh := testutils.NewClient(t, nil)
		defer clientCh.Close()
		require.NoError(t, clientCh.Ping(ctx, hostPort))
	})
}
Example #29
func BenchmarkAddPeers(b *testing.B) {
	for i := 0; i < b.N; i++ {
		ch := testutils.NewClient(b, nil)
		for i := 0; i < 1000; i++ {
			hp := fmt.Sprintf("127.0.0.1:%v", i)
			ch.Peers().Add(hp)
		}
	}
}
Example #30
func TestRemovePeerNotFound(t *testing.T) {
	ch := testutils.NewClient(t, nil)
	defer ch.Close()

	peers := ch.Peers()
	peers.Add("1.1.1.1:1")
	assert.Error(t, peers.Remove("not-found"), "Remove should fail for an unknown peer")
	assert.NoError(t, peers.Remove("1.1.1.1:1"), "Remove shouldn't fail for existing peer")
}