func TestRelayConnection(t *testing.T) {
	var errTest = errors.New("test")
	var wantHostPort string
	getHost := func(call relay.CallFrame, conn relay.Conn) (relay.Peer, error) {
		matches := conn.RemoteProcessPrefixMatches()
		assert.Equal(t, []bool{true, true, true, false}, matches, "Unexpected prefix matches.")
		assert.Equal(t, wantHostPort, conn.RemoteHostPort(), "Unexpected RemoteHostPort")
		return relay.Peer{}, errTest
	}

	// Note: we cannot use WithTestServer since we override the RelayHosts.
	opts := testutils.NewOpts().
		SetServiceName("relay").
		SetRelayHosts(hostsFunc(getHost)).
		SetProcessPrefixes("nod", "nodejs-hyperbahn", "", "hyperbahn")
	relay := testutils.NewServer(t, opts)
	defer relay.Close()

	// Create a client that is listening so we can set the expected host:port.
	clientOpts := testutils.NewOpts().SetProcessName("nodejs-hyperbahn")
	client := testutils.NewServer(t, clientOpts)
	wantHostPort = client.PeerInfo().HostPort
	defer client.Close()

	err := testutils.CallEcho(client, relay.PeerInfo().HostPort, relay.ServiceName(), nil)
	require.Error(t, err, "Expected CallEcho to fail")
	assert.Contains(t, err.Error(), errTest.Error(), "Unexpected error")
}
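// The test above wraps getHost in hostsFunc to satisfy the RelayHosts option.
// hostsFunc is a helper defined elsewhere in the test package; below is a
// minimal sketch of what it likely looks like, assuming relay.RelayHosts is
// an interface with a single Get method of this signature.
type hostsFunc func(relay.CallFrame, relay.Conn) (relay.Peer, error)

func (f hostsFunc) Get(call relay.CallFrame, conn relay.Conn) (relay.Peer, error) {
	// Delegate straight to the wrapped function.
	return f(call, conn)
}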
func TestClientHostPort(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	s1ch := testutils.NewServer(t, nil)
	s2ch := testutils.NewServer(t, nil)
	defer s1ch.Close()
	defer s2ch.Close()

	s1ch.Peers().Add(s2ch.PeerInfo().HostPort)
	s2ch.Peers().Add(s1ch.PeerInfo().HostPort)

	mock1, mock2 := new(mocks.TChanSecondService), new(mocks.TChanSecondService)
	NewServer(s1ch).Register(gen.NewTChanSecondServiceServer(mock1))
	NewServer(s2ch).Register(gen.NewTChanSecondServiceServer(mock2))

	// When we call using a normal client, it can only call the other server (only peer).
	c1 := gen.NewTChanSecondServiceClient(NewClient(s1ch, s2ch.PeerInfo().ServiceName, nil))
	mock2.On("Echo", ctxArg(), "call1").Return("call1", nil)
	res, err := c1.Echo(ctx, "call1")
	assert.NoError(t, err, "call1 failed")
	assert.Equal(t, "call1", res)

	// When we call using a client that specifies host:port, it should call that server.
	c2 := gen.NewTChanSecondServiceClient(NewClient(s1ch, s1ch.PeerInfo().ServiceName, &ClientOptions{
		HostPort: s1ch.PeerInfo().HostPort,
	}))
	mock1.On("Echo", ctxArg(), "call2").Return("call2", nil)
	res, err = c2.Echo(ctx, "call2")
	assert.NoError(t, err, "call2 failed")
	assert.Equal(t, "call2", res)
}
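// ctxArg, used with the generated mocks here and below, is assumed to be a
// testify matcher that accepts whatever concrete context type the generated
// server passes to handlers. A minimal sketch; the matched type name is an
// assumption and depends on the tchannel version.
func ctxArg() mock.AnythingOfTypeArgument {
	// Match any argument of the concrete (unexported) context type.
	return mock.AnythingOfType("*tchannel.headerCtx")
}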
func TestRequestSubChannel(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	tchan := testutils.NewServer(t, testutils.NewOpts().SetServiceName("svc1"))
	defer tchan.Close()

	clientCh := testutils.NewClient(t, nil)
	defer clientCh.Close()
	clientCh.Peers().Add(tchan.PeerInfo().HostPort)

	tests := []tchannel.Registrar{tchan, tchan.GetSubChannel("svc2"), tchan.GetSubChannel("svc3")}
	for _, ch := range tests {
		mockHandler := new(mocks.TChanSecondService)
		server := NewServer(ch)
		server.Register(gen.NewTChanSecondServiceServer(mockHandler))

		client := NewClient(clientCh, ch.ServiceName(), nil)
		secondClient := gen.NewTChanSecondServiceClient(client)

		echoArg := ch.ServiceName()
		echoRes := echoArg + "-echo"
		mockHandler.On("Echo", ctxArg(), echoArg).Return(echoRes, nil)
		res, err := secondClient.Echo(ctx, echoArg)
		assert.NoError(t, err, "Echo failed")
		assert.Equal(t, echoRes, res)
	}
}
func TestRequestStateRetry(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	server := testutils.NewServer(t, nil)
	defer server.Close()
	server.Register(raw.Wrap(newTestHandler(t)), "echo")

	client := testutils.NewClient(t, nil)
	defer client.Close()

	counter := 0
	sc := client.GetSubChannel(server.PeerInfo().ServiceName)
	err := client.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
		defer func() { counter++ }()

		assert.Equal(t, counter, len(rs.SelectedPeers), "SelectedPeers should not be reused")

		if counter < 4 {
			// Add a closed host:port to force a connection failure and a retry.
			client.Peers().Add(testutils.GetClosedHostPort(t))
		} else {
			client.Peers().Add(server.PeerInfo().HostPort)
		}

		_, err := raw.CallV2(ctx, sc, raw.CArgs{
			Operation:   "echo",
			CallOptions: &CallOptions{RequestState: rs},
		})
		return err
	})
	assert.NoError(t, err, "RunWithRetry should succeed")
	assert.Equal(t, 5, counter, "RunWithRetry should run f 5 times (4 failed attempts, then success)")
}
func setupServer(t *testing.T, h *mocks.TChanSimpleService, sh *mocks.TChanSecondService) (*tchannel.Channel, *Server) {
	ch := testutils.NewServer(t, nil)
	server := NewServer(ch)
	server.Register(gen.NewTChanSimpleServiceServer(h))
	server.Register(gen.NewTChanSecondServiceServer(sh))
	return ch, server
}
func TestPeerSelectionConnClosed(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	WithVerifiedServer(t, nil, func(server *Channel, hostPort string) {
		client := testutils.NewServer(t, nil)
		defer client.Close()

		// Ping will create an outbound connection from client -> server.
		require.NoError(t, testutils.Ping(client, server), "Ping failed")

		waitTillInboundEmpty(t, server, client.PeerInfo().HostPort, func() {
			peer, ok := client.RootPeers().Get(server.PeerInfo().HostPort)
			require.True(t, ok, "Client has no peer for %v", server.PeerInfo())

			conn, err := peer.GetConnection(ctx)
			require.NoError(t, err, "Failed to get a connection")
			conn.Close()
		})

		// Make sure the closed connection is not used.
		for i := 0; i < 10; i++ {
			require.NoError(t, testutils.Ping(client, server), "Ping failed")
		}
	})
}
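// waitTillInboundEmpty is a test helper defined elsewhere in this package.
// The sketch below shows the assumed behavior: run f (which should tear the
// connection down), then poll the channel's introspected state until the peer
// reports no inbound connections, failing the test on timeout. Field names
// follow the introspection types used in TestRemotePeer further down.
func waitTillInboundEmpty(t *testing.T, ch *Channel, hostPort string, f func()) {
	f()
	deadline := time.Now().Add(time.Second)
	for time.Now().Before(deadline) {
		peer, ok := ch.IntrospectState(nil).RootPeers[hostPort]
		if !ok || len(peer.InboundConnections) == 0 {
			return
		}
		time.Sleep(time.Millisecond)
	}
	t.Errorf("Timed out waiting for no inbound connections from %v", hostPort)
}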
func setupBenchServer() ([]string, error) {
	ch, err := testutils.NewServer(testutils.NewOpts().
		SetServiceName(benchServerName).
		SetFramePool(tchannel.NewSyncFramePool()))
	if err != nil {
		return nil, err
	}
	fmt.Println(benchServerName, "started on", ch.PeerInfo().HostPort)

	server := thrift.NewServer(ch)
	server.Register(gen.NewTChanSecondServiceServer(benchSecondHandler{}))

	if !*useHyperbahn {
		return []string{ch.PeerInfo().HostPort}, nil
	}

	// Set up a Hyperbahn client and advertise it.
	nodes := strings.Split(*hyperbahnNodes, ",")
	config := hyperbahn.Configuration{InitialNodes: nodes}
	hc, err := hyperbahn.NewClient(ch, config, nil)
	if err != nil {
		return nil, err
	}
	if err := hc.Advertise(); err != nil {
		return nil, err
	}
	return nodes, nil
}
func runRetryTest(t *testing.T, f func(r *retryTest)) {
	r := &retryTest{}
	defer testutils.SetTimeout(t, time.Second)()
	r.setup()
	defer testutils.ResetSleepStub(&timeSleep)

	withSetup(t, func(hypCh *tchannel.Channel, hostPort string) {
		json.Register(hypCh, json.Handlers{"ad": r.adHandler}, nil)

		// Advertise failures cause warning log messages.
		opts := testutils.NewOpts().
			SetServiceName("my-client").
			AddLogFilter("Hyperbahn client registration failed", 10)
		serverCh := testutils.NewServer(t, opts)
		defer serverCh.Close()

		var err error
		r.ch = serverCh
		r.client, err = NewClient(serverCh, configFor(hostPort), &ClientOptions{
			Handler:      r,
			FailStrategy: FailStrategyIgnore,
		})
		require.NoError(t, err, "NewClient")
		defer r.client.Close()
		f(r)
		r.mock.AssertExpectations(t)
	})
}
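// configFor, used throughout these tests, is another local helper. A minimal
// sketch, assuming it simply points the client at the single in-test
// Hyperbahn node:
func configFor(hostPort string) Configuration {
	return Configuration{InitialNodes: []string{hostPort}}
}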
func TestInitialAdvertiseFailedRetryTimeout(t *testing.T) {
	withSetup(t, func(hypCh *tchannel.Channel, hyperbahnHostPort string) {
		started := time.Now()
		count := 0
		adHandler := func(ctx json.Context, req *AdRequest) (*AdResponse, error) {
			count++
			deadline, ok := ctx.Deadline()
			if assert.True(t, ok, "context is missing Deadline") {
				assert.True(t, deadline.Sub(started) <= 2*time.Second,
					"Timeout per attempt should be 1 second. Started: %v Deadline: %v", started, deadline)
			}
			return nil, tchannel.NewSystemError(tchannel.ErrCodeUnexpected, "unexpected")
		}
		json.Register(hypCh, json.Handlers{"ad": adHandler}, nil)

		ch := testutils.NewServer(t, nil)
		defer ch.Close()
		client, err := NewClient(ch, configFor(hyperbahnHostPort), stubbedSleep())
		assert.NoError(t, err, "hyperbahn NewClient failed")
		defer client.Close()
		assert.Error(t, client.Advertise(), "Advertise should not succeed")

		// The client attempts the initial advertise 5 times, and TChannel
		// retries each of those attempts 5 times.
		assert.Equal(t, 5*5, count, "adHandler not retried correct number of times")
	})
}
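// stubbedSleep is assumed to return ClientOptions with a replaceable TimeSleep
// so tests avoid real retry backoff; a minimal sketch:
func stubbedSleep() *ClientOptions {
	return &ClientOptions{
		// No-op sleep. TestInitialAdvertiseFailedRetryBackoff below replaces
		// this via testutils.SleepStub to observe the requested durations.
		TimeSleep: func(_ time.Duration) {},
	}
}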
func TestInitialAdvertiseFailedRetryBackoff(t *testing.T) {
	defer testutils.SetTimeout(t, 2*time.Second)()

	clientOpts := stubbedSleep()
	sleepArgs, sleepBlock, sleepClose := testutils.SleepStub(&clientOpts.TimeSleep)

	// We expect to retry 5 times, with the backoff cap doubling on each attempt.
	go func() {
		for attempt := uint(0); attempt < 5; attempt++ {
			maxSleepFor := advertiseRetryInterval * time.Duration(1<<attempt)
			got := <-sleepArgs
			assert.True(t, got <= maxSleepFor,
				"Initial advertise attempt %v expected sleep %v < %v", attempt, got, maxSleepFor)
			sleepBlock <- struct{}{}
		}
		sleepClose()
	}()

	withSetup(t, func(hypCh *tchannel.Channel, hostPort string) {
		serverCh := testutils.NewServer(t, nil)
		defer serverCh.Close()

		client, err := NewClient(serverCh, configFor(hostPort), clientOpts)
		require.NoError(t, err, "NewClient")
		defer client.Close()
		assert.Error(t, client.Advertise(), "Advertise without handler should fail")
	})
}
func TestPeerRemovedFromRootPeers(t *testing.T) {
	tests := []struct {
		addHostPort bool
		expectFound bool
	}{
		{true, true},
		{false, false},
	}

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	for _, tt := range tests {
		WithVerifiedServer(t, nil, func(server *Channel, hostPort string) {
			ch := testutils.NewServer(t, nil)
			clientHP := ch.PeerInfo().HostPort

			if tt.addHostPort {
				server.Peers().Add(clientHP)
			}

			assert.NoError(t, ch.Ping(ctx, hostPort), "Ping failed")
			waitTillInboundEmpty(t, server, clientHP, func() {
				ch.Close()
			})

			rootPeers := server.RootPeers()
			_, found := rootPeers.Get(clientHP)
			assert.Equal(t, tt.expectFound, found, "Peer found mismatch, addHostPort: %v", tt.addHostPort)
		})
	}
}
func setupServer(b *testing.B) (ch *Channel, svcName, svcHostPort string) {
	serverCh := testutils.NewServer(b, nil)
	handler := &benchmarkHandler{}
	serverCh.Register(raw.Wrap(handler), "echo")

	peerInfo := serverCh.PeerInfo()
	return serverCh, peerInfo.ServiceName, peerInfo.HostPort
}
func TestCloseNewClient(t *testing.T) {
	ch := testutils.NewServer(t, nil)

	// If there are no connections, then the channel should close immediately.
	ch.Close()
	assert.Equal(t, ChannelClosed, ch.State())
	assert.True(t, ch.Closed(), "Channel should be closed")
}
func TestCloseOnlyListening(t *testing.T) {
	ch, err := testutils.NewServer(nil)
	require.NoError(t, err, "NewServer failed")

	// If there are no connections, then the channel should close immediately.
	ch.Close()
	assert.Equal(t, ChannelClosed, ch.State())
	assert.True(t, ch.Closed(), "Channel should be closed")
}
func TestCloseOneSide(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Check each NewServer error immediately: the original code assigned to
	// err twice before checking, so the first error was silently overwritten.
	ch1, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: "client"})
	require.NoError(t, err, "NewServer 1 failed")
	ch2, err := testutils.NewServer(&testutils.ChannelOpts{ServiceName: "server"})
	require.NoError(t, err, "NewServer 2 failed")

	connected := make(chan struct{})
	completed := make(chan struct{})
	blockCall := make(chan struct{})
	testutils.RegisterFunc(t, ch2, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		connected <- struct{}{}
		<-blockCall
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	})

	go func() {
		ch2Peer := ch2.PeerInfo()
		_, _, _, err := raw.Call(ctx, ch1, ch2Peer.HostPort, ch2Peer.ServiceName, "echo", nil, nil)
		assert.NoError(t, err, "Call failed")
		completed <- struct{}{}
	}()

	// Wait for the call to be connected before calling Close.
	<-connected
	ch1.Close()

	// Now unblock the call and wait for the call to complete.
	close(blockCall)
	<-completed

	// Once the call completes, the channel should be closed.
	runtime.Gosched()
	assert.Equal(t, ChannelClosed, ch1.State())

	// We need to close all open TChannels before verifying blocked goroutines.
	ch2.Close()
	VerifyNoBlockedGoroutines(t)
}
func TestAdvertiseFailed(t *testing.T) {
	withSetup(t, func(hypCh *tchannel.Channel, hostPort string) {
		serverCh := testutils.NewServer(t, nil)
		defer serverCh.Close()

		client, err := NewClient(serverCh, configFor(hostPort), nil)
		require.NoError(t, err, "NewClient")
		defer client.Close()
		assert.Error(t, client.Advertise(), "Advertise without handler should fail")
	})
}
func setupServer(h *mocks.TChanSimpleService, sh *mocks.TChanSecondService) (*tchannel.Channel, *Server, error) {
	ch, err := testutils.NewServer(nil)
	if err != nil {
		return nil, nil, err
	}

	server := NewServer(ch)
	server.Register(gen.NewTChanSimpleServiceServer(h))
	server.Register(gen.NewTChanSecondServiceServer(sh))
	return ch, server, nil
}
func newAdvertisedEchoServer(t *testing.T, name string, mockHB *mockhyperbahn.Mock, f func()) *tchannel.Channel {
	server := testutils.NewServer(t, &testutils.ChannelOpts{
		ServiceName: name,
	})
	testutils.RegisterEcho(server, f)

	hbClient, err := hyperbahn.NewClient(server, mockHB.Configuration(), nil)
	require.NoError(t, err, "Failed to set up Hyperbahn client")
	require.NoError(t, hbClient.Advertise(), "Advertise failed")

	return server
}
func setupServer(h *mocks.TChanTCollector) (*tchannel.Channel, error) {
	tchan, err := testutils.NewServer(&testutils.ChannelOpts{
		ServiceName: tcollectorServiceName,
	})
	if err != nil {
		return nil, err
	}

	server := thrift.NewServer(tchan)
	server.Register(gen.NewTChanTCollectorServer(h))
	return tchan, nil
}
// TestCloseSendError tests that we do not attempt to send system errors on a
// closed connection, and that there is no race between an error frame being
// added to the channel and the channel closing.
// TODO(prashant): This test is waiting for timeout, but socket close shouldn't wait for timeout.
func TestCloseSendError(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	serverCh, err := testutils.NewServer(nil)
	require.NoError(t, err, "NewServer failed")

	closed := uint32(0)
	counter := uint32(0)
	testutils.RegisterFunc(t, serverCh, "echo", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		atomic.AddUint32(&counter, 1)
		return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
	})

	clientCh, err := testutils.NewClient(nil)
	require.NoError(t, err, "NewClient failed")

	// Make a call to create a connection that will be shared.
	peerInfo := serverCh.PeerInfo()
	_, _, _, err = raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
	require.NoError(t, err, "Call should succeed")

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			time.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)
			_, _, _, err := raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "echo", nil, nil)
			if err != nil && atomic.LoadUint32(&closed) == 0 {
				t.Errorf("Call failed: %v", err)
			}
			wg.Done()
		}()
	}

	// Wait for the server to have processed some number of these calls.
	for {
		if atomic.LoadUint32(&counter) >= 10 {
			break
		}
		runtime.Gosched()
	}

	atomic.AddUint32(&closed, 1)
	serverCh.Close()

	// Wait for all the goroutines to end.
	wg.Wait()

	clientCh.Close()
	VerifyNoBlockedGoroutines(t)
}
func (t *closeSemanticsTest) makeServer(name string) (*Channel, chan struct{}) {
	ch := testutils.NewServer(t.T, &testutils.ChannelOpts{ServiceName: name})

	c := make(chan struct{})
	testutils.RegisterFunc(ch, "stream", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		<-c
		return &raw.Res{}, nil
	})
	testutils.RegisterFunc(ch, "call", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		return &raw.Res{}, nil
	})
	return ch, c
}
func TestGetSubchannelOptionsOnNew(t *testing.T) {
	ch := testutils.NewServer(t, nil)
	defer ch.Close()

	peers := ch.GetSubChannel("s", Isolated).Peers()
	want := peers.Add("1.1.1.1:1")

	peers2 := ch.GetSubChannel("s", Isolated).Peers()
	assert.Equal(t, peers, peers2, "Get isolated subchannel should not clear existing peers")

	peer, err := peers2.Get(nil)
	require.NoError(t, err, "Should get peer")
	assert.Equal(t, want, peer, "Unexpected peer")
}
func TestRemotePeer(t *testing.T) {
	tests := []struct {
		name       string
		remote     *Channel
		expectedFn func(state *RuntimeState, serverHP string) PeerInfo
	}{
		{
			name:   "ephemeral client",
			remote: testutils.NewClient(t, nil),
			expectedFn: func(state *RuntimeState, serverHP string) PeerInfo {
				hostPort := state.RootPeers[serverHP].OutboundConnections[0].LocalHostPort
				return PeerInfo{
					HostPort:    hostPort,
					IsEphemeral: true,
					ProcessName: state.LocalPeer.ProcessName,
				}
			},
		},
		{
			name:   "listening server",
			remote: testutils.NewServer(t, nil),
			expectedFn: func(state *RuntimeState, _ string) PeerInfo {
				return PeerInfo{
					HostPort:    state.LocalPeer.HostPort,
					IsEphemeral: false,
					ProcessName: state.LocalPeer.ProcessName,
				}
			},
		},
	}

	ctx, cancel := NewContext(time.Second)
	defer cancel()

	for _, tt := range tests {
		WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
			defer tt.remote.Close()

			gotPeer := make(chan PeerInfo, 1)
			testutils.RegisterFunc(ch, "test", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				gotPeer <- CurrentCall(ctx).RemotePeer()
				return &raw.Res{}, nil
			})

			_, _, _, err := raw.Call(ctx, tt.remote, hostPort, ch.ServiceName(), "test", nil, nil)
			assert.NoError(t, err, "%v: Call failed", tt.name)
			expected := tt.expectedFn(tt.remote.IntrospectState(nil), hostPort)
			assert.Equal(t, expected, <-gotPeer, "%v: RemotePeer mismatch", tt.name)
		})
	}
}
func benchmarkGetConnection(b *testing.B, numIncoming, numOutgoing int) {
	ctx, cancel := NewContext(10 * time.Second)
	defer cancel()

	s1 := testutils.NewServer(b, nil)
	s2 := testutils.NewServer(b, nil)
	defer s1.Close()
	defer s2.Close()

	for i := 0; i < numOutgoing; i++ {
		_, err := s1.Connect(ctx, s2.PeerInfo().HostPort)
		require.NoError(b, err, "Connect from s1 -> s2 failed")
	}
	for i := 0; i < numIncoming; i++ {
		_, err := s2.Connect(ctx, s1.PeerInfo().HostPort)
		require.NoError(b, err, "Connect from s2 -> s1 failed")
	}

	peer := s1.Peers().GetOrAdd(s2.PeerInfo().HostPort)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		peer.GetConnection(ctx)
	}
}
func TestChildCallsNotSampled(t *testing.T) {
	var traceEnabledCalls int

	s1 := testutils.NewServer(t, testutils.NewOpts().SetTraceSampleRate(0.0001))
	defer s1.Close()
	s2 := testutils.NewServer(t, nil)
	defer s2.Close()

	testutils.RegisterFunc(s1, "s1", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		_, _, _, err := raw.Call(ctx, s1, s2.PeerInfo().HostPort, s2.ServiceName(), "s2", nil, nil)
		require.NoError(t, err, "raw.Call from s1 to s2 failed")
		return &raw.Res{}, nil
	})

	testutils.RegisterFunc(s2, "s2", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
		if CurrentSpan(ctx).TracingEnabled() {
			traceEnabledCalls++
		}
		return &raw.Res{}, nil
	})

	client := testutils.NewClient(t, nil)
	defer client.Close()

	const numCalls = 100
	for i := 0; i < numCalls; i++ {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		_, _, _, err := raw.Call(ctx, client, s1.PeerInfo().HostPort, s1.ServiceName(), "s1", nil, nil)
		require.NoError(t, err, "raw.Call to s1 failed")
	}

	// Even though s1 has sampling enabled, it should not affect incoming calls.
	assert.Equal(t, numCalls, traceEnabledCalls, "Trace sampling should not affect inbound calls")
}
func SetupServer(t *testing.T, fn thrift.HealthFunc) (*tchannel.Channel, string) {
	_, cancel := tchannel.NewContext(time.Second * 10)
	defer cancel()

	opts := testutils.NewOpts().
		SetServiceName("testing").
		DisableLogVerification()
	tchan := testutils.NewServer(t, opts)
	if fn != nil {
		server := thrift.NewServer(tchan)
		server.RegisterHealthHandler(fn)
	}
	return tchan, tchan.PeerInfo().HostPort
}
func setupTChan(t *testing.T, mux *http.ServeMux) (string, func()) {
	ch := testutils.NewServer(t, testutils.NewOpts().SetServiceName("test"))
	handler := func(ctx context.Context, call *tchannel.InboundCall) {
		req, err := ReadRequest(call)
		if !assert.NoError(t, err, "ReadRequest failed") {
			return
		}

		// Make the HTTP call using the default mux.
		writer, finish := ResponseWriter(call.Response())
		mux.ServeHTTP(writer, req)
		finish()
	}
	ch.Register(tchannel.HandlerFunc(handler), "http")
	return ch.PeerInfo().HostPort, func() { ch.Close() }
}
func TestInvalidThriftBytes(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	ch := testutils.NewClient(t, nil)
	sCh := testutils.NewServer(t, nil)
	defer sCh.Close()

	svr := NewServer(sCh)
	svr.Register(gen.NewTChanSecondServiceServer(new(mocks.TChanSecondService)))

	tests := []struct {
		name string
		arg3 []byte
	}{
		{
			name: "missing bytes",
			arg3: serializeStruct(t, &gen.SecondServiceEchoArgs{Arg: "Hello world"})[:5],
		},
		{
			name: "wrong struct",
			arg3: serializeStruct(t, &gen.Data{B1: true}),
		},
	}

	for _, tt := range tests {
		sPeer := sCh.PeerInfo()
		call, err := ch.BeginCall(ctx, sPeer.HostPort, sPeer.ServiceName, "SecondService::Echo", &tchannel.CallOptions{
			Format: tchannel.Thrift,
		})
		require.NoError(t, err, "BeginCall failed")
		require.NoError(t, tchannel.NewArgWriter(call.Arg2Writer()).Write([]byte{0, 0}), "Write arg2 failed")

		writer, err := call.Arg3Writer()
		require.NoError(t, err, "Arg3Writer failed")

		_, err = writer.Write(tt.arg3)
		require.NoError(t, err, "Write arg3 failed")
		require.NoError(t, writer.Close(), "Close failed")

		response := call.Response()
		_, _, err = raw.ReadArgsV2(response)
		assert.Error(t, err, "%v: Expected error", tt.name)
		assert.Equal(t, tchannel.ErrCodeBadRequest, tchannel.GetSystemErrorCode(err),
			"%v: Expected bad request, got %v", tt.name, err)
	}
}
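// serializeStruct, used above to build the corrupt payloads, is assumed to
// render a generated Thrift struct to its binary-protocol bytes. A minimal
// sketch, assuming the older (context-free) Apache Thrift Go API imported as
// athrift:
func serializeStruct(t *testing.T, s athrift.TStruct) []byte {
	trans := athrift.NewTMemoryBuffer()
	p := athrift.NewTBinaryProtocolTransport(trans)
	require.NoError(t, s.Write(p), "Failed to serialize struct")
	return trans.Bytes()
}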
func (s *PropagationTestSuite) runWithTracer(t *testing.T, tracer tracerChoice) {
	testCases, ok := s.TestCases[tracer.tracerType]
	if !ok {
		t.Logf("No test cases for encoding=%s and tracer=%s", s.Encoding.Format, tracer.tracerType)
		return
	}
	opts := &testutils.ChannelOpts{
		ChannelOptions: tchannel.ChannelOptions{Tracer: tracer.tracer},
		DisableRelay:   true,
	}
	ch := testutils.NewServer(t, opts)
	defer ch.Close()
	ch.Peers().Add(ch.PeerInfo().HostPort)
	call := s.Register(t, ch)
	for _, tt := range testCases {
		s.runTestCase(t, tracer, ch, tt, call)
	}
}
func TestInitialAdvertiseFailedRetry(t *testing.T) {
	withSetup(t, func(hypCh *tchannel.Channel, hyperbahnHostPort string) {
		count := 0
		adHandler := func(ctx json.Context, req *AdRequest) (*AdResponse, error) {
			count++
			return nil, tchannel.NewSystemError(tchannel.ErrCodeUnexpected, "unexpected")
		}
		json.Register(hypCh, json.Handlers{"ad": adHandler}, nil)

		ch := testutils.NewServer(t, nil)
		defer ch.Close()
		client, err := NewClient(ch, configFor(hyperbahnHostPort), nil)
		assert.NoError(t, err, "hyperbahn NewClient failed")
		defer client.Close()

		assert.Error(t, client.Advertise(), "Advertise should not succeed")
		// A single advertise call is retried 5 times by TChannel.
		assert.Equal(t, 5, count, "adHandler not retried correct number of times")
	})
}