func TestRequestStateRetry(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	server := testutils.NewServer(t, nil)
	defer server.Close()
	server.Register(raw.Wrap(newTestHandler(t)), "echo")

	client := testutils.NewClient(t, nil)
	defer client.Close()

	counter := 0
	sc := client.GetSubChannel(server.PeerInfo().ServiceName)
	err := client.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
		defer func() { counter++ }()

		assert.Equal(t, counter, len(rs.SelectedPeers), "SelectedPeers should not be reused")

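		// The first four attempts add an unreachable peer, forcing a failure
		// and a retry; the fifth adds the real server so the call succeeds.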
		if counter < 4 {
			client.Peers().Add(testutils.GetClosedHostPort(t))
		} else {
			client.Peers().Add(server.PeerInfo().HostPort)
		}

		_, err := raw.CallV2(ctx, sc, raw.CArgs{
			Method:      "echo",
			CallOptions: &CallOptions{RequestState: rs},
		})
		return err
	})
	assert.NoError(t, err, "RunWithRetry should succeed")
	assert.Equal(t, 5, counter, "RunWithRetry should make 5 attempts")
}
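These snippets omit their surrounding file. A minimal sketch of the package clause and imports they assume, modeled on how tchannel-go lays out its own tests (the dot-import is what lets NewContext, RequestState, ErrServerBusy, and friends appear unqualified); the exact context import has varied across library versions, so treat it as an assumption:

package tchannel_test

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	. "github.com/uber/tchannel-go"
	"github.com/uber/tchannel-go/raw"
	"github.com/uber/tchannel-go/testutils"
)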
Example #2
func TestRequestStateRetry(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")

		closedHostPorts := make([]string, 4)
		for i := range closedHostPorts {
			hostPort, closeFn := testutils.GetAcceptCloseHostPort(t)
			defer closeFn()
			closedHostPorts[i] = hostPort
		}

		// Since we close connections remotely, there will be some warnings that we can ignore.
		opts := testutils.NewOpts().DisableLogVerification()
		client := ts.NewClient(opts)
		defer client.Close()
		counter := 0

		sc := client.GetSubChannel(ts.Server().ServiceName())
		err := client.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
			defer func() { counter++ }()

			expectedPeers := counter
			if expectedPeers > 0 {
				// SelectedPeers also holds an entry for each distinct host; the
				// closed listeners all share one host, so expect one extra entry.
				expectedPeers++
			}

			assert.Equal(t, expectedPeers, len(rs.SelectedPeers), "SelectedPeers should not be reused")

			if counter < 4 {
				client.Peers().Add(closedHostPorts[counter])
			} else {
				client.Peers().Add(ts.HostPort())
			}

			_, err := raw.CallV2(ctx, sc, raw.CArgs{
				Method:      "echo",
				CallOptions: &CallOptions{RequestState: rs},
			})
			return err
		})
		assert.NoError(t, err, "RunWithRetry should succeed")
		assert.Equal(t, 5, counter, "RunWithRetry should make 5 attempts")
	})
}
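The expectedPeers bookkeeping above follows from how RequestState tracks peers: recording a selected peer stores the bare host alongside the exact host:port, and every closed listener in this test shares one host, hence the single extra entry after the first attempt. A minimal sketch of that behavior, assuming the exported AddSelectedPeer helper and using a made-up host:port:

func ExampleRequestState_AddSelectedPeer() {
	var rs RequestState
	// Recording a peer adds both the host:port and the bare host.
	rs.AddSelectedPeer("127.0.0.1:4040")
	fmt.Println(len(rs.SelectedPeers))
	// Output: 2
}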
Example #3
func TestStatsWithRetries(t *testing.T) {
	defer testutils.SetTimeout(t, 2*time.Second)()
	a := testutils.DurationArray

	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)
	nowStub, nowFn := testutils.NowStub(initialTime)

	clientStats := newRecordingStatsReporter()
	ch := testutils.NewClient(t, testutils.NewOpts().
		SetStatsReporter(clientStats).
		SetTimeNow(nowStub))
	defer ch.Close()

	nowFn(10 * time.Millisecond)
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// Disable the relay: a relay would interpose on these calls and skew the
	// stubbed timings and stats asserted below.
	opts := testutils.NewOpts().NoRelay()
	WithVerifiedServer(t, opts, func(serverCh *Channel, hostPort string) {
		respErr := make(chan error, 1)
		testutils.RegisterFunc(serverCh, "req", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, <-respErr
		})
		ch.Peers().Add(serverCh.PeerInfo().HostPort)

		// timeNow is called at:
		// RunWithRetry start, per-attempt start, per-attempt end.
		// Each attempt takes 2 * step.
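		// Concretely, with nowFn(10 * time.Millisecond) every timeNow() call
		// advances the stubbed clock by 10ms: an attempt's own timer spans one
		// step (10ms), while the overall timer spans two calls per attempt,
		// i.e. 2 * attempts * 10ms (20ms for one attempt, 100ms for five).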
		tests := []struct {
			expectErr           error
			numFailures         int
			numAttempts         int
			overallLatency      time.Duration
			perAttemptLatencies []time.Duration
		}{
			{
				numFailures:         0,
				numAttempts:         1,
				perAttemptLatencies: a(10 * time.Millisecond),
				overallLatency:      20 * time.Millisecond,
			},
			{
				numFailures:         1,
				numAttempts:         2,
				perAttemptLatencies: a(10*time.Millisecond, 10*time.Millisecond),
				overallLatency:      40 * time.Millisecond,
			},
			{
				numFailures:         4,
				numAttempts:         5,
				perAttemptLatencies: a(10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond),
				overallLatency:      100 * time.Millisecond,
			},
			{
				numFailures:         5,
				numAttempts:         5,
				expectErr:           ErrServerBusy,
				perAttemptLatencies: a(10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond, 10*time.Millisecond),
				overallLatency:      100 * time.Millisecond,
			},
		}

		for _, tt := range tests {
			clientStats.Reset()
			err := ch.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
				if rs.Attempt > tt.numFailures {
					respErr <- nil
				} else {
					respErr <- ErrServerBusy
				}

				sc := ch.GetSubChannel(serverCh.ServiceName())
				_, err := raw.CallV2(ctx, sc, raw.CArgs{
					Method:      "req",
					CallOptions: &CallOptions{RequestState: rs},
				})
				return err
			})
			assert.Equal(t, tt.expectErr, err, "RunWithRetry unexpected error")

			outboundTags := tagsForOutboundCall(serverCh, ch, "req")
			if tt.expectErr == nil {
				clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
			}
			clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, int64(tt.numAttempts))
			for i, latency := range tt.perAttemptLatencies {
				clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, latency)
				if i > 0 {
					tags := tagsForOutboundCall(serverCh, ch, "req")
					tags["retry-count"] = fmt.Sprint(i)
					clientStats.Expected.IncCounter("outbound.calls.retries", tags, 1)
				}
			}
			clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, tt.overallLatency)
			clientStats.Validate(t)
		}
	})
}
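For reference, here is a minimal sketch of the stubbed-clock pattern behind testutils.NowStub as used above; this is an illustration, not the testutils implementation, and newNowStub is a hypothetical name. The first return value replaces time.Now, and the second sets how far the clock jumps on each call, which is what makes the latency assertions exact:

func newNowStub(initial time.Time) (now func() time.Time, setStep func(time.Duration)) {
	cur := initial
	var step time.Duration
	now = func() time.Time {
		// Every call moves the fake clock forward by the configured step.
		cur = cur.Add(step)
		return cur
	}
	setStep = func(d time.Duration) { step = d }
	return now, setStep
}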