Example #1
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	done := make(chan struct{})
	ch := make(chan int)
	go func() {
		// Arm the 5-second give-up timer once, outside the loop; re-creating
		// it inside the select would reset it on every 5ms tick, so it would
		// never fire.
		timeout := time.After(5 * time.Second)
		for {
			select {
			case <-time.After(5 * time.Millisecond):
				ch <- 0
			case <-timeout:
				close(done)
				return
			}
		}
	}()
	// Loop until a stream creation hangs due to the new max stream setting.
	for {
		select {
		case <-ch:
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			if _, err := tc.StreamingInputCall(ctx); err != nil {
				if grpc.Code(err) == codes.DeadlineExceeded {
					return
				}
				t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
			}
		case <-done:
			t.Fatalf("Client has not received the max stream setting in 5 seconds.")
		}
	}
}
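The same pacing logic is easier to get right with a time.Ticker, which fires repeatedly without re-arming a timer on each iteration. A minimal sketch; pacer is a hypothetical helper name, not part of the test suite:

func pacer(ch chan<- int, done chan<- struct{}, interval, limit time.Duration) {
	tick := time.NewTicker(interval) // fires every interval without re-arming
	defer tick.Stop()
	deadline := time.After(limit) // armed once, so it actually fires
	for {
		select {
		case <-tick.C:
			ch <- 0
		case <-deadline:
			close(done)
			return
		}
	}
}

The test above would then start it with go pacer(ch, done, 5*time.Millisecond, 5*time.Second).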
Example #2
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Millisecond, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	s.Stop()
	// Pass a -1 timeout so that, even if transportMonitor gets the error
	// notification in time, the failure path of the first invocation of
	// ClientConn.wait still hits the deadline-exceeded error.
	ctx, cancel := context.WithTimeout(context.Background(), -1)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	state := cc.State()
	if state != grpc.Connecting && state != grpc.TransientFailure {
		t.Fatalf("cc.State() = %s, want %s or %s", state, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}
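The four state assertions follow one pattern: wait for a transition away from a known state, then check where the connection landed. A sketch of a helper built on the same old-style WaitForStateChange(timeout, sourceState) API the test uses; waitForState is a hypothetical name:

// waitForState blocks until cc reaches want or timeout elapses.
func waitForState(cc *grpc.ClientConn, want grpc.ConnectivityState, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for {
		state := cc.State()
		if state == want {
			return true
		}
		remaining := deadline.Sub(time.Now())
		if remaining <= 0 {
			return false
		}
		// Wait for cc to leave its current state, then re-check.
		if !cc.WaitForStateChange(remaining, state) {
			return false
		}
	}
}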
Example #3
func healthCheck(t time.Duration, cc *grpc.ClientConn, serviceName string) (*healthpb.HealthCheckResponse, error) {
	ctx, cancel := context.WithTimeout(context.Background(), t)
	defer cancel()
	hc := healthpb.NewHealthCheckClient(cc)
	req := &healthpb.HealthCheckRequest{
		Service: serviceName,
	}
	return hc.Check(ctx, req)
}
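A typical caller dials the server and asserts the reported status. A sketch with a hypothetical checkServing helper; the dial options are illustrative and may differ by grpc-go version:

func checkServing(t *testing.T, addr, service string) {
	// Assumes a plaintext test server at addr.
	cc, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		t.Fatalf("grpc.Dial(%q) = _, %v", addr, err)
	}
	defer cc.Close()
	resp, err := healthCheck(1*time.Second, cc, service)
	if err != nil {
		t.Fatalf("healthCheck(_, _, %q) = _, %v", service, err)
	}
	if resp.Status != healthpb.HealthCheckResponse_SERVING {
		t.Fatalf("resp.Status = %v, want %v", resp.Status, healthpb.HealthCheckResponse_SERVING)
	}
}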
Example #4
func ExampleWithTimeout() {
	// Pass a context with a timeout to tell a blocking function that it
	// should abandon its work after the timeout elapses.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel() // releases the context's resources as soon as we return
	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
	}
	// Output:
	// context deadline exceeded
}
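WithTimeout is shorthand for WithDeadline against a wall-clock time; the same select works unchanged with an explicit deadline. A sketch mirroring the example above:

func ExampleWithDeadline() {
	d := time.Now().Add(100 * time.Millisecond)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()
	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
	}
	// Output:
	// context deadline exceeded
}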
Example #5
// TODO(zhaoq): Have a better test coverage of timeout and cancellation mechanism.
func testRPCTimeout(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	for i := -1; i <= 10; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		reply, err := tc.UnaryCall(ctx, req)
		cancel()
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}
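The assertion inside the loop generalizes to any expected code. A sketch; assertUnaryCode is a hypothetical helper, not part of the original suite:

func assertUnaryCode(t *testing.T, tc testpb.TestServiceClient, req *testpb.SimpleRequest, timeout time.Duration, want codes.Code) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	if _, err := tc.UnaryCall(ctx, req); grpc.Code(err) != want {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v; want error code %d", err, want)
	}
}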
Example #6
func TestLargeMessageSuspension(t *testing.T) {
	server, ct := setUp(t, 0, math.MaxUint32, suspended)
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Large",
	}
	// Set a long enough timeout for writing a large message out.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	s, err := ct.NewStream(ctx, callHdr)
	if err != nil {
		t.Fatalf("failed to open stream: %v", err)
	}
	// Write should not be done successfully due to flow control.
	err = ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false})
	expectedErr := StreamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded)
	if err == nil || err != expectedErr {
		t.Fatalf("Write got %v, want %v", err, expectedErr)
	}
	ct.Close()
	server.stop()
}
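The direct comparison works because StreamErrorf returns a comparable StreamError value; when only the code matters, a type assertion is enough. A minimal sketch, assuming StreamError carries a Code field as the comparison above implies:

func isDeadlineStreamError(err error) bool {
	se, ok := err.(StreamError)
	return ok && se.Code == codes.DeadlineExceeded
}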
Example #7
func doTimeoutOnSleepingServer(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		if grpc.Code(err) == codes.DeadlineExceeded {
			grpclog.Println("TimeoutOnSleepingServer done")
			return
		}
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		Payload:      pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded {
		grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded)
	}
	grpclog.Println("TimeoutOnSleepingServer done")
}
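A hypothetical driver for this interop case; the address and dial options are illustrative, not taken from the original harness:

func runTimeoutOnSleepingServer(addr string) {
	cc, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("grpc.Dial(%q) = _, %v", addr, err)
	}
	defer cc.Close()
	doTimeoutOnSleepingServer(testpb.NewTestServiceClient(cc))
}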
Example #8
// operateHeaders takes action on the decoded headers. It returns the current
// stream if there are remaining headers on the wire (in the following
// Continuation frame).
func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream), wg *sync.WaitGroup) (pendingStream *Stream) {
	defer func() {
		if pendingStream == nil {
			hDec.state = decodeState{}
		}
	}()
	endHeaders, err := hDec.decodeServerHTTP2Headers(frame)
	if s == nil {
		// s has been closed.
		return nil
	}
	if err != nil {
		grpclog.Printf("transport: http2Server.operateHeader found %v", err)
		if se, ok := err.(StreamError); ok {
			t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
		}
		return nil
	}
	if endStream {
		// s is just created by the caller. No lock needed.
		s.state = streamReadDone
	}
	if !endHeaders {
		return s
	}
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return nil
	}
	if uint32(len(t.activeStreams)) >= t.maxStreams {
		t.mu.Unlock()
		t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
		return nil
	}
	s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
	t.activeStreams[s.id] = s
	t.mu.Unlock()
	s.windowHandler = func(n int) {
		t.updateWindow(s, uint32(n))
	}
	if hDec.state.timeoutSet {
		s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout)
	} else {
		s.ctx, s.cancel = context.WithCancel(context.TODO())
	}
	// Attach Auth info if there is any.
	if t.authInfo != nil {
		s.ctx = credentials.NewContext(s.ctx, t.authInfo)
	}
	// Cache the current stream to the context so that the server application
	// can find out. Required when the server wants to send some metadata
	// back to the client (unary call only).
	s.ctx = newContextWithStream(s.ctx, s)
	// Attach the received metadata to the context.
	if len(hDec.state.mdata) > 0 {
		s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata)
	}

	s.dec = &recvBufferReader{
		ctx:  s.ctx,
		recv: s.buf,
	}
	s.method = hDec.state.method

	wg.Add(1)
	go func() {
		handle(s)
		wg.Done()
	}()
	return nil
}
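The WithTimeout/WithCancel branch above is what hands every handler a context that mirrors the client's grpc-timeout header. A sketch of the application side; the testServer type and workDelay are hypothetical:

func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
	const workDelay = 10 * time.Millisecond // stand-in for real work
	if deadline, ok := ctx.Deadline(); ok {
		grpclog.Printf("client deadline: %v", deadline)
	}
	select {
	case <-time.After(workDelay):
		return &testpb.Empty{}, nil
	case <-ctx.Done():
		// The cancel func installed in operateHeaders fires this path.
		return nil, ctx.Err()
	}
}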