// DoCancelAfterFirstResponse cancels the RPC after receiving the first message from the server.
func DoCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v completed with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}
// DoCancelAfterBegin cancels the RPC after metadata has been sent but before payloads are sent.
func DoCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterBegin done")
}
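// runCancellationCases is a minimal, hypothetical usage sketch and not part of the
// interop suite above: it dials a test server and runs the two cancellation cases.
// The serverAddr parameter and the insecure dial option are assumptions for
// illustration only; the required dial options may differ by grpc-go version.
func runCancellationCases(serverAddr string) {
	conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("grpc.Dial(%q) failed: %v", serverAddr, err)
	}
	defer conn.Close()
	tc := testpb.NewTestServiceClient(conn)
	// Each case exits the process via grpclog.Fatalf if its expectation is not met.
	DoCancelAfterBegin(tc)
	DoCancelAfterFirstResponse(tc)
}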
func testCancel(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
	}
}
// TODO(zhaoq): Have better test coverage of the timeout and cancellation mechanism.
func testRPCTimeout(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	for i := -1; i <= 10; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}
func testCompressServerHasNoSupport(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, grpc.NewGZIPCompressor, nil, "", e)
	// Unary call
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 271828
	respSize := 314159
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.InvalidArgument {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %d", err, codes.InvalidArgument)
	}
	// Streaming RPC
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
	if err != nil {
		t.Fatal(err)
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            payload,
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.InvalidArgument {
		t.Fatalf("%v.Recv() = %v, want error code %d", stream, err, codes.InvalidArgument)
	}
}
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	s.Stop()
	// Set -1 as the timeout to make sure that, even if transportMonitor gets the
	// error notification in time, the failure path of the 1st invocation of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	cc.Close()
}
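// expiredContextExample is a small illustrative sketch (standard-library behavior,
// not part of the tests above) showing why a -1 timeout works: a negative duration
// yields a context whose deadline is already in the past, so the context is done
// immediately and ctx.Err() reports context.DeadlineExceeded before any I/O happens.
func expiredContextExample() {
	ctx, cancel := context.WithTimeout(context.Background(), -1)
	defer cancel()
	<-ctx.Done() // returns immediately; the deadline has already passed
	if ctx.Err() == context.DeadlineExceeded {
		grpclog.Println("context expired before any I/O happened")
	}
}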
// DoTimeoutOnSleepingServer performs an RPC on a sleeping server, which causes the RPC to time out.
func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient) {
	ctx, _ := context.WithTimeout(context.Background(), 1*time.Millisecond)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		if grpc.Code(err) == codes.DeadlineExceeded {
			grpclog.Println("TimeoutOnSleepingServer done")
			return
		}
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		Payload:      pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded {
		grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded)
	}
	grpclog.Println("TimeoutOnSleepingServer done")
}
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allow 1 live stream per server transport.
	s, cc := setUp(1, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	var err error
	for {
		time.Sleep(2 * time.Millisecond)
		_, err = tc.StreamingInputCall(context.Background())
		// Loop until the max concurrent streams setting is received by the client.
		if err != nil {
			break
		}
	}
	if grpc.Code(err) != codes.Unavailable {
		t.Fatalf("got %v, want error code %d", err, codes.Unavailable)
	}
}
// TODO(zhaoq): Have better test coverage of the timeout and cancellation mechanism.
func testRPCTimeout(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	// Perform 100 RPCs with various timeout values so that the RPCs could time out
	// at different stages of their lifetime. This is a best-effort attempt to cover
	// various cases in which an RPC gets cancelled.
	for i := 1; i <= 100; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Microsecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}
func testCancelNoIO(t *testing.T, e env) {
	// Only allow 1 live stream per server transport.
	s, addr := serverSetUp(t, nil, 1, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx, cancel := context.WithCancel(context.Background())
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Loop until the new max stream setting is received from the server.
	for {
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		_, err := tc.StreamingInputCall(ctx)
		if err == nil {
			time.Sleep(time.Second)
			continue
		}
		if grpc.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded)
	}
	// If any RPCs slipped in before the client received the max streams setting,
	// let them expire.
	time.Sleep(2 * time.Second)
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		// This should block until the 1st stream is canceled.
		ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
		if _, err := tc.StreamingInputCall(ctx); err != nil {
			t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
		}
	}()
	cancel()
	<-ch
}
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	ctx, _ := context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Idle); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Idle, err)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Connecting); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Connecting, err)
	}
	if state, err := cc.State(); err != nil || state != grpc.Ready {
		t.Fatalf("cc.State() = %s, %v, want %s, <nil>", state, err, grpc.Ready)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Ready); err != context.DeadlineExceeded {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, %v", grpc.Ready, err, context.DeadlineExceeded)
	}
	s.Stop()
	// Set -1 as the timeout to make sure that, even if transportMonitor gets the
	// error notification in time, the failure path of the 1st invocation of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ = context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Ready); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Ready, err)
	}
	if state, err := cc.State(); err != nil || (state != grpc.Connecting && state != grpc.TransientFailure) {
		t.Fatalf("cc.State() = %s, %v, want %s or %s, <nil>", state, err, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allow 1 live stream per server transport.
	s, addr := serverSetUp(t, nil, 1, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	_, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Loop until the new max stream setting is received from the server.
	for {
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		_, err := tc.StreamingInputCall(ctx)
		if err == nil {
			time.Sleep(time.Second)
			continue
		}
		if grpc.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded)
	}
}
func isRPCError(err error) bool {
	return grpc.Code(err) != codes.Unknown
}
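// checkCanceled is a small illustrative sketch (assumed, not from the original code):
// isRPCError can be used to separate errors that carry an explicit gRPC status code
// from unknown or non-RPC errors before asserting on a specific code.
func checkCanceled(err error) bool {
	if !isRPCError(err) {
		// The error has no explicit status code; treat it as a local or transport failure.
		return false
	}
	return grpc.Code(err) == codes.Canceled
}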