// doCancelAfterFirstResponse cancels a full-duplex RPC after receiving the
// first response and verifies that the stream ends with codes.Canceled.
func doCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v completed with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}

// doCancelAfterBegin cancels a streaming-input RPC right after it starts and
// verifies that the RPC fails with codes.Canceled.
func doCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterBegin done")
}

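// A minimal sketch, not part of the original tests, of how the two
// cancellation helpers above might be driven. The server address
// "localhost:10000", the insecure dial option, and the call order are
// assumptions for illustration; the real interop client wires these up from
// command-line flags.
func runCancellationTests() {
	conn, err := grpc.Dial("localhost:10000", grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("grpc.Dial(_) = _, %v", err)
	}
	defer conn.Close()
	tc := testpb.NewTestServiceClient(conn)
	doCancelAfterBegin(tc)
	doCancelAfterFirstResponse(tc)
}
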
// TODO(zhaoq): Have better test coverage of the timeout and cancellation
// mechanisms.
func testRPCTimeout(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	for i := -1; i <= 10; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}

// testTimeoutOnDeadServer verifies that an RPC with an already-expired
// deadline fails fast after the server is stopped, and that the ClientConn
// transitions out of Ready afterwards.
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Millisecond, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	s.Stop()
	// Set a negative timeout so that, if transportMonitor gets the error
	// notification in time, the failure path of the first invocation of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	state := cc.State()
	if state != grpc.Connecting && state != grpc.TransientFailure {
		t.Fatalf("cc.State() = %s, want %s or %s", state, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}

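// A self-contained sketch (the function name is hypothetical) of why a
// negative timeout works in the test above: the context's deadline is already
// in the past when it is created, so Done() is closed immediately and Err()
// reports context.DeadlineExceeded without any real waiting.
func exampleNegativeTimeout() {
	ctx, cancel := context.WithTimeout(context.Background(), -1)
	defer cancel()
	<-ctx.Done()               // returns immediately; the deadline has passed
	grpclog.Println(ctx.Err()) // context.DeadlineExceeded
}
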
// doTimeoutOnSleepingServer starts a full-duplex RPC with a 1ms deadline
// against a server that sleeps, and verifies that the RPC fails with
// codes.DeadlineExceeded.
func doTimeoutOnSleepingServer(tc testpb.TestServiceClient) {
	ctx, _ := context.WithTimeout(context.Background(), 1*time.Millisecond)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		if grpc.Code(err) == codes.DeadlineExceeded {
			grpclog.Println("TimeoutOnSleepingServer done")
			return
		}
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		Payload:      pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded {
		grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded)
	}
	grpclog.Println("TimeoutOnSleepingServer done")
}

// testCancelNoIO verifies that canceling a stream with no pending I/O
// releases its stream quota so that a blocked RPC can proceed.
func testCancelNoIO(t *testing.T, e env) {
	// Allow only one live stream per server transport.
	s, cc := setUp(t, nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx, cancel := context.WithCancel(context.Background())
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Loop until the new max-streams setting is received from the server.
	for {
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		_, err := tc.StreamingInputCall(ctx)
		if err == nil {
			time.Sleep(time.Second)
			continue
		}
		if grpc.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded)
	}
	// If any RPCs slipped in before the client received the max-streams
	// setting, let them expire.
	time.Sleep(2 * time.Second)
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		// This should block until the first stream is canceled.
		ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
		if _, err := tc.StreamingInputCall(ctx); err != nil {
			t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
		}
	}()
	cancel()
	<-ch
}

// testExceedMaxStreamsLimit verifies that, once the server's max-streams
// setting of 1 takes effect, an additional streaming RPC blocks until its
// deadline expires.
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Allow only one live stream per server transport.
	s, cc := setUp(t, nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	_, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Loop until the new max-streams setting is received from the server.
	for {
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		_, err := tc.StreamingInputCall(ctx)
		if err == nil {
			time.Sleep(time.Second)
			continue
		}
		if grpc.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded)
	}
}

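// A minimal sketch of the server-side setup the two tests above depend on.
// setUp is not shown in this excerpt; the assumption is that it caps the
// number of live streams with the MaxConcurrentStreams server option, roughly
// as below. The listener address, the "net" import, and the testServer type
// are taken as given from the surrounding test file.
func sketchMaxStreamsServer(t *testing.T) *grpc.Server {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("net.Listen(_, _) = _, %v", err)
	}
	s := grpc.NewServer(grpc.MaxConcurrentStreams(1)) // only 1 live stream per transport
	testpb.RegisterTestServiceServer(s, &testServer{})
	go s.Serve(lis)
	return s
}
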
func testCancel(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
	}
}