func testClientStreaming(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		sum += s
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		t.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
}

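// newPayload is used by most of the tests in this file but defined elsewhere
// in it, as are the reqSizes, respSizes, and testMetadata fixtures. A minimal
// sketch of the helper, assuming the tests only need a zeroed body of the
// requested size; the real helper may validate the size or fill the body
// differently per payload type.
func newPayload(t testpb.PayloadType, size int32) *testpb.Payload {
	return &testpb.Payload{
		Type: t.Enum(),
		Body: make([]byte, size),
	}
}
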
func testFailedEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}

func testEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{})
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
}

func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	s.Stop()
	// Set -1 as the timeout so that, if transportMonitor gets the error
	// notification in time, the failure path of the first invocation of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	cc.Close()
}

func testMetadataStreamingRPC(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	go func() {
		headerMD, err := stream.Header()
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// Test the cached value.
		headerMD, err = stream.Header()
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		var index int
		for index < len(reqSizes) {
			respParam := []*testpb.ResponseParameters{
				{
					Size: proto.Int32(int32(respSizes[index])),
				},
			}
			req := &testpb.StreamingOutputCallRequest{
				ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
				ResponseParameters: respParam,
				Payload:            newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])),
			}
			if err := stream.Send(req); err != nil {
				t.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				return
			}
			index++
		}
		// Tell the server we're done sending args.
		stream.CloseSend()
	}()
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata)
	}
}

func testServerStreaming(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	stream, err := tc.StreamingOutputCall(context.Background(), req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
	}
	var rpcStatus error
	var respCnt int
	var index int
	for {
		reply, err := stream.Recv()
		if err != nil {
			rpcStatus = err
			break
		}
		pt := reply.GetPayload().GetType()
		if pt != testpb.PayloadType_COMPRESSABLE {
			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
		respCnt++
	}
	if rpcStatus != io.EOF {
		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
	}
	if respCnt != len(respSizes) {
		t.Fatalf("Got %d replies, want %d", respCnt, len(respSizes))
	}
}

func testCancel(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
	}
}

func testPingPong(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	var index int
	for index < len(reqSizes) {
		respParam := []*testpb.ResponseParameters{
			{
				Size: proto.Int32(int32(respSizes[index])),
			},
		}
		req := &testpb.StreamingOutputCallRequest{
			ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseParameters: respParam,
			Payload:            newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])),
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		reply, err := stream.Recv()
		if err != nil {
			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
		}
		pt := reply.GetPayload().GetType()
		if pt != testpb.PayloadType_COMPRESSABLE {
			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
	}
	if err := stream.CloseSend(); err != nil {
		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
	}
}

func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allow 1 live stream per server transport.
	s, cc := setUp(1, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	var err error
	for {
		time.Sleep(2 * time.Millisecond)
		_, err = tc.StreamingInputCall(context.Background())
		// Loop until the max-concurrent-streams setting is received by
		// the client.
		if err != nil {
			break
		}
	}
	if grpc.Code(err) != codes.Unavailable {
		t.Fatalf("got %v, want error code %d", err, codes.Unavailable)
	}
}

func testLargeUnary(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 271828
	respSize := 314159
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	pt := reply.GetPayload().GetType()
	ps := len(reply.GetPayload().GetBody())
	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
	}
}

// TODO(zhaoq): Have better test coverage of the timeout and cancellation mechanisms.
func testRPCTimeout(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	// Perform 100 RPCs with various timeout values so that the RPCs can
	// time out at different stages of their lifetime. This is a
	// best-effort attempt to cover the various cases that arise when an
	// RPC gets cancelled.
	for i := 1; i <= 100; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Microsecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}

// This test mimics a user who sends 1000 RPCs concurrently on a faulty transport.
// TODO(zhaoq): Refactor to make this clearer and add more cases to test racy
// and error-prone paths.
func testRetry(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		time.Sleep(1 * time.Second)
		// The server shuts down the network connection to cause a
		// transport error, which will be detected by the client-side
		// code.
		s.TestingCloseConns()
		wg.Done()
	}()
	// All these RPCs should succeed eventually.
	for i := 0; i < 1000; i++ {
		time.Sleep(2 * time.Millisecond)
		wg.Add(1)
		go performOneRPC(t, tc, &wg)
	}
	wg.Wait()
}

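// performOneRPC is referenced by testRetry above but defined elsewhere in
// this file. A minimal sketch, assuming a single unary call with illustrative
// request sizes; it reports failures via t.Errorf because it runs on a
// non-test goroutine, where t.Fatalf must not be called.
func performOneRPC(t *testing.T, tc testpb.TestServiceClient, wg *sync.WaitGroup) {
	defer wg.Done()
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		t.Errorf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
		return
	}
	pt := reply.GetPayload().GetType()
	ps := len(reply.GetPayload().GetBody())
	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
		t.Errorf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
	}
}
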
func testFailedServerStreaming(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.StreamingOutputCall(ctx, req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
	}
	if _, err := stream.Recv(); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}

func TestReconnectTimeout(t *testing.T) {
	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	_, port, err := net.SplitHostPort(lis.Addr().String())
	if err != nil {
		t.Fatalf("Failed to parse listener address: %v", err)
	}
	addr := "localhost:" + port
	conn, err := grpc.Dial(addr, grpc.WithTimeout(5*time.Second))
	if err != nil {
		t.Fatalf("Failed to dial the server %q: %v", addr, err)
	}
	// Close the unaccepted connection (i.e., conn).
	lis.Close()
	tc := testpb.NewTestServiceClient(conn)
	waitC := make(chan struct{})
	go func() {
		defer close(waitC)
		argSize := 271828
		respSize := 314159
		req := &testpb.SimpleRequest{
			ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseSize: proto.Int32(int32(respSize)),
			Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
		}
		// Use t.Errorf rather than t.Fatalf: Fatalf must only be
		// called from the goroutine running the test function.
		if _, err := tc.UnaryCall(context.Background(), req); err == nil {
			t.Errorf("TestService/UnaryCall(_, _) = _, <nil>, want _, non-nil")
		}
	}()
	// Block until the reconnect times out.
	<-waitC
	if err := conn.Close(); err != grpc.ErrClientConnClosing {
		t.Fatalf("%v.Close() = %v, want %v", conn, err, grpc.ErrClientConnClosing)
	}
}

func testMetadataUnaryRPC(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	var header, trailer metadata.MD
	ctx := metadata.NewContext(context.Background(), testMetadata)
	_, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer))
	if err != nil {
		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
	}
	if !reflect.DeepEqual(testMetadata, header) {
		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
	}
	if !reflect.DeepEqual(testMetadata, trailer) {
		t.Fatalf("Received trailer metadata %v, want %v", trailer, testMetadata)
	}
}
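
// setUp and tearDown are likewise defined elsewhere in this file. A minimal
// sketch of the pair, assuming an insecure local server; the env-specific
// transport and security options carried by e (ignored here), the testServer
// type, and the use of the standard library log package are all assumptions,
// not the originals.
func setUp(maxStream uint32, e env) (*grpc.Server, *grpc.ClientConn) {
	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		log.Fatalf("Failed to listen: %v", err)
	}
	s := grpc.NewServer(grpc.MaxConcurrentStreams(maxStream))
	testpb.RegisterTestServiceServer(s, &testServer{})
	go s.Serve(lis)
	_, port, err := net.SplitHostPort(lis.Addr().String())
	if err != nil {
		log.Fatalf("Failed to parse listener address: %v", err)
	}
	cc, err := grpc.Dial("localhost:"+port, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("Failed to dial: %v", err)
	}
	return s, cc
}

func tearDown(s *grpc.Server, cc *grpc.ClientConn) {
	cc.Close()
	s.Stop()
}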