func testClientStreaming(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		sum += s
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		t.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
}

func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	done := make(chan struct{})
	ch := make(chan int)
	go func() {
		for {
			select {
			case <-time.After(5 * time.Millisecond):
				ch <- 0
			case <-time.After(5 * time.Second):
				close(done)
				return
			}
		}
	}()
	// Loop until a stream creation hangs due to the new max stream setting.
	for {
		select {
		case <-ch:
			ctx, _ := context.WithTimeout(context.Background(), time.Second)
			if _, err := tc.StreamingInputCall(ctx); err != nil {
				if grpc.Code(err) == codes.DeadlineExceeded {
					return
				}
				t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
			}
		case <-done:
			t.Fatalf("Client has not received the max stream setting in 5 seconds.")
		}
	}
}

func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, testAppUA, e)
	// Wait until cc is connected.
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(10*time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	tc := testpb.NewTestServiceClient(cc)
	var header metadata.MD
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header))
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
	if v, ok := header["ua"]; !ok || v[0] != testAppUA {
		t.Fatalf("header[\"ua\"] = %q, %t, want %q, true", v, ok, testAppUA)
	}
	tearDown(s, cc)
	if ok := cc.WaitForStateChange(5*time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	if cc.State() != grpc.Shutdown {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Shutdown)
	}
}

func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Millisecond, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	s.Stop()
	// Set -1 as the timeout to make sure that, if transportMonitor gets the
	// error notification in time, the failure path of the first invocation of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	state := cc.State()
	if state != grpc.Connecting && state != grpc.TransientFailure {
		t.Fatalf("cc.State() = %s, want %s or %s", state, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}

func testFailedEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}

func testMetadataStreamingRPC(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	go func() {
		headerMD, err := stream.Header()
		if e.security == "tls" {
			delete(headerMD, "transport_security_type")
		}
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// Test the cached value.
		headerMD, err = stream.Header()
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		var index int
		for index < len(reqSizes) {
			respParam := []*testpb.ResponseParameters{
				{
					Size: proto.Int32(int32(respSizes[index])),
				},
			}
			req := &testpb.StreamingOutputCallRequest{
				ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
				ResponseParameters: respParam,
				Payload:            newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])),
			}
			if err := stream.Send(req); err != nil {
				t.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				return
			}
			index++
		}
		// Tell the server we're done sending args.
		stream.CloseSend()
	}()
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata)
	}
}

func testServerStreaming(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	stream, err := tc.StreamingOutputCall(context.Background(), req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
	}
	var rpcStatus error
	var respCnt int
	var index int
	for {
		reply, err := stream.Recv()
		if err != nil {
			rpcStatus = err
			break
		}
		pt := reply.GetPayload().GetType()
		if pt != testpb.PayloadType_COMPRESSABLE {
			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
		respCnt++
	}
	if rpcStatus != io.EOF {
		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
	}
	if respCnt != len(respSizes) {
		t.Fatalf("Got %d replies, want %d", respCnt, len(respSizes))
	}
}

func testCancel(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
	}
}

func testPingPong(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	var index int
	for index < len(reqSizes) {
		respParam := []*testpb.ResponseParameters{
			{
				Size: proto.Int32(int32(respSizes[index])),
			},
		}
		req := &testpb.StreamingOutputCallRequest{
			ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseParameters: respParam,
			Payload:            newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])),
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		reply, err := stream.Recv()
		if err != nil {
			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
		}
		pt := reply.GetPayload().GetType()
		if pt != testpb.PayloadType_COMPRESSABLE {
			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
	}
	if err := stream.CloseSend(); err != nil {
		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
	}
}

// TODO(zhaoq): Have better test coverage of the timeout and cancellation mechanisms.
func testRPCTimeout(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	for i := -1; i <= 10; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}

func testLargeUnary(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 271828
	respSize := 314159
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	pt := reply.GetPayload().GetType()
	ps := len(reply.GetPayload().GetBody())
	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
	}
}

// This test mimics a user who sends 1000 RPCs concurrently on a faulty transport.
// TODO(zhaoq): Refactor to make this clearer and add more cases to test racy
// and error-prone paths.
func testRetry(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		time.Sleep(1 * time.Second)
		// The server shuts down the network connection to cause a
		// transport error that the client-side code will detect.
		s.TestingCloseConns()
		wg.Done()
	}()
	// All these RPCs should succeed eventually.
	for i := 0; i < 1000; i++ {
		time.Sleep(2 * time.Millisecond)
		wg.Add(1)
		go performOneRPC(t, tc, &wg)
	}
	wg.Wait()
}

func testFailedServerStreaming(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.StreamingOutputCall(ctx, req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
	}
	if _, err := stream.Recv(); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}

func TestReconnectTimeout(t *testing.T) {
	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	_, port, err := net.SplitHostPort(lis.Addr().String())
	if err != nil {
		t.Fatalf("Failed to parse listener address: %v", err)
	}
	addr := "localhost:" + port
	conn, err := grpc.Dial(addr, grpc.WithTimeout(5*time.Second), grpc.WithBlock(), grpc.WithInsecure())
	if err != nil {
		t.Fatalf("Failed to dial the server %q: %v", addr, err)
	}
	// Close the unaccepted connection (i.e., conn).
	lis.Close()
	tc := testpb.NewTestServiceClient(conn)
	waitC := make(chan struct{})
	go func() {
		defer close(waitC)
		argSize := 271828
		respSize := 314159
		req := &testpb.SimpleRequest{
			ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseSize: proto.Int32(int32(respSize)),
			Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
		}
		if _, err := tc.UnaryCall(context.Background(), req); err == nil {
			t.Fatalf("TestService/UnaryCall(_, _) = _, <nil>, want _, non-nil")
		}
	}()
	// Block until the reconnect times out.
	<-waitC
	if err := conn.Close(); err != grpc.ErrClientConnClosing {
		t.Fatalf("%v.Close() = %v, want %v", conn, err, grpc.ErrClientConnClosing)
	}
}

func testMetadataUnaryRPC(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	var header, trailer metadata.MD
	ctx := metadata.NewContext(context.Background(), testMetadata)
	_, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer))
	if err != nil {
		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
	}
	if !reflect.DeepEqual(testMetadata, header) {
		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
	}
	if !reflect.DeepEqual(testMetadata, trailer) {
		t.Fatalf("Received trailer metadata %v, want %v", trailer, testMetadata)
	}
}