func TestClientWithMisbehavedServer(t *testing.T) {
	server, ct := setUp(t, 0, math.MaxUint32, misbehaved)
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo",
	}
	conn, ok := ct.(*http2Client)
	if !ok {
		t.Fatalf("Failed to convert %v to *http2Client", ct)
	}
	// Test the logic for the violation of stream flow control window size restriction.
	s, err := ct.NewStream(context.Background(), callHdr)
	if err != nil {
		t.Fatalf("Failed to open stream: %v", err)
	}
	if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
		t.Fatalf("Failed to write: %v", err)
	}
	// Read without window update.
	for {
		p := make([]byte, http2MaxFrameLen)
		if _, err = s.dec.Read(p); err != nil {
			break
		}
	}
	if s.fc.pendingData != initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData != initialWindowSize || conn.fc.pendingUpdate != 0 {
		t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d",
			s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize, 0, initialWindowSize, 0)
	}
	if err != io.EOF || s.statusCode != codes.Internal {
		t.Fatalf("Got err %v and the status code %d, want <EOF> and the code %d", err, s.statusCode, codes.Internal)
	}
	conn.CloseStream(s, err)
	if s.fc.pendingData != 0 || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate != initialWindowSize {
		t.Fatalf("Client mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, %d",
			s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize)
	}
	// Test the logic for the violation of the connection flow control window size restriction.
	//
	// Generate enough streams to drain the connection window.
	callHdr = &CallHdr{
		Host:   "localhost",
		Method: "foo.MaxFrame",
	}
	for i := 0; i < int(initialConnWindowSize/initialWindowSize+10); i++ {
		s, err := ct.NewStream(context.Background(), callHdr)
		if err != nil {
			break
		}
		if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
			break
		}
	}
	// http2Client.errChan is closed due to connection flow control window size violation.
	<-conn.Error()
	ct.Close()
	server.stop()
}
func runStream(b *testing.B, maxConcurrentCalls int) {
	s := stats.AddStats(b, 38)
	b.StopTimer()
	target, stopper := StartServer("localhost:0")
	defer stopper()
	conn := NewClientConn(target)
	tc := testpb.NewTestServiceClient(conn)
	// Warm up connection.
	stream, err := tc.StreamingCall(context.Background())
	if err != nil {
		b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
	}
	for i := 0; i < 10; i++ {
		streamCaller(tc, stream)
	}
	ch := make(chan int, maxConcurrentCalls*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(maxConcurrentCalls)
	// Distribute the b.N calls over maxConcurrentCalls workers.
	for i := 0; i < maxConcurrentCalls; i++ {
		go func() {
			stream, err := tc.StreamingCall(context.Background())
			if err != nil {
				b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
			}
			for range ch {
				start := time.Now()
				streamCaller(tc, stream)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		ch <- i
	}
	b.StopTimer()
	close(ch)
	wg.Wait()
	conn.Close()
}
func performOneRPC(ct ClientTransport) {
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Small",
	}
	s, err := ct.NewStream(context.Background(), callHdr)
	if err != nil {
		return
	}
	opts := Options{
		Last:  true,
		Delay: false,
	}
	if err := ct.Write(s, expectedRequest, &opts); err == nil {
		time.Sleep(5 * time.Millisecond)
		// The following reads could error out because the underlying
		// transport is gone.
		//
		// Read the response.
		p := make([]byte, len(expectedResponse))
		io.ReadFull(s, p)
		// Read io.EOF.
		io.ReadFull(s, p)
	}
}
func testClientStreaming(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
		if err != nil {
			t.Fatal(err)
		}
		req := &testpb.StreamingInputCallRequest{
			Payload: payload,
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		sum += s
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		t.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
}
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Millisecond, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	s.Stop()
	// Set a -1 timeout so that, if transportMonitor gets the error
	// notification in time, the failure path of the first invocation of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	state := cc.State()
	if state != grpc.Connecting && state != grpc.TransientFailure {
		t.Fatalf("cc.State() = %s, want %s or %s", state, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}
func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, testAppUA, e)
	// Wait until cc is connected.
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(10*time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	tc := testpb.NewTestServiceClient(cc)
	var header metadata.MD
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header))
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
	if v, ok := header["ua"]; !ok || v[0] != testAppUA {
		t.Fatalf("header[\"ua\"] = %q, %t, want %q, true", v, ok, testAppUA)
	}
	tearDown(s, cc)
	if ok := cc.WaitForStateChange(5*time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	if cc.State() != grpc.Shutdown {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Shutdown)
	}
}
func doCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v completed with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}
func testLargeUnary(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 271828
	respSize := 314159
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	pt := reply.GetPayload().GetType()
	ps := len(reply.GetPayload().GetBody())
	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
	}
}
func doPerRPCCreds(tc testpb.TestServiceClient) {
	jsonKey := getServiceAccountJSONKey()
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
	req := &testpb.SimpleRequest{
		ResponseType:   testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize:   proto.Int32(int32(largeRespSize)),
		Payload:        pl,
		FillUsername:   proto.Bool(true),
		FillOauthScope: proto.Bool(true),
	}
	token := getToken()
	kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken}
	ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}})
	reply, err := tc.UnaryCall(ctx, req)
	if err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
	user := reply.GetUsername()
	scope := reply.GetOauthScope()
	if !strings.Contains(string(jsonKey), user) {
		grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
	}
	if !strings.Contains(*oauthScope, scope) {
		grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope)
	}
	grpclog.Println("PerRPCCreds done")
}
func testMetadataUnaryRPC(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	var header, trailer metadata.MD
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
	}
	if !reflect.DeepEqual(testMetadata, header) {
		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
	}
	if !reflect.DeepEqual(testMetadata, trailer) {
		t.Fatalf("Received trailer metadata %v, want %v", trailer, testMetadata)
	}
}
func performOneRPC(t *testing.T, tc testpb.TestServiceClient, wg *sync.WaitGroup) {
	argSize := 2718
	respSize := 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	pt := reply.GetPayload().GetType()
	ps := len(reply.GetPayload().GetBody())
	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
	}
	wg.Done()
}
func doClientStreaming(tc testpb.TestServiceClient) {
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, s)
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
		}
		sum += s
		grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum)
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		grpclog.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
	grpclog.Println("ClientStreaming done")
}
// TODO(zhaoq): Improve test coverage of the timeout and cancellation mechanisms.
func testRPCTimeout(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	for i := -1; i <= 10; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d", reply, err, codes.DeadlineExceeded)
		}
	}
}
func TestLargeMessage(t *testing.T) {
	server, ct := setUp(t, 0, math.MaxUint32, normal)
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Large",
	}
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s, err := ct.NewStream(context.Background(), callHdr)
			if err != nil {
				t.Errorf("failed to open stream: %v", err)
			}
			if err := ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil {
				t.Errorf("failed to send data: %v", err)
			}
			p := make([]byte, len(expectedResponseLarge))
			_, recvErr := io.ReadFull(s, p)
			if recvErr != nil || !bytes.Equal(p, expectedResponseLarge) {
				t.Errorf("Error: %v, want <nil>; Result len: %d, want len %d", recvErr, len(p), len(expectedResponseLarge))
			}
			_, recvErr = io.ReadFull(s, p)
			if recvErr != io.EOF {
				t.Errorf("Error: %v; want <EOF>", recvErr)
			}
		}()
	}
	wg.Wait()
	ct.Close()
	server.stop()
}
func testMetadataStreamingRPC(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	go func() {
		headerMD, err := stream.Header()
		if e.security == "tls" {
			delete(headerMD, "transport_security_type")
		}
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// Test the cached value.
		headerMD, err = stream.Header()
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		var index int
		for index < len(reqSizes) {
			respParam := []*testpb.ResponseParameters{
				{
					Size: proto.Int32(int32(respSizes[index])),
				},
			}
			payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
			if err != nil {
				t.Fatal(err)
			}
			req := &testpb.StreamingOutputCallRequest{
				ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
				ResponseParameters: respParam,
				Payload:            payload,
			}
			if err := stream.Send(req); err != nil {
				t.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				return
			}
			index++
		}
		// Tell the server we're done sending args.
		stream.CloseSend()
	}()
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata)
	}
}
func healthCheck(t time.Duration, cc *grpc.ClientConn, serviceName string) (*healthpb.HealthCheckResponse, error) {
	ctx, _ := context.WithTimeout(context.Background(), t)
	hc := healthpb.NewHealthClient(cc)
	req := &healthpb.HealthCheckRequest{
		Service: serviceName,
	}
	return hc.Check(ctx, req)
}
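// A minimal usage sketch of the healthCheck helper above (not part of the
// original code): the address, dial options, and service name are assumptions
// for illustration only.
func checkHealthOnce(addr, service string) {
	// Dial an insecure connection to the assumed address; a real client would
	// supply its own dial options and credentials.
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("grpc.Dial(%q) failed: %v", addr, err)
	}
	defer conn.Close()
	// Run a single health check with a one-second deadline.
	resp, err := healthCheck(time.Second, conn, service)
	if err != nil {
		grpclog.Fatalf("health check for %q failed: %v", service, err)
	}
	grpclog.Printf("health status for %q: %v", service, resp.GetStatus())
}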
func TestStreamContext(t *testing.T) {
	expectedStream := Stream{}
	ctx := newContextWithStream(context.Background(), &expectedStream)
	s, ok := StreamFromContext(ctx)
	if !ok || !reflect.DeepEqual(expectedStream, *s) {
		t.Fatalf("StreamFromContext(%v) = %v, %t, want %v, true", ctx, *s, ok, expectedStream)
	}
}
func testFailedEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}
func doEmptyUnaryCall(tc testpb.TestServiceClient) {
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{})
	if err != nil {
		grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err)
	}
	if !proto.Equal(&testpb.Empty{}, reply) {
		grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{})
	}
	grpclog.Println("EmptyUnaryCall done")
}
// DoUnaryCall performs a unary RPC with the given stub and request and response sizes.
func DoUnaryCall(tc testpb.TestServiceClient, reqSize, respSize int) {
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize)
	req := &testpb.SimpleRequest{
		ResponseType: pl.Type,
		ResponseSize: int32(respSize),
		Payload:      pl,
	}
	if _, err := tc.UnaryCall(context.Background(), req); err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
}
func getToken() *oauth2.Token {
	jsonKey := getServiceAccountJSONKey()
	config, err := google.JWTConfigFromJSON(jsonKey, *oauthScope)
	if err != nil {
		grpclog.Fatalf("Failed to get the config: %v", err)
	}
	token, err := config.TokenSource(context.Background()).Token()
	if err != nil {
		grpclog.Fatalf("Failed to get the token: %v", err)
	}
	return token
}
func doEmptyStream(tc testpb.TestServiceClient) {
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	if err := stream.CloseSend(); err != nil {
		grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
	if _, err := stream.Recv(); err != io.EOF {
		grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err)
	}
	grpclog.Println("EmptyStream done")
}
func doCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterBegin done")
}
func ExampleWithTimeout() {
	// Pass a context with a timeout to tell a blocking function that it
	// should abandon its work after the timeout elapses.
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
	}
	// Output:
	// context deadline exceeded
}
func TestClientSendAndReceive(t *testing.T) {
	server, ct := setUp(t, 0, math.MaxUint32, normal)
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Small",
	}
	s1, err1 := ct.NewStream(context.Background(), callHdr)
	if err1 != nil {
		t.Fatalf("failed to open stream: %v", err1)
	}
	if s1.id != 1 {
		t.Fatalf("wrong stream id: %d", s1.id)
	}
	s2, err2 := ct.NewStream(context.Background(), callHdr)
	if err2 != nil {
		t.Fatalf("failed to open stream: %v", err2)
	}
	if s2.id != 3 {
		t.Fatalf("wrong stream id: %d", s2.id)
	}
	opts := Options{
		Last:  true,
		Delay: false,
	}
	if err := ct.Write(s1, expectedRequest, &opts); err != nil {
		t.Fatalf("failed to send data: %v", err)
	}
	p := make([]byte, len(expectedResponse))
	_, recvErr := io.ReadFull(s1, p)
	if recvErr != nil || !bytes.Equal(p, expectedResponse) {
		t.Fatalf("Error: %v, want <nil>; Result: %v, want %v", recvErr, p, expectedResponse)
	}
	_, recvErr = io.ReadFull(s1, p)
	if recvErr != io.EOF {
		t.Fatalf("Error: %v; want <EOF>", recvErr)
	}
	ct.Close()
	server.stop()
}
func testCancelNoIO(t *testing.T, e env) {
	// Only allow 1 live stream per server transport.
	s, cc := setUp(t, nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx, cancel := context.WithCancel(context.Background())
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Loop until receiving the new max stream setting from the server.
	for {
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		_, err := tc.StreamingInputCall(ctx)
		if err == nil {
			time.Sleep(time.Second)
			continue
		}
		if grpc.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded)
	}
	// If any RPCs slipped through before the client received the max streams
	// setting, let them expire.
	time.Sleep(2 * time.Second)
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		// This should block until the first stream is canceled.
		ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
		if _, err := tc.StreamingInputCall(ctx); err != nil {
			t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
		}
	}()
	cancel()
	<-ch
}
func testPingPong(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	var index int
	for index < len(reqSizes) {
		respParam := []*testpb.ResponseParameters{
			{
				Size: proto.Int32(int32(respSizes[index])),
			},
		}
		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
		if err != nil {
			t.Fatal(err)
		}
		req := &testpb.StreamingOutputCallRequest{
			ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseParameters: respParam,
			Payload:            payload,
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		reply, err := stream.Recv()
		if err != nil {
			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
		}
		pt := reply.GetPayload().GetType()
		if pt != testpb.PayloadType_COMPRESSABLE {
			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
	}
	if err := stream.CloseSend(); err != nil {
		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
	}
}
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(t, nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	_, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Loop until receiving the new max stream setting from the server.
	for {
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		_, err := tc.StreamingInputCall(ctx)
		if err == nil {
			time.Sleep(time.Second)
			continue
		}
		if grpc.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded)
	}
}
func closeLoopStream() {
	s, conn, tc := buildConnection()
	stream, err := tc.StreamingCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
	}
	for i := 0; i < 100; i++ {
		streamCaller(tc, stream)
	}
	ch := make(chan int, *maxConcurrentRPCs*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(*maxConcurrentRPCs)
	// Distribute RPCs over maxConcurrentRPCs workers.
	for i := 0; i < *maxConcurrentRPCs; i++ {
		go func() {
			for range ch {
				start := time.Now()
				streamCaller(tc, stream)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	// Stop the client when time is up.
	done := make(chan struct{})
	go func() {
		<-time.After(time.Duration(*duration) * time.Second)
		close(done)
	}()
	ok := true
	for ok {
		select {
		case ch <- 0:
		case <-done:
			ok = false
		}
	}
	close(ch)
	wg.Wait()
	conn.Close()
	grpclog.Println(s.String())
}
func (r *resolver) Resolve(target string) (naming.Watcher, error) {
	resp, err := r.kapi.Get(context.Background(), target, &etcdcl.GetOptions{Recursive: true})
	if err != nil {
		return nil, err
	}
	kv := make(map[string]string)
	// Record the index in order to avoid missing updates between Get returning
	// and watch starting.
	index := getNode(resp.Node, kv)
	return &watcher{
		wr: r.kapi.Watcher(target, &etcdcl.WatcherOptions{
			AfterIndex: index,
			Recursive:  true}),
		kv: kv,
	}, nil
}