Example #1
func testLargeUnary(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 271828
	respSize := 314159

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	pt := reply.GetPayload().GetType()
	ps := len(reply.GetPayload().GetBody())
	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
	}
}
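Several examples below build request payloads through a newPayload helper that the snippets never show. The following is a minimal sketch of such a helper, assuming the generated testpb package from grpc-go's interop test protos; the real helper differs across repository versions (some later examples, e.g. #10 and #23, use a variant that returns only the payload).

// newPayload builds a testpb.Payload of the requested type and size.
// A sketch for reference, not the canonical grpc-go implementation.
func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) {
	if size < 0 {
		return nil, fmt.Errorf("requested an invalid payload length %d", size)
	}
	if t != testpb.PayloadType_COMPRESSABLE {
		return nil, fmt.Errorf("unsupported payload type: %d", t)
	}
	return &testpb.Payload{
		Type: t.Enum(),
		Body: make([]byte, size), // all-zero bodies compress well
	}, nil
}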
Example #2
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	done := make(chan struct{})
	ch := make(chan int)
	go func() {
		// Create the 5s timeout channel once, outside the loop: with
		// time.After inside the select, the timer would be re-created on
		// every iteration and done would never be closed.
		timeout := time.After(5 * time.Second)
		for {
			select {
			case <-time.After(5 * time.Millisecond):
				ch <- 0
			case <-timeout:
				close(done)
				return
			}
		}
	}()
	// Loop until a stream creation hangs due to the new max stream setting.
	for {
		select {
		case <-ch:
			ctx, _ := context.WithTimeout(context.Background(), time.Second)
			if _, err := tc.StreamingInputCall(ctx); err != nil {
				if grpc.Code(err) == codes.DeadlineExceeded {
					return
				}
				t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
			}
		case <-done:
			t.Fatalf("Client has not received the max stream setting in 5 seconds.")
		}
	}
}
Example #3
func testCancel(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
	}
}
Example #4
func testMetadataStreamingRPC(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	go func() {
		headerMD, err := stream.Header()
		if e.security == "tls" {
			delete(headerMD, "transport_security_type")
		}
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// test the cached value.
		headerMD, err = stream.Header()
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		var index int
		for index < len(reqSizes) {
			respParam := []*testpb.ResponseParameters{
				{
					Size: proto.Int32(int32(respSizes[index])),
				},
			}

			payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
			if err != nil {
				// Don't call t.Fatal from a goroutine other than the one
				// running the test; report the error and return instead.
				t.Error(err)
				return
			}

			req := &testpb.StreamingOutputCallRequest{
				ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
				ResponseParameters: respParam,
				Payload:            payload,
			}
			if err := stream.Send(req); err != nil {
				t.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				return
			}
			index++
		}
		// Tell the server we're done sending args.
		stream.CloseSend()
	}()
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata)
	}
}
Example #5
// TODO(zhaoq): Improve test coverage of the timeout and cancellation mechanisms.
func testRPCTimeout(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	for i := -1; i <= 10; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}
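A side note on the ctx, _ := context.WithTimeout(...) pattern that recurs in these examples: discarding the CancelFunc was common in code of this vintage, but it holds the context's timer until the deadline fires, and go vet's lostcancel check now flags it. The idiomatic equivalent releases the context explicitly, e.g.:

	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
	reply, err := tc.UnaryCall(ctx, req)
	cancel() // release the context's timer as soon as the call returns

This changes nothing about the expected DeadlineExceeded outcome.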
Example #6
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Millisecond, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	s.Stop()
	// Use a -1 timeout to make sure that, even if transportMonitor gets the
	// error notification in time, the failure path of the first invocation of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	state := cc.State()
	if state != grpc.Connecting && state != grpc.TransientFailure {
		t.Fatalf("cc.State() = %s, want %s or %s", state, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}
Example #7
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, addr := serverSetUp(t, true, nil, 1, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Loop until receiving the new max stream setting from the server.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		_, err := tc.StreamingInputCall(ctx)
		if err == nil {
			time.Sleep(time.Second)
			continue
		}
		if grpc.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded)
	}
}
Example #8
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(nil, 1, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	// Perform a unary RPC to make sure the new settings were propagated to the client.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", tc, err)
	}
	// Initiate the 1st stream
	if _, err := tc.StreamingInputCall(context.Background()); err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
	}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// The 2nd stream should block until its deadline is exceeded.
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		if _, err := tc.StreamingInputCall(ctx); grpc.Code(err) != codes.DeadlineExceeded {
			t.Errorf("%v.StreamingInputCall(%v) = _, %v, want error code %d", tc, ctx, err, codes.DeadlineExceeded)
		}
	}()
	wg.Wait()
}
Example #9
func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, testAppUA, e)
	// Wait until cc is connected.
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(10*time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	tc := testpb.NewTestServiceClient(cc)
	var header metadata.MD
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header))
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
	if v, ok := header["ua"]; !ok || v[0] != testAppUA {
		t.Fatalf("header[\"ua\"] = %q, %t, want %q, true", v, ok, testAppUA)
	}
	tearDown(s, cc)
	if ok := cc.WaitForStateChange(5*time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	if cc.State() != grpc.Shutdown {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Shutdown)
	}
}
Example #10
func testClientStreaming(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		sum += s
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
}
Example #11
// This test mimics a user who sends 1000 RPCs concurrently on a faulty transport.
// TODO(zhaoq): Refactor to make this clearer and add more cases to test racy
// and error-prone paths.
func testRetry(t *testing.T, e env) {
	s, addr := serverSetUp(t, true, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	var wg sync.WaitGroup

	numRPC := 1000
	rpcSpacing := 2 * time.Millisecond

	wg.Add(1)
	go func() {
		// Halfway through starting RPCs, kill all connections:
		time.Sleep(time.Duration(numRPC/2) * rpcSpacing)

		// The server shuts down the network connection to make a
		// transport error which will be detected by the client side
		// code.
		s.TestingCloseConns()
		wg.Done()
	}()
	// All these RPCs should succeed eventually.
	for i := 0; i < numRPC; i++ {
		time.Sleep(rpcSpacing)
		wg.Add(1)
		go performOneRPC(t, tc, &wg)
	}
	wg.Wait()
}
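Both retry examples fan out to a performOneRPC helper that is not reproduced here. A plausible sketch, assuming the two-value newPayload sketched after Example #1 and that every RPC is ultimately expected to succeed despite the injected connection closes:

func performOneRPC(t *testing.T, tc testpb.TestServiceClient, wg *sync.WaitGroup) {
	defer wg.Done()
	const argSize = 2718
	const respSize = 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
	if err != nil {
		t.Error(err)
		return
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(respSize),
		Payload:      payload,
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		t.Errorf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
		return
	}
	if got := len(reply.GetPayload().GetBody()); got != respSize {
		t.Errorf("reply body length = %d, want %d", got, respSize)
	}
}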
Example #12
func testMetadataUnaryRPC(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	var header, trailer metadata.MD
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
	}
	if !reflect.DeepEqual(testMetadata, header) {
		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
	}
	if !reflect.DeepEqual(testMetadata, trailer) {
		t.Fatalf("Received trailer metadata %v, want %v", trailer, testMetadata)
	}
}
Example #13
func testEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{})
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
}
Example #14
func testFailedEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}
Example #15
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	s.Stop()
	// Use a -1 timeout to make sure that, even if transportMonitor gets the
	// error notification in time, the failure path of the first invocation of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	cc.Close()
}
Example #16
func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, testAppUA, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	var header metadata.MD
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header))
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
	if v, ok := header["ua"]; !ok || v[0] != testAppUA {
		t.Fatalf("header[\"ua\"] = %q, %t, want %q, true", v, ok, testAppUA)
	}
}
Example #17
func testPingPong(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	var index int
	for index < len(reqSizes) {
		respParam := []*testpb.ResponseParameters{
			{
				Size: proto.Int32(int32(respSizes[index])),
			},
		}

		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
		if err != nil {
			t.Fatal(err)
		}

		req := &testpb.StreamingOutputCallRequest{
			ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseParameters: respParam,
			Payload:            payload,
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		reply, err := stream.Recv()
		if err != nil {
			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
		}
		pt := reply.GetPayload().GetType()
		if pt != testpb.PayloadType_COMPRESSABLE {
			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
	}
	if err := stream.CloseSend(); err != nil {
		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
	}
}
Example #18
func testNoService(t *testing.T, e env) {
	s, addr := serverSetUp(t, false, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	// Make sure the settings ack has been sent.
	time.Sleep(2 * time.Second)
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if _, err := stream.Recv(); grpc.Code(err) != codes.Unimplemented {
		t.Fatalf("stream.Recv() = _, %v, want _, error code %d", err, codes.Unimplemented)
	}
}
Example #19
func testCompressOK(t *testing.T, e env) {
	s, addr := serverSetUp(t, true, nil, math.MaxUint32, grpc.NewGZIPCompressor(), grpc.NewGZIPDecompressor(), e)
	cc := clientSetUp(t, addr, grpc.NewGZIPCompressor(), grpc.NewGZIPDecompressor(), "", e)
	// Unary call
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 271828
	respSize := 314159
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	if _, err := tc.UnaryCall(context.Background(), req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Streaming RPC
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
	if err != nil {
		t.Fatal(err)
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            payload,
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
	}
}
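For reference, the client half of the compression setup above amounts to dial options along these lines; a sketch assuming clientSetUp forwards its compressor/decompressor arguments to the historical grpc.WithCompressor and grpc.WithDecompressor dial options, and an insecure channel:

	cc, err := grpc.Dial(addr,
		grpc.WithInsecure(),
		grpc.WithCompressor(grpc.NewGZIPCompressor()),     // compress outbound messages
		grpc.WithDecompressor(grpc.NewGZIPDecompressor()), // decompress inbound messages
	)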
Example #20
func testServerStreaming(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	stream, err := tc.StreamingOutputCall(context.Background(), req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
	}
	var rpcStatus error
	var respCnt int
	var index int
	for {
		reply, err := stream.Recv()
		if err != nil {
			rpcStatus = err
			break
		}
		pt := reply.GetPayload().GetType()
		if pt != testpb.PayloadType_COMPRESSABLE {
			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
		respCnt++
	}
	if rpcStatus != io.EOF {
		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
	}
	if respCnt != len(respSizes) {
		t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt)
	}
}
Example #21
func TestReconnectTimeout(t *testing.T) {
	defer leakCheck(t)()
	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	_, port, err := net.SplitHostPort(lis.Addr().String())
	if err != nil {
		t.Fatalf("Failed to parse listener address: %v", err)
	}
	addr := "localhost:" + port
	conn, err := grpc.Dial(addr, grpc.WithTimeout(5*time.Second), grpc.WithBlock(), grpc.WithInsecure())
	if err != nil {
		t.Fatalf("Failed to dial to the server %q: %v", addr, err)
	}
	// Close unaccepted connection (i.e., conn).
	lis.Close()
	tc := testpb.NewTestServiceClient(conn)
	waitC := make(chan struct{})
	go func() {
		defer close(waitC)
		const argSize = 271828
		const respSize = 314159

		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
		if err != nil {
			t.Error(err)
			return
		}

		req := &testpb.SimpleRequest{
			ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseSize: proto.Int32(respSize),
			Payload:      payload,
		}
		if _, err := tc.UnaryCall(context.Background(), req); err == nil {
			t.Errorf("TestService/UnaryCall(_, _) = _, <nil>, want _, non-nil")
			return
		}
	}()
	// Block until reconnect times out.
	<-waitC
	if err := conn.Close(); err != grpc.ErrClientConnClosing {
		t.Fatalf("%v.Close() = %v, want %v", conn, err, grpc.ErrClientConnClosing)
	}
}
Example #22
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(1, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	var err error
	for {
		time.Sleep(2 * time.Millisecond)
		_, err = tc.StreamingInputCall(context.Background())
		// Loop until the settings of max concurrent streams is
		// received by the client.
		if err != nil {
			break
		}
	}
	if grpc.Code(err) != codes.Unavailable {
		t.Fatalf("got %v, want error code %d", err, codes.Unavailable)
	}
}
Example #23
// TODO(zhaoq): Improve test coverage of the timeout and cancellation mechanisms.
func testRPCTimeout(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	// Perform 100 RPCs with various timeout values so that the RPCs time out
	// at different stages of their lifetime. This is a best-effort attempt to
	// cover the various cases in which an RPC gets cancelled.
	for i := 1; i <= 100; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Microsecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}
Example #24
// This test mimics a user who sends 1000 RPCs concurrently on a faulty transport.
// TODO(zhaoq): Refactor to make this clearer and add more cases to test racy
// and error-prone paths.
func testRetry(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		time.Sleep(1 * time.Second)
		// The server shuts down the network connection to make a
		// transport error which will be detected by the client side
		// code.
		s.TestingCloseConns()
		wg.Done()
	}()
	// All these RPCs should succeed eventually.
	for i := 0; i < 1000; i++ {
		time.Sleep(2 * time.Millisecond)
		wg.Add(1)
		go performOneRPC(t, tc, &wg)
	}
	wg.Wait()
}
Example #25
func testCancelNoIO(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, addr := serverSetUp(t, nil, 1, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx, cancel := context.WithCancel(context.Background())
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Loop until receiving the new max stream setting from the server.
	for {
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		_, err := tc.StreamingInputCall(ctx)
		if err == nil {
			time.Sleep(time.Second)
			continue
		}
		if grpc.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded)
	}
	// If any RPCs slipped in before the client received the max-streams
	// setting, let them expire.
	time.Sleep(2 * time.Second)
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		// This should be blocked until the 1st is canceled.
		ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
		if _, err := tc.StreamingInputCall(ctx); err != nil {
			t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
		}
	}()
	cancel()
	<-ch
}
Example #26
func testFailedServerStreaming(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.StreamingOutputCall(ctx, req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
	}
	if _, err := stream.Recv(); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}
Example #27
func setUp(useTLS bool, maxStream uint32) (s *grpc.Server, tc testpb.TestServiceClient) {
	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		log.Fatalf("Failed to listen: %v", err)
	}
	_, port, err := net.SplitHostPort(lis.Addr().String())
	if err != nil {
		log.Fatalf("Failed to parse listener address: %v", err)
	}
	s = grpc.NewServer(grpc.MaxConcurrentStreams(maxStream))
	testpb.RegisterTestServiceServer(s, &testServer{})
	if useTLS {
		creds, err := credentials.NewServerTLSFromFile(tlsDir+"server1.pem", tlsDir+"server1.key")
		if err != nil {
			log.Fatalf("Failed to generate credentials %v", err)
		}
		go s.Serve(creds.NewListener(lis))
	} else {
		go s.Serve(lis)
	}
	addr := "localhost:" + port
	var conn *grpc.ClientConn
	if useTLS {
		// Use a distinct name for the credentials error: `creds, err := ...`
		// would shadow the outer err, letting a TLS dial failure slip past
		// the error check below this block.
		creds, cerr := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com")
		if cerr != nil {
			log.Fatalf("Failed to create credentials %v", cerr)
		}
		conn, err = grpc.Dial(addr, grpc.WithTransportCredentials(creds))
	} else {
		conn, err = grpc.Dial(addr)
	}
	if err != nil {
		log.Fatalf("Dial(%q) = %v", addr, err)
	}
	tc = testpb.NewTestServiceClient(conn)
	return
}
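Most examples defer a tearDown helper that mirrors this setup. A minimal sketch, assuming all it has to do is release the client connection and stop the server:

func tearDown(s *grpc.Server, cc *grpc.ClientConn) {
	cc.Close()
	s.Stop()
}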
Example #28
func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, testAppUA, e)
	// Wait until cc is connected.
	ctx, _ := context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Idle); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Idle, err)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Connecting); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Connecting, err)
	}
	if state, err := cc.State(); err != nil || state != grpc.Ready {
		t.Fatalf("cc.State() = %s, %v, want %s, <nil>", state, err, grpc.Ready)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Ready); err == nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, <nil>, want _, %v", grpc.Ready, context.DeadlineExceeded)
	}
	tc := testpb.NewTestServiceClient(cc)
	var header metadata.MD
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header))
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
	if v, ok := header["ua"]; !ok || v[0] != testAppUA {
		t.Fatalf("header[\"ua\"] = %q, %t, want %q, true", v, ok, testAppUA)
	}
	tearDown(s, cc)
	ctx, _ = context.WithTimeout(context.Background(), 5*time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Ready); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Ready, err)
	}
	if state, err := cc.State(); err != nil || state != grpc.Shutdown {
		t.Fatalf("cc.State() = %s, %v, want %s, <nil>", state, err, grpc.Shutdown)
	}
}
Example #29
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	ctx, _ := context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Idle); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Idle, err)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Connecting); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Connecting, err)
	}
	if state, err := cc.State(); err != nil || state != grpc.Ready {
		t.Fatalf("cc.State() = %s, %v, want %s, <nil>", state, err, grpc.Ready)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Ready); err != context.DeadlineExceeded {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, %v", grpc.Ready, err, context.DeadlineExceeded)
	}
	s.Stop()
	// Use a -1 timeout to make sure that, even if transportMonitor gets the
	// error notification in time, the failure path of the first invocation of
	// ClientConn.wait hits the deadline-exceeded error.
	ctx, _ = context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	ctx, _ = context.WithTimeout(context.Background(), time.Second)
	if _, err := cc.WaitForStateChange(ctx, grpc.Ready); err != nil {
		t.Fatalf("cc.WaitForStateChange(_, %s) = _, %v, want _, <nil>", grpc.Ready, err)
	}
	if state, err := cc.State(); err != nil || (state != grpc.Connecting && state != grpc.TransientFailure) {
		t.Fatalf("cc.State() = %s, %v, want %s or %s, <nil>", state, err, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}