func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
	fc := &inFlow{
		limit: initialWindowSize,
		conn:  t.fc,
	}
	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
	s := &Stream{
		id:            t.nextID,
		method:        callHdr.Method,
		buf:           newRecvBuffer(),
		fc:            fc,
		sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
		headerChan:    make(chan struct{}),
	}
	t.nextID += 2
	s.windowHandler = func(n int) {
		t.updateWindow(s, uint32(n))
	}
	// Make a stream be able to cancel the pending operations by itself.
	s.ctx, s.cancel = context.WithCancel(ctx)
	s.dec = &recvBufferReader{
		ctx:  s.ctx,
		recv: s.buf,
	}
	return s
}
func doCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v completed with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}
func doCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterBegin done")
}
func testCancelNoIO(t *testing.T, e env) {
	// Only allow 1 live stream per server transport.
	s, cc := setUp(t, nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx, cancel := context.WithCancel(context.Background())
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Loop until the client receives the new max stream setting from the server.
	for {
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		_, err := tc.StreamingInputCall(ctx)
		if err == nil {
			time.Sleep(time.Second)
			continue
		}
		if grpc.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded)
	}
	// If any RPCs slipped in before the client received the max streams setting,
	// let them expire.
	time.Sleep(2 * time.Second)
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		// This call should block until the first stream is canceled.
		ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
		if _, err := tc.StreamingInputCall(ctx); err != nil {
			t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
		}
	}()
	cancel()
	<-ch
}
func testCancel(t *testing.T, e env) {
	s, cc := setUp(t, nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d", reply, err, codes.Canceled)
	}
}
// operateHeaders takes action on the decoded headers. It returns the current
// stream if there are remaining headers on the wire (in the following
// Continuation frame).
func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream)) (pendingStream *Stream) {
	defer func() {
		if pendingStream == nil {
			hDec.state = decodeState{}
		}
	}()
	endHeaders, err := hDec.decodeServerHTTP2Headers(frame)
	if s == nil {
		// s has been closed.
		return nil
	}
	if err != nil {
		grpclog.Printf("transport: http2Server.operateHeader found %v", err)
		if se, ok := err.(StreamError); ok {
			t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
		}
		return nil
	}
	if endStream {
		// s is just created by the caller. No lock needed.
		s.state = streamReadDone
	}
	if !endHeaders {
		return s
	}
	if hDec.state.timeoutSet {
		s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout)
	} else {
		s.ctx, s.cancel = context.WithCancel(context.TODO())
	}
	// Attach Auth info if there is any.
	if t.authInfo != nil {
		s.ctx = credentials.NewContext(s.ctx, t.authInfo)
	}
	// Cache the current stream to the context so that the server application
	// can find out. Required when the server wants to send some metadata
	// back to the client (unary call only).
	s.ctx = newContextWithStream(s.ctx, s)
	// Attach the received metadata to the context.
	if len(hDec.state.mdata) > 0 {
		s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata)
	}
	s.dec = &recvBufferReader{
		ctx:  s.ctx,
		recv: s.buf,
	}
	s.method = hDec.state.method
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return nil
	}
	if uint32(len(t.activeStreams)) >= t.maxStreams {
		t.mu.Unlock()
		t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
		return nil
	}
	s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
	t.activeStreams[s.id] = s
	t.mu.Unlock()
	s.windowHandler = func(n int) {
		t.updateWindow(s, uint32(n))
	}
	handle(s)
	return nil
}
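For reference, here is a minimal sketch of how a server-side handler can observe the cancellation that operateHeaders wires up: the per-stream context created above is the one the service implementation ultimately sees, so a handler can watch stream.Context().Done(). The testServer receiver, the testpb generated stream interface, and the io import are assumed to match the test code above; the handler body is illustrative only, not the implementation used by these tests.

// Illustrative only: a client-streaming handler that gives up when the client
// cancels. stream.Context() is derived from the per-stream context created in
// operateHeaders, so a client-side cancel (or an expired timeout) closes Done().
func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
	for {
		select {
		case <-stream.Context().Done():
			// The client canceled or the deadline passed; stop cleanly.
			return stream.Context().Err()
		default:
		}
		if _, err := stream.Recv(); err != nil {
			if err == io.EOF {
				// The client finished sending; reply and close the stream.
				return stream.SendAndClose(&testpb.StreamingInputCallResponse{})
			}
			return err
		}
	}
}

This is the pattern that doCancelAfterBegin and testCancelNoIO exercise from the client side: once cancel() is called, the stream's context on the server is done and any blocked Recv returns an error.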