// DoPerRPCCreds performs a unary RPC with a per-RPC OAuth2 token.
func DoPerRPCCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) {
	jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile)
	pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
	req := &testpb.SimpleRequest{
		ResponseType:   testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize:   proto.Int32(int32(largeRespSize)),
		Payload:        pl,
		FillUsername:   proto.Bool(true),
		FillOauthScope: proto.Bool(true),
	}
	token := GetToken(serviceAccountKeyFile, oauthScope)
	kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken}
	ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}})
	reply, err := tc.UnaryCall(ctx, req)
	if err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
	user := reply.GetUsername()
	scope := reply.GetOauthScope()
	if !strings.Contains(string(jsonKey), user) {
		grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
	}
	if !strings.Contains(oauthScope, scope) {
		grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope)
	}
	grpclog.Println("PerRPCCreds done")
}
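
// The sketch below is not part of the original interop client; it is a minimal,
// hypothetical illustration of the server side of the exchange above. With the
// same pre-1.0 metadata API, the per-RPC token attached via metadata.NewContext
// arrives in the handler's context and can be read with metadata.FromContext.
// The testServer type and the checkToken helper are assumptions for illustration.
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	md, ok := metadata.FromContext(ctx)
	if !ok {
		return nil, grpc.Errorf(codes.Unauthenticated, "missing metadata")
	}
	auth := md["authorization"]
	if len(auth) == 0 || !checkToken(auth[0]) { // checkToken is a hypothetical validator.
		return nil, grpc.Errorf(codes.Unauthenticated, "invalid or missing OAuth2 token")
	}
	// The real test server also fills Username and OauthScope in the response;
	// that part is elided in this sketch.
	return &testpb.SimpleResponse{}, nil
}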
func testMetadataStreamingRPC(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	go func() {
		headerMD, err := stream.Header()
		if e.security == "tls" {
			delete(headerMD, "transport_security_type")
		}
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// test the cached value.
		headerMD, err = stream.Header()
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		var index int
		for index < len(reqSizes) {
			respParam := []*testpb.ResponseParameters{
				{
					Size: proto.Int32(int32(respSizes[index])),
				},
			}
			payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
			if err != nil {
				// Avoid t.Fatal from a non-test goroutine; report the error and bail out.
				t.Error(err)
				return
			}
			req := &testpb.StreamingOutputCallRequest{
				ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
				ResponseParameters: respParam,
				Payload:            payload,
			}
			if err := stream.Send(req); err != nil {
				t.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				return
			}
			index++
		}
		// Tell the server we're done sending args.
		stream.CloseSend()
	}()
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata)
	}
}
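
// Hypothetical sketch (not part of the test above) of the server half the
// streaming test relies on: a FullDuplexCall handler can echo the client's
// metadata back as header and trailer metadata through the ServerStream API.
// The testServer type and the echo behavior are assumptions for illustration;
// io.EOF requires the standard io package.
func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	if md, ok := metadata.FromContext(stream.Context()); ok {
		// Send the received metadata back as the header, and arrange for it to
		// be sent as the trailer when the handler returns.
		if err := stream.SendHeader(md); err != nil {
			return err
		}
		stream.SetTrailer(md)
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil // the client has closed its send direction
		}
		if err != nil {
			return err
		}
		_ = in // response construction per ResponseParameters is elided in this sketch
	}
}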
func testMetadataUnaryRPC(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	var header, trailer metadata.MD
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
	}
	if !reflect.DeepEqual(testMetadata, header) {
		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
	}
	if !reflect.DeepEqual(testMetadata, trailer) {
		t.Fatalf("Received trailer metadata %v, want %v", trailer, testMetadata)
	}
}
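
// For the unary case, grpc.Header and grpc.Trailer on the client only see
// metadata the server actually emits. A minimal, hypothetical handler fragment
// that does so with the pre-1.0 API is sketched below; grpc.SendHeader and
// grpc.SetTrailer attach metadata to the stream the transport cached in the
// context (see newContextWithStream in the transport code further down). The
// function name echoMetadataUnary is an assumption for illustration.
func echoMetadataUnary(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	if md, ok := metadata.FromContext(ctx); ok {
		if err := grpc.SendHeader(ctx, md); err != nil {
			return nil, err
		}
		if err := grpc.SetTrailer(ctx, md); err != nil {
			return nil, err
		}
	}
	// Response payload construction is elided in this sketch.
	return &testpb.SimpleResponse{}, nil
}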
func testFailedEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}
// DoCancelAfterBegin cancels the RPC after metadata has been sent but before payloads are sent.
func DoCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterBegin done")
}
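
// Minimal, hypothetical usage sketch for the interop helpers above: dial the
// test server, build a TestService client, and run the cancellation case. The
// serverAddr parameter and the use of grpc.WithInsecure are assumptions; the
// real interop client chooses its dial options from command-line flags.
func runCancelAfterBegin(serverAddr string) {
	conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("grpc.Dial(%q) failed: %v", serverAddr, err)
	}
	defer conn.Close()
	tc := testpb.NewTestServiceClient(conn)
	DoCancelAfterBegin(tc)
}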
func testFailedServerStreaming(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.StreamingOutputCall(ctx, req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
	}
	if _, err := stream.Recv(); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}
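
// Both "failed" tests above assume a server that rejects RPCs carrying the
// test's extra metadata. A hypothetical handler fragment producing the exact
// error the tests compare against could look like this; hasExtraMetadata is an
// assumed helper standing in for whatever check the real test server performs.
func rejectExtraMetadata(ctx context.Context) error {
	if md, ok := metadata.FromContext(ctx); ok && hasExtraMetadata(md) {
		return grpc.Errorf(codes.DataLoss, "got extra metadata")
	}
	return nil
}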
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
	// With this transport type there will be exactly 1 stream: this HTTP request.

	var ctx context.Context
	var cancel context.CancelFunc
	if ht.timeoutSet {
		ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
	} else {
		ctx, cancel = context.WithCancel(context.Background())
	}

	// requestOver is closed when either the request's context is done
	// or the status has been written via WriteStatus.
	requestOver := make(chan struct{})

	// clientGone receives a single value if peer is gone, either
	// because the underlying connection is dead or because the
	// peer sends an http2 RST_STREAM.
	clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
	go func() {
		select {
		case <-requestOver:
			return
		case <-ht.closedCh:
		case <-clientGone:
		}
		cancel()
	}()

	req := ht.req

	s := &Stream{
		id:            0,            // irrelevant
		windowHandler: func(int) {}, // nothing
		cancel:        cancel,
		buf:           newRecvBuffer(),
		st:            ht,
		method:        req.URL.Path,
		recvCompress:  req.Header.Get("grpc-encoding"),
	}
	pr := &peer.Peer{
		Addr: ht.RemoteAddr(),
	}
	if req.TLS != nil {
		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
	}
	ctx = metadata.NewContext(ctx, ht.headerMD)
	ctx = peer.NewContext(ctx, pr)
	s.ctx = newContextWithStream(ctx, s)
	s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}

	// readerDone is closed when the Body.Read-ing goroutine exits.
	readerDone := make(chan struct{})
	go func() {
		defer close(readerDone)
		for {
			buf := make([]byte, 1024) // TODO: minimize garbage, optimize recvBuffer code/ownership
			n, err := req.Body.Read(buf)
			if n > 0 {
				s.buf.put(&recvMsg{data: buf[:n]})
			}
			if err != nil {
				s.buf.put(&recvMsg{err: mapRecvMsgError(err)})
				return
			}
		}
	}()

	// startStream is provided by the *grpc.Server's serveStreams.
	// It starts a goroutine serving s and exits immediately.
	// The goroutine that is started is the one that then calls
	// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
	startStream(s)

	ht.runStream()
	close(requestOver)

	// Wait for reading goroutine to finish.
	req.Body.Close()
	<-readerDone
}
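
// The handler-based transport above backs (*grpc.Server).ServeHTTP, which lets
// a gRPC server share Go's net/http server speaking HTTP/2. A minimal,
// hypothetical wiring sketch follows; the address and certificate paths are
// placeholders, and error handling is reduced to returning the serve error.
func serveViaHTTPHandler(gs *grpc.Server) error {
	mux := http.NewServeMux()
	mux.Handle("/", gs) // gRPC requests are routed into the handler transport via ServeHTTP.
	// TLS is used so the standard library negotiates HTTP/2, which this transport requires.
	return http.ListenAndServeTLS(":8080", "server.crt", "server.key", mux)
}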
// operateHeader takes action on the decoded headers. It returns the current
// stream if there are remaining headers on the wire (in the following
// Continuation frame).
func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream)) (pendingStream *Stream) {
	defer func() {
		if pendingStream == nil {
			hDec.state = decodeState{}
		}
	}()
	endHeaders, err := hDec.decodeServerHTTP2Headers(frame)
	if s == nil {
		// s has been closed.
		return nil
	}
	if err != nil {
		grpclog.Printf("transport: http2Server.operateHeader found %v", err)
		if se, ok := err.(StreamError); ok {
			t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
		}
		return nil
	}
	if endStream {
		// s is just created by the caller. No lock needed.
		s.state = streamReadDone
	}
	if !endHeaders {
		return s
	}
	s.recvCompress = hDec.state.encoding
	if hDec.state.timeoutSet {
		s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout)
	} else {
		s.ctx, s.cancel = context.WithCancel(context.TODO())
	}
	pr := &peer.Peer{
		Addr: t.conn.RemoteAddr(),
	}
	// Attach Auth info if there is any.
	if t.authInfo != nil {
		pr.AuthInfo = t.authInfo
	}
	s.ctx = peer.NewContext(s.ctx, pr)
	// Cache the current stream to the context so that the server application
	// can find out. Required when the server wants to send some metadata
	// back to the client (unary call only).
	s.ctx = newContextWithStream(s.ctx, s)
	// Attach the received metadata to the context.
	if len(hDec.state.mdata) > 0 {
		s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata)
	}
	s.dec = &recvBufferReader{
		ctx:  s.ctx,
		recv: s.buf,
	}
	s.recvCompress = hDec.state.encoding
	s.method = hDec.state.method
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return nil
	}
	if uint32(len(t.activeStreams)) >= t.maxStreams {
		t.mu.Unlock()
		t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
		return nil
	}
	s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
	t.activeStreams[s.id] = s
	t.mu.Unlock()
	s.windowHandler = func(n int) {
		t.updateWindow(s, uint32(n))
	}
	handle(s)
	return nil
}
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) {
	buf := newRecvBuffer()
	fc := &inFlow{
		limit: initialWindowSize,
		conn:  t.fc,
	}
	s := &Stream{
		id:  frame.Header().StreamID,
		st:  t,
		buf: buf,
		fc:  fc,
	}
	var state decodeState
	for _, hf := range frame.Fields {
		state.processHeaderField(hf)
	}
	if err := state.err; err != nil {
		if se, ok := err.(StreamError); ok {
			t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
		}
		return
	}
	if frame.StreamEnded() {
		// s is just created by the caller. No lock needed.
		s.state = streamReadDone
	}
	s.recvCompress = state.encoding
	if state.timeoutSet {
		s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout)
	} else {
		s.ctx, s.cancel = context.WithCancel(context.TODO())
	}
	pr := &peer.Peer{
		Addr: t.conn.RemoteAddr(),
	}
	// Attach Auth info if there is any.
	if t.authInfo != nil {
		pr.AuthInfo = t.authInfo
	}
	s.ctx = peer.NewContext(s.ctx, pr)
	// Cache the current stream to the context so that the server application
	// can find out. Required when the server wants to send some metadata
	// back to the client (unary call only).
	s.ctx = newContextWithStream(s.ctx, s)
	// Attach the received metadata to the context.
	if len(state.mdata) > 0 {
		s.ctx = metadata.NewContext(s.ctx, state.mdata)
	}
	s.dec = &recvBufferReader{
		ctx:  s.ctx,
		recv: s.buf,
	}
	s.recvCompress = state.encoding
	s.method = state.method
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return
	}
	if uint32(len(t.activeStreams)) >= t.maxStreams {
		t.mu.Unlock()
		t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
		return
	}
	s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
	t.activeStreams[s.id] = s
	t.mu.Unlock()
	s.windowHandler = func(n int) {
		t.updateWindow(s, uint32(n))
	}
	handle(s)
}