// Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails.
func (s *Server) Serve(lis net.Listener) error {
	s.mu.Lock()
	if s.lis == nil {
		s.mu.Unlock()
		return ErrServerStopped
	}
	s.lis[lis] = true
	s.mu.Unlock()
	defer func() {
		lis.Close()
		s.mu.Lock()
		delete(s.lis, lis)
		s.mu.Unlock()
	}()
	for {
		c, err := lis.Accept()
		if err != nil {
			return err
		}
		var authInfo credentials.AuthInfo
		if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok {
			c, authInfo, err = creds.ServerHandshake(c)
			if err != nil {
				grpclog.Println("grpc: Server.Serve failed to complete security handshake: ", err)
				continue
			}
		}
		s.mu.Lock()
		if s.conns == nil {
			s.mu.Unlock()
			c.Close()
			return nil
		}
		st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
		if err != nil {
			s.mu.Unlock()
			c.Close()
			grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
			continue
		}
		s.conns[st] = true
		s.mu.Unlock()
		go func() {
			st.HandleStreams(func(stream *transport.Stream) {
				s.handleStream(st, stream)
			})
			s.mu.Lock()
			delete(s.conns, st)
			s.mu.Unlock()
		}()
	}
}
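// Usage sketch (illustrative, not part of this package): Serve is typically
// the last call in a server main, after service registration. The names
// testpb.RegisterTestServiceServer and testServer below are assumptions of
// this sketch, not identifiers defined here.
//
//	lis, err := net.Listen("tcp", ":10000")
//	if err != nil {
//		grpclog.Fatalf("failed to listen: %v", err)
//	}
//	s := grpc.NewServer()
//	testpb.RegisterTestServiceServer(s, &testServer{})
//	if err := s.Serve(lis); err != nil {
//		grpclog.Fatalf("failed to serve: %v", err)
//	}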
func doClientStreaming(tc testpb.TestServiceClient) {
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, s)
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
		}
		sum += s
		grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum)
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		grpclog.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
	grpclog.Println("ClientStreaming done")
}
func doPerRPCCreds(tc testpb.TestServiceClient) {
	jsonKey := getServiceAccountJSONKey()
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
	req := &testpb.SimpleRequest{
		ResponseType:   testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize:   proto.Int32(int32(largeRespSize)),
		Payload:        pl,
		FillUsername:   proto.Bool(true),
		FillOauthScope: proto.Bool(true),
	}
	token := getToken()
	kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken}
	ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}})
	reply, err := tc.UnaryCall(ctx, req)
	if err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
	user := reply.GetUsername()
	scope := reply.GetOauthScope()
	if !strings.Contains(string(jsonKey), user) {
		grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
	}
	if !strings.Contains(*oauthScope, scope) {
		grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope)
	}
	grpclog.Println("PerRPCCreds done")
}
func doCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v completed with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}
func main() {
	flag.Parse()
	go func() {
		lis, err := net.Listen("tcp", ":0")
		if err != nil {
			grpclog.Fatalf("Failed to listen: %v", err)
		}
		grpclog.Println("Server profiling address: ", lis.Addr().String())
		if err := http.Serve(lis, nil); err != nil {
			grpclog.Fatalf("Failed to serve: %v", err)
		}
	}()
	addr, stopper := benchmark.StartServer()
	grpclog.Println("Server Address: ", addr)
	<-time.After(time.Duration(*duration) * time.Second)
	stopper()
}
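// Note on the profiling goroutine in main above: http.Serve(lis, nil) serves
// http.DefaultServeMux, so the /debug/pprof/ endpoints are only registered if
// this file blank-imports net/http/pprof (assumed here). A CPU profile could
// then be captured with, for example:
//
//	go tool pprof http://<profiling address>/debug/pprof/profile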
// printFeature gets the feature for the given point.
func printFeature(client pb.RouteGuideClient, point *pb.Point) {
	grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude)
	feature, err := client.GetFeature(context.Background(), point)
	if err != nil {
		grpclog.Fatalf("%v.GetFeature(_) = _, %v", client, err)
	}
	grpclog.Println(feature)
}
func doEmptyUnaryCall(tc testpb.TestServiceClient) {
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{})
	if err != nil {
		grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err)
	}
	if !proto.Equal(&testpb.Empty{}, reply) {
		grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{})
	}
	grpclog.Println("EmptyUnaryCall done")
}
func doCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterBegin done")
}
func doEmptyStream(tc testpb.TestServiceClient) {
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	if err := stream.CloseSend(); err != nil {
		grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
	if _, err := stream.Recv(); err != io.EOF {
		grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err)
	}
	grpclog.Println("EmptyStream done")
}
func doTimeoutOnSleepingServer(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		if grpc.Code(err) == codes.DeadlineExceeded {
			grpclog.Println("TimeoutOnSleepingServer done")
			return
		}
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		Payload:      pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded {
		grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded)
	}
	grpclog.Println("TimeoutOnSleepingServer done")
}
func main() {
	flag.Parse()
	go func() {
		lis, err := net.Listen("tcp", ":0")
		if err != nil {
			grpclog.Fatalf("Failed to listen: %v", err)
		}
		grpclog.Println("Client profiling address: ", lis.Addr().String())
		if err := http.Serve(lis, nil); err != nil {
			grpclog.Fatalf("Failed to serve: %v", err)
		}
	}()
	closeLoop()
}
func closeLoopStream() {
	s, conn, tc := buildConnection()
	stream, err := tc.StreamingCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
	}
	// Warm up the connection.
	for i := 0; i < 100; i++ {
		streamCaller(tc, stream)
	}
	ch := make(chan int, *maxConcurrentRPCs*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(*maxConcurrentRPCs)
	// Distribute RPCs over maxConcurrentRPCs workers.
	for i := 0; i < *maxConcurrentRPCs; i++ {
		go func() {
			for range ch {
				start := time.Now()
				streamCaller(tc, stream)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	// Stop the client when time is up.
	done := make(chan struct{})
	go func() {
		<-time.After(time.Duration(*duration) * time.Second)
		close(done)
	}()
	ok := true
	for ok {
		select {
		case ch <- 0:
		case <-done:
			ok = false
		}
	}
	close(ch)
	wg.Wait()
	conn.Close()
	grpclog.Println(s.String())
}
func closeLoop() {
	s := stats.NewStats(256)
	conn := benchmark.NewClientConn(*server)
	tc := testpb.NewTestServiceClient(conn)
	// Warm up the connection.
	for i := 0; i < 100; i++ {
		caller(tc)
	}
	ch := make(chan int, *maxConcurrentRPCs*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(*maxConcurrentRPCs)
	// Distribute RPCs over maxConcurrentRPCs workers.
	for i := 0; i < *maxConcurrentRPCs; i++ {
		go func() {
			for range ch {
				start := time.Now()
				caller(tc)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	// Stop the client when time is up.
	done := make(chan struct{})
	go func() {
		<-time.After(time.Duration(*duration) * time.Second)
		close(done)
	}()
	ok := true
	for ok {
		select {
		case ch <- 0:
		case <-done:
			ok = false
		}
	}
	close(ch)
	wg.Wait()
	conn.Close()
	grpclog.Println(s.String())
}
// printFeatures lists all the features within the given bounding Rectangle.
func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) {
	grpclog.Printf("Looking for features within %v", rect)
	stream, err := client.ListFeatures(context.Background(), rect)
	if err != nil {
		grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err)
	}
	for {
		feature, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err)
		}
		grpclog.Println(feature)
	}
}
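// Usage sketch (assumed, mirroring the route_guide example layout): dial the
// server, construct the generated client, and call the helpers above. The
// serverAddr flag and the sample coordinates are assumptions of this sketch.
//
//	conn, err := grpc.Dial(*serverAddr)
//	if err != nil {
//		grpclog.Fatalf("failed to dial: %v", err)
//	}
//	defer conn.Close()
//	client := pb.NewRouteGuideClient(conn)
//	printFeature(client, &pb.Point{Latitude: 409146138, Longitude: -746188906})
//	printFeatures(client, &pb.Rectangle{
//		Lo: &pb.Point{Latitude: 400000000, Longitude: -750000000},
//		Hi: &pb.Point{Latitude: 420000000, Longitude: -730000000},
//	})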
func closeLoopUnary() {
	s, conn, tc := buildConnection()
	// Warm up the connection.
	for i := 0; i < 100; i++ {
		unaryCaller(tc)
	}
	ch := make(chan int, *maxConcurrentRPCs*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(*maxConcurrentRPCs)
	// Distribute RPCs over maxConcurrentRPCs workers.
	for i := 0; i < *maxConcurrentRPCs; i++ {
		go func() {
			for range ch {
				start := time.Now()
				unaryCaller(tc)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	// Stop the client when time is up.
	done := make(chan struct{})
	go func() {
		<-time.After(time.Duration(*duration) * time.Second)
		close(done)
	}()
	ok := true
	for ok {
		select {
		case ch <- 0:
		case <-done:
			ok = false
		}
	}
	close(ch)
	wg.Wait()
	conn.Close()
	grpclog.Println(s.String())
}
func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
	s, ok := t.getStream(f)
	if !ok {
		return
	}
	s.mu.Lock()
	if s.state == streamDone {
		s.mu.Unlock()
		return
	}
	s.state = streamDone
	s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)]
	if !ok {
		grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
	}
	s.mu.Unlock()
	s.write(recvMsg{err: io.EOF})
}
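// For reference: http2RSTErrConvTab (defined elsewhere in this package) maps
// HTTP/2 RST_STREAM error codes to gRPC status codes. A minimal sketch of its
// shape follows; the specific entries are illustrative assumptions, not the
// exact table.
//
//	var http2RSTErrConvTab = map[http2.ErrCode]codes.Code{
//		http2.ErrCodeNo:            codes.Internal,
//		http2.ErrCodeProtocol:      codes.Internal,
//		http2.ErrCodeFlowControl:   codes.ResourceExhausted,
//		http2.ErrCodeRefusedStream: codes.Unavailable,
//		http2.ErrCodeCancel:        codes.Canceled,
//	}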
func doLargeUnaryCall(tc testpb.TestServiceClient) {
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(largeRespSize)),
		Payload:      pl,
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
	t := reply.GetPayload().GetType()
	s := len(reply.GetPayload().GetBody())
	if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize {
		grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize)
	}
	grpclog.Println("LargeUnaryCall done")
}
func doJWTTokenCreds(tc testpb.TestServiceClient) {
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(largeRespSize)),
		Payload:      pl,
		FillUsername: proto.Bool(true),
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
	jsonKey := getServiceAccountJSONKey()
	user := reply.GetUsername()
	if !strings.Contains(string(jsonKey), user) {
		grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
	}
	grpclog.Println("JWTTokenCreds done")
}
func doServerStreaming(tc testpb.TestServiceClient) {
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	stream, err := tc.StreamingOutputCall(context.Background(), req)
	if err != nil {
		grpclog.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err)
	}
	var rpcStatus error
	var respCnt int
	var index int
	for {
		reply, err := stream.Recv()
		if err != nil {
			rpcStatus = err
			break
		}
		t := reply.GetPayload().GetType()
		if t != testpb.PayloadType_COMPRESSABLE {
			grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
		respCnt++
	}
	if rpcStatus != io.EOF {
		grpclog.Fatalf("Failed to finish the server streaming rpc: %v", rpcStatus)
	}
	if respCnt != len(respSizes) {
		grpclog.Fatalf("Got %d replies, want %d", respCnt, len(respSizes))
	}
	grpclog.Println("ServerStreaming done")
}
func doPingPong(tc testpb.TestServiceClient) {
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	var index int
	for index < len(reqSizes) {
		respParam := []*testpb.ResponseParameters{
			{
				Size: proto.Int32(int32(respSizes[index])),
			},
		}
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSizes[index])
		req := &testpb.StreamingOutputCallRequest{
			ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseParameters: respParam,
			Payload:            pl,
		}
		if err := stream.Send(req); err != nil {
			grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
		}
		reply, err := stream.Recv()
		if err != nil {
			grpclog.Fatalf("%v.Recv() = %v", stream, err)
		}
		t := reply.GetPayload().GetType()
		if t != testpb.PayloadType_COMPRESSABLE {
			grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
	}
	if err := stream.CloseSend(); err != nil {
		grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
	if _, err := stream.Recv(); err != io.EOF {
		grpclog.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
	}
	grpclog.Println("PingPong done")
}
func main() {
	flag.Parse()
	grpc.EnableTracing = *trace
	go func() {
		lis, err := net.Listen("tcp", ":0")
		if err != nil {
			grpclog.Fatalf("Failed to listen: %v", err)
		}
		grpclog.Println("Client profiling address: ", lis.Addr().String())
		if err := http.Serve(lis, nil); err != nil {
			grpclog.Fatalf("Failed to serve: %v", err)
		}
	}()
	switch *rpcType {
	case 0:
		closeLoopUnary()
	case 1:
		closeLoopStream()
	}
}
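// Example invocation (the flag names are inferred from the flag variables
// used above and should be treated as assumptions):
//
//	client -server=localhost:50051 -maxConcurrentRPCs=10 -duration=60 -rpcType=1 -trace=false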
func doComputeEngineCreds(tc testpb.TestServiceClient) {
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
	req := &testpb.SimpleRequest{
		ResponseType:   testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize:   proto.Int32(int32(largeRespSize)),
		Payload:        pl,
		FillUsername:   proto.Bool(true),
		FillOauthScope: proto.Bool(true),
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
	user := reply.GetUsername()
	scope := reply.GetOauthScope()
	if user != *defaultServiceAccount {
		grpclog.Fatalf("Got user name %q, want %q.", user, *defaultServiceAccount)
	}
	if !strings.Contains(*oauthScope, scope) {
		grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope)
	}
	grpclog.Println("ComputeEngineCreds done")
}
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
func (t *http2Server) HandleStreams(handle func(*Stream)) {
	// Check the validity of the client preface.
	preface := make([]byte, len(clientPreface))
	if _, err := io.ReadFull(t.conn, preface); err != nil {
		grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
		t.Close()
		return
	}
	if !bytes.Equal(preface, clientPreface) {
		grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
		t.Close()
		return
	}
	frame, err := t.framer.readFrame()
	if err != nil {
		grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
		t.Close()
		return
	}
	sf, ok := frame.(*http2.SettingsFrame)
	if !ok {
		grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
		t.Close()
		return
	}
	t.handleSettings(sf)
	hDec := newHPACKDecoder()
	var curStream *Stream
	var wg sync.WaitGroup
	defer wg.Wait()
	for {
		frame, err := t.framer.readFrame()
		if err != nil {
			t.Close()
			return
		}
		switch frame := frame.(type) {
		case *http2.HeadersFrame:
			id := frame.Header().StreamID
			if id%2 != 1 || id <= t.maxStreamID {
				// Illegal gRPC stream id.
				grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id)
				t.Close()
				break
			}
			t.maxStreamID = id
			buf := newRecvBuffer()
			fc := &inFlow{
				limit: initialWindowSize,
				conn:  t.fc,
			}
			curStream = &Stream{
				id:  id,
				st:  t,
				buf: buf,
				fc:  fc,
			}
			endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)
			curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle, &wg)
		case *http2.ContinuationFrame:
			curStream = t.operateHeaders(hDec, curStream, frame, false, handle, &wg)
		case *http2.DataFrame:
			t.handleData(frame)
		case *http2.RSTStreamFrame:
			t.handleRSTStream(frame)
		case *http2.SettingsFrame:
			t.handleSettings(frame)
		case *http2.PingFrame:
			t.handlePing(frame)
		case *http2.WindowUpdateFrame:
			t.handleWindowUpdate(frame)
		case *http2.GoAwayFrame:
			// GoAway from the client is ignored here.
		default:
			grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
		}
	}
}