// handleRawConn is run in its own goroutine and handles a just-accepted
// connection that has not had any I/O performed on it yet.
func (s *Server) handleRawConn(rawConn net.Conn) {
	// Wrap the raw connection with the configured transport credentials
	// (e.g. TLS); on failure record the handshake error and drop the conn.
	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
	if err != nil {
		s.mu.Lock()
		s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
		s.mu.Unlock()
		grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
		rawConn.Close()
		return
	}
	s.mu.Lock()
	// s.conns == nil indicates the server has been stopped; refuse the
	// connection instead of serving it.
	if s.conns == nil {
		s.mu.Unlock()
		conn.Close()
		return
	}
	s.mu.Unlock()
	// Serve via the http.Handler-based implementation or the native HTTP/2
	// transport, depending on server options.
	if s.opts.useHandlerImpl {
		s.serveUsingHandler(conn)
	} else {
		s.serveNewHTTP2Transport(conn, authInfo)
	}
}
// DefaultHTTPError is the default implementation of HTTPError. // If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. // If otherwise, it replies with http.StatusInternalServerError. // // The response body returned by this function is a JSON object, // which contains a member whose key is "error" and whose value is err.Error(). func DefaultHTTPError(ctx context.Context, w http.ResponseWriter, _ *http.Request, err error) { const fallback = `{"error": "failed to marshal error message"}` w.Header().Del("Trailer") w.Header().Set("Content-Type", "application/json") body := errorBody{ Error: grpc.ErrorDesc(err), Code: int(grpc.Code(err)), } buf, merr := json.Marshal(body) if merr != nil { grpclog.Printf("Failed to marshal error message %q: %v", body, merr) w.WriteHeader(http.StatusInternalServerError) if _, err := io.WriteString(w, fallback); err != nil { grpclog.Printf("Failed to write response: %v", err) } return } md, ok := ServerMetadataFromContext(ctx) if !ok { grpclog.Printf("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, md) st := HTTPStatusFromCode(grpc.Code(err)) w.WriteHeader(st) if _, err := w.Write(buf); err != nil { grpclog.Printf("Failed to write response: %v", err) } handleForwardResponseTrailer(w, md) }
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } return } service := sm[:pos] method := sm[pos+1:] srv, ok := s.m[service] if !ok { if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } return } // Unary RPC or Streaming RPC? if md, ok := srv.md[method]; ok { s.processUnaryRPC(t, stream, srv, md) return } if sd, ok := srv.sd[method]; ok { s.processStreamingRPC(t, stream, srv, sd) return } if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } }
// add_songs add one or more songs to DB. func add_songs(client pb.SongSrvClient, SongsObj []pb.SongObj, client_id int) { grpclog.Printf("CLIENT-%v: Call to Adds Function with list of %v SongObj\n", client_id, len(SongsObj)) songs := uniq_id(SongsObj, client_id) stream, err := client.Adds(context.Background()) if err != nil { grpclog.Fatalf("%v.add_songs(_) = _, %v", client, err) } waitc := make(chan struct{}) go func() { for { in, err := stream.Recv() if err == io.EOF { // read done. close(waitc) return } if err != nil { grpclog.Fatalf("CLIENT-%v: Failed to add song Id %v: %v", client_id, in.Id, err) } grpclog.Printf("CLIENT-%v: Successful add song id %v\n", client_id, in.Id) } }() for _, song := range songs { if err := stream.Send(&song); err != nil { grpclog.Fatalf("CLIENT-%v: Failed to send a song id %v : %v", client_id, song.Id, err) } } stream.CloseSend() <-waitc grpclog.Printf("\n") }
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. func ForwardResponseMessage(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) if !ok { grpclog.Printf("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, md) handleForwardResponseTrailerHeader(w, md) w.Header().Set("Content-Type", marshaler.ContentType()) if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { HTTPError(ctx, marshaler, w, req, err) return } buf, err := marshaler.Marshal(resp) if err != nil { grpclog.Printf("Marshal error: %v", err) HTTPError(ctx, marshaler, w, req, err) return } if _, err = w.Write(buf); err != nil { grpclog.Printf("Failed to write response: %v", err) } handleForwardResponseTrailer(w, md) }
func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) { stream, err := client.GetAllGauges(context.Background(), &metricspb.EmptyMessage{}) if err != nil { grpclog.Fatalf("failed to call GetAllGuages: %v", err) } var ( overallQPS int64 rpcStatus error ) for { gaugeResponse, err := stream.Recv() if err != nil { rpcStatus = err break } if _, ok := gaugeResponse.GetValue().(*metricspb.GaugeResponse_LongValue); !ok { panic(fmt.Sprintf("gauge %s is not a long value", gaugeResponse.Name)) } v := gaugeResponse.GetLongValue() if !totalOnly { grpclog.Printf("%s: %d", gaugeResponse.Name, v) } overallQPS += v } if rpcStatus != io.EOF { grpclog.Fatalf("failed to finish server streaming: %v", rpcStatus) } grpclog.Printf("overall qps: %d", overallQPS) }
// runRecordRoute sends a sequence of points to server and expects to get a RouteSummary from server. func runRecordRoute(client pb.RouteGuideClient) { // Create a random number of random points r := rand.New(rand.NewSource(time.Now().UnixNano())) pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points var points []*pb.Point for i := 0; i < pointCount; i++ { points = append(points, randomPoint(r)) } grpclog.Printf("Traversing %d points.", len(points)) // 通过Stream的方式将数据发送到服务器 stream, err := client.RecordRoute(context.Background()) if err != nil { grpclog.Fatalf("%v.RecordRoute(_) = _, %v", client, err) } for _, point := range points { if err := stream.Send(point); err != nil { grpclog.Fatalf("%v.Send(%v) = %v", stream, point, err) } } // 一口气发送完毕 reply, err := stream.CloseAndRecv() if err != nil { grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } grpclog.Printf("Route summary: %v", reply) }
// RunServer drives the benchmark-server control stream: a Setup command
// creates (or replaces) a benchmark server, a Mark command reports its stats
// and optionally resets them. The benchmark server is closed when the stream
// ends for any reason.
func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error {
	var bs *benchmarkServer
	defer func() {
		// Close benchmark server when stream ends.
		grpclog.Printf("closing benchmark server")
		if bs != nil {
			bs.closeFunc()
		}
	}()
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		var out *testpb.ServerStatus
		switch argtype := in.Argtype.(type) {
		case *testpb.ServerArgs_Setup:
			grpclog.Printf("server setup received:")
			// A second setup replaces the running benchmark server.
			if bs != nil {
				grpclog.Printf("server setup received when server already exists, closing the existing server")
				bs.closeFunc()
			}
			bs, err = startBenchmarkServer(argtype.Setup, s.serverPort)
			if err != nil {
				return err
			}
			out = &testpb.ServerStatus{
				Stats: bs.getStats(),
				Port:  int32(bs.port),
				Cores: int32(bs.cores),
			}
		case *testpb.ServerArgs_Mark:
			grpclog.Printf("server mark received:")
			grpclog.Printf(" - %v", argtype)
			if bs == nil {
				return grpc.Errorf(codes.InvalidArgument, "server does not exist when mark received")
			}
			out = &testpb.ServerStatus{
				Stats: bs.getStats(),
				Port:  int32(bs.port),
				Cores: int32(bs.cores),
			}
			// Reset stats only after they have been captured in `out`.
			if argtype.Mark.Reset_ {
				bs.reset()
			}
		}
		if err := stream.Send(out); err != nil {
			return err
		}
	}
	return nil
}
// delete_song modify one song by id. func delete_song(client pb.SongSrvClient, SongsObj []pb.SongObj, client_id int) { SongObj := uniq_id(SongsObj, client_id)[0] grpclog.Printf("CLIENT-%v: Request Delete Song Id %v\n", client_id, SongObj.Id) status, err := client.Delete(context.Background(), &SongObj) if err != nil { grpclog.Fatalf("%v.modify_song(_) = _, %v: ", client, err) } grpclog.Printf("CLIENT-%v: Successful Delete Song Obj %v\n\n", client_id, status) }
// add_song add one song to DB. func add_song(client pb.SongSrvClient, SongsObj []pb.SongObj, client_id int) { SongObj := uniq_id(SongsObj, client_id)[0] grpclog.Printf("CLIENT-%v: Call to Add Function with Title %v and Id %v\n", client_id, SongObj.Tags.Title, SongObj.Id) status, err := client.Add(context.Background(), &SongObj) if err != nil { grpclog.Fatalf("CLIENT-%v: %v.add_song(_) = _, %v: ", client_id, client, err) } grpclog.Printf("CLIENT-%v: Got Response from Server that Song id %v was Add succesfully to DB\n\n", client_id, status.Id) }
func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error { var bc *benchmarkClient defer func() { // Shut down benchmark client when stream ends. grpclog.Printf("shuting down benchmark client") if bc != nil { bc.shutdown() } }() for { in, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } var out *testpb.ClientStatus switch t := in.Argtype.(type) { case *testpb.ClientArgs_Setup: grpclog.Printf("client setup received:") if bc != nil { grpclog.Printf("client setup received when client already exists, shuting down the existing client") bc.shutdown() } bc, err = startBenchmarkClient(t.Setup) if err != nil { return err } out = &testpb.ClientStatus{ Stats: bc.getStats(), } case *testpb.ClientArgs_Mark: grpclog.Printf("client mark received:") grpclog.Printf(" - %v", t) if bc == nil { return grpc.Errorf(codes.InvalidArgument, "client does not exist when mark received") } out = &testpb.ClientStatus{ Stats: bc.getStats(), } if t.Mark.Reset_ { bc.reset() } } if err := stream.Send(out); err != nil { return err } } return nil }
func handleForwardResponseStreamError(marshaler Marshaler, w http.ResponseWriter, err error) { buf, merr := marshaler.Marshal(streamChunk(nil, err)) if merr != nil { grpclog.Printf("Failed to marshal an error: %v", merr) return } if _, werr := fmt.Fprintf(w, "%s\n", buf); werr != nil { grpclog.Printf("Failed to notify error to client: %v", werr) return } }
// get_song get one song by id. func get_song(client pb.SongSrvClient, SongsObj []pb.SongObj, client_id int) { SongObj := uniq_id(SongsObj, client_id)[0] grpclog.Printf("CLIENT-%v: Send Get Request for Song Id %v\n", client_id, SongObj.Id) song, err := client.Get(context.Background(), &SongObj) if err != nil { grpclog.Printf("CLIENT-%v: %v.get_song(_) = _, %v: \n", client_id, client, err) } else { grpclog.Printf("CLIENT-%v: Successful Get Song Obj %v\n\n", client_id, song) } }
// ForwardResponseStream forwards the stream from gRPC server to REST client.
func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
	// Chunked streaming requires a flushable writer.
	f, ok := w.(http.Flusher)
	if !ok {
		grpclog.Printf("Flush not supported in %T", w)
		http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
		return
	}
	md, ok := ServerMetadataFromContext(ctx)
	if !ok {
		grpclog.Printf("Failed to extract ServerMetadata from context")
		http.Error(w, "unexpected error", http.StatusInternalServerError)
		return
	}
	handleForwardResponseServerMetadata(w, md)
	w.Header().Set("Transfer-Encoding", "chunked")
	w.Header().Set("Content-Type", marshaler.ContentType())
	if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Headers are committed here; errors after this point are reported
	// in-band via handleForwardResponseStreamError, not via HTTP status.
	w.WriteHeader(http.StatusOK)
	f.Flush()
	for {
		resp, err := recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			handleForwardResponseStreamError(marshaler, w, err)
			return
		}
		if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
			handleForwardResponseStreamError(marshaler, w, err)
			return
		}
		buf, err := marshaler.Marshal(streamChunk(resp, nil))
		if err != nil {
			grpclog.Printf("Failed to marshal response chunk: %v", err)
			return
		}
		if _, err = fmt.Fprintf(w, "%s\n", buf); err != nil {
			grpclog.Printf("Failed to send response chunk: %v", err)
			return
		}
		// Flush after each chunk so the client receives it immediately.
		f.Flush()
	}
}
func Chat(letters ...string) error { // get connection for chat conn := connect(serverAddr) defer conn.Close() client := pb.NewChatClient(conn) stream, err := client.Chat(context.Background()) if err != nil { grpclog.Println("%v.Chat(_) = _, %v", client, err) // better logging return err } // receive msg waitc := make(chan struct{}) var recevieErr error go func() { for { in, err := stream.Recv() if err == io.EOF { // read done close(waitc) return } if err != nil { grpclog.Printf("Failed to receive a msg : %v", err) // need better logging recevieErr = err return } grpclog.Printf("client -- server status: %s", in.Content) } }() if recevieErr != nil { return recevieErr } // send msg for _, str := range letters { grpclog.Printf("client -- send msg: %v", str) if err := stream.Send(&pb.Msg{Content: str, Title: title}); err != nil { grpclog.Printf("%v.Send(%v) = %v", stream, str, err) // need better logging return err } } // close send stream.CloseSend() <-waitc return nil }
// populateFieldValueFromPath walks msg along fieldPath (a dot-separated path
// already split into segments) and assigns the given values to the final
// field. Intermediate nil pointers are allocated along the way. Unknown
// field names are logged and silently skipped; a repeated final field takes
// all values, a scalar takes the first (extras are logged and ignored).
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
	m := reflect.ValueOf(msg)
	if m.Kind() != reflect.Ptr {
		return fmt.Errorf("unexpected type %T: %v", msg, msg)
	}
	m = m.Elem()
	for i, fieldName := range fieldPath {
		isLast := i == len(fieldPath)-1
		// Only struct values can have further path segments resolved on them.
		if !isLast && m.Kind() != reflect.Struct {
			return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
		}
		f := fieldByProtoName(m, fieldName)
		if !f.IsValid() {
			// Unknown field: treated as a no-op, not an error.
			grpclog.Printf("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
			return nil
		}
		switch f.Kind() {
		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
			// Scalar leaf: remember it and fall through to the final assignment.
			m = f
		case reflect.Slice:
			// TODO(yugui) Support []byte
			if !isLast {
				return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
			}
			return populateRepeatedField(f, values)
		case reflect.Ptr:
			// Allocate intermediate message structs as needed.
			if f.IsNil() {
				m = reflect.New(f.Type().Elem())
				f.Set(m)
			}
			m = f.Elem()
			continue
		case reflect.Struct:
			m = f
			continue
		default:
			return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
		}
	}
	switch len(values) {
	case 0:
		return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
	case 1:
	default:
		// More than one value for a scalar field: keep the first, warn.
		grpclog.Printf("too many field values: %s", strings.Join(fieldPath, "."))
	}
	return populateField(m, values[0])
}
// handleData dispatches an incoming DATA frame to its stream and maintains
// both connection-level and stream-level flow control accounting.
func (t *http2Server) handleData(f *http2.DataFrame) {
	// Select the right stream to dispatch.
	size := len(f.Data())
	s, ok := t.getStream(f)
	if !ok {
		// Stream no longer exists; the bytes must still be credited back to
		// the connection's flow-control window or the connection would stall.
		cwu, err := t.fc.adjustConnPendingUpdate(uint32(size))
		if err != nil {
			grpclog.Printf("transport: http2Server %v", err)
			t.Close()
			return
		}
		if cwu > 0 {
			t.controlBuf.put(&windowUpdate{0, cwu})
		}
		return
	}
	if size > 0 {
		if err := s.fc.onData(uint32(size)); err != nil {
			if _, ok := err.(ConnectionError); ok {
				// Connection-level flow-control violation: tear down transport.
				grpclog.Printf("transport: http2Server %v", err)
				t.Close()
				return
			}
			// Stream-level violation: reset only the offending stream.
			t.closeStream(s)
			t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
			return
		}
		// TODO(bradfitz, zhaoq): A copy is required here because there is no
		// guarantee f.Data() is consumed before the arrival of next frame.
		// Can this copy be eliminated?
		data := make([]byte, size)
		copy(data, f.Data())
		s.write(recvMsg{data: data})
	}
	if f.Header().Flags.Has(http2.FlagDataEndStream) {
		// Received the end of stream from the client.
		s.mu.Lock()
		if s.state != streamDone {
			if s.state == streamWriteDone {
				s.state = streamDone
			} else {
				s.state = streamReadDone
			}
		}
		s.mu.Unlock()
		// Signal EOF to this stream's reader.
		s.write(recvMsg{err: io.EOF})
	}
}
// Run in a goroutine to track the error in transport and create the
// new transport if an error happens. It returns when the channel is closing.
func (cc *Conn) transportMonitor() {
	for {
		select {
		// shutdownChan is needed to detect the teardown when
		// the ClientConn is idle (i.e., no RPC in flight).
		case <-cc.shutdownChan:
			return
		case <-cc.transport.Error():
			cc.mu.Lock()
			if cc.state == Shutdown {
				// cc.Close() has been invoked.
				cc.mu.Unlock()
				return
			}
			cc.state = TransientFailure
			cc.stateCV.Broadcast()
			cc.mu.Unlock()
			// Try to establish a fresh transport; failure here means the
			// ClientConn is closing, so stop monitoring.
			if err := cc.resetTransport(true); err != nil {
				// The ClientConn is closing.
				cc.mu.Lock()
				cc.printf("transport exiting: %v", err)
				cc.mu.Unlock()
				grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err)
				return
			}
			continue
		}
	}
}
// Run in a goroutine to track the error in transport and create the
// new transport if an error happens. It returns when the channel is closing.
func (ac *addrConn) transportMonitor() {
	for {
		// Snapshot the current transport under the lock; resetTransport
		// below may replace it on the next iteration.
		ac.mu.Lock()
		t := ac.transport
		ac.mu.Unlock()
		select {
		// shutdownChan is needed to detect the teardown when
		// the addrConn is idle (i.e., no RPC in flight).
		case <-ac.shutdownChan:
			return
		case <-t.Error():
			ac.mu.Lock()
			if ac.state == Shutdown {
				// ac.tearDown(...) has been invoked.
				ac.mu.Unlock()
				return
			}
			ac.state = TransientFailure
			ac.stateCV.Broadcast()
			ac.mu.Unlock()
			// Try to establish a fresh transport; failure here means the
			// addrConn is being torn down, so stop monitoring.
			if err := ac.resetTransport(true); err != nil {
				ac.mu.Lock()
				ac.printf("transport exiting: %v", err)
				ac.mu.Unlock()
				grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err)
				return
			}
		}
	}
}
func doClientStreaming(tc testpb.TestServiceClient) { stream, err := tc.StreamingInputCall(context.Background()) if err != nil { grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) } var sum int for _, s := range reqSizes { pl := newPayload(testpb.PayloadType_COMPRESSABLE, s) req := &testpb.StreamingInputCallRequest{ Payload: pl, } if err := stream.Send(req); err != nil { grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err) } sum += s grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum) } reply, err := stream.CloseAndRecv() if err != nil { grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } if reply.GetAggregatedPayloadSize() != int32(sum) { grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) } grpclog.Println("ClientStreaming done") }
// handleRawConn is run in its own goroutine and handles a just-accepted
// connection that has not had any I/O performed on it yet.
func (s *Server) handleRawConn(rawConn net.Conn) {
	// Per-connection work:
	// 1. Obtain the (e.g. TLS-wrapped) Conn and its AuthInfo.
	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
	// Handshake failed.
	if err != nil {
		s.mu.Lock()
		s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
		s.mu.Unlock()
		grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
		rawConn.Close()
		return
	}
	// Server is shutting down (s.conns becomes nil on stop); drop the conn.
	s.mu.Lock()
	if s.conns == nil {
		s.mu.Unlock()
		conn.Close()
		return
	}
	s.mu.Unlock()
	// 2. Serve the connection.
	if s.opts.useHandlerImpl {
		s.serveUsingHandler(conn)
	} else {
		s.serveNewHTTP2Transport(conn, authInfo)
	}
}
// NewConn creates a Conn.
func NewConn(cc *ClientConn) (*Conn, error) {
	if cc.target == "" {
		return nil, ErrUnspecTarget
	}
	c := &Conn{
		target:       cc.target,
		dopts:        cc.dopts,
		shutdownChan: make(chan struct{}),
	}
	if EnableTracing {
		c.events = trace.NewEventLog("grpc.ClientConn", c.target)
	}
	if !c.dopts.insecure {
		// Secure mode: require at least one TransportAuthenticator among the
		// credentials. NOTE(review): the `ok` declared in the if-init shadows
		// the outer `ok` inside the if only; the `ok = true` after the if
		// assigns the outer variable, so the loop behaves correctly but the
		// shadowing is easy to misread.
		var ok bool
		for _, cd := range c.dopts.copts.AuthOptions {
			if _, ok := cd.(credentials.TransportAuthenticator); !ok {
				continue
			}
			ok = true
		}
		if !ok {
			return nil, ErrNoTransportSecurity
		}
	} else {
		// Insecure mode: reject credentials that require transport security.
		for _, cd := range c.dopts.copts.AuthOptions {
			if cd.RequireTransportSecurity() {
				return nil, ErrCredentialsMisuse
			}
		}
	}
	// Derive the :authority from the target by stripping the port, if any.
	colonPos := strings.LastIndex(c.target, ":")
	if colonPos == -1 {
		colonPos = len(c.target)
	}
	c.authority = c.target[:colonPos]
	if c.dopts.codec == nil {
		// Set the default codec.
		c.dopts.codec = protoCodec{}
	}
	c.stateCV = sync.NewCond(&c.mu)
	if c.dopts.block {
		// Blocking dial: fail NewConn if the initial connection fails.
		if err := c.resetTransport(false); err != nil {
			c.Close()
			return nil, err
		}
		// Start to monitor the error status of transport.
		go c.transportMonitor()
	} else {
		// Start a goroutine connecting to the server asynchronously.
		go func() {
			if err := c.resetTransport(false); err != nil {
				grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err)
				c.Close()
				return
			}
			c.transportMonitor()
		}()
	}
	return c, nil
}
func main() { grpc.EnableTracing = false flag.Parse() lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort)) if err != nil { grpclog.Fatalf("failed to listen: %v", err) } grpclog.Printf("worker listening at port %v", *driverPort) s := grpc.NewServer() stop := make(chan bool) testpb.RegisterWorkerServiceServer(s, &workerServer{ stop: stop, serverPort: *serverPort, }) go func() { <-stop // Wait for 1 second before stopping the server to make sure the return value of QuitWorker is sent to client. // TODO revise this once server graceful stop is supported in gRPC. time.Sleep(time.Second) s.Stop() }() s.Serve(lis) }
func (c *co) OnWatchMessage(request *aaa.Send, genwsc *genws.Connection, session []byte) error { var c2request aaa.Send c2request = *request c2request.GWSRPCID = c.id s, err := aaaClient.WatchMessage(context.Background(), &c2request) if err != nil { grpclog.Printf("aaaClient %v: ", err) return err } for { r, err := s.Recv() if err == io.EOF { break } else if err != nil { return err } body, err := proto.Marshal(r) if err != nil { return err } genws.RecvGRPC2GWSRPC(genwsc, session, body) } return nil }
func (s *SongSrvServer) Filter(query *pb.SongQuery, stream pb.SongSrv_FilterServer) error { grpclog.Printf("SERVER: Recive Filter Request : %v\n", query) var triger func(string, string) bool if query.SearchType == 0 { triger = regex } else if query.SearchType == 2 { triger = contains } else if query.SearchType == 3 { triger = match } for _, song := range s.savedSongs { var str string if query.SearchField == 0 { str = song.Tags.Title } else if query.SearchField == 1 { str = song.Tags.Artist } else if query.SearchField == 2 { str = song.Tags.Album } if triger(str, query.Search) { if err := stream.Send(&song); err != nil { return err } } } return nil }
// callRemoteBalancer opens a BalanceLoad stream to the remote balancer, sends
// the initial request, and then consumes server-list updates until the stream
// breaks. The returned retry flag tells the caller whether it should redial
// and try again.
func (b *balancer) callRemoteBalancer(lbc lbpb.LoadBalancerClient, seq int) (retry bool) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := lbc.BalanceLoad(ctx, grpc.FailFast(false))
	if err != nil {
		grpclog.Printf("Failed to perform RPC to the remote balancer %v", err)
		return
	}
	b.mu.Lock()
	// Bail out (without retry) if the balancer was closed meanwhile.
	if b.done {
		b.mu.Unlock()
		return
	}
	b.mu.Unlock()
	initReq := &lbpb.LoadBalanceRequest{
		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
			InitialRequest: new(lbpb.InitialLoadBalanceRequest),
		},
	}
	if err := stream.Send(initReq); err != nil {
		// TODO: backoff on retry?
		return true
	}
	reply, err := stream.Recv()
	if err != nil {
		// TODO: backoff on retry?
		return true
	}
	initResp := reply.GetInitialResponse()
	if initResp == nil {
		grpclog.Println("Failed to receive the initial response from the remote balancer.")
		return
	}
	// TODO: Support delegation.
	if initResp.LoadBalancerDelegate != "" {
		// delegation
		grpclog.Println("TODO: Delegation is not supported yet.")
		return
	}
	// Retrieve the server list.
	for {
		reply, err := stream.Recv()
		if err != nil {
			break
		}
		b.mu.Lock()
		// Drop stale updates: a newer callRemoteBalancer may have superseded
		// this one (seq < b.seq), or the balancer may be shutting down.
		if b.done || seq < b.seq {
			b.mu.Unlock()
			return
		}
		b.seq++ // tick when receiving a new list of servers.
		seq = b.seq
		b.mu.Unlock()
		if serverList := reply.GetServerList(); serverList != nil {
			b.processServerList(serverList, seq)
		}
	}
	return true
}
// printFeature gets the feature for the given point. func printFeature(client pb.RouteGuideClient, point *pb.Point) { grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) feature, err := client.GetFeature(context.Background(), point) if err != nil { grpclog.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) } grpclog.Println(feature) }
// Run in a goroutine to track the error in transport and create the
// new transport if an error happens. It returns when the channel is closing.
func (ac *addrConn) transportMonitor() {
	for {
		// Snapshot the current transport under the lock; it may be replaced
		// by resetTransport on a later iteration.
		ac.mu.Lock()
		t := ac.transport
		ac.mu.Unlock()
		select {
		// This is needed to detect the teardown when
		// the addrConn is idle (i.e., no RPC in flight).
		case <-ac.ctx.Done():
			// Drain a pending transport error, if any, so the transport
			// gets closed before exiting.
			select {
			case <-t.Error():
				t.Close()
			default:
			}
			return
		case <-t.GoAway():
			// If GoAway happens without any network I/O error, ac is closed without shutting down the
			// underlying transport (the transport will be closed when all the pending RPCs finished or
			// failed.).
			// If GoAway and some network I/O error happen concurrently, ac and its underlying transport
			// are closed.
			// In both cases, a new ac is created.
			select {
			case <-t.Error():
				ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
			default:
				ac.cc.resetAddrConn(ac.addr, true, errConnDrain)
			}
			return
		case <-t.Error():
			// Prefer teardown/GoAway if they raced with the error.
			select {
			case <-ac.ctx.Done():
				t.Close()
				return
			case <-t.GoAway():
				ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
				return
			default:
			}
			ac.mu.Lock()
			if ac.state == Shutdown {
				// ac has been shutdown.
				ac.mu.Unlock()
				return
			}
			ac.state = TransientFailure
			ac.stateCV.Broadcast()
			ac.mu.Unlock()
			// Try to establish a fresh transport; failure here means the
			// addrConn is being torn down, so stop monitoring.
			if err := ac.resetTransport(true); err != nil {
				ac.mu.Lock()
				ac.printf("transport exiting: %v", err)
				ac.mu.Unlock()
				grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err)
				return
			}
		}
	}
}
// reader runs as a separate goroutine in charge of reading data from network
// connection.
//
// TODO(zhaoq): currently one reader per transport. Investigate whether this is
// optimal.
// TODO(zhaoq): Check the validity of the incoming frame sequence.
func (t *http2Client) reader() {
	// Check the validity of server preface.
	frame, err := t.framer.readFrame()
	if err != nil {
		t.notifyError(err)
		return
	}
	sf, ok := frame.(*http2.SettingsFrame)
	if !ok {
		// NOTE(review): `err` is nil on this path (readFrame succeeded), so
		// notifyError is invoked with a nil error; a descriptive error
		// should be constructed here instead — confirm and fix upstream.
		t.notifyError(err)
		return
	}
	t.handleSettings(sf)
	// loop to keep reading incoming messages on this transport.
	for {
		frame, err := t.framer.readFrame()
		if err != nil {
			// Abort an active stream if the http2.Framer returns a
			// http2.StreamError. This can happen only if the server's response
			// is malformed http2.
			if se, ok := err.(http2.StreamError); ok {
				t.mu.Lock()
				s := t.activeStreams[se.StreamID]
				t.mu.Unlock()
				if s != nil {
					// use error detail to provide better err message
					handleMalformedHTTP2(s, StreamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail()))
				}
				continue
			} else {
				// Transport error.
				t.notifyError(err)
				return
			}
		}
		// Dispatch by frame type.
		switch frame := frame.(type) {
		case *http2.MetaHeadersFrame:
			t.operateHeaders(frame)
		case *http2.DataFrame:
			t.handleData(frame)
		case *http2.RSTStreamFrame:
			t.handleRSTStream(frame)
		case *http2.SettingsFrame:
			t.handleSettings(frame)
		case *http2.PingFrame:
			t.handlePing(frame)
		case *http2.GoAwayFrame:
			t.handleGoAway(frame)
		case *http2.WindowUpdateFrame:
			t.handleWindowUpdate(frame)
		default:
			grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame)
		}
	}
}
// newHPACKDecoder constructs an hpackDecoder whose emit callback translates
// incoming HPACK header fields into gRPC stream state (status code/message,
// timeout, method) and collects the remaining non-reserved headers as
// metadata.
func newHPACKDecoder() *hpackDecoder {
	d := &hpackDecoder{}
	d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) {
		switch f.Name {
		case "content-type":
			// TODO(zhaoq): Tentatively disable the check until a bug is fixed.
			/*
				if !strings.Contains(f.Value, "application/grpc") {
					d.err = StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected header")
					return
				}
			*/
		case "grpc-status":
			code, err := strconv.Atoi(f.Value)
			if err != nil {
				d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
				return
			}
			d.state.statusCode = codes.Code(code)
		case "grpc-message":
			d.state.statusDesc = f.Value
		case "grpc-timeout":
			d.state.timeoutSet = true
			var err error
			d.state.timeout, err = timeoutDecode(f.Value)
			if err != nil {
				d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
				return
			}
		case ":path":
			d.state.method = f.Value
		default:
			// Everything else becomes metadata, unless it is reserved.
			if !isReservedHeader(f.Name) {
				if f.Name == "user-agent" {
					i := strings.LastIndex(f.Value, " ")
					if i == -1 {
						// There is no application user agent string being set.
						return
					}
					// Extract the application user agent string.
					f.Value = f.Value[:i]
				}
				if d.state.mdata == nil {
					d.state.mdata = make(map[string][]string)
				}
				k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
				if err != nil {
					grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
					return
				}
				d.state.mdata[k] = append(d.state.mdata[k], v)
			}
		}
	})
	return d
}