func TestClientStreamRedirect(t *testing.T) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	addr := l.Addr().String()
	conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
	require.NoError(t, err)
	defer conn.Close()
	cluster := &mockCluster{addr: addr}
	cs := raftpicker.NewConnSelector(cluster, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
	forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
	api := NewRaftProxyRouteGuideServer(testRouteGuide{}, cs, cluster, forwardAsOwnRequest)
	srv := grpc.NewServer()
	RegisterRouteGuideServer(srv, api)
	go srv.Serve(l)
	defer srv.Stop()

	client := NewRouteGuideClient(conn)
	stream, err := client.RecordRoute(context.Background())
	// err is non-nil only on network issues
	assert.Nil(t, err)
	// Send succeeds here; the stream error only surfaces on CloseAndRecv.
	assert.Nil(t, stream.Send(&Point{}))
	_, err = stream.CloseAndRecv()
	assert.NotNil(t, err)
	assert.Equal(t, codes.ResourceExhausted, grpc.Code(err))
}
// getClient returns a connection to the Suite containerd
func (cs *ContainerdSuite) getClient(socket string) error {
	// Parse proto://address form addresses.
	bindParts := strings.SplitN(socket, "://", 2)
	if len(bindParts) != 2 {
		return fmt.Errorf("bad bind address format %s, expected proto://address", socket)
	}

	// Reset the gRPC logger to log to /dev/null so that it does not mess with our stdio.
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))

	dialOpts := []grpc.DialOption{grpc.WithInsecure()}
	dialOpts = append(dialOpts,
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout(bindParts[0], bindParts[1], timeout)
		}),
		grpc.WithBlock(),
		grpc.WithTimeout(5*time.Second),
	)
	conn, err := grpc.Dial(socket, dialOpts...)
	if err != nil {
		return err
	}
	healthClient := grpc_health_v1.NewHealthClient(conn)
	if _, err := healthClient.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{}); err != nil {
		return err
	}
	cs.grpcClient = types.NewAPIClient(conn)
	return nil
}
func Connect(hostport string, dopts []grpc.DialOption) (*Connection, error) {
	if _, _, err := net.SplitHostPort(hostport); err != nil {
		return nil, err
	}
	if dopts == nil {
		rpcTimeout := 2 * time.Second
		dopts = []grpc.DialOption{
			grpc.WithInsecure(),
			grpc.WithTimeout(rpcTimeout),
		}
	}
	conn, err := grpc.Dial(hostport, dopts...)
	if err != nil {
		log.Warnf("rpc dial: %s: %s", hostport, err)
		return nil, err
	}
	client := NewVApiClient(conn)
	c := &Connection{
		ClientConn: conn,
		Client:     client,
	}
	return c, nil
}
func createFactoryFunc(addrFunc AddressFunc, timeout time.Duration) pools.Factory {
	return func() (pools.Resource, error) {
		addr := addrFunc()
		// create new client
		return New(addr, grpc.WithBlock(), grpc.WithTimeout(timeout))
	}
}
func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}
	for i, u := range c.GRPCURLs {
		conn, derr := grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if derr != nil {
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("%s is already compacted with %d (%v)", u, rev, cerr)
			} else {
				err = cerr
			}
		}
	}
	return err
}
// DialTablet creates and initializes gRPCQueryClient.
func DialTablet(ctx context.Context, endPoint *pbt.EndPoint, keyspace, shard string, timeout time.Duration) (tabletconn.TabletConn, error) {
	// create the RPC client
	addr := netutil.JoinHostPort(endPoint.Host, endPoint.PortMap["grpc"])
	cc, err := grpc.Dial(addr, grpc.WithBlock(), grpc.WithTimeout(timeout))
	if err != nil {
		return nil, err
	}
	c := pbs.NewQueryClient(cc)

	result := &gRPCQueryClient{
		endPoint: endPoint,
		cc:       cc,
		c:        c,
	}
	if keyspace != "" || shard != "" {
		gsir, err := c.GetSessionId(ctx, &pb.GetSessionIdRequest{
			Keyspace: keyspace,
			Shard:    shard,
		})
		if err != nil {
			cc.Close()
			return nil, err
		}
		if gsir.Error != nil {
			cc.Close()
			return nil, tabletErrorFromRPCError(gsir.Error)
		}
		result.sessionID = gsir.SessionId
	}
	return result, nil
}
// DialTablet creates and initializes gRPCQueryClient.
func DialTablet(tablet *topodatapb.Tablet, timeout time.Duration) (tabletconn.TabletConn, error) {
	// create the RPC client
	addr := ""
	if grpcPort, ok := tablet.PortMap["grpc"]; ok {
		addr = netutil.JoinHostPort(tablet.Hostname, grpcPort)
	} else {
		addr = tablet.Hostname
	}
	opt, err := grpcutils.ClientSecureDialOption(*cert, *key, *ca, *name)
	if err != nil {
		return nil, err
	}
	opts := []grpc.DialOption{opt}
	if timeout > 0 {
		opts = append(opts, grpc.WithBlock(), grpc.WithTimeout(timeout))
	}
	cc, err := grpc.Dial(addr, opts...)
	if err != nil {
		return nil, err
	}
	c := queryservicepb.NewQueryClient(cc)

	result := &gRPCQueryClient{
		tablet: tablet,
		cc:     cc,
		c:      c,
	}
	return result, nil
}
// DialTablet creates and initializes gRPCQueryClient.
func DialTablet(ctx context.Context, tablet *topodatapb.Tablet, timeout time.Duration) (tabletconn.TabletConn, error) {
	// create the RPC client
	addr := netutil.JoinHostPort(tablet.Hostname, tablet.PortMap["grpc"])
	opt, err := grpcutils.ClientSecureDialOption(*cert, *key, *ca, *name)
	if err != nil {
		return nil, err
	}
	cc, err := grpc.Dial(addr, opt, grpc.WithBlock(), grpc.WithTimeout(timeout))
	if err != nil {
		return nil, err
	}
	c := queryservicepb.NewQueryClient(cc)

	result := &gRPCQueryClient{
		tablet: tablet,
		cc:     cc,
		c:      c,
		target: &querypb.Target{
			Keyspace:   tablet.Keyspace,
			Shard:      tablet.Shard,
			TabletType: tablet.Type,
		},
	}
	return result, nil
}
func main() {
	flag.Parse()

	conn, err := grpc.Dial(fmt.Sprintf("%v:%v", *host, *port),
		grpc.WithInsecure(),
		grpc.WithTimeout(3*time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := crowdsound.NewCrowdSoundClient(conn)
	for _, b := range benchmarks {
		benchName, results, err := b(client)
		if err != nil {
			log.Fatal("Failed to run benchmark:", err)
		}

		log.Printf("Benchmark results: %v", benchName)
		log.Printf("--------------------------------")
		log.Printf("Min: \t%v μs", results.Min()/1000)
		log.Printf("Max: \t%v μs", results.Max()/1000)
		log.Printf("Mean: \t%v μs", results.Mean()/1000)
		log.Printf("Std: \t%v μs", results.StdDev()/1000)
		log.Print("")
	}
}
func (c *cluster) compactKV(rev int64) error {
	var (
		conn *grpc.ClientConn
		err  error
	)
	if rev <= 0 {
		return nil
	}
	for i, u := range c.GRPCURLs {
		conn, err = grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err = kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		if err != nil {
			if strings.Contains(err.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("%s is already compacted with %d (%v)", u, rev, err)
				err = nil // in case compact was requested more than once
			}
		}
	}
	return err
}
func newSession(ctx context.Context, agent *Agent, delay time.Duration) *session {
	s := &session{
		agent:      agent,
		errs:       make(chan error, 1),
		messages:   make(chan *api.SessionMessage),
		tasks:      make(chan *api.TasksMessage),
		registered: make(chan struct{}),
		closed:     make(chan struct{}),
	}
	peer, err := agent.config.Managers.Select()
	if err != nil {
		s.errs <- err
		return s
	}
	cc, err := grpc.Dial(peer.Addr,
		grpc.WithTransportCredentials(agent.config.Credentials),
		grpc.WithTimeout(dispatcherRPCTimeout),
	)
	if err != nil {
		s.errs <- err
		return s
	}
	s.addr = peer.Addr
	s.conn = cc

	go s.run(ctx, delay)
	return s
}
// GetClient attempts to dial the specified address flag and returns a service
// client and its underlying connection. If it is unable to make a connection,
// it dies.
func GetClient() (*grpc.ClientConn, pb.BookServiceClient) {
	conn, err := grpc.Dial(*address, grpc.WithTimeout(5*time.Second), grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	return conn, pb.NewBookServiceClient(conn)
}
// dialSetupOpts gives the dial opts prior to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
	if c.cfg.DialTimeout > 0 {
		opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
	}
	opts = append(opts, dopts...)

	f := func(host string, t time.Duration) (net.Conn, error) {
		proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
		if proto == "" {
			return nil, fmt.Errorf("unknown scheme for %q", host)
		}
		select {
		case <-c.ctx.Done():
			return nil, c.ctx.Err()
		default:
		}
		return net.DialTimeout(proto, host, t)
	}
	opts = append(opts, grpc.WithDialer(f))

	creds := c.creds
	if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
		creds = c.processCreds(scheme)
	}
	if creds != nil {
		opts = append(opts, grpc.WithTransportCredentials(*creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	return opts
}
func newPeerClientConnection() (*grpc.ClientConn, error) {
	var opts []grpc.DialOption
	if viper.GetBool("peer.tls.enabled") {
		var sn string
		if viper.GetString("peer.tls.serverhostoverride") != "" {
			sn = viper.GetString("peer.tls.serverhostoverride")
		}
		var creds credentials.TransportAuthenticator
		if viper.GetString("peer.tls.cert.file") != "" {
			var err error
			creds, err = credentials.NewClientTLSFromFile(viper.GetString("peer.tls.cert.file"), sn)
			if err != nil {
				grpclog.Fatalf("Failed to create TLS credentials %v", err)
			}
		} else {
			creds = credentials.NewClientTLSFromCert(nil, sn)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	}
	opts = append(opts, grpc.WithTimeout(1*time.Second))
	opts = append(opts, grpc.WithBlock())
	opts = append(opts, grpc.WithInsecure())
	conn, err := grpc.Dial(getPeerAddress(), opts...)
	if err != nil {
		return nil, err
	}
	return conn, err
}
func newSession(ctx context.Context, agent *Agent, delay time.Duration, sessionID string, description *api.NodeDescription) *session {
	s := &session{
		agent:         agent,
		sessionID:     sessionID,
		errs:          make(chan error, 1),
		messages:      make(chan *api.SessionMessage),
		assignments:   make(chan *api.AssignmentsMessage),
		subscriptions: make(chan *api.SubscriptionMessage),
		registered:    make(chan struct{}),
		closed:        make(chan struct{}),
	}

	// TODO(stevvooe): Need to move connection management up a level or create
	// independent connection for log broker client.

	peer, err := agent.config.Managers.Select()
	if err != nil {
		s.errs <- err
		return s
	}
	cc, err := grpc.Dial(peer.Addr,
		grpc.WithTransportCredentials(agent.config.Credentials),
		grpc.WithTimeout(dispatcherRPCTimeout),
	)
	if err != nil {
		s.errs <- err
		return s
	}
	s.addr = peer.Addr
	s.conn = cc

	go s.run(ctx, delay, description)
	return s
}
// RegisterGRPCGateway starts the gateway (i.e. reverse proxy) that proxies
// HTTP requests to the appropriate gRPC endpoints.
func (s *adminServer) RegisterGRPCGateway(serverCtx *Context) error {
	// Setup HTTP<->gRPC handlers.
	var opts []grpc.DialOption
	if serverCtx.Insecure {
		opts = append(opts, grpc.WithInsecure())
	} else {
		tlsConfig, err := serverCtx.GetClientTLSConfig()
		if err != nil {
			return err
		}
		opts = append(
			opts,
			// TODO(tamird): remove this timeout. It is currently necessary because
			// GRPC will not actually bail on a bad certificate error - it will just
			// retry indefinitely. See https://github.com/grpc/grpc-go/issues/622.
			grpc.WithTimeout(time.Second),
			grpc.WithBlock(),
			grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
		)
	}
	if err := RegisterAdminHandlerFromEndpoint(s.gwCtx, s.gwMux, serverCtx.Addr, opts); err != nil {
		return util.Errorf("error constructing grpc-gateway: %s. are your certificates valid?", err)
	}

	// Pass all requests for gRPC-based API endpoints to the gateway mux.
	s.ServeMux.Handle(apiEndpoint, s.gwMux)
	return nil
}
func TestBlockGRPC(t *testing.T) {
	test := makeTestData(512 * 1024)
	m := &mockBlockGRPC{
		data: test,
	}
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	grpcSrv := grpc.NewServer()
	models.RegisterTorusStorageServer(grpcSrv, m)
	go grpcSrv.Serve(lis)
	defer grpcSrv.Stop()

	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithTimeout(10*time.Second))
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	client := models.NewTorusStorageClient(conn)
	resp, err := client.Block(context.TODO(), &models.BlockRequest{
		BlockRef: torus.BlockRef{
			INodeRef: torus.NewINodeRef(1, 2),
			Index:    3,
		}.ToProto(),
	})
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(test, resp.Data) {
		t.Fatal("unequal response")
	}
}
func getDevopsClient(peerAddress string) (obc.DevopsClient, error) {
	var opts []grpc.DialOption
	if viper.GetBool("pki.validity-period.tls.enabled") {
		var sn string
		if viper.GetString("pki.validity-period.tls.server-host-override") != "" {
			sn = viper.GetString("pki.validity-period.tls.server-host-override")
		}
		var creds credentials.TransportAuthenticator
		if viper.GetString("pki.validity-period.tls.cert.file") != "" {
			var err error
			creds, err = credentials.NewClientTLSFromFile(viper.GetString("pki.validity-period.tls.cert.file"), sn)
			if err != nil {
				grpclog.Fatalf("Failed to create TLS credentials %v", err)
			}
		} else {
			creds = credentials.NewClientTLSFromCert(nil, sn)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	}
	opts = append(opts, grpc.WithTimeout(systemChaincodeTimeout))
	opts = append(opts, grpc.WithBlock())
	opts = append(opts, grpc.WithInsecure())
	conn, err := grpc.Dial(peerAddress, opts...)
	if err != nil {
		return nil, fmt.Errorf("Error trying to connect to local peer: %s", err)
	}
	devopsClient := obc.NewDevopsClient(conn)
	return devopsClient, nil
}
// GetBroadcastClient creates a simple instance of the BroadcastClient interface
func GetBroadcastClient() (BroadcastClient, error) {
	var orderer string
	if viper.GetBool("peer.committer.enabled") {
		orderer = viper.GetString("peer.committer.ledger.orderer")
	}
	if orderer == "" {
		return nil, fmt.Errorf("Can't get orderer address")
	}

	var opts []grpc.DialOption
	opts = append(opts, grpc.WithInsecure())
	opts = append(opts, grpc.WithTimeout(3*time.Second))
	opts = append(opts, grpc.WithBlock())
	conn, err := grpc.Dial(orderer, opts...)
	if err != nil {
		return nil, fmt.Errorf("Error connecting to %s due to %s", orderer, err)
	}
	client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO())
	if err != nil {
		conn.Close()
		return nil, fmt.Errorf("Error connecting to %s due to %s", orderer, err)
	}
	return &broadcastClient{conn: conn, client: client}, nil
}
func GRPCClient(addr, token, caFile string) (*RemoteU2FClient, error) {
	var err error
	var tCreds credentials.TransportCredentials
	if caFile == "" {
		tCreds = credentials.NewClientTLSFromCert(nil, "")
	} else {
		tCreds, err = credentials.NewClientTLSFromFile(caFile, "")
		if err != nil {
			return nil, fmt.Errorf("error reading CA file: %s", err)
		}
	}

	t := oauth2.Token{
		AccessToken: token,
		TokenType:   "Bearer",
	}
	rpcCreds := oauth.NewOauthAccess(&t)

	conn, err := grpc.Dial(addr,
		grpc.WithTransportCredentials(tCreds),
		grpc.WithPerRPCCredentials(rpcCreds),
		grpc.WithBlock(),
		grpc.WithTimeout(30*time.Second))
	if err != nil {
		return nil, fmt.Errorf("error connecting to server: %s", err)
	}

	c := pb.NewRemoteU2FClient(conn)
	return &RemoteU2FClient{c}, nil
}
func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, u := range c.GRPCURLs {
		conn, derr := grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if derr != nil {
			plog.Printf("[compact kv #%d] dial error %v (endpoint %s)", i, derr, u)
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		plog.Printf("[compact kv #%d] starting (endpoint %s)", i, u)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		succeed := true
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("[compact kv #%d] already compacted (endpoint %s)", i, u)
			} else {
				plog.Printf("[compact kv #%d] error %v (endpoint %s)", i, cerr, u)
				err = cerr
				succeed = false
			}
		}
		if succeed {
			plog.Printf("[compact kv #%d] done (endpoint %s)", i, u)
		}
	}
	return err
}
// Dial establishes a connection for a given endpoint using the client's config
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
	opts := []grpc.DialOption{
		grpc.WithBlock(),
		grpc.WithTimeout(c.cfg.DialTimeout),
	}
	if c.creds != nil {
		opts = append(opts, grpc.WithTransportCredentials(*c.creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	proto := "tcp"
	if url, uerr := url.Parse(endpoint); uerr == nil && url.Scheme == "unix" {
		proto = "unix"
		// strip unix:// prefix so certs work
		endpoint = url.Host
	}
	f := func(a string, t time.Duration) (net.Conn, error) {
		select {
		case <-c.ctx.Done():
			return nil, c.ctx.Err()
		default:
		}
		return net.DialTimeout(proto, a, t)
	}
	opts = append(opts, grpc.WithDialer(f))

	conn, err := grpc.Dial(endpoint, opts...)
	if err != nil {
		return nil, err
	}
	return conn, nil
}
func (sc *client) Send() {
	conn, err := grpc.Dial(*host, grpc.WithTimeout(time.Second))
	if err != nil {
		log.Fatalf("Failed to dial to %q: %v", *host, err)
	}
	defer conn.Close()
	c := pb.NewSinkClient(conn)

	// Prepare a fake request with random data so that it cannot be compressed.
	data := make([]byte, *blobSize * *blobsPerRequest)
	for i := range data {
		data[i] = byte(rand.Intn(256))
	}
	var req pb.UploadRequest
	for i := 0; i < *blobsPerRequest; i++ {
		req.Blob = append(req.Blob, &pb.Blob{Data: data[i*(*blobSize) : (i+1)*(*blobSize)]})
	}

	for i := 0; ; i++ {
		if i%100 == 0 && i > 0 {
			sc.Inc(100)
		}
		if _, err := c.Upload(context.TODO(), &req); err != nil {
			log.Printf("Error: %v", err)
			if grpc.Code(err) == codes.Internal {
				// The server has closed the connection, or something else went badly wrong.
				// For this benchmark there is no point in implementing reconnect logic,
				// so just exit the program.
				os.Exit(1)
			}
		}
	}
}
// invoke the EmptyCall RPC
func invokeEmptyCall(address string, dialOptions []grpc.DialOption) (*testpb.Empty, error) {
	// add DialOptions
	dialOptions = append(dialOptions, grpc.WithBlock())
	dialOptions = append(dialOptions, grpc.WithTimeout(timeout))
	// create GRPC client conn
	clientConn, err := grpc.Dial(address, dialOptions...)
	if err != nil {
		return nil, err
	}
	defer clientConn.Close()

	// create GRPC client
	client := testpb.NewTestServiceClient(clientConn)

	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	// invoke service
	empty, err := client.EmptyCall(ctx, new(testpb.Empty))
	if err != nil {
		return nil, err
	}

	return empty, nil
}
// newEventsClientConnectionWithAddress returns a new grpc.ClientConn to the configured local PEER.
func newEventsClientConnectionWithAddress(peerAddress string) (*grpc.ClientConn, error) {
	var opts []grpc.DialOption
	if peer.TLSEnabled() {
		var sn string
		if viper.GetString("peer.tls.serverhostoverride") != "" {
			sn = viper.GetString("peer.tls.serverhostoverride")
		}
		var creds credentials.TransportAuthenticator
		if viper.GetString("peer.tls.cert.file") != "" {
			var err error
			creds, err = credentials.NewClientTLSFromFile(viper.GetString("peer.tls.cert.file"), sn)
			if err != nil {
				grpclog.Fatalf("Failed to create TLS credentials %v", err)
			}
		} else {
			creds = credentials.NewClientTLSFromCert(nil, sn)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	}
	opts = append(opts, grpc.WithTimeout(defaultTimeout))
	opts = append(opts, grpc.WithBlock())
	opts = append(opts, grpc.WithInsecure())

	return grpc.Dial(peerAddress, opts...)
}
func TestServerStreamRedirect(t *testing.T) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	addr := l.Addr().String()
	conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
	require.NoError(t, err)
	defer conn.Close()
	cluster := &mockCluster{conn: conn}
	forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
	api := NewRaftProxyRouteGuideServer(testRouteGuide{}, cluster, forwardAsOwnRequest)
	srv := grpc.NewServer()
	RegisterRouteGuideServer(srv, api)
	go srv.Serve(l)
	defer srv.Stop()

	client := NewRouteGuideClient(conn)
	stream, err := client.ListFeatures(context.Background(), &Rectangle{})
	// err is non-nil only on network issues
	assert.Nil(t, err)
	_, err = stream.Recv()
	assert.NotNil(t, err)
	assert.Equal(t, codes.ResourceExhausted, grpc.Code(err))
}
func DefaultDialOptions() []grpc.DialOption {
	rpcTimeout := 2 * time.Second
	dopts := []grpc.DialOption{
		grpc.WithInsecure(),
		grpc.WithTimeout(rpcTimeout),
	}
	return dopts
}
// getClientConn returns a grpc client connection
func getClientConn(addr string, protocol string, timeout time.Duration) (*grpc.ClientConn, error) {
	conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(timeout))
	if err != nil {
		return nil, err
	}
	return conn, nil
}
func (node *nodeImpl) getClientConn(address string, serverName string) (*grpc.ClientConn, error) {
	node.debug("Dial to addr:[%s], with serverName:[%s]...", address, serverName)

	var conn *grpc.ClientConn
	var err error

	if node.conf.isTLSEnabled() {
		node.debug("TLS enabled...")

		// setup tls options
		var opts []grpc.DialOption
		config := tls.Config{
			InsecureSkipVerify: false,
			RootCAs:            node.tlsCertPool,
			ServerName:         serverName,
		}
		if node.conf.isTLSClientAuthEnabled() {
		}

		creds := credentials.NewTLS(&config)

		opts = append(opts, grpc.WithTransportCredentials(creds))
		opts = append(opts, grpc.WithTimeout(time.Second*3))
		conn, err = grpc.Dial(address, opts...)
	} else {
		node.debug("TLS disabled...")

		var opts []grpc.DialOption
		opts = append(opts, grpc.WithInsecure())
		opts = append(opts, grpc.WithTimeout(time.Second*3))
		conn, err = grpc.Dial(address, opts...)
	}

	if err != nil {
		node.error("Failed dialing in [%s].", err.Error())
		return nil, err
	}

	node.debug("Dial to addr:[%s], with serverName:[%s]...done!", address, serverName)
	return conn, nil
}
func TestDialTimeout(t *testing.T) {
	conn, err := grpc.Dial("Non-Existent.Server:80", grpc.WithTimeout(time.Millisecond), grpc.WithBlock())
	if err == nil {
		conn.Close()
	}
	if err != grpc.ErrClientConnTimeout {
		t.Fatalf("grpc.Dial(_, _) = %v, %v, want %v", conn, err, grpc.ErrClientConnTimeout)
	}
}