// StartServer starts a gRPC server serving a benchmark service. It returns its
// listen address and a function to stop the server.
func StartServer() (string, func()) {
	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		grpclog.Fatalf("Failed to listen: %v", err)
	}
	s := grpc.NewServer(grpc.MaxConcurrentStreams(math.MaxUint32))
	testpb.RegisterTestServiceServer(s, &testServer{})
	go s.Serve(lis)
	return lis.Addr().String(), func() {
		s.Stop()
	}
}
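// A minimal usage sketch (an addition, not from the original source): it dials
// the address StartServer returns, issues one unary RPC, then stops the
// server. UnaryCall/SimpleRequest are the usual benchmark test-proto names and
// are assumptions here; context refers to golang.org/x/net/context in code of
// this vintage.
func runBenchmarkSmokeTest() {
	addr, stop := StartServer()
	defer stop()

	cc, err := grpc.Dial(addr)
	if err != nil {
		grpclog.Fatalf("Dial(%q) failed: %v", addr, err)
	}
	defer cc.Close()

	client := testpb.NewTestServiceClient(cc)
	if _, err := client.UnaryCall(context.Background(), &testpb.SimpleRequest{}); err != nil {
		grpclog.Fatalf("UnaryCall failed: %v", err)
	}
}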
func setUp(maxStream uint32, e env) (s *grpc.Server, cc *grpc.ClientConn) {
	sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(maxStream)}
	la := ":0"
	switch e.network {
	case "unix":
		// Use a unique socket path; UnixNano avoids the mangled string that
		// formatting a time.Time with %d would produce.
		la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano())
		syscall.Unlink(la)
	}
	lis, err := net.Listen(e.network, la)
	if err != nil {
		grpclog.Fatalf("Failed to listen: %v", err)
	}
	if e.security == "tls" {
		creds, err := credentials.NewServerTLSFromFile(tlsDir+"server1.pem", tlsDir+"server1.key")
		if err != nil {
			grpclog.Fatalf("Failed to generate credentials %v", err)
		}
		sopts = append(sopts, grpc.Creds(creds))
	}
	s = grpc.NewServer(sopts...)
	testpb.RegisterTestServiceServer(s, &testServer{})
	go s.Serve(lis)
	addr := la
	switch e.network {
	case "unix":
	default:
		_, port, err := net.SplitHostPort(lis.Addr().String())
		if err != nil {
			grpclog.Fatalf("Failed to parse listener address: %v", err)
		}
		addr = "localhost:" + port
	}
	if e.security == "tls" {
		// Use a distinct variable for the credentials error so the Dial error
		// below is not shadowed and reaches the shared check.
		creds, cerr := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com")
		if cerr != nil {
			grpclog.Fatalf("Failed to create credentials %v", cerr)
		}
		cc, err = grpc.Dial(addr, grpc.WithTransportCredentials(creds), grpc.WithDialer(e.dialer))
	} else {
		cc, err = grpc.Dial(addr, grpc.WithDialer(e.dialer))
	}
	if err != nil {
		grpclog.Fatalf("Dial(%q) = %v", addr, err)
	}
	return
}
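// A hedged usage sketch (an addition, not from the original source): it spins
// up the insecure TCP environment via setUp, issues one EmptyCall, and tears
// the connection and server down. The env literal (field names and the dialer
// signature) is inferred from the usage above and may not match the real
// struct; EmptyCall/Empty are the usual grpc test-proto names.
func runSetUpSmokeTest() {
	e := env{
		network:  "tcp",
		security: "",
		dialer: func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("tcp", addr, timeout)
		},
	}
	s, cc := setUp(math.MaxUint32, e)
	defer s.Stop()
	defer cc.Close()

	tc := testpb.NewTestServiceClient(cc)
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		grpclog.Fatalf("EmptyCall failed: %v", err)
	}
}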
func main() {
	flag.Parse()
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}
	var opts []grpc.ServerOption
	if *tls {
		creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
		if err != nil {
			grpclog.Fatalf("Failed to generate credentials %v", err)
		}
		opts = []grpc.ServerOption{grpc.Creds(creds)}
	}
	grpcServer := grpc.NewServer(opts...)
	pb.RegisterRouteGuideServer(grpcServer, newServer())
	grpcServer.Serve(lis)
}
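// A client-side counterpart sketch (an addition, not from the original
// source): it mirrors the server's TLS toggle and issues a single GetFeature
// RPC against the RouteGuide service. The serverAddr/caFile parameters and the
// "x.test.youtube.com" host override are illustrative assumptions.
func dialRouteGuide(serverAddr, caFile string, useTLS bool) {
	var opts []grpc.DialOption
	if useTLS {
		creds, err := credentials.NewClientTLSFromFile(caFile, "x.test.youtube.com")
		if err != nil {
			grpclog.Fatalf("Failed to create TLS credentials %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	}
	conn, err := grpc.Dial(serverAddr, opts...)
	if err != nil {
		grpclog.Fatalf("fail to dial: %v", err)
	}
	defer conn.Close()

	client := pb.NewRouteGuideClient(conn)
	feature, err := client.GetFeature(context.Background(), &pb.Point{Latitude: 409146138, Longitude: -746188906})
	if err != nil {
		grpclog.Fatalf("GetFeature failed: %v", err)
	}
	grpclog.Println(feature)
}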
func main() {
	flag.Parse()
	p := strconv.Itoa(*port)
	lis, err := net.Listen("tcp", ":"+p)
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}
	var opts []grpc.ServerOption
	if *useTLS {
		creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
		if err != nil {
			grpclog.Fatalf("Failed to generate credentials %v", err)
		}
		opts = []grpc.ServerOption{grpc.Creds(creds)}
	}
	server := grpc.NewServer(opts...)
	testpb.RegisterTestServiceServer(server, &testServer{})
	server.Serve(lis)
}
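// An optional shutdown sketch (an addition, not from the original source):
// rather than letting Serve block until the process is killed, stop the server
// on SIGINT/SIGTERM. Needs "os", "os/signal", and "syscall" in addition to the
// imports already used above.
func serveWithSignalHandling(server *grpc.Server, lis net.Listener) {
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigc
		server.Stop() // closes listeners and terminates open connections
	}()
	if err := server.Serve(lis); err != nil {
		grpclog.Printf("Serve returned: %v", err)
	}
}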
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd")
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		if u.Scheme == "http" && !cfg.peerTLSInfo.Empty() {
			plog.Warningf("The scheme of peer url %s is http while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
		}
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		// The deferred close only fires if startEtcd later returns an error.
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		if u.Scheme == "http" && !cfg.clientTLSInfo.Empty() {
			plog.Warningf("The scheme of client url %s is http while client key/cert files are presented. Ignored client key/cert files.", u.String())
		}
		var l net.Listener
		l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		if fdLimit, err := runtimeutil.FDLimit(); err == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			l = netutil.LimitListener(l, int(fdLimit-reservedInternalFDNum))
		}

		urlStr := u.String()
		plog.Info("listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	var v3l net.Listener
	if cfg.v3demo {
		v3l, err = net.Listen("tcp", cfg.gRPCAddr)
		if err != nil {
			plog.Fatal(err)
		}
		plog.Infof("listening for client rpc on %s", cfg.gRPCAddr)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,
		ClientURLs:          cfg.acurls,
		PeerURLs:            cfg.apurls,
		DataDir:             cfg.dir,
		DedicatedWALDir:     cfg.walDir,
		SnapCount:           cfg.snapCount,
		MaxSnapFiles:        cfg.maxSnapFiles,
		MaxWALFiles:         cfg.maxWalFiles,
		InitialPeerURLsMap:  urlsmap,
		InitialClusterToken: token,
		DiscoveryURL:        cfg.durl,
		DiscoveryProxy:      cfg.dproxy,
		NewCluster:          cfg.isNewCluster(),
		ForceNewCluster:     cfg.forceNewCluster,
		PeerTLSInfo:         cfg.peerTLSInfo,
		TickMs:              cfg.TickMs,
		ElectionTicks:       cfg.electionTicks(),
		V3demo:              cfg.v3demo,
		StrictReconfigCheck: cfg.strictReconfigCheck,
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout()),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}

	if cfg.v3demo {
		// set up v3 demo rpc
		grpcServer := grpc.NewServer()
		etcdserverpb.RegisterEtcdServer(grpcServer, v3rpc.New(s))
		// Serve inside the goroutine body: writing `go plog.Fatal(grpcServer.Serve(v3l))`
		// would block here, because a go statement evaluates its call arguments
		// in the calling goroutine.
		go func() {
			plog.Fatal(grpcServer.Serve(v3l))
		}()
	}
	return s.StopNotify(), nil
}
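// A hedged caller sketch (an addition, not from the original source): start
// etcd with a parsed config and block until the server signals shutdown. The
// runEtcd name is illustrative; real callers obtain cfg from flag/environment
// parsing elsewhere in this package.
func runEtcd(cfg *config) {
	stopped, err := startEtcd(cfg)
	if err != nil {
		plog.Fatalf("failed to start etcd: %v", err)
	}
	<-stopped
	plog.Info("etcd server has stopped")
}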