// NewClusterServer constructs a ClusterServer backed by the given etcd server.
// The server value itself satisfies both the server and raft-timer dependencies.
func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
	cs := &ClusterServer{
		cluster:   s.Cluster(),
		server:    s,
		raftTimer: s,
	}
	return cs
}
// newStreamInterceptor builds the gRPC stream interceptor for the etcd server.
// It rejects streams until the v3 RPC capability is enabled, and for streams
// whose metadata requires a leader it registers the stream with monitorLeader
// so it can be canceled when leadership is lost for too long.
func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
	// Registry of leader-dependent streams, maintained by a background goroutine.
	smap := monitorLeader(s)

	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		if !api.IsCapabilityEnabled(api.V3rpcCapability) {
			return rpctypes.ErrGRPCNotCapable
		}

		md, ok := metadata.FromContext(ss.Context())
		if ok {
			if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
				// Fail fast when the caller demands a leader and there is none.
				if s.Leader() == types.ID(raft.None) {
					return rpctypes.ErrGRPCNoLeader
				}

				// Wrap the stream with a cancelable context and register it so
				// monitorLeader can cancel it on prolonged leader loss.
				cctx, cancel := context.WithCancel(ss.Context())
				ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}

				smap.mu.Lock()
				smap.streams[ss] = struct{}{}
				smap.mu.Unlock()

				// Deregister and cancel when the handler returns.
				defer func() {
					smap.mu.Lock()
					delete(smap.streams, ss)
					smap.mu.Unlock()
					cancel()
				}()
			}
		}

		return metricsStreamInterceptor(srv, ss, info, handler)
	}
}
// NewKVServer returns a KV RPC server served directly from the etcd server.
func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
	srv := &kvServer{
		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),
		raftTimer: s,
		kv:        s,
	}
	return srv
}
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
	// Auth store shared by the keys, members, and auth handlers.
	sec := auth.NewStore(server, timeout)

	kh := &keysHandler{
		sec:                   sec,
		server:                server,
		cluster:               server.Cluster(),
		timer:                 server,
		timeout:               timeout,
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}

	sh := &statsHandler{
		stats: server,
	}

	mh := &membersHandler{
		sec:                   sec,
		server:                server,
		cluster:               server.Cluster(),
		timeout:               timeout,
		clock:                 clockwork.NewRealClock(),
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}

	dmh := &deprecatedMachinesHandler{
		cluster: server.Cluster(),
	}

	sech := &authHandler{
		sec:                   sec,
		cluster:               server.Cluster(),
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}

	// Route every v2 client endpoint; unknown paths 404.
	mux := http.NewServeMux()
	mux.HandleFunc("/", http.NotFound)
	mux.Handle(healthPath, healthHandler(server))
	mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
	mux.Handle(keysPrefix, kh)
	mux.Handle(keysPrefix+"/", kh)
	mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
	mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
	mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
	mux.HandleFunc(varsPath, serveVars)
	mux.HandleFunc(configPath+"/local/log", logHandleFunc)
	mux.Handle(metricsPath, prometheus.Handler())
	mux.Handle(membersPrefix, mh)
	mux.Handle(membersPrefix+"/", mh)
	mux.Handle(deprecatedMachinesPrefix, dmh)
	handleAuth(mux, sech)

	// Wrap the mux so every request is logged.
	return requestLogger(mux)
}
// NewPeerHandler generates an http.Handler to handle etcd peer (raft) requests. func NewPeerHandler(server *etcdserver.EtcdServer) http.Handler { mh := &peerMembersHandler{ clusterInfo: server.Cluster, } mux := http.NewServeMux() mux.HandleFunc("/", http.NotFound) mux.Handle(rafthttp.RaftPrefix, server.RaftHandler()) mux.Handle(rafthttp.RaftPrefix+"/", server.RaftHandler()) mux.Handle(peerMembersPrefix, mh) return mux }
// NewPeerHandler generates an http.Handler to handle etcd peer (raft) requests. func NewPeerHandler(server *etcdserver.EtcdServer) http.Handler { rh := rafthttp.NewHandler(server, server.Cluster.ID()) rsh := rafthttp.NewStreamHandler(server.SenderFinder(), server.ID(), server.Cluster.ID()) mh := &peerMembersHandler{ clusterInfo: server.Cluster, } mux := http.NewServeMux() mux.HandleFunc("/", http.NotFound) mux.Handle(rafthttp.RaftPrefix, rh) mux.Handle(rafthttp.RaftStreamPrefix+"/", rsh) mux.Handle(peerMembersPrefix, mh) return mux }
func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { if !api.IsCapabilityEnabled(api.V3rpcCapability) { return nil, rpctypes.ErrGRPCNotCapable } md, ok := metadata.FromContext(ctx) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { return nil, rpctypes.ErrGRPCNoLeader } } } return metricsUnaryInterceptor(ctx, req, info, handler) } }
// NewPeerHandler generates an http.Handler to handle etcd peer requests. func NewPeerHandler(s *etcdserver.EtcdServer) http.Handler { var lh http.Handler if l := s.Lessor(); l != nil { lh = lease.NewHandler(l) } return newPeerHandler(s.Cluster(), s.RaftHandler(), lh) }
// serve accepts incoming connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
func serve(sctx *serveCtx, s *etcdserver.EtcdServer, tlscfg *tls.Config, handler http.Handler) error {
	// Discard http.Server's error log output; user-triggered errors are noise.
	logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)

	// Block until the etcd server is ready to serve client requests.
	<-s.ReadyNotify()
	plog.Info("ready to serve client requests")

	// cmux multiplexes gRPC (HTTP/2) and plain HTTP/1 traffic on one listener.
	m := cmux.New(sctx.l)

	if sctx.insecure {
		gs := v3rpc.Server(s, nil)
		grpcl := m.Match(cmux.HTTP2())
		go func() { plog.Fatal(gs.Serve(grpcl)) }()

		srvhttp := &http.Server{
			Handler:  handler,
			ErrorLog: logger, // do not log user error
		}
		httpl := m.Match(cmux.HTTP1())
		go func() { plog.Fatal(srvhttp.Serve(httpl)) }()
		plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.host)
	}

	if sctx.secure {
		gs := v3rpc.Server(s, tlscfg)
		// grpcHandlerFunc routes gRPC requests to gs and everything else to handler.
		handler = grpcHandlerFunc(gs, handler)
		tlsl := tls.NewListener(m.Match(cmux.Any()), tlscfg)
		// TODO: add debug flag; enable logging when debug flag is set
		srv := &http.Server{
			Handler:   handler,
			TLSConfig: tlscfg,
			ErrorLog:  logger, // do not log user error
		}
		go func() { plog.Fatal(srv.Serve(tlsl)) }()
		plog.Infof("serving client requests on %s", sctx.host)
	}

	// Blocks until the underlying listener is closed.
	return m.Serve()
}
// TODO: change etcdserver to raft interface when we have it. // add test for healthHandler when we have the interface ready. func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, "GET") { return } if uint64(server.Leader()) == raft.None { http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) return } // wait for raft's progress index := server.Index() for i := 0; i < 3; i++ { time.Sleep(250 * time.Millisecond) if server.Index() > index { w.WriteHeader(http.StatusOK) w.Write([]byte(`{"health": "true"}`)) return } } http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) return } }
// monitorLeader starts a background goroutine that watches leadership and
// returns the registry of leader-dependent streams. When the member has seen
// no leader for maxNoLeaderCnt consecutive election intervals, all registered
// streams are canceled so their clients can reconnect elsewhere.
func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
	smap := &streamsMap{
		streams: make(map[grpc.ServerStream]struct{}),
	}

	go func() {
		// One election timeout, derived from the tick interval and tick count.
		election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
		noLeaderCnt := 0

		for {
			select {
			case <-s.StopNotify():
				return
			case <-time.After(election):
				// Count consecutive election intervals with no leader.
				if s.Leader() == types.ID(raft.None) {
					noLeaderCnt++
				} else {
					noLeaderCnt = 0
				}

				// We are more conservative on canceling existing streams. Reconnecting streams
				// cost much more than just rejecting new requests. So we wait until the member
				// cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
				if noLeaderCnt >= maxNoLeaderCnt {
					smap.mu.Lock()
					for ss := range smap.streams {
						if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
							(*ssWithCtx.cancel)()
							// Wait until the stream has observed the cancellation.
							<-ss.Context().Done()
						}
					}
					// Drop the whole registry; interceptors re-register live streams.
					smap.streams = make(map[grpc.ServerStream]struct{})
					smap.mu.Unlock()
				}
			}
		}
	}()

	return smap
}
// capabilityLoop checks the cluster version every 500ms and updates
// the enabledMap when the cluster version increased.
// capabilityLoop MUST be ran in a goroutine before checking capability
// or using capabilityHandler.
func capabilityLoop(s *etcdserver.EtcdServer) {
	stopped := s.StopNotify()

	// pv is the last version pointer we acted on; nil until a version is known.
	var pv *semver.Version
	for {
		// Only update when the pointer changed AND the version actually
		// increased (or pv was still unset); a nil ClusterVersion leaves
		// the current capabilities in place.
		if v := s.ClusterVersion(); v != pv {
			if pv == nil || (v != nil && pv.LessThan(*v)) {
				pv = v
				enableMapMu.Lock()
				enabledMap = capabilityMaps[pv.String()]
				enableMapMu.Unlock()
				plog.Infof("enabled capabilities for version %s", pv)
			}
		}

		select {
		case <-stopped:
			return
		case <-time.After(500 * time.Millisecond):
		}
	}
}
// NewWatchServer returns a Watch RPC server backed by the etcd server's
// watchable store.
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
	srv := &watchServer{
		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),
		raftTimer: s,
		watchable: s.Watchable(),
	}
	return srv
}
// newHeader builds the response-header filler for the given etcd server;
// rev is read lazily from the KV store at fill time.
func newHeader(s *etcdserver.EtcdServer) header {
	revFn := func() int64 { return s.KV().Rev() }
	return header{
		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),
		raftTimer: s,
		rev:       revFn,
	}
}
// serve accepts incoming connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
func serve(sctx *serveCtx, s *etcdserver.EtcdServer, tlscfg *tls.Config, handler http.Handler) error {
	// Discard http.Server's error log output; user-triggered errors are noise.
	logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)

	// Block until the etcd server is ready to serve client requests.
	<-s.ReadyNotify()
	plog.Info("ready to serve client requests")

	// cmux multiplexes gRPC (HTTP/2) and plain HTTP/1 traffic on one listener.
	m := cmux.New(sctx.l)

	if sctx.insecure {
		gs := v3rpc.Server(s, nil)
		grpcl := m.Match(cmux.HTTP2())
		go func() { plog.Fatal(gs.Serve(grpcl)) }()

		// The grpc-gateway dials back into this same listener, insecurely.
		opts := []grpc.DialOption{
			grpc.WithInsecure(),
		}
		gwmux, err := registerGateway(sctx.l.Addr().String(), opts)
		if err != nil {
			return err
		}

		// JSON gateway under /v3alpha/, everything else to the v2 handler.
		httpmux := http.NewServeMux()
		httpmux.Handle("/v3alpha/", gwmux)
		httpmux.Handle("/", handler)

		srvhttp := &http.Server{
			Handler:  httpmux,
			ErrorLog: logger, // do not log user error
		}
		httpl := m.Match(cmux.HTTP1())
		go func() { plog.Fatal(srvhttp.Serve(httpl)) }()
		plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.host)
	}

	if sctx.secure {
		gs := v3rpc.Server(s, tlscfg)
		handler = grpcHandlerFunc(gs, handler)

		dtls := *tlscfg
		// trust local server
		dtls.InsecureSkipVerify = true
		creds := credentials.NewTLS(&dtls)
		opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
		gwmux, err := registerGateway(sctx.l.Addr().String(), opts)
		if err != nil {
			return err
		}

		tlsl := tls.NewListener(m.Match(cmux.Any()), tlscfg)
		// TODO: add debug flag; enable logging when debug flag is set
		httpmux := http.NewServeMux()
		httpmux.Handle("/v3alpha/", gwmux)
		httpmux.Handle("/", handler)

		srv := &http.Server{
			Handler:   httpmux,
			TLSConfig: tlscfg,
			ErrorLog:  logger, // do not log user error
		}
		go func() { plog.Fatal(srv.Serve(tlsl)) }()
		plog.Infof("serving client requests on %s", sctx.host)
	}

	// Blocks until the underlying listener is closed.
	return m.Serve()
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	// Resolve the peer-URL map (static config, or etcd/DNS discovery) and the
	// initial cluster token.
	urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd")
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	// Peer TLS: self-generate certs when auto TLS is requested and no certs
	// were supplied; otherwise the given certs win.
	if cfg.peerAutoTLS && cfg.peerTLSInfo.Empty() {
		phosts := make([]string, 0)
		for _, u := range cfg.lpurls {
			phosts = append(phosts, u.Host)
		}
		cfg.peerTLSInfo, err = transport.SelfCert(cfg.dir, phosts)
		if err != nil {
			plog.Fatalf("could not get certs (%v)", err)
		}
	} else if cfg.peerAutoTLS {
		plog.Warningf("ignoring peer auto TLS since certs given")
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}

	// Prepare the peer listeners.
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		if u.Scheme == "http" {
			if !cfg.peerTLSInfo.Empty() {
				plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
			}
			if cfg.peerTLSInfo.ClientCertAuth {
				plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
			}
		}
		var (
			l      net.Listener
			tlscfg *tls.Config
		)
		if !cfg.peerTLSInfo.Empty() {
			tlscfg, err = cfg.peerTLSInfo.ServerConfig()
			if err != nil {
				return nil, err
			}
		}
		l, err = rafthttp.NewListener(u, tlscfg)
		if err != nil {
			return nil, err
		}
		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		// Close the listener again if startEtcd ultimately returns an error.
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	var ctlscfg *tls.Config
	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
		ctlscfg, err = cfg.clientTLSInfo.ServerConfig()
		if err != nil {
			return nil, err
		}
	}

	// Prepare the client listeners: one serveCtx per host, with secure and
	// insecure flags merged when the same host appears with both schemes.
	sctxs := make(map[string]*serveCtx)
	for _, u := range cfg.lcurls {
		if u.Scheme == "http" {
			if !cfg.clientTLSInfo.Empty() {
				plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
			}
			if cfg.clientTLSInfo.ClientCertAuth {
				plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
			}
		}
		if u.Scheme == "https" && ctlscfg == nil {
			return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String())
		}
		ctx := &serveCtx{host: u.Host}
		if u.Scheme == "https" {
			ctx.secure = true
		} else {
			ctx.insecure = true
		}
		if sctxs[u.Host] != nil {
			if ctx.secure {
				sctxs[u.Host].secure = true
			}
			if ctx.insecure {
				sctxs[u.Host].insecure = true
			}
			continue
		}
		var l net.Listener
		l, err = net.Listen("tcp", u.Host)
		if err != nil {
			return nil, err
		}
		// Cap client connections below the process fd limit, reserving fds
		// for internal use.
		var fdLimit uint64
		if fdLimit, err = runtimeutil.FDLimit(); err == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			l = transport.LimitListener(l, int(fdLimit-reservedInternalFDNum))
		}
		l, err = transport.NewKeepAliveListener(l, "tcp", nil)
		ctx.l = l
		if err != nil {
			return nil, err
		}
		plog.Info("listening for client requests on ", u.Host)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", u.Host)
			}
		}()
		sctxs[u.Host] = ctx
	}

	// Assemble the server configuration.
	// (Original comments were in Chinese; translated below.)
	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,        // member name
		ClientURLs:          cfg.acurls,      // advertised client URLs
		PeerURLs:            cfg.apurls,      // advertised peer URLs
		DataDir:             cfg.dir,         // data directory (member/snap, member/wal live under it by default)
		DedicatedWALDir:     cfg.walDir,      // if set, WAL goes here instead of DataDir (dedicated disk avoids IO contention); empty = DataDir
		SnapCount:           cfg.snapCount,   // committed transactions that trigger a snapshot to disk (default 10000)
		MaxSnapFiles:        cfg.maxSnapFiles, // max snapshot files retained, 0 = unlimited (default 5; 0 on Windows)
		MaxWALFiles:         cfg.maxWalFiles, // max WAL files retained, 0 = unlimited (default 5; 0 on Windows)
		InitialPeerURLsMap:  urlsmap,         // peer URL map of the initial cluster
		InitialClusterToken: token,           // token guarding cluster membership
		DiscoveryURL:        cfg.durl,        // discovery service URL
		DiscoveryProxy:      cfg.dproxy,      // HTTP proxy used for discovery
		NewCluster:          cfg.isNewCluster(), // whether this bootstraps a new cluster
		ForceNewCluster:     cfg.forceNewCluster, // NOTE(review): original comment was a placeholder ("--???--"); presumably forces a fresh one-member cluster — confirm
		PeerTLSInfo:         cfg.peerTLSInfo, // peer transport TLS settings
		TickMs:              cfg.TickMs,      // heartbeat interval (ms)
		ElectionTicks:       cfg.electionTicks(), // election timeout, in ticks
		AutoCompactionRetention: cfg.autoCompactionRetention,
		StrictReconfigCheck:     cfg.strictReconfigCheck, // strict reconfiguration check (default false per original note)
		EnablePprof:             cfg.enablePprof,         // serve runtime profiles at client URL + "/debug/pprof"
	}

	// Start the server.
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := http.Handler(&cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout()),
		Info:    cfg.corsInfo,
	})
	ph := etcdhttp.NewPeerHandler(s)
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(servePeerHTTP(l, ph))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, sctx := range sctxs {
		go func(sctx *serveCtx) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serve(sctx, s, ctlscfg, ch))
		}(sctx)
	}
	return s.StopNotify(), nil
}
func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { return "aLeaseServer{ NewLeaseServer(s), quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, } }
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
	// Track the cluster version so capability-gated endpoints behave correctly.
	go capabilityLoop(server)

	// Auth store shared by the keys, members, and auth handlers.
	sec := auth.NewStore(server, timeout)

	kh := &keysHandler{
		sec:     sec,
		server:  server,
		cluster: server.Cluster(),
		timer:   server,
		timeout: timeout,
	}

	sh := &statsHandler{
		stats: server,
	}

	mh := &membersHandler{
		sec:     sec,
		server:  server,
		cluster: server.Cluster(),
		timeout: timeout,
		clock:   clockwork.NewRealClock(),
	}

	dmh := &deprecatedMachinesHandler{
		cluster: server.Cluster(),
	}

	sech := &authHandler{
		sec:     sec,
		cluster: server.Cluster(),
	}

	// Route every v2 client endpoint; unknown paths 404.
	mux := http.NewServeMux()
	mux.HandleFunc("/", http.NotFound)
	mux.Handle(healthPath, healthHandler(server))
	mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
	mux.Handle(keysPrefix, kh)
	mux.Handle(keysPrefix+"/", kh)
	mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
	mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
	mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
	mux.HandleFunc(varsPath, serveVars)
	mux.HandleFunc(configPath+"/local/log", logHandleFunc)
	mux.Handle(metricsPath, prometheus.Handler())
	mux.Handle(membersPrefix, mh)
	mux.Handle(membersPrefix+"/", mh)
	mux.Handle(deprecatedMachinesPrefix, dmh)
	handleAuth(mux, sech)

	// Expose pprof endpoints only when explicitly enabled.
	if server.IsPprofEnabled() {
		plog.Infof("pprof is enabled under %s", pprofPrefix)

		mux.HandleFunc(pprofPrefix, pprof.Index)
		mux.HandleFunc(pprofPrefix+"/profile", pprof.Profile)
		mux.HandleFunc(pprofPrefix+"/symbol", pprof.Symbol)
		mux.HandleFunc(pprofPrefix+"/cmdline", pprof.Cmdline)
		// TODO: currently, we don't create an entry for pprof.Trace,
		// because go 1.4 doesn't provide it. After support of go 1.4 is dropped,
		// we should add the entry.

		mux.Handle(pprofPrefix+"/heap", pprof.Handler("heap"))
		mux.Handle(pprofPrefix+"/goroutine", pprof.Handler("goroutine"))
		mux.Handle(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate"))
		mux.Handle(pprofPrefix+"/block", pprof.Handler("block"))
	}

	// Wrap the mux so every request is logged.
	return requestLogger(mux)
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	// Resolve the peer-URL map and initial cluster token.
	urlsmap, token, err := getPeerURLsMapAndToken(cfg)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	// Transport for peer (raft) traffic with dial/read/write timeouts.
	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, rafthttp.DialTimeout, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	// Prepare the peer listeners.
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}
		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		// Close the listener again if startEtcd ultimately returns an error.
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
	}
	// Prepare the client listeners.
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		var l net.Listener
		l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}
		urlStr := u.String()
		plog.Info("listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,
		ClientURLs:          cfg.acurls,
		PeerURLs:            cfg.apurls,
		DataDir:             cfg.dir,
		SnapCount:           cfg.snapCount,
		MaxSnapFiles:        cfg.maxSnapFiles,
		MaxWALFiles:         cfg.maxWalFiles,
		InitialPeerURLsMap:  urlsmap,
		InitialClusterToken: token,
		DiscoveryURL:        cfg.durl,
		DiscoveryProxy:      cfg.dproxy,
		NewCluster:          cfg.isNewCluster(),
		ForceNewCluster:     cfg.forceNewCluster,
		Transport:           pt,
		TickMs:              cfg.TickMs,
		ElectionTicks:       cfg.electionTicks(),
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	// Resolve the initial cluster membership.
	cls, err := setupCluster(cfg)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	// Default the data dir to "<name>.etcd" and verify the member
	// subdirectory is writable before starting anything.
	if cfg.dir == "" {
		cfg.dir = fmt.Sprintf("%v.etcd", cfg.name)
		log.Printf("no data-dir provided, using default data-dir ./%s", cfg.dir)
	}
	if err := makeMemberDir(cfg.dir); err != nil {
		return nil, fmt.Errorf("cannot use /member sub-directory: %v", err)
	}
	membdir := path.Join(cfg.dir, "member")
	if err := fileutil.IsDirWriteable(membdir); err != nil {
		return nil, fmt.Errorf("cannot write to data directory: %v", err)
	}

	// Transport for peer (raft) traffic with read/write timeouts.
	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		log.Printf("etcd: peerTLS: %s", cfg.peerTLSInfo)
	}
	// Prepare the peer listeners.
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}
		urlStr := u.String()
		log.Print("etcd: listening for peers on ", urlStr)
		// Close the listener again if startEtcd ultimately returns an error.
		defer func() {
			if err != nil {
				l.Close()
				log.Print("etcd: stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		log.Printf("etcd: clientTLS: %s", cfg.clientTLSInfo)
	}
	// Prepare the client listeners.
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		var l net.Listener
		l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}
		urlStr := u.String()
		log.Print("etcd: listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				log.Print("etcd: stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:            cfg.name,
		ClientURLs:      cfg.acurls,
		PeerURLs:        cfg.apurls,
		DataDir:         membdir,
		SnapCount:       cfg.snapCount,
		MaxSnapFiles:    cfg.maxSnapFiles,
		MaxWALFiles:     cfg.maxWalFiles,
		Cluster:         cls,
		DiscoveryURL:    cfg.durl,
		DiscoveryProxy:  cfg.dproxy,
		NewCluster:      cfg.isNewCluster(),
		ForceNewCluster: cfg.forceNewCluster,
		Transport:       pt,
		TickMs:          cfg.TickMs,
		ElectionTicks:   cfg.electionTicks(),
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()

	if cfg.corsInfo.String() != "" {
		log.Printf("etcd: cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s.Cluster, s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			log.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			log.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication. func startEtcd(cfg *config) (<-chan struct{}, error) { urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd") if err != nil { return nil, fmt.Errorf("error setting up initial cluster: %v", err) } pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, peerDialTimeout(cfg.ElectionMs), rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout) if err != nil { return nil, err } if !cfg.peerTLSInfo.Empty() { plog.Infof("peerTLS: %s", cfg.peerTLSInfo) } plns := make([]net.Listener, 0) for _, u := range cfg.lpurls { if u.Scheme == "http" && !cfg.peerTLSInfo.Empty() { plog.Warningf("The scheme of peer url %s is http while peer key/cert files are presented. Ignored peer key/cert files.", u.String()) } var l net.Listener l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout) if err != nil { return nil, err } urlStr := u.String() plog.Info("listening for peers on ", urlStr) defer func() { if err != nil { l.Close() plog.Info("stopping listening for peers on ", urlStr) } }() plns = append(plns, l) } if !cfg.clientTLSInfo.Empty() { plog.Infof("clientTLS: %s", cfg.clientTLSInfo) } clns := make([]net.Listener, 0) for _, u := range cfg.lcurls { if u.Scheme == "http" && !cfg.clientTLSInfo.Empty() { plog.Warningf("The scheme of client url %s is http while client key/cert files are presented. 
Ignored client key/cert files.", u.String()) } var l net.Listener l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo) if err != nil { return nil, err } if fdLimit, err := runtimeutil.FDLimit(); err == nil { if fdLimit <= reservedInternalFDNum { plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum) } l = netutil.LimitListener(l, int(fdLimit-reservedInternalFDNum)) } urlStr := u.String() plog.Info("listening for client requests on ", urlStr) defer func() { if err != nil { l.Close() plog.Info("stopping listening for client requests on ", urlStr) } }() clns = append(clns, l) } var v3l net.Listener if cfg.v3demo { v3l, err = net.Listen("tcp", cfg.gRPCAddr) if err != nil { plog.Fatal(err) } plog.Infof("listening for client rpc on %s", cfg.gRPCAddr) } srvcfg := &etcdserver.ServerConfig{ Name: cfg.name, ClientURLs: cfg.acurls, PeerURLs: cfg.apurls, DataDir: cfg.dir, DedicatedWALDir: cfg.walDir, SnapCount: cfg.snapCount, MaxSnapFiles: cfg.maxSnapFiles, MaxWALFiles: cfg.maxWalFiles, InitialPeerURLsMap: urlsmap, InitialClusterToken: token, DiscoveryURL: cfg.durl, DiscoveryProxy: cfg.dproxy, NewCluster: cfg.isNewCluster(), ForceNewCluster: cfg.forceNewCluster, Transport: pt, TickMs: cfg.TickMs, ElectionTicks: cfg.electionTicks(), V3demo: cfg.v3demo, StrictReconfigCheck: cfg.strictReconfigCheck, } var s *etcdserver.EtcdServer s, err = etcdserver.NewServer(srvcfg) if err != nil { return nil, err } s.Start() osutil.RegisterInterruptHandler(s.Stop) if cfg.corsInfo.String() != "" { plog.Infof("cors = %s", cfg.corsInfo) } ch := &cors.CORSHandler{ Handler: etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout()), Info: cfg.corsInfo, } ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler()) // Start the peer server in a goroutine for _, l := range plns { go func(l net.Listener) { plog.Fatal(serveHTTP(l, ph, 5*time.Minute)) }(l) } // Start a client server 
goroutine for each listen address for _, l := range clns { go func(l net.Listener) { // read timeout does not work with http close notify // TODO: https://github.com/golang/go/issues/9524 plog.Fatal(serveHTTP(l, ch, 0)) }(l) } if cfg.v3demo { // set up v3 demo rpc grpcServer := grpc.NewServer() etcdserverpb.RegisterEtcdServer(grpcServer, v3rpc.New(s)) go plog.Fatal(grpcServer.Serve(v3l)) } return s.StopNotify(), nil }
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd() (<-chan struct{}, error) {
	// Advertised peer URLs from flags (falls back to the legacy -addr flag).
	apurls, err := flags.URLsFromFlags(fs, "initial-advertise-peer-urls", "addr", peerTLSInfo)
	if err != nil {
		return nil, err
	}
	cls, err := setupCluster(apurls)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	// Default the data dir to "<name>.etcd", create it, and verify it is writable.
	if *dir == "" {
		*dir = fmt.Sprintf("%v.etcd", *name)
		log.Printf("no data-dir provided, using default data-dir ./%s", *dir)
	}
	if err := os.MkdirAll(*dir, privateDirMode); err != nil {
		return nil, fmt.Errorf("cannot create data directory: %v", err)
	}
	if err := fileutil.IsDirWriteable(*dir); err != nil {
		return nil, fmt.Errorf("cannot write to data directory: %v", err)
	}

	// Transport for peer (raft) traffic with read/write timeouts.
	pt, err := transport.NewTimeoutTransport(peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	acurls, err := flags.URLsFromFlags(fs, "advertise-client-urls", "addr", clientTLSInfo)
	if err != nil {
		return nil, err
	}

	lpurls, err := flags.URLsFromFlags(fs, "listen-peer-urls", "peer-bind-addr", peerTLSInfo)
	if err != nil {
		return nil, err
	}

	if !peerTLSInfo.Empty() {
		log.Printf("etcd: peerTLS: %s", peerTLSInfo)
	}
	// Prepare the peer listeners.
	plns := make([]net.Listener, 0)
	for _, u := range lpurls {
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}
		urlStr := u.String()
		log.Print("etcd: listening for peers on ", urlStr)
		// Close the listener again if startEtcd ultimately returns an error.
		defer func() {
			if err != nil {
				l.Close()
				log.Print("etcd: stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	lcurls, err := flags.URLsFromFlags(fs, "listen-client-urls", "bind-addr", clientTLSInfo)
	if err != nil {
		return nil, err
	}

	if !clientTLSInfo.Empty() {
		log.Printf("etcd: clientTLS: %s", clientTLSInfo)
	}
	// Prepare the client listeners.
	clns := make([]net.Listener, 0)
	for _, u := range lcurls {
		var l net.Listener
		l, err = transport.NewListener(u.Host, u.Scheme, clientTLSInfo)
		if err != nil {
			return nil, err
		}
		urlStr := u.String()
		log.Print("etcd: listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				log.Print("etcd: stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	cfg := &etcdserver.ServerConfig{
		Name:            *name,
		ClientURLs:      acurls,
		PeerURLs:        apurls,
		DataDir:         *dir,
		SnapCount:       *snapCount,
		MaxSnapFiles:    *maxSnapFiles,
		MaxWALFiles:     *maxWalFiles,
		Cluster:         cls,
		DiscoveryURL:    *durl,
		DiscoveryProxy:  *dproxy,
		NewCluster:      clusterStateFlag.String() == clusterStateFlagNew,
		ForceNewCluster: *forceNewCluster,
		Transport:       pt,
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(cfg)
	if err != nil {
		return nil, err
	}
	s.Start()

	if corsInfo.String() != "" {
		log.Printf("etcd: cors = %s", corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s)
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			log.Fatal(http.Serve(l, ph))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			log.Fatal(http.Serve(l, ch))
		}(l)
	}
	return s.StopNotify(), nil
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication. func startEtcd(cfg *config) (<-chan struct{}, error) { urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd") if err != nil { return nil, fmt.Errorf("error setting up initial cluster: %v", err) } if cfg.peerAutoTLS && cfg.peerTLSInfo.Empty() { phosts := make([]string, 0) for _, u := range cfg.lpurls { phosts = append(phosts, u.Host) } cfg.peerTLSInfo, err = transport.SelfCert(cfg.dir, phosts) if err != nil { plog.Fatalf("could not get certs (%v)", err) } } else if cfg.peerAutoTLS { plog.Warningf("ignoring peer auto TLS since certs given") } if !cfg.peerTLSInfo.Empty() { plog.Infof("peerTLS: %s", cfg.peerTLSInfo) } plns := make([]net.Listener, 0) for _, u := range cfg.lpurls { if u.Scheme == "http" { if !cfg.peerTLSInfo.Empty() { plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String()) } if cfg.peerTLSInfo.ClientCertAuth { plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String()) } } var ( l net.Listener tlscfg *tls.Config ) if !cfg.peerTLSInfo.Empty() { tlscfg, err = cfg.peerTLSInfo.ServerConfig() if err != nil { return nil, err } } l, err = rafthttp.NewListener(u, tlscfg) if err != nil { return nil, err } urlStr := u.String() plog.Info("listening for peers on ", urlStr) defer func() { if err != nil { l.Close() plog.Info("stopping listening for peers on ", urlStr) } }() plns = append(plns, l) } var ctlscfg *tls.Config if !cfg.clientTLSInfo.Empty() { plog.Infof("clientTLS: %s", cfg.clientTLSInfo) ctlscfg, err = cfg.clientTLSInfo.ServerConfig() if err != nil { return nil, err } } sctxs := make(map[string]*serveCtx) for _, u := range cfg.lcurls { if u.Scheme == "http" { if !cfg.clientTLSInfo.Empty() { plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. 
Ignored key/cert files.", u.String()) } if cfg.clientTLSInfo.ClientCertAuth { plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String()) } } if u.Scheme == "https" && ctlscfg == nil { return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String()) } ctx := &serveCtx{host: u.Host} if u.Scheme == "https" { ctx.secure = true } else { ctx.insecure = true } if sctxs[u.Host] != nil { if ctx.secure { sctxs[u.Host].secure = true } if ctx.insecure { sctxs[u.Host].insecure = true } continue } var l net.Listener l, err = net.Listen("tcp", u.Host) if err != nil { return nil, err } var fdLimit uint64 if fdLimit, err = runtimeutil.FDLimit(); err == nil { if fdLimit <= reservedInternalFDNum { plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum) } l = transport.LimitListener(l, int(fdLimit-reservedInternalFDNum)) } l, err = transport.NewKeepAliveListener(l, "tcp", nil) ctx.l = l if err != nil { return nil, err } plog.Info("listening for client requests on ", u.Host) defer func() { if err != nil { l.Close() plog.Info("stopping listening for client requests on ", u.Host) } }() sctxs[u.Host] = ctx } srvcfg := &etcdserver.ServerConfig{ Name: cfg.name, ClientURLs: cfg.acurls, PeerURLs: cfg.apurls, DataDir: cfg.dir, DedicatedWALDir: cfg.walDir, SnapCount: cfg.snapCount, MaxSnapFiles: cfg.maxSnapFiles, MaxWALFiles: cfg.maxWalFiles, InitialPeerURLsMap: urlsmap, InitialClusterToken: token, DiscoveryURL: cfg.durl, DiscoveryProxy: cfg.dproxy, NewCluster: cfg.isNewCluster(), ForceNewCluster: cfg.forceNewCluster, PeerTLSInfo: cfg.peerTLSInfo, TickMs: cfg.TickMs, ElectionTicks: cfg.electionTicks(), AutoCompactionRetention: cfg.autoCompactionRetention, QuotaBackendBytes: cfg.quotaBackendBytes, StrictReconfigCheck: 
cfg.strictReconfigCheck, EnablePprof: cfg.enablePprof, } var s *etcdserver.EtcdServer s, err = etcdserver.NewServer(srvcfg) if err != nil { return nil, err } s.Start() osutil.RegisterInterruptHandler(s.Stop) if cfg.corsInfo.String() != "" { plog.Infof("cors = %s", cfg.corsInfo) } ch := http.Handler(&cors.CORSHandler{ Handler: v2http.NewClientHandler(s, srvcfg.ReqTimeout()), Info: cfg.corsInfo, }) ph := v2http.NewPeerHandler(s) // Start the peer server in a goroutine for _, l := range plns { go func(l net.Listener) { plog.Fatal(servePeerHTTP(l, ph)) }(l) } // Start a client server goroutine for each listen address for _, sctx := range sctxs { go func(sctx *serveCtx) { // read timeout does not work with http close notify // TODO: https://github.com/golang/go/issues/9524 plog.Fatal(serve(sctx, s, ctlscfg, ch)) }(sctx) } return s.StopNotify(), nil }
// startEtcd launches the etcd server and HTTP handlers for client/server communication. func startEtcd(cfg *config) (<-chan struct{}, error) { initialPeers, token, err := setupCluster(cfg) if err != nil { return nil, fmt.Errorf("error setting up initial cluster: %v", err) } pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout) if err != nil { return nil, err } if !cfg.peerTLSInfo.Empty() { glog.V(2).Infof("etcd: peerTLS: %s", cfg.peerTLSInfo) } plns := make([]net.Listener, 0) for _, u := range cfg.lpurls { var l net.Listener l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout) if err != nil { return nil, err } urlStr := u.String() glog.V(2).Info("etcd: listening for peers on ", urlStr) defer func() { if err != nil { l.Close() glog.V(2).Info("etcd: stopping listening for peers on ", urlStr) } }() plns = append(plns, l) } if !cfg.clientTLSInfo.Empty() { glog.V(2).Infof("etcd: clientTLS: %s", cfg.clientTLSInfo) } clns := make([]net.Listener, 0) for _, u := range cfg.lcurls { l, err := net.Listen("tcp", u.Host) if err != nil { return nil, err } l, err = transport.NewKeepAliveListener(l, u.Scheme, cfg.clientTLSInfo) if err != nil { return nil, err } urlStr := u.String() glog.V(2).Info("etcd: listening for client requests on ", urlStr) defer func() { if err != nil { l.Close() glog.V(2).Info("etcd: stopping listening for client requests on ", urlStr) } }() clns = append(clns, l) } srvcfg := &etcdserver.ServerConfig{ Name: cfg.name, ClientURLs: cfg.acurls, PeerURLs: cfg.apurls, DataDir: cfg.dir, SnapCount: cfg.snapCount, MaxSnapFiles: cfg.maxSnapFiles, InitialPeerURLsMap: initialPeers, InitialClusterToken: token, MaxWALFiles: cfg.maxWalFiles, NewCluster: true, ForceNewCluster: false, Transport: pt, TickMs: cfg.TickMs, ElectionTicks: cfg.electionTicks(), } var s *etcdserver.EtcdServer s, err = etcdserver.NewServer(srvcfg) if err != 
nil { return nil, err } osutil.HandleInterrupts() s.Start() osutil.RegisterInterruptHandler(s.Stop) ch := etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout()) ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler()) // Start the peer server in a goroutine for _, l := range plns { go func(l net.Listener) { glog.Fatal(serveHTTP(l, ph, 5*time.Minute)) }(l) } // Start a client server goroutine for each listen address for _, l := range clns { go func(l net.Listener) { // read timeout does not work with http close notify // TODO: https://github.com/golang/go/issues/9524 glog.Fatal(serveHTTP(l, ch, 0)) }(l) } return s.StopNotify(), nil }
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
//
// It resolves the initial-cluster URL map and token, optionally self-generates
// peer certs (peerAutoTLS), opens one listener per peer/client URL, starts the
// etcd server (plus a gRPC server when v3demo is enabled), and serves each
// listener in its own goroutine.
//
// Returns s.StopNotify() — a channel closed when the server stops — or an
// error if any setup step fails.
//
// NOTE(review): the deferred closures below watch the shared `err` variable so
// that every listener opened so far is closed when a LATER step fails. Do not
// shadow `err` in this function (the fdLimit `if` below deliberately shadows
// it for a purely local check).
func startEtcd(cfg *config) (<-chan struct{}, error) {
	urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd")
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	// Self-signed peer certs are only generated when no certs were supplied.
	if cfg.peerAutoTLS && cfg.peerTLSInfo.Empty() {
		phosts := make([]string, 0)
		for _, u := range cfg.lpurls {
			phosts = append(phosts, u.Host)
		}
		cfg.peerTLSInfo, err = transport.SelfCert(cfg.dir, phosts)
		if err != nil {
			plog.Fatalf("could not get certs (%v)", err)
		}
	} else if cfg.peerAutoTLS {
		plog.Warningf("ignoring peer auto TLS since certs given")
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	// Open one peer listener per --listen-peer-urls entry.
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		if u.Scheme == "http" && !cfg.peerTLSInfo.Empty() {
			plog.Warningf("The scheme of peer url %s is http while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
		}
		var l net.Listener
		l, err = rafthttp.NewListener(u, cfg.peerTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		// Close this listener if any LATER setup step fails (shared err).
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
	}
	// Open one client listener per --listen-client-urls entry.
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		if u.Scheme == "http" && !cfg.clientTLSInfo.Empty() {
			plog.Warningf("The scheme of client url %s is http while client key/cert files are presented. Ignored client key/cert files.", u.String())
		}
		var l net.Listener
		l, err = net.Listen("tcp", u.Host)
		if err != nil {
			return nil, err
		}

		// Reserve file descriptors for internal use; cap client connections.
		// The `err` here is intentionally a local shadow: an FDLimit failure
		// just skips the cap.
		if fdLimit, err := runtimeutil.FDLimit(); err == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			l = transport.LimitListener(l, int(fdLimit-reservedInternalFDNum))
		}

		// Do not wrap around this listener if TLS Info is set.
		// HTTPS server expects TLS Conn created by TLSListener.
		l, err = transport.NewKeepAliveListener(l, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for client requests on ", urlStr)
		// Same shared-err cleanup as the peer listeners above.
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                    cfg.name,
		ClientURLs:              cfg.acurls,
		PeerURLs:                cfg.apurls,
		DataDir:                 cfg.dir,
		DedicatedWALDir:         cfg.walDir,
		SnapCount:               cfg.snapCount,
		MaxSnapFiles:            cfg.maxSnapFiles,
		MaxWALFiles:             cfg.maxWalFiles,
		InitialPeerURLsMap:      urlsmap,
		InitialClusterToken:     token,
		DiscoveryURL:            cfg.durl,
		DiscoveryProxy:          cfg.dproxy,
		NewCluster:              cfg.isNewCluster(),
		ForceNewCluster:         cfg.forceNewCluster,
		PeerTLSInfo:             cfg.peerTLSInfo,
		TickMs:                  cfg.TickMs,
		ElectionTicks:           cfg.electionTicks(),
		V3demo:                  cfg.v3demo,
		AutoCompactionRetention: cfg.autoCompactionRetention,
		StrictReconfigCheck:     cfg.strictReconfigCheck,
		EnablePprof:             cfg.enablePprof,
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	// Client traffic goes through the CORS wrapper; peer traffic does not.
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout()),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s)

	var grpcS *grpc.Server
	if cfg.v3demo {
		// set up v3 demo rpc
		tls := &cfg.clientTLSInfo
		if cfg.clientTLSInfo.Empty() {
			tls = nil
		}
		grpcS, err = v3rpc.Server(s, tls)
		if err != nil {
			// The server is already running; stop it and wait before failing.
			s.Stop()
			<-s.StopNotify()
			return nil, err
		}
	}

	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(serve(l, nil, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serve(l, grpcS, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}