func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
	smap := &streamsMap{
		streams: make(map[grpc.ServerStream]struct{}),
	}

	go func() {
		election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
		noLeaderCnt := 0

		for {
			select {
			case <-s.StopNotify():
				return
			case <-time.After(election):
				if s.Leader() == types.ID(raft.None) {
					noLeaderCnt++
				} else {
					noLeaderCnt = 0
				}

				// We are more conservative on canceling existing streams. Reconnecting streams
				// cost much more than just rejecting new requests. So we wait until the member
				// cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
				if noLeaderCnt >= maxNoLeaderCnt {
					smap.mu.Lock()
					for ss := range smap.streams {
						if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
							(*ssWithCtx.cancel)()
							<-ss.Context().Done()
						}
					}
					smap.streams = make(map[grpc.ServerStream]struct{})
					smap.mu.Unlock()
				}
			}
		}
	}()

	return smap
}
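// The following is a minimal, self-contained sketch of the pattern monitorLeader
// implements: count consecutive "no leader" observations and cancel the contexts of
// registered long-lived streams only after several election timeouts have passed.
// All names here (leaderlessWatcher, maxMisses, hasLeader) are illustrative, not
// etcd's API; this is an assumption-laden sketch, not the actual implementation.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type leaderlessWatcher struct {
	mu      sync.Mutex
	cancels map[int]context.CancelFunc
	nextID  int
}

// register tracks a cancel function for a long-lived stream and returns its id.
func (w *leaderlessWatcher) register(cancel context.CancelFunc) int {
	w.mu.Lock()
	defer w.mu.Unlock()
	id := w.nextID
	w.nextID++
	w.cancels[id] = cancel
	return id
}

// run polls hasLeader every interval; after maxMisses consecutive misses it cancels
// everything registered so far, mirroring monitorLeader's conservative behavior.
func (w *leaderlessWatcher) run(stop <-chan struct{}, interval time.Duration, maxMisses int, hasLeader func() bool) {
	misses := 0
	for {
		select {
		case <-stop:
			return
		case <-time.After(interval):
			if hasLeader() {
				misses = 0
				continue
			}
			if misses++; misses >= maxMisses {
				w.mu.Lock()
				for id, cancel := range w.cancels {
					cancel()
					delete(w.cancels, id)
				}
				w.mu.Unlock()
			}
		}
	}
}

func main() {
	w := &leaderlessWatcher{cancels: make(map[int]context.CancelFunc)}

	ctx, cancel := context.WithCancel(context.Background())
	w.register(cancel)

	stop := make(chan struct{})
	// hasLeader always reports false here, so cancellation fires after three intervals.
	go w.run(stop, 10*time.Millisecond, 3, func() bool { return false })

	<-ctx.Done()
	fmt.Println("stream context canceled:", ctx.Err())
	close(stop)
}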
// capabilityLoop checks the cluster version every 500ms and updates
// the enabledMap when the cluster version increased.
// capabilityLoop MUST be run in a goroutine before checking capability
// or using capabilityHandler.
func capabilityLoop(s *etcdserver.EtcdServer) {
	stopped := s.StopNotify()

	var pv *semver.Version
	for {
		if v := s.ClusterVersion(); v != pv {
			if pv == nil || (v != nil && pv.LessThan(*v)) {
				pv = v
				enableMapMu.Lock()
				enabledMap = capabilityMaps[pv.String()]
				enableMapMu.Unlock()
				plog.Infof("enabled capabilities for version %s", pv)
			}
		}

		select {
		case <-stopped:
			return
		case <-time.After(500 * time.Millisecond):
		}
	}
}
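// A minimal sketch, with illustrative names only (capability, capsByVersion,
// isCapabilityEnabled are not etcd's identifiers), of the pattern behind
// capabilityLoop: a background updater swaps in the capability set for the
// highest cluster version seen so far under a mutex, and request handlers read
// the current set through a small helper guarded by the same mutex.
package main

import (
	"fmt"
	"sync"
)

type capability string

const v3rpcCapability capability = "v3rpc"

var (
	capMu      sync.RWMutex
	enabledCap = map[capability]bool{}

	// capability sets keyed by cluster version string; values are illustrative.
	capsByVersion = map[string]map[capability]bool{
		"2.3.0": {},
		"3.0.0": {v3rpcCapability: true},
	}
)

// updateCapabilities is what the loop body would call when it observes a new,
// higher cluster version.
func updateCapabilities(clusterVersion string) {
	capMu.Lock()
	defer capMu.Unlock()
	if caps, ok := capsByVersion[clusterVersion]; ok {
		enabledCap = caps
	}
}

// isCapabilityEnabled is the read side used by handlers.
func isCapabilityEnabled(c capability) bool {
	capMu.RLock()
	defer capMu.RUnlock()
	return enabledCap[c]
}

func main() {
	fmt.Println(isCapabilityEnabled(v3rpcCapability)) // false: cluster version not yet known
	updateCapabilities("3.0.0")
	fmt.Println(isCapabilityEnabled(v3rpcCapability)) // true: enabled for 3.0.0
}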
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	initialPeers, token, err := setupCluster(cfg)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		glog.V(2).Infof("etcd: peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		glog.V(2).Info("etcd: listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				glog.V(2).Info("etcd: stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		glog.V(2).Infof("etcd: clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		l, err := net.Listen("tcp", u.Host)
		if err != nil {
			return nil, err
		}
		l, err = transport.NewKeepAliveListener(l, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		glog.V(2).Info("etcd: listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				glog.V(2).Info("etcd: stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,
		ClientURLs:          cfg.acurls,
		PeerURLs:            cfg.apurls,
		DataDir:             cfg.dir,
		SnapCount:           cfg.snapCount,
		MaxSnapFiles:        cfg.maxSnapFiles,
		InitialPeerURLsMap:  initialPeers,
		InitialClusterToken: token,
		MaxWALFiles:         cfg.maxWalFiles,
		NewCluster:          true,
		ForceNewCluster:     false,
		Transport:           pt,
		TickMs:              cfg.TickMs,
		ElectionTicks:       cfg.electionTicks(),
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	osutil.HandleInterrupts()
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	ch := etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout())
	ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			glog.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			glog.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}
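// The listener loops in the startEtcd variants above and below all rely on the same Go
// idiom: a deferred closure inspects the function-scoped err on return and closes any
// listener that was already opened if startup fails partway through. The sketch below
// isolates that pattern; openAll and the addresses are illustrative names, not etcd's API.
package main

import (
	"fmt"
	"net"
)

// openAll opens a listener per address. Because err is a named return value, the
// deferred closures see the final error and clean up earlier listeners on failure.
func openAll(addrs []string) (lns []net.Listener, err error) {
	for _, addr := range addrs {
		var l net.Listener
		l, err = net.Listen("tcp", addr) // assign to the outer err; no := shadowing
		if err != nil {
			return nil, err
		}
		lns = append(lns, l)

		// Runs when openAll returns; closes this listener only if the function is
		// returning a non-nil error.
		defer func(l net.Listener, addr string) {
			if err != nil {
				l.Close()
				fmt.Println("closed listener on", addr)
			}
		}(l, addr)
	}
	return lns, nil
}

func main() {
	// The second address is invalid, so the first listener gets cleaned up.
	if _, err := openAll([]string{"127.0.0.1:0", "bad-addr"}); err != nil {
		fmt.Println("openAll failed:", err)
	}
}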
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd")
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, peerDialTimeout(cfg.ElectionMs), rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		if u.Scheme == "http" && !cfg.peerTLSInfo.Empty() {
			plog.Warningf("The scheme of peer url %s is http while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
		}
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		if u.Scheme == "http" && !cfg.clientTLSInfo.Empty() {
			plog.Warningf("The scheme of client url %s is http while client key/cert files are presented. Ignored client key/cert files.", u.String())
		}
		var l net.Listener
		l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		if fdLimit, err := runtimeutil.FDLimit(); err == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			l = netutil.LimitListener(l, int(fdLimit-reservedInternalFDNum))
		}

		urlStr := u.String()
		plog.Info("listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	var v3l net.Listener
	if cfg.v3demo {
		v3l, err = net.Listen("tcp", cfg.gRPCAddr)
		if err != nil {
			plog.Fatal(err)
		}
		plog.Infof("listening for client rpc on %s", cfg.gRPCAddr)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,
		ClientURLs:          cfg.acurls,
		PeerURLs:            cfg.apurls,
		DataDir:             cfg.dir,
		DedicatedWALDir:     cfg.walDir,
		SnapCount:           cfg.snapCount,
		MaxSnapFiles:        cfg.maxSnapFiles,
		MaxWALFiles:         cfg.maxWalFiles,
		InitialPeerURLsMap:  urlsmap,
		InitialClusterToken: token,
		DiscoveryURL:        cfg.durl,
		DiscoveryProxy:      cfg.dproxy,
		NewCluster:          cfg.isNewCluster(),
		ForceNewCluster:     cfg.forceNewCluster,
		Transport:           pt,
		TickMs:              cfg.TickMs,
		ElectionTicks:       cfg.electionTicks(),
		V3demo:              cfg.v3demo,
		StrictReconfigCheck: cfg.strictReconfigCheck,
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout()),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}
	if cfg.v3demo {
		// set up v3 demo rpc
		grpcServer := grpc.NewServer()
		etcdserverpb.RegisterEtcdServer(grpcServer, v3rpc.New(s))
		// Serve must run inside the goroutine; as an argument to `go plog.Fatal(...)`
		// it would be evaluated in the calling goroutine and block startEtcd.
		go func() {
			plog.Fatal(grpcServer.Serve(v3l))
		}()
	}
	return s.StopNotify(), nil
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd() (<-chan struct{}, error) {
	apurls, err := flags.URLsFromFlags(fs, "initial-advertise-peer-urls", "addr", peerTLSInfo)
	if err != nil {
		return nil, err
	}
	cls, err := setupCluster(apurls)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	if *dir == "" {
		*dir = fmt.Sprintf("%v.etcd", *name)
		log.Printf("no data-dir provided, using default data-dir ./%s", *dir)
	}
	if err := os.MkdirAll(*dir, privateDirMode); err != nil {
		return nil, fmt.Errorf("cannot create data directory: %v", err)
	}
	if err := fileutil.IsDirWriteable(*dir); err != nil {
		return nil, fmt.Errorf("cannot write to data directory: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	acurls, err := flags.URLsFromFlags(fs, "advertise-client-urls", "addr", clientTLSInfo)
	if err != nil {
		return nil, err
	}

	lpurls, err := flags.URLsFromFlags(fs, "listen-peer-urls", "peer-bind-addr", peerTLSInfo)
	if err != nil {
		return nil, err
	}

	if !peerTLSInfo.Empty() {
		log.Printf("etcd: peerTLS: %s", peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range lpurls {
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		log.Print("etcd: listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				log.Print("etcd: stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	lcurls, err := flags.URLsFromFlags(fs, "listen-client-urls", "bind-addr", clientTLSInfo)
	if err != nil {
		return nil, err
	}

	if !clientTLSInfo.Empty() {
		log.Printf("etcd: clientTLS: %s", clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range lcurls {
		var l net.Listener
		l, err = transport.NewListener(u.Host, u.Scheme, clientTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		log.Print("etcd: listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				log.Print("etcd: stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	cfg := &etcdserver.ServerConfig{
		Name:            *name,
		ClientURLs:      acurls,
		PeerURLs:        apurls,
		DataDir:         *dir,
		SnapCount:       *snapCount,
		MaxSnapFiles:    *maxSnapFiles,
		MaxWALFiles:     *maxWalFiles,
		Cluster:         cls,
		DiscoveryURL:    *durl,
		DiscoveryProxy:  *dproxy,
		NewCluster:      clusterStateFlag.String() == clusterStateFlagNew,
		ForceNewCluster: *forceNewCluster,
		Transport:       pt,
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(cfg)
	if err != nil {
		return nil, err
	}
	s.Start()

	if corsInfo.String() != "" {
		log.Printf("etcd: cors = %s", corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s)
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			log.Fatal(http.Serve(l, ph))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			log.Fatal(http.Serve(l, ch))
		}(l)
	}
	return s.StopNotify(), nil
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd")
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	if cfg.peerAutoTLS && cfg.peerTLSInfo.Empty() {
		phosts := make([]string, 0)
		for _, u := range cfg.lpurls {
			phosts = append(phosts, u.Host)
		}
		cfg.peerTLSInfo, err = transport.SelfCert(cfg.dir, phosts)
		if err != nil {
			plog.Fatalf("could not get certs (%v)", err)
		}
	} else if cfg.peerAutoTLS {
		plog.Warningf("ignoring peer auto TLS since certs given")
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		if u.Scheme == "http" {
			if !cfg.peerTLSInfo.Empty() {
				plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
			}
			if cfg.peerTLSInfo.ClientCertAuth {
				plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
			}
		}
		var (
			l      net.Listener
			tlscfg *tls.Config
		)

		if !cfg.peerTLSInfo.Empty() {
			tlscfg, err = cfg.peerTLSInfo.ServerConfig()
			if err != nil {
				return nil, err
			}
		}

		l, err = rafthttp.NewListener(u, tlscfg)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	var ctlscfg *tls.Config
	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
		ctlscfg, err = cfg.clientTLSInfo.ServerConfig()
		if err != nil {
			return nil, err
		}
	}
	sctxs := make(map[string]*serveCtx)
	for _, u := range cfg.lcurls {
		if u.Scheme == "http" {
			if !cfg.clientTLSInfo.Empty() {
				plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
			}
			if cfg.clientTLSInfo.ClientCertAuth {
				plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
			}
		}
		if u.Scheme == "https" && ctlscfg == nil {
			return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String())
		}

		ctx := &serveCtx{host: u.Host}

		if u.Scheme == "https" {
			ctx.secure = true
		} else {
			ctx.insecure = true
		}

		if sctxs[u.Host] != nil {
			if ctx.secure {
				sctxs[u.Host].secure = true
			}
			if ctx.insecure {
				sctxs[u.Host].insecure = true
			}
			continue
		}

		var l net.Listener
		l, err = net.Listen("tcp", u.Host)
		if err != nil {
			return nil, err
		}

		var fdLimit uint64
		if fdLimit, err = runtimeutil.FDLimit(); err == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			l = transport.LimitListener(l, int(fdLimit-reservedInternalFDNum))
		}

		l, err = transport.NewKeepAliveListener(l, "tcp", nil)
		ctx.l = l
		if err != nil {
			return nil, err
		}

		plog.Info("listening for client requests on ", u.Host)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", u.Host)
			}
		}()
		sctxs[u.Host] = ctx
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                    cfg.name,
		ClientURLs:              cfg.acurls,
		PeerURLs:                cfg.apurls,
		DataDir:                 cfg.dir,
		DedicatedWALDir:         cfg.walDir,
		SnapCount:               cfg.snapCount,
		MaxSnapFiles:            cfg.maxSnapFiles,
		MaxWALFiles:             cfg.maxWalFiles,
		InitialPeerURLsMap:      urlsmap,
		InitialClusterToken:     token,
		DiscoveryURL:            cfg.durl,
		DiscoveryProxy:          cfg.dproxy,
		NewCluster:              cfg.isNewCluster(),
		ForceNewCluster:         cfg.forceNewCluster,
		PeerTLSInfo:             cfg.peerTLSInfo,
		TickMs:                  cfg.TickMs,
		ElectionTicks:           cfg.electionTicks(),
		AutoCompactionRetention: cfg.autoCompactionRetention,
		QuotaBackendBytes:       cfg.quotaBackendBytes,
		StrictReconfigCheck:     cfg.strictReconfigCheck,
		EnablePprof:             cfg.enablePprof,
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := http.Handler(&cors.CORSHandler{
		Handler: v2http.NewClientHandler(s, srvcfg.ReqTimeout()),
		Info:    cfg.corsInfo,
	})
	ph := v2http.NewPeerHandler(s)

	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(servePeerHTTP(l, ph))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, sctx := range sctxs {
		go func(sctx *serveCtx) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serve(sctx, s, ctlscfg, ch))
		}(sctx)
	}

	return s.StopNotify(), nil
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	urlsmap, token, err := getPeerURLsMapAndToken(cfg)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, rafthttp.DialTimeout, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		var l net.Listener
		l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,
		ClientURLs:          cfg.acurls,
		PeerURLs:            cfg.apurls,
		DataDir:             cfg.dir,
		SnapCount:           cfg.snapCount,
		MaxSnapFiles:        cfg.maxSnapFiles,
		MaxWALFiles:         cfg.maxWalFiles,
		InitialPeerURLsMap:  urlsmap,
		InitialClusterToken: token,
		DiscoveryURL:        cfg.durl,
		DiscoveryProxy:      cfg.dproxy,
		NewCluster:          cfg.isNewCluster(),
		ForceNewCluster:     cfg.forceNewCluster,
		Transport:           pt,
		TickMs:              cfg.TickMs,
		ElectionTicks:       cfg.electionTicks(),
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	// Resolve the initial cluster membership (in static mode this mirrors acurls;
	// in dynamic mode it comes from etcd Discovery or DNS Discovery) and the cluster token.
	urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd")
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	// Peer TLS setup (self-signed certs when peer auto TLS is requested).
	if cfg.peerAutoTLS && cfg.peerTLSInfo.Empty() {
		phosts := make([]string, 0)
		for _, u := range cfg.lpurls {
			phosts = append(phosts, u.Host)
		}
		cfg.peerTLSInfo, err = transport.SelfCert(cfg.dir, phosts)
		if err != nil {
			plog.Fatalf("could not get certs (%v)", err)
		}
	} else if cfg.peerAutoTLS {
		plog.Warningf("ignoring peer auto TLS since certs given")
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	// Prepare the peer listeners.
	for _, u := range cfg.lpurls {
		if u.Scheme == "http" {
			if !cfg.peerTLSInfo.Empty() {
				plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
			}
			if cfg.peerTLSInfo.ClientCertAuth {
				plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
			}
		}
		var (
			l      net.Listener
			tlscfg *tls.Config
		)

		if !cfg.peerTLSInfo.Empty() {
			tlscfg, err = cfg.peerTLSInfo.ServerConfig()
			if err != nil {
				return nil, err
			}
		}

		l, err = rafthttp.NewListener(u, tlscfg)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	var ctlscfg *tls.Config
	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
		ctlscfg, err = cfg.clientTLSInfo.ServerConfig()
		if err != nil {
			return nil, err
		}
	}
	sctxs := make(map[string]*serveCtx)
	// Prepare the client listeners.
	for _, u := range cfg.lcurls {
		if u.Scheme == "http" {
			if !cfg.clientTLSInfo.Empty() {
				plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
			}
			if cfg.clientTLSInfo.ClientCertAuth {
				plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
			}
		}
		if u.Scheme == "https" && ctlscfg == nil {
			return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String())
		}

		ctx := &serveCtx{host: u.Host}

		if u.Scheme == "https" {
			ctx.secure = true
		} else {
			ctx.insecure = true
		}

		if sctxs[u.Host] != nil {
			if ctx.secure {
				sctxs[u.Host].secure = true
			}
			if ctx.insecure {
				sctxs[u.Host].insecure = true
			}
			continue
		}

		var l net.Listener
		l, err = net.Listen("tcp", u.Host)
		if err != nil {
			return nil, err
		}

		var fdLimit uint64
		if fdLimit, err = runtimeutil.FDLimit(); err == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			l = transport.LimitListener(l, int(fdLimit-reservedInternalFDNum))
		}

		l, err = transport.NewKeepAliveListener(l, "tcp", nil)
		ctx.l = l
		if err != nil {
			return nil, err
		}

		plog.Info("listening for client requests on ", u.Host)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", u.Host)
			}
		}()
		sctxs[u.Host] = ctx
	}

	// Assemble the server configuration.
	srvcfg := &etcdserver.ServerConfig{
		Name:                    cfg.name,                    // member name
		ClientURLs:              cfg.acurls,                  // advertised client URLs
		PeerURLs:                cfg.apurls,                  // advertised peer URLs used for cluster communication
		DataDir:                 cfg.dir,                     // data directory (member/snap and member/wal are created under it by default)
		DedicatedWALDir:         cfg.walDir,                  // if set, WAL files go to walDir instead of DataDir; a dedicated disk helps avoid I/O contention between logging and other I/O (empty by default)
		SnapCount:               cfg.snapCount,               // number of committed transactions that triggers a snapshot to disk (default 10000)
		MaxSnapFiles:            cfg.maxSnapFiles,            // maximum number of snapshot files to retain (0 is unlimited); default 5, 0 on Windows
		MaxWALFiles:             cfg.maxWalFiles,             // maximum number of WAL files to retain (0 is unlimited); default 5, 0 on Windows
		InitialPeerURLsMap:      urlsmap,                     // initial cluster peer URL map
		InitialClusterToken:     token,                       // token identifying this cluster during bootstrapping
		DiscoveryURL:            cfg.durl,                    // discovery service URL for dynamic bootstrapping
		DiscoveryProxy:          cfg.dproxy,                  // HTTP proxy used to reach the discovery service
		NewCluster:              cfg.isNewCluster(),          // whether this member bootstraps a new cluster
		ForceNewCluster:         cfg.forceNewCluster,         // force creation of a new one-member cluster
		PeerTLSInfo:             cfg.peerTLSInfo,             // peer TLS configuration
		TickMs:                  cfg.TickMs,                  // heartbeat interval in milliseconds
		ElectionTicks:           cfg.electionTicks(),         // election timeout expressed in ticks (derived from the election timeout in milliseconds)
		AutoCompactionRetention: cfg.autoCompactionRetention,
		StrictReconfigCheck:     cfg.strictReconfigCheck, // strict reconfiguration check (default false)
		EnablePprof:             cfg.enablePprof,         // serve runtime profiling data over HTTP at client URL + "/debug/pprof"
	}

	// Start the server.
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := http.Handler(&cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout()),
		Info:    cfg.corsInfo,
	})
	ph := etcdhttp.NewPeerHandler(s)

	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(servePeerHTTP(l, ph))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, sctx := range sctxs {
		go func(sctx *serveCtx) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serve(sctx, s, ctlscfg, ch))
		}(sctx)
	}

	return s.StopNotify(), nil
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	cls, err := setupCluster(cfg)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	if cfg.dir == "" {
		cfg.dir = fmt.Sprintf("%v.etcd", cfg.name)
		log.Printf("no data-dir provided, using default data-dir ./%s", cfg.dir)
	}
	if err := makeMemberDir(cfg.dir); err != nil {
		return nil, fmt.Errorf("cannot use /member sub-directory: %v", err)
	}
	membdir := path.Join(cfg.dir, "member")
	if err := fileutil.IsDirWriteable(membdir); err != nil {
		return nil, fmt.Errorf("cannot write to data directory: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		log.Printf("etcd: peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		log.Print("etcd: listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				log.Print("etcd: stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		log.Printf("etcd: clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		var l net.Listener
		l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		log.Print("etcd: listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				log.Print("etcd: stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:            cfg.name,
		ClientURLs:      cfg.acurls,
		PeerURLs:        cfg.apurls,
		DataDir:         membdir,
		SnapCount:       cfg.snapCount,
		MaxSnapFiles:    cfg.maxSnapFiles,
		MaxWALFiles:     cfg.maxWalFiles,
		Cluster:         cls,
		DiscoveryURL:    cfg.durl,
		DiscoveryProxy:  cfg.dproxy,
		NewCluster:      cfg.isNewCluster(),
		ForceNewCluster: cfg.forceNewCluster,
		Transport:       pt,
		TickMs:          cfg.TickMs,
		ElectionTicks:   cfg.electionTicks(),
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()

	if cfg.corsInfo.String() != "" {
		log.Printf("etcd: cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s.Cluster, s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			log.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			log.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd")
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	if cfg.peerAutoTLS && cfg.peerTLSInfo.Empty() {
		phosts := make([]string, 0)
		for _, u := range cfg.lpurls {
			phosts = append(phosts, u.Host)
		}
		cfg.peerTLSInfo, err = transport.SelfCert(cfg.dir, phosts)
		if err != nil {
			plog.Fatalf("could not get certs (%v)", err)
		}
	} else if cfg.peerAutoTLS {
		plog.Warningf("ignoring peer auto TLS since certs given")
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		if u.Scheme == "http" && !cfg.peerTLSInfo.Empty() {
			plog.Warningf("The scheme of peer url %s is http while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
		}
		var l net.Listener
		l, err = rafthttp.NewListener(u, cfg.peerTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		if u.Scheme == "http" && !cfg.clientTLSInfo.Empty() {
			plog.Warningf("The scheme of client url %s is http while client key/cert files are presented. Ignored client key/cert files.", u.String())
		}
		var l net.Listener
		l, err = net.Listen("tcp", u.Host)
		if err != nil {
			return nil, err
		}

		if fdLimit, err := runtimeutil.FDLimit(); err == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			l = transport.LimitListener(l, int(fdLimit-reservedInternalFDNum))
		}

		// Do not wrap around this listener if TLS Info is set.
		// HTTPS server expects TLS Conn created by TLSListener.
		l, err = transport.NewKeepAliveListener(l, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                    cfg.name,
		ClientURLs:              cfg.acurls,
		PeerURLs:                cfg.apurls,
		DataDir:                 cfg.dir,
		DedicatedWALDir:         cfg.walDir,
		SnapCount:               cfg.snapCount,
		MaxSnapFiles:            cfg.maxSnapFiles,
		MaxWALFiles:             cfg.maxWalFiles,
		InitialPeerURLsMap:      urlsmap,
		InitialClusterToken:     token,
		DiscoveryURL:            cfg.durl,
		DiscoveryProxy:          cfg.dproxy,
		NewCluster:              cfg.isNewCluster(),
		ForceNewCluster:         cfg.forceNewCluster,
		PeerTLSInfo:             cfg.peerTLSInfo,
		TickMs:                  cfg.TickMs,
		ElectionTicks:           cfg.electionTicks(),
		V3demo:                  cfg.v3demo,
		AutoCompactionRetention: cfg.autoCompactionRetention,
		StrictReconfigCheck:     cfg.strictReconfigCheck,
		EnablePprof:             cfg.enablePprof,
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout()),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s)

	var grpcS *grpc.Server
	if cfg.v3demo {
		// set up v3 demo rpc
		tls := &cfg.clientTLSInfo
		if cfg.clientTLSInfo.Empty() {
			tls = nil
		}
		grpcS, err = v3rpc.Server(s, tls)
		if err != nil {
			s.Stop()
			<-s.StopNotify()
			return nil, err
		}
	}

	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(serve(l, nil, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serve(l, grpcS, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}
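// Several of the startEtcd variants above cap client connections once the process
// file-descriptor limit is known. The sketch below shows the same idea with standard
// building blocks: etcd's runtimeutil.FDLimit and transport.LimitListener are replaced
// here with syscall.Getrlimit and golang.org/x/net/netutil.LimitListener, and
// reservedFDs is an assumed stand-in for etcd's reservedInternalFDNum. Unix-only,
// illustrative, not etcd's implementation.
package main

import (
	"log"
	"net"
	"net/http"
	"syscall"

	"golang.org/x/net/netutil"
)

// reservedFDs is an assumed number of descriptors kept free for non-client usage
// (WAL, snapshots, peer connections in the etcd case).
const reservedFDs = 150

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	var rlim syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err == nil {
		if rlim.Cur <= reservedFDs {
			log.Fatalf("fd limit %d is too low; need more than %d", rlim.Cur, reservedFDs)
		}
		// Cap concurrent accepted connections so reservedFDs descriptors stay available.
		l = netutil.LimitListener(l, int(rlim.Cur-reservedFDs))
	}

	log.Println("serving on", l.Addr())
	log.Fatal(http.Serve(l, http.NotFoundHandler()))
}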