Example 1
File: peer.go Project: Gwill/etcd
// NewPeerHandler generates an http.Handler to handle etcd peer requests.
func NewPeerHandler(s *etcdserver.EtcdServer) http.Handler {
	var lh http.Handler
	if l := s.Lessor(); l != nil {
		lh = lease.NewHandler(l)
	}
	return newPeerHandler(s.Cluster(), s.RaftHandler(), lh)
}
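A minimal usage sketch for the handler above: serve it on its own peer listener. The listen address, the log package, and the import paths (which assume the github.com/coreos/etcd layout of this period) are illustrative assumptions, not part of the example.
package main

import (
	"log"
	"net"
	"net/http"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/etcdhttp"
)

// servePeers mounts the peer handler from Example 1 on a dedicated listener.
// The address is a placeholder; http.Serve blocks until the listener closes.
func servePeers(s *etcdserver.EtcdServer) {
	l, err := net.Listen("tcp", "127.0.0.1:2380")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(l, etcdhttp.NewPeerHandler(s)))
}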
Example 2
File: member.go Project: nhr/origin
func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
	return &ClusterServer{
		cluster:   s.Cluster(),
		server:    s,
		raftTimer: s,
	}
}
Example 3
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
	return &watchServer{
		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),
		raftTimer: s,
		watchable: s.Watchable(),
	}
}
Example 4
File: key.go Project: jkhelil/etcd
func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
	return &kvServer{
		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),
		raftTimer: s,
		kv:        s,
	}
}
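The constructors in Examples 3 and 4 return the generated pb.WatchServer and pb.KVServer interfaces, so they are meant to be registered on a gRPC server. A minimal sketch, written as if in the same package as the constructors (where pb aliases the generated etcdserverpb package) and assuming the RegisterKVServer/RegisterWatchServer functions that grpc-go codegen normally produces for those services:
// Sketch: expose the KV and Watch services from Examples 3 and 4 over gRPC.
// The Register* names follow grpc-go codegen conventions (assumption).
func serveV3(s *etcdserver.EtcdServer, lis net.Listener) error {
	grpcServer := grpc.NewServer()
	pb.RegisterKVServer(grpcServer, NewKVServer(s))
	pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
	return grpcServer.Serve(lis) // blocks until lis is closed or Serve fails
}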
Example 5
func newHeader(s *etcdserver.EtcdServer) header {
	return header{
		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),
		raftTimer: s,
		rev:       func() int64 { return s.KV().Rev() },
	}
}
Example 6
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
	sec := auth.NewStore(server, timeout)

	kh := &keysHandler{
		sec:                   sec,
		server:                server,
		cluster:               server.Cluster(),
		timer:                 server,
		timeout:               timeout,
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}

	sh := &statsHandler{
		stats: server,
	}

	mh := &membersHandler{
		sec:     sec,
		server:  server,
		cluster: server.Cluster(),
		timeout: timeout,
		clock:   clockwork.NewRealClock(),
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}

	dmh := &deprecatedMachinesHandler{
		cluster: server.Cluster(),
	}

	sech := &authHandler{
		sec:                   sec,
		cluster:               server.Cluster(),
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", http.NotFound)
	mux.Handle(healthPath, healthHandler(server))
	mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
	mux.Handle(keysPrefix, kh)
	mux.Handle(keysPrefix+"/", kh)
	mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
	mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
	mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
	mux.HandleFunc(varsPath, serveVars)
	mux.HandleFunc(configPath+"/local/log", logHandleFunc)
	mux.Handle(metricsPath, prometheus.Handler())
	mux.Handle(membersPrefix, mh)
	mux.Handle(membersPrefix+"/", mh)
	mux.Handle(deprecatedMachinesPrefix, dmh)
	handleAuth(mux, sech)

	return requestLogger(mux)
}
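In the startEtcd examples below, the handler returned here is wrapped in a CORS handler and served once per client listener. A stripped-down sketch of serving it directly, with a placeholder address and request timeout (same assumed imports as the sketch under Example 1, plus time):
// Sketch: serve the muxed client handler on a single listener.
// Address and timeout are placeholders, not values from the examples.
func serveClients(s *etcdserver.EtcdServer) {
	h := etcdhttp.NewClientHandler(s, 5*time.Second)
	l, err := net.Listen("tcp", "127.0.0.1:2379")
	if err != nil {
		log.Fatal(err)
	}
	srv := &http.Server{Handler: h}
	log.Fatal(srv.Serve(l))
}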
Example 7
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	initialPeers, token, err := setupCluster(cfg)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		glog.V(2).Infof("etcd: peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		glog.V(2).Info("etcd: listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				glog.V(2).Info("etcd: stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		glog.V(2).Infof("etcd: clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		var l net.Listener
		// Assign to the function-scoped err instead of shadowing it with :=,
		// so the deferred cleanup below still closes this listener if a later
		// step of startEtcd fails.
		l, err = net.Listen("tcp", u.Host)
		if err != nil {
			return nil, err
		}
		l, err = transport.NewKeepAliveListener(l, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		glog.V(2).Info("etcd: listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				glog.V(2).Info("etcd: stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,
		ClientURLs:          cfg.acurls,
		PeerURLs:            cfg.apurls,
		DataDir:             cfg.dir,
		SnapCount:           cfg.snapCount,
		MaxSnapFiles:        cfg.maxSnapFiles,
		InitialPeerURLsMap:  initialPeers,
		InitialClusterToken: token,
		MaxWALFiles:         cfg.maxWalFiles,
		NewCluster:          true,
		ForceNewCluster:     false,
		Transport:           pt,
		TickMs:              cfg.TickMs,
		ElectionTicks:       cfg.electionTicks(),
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	osutil.HandleInterrupts()
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	ch := etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout())
	ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			glog.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			glog.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}
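startEtcd returns a channel that is closed when the server stops, so a caller in the same package typically starts the server and then blocks on that channel. A minimal sketch; NewConfig is a hypothetical stand-in for however this project builds its *config:
// Sketch: drive startEtcd from main and wait for shutdown.
func main() {
	cfg := NewConfig() // hypothetical config constructor
	stopped, err := startEtcd(cfg)
	if err != nil {
		glog.Fatalf("etcd failed to start: %v", err)
	}
	<-stopped // closed by the server on shutdown (e.g. after an interrupt)
}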
Example 8
File: etcd.go Project: utahcon/etcd
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	urlsmap, token, err := getPeerURLsMapAndToken(cfg, "etcd")
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, peerDialTimeout(cfg.ElectionMs), rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		if u.Scheme == "http" && !cfg.peerTLSInfo.Empty() {
			plog.Warningf("The scheme of peer url %s is http while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
		}
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		if u.Scheme == "http" && !cfg.clientTLSInfo.Empty() {
			plog.Warningf("The scheme of client url %s is http while client key/cert files are presented. Ignored client key/cert files.", u.String())
		}
		var l net.Listener
		l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}
		if fdLimit, err := runtimeutil.FDLimit(); err == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			l = netutil.LimitListener(l, int(fdLimit-reservedInternalFDNum))
		}

		urlStr := u.String()
		plog.Info("listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	var v3l net.Listener
	if cfg.v3demo {
		v3l, err = net.Listen("tcp", cfg.gRPCAddr)
		if err != nil {
			plog.Fatal(err)
		}
		plog.Infof("listening for client rpc on %s", cfg.gRPCAddr)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,
		ClientURLs:          cfg.acurls,
		PeerURLs:            cfg.apurls,
		DataDir:             cfg.dir,
		DedicatedWALDir:     cfg.walDir,
		SnapCount:           cfg.snapCount,
		MaxSnapFiles:        cfg.maxSnapFiles,
		MaxWALFiles:         cfg.maxWalFiles,
		InitialPeerURLsMap:  urlsmap,
		InitialClusterToken: token,
		DiscoveryURL:        cfg.durl,
		DiscoveryProxy:      cfg.dproxy,
		NewCluster:          cfg.isNewCluster(),
		ForceNewCluster:     cfg.forceNewCluster,
		Transport:           pt,
		TickMs:              cfg.TickMs,
		ElectionTicks:       cfg.electionTicks(),
		V3demo:              cfg.v3demo,
		StrictReconfigCheck: cfg.strictReconfigCheck,
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s, srvcfg.ReqTimeout()),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}

	if cfg.v3demo {
		// set up v3 demo rpc
		grpcServer := grpc.NewServer()
		etcdserverpb.RegisterEtcdServer(grpcServer, v3rpc.New(s))
		// Run Serve inside a closure: `go plog.Fatal(grpcServer.Serve(v3l))`
		// would evaluate Serve in the current goroutine and block here.
		go func() { plog.Fatal(grpcServer.Serve(v3l)) }()
	}

	return s.StopNotify(), nil
}
Example 9
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
	go capabilityLoop(server)

	sec := auth.NewStore(server, timeout)

	kh := &keysHandler{
		sec:     sec,
		server:  server,
		cluster: server.Cluster(),
		timer:   server,
		timeout: timeout,
	}

	sh := &statsHandler{
		stats: server,
	}

	mh := &membersHandler{
		sec:     sec,
		server:  server,
		cluster: server.Cluster(),
		timeout: timeout,
		clock:   clockwork.NewRealClock(),
	}

	dmh := &deprecatedMachinesHandler{
		cluster: server.Cluster(),
	}

	sech := &authHandler{
		sec:     sec,
		cluster: server.Cluster(),
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", http.NotFound)
	mux.Handle(healthPath, healthHandler(server))
	mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
	mux.Handle(keysPrefix, kh)
	mux.Handle(keysPrefix+"/", kh)
	mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
	mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
	mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
	mux.HandleFunc(varsPath, serveVars)
	mux.HandleFunc(configPath+"/local/log", logHandleFunc)
	mux.Handle(metricsPath, prometheus.Handler())
	mux.Handle(membersPrefix, mh)
	mux.Handle(membersPrefix+"/", mh)
	mux.Handle(deprecatedMachinesPrefix, dmh)
	handleAuth(mux, sech)

	if server.IsPprofEnabled() {
		plog.Infof("pprof is enabled under %s", pprofPrefix)

		mux.HandleFunc(pprofPrefix, pprof.Index)
		mux.HandleFunc(pprofPrefix+"/profile", pprof.Profile)
		mux.HandleFunc(pprofPrefix+"/symbol", pprof.Symbol)
		mux.HandleFunc(pprofPrefix+"/cmdline", pprof.Cmdline)
		// TODO: currently, we don't create an entry for pprof.Trace,
		// because go 1.4 doesn't provide it. After support of go 1.4 is dropped,
		// we should add the entry.

		mux.Handle(pprofPrefix+"/heap", pprof.Handler("heap"))
		mux.Handle(pprofPrefix+"/goroutine", pprof.Handler("goroutine"))
		mux.Handle(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate"))
		mux.Handle(pprofPrefix+"/block", pprof.Handler("block"))
	}

	return requestLogger(mux)
}
Example 10
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	urlsmap, token, err := getPeerURLsMapAndToken(cfg)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, rafthttp.DialTimeout, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		var l net.Listener
		l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,
		ClientURLs:          cfg.acurls,
		PeerURLs:            cfg.apurls,
		DataDir:             cfg.dir,
		SnapCount:           cfg.snapCount,
		MaxSnapFiles:        cfg.maxSnapFiles,
		MaxWALFiles:         cfg.maxWalFiles,
		InitialPeerURLsMap:  urlsmap,
		InitialClusterToken: token,
		DiscoveryURL:        cfg.durl,
		DiscoveryProxy:      cfg.dproxy,
		NewCluster:          cfg.isNewCluster(),
		ForceNewCluster:     cfg.forceNewCluster,
		Transport:           pt,
		TickMs:              cfg.TickMs,
		ElectionTicks:       cfg.electionTicks(),
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}