Example 1
func getHTTPBodyAsLines(t *testing.T, url string) []string {
	cfgtls := transport.TLSInfo{}
	tr, err := transport.NewTransport(cfgtls, time.Second)
	if err != nil {
		t.Fatalf("Error getting transport: %v", err)
	}

	tr.MaxIdleConns = -1
	tr.DisableKeepAlives = true

	cli := &http.Client{Transport: tr}

	resp, err := cli.Get(url)
	if err != nil {
		t.Fatalf("Error fetching: %v", err)
	}

	reader := bufio.NewReader(resp.Body)
	lines := []string{}
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			} else {
				t.Fatalf("error reading: %v", err)
			}
		}
		lines = append(lines, line)
	}
	resp.Body.Close()
	return lines
}
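For comparison, the same test helper can be written a bit more compactly with bufio.Scanner and a deferred body close. This is only a sketch (the name getHTTPBodyAsLinesScanner is made up here), and note one behavioral difference: Scanner.Text drops the trailing newline that ReadString keeps.

func getHTTPBodyAsLinesScanner(t *testing.T, url string) []string {
	// Same transport setup as above: no TLS, short dial timeout, no keep-alives.
	tr, err := transport.NewTransport(transport.TLSInfo{}, time.Second)
	if err != nil {
		t.Fatalf("Error getting transport: %v", err)
	}
	tr.MaxIdleConns = -1
	tr.DisableKeepAlives = true

	resp, err := (&http.Client{Transport: tr}).Get(url)
	if err != nil {
		t.Fatalf("Error fetching: %v", err)
	}
	defer resp.Body.Close()

	var lines []string
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		lines = append(lines, scanner.Text()) // newline stripped, unlike ReadString
	}
	if err := scanner.Err(); err != nil {
		t.Fatalf("error reading: %v", err)
	}
	return lines
}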
Example 2
func NewClient(url string) (*Client, error) {
	tr, err := transport.NewTransport(transport.TLSInfo{}, defaultDialTimeout)
	if err != nil {
		return nil, err
	}
	return newClient(url, tr)
}
Example 3
func (c *config) newTransport() (*http.Transport, error) {
	return transport.NewTransport(transport.TLSInfo{
		CAFile:   c.ca,
		CertFile: c.cert,
		KeyFile:  c.key,
	}, 30*time.Second)
}
Example 4
File: etcc.go Project: topicai/etcc
// New tries to connect to an etcd server. endpoints must be addresses
// delimited by commas, like "http://127.0.0.1:4001,http://127.0.0.1:2379".
func New(endpoints string) (*Etcd, error) {
	eps := strings.Split(endpoints, ",")
	for i, ep := range eps {
		u, e := url.Parse(ep)
		if e != nil {
			return nil, fmt.Errorf("url.Parse: %v", e)
		}

		if u.Scheme == "" {
			u.Scheme = "http"
		}
		eps[i] = u.String()
	}

	tr, e := transport.NewTransport(transport.TLSInfo{},
		10*time.Second) // timeout = 10sec
	if e != nil {
		return nil, fmt.Errorf("transport.NewTransport: %v", e)
	}

	c, e := client.New(client.Config{Endpoints: eps, Transport: tr})
	if e != nil {
		return nil, fmt.Errorf("client.New: %v", e)
	}

	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	e = c.Sync(ctx)
	cancel()
	if e != nil {
		return nil, fmt.Errorf("(etc)client.Sync: %v", e)
	}

	return &Etcd{client.NewKeysAPI(c)}, nil
}
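A hypothetical call matching the comment's endpoint format; a scheme-less address gets the http scheme filled in by New, and the error handling here is only for illustration.

	e, err := New("127.0.0.1:4001,http://127.0.0.1:2379") // first address gains "http://"
	if err != nil {
		log.Fatalf("etcc.New: %v", err)
	}
	_ = e // the *Etcd methods themselves are not part of this snippet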
Example 5
func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) {
	u, err := url.Parse(durl)
	if err != nil {
		return nil, err
	}
	token := u.Path
	u.Path = ""
	pf, err := newProxyFunc(dproxyurl)
	if err != nil {
		return nil, err
	}

	// TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
	tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
	if err != nil {
		return nil, err
	}
	tr.Proxy = pf
	cfg := client.Config{
		Transport: tr,
		Endpoints: []string{u.String()},
	}
	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}
	dc := client.NewKeysAPIWithPrefix(c, "")
	return &discovery{
		cluster: token,
		c:       dc,
		id:      id,
		url:     u,
		clock:   clockwork.NewRealClock(),
	}, nil
}
Example 6
func getTransport(c *cli.Context) (*http.Transport, error) {
	cafile := c.GlobalString("ca-file")
	certfile := c.GlobalString("cert-file")
	keyfile := c.GlobalString("key-file")

	// Use an environment variable if nothing was supplied on the
	// command line
	if cafile == "" {
		cafile = os.Getenv("ETCDCTL_CA_FILE")
	}
	if certfile == "" {
		certfile = os.Getenv("ETCDCTL_CERT_FILE")
	}
	if keyfile == "" {
		keyfile = os.Getenv("ETCDCTL_KEY_FILE")
	}

	tls := transport.TLSInfo{
		CAFile:   cafile,
		CertFile: certfile,
		KeyFile:  keyfile,
	}
	return transport.NewTransport(tls)
}
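The flag-then-environment fallback above is repeated for each of the three files; a small hypothetical helper (fromFlagOrEnv is not part of etcdctl) could factor the pattern out, e.g. cafile := fromFlagOrEnv(c.GlobalString("ca-file"), "ETCDCTL_CA_FILE").

// fromFlagOrEnv is a hypothetical helper for the fallback pattern above:
// prefer the value supplied on the command line, otherwise fall back to
// the named environment variable.
func fromFlagOrEnv(flagVal, envKey string) string {
	if flagVal != "" {
		return flagVal
	}
	return os.Getenv(envKey)
}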
Example 7
func handleUpgrade(c *cli.Context) {
	if c.String("old-version") != "1" {
		fmt.Printf("Do not support upgrade from version %s\n", c.String("old-version"))
		os.Exit(1)
	}
	if c.String("new-version") != "2" {
		fmt.Printf("Do not support upgrade to version %s\n", c.String("new-version"))
		os.Exit(1)
	}
	tls := transport.TLSInfo{
		CAFile:   c.String("peer-ca-file"),
		CertFile: c.String("peer-cert-file"),
		KeyFile:  c.String("peer-key-file"),
	}
	t, err := transport.NewTransport(tls)
	if err != nil {
		log.Fatal(err)
	}
	client := http.Client{Transport: t}
	resp, err := client.Get(c.String("peer-url") + "/v2/admin/next-internal-version")
	if err != nil {
		fmt.Printf("Failed to send upgrade request to %s: %v\n", c.String("peer-url"), err)
		return
	}
	if resp.StatusCode == http.StatusOK {
		fmt.Println("Cluster will start upgrading from internal version 1 to 2 in 10 seconds.")
		return
	}
	if resp.StatusCode == http.StatusNotFound {
		fmt.Println("Cluster cannot upgrade to 2: version is not 0.4.7")
		return
	}
	fmt.Printf("Failed to send upgrade request to %s: bad status code %d\n", c.String("peer-url"), resp.StatusCode)
}
Example 8
func getTransport(c *cli.Context) (*http.Transport, error) {
	cafile := c.GlobalString("ca-file")
	certfile := c.GlobalString("cert-file")
	keyfile := c.GlobalString("key-file")

	// Use an environment variable if nothing was supplied on the
	// command line
	if cafile == "" {
		cafile = os.Getenv("ETCDCTL_CA_FILE")
	}
	if certfile == "" {
		certfile = os.Getenv("ETCDCTL_CERT_FILE")
	}
	if keyfile == "" {
		keyfile = os.Getenv("ETCDCTL_KEY_FILE")
	}

	tls := transport.TLSInfo{
		CAFile:   cafile,
		CertFile: certfile,
		KeyFile:  keyfile,
	}

	dialTimeout := defaultDialTimeout
	totalTimeout := c.GlobalDuration("total-timeout")
	if totalTimeout != 0 && totalTimeout < dialTimeout {
		dialTimeout = totalTimeout
	}
	return transport.NewTransport(tls, dialTimeout)
}
Example 9
func getTransport(params *EtcdConnectionParams) (*http.Transport, error) {
	tls := transport.TLSInfo{
		CAFile:   params.CaFile,
		CertFile: params.CertFile,
		KeyFile:  params.KeyFile,
	}

	return transport.NewTransport(tls, params.ConnectionTimeout)
}
Example 10
func getTransport(c *cli.Context) (*http.Transport, error) {
	tls := transport.TLSInfo{
		CAFile:   c.GlobalString("ca-file"),
		CertFile: c.GlobalString("cert-file"),
		KeyFile:  c.GlobalString("key-file"),
	}
	return transport.NewTransport(tls)
}
Example 11
// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
func startProxy(cfg *config) error {
	cls, err := setupCluster(cfg)
	if err != nil {
		return fmt.Errorf("error setting up initial cluster: %v", err)
	}

	if cfg.durl != "" {
		s, err := discovery.GetCluster(cfg.durl, cfg.dproxy)
		if err != nil {
			return err
		}
		if cls, err = etcdserver.NewClusterFromString(cfg.durl, s); err != nil {
			return err
		}
	}

	pt, err := transport.NewTransport(cfg.clientTLSInfo)
	if err != nil {
		return err
	}

	// TODO(jonboulle): update peerURLs dynamically (i.e. when updating
	// clientURLs) instead of just using the initial fixed list here
	peerURLs := cls.PeerURLs()
	uf := func() []string {
		cls, err := etcdserver.GetClusterFromPeers(peerURLs)
		if err != nil {
			log.Printf("proxy: %v", err)
			return []string{}
		}
		return cls.ClientURLs()
	}
	ph := proxy.NewHandler(pt, uf)
	ph = &cors.CORSHandler{
		Handler: ph,
		Info:    cfg.corsInfo,
	}

	if cfg.isReadonlyProxy() {
		ph = proxy.NewReadonlyHandler(ph)
	}
	// Start a proxy server goroutine for each listen address
	for _, u := range cfg.lcurls {
		l, err := transport.NewListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return err
		}

		host := u.Host
		go func() {
			log.Print("proxy: listening for client requests on ", host)
			log.Fatal(http.Serve(l, ph))
		}()
	}
	return nil
}
Example 12
func newHttpTransport(t *testing.T, certFile, keyFile, caFile string) etcd.CancelableTransport {
	tlsInfo := transport.TLSInfo{
		CertFile: certFile,
		KeyFile:  keyFile,
		CAFile:   caFile,
	}
	tr, err := transport.NewTransport(tlsInfo, time.Second)
	if err != nil {
		t.Fatal(err)
	}
	return tr
}
Example 13
func NewTLSClient(url string, certFile, keyFile, caCertFile string) (*Client, error) {
	tr, err := transport.NewTransport(
		transport.TLSInfo{
			CAFile:   caCertFile,
			CertFile: certFile,
			KeyFile:  keyFile,
		},
		defaultDialTimeout,
	)
	if err != nil {
		return nil, err
	}
	return newClient(url, tr)
}
Example 14
// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
func startProxy() {
	cls, err := setupCluster()
	if err != nil {
		log.Fatalf("etcd: error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTransport(clientTLSInfo)
	if err != nil {
		log.Fatal(err)
	}

	// TODO(jonboulle): update peerURLs dynamically (i.e. when updating
	// clientURLs) instead of just using the initial fixed list here
	peerURLs := cls.PeerURLs()
	uf := func() []string {
		cls, err := etcdserver.GetClusterFromPeers(peerURLs)
		if err != nil {
			log.Printf("etcd: %v", err)
			return []string{}
		}
		return cls.ClientURLs()
	}
	ph := proxy.NewHandler(pt, uf)
	ph = &cors.CORSHandler{
		Handler: ph,
		Info:    corsInfo,
	}

	if string(*proxyFlag) == flags.ProxyValueReadonly {
		ph = proxy.NewReadonlyHandler(ph)
	}

	lcurls, err := flags.URLsFromFlags(fs, "listen-client-urls", "bind-addr", clientTLSInfo)
	if err != nil {
		log.Fatal(err.Error())
	}
	// Start a proxy server goroutine for each listen address
	for _, u := range lcurls {
		l, err := transport.NewListener(u.Host, clientTLSInfo)
		if err != nil {
			log.Fatal(err)
		}

		host := u.Host
		go func() {
			log.Print("etcd: proxy listening for client requests on ", host)
			log.Fatal(http.Serve(l, ph))
		}()
	}
}
Example 15
func newTransport(e Etcdtool) *http.Transport {
	tls := transport.TLSInfo{
		CAFile:   e.CA,
		CertFile: e.Cert,
		KeyFile:  e.Key,
	}

	timeout := 30 * time.Second
	tr, err := transport.NewTransport(tls, timeout)
	if err != nil {
		fatal(err.Error())
	}

	return tr
}
Example 16
// TODO: support TLS
func (c *cluster) Launch(t *testing.T) {
	if c.Size <= 0 {
		t.Fatalf("cluster size <= 0")
	}

	lns := make([]net.Listener, c.Size)
	addrs := make([]string, c.Size)
	for i := 0; i < c.Size; i++ {
		l := newLocalListener(t)
		// each member claims only one peer listener
		lns[i] = l
		addrs[i] = fmt.Sprintf("%v=%v", c.name(i), "http://"+l.Addr().String())
	}
	clusterStr := strings.Join(addrs, ",")

	var err error
	for i := 0; i < c.Size; i++ {
		m := member{}
		m.PeerListeners = []net.Listener{lns[i]}
		cln := newLocalListener(t)
		m.ClientListeners = []net.Listener{cln}
		m.Name = c.name(i)
		m.ClientURLs, err = types.NewURLs([]string{"http://" + cln.Addr().String()})
		if err != nil {
			t.Fatal(err)
		}
		m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd")
		if err != nil {
			t.Fatal(err)
		}
		m.Cluster, err = etcdserver.NewClusterFromString(clusterName, clusterStr)
		if err != nil {
			t.Fatal(err)
		}
		m.ClusterState = etcdserver.ClusterStateValueNew
		m.Transport, err = transport.NewTransport(transport.TLSInfo{})
		if err != nil {
			t.Fatal(err)
		}
		// TODO: need the support of graceful stop in Sender to remove this
		m.Transport.DisableKeepAlives = true
		m.Transport.Dial = (&net.Dialer{Timeout: 100 * time.Millisecond}).Dial

		m.Launch(t)
		c.Members = append(c.Members, m)
	}
}
Example 17
func (t *Transport) Start() error {
	var err error
	// Read/write timeout is set for stream roundTripper to promptly
	// find out broken status, which minimizes the number of messages
	// sent on broken connection.
	t.streamRt, err = transport.NewTimeoutTransport(t.TLSInfo, t.DialTimeout, ConnReadTimeout, ConnWriteTimeout)
	if err != nil {
		return err
	}
	t.pipelineRt, err = transport.NewTransport(t.TLSInfo, t.DialTimeout)
	if err != nil {
		return err
	}
	t.remotes = make(map[types.ID]*remote)
	t.peers = make(map[types.ID]Peer)
	t.prober = probing.NewProber(t.pipelineRt)
	return nil
}
Example 18
// New constructs a new kvdb.Kvdb.
func New(
	domain string,
	machines []string,
	options map[string]string,
	fatalErrorCb kvdb.FatalErrorCB,
) (kvdb.Kvdb, error) {
	if len(machines) == 0 {
		machines = defaultMachines
	}

	etcdCommon := ec.NewEtcdCommon(options)
	tls, username, password, err := etcdCommon.GetAuthInfoFromOptions()
	if err != nil {
		return nil, err
	}
	tr, err := transport.NewTransport(tls, ec.DefaultDialTimeout)
	if err != nil {
		return nil, err
	}
	cfg := e.Config{
		Endpoints: machines,
		Transport: tr,
		Username:  username,
		Password:  password,
		// The time required for a request to fail - 10 sec
		HeaderTimeoutPerRequest: 10 * time.Second,
	}
	c, err := e.New(cfg)
	if err != nil {
		return nil, err
	}
	if domain != "" && !strings.HasSuffix(domain, "/") {
		domain = domain + "/"
	}
	return &etcdKV{
		common.BaseKvdb{FatalCb: fatalErrorCb},
		e.NewKeysAPI(c),
		e.NewAuthUserAPI(c),
		e.NewAuthRoleAPI(c),
		domain,
		etcdCommon,
	}, nil
}
Example 19
// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
func startProxy() {
	pt, err := transport.NewTransport(clientTLSInfo)
	if err != nil {
		log.Fatal(err)
	}

	ph, err := proxy.NewHandler(pt, (*cluster).PeerURLs())
	if err != nil {
		log.Fatal(err)
	}

	ph = &pkg.CORSHandler{
		Handler: ph,
		Info:    cors,
	}

	if string(*proxyFlag) == flagtypes.ProxyValueReadonly {
		ph = proxy.NewReadonlyHandler(ph)
	}

	lcurls, err := pkg.URLsFromFlags(flag.CommandLine, "listen-client-urls", "bind-addr", clientTLSInfo)
	if err != nil {
		log.Fatal(err.Error())
	}
	// Start a proxy server goroutine for each listen address
	for _, u := range lcurls {
		l, err := transport.NewListener(u.Host, clientTLSInfo)
		if err != nil {
			log.Fatal(err)
		}

		host := u.Host
		go func() {
			log.Print("Listening for client requests on ", host)
			log.Fatal(http.Serve(l, ph))
		}()
	}
}
Example 20
func main() {
	c := config{}
	initFlags(&c)
	pflag.Parse()
	validateFlags(&c)

	cfg := etcd.Config{}

	if c.etcdSecure == true {
		tlsinfo := transport.TLSInfo{
			CertFile:       c.etcdCertfile,
			KeyFile:        c.etcdKeyfile,
			CAFile:         c.etcdCafile,
			TrustedCAFile:  c.etcdCafile,
			ClientCertAuth: true,
		}
		trans, err := transport.NewTransport(tlsinfo)
		if err != nil {
			glog.Fatalf("misconfigured TLSInfo in transport: %v", err)
		}
		cfg = etcd.Config{
			Endpoints: c.etcdServers,
			Transport: trans,
		}
	} else {
		cfg = etcd.Config{
			Endpoints: c.etcdServers,
		}
	}

	etcdClient, err := etcd.New(cfg)
	if err != nil {
		glog.Fatalf("misconfigured etcd: %v", err)
	}

	c.leaseAndUpdateLoop(&etcdClient)
}
Example 21
func newEtcdClient(c *EtcdConfig) (etcd.KeysAPI, error) {
	tlsInfo := transport.TLSInfo{
		CertFile: c.Certfile,
		KeyFile:  c.Keyfile,
		CAFile:   c.CAFile,
	}

	t, err := transport.NewTransport(tlsInfo)
	if err != nil {
		return nil, err
	}

	cli, err := etcd.New(etcd.Config{
		Endpoints: c.Endpoints,
		Transport: t,
		Username:  c.Username,
		Password:  c.Password,
	})
	if err != nil {
		return nil, err
	}

	return etcd.NewKeysAPI(cli), nil
}
Example 22
// NewTestClient creates a new HTTP client with keep-alives disabled.
func NewTestClient() *testHttpClient {
	tr, _ := transport.NewTransport(transport.TLSInfo{}, time.Second)
	tr.DisableKeepAlives = true
	return &testHttpClient{&http.Client{Transport: tr}}
}
Example 23
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	st := store.New(StoreClusterPrefix, StoreKeysPrefix)
	var w *wal.WAL
	var n raft.Node
	var s *raftStorage
	var id types.ID
	var cl *cluster

	if !cfg.V3demo && fileutil.Exist(path.Join(cfg.StorageDir(), databaseFilename)) {
		return nil, errors.New("experimental-v3demo cannot be disabled once it is enabled")
	}

	// Run the migrations.
	dataVer, err := version.DetectDataDir(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if err := upgradeDataDir(cfg.DataDir, cfg.Name, dataVer); err != nil {
		return nil, err
	}

	err = os.MkdirAll(cfg.MemberDir(), privateDirMode)
	if err != nil && err != os.ErrExist {
		return nil, err
	}

	haveWAL := wal.Exist(cfg.WALDir())
	ss := snap.New(cfg.SnapDir())

	pt, err := transport.NewTransport(cfg.PeerTLSInfo, cfg.peerDialTimeout())
	if err != nil {
		return nil, err
	}
	var remotes []*Member
	switch {
	case !haveWAL && !cfg.NewCluster:
		if err := cfg.VerifyJoinExisting(); err != nil {
			return nil, err
		}
		cl, err = newClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		existingCluster, err := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), pt)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, pt) {
			return nil, fmt.Errorf("incompatible with the current running cluster")
		}

		remotes = existingCluster.Members()
		cl.SetID(existingCluster.id)
		cl.SetStore(st)
		cfg.Print()
		id, n, s, w = startNode(cfg, cl, nil)
	case !haveWAL && cfg.NewCluster:
		if err := cfg.VerifyBootstrap(); err != nil {
			return nil, err
		}
		cl, err = newClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		m := cl.MemberByName(cfg.Name)
		if isMemberBootstrapped(cl, cfg.Name, pt) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			var str string
			var err error
			str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
			if err != nil {
				return nil, &discoveryError{op: "join", err: err}
			}
			urlsmap, err := types.NewURLsMap(str)
			if err != nil {
				return nil, err
			}
			if checkDuplicateURL(urlsmap) {
				return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
			}
			if cl, err = newClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
				return nil, err
			}
		}
		cl.SetStore(st)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
	case haveWAL:
		if err := fileutil.IsDirWriteable(cfg.DataDir); err != nil {
			return nil, fmt.Errorf("cannot write to data directory: %v", err)
		}

		if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
			return nil, fmt.Errorf("cannot write to member directory: %v", err)
		}

		if err := fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
			return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
		}

		if cfg.ShouldDiscover() {
			plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		var snapshot *raftpb.Snapshot
		var err error
		snapshot, err = ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err := st.Recovery(snapshot.Data); err != nil {
				plog.Panicf("recovered store from snapshot error: %v", err)
			}
			plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
		}
		cfg.Print()
		if !cfg.ForceNewCluster {
			id, cl, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}
		cl.SetStore(st)
		cl.Recover()
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	sstats.Initialize()
	lstats := stats.NewLeaderStats(id.String())

	srv := &EtcdServer{
		cfg:       cfg,
		snapCount: cfg.SnapCount,
		errorc:    make(chan error, 1),
		store:     st,
		r: raftNode{
			Node:        n,
			ticker:      time.Tick(time.Duration(cfg.TickMs) * time.Millisecond),
			raftStorage: s,
			storage:     NewStorage(w, ss),
		},
		id:            id,
		attributes:    Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		cluster:       cl,
		stats:         sstats,
		lstats:        lstats,
		SyncTicker:    time.Tick(500 * time.Millisecond),
		versionTr:     pt,
		reqIDGen:      idutil.NewGenerator(uint8(id), time.Now()),
		forceVersionC: make(chan struct{}),
	}

	if cfg.V3demo {
		err = os.MkdirAll(cfg.StorageDir(), privateDirMode)
		if err != nil && err != os.ErrExist {
			return nil, err
		}
		srv.kv = dstorage.New(path.Join(cfg.StorageDir(), databaseFilename), &srv.consistIndex)
		if err := srv.kv.Restore(); err != nil {
			plog.Fatalf("v3 storage restore error: %v", err)
		}
		s.snapStore = newSnapshotStore(cfg.StorageDir(), srv.kv)
	}

	// TODO: move transport initialization near the definition of remote
	tr := &rafthttp.Transport{
		TLSInfo:     cfg.PeerTLSInfo,
		DialTimeout: cfg.peerDialTimeout(),
		ID:          id,
		ClusterID:   cl.ID(),
		Raft:        srv,
		SnapSaver:   s.snapStore,
		ServerStats: sstats,
		LeaderStats: lstats,
		ErrorC:      srv.errorc,
		V3demo:      cfg.V3demo,
	}
	if err := tr.Start(); err != nil {
		return nil, err
	}
	// add all remotes into transport
	for _, m := range remotes {
		if m.ID != id {
			tr.AddRemote(m.ID, m.PeerURLs)
		}
	}
	for _, m := range cl.Members() {
		if m.ID != id {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.r.transport = tr

	if cfg.V3demo {
		s.snapStore.tr = tr
	}

	return srv, nil
}
Example 24
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd() {
	cls, err := setupCluster()
	if err != nil {
		log.Fatalf("etcd: error setting up initial cluster: %v", err)
	}

	if *dir == "" {
		*dir = fmt.Sprintf("%v.etcd", *name)
		log.Printf("etcd: no data-dir provided, using default data-dir ./%s", *dir)
	}
	if err := os.MkdirAll(*dir, privateDirMode); err != nil {
		log.Fatalf("etcd: cannot create data directory: %v", err)
	}
	if err := fileutil.IsDirWriteable(*dir); err != nil {
		log.Fatalf("etcd: cannot write to data directory: %v", err)
	}

	pt, err := transport.NewTransport(peerTLSInfo)
	if err != nil {
		log.Fatal(err)
	}

	acurls, err := flags.URLsFromFlags(fs, "advertise-client-urls", "addr", clientTLSInfo)
	if err != nil {
		log.Fatal(err.Error())
	}
	cfg := &etcdserver.ServerConfig{
		Name:         *name,
		ClientURLs:   acurls,
		DataDir:      *dir,
		SnapCount:    *snapCount,
		Cluster:      cls,
		DiscoveryURL: *durl,
		ClusterState: *clusterState,
		Transport:    pt,
	}
	s := etcdserver.NewServer(cfg)
	s.Start()

	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s)

	lpurls, err := flags.URLsFromFlags(fs, "listen-peer-urls", "peer-bind-addr", peerTLSInfo)
	if err != nil {
		log.Fatal(err.Error())
	}

	for _, u := range lpurls {
		l, err := transport.NewListener(u.Host, peerTLSInfo)
		if err != nil {
			log.Fatal(err)
		}

		// Start the peer server in a goroutine
		urlStr := u.String()
		go func() {
			log.Print("etcd: listening for peers on ", urlStr)
			log.Fatal(http.Serve(l, ph))
		}()
	}

	lcurls, err := flags.URLsFromFlags(fs, "listen-client-urls", "bind-addr", clientTLSInfo)
	if err != nil {
		log.Fatal(err.Error())
	}

	// Start a client server goroutine for each listen address
	for _, u := range lcurls {
		l, err := transport.NewListener(u.Host, clientTLSInfo)
		if err != nil {
			log.Fatal(err)
		}

		urlStr := u.String()
		go func() {
			log.Print("etcd: listening for client requests on ", urlStr)
			log.Fatal(http.Serve(l, ch))
		}()
	}
}
Example 25
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd() {
	self := cluster.FindName(*name)
	if self == nil {
		log.Fatalf("etcd: no member with name=%q exists", *name)
	}

	if self.ID == raft.None {
		log.Fatalf("etcd: cannot use None(%d) as member id", raft.None)
	}

	if *dir == "" {
		*dir = fmt.Sprintf("%v_etcd_data", self.ID)
		log.Printf("main: no data-dir provided, using default data-dir ./%s", *dir)
	}
	if err := os.MkdirAll(*dir, privateDirMode); err != nil {
		log.Fatalf("main: cannot create data directory: %v", err)
	}

	pt, err := transport.NewTransport(peerTLSInfo)
	if err != nil {
		log.Fatal(err)
	}

	acurls, err := pkg.URLsFromFlags(flag.CommandLine, "advertise-client-urls", "addr", clientTLSInfo)
	if err != nil {
		log.Fatal(err.Error())
	}
	cfg := &etcdserver.ServerConfig{
		Name:       *name,
		ClientURLs: acurls,
		DataDir:    *dir,
		SnapCount:  int64(*snapCount),
		Cluster:    cluster,
		Transport:  pt,
	}
	s := etcdserver.NewServer(cfg)
	s.Start()

	ch := &pkg.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    cors,
	}
	ph := etcdhttp.NewPeerHandler(s)

	lpurls, err := pkg.URLsFromFlags(flag.CommandLine, "listen-peer-urls", "peer-bind-addr", peerTLSInfo)
	if err != nil {
		log.Fatal(err.Error())
	}

	for _, u := range lpurls {
		l, err := transport.NewListener(u.Host, peerTLSInfo)
		if err != nil {
			log.Fatal(err)
		}

		// Start the peer server in a goroutine
		urlStr := u.String()
		go func() {
			log.Print("Listening for peers on ", urlStr)
			log.Fatal(http.Serve(l, ph))
		}()
	}

	lcurls, err := pkg.URLsFromFlags(flag.CommandLine, "listen-client-urls", "bind-addr", clientTLSInfo)
	if err != nil {
		log.Fatal(err.Error())
	}

	// Start a client server goroutine for each listen address
	for _, u := range lcurls {
		l, err := transport.NewListener(u.Host, clientTLSInfo)
		if err != nil {
			log.Fatal(err)
		}

		urlStr := u.String()
		go func() {
			log.Print("Listening for client requests on ", urlStr)
			log.Fatal(http.Serve(l, ch))
		}()
	}
}
Example 26
File: etcd.go Project: dterei/etcd
// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
func startProxy() error {
	apurls, err := flags.URLsFromFlags(fs, "initial-advertise-peer-urls", "addr", peerTLSInfo)
	if err != nil {
		return err
	}
	cls, err := setupCluster(apurls)
	if err != nil {
		return fmt.Errorf("error setting up initial cluster: %v", err)
	}

	if *durl != "" {
		s, err := discovery.GetCluster(*durl, *dproxy)
		if err != nil {
			return err
		}
		if cls, err = etcdserver.NewClusterFromString(*durl, s); err != nil {
			return err
		}
	}

	pt, err := transport.NewTransport(clientTLSInfo)
	if err != nil {
		return err
	}

	// TODO(jonboulle): update peerURLs dynamically (i.e. when updating
	// clientURLs) instead of just using the initial fixed list here
	peerURLs := cls.PeerURLs()
	uf := func() []string {
		cls, err := etcdserver.GetClusterFromPeers(peerURLs)
		if err != nil {
			log.Printf("proxy: %v", err)
			return []string{}
		}
		return cls.ClientURLs()
	}
	ph := proxy.NewHandler(pt, uf)
	ph = &cors.CORSHandler{
		Handler: ph,
		Info:    corsInfo,
	}

	if proxyFlag.String() == proxyFlagReadonly {
		ph = proxy.NewReadonlyHandler(ph)
	}
	lcurls, err := flags.URLsFromFlags(fs, "listen-client-urls", "bind-addr", clientTLSInfo)
	if err != nil {
		return err
	}
	// Start a proxy server goroutine for each listen address
	for _, u := range lcurls {
		l, err := transport.NewListener(u.Host, u.Scheme, clientTLSInfo)
		if err != nil {
			return err
		}

		host := u.Host
		go func() {
			log.Print("proxy: listening for client requests on ", host)
			log.Fatal(http.Serve(l, ph))
		}()
	}
	return nil
}
Example 27
// newEtcdBackend constructs an etcd backend using a given machine address.
func newEtcdBackend(conf map[string]string) (Backend, error) {
	// Get the etcd path from the configuration.
	path, ok := conf["path"]
	if !ok {
		path = "/vault"
	}

	// Ensure path is prefixed.
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}

	// Set a default machines list and check for an overriding address value.
	machines := "http://127.0.0.1:2379"
	if address, ok := conf["address"]; ok {
		machines = address
	}
	machinesParsed := strings.Split(machines, EtcdMachineDelimiter)

	// Verify that the machines are valid URLs
	for _, machine := range machinesParsed {
		u, urlErr := url.Parse(machine)
		if urlErr != nil || u.Scheme == "" {
			return nil, EtcdAddressError
		}
	}

	// Create a new client from the supplied address and attempt to sync with the
	// cluster.
	var cTransport client.CancelableTransport
	cert, hasCert := conf["tls_cert_file"]
	key, hasKey := conf["tls_key_file"]
	ca, hasCa := conf["tls_ca_file"]
	if (hasCert && hasKey) || hasCa {
		var transportErr error
		tls := transport.TLSInfo{
			CAFile:   ca,
			CertFile: cert,
			KeyFile:  key,
		}
		cTransport, transportErr = transport.NewTransport(tls, 30*time.Second)

		if transportErr != nil {
			return nil, transportErr
		}
	} else {
		cTransport = client.DefaultTransport
	}

	cfg := client.Config{
		Endpoints: machinesParsed,
		Transport: cTransport,
	}

	// Set credentials.
	username := os.Getenv("ETCD_USERNAME")
	if username == "" {
		username, _ = conf["username"]
	}

	password := os.Getenv("ETCD_PASSWORD")
	if password == "" {
		password, _ = conf["password"]
	}

	if username != "" && password != "" {
		cfg.Username = username
		cfg.Password = password
	}

	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	syncErr := c.Sync(ctx)
	cancel()
	if syncErr != nil {
		return nil, EtcdSyncClusterError
	}

	kAPI := client.NewKeysAPI(c)

	// Setup the backend.
	return &EtcdBackend{
		path:       path,
		kAPI:       kAPI,
		permitPool: NewPermitPool(DefaultParallelOperations),
	}, nil
}
Example 28
// newEtcdBackend constructs an etcd backend using a given machine address.
func newEtcdBackend(conf map[string]string) (Backend, error) {
	// Get the etcd path from the configuration.
	path, ok := conf["path"]
	if !ok {
		path = "/vault"
	}

	// Ensure path is prefixed.
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}

	// Set a default machines list and check for an overriding address value.
	machines := "http://127.0.0.1:2379"
	if address, ok := conf["address"]; ok {
		machines = address
	}
	machinesParsed := strings.Split(machines, EtcdMachineDelimiter)

	// Verify that the machines are valid URLs
	for _, machine := range machinesParsed {
		u, urlErr := url.Parse(machine)
		if urlErr != nil || u.Scheme == "" {
			return nil, EtcdAddressError
		}
	}

	// Create a new client from the supplied address and attempt to sync with the
	// cluster.
	var cTransport client.CancelableTransport
	cert, hasCert := conf["tls_cert_file"]
	key, hasKey := conf["tls_key_file"]
	ca, hasCa := conf["tls_ca_file"]
	if (hasCert && hasKey) || hasCa {
		var transportErr error
		tls := transport.TLSInfo{
			CAFile:   ca,
			CertFile: cert,
			KeyFile:  key,
		}
		cTransport, transportErr = transport.NewTransport(tls, 30*time.Second)

		if transportErr != nil {
			return nil, transportErr
		}
	} else {
		cTransport = client.DefaultTransport
	}

	cfg := client.Config{
		Endpoints: machinesParsed,
		Transport: cTransport,
	}

	// Set credentials.
	username := os.Getenv("ETCD_USERNAME")
	if username == "" {
		username, _ = conf["username"]
	}

	password := os.Getenv("ETCD_PASSWORD")
	if password == "" {
		password, _ = conf["password"]
	}

	if username != "" && password != "" {
		cfg.Username = username
		cfg.Password = password
	}

	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}

	// Should we sync the cluster state? There are three available options
	// for our client library: don't sync (required for some proxies), sync
	// once, or sync periodically with AutoSync.  We currently support the
	// first two.
	sync, ok := conf["sync"]
	if !ok {
		sync = "yes"
	}
	switch sync {
	case "yes", "true", "y", "1":
		ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
		syncErr := c.Sync(ctx)
		cancel()
		if syncErr != nil {
			return nil, fmt.Errorf("%s: %s", EtcdSyncClusterError, syncErr)
		}
	case "no", "false", "n", "0":
	default:
		return nil, fmt.Errorf("value of 'sync' could not be understood")
	}

	kAPI := client.NewKeysAPI(c)

	// Setup the backend.
	return &EtcdBackend{
		path:       path,
		kAPI:       kAPI,
		permitPool: NewPermitPool(DefaultParallelOperations),
	}, nil
}
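The comment above mentions a third mode, periodic re-sync via AutoSync, which this backend does not wire up. A minimal sketch of what that might look like with the etcd v2 client follows; the helper name, the background goroutine, and the choice to ignore context.Canceled are assumptions, not part of the backend.

// keepEndpointsSynced is a hypothetical helper: it runs the client's AutoSync
// in the background so the endpoint list follows cluster membership changes.
// AutoSync blocks and re-syncs at the given interval until its context ends.
func keepEndpointsSynced(c client.Client, interval time.Duration) {
	go func() {
		if err := c.AutoSync(context.Background(), interval); err != nil && err != context.Canceled {
			log.Printf("etcd AutoSync stopped: %v", err)
		}
	}()
}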
Example 29
// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
func startProxy(cfg *config) error {
	urlsmap, _, err := getPeerURLsMapAndToken(cfg)
	if err != nil {
		return fmt.Errorf("error setting up initial cluster: %v", err)
	}

	if cfg.durl != "" {
		s, err := discovery.GetCluster(cfg.durl, cfg.dproxy)
		if err != nil {
			return err
		}
		if urlsmap, err = types.NewURLsMap(s); err != nil {
			return err
		}
	}

	pt, err := transport.NewTransport(cfg.clientTLSInfo)
	pt.MaxIdleConnsPerHost = proxy.DefaultMaxIdleConnsPerHost
	if err != nil {
		return err
	}

	tr, err := transport.NewTransport(cfg.peerTLSInfo)
	if err != nil {
		return err
	}

	cfg.dir = path.Join(cfg.dir, "proxy")
	err = os.MkdirAll(cfg.dir, 0700)
	if err != nil {
		return err
	}

	var peerURLs []string
	clusterfile := path.Join(cfg.dir, "cluster")

	b, err := ioutil.ReadFile(clusterfile)
	switch {
	case err == nil:
		urls := struct{ PeerURLs []string }{}
		err := json.Unmarshal(b, &urls)
		if err != nil {
			return err
		}
		peerURLs = urls.PeerURLs
		plog.Infof("proxy: using peer urls %v from cluster file ./%s", peerURLs, clusterfile)
	case os.IsNotExist(err):
		peerURLs = urlsmap.URLs()
		plog.Infof("proxy: using peer urls %v ", peerURLs)
	default:
		return err
	}

	clientURLs := []string{}
	uf := func() []string {
		gcls, err := etcdserver.GetClusterFromRemotePeers(peerURLs, tr)
		// TODO: remove the 2nd check when we fix GetClusterFromPeers
		// GetClusterFromPeers should not return nil error with an invalid empty cluster
		if err != nil {
			plog.Warningf("proxy: %v", err)
			return []string{}
		}
		if len(gcls.Members()) == 0 {
			return clientURLs
		}
		clientURLs = gcls.ClientURLs()

		urls := struct{ PeerURLs []string }{gcls.PeerURLs()}
		b, err := json.Marshal(urls)
		if err != nil {
			plog.Warningf("proxy: error on marshal peer urls %s", err)
			return clientURLs
		}

		err = ioutil.WriteFile(clusterfile+".bak", b, 0600)
		if err != nil {
			plog.Warningf("proxy: error on writing urls %s", err)
			return clientURLs
		}
		err = os.Rename(clusterfile+".bak", clusterfile)
		if err != nil {
			plog.Warningf("proxy: error on updating clusterfile %s", err)
			return clientURLs
		}
		if !reflect.DeepEqual(gcls.PeerURLs(), peerURLs) {
			plog.Noticef("proxy: updated peer urls in cluster file from %v to %v", peerURLs, gcls.PeerURLs())
		}
		peerURLs = gcls.PeerURLs()

		return clientURLs
	}
	ph := proxy.NewHandler(pt, uf)
	ph = &cors.CORSHandler{
		Handler: ph,
		Info:    cfg.corsInfo,
	}

	if cfg.isReadonlyProxy() {
		ph = proxy.NewReadonlyHandler(ph)
	}
	// Start a proxy server goroutine for each listen address
	for _, u := range cfg.lcurls {
		l, err := transport.NewListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return err
		}

		host := u.Host
		go func() {
			plog.Info("proxy: listening for client requests on ", host)
			plog.Fatal(http.Serve(l, ph))
		}()
	}
	return nil
}