// getPeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
func getPeerURLsMapAndToken(cfg *config, which string) (urlsmap types.URLsMap, token string, err error) {
	switch {
	case cfg.durl != "":
		urlsmap = types.URLsMap{}
		// If using discovery, generate a temporary cluster based on
		// self's advertised peer URLs
		urlsmap[cfg.name] = cfg.apurls
		token = cfg.durl
	case cfg.dnsCluster != "":
		var clusterStr string
		clusterStr, token, err = discovery.SRVGetCluster(cfg.name, cfg.dnsCluster, cfg.initialClusterToken, cfg.apurls)
		if err != nil {
			return nil, "", err
		}
		urlsmap, err = types.NewURLsMap(clusterStr)
		// Only an etcd member must belong to the discovered cluster;
		// a proxy does not need to.
		if which == "etcd" {
			if _, ok := urlsmap[cfg.name]; !ok {
				return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.name)
			}
		}
	default:
		// We're statically configured, and the cluster has appropriately been set.
		urlsmap, err = types.NewURLsMap(cfg.initialCluster)
		token = cfg.initialClusterToken
	}
	return urlsmap, token, err
}
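// The example below is not part of the original source; it is a minimal sketch
// of the "name=url[,name=url...]" string format that types.NewURLsMap parses in
// the static-configuration branch of getPeerURLsMapAndToken. The member names
// and addresses are invented for illustration.
func ExampleNewURLsMap_staticCluster() {
	urlsmap, err := types.NewURLsMap("infra1=http://10.0.1.10:2380,infra2=http://10.0.1.11:2380")
	if err != nil {
		// A malformed initial-cluster string (missing "=", bad URL) is reported here.
		panic(err)
	}
	// Each member name maps to the peer URLs advertised under it.
	fmt.Println(urlsmap["infra1"][0].String())
	// Output: http://10.0.1.10:2380
}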
// Clone returns a member with the same server configuration. The returned
// member will not have PeerListeners or ClientListeners set.
func (m *member) Clone(t *testing.T) *member {
	mm := &member{}
	mm.ServerConfig = m.ServerConfig

	var err error
	clientURLStrs := m.ClientURLs.StringSlice()
	mm.ClientURLs, err = types.NewURLs(clientURLStrs)
	if err != nil {
		// this should never fail
		panic(err)
	}
	peerURLStrs := m.PeerURLs.StringSlice()
	mm.PeerURLs, err = types.NewURLs(peerURLStrs)
	if err != nil {
		// this should never fail
		panic(err)
	}
	clusterStr := m.InitialPeerURLsMap.String()
	mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
	if err != nil {
		// this should never fail
		panic(err)
	}
	mm.InitialClusterToken = m.InitialClusterToken
	mm.ElectionTicks = m.ElectionTicks
	mm.PeerTLSInfo = m.PeerTLSInfo
	return mm
}
// mustNewMember returns an initialized member with the given name. If usePeerTLS is
// true, it sets PeerTLSInfo and uses the https scheme to communicate between
// peers.
func mustNewMember(t *testing.T, name string, usePeerTLS bool) *member {
	var (
		testTLSInfo = transport.TLSInfo{
			KeyFile:        "./fixtures/server.key.insecure",
			CertFile:       "./fixtures/server.crt",
			TrustedCAFile:  "./fixtures/ca.crt",
			ClientCertAuth: true,
		}
		err error
	)
	m := &member{}

	peerScheme := "http"
	if usePeerTLS {
		peerScheme = "https"
	}

	pln := newLocalListener(t)
	m.PeerListeners = []net.Listener{pln}
	m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()})
	if err != nil {
		t.Fatal(err)
	}
	if usePeerTLS {
		m.PeerTLSInfo = testTLSInfo
	}

	cln := newLocalListener(t)
	m.ClientListeners = []net.Listener{cln}
	m.ClientURLs, err = types.NewURLs([]string{"http://" + cln.Addr().String()})
	if err != nil {
		t.Fatal(err)
	}

	m.Name = name

	m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd")
	if err != nil {
		t.Fatal(err)
	}
	clusterStr := fmt.Sprintf("%s=%s://%s", name, peerScheme, pln.Addr().String())
	m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
	if err != nil {
		t.Fatal(err)
	}
	m.InitialClusterToken = clusterName
	m.NewCluster = true
	m.ServerConfig.PeerTLSInfo = m.PeerTLSInfo
	m.ElectionTicks = electionTicks
	m.TickMs = uint(tickDuration / time.Millisecond)
	return m
}
func nodesToCluster(ns []*client.Node, size int) (string, error) {
	s := make([]string, len(ns))
	for i, n := range ns {
		s[i] = n.Value
	}
	us := strings.Join(s, ",")
	m, err := types.NewURLsMap(us)
	if err != nil {
		return us, ErrInvalidURL
	}
	if m.Len() != size {
		return us, ErrDuplicateName
	}
	return us, nil
}
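// The test below is not part of the original source; it is a small sketch of
// how the size check in nodesToCluster surfaces duplicate member names.
// types.NewURLsMap groups URLs by name, so two discovery entries that register
// the same name collapse into a single key and Len() falls short of the
// expected cluster size. It assumes it lives in the same package as
// nodesToCluster; the names and URLs are invented.
func TestNodesToClusterDuplicateNameSketch(t *testing.T) {
	ns := []*client.Node{
		{Value: "node1=http://10.0.1.10:2380"},
		{Value: "node1=http://10.0.1.11:2380"},
	}
	if _, err := nodesToCluster(ns, 2); err != ErrDuplicateName {
		t.Fatalf("expected ErrDuplicateName, got %v", err)
	}
}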
func TestConfigVerifyExistingWithDiscoveryURLFail(t *testing.T) {
	cluster, err := types.NewURLsMap("node1=http://127.0.0.1:2380")
	if err != nil {
		t.Fatalf("NewURLsMap error: %v", err)
	}
	c := &ServerConfig{
		Name:               "node1",
		DiscoveryURL:       "http://127.0.0.1:2379/abcdefg",
		PeerURLs:           mustNewURLs(t, []string{"http://127.0.0.1:2380"}),
		InitialPeerURLsMap: cluster,
		NewCluster:         false,
	}
	if err := c.VerifyJoinExisting(); err == nil {
		t.Errorf("err = nil, want not nil")
	}
}
func fillClusterForMembers(ms []*member) error {
	addrs := make([]string, 0)
	for _, m := range ms {
		scheme := "http"
		if !m.PeerTLSInfo.Empty() {
			scheme = "https"
		}
		for _, l := range m.PeerListeners {
			addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String()))
		}
	}
	clusterStr := strings.Join(addrs, ",")
	var err error
	for _, m := range ms {
		m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
		if err != nil {
			return err
		}
	}
	return nil
}
// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
func startProxy(cfg *config) error {
	urlsmap, _, err := getPeerURLsMapAndToken(cfg, "proxy")
	if err != nil {
		return fmt.Errorf("error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, time.Duration(cfg.proxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.proxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.proxyWriteTimeoutMs)*time.Millisecond)
	if err != nil {
		return err
	}
	pt.MaxIdleConnsPerHost = proxy.DefaultMaxIdleConnsPerHost

	tr, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, time.Duration(cfg.proxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.proxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.proxyWriteTimeoutMs)*time.Millisecond)
	if err != nil {
		return err
	}

	cfg.dir = path.Join(cfg.dir, "proxy")
	err = os.MkdirAll(cfg.dir, 0700)
	if err != nil {
		return err
	}

	var peerURLs []string
	clusterfile := path.Join(cfg.dir, "cluster")

	b, err := ioutil.ReadFile(clusterfile)
	switch {
	case err == nil:
		if cfg.durl != "" {
			plog.Warningf("discovery token ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
		}
		urls := struct{ PeerURLs []string }{}
		err := json.Unmarshal(b, &urls)
		if err != nil {
			return err
		}
		peerURLs = urls.PeerURLs
		plog.Infof("proxy: using peer urls %v from cluster file %q", peerURLs, clusterfile)
	case os.IsNotExist(err):
		if cfg.durl != "" {
			s, err := discovery.GetCluster(cfg.durl, cfg.dproxy)
			if err != nil {
				return err
			}
			if urlsmap, err = types.NewURLsMap(s); err != nil {
				return err
			}
		}
		peerURLs = urlsmap.URLs()
		plog.Infof("proxy: using peer urls %v ", peerURLs)
	default:
		return err
	}

	clientURLs := []string{}
	uf := func() []string {
		gcls, err := etcdserver.GetClusterFromRemotePeers(peerURLs, tr)
		// TODO: remove the 2nd check when we fix GetClusterFromPeers
		// GetClusterFromPeers should not return nil error with an invalid empty cluster
		if err != nil {
			plog.Warningf("proxy: %v", err)
			return []string{}
		}
		if len(gcls.Members()) == 0 {
			return clientURLs
		}
		clientURLs = gcls.ClientURLs()

		urls := struct{ PeerURLs []string }{gcls.PeerURLs()}
		b, err := json.Marshal(urls)
		if err != nil {
			plog.Warningf("proxy: error on marshal peer urls %s", err)
			return clientURLs
		}

		err = ioutil.WriteFile(clusterfile+".bak", b, 0600)
		if err != nil {
			plog.Warningf("proxy: error on writing urls %s", err)
			return clientURLs
		}
		err = os.Rename(clusterfile+".bak", clusterfile)
		if err != nil {
			plog.Warningf("proxy: error on updating clusterfile %s", err)
			return clientURLs
		}
		if !reflect.DeepEqual(gcls.PeerURLs(), peerURLs) {
			plog.Noticef("proxy: updated peer urls in cluster file from %v to %v", peerURLs, gcls.PeerURLs())
		}
		peerURLs = gcls.PeerURLs()

		return clientURLs
	}
	ph := proxy.NewHandler(pt, uf, time.Duration(cfg.proxyFailureWaitMs)*time.Millisecond, time.Duration(cfg.proxyRefreshIntervalMs)*time.Millisecond)
	ph = &cors.CORSHandler{
		Handler: ph,
		Info:    cfg.corsInfo,
	}

	if cfg.isReadonlyProxy() {
		ph = proxy.NewReadonlyHandler(ph)
	}
	// Start a proxy server goroutine for each listen address
	for _, u := range cfg.lcurls {
		l, err := transport.NewListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return err
		}

		host := u.String()
		go func() {
			plog.Info("proxy: listening for client requests on ", host)
			mux := http.NewServeMux()
			mux.Handle("/metrics", prometheus.Handler())
			mux.Handle("/", ph)
			plog.Fatal(http.Serve(l, mux))
		}()
	}
	return nil
}
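// The helper below is not part of the original source; it is a minimal sketch
// of the on-disk cluster file that startProxy reads at startup and rewrites
// from its refresh function: a JSON object with a single PeerURLs field,
// written to a ".bak" file and then renamed so a crash never leaves a
// half-written file behind. The directory and peer URLs are invented for
// illustration.
func writeProxyClusterFileSketch(dir string) error {
	urls := struct{ PeerURLs []string }{
		PeerURLs: []string{"http://10.0.1.10:2380", "http://10.0.1.11:2380"},
	}
	b, err := json.Marshal(urls)
	if err != nil {
		return err
	}
	clusterfile := path.Join(dir, "cluster")
	if err := ioutil.WriteFile(clusterfile+".bak", b, 0600); err != nil {
		return err
	}
	// Replace the old file in one step, mirroring the write-then-rename
	// pattern used by startProxy above.
	return os.Rename(clusterfile+".bak", clusterfile)
}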
func TestConfigVerifyLocalMember(t *testing.T) {
	tests := []struct {
		clusterSetting string
		apurls         []string
		strict         bool
		shouldError    bool
	}{
		{
			// Node must exist in cluster
			"",
			nil,
			true,
			true,
		},
		{
			// Initial cluster set
			"node1=http://localhost:7001,node2=http://localhost:7002",
			[]string{"http://localhost:7001"},
			true,
			false,
		},
		{
			// Default initial cluster
			"node1=http://localhost:2380,node1=http://localhost:7001",
			[]string{"http://localhost:2380", "http://localhost:7001"},
			true,
			false,
		},
		{
			// Advertised peer URLs must match those in cluster-state
			"node1=http://localhost:7001",
			[]string{"http://localhost:12345"},
			true,
			true,
		},
		{
			// Advertised peer URLs must match those in cluster-state
			"node1=http://localhost:2380,node1=http://localhost:12345",
			[]string{"http://localhost:12345"},
			true,
			true,
		},
		{
			// Advertised peer URLs must match those in cluster-state
			"node1=http://localhost:2380",
			[]string{},
			true,
			true,
		},
		{
			// do not care about the urls if strict is not set
			"node1=http://localhost:2380",
			[]string{},
			false,
			false,
		},
	}

	for i, tt := range tests {
		cluster, err := types.NewURLsMap(tt.clusterSetting)
		if err != nil {
			t.Fatalf("#%d: Got unexpected error: %v", i, err)
		}
		cfg := ServerConfig{
			Name:               "node1",
			InitialPeerURLsMap: cluster,
		}
		if tt.apurls != nil {
			cfg.PeerURLs = mustNewURLs(t, tt.apurls)
		}
		err = cfg.verifyLocalMember(tt.strict)
		if (err == nil) && tt.shouldError {
			t.Errorf("#%d: Got no error where one was expected", i)
		}
		if (err != nil) && !tt.shouldError {
			t.Errorf("#%d: Got unexpected error: %v", i, err)
		}
	}
}