func NewPostgresKeeper(id string, cfg config, stop chan bool, end chan error) (*PostgresKeeper, error) { etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { return nil, fmt.Errorf("cannot create etcd manager: %v", err) } clusterConfig, _, err := e.GetClusterConfig() if err != nil { return nil, fmt.Errorf("cannot get cluster config: %v", err) } log.Debugf(spew.Sprintf("clusterConfig: %#v", clusterConfig)) p := &PostgresKeeper{id: id, dataDir: cfg.dataDir, e: e, listenAddress: cfg.listenAddress, port: cfg.port, pgListenAddress: cfg.pgListenAddress, pgPort: cfg.pgPort, clusterConfig: clusterConfig, stop: stop, end: end, } serverParameters := p.createServerParameters() pgm, err := postgresql.NewManager(id, cfg.pgBinPath, cfg.dataDir, serverParameters, p.getOurConnString(), p.getOurReplConnString(), clusterConfig.PGReplUser, clusterConfig.PGReplPassword, clusterConfig.RequestTimeout) if err != nil { return nil, fmt.Errorf("cannot create postgres manager: %v", err) } p.pgm = pgm return p, nil }
func configReplace(cmd *cobra.Command, args []string) { if crOpts.file == "" { die("no config file provided (--file/-f option)") } config := []byte{} var err error if crOpts.file == "-" { config, err = ioutil.ReadAll(os.Stdin) if err != nil { die("cannot read config file from stdin: %v", err) } } else { config, err = ioutil.ReadFile(crOpts.file) if err != nil { die("cannot read provided config file: %v", err) } } etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { die("error: %v", err) } if err = replaceConfig(e, config); err != nil { die("error: %v", err) } }
func NewClusterChecker(cfg config, C chan pollon.ConfData) *ClusterChecker { etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { log.Fatalf("error: %v", err) } return &ClusterChecker{e: e, C: C} }
func NewClusterChecker(id string, cfg config, C chan pollon.ConfData) (*ClusterChecker, error) { etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { return nil, err } return &ClusterChecker{ id: id, listenAddress: cfg.listenAddress, port: cfg.port, e: e, C: C, }, nil }
func NewClusterChecker(id string, cfg config) (*ClusterChecker, error) { etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { return nil, err } return &ClusterChecker{ id: id, listenAddress: cfg.listenAddress, port: cfg.port, stopListening: cfg.stopListening, e: e, endPollonProxyCh: make(chan error), }, nil }
func NewSentinel(id string, cfg config, stop chan bool, end chan bool) (*Sentinel, error) { etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { return nil, fmt.Errorf("cannot create etcd manager: %v", err) } clusterConfig, _, err := e.GetClusterConfig() if err != nil { return nil, fmt.Errorf("cannot get cluster config: %v", err) } log.Debugf(spew.Sprintf("clusterConfig: %#v", clusterConfig)) lManager := e.NewLeaseManager() return &Sentinel{id: id, e: e, lManager: lManager, clusterConfig: clusterConfig, stop: stop, end: end}, nil }
func configPatch(cmd *cobra.Command, args []string) { if len(args) > 1 { die("too many arguments") } if cpOpts.file == "" && len(args) < 1 { die("no patch provided as argument and no patch file provided (--file/-f option)") } if cpOpts.file != "" && len(args) == 1 { die("only one of patch provided as argument or patch file must provided (--file/-f option)") } config := []byte{} if len(args) == 1 { config = []byte(args[0]) } else { var err error if cpOpts.file == "-" { config, err = ioutil.ReadAll(os.Stdin) if err != nil { die("cannot read config file from stdin: %v", err) } } else { config, err = ioutil.ReadFile(cpOpts.file) if err != nil { die("cannot read provided config file: %v", err) } } } etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { die("error: %v", err) } var nc cluster.NilConfig err = json.Unmarshal(config, &nc) if err != nil { die("failed to marshal config: %v", err) } if err = patchConfig(e, &nc); err != nil { die("failed to patch config: %v", err) } }
func NewPostgresKeeper(id string, cfg config, stop chan bool, end chan error) (*PostgresKeeper, error) { etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { return nil, fmt.Errorf("cannot create etcd manager: %v", err) } cd, _, err := e.GetClusterData() if err != nil { return nil, fmt.Errorf("error retrieving cluster data: %v", err) } var cv *cluster.ClusterView if cd == nil { cv = cluster.NewClusterView() } else { cv = cd.ClusterView } log.Debugf(spew.Sprintf("clusterView: %#v", cv)) clusterConfig := cv.Config.ToConfig() log.Debugf(spew.Sprintf("clusterConfig: %#v", clusterConfig)) p := &PostgresKeeper{id: id, dataDir: cfg.dataDir, e: e, listenAddress: cfg.listenAddress, port: cfg.port, pgListenAddress: cfg.pgListenAddress, pgPort: cfg.pgPort, clusterConfig: clusterConfig, stop: stop, end: end, } followersIDs := cv.GetFollowersIDs(p.id) pgParameters := p.createPGParameters(followersIDs) pgm, err := postgresql.NewManager(id, cfg.pgBinPath, cfg.dataDir, cfg.pgConfDir, pgParameters, p.getOurConnString(), p.getOurReplConnString(), clusterConfig.PGReplUser, clusterConfig.PGReplPassword, clusterConfig.RequestTimeout) if err != nil { return nil, fmt.Errorf("cannot create postgres manager: %v", err) } p.pgm = pgm return p, nil }
func NewSentinel(id string, cfg config, stop chan bool, end chan bool) (*Sentinel, error) { etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { return nil, fmt.Errorf("cannot create etcd manager: %v", err) } cd, _, err := e.GetClusterData() if err != nil { return nil, fmt.Errorf("error retrieving cluster data: %v", err) } var cv *cluster.ClusterView if cd == nil { cv = cluster.NewClusterView() } else { cv = cd.ClusterView } log.Debugf(spew.Sprintf("clusterView: %#v", cv)) clusterConfig := cv.Config.ToConfig() if err != nil { return nil, fmt.Errorf("cannot get cluster config: %v", err) } log.Debugf(spew.Sprintf("clusterConfig: %#v", clusterConfig)) lManager := e.NewLeaseManager() return &Sentinel{ id: id, e: e, listenAddress: cfg.listenAddress, port: cfg.port, lManager: lManager, clusterConfig: clusterConfig, stop: stop, end: end}, nil }
func configGet(cmd *cobra.Command, args []string) { etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { die("error: %v", err) } cfg, err := getConfig(e) if err != nil { die("error: %v", err) } if cfg == nil { stdout("config is not defined") os.Exit(0) } cfgj, err := json.MarshalIndent(cfg, "", "\t") if err != nil { die("failed to marshall config: %v", err) } stdout(string(cfgj)) }
func status(cmd *cobra.Command, args []string) { tabOut := new(tabwriter.Writer) tabOut.Init(os.Stdout, 0, 8, 1, '\t', 0) if cfg.clusterName == "" { die("cluster name required") } etcdPath := filepath.Join(common.EtcdBasePath, cfg.clusterName) e, err := etcdm.NewEtcdManager(cfg.etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { die("cannot create etcd manager: %v", err) } sentinelsInfo, err := e.GetSentinelsInfo() if err != nil { die("cannot get sentinels info: %v", err) } lsi, _, err := e.GetLeaderSentinelInfo() if err != nil { die("cannot get leader sentinel info") } stdout("=== Active sentinels ===") stdout("") if len(sentinelsInfo) == 0 { stdout("No active sentinels") } else { fmt.Fprintf(tabOut, "ID\tLISTENADDRESS\tLEADER\n") for _, si := range sentinelsInfo { leader := false if lsi != nil { if si.ID == lsi.ID { leader = true } } fmt.Fprintf(tabOut, "%s\t%s:%s\t%t\n", si.ID, si.ListenAddress, si.Port, leader) tabOut.Flush() } } proxiesInfo, err := e.GetProxiesInfo() if err != nil { die("cannot get proxies info: %v", err) } stdout("") stdout("=== Active proxies ===") stdout("") if len(proxiesInfo) == 0 { stdout("No active proxies") } else { fmt.Fprintf(tabOut, "ID\tLISTENADDRESS\tCV VERSION\n") for _, pi := range proxiesInfo { fmt.Fprintf(tabOut, "%s\t%s:%s\t%d\n", pi.ID, pi.ListenAddress, pi.Port, pi.ClusterViewVersion) tabOut.Flush() } } clusterData, _, err := e.GetClusterData() if err != nil { die("cannot get cluster data: %v", err) } if clusterData == nil { die("cluster data not available: %v", err) } cv := clusterData.ClusterView ks := clusterData.KeepersState stdout("") stdout("=== Keepers ===") stdout("") if ks == nil { stdout("No keepers state available") stdout("") } else { fmt.Fprintf(tabOut, "ID\tLISTENADDRESS\tPG LISTENADDRESS\tCV VERSION\tHEALTHY\n") for _, k := range ks { fmt.Fprintf(tabOut, "%s\t%s:%s\t%s:%s\t%d\t%t\n", k.ID, k.ListenAddress, k.Port, k.PGListenAddress, k.PGPort, k.ClusterViewVersion, k.Healthy) } } 
tabOut.Flush() stdout("") stdout("=== Required Cluster View ===") stdout("") stdout("Version: %d", cv.Version) if cv == nil { stdout("No clusterview available") } else { stdout("Master: %s", cv.Master) stdout("") stdout("===== Keepers tree =====") for _, mr := range cv.KeepersRole { if mr.Follow == "" { stdout("") printTree(mr.ID, cv, 0, "", true) } } } stdout("") }
func setupServers(t *testing.T, dir string, numKeepers, numSentinels uint8, syncRepl bool) ([]*TestKeeper, []*TestSentinel, *TestEtcd) { te, err := NewTestEtcd(dir) if err != nil { t.Fatalf("unexpected err: %v", err) } if err := te.Start(); err != nil { t.Fatalf("unexpected err: %v", err) } if err := te.WaitUp(10 * time.Second); err != nil { t.Fatalf("error waiting on etcd up: %v", err) } etcdEndpoints := fmt.Sprintf("http://%s:%s", te.listenAddress, te.port) clusterName := uuid.NewV4().String() etcdPath := filepath.Join(common.EtcdBasePath, clusterName) e, err := etcdm.NewEtcdManager(etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout) if err != nil { t.Fatalf("cannot create etcd manager: %v", err) } // TODO(sgotti) change this to a call to the sentinel to change the // cluster config (when the sentinel's code is done) e.SetClusterData(cluster.KeepersState{}, &cluster.ClusterView{ Version: 1, Config: &cluster.NilConfig{ SleepInterval: cluster.DurationP(5 * time.Second), KeeperFailInterval: cluster.DurationP(10 * time.Second), SynchronousReplication: cluster.BoolP(syncRepl), }, }, 0) tks := []*TestKeeper{} tss := []*TestSentinel{} tk, err := NewTestKeeper(dir, clusterName, etcdEndpoints) if err != nil { t.Fatalf("unexpected err: %v", err) } tks = append(tks, tk) fmt.Printf("tk: %v\n", tk) // Start sentinels for i := uint8(0); i < numSentinels; i++ { ts, err := NewTestSentinel(dir, clusterName, etcdEndpoints) if err != nil { t.Fatalf("unexpected err: %v", err) } if err := ts.Start(); err != nil { t.Fatalf("unexpected err: %v", err) } tss = append(tss, ts) } if err := tk.Start(); err != nil { t.Fatalf("unexpected err: %v", err) } if err := tk.WaitDBUp(60 * time.Second); err != nil { t.Fatalf("unexpected err: %v", err) } if err := tk.WaitRole(common.MasterRole, 30*time.Second); err != nil { t.Fatalf("unexpected err: %v", err) } // Wait for clusterView containing tk as master if err := WaitClusterViewMaster(tk.id, e, 30*time.Second); err != nil { 
t.Fatalf("expected master %q in cluster view", tk.id) } // Start standbys for i := uint8(1); i < numKeepers; i++ { tk, err := NewTestKeeper(dir, clusterName, etcdEndpoints) if err != nil { t.Fatalf("unexpected err: %v", err) } if err := tk.Start(); err != nil { t.Fatalf("unexpected err: %v", err) } if err := tk.WaitDBUp(60 * time.Second); err != nil { t.Fatalf("unexpected err: %v", err) } if err := tk.WaitRole(common.StandbyRole, 30*time.Second); err != nil { t.Fatalf("unexpected err: %v", err) } tks = append(tks, tk) } return tks, tss, te }
// setupServers creates a randomly named cluster on the default etcd
// endpoints, seeds its configuration, then starts numSentinels sentinels
// and numKeepers keepers: the first keeper must become master before the
// remaining ones are started as standbys.
func setupServers(t *testing.T, dir string, numKeepers, numSentinels uint8, syncRepl bool) ([]*TestKeeper, []*TestSentinel) {
	clusterName := uuid.NewV4().String()
	etcdPath := filepath.Join(common.EtcdBasePath, clusterName)
	e, err := etcdm.NewEtcdManager(common.DefaultEtcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout)
	if err != nil {
		t.Fatalf("cannot create etcd manager: %v", err)
	}
	// NOTE(review): the result of SetClusterConfig is not checked here;
	// a seeding failure would only surface later as timeouts — confirm
	// whether it can fail and consider handling it.
	e.SetClusterConfig(&cluster.Config{
		SleepInterval:          5 * time.Second,
		KeeperFailInterval:     10 * time.Second,
		SynchronousReplication: syncRepl,
	})
	tms := []*TestKeeper{}
	tss := []*TestSentinel{}
	// The first keeper is created (but not started yet) before the
	// sentinels, so it will be the initial master.
	tm, err := NewTestKeeper(dir, clusterName)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	tms = append(tms, tm)
	fmt.Printf("tm: %v\n", tm)
	// Start sentinels
	for i := uint8(0); i < numSentinels; i++ {
		ts, err := NewTestSentinel(dir, clusterName)
		if err != nil {
			t.Fatalf("unexpected err: %v", err)
		}
		if err := ts.Start(); err != nil {
			t.Fatalf("unexpected err: %v", err)
		}
		tss = append(tss, ts)
	}
	// Start the first keeper and wait for it to become master.
	if err := tm.Start(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if err := tm.WaitDBUp(60 * time.Second); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if err := tm.WaitRole(common.MasterRole, 30*time.Second); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	// Start standbys
	for i := uint8(1); i < numKeepers; i++ {
		tm, err := NewTestKeeper(dir, clusterName)
		if err != nil {
			t.Fatalf("unexpected err: %v", err)
		}
		if err := tm.Start(); err != nil {
			t.Fatalf("unexpected err: %v", err)
		}
		if err := tm.WaitDBUp(60 * time.Second); err != nil {
			t.Fatalf("unexpected err: %v", err)
		}
		if err := tm.WaitRole(common.StandbyRole, 30*time.Second); err != nil {
			t.Fatalf("unexpected err: %v", err)
		}
		tms = append(tms, tm)
	}
	return tms, tss
}
// TestProxyListening verifies that the proxy listens only while it can
// talk to etcd: it must not listen while etcd is down, must (re)start
// listening when etcd comes (back) up, and must keep listening when the
// proxyConf or the whole clusterView is removed from the cluster data.
func TestProxyListening(t *testing.T) {
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	defer os.RemoveAll(dir)

	clusterName := uuid.NewV4().String()

	// The etcd instance is created but deliberately NOT started yet.
	te, err := NewTestEtcd(dir)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	etcdEndpoints := fmt.Sprintf("http://%s:%s", te.listenAddress, te.port)
	tp, err := NewTestProxy(dir, clusterName, etcdEndpoints)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if err := tp.Start(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	defer tp.Stop()

	log.Printf("test proxy start with etcd down. Should not listen")
	// tp should not listen because it cannot talk with etcd
	if err := tp.WaitNotListening(10 * time.Second); err != nil {
		t.Fatalf("expecting tp not listening due to failed etcd communication, but it's listening.")
	}

	tp.Stop()

	// Now bring etcd up and seed the cluster data.
	if err := te.Start(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if err := te.WaitUp(10 * time.Second); err != nil {
		t.Fatalf("error waiting on etcd up: %v", err)
	}
	// te is stopped/started several times below; only stop it at exit
	// if it is still running.
	defer func() {
		if te.cmd != nil {
			te.Stop()
		}
	}()
	etcdPath := filepath.Join(common.EtcdBasePath, clusterName)
	e, err := etcdm.NewEtcdManager(etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout)
	if err != nil {
		t.Fatalf("cannot create etcd manager: %v", err)
	}
	res, err := e.SetClusterData(cluster.KeepersState{},
		&cluster.ClusterView{
			Version: 1,
			Config: &cluster.NilConfig{
				SleepInterval:      &cluster.Duration{5 * time.Second},
				KeeperFailInterval: &cluster.Duration{10 * time.Second},
			},
			ProxyConf: &cluster.ProxyConf{
				// fake pg address, not relevant
				Host: "localhost",
				Port: "5432",
			},
		}, 0)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	// Each SetClusterData below does a compare-and-swap against this
	// index to catch concurrent modifications.
	prevCDIndex := res.Node.ModifiedIndex

	// test proxy start with etcd up
	log.Printf("test proxy start with etcd up. Should listen")
	if err := tp.Start(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	// tp should listen
	if err := tp.WaitListening(10 * time.Second); err != nil {
		t.Fatalf("expecting tp listening, but it's not listening.")
	}

	log.Printf("test proxy error communicating with etcd. Should stop listening")
	// Stop etcd
	te.Stop()
	if err := te.WaitDown(10 * time.Second); err != nil {
		t.Fatalf("error waiting on etcd down: %v", err)
	}
	// tp should not listen because it cannot talk with etcd
	if err := tp.WaitNotListening(10 * time.Second); err != nil {
		t.Fatalf("expecting tp not listening due to failed etcd communication, but it's listening.")
	}

	log.Printf("test proxy communication with etcd restored. Should start listening")
	// Start etcd
	if err := te.Start(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if err := te.WaitUp(10 * time.Second); err != nil {
		t.Fatalf("error waiting on etcd up: %v", err)
	}
	// tp should listen
	if err := tp.WaitListening(10 * time.Second); err != nil {
		t.Fatalf("expecting tp listening, but it's not listening.")
	}

	log.Printf("test proxyConf removed. Should continue listening")
	// remove proxyConf
	res, err = e.SetClusterData(cluster.KeepersState{},
		&cluster.ClusterView{
			Version: 1,
			Config: &cluster.NilConfig{
				SleepInterval:      &cluster.Duration{5 * time.Second},
				KeeperFailInterval: &cluster.Duration{10 * time.Second},
			},
			ProxyConf: nil,
		}, prevCDIndex)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	prevCDIndex = res.Node.ModifiedIndex
	// tp should listen
	if err := tp.WaitListening(10 * time.Second); err != nil {
		t.Fatalf("expecting tp listening, but it's not listening.")
	}

	log.Printf("test proxyConf restored. Should continue listening")
	// Set proxyConf again
	res, err = e.SetClusterData(cluster.KeepersState{},
		&cluster.ClusterView{
			Version: 1,
			Config: &cluster.NilConfig{
				SleepInterval:      &cluster.Duration{5 * time.Second},
				KeeperFailInterval: &cluster.Duration{10 * time.Second},
			},
			ProxyConf: &cluster.ProxyConf{
				// fake pg address, not relevant
				Host: "localhost",
				Port: "5432",
			},
		}, prevCDIndex)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	prevCDIndex = res.Node.ModifiedIndex
	// tp should listen
	if err := tp.WaitListening(10 * time.Second); err != nil {
		t.Fatalf("expecting tp listening, but it's not listening.")
	}

	log.Printf("test clusterView removed. Should continue listening")
	// remove whole clusterview
	_, err = e.SetClusterData(cluster.KeepersState{}, nil, prevCDIndex)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	// tp should listen
	if err := tp.WaitListening(10 * time.Second); err != nil {
		t.Fatalf("expecting tp listening, but it's not listening.")
	}
}
// TestServerParameters verifies that the keeper reacts to PGParameters
// changes in the cluster config: it reloads postgres on change, reports
// no change on the next check, fails to start postgres with a bad
// parameter, and recovers once the bad parameter is removed.
func TestServerParameters(t *testing.T) {
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	defer os.RemoveAll(dir)

	te, err := NewTestEtcd(dir)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if err := te.Start(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if err := te.WaitUp(10 * time.Second); err != nil {
		t.Fatalf("error waiting on etcd up: %v", err)
	}
	etcdEndpoints := fmt.Sprintf("http://%s:%s", te.listenAddress, te.port)
	defer te.Stop()

	clusterName := uuid.NewV4().String()
	etcdPath := filepath.Join(common.EtcdBasePath, clusterName)
	e, err := etcdm.NewEtcdManager(etcdEndpoints, etcdPath, common.DefaultEtcdRequestTimeout)
	if err != nil {
		t.Fatalf("cannot create etcd manager: %v", err)
	}
	// Seed an initial cluster view without PGParameters.
	res, err := e.SetClusterData(cluster.KeepersState{},
		&cluster.ClusterView{
			Version: 1,
			Config: &cluster.NilConfig{
				SleepInterval:      &cluster.Duration{5 * time.Second},
				KeeperFailInterval: &cluster.Duration{10 * time.Second},
			},
		}, 0)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	// Each SetClusterData below does a compare-and-swap against this
	// index to catch concurrent modifications.
	prevCDIndex := res.Node.ModifiedIndex

	// StartExpect allows matching on the keeper's log output below.
	tk, err := NewTestKeeper(dir, clusterName, etcdEndpoints)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	if err := tk.StartExpect(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	defer tk.Stop()

	if err := tk.WaitDBUp(60 * time.Second); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Push a cluster view making tk master with a bogus PG parameter:
	// reload succeeds, but a fresh start will fail on it.
	res, err = e.SetClusterData(cluster.KeepersState{},
		&cluster.ClusterView{
			Version: 2,
			Master:  tk.id,
			KeepersRole: cluster.KeepersRole{
				tk.id: &cluster.KeeperRole{ID: tk.id, Follow: ""},
			},
			Config: &cluster.NilConfig{
				SleepInterval:      &cluster.Duration{5 * time.Second},
				KeeperFailInterval: &cluster.Duration{10 * time.Second},
				PGParameters: &map[string]string{
					"unexistent_parameter": "value",
				},
			},
		}, prevCDIndex)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	prevCDIndex = res.Node.ModifiedIndex

	tk.cmd.Expect("postgres parameters changed, reloading postgres instance")
	// On the next keeper check they shouldn't be changed
	tk.cmd.Expect("postgres parameters not changed")

	tk.Stop()

	// Start tk again, postgres should fail to start due to bad parameter
	if err := tk.StartExpect(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	tk.cmd.Expect("failed to start postgres:")

	// Fix wrong parameters
	res, err = e.SetClusterData(cluster.KeepersState{},
		&cluster.ClusterView{
			Version: 2,
			Master:  tk.id,
			KeepersRole: cluster.KeepersRole{
				tk.id: &cluster.KeeperRole{ID: tk.id, Follow: ""},
			},
			Config: &cluster.NilConfig{
				SleepInterval:      &cluster.Duration{5 * time.Second},
				KeeperFailInterval: &cluster.Duration{10 * time.Second},
				PGParameters:       &map[string]string{},
			},
		}, prevCDIndex)
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	// NOTE(review): this final assignment is a dead store (prevCDIndex
	// is not read again before the function returns).
	prevCDIndex = res.Node.ModifiedIndex

	// With the parameters fixed the keeper should get postgres up again.
	if err := tk.WaitDBUp(30 * time.Second); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
}