// Create a full cluster, disconnect a peer, wait for removal, wait for standby join. func TestStandbyAutoJoin(t *testing.T) { clusterSize := 5 _, etcds, err := CreateCluster(clusterSize, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false) if err != nil { t.Fatal("cannot create cluster") } defer func() { // Wrap this in a closure so that it picks up the updated version of // the "etcds" variable. DestroyCluster(etcds) }() c := etcd.NewClient(nil) c.SyncCluster() time.Sleep(1 * time.Second) // Verify that we have five machines. result, err := c.Get("_etcd/machines", false, true) assert.NoError(t, err) assert.Equal(t, len(result.Node.Nodes), 5) // Reconfigure with a short remove delay (2 second). resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":4, "removeDelay":2, "syncInterval":1}`)) if !assert.Equal(t, resp.StatusCode, 200) { t.FailNow() } // Wait for a monitor cycle before checking for removal. time.Sleep(server.ActiveMonitorTimeout + (1 * time.Second)) // Verify that we now have four peers. result, err = c.Get("_etcd/machines", false, true) assert.NoError(t, err) assert.Equal(t, len(result.Node.Nodes), 4) // Remove peer. etcd := etcds[1] etcds = append(etcds[:1], etcds[2:]...) if err := etcd.Kill(); err != nil { panic(err.Error()) } etcd.Release() // Wait for it to get dropped. time.Sleep(server.PeerActivityMonitorTimeout + (1 * time.Second)) // Wait for the standby to join. time.Sleep((1 * time.Second) + (1 * time.Second)) // Verify that we have 4 peers. result, err = c.Get("_etcd/machines", true, true) assert.NoError(t, err) assert.Equal(t, len(result.Node.Nodes), 4) // Verify that node2 is not one of those peers. _, err = c.Get("_etcd/machines/node2", false, false) assert.Error(t, err) }
// Ensures that a bad flag returns an error. func TestConfigBadFlag(t *testing.T) { c := New() err := c.LoadFlags([]string{"-no-such-flag"}) assert.Error(t, err) assert.Equal(t, err.Error(), `flag provided but not defined: -no-such-flag`) }
// Ensures that a bad IPv6 address will raise an error func TestConfigBindAddrErrorOnBadIPv6Addr(t *testing.T) { c := New() assert.Nil(t, c.LoadFlags([]string{"-addr", "[::1%lo]:4009"}), "") assert.Error(t, c.Sanitize()) }
// Ensures that a port only argument errors out func TestConfigBindAddrErrorOnNoHost(t *testing.T) { c := New() assert.Nil(t, c.LoadFlags([]string{"-addr", "127.0.0.1:4009", "-bind-addr", ":4010"}), "") assert.Error(t, c.Sanitize()) }
// This test repeatedly kills the current leader and waits for the cluster to
// elect a new one, once per original cluster member (clusterSize iterations),
// printing the per-election and running-average election times. It runs in a
// cluster that has been reconfigured to keep standby nodes.
//
// NOTE(review): currently skipped — leader election gets stuck; see the
// linked goraft issue.
func TestKillLeaderWithStandbys(t *testing.T) {
	// https://github.com/goraft/raft/issues/222
	t.Skip("stuck on raft issue")

	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	clusterSize := 5
	argGroup, etcds, err := CreateCluster(clusterSize, procAttr, false)
	if err != nil {
		t.Fatal("cannot create cluster")
	}
	defer DestroyCluster(etcds)

	// Channels used by the Monitor goroutine to report leader changes.
	stop := make(chan bool)
	leaderChan := make(chan string, 1)
	all := make(chan bool, 1)

	time.Sleep(time.Second)

	go Monitor(clusterSize, 1, leaderChan, all, stop)

	c := etcd.NewClient(nil)
	c.SyncCluster()

	// Reconfigure with a small active size (3 of the 5 become active,
	// the rest standbys).
	resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":3, "removeDelay":2, "syncInterval":1}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	// Wait for two monitor cycles before checking for demotion.
	time.Sleep((2 * server.ActiveMonitorTimeout) + (2 * time.Second))

	// Verify that we have 3 peers.
	result, err := c.Get("_etcd/machines", true, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 3)

	var totalTime time.Duration

	leader := "http://127.0.0.1:7001"

	for i := 0; i < clusterSize; i++ {
		t.Log("leader is ", leader)
		// Peers listen on consecutive ports starting at 7001, so the
		// port offset identifies which process in etcds is the leader.
		port, _ := strconv.Atoi(strings.Split(leader, ":")[2])
		num := port - 7001
		t.Log("kill server ", num)
		etcds[num].Kill()
		etcds[num].Release()

		// Block until the monitor reports a leader different from the
		// one we just killed, timing how long the election took.
		start := time.Now()
		for {
			newLeader := <-leaderChan
			if newLeader != leader {
				leader = newLeader
				break
			}
		}
		take := time.Now().Sub(start)

		totalTime += take
		avgTime := totalTime / (time.Duration)(i+1)
		fmt.Println("Total time:", totalTime, "; Avg time:", avgTime)

		// Give the active-size monitor time to promote a standby into
		// the slot vacated by the killed peer.
		time.Sleep(server.ActiveMonitorTimeout + (1 * time.Second))
		time.Sleep(2 * time.Second)

		// Verify that we have 3 peers.
		result, err = c.Get("_etcd/machines", true, true)
		assert.NoError(t, err)
		assert.Equal(t, len(result.Node.Nodes), 3)

		// Verify that killed node is not one of those peers.
		_, err = c.Get(fmt.Sprintf("_etcd/machines/node%d", num+1), false, false)
		assert.Error(t, err)

		// Restart the killed process (it should rejoin as a standby).
		etcds[num], err = os.StartProcess(EtcdBinPath, argGroup[num], procAttr)
	}

	stop <- true
}