func TestRaftQuorumRecovery(t *testing.T) {
	t.Parallel()

	// Bring up a 5-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Lose a majority
	for i := uint64(1); i <= 3; i++ {
		nodes[i].Server.Stop()
		nodes[i].Shutdown()
	}

	raftutils.AdvanceTicks(clockSource, 5)

	// Restore the majority by restarting node 3
	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)

	delete(nodes, 1)
	delete(nodes, 2)
	raftutils.WaitForCluster(t, clockSource, nodes)

	// Propose a value
	value, err := raftutils.ProposeValue(t, raftutils.Leader(nodes))
	assert.NoError(t, err)
	for _, node := range nodes {
		raftutils.CheckValue(t, clockSource, node, value)
	}
}
func testRaftRestartCluster(t *testing.T, stagger bool) {
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Propose a value
	values := make([]*api.Node, 2)
	var err error
	values[0], err = raftutils.ProposeValue(t, nodes[1], "id1")
	assert.NoError(t, err, "failed to propose value")

	// Stop all nodes
	for _, node := range nodes {
		node.Server.Stop()
		node.Shutdown()
	}

	raftutils.AdvanceTicks(clockSource, 5)

	// Restart all nodes
	i := 0
	for k, node := range nodes {
		if stagger && i != 0 {
			raftutils.AdvanceTicks(clockSource, 1)
		}
		nodes[k] = raftutils.RestartNode(t, clockSource, node, false)
		i++
	}
	raftutils.WaitForCluster(t, clockSource, nodes)

	// Propose another value
	values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), "id2")
	assert.NoError(t, err, "failed to propose value")

	for _, node := range nodes {
		assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
			var err error
			node.MemoryStore().View(func(tx store.ReadTx) {
				var allNodes []*api.Node
				allNodes, err = store.FindNodes(tx, store.All)
				if err != nil {
					return
				}
				if len(allNodes) != 2 {
					err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
					return
				}

				for i, nodeID := range []string{"id1", "id2"} {
					n := store.GetNode(tx, nodeID)
					if !reflect.DeepEqual(n, values[i]) {
						err = fmt.Errorf("node %s did not match expected value", nodeID)
						return
					}
				}
			})
			return err
		}))
	}
}
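// testRaftRestartCluster is an unexported helper parameterized by `stagger`. Below is a
// minimal sketch of the wrapper tests that would exercise both modes; the wrapper names
// are illustrative assumptions, not necessarily the ones used elsewhere in this suite.

func TestRaftRestartClusterSimultaneously(t *testing.T) {
	t.Parallel()

	// Restart every node without advancing the fake clock between restarts.
	testRaftRestartCluster(t, false)
}

func TestRaftRestartClusterStaggered(t *testing.T) {
	t.Parallel()

	// Advance the fake clock by one tick between restarts so the nodes come back staggered.
	testRaftRestartCluster(t, true)
}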
// This test rotates the encryption key and restarts the node - the intent is to try to trigger
// race conditions if there is more than one node and hence consensus may take longer.
func TestRaftEncryptionKeyRotationStress(t *testing.T) {
	t.Parallel()

	// Bring up a 3-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)
	leader := nodes[1]

	// constantly propose values
	done, stop, restart, clusterReady := make(chan struct{}), make(chan struct{}), make(chan struct{}), make(chan struct{})
	go func() {
		counter := len(nodes)
		for {
			select {
			case <-stop:
				close(done)
				return
			case <-restart:
				// the node restarts may trigger a leadership change, so wait until the cluster has 3
				// nodes again and a leader is selected before proposing more values
				<-clusterReady
				leader = raftutils.Leader(nodes)
			default:
				counter += 1
				raftutils.ProposeValue(t, leader, DefaultProposalTime, fmt.Sprintf("id%d", counter))
			}
		}
	}()

	for i := 0; i < 30; i++ {
		// rotate the encryption key
		nodes[3].KeyRotator.QueuePendingKey([]byte(fmt.Sprintf("newKey%d", i)))
		nodes[3].KeyRotator.RotationNotify() <- struct{}{}

		require.NoError(t, raftutils.PollFunc(clockSource, func() error {
			if nodes[3].KeyRotator.GetKeys().PendingDEK == nil {
				return nil
			}
			return fmt.Errorf("not done rotating yet")
		}))

		// restart the node and wait for everything to settle and a leader to be elected
		nodes[3].Server.Stop()
		nodes[3].ShutdownRaft()
		restart <- struct{}{}
		nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
		raftutils.AdvanceTicks(clockSource, 1)
		raftutils.WaitForCluster(t, clockSource, nodes)
		clusterReady <- struct{}{}
	}

	close(stop)
	<-done
}
func TestStress(t *testing.T) {
	t.Parallel()

	// Bring up a 5-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	raftutils.AddRaftNode(t, clockSource, nodes, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// number of nodes that are running
	nup := len(nodes)
	// record of nodes that are down
	idleNodes := map[int]struct{}{}
	// record of ids that were proposed successfully or timed out
	pIDs := []string{}

	leader := -1
	for iters := 0; iters < 1000; iters++ {
		// keep proposing new values and killing the leader
		for i := 1; i <= 5; i++ {
			if nodes[uint64(i)] != nil {
				id := strconv.Itoa(iters)
				_, err := raftutils.ProposeValue(t, nodes[uint64(i)], id)

				if err == nil {
					pIDs = append(pIDs, id)
					// if the proposal succeeded, at least 3 nodes must be running
					assert.True(t, nup >= 3)
					// only the leader can propose a value
					assert.True(t, leader == i || leader == -1)

					// update leader
					leader = i
					break
				} else if strings.Contains(err.Error(), "context deadline exceeded") {
					// though it timed out, we still record this value,
					// for it may be proposed successfully and stored in Raft some time later
					pIDs = append(pIDs, id)
				}
			}
		}

		if rand.Intn(100) < 10 {
			// increase the clock to make a potential election finish quickly
			clockSource.Increment(200 * time.Millisecond)
			time.Sleep(10 * time.Millisecond)
		} else {
			ms := rand.Intn(10)
			clockSource.Increment(time.Duration(ms) * time.Millisecond)
		}

		if leader != -1 {
			// if a proposal succeeded (a leader is known), try to kill a node at random
			s := rand.Intn(5) + 1
			if _, ok := idleNodes[s]; !ok {
				id := uint64(s)
				nodes[id].Server.Stop()
				nodes[id].Shutdown()
				idleNodes[s] = struct{}{}
				nup -= 1
				if s == leader {
					// leader is killed
					leader = -1
				}
			}
		}

		if nup < 3 {
			// if quorum is lost, try to bring back a node
			s := rand.Intn(5) + 1
			if _, ok := idleNodes[s]; ok {
				id := uint64(s)
				nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false)
				delete(idleNodes, s)
				nup++
			}
		}
	}

	// bring back all nodes and propose the final value
	for i := range idleNodes {
		id := uint64(i)
		nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false)
	}
	raftutils.WaitForCluster(t, clockSource, nodes)
	id := strconv.Itoa(1000)
	val, err := raftutils.ProposeValue(t, raftutils.Leader(nodes), id)
	assert.NoError(t, err, "failed to propose value")
	pIDs = append(pIDs, id)

	// increase the clock to let the cluster stabilize
	time.Sleep(500 * time.Millisecond)
	clockSource.Increment(500 * time.Millisecond)

	ids, values := raftutils.GetAllValuesOnNode(t, clockSource, nodes[1])

	// since the cluster is stable, the final value must be in the raft store
	find := false
	for _, value := range values {
		if reflect.DeepEqual(value, val) {
			find = true
			break
		}
	}
	assert.True(t, find)

	// all nodes must have the same values
	raftutils.CheckValuesOnNodes(t, clockSource, nodes, ids, values)

	// ids should be a subset of pIDs
	for _, id := range ids {
		find = false
		for _, pid := range pIDs {
			if id == pid {
				find = true
				break
			}
		}
		assert.True(t, find)
	}
}
func TestRaftForceNewCluster(t *testing.T) {
	t.Parallel()

	nodes, clockSource := raftutils.NewRaftCluster(t, tc)

	// Propose a value
	values := make([]*api.Node, 2)
	var err error
	values[0], err = raftutils.ProposeValue(t, nodes[1], "id1")
	assert.NoError(t, err, "failed to propose value")

	// The memberlist should contain 3 members on each node
	for i := 1; i <= 3; i++ {
		assert.Equal(t, len(nodes[uint64(i)].GetMemberlist()), 3)
	}

	// Stop all nodes
	for _, node := range nodes {
		node.Server.Stop()
		node.Shutdown()
	}

	raftutils.AdvanceTicks(clockSource, 5)

	toClean := map[uint64]*raftutils.TestNode{
		2: nodes[2],
		3: nodes[3],
	}
	raftutils.TeardownCluster(t, toClean)
	delete(nodes, 2)
	delete(nodes, 3)

	// Only restart the first node, with the force-new-cluster option
	nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], true)
	raftutils.WaitForCluster(t, clockSource, nodes)

	// The memberlist should contain only one node (self)
	assert.Equal(t, len(nodes[1].GetMemberlist()), 1)

	// Add 2 more members
	nodes[2] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
	raftutils.WaitForCluster(t, clockSource, nodes)

	nodes[3] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
	raftutils.WaitForCluster(t, clockSource, nodes)

	newCluster := map[uint64]*raftutils.TestNode{
		1: nodes[1],
		2: nodes[2],
		3: nodes[3],
	}
	defer raftutils.TeardownCluster(t, newCluster)

	// The memberlist should contain 3 members on each node
	for i := 1; i <= 3; i++ {
		assert.Equal(t, len(nodes[uint64(i)].GetMemberlist()), 3)
	}

	// Propose another value
	values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), "id2")
	assert.NoError(t, err, "failed to propose value")

	for _, node := range nodes {
		assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
			var err error
			node.MemoryStore().View(func(tx store.ReadTx) {
				var allNodes []*api.Node
				allNodes, err = store.FindNodes(tx, store.All)
				if err != nil {
					return
				}
				if len(allNodes) != 2 {
					err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
					return
				}

				for i, nodeID := range []string{"id1", "id2"} {
					n := store.GetNode(tx, nodeID)
					if !reflect.DeepEqual(n, values[i]) {
						err = fmt.Errorf("node %s did not match expected value", nodeID)
						return
					}
				}
			})
			return err
		}))
	}
}
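// testRaftRestartCluster and TestRaftForceNewCluster finish with the same store-verification
// loop. A minimal sketch of how that loop could be factored into a shared helper follows;
// checkProposedNodeValues is a hypothetical name, and the sketch assumes clockSource is the
// *fakeclock.FakeClock returned by raftutils.NewRaftCluster (code.cloudfoundry.org/clock/fakeclock).

func checkProposedNodeValues(t *testing.T, clockSource *fakeclock.FakeClock, nodes map[uint64]*raftutils.TestNode, nodeIDs []string, values []*api.Node) {
	for _, node := range nodes {
		assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
			var err error
			node.MemoryStore().View(func(tx store.ReadTx) {
				var allNodes []*api.Node
				allNodes, err = store.FindNodes(tx, store.All)
				if err != nil {
					return
				}
				if len(allNodes) != len(nodeIDs) {
					err = fmt.Errorf("expected %d nodes, got %d", len(nodeIDs), len(allNodes))
					return
				}
				for i, nodeID := range nodeIDs {
					// Each proposed value is stored as an api.Node keyed by its ID.
					if !reflect.DeepEqual(store.GetNode(tx, nodeID), values[i]) {
						err = fmt.Errorf("node %s did not match expected value", nodeID)
						return
					}
				}
			})
			return err
		}))
	}
}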
func TestGCWAL(t *testing.T) {
	t.Parallel()

	// Additional log entries from cluster setup, leader election
	extraLogEntries := 5
	// Number of large entries to propose
	proposals := 8

	// Bring up a 3-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc, &api.RaftConfig{SnapshotInterval: uint64(proposals + extraLogEntries), LogEntriesForSlowFollowers: 0})

	for i := 0; i != proposals; i++ {
		_, err := proposeLargeValue(t, nodes[1], DefaultProposalTime, fmt.Sprintf("id%d", i))
		assert.NoError(t, err, "failed to propose value")
	}

	time.Sleep(250 * time.Millisecond)

	// Snapshot should have been triggered just as the WAL rotated, so
	// both WAL files should be preserved
	assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
		dirents, err := ioutil.ReadDir(filepath.Join(nodes[1].StateDir, "snap"))
		if err != nil {
			return err
		}
		if len(dirents) != 1 {
			return fmt.Errorf("expected 1 snapshot, found %d", len(dirents))
		}

		dirents, err = ioutil.ReadDir(filepath.Join(nodes[1].StateDir, "wal"))
		if err != nil {
			return err
		}
		var walCount int
		for _, f := range dirents {
			if strings.HasSuffix(f.Name(), ".wal") {
				walCount++
			}
		}
		if walCount != 2 {
			return fmt.Errorf("expected 2 WAL files, found %d", walCount)
		}
		return nil
	}))

	raftutils.TeardownCluster(t, nodes)

	// Repeat this test, but trigger the snapshot after the WAL has rotated
	proposals++
	nodes, clockSource = raftutils.NewRaftCluster(t, tc, &api.RaftConfig{SnapshotInterval: uint64(proposals + extraLogEntries), LogEntriesForSlowFollowers: 0})
	defer raftutils.TeardownCluster(t, nodes)

	for i := 0; i != proposals; i++ {
		_, err := proposeLargeValue(t, nodes[1], DefaultProposalTime, fmt.Sprintf("id%d", i))
		assert.NoError(t, err, "failed to propose value")
	}

	time.Sleep(250 * time.Millisecond)

	// This time only one WAL file should be saved.
	assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
		dirents, err := ioutil.ReadDir(filepath.Join(nodes[1].StateDir, "snap"))
		if err != nil {
			return err
		}
		if len(dirents) != 1 {
			return fmt.Errorf("expected 1 snapshot, found %d", len(dirents))
		}

		dirents, err = ioutil.ReadDir(filepath.Join(nodes[1].StateDir, "wal"))
		if err != nil {
			return err
		}
		var walCount int
		for _, f := range dirents {
			if strings.HasSuffix(f.Name(), ".wal") {
				walCount++
			}
		}
		if walCount != 1 {
			return fmt.Errorf("expected 1 WAL file, found %d", walCount)
		}
		return nil
	}))

	// Restart the whole cluster
	for _, node := range nodes {
		node.Server.Stop()
		node.Shutdown()
	}

	raftutils.AdvanceTicks(clockSource, 5)

	for k, node := range nodes {
		nodes[k] = raftutils.RestartNode(t, clockSource, node, false)
	}
	raftutils.WaitForCluster(t, clockSource, nodes)

	// Is the data intact after restart?
	for _, node := range nodes {
		assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
			var err error
			node.MemoryStore().View(func(tx store.ReadTx) {
				var allNodes []*api.Node
				allNodes, err = store.FindNodes(tx, store.All)
				if err != nil {
					return
				}
				if len(allNodes) != proposals {
					err = fmt.Errorf("expected %d nodes, got %d", proposals, len(allNodes))
					return
				}
			})
			return err
		}))
	}

	// It should still be possible to propose values
	_, err := raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, "newnode")
	assert.NoError(t, err, "failed to propose value")

	for _, node := range nodes {
		assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
			var err error
			node.MemoryStore().View(func(tx store.ReadTx) {
				var allNodes []*api.Node
				allNodes, err = store.FindNodes(tx, store.All)
				if err != nil {
					return
				}
				if len(allNodes) != proposals+1 {
					err = fmt.Errorf("expected %d nodes, got %d", proposals+1, len(allNodes))
					return
				}
			})
			return err
		}))
	}
}
func TestRaftSnapshotRestart(t *testing.T) {
	t.Parallel()

	// Bring up a 3-node cluster
	nodes, clockSource := raftutils.NewRaftCluster(t, tc, &api.RaftConfig{SnapshotInterval: 10, LogEntriesForSlowFollowers: 0})
	defer raftutils.TeardownCluster(t, nodes)

	nodeIDs := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7"}
	values := make([]*api.Node, len(nodeIDs))

	// Propose 3 values
	var err error
	for i, nodeID := range nodeIDs[:3] {
		values[i], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeID)
		assert.NoError(t, err, "failed to propose value")
	}

	// Take down node 3
	nodes[3].Server.Stop()
	nodes[3].Shutdown()

	// Propose a 4th value before the snapshot
	values[3], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeIDs[3])
	assert.NoError(t, err, "failed to propose value")

	// Remaining nodes shouldn't have snapshot files yet
	for _, node := range []*raftutils.TestNode{nodes[1], nodes[2]} {
		dirents, err := ioutil.ReadDir(filepath.Join(node.StateDir, "snap"))
		assert.NoError(t, err)
		assert.Len(t, dirents, 0)
	}

	// Add a node to the cluster before the snapshot. This is the event
	// that triggers the snapshot.
	nodes[4] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
	raftutils.WaitForCluster(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2], 4: nodes[4]})

	// Remaining nodes should now have a snapshot file
	for nodeIdx, node := range []*raftutils.TestNode{nodes[1], nodes[2]} {
		assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
			dirents, err := ioutil.ReadDir(filepath.Join(node.StateDir, "snap"))
			if err != nil {
				return err
			}
			if len(dirents) != 1 {
				return fmt.Errorf("expected 1 snapshot, found %d on node %d", len(dirents), nodeIdx+1)
			}
			return nil
		}))
	}
	raftutils.CheckValuesOnNodes(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2]}, nodeIDs[:4], values[:4])

	// Propose a 5th value
	values[4], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeIDs[4])
	require.NoError(t, err)

	// Add another node to the cluster
	nodes[5] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
	raftutils.WaitForCluster(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2], 4: nodes[4], 5: nodes[5]})

	// New node should get a copy of the snapshot
	assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
		dirents, err := ioutil.ReadDir(filepath.Join(nodes[5].StateDir, "snap"))
		if err != nil {
			return err
		}
		if len(dirents) != 1 {
			return fmt.Errorf("expected 1 snapshot, found %d on new node", len(dirents))
		}
		return nil
	}))

	dirents, err := ioutil.ReadDir(filepath.Join(nodes[5].StateDir, "snap"))
	assert.NoError(t, err)
	assert.Len(t, dirents, 1)
	raftutils.CheckValuesOnNodes(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2]}, nodeIDs[:5], values[:5])

	// It should know about the other nodes, including the one that was just added
	stripMembers := func(memberList map[uint64]*api.RaftMember) map[uint64]*api.RaftMember {
		raftNodes := make(map[uint64]*api.RaftMember)
		for k, v := range memberList {
			raftNodes[k] = &api.RaftMember{
				RaftID: v.RaftID,
				Addr:   v.Addr,
			}
		}
		return raftNodes
	}
	assert.Equal(t, stripMembers(nodes[1].GetMemberlist()), stripMembers(nodes[4].GetMemberlist()))

	// Restart node 3
	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
	raftutils.WaitForCluster(t, clockSource, nodes)

	// Node 3 should know about other nodes, including the new one
	assert.Len(t, nodes[3].GetMemberlist(), 5)
	assert.Equal(t, stripMembers(nodes[1].GetMemberlist()), stripMembers(nodes[3].GetMemberlist()))

	// Propose yet another value, to make sure the rejoined node is still
	// receiving new logs
	values[5], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, nodeIDs[5])
	require.NoError(t, err)

	// All nodes should have all the data
	raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs[:6], values[:6])

	// Restart node 3 again. It should load the snapshot.
	nodes[3].Server.Stop()
	nodes[3].Shutdown()
	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
	raftutils.WaitForCluster(t, clockSource, nodes)

	assert.Len(t, nodes[3].GetMemberlist(), 5)
	assert.Equal(t, stripMembers(nodes[1].GetMemberlist()), stripMembers(nodes[3].GetMemberlist()))
	raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs[:6], values[:6])

	// Propose again. Just to check consensus after this latest restart.
	values[6], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, nodeIDs[6])
	require.NoError(t, err)
	raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values)
}