// In a SOLO (single-node) cluster the leader needs no peer replies to
// commit: a tick after appending entries advances commitIndex straight
// to the index of the last entry. No RPCs are ever sent.
func TestCM_SOLO_Leader_TickAdvancesCommitIndexIfPossible(t *testing.T) {
	var err error
	mcm, mrs := testSetupMCM_SOLO_Leader_WithTerms(t, testdata.TestUtil_MakeFigure7LeaderLineTerms())

	serverTerm := mcm.pcm.RaftPersistentState.GetCurrentTerm()

	// pre checks
	if serverTerm != 8 {
		t.Fatal()
	}
	if mcm.pcm.GetCommitIndex() != 0 {
		t.Fatal()
	}
	// No peers, so no RPCs should ever be sent.
	mrs.CheckSentRpcs(t, map[ServerId]interface{}{})
	mrs.ClearSentRpcs()

	// tick should try to advance commitIndex but nothing should happen
	err = mcm.Tick()
	if err != nil {
		t.Fatal(err)
	}
	if mcm.pcm.GetCommitIndex() != 0 {
		t.Fatal()
	}
	mrs.CheckSentRpcs(t, map[ServerId]interface{}{})
	mrs.ClearSentRpcs()

	// let's make some new log entries
	ioleAC, err := mcm.pcm.AppendCommand(testhelpers.DummyCommand(11))
	if err != nil || ioleAC != 11 {
		t.Fatal()
	}
	ioleAC, err = mcm.pcm.AppendCommand(testhelpers.DummyCommand(12))
	if err != nil || ioleAC != 12 {
		t.Fatal()
	}

	// commitIndex does not advance immediately
	if mcm.pcm.GetCommitIndex() != 0 {
		t.Fatal()
	}

	// tick will advance commitIndex to the highest match possible
	err = mcm.Tick()
	if err != nil {
		t.Fatal(err)
	}
	if mcm.pcm.GetCommitIndex() != 12 {
		t.Fatal(mcm.pcm.GetCommitIndex())
	}
	mrs.CheckSentRpcs(t, map[ServerId]interface{}{})
	mrs.ClearSentRpcs()
}
func TestConsensusModule_AppendCommand_Follower(t *testing.T) { cm, _ := setupConsensusModuleR2(t, testdata.TestUtil_MakeFigure7LeaderLineTerms()) defer cm.Stop() // pre check iole, err := cm.passiveConsensusModule.LogRO.GetIndexOfLastEntry() if err != nil { t.Fatal() } if iole != 10 { t.Fatal() } _, err = cm.AppendCommand(testhelpers.DummyCommand(1101)) if err != ErrNotLeader { t.Fatal() } if cm.IsStopped() { t.Error() } iole, err = cm.passiveConsensusModule.LogRO.GetIndexOfLastEntry() if err != nil { t.Fatal() } if iole != 10 { t.Fatal() } }
// #RFS-L2a: If Command received from client: append entry to local log func TestCM_FollowerOrCandidate_AppendCommand(t *testing.T) { f := func( setup func(t *testing.T) (mcm *managedConsensusModule, mrs *testhelpers.MockRpcSender), ) { mcm, _ := setup(t) // pre check iole, err := mcm.pcm.LogRO.GetIndexOfLastEntry() if err != nil { t.Fatal() } if iole != 10 { t.Fatal() } _, err = mcm.pcm.AppendCommand(testhelpers.DummyCommand(1101)) if err != ErrNotLeader { t.Fatal() } iole, err = mcm.pcm.LogRO.GetIndexOfLastEntry() if err != nil { t.Fatal() } if iole != 10 { t.Fatal() } } f(testSetupMCM_Follower_Figure7LeaderLine) f(testSetupMCM_Candidate_Figure7LeaderLine) }
// #RFS-L2a: If Command received from client: append entry to local log func TestCM_Leader_AppendCommand(t *testing.T) { mcm, _ := testSetupMCM_Leader_Figure7LeaderLine(t) // pre check iole, err := mcm.pcm.LogRO.GetIndexOfLastEntry() if err != nil { t.Fatal() } if iole != 10 { t.Fatal() } ioleAC, err := mcm.pcm.AppendCommand(testhelpers.DummyCommand(1101)) if err != nil || ioleAC != 11 { t.Fatal() } iole, err = mcm.pcm.LogRO.GetIndexOfLastEntry() if err != nil { t.Fatal() } if iole != 11 { t.Fatal() } le := testhelpers.TestHelper_GetLogEntryAtIndex(mcm.pcm.LogRO, 11) if !reflect.DeepEqual(le, LogEntry{8, Command("c1101")}) { t.Fatal(le) } }
func TestConsensusModule_AppendCommand_Follower_StoppedCM(t *testing.T) { cm, _ := setupConsensusModuleR2(t, testdata.TestUtil_MakeFigure7LeaderLineTerms()) cm.Stop() _, err := cm.AppendCommand(testhelpers.DummyCommand(1101)) if err != ErrStopped { t.Fatal(err) } }
// Once a ConsensusModule has become leader, AppendCommand succeeds:
// it returns the new entry's index (11) and the entry is in the log
// with the current term (8).
func TestConsensusModule_AppendCommand_Leader(t *testing.T) {
	cm, mrs := setupConsensusModuleR2(t, testdata.TestUtil_MakeFigure7LeaderLineTerms())
	defer cm.Stop()

	// Helper drives this CM through the RPC-reply callback flow so it
	// becomes leader before we append.
	testConsensusModule_RpcReplyCallback_AndBecomeLeader(t, cm, mrs)

	// pre check
	iole, err := cm.passiveConsensusModule.LogRO.GetIndexOfLastEntry()
	if err != nil {
		t.Fatal()
	}
	if iole != 10 {
		t.Fatal()
	}

	ioleAC, err := cm.AppendCommand(testhelpers.DummyCommand(1101))

	// A successful append must not stop the module.
	if cm.IsStopped() {
		t.Error()
	}
	if err != nil {
		t.Fatal()
	}
	if ioleAC != 11 {
		t.Fatal()
	}

	iole, err = cm.passiveConsensusModule.LogRO.GetIndexOfLastEntry()
	if err != nil {
		t.Fatal()
	}
	if iole != 11 {
		t.Fatal()
	}
	le := testhelpers.TestHelper_GetLogEntryAtIndex(cm.passiveConsensusModule.LogRO, 11)
	if !reflect.DeepEqual(le, LogEntry{8, Command("c1101")}) {
		t.Fatal(le)
	}
}
// Live SOLO cluster: a command given to the leader shows up in its log
// immediately, and the commit index catches up after one ticker period.
func TestCluster_SOLO_Command_And_CommitIndexAdvance(t *testing.T) {
	cm, diml, dsm := testSetup_SOLO_Leader(t)
	defer cm.Stop()

	// Apply a command on the leader
	ioleAC, result := cm.AppendCommand(testhelpers.DummyCommand(101))

	// FIXME: sleep just enough!
	// NOTE(review): timing-based wait; flaky if the goroutine is slow.
	time.Sleep(testdata.SleepToLetGoroutineRun)

	if result != nil {
		t.Fatal()
	}
	if ioleAC != 1 {
		t.Fatal()
	}
	if iole, err := diml.GetIndexOfLastEntry(); err != nil || iole != 1 {
		t.Fatal()
	}
	expectedLe := LogEntry{1, Command("c101")}

	// Command is in the leader's log
	le := testhelpers.TestHelper_GetLogEntryAtIndex(diml, 1)
	if !reflect.DeepEqual(le, expectedLe) {
		t.Fatal(le)
	}
	// but not yet committed
	if dsm.GetCommitIndex() != 0 {
		t.Fatal()
	}

	// A tick allows command to be committed
	time.Sleep(testdata.TickerDuration)
	if dsm.GetCommitIndex() != 1 {
		t.Fatal()
	}
}
// With one follower down, a command applied on the leader is still
// replicated to the reachable follower and committed on a majority;
// when the crashed follower restarts, a subsequent tick brings it the
// entry and the commit index.
func TestCluster_CommandIsReplicatedVsMissingNode(t *testing.T) {
	imrsh, cm1, diml1, dsm1, cm2, diml2, dsm2, cm3, _, _ := testSetupClusterWithLeader(t)
	defer cm1.Stop()
	defer cm2.Stop()

	// Simulate a follower crash
	imrsh.cms["s3"] = nil
	cm3.Stop()
	cm3 = nil

	// Apply a command on the leader
	ioleAC, result := cm1.AppendCommand(testhelpers.DummyCommand(101))
	if result != nil {
		t.Fatal()
	}
	if ioleAC != 1 {
		t.Fatal()
	}
	if iole, err := diml1.GetIndexOfLastEntry(); err != nil || iole != 1 {
		t.Fatal()
	}
	expectedLe := LogEntry{1, Command("c101")}

	// Command is in the leader's log
	le := testhelpers.TestHelper_GetLogEntryAtIndex(diml1, 1)
	if !reflect.DeepEqual(le, expectedLe) {
		t.Fatal(le)
	}
	// but not yet in connected follower's
	iole, err := diml2.GetIndexOfLastEntry()
	if err != nil || iole != 0 {
		t.Fatal()
	}

	// A tick allows command to be replicated to connected followers
	time.Sleep(testdata.TickerDuration)
	iole, err = diml2.GetIndexOfLastEntry()
	if err != nil || iole != 1 {
		t.Fatal(iole)
	}
	le = testhelpers.TestHelper_GetLogEntryAtIndex(diml2, 1)
	if !reflect.DeepEqual(le, expectedLe) {
		t.Fatal(le)
	}
	// and committed on the leader
	if dsm1.GetCommitIndex() != 1 {
		t.Fatal()
	}
	// but not yet on the connected followers
	if dsm2.GetCommitIndex() != 0 {
		t.Fatal()
	}

	// Another tick propagates the commit to the connected followers
	time.Sleep(testdata.TickerDuration)
	if dsm2.GetCommitIndex() != 1 {
		t.Fatal()
	}

	// Crashed follower restarts
	cm3b, diml3b, dsm3b := setupConsensusModuleR3(
		t,
		"s3",
		testdata.ElectionTimeoutLow,
		nil,
		imrsh.getRpcService("s3"),
	)
	defer cm3b.Stop()
	imrsh.cms["s3"] = cm3b
	// Fresh node starts with nothing committed.
	if dsm3b.GetCommitIndex() != 0 {
		t.Fatal()
	}

	// A tick propagates the command and the commit to the recovered follower
	time.Sleep(testdata.TickerDuration)
	// FIXME: err if cm3b.GetLeader() != "s1"
	le = testhelpers.TestHelper_GetLogEntryAtIndex(diml3b, 1)
	if !reflect.DeepEqual(le, expectedLe) {
		t.Fatal(le)
	}
	if dsm3b.GetCommitIndex() != 1 {
		t.Fatal()
	}
}
// #RFS-L3.1: If successful: update nextIndex and matchIndex for
// follower (#5.3)
// #RFS-L4: If there exists an N such that N > commitIndex, a majority
// of matchIndex[i] >= N, and log[N].term == currentTerm:
// set commitIndex = N (#5.3, #5.4)
// Note: test based on Figure 7; server is leader line; peer is case (a)
func TestCM_RpcAER_Leader_ResultIsSuccess_PeerJustCaughtUp(t *testing.T) {
	mcm, mrs := testSetupMCM_Leader_Figure7LeaderLine(t)
	serverTerm := mcm.pcm.RaftPersistentState.GetCurrentTerm()

	err := mcm.pcm.setCommitIndex(3)
	if err != nil {
		t.Fatal(err)
	}

	// hack & sanity check
	// Pretend "s2" is one entry behind (case (a) peer).
	mcm.pcm.LeaderVolatileState.NextIndex["s2"] = 10
	expectedNextIndex := map[ServerId]LogIndex{"s2": 10, "s3": 11, "s4": 11, "s5": 11}
	if !reflect.DeepEqual(mcm.pcm.LeaderVolatileState.NextIndex, expectedNextIndex) {
		t.Fatal()
	}
	expectedMatchIndex := map[ServerId]LogIndex{"s2": 0, "s3": 0, "s4": 0, "s5": 0}
	if !reflect.DeepEqual(mcm.pcm.LeaderVolatileState.MatchIndex, expectedMatchIndex) {
		t.Fatal()
	}

	// Success reply from "s2" for the AppendEntries carrying entry 10.
	sentRpc := &RpcAppendEntries{
		serverTerm,
		9,
		6,
		[]LogEntry{
			{6, Command("c10")},
		},
		mcm.pcm.GetCommitIndex(),
	}
	err = mcm.pcm.RpcReply_RpcAppendEntriesReply(
		"s2",
		sentRpc,
		&RpcAppendEntriesReply{serverTerm, true},
	)
	if err != nil {
		t.Fatal(err)
	}
	if mcm.pcm.GetServerState() != LEADER {
		t.Fatal()
	}
	if mcm.pcm.RaftPersistentState.GetCurrentTerm() != serverTerm {
		t.Fatal()
	}
	// #RFS-L3.1: nextIndex and matchIndex for "s2" advance.
	expectedNextIndex = map[ServerId]LogIndex{"s2": 11, "s3": 11, "s4": 11, "s5": 11}
	if !reflect.DeepEqual(mcm.pcm.LeaderVolatileState.NextIndex, expectedNextIndex) {
		t.Fatal(mcm.pcm.LeaderVolatileState.NextIndex)
	}
	expectedMatchIndex = map[ServerId]LogIndex{"s2": 10, "s3": 0, "s4": 0, "s5": 0}
	if !reflect.DeepEqual(mcm.pcm.LeaderVolatileState.MatchIndex, expectedMatchIndex) {
		t.Fatal()
	}

	// let's make some new log entries
	ioleAC, err := mcm.pcm.AppendCommand(testhelpers.DummyCommand(11))
	if err != nil || ioleAC != 11 {
		t.Fatal(err)
	}
	ioleAC, err = mcm.pcm.AppendCommand(testhelpers.DummyCommand(12))
	if err != nil || ioleAC != 12 {
		t.Fatal(err)
	}

	// we currently do not expect appendCommand() to send AppendEntries
	expectedRpcs := map[ServerId]interface{}{}
	mrs.CheckSentRpcs(t, expectedRpcs)

	// rpcs should go out on tick
	expectedRpc := &RpcAppendEntries{serverTerm, 10, 6, []LogEntry{
		{8, Command("c11")},
		{8, Command("c12")},
	}, 3}
	expectedRpcs = map[ServerId]interface{}{
		"s2": expectedRpc,
		"s3": expectedRpc,
		"s4": expectedRpc,
		"s5": expectedRpc,
	}
	err = mcm.Tick()
	if err != nil {
		t.Fatal(err)
	}
	mrs.CheckSentRpcs(t, expectedRpcs)

	// one reply - cannot advance commitIndex
	err = mcm.pcm.RpcReply_RpcAppendEntriesReply(
		"s2",
		expectedRpc,
		&RpcAppendEntriesReply{serverTerm, true},
	)
	if err != nil {
		t.Fatal(err)
	}
	if ci := mcm.pcm.GetCommitIndex(); ci != 3 {
		t.Fatal(ci)
	}

	// another reply - can advance commitIndex with majority
	// commitIndex will advance to the highest match possible
	err = mcm.pcm.RpcReply_RpcAppendEntriesReply(
		"s4",
		expectedRpc,
		&RpcAppendEntriesReply{serverTerm, true},
	)
	if err != nil {
		t.Fatal(err)
	}
	if ci := mcm.pcm.GetCommitIndex(); ci != 12 {
		t.Fatal(ci)
	}

	// other checks
	if mcm.pcm.GetServerState() != LEADER {
		t.Fatal()
	}
	expectedNextIndex = map[ServerId]LogIndex{"s2": 13, "s3": 11, "s4": 13, "s5": 11}
	if !reflect.DeepEqual(mcm.pcm.LeaderVolatileState.NextIndex, expectedNextIndex) {
		t.Fatal(mcm.pcm.LeaderVolatileState.NextIndex)
	}
	expectedMatchIndex = map[ServerId]LogIndex{"s2": 12, "s3": 0, "s4": 12, "s5": 0}
	if !reflect.DeepEqual(mcm.pcm.LeaderVolatileState.MatchIndex, expectedMatchIndex) {
		t.Fatal(mcm.pcm.LeaderVolatileState.MatchIndex)
	}
}
// #RFS-L4: If there exists an N such that N > commitIndex, a majority
// of matchIndex[i] >= N, and log[N].term == currentTerm:
// set commitIndex = N (#5.3, #5.4)
// Note: test based on Figure 7; server is leader line; peers are other cases
// commitIndex only advances once a majority of peers' matchIndex
// covers an entry from the current term; each tick also sends the
// per-peer AppendEntries checked below.
func TestCM_Leader_TickAdvancesCommitIndexIfPossible(t *testing.T) {
	mcm, mrs := testSetupMCM_Leader_Figure7LeaderLine(t)
	serverTerm := mcm.pcm.RaftPersistentState.GetCurrentTerm()

	// pre checks
	if serverTerm != 8 {
		t.Fatal()
	}
	if mcm.pcm.GetCommitIndex() != 0 {
		t.Fatal()
	}
	expectedRpcs := map[ServerId]interface{}{}
	mrs.CheckSentRpcs(t, expectedRpcs)
	mrs.ClearSentRpcs()

	// match peers for cases (a), (b), (c) & (d)
	err := mcm.pcm.LeaderVolatileState.SetMatchIndexAndNextIndex("s2", 9)
	if err != nil {
		t.Fatal(err)
	}
	err = mcm.pcm.LeaderVolatileState.SetMatchIndexAndNextIndex("s3", 4)
	if err != nil {
		t.Fatal(err)
	}
	err = mcm.pcm.LeaderVolatileState.SetMatchIndexAndNextIndex("s4", 10)
	if err != nil {
		t.Fatal(err)
	}
	err = mcm.pcm.LeaderVolatileState.SetMatchIndexAndNextIndex("s5", 10)
	if err != nil {
		t.Fatal(err)
	}

	// tick should try to advance commitIndex but nothing should happen
	err = mcm.Tick()
	if err != nil {
		t.Fatal(err)
	}
	if mcm.pcm.GetCommitIndex() != 0 {
		t.Fatal()
	}
	expectedRpcs = map[ServerId]interface{}{
		"s2": &RpcAppendEntries{serverTerm, 9, 6, []LogEntry{
			{6, Command("c10")},
		}, 0},
		"s3": &RpcAppendEntries{serverTerm, 4, 4, []LogEntry{
			{4, Command("c5")}, {5, Command("c6")}, {5, Command("c7")},
		}, 0},
		"s4": &RpcAppendEntries{serverTerm, 10, 6, []LogEntry{}, 0},
		"s5": &RpcAppendEntries{serverTerm, 10, 6, []LogEntry{}, 0},
	}
	mrs.CheckSentRpcs(t, expectedRpcs)
	mrs.ClearSentRpcs()

	// let's make some new log entries
	ioleAC, err := mcm.pcm.AppendCommand(testhelpers.DummyCommand(11))
	if err != nil || ioleAC != 11 {
		t.Fatal(err)
	}
	ioleAC, err = mcm.pcm.AppendCommand(testhelpers.DummyCommand(12))
	if err != nil || ioleAC != 12 {
		t.Fatal()
	}

	// tick should try to advance commitIndex but nothing should happen
	err = mcm.Tick()
	if err != nil {
		t.Fatal(err)
	}
	if mcm.pcm.GetCommitIndex() != 0 {
		t.Fatal()
	}
	expectedRpcs = map[ServerId]interface{}{
		"s2": &RpcAppendEntries{serverTerm, 9, 6, []LogEntry{
			{6, Command("c10")}, {8, Command("c11")}, {8, Command("c12")},
		}, 0},
		"s3": &RpcAppendEntries{serverTerm, 4, 4, []LogEntry{
			{4, Command("c5")}, {5, Command("c6")}, {5, Command("c7")},
		}, 0},
		"s4": &RpcAppendEntries{serverTerm, 10, 6, []LogEntry{
			{8, Command("c11")}, {8, Command("c12")},
		}, 0},
		"s5": &RpcAppendEntries{serverTerm, 10, 6, []LogEntry{
			{8, Command("c11")}, {8, Command("c12")},
		}, 0},
	}
	mrs.CheckSentRpcs(t, expectedRpcs)
	mrs.ClearSentRpcs()

	// 2 peers - for cases (a) & (b) - catch up
	err = mcm.pcm.LeaderVolatileState.SetMatchIndexAndNextIndex("s2", 11)
	if err != nil {
		t.Fatal(err)
	}
	err = mcm.pcm.LeaderVolatileState.SetMatchIndexAndNextIndex("s3", 11)
	if err != nil {
		t.Fatal(err)
	}

	// tick advances commitIndex
	err = mcm.Tick()
	if err != nil {
		t.Fatal(err)
	}
	if mcm.pcm.GetCommitIndex() != 11 {
		t.Fatal()
	}
	expectedRpcs = map[ServerId]interface{}{
		"s2": &RpcAppendEntries{serverTerm, 11, 8, []LogEntry{
			{8, Command("c12")},
		}, 11},
		"s3": &RpcAppendEntries{serverTerm, 11, 8, []LogEntry{
			{8, Command("c12")},
		}, 11},
		"s4": &RpcAppendEntries{serverTerm, 10, 6, []LogEntry{
			{8, Command("c11")}, {8, Command("c12")},
		}, 11},
		"s5": &RpcAppendEntries{serverTerm, 10, 6, []LogEntry{
			{8, Command("c11")}, {8, Command("c12")},
		}, 11},
	}
	mrs.CheckSentRpcs(t, expectedRpcs)
	mrs.ClearSentRpcs()

	// replies never came back -> tick cannot advance commitIndex
	err = mcm.Tick()
	if err != nil {
		t.Fatal(err)
	}
	if mcm.pcm.GetCommitIndex() != 11 {
		t.Fatal(mcm.pcm.GetCommitIndex())
	}
	mrs.CheckSentRpcs(t, expectedRpcs)
	mrs.ClearSentRpcs()
}