// Minority failure test: stops the maximum tolerable number of servers and
// checks that the remainder still elect a unanimous leader.
func Test_KnownFault(t *testing.T) {
	testChannel1 := time.NewTimer(testDuration / 2)
	total := 5
	var server [5]Raft.Server
	for i := 0; i < total; i++ {
		server[i] = Raft.New(i+1, "../config.json")
		dbg.Println("opening", i+1)
	}
	// Must never exceed MAXTOLERANCE (2 of 5 servers).
	serversClosed := 0
	induceFault := Raft.RandomTimer(time.Second)
	go func() {
		for {
			select {
			case <-induceFault:
				// Ask any live server who the current leader is.
				pid := 0
				for i := 0; i < total; i++ {
					if server[i].State() != Raft.CLOSEDSTATE {
						pid = server[i].Leader()
						break
					}
				}
				if pid != 0 && serversClosed < MAXTOLERANCE {
					dbg.Println("closing", pid)
					server[pid-1].Error() <- true
					<-server[pid-1].ServerStopped()
					serversClosed++
					dbg.Println("closed", pid)
				}
				induceFault = Raft.RandomTimer(time.Second)
			case <-testChannel1.C:
				return
			}
		}
	}()
	wg := new(sync.WaitGroup)
	wg.Add(1)
	go testUnanimousLeadership(server, total, wg)
	wg.Wait()
	dbg.Println("shutting down remaining servers")
	for i := 0; i < total; i++ {
		dbg.Println(i, server[i].State())
		if server[i].State() != Raft.CLOSEDSTATE {
			dbg.Println("closing", i+1)
			server[i].Error() <- true
			<-server[i].ServerStopped()
			dbg.Println("closed", i+1)
		}
	}
	t.Log("Maximum break test passed.")
}
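// The tests in this file lean on Raft.RandomTimer, which is defined in the
// Raft package; all the tests need from it is a channel that fires once after
// a randomized delay, so fault injection is not periodic. A minimal sketch of
// an equivalent helper, assuming the real one bounds the delay by its
// argument (the package's actual implementation may differ):
func randomTimerSketch(max time.Duration) <-chan time.Time {
	// Pick a delay in (0, max] and return a one-shot channel for it.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return time.After(time.Duration(rng.Int63n(int64(max))) + time.Millisecond)
}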
// Test for partitioning the network, simulating network failures.
func Test_Partitioning(t *testing.T) {
	testChannel1 := time.NewTimer(testDuration)
	total := 5
	var server [5]Raft.Server
	for i := 0; i < total; i++ {
		server[i] = Raft.New(i+1, "../config.json")
		dbg.Println("opening", i+1)
	}
	induceFault := Raft.RandomTimer(time.Second)
	go func() {
		// Scripted fault schedule: partition servers 1, 2, and 3 away one at
		// a time, then heal the partitions in a different order.
		localCount := 0
		for {
			select {
			case <-induceFault:
				switch localCount {
				case 0:
					breakServer(server, 0, total)
				case 1:
					breakServer(server, 1, total)
				case 5:
					breakServer(server, 2, total)
				case 6:
					makeServer(server, 2, total)
				case 7:
					makeServer(server, 0, total)
				case 8:
					makeServer(server, 1, total)
				}
				localCount++
				induceFault = Raft.RandomTimer(time.Second)
			case <-testChannel1.C:
				return
			}
		}
	}()
	wg := new(sync.WaitGroup)
	wg.Add(1)
	go testUnanimousLeadership(server, total, wg)
	wg.Wait()
	for i := 0; i < total; i++ {
		dbg.Println("closing", i+1)
		server[i].Error() <- true
		<-server[i].ServerStopped()
		dbg.Println("closed", i+1)
	}
	t.Log("Partitioning test passed.")
}
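// breakServer and makeServer are assumed to be test helpers defined elsewhere
// in this file that cut and restore a server's links to every peer, simulating
// a partition without stopping the process. A minimal sketch of that idea; the
// Disconnect and Connect methods are hypothetical stand-ins for whatever the
// Raft.Server transport layer actually exposes:
func breakServerSketch(server [5]Raft.Server, index, total int) {
	dbg.Println("partitioning", index+1)
	for peer := 0; peer < total; peer++ {
		if peer != index {
			server[index].Disconnect(peer + 1) // hypothetical method
		}
	}
}

func makeServerSketch(server [5]Raft.Server, index, total int) {
	dbg.Println("healing", index+1)
	for peer := 0; peer < total; peer++ {
		if peer != index {
			server[index].Connect(peer + 1) // hypothetical method
		}
	}
}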
// Inducing random faults, keeping the number of stopped servers below the
// threshold capacity.
func Test_RandomFault(t *testing.T) {
	testChannel1 := time.NewTimer(testDuration)
	testChannel2 := time.NewTimer(testDuration)
	total := 5
	var server [5]Raft.Server
	for i := 0; i < total; i++ {
		server[i] = Raft.New(i+1, "../config.json")
		dbg.Println("opening", i+1)
	}
	// Must never exceed MAXTOLERANCE (2 of 5 servers). The counter is shared
	// by the two fault goroutines below, so it is accessed atomically
	// (requires the sync/atomic import).
	var serversClosed int32
	induceFault := Raft.RandomTimer(time.Second)
	go func() {
		rng := rand.New(rand.NewSource(time.Now().UnixNano()))
		for {
			select {
			case <-induceFault:
				index := rng.Intn(total)
				if server[index].State() != Raft.CLOSEDSTATE && int(atomic.LoadInt32(&serversClosed)) < MAXTOLERANCE {
					dbg.Println("closing", index+1)
					server[index].Error() <- true
					<-server[index].ServerStopped()
					dbg.Println("closed", index+1)
					dbg.Println("servers closed:", atomic.AddInt32(&serversClosed, 1))
				}
				induceFault = Raft.RandomTimer(time.Second)
			case <-testChannel1.C:
				return
			}
		}
	}()
	removeFault := Raft.RandomTimer(time.Second)
	go func() {
		rng := rand.New(rand.NewSource(time.Now().UnixNano()))
		for {
			select {
			case <-removeFault:
				index := rng.Intn(total)
				if server[index].State() == Raft.CLOSEDSTATE {
					dbg.Println("opening", index+1)
					server[index] = Raft.New(index+1, "../config.json")
					atomic.AddInt32(&serversClosed, -1)
					dbg.Println("opened", index+1)
				}
				removeFault = Raft.RandomTimer(time.Second)
			case <-testChannel2.C:
				return
			}
		}
	}()
	wg := new(sync.WaitGroup)
	wg.Add(1)
	go testUnanimousLeadership(server, total, wg)
	wg.Wait()
	dbg.Println("shutting down remaining servers")
	for i := 0; i < total; i++ {
		dbg.Println(i, server[i].State())
		if server[i].State() != Raft.CLOSEDSTATE {
			dbg.Println("closing", i+1)
			server[i].Error() <- true
			<-server[i].ServerStopped()
			dbg.Println("closed", i+1)
		}
	}
	t.Log("Random faults test passed.")
}
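// testUnanimousLeadership is the shared assertion used by all three tests and
// is defined elsewhere in this file. A minimal sketch of the idea, assuming it
// polls until the test duration elapses and that every server not in
// CLOSEDSTATE should report the same, nonzero leader (the real implementation
// presumably fails the test on disagreement; this sketch only logs it):
func testUnanimousLeadershipSketch(server [5]Raft.Server, total int, wg *sync.WaitGroup) {
	defer wg.Done()
	deadline := time.After(testDuration)
	for {
		select {
		case <-deadline:
			return
		case <-time.After(100 * time.Millisecond):
			leader := 0
			unanimous := true
			for i := 0; i < total; i++ {
				if server[i].State() == Raft.CLOSEDSTATE {
					continue // stopped servers have no opinion on leadership
				}
				current := server[i].Leader()
				if leader == 0 {
					leader = current
				} else if current != leader {
					unanimous = false
				}
			}
			if !unanimous {
				dbg.Println("leadership disagreement observed")
			}
		}
	}
}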