func CreateRaftCluster() ([]raft.Raft, error) {
	raftConf, err := raftImpl.ReadConfig("./config.json")
	if err != nil {
		return nil, err
	}

	initState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)

	cluster.NewProxyWithConfig(raftImpl.RaftToClusterConf(raftConf))

	registerTypes()
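	// registerTypes presumably registers the message types exchanged through
	// the cluster layer (e.g. with encoding/gob); its definition is not shown here.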

	raftServers := make([]raft.Raft, serverCount)
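	// note: serverCount is assumed to be a package-level value defined
	// elsewhere; it is not declared in this excerpt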

	for i := 0; i < serverCount; i += 1 {
		// create cluster.Server
		clusterServer, err := cluster.NewWithConfig(i+1, "127.0.0.1", 7500+i, raftImpl.RaftToClusterConf(raftConf))
		if err != nil {
			return nil, err
		}

		logStore, err := llog.Create(raftConf.RaftLogDirectoryPath + "/" + strconv.Itoa(i+1))
		if err != nil {
			return nil, err
		}

		s, err := raftImpl.NewWithConfig(clusterServer, logStore, raftConf)
		if err != nil {
			return nil, err
		}
		raftServers[i] = s
	}
	// give the servers a few seconds to elect a leader
	// (a fixed sleep, not a guarantee)
	time.Sleep(5 * time.Second)
	return raftServers, nil
}
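The example above leans on helpers defined elsewhere in the package. A minimal sketch of initState, assuming its only job is to wipe the three state directories so leftovers from earlier runs cannot interfere (which is what the test comments below suggest), using os.RemoveAll from the standard library:

// initState (sketch): remove the state directories named in the config.
func initState(stableStorePath, logPath, raftLogPath string) {
	for _, dir := range []string{stableStorePath, logPath, raftLogPath} {
		// a missing directory is already "clean", so the error is ignored
		os.RemoveAll(dir)
	}
}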
Example #2
// TestReplicateScale tests that 100 messages are
// replicated in a reasonable amount of time.
// Reasonable here accounts for 5 servers doing
// disk IO for each message.
func TestReplicateScale(t *testing.T) {
	raftConf := &RaftConfig{
		MemberRegSocket:          "127.0.0.1:8145",
		PeerSocket:               "127.0.0.1:9100",
		TimeoutInMillis:          1500,
		HbTimeoutInMillis:        100,
		LogDirectoryPath:         "logs",
		StableStoreDirectoryPath: "./stable",
		RaftLogDirectoryPath:     "../LocalLog",
	}

	// delete stored state so leftovers from earlier runs cannot affect this or later test cases
	initState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)

	// launch cluster proxy servers
	cluster.NewProxyWithConfig(RaftToClusterConf(raftConf))

	fmt.Println("Started Proxy")

	time.Sleep(100 * time.Millisecond)

	serverCount := 5
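	// index 0 is left unused: servers are addressed by pid, which starts at 1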
	raftServers := make([]raft.Raft, serverCount+1)

	for i := 1; i <= serverCount; i += 1 {
		// create cluster.Server
		clusterServer, err := cluster.NewWithConfig(i, "127.0.0.1", 7000+i, RaftToClusterConf(raftConf))
		if err != nil {
			t.Errorf("Error in creating cluster server. " + err.Error())
			return
		}

		logStore, err := llog.Create(raftConf.RaftLogDirectoryPath + "/" + strconv.Itoa(i))
		if err != nil {
			t.Errorf("Error in creating log. " + err.Error())
			return
		}

		s, err := NewWithConfig(clusterServer, logStore, raftConf)
		if err != nil {
			t.Errorf("Error in creating Raft servers. " + err.Error())
			return
		}
		raftServers[i] = s
	}

	// there should be a leader after a sufficiently long wait
	time.Sleep(5 * time.Second)
	leaderId := raftServers[2].Leader()

	if leaderId == NONE {
		t.Errorf("No leader was chosen")
		return
	}

	fmt.Println("Leader pid: " + strconv.Itoa(leaderId))

	// pick the leader and one follower on which to verify replication
	leader := raftServers[leaderId]

	var follower raft.Raft

	for i, server := range raftServers {
		if i > 0 && server.Pid() != leaderId {
			follower = server
			fmt.Println("Follower is " + strconv.Itoa(follower.Pid()))
			break
		}
	}

	count := 100

	// drain the inboxes of the leader and the remaining followers so their channels do not block
	for i, server := range raftServers {
		if i > 0 && server.Pid() != follower.Pid() {
			go readMessages(server)
		}
	}

	done := make(chan bool, 1)

	go replicateVerification(follower, count, done)

	for count != 0 {
		leader.Outbox() <- strconv.Itoa(count)
		count -= 1
	}

	select {
	case <-done:
	case <-time.After(5 * time.Minute):
		t.Errorf("Message replication took more than 5 minutes !")

	}
}
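TestReplicateScale relies on two helpers that are not shown in this excerpt. A plausible sketch, assuming readMessages merely drains a server's inbox and replicateVerification counts the entries delivered to the follower before signalling done:

// readMessages (sketch): drain a server's inbox so delivery never blocks.
func readMessages(s raft.Raft) {
	for range s.Inbox() {
	}
}

// replicateVerification (sketch): wait until count replicated entries have
// reached the follower, then signal completion on done.
func replicateVerification(follower raft.Raft, count int, done chan bool) {
	for i := 0; i < count; i++ {
		<-follower.Inbox()
	}
	done <- true
}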
Example #3
// TestPartition tests that, in case of a network partition,
// the partition holding a majority of servers elects a leader
func TestPartition(t *testing.T) {
	raftConf := &RaftConfig{
		MemberRegSocket:          "127.0.0.1:8124",
		PeerSocket:               "127.0.0.1:9987",
		TimeoutInMillis:          1500,
		HbTimeoutInMillis:        150,
		LogDirectoryPath:         "logs1",
		StableStoreDirectoryPath: "./stable1",
		RaftLogDirectoryPath:     "../LocalLog1",
	}

	// delete stored state so leftovers from earlier runs cannot affect this or later test cases
	initState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)

	// launch cluster proxy servers
	cluster.NewProxyWithConfig(RaftToClusterConf(raftConf))

	fmt.Println("Started Proxy")

	time.Sleep(100 * time.Millisecond)

	serverCount := 5
	raftServers := make([]raft.Raft, serverCount+1)
	pseudoClusters := make([]*test.PseudoCluster, serverCount+1)

	for i := 1; i <= serverCount; i += 1 {
		// create cluster.Server
		clusterServer, err := cluster.NewWithConfig(i, "127.0.0.1", 8500+i, RaftToClusterConf(raftConf))
		if err != nil {
			t.Errorf("Error in creating cluster server: %v", err)
			return
		}
		// wrap the server only after the error check, so a failed
		// construction is never passed to NewPseudoCluster
		pseudoCluster := test.NewPseudoCluster(clusterServer)

		logStore, err := llog.Create(raftConf.RaftLogDirectoryPath + "/" + strconv.Itoa(i))
		if err != nil {
			t.Errorf("Error in creating log. " + err.Error())
			return
		}

		s, err := NewWithConfig(pseudoCluster, logStore, raftConf)
		if err != nil {
			t.Errorf("Error in creating Raft servers. " + err.Error())
			return
		}
		raftServers[i] = s
		pseudoClusters[i] = pseudoCluster
	}

	// wait for leader to be elected
	time.Sleep(20 * time.Second)
	count := 0
	oldLeader := 0
	for i := 1; i <= serverCount; i += 1 {
		if raftServers[i].Leader() == raftServers[i].Pid() {
			oldLeader = i
			count++
		}
	}
	if count != 1 {
		t.Errorf("Expected exactly one leader within 20 seconds, found %d", count)
		return
	}

	// isolate the leader and any one follower (a 2-server minority partition)
	follower := 0
	for i := 1; i <= serverCount; i += 1 {
		if i != oldLeader {
			follower = i
			break
		}
	}
	fmt.Println("Server " + strconv.Itoa(follower) + " was chosen as follower in minority partition")
	for i := 1; i <= serverCount; i += 1 {
		pseudoClusters[oldLeader].AddToInboxFilter(raftServers[i].Pid())
		pseudoClusters[oldLeader].AddToOutboxFilter(raftServers[i].Pid())
		pseudoClusters[follower].AddToInboxFilter(raftServers[i].Pid())
		pseudoClusters[follower].AddToOutboxFilter(raftServers[i].Pid())
	}

	pseudoClusters[oldLeader].AddToOutboxFilter(cluster.BROADCAST)
	pseudoClusters[follower].AddToOutboxFilter(cluster.BROADCAST)

	// wait for the majority partition to time out on the
	// unreachable leader and elect a new one
	time.Sleep(20 * time.Second)

	count = 0
	for i := 1; i <= serverCount; i += 1 {

		if i != oldLeader && i != follower && raftServers[i].Leader() == raftServers[i].Pid() {
			fmt.Println("Server " + strconv.Itoa(i) + " was chosen as new leader in majority partition.")
			count++
		}
	}
	// exactly one new leader must be chosen in the majority partition
	if count != 1 {
		t.Errorf("Expected exactly one new leader in the majority partition, found %d", count)
	}
}
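The partition itself comes from test.PseudoCluster, which wraps a cluster.Server and silently drops traffic to or from filtered pids. The sketch below is a self-contained illustration of that idea; the envelope type, field names, and channel shapes are assumptions, not the real package's API:

// envelope (hypothetical): Pid is the peer a message goes to (outbox)
// or came from (inbox).
type envelope struct {
	Pid int
	Msg interface{}
}

// pseudoCluster (hypothetical): dropping filtered envelopes looks exactly
// like a network partition to the Raft layer above it.
type pseudoCluster struct {
	inboxFilter  map[int]bool // drop incoming messages from these pids
	outboxFilter map[int]bool // drop outgoing messages to these pids
}

func newPseudoCluster() *pseudoCluster {
	return &pseudoCluster{
		inboxFilter:  make(map[int]bool),
		outboxFilter: make(map[int]bool),
	}
}

func (pc *pseudoCluster) AddToInboxFilter(pid int)  { pc.inboxFilter[pid] = true }
func (pc *pseudoCluster) AddToOutboxFilter(pid int) { pc.outboxFilter[pid] = true }

// forward shuttles envelopes from src to dst, discarding filtered ones.
func (pc *pseudoCluster) forward(src <-chan *envelope, dst chan<- *envelope, drop map[int]bool) {
	for e := range src {
		if !drop[e.Pid] {
			dst <- e
		}
	}
}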
Example #4
// _TestReplicate tests that a single
// message is correctly replicated.
// The leading underscore keeps `go test` from running it.
func _TestReplicate(t *testing.T) {
	raftConf := &RaftConfig{
		MemberRegSocket:          "127.0.0.1:9999",
		PeerSocket:               "127.0.0.1:9009",
		TimeoutInMillis:          1500,
		HbTimeoutInMillis:        50,
		LogDirectoryPath:         "logs",
		StableStoreDirectoryPath: "./stable",
		RaftLogDirectoryPath:     "../LocalLog",
	}

	// delete stored state so leftovers from earlier runs cannot affect this or later test cases
	initState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)

	// launch cluster proxy servers
	cluster.NewProxyWithConfig(RaftToClusterConf(raftConf))

	fmt.Println("Started Proxy")

	time.Sleep(100 * time.Millisecond)

	serverCount := 5
	raftServers := make([]raft.Raft, serverCount+1)

	for i := 1; i <= serverCount; i += 1 {
		// create cluster.Server
		clusterServer, err := cluster.NewWithConfig(i, "127.0.0.1", 5000+i, RaftToClusterConf(raftConf))
		if err != nil {
			t.Errorf("Error in creating cluster server. " + err.Error())
			return
		}

		logStore, err := llog.Create(raftConf.RaftLogDirectoryPath + "/" + strconv.Itoa(i))
		if err != nil {
			t.Errorf("Error in creating log. " + err.Error())
			return
		}

		s, err := NewWithConfig(clusterServer, logStore, raftConf)
		if err != nil {
			t.Errorf("Error in creating Raft servers. " + err.Error())
			return
		}
		raftServers[i] = s
	}

	// there should be a leader after a sufficiently long wait
	time.Sleep(5 * time.Second)
	leaderId := raftServers[2].Leader()

	if leaderId == NONE {
		t.Errorf("No leader was chosen")
		return
	}

	fmt.Println("Leader pid: " + strconv.Itoa(leaderId))

	// replicate an entry
	data := "Woah, it works !!"
	leader := raftServers[leaderId]

	leader.Outbox() <- data

	var follower raft.Raft

	for i, server := range raftServers {
		if i > 0 && server.Pid() != leaderId {
			follower = server
			break
		}
	}

	select {
	case msg := <-follower.Inbox():
		fmt.Println("Received message: ")
		fmt.Println(msg)
	case <-time.After(15 * time.Second):
		fmt.Println("Message replication took more than 15 seconds !")

	}

}
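For reference, the shape of RaftConfig as it can be read off the composite literals in these tests; the field types and per-field comments are assumptions, since the real declaration is not part of this excerpt:

type RaftConfig struct {
	MemberRegSocket          string // proxy socket where cluster members register
	PeerSocket               string // proxy socket used for peer-to-peer traffic
	TimeoutInMillis          int64  // election timeout
	HbTimeoutInMillis        int64  // leader heartbeat interval
	LogDirectoryPath         string // cluster message logs
	StableStoreDirectoryPath string // persisted term and vote state
	RaftLogDirectoryPath     string // replicated log entries
}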
Example #5
// TestLeaderSeparation tests that a new leader gets elected when
// the current leader is cut off from the cluster. The crash is
// simulated using the PseudoCluster service. The test then re-enables
// the links between the old leader and the rest of the cluster and
// checks that the old leader reverts to follower.
func TestLeaderSeparation(t *testing.T) {
	raftConf := &RaftConfig{
		MemberRegSocket:          "127.0.0.1:7334",
		PeerSocket:               "127.0.0.1:3434",
		TimeoutInMillis:          1500,
		HbTimeoutInMillis:        150,
		LogDirectoryPath:         "logs4",
		StableStoreDirectoryPath: "./stable4",
		RaftLogDirectoryPath:     "../LocalLog4",
	}

	// delete stored state so leftovers from earlier runs cannot affect this or later test cases
	initState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)

	// launch cluster proxy servers
	cluster.NewProxyWithConfig(RaftToClusterConf(raftConf))

	fmt.Println("Started Proxy")

	time.Sleep(100 * time.Millisecond)

	serverCount := 5
	raftServers := make([]raft.Raft, serverCount+1)
	pseudoClusters := make([]*test.PseudoCluster, serverCount+1)

	for i := 1; i <= serverCount; i += 1 {
		// create cluster.Server
		clusterServer, err := cluster.NewWithConfig(i, "127.0.0.1", 12600+i, RaftToClusterConf(raftConf))
		if err != nil {
			t.Errorf("Error in creating cluster server: %v", err)
			return
		}
		// wrap the server only after the error check
		pseudoCluster := test.NewPseudoCluster(clusterServer)

		logStore, err := llog.Create(raftConf.RaftLogDirectoryPath + "/" + strconv.Itoa(i))
		if err != nil {
			t.Errorf("Error in creating log. " + err.Error())
			return
		}

		s, err := NewWithConfig(pseudoCluster, logStore, raftConf)
		if err != nil {
			t.Errorf("Error in creating Raft servers. " + err.Error())
			return
		}
		raftServers[i] = s
		pseudoClusters[i] = pseudoCluster
	}

	// wait for leader to be elected
	time.Sleep(4 * time.Second)
	count := 0
	oldLeader := 0
	for i := 1; i <= serverCount; i += 1 {
		if raftServers[i].Leader() == raftServers[i].Pid() {
			fmt.Println("Server " + strconv.Itoa(i) + " was chosen as leader.")
			oldLeader = i
			count++
		}
	}
	if count != 1 {
		t.Errorf("Expected exactly one leader within 4 seconds, found %d", count)
		// bail out: continuing with oldLeader == 0 would dereference a nil
		// entry in pseudoClusters below
		return
	}

	// isolate Leader
	for i := 1; i <= serverCount; i += 1 {
		if raftServers[i].Pid() != oldLeader {
			pseudoClusters[oldLeader].AddToInboxFilter(raftServers[i].Pid())
			pseudoClusters[oldLeader].AddToOutboxFilter(raftServers[i].Pid())
		}
	}
	// prevent broadcasts from leader too
	pseudoClusters[oldLeader].AddToOutboxFilter(cluster.BROADCAST)
	// wait for the rest of the cluster to time out on the
	// isolated leader and elect a new one
	time.Sleep(5 * time.Second)

	count = 0
	for i := 1; i <= serverCount; i += 1 {
		if i != oldLeader && raftServers[i].Leader() == raftServers[i].Pid() {
			fmt.Println("Server " + strconv.Itoa(i) + " was chosen as new leader.")
			count++
		}
	}
	// new leader must be chosen
	if count != 1 {
		t.Errorf("No leader was chosen")
	}

	// re-enable link between old leader and other servers
	for i := 1; i <= serverCount; i += 1 {
		s := raftServers[i]
		if i != oldLeader {
			pseudoClusters[oldLeader].RemoveFromInboxFilter(s.Pid())
			pseudoClusters[oldLeader].RemoveFromOutboxFilter(s.Pid())
		}
	}

	// wait for oldLeader to discover new leader
	time.Sleep(2 * time.Second)

	// state of the old leader must be FOLLOWER
	// since a new leader has been elected
	// with a greater term
	if raftServers[oldLeader].Leader() == raftServers[oldLeader].Pid() {
		t.Errorf("Old leader is still in leader state.")
	}
}
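Finally, the surface of raft.Raft that these tests exercise, reconstructed from the call sites above; the channel directions and element types are assumptions:

type Raft interface {
	// Pid returns this server's id.
	Pid() int
	// Leader returns the pid of the believed leader, or NONE if unknown.
	Leader() int
	// Outbox accepts entries to be replicated across the cluster.
	Outbox() chan<- interface{}
	// Inbox delivers committed entries in log order.
	Inbox() <-chan interface{}
}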