Example no. 1
func mkCluster() (*mock.MockCluster, error) {
	config := cluster.Config{Peers: []cluster.PeerConfig{
		{Id: 0}, {Id: 1}, {Id: 2}, {Id: 3}, {Id: 4},
	}}
	return mock.NewCluster(config)
}
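A minimal usage sketch for the cluster returned above, assuming the mock cluster exposes a Servers map keyed by peer id and that each server offers Outbox()/Inbox() channels of *cluster.Envelope; these names are assumptions based on how the servers are wired up in the later examples, not something shown in this snippet:
// Sketch only: pushes one message through the mock cluster built by mkCluster.
// Assumes Servers, Outbox(), Inbox() and cluster.Envelope{Pid, Msg} exist as named.
func pingPeer() error {
	cl, err := mkCluster()
	if err != nil {
		return err
	}
	// Node 0 addresses an envelope to node 1.
	cl.Servers[0].Outbox() <- &cluster.Envelope{Pid: 1, Msg: "ping"}
	// Node 1 should see it arrive on its inbox.
	env := <-cl.Servers[1].Inbox()
	fmt.Printf("node 1 received %v\n", env.Msg)
	return nil
}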
Example no. 2
// Generates a mock cluster of 5 raft nodes and returns the nodes along with the cluster
func makeMockRafts() ([]RaftNode, *mock.MockCluster) {
	// init the communication layer.
	// Note: the mock cluster doesn't need IP addresses, only Ids.
	clconfig := cluster.Config{Peers: []cluster.PeerConfig{
		{Id: 0}, {Id: 1}, {Id: 2}, {Id: 3}, {Id: 4}}}
	cl, err := mock.NewCluster(clconfig) // "cl" avoids shadowing the cluster package
	if err != nil {
		return nil, cl
	}

	// init the raft node layer
	nodes := make([]RaftNode, len(clconfig.Peers))

	// Create a raft node, and give the corresponding "Server" object from the
	// cluster to help it communicate with the others.
	for id := 0; id < len(clconfig.Peers); id++ {
		config := NodeConfig{clconfig, id, "$GOPATH/src/github.com/aakashdeshpande/cs733/assignment3/", 500}
		raftNode := New(config)

		// Give each raftNode its own "Server" from the cluster.
		raftNode.server = cl.Servers[id]
		nodes[id] = raftNode
	}

	return nodes, cl
}
Example no. 3
func (myRaft Raft) makeMockRafts() (Raft, *mock.MockCluster) {
	//create mock cluster.
	clconfig := cluster.Config{Peers: nil}
	cl, err := mock.NewCluster(clconfig)
	if err != nil {
		panic(err)
	}
	myRaft.CommitInfo = make(chan interface{})
	for id := 1; id <= PEERS; id++ {
		// Objects to store the state machine, config and server node.
		myNode := new(RaftMachine)
		sm := new(State_Machine)
		myConf := new(Config)

		//initialize config and server object.
		server := createMockNode(id, myConf, sm, cl)
		sm.id = int32(id)
		myNode.Node = server
		myNode.SM = sm
		myNode.Conf = myConf
		// append this node's objects to the raft cluster slice.
		myRaft.Cluster = append(myRaft.Cluster, myNode)
		// start the processing goroutine for this node.
		go myRaft.startNode(myRaft.Cluster[id-1].Conf, myRaft.Cluster[id-1].Node, myRaft.Cluster[id-1].SM)
	}
	return myRaft, cl
}
Example no. 4
func makerafts() []*Node {

	newconfig := cluster.Config{
		Peers: []cluster.PeerConfig{
			{Id: 1, Address: "localhost:5050"},
			{Id: 2, Address: "localhost:6060"},
			{Id: 3, Address: "localhost:7070"},
			{Id: 4, Address: "localhost:8080"},
			{Id: 5, Address: "localhost:9090"},
		}}
	cl, _ := mock.NewCluster(newconfig)

	nodes := make([]*Node, len(newconfig.Peers))

	TempConfig := ConfigRaft{
		LogDir:           "Log",
		ElectionTimeout:  ETimeout,
		HeartbeatTimeout: HeartBeat,
	}

	MatchIndex := map[int]int{0: -1, 1: -1, 2: -1, 3: -1, 4: -1}
	NextIndex := map[int]int{0: 1, 1: 0, 2: 0, 3: 0, 4: 0}
	// make() already zero-initializes the vote counts.
	Votes := make([]int, NumServers)

	for i := 1; i <= NumServers; i++ {
		RaftNode := Node{id: i, leaderid: -1, timeoutCh: time.NewTimer(time.Duration(TempConfig.ElectionTimeout) * time.Millisecond), LogDir: TempConfig.LogDir + strconv.Itoa(i), CommitCh: make(chan *Commit, 100)}
		log, err := log.Open(RaftNode.LogDir)
		if err != nil {
			fmt.Println("Error opening Log File", i)
		}
		log.RegisterSampleEntry(LogStore{})
		// Record peer ids, leaving 0 at this node's own index.
		peer := make([]int, NumServers)
		server := cl.Servers[i]
		p := server.Peers()
		temp := 0

		for j := 0; j < NumServers; j++ {
			if j == i-1 {
				peer[j] = 0
			} else {
				peer[j] = p[temp]
				temp++
			}
		}

		RaftNode.server = server
		RaftNode.sm = &StateMachine{state: "follower", id: server.Pid(), leaderid: -1, peers: peer, term: 0, votedFor: 0, Votes: Votes, commitIndex: -1, Log: log, index: -1, matchIndex: MatchIndex, nextIndex: NextIndex, HeartbeatTimeout: 500, ElectionTimeout: ETimeout}
		nodes[i-1] = &RaftNode
	}
	for i := 0; i < NumServers; i++ {
		//fmt.Println(i)
		go nodes[i].listenonchannel()
	}

	return nodes
}
Example no. 5
func initRaft(id int) *raft.RaftNode {
	cl, _ := cluster.NewCluster("raft/cluster_test_config.json")
	rn := raft.NewNode(id).(*raft.RaftNode)
	rn.Server = cl.Servers[id]
	rn.Sm = rn.NewSm(id, rn.Server.Peers())
	return rn
}
Example no. 6
func createMockCluster(configFile string) (*cluster.Config, *mock.MockCluster) {
	cfg, err := cluster.ToConfig(configFile)
	if err != nil {
		panic(err)
	}
	c1, err := mock.NewCluster(configFile)
	if err != nil {
		panic(err)
	}
	return cfg, c1
}
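The JSON files handed to cluster.ToConfig and the file-based NewCluster calls are not reproduced in these examples. As an illustration only, assuming the file mirrors the exported cluster.Config fields used in the struct literals here (Peers entries with Id and Address), such a config could be decoded like this; the real test files may carry additional fields:
// Illustrative sketch, not the actual contents of any test config file.
// Decodes a peer list of the same shape as the cluster.Config literals above.
func exampleConfig() (cluster.Config, error) {
	raw := []byte(`{"Peers": [
		{"Id": 1, "Address": "localhost:5001"},
		{"Id": 2, "Address": "localhost:5002"},
		{"Id": 3, "Address": "localhost:5003"}
	]}`)
	var cfg cluster.Config
	err := json.Unmarshal(raw, &cfg) // requires encoding/json
	return cfg, err
}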
Example no. 7
func createMockCluster(config Config) (*mockcluster.MockCluster, error) {
	clconfig := cluster.Config{Peers: []cluster.PeerConfig{
		{Id: 1}, {Id: 2}, {Id: 3}, {Id: 4}, {Id: 5},
	}}
	return mockcluster.NewCluster(clconfig)
}
Example no. 8
func MakeNodes(t *testing.T) ([]*RaftNode, *mock.MockCluster) {
	cl, _ := mock.NewCluster(nil)
	rafts := make([]*RaftNode, len(nc))
	for i := 0; i < len(nc); i++ {
		rafts[i] = GetNode(i, t)
		rafts[i].server.Close() //Override with mock cluster
		tmp, err := cl.AddServer(int(rafts[i].id))
		if err != nil {
			t.Fatal("Error creating nodes.", err)
		}
		rafts[i].server = tmp
	}
	return rafts, cl
}
Example no. 9
// starting the rafts
func initialization() {
	cleanup() // cleanup before beginning

	fmt.Print("TEST TAKES APPROX 180 SECS/ PLEASE HOLD ON")
	clconfig := cluster.Config{Peers: []cluster.PeerConfig{
		{Id: 1}, {Id: 2}, {Id: 3}, {Id: 4}, {Id: 5},
	}}
	cluster, err := mock.NewCluster(clconfig)
	check(err)
	mckk = cluster
	rafts = rafter(cluster) // slice of raft.Node

	time.Sleep(10 * time.Second)

}
Example no. 10
// creates the raft nodes, initializes the state machine in each node,
// and runs a goroutine corresponding to each node
func MakeRafts() map[int]Node {
	cluster, _ := cluster.NewCluster("cluster_test_config.json")
	raftNode1 := NewNode(1)
	raftNode1.(*RaftNode).Server = cluster.Servers[1]
	peerIds := raftNode1.(*RaftNode).Server.Peers()
	raftNode1.(*RaftNode).Sm = raftNode1.(*RaftNode).NewSm(1, peerIds)
	raftNodes := make(map[int]Node)
	raftNodes[1] = raftNode1
	for _, raftNodeId := range raftNode1.(*RaftNode).Server.Peers() {
		newNode := NewNode(raftNodeId)
		newNode.(*RaftNode).Server = cluster.Servers[raftNodeId]
		peerIds := newNode.(*RaftNode).Server.Peers()
		newNode.(*RaftNode).Sm = newNode.(*RaftNode).NewSm(raftNodeId, peerIds)
		raftNodes[raftNodeId] = newNode
	}
	return raftNodes
}
Example no. 11
func makeMockRafts() ([]RaftNode, *mock.MockCluster) {
	// votedForDB, _ = leveldb.OpenFile(PATH+"/votedFor", nil)
	clusterConfig := cluster.Config{Peers: []cluster.PeerConfig{
		{Id: 1}, {Id: 2}, {Id: 3}, {Id: 4}, {Id: 5}}}
	clstr, _ := mock.NewCluster(clusterConfig)

	var nodes []RaftNode

	for i := 0; i < len(clusterConfig.Peers); i++ {

		config := Config{clusterConfig, clusterConfig.Peers[i].Id, PATH + "", 550, 50}
		nodes = append(nodes, New(config, clstr.Servers[i+1]))

	}
	// //fmt.Println("nodes:", nodes, "cls:", clstr)
	return nodes, clstr
}
Example no. 12
/* Tests fault tolerance of the cluster in the event of a network partition.
 * Splits the cluster such that the partition with the leader stays in the majority.
 * In the partitioned network, a log entry is appended to the leader's log.
 * Proper replication of the log is tested once the partition is healed.
 */
func TestSplit(t *testing.T) {
	if !TESTSPLIT {
		return
	}
	contents := make([]string, 2)
	contents[0] = "duckduck"
	contents[1] = "go"
	mkcl, err := mock.NewCluster("input_spec.json")
	checkError(t, err, "While creating mock cluster")
	rafts, err := makeMockRafts(mkcl, "log", 250, 350)
	checkError(t, err, "While creating mock rafts")
	time.Sleep(5 * time.Second)
	rafts[0].Append([]byte(contents[0]))
	time.Sleep(5 * time.Second)
	mkcl.Lock()
	part1 := []int{1, 3}
	part2 := []int{2, 4}
	rafts[1].smLock.RLock()
	ldrId := rafts[4].LeaderId()
	rafts[1].smLock.RUnlock()
	fmt.Printf("ldrId:%v\n", ldrId)
	if ldrId%2 == 0 {
		part2 = append(part2, 5)
	} else {
		part1 = append(part1, 5)
	}
	mkcl.Unlock()
	mkcl.Partition(part1, part2)
	debugRaftTest(fmt.Sprintf("Partitions: %v %v\n", part1, part2))
	time.Sleep(4 * time.Second)
	mkcl.Lock()
	rafts[ldrId-1].Append([]byte(contents[1]))
	mkcl.Unlock()
	time.Sleep(8 * time.Second)
	mkcl.Heal()
	debugRaftTest("Healed\n")
	time.Sleep(8 * time.Second)
	ciarr := []int{0, 0, 0, 0, 0}
	for cnt := 0; cnt < 5; {
		for idx, node := range rafts {
			select {
			case ci := <-node.CommitChannel():
				if ci.Err != nil {
					fmt.Fprintln(os.Stderr, ci.Err)
				}
				//Testing CommitChannel
				expect(t, contents[ciarr[idx]], string(ci.Data))
				ciarr[idx] += 1
				if ciarr[idx] == 2 {
					cnt += 1
				}
			}
		}
	}

	for _, node := range rafts {
		node.smLock.RLock()
		debugRaftTest(fmt.Sprintf("%v", node.sm.String()))
		node.smLock.RUnlock()
		node.Shutdown()
	}
}
Example no. 13
func TestSeparationAndHeal(t *testing.T) {
	//--------------------------------------------------- subtest 1 (check whether the partition 1,2,3 makes progress) ------------------------
	cleanup()
	clconfig := cluster.Config{Peers: []cluster.PeerConfig{
		{Id: 1}, {Id: 2}, {Id: 3}, {Id: 4}, {Id: 5},
	}}
	cluster, err := mock.NewCluster(clconfig)
	check(err)
	cluster.Partition([]int{1, 2, 3}, []int{4, 5}) // Cluster partitions into two.

	rafts := rafter(cluster) // slice of raft.Node

	//time.Sleep(10 * time.Second)
	(rafts[findLeader(rafts)-1]).Append([]byte("foo"))
	time.Sleep(3 * time.Second)

	//test the different log contents at partitions

	for i := 0; i < len(rafts); i++ {

		if i >= 3 {
			if rafts[i].LogHandler.GetLastIndex() != 0 {
				t.Fatal(rafts[i].LogHandler.GetLastIndex())
			}
		} else {
			if rafts[i].LogHandler.GetLastIndex() != 1 {
				t.Fatal(rafts[i].LogHandler.GetLastIndex())
			}
		}

	}

	//--------------------------------------------------- subtest 2 (check whether the healed cluster 1,2,3,4,5 catches up and stays consistent) ------------------------

	cluster.Heal() //healing

	time.Sleep(4 * time.Second)
	(rafts[findLeader(rafts)-1]).Append([]byte("bar"))
	time.Sleep(10 * time.Second)

	for i := 0; i < len(rafts); i++ {
		if rafts[i].LogHandler.GetLastIndex() != 2 {
			t.Fatal(rafts[i].LogHandler.GetLastIndex())
		}
	}

	//------------------------------------- sub test 4: making a partition of 1,2,3,4 and 5 -------------------------
	cluster.Partition([]int{1, 2, 3, 4}, []int{5}) // Cluster partitions into two.
	time.Sleep(20 * time.Second)

	(rafts[findLeader(rafts)-1]).Append([]byte("cmd1"))
	time.Sleep(1 * time.Second)
	(rafts[findLeader(rafts)-1]).Append([]byte("cmd2"))
	time.Sleep(1 * time.Second)
	(rafts[findLeader(rafts)-1]).Append([]byte("cmd3"))
	time.Sleep(1 * time.Second)

	cluster.Heal() // healed the partition

	time.Sleep(5 * time.Second)

	(rafts[findLeader(rafts)-1]).Append([]byte("cmd4"))
	time.Sleep(10 * time.Second)

	for i := 0; i < len(rafts); i++ {

		if rafts[i].LogHandler.GetLastIndex() != 6 {

			t.Fatal(rafts[i].LogHandler.GetLastIndex())
		}

	}

	//------------------------------------- sub test 5: stopping the active leader to see the rise of another leader -------

	rafts[findLeader(rafts)-1].ShutDown() // stopping the current leader

	time.Sleep(time.Second * 5) // allowing to stabilise

	rafts[findLeader(rafts)-1].ShutDown() // stopping the current leader

	time.Sleep(time.Second * 5) // allowing to stabilise

	(rafts[findLeader(rafts)-1]).Append([]byte("cmd2"))
	(rafts[findLeader(rafts)-1]).Append([]byte("cmd3"))

	time.Sleep(time.Second * 10)

	for i := 0; i < len(rafts); i++ {
		if rafts[i].StopSignal {

			time.Sleep(time.Second * 10)
			rafts[i].StopSignal = false
			rafts[i].startOperation()

		}
	}

	time.Sleep(time.Second * 10)
	(rafts[findLeader(rafts)-1]).Append([]byte("cmd4"))
	time.Sleep(time.Second * 10)

	// the restarted servers should catch up; check that every node's last log index reaches 9

	for i := 0; i < len(rafts); i++ {

		if rafts[i].LogHandler.GetLastIndex() != 9 {
			t.Fatal(rafts[i].LogHandler.GetLastIndex())
		}

	}

}
Example no. 14
func InitiateCluster(conf cluster.Config) {
	Mcluster, _ = mock.NewCluster(conf)
}
Example no. 15
func TestPartition(t *testing.T) {
	rafts := makeRafts(my_clust) // slice of raft.Node
	time.Sleep(time.Second)
	MockClustConfig := GetMockClusterConfig(my_clust)
	mockCluster, _ := mock.NewCluster(MockClustConfig)
	changeToMockCluster(rafts, mockCluster)
	time.Sleep(time.Second * 2)

	var part1 []Node
	var part2 []Node
	var part1_Ids []int
	var part2_Ids []int

	ldr, err := getLeader(rafts)
	for err != nil {
		ldr, err = getLeader(rafts)
	}
	for _, node := range rafts {
		node_id, _ := node.Id()
		node_leader, _ := node.LeaderId()
		if node_id == node_leader {
			part1 = append(part1, node)
			part1_Ids = append(part1_Ids, node_id)
		} else {
			part2 = append(part2, node)
			part2_Ids = append(part2_Ids, node_id)
		}
	}

	mockCluster.Partition(part1_Ids, part2_Ids)
	ldr = part1[0]
	ldr_id, _ := ldr.Id()
	if debug_l3 {
		fmt.Println("--------------------- Leader ---------------------> " + strconv.Itoa(ldr_id))
	}
	ldr.Append([]byte("foo1"))

	time.Sleep(time.Second * 2)
	mockCluster.Heal()
	time.Sleep(time.Second * 2)

	ldr, err = getLeader(rafts)
	for err != nil {
		ldr, err = getLeader(rafts)
	}
	ldr_id, _ = ldr.Id()
	if debug_l3 {
		fmt.Println("--------------------- Leader ---------------------> " + strconv.Itoa(ldr_id))
	}
	ldr.Append([]byte("foo2"))
	time.Sleep(time.Second * 4)

	if debug_l3 {
		for _, node := range rafts {
			rn_node := node.(*RaftNode)
			fmt.Println(rn_node.SM.Log, strconv.Itoa(rn_node.SM.CommitIndex))
			//fmt.Println(strconv.Itoa(rn_node.SM.Id) + ":" + string(rn_node.SM.Log[0].Data))
			//fmt.Println(strconv.Itoa(rn_node.SM.Id) + ":" + string(rn_node.SM.Log[1].Data))
		}
	}

	for _, node := range rafts {
		ch, _ := node.CommitChannel()
		rn_node := node.(*RaftNode)
		select {
		case ci := <-ch:
			if ci.Err != nil {
				t.Fatal(ci.Err)
			}
			if debug_l3 {
				fmt.Println(strconv.Itoa(rn_node.SM.Id) + "->" + string(ci.Data))
			}
			//if ci.Data != nil {
			if string(ci.Data) != "foo2" {
				t.Fatal("Got different data")
			}
		default:
			t.Fatal("Expected message on all nodes")
		}
	}

	for _, node := range rafts {
		node.Shutdown()
	}

	if debug_l3 {
		fmt.Println("---------------------------------------------------------------------------------------")
	}
}