Пример #1
0
// NewMockFsNode constructs an FsNode backed by a mock cluster server: it opens
// the main and state logs, builds the state machine, binds the public TCP
// listener, and starts the front-end, back-end and event-handling goroutines.
// Every resource-acquisition failure is returned to the caller (previously TCP
// setup errors were only printed and the node was started anyway).
func NewMockFsNode(state State, srvr *mock.MockServer, logFileName string, hbTimeout int, timeout int, _pubaddr string) (*FsNode, error) {
	//TODO Add code for recovery
	registerStructs()

	_mainLog, err := log.Open(logFileName)
	if err != nil {
		return nil, err
	}

	_stateLog, err := log.Open(logFileName + "_state")
	if err != nil {
		_mainLog.Close() // don't leak the already-open main log
		return nil, err
	}

	_mainLog.RegisterSampleEntry([]byte{})
	_stateLog.RegisterSampleEntry(StateInfo{})

	_sm, alarm := NewSm(state, srvr.Pid(), srvr.Peers(), hbTimeout, timeout)

	fsn := FsNode{appendCh: make(chan Event, 1000), commitCh: make(chan *CommitInfo, 1000), processQuitCh: make(chan bool), frontEndQuitCh: make(chan bool, 1), backEndQuitCh: make(chan bool, 1), isOn: true, sm: _sm, server: srvr, mainLog: _mainLog, stateLog: _stateLog, dict: make(map[string]*FileStruct, 1000), pubaddr: _pubaddr, connMap: make(map[int64]*net.Conn)}
	fsn.timer = time.NewTimer(time.Millisecond * time.Duration(alarm.duration))

	addr, err := net.ResolveTCPAddr("tcp4", fsn.pubaddr)
	if err != nil {
		_mainLog.Close()
		_stateLog.Close()
		return nil, err
	}

	fsn.socket, err = net.ListenTCP("tcp", addr)
	if err != nil {
		_mainLog.Close()
		_stateLog.Close()
		return nil, err
	}

	go fsn.frontEndMain()
	go fsn.backEndMain()
	go fsn.handleEvent()
	return &fsn, nil
}
Пример #2
0
// NewRN creates a RaftNode for cluster member id: it joins the cluster, opens
// the main and state logs, builds the state machine from the peer list, and
// starts the event-handling goroutine. Returns an error if the cluster or
// either log cannot be opened.
func NewRN(state State, id int, clusterConfigFileName string, logFileName string, hbTimeout int, timeout int) (*RaftNode, error) {
	//TODO Add code for recovery
	registerStructs()
	srvr, err := cluster.New(id, clusterConfigFileName)
	if err != nil {
		return nil, err
	}

	_mainLog, err := log.Open(logFileName)
	if err != nil {
		return nil, err
	}

	_stateLog, err := log.Open(logFileName + "_state")
	if err != nil {
		_mainLog.Close() // don't leak the already-open main log
		return nil, err
	}

	_mainLog.RegisterSampleEntry([]byte{})
	_stateLog.RegisterSampleEntry(StateInfo{})

	_sm, alarm := NewSm(state, id, srvr.Peers(), hbTimeout, timeout)

	rn := RaftNode{appendCh: make(chan Event, 1000), commitCh: make(chan *CommitInfo, 1000), processQuitCh: make(chan bool, 1), isOn: true, sm: _sm, server: srvr, mainLog: _mainLog, stateLog: _stateLog}
	rn.timer = time.NewTimer(time.Millisecond * time.Duration(alarm.duration))

	go rn.handleEvent()
	// err is guaranteed nil here; return nil explicitly rather than the stale variable.
	return &rn, nil
}
Пример #3
0
// InitializeStateMachine builds a StateMachine from the state file and log
// persisted under the directories named in RaftNode_config: peers, the
// (state, term, votedFor) triple, the replayed log, and per-peer
// nextIndex/matchIndex bookkeeping.
func InitializeStateMachine(RaftNode_config RaftConfig) StateMachine {
	var smobj StateMachine
	// Record our id and every other cluster member's id.
	smobj.myconfig.myId = RaftNode_config.Id
	for i := 0; i < len(RaftNode_config.cluster); i++ {
		if RaftNode_config.cluster[i].Id != RaftNode_config.Id {
			smobj.myconfig.peer = append(smobj.myconfig.peer, RaftNode_config.cluster[i].Id)
		}
	}
	// Restore state, current term and votedFor from the state file.
	stateFile := RaftNode_config.StateDir + "/" + "mystate"
	state, err := log.Open(stateFile)
	assert(err == nil) // validate the handle BEFORE deferring Close/calling Get on it
	defer state.Close()
	res, err := state.Get(0)
	assert(err == nil)
	data, ok := res.(SMState)
	assert(ok)
	smobj.state = data.State
	smobj.currentTerm = data.CurrentTerm
	smobj.votedFor = data.VotedFor
	// Replay the persisted log into memory.
	lgFile := RaftNode_config.LogDir + "/" + "logfile"
	lg, err := log.Open(lgFile)
	assert(err == nil) // again: check before registering on the handle
	defer lg.Close()
	lg.RegisterSampleEntry(LogEntry{})
	i := int(lg.GetLastIndex())
	for cnt := 0; cnt <= i; cnt++ {
		data, err := lg.Get(int64(cnt))
		assert(err == nil)
		lgentry, ok := data.(LogEntry)
		assert(ok)
		smobj.log = append(smobj.log, lgentry)
	}
	smobj.logCurrentIndex = int(lg.GetLastIndex())
	smobj.logCommitIndex = -1
	// nextIndex starts one past the log tip; matchIndex is unknown (-1) per peer.
	for i := 0; i < len(RaftNode_config.cluster); i++ {
		if RaftNode_config.cluster[i].Id != RaftNode_config.Id {
			smobj.nextIndex = append(smobj.nextIndex, smobj.logCurrentIndex+1)
			smobj.matchIndex = append(smobj.matchIndex, -1)
		}
	}

	smobj.yesVotesNum = 0
	smobj.noVotesNum = 0
	smobj.electionTO = RaftNode_config.ElectionTimeout
	smobj.heartbeatTO = RaftNode_config.HeartbeatTimeout
	return smobj
}
Пример #4
0
// main exercises the log package end to end: append string and struct entries,
// read them back with type assertions, then truncate down to a single entry.
func main() {
	rmlog() // for a clean start

	lg, err := log.Open(LOGFILE)
	assert(err == nil) // validate the handle before registering entries on it
	lg.RegisterSampleEntry(Foo{})

	defer rmlog()

	err = lg.Append("foo")
	assert(err == nil)

	err = lg.Append("bar") // previously this error was silently dropped
	assert(err == nil)
	res, err := lg.Get(1) // should return "bar"
	assert(err == nil)
	str, ok := res.(string)
	assert(ok)
	assert(str == "bar")

	err = lg.Append(Foo{Bar: 10, Baz: []string{"x", "y"}})
	assert(err == nil)

	i := lg.GetLastIndex() // should return 2 as an int64 value
	assert(i == 2)

	data, err := lg.Get(i) // should return the Foo instance appended above
	assert(err == nil)
	foo, ok := data.(Foo)
	assert(ok)
	assert(foo.Bar == 10 && foo.Baz[1] == "y")

	lg.TruncateToEnd( /*from*/ 1)
	i = lg.GetLastIndex() // should return 0. One entry is left.
	assert(i == 0)
}
Пример #5
0
// LogStore appends an entry (term, data, encoded leader/follower state) to the
// on-disk and in-memory logs and advances LastLogIndex, under the machine lock.
// A two-byte data payload is treated as a truncate marker: the disk log is cut
// at index and the stored data is nil.
func (sm *StateMachine) LogStore(index int, term int, data []byte) {
	smstate := 2 // 2 = non-leader; 1 = leader (set below)
	sm.Lock()
	defer sm.Unlock()
	logname := "log" + strconv.Itoa(sm.Id)
	lg, err := log.Open(logname)
	if err != nil {
		// Cannot persist: previously the error was swallowed by an empty if-body
		// and the nil handle was used anyway, panicking below.
		return
	}
	defer lg.Close() // single close — the original closed twice (defer + explicit)
	lg.SetCacheSize(5000)
	lg.RegisterSampleEntry(LogEntry{})
	if sm.State == leader {
		smstate = 1
	}
	sm.LastLogIndex++
	if sm.is_not_sm_test != 0 {
		if len(data) == 2 {
			lg.TruncateToEnd(int64(index))
			data = nil
		}
		lg.Append(LogEntry{term, data, smstate})
		sm.Log = append(sm.Log, LogEntry{term, data, smstate})
	}
}
Пример #6
0
// initNode wires up one raft node: registers every message/action struct with
// gob (required by the cluster transport), creates the state machine's
// channels, seeds the RNG, and opens the node's on-disk log.
func initNode(id int, myConf *Config, sm *State_Machine) {
	//Register a struct name by giving it a dummy object of that name.
	gob.Register(AppEntrReq{})
	gob.Register(AppEntrResp{})
	gob.Register(VoteReq{})
	gob.Register(VoteResp{})
	gob.Register(StateStore{})
	gob.Register(LoggStore{})
	gob.Register(CommitInfo{})
	gob.Register(MyLogg{})

	//Channel initialization. CommitCh is buffered with capacity TESTENTRIES;
	//the others are unbuffered.
	sm.CommMedium.clientCh = make(chan interface{})
	sm.CommMedium.netCh = make(chan interface{})
	sm.CommMedium.timeoutCh = make(chan interface{})
	sm.CommMedium.actionCh = make(chan interface{})
	sm.CommMedium.CommitCh = make(chan interface{}, TESTENTRIES)

	//Seed random number generator; multiplying by id de-correlates the nodes.
	rand.Seed(time.Now().UTC().UnixNano() * int64(id))
	//Initialize the timer object for timeouts.
	//NOTE(review): the bare literal 10 is 10 *nanoseconds*, not milliseconds —
	//confirm the intended duration.
	myConf.DoTO = time.AfterFunc(10, func() {})

	//Initialize the Log and Node configuration.
	logConfig(id, myConf)
	var err error
	myConf.lg, err = log.Open(myConf.LogDir)
	if err != nil {
		panic(err)
	}

}
Пример #7
0
// doActions applies the actions emitted by the state machine: sending
// messages, resetting the randomized election timer, delivering commits, and
// persisting log entries / state to disk.
func (rn *RaftNode) doActions(actions []Action) {
	for _, action := range actions {
		// Bind the concrete value once instead of re-asserting in every case.
		switch act := action.(type) {

		case Send:
			// Reset the election timer before forwarding the message.
			rn.timer.Stop()
			rn.timer = time.AfterFunc(time.Duration(1000+rand.Intn(400))*time.Millisecond, func() { rn.timeoutCh <- Timeout{} })
			rn.serverOfCluster.Outbox() <- &cluster.Envelope{Pid: act.To, Msg: act.Event}

		case Alarm:
			// Reset the election timer.
			rn.timer.Stop()
			rn.timer = time.AfterFunc(time.Duration(1000+rand.Intn(400))*time.Millisecond, func() { rn.timeoutCh <- Timeout{} })

		case Commit:
			// Hand the commit produced by the state machine to the commit channel.
			newaction := act
			rn.CommitChannel() <- &newaction

		case LogStore:
			// Persist the entry. Use the decimal id — the original string(MyID)
			// converted the int to a rune — and close the handle instead of
			// leaking one file descriptor per LogStore action.
			lg, err := log.Open(rn.LogDir + strconv.Itoa(rn.server.MyID))
			if err == nil {
				lg.Append(act.Data)
				lg.Close()
			}

		case StateStore:
			// Persist the node state; on failure fall back to reading the
			// previously stored state (best-effort, as before).
			err := writeFile("statestore"+strconv.Itoa(rn.server.MyID), act)
			if err != nil {
				_, err := readFile("statestore" + strconv.Itoa(rn.server.MyID))
				if err != nil {
				}
			}
		}
	}
}
Пример #8
0
// makerafts spins up NumServers raft nodes on a mock cluster, each with its own
// log and state machine, and starts their event loops.
func makerafts() []*Node {

	newconfig := cluster.Config{
		Peers: []cluster.PeerConfig{
			{Id: 1, Address: "localhost:5050"},
			{Id: 2, Address: "localhost:6060"},
			{Id: 3, Address: "localhost:7070"},
			{Id: 4, Address: "localhost:8080"},
			{Id: 5, Address: "localhost:9090"},
		}}
	cl, _ := mock.NewCluster(newconfig)

	nodes := make([]*Node, len(newconfig.Peers))

	TempConfig := ConfigRaft{
		LogDir:           "Log",
		ElectionTimeout:  ETimeout,
		HeartbeatTimeout: HeartBeat,
	}

	for i := 1; i <= NumServers; i++ {
		// Maps and slices are reference types: build fresh ones per node so the
		// state machines do not share (and clobber) each other's bookkeeping.
		// The original allocated these once and handed the same map/slice to
		// every node.
		MatchIndex := map[int]int{0: -1, 1: -1, 2: -1, 3: -1, 4: -1}
		NextIndex := map[int]int{0: 1, 1: 0, 2: 0, 3: 0, 4: 0}
		Votes := make([]int, NumServers) // zero-initialized by make

		RaftNode := Node{id: i, leaderid: -1, timeoutCh: time.NewTimer(time.Duration(TempConfig.ElectionTimeout) * time.Millisecond), LogDir: TempConfig.LogDir + strconv.Itoa(i), CommitCh: make(chan *Commit, 100)}
		log, err := log.Open(RaftNode.LogDir)
		if err != nil {
			// The original printed the literal string "i" instead of the index.
			fmt.Println("Error opening Log File " + strconv.Itoa(i))
		}
		log.RegisterSampleEntry(LogStore{})
		peer := make([]int, NumServers)
		server := cl.Servers[i]
		p := server.Peers()
		temp := 0

		// NOTE(review): only the node's OWN slot (j == i-1) receives a peer id
		// here — the condition looks inverted; preserved as-is, confirm against
		// the state machine's use of `peers`.
		for j := 0; j < NumServers; j++ {
			if j != i-1 {
				peer[j] = 0
			} else {
				peer[j] = p[temp]
				temp++
			}
		}

		RaftNode.server = server
		RaftNode.sm = &StateMachine{state: "follower", id: server.Pid(), leaderid: -1, peers: peer, term: 0, votedFor: 0, Votes: Votes, commitIndex: -1, Log: log, index: -1, matchIndex: MatchIndex, nextIndex: NextIndex, HeartbeatTimeout: 500, ElectionTimeout: ETimeout}
		nodes[i-1] = &RaftNode
	}
	for i := 0; i < NumServers; i++ {
		go nodes[i].listenonchannel()
	}

	return nodes
}
Пример #9
0
// ProcessStateStoreAc persists the latest (term, state, votedFor) triple,
// replacing whatever was previously stored in the state file.
func (rn *RaftNode) ProcessStateStoreAc(action StateStoreAc) {
	stateAttrsFP, err := log.Open(rn.logDir + "/" + StateFile)
	assert(err == nil) // validate the handle BEFORE registering/deferring on it
	defer stateAttrsFP.Close()
	stateAttrsFP.RegisterSampleEntry(PersistentStateAttrs{})
	stateAttrsFP.TruncateToEnd(0) // Flush previous state
	stateAttrsFP.Append(PersistentStateAttrs{action.term, action.state, action.votedFor})
}
Пример #10
0
// StateStoreHandler persists the node's (term, votedFor, state) triple to the
// state file, replacing the most recent entry.
func (rnode *RaftNode) StateStoreHandler(obj StateStore) {
	lg, err := log.Open(rnode.statefile)
	assert(err == nil) // check the handle before using it
	defer lg.Close()
	lg.RegisterSampleEntry(NodePers{})
	i := lg.GetLastIndex()
	// NOTE(review): truncating at the *last* index drops only the final entry;
	// sibling handlers truncate at 0 — confirm this growth is intended.
	lg.TruncateToEnd(i)
	lg.Append(NodePers{CurrentTerm: obj.term, VotedFor: obj.votedFor, CurrentState: obj.state})
}
Пример #11
0
// New constructs a raft Node from nodeConfig: it joins the cluster, opens the
// on-disk log, restores term/votedFor and log entries from disk if a state
// file for this id exists, and starts the event-processing goroutine.
// Returns an error if the cluster or the log cannot be opened.
func New(nodeConfig Config) (Node, error) {
	clusterConfig := cluster.Config{Peers: NetToPeersConfig(nodeConfig.cluster), InboxSize: 50, OutboxSize: 50}
	server, err := cluster.New(nodeConfig.Id, clusterConfig)
	if err != nil {
		return nil, errors.New("Could not start the messaging service")
	}

	lg, err := log.Open(nodeConfig.LogDir)
	if err != nil {
		return nil, errors.New("Could not start log service")
	}
	lg.RegisterSampleEntry(LogEntry{})

	commitChannel := make(chan CommitInfo)
	eventChannel := make(chan interface{})
	shutdownChan := make(chan int)
	// The in-memory log always starts with a dummy entry at index 0.
	initLog := make([]LogEntry, 0)
	initLog = append(initLog, LogEntry{0, make([]byte, 0)})
	votedFor := -1
	term := 0
	_, err = os.Stat(strconv.Itoa(nodeConfig.Id) + "_state")
	if err == nil {
		// State file already exists, so restart read vars from it
		term, votedFor = readState(nodeConfig.Id)
		// restore log entries from log saved on disk
		logLastIndex := lg.GetLastIndex()
		if logLastIndex != -1 {
			//logger.Println(nodeConfig.Id, " : Last index on disk : ", logLastIndex)
			// NOTE(review): the loop stops *before* logLastIndex, so the entry
			// stored at the last index is never restored — confirm whether the
			// initial dummy entry accounts for this offset.
			for i := 0; int64(i) < logLastIndex; i++ {
				entry, _ := lg.Get(int64(i))
				initLog = append(initLog, entry.(LogEntry))
			}
		}

	}

	sm := StateMachine{nodeConfig.Id, getPeers(nodeConfig.cluster), term,
		votedFor, 1, initLog, make(map[int]int), make(map[int]int),
		0, nodeConfig.ElectionTimeout, make(map[int]int), -1}

	rn := RaftNode{sm: sm, server: server, lg: lg, commitChannel: commitChannel, eventChannel: eventChannel, shutDownChan: shutdownChan}
	// The timer pushes a TimeoutEv into the node's event channel after a
	// randomized election timeout in [timeout, 2*timeout) ms.
	timerFunc := func(eventChannel chan interface{}) func() {
		return func() {
			rn.eventChannel <- TimeoutEv{}
		}
	}
	rn.timer = time.AfterFunc(time.Duration(random(sm.timeout, 2*sm.timeout))*time.Millisecond, timerFunc(rn.eventChannel))
	rn.commitLock = &sync.RWMutex{}

	// Register the message types exchanged over the cluster (gob encoding).
	gob.Register(AppendEntriesReqEv{})
	gob.Register(AppendEntriesRespEv{})
	gob.Register(VoteReqEv{})
	gob.Register(VoteRespEv{})
	go rn.ProcessEvents()
	return &rn, nil
}
Пример #12
0
// StateStore writes the machine's persistent attributes (current term, commit
// index, votedFor) to this node's state log.
func (sm *StateMachine) StateStore() {
	name := "statelog" + strconv.Itoa(sm.Id)
	lg, _ := log.Open(name)
	lg.SetCacheSize(5000)
	defer lg.Close()
	lg.RegisterSampleEntry(SMPersist{})
	lg.Append(SMPersist{sm.Term, sm.CommitIndex, sm.VotedFor})
}
Пример #13
0
// LogStoreHandler truncates the persistent log at obj.index and appends
// obj.command there.
func (rnode *RaftNode) LogStoreHandler(obj LogStore) {
	lg, err := log.Open(rnode.logfile)
	assert(err == nil) // check the handle BEFORE registering on it
	defer lg.Close()
	lg.RegisterSampleEntry(logEntry{})
	lg.TruncateToEnd(int64(obj.index))
	lg.Append(obj.command)
}
Пример #14
0
// ProcessLogStoreAc truncates the persistent log at action.index and appends
// the new (term, data) entry there.
func (rn *RaftNode) ProcessLogStoreAc(action LogStoreAc) {
	logFP, err := log.Open(rn.logDir + "/" + LogFile)
	assert(err == nil) // check the handle BEFORE registering on it
	defer logFP.Close()
	logFP.RegisterSampleEntry(LogEntry{})
	// The new index may extend the log by at most one position.
	assert(int64(logFP.GetLastIndex()+1) >= action.index)
	logFP.TruncateToEnd(int64(action.index))
	logFP.Append(LogEntry{action.term, action.data})
}
Пример #15
0
// Get returns the command bytes stored at a log index, or an error.
func (rn *RaftNode) Get(index int) (error, []byte) {
	lgFile := rn.rc.LogDir + "/" + "logfile"
	lg, err := log.Open(lgFile)
	assert(err == nil) // a nil handle would panic inside the deferred Close
	defer lg.Close()
	lgentry, err := lg.Get(int64(index))
	le, _ := lgentry.(LogEntry)
	data := le.Cmd
	return err, data
}
Пример #16
0
// InitLog seeds the on-disk and in-memory logs with a dummy entry at index 0
// and advances LastLogIndex.
func (sm *StateMachine) InitLog() {
	logname := "log" + strconv.Itoa(sm.Id)
	// NOTE(review): the open error is discarded; a nil handle panics below.
	lg, _ := log.Open(logname)
	// Register Close immediately so it also runs if a later call panics —
	// the original deferred it after Append, skipping Close on that path.
	defer lg.Close()
	logentry := LogEntry{0, nil}
	lg.Append(logentry)
	sm.Log = append(sm.Log, LogEntry{0, nil})
	sm.LastLogIndex++
}
Пример #17
0
// initialState creates the per-node state file under sd and seeds it with the
// initial follower state: term 0, no vote cast.
func initialState(sd string) {
	st, err := log.Open(sd + "/" + "mystate")
	assert(err == nil)
	st.RegisterSampleEntry(SMState{})
	defer st.Close()
	err = st.Append(SMState{State: "FOLLOWER", CurrentTerm: 0, VotedFor: 0})
	assert(err == nil)
}
Пример #18
0
// LogDelete drops every entry from index onward — on disk and in memory —
// and rewinds LastLogIndex accordingly.
func (sm *StateMachine) LogDelete(index int) {
	lg, _ := log.Open("log" + strconv.Itoa(sm.Id))
	lg.SetCacheSize(5000)
	lg.RegisterSampleEntry(LogEntry{})
	defer lg.Close()
	lg.TruncateToEnd(int64(index))
	sm.Log = sm.Log[:index]
	sm.LastLogIndex = index - 1
}
Пример #19
0
// LogStore appends one (term, data) entry to both the on-disk and the
// in-memory log and advances LastLogIndex. index is currently unused.
func (sm *StateMachine) LogStore(index int, term int, data []byte) {
	lg, _ := log.Open("log" + strconv.Itoa(sm.Id))
	lg.SetCacheSize(5000)
	lg.RegisterSampleEntry(LogEntry{})
	defer lg.Close()
	entry := LogEntry{term, data}
	lg.Append(entry)
	sm.Log = append(sm.Log, LogEntry{term, data})
	sm.LastLogIndex++
}
Пример #20
0
// SetLastProcessed records the index of the last applied entry, both in memory
// and in this file server's fslog (replacing the previous record).
func (fileServer *FileServer) SetLastProcessed(lastProcessed int64) {
	fileServer.LastProcessedIndex = lastProcessed
	logname := "fslog" + strconv.Itoa(fileServer.Id)
	lg, _ := log.Open(logname)
	lg.SetCacheSize(5000)
	lg.RegisterSampleEntry(FsLog{})
	// Single close: the original also called Close explicitly at the end,
	// closing the handle twice.
	defer lg.Close()
	// NOTE(review): truncating at 1 preserves entry 0 — confirm intended.
	lg.TruncateToEnd(1)
	lg.Append(FsLog{lastProcessed})
}
Пример #21
0
// InitializeStateMachine restores rn's state machine from the persisted state
// file and log named in RaftNode_config: peer list, the (state, term,
// votedFor) triple, the replayed log, and per-peer nextIndex/matchIndex.
func (rn *RaftNode) InitializeStateMachine(RaftNode_config RaftConfig) {

	// Record our id and every other cluster member's id.
	rn.sm.myconfig.myId = RaftNode_config.Id
	for i := 0; i < len(RaftNode_config.cluster); i++ {
		if RaftNode_config.cluster[i].Id != RaftNode_config.Id {
			rn.sm.myconfig.peer = append(rn.sm.myconfig.peer, RaftNode_config.cluster[i].Id)
		}
	}

	// Restore state, current term and votedFor from the state file.
	stateFile := RaftNode_config.StateFile
	rn.stateFP = OpenFile(stateFile)
	data := ReadState(rn.stateFP)
	rn.sm.state = data.State
	rn.sm.currentTerm = data.CurrentTerm
	rn.sm.votedFor = data.VotedFor

	// Replay the persisted log into memory.
	lgFile := RaftNode_config.LogDir + "/" + "logfile"
	var err error
	rn.logFP, err = log.Open(lgFile)
	assert(err == nil) // validate the handle BEFORE registering on it
	rn.logFP.RegisterSampleEntry(LogEntry{})
	i := int(rn.logFP.GetLastIndex())
	for cnt := 0; cnt <= i; cnt++ {
		data, err := rn.logFP.Get(int64(cnt))
		assert(err == nil)
		lgentry, ok := data.(LogEntry)
		assert(ok)
		rn.sm.log = append(rn.sm.log, lgentry)
	}
	rn.sm.logCurrentIndex = int(rn.logFP.GetLastIndex())
	rn.sm.logCommitIndex = -1

	// nextIndex starts one past the log tip; matchIndex is unknown (-1).
	for i := 0; i < len(RaftNode_config.cluster); i++ {
		if RaftNode_config.cluster[i].Id != RaftNode_config.Id {
			rn.sm.nextIndex = append(rn.sm.nextIndex, rn.sm.logCurrentIndex+1)
			rn.sm.matchIndex = append(rn.sm.matchIndex, -1)
		}
	}

	rn.sm.yesVotesNum = 0
	rn.sm.noVotesNum = 0
	rn.sm.electionTO = RaftNode_config.ElectionTimeout
	rn.sm.heartbeatTO = RaftNode_config.HeartbeatTimeout
}
Пример #22
0
// GetLog opens (and resets) the log under dir, guaranteeing at least one entry
// at index 0. Returns nil if the log cannot be opened.
func GetLog(dir string) *log.Log {
	rnlog, err := log.Open(dir)
	if err != nil {
		fmt.Println("Error creating log :", err) // fixed typo "greating"
		return nil
	}
	rnlog.RegisterSampleEntry(LogEntry{})
	// NOTE(review): truncating at 0 wipes the log on every open, so the
	// GetLastIndex() == -1 check below always fires — confirm that discarding
	// persisted entries here is intentional.
	rnlog.TruncateToEnd(0)
	if rnlog.GetLastIndex() == -1 {
		rnlog.Append(LogEntry{0, nil, false})
	}
	return rnlog
}
Пример #23
0
// initRaftStateFile wipes logDir and seeds the state file with the initial
// persistent state: term 0, votedFor 0, follower.
func initRaftStateFile(logDir string) {
	cleanup(logDir)
	stateAttrsFP, err := log.Open(logDir + "/" + "statefile")
	assert(err == nil) // validate the handle BEFORE configuring it
	defer stateAttrsFP.Close()
	stateAttrsFP.RegisterSampleEntry(NodePers{})
	stateAttrsFP.SetCacheSize(1)
	stateAttrsFP.TruncateToEnd(0)
	err1 := stateAttrsFP.Append(NodePers{0, 0, "follower"})
	assert(err1 == nil)
	fmt.Println("file created successfully")
}
Пример #24
0
// Get returns the command bytes at index; on error data is nil.
func (rn *RaftNode) Get(index int) (err1 error, data []byte) {
	lg, err := log.Open(rn.logfile)
	assert(err == nil) // check the handle BEFORE registering on it
	defer lg.Close()
	lg.RegisterSampleEntry(logEntry{})
	val, err1 := lg.Get(int64(index))
	if err1 == nil {
		data = val.(logEntry).Command
	}
	// Both original branches returned the identical pair; one return suffices.
	return err1, data
}
// initializeLog opens the raft log under config.LogDir and, if it is empty,
// seeds it with a sentinel entry at index 0.
func (rn *RaftNode) initializeLog(config Config) {

	// The redundant `var err error` was removed: := declares err here.
	handler, err := log.Open(config.LogDir + "/raftlog")
	rn.LogHandler = handler
	check(err)

	if rn.LogHandler.GetLastIndex() == -1 { //log is absolutely empty
		rn.LogHandler.Append(SERVER_LOG_DATASTR{Index: 0, Term: 0, Data: []byte("")})
	}

}
Пример #26
0
// Get fetches the data payload stored at index in this node's on-disk log.
func (rn *RaftNode) Get(index int64) ([]byte, error) {
	lg, err := log.Open("log" + strconv.Itoa(rn.Sm.Id))
	if err != nil {
		return nil, err
	}
	defer lg.Close()
	entry, err := lg.Get(index)
	if err != nil {
		return nil, err
	}
	return entry.(LogEntry).Data, nil
}
Пример #27
0
// initRaftStateFile wipes logDir and seeds the state file with the initial
// persistent attributes: term 0, Follower, votedFor 0.
func initRaftStateFile(logDir string) {
	// fmt.Printf("init raft state file : logDir - %v \n", logDir)
	cleanup(logDir)
	stateAttrsFP, err := log.Open(logDir + "/" + StateFile)
	assert(err == nil) // validate the handle BEFORE configuring it
	defer stateAttrsFP.Close()
	stateAttrsFP.RegisterSampleEntry(PersistentStateAttrs{})
	stateAttrsFP.SetCacheSize(1)
	stateAttrsFP.TruncateToEnd(0)
	err1 := stateAttrsFP.Append(PersistentStateAttrs{0, "Follower", 0})
	assert(err1 == nil)
}
Пример #28
0
// InitLog seeds the on-disk and in-memory logs with a dummy entry at index 0
// and advances LastLogIndex, under the machine lock.
func (sm *StateMachine) InitLog() {
	sm.Lock()
	defer sm.Unlock()
	logname := "log" + strconv.Itoa(sm.Id)
	lg, err := log.Open(logname)
	if err != nil {
		// Cannot persist: previously the error was swallowed by an empty
		// if-body and the nil handle was used anyway.
		return
	}
	defer lg.Close() // single close — the original closed twice (defer + explicit)
	logentry := LogEntry{0, nil, 2}
	lg.Append(logentry)
	sm.Log = append(sm.Log, LogEntry{0, nil, 2})
	sm.LastLogIndex++
}
Пример #29
0
// New builds a RaftNode from conf: sets up the election timer and channels,
// joins the cluster, opens the log, and initializes the state machine in the
// follower state before starting the event-processing goroutine.
func New(conf Config) Node {
	var (
		rn          RaftNode
		rsmLog      []LogEntry
		peerIds     []int
		hasVoted    map[int]int
		nextIndex   map[int]int64
		matchIndex  map[int]int64
		ClustConfig cluster.Config
	)
	// initialisation of other raft node variables
	rn.IsWorking = true
	rn.TimeoutTimer = time.NewTimer(time.Duration(randRange(conf.ElectionTimeout, 2*conf.ElectionTimeout)) * time.Millisecond)
	// NOTE(review): this receive blocks construction until the first timeout
	// fires (apparently to drain the channel before reuse) — confirm the
	// startup delay is intended.
	<-rn.TimeoutTimer.C
	//fmt.Println(<-rn.TimeoutTimer.C)
	rn.AppendEventCh = make(chan Event, 100)
	//rn.TimeoutEventCh = make(chan Event, 100)
	rn.CommitCh = make(chan CommitInfo, 100)
	ClustConfig = GetClusterConfig(conf)
	// NOTE(review): errors from cluster.New and log.Open are discarded here.
	rn.NetServer, _ = cluster.New(conf.Id, ClustConfig)
	rn.LogFile, _ = log.Open(conf.LogFileDir)
	// initialisation of the state machine
	peerIds = getPeerIds(conf)
	hasVoted = getVotesRcvd(peerIds)
	nextIndex = getNextIndex(peerIds)
	matchIndex = getMatchIndex(peerIds)
	registerStructs()
	rsmLog, _ = rn.getRsmLog()
	//rsmState, err = rn.getRsmState();
	rn.SM.init( /* currTerm */ 0,
		/* votedFor */ -1,
		/* Log */ rsmLog,
		/* selfId */ conf.Id,
		/* peerIds */ peerIds,
		/* electionAlarm */ conf.ElectionTimeout,
		/* heartbeatAlarm */ conf.HeartbeatTimeout,
		/* lastMatchIndex */ -1,
		/* currState --Follower*/ "follower",
		/* commitIndex */ -1,
		/* leaderId */ -1,
		/* lastLogIndex */ -1,
		/* lastLogTerm */ 0,
		/* votedAs */ hasVoted,
		/* nextIndex */ nextIndex,
		/* matchIndex */ matchIndex)
	go rn.ProcessNodeEvents()
	return &rn
}
Пример #30
0
// termReset rewinds all persisted election state: every peer's currentTerm
// back to 0, its votedFor back to -1, and its log truncated to empty.
func termReset() {
	// Database holding each peer's current term.
	currentTerm, _ := leveldb.OpenFile("currentTerm", nil)
	defer currentTerm.Close()
	// Database holding each peer's vote.
	voted, _ := leveldb.OpenFile("votedFor", nil)
	defer voted.Close()

	for i := 0; i < len(configs.Peers); i++ {
		key := []byte(strconv.FormatInt(int64(i), 10))
		currentTerm.Put(key, []byte(strconv.FormatInt(int64(0), 10)), nil)
		voted.Put(key, []byte(strconv.FormatInt(int64(-1), 10)), nil)

		lg, _ := log.Open("Logs/Log" + strconv.Itoa(i))
		lg.TruncateToEnd(0)
		lg.Close()
	}
}