Example 1
//
// Get the txnid from the packet if the packet is a ProposalMsg or CommitMsg.
//
func (l *LeaderSyncProxy) getPacketTxnId(packet common.Packet) common.Txnid {

	txid := common.BOOTSTRAP_LAST_LOGGED_TXID
	if packet != nil {
		switch request := packet.(type) {
		case ProposalMsg:
			txid = common.Txnid(request.GetTxnid())
		case CommitMsg:
			txid = common.Txnid(request.GetTxnid())
		}
	}

	return txid
}
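A type switch like this is the standard way to pull a transaction id out of a heterogeneous packet. Below is a minimal, self-contained sketch of the same pattern; the Packet/ProposalMsg/CommitMsg interfaces and the Txnid type here are illustrative stand-ins, not the real common and protocol definitions.

package main

import "fmt"

// Hypothetical stand-ins for the real common/protocol types.
type Txnid uint64

type Packet interface {
	Name() string
}

type ProposalMsg interface {
	Packet
	GetTxnid() uint64
	GetOpCode() uint32
}

type CommitMsg interface {
	Packet
	GetTxnid() uint64
}

// getPacketTxnid mirrors getPacketTxnId above: return a zero txnid unless
// the packet is a proposal or a commit.
func getPacketTxnid(packet Packet) Txnid {
	if packet == nil {
		return Txnid(0)
	}
	switch request := packet.(type) {
	case ProposalMsg:
		return Txnid(request.GetTxnid())
	case CommitMsg:
		return Txnid(request.GetTxnid())
	}
	return Txnid(0)
}

// commit is a toy CommitMsg implementation used only for the demo.
type commit struct{ txnid uint64 }

func (c commit) Name() string     { return "Commit" }
func (c commit) GetTxnid() uint64 { return c.txnid }

func main() {
	fmt.Println(getPacketTxnid(commit{txnid: 42})) // 42
	fmt.Println(getPacketTxnid(nil))               // 0
}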
Example 2
func (w *watcher) LogProposal(p protocol.ProposalMsg) error {

	w.mutex.Lock()
	defer w.mutex.Unlock()

	msg := w.factory.CreateLogEntry(p.GetTxnid(), p.GetOpCode(), p.GetKey(), p.GetContent())
	w.pendings[common.Txnid(p.GetTxnid())] = msg

	handle, ok := w.pendingReqs[p.GetReqId()]
	if ok {
		delete(w.pendingReqs, p.GetReqId())
		w.loggedReqs[common.Txnid(p.GetTxnid())] = handle
	}

	return nil
}
Example 3
// Get value from iterator
func (i *TransientLogIterator) Next() (txnid common.Txnid, op common.OpCode, key string, content []byte, err error) {

	if i.iter == nil {
		return 0, common.OPCODE_INVALID, "", nil, fdb.RESULT_ITERATOR_FAIL
	}

	if i.curError != nil {
		return 0, common.OPCODE_INVALID, "", nil, i.curError
	}

	key = i.curKey
	content = i.curContent
	txnid = i.curTxnid

	i.curTxnid = common.Txnid(uint64(i.curTxnid) + 1)
	i.curKey, i.curContent, i.curError = i.iter.Next()

	if i.curError == nil {
		// This is not the last entry. The actual txnid does not matter as long as it is
		// smaller than the snapshot's txnid (TransientLogIterator.txnid).
		return txnid, common.OPCODE_SET, key, content, nil
	}

	// last entry : use the txnid matching the snapshot
	return i.txnid, common.OPCODE_SET, key, content, nil
}
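Callers of an iterator like this typically loop over Next until it returns an error. A minimal sketch of that calling pattern, using a slice-backed iterator and a made-up sentinel error rather than the real fdb/common types:

package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-ins for the commit-log iterator above.
type Txnid uint64

type logEntry struct {
	txnid Txnid
	key   string
}

var errNoMoreEntry = errors.New("iterator: no more entries")

type logIterator struct {
	entries []logEntry
	pos     int
}

// Next returns the next entry, or errNoMoreEntry when the log is exhausted.
func (i *logIterator) Next() (Txnid, string, error) {
	if i.pos >= len(i.entries) {
		return 0, "", errNoMoreEntry
	}
	e := i.entries[i.pos]
	i.pos++
	return e.txnid, e.key, nil
}

// replay drains the iterator until it reports an error, treating
// "no more entries" as normal termination.
func replay(it *logIterator, apply func(Txnid, string)) error {
	for {
		txnid, key, err := it.Next()
		if errors.Is(err, errNoMoreEntry) {
			return nil
		}
		if err != nil {
			return err
		}
		apply(txnid, key)
	}
}

func main() {
	it := &logIterator{entries: []logEntry{{1, "a"}, {2, "b"}}}
	_ = replay(it, func(t Txnid, k string) { fmt.Println(t, k) })
}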
Example 4
//
// handle accept message from follower
//
func (l *Leader) handleAccept(msg AcceptMsg) error {

	// If this Ack is on an old proposal, then ignore it.
	// This indicates that the follower may be slower
	// than others.  Therefore, the proposal may have been
	// committed before the follower could Ack.
	mtxid := common.Txnid(msg.GetTxnid())
	if l.lastCommitted >= mtxid {
		// cleanup.  l.quorums should not have mtxid.
		// This is just in case since we will never commit
		// this proposal.
		_, ok := l.quorums[mtxid]
		if ok {
			delete(l.quorums, mtxid)
			delete(l.proposals, mtxid)
		}
		return nil
	}

	// update quorum
	l.updateQuorum(mtxid, msg.GetFid())
	if l.hasQuorum(mtxid) {
		// The proposal has quorum, so commit it now. If the commit fails, return an error,
		// which will cause the leader to go through re-election.  This handles
		// cases such as hardware failure or repository corruption.
		err := l.commit(mtxid)
		if err != nil {
			return err
		}
	}

	return nil
}
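handleAccept relies on updateQuorum and hasQuorum, which are not shown in this example. A sketch of how such vote tracking is commonly implemented with a map keyed by txnid (field names are hypothetical; the real Leader may differ):

package main

import "fmt"

type Txnid uint64

// quorumTracker records, per txnid, which follower ids have accepted.
type quorumTracker struct {
	ensembleSize int
	votes        map[Txnid]map[string]bool
}

func newQuorumTracker(ensembleSize int) *quorumTracker {
	return &quorumTracker{ensembleSize: ensembleSize, votes: make(map[Txnid]map[string]bool)}
}

func (q *quorumTracker) updateQuorum(txnid Txnid, fid string) {
	if q.votes[txnid] == nil {
		q.votes[txnid] = make(map[string]bool)
	}
	q.votes[txnid][fid] = true
}

// hasQuorum returns true once a majority of the ensemble has accepted.
func (q *quorumTracker) hasQuorum(txnid Txnid) bool {
	return len(q.votes[txnid]) > q.ensembleSize/2
}

// forget drops the bookkeeping for a txnid, e.g. after commit or when the
// proposal is known to be stale.
func (q *quorumTracker) forget(txnid Txnid) {
	delete(q.votes, txnid)
}

func main() {
	q := newQuorumTracker(3)
	q.updateQuorum(7, "leader")
	fmt.Println(q.hasQuorum(7)) // false: 1 of 3
	q.updateQuorum(7, "follower-1")
	fmt.Println(q.hasQuorum(7)) // true: 2 of 3
	q.forget(7)
}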
Example 5
//
// Send the entries in the committed log to the follower.  Termination conditions:
// 1) There are no more entries in the commit log.
// 2) The entry in the commit log matches the first entry in the observer queue.
// This function returns the txid of the last entry sent from the log, or 0 if nothing is sent from the commit log.
//
func (l *LeaderSyncProxy) sendEntriesInCommittedLog(startTxid, endTxid common.Txnid, o *observer) (common.Txnid, error) {

	log.Current.Debugf("LeaderSyncProxy.sendEntriesInCommittedLog(): startTxid %d endTxid %d observer first txid %d",
		startTxid, endTxid, l.firstTxnIdInObserver(o))

	var lastSeen common.Txnid = common.BOOTSTRAP_LAST_LOGGED_TXID

	logChan, errChan, killch, err := l.handler.GetCommitedEntries(startTxid, endTxid)
	if logChan == nil || errChan == nil || err != nil {
		return lastSeen, err
	}

	for {
		select {
		case entry, ok := <-logChan:
			if !ok {
				killch <- true
				return lastSeen, nil // no more data
			}

			// send the entry to follower
			err = send(entry, l.follower)
			if err != nil {
				// lastSeen can be 0 if there is no new entry in repo
				killch <- true
				return lastSeen, err
			}

			lastSeen = common.Txnid(entry.GetTxnid())

			// the committed entry matches what's in the observer queue
			if l.hasSeenEntryInObserver(o, common.Txnid(entry.GetTxnid())) {
				killch <- true
				return lastSeen, nil
			}

		case err := <-errChan:
			if err != nil {
				return lastSeen, err
			}
			break
		}
	}

	return lastSeen, nil
}
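The select loop above follows a common shape: drain a data channel, watch an error channel, and tell the producer to stop through a kill channel once the consumer has seen enough. A stripped-down sketch of the pattern with plain ints standing in for log entries (the channel roles mirror the example; the producer here is just pre-filled):

package main

import "fmt"

// drain reads entries until the producer closes logChan or reports an error.
// It signals the producer via killch when the consumer stops before the
// channel is fully drained.
func drain(logChan <-chan int, errChan <-chan error, killch chan<- bool) (last int, err error) {
	for {
		select {
		case entry, ok := <-logChan:
			if !ok {
				return last, nil // producer finished
			}
			last = entry
			if entry >= 3 { // consumer-side termination condition
				killch <- true
				return last, nil
			}
		case err := <-errChan:
			if err != nil {
				return last, err
			}
		}
	}
}

func main() {
	logChan := make(chan int, 5)
	errChan := make(chan error, 1)
	killch := make(chan bool, 1)
	for i := 1; i <= 5; i++ {
		logChan <- i
	}
	close(logChan)
	last, err := drain(logChan, errChan, killch)
	fmt.Println(last, err) // 3 <nil>
}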
Example 6
func (a *ServerAction) LogProposal(p protocol.ProposalMsg) error {

	if a.notifier != nil {
		txnid, op, key, content := p.GetTxnid(), p.GetOpCode(), p.GetKey(), p.GetContent()
		if err := a.notifier.OnNewProposal(common.Txnid(txnid), common.OpCode(op), key, content); err != nil {
			return err
		}
	}

	err := a.appendCommitLog(common.Txnid(p.GetTxnid()), common.OpCode(p.GetOpCode()), p.GetKey(), p.GetContent())
	if err != nil {
		return err
	}

	a.server.UpdateStateOnNewProposal(p)

	return nil
}
Example 7
//
// Bootstrap
//
func (s *Coordinator) bootstrap(config string) (err error) {

	s.state.mutex.Lock()
	defer s.state.mutex.Unlock()

	if s.state.done {
		return
	}

	s.env, err = newEnv(config)
	if err != nil {
		return err
	}

	// Initialize server state
	s.state.resetCoordinatorState()

	// Initialize various callback facility for leader election and
	// voting protocol.
	s.factory = message.NewConcreteMsgFactory()
	s.skillch = make(chan bool, 1) // make it buffered to unblock sender
	s.site = nil

	// Create and initialize new txn state.
	s.txn = common.NewTxnState()

	// Initialize the state to enable voting
	repoName := filepath.Join(s.basepath, COORDINATOR_CONFIG_STORE)
	s.configRepo, err = r.OpenRepositoryWithName(repoName, s.idxMgr.GetMemoryQuota())
	if err != nil {
		return err
	}

	s.config = r.NewServerConfig(s.configRepo)
	lastLoggedTxid, err := s.config.GetLastLoggedTxnId()
	if err != nil {
		return err
	}
	s.txn.InitCurrentTxnid(common.Txnid(lastLoggedTxid))

	// Need to start the peer listener before election. A follower may
	// finish its election before a leader finishes its election. Therefore,
	// a follower node can request a connection to the leader node before that
	// node knows it is a leader.  By starting the listener now, the
	// follower can establish the connection and the leader can handle it
	// at a later time (when it is ready to be a leader).
	s.listener, err = common.StartPeerListener(s.getHostTCPAddr())
	if err != nil {
		return NewError(ERROR_COOR_LISTENER_FAIL, NORMAL, COORDINATOR, err,
			fmt.Sprintf("Index Coordinator : Fail to start PeerListener"))
	}

	// signal that bootstrap is ready
	s.markReady()

	return nil
}
Example 8
//
// Handle a new proposal
//
func (l *Leader) newProposal(proposal ProposalMsg) error {

	// Call out to log the proposal.  Always do this first before
	// sending to followers.
	err := l.handler.LogProposal(proposal)
	if err != nil {
		if _, ok := err.(*common.RecoverableError); ok {
			// update the last committed to advance the txnid.
			l.lastCommitted = common.Txnid(proposal.GetTxnid())
			l.sendAbort(proposal.GetFid(), proposal.GetReqId(), err.Error())
			return nil
		}

		// If logging the proposal fails, return the error.
		// This can cause the leader to go through re-election.  This handles
		// cases such as hardware failure or repository
		// corruption.
		return err
	}

	// The leader votes for itself
	l.updateQuorum(common.Txnid(proposal.GetTxnid()), l.GetFollowerId())

	// remember this proposal so I can commit it later
	l.proposals[common.Txnid(proposal.GetTxnid())] = proposal

	// Send the proposal to follower
	l.sendProposal(proposal)

	// check if proposal has quorum (if ensembleSize <= 2).  Make sure that this
	// is after sendProposal() so that we can still send proposal to follower BEFORE
	// we send the commit message.
	if l.hasQuorum(common.Txnid(proposal.GetTxnid())) {
		// The proposal has quorum, so commit it now. If the commit fails, return an error,
		// which will cause the leader to go through re-election.  This handles
		// cases such as hardware failure or repository corruption.
		err := l.commit(common.Txnid(proposal.GetTxnid()))
		if err != nil {
			return err
		}
	}

	return nil
}
Example 9
//
// Add a follower and starts a listener for the follower.
// If the leader is terminated, the pipe between leader
// and follower will also be closed.
//
func (l *Leader) AddFollower(fid string,
	peer *common.PeerPipe,
	o *observer) {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	// AddFollower requires holding the mutex such that the leader thread
	// will not be sending new proposal or commit (see sendProposal() and
	// sendCommit()) to followers.  This allows this function to copy the
	// proposals and commits from the observer queue into the pipe, before
	// the leader has a chance to send new messages.
	for packet := o.getNext(); packet != nil; packet = o.getNext() {

		switch request := packet.(type) {
		case ProposalMsg:
			txid := common.Txnid(request.GetTxnid())
			log.Current.Debugf("Leader.AddFollower() : send observer's packet %s, txid %d", packet.Name(), txid)
		case CommitMsg:
			txid := common.Txnid(request.GetTxnid())
			log.Current.Debugf("Leader.AddFollower() : send observer's packet %s, txid %d", packet.Name(), txid)
		}

		peer.Send(packet)
	}

	// Remember the old message listener and start a new one.
	oldListener, ok := l.followers[fid]

	listener := newListener(fid, peer, l)
	l.followers[fid] = listener
	go listener.start()

	// kill the old message listener
	if ok && oldListener != nil {
		log.Current.Debugf("Leader.AddFollower() : old Listener found for follower %s.  Terminating old listener", fid)
		oldListener.terminate()
	} else {
		// notify a brand new follower (not just replacing an existing one)
		l.changech <- true
	}
}
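AddFollower drains the observer queue while holding the leader mutex so the backlog reaches the new follower before any newer proposal or commit. A small sketch of that hand-off with strings standing in for packets (the observer here is a simple slice, not the real gometa observer):

package main

import (
	"fmt"
	"sync"
)

// observer buffers packets for a follower that is still catching up.
type observer struct {
	queue []string
}

// getNext pops the next packet, or returns "" when the queue is empty.
func (o *observer) getNext() string {
	if len(o.queue) == 0 {
		return ""
	}
	next := o.queue[0]
	o.queue = o.queue[1:]
	return next
}

type leader struct {
	mutex sync.Mutex
}

// addFollower drains the observer under the leader mutex so that no new
// proposal/commit can be appended while the backlog is being forwarded.
func (l *leader) addFollower(o *observer, send func(string)) {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	for packet := o.getNext(); packet != ""; packet = o.getNext() {
		send(packet)
	}
}

func main() {
	o := &observer{queue: []string{"Proposal:1", "Commit:1"}}
	(&leader{}).addFollower(o, func(p string) { fmt.Println("send", p) })
}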
Example 10
//
// Bootstrap
//
func (s *Server) bootstrap() (err error) {

	// Initialize server state
	s.state = newServerState()

	// Initialize repository service
	s.repo, err = r.OpenRepository()
	if err != nil {
		return err
	}
	s.log = r.NewCommitLog(s.repo)
	s.srvConfig = r.NewServerConfig(s.repo)

	// Create and initialize new txn state.
	s.txn = common.NewTxnState()

	// initialize the current transaction id to the lastLoggedTxid.  This
	// is the txid that this node has seen so far.  If this node becomes
	// the leader, a new epoch will be used and new current txid will
	// be generated. So no need to initialize the epoch at this point.
	lastLoggedTxid, err := s.srvConfig.GetLastLoggedTxnId()
	if err != nil {
		return err
	}
	s.txn.InitCurrentTxnid(common.Txnid(lastLoggedTxid))

	// Initialize various callback facility for leader election and
	// voting protocol.
	s.factory = message.NewConcreteMsgFactory()
	s.handler = action.NewServerAction(s.repo, s.log, s.srvConfig, s, s.txn, s.factory, s)
	s.skillch = make(chan bool, 1) // make it buffered to unblock sender
	s.site = nil

	// Need to start the peer listener before election. A follower may
	// finish its election before a leader finishes its election. Therefore,
	// a follower node can request a connection to the leader node before that
	// node knows it is a leader.  By starting the listener now, the
	// follower can establish the connection and the leader can handle it
	// at a later time (when it is ready to be a leader).
	s.listener, err = common.StartPeerListener(GetHostTCPAddr())
	if err != nil {
		return common.WrapError(common.SERVER_ERROR, "Fail to start PeerListener.", err)
	}

	// Start a request listener.
	s.reqListener, err = StartRequestListener(GetHostRequestAddr(), s)
	if err != nil {
		return common.WrapError(common.SERVER_ERROR, "Fail to start RequestListener.", err)
	}

	return nil
}
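The bootstrap comment notes that the current txnid is seeded from the last logged txid, and that a node later generates fresh txids under a new epoch if it becomes leader. A sketch of what such a txn state can look like, assuming the ZooKeeper-style layout with the epoch in the high 32 bits and a counter in the low 32 bits (an assumption for illustration, not something confirmed by this code):

package main

import "fmt"

type Txnid uint64

// txnState hands out monotonically increasing txnids.  The epoch/counter
// split below is an assumption; the real common.TxnState may differ.
type txnState struct {
	current Txnid
}

// initCurrentTxnid seeds the state with the last logged txnid found on disk.
func (t *txnState) initCurrentTxnid(last Txnid) {
	t.current = last
}

// newTxnidForEpoch starts a fresh counter under a new epoch, as a node would
// when it becomes leader.
func (t *txnState) newTxnidForEpoch(epoch uint32) Txnid {
	t.current = Txnid(uint64(epoch) << 32)
	return t.current
}

// nextTxnid increments the counter portion.
func (t *txnState) nextTxnid() Txnid {
	t.current++
	return t.current
}

func main() {
	var t txnState
	t.initCurrentTxnid(Txnid(42))
	t.newTxnidForEpoch(3)
	fmt.Printf("%#x\n", uint64(t.nextTxnid())) // 0x300000001
}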
Example 11
func (r *Repository) AcquireSnapshot(kind RepoKind) (common.Txnid, *RepoIterator, error) {

	r.mutex.Lock()
	defer r.mutex.Unlock()

	if len(r.snapshots[kind]) == 0 {
		return common.Txnid(0), nil, nil
	}

	snapshot := r.snapshots[kind][len(r.snapshots[kind])-1]
	snapshot.count++

	// Create a snapshot for iteration
	var FORESTDB_INMEMSEQ = fdb.SeqNum(math.MaxUint64)
	kvstore, err := snapshot.snapshot.SnapshotOpen(FORESTDB_INMEMSEQ)
	if err != nil {
		return common.Txnid(0), nil, err
	}

	iter, err := kvstore.IteratorInit(nil, nil, fdb.ITR_NO_DELETES)
	if err != nil {
		return common.Txnid(0), nil, err
	}
	return snapshot.txnid, &RepoIterator{iter: iter, store: kvstore}, nil
}
Example 12
//
// Create a new iterator.   This is used by LeaderSyncProxy to replicate log
// entries to a follower/watcher.  txid1 is the last logged committed txid from the follower/watcher.
// txid2 is the txid of the first packet in the observer queue for this follower/watcher.
// If the observer queue is empty, then txid2 is 0.
//
// For any LogEntry returned from the iterator, the following conditions must be satisfied:
// 1) The txid of any LogEntry returned from the iterator must be greater than or equal to txid1.
// 2) The last LogEntry must have txid >= txid2.  In other words, the iterator can guarantee
//    that it will cover any mutation by txid2, but cannot guarantee that it stops at txid2.
//
// Since TransientCommitLog does not keep track of history, it only supports the following case:
// 1) the beginning txid (txid1) is 0 (sending the full repository)
//
func (r *TransientCommitLog) NewIterator(txid1, txid2 common.Txnid) (CommitLogIterator, error) {

	if txid1 != 0 {
		return nil, common.NewError(common.REPO_ERROR, "TransientLCommitLog.NewIterator: cannot support beginning txid > 0")
	}

	// iter can be nil if nothing has been logged
	retry := true

retryLabel:
	txnid, iter, err := r.repo.AcquireSnapshot(MAIN)
	if err != nil {
		return nil, err
	}

	// The snapshot txnid must at least match the ending txid (txid2); if not, retry once.
	// This handles the race condition in which the first msg in an observer's
	// queue is a commit, and the caller asks for a state of the repository that is at least as
	// recent as that commit msg (txid2).  If the retry fails, return an error.
	if txnid < txid2 {
		if retry {
			time.Sleep(common.XACT_COMMIT_WAIT_TIME)
			retry = false
			goto retryLabel
		}

		return nil, common.NewError(common.REPO_ERROR,
			fmt.Sprintf("TransientCommitLog.NewIterator: cannot support ending txid > %d", txnid))
	}

	result := &TransientLogIterator{
		iter:       iter,
		txnid:      txnid,
		repo:       r.repo,
		curTxnid:   common.Txnid(txid1 + 1),
		curKey:     "",
		curContent: nil,
		curError:   nil}

	if result.iter != nil {
		result.curKey, result.curContent, result.curError = result.iter.Next()
	}

	return result, nil
}
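NewIterator retries the snapshot acquisition exactly once, via a label and goto, when the snapshot is older than the required ending txid. A generic sketch of that retry-once shape with a placeholder acquire function and wait time:

package main

import (
	"fmt"
	"time"
)

// acquireAtLeast retries acquire once if the returned version is older
// than minVersion, mirroring the retryLabel/goto shape used above.
func acquireAtLeast(acquire func() (uint64, error), minVersion uint64, wait time.Duration) (uint64, error) {
	retry := true

retryLabel:
	version, err := acquire()
	if err != nil {
		return 0, err
	}
	if version < minVersion {
		if retry {
			time.Sleep(wait)
			retry = false
			goto retryLabel
		}
		return 0, fmt.Errorf("snapshot version %d is older than required %d", version, minVersion)
	}
	return version, nil
}

func main() {
	versions := []uint64{3, 7}
	i := 0
	acquire := func() (uint64, error) { v := versions[i]; i++; return v, nil }
	v, err := acquireAtLeast(acquire, 5, time.Millisecond)
	fmt.Println(v, err) // 7 <nil>
}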
Example 13
//
// Callback when a new proposal arrives
//
func (s *Server) UpdateStateOnNewProposal(proposal protocol.ProposalMsg) {

	fid := proposal.GetFid()
	reqId := proposal.GetReqId()
	txnid := proposal.GetTxnid()

	// If this host is the one that sends the request to the leader
	if fid == s.handler.GetFollowerId() {
		s.state.mutex.Lock()
		defer s.state.mutex.Unlock()

		// look up the request handle from the pending list and
		// move it to the proposed list
		handle, ok := s.state.pendings[reqId]
		if ok {
			delete(s.state.pendings, reqId)
			s.state.proposals[common.Txnid(txnid)] = handle
		}
	}
}
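Examples 2, 13 and 17 all move a request handle from a map keyed by request id to a map keyed by txnid once the proposal is seen. A compact sketch of that bookkeeping with a string standing in for the request handle (names are modeled on the examples, not taken from the real code):

package main

import (
	"fmt"
	"sync"
)

type Txnid uint64

// requestTracker keeps client request handles first by request id (pending),
// then by txnid once the leader has turned the request into a proposal.
type requestTracker struct {
	mutex     sync.Mutex
	pendings  map[uint64]string // reqId -> handle
	proposals map[Txnid]string  // txnid -> handle
}

func newRequestTracker() *requestTracker {
	return &requestTracker{
		pendings:  make(map[uint64]string),
		proposals: make(map[Txnid]string),
	}
}

// onNewProposal moves the handle, if any, from the pending map to the
// proposal map, keyed by the proposal's txnid.
func (t *requestTracker) onNewProposal(reqId uint64, txnid Txnid) {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	if handle, ok := t.pendings[reqId]; ok {
		delete(t.pendings, reqId)
		t.proposals[txnid] = handle
	}
}

func main() {
	t := newRequestTracker()
	t.pendings[1] = "client-handle"
	t.onNewProposal(1, Txnid(100))
	fmt.Println(len(t.pendings), t.proposals[100]) // 0 client-handle
}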
Example 14
// Get value from iterator
func (i *LogIterator) Next() (txnid common.Txnid, op common.OpCode, key string, content []byte, err error) {

	// TODO: Check if fdb and iterator is closed
	key, content, err = i.iter.Next()
	if err != nil {
		return 0, common.OPCODE_INVALID, "", nil, err
	}

	// Since the actual data is stored in the same repository, make sure
	// we don't read it.
	log.Current.Debugf("CommitLog.Next() : Iterator read key %s", key)

	entry, err := unmarshall(content)
	if err != nil {
		return 0, common.OPCODE_INVALID, "", nil, err
	}

	return common.Txnid(entry.GetTxnid()),
		common.GetOpCodeFromInt(entry.GetOpCode()),
		entry.GetKey(), entry.GetContent(), nil
}
Example 15
func (s *watcher) UpdateStateOnNewProposal(proposal protocol.ProposalMsg) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	opCode := common.OpCode(proposal.GetOpCode())
	logging.Debugf("Watcher.UpdateStateOnNewProposal(): receive proposal on metadata kind %d", findTypeFromKey(proposal.GetKey()))

	// register the event for notification
	var evtType EventType = EVENT_NONE
	switch opCode {
	case common.OPCODE_ADD:
		metaType := findTypeFromKey(proposal.GetKey())
		if metaType == KIND_INDEX_DEFN {
			evtType = EVENT_CREATE_INDEX
		} else if metaType == KIND_TOPOLOGY {
			evtType = EVENT_UPDATE_TOPOLOGY
		}
	case common.OPCODE_SET:
		metaType := findTypeFromKey(proposal.GetKey())
		if metaType == KIND_INDEX_DEFN {
			evtType = EVENT_CREATE_INDEX
		} else if metaType == KIND_TOPOLOGY {
			evtType = EVENT_UPDATE_TOPOLOGY
		}
	case common.OPCODE_DELETE:
		if findTypeFromKey(proposal.GetKey()) == KIND_INDEX_DEFN {
			evtType = EVENT_DROP_INDEX
		}
	default:
		logging.Debugf("Watcher.UpdateStateOnNewProposal(): recieve proposal with opcode %d.  Skip convert proposal to event.", opCode)
	}

	logging.Debugf("Watcher.UpdateStateOnNewProposal(): convert metadata type to event  %d", evtType)
	if evtType != EVENT_NONE {
		logging.Debugf("Watcher.UpdateStateOnNewProposal(): register event for txid %d", proposal.GetTxnid())
		s.notifications[common.Txnid(proposal.GetTxnid())] =
			newNotificationHandle(proposal.GetKey(), evtType, proposal.GetContent())
	}
}
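The switch in UpdateStateOnNewProposal maps an (opcode, metadata kind) pair to an event type. The same mapping can be expressed as a lookup table; a sketch with stand-in constants (these are not the real common or manager definitions):

package main

import "fmt"

type OpCode int
type MetaKind int
type EventType int

const (
	OPCODE_ADD OpCode = iota
	OPCODE_SET
	OPCODE_DELETE
)

const (
	KIND_INDEX_DEFN MetaKind = iota
	KIND_TOPOLOGY
	KIND_UNKNOWN
)

const (
	EVENT_NONE EventType = iota
	EVENT_CREATE_INDEX
	EVENT_UPDATE_TOPOLOGY
	EVENT_DROP_INDEX
)

// eventTable maps an (opcode, metadata kind) pair to the event to raise.
var eventTable = map[OpCode]map[MetaKind]EventType{
	OPCODE_ADD:    {KIND_INDEX_DEFN: EVENT_CREATE_INDEX, KIND_TOPOLOGY: EVENT_UPDATE_TOPOLOGY},
	OPCODE_SET:    {KIND_INDEX_DEFN: EVENT_CREATE_INDEX, KIND_TOPOLOGY: EVENT_UPDATE_TOPOLOGY},
	OPCODE_DELETE: {KIND_INDEX_DEFN: EVENT_DROP_INDEX},
}

// eventFor returns EVENT_NONE for any pair not in the table.
func eventFor(op OpCode, kind MetaKind) EventType {
	if byKind, ok := eventTable[op]; ok {
		if evt, ok := byKind[kind]; ok {
			return evt
		}
	}
	return EVENT_NONE
}

func main() {
	fmt.Println(eventFor(OPCODE_SET, KIND_TOPOLOGY) == EVENT_UPDATE_TOPOLOGY) // true
	fmt.Println(eventFor(OPCODE_DELETE, KIND_TOPOLOGY) == EVENT_NONE)         // true
}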
Example 16
func (l *LeaderSyncProxy) updateCurrentEpochAfterQuorum() error {

	log.Current.Debugf("LeaderSyncProxy.updateCurrentEpochAfterQuorum()")

	// Get my follower's vote for the epoch ack
	packet, err := listen("EpochAck", l.follower)
	if err != nil {
		return err
	}

	// Get epoch from follower message
	// TODO : Validate follower epoch
	info := packet.(EpochAckMsg)
	l.followerState.currentEpoch = info.GetCurrentEpoch()
	l.followerState.lastLoggedTxid = common.Txnid(info.GetLastLoggedTxid())

	// update my vote and wait for quorum of ack from followers
	ok := l.state.voteEpochAck(l.GetFid(), l.followerState.voting)
	if !ok {
		return common.NewError(common.ELECTION_ERROR,
			"LeaderSyncProxy.updateCurrentEpochAfterQuorum(): Fail to reach quorum on current epoch (EpochAck)")
	}

	// update the current epoch after a quorum of followers have ack'ed
	epoch, err := l.handler.GetAcceptedEpoch()
	if err != nil {
		return err
	}

	// update the current epoch based on the quorum result.   This function
	// will perform the update only if the new epoch is larger than the existing value.
	err = l.handler.NotifyNewCurrentEpoch(epoch)
	if err != nil {
		return err
	}

	return nil
}
Example 17
//
// Update the request upon new proposal.
//
func (c *Coordinator) updateRequestOnNewProposal(proposal protocol.ProposalMsg) {

	fid := proposal.GetFid()
	reqId := proposal.GetReqId()
	txnid := proposal.GetTxnid()

	logging.Debugf("Coorindator.updateRequestOnNewProposal(): recieve proposal. Txnid %d, follower id %s, coorindator fid %s",
		txnid, fid, c.GetFollowerId())

	// If this host is the one that sends the request to the leader
	if fid == c.GetFollowerId() {
		c.state.mutex.Lock()
		defer c.state.mutex.Unlock()

		// look up the request handle from the pending list and
		// move it to the proposed list
		handle, ok := c.state.pendings[reqId]
		if ok {
			delete(c.state.pendings, reqId)
			c.state.proposals[common.Txnid(txnid)] = handle
		}
	}
}
Example 18
func (l *FollowerSyncProxy) syncReceive() error {

	log.Current.Tracef("FollowerSyncProxy.syncReceive()")

	lastCommittedFromLeader := common.BOOTSTRAP_LAST_COMMITTED_TXID
	pendingCommit := make([]LogEntryMsg, 0, common.MAX_PROPOSALS)

	for {
		packet, err := listen("LogEntry", l.leader)
		if err != nil {
			return err
		}

		entry := packet.(LogEntryMsg)
		lastTxnid := common.Txnid(entry.GetTxnid())

		// If this is the first one, skip
		if entry.GetOpCode() == uint32(common.OPCODE_STREAM_BEGIN_MARKER) {
			log.Current.Debugf("LeaderSyncProxy.syncReceive(). Receive stream_begin.  Txid : %d", lastTxnid)
			lastCommittedFromLeader = lastTxnid
			continue
		}

		// If this is the last one, then flush the pending log entries as well.  The streamEnd
		// message has a more recent lastCommittedTxid from the leader, which is retrieved after
		// the last log entry is sent.
		if entry.GetOpCode() == uint32(common.OPCODE_STREAM_END_MARKER) {
			log.Current.Debugf("LeaderSyncProxy.syncReceive(). Receive stream_end.  Txid : %d", lastTxnid)
			lastCommittedFromLeader = lastTxnid

			// write any log entry that has not been logged.
			for _, entry := range pendingCommit {
				toCommit := entry.GetTxnid() <= uint64(lastCommittedFromLeader)

				if err := l.handler.LogAndCommit(common.Txnid(entry.GetTxnid()),
					entry.GetOpCode(),
					entry.GetKey(),
					entry.GetContent(),
					toCommit); err != nil {
					return err
				}
			}

			return nil
		}

		// write the new log entry.  If the txid is less than the last known committed txid
		// from the leader, then commit the entry. Otherwise, keep it in a pending list.
		toCommit := lastTxnid <= lastCommittedFromLeader
		if toCommit {
			// This call needs to be atomic to ensure that the commit log and the data store
			// are updated transactionally.  This ensures that if the follower crashes, the
			// repository as a whole remains in a consistent state.
			if err := l.handler.LogAndCommit(common.Txnid(entry.GetTxnid()),
				entry.GetOpCode(),
				entry.GetKey(),
				entry.GetContent(),
				true); err != nil {
				return err
			}
		} else {
			pendingCommit = append(pendingCommit, entry)
		}
	}

	return nil
}
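syncReceive commits an entry immediately only when its txid is at or below the last committed txid announced at stream begin, and holds everything else until the stream-end marker, which carries a fresher committed txid. A small sketch of that split, with the stream flattened into a slice for clarity (names and types are illustrative):

package main

import "fmt"

type Txnid uint64

type logEntry struct {
	txnid Txnid
	key   string
}

// applySyncStream mirrors the commit-vs-pending split above: entries whose
// txnid is at or below the committed txid seen at stream begin are committed
// right away; the rest are buffered and flushed at stream end, using the
// fresher committed txid carried by the end marker.
func applySyncStream(entries []logEntry, beginCommitted, endCommitted Txnid, commit func(logEntry, bool)) {
	var pending []logEntry
	for _, e := range entries {
		if e.txnid <= beginCommitted {
			commit(e, true) // already committed by the leader: log and commit now
		} else {
			pending = append(pending, e)
		}
	}
	// Stream end: flush the buffered entries, committing only those covered
	// by the end marker's committed txid.
	for _, e := range pending {
		commit(e, e.txnid <= endCommitted)
	}
}

func main() {
	entries := []logEntry{{1, "a"}, {2, "b"}, {3, "c"}}
	applySyncStream(entries, Txnid(1), Txnid(2), func(e logEntry, toCommit bool) {
		fmt.Printf("txnid=%d commit=%v\n", e.txnid, toCommit)
	})
}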
Example 19
func (l *FollowerSyncProxy) receiveAndUpdateAcceptedEpoch() error {

	log.Current.Debugf("FollowerSyncProxy.receiveAndUpdateAcceptedEpoch()")

	// Get the accepted epoch from the leader.   This epoch
	// is already being voted on by multiple followers (the highest
	// epoch among the quorum of followers).
	packet, err := listen("LeaderInfo", l.leader)
	if err != nil {
		return err
	}

	// Get epoch from leader message
	info := packet.(LeaderInfoMsg)
	epoch := info.GetAcceptedEpoch()

	acceptedEpoch, err := l.handler.GetAcceptedEpoch()
	if err != nil {
		return err
	}
	if epoch > acceptedEpoch {
		// Update the accepted epoch based on the quorum result.   This function
		// will perform update only if the new epoch is larger than existing value.
		// Once the accepted epoch is updated, it will not be reset even if the
		// synchronization with the leader fails.  Therefore, the follower will always
		// remember the largest accepted epoch known to it, such that it can be used
		// in the next round of voting.   Note that the leader derives this new accepted
		// epoch only after it has polled from a quorum of followers.  So even if sync fails,
		// it is unlikely that in the next sync, the leader will give a new accepted epoch smaller
		// than what is being stored now.
		err = l.handler.NotifyNewAcceptedEpoch(epoch)
		if err != nil {
			return err
		}
	} else if epoch == acceptedEpoch {
		// In ZK, if the local epoch (acceptedEpoch) == leader's epoch (epoch), it will reply with an EpochAck with epoch = -1.
		// This tells the leader that it should not count this EpochAck when computing the quorum of EpochAck.
		// This ensures that this follower does not "double ack" to the leader (e.g. when this follower rejoins a
		// stable ensemble).   In our implementation of ConsentState, it is not affected by a double ack from the same host.
	} else {
		return common.NewError(common.PROTOCOL_ERROR, "Accepted Epoch from leader is smaller than my accepted epoch.")
	}

	// Notify the leader that I have accepted the epoch.  Send
	// the last logged txid and current epoch to the leader.
	txid, err := l.handler.GetLastLoggedTxid()
	if err != nil {
		return err
	}
	currentEpoch, err := l.handler.GetCurrentEpoch()
	if err != nil {
		return err
	}

	l.state.lastLoggedTxid = common.Txnid(txid)
	l.state.currentEpoch = currentEpoch

	packet = l.factory.CreateEpochAck(uint64(txid), currentEpoch)
	return send(packet, l.leader)
}
Example 20
func (w *watcher) GetLastCommittedTxid() (common.Txnid, error) {
	return common.Txnid(0), nil
}
Example 21
func (c *Coordinator) GetLastCommittedTxid() (common.Txnid, error) {
	val, err := c.config.GetLastCommittedTxnId()
	return common.Txnid(val), err
}
Example 22
func (a *ServerAction) GetLastCommittedTxid() (common.Txnid, error) {
	val, err := a.config.GetLastCommittedTxnId()
	return common.Txnid(val), err
}
Example 23
//
// Bootstrap
//
func (s *EmbeddedServer) bootstrap() (err error) {

	defer func() {
		r := recover()
		if r != nil {
			log.Current.Errorf("panic in EmbeddedServer.bootstrap() : %s\n", r)
			log.Current.Errorf("%s", log.Current.StackTrace())
		}

		if err != nil || r != nil {
			common.SafeRun("EmbeddedServer.bootstrap()",
				func() {
					s.cleanupState()
				})
		}
	}()

	// Initialize server state
	s.state = newServerState()

	// Create and initialize new txn state.
	s.txn = common.NewTxnState()

	// Initialize repository service
	s.repo, err = r.OpenRepositoryWithName(s.repoName, s.quota)
	if err != nil {
		return err
	}

	// Initialize server config
	s.srvConfig = r.NewServerConfig(s.repo)

	// initialize the current transaction id to the lastLoggedTxid.  This
	// is the txid that this node has seen so far.  If this node becomes
	// the leader, a new epoch will be used and new current txid will
	// be generated. So no need to initialize the epoch at this point.
	lastLoggedTxid, err := s.srvConfig.GetLastLoggedTxnId()
	if err != nil {
		return err
	}
	s.txn.InitCurrentTxnid(common.Txnid(lastLoggedTxid))

	// Initialize commit log
	lastCommittedTxid, err := s.srvConfig.GetLastCommittedTxnId()
	if err != nil {
		return err
	}
	s.log, err = r.NewTransientCommitLog(s.repo, lastCommittedTxid)
	if err != nil {
		return err
	}

	// Initialize various callback facility for leader election and
	// voting protocol.
	s.factory = message.NewConcreteMsgFactory()
	s.handler = action.NewServerActionWithNotifier(s.repo, s.log, s.srvConfig, s, s.notifier, s.txn, s.factory, s)
	s.skillch = make(chan bool, 1) // make it buffered to unblock sender

	// Need to start the peer listener before election. A follower may
	// finish its election before a leader finishes its election. Therefore,
	// a follower node can request a connection to the leader node before that
	// node knows it is a leader.  By starting the listener now, the
	// follower can establish the connection and the leader can handle it
	// at a later time (when it is ready to be a leader).
	s.listener, err = common.StartPeerListener(s.msgAddr)
	if err != nil {
		err = common.WrapError(common.SERVER_ERROR, "Fail to start PeerListener. err = %v", err)
		return
	}

	return nil
}
Example 24
func (r *ServerConfig) GetLastCommittedTxnId() (common.Txnid, error) {
	value, err := r.GetInt(common.CONFIG_LAST_COMMITTED_TXID)
	return common.Txnid(value), err
}
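Accessors like this one, and the GetLastCommittedTxid variants in Examples 20-22, all follow the same shape: read a stored integer and convert it to the Txnid type at the boundary. A toy sketch of that pattern, assuming an unsigned 64-bit Txnid and a map-backed config store (the real ServerConfig is a repository-backed store, so the storage layer here is purely illustrative):

package main

import "fmt"

type Txnid uint64

// config is a stand-in for the ServerConfig-style key/value store used above.
type config struct {
	values map[string]uint64
}

// getInt returns the stored integer for a key, or 0 if the key is absent.
func (c *config) getInt(key string) (uint64, error) {
	return c.values[key], nil
}

// getLastCommittedTxnId mirrors the accessor pattern: fetch the raw integer
// and convert it to the Txnid type at the boundary.
func (c *config) getLastCommittedTxnId() (Txnid, error) {
	value, err := c.getInt("LastCommittedTxid")
	return Txnid(value), err
}

func main() {
	c := &config{values: map[string]uint64{"LastCommittedTxid": 1001}}
	txid, _ := c.getLastCommittedTxnId()
	fmt.Println(txid) // 1001
}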