Example #1
//
// Bootstrap
//
func (s *Coordinator) bootstrap(config string) (err error) {

	s.state.mutex.Lock()
	defer s.state.mutex.Unlock()

	if s.state.done {
		return
	}

	s.env, err = newEnv(config)
	if err != nil {
		return err
	}

	// Initialize server state
	s.state.resetCoordinatorState()

	// Initialize the various callback facilities for the leader election
	// and voting protocol.
	s.factory = message.NewConcreteMsgFactory()
	s.skillch = make(chan bool, 1) // make it buffered to unblock sender
	s.site = nil

	// Create and initialize new txn state.
	s.txn = common.NewTxnState()

	// Initialize the state to enable voting
	repoName := filepath.Join(s.basepath, COORDINATOR_CONFIG_STORE)
	s.configRepo, err = r.OpenRepositoryWithName(repoName, s.idxMgr.GetMemoryQuota())
	if err != nil {
		return err
	}

	s.config = r.NewServerConfig(s.configRepo)
	lastLoggedTxid, err := s.config.GetLastLoggedTxnId()
	if err != nil {
		return err
	}
	s.txn.InitCurrentTxnid(common.Txnid(lastLoggedTxid))

	// Need to start the peer listener before the election. A follower may
	// finish its election before the leader finishes its own. Therefore,
	// a follower node can request a connection to the leader node before
	// that node knows it is the leader.  By starting the listener now, the
	// follower can establish the connection, and the leader can handle it
	// at a later time (when it is ready to act as the leader).
	s.listener, err = common.StartPeerListener(s.getHostTCPAddr())
	if err != nil {
		return NewError(ERROR_COOR_LISTENER_FAIL, NORMAL, COORDINATOR, err,
			fmt.Sprintf("Index Coordinator : Fail to start PeerListener"))
	}

	// signal that bootstrap is ready
	s.markReady()

	return nil
}
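
The comment "make it buffered to unblock sender" on s.skillch above refers to a common Go shutdown pattern: with a capacity of 1, the goroutine that requests termination can deposit its signal and move on even if the receiver is not yet (or no longer) listening. A minimal standalone sketch of that pattern; the names stopWorker and killch are illustrative and not part of the code above.

package main

import (
	"fmt"
	"time"
)

// stopWorker requests shutdown without ever blocking: the buffered channel
// holds one pending signal, and the default case drops duplicates.
func stopWorker(killch chan bool) {
	select {
	case killch <- true: // delivered, or parked in the one-slot buffer
	default: // a signal is already pending; nothing more to do
	}
}

func main() {
	killch := make(chan bool, 1) // buffered to unblock the sender

	go func() {
		<-killch
		fmt.Println("worker: shutting down")
	}()

	stopWorker(killch)
	stopWorker(killch) // safe to call again; still does not block
	time.Sleep(100 * time.Millisecond)
}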
Example #2
//
// Bootstrap
//
func (s *Server) bootstrap() (err error) {

	// Initialize server state
	s.state = newServerState()

	// Initialize repository service
	s.repo, err = r.OpenRepository()
	if err != nil {
		return err
	}
	s.log = r.NewCommitLog(s.repo)
	s.srvConfig = r.NewServerConfig(s.repo)

	// Create and initialize new txn state.
	s.txn = common.NewTxnState()

	// Initialize the current transaction id to lastLoggedTxid.  This is
	// the last txid that this node has seen so far.  If this node becomes
	// the leader, a new epoch will be used and a new current txid will be
	// generated, so there is no need to initialize the epoch at this point.
	lastLoggedTxid, err := s.srvConfig.GetLastLoggedTxnId()
	if err != nil {
		return err
	}
	s.txn.InitCurrentTxnid(common.Txnid(lastLoggedTxid))

	// Initialize the various callback facilities for the leader election
	// and voting protocol.
	s.factory = message.NewConcreteMsgFactory()
	s.handler = action.NewServerAction(s.repo, s.log, s.srvConfig, s, s.txn, s.factory, s)
	s.skillch = make(chan bool, 1) // make it buffered to unblock sender
	s.site = nil

	// Need to start the peer listener before the election. A follower may
	// finish its election before the leader finishes its own. Therefore,
	// a follower node can request a connection to the leader node before
	// that node knows it is the leader.  By starting the listener now, the
	// follower can establish the connection, and the leader can handle it
	// at a later time (when it is ready to act as the leader).
	s.listener, err = common.StartPeerListener(GetHostTCPAddr())
	if err != nil {
		return common.WrapError(common.SERVER_ERROR, "Fail to start PeerListener.", err)
	}

	// Start a request listener.
	s.reqListener, err = StartRequestListener(GetHostRequestAddr(), s)
	if err != nil {
		return common.WrapError(common.SERVER_ERROR, "Fail to start RequestListener.", err)
	}

	return nil
}
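
The long comment before common.StartPeerListener captures an ordering requirement: the socket must be open before the election result is known, because a fast follower may dial in while this node is still electing. The sketch below illustrates that idea with nothing but the standard library; it is not the gometa implementation, and the connection-parking channel is purely illustrative.

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Listen before the election: a follower that finishes its election
	// early can already connect, and the connection is simply parked until
	// this node knows whether it is the leader.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	pending := make(chan net.Conn, 16)
	go func() {
		for {
			conn, err := ln.Accept()
			if err != nil {
				return // listener closed
			}
			pending <- conn // parked until the election outcome is known
		}
	}()

	// Simulate a follower connecting while our election is still running.
	go func() { net.Dial("tcp", ln.Addr().String()) }()

	time.Sleep(50 * time.Millisecond) // stand-in for the election
	fmt.Println("election finished; this node is the leader")

	select {
	case conn := <-pending:
		fmt.Println("handling follower connection accepted during the election:", conn.RemoteAddr())
		conn.Close()
	default:
		fmt.Println("no early connections")
	}
}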
Example #3
func (s *fakeServer) bootstrap() (err error) {

	// Initialize repository service
	s.repo, err = repo.OpenRepository()
	if err != nil {
		return err
	}

	s.txn = common.NewTxnState()
	s.factory = message.NewConcreteMsgFactory()
	s.handler = action.NewDefaultServerAction(s.repo, s, s.txn)
	s.killch = make(chan bool, 1) // make it buffered to unblock sender
	s.status = protocol.ELECTING

	return nil
}
Example #4
func startWatcher(mgr *IndexManager,
	repo *repo.Repository,
	leaderAddr string,
	watcherId string) (s *watcher, err error) {

	s = new(watcher)

	s.mgr = mgr
	s.leaderAddr = leaderAddr
	s.repo = repo
	s.isClosed = false
	s.observes = make(map[string]*observeHandle)
	s.notifications = make(map[common.Txnid]*notificationHandle)

	s.watcherAddr, err = getWatcherAddr(watcherId)
	if err != nil {
		return nil, err
	}
	logging.Debugf("watcher.startWatcher(): watcher follower ID %s", s.watcherAddr)

	s.txn = common.NewTxnState()
	s.factory = message.NewConcreteMsgFactory()
	// TODO: Using DefaultServerAction, but need a callback on LogAndCommit
	s.handler = action.NewDefaultServerAction(s.repo, s, s.txn)
	s.killch = make(chan bool, 1) // make it buffered to unblock sender
	s.status = protocol.ELECTING

	readych := make(chan bool)

	// TODO: call Close() to cleanup the state upon retry by the watcher server
	go protocol.RunWatcherServer(
		leaderAddr,
		s.handler,
		s.factory,
		s.killch,
		readych)

	// TODO: timeout
	<-readych

	return s, nil
}
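
The "// TODO: timeout" in startWatcher refers to the unguarded "<-readych" receive: if RunWatcherServer never signals readiness, the caller blocks forever. One way to address it is a select against a timer, sketched below as a standalone example; the waitReady name and the timeout values are illustrative only.

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitReady blocks until readych fires or the timeout expires. This is a
// generic sketch of the bounded wait the TODO above asks for.
func waitReady(readych <-chan bool, timeout time.Duration) error {
	select {
	case <-readych:
		return nil
	case <-time.After(timeout):
		return errors.New("timed out waiting for the watcher server to become ready")
	}
}

func main() {
	readych := make(chan bool)

	// Simulate RunWatcherServer signalling readiness after a short delay.
	go func() {
		time.Sleep(10 * time.Millisecond)
		readych <- true
	}()

	if err := waitReady(readych, time.Second); err != nil {
		fmt.Println("startWatcher would fail and clean up:", err)
		return
	}
	fmt.Println("watcher server is ready")
}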
Example #5
//
// Bootstrap
//
func (s *EmbeddedServer) bootstrap() (err error) {

	defer func() {
		r := recover()
		if r != nil {
			log.Current.Errorf("panic in EmbeddedServer.bootstrap() : %s\n", r)
			log.Current.Errorf("%s", log.Current.StackTrace())
		}

		if err != nil || r != nil {
			common.SafeRun("EmbeddedServer.bootstrap()",
				func() {
					s.cleanupState()
				})
		}
	}()

	// Initialize server state
	s.state = newServerState()

	// Create and initialize new txn state.
	s.txn = common.NewTxnState()

	// Initialize repository service
	s.repo, err = r.OpenRepositoryWithName(s.repoName, s.quota)
	if err != nil {
		return err
	}

	// Initialize server config
	s.srvConfig = r.NewServerConfig(s.repo)

	// Initialize the current transaction id to lastLoggedTxid.  This is
	// the last txid that this node has seen so far.  If this node becomes
	// the leader, a new epoch will be used and a new current txid will be
	// generated, so there is no need to initialize the epoch at this point.
	lastLoggedTxid, err := s.srvConfig.GetLastLoggedTxnId()
	if err != nil {
		return err
	}
	s.txn.InitCurrentTxnid(common.Txnid(lastLoggedTxid))

	// Initialize commit log
	lastCommittedTxid, err := s.srvConfig.GetLastCommittedTxnId()
	if err != nil {
		return err
	}
	s.log, err = r.NewTransientCommitLog(s.repo, lastCommittedTxid)
	if err != nil {
		return err
	}

	// Initialize the various callback facilities for the leader election
	// and voting protocol.
	s.factory = message.NewConcreteMsgFactory()
	s.handler = action.NewServerActionWithNotifier(s.repo, s.log, s.srvConfig, s, s.notifier, s.txn, s.factory, s)
	s.skillch = make(chan bool, 1) // make it buffered to unblock sender

	// Need to start the peer listener before the election. A follower may
	// finish its election before the leader finishes its own. Therefore,
	// a follower node can request a connection to the leader node before
	// that node knows it is the leader.  By starting the listener now, the
	// follower can establish the connection, and the leader can handle it
	// at a later time (when it is ready to act as the leader).
	s.listener, err = common.StartPeerListener(s.msgAddr)
	if err != nil {
		err = common.WrapError(common.SERVER_ERROR, "Fail to start PeerListener. err = %v", err)
		return
	}

	return nil
}
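
EmbeddedServer.bootstrap relies on its named return value err: because err is a named result, the deferred closure can inspect it (together with any recovered panic) after the function body has decided the outcome, and clean up partially initialized state on failure. A minimal standalone sketch of that pattern follows; bootstrapLike and its messages are illustrative names, not part of the code above.

package main

import "fmt"

// bootstrapLike mirrors the structure above: a named error result plus a
// deferred closure that recovers panics and cleans up on any failure.
func bootstrapLike(fail bool) (err error) {
	defer func() {
		r := recover()
		if r != nil {
			fmt.Println("recovered from panic:", r)
		}
		if err != nil || r != nil {
			fmt.Println("cleaning up partially initialized state")
		}
	}()

	if fail {
		return fmt.Errorf("simulated bootstrap failure")
	}
	return nil
}

func main() {
	fmt.Println("successful run:", bootstrapLike(false))
	fmt.Println("failed run:", bootstrapLike(true))
}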