Code example #1
File: lifecycle.go Project: jchris/indexing
func (m *LifecycleMgr) OnNewRequest(fid string, request protocol.RequestMsg) {

	req := &requestHolder{request: request, fid: fid}
	op := c.OpCode(request.GetOpCode())

	logging.Debugf("LifecycleMgr.OnNewRequest(): queuing new request. reqId %v opCode %v", request.GetReqId(), op)

	if op == client.OPCODE_INDEXER_READY {
		m.indexerReady = true
		close(m.bootstraps)

	} else if op == client.OPCODE_SERVICE_MAP {
		// Shortcut the connection request by spawning its own goroutine.
		// This call does not change the state of the repository, so it
		// is OK to shortcut.
		go m.dispatchRequest(req, message.NewConcreteMsgFactory())

	} else {
		// If the indexer is not yet ready, queue bootstrap-phase requests in
		// the bootstrap queue so they can be processed.  Client requests are
		// queued in the regular queue until the indexer is ready.
		if !m.indexerReady {
			if op == client.OPCODE_UPDATE_INDEX_INST || op == client.OPCODE_DELETE_BUCKET {
				m.bootstraps <- req
				return
			}
		}

		// Create/drop/build index requests always go to the client queue,
		// which waits for the indexer to be ready.
		m.incomings <- req
	}
}
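
The routing above hinges on two queues plus a readiness flag. Below is a minimal, self-contained sketch of that pattern; the requestHolder fields, channel capacities, and the isBootstrapOp flag are illustrative assumptions, not the project's actual definitions.

package main

import "fmt"

type requestHolder struct {
	fid string
}

type lifecycleMgr struct {
	indexerReady bool
	bootstraps   chan *requestHolder // drained during the bootstrap phase
	incomings    chan *requestHolder // regular queue, drained once indexer is ready
}

// route mirrors the branch structure of OnNewRequest: bootstrap-phase
// opcodes go to the bootstrap queue until the indexer is ready, and
// everything else waits in the regular queue.
func (m *lifecycleMgr) route(req *requestHolder, isBootstrapOp bool) {
	if !m.indexerReady && isBootstrapOp {
		m.bootstraps <- req
		return
	}
	m.incomings <- req
}

func main() {
	m := &lifecycleMgr{
		bootstraps: make(chan *requestHolder, 8),
		incomings:  make(chan *requestHolder, 8),
	}
	m.route(&requestHolder{fid: "1"}, true)
	m.route(&requestHolder{fid: "2"}, false)
	fmt.Println(len(m.bootstraps), len(m.incomings)) // 1 1
}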
Code example #2
File: coordinator.go Project: jchris/indexing
//
// Bootstrap
//
func (s *Coordinator) bootstrap(config string) (err error) {

	s.state.mutex.Lock()
	defer s.state.mutex.Unlock()

	if s.state.done {
		return
	}

	s.env, err = newEnv(config)
	if err != nil {
		return err
	}

	// Initialize server state
	s.state.resetCoordinatorState()

	// Initialize the various callback facilities for the leader election
	// and voting protocol.
	s.factory = message.NewConcreteMsgFactory()
	s.skillch = make(chan bool, 1) // make it buffered to unblock sender
	s.site = nil

	// Create and initialize new txn state.
	s.txn = common.NewTxnState()

	// Initialize the state to enable voting
	repoName := filepath.Join(s.basepath, COORDINATOR_CONFIG_STORE)
	s.configRepo, err = r.OpenRepositoryWithName(repoName, s.idxMgr.GetMemoryQuota())
	if err != nil {
		return err
	}

	s.config = r.NewServerConfig(s.configRepo)
	lastLoggedTxid, err := s.config.GetLastLoggedTxnId()
	if err != nil {
		return err
	}
	s.txn.InitCurrentTxnid(common.Txnid(lastLoggedTxid))

	// The peer listener must be started before the election. A follower may
	// finish its election before the leader finishes its own; therefore, a
	// follower node can request a connection to the leader node before that
	// node knows it is the leader.  Starting the listener now allows the
	// follower to establish the connection and lets the leader handle that
	// connection later (when it is ready to act as the leader).
	s.listener, err = common.StartPeerListener(s.getHostTCPAddr())
	if err != nil {
		return NewError(ERROR_COOR_LISTENER_FAIL, NORMAL, COORDINATOR, err,
			fmt.Sprintf("Index Coordinator: Failed to start PeerListener"))
	}

	// Signal that bootstrap is complete.
	s.markReady()

	return nil
}
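
One detail worth noting above is the guard at the top: bootstrap takes the state mutex and returns immediately once done is set, so a bootstrap attempt after shutdown is a no-op. A minimal sketch of that guard in isolation (the field names are assumptions for illustration):

package main

import (
	"fmt"
	"sync"
)

type coordinator struct {
	mutex sync.Mutex
	done  bool
}

// bootstrap is safe to call concurrently and after shutdown:
// the done flag short-circuits any re-initialization.
func (c *coordinator) bootstrap() error {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	if c.done {
		return nil
	}
	// ... initialization steps would run here ...
	return nil
}

func main() {
	c := &coordinator{done: true}
	fmt.Println(c.bootstrap()) // <nil>; no initialization ran
}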
Code example #3
File: lifecycle.go Project: jchris/indexing
func (m *LifecycleMgr) processRequest() {

	/*
		defer func() {
			if r := recover(); r != nil {
				logging.Debugf("panic in LifecycleMgr.processRequest() : %s\n", r)
				logging.Debugf("%s", debug.Stack())
			}
		}()
	*/

	logging.Debugf("LifecycleMgr.processRequest(): LifecycleMgr is ready to process requests")
	factory := message.NewConcreteMsgFactory()

	// Process any requests from the bootstrap phase.  Once the indexer is
	// ready, this channel is closed and this goroutine proceeds to process
	// regular messages.
END_BOOTSTRAP:
	for {
		select {
		case request, ok := <-m.bootstraps:
			if ok {
				m.dispatchRequest(request, factory)
			} else {
				logging.Debugf("LifecycleMgr.processRequest(): closing bootstrap channel")
				break END_BOOTSTRAP
			}
		case <-m.killch:
			// server shutdown
			logging.Debugf("LifecycleMgr.processRequest(): received kill signal. Stop bootstrap request processing.")
			return
		}
	}

	logging.Debugf("LifecycleMgr.processRequest(): indexer is ready to process new client requests.")

	// Indexer is ready and all bootstrap requests are processed.  Proceed to handle regular messages.
	for {
		select {
		case request, ok := <-m.incomings:
			if ok {
				// TODO: deal with error
				m.dispatchRequest(request, factory)
			} else {
				// server shutdown.
				logging.Debugf("LifecycleMgr.processRequest(): channel for receiving client requests is closed. Terminate.")
				return
			}
		case <-m.killch:
			// server shutdown
			logging.Debugf("LifecycleMgr.processRequest(): received kill signal. Stop client request processing.")
			return
		}
	}
}
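
The transition between the two loops above is driven entirely by closing m.bootstraps: the receive then yields ok == false and the labeled break exits the first select loop. Here is a runnable sketch of that phase-transition idiom, independent of the LifecycleMgr types:

package main

import "fmt"

func main() {
	bootstraps := make(chan string, 2)
	incomings := make(chan string, 2)
	killch := make(chan bool)

	bootstraps <- "boot-1"
	close(bootstraps) // signals the end of the bootstrap phase
	incomings <- "regular-1"

END_BOOTSTRAP:
	for {
		select {
		case req, ok := <-bootstraps:
			if !ok {
				break END_BOOTSTRAP // channel closed: switch to the regular loop
			}
			fmt.Println("bootstrap:", req)
		case <-killch:
			return
		}
	}

	select {
	case req := <-incomings:
		fmt.Println("regular:", req)
	case <-killch:
	}
}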
Code example #4
File: server.go Project: couchbase/gometa
//
// Bootstrap
//
func (s *Server) bootstrap() (err error) {

	// Initialize server state
	s.state = newServerState()

	// Initialize repository service
	s.repo, err = r.OpenRepository()
	if err != nil {
		return err
	}
	s.log = r.NewCommitLog(s.repo)
	s.srvConfig = r.NewServerConfig(s.repo)

	// Create and initialize new txn state.
	s.txn = common.NewTxnState()

	// Initialize the current transaction id to the lastLoggedTxid.  This
	// is the highest txid that this node has seen so far.  If this node
	// becomes the leader, a new epoch will be used and a new current txid
	// will be generated, so there is no need to initialize the epoch here.
	lastLoggedTxid, err := s.srvConfig.GetLastLoggedTxnId()
	if err != nil {
		return err
	}
	s.txn.InitCurrentTxnid(common.Txnid(lastLoggedTxid))

	// Initialize the various callback facilities for the leader election
	// and voting protocol.
	s.factory = message.NewConcreteMsgFactory()
	s.handler = action.NewServerAction(s.repo, s.log, s.srvConfig, s, s.txn, s.factory, s)
	s.skillch = make(chan bool, 1) // make it buffered to unblock sender
	s.site = nil

	// The peer listener must be started before the election. A follower may
	// finish its election before the leader finishes its own; therefore, a
	// follower node can request a connection to the leader node before that
	// node knows it is the leader.  Starting the listener now allows the
	// follower to establish the connection and lets the leader handle that
	// connection later (when it is ready to act as the leader).
	s.listener, err = common.StartPeerListener(GetHostTCPAddr())
	if err != nil {
		return common.WrapError(common.SERVER_ERROR, "Failed to start PeerListener.", err)
	}

	// Start a request listener.
	s.reqListener, err = StartRequestListener(GetHostRequestAddr(), s)
	if err != nil {
		return common.WrapError(common.SERVER_ERROR, "Failed to start RequestListener.", err)
	}

	return nil
}
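
The comment above about seeding the txid deserves a second look: InitCurrentTxnid records the highest txid this node has logged so that ids issued later stay above it. A toy stand-in for that seeding logic follows; it deliberately ignores the epoch component the comment mentions, so it is an illustration, not gometa's TxnState.

package main

import "fmt"

type Txnid uint64

// txnState is a stand-in for gometa's TxnState; the real type also
// folds an epoch into the id, which this sketch ignores.
type txnState struct {
	currentTxnid Txnid
}

// initCurrentTxnid seeds the counter with the highest txid this node
// has logged so far.
func (t *txnState) initCurrentTxnid(last Txnid) {
	t.currentTxnid = last
}

// nextTxnid issues ids strictly above the seed.
func (t *txnState) nextTxnid() Txnid {
	t.currentTxnid++
	return t.currentTxnid
}

func main() {
	t := &txnState{}
	t.initCurrentTxnid(42)     // e.g. the lastLoggedTxid read from the server config
	fmt.Println(t.nextTxnid()) // prints 43
}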
Code example #5
File: watcher.go Project: couchbase/gometa
func (s *fakeServer) bootstrap() (err error) {

	// Initialize repository service
	s.repo, err = repo.OpenRepository()
	if err != nil {
		return err
	}

	s.txn = common.NewTxnState()
	s.factory = message.NewConcreteMsgFactory()
	s.handler = action.NewDefaultServerAction(s.repo, s, s.txn)
	s.killch = make(chan bool, 1) // make it buffered to unblock sender
	s.status = protocol.ELECTING

	return nil
}
Code example #6
//
// Create a new commit log
//
func NewTransientCommitLog(repo *Repository, lastCommittedTxnid common.Txnid) (CommitLogger, error) {

	log := &TransientCommitLog{
		repo:    repo,
		factory: message.NewConcreteMsgFactory(),
		logs:    make(map[common.Txnid]*message.LogEntry)}

	if lastCommittedTxnid != common.BOOTSTRAP_LAST_COMMITTED_TXID {
		if err := repo.CreateSnapshot(MAIN, lastCommittedTxnid); err != nil {
			logging.Current.Errorf("NewTransientCommitLog: Cannot create initial snapshot")
			return nil, err
		}
	}

	return log, nil
}
Code example #7
File: action.go Project: couchbase/gometa
func NewDefaultServerAction(repository *repo.Repository,
	server DefaultServerCallback,
	txn *common.TxnState) *ServerAction {

	log := repo.NewCommitLog(repository)
	config := repo.NewServerConfig(repository)
	factory := message.NewConcreteMsgFactory()

	return &ServerAction{
		repo:     repository,
		log:      log,
		config:   config,
		txn:      txn,
		server:   server,
		notifier: nil,
		factory:  factory,
		verifier: server}
}
Code example #8
File: metadata_provider.go Project: jchris/indexing
func newWatcher(o *MetadataProvider, addr string) *watcher {
	s := new(watcher)

	s.provider = o
	s.leaderAddr = addr
	s.killch = make(chan bool, 1) // make it buffered to unblock sender
	s.alivech = make(chan bool, 1)
	s.pingch = make(chan bool, 1)
	s.factory = message.NewConcreteMsgFactory()
	s.pendings = make(map[common.Txnid]protocol.LogEntryMsg)
	s.incomingReqs = make(chan *protocol.RequestHandle, REQUEST_CHANNEL_COUNT)
	s.pendingReqs = make(map[uint64]*protocol.RequestHandle)
	s.loggedReqs = make(map[common.Txnid]*protocol.RequestHandle)
	s.notifiers = make(map[c.IndexDefnId]*event)
	s.indices = make(map[c.IndexDefnId]interface{})
	s.isClosed = false

	return s
}
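
The "make it buffered to unblock sender" comment recurs throughout these constructors. A kill channel with capacity 1 lets the shutdown side deposit its signal without waiting for the worker to reach its select. A tiny sketch of why the buffer matters:

package main

import "fmt"

func main() {
	// With capacity 1, the sender does not block even if no
	// goroutine is currently receiving on the channel.
	killch := make(chan bool, 1)
	killch <- true // an unbuffered channel would deadlock here

	// The worker picks the signal up whenever it next polls.
	select {
	case <-killch:
		fmt.Println("worker observed the kill signal")
	default:
		fmt.Println("no signal pending")
	}
}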
Code example #9
File: watcher.go Project: jchris/indexing
func startWatcher(mgr *IndexManager,
	repo *repo.Repository,
	leaderAddr string,
	watcherId string) (s *watcher, err error) {

	s = new(watcher)

	s.mgr = mgr
	s.leaderAddr = leaderAddr
	s.repo = repo
	s.isClosed = false
	s.observes = make(map[string]*observeHandle)
	s.notifications = make(map[common.Txnid]*notificationHandle)

	s.watcherAddr, err = getWatcherAddr(watcherId)
	if err != nil {
		return nil, err
	}
	logging.Debugf("watcher.startWatcher(): watcher follower ID %s", s.watcherAddr)

	s.txn = common.NewTxnState()
	s.factory = message.NewConcreteMsgFactory()
	// TODO: Using DefaultServerAction, but need a callback on LogAndCommit
	s.handler = action.NewDefaultServerAction(s.repo, s, s.txn)
	s.killch = make(chan bool, 1) // make it buffered to unblock sender
	s.status = protocol.ELECTING

	readych := make(chan bool)

	// TODO: call Close() to cleanup the state upon retry by the watcher server
	go protocol.RunWatcherServer(
		leaderAddr,
		s.handler,
		s.factory,
		s.killch,
		readych)

	// TODO: timeout
	<-readych

	return s, nil
}
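
The "TODO: timeout" above flags that the blocking receive on readych waits forever if RunWatcherServer never signals readiness. One way the timeout could be added is a select against a timer; the duration below is an arbitrary assumption:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitReady blocks until the server signals readiness or the
// timeout elapses, whichever comes first.
func waitReady(readych <-chan bool, timeout time.Duration) error {
	select {
	case <-readych:
		return nil
	case <-time.After(timeout):
		return errors.New("watcher server did not become ready in time")
	}
}

func main() {
	readych := make(chan bool, 1)
	readych <- true
	fmt.Println(waitReady(readych, 5*time.Second)) // <nil>
}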
Code example #10
File: commit_log.go Project: couchbase/gometa
//
// Create a new commit log
//
func NewCommitLog(repo *Repository) *CommitLog {
	return &CommitLog{repo: repo,
		factory: message.NewConcreteMsgFactory()}
}
Code example #11
File: embeddedServer.go Project: couchbase/gometa
//
// Bootstrap
//
func (s *EmbeddedServer) bootstrap() (err error) {

	defer func() {
		r := recover()
		if r != nil {
			log.Current.Errorf("panic in EmbeddedServer.bootstrap() : %s\n", r)
			log.Current.Errorf("%s", log.Current.StackTrace())
		}

		if err != nil || r != nil {
			common.SafeRun("EmbeddedServer.bootstrap()",
				func() {
					s.cleanupState()
				})
		}
	}()

	// Initialize server state
	s.state = newServerState()

	// Create and initialize new txn state.
	s.txn = common.NewTxnState()

	// Initialize repository service
	s.repo, err = r.OpenRepositoryWithName(s.repoName, s.quota)
	if err != nil {
		return err
	}

	// Initialize server config
	s.srvConfig = r.NewServerConfig(s.repo)

	// Initialize the current transaction id to the lastLoggedTxid.  This
	// is the highest txid that this node has seen so far.  If this node
	// becomes the leader, a new epoch will be used and a new current txid
	// will be generated, so there is no need to initialize the epoch here.
	lastLoggedTxid, err := s.srvConfig.GetLastLoggedTxnId()
	if err != nil {
		return err
	}
	s.txn.InitCurrentTxnid(common.Txnid(lastLoggedTxid))

	// Initialize commit log
	lastCommittedTxid, err := s.srvConfig.GetLastCommittedTxnId()
	if err != nil {
		return err
	}
	s.log, err = r.NewTransientCommitLog(s.repo, lastCommittedTxid)
	if err != nil {
		return err
	}

	// Initialize the various callback facilities for the leader election
	// and voting protocol.
	s.factory = message.NewConcreteMsgFactory()
	s.handler = action.NewServerActionWithNotifier(s.repo, s.log, s.srvConfig, s, s.notifier, s.txn, s.factory, s)
	s.skillch = make(chan bool, 1) // make it buffered to unblock sender

	// The peer listener must be started before the election. A follower may
	// finish its election before the leader finishes its own; therefore, a
	// follower node can request a connection to the leader node before that
	// node knows it is the leader.  Starting the listener now allows the
	// follower to establish the connection and lets the leader handle that
	// connection later (when it is ready to act as the leader).
	s.listener, err = common.StartPeerListener(s.msgAddr)
	if err != nil {
		err = common.WrapError(common.SERVER_ERROR, "Failed to start PeerListener. err = %v", err)
		return
	}

	return nil
}