示例#1
0
// LogProposal applies a proposal that has been accepted by the ensemble.
// When this node is LEADING, index create/delete proposals are executed
// locally; NOTIFY_TIMESTAMP proposals are forwarded to the index manager
// regardless of role.  The request bookkeeping is updated in all cases.
// Always returns nil.
//
// TODO : what to do if createIndex returns error
func (c *Coordinator) LogProposal(proposal protocol.ProposalMsg) error {

	if c.GetStatus() == protocol.LEADING {
		switch common.OpCode(proposal.GetOpCode()) {
		case OPCODE_ADD_IDX_DEFN:
			success := c.createIndex(proposal.GetKey(), proposal.GetContent())
			// success is a bool: use %v (the original %s rendered as %!s(bool=true)).
			logging.Debugf("Coordinator.LogProposal(): (createIndex) success = %v", success)
		case OPCODE_DEL_IDX_DEFN:
			success := c.deleteIndex(proposal.GetKey())
			logging.Debugf("Coordinator.LogProposal(): (deleteIndex) success = %v", success)
		}
	}

	switch common.OpCode(proposal.GetOpCode()) {
	case OPCODE_NOTIFY_TIMESTAMP:
		timestamp, err := unmarshallTimestampSerializable(proposal.GetContent())
		if err == nil {
			c.idxMgr.notifyNewTimestamp(timestamp)
		} else {
			// Best-effort: a bad timestamp is dropped, not treated as fatal.
			logging.Debugf("Coordinator.LogProposal(): error when unmarshalling timestamp. Ignore timestamp.  Error=%s", err.Error())
		}
	}

	c.updateRequestOnNewProposal(proposal)

	return nil
}
示例#2
0
//
// handleMessage dispatches a single inbound packet from a follower.  All
// follower messages are consumed serially by Leader.listen(), so outbound
// proposals/commits are enqueued into each follower's pipe in a single,
// consistent order.  That lets two state variables (LastLoggedTxid and
// LastCommittedTxid) identify the peer with the newest repository, and rules
// out one peer leading on LastLoggedTxid while another leads on
// LastCommittedTxid.
//
func (l *Leader) handleMessage(msg common.Packet, follower string) (err error) {

	switch request := msg.(type) {
	case RequestMsg:
		if !common.IsCustomOpCode(common.OpCode(request.GetOpCode())) {
			// Ordinary request: run it through the consensus pipeline.
			err = l.createProposal(follower, request)
		} else if l.reqHandler != nil {
			l.reqHandler.OnNewRequest(follower, request)
		} else {
			// Custom op code but nobody registered to serve it: reply with
			// an error response instead of leaving the follower hanging.
			log.Current.Debugf("Leader.handleMessage(): No custom request handler registered to handle custom request.")
			response := l.factory.CreateResponse(follower, request.GetReqId(), "No custom request handler", nil)
			l.sendResponse(response)
		}
	case AcceptMsg:
		err = l.handleAccept(request)
	case ResponseMsg:
		l.sendResponse(request)
	default:
		// TODO: Should throw exception.  There is a possiblity that there is another leader.
		log.Current.Infof("Leader.handleMessage(): Leader unable to process message of type %s. Ignore message.", request.Name())
	}
	return
}
示例#3
0
// OnNewRequest routes an incoming request to the appropriate queue or
// short-circuits it, depending on its op code and whether the indexer has
// finished bootstrapping.
func (m *LifecycleMgr) OnNewRequest(fid string, request protocol.RequestMsg) {

	holder := &requestHolder{request: request, fid: fid}
	opCode := c.OpCode(request.GetOpCode())

	logging.Debugf("LifecycleMgr.OnNewRequest(): queuing new request. reqId %v opCode %v", request.GetReqId(), opCode)

	switch opCode {
	case client.OPCODE_INDEXER_READY:
		// Flip the ready flag and release everything parked on the
		// bootstrap queue.
		m.indexerReady = true
		close(m.bootstraps)

	case client.OPCODE_SERVICE_MAP:
		// Read-only request that does not touch repository state, so it is
		// safe to serve it on its own goroutine and skip the queues.
		go m.dispatchRequest(holder, message.NewConcreteMsgFactory())

	default:
		// Until the indexer is ready, bootstrap-only operations go to the
		// bootstrap queue so they still get processed.
		if !m.indexerReady {
			if opCode == client.OPCODE_UPDATE_INDEX_INST || opCode == client.OPCODE_DELETE_BUCKET {
				m.bootstraps <- holder
				return
			}
		}

		// create/drop/build index (and anything else) waits on the regular
		// queue until the indexer is ready.
		m.incomings <- holder
	}
}
示例#4
0
// LogProposal notifies the registered observer (if any) of a new proposal,
// appends it to the commit log, and then updates the server's in-memory
// state.  Any error from the notifier or the commit log aborts the sequence.
func (a *ServerAction) LogProposal(p protocol.ProposalMsg) error {

	txnid := common.Txnid(p.GetTxnid())
	opCode := common.OpCode(p.GetOpCode())
	key := p.GetKey()
	content := p.GetContent()

	if a.notifier != nil {
		if err := a.notifier.OnNewProposal(txnid, opCode, key, content); err != nil {
			return err
		}
	}

	if err := a.appendCommitLog(txnid, opCode, key, content); err != nil {
		return err
	}

	a.server.UpdateStateOnNewProposal(p)

	return nil
}
示例#5
0
// LogAndCommit appends an entry to the commit log and, when toCommit is set,
// also persists the change, records the last committed txid, and marks the
// log entry committed.  Returns the first error encountered.
func (a *ServerAction) LogAndCommit(txid common.Txnid, op uint32, key string, content []byte, toCommit bool) error {

	// TODO: Make this transactional

	opCode := common.OpCode(op)

	if err := a.appendCommitLog(txid, opCode, key, content); err != nil {
		return err
	}

	if !toCommit {
		return nil
	}

	if err := a.persistChange(opCode, key, content); err != nil {
		return err
	}

	if err := a.config.SetLastCommittedTxid(txid); err != nil {
		return err
	}

	a.log.MarkCommitted(txid)

	return nil
}
示例#6
0
// processChange applies a single repository change (add/set/delete) to the
// watcher's local metadata cache and fires a notification event on success.
// Keys that are neither index-definition nor topology keys are ignored.
func (w *watcher) processChange(op uint32, key string, content []byte) error {

	logging.Debugf("watcher.processChange(): key = %v", key)
	defer logging.Debugf("watcher.processChange(): done -> key = %v", key)

	switch common.OpCode(op) {
	case common.OPCODE_ADD, common.OPCODE_SET:
		if isIndexDefnKey(key) {
			// Empty content is suspicious but not fatal; log and continue.
			if len(content) == 0 {
				logging.Debugf("watcher.processChange(): content of key = %v is empty.", key)
			}

			defnId, err := extractDefnIdFromKey(key)
			if err != nil {
				return err
			}
			w.addDefnWithNoLock(c.IndexDefnId(defnId))
			if err := w.provider.repo.unmarshallAndAddDefn(content); err != nil {
				return err
			}
			w.notifyEventNoLock()
			return nil
		}

		if isIndexTopologyKey(key) {
			if len(content) == 0 {
				logging.Debugf("watcher.processChange(): content of key = %v is empty.", key)
			}
			if err := w.provider.repo.unmarshallAndAddInst(content); err != nil {
				return err
			}
			w.notifyEventNoLock()
		}

	case common.OPCODE_DELETE:
		if isIndexDefnKey(key) {
			defnId, err := extractDefnIdFromKey(key)
			if err != nil {
				return err
			}
			w.removeDefnWithNoLock(c.IndexDefnId(defnId))
			w.provider.repo.removeDefn(c.IndexDefnId(defnId))
			w.notifyEventNoLock()
		}
	}

	return nil
}
示例#7
0
// UpdateStateOnNewProposal registers a pending notification for a proposal
// so the matching event can be fired when the proposal commits.  ADD/SET on
// an index definition maps to EVENT_CREATE_INDEX, on a topology to
// EVENT_UPDATE_TOPOLOGY; DELETE of an index definition maps to
// EVENT_DROP_INDEX.  Any other opcode (or key kind) is logged and skipped.
func (s *watcher) UpdateStateOnNewProposal(proposal protocol.ProposalMsg) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	opCode := common.OpCode(proposal.GetOpCode())
	logging.Debugf("Watcher.UpdateStateOnNewProposal(): receive proposal on metadata kind %d", findTypeFromKey(proposal.GetKey()))

	// register the event for notification
	var evtType EventType = EVENT_NONE
	switch opCode {
	case common.OPCODE_ADD, common.OPCODE_SET:
		// ADD and SET are both metadata upserts and map to the same events;
		// the original duplicated this body across two cases.
		metaType := findTypeFromKey(proposal.GetKey())
		if metaType == KIND_INDEX_DEFN {
			evtType = EVENT_CREATE_INDEX
		} else if metaType == KIND_TOPOLOGY {
			evtType = EVENT_UPDATE_TOPOLOGY
		}
	case common.OPCODE_DELETE:
		if findTypeFromKey(proposal.GetKey()) == KIND_INDEX_DEFN {
			evtType = EVENT_DROP_INDEX
		}
	default:
		logging.Debugf("Watcher.UpdateStateOnNewProposal(): receive proposal with opcode %d.  Skip convert proposal to event.", opCode)
	}

	logging.Debugf("Watcher.UpdateStateOnNewProposal(): convert metadata type to event  %d", evtType)
	if evtType != EVENT_NONE {
		logging.Debugf("Watcher.UpdateStateOnNewProposal(): register event for txid %d", proposal.GetTxnid())
		s.notifications[common.Txnid(proposal.GetTxnid())] =
			newNotificationHandle(proposal.GetKey(), evtType, proposal.GetContent())
	}
}
示例#8
0
// dispatchRequest executes one queued lifecycle request and always sends a
// response back on the outgoing channel: an empty error string on success,
// err.Error() otherwise.  Unrecognized op codes fall through as success with
// no result.
func (m *LifecycleMgr) dispatchRequest(request *requestHolder, factory *message.ConcreteMsgFactory) {

	reqId := request.request.GetReqId()
	op := c.OpCode(request.request.GetOpCode())
	key := request.request.GetKey()
	content := request.request.GetContent()

	logging.Debugf("LifecycleMgr.dispatchRequest () : requestId %d, op %d, key %v", reqId, op, key)

	var err error
	var result []byte

	switch op {
	case client.OPCODE_CREATE_INDEX:
		err = m.handleCreateIndex(key, content)
	case client.OPCODE_UPDATE_INDEX_INST:
		err = m.handleTopologyChange(content)
	case client.OPCODE_DROP_INDEX:
		err = m.handleDeleteIndex(key)
	case client.OPCODE_BUILD_INDEX:
		err = m.handleBuildIndexes(content)
	case client.OPCODE_SERVICE_MAP:
		result, err = m.handleServiceMap(content)
	case client.OPCODE_DELETE_BUCKET:
		err = m.handleDeleteBucket(key, content)
	}

	logging.Debugf("LifecycleMgr.dispatchRequest () : send response for requestId %d, op %d, len(result) %d", reqId, op, len(result))

	// A single send covers both outcomes; the error string is empty on success.
	errStr := ""
	if err != nil {
		errStr = err.Error()
	}
	m.outgoings <- factory.CreateResponse(request.fid, reqId, errStr, result)
}