Example 1
func UnmarshalMessage(data []byte) (interfaces.IMsg, error) {
	if len(data) == 0 { // covers both nil and empty input
		return nil, fmt.Errorf("No data provided")
	}
	messageType := int(data[0])
	var msg interfaces.IMsg
	switch messageType {
	case constants.EOM_MSG:
		msg = new(EOM)
	case constants.ACK_MSG:
		msg = new(Ack)
	case constants.AUDIT_SERVER_FAULT_MSG:
		msg = new(AuditServerFault)
	case constants.COMMIT_CHAIN_MSG:
		msg = new(CommitChainMsg)
	case constants.COMMIT_ENTRY_MSG:
		msg = new(CommitEntryMsg)
	case constants.DIRECTORY_BLOCK_SIGNATURE_MSG:
		msg = NewDirectoryBlockSignature()
	case constants.EOM_TIMEOUT_MSG:
		msg = new(EOMTimeout)
	case constants.FACTOID_TRANSACTION_MSG:
		msg = new(FactoidTransaction)
	case constants.HEARTBEAT_MSG:
		msg = new(Heartbeat)
	case constants.INVALID_ACK_MSG:
		msg = new(InvalidAck)
	case constants.INVALID_DIRECTORY_BLOCK_MSG:
		msg = new(InvalidDirectoryBlock)
	case constants.MISSING_ACK_MSG:
		msg = new(MissingAck)
	case constants.PROMOTION_DEMOTION_MSG:
		msg = new(PromotionDemotion)
	case constants.REVEAL_ENTRY_MSG:
		msg = new(RevealEntryMsg)
	case constants.REQUEST_BLOCK_MSG:
		msg = new(RequestBlock)
	case constants.SIGNATURE_TIMEOUT_MSG:
		msg = new(SignatureTimeout)
	default:
		return nil, fmt.Errorf("Unknown message type")
	}

	// The full payload, including the leading type byte, is handed to UnmarshalBinary.
	err := msg.UnmarshalBinary(data)
	if err != nil {
		return nil, err
	}
	return msg, nil

}
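
A hypothetical caller (not part of the factomd source) showing how UnmarshalMessage above might be used: decode the raw payload, then dispatch on the concrete message type.

func handlePayload(data []byte) error {
	// Assumes the payload layout used by UnmarshalMessage: the first byte is the type.
	msg, err := UnmarshalMessage(data)
	if err != nil {
		return err
	}
	switch msg.(type) {
	case *Ack:
		// acknowledgement handling would go here
	case *EOM:
		// end-of-minute handling would go here
	default:
		// all other message types
	}
	return nil
}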
Example 2
func (p *P2PProxy) logMessage(msg interfaces.IMsg, received bool) {
	if 2 < p.debugMode {
		// if constants.DBSTATE_MSG == msg.Type() {
		// fmt.Printf("AppMsgLogging: \n Type: %s \n Network Origin: %s \n Message: %s", msg.Type(), msg.GetNetworkOrigin(), msg.String())
		// }
		hash := fmt.Sprintf("%x", msg.GetMsgHash().Bytes())
		now := time.Now().Unix() // avoid shadowing the time package
		ml := messageLog{hash: hash, received: received, time: now, mtype: msg.Type(), target: msg.GetNetworkOrigin()}
		p2p.BlockFreeChannelSend(p.logging, ml)
	}
}
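
p2p.BlockFreeChannelSend is used so that logging can never stall the proxy; a minimal sketch of that drop-when-full idea (illustrative only, not the actual p2p implementation):

// blockFreeSend never blocks the caller: if the channel buffer is full, the value is dropped.
func blockFreeSend(ch chan interface{}, v interface{}) bool {
	select {
	case ch <- v:
		return true
	default:
		return false // buffer full; value dropped
	}
}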
Example 3
func (s *State) LeaderExecuteEOM(m interfaces.IMsg) {

	if !m.IsLocal() {
		s.FollowerExecuteEOM(m)
		return
	}

	// The zero based minute for the message is equal to
	// the one based "LastMinute".  This way we know we are
	// generating minutes in order.

	eom := m.(*messages.EOM)
	pl := s.ProcessLists.Get(s.LLeaderHeight)
	vm := pl.VMs[s.LeaderVMIndex]

	// Put the System Height and Serial Hash into the EOM
	eom.SysHeight = uint32(pl.System.Height)
	if pl.System.Height > 1 {
		ff, ok := pl.System.List[pl.System.Height-1].(*messages.FullServerFault)
		if ok {
			eom.SysHash = ff.GetSerialHash()
		}
	}

	if s.Syncing && vm.Synced {
		return
	} else if !s.Syncing {
		s.Syncing = true
		s.EOM = true
		s.EOMsyncing = true
		s.EOMProcessed = 0
		for _, vm := range pl.VMs {
			vm.Synced = false
		}
		s.EOMLimit = len(pl.FedServers)
		s.EOMMinute = int(s.CurrentMinute)
	}

	//_, vmindex := pl.GetVirtualServers(s.EOMMinute, s.IdentityChainID)

	eom.DBHeight = s.LLeaderHeight
	eom.VMIndex = s.LeaderVMIndex
	// eom.Minute is zero-based, while LeaderMinute is 1-based.  So
	// a simple assignment works.
	eom.Minute = byte(s.CurrentMinute)
	eom.Sign(s)
	eom.MsgHash = nil
	ack := s.NewAck(m)
	s.Acks[eom.GetMsgHash().Fixed()] = ack
	m.SetLocal(false)
	s.FollowerExecuteEOM(m)
	s.UpdateState()
}
Example 4
func (f *SimPeer) Send(msg interfaces.IMsg) error {
	data, err := msg.MarshalBinary()
	if err != nil {
		fmt.Println("ERROR on Send: ", err)
		return err
	}
	f.bytesOut += len(data)
	f.computeBandwidth()
	if len(f.BroadcastOut) < 9000 {
		packet := SimPacket{data, time.Now().UnixNano() / 1000000}
		f.BroadcastOut <- &packet
	}
	return nil
}
Example 5
func (p *ProcessList) AddToSystemList(m interfaces.IMsg) bool {
	// Make sure we have a list, and punt if we don't.
	if p == nil {
		// A nil ProcessList has no State to hold the message in, so just reject it.
		return false
	}

	fullFault, ok := m.(*messages.FullServerFault)
	if !ok {
		return false // Should never happen;  Don't pass junk to be added to the System List
	}

	// If we have already processed past this fault, just ignore.
	if p.System.Height > int(fullFault.SystemHeight) {
		return false
	}

	// If the fault is in the future, hold it.
	if p.System.Height < int(fullFault.SystemHeight) {
		p.State.Holding[m.GetRepeatHash().Fixed()] = m
		return false
	}

	for len(p.System.List) > 0 && p.System.List[len(p.System.List)-1] == nil {
		p.System.List = p.System.List[:len(p.System.List)-1]
	}

	// If we are here, fullFault.SystemHeight == p.System.Height
	if len(p.System.List) <= p.System.Height {
		// Nothing in our list at this slot yet, so insert this FullFault message
		p.System.List = append(p.System.List, fullFault)
		return true
	}

	// Something is in our SystemList at this height;
	// We will prioritize the FullFault with the highest VMIndex
	existingSystemFault, _ := p.System.List[p.System.Height].(*messages.FullServerFault)
	if int(existingSystemFault.VMIndex) >= int(fullFault.VMIndex) {
		return false
	}

	p.System.List[p.System.Height] = fullFault

	return true

}
Example 6
// Messages that will go into the Process List must match an Acknowledgement.
// The code for this is the same for all such messages, so we put it here.
//
// The message is placed in Holding; if a matching Ack is already known, the pair is added to the process list.
func (s *State) FollowerExecuteMsg(m interfaces.IMsg) {

	s.Holding[m.GetMsgHash().Fixed()] = m
	ack, _ := s.Acks[m.GetMsgHash().Fixed()].(*messages.Ack)

	if ack != nil {
		m.SetLeaderChainID(ack.GetLeaderChainID())
		m.SetMinute(ack.Minute)

		pl := s.ProcessLists.Get(ack.DBHeight)
		pl.AddToProcessList(ack, m)
	}
}
Example 7
// Create a new Acknowledgement.  Must be called by a leader.  This
// call assumes all the pieces are in place to create a new acknowledgement
func (s *State) NewAck(msg interfaces.IMsg) interfaces.IMsg {

	vmIndex := msg.GetVMIndex()

	msg.SetLeaderChainID(s.IdentityChainID)
	ack := new(messages.Ack)
	ack.DBHeight = s.LLeaderHeight
	ack.VMIndex = vmIndex
	ack.Minute = byte(s.ProcessLists.Get(s.LLeaderHeight).VMs[vmIndex].LeaderMinute)
	ack.Timestamp = s.GetTimestamp()
	ack.SaltNumber = s.GetSalt(ack.Timestamp)
	copy(ack.Salt[:8], s.Salt.Bytes()[:8])
	ack.MessageHash = msg.GetMsgHash()
	ack.LeaderChainID = s.IdentityChainID

	listlen := len(s.LeaderPL.VMs[vmIndex].List)
	if listlen == 0 {
		ack.Height = 0
		ack.SerialHash = ack.MessageHash
	} else {
		last := s.LeaderPL.GetAckAt(vmIndex, listlen-1)
		ack.Height = last.Height + 1
		ack.SerialHash, _ = primitives.CreateHash(last.MessageHash, ack.MessageHash)
	}

	ack.Sign(s)

	return ack
}
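
The SerialHash built in NewAck chains each ack to the one before it in the VM list. A hypothetical check of that chain, assuming primitives.CreateHash and IsSameAs behave as they are used above:

// verifySerialChain is a sketch: per NewAck, each ack's SerialHash should equal
// CreateHash(previous MessageHash, current MessageHash).
func verifySerialChain(acks []*messages.Ack) bool {
	for i := 1; i < len(acks); i++ {
		h, err := primitives.CreateHash(acks[i-1].MessageHash, acks[i].MessageHash)
		if err != nil || !h.IsSameAs(acks[i].SerialHash) {
			return false
		}
	}
	return true
}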
Example 8
func (s *State) FollowerExecuteRevealEntry(m interfaces.IMsg) {
	s.Holding[m.GetMsgHash().Fixed()] = m
	ack, _ := s.Acks[m.GetMsgHash().Fixed()].(*messages.Ack)

	if ack != nil {

		m.SetLeaderChainID(ack.GetLeaderChainID())
		m.SetMinute(ack.Minute)

		pl := s.ProcessLists.Get(ack.DBHeight)
		pl.AddToProcessList(ack, m)

		// If we added the ack, then it will be cleared from the ack map.
		if s.Acks[m.GetMsgHash().Fixed()] == nil {
			msg := m.(*messages.RevealEntryMsg)
			delete(s.Commits, msg.Entry.GetHash().Fixed())
			// Okay the Reveal has been recorded.  Record this as an entry that cannot be duplicated.
			s.Replay.IsTSValid_(constants.REVEAL_REPLAY, msg.Entry.GetHash().Fixed(), msg.Timestamp, s.GetTimestamp())
		}

	}

}
Example 9
// Messages that will go into the Process List must match an Acknowledgement.
// The code for this is the same for all such messages, so we put it here.
//
// The message is placed in Holding; if a matching Ack is already known, the pair is added to the process list.
func (s *State) FollowerExecuteEOM(m interfaces.IMsg) {

	if m.IsLocal() {
		return // This is an internal EOM message.  We are not a leader so ignore.
	}

	s.Holding[m.GetMsgHash().Fixed()] = m

	ack, _ := s.Acks[m.GetMsgHash().Fixed()].(*messages.Ack)
	if ack != nil {
		pl := s.ProcessLists.Get(ack.DBHeight)
		pl.AddToProcessList(ack, m)
	}
}
Example 10
// Messages that match an acknowledgement, and are added to the process list
// all do the same thing.  So that logic is here.
func (s *State) MatchAckFollowerExecute(m interfaces.IMsg) error {
	acks := s.GetAcks()
	ack, ok := acks[m.GetHash().Fixed()].(*messages.Ack)
	if !ok || ack == nil {
		s.GetHolding()[m.GetHash().Fixed()] = m
	} else {
		processlist := s.GetProcessList()[ack.ServerIndex]
		for len(processlist) < ack.Height+1 {
			processlist = append(processlist, nil)
		}
		processlist[ack.Height] = m
		s.GetProcessList()[ack.ServerIndex] = processlist
		delete(acks, m.GetHash().Fixed())
	}
	return nil
}
Example 11
func Peers(fnode *FactomNode) {
	cnt := 0
	for {
		for i := 0; i < 100 && len(fnode.State.APIQueue()) > 0; i++ {
			select {
			case msg := <-fnode.State.APIQueue():
				if msg == nil {
					break
				}
				repeatHash := msg.GetRepeatHash()
				if repeatHash == nil {
					fmt.Println("dddd ERROR!", msg.String())
					break
				}
				cnt++
				msg.SetOrigin(0)
				if fnode.State.Replay.IsTSValid_(constants.NETWORK_REPLAY, repeatHash.Fixed(),
					msg.GetTimestamp(),
					fnode.State.GetTimestamp()) {

					fnode.MLog.add2(fnode, false, fnode.State.FactomNodeName, "API", true, msg)
					if len(fnode.State.InMsgQueue()) < 9000 {
						fnode.State.InMsgQueue() <- msg
					}
				}
			default:

			}
		}

		// Put any broadcasts from our peers into our BroadcastIn queue
		for i, peer := range fnode.Peers {
			for j := 0; j < 100; j++ {

				var msg interfaces.IMsg
				var err error

				if !fnode.State.GetNetStateOff() {
					msg, err = peer.Recieve()
				}

				if msg == nil {
					// Recieve is not blocking; nothing to do, we get a nil.
					break
				}

				cnt++

				if err != nil {
					fmt.Println("ERROR recieving message on", fnode.State.FactomNodeName+":", err)
					break
				}

				msg.SetOrigin(i + 1)
				if fnode.State.Replay.IsTSValid_(constants.NETWORK_REPLAY, msg.GetRepeatHash().Fixed(),
					msg.GetTimestamp(),
					fnode.State.GetTimestamp()) {
					//if state.GetOut() {
					//	fnode.State.Println("In Comming!! ",msg)
					//}
					in := "PeerIn"
					if msg.IsPeer2Peer() {
						in = "P2P In"
					}
					nme := fmt.Sprintf("%s %d", in, i+1)

					fnode.MLog.add2(fnode, false, peer.GetNameTo(), nme, true, msg)

					// Ignore messages if there are too many.
					if len(fnode.State.InMsgQueue()) < 9000 {
						fnode.State.InMsgQueue() <- msg
					}

				} else {
					fnode.MLog.add2(fnode, false, peer.GetNameTo(), "PeerIn", false, msg)
				}
			}
		}
		if cnt == 0 {
			time.Sleep(50 * time.Millisecond)
		}
		cnt = 0
	}
}
Example 12
func (s *State) LeaderExecuteRevealEntry(m interfaces.IMsg) {
	re := m.(*messages.RevealEntryMsg)
	eh := re.Entry.GetHash()

	commit, rtn := re.ValidateRTN(s)

	switch rtn {
	case 0:
		m.FollowerExecute(s)
	case -1:
		return
	}
	now := s.GetTimestamp()
	// If we have already recorded a Reveal Entry with this hash in this period, just ignore.
	if _, v := s.Replay.Valid(constants.REVEAL_REPLAY, eh.Fixed(), s.GetLeaderTimestamp(), now); !v {
		return
	}

	ack := s.NewAck(m).(*messages.Ack)

	m.SetLeaderChainID(ack.GetLeaderChainID())
	m.SetMinute(ack.Minute)

	// Put the acknowledgement in the Acks so we can tell if AddToProcessList() adds it.
	s.Acks[m.GetMsgHash().Fixed()] = ack
	s.ProcessLists.Get(ack.DBHeight).AddToProcessList(ack, m)
	// If the ack is still in s.Acks, AddToProcessList did not take the message; otherwise the reveal was recorded and the matching Commit can go.
	if s.Acks[m.GetMsgHash().Fixed()] != nil {
		m.FollowerExecute(s)
		s.PutCommit(eh, commit)
	} else {
		// Okay the Reveal has been recorded.  Record this as an entry that cannot be duplicated.
		s.Replay.IsTSValid_(constants.REVEAL_REPLAY, eh.Fixed(), m.GetTimestamp(), now)
		delete(s.Commits, eh.Fixed())
	}
}
Example 13
func (p *ProcessList) AddToProcessList(ack *messages.Ack, m interfaces.IMsg) {

	if p == nil {
		return
	}

	// We don't check the SaltNumber if this isn't an actual message, i.e. a response from
	// the past.
	if !ack.Response && ack.LeaderChainID.IsSameAs(p.State.IdentityChainID) {
		num := p.State.GetSalt(ack.Timestamp)
		if num != ack.SaltNumber {
			os.Stderr.WriteString(fmt.Sprintf("This  ChainID    %x\n", p.State.IdentityChainID.Bytes()))
			os.Stderr.WriteString(fmt.Sprintf("This  Salt       %x\n", p.State.Salt.Bytes()[:8]))
			os.Stderr.WriteString(fmt.Sprintf("This  SaltNumber %x\n for this ack", num))
			os.Stderr.WriteString(fmt.Sprintf("Ack   ChainID    %x\n", ack.LeaderChainID.Bytes()))
			os.Stderr.WriteString(fmt.Sprintf("Ack   Salt       %x\n", ack.Salt))
			os.Stderr.WriteString(fmt.Sprintf("Ack   SaltNumber %x\n for this ack", ack.SaltNumber))
			panic("There are two leaders configured with the same Identity in this network!  This is a configuration problem!")
		}
	}

	if _, ok := m.(*messages.MissingMsg); ok {
		panic("This shouldn't happen")
	}

	toss := func(hint string) {
		fmt.Println("dddd TOSS in Process List", p.State.FactomNodeName, hint)
		fmt.Println("dddd TOSS in Process List", p.State.FactomNodeName, ack.String())
		fmt.Println("dddd TOSS in Process List", p.State.FactomNodeName, m.String())
		delete(p.State.Holding, ack.GetHash().Fixed())
		delete(p.State.Acks, ack.GetHash().Fixed())
	}

	now := p.State.GetTimestamp()

	vm := p.VMs[ack.VMIndex]

	if len(vm.List) > int(ack.Height) && vm.List[ack.Height] != nil {
		_, isNew2 := p.State.Replay.Valid(constants.INTERNAL_REPLAY, m.GetRepeatHash().Fixed(), m.GetTimestamp(), now)
		if !isNew2 {
			toss("seen before, or too old")
			return
		}
	}

	if ack.DBHeight != p.DBHeight {
		panic(fmt.Sprintf("Ack is wrong height.  Expected: %d Ack: %s", p.DBHeight, ack.String()))
	}

	if len(vm.List) > int(ack.Height) && vm.List[ack.Height] != nil {

		if vm.List[ack.Height].GetMsgHash().IsSameAs(m.GetMsgHash()) {
			fmt.Printf("dddd %-30s %10s %s\n", "xxxxxxxxx PL Duplicate   ", p.State.GetFactomNodeName(), m.String())
			fmt.Printf("dddd %-30s %10s %s\n", "xxxxxxxxx PL Duplicate   ", p.State.GetFactomNodeName(), ack.String())
			fmt.Printf("dddd %-30s %10s %s\n", "xxxxxxxxx PL Duplicate vm", p.State.GetFactomNodeName(), vm.List[ack.Height].String())
			fmt.Printf("dddd %-30s %10s %s\n", "xxxxxxxxx PL Duplicate vm", p.State.GetFactomNodeName(), vm.ListAck[ack.Height].String())
			toss("2")
			return
		}

		vm.List[ack.Height] = nil

		return
	}

	// From this point on, we consider the transaction recorded.  If we detect it has already been
	// recorded, then we still treat it as if we recorded it.

	vm.heartBeat = 0 // We have heard from this VM

	// We have already tested and found m to be a new message.  We now record its hashes so later, we
	// can detect that it has been recorded.  We don't care about the results of IsTSValid_ at this point.
	p.State.Replay.IsTSValid_(constants.INTERNAL_REPLAY, m.GetRepeatHash().Fixed(), m.GetTimestamp(), now)
	p.State.Replay.IsTSValid_(constants.INTERNAL_REPLAY, m.GetMsgHash().Fixed(), m.GetTimestamp(), now)

	delete(p.State.Acks, ack.GetHash().Fixed())
	delete(p.State.Holding, m.GetMsgHash().Fixed())

	// Both the ack and the message hash to the same GetHash()
	m.SetLocal(false)
	ack.SetLocal(false)
	ack.SetPeer2Peer(false)
	m.SetPeer2Peer(false)

	ack.SendOut(p.State, ack)
	m.SendOut(p.State, m)

	for len(vm.List) <= int(ack.Height) {
		vm.List = append(vm.List, nil)
		vm.ListAck = append(vm.ListAck, nil)
	}

	p.VMs[ack.VMIndex].List[ack.Height] = m
	p.VMs[ack.VMIndex].ListAck[ack.Height] = ack
	p.AddOldMsgs(m)
	p.OldAcks[m.GetMsgHash().Fixed()] = ack

}
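
The grow-then-assign pattern at the end (padding vm.List with nils until ack.Height exists) is a common way to place an item at an arbitrary slice index; in isolation it looks like this (illustrative helper, not factomd code):

// placeAt grows s with nils until index i exists, then stores m there,
// mirroring the vm.List / vm.ListAck handling above.
func placeAt(s []interfaces.IMsg, i int, m interfaces.IMsg) []interfaces.IMsg {
	for len(s) <= i {
		s = append(s, nil)
	}
	s[i] = m
	return s
}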
Example 14
func (s *State) LeaderExecute(m interfaces.IMsg) {

	_, ok := s.Replay.Valid(constants.INTERNAL_REPLAY, m.GetRepeatHash().Fixed(), m.GetTimestamp(), s.GetTimestamp())
	if !ok {
		delete(s.Holding, m.GetRepeatHash().Fixed())
		delete(s.Holding, m.GetMsgHash().Fixed())
		return
	}

	ack := s.NewAck(m).(*messages.Ack)
	m.SetLeaderChainID(ack.GetLeaderChainID())
	m.SetMinute(ack.Minute)

	s.ProcessLists.Get(ack.DBHeight).AddToProcessList(ack, m)
}
Example 15
func (f *P2PProxy) Send(msg interfaces.IMsg) error {
	f.logMessage(msg, false) // NODE_TALK_FIX
	data, err := msg.MarshalBinary()
	if err != nil {
		fmt.Println("ERROR on Send: ", err)
		return err
	}
	hash := fmt.Sprintf("%x", msg.GetMsgHash().Bytes())
	appType := fmt.Sprintf("%d", msg.Type())
	message := factomMessage{message: data, peerHash: msg.GetNetworkOrigin(), appHash: hash, appType: appType}
	switch {
	case !msg.IsPeer2Peer():
		message.peerHash = p2p.BroadcastFlag
		f.trace(message.appHash, message.appType, "P2PProxy.Send() - BroadcastFlag", "a")
	case msg.IsPeer2Peer() && 0 == len(message.peerHash): // directed, with no direction of who to send it to
		message.peerHash = p2p.RandomPeerFlag
		f.trace(message.appHash, message.appType, "P2PProxy.Send() - RandomPeerFlag", "a")
	default:
		f.trace(message.appHash, message.appType, "P2PProxy.Send() - Addressed by hash", "a")
	}
	if msg.IsPeer2Peer() && 1 < f.debugMode {
		fmt.Printf("%s Sending directed to: %s message: %+v\n", time.Now().String(), message.peerHash, msg.String())
	}
	p2p.BlockFreeChannelSend(f.BroadcastOut, message)
	return nil
}
Example 16
func SetMsg(msg interfaces.IMsg) {
	oldsync.Lock()
	old[msg.GetHash().Fixed()] = msg
	oldsync.Unlock()
}
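
The old map and oldsync mutex are package-level state that these excerpts do not show; presumably they are declared along these lines (the [32]byte key matching GetHash().Fixed() is an assumption):

// Assumed declarations, not shown in the excerpts above.
var (
	old     = make(map[[32]byte]interfaces.IMsg)
	oldsync sync.Mutex
)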
Example 17
// When we process a Directory Block Signature and we are the leader for that signature,
// we push it out to the rest of the network.  Otherwise, if we are not the leader for
// the signature, it marks the sig complete for that list.
func (s *State) ProcessDBSig(dbheight uint32, msg interfaces.IMsg) bool {

	dbs := msg.(*messages.DirectoryBlockSignature)
	// Don't process if syncing an EOM
	if s.Syncing && !s.DBSig {
		return false
	}

	pl := s.ProcessLists.Get(dbheight)
	vm := s.ProcessLists.Get(dbheight).VMs[msg.GetVMIndex()]

	if uint32(pl.System.Height) >= dbs.SysHeight {
		s.DBSigSys = true
	}

	// If we are done with DBSigs, and this message is processed, then we are done.  Let everything go!
	if s.DBSigSys && s.DBSig && s.DBSigDone {
		s.DBSigProcessed--
		if s.DBSigProcessed <= 0 {
			s.DBSig = false
			s.Syncing = false
		}
		vm.Signed = true
		//s.LeaderPL.AdminBlock
		return true
	}

	// Put the stuff that only executes once at the start of DBSignatures here
	if !s.DBSig {
		s.DBSigLimit = len(pl.FedServers)
		s.DBSigProcessed = 0
		s.DBSig = true
		s.Syncing = true
		s.DBSigDone = false
		for _, vm := range pl.VMs {
			vm.Synced = false
		}
		pl.ResetDiffSigTally()
	}

	// Put the stuff that executes per DBSignature here
	if !dbs.Processed {
		if dbs.VMIndex == 0 {
			s.SetLeaderTimestamp(dbs.GetTimestamp())
		}
		dbstate := s.GetDBState(dbheight - 1)

		if dbstate == nil || !dbs.DirectoryBlockHeader.GetBodyMR().IsSameAs(dbstate.DirectoryBlock.GetHeader().GetBodyMR()) {
			//fmt.Println(s.FactomNodeName, "JUST COMPARED", dbs.DirectoryBlockHeader.GetBodyMR().String()[:10], " : ", dbstate.DirectoryBlock.GetHeader().GetBodyMR().String()[:10])
			pl.IncrementDiffSigTally()
		}

		// Adds DB Sig to be added to Admin block if passes sig checks
		allChecks := false
		data, err := dbs.DirectoryBlockHeader.MarshalBinary()
		if err != nil {
			fmt.Println("Debug: DBSig Signature Error, Marshal binary errored")
		} else {
			if !dbs.DBSignature.Verify(data) {
				fmt.Println("Debug: DBSig Signature Error, Verify errored")
			} else {
				if valid, err := s.VerifyAuthoritySignature(data, dbs.DBSignature.GetSignature(), dbs.DBHeight); err == nil && valid == 1 {
					allChecks = true
				}
			}
		}

		if allChecks {
			dbs.Matches = true
			s.AddDBSig(dbheight, dbs.ServerIdentityChainID, dbs.DBSignature)
		}

		dbs.Processed = true
		s.DBSigProcessed++
		vm.Synced = true
	}

	allfaults := s.LeaderPL.System.Height >= s.LeaderPL.SysHighest

	// Put the stuff that executes once for set of DBSignatures (after I have them all) here
	if allfaults && !s.DBSigDone && s.DBSigProcessed >= s.DBSigLimit {
		fails := 0
		for i := range pl.FedServers {
			vm := pl.VMs[i]
			tdbsig, ok := vm.List[0].(*messages.DirectoryBlockSignature)
			if !ok || !tdbsig.Matches {
				fails++
				vm.List[0] = nil
				vm.Height = 0
				s.DBSigProcessed--
			}
		}
		if fails > len(pl.FedServers)/2 {
			//s.DoReset()
			return false
		} else if fails > 0 {
			return false
		}
		dbstate := s.DBStates.Get(int(dbheight - 1))

		// TODO: check signatures here.  Count what match and what don't.  Then if a majority
		// disagree with us, null our entry out.  Otherwise toss our DBState and ask for one from
		// our neighbors.
		if s.KeepMismatch || pl.CheckDiffSigTally() {
			if !dbstate.Saved {
				dbstate.ReadyToSave = true
				s.DBStates.SaveDBStateToDB(dbstate)
				//s.LeaderPL.AddDBSig(dbs.ServerIdentityChainID, dbs.DBSignature)
			}
		} else {
			s.DBSigFails++
			s.Reset()
			msg := messages.NewDBStateMissing(s, uint32(dbheight-1), uint32(dbheight-1))

			if msg != nil {
				s.RunLeader = false
				s.StartDelay = s.GetTimestamp().GetTimeMilli()
				s.NetworkOutMsgQueue() <- msg
			}
		}
		s.ReviewHolding()
		s.Saving = false
		s.DBSigDone = true
	}
	return false
	/*
		err := s.LeaderPL.AdminBlock.AddDBSig(dbs.ServerIdentityChainID, dbs.DBSignature)
		if err != nil {
			fmt.Printf("Error in adding DB sig to admin block, %s\n", err.Error())
		}
	*/
}
Example 18
// TODO: Should fault the server if we don't have the proper sequence of EOM messages.
func (s *State) ProcessEOM(dbheight uint32, msg interfaces.IMsg) bool {

	e := msg.(*messages.EOM)

	if s.Syncing && !s.EOM {
		return false
	}

	if s.EOM && int(e.Minute) > s.EOMMinute {
		return false
	}

	pl := s.ProcessLists.Get(dbheight)
	vm := s.ProcessLists.Get(dbheight).VMs[msg.GetVMIndex()]

	if uint32(pl.System.Height) >= e.SysHeight {
		s.EOMSys = true
	}

	// If I have done everything for all EOMs for all VMs, then and only then do I
	// let processing continue.
	if s.EOMDone && s.EOMSys {
		s.EOMProcessed--
		if s.EOMProcessed <= 0 {
			s.EOM = false
			s.EOMDone = false
			s.ReviewHolding()
			s.Syncing = false
		}
		s.SendHeartBeat()

		return true
	}

	// What I do once  for all VMs at the beginning of processing a particular EOM
	if !s.EOM {
		s.EOMSys = false
		s.Syncing = true
		s.EOM = true
		s.EOMLimit = len(s.LeaderPL.FedServers)
		s.EOMMinute = int(e.Minute)
		s.EOMsyncing = true
		s.EOMProcessed = 0
		s.Newblk = false

		for _, vm := range pl.VMs {
			vm.Synced = false
		}
		s.AddStatus("EOM Syncing")
		return false
	}

	// What I do for each EOM
	if !e.Processed {
		vm.LeaderMinute++
		s.EOMProcessed++
		e.Processed = true
		vm.Synced = true
		if s.LeaderPL.SysHighest < int(e.SysHeight) {
			s.LeaderPL.SysHighest = int(e.SysHeight)
		}
		s.AddStatus("EOM Processed")
		return false
	}

	allfaults := s.LeaderPL.System.Height >= s.LeaderPL.SysHighest

	// After all EOM markers are processed, claim we are done.  Now we can unwind.
	if allfaults && s.EOMProcessed == s.EOMLimit && !s.EOMDone {
		s.AddStatus("EOM All Done")

		s.EOMDone = true
		for _, eb := range pl.NewEBlocks {
			eb.AddEndOfMinuteMarker(byte(e.Minute + 1))
		}

		s.FactoidState.EndOfPeriod(int(e.Minute))

		ecblk := pl.EntryCreditBlock
		ecbody := ecblk.GetBody()
		mn := entryCreditBlock.NewMinuteNumber(e.Minute + 1)
		ecbody.AddEntry(mn)

		if !s.Leader {
			s.CurrentMinute = int(e.Minute)
		}

		s.CurrentMinute++

		switch {
		case s.CurrentMinute < 10:
			s.LeaderPL = s.ProcessLists.Get(s.LLeaderHeight)
			s.Leader, s.LeaderVMIndex = s.LeaderPL.GetVirtualServers(s.CurrentMinute, s.IdentityChainID)
		case s.CurrentMinute == 10:
			eBlocks := []interfaces.IEntryBlock{}
			entries := []interfaces.IEBEntry{}
			for _, v := range pl.NewEBlocks {
				eBlocks = append(eBlocks, v)
			}
			for _, v := range pl.NewEntries {
				entries = append(entries, v)
			}

			dbstate := s.AddDBState(true, s.LeaderPL.DirectoryBlock, s.LeaderPL.AdminBlock, s.GetFactoidState().GetCurrentBlock(), s.LeaderPL.EntryCreditBlock, eBlocks, entries)
			if dbstate == nil {
				dbstate = s.DBStates.Get(int(s.LeaderPL.DirectoryBlock.GetHeader().GetDBHeight()))
			}
			dbht := int(dbstate.DirectoryBlock.GetHeader().GetDBHeight())
			if dbht > 0 {
				prev := s.DBStates.Get(dbht - 1)
				s.DBStates.FixupLinks(prev, dbstate)
			}
			s.DBStates.ProcessBlocks(dbstate)

			s.CurrentMinute = 0
			s.LLeaderHeight++
			s.LeaderPL = s.ProcessLists.Get(s.LLeaderHeight)
			s.Leader, s.LeaderVMIndex = s.LeaderPL.GetVirtualServers(0, s.IdentityChainID)

			s.DBSigProcessed = 0

			// Note about dbsigs.... If we processed the previous minute, then we generate the DBSig for the next block.
			// But if we didn't process the previous block, like we start from scratch, or we had to reset the entire
			// network, then no dbsig exists.  This code doesn't execute, and so we have no dbsig.  In that case, on
			// the next EOM, we see the block hasn't been signed, and we sign the block (that's the call to SendDBSig()
			// above).
			if s.Leader {
				dbstate := s.DBStates.Get(int(s.LLeaderHeight - 1))
				dbs := new(messages.DirectoryBlockSignature)
				db := dbstate.DirectoryBlock
				dbs.DirectoryBlockHeader = db.GetHeader()
				//dbs.DirectoryBlockKeyMR = dbstate.DirectoryBlock.GetKeyMR()
				dbs.ServerIdentityChainID = s.GetIdentityChainID()
				dbs.DBHeight = s.LLeaderHeight
				dbs.Timestamp = s.GetTimestamp()
				dbs.SetVMHash(nil)
				dbs.SetVMIndex(s.LeaderVMIndex)
				dbs.SetLocal(true)
				err := dbs.Sign(s)
				if err != nil {
					panic(err)
				}
				dbs.LeaderExecute(s)

			}

			s.Saving = true
		}

		for k := range s.Commits {
			vs := s.Commits[k]
			if len(vs) == 0 {
				delete(s.Commits, k)
				continue
			}
			v, ok := vs[0].(interfaces.IMsg)
			if ok {
				_, ok := s.Replay.Valid(constants.TIME_TEST, v.GetRepeatHash().Fixed(), v.GetTimestamp(), s.GetTimestamp())
				if !ok {
					copy(vs, vs[1:])
					vs[len(vs)-1] = nil
					s.Commits[k] = vs[:len(vs)-1]
				}
			}
		}

		for k := range s.Acks {
			v := s.Acks[k].(*messages.Ack)
			if v.DBHeight < s.LLeaderHeight {
				delete(s.Acks, k)
			}
		}
	}

	return false
}
Example 19
// Returns true if message is new
func MsgIsNew(msg interfaces.IMsg) bool {
	oldsync.Lock()
	defer oldsync.Unlock()
	return old[msg.GetHash().Fixed()] == nil
}
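
Together with SetMsg above, the pair can be used for simple de-duplication; a hypothetical caller:

// handleOnce is a sketch: act on a message only the first time it is seen.
// Note that the check and the set are two separate lock acquisitions.
func handleOnce(msg interfaces.IMsg) {
	if !MsgIsNew(msg) {
		return
	}
	SetMsg(msg)
	// ... process msg ...
}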
Example 20
func (s *State) executeMsg(vm *VM, msg interfaces.IMsg) (ret bool) {
	_, ok := s.Replay.Valid(constants.INTERNAL_REPLAY, msg.GetRepeatHash().Fixed(), msg.GetTimestamp(), s.GetTimestamp())
	if !ok {
		return
	}
	s.SetString()
	msg.ComputeVMIndex(s)

	switch msg.Validate(s) {
	case 1:
		if s.RunLeader &&
			s.Leader &&
			!s.Saving &&
			vm != nil && int(vm.Height) == len(vm.List) &&
			(!s.Syncing || !vm.Synced) &&
			(msg.IsLocal() || msg.GetVMIndex() == s.LeaderVMIndex) &&
			s.LeaderPL.DBHeight+1 >= s.GetHighestKnownBlock() {
			if len(vm.List) == 0 {
				s.SendDBSig(s.LLeaderHeight, s.LeaderVMIndex)
				s.XReview = append(s.XReview, msg)
			} else {
				msg.LeaderExecute(s)
			}
		} else {
			msg.FollowerExecute(s)
		}
		ret = true
	case 0:
		s.Holding[msg.GetMsgHash().Fixed()] = msg
	default:
		s.Holding[msg.GetMsgHash().Fixed()] = msg
		if !msg.SentInvlaid() {
			msg.MarkSentInvalid(true)
			s.networkInvalidMsgQueue <- msg
		}
	}

	return

}
Example 21
func (state *State) ValidatorLoop() {
	timeStruct := new(Timer)
	for {

		// Check if we should shut down.
		select {
		case <-state.ShutdownChan:
			fmt.Println("Closing the Database on", state.GetFactomNodeName())
			state.DB.Close()
			fmt.Println(state.GetFactomNodeName(), "closed")
			return
		default:
		}

		// Look for pending messages, and get one if there is one.
		var msg interfaces.IMsg
	loop:
		for i := 0; i < 10; i++ {

			// Process any messages we might have queued up.
			for i = 0; i < 10; i++ {
				p, b := state.Process(), state.UpdateState()
				if !p && !b {
					break
				}
				//fmt.Printf("dddd %20s %10s --- %10s %10v %10s %10v\n", "Validation", state.FactomNodeName, "Process", p, "Update", b)
			}

			select {
			case min := <-state.tickerQueue:
				timeStruct.timer(state, min)
			default:
			}

			select {
			case msg = <-state.TimerMsgQueue():
				state.JournalMessage(msg)
				break loop
			default:
			}

			select {
			case msg = <-state.InMsgQueue(): // Get message from the timer or input queue
				state.JournalMessage(msg)
				break loop
			default: // No messages? Sleep for a bit
				time.Sleep(10 * time.Millisecond)
			}
		}

		// Sort the messages.
		if msg != nil {
			if state.IsReplaying {
				state.ReplayTimestamp = msg.GetTimestamp()
			}
			if _, ok := msg.(*messages.Ack); ok {
				state.ackQueue <- msg
			} else {
				state.msgQueue <- msg
			}
		}
	}
}
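
ValidatorLoop relies on select with a default clause so channel reads never block; the pattern in isolation (illustrative, not factomd code):

// tryRecv polls a channel without blocking; ok is false when nothing is queued.
func tryRecv(ch <-chan interfaces.IMsg) (interfaces.IMsg, bool) {
	select {
	case m := <-ch:
		return m, true
	default:
		return nil, false
	}
}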
Example 22
func (p *ProcessList) AddOldMsgs(m interfaces.IMsg) {
	p.oldmsgslock.Lock()
	defer p.oldmsgslock.Unlock()
	p.OldMsgs[m.GetHash().Fixed()] = m
}