// Stage the block: execute it against a copy of the state and, if successful,
// cache the block and the resulting state so they can be committed later.
func (cs *ConsensusState) stageBlock(block *types.Block, blockParts *types.PartSet) error {
	if block == nil {
		PanicSanity("Cannot stage nil block")
	}

	// Already staged?
	blockHash := block.Hash()
	if cs.stagedBlock != nil && len(blockHash) != 0 && bytes.Equal(cs.stagedBlock.Hash(), blockHash) {
		return nil
	}

	// Create a copy of the state for staging.
	stateCopy := cs.state.Copy()

	// Reset the event cache and pass it into the state copy.
	cs.evc = events.NewEventCache(cs.evsw)
	stateCopy.SetFireable(cs.evc)

	// Commit the block onto the copied state.
	// NOTE: Basic validation is done in state.AppendBlock().
	err := sm.ExecBlock(stateCopy, block, blockParts.Header())
	if err != nil {
		return err
	}
	cs.stagedBlock = block
	cs.stagedState = stateCopy
	return nil
}
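// For illustration, the staging pattern above can be sketched in isolation:
// the block is executed against a *copy* of the state, so the committed state
// is only replaced once the block is actually decided. This is a minimal,
// hypothetical sketch (sketchState, sketchBlock, and stager are illustrative
// names, not the real State/Block types used in this file):
//
//	type sketchState struct{ height int }
//
//	func (s *sketchState) Copy() *sketchState { c := *s; return &c }
//
//	type sketchBlock struct{ height int }
//
//	type stager struct {
//		state       *sketchState // last committed state
//		stagedBlock *sketchBlock
//		stagedState *sketchState
//	}
//
//	// stage executes the block against a copy of the current state; on
//	// failure the committed state is left untouched.
//	func (st *stager) stage(b *sketchBlock) error {
//		if b.height != st.state.height+1 {
//			return fmt.Errorf("expected height %d, got %d", st.state.height+1, b.height)
//		}
//		stateCopy := st.state.Copy()
//		stateCopy.height = b.height
//		st.stagedBlock, st.stagedState = b, stateCopy
//		return nil
//	}
//
//	// commit promotes the staged state once the block is decided.
//	func (st *stager) commit() {
//		st.state = st.stagedState
//		st.stagedBlock, st.stagedState = nil, nil
//	}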
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
func (bcR *BlockchainReactor) poolRoutine() {

	trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
	switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second)

FOR_LOOP:
	for {
		select {
		case request := <-bcR.requestsCh: // chan BlockRequest
			peer := bcR.sw.Peers().Get(request.PeerId)
			if peer == nil {
				// We can't assign the request.
				continue FOR_LOOP
			}
			msg := &bcBlockRequestMessage{request.Height}
			queued := peer.TrySend(BlockchainChannel, msg)
			if !queued {
				// We couldn't queue the request: the peer's send queue is full.
				// The pool handles retries, so just let it go.
				continue FOR_LOOP
			}
		case peerId := <-bcR.timeoutsCh: // chan string
			// Peer timed out.
			peer := bcR.sw.Peers().Get(peerId)
			if peer != nil {
				bcR.sw.StopPeerForError(peer, errors.New("BlockchainReactor Timeout"))
			}
		case <-statusUpdateTicker.C:
			// Ask peers for status updates.
			go bcR.BroadcastStatusRequest()
		case <-switchToConsensusTicker.C:
			height, numPending, numUnassigned := bcR.pool.GetStatus()
			outbound, inbound, _ := bcR.sw.NumPeers()
			log.Debug("Consensus ticker", "numUnassigned", numUnassigned, "numPending", numPending,
				"total", len(bcR.pool.requests), "outbound", outbound, "inbound", inbound)
			// NOTE: This condition is very strict right now and may need to be weakened.
			// If all `maxPendingRequests` requests are unassigned
			// and we have some peers (say >= 3), then we're caught up.
			maxPending := numPending == maxPendingRequests
			allUnassigned := numPending == numUnassigned
			enoughPeers := outbound+inbound >= 3
			if maxPending && allUnassigned && enoughPeers {
				log.Info("Time to switch to consensus reactor!", "height", height)
				bcR.pool.Stop()
				conR := bcR.sw.Reactor("CONSENSUS").(consensusReactor)
				conR.SwitchToConsensus(bcR.state)
				break FOR_LOOP
			}
		case <-trySyncTicker.C: // chan time
			// This loop can be slow as long as it's doing syncing work.
		SYNC_LOOP:
			for i := 0; i < 10; i++ {
				// See if there are any blocks to sync.
				first, second := bcR.pool.PeekTwoBlocks()
				//log.Debug("TrySync peeked", "first", first, "second", second)
				if first == nil || second == nil {
					// We need both to sync the first block.
					break SYNC_LOOP
				}
				firstParts := first.MakePartSet()
				firstPartsHeader := firstParts.Header()
				// Finally, verify the first block using the second's LastValidation.
				err := bcR.state.BondedValidators.VerifyValidation(
					bcR.state.ChainID, first.Hash(), firstPartsHeader, first.Height, second.LastValidation)
				if err != nil {
					log.Debug("error in validation", "error", err)
					bcR.pool.RedoRequest(first.Height)
					break SYNC_LOOP
				}
				bcR.pool.PopRequest()
				err = sm.ExecBlock(bcR.state, first, firstPartsHeader)
				if err != nil {
					// TODO: This is bad; are we a zombie?
					panic(Fmt("Failed to process committed block: %v", err))
				}
				bcR.store.SaveBlock(first, firstParts, second.LastValidation)
				bcR.state.Save()
			}
			continue FOR_LOOP
		case <-bcR.quit:
			break FOR_LOOP
		}
	}
}
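// For illustration, the switch-to-consensus heuristic above can be isolated
// into a small predicate. This is a hypothetical sketch (the function and
// parameter names are illustrative, not part of the reactor's API): fast-sync
// is considered done when the pool is requesting as many blocks as it is
// allowed to, none of those requests can be assigned to any peer, and we are
// still connected to a reasonable number of peers.
//
//	// caughtUp reports whether fast-sync should hand off to the consensus
//	// reactor. numPending is the number of outstanding block requests,
//	// numUnassigned is how many of them have no peer assigned, and maxPending
//	// is the pool's request limit.
//	func caughtUp(numPending, numUnassigned, maxPending, outbound, inbound int) bool {
//		maxedOut := numPending == maxPending         // requesting as much as allowed
//		allUnassigned := numPending == numUnassigned // ...but no peer can serve any of it
//		enoughPeers := outbound+inbound >= 3         // and we're not simply isolated
//		return maxedOut && allUnassigned && enoughPeers
//	}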