// End of Block means packing the current block away, and setting
// up the next.
func (fs *FactoidState) ProcessEndOfBlock(state interfaces.IState) {
	var hash, hash2 interfaces.IHash

	if fs.GetCurrentBlock() == nil {
		panic("Invalid state on initialization")
	}

	hash = fs.CurrentBlock.GetHash()
	hash2 = fs.CurrentBlock.GetLedgerKeyMR()

	// Record the Factoid block's key MR in entry 2 of the current
	// directory block.
	state.GetCurrentDirectoryBlock().GetDBEntries()[2].SetKeyMR(hash)

	if err := state.GetDB().SaveFactoidBlockHead(fs.CurrentBlock); err != nil {
		panic(err)
	}

	state.SetPrevFactoidKeyMR(hash)

	// Start the next block and give it its coinbase transaction.
	fs.CurrentBlock = block.NewFBlock(fs.GetFactoshisPerEC(), state.GetDBHeight()+1)

	t := coinbase.GetCoinbase(primitives.GetTimeMilli())
	err := fs.CurrentBlock.AddCoinbase(t)
	if err != nil {
		panic(err.Error())
	}
	fs.UpdateTransaction(t)

	// Link the new block back to the block just packed away.
	if hash != nil {
		fs.CurrentBlock.SetPrevKeyMR(hash.Bytes())
		fs.CurrentBlock.SetPrevLedgerKeyMR(hash2.Bytes())
	}
}
// btcdMain is the real main function for btcd. It is necessary to work around
// the fact that deferred functions do not run when os.Exit() is called. The
// optional serverChan parameter is mainly used by the service code to be
// notified with the server once it is set up, so it can gracefully stop it
// when requested from the service control manager.
//func btcdMain(serverChan chan<- *server) error {
func btcdMain(serverChan chan<- *Server, state interfaces.IState) error {
	// Load configuration and parse command line. This function also
	// initializes logging and configures it accordingly.
	tcfg, _, err := loadConfig()
	if err != nil {
		return err
	}
	cfg = tcfg

	// Tweak some config options.
	cfg.DisableCheckpoints = true

	defer backendLog.Flush()

	// Show version at startup.
	btcdLog.Infof("Version %s", version())

	// Ensure the database is synced and closed on Ctrl+C.
	AddInterruptHandler(func() {
		btcdLog.Infof("Gracefully shutting down the database...")
		state.GetDB().(interfaces.IDatabase).Close()
		//db.RollbackClose()
	})

	// Create server and start it.
	server, err := newServer(cfg.Listeners, activeNetParams.Params, state)
	if err != nil {
		// TODO(oga) this logging could do with some beautifying.
		btcdLog.Errorf("Unable to start server on %v: %v", cfg.Listeners, err)
		return err
	}
	AddInterruptHandler(func() {
		btcdLog.Infof("Gracefully shutting down the server...")
		server.Stop()
		server.WaitForShutdown()
	})
	server.Start()
	if serverChan != nil {
		serverChan <- server
	}

	// Factom Additions BEGIN
	//factomForkInit(server)
	// Factom Additions END

	go func() {
		server.WaitForShutdown()
		srvrLog.Infof("Server shutdown complete")
		shutdownChannel <- struct{}{}
	}()

	// Wait for shutdown signal from either a graceful server stop or from
	// the interrupt handler.
	<-shutdownChannel
	btcdLog.Info("Shutdown complete")
	return nil
}
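// The shutdown flow in btcdMain follows a common Go pattern: interrupt
// handlers release resources, and the main goroutine blocks on a channel
// until either a graceful server stop or Ctrl+C completes. The sketch below
// is illustrative only; it uses the standard library ("os", "os/signal",
// "log") and a hypothetical exampleShutdownPattern name rather than btcd's
// AddInterruptHandler/Server machinery.
func exampleShutdownPattern() {
	shutdown := make(chan struct{})
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	go func() {
		// Wait for Ctrl+C, release resources, then signal completion.
		<-interrupt
		log.Println("Gracefully shutting down...")
		close(shutdown)
	}()

	// Block until shutdown is signaled, mirroring <-shutdownChannel above.
	<-shutdown
	log.Println("Shutdown complete")
}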
// InitAnchor initializes the RPC clients for Factom
// and loads unconfirmed DirBlockInfo entries from leveldb.
func InitAnchor(s interfaces.IState) (*Anchor, error) {
	anchorLog.Debug("InitAnchor")
	a := NewAnchor()
	a.state = s
	a.db = s.GetDB()
	a.minBalance, _ = btcutil.NewAmount(0.01)

	var err error
	a.dirBlockInfoSlice, err = a.db.FetchAllUnconfirmedDirBlockInfos()
	if err != nil {
		anchorLog.Error("InitAnchor error - " + err.Error())
		return nil, err
	}
	anchorLog.Debug("init dirBlockInfoSlice.len=", len(a.dirBlockInfoSlice))

	// This might take a while: check for missing DirBlockInfo for existing
	// DirBlocks in the database.
	//TODO: handle concurrency better
	go a.checkMissingDirBlockInfo()

	a.readConfig()
	if err = a.InitRPCClient(); err != nil {
		anchorLog.Error(err.Error())
	} else {
		a.updateUTXO(a.minBalance)
	}

	ticker0 := time.NewTicker(time.Minute * time.Duration(1))
	go func() {
		for _ = range ticker0.C {
			a.checkForAnchor()
		}
	}()

	ticker := time.NewTicker(time.Minute * time.Duration(a.tenMinutes))
	go func() {
		for _ = range ticker.C {
			anchorLog.Info("In 10 minutes ticker...")
			a.readConfig()
			if a.dclient == nil || a.wclient == nil {
				if err = a.InitRPCClient(); err != nil {
					anchorLog.Error(err.Error())
				}
			}
			if a.wclient != nil {
				a.checkTxConfirmations()
			}
		}
	}()
	return a, nil
}
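// The tickers started by InitAnchor run for the life of the process and are
// never stopped. As an illustrative sketch only (hypothetical examplePoller
// name, standard "time" package), the same polling pattern can be written
// with a stop channel so the ticker and its goroutine shut down cleanly.
func examplePoller(interval time.Duration, check func(), stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				check()
			case <-stop:
				return
			}
		}
	}()
}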
// DirBlockLocatorFromHash returns a block locator for the passed block hash.
// See BlockLocator for details on the algorithm used to create a block locator.
//
// In addition to the general algorithm referenced above, there are a couple of
// special cases which are handled:
//
//  - If the genesis hash is passed, there are no previous hashes to add and
//    therefore the block locator will only consist of the genesis hash
//  - If the passed hash is not currently known, the block locator will only
//    consist of the passed hash
func DirBlockLocatorFromHash(hash interfaces.IHash, state interfaces.IState) BlockLocator {
	// The locator contains the requested hash at the very least.
	locator := make(BlockLocator, 0, messages.MaxBlockLocatorsPerMsg)
	locator = append(locator, hash)

	genesisHash, _ := HexToHash(GENESIS_DIR_BLOCK_HASH)
	// Nothing more to do if a locator for the genesis hash was requested.
	if genesisHash.IsSameAs(hash) {
		return locator
	}

	// Attempt to find the height of the block that corresponds to the
	// passed hash, and if it's on a side chain, also find the height at
	// which it forks from the main chain.
	blockHeight := int64(-1)

	// Generate the block locators according to the algorithm described in
	// the BlockLocator comment and make sure to leave room for the final
	// genesis hash.
	dblock, _ := state.GetDB().FetchDBlockByHash(hash)
	//dblock := dblock0.(directoryBlock.DirectoryBlock)
	if dblock != nil {
		blockHeight = int64(dblock.GetHeader().GetDBHeight())
	}
	increment := int64(1)
	for len(locator) < messages.MaxBlockLocatorsPerMsg-1 {
		// Once there are 10 locators, exponentially increase the
		// distance between each block locator.
		if len(locator) > 10 {
			increment *= 2
		}
		blockHeight -= increment
		if blockHeight < 1 {
			break
		}
		blk, _ := state.GetDB().FetchDBlockByHeight(uint32(blockHeight))
		if blk == nil || blk.GetHash() == nil {
			//blk.DBHash, _ = CreateHash(blk)
			continue
		}
		locator = append(locator, blk.GetHash())
	}

	// Append the appropriate genesis block.
	locator = append(locator, genesisHash)
	return locator
}
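// The loop above keeps locator entries dense for the most recent blocks and
// doubles the gap once more than 10 entries have been collected, so a long
// chain is covered by a bounded number of hashes. The helper below is an
// illustrative sketch (hypothetical exampleLocatorHeights name, not part of
// the package) that reproduces just the height schedule for a given tip.
func exampleLocatorHeights(tipHeight int64, maxLocators int) []int64 {
	heights := []int64{tipHeight}
	increment := int64(1)
	blockHeight := tipHeight
	for len(heights) < maxLocators-1 {
		// Mirror the loop above: exponential back-off after 10 entries.
		if len(heights) > 10 {
			increment *= 2
		}
		blockHeight -= increment
		if blockHeight < 1 {
			break
		}
		heights = append(heights, blockHeight)
	}
	// The genesis block is always appended last.
	heights = append(heights, 0)
	return heights
}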