// LogBlockHeight logs a new block height as an information message to show // progress to the user. In order to prevent spam, it limits logging to one // message every 10 seconds with duration and totals included. func (b *blockProgressLogger) LogBlockHeight(block *dcrutil.Block) { b.Lock() defer b.Unlock() b.receivedLogBlocks++ b.receivedLogTx += int64(len(block.MsgBlock().Transactions)) now := time.Now() duration := now.Sub(b.lastBlockLogTime) if duration < time.Second*10 { return } // Truncate the duration to 10s of milliseconds. durationMillis := int64(duration / time.Millisecond) tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10) // Log information about new block height. blockStr := "blocks" if b.receivedLogBlocks == 1 { blockStr = "block" } txStr := "transactions" if b.receivedLogTx == 1 { txStr = "transaction" } b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)", b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx, txStr, block.Height(), block.MsgBlock().Header.Timestamp) b.receivedLogBlocks = 0 b.receivedLogTx = 0 b.lastBlockLogTime = now }
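LogBlockHeight's truncation step rounds the elapsed time down to whole 10-millisecond increments so the logged duration stays short. A minimal, self-contained sketch of that arithmetic, using an illustrative duration value:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative elapsed time between log messages.
	duration := 12345678900 * time.Nanosecond // 12.3456789s

	// Same truncation as LogBlockHeight: keep whole 10ms increments.
	durationMillis := int64(duration / time.Millisecond) // 12345
	tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)

	fmt.Println(tDuration) // prints 12.34s
}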
// checkBlockContext performs several validation checks on the block which depend // on its position within the block chain. // // The flags modify the behavior of this function as follows: // - BFFastAdd: The transactions are not checked to see if they are finalized // and the somewhat expensive duplicate transaction check is not performed. // // The flags are also passed to checkBlockHeaderContext. See its documentation // for how the flags modify its behavior. func (b *BlockChain) checkBlockContext(block *dcrutil.Block, prevNode *blockNode, flags BehaviorFlags) error { // The genesis block is valid by definition. if prevNode == nil { return nil } // Perform all block header related validation checks. header := &block.MsgBlock().Header err := b.checkBlockHeaderContext(header, prevNode, flags) if err != nil { return err } fastAdd := flags&BFFastAdd == BFFastAdd if !fastAdd { // The height of this block is one more than the referenced // previous block. blockHeight := prevNode.height + 1 // Ensure all transactions in the block are finalized. for _, tx := range block.Transactions() { if !IsFinalizedTransaction(tx, blockHeight, header.Timestamp) { str := fmt.Sprintf("block contains unfinalized regular "+ "transaction %v", tx.Sha()) return ruleError(ErrUnfinalizedTx, str) } } for _, stx := range block.STransactions() { if !IsFinalizedTransaction(stx, blockHeight, header.Timestamp) { str := fmt.Sprintf("block contains unfinalized stake "+ "transaction %v", stx.Sha()) return ruleError(ErrUnfinalizedTx, str) } } // Check that the node is at the correct height in the blockchain, // as specified in the block header. if blockHeight != int64(block.MsgBlock().Header.Height) { errStr := fmt.Sprintf("Block header height invalid; expected %v"+ " but %v was found", blockHeight, header.Height) return ruleError(ErrBadBlockHeight, errStr) } // Check that the coinbase contains at minimum the block // height in output 1. if blockHeight > 1 { err := checkCoinbaseUniqueHeight(blockHeight, block) if err != nil { return err } } } return nil }
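checkBlockContext relies on IsFinalizedTransaction to reject blocks that contain non-final transactions. The sketch below shows the usual lock-time finality rule in a self-contained form; the threshold constant, helper name, and sample values are illustrative assumptions, not dcrd's exact implementation:

package main

import (
	"fmt"
	"time"
)

// Assumed constants for illustration: lock times below the threshold are
// interpreted as block heights, and the maximum sequence number lets inputs
// opt out of lock-time enforcement.
const (
	lockTimeThreshold  = 500000000
	maxTxInSequenceNum = 0xffffffff
)

// isFinalized sketches the finality rule: a transaction is final when its
// lock time is zero, already satisfied by the block height/time, or every
// input uses the maximum sequence number.
func isFinalized(lockTime uint32, sequences []uint32, blockHeight int64, blockTime time.Time) bool {
	if lockTime == 0 {
		return true
	}
	threshold := blockTime.Unix()
	if lockTime < lockTimeThreshold {
		threshold = blockHeight
	}
	if int64(lockTime) < threshold {
		return true
	}
	for _, seq := range sequences {
		if seq != maxTxInSequenceNum {
			return false
		}
	}
	return true
}

func main() {
	now := time.Now()
	fmt.Println(isFinalized(0, nil, 100000, now))                       // true: no lock time
	fmt.Println(isFinalized(150000, []uint32{0}, 100000, now))          // false: height lock not reached
	fmt.Println(isFinalized(150000, []uint32{0xffffffff}, 100000, now)) // true: inputs opt out
}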
// dbIndexDisconnectBlock removes all of the index entries associated with the // given block using the provided indexer and updates the tip of the indexer // accordingly. An error will be returned if the current tip for the indexer is // not the passed block. func dbIndexDisconnectBlock(dbTx database.Tx, indexer Indexer, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error { // Assert that the block being disconnected is the current tip of the // index. idxKey := indexer.Key() curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey) if err != nil { return err } if !curTipHash.IsEqual(block.Sha()) { return AssertError(fmt.Sprintf("dbIndexDisconnectBlock must "+ "be called with the block at the current index tip "+ "(%s, tip %s, block %s)", indexer.Name(), curTipHash, block.Sha())) } // Notify the indexer with the disconnected block so it can remove all // of the appropriate entries. if err := indexer.DisconnectBlock(dbTx, block, parent, view); err != nil { return err } // Update the current index tip. prevHash := &block.MsgBlock().Header.PrevBlock return dbPutIndexerTip(dbTx, idxKey, prevHash, uint32(block.Height())-1) }
// submitBlock submits the passed block to the network after ensuring it passes all // of the consensus validation rules. func (m *CPUMiner) submitBlock(block *dcrutil.Block) bool { m.submitBlockLock.Lock() defer m.submitBlockLock.Unlock() _, latestHeight := m.server.blockManager.chainState.Best() // Be sure to set this so ProcessBlock doesn't fail! - Decred block.SetHeight(latestHeight + 1) // Process this block using the same rules as blocks coming from other // nodes. This will in turn relay it to the network like normal. isOrphan, err := m.server.blockManager.ProcessBlock(block, blockchain.BFNone) if err != nil { // Anything other than a rule violation is an unexpected error, // so log that error as an internal error. if rErr, ok := err.(blockchain.RuleError); !ok { minrLog.Errorf("Unexpected error while processing "+ "block submitted via CPU miner: %v", err) return false } else { // Occasionally errors are returned for timing issues with // ResetMinDifficulty and for block hashes that are above // the target. Log these at the debug level. if m.server.chainParams.ResetMinDifficulty && rErr.ErrorCode == blockchain.ErrHighHash { minrLog.Debugf("Block submitted via CPU miner rejected "+ "because of ResetMinDifficulty time sync failure: %v", err) return false } else { // Other rule errors should be reported. minrLog.Errorf("Block submitted via CPU miner rejected: %v", err) return false } } } if isOrphan { minrLog.Errorf("Block submitted via CPU miner is an orphan building "+ "on parent %v", block.MsgBlock().Header.PrevBlock) return false } // The block was accepted. coinbaseTxOuts := block.MsgBlock().Transactions[0].TxOut coinbaseTxGenerated := int64(0) for _, out := range coinbaseTxOuts { coinbaseTxGenerated += out.Value } minrLog.Infof("Block submitted via CPU miner accepted (hash %s, "+ "height %v, amount %v)", block.Sha(), block.Height(), dcrutil.Amount(coinbaseTxGenerated)) return true }
// ticketsRevokedInBlock fetches a list of tickets that were revoked in the // block. func ticketsRevokedInBlock(bl *dcrutil.Block) []chainhash.Hash { var tickets []chainhash.Hash for _, stx := range bl.MsgBlock().STransactions { if stake.DetermineTxType(stx) == stake.TxTypeSSRtx { tickets = append(tickets, stx.TxIn[0].PreviousOutPoint.Hash) } } return tickets }
// voteVersionsInBlock returns all versions in a block. func voteVersionsInBlock(bl *dcrutil.Block, params *chaincfg.Params) []uint32 { versions := make([]uint32, 0, params.TicketsPerBlock) for _, stx := range bl.MsgBlock().STransactions { if is, _ := stake.IsSSGen(stx); !is { continue } versions = append(versions, stake.SSGenVersion(stx)) } return versions }
// lookupTransaction is a special transaction lookup function that searches // the database, the block, and its parent for a transaction. This is needed // because indexBlockAddrs is called AFTER a block is added/removed in the // blockchain in blockManager, necessitating that the blocks internally be // searched for inputs for any given transaction too. Additionally, it's faster // to get the tx from the blocks here since they're already in memory. func (a *addrIndexer) lookupTransaction(txHash chainhash.Hash, blk *dcrutil.Block, parent *dcrutil.Block) (*wire.MsgTx, error) { // Search the block and its parent before hitting the database. txTreeRegularValid := dcrutil.IsFlagSet16(blk.MsgBlock().Header.VoteBits, dcrutil.BlockValid) // Search the regular tx tree of this and the last block if the // tx tree regular was validated. if txTreeRegularValid { for _, stx := range parent.STransactions() { if stx.Sha().IsEqual(&txHash) { return stx.MsgTx(), nil } } for _, tx := range parent.Transactions() { if tx.Sha().IsEqual(&txHash) { return tx.MsgTx(), nil } } for _, tx := range blk.Transactions() { if tx.Sha().IsEqual(&txHash) { return tx.MsgTx(), nil } } } else { // Just search this block's regular tx tree and the previous // block's stake tx tree. for _, stx := range parent.STransactions() { if stx.Sha().IsEqual(&txHash) { return stx.MsgTx(), nil } } for _, tx := range blk.Transactions() { if tx.Sha().IsEqual(&txHash) { return tx.MsgTx(), nil } } } // Lookup and fetch the referenced output's tx in the database. txList, err := a.server.db.FetchTxBySha(&txHash) if err != nil { adxrLog.Errorf("Error fetching tx %v: %v", txHash, err) return nil, err } if len(txList) == 0 { return nil, fmt.Errorf("transaction %v not found", txHash) } return txList[len(txList)-1].Tx, nil }
// checkCoinbaseUniqueHeight ensures that, for all blocks with height > 1, the // coinbase contains the encoded block height so that coinbase hash collisions are // impossible. func checkCoinbaseUniqueHeight(blockHeight int64, block *dcrutil.Block) error { if !(len(block.MsgBlock().Transactions) > 0) { str := fmt.Sprintf("block %v has no coinbase", block.Sha()) return ruleError(ErrNoTransactions, str) } // Coinbase TxOut[0] is always tax, TxOut[1] is always // height + extranonce, so at least two outputs must // exist. if !(len(block.MsgBlock().Transactions[0].TxOut) > 1) { str := fmt.Sprintf("block %v is missing necessary coinbase "+ "outputs", block.Sha()) return ruleError(ErrFirstTxNotCoinbase, str) } // The first 4 bytes of the NullData output must be the // encoded height of the block, so that every coinbase // created has a unique transaction hash. nullData, err := txscript.GetNullDataContent( block.MsgBlock().Transactions[0].TxOut[1].Version, block.MsgBlock().Transactions[0].TxOut[1].PkScript) if err != nil { str := fmt.Sprintf("block %v txOut 1 has wrong pkScript "+ "type", block.Sha()) return ruleError(ErrFirstTxNotCoinbase, str) } if len(nullData) < 4 { str := fmt.Sprintf("block %v txOut 1 has too short nullData "+ "push to contain height", block.Sha()) return ruleError(ErrFirstTxNotCoinbase, str) } // Check the height and ensure it is correct. cbHeight := binary.LittleEndian.Uint32(nullData[0:4]) if cbHeight != uint32(blockHeight) { prevBlock := block.MsgBlock().Header.PrevBlock str := fmt.Sprintf("block %v txOut 1 has wrong height in "+ "coinbase; want %v, got %v; prevBlock %v, header height %v", block.Sha(), blockHeight, cbHeight, prevBlock, block.MsgBlock().Header.Height) return ruleError(ErrCoinbaseHeight, str) } return nil }
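The height check above expects the block height to sit in the first four bytes of the coinbase's second output (the null-data push), encoded little-endian. A small stdlib-only round trip of that encoding, with an illustrative height:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const blockHeight int64 = 170000 // illustrative height

	// Encode the height the way checkCoinbaseUniqueHeight expects to find
	// it: the first 4 bytes of the null-data push, little-endian (any extra
	// nonce bytes may follow).
	nullData := make([]byte, 4)
	binary.LittleEndian.PutUint32(nullData, uint32(blockHeight))

	// Decode it back exactly as the check does.
	cbHeight := binary.LittleEndian.Uint32(nullData[0:4])
	fmt.Printf("% x -> height %d\n", nullData, cbHeight)
}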
// connectTransactions updates the view by adding all new utxos created by all // of the transactions in the passed block, marking all utxos the transactions // spend as spent, and setting the best hash for the view to the passed block. // In addition, when the 'stxos' argument is not nil, it will be updated to // append an entry for each spent txout. func (b *BlockChain) connectTransactions(view *UtxoViewpoint, block *dcrutil.Block, parent *dcrutil.Block, stxos *[]spentTxOut) error { regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) thisNodeStakeViewpoint := ViewpointPrevInvalidStake if regularTxTreeValid { thisNodeStakeViewpoint = ViewpointPrevValidStake } if parent != nil && block.Height() != 0 { view.SetStakeViewpoint(ViewpointPrevValidInitial) err := view.fetchInputUtxos(b.db, block, parent) if err != nil { return err } mBlock := block.MsgBlock() votebits := mBlock.Header.VoteBits regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) if regularTxTreeValid { for i, tx := range parent.Transactions() { err := view.connectTransaction(tx, parent.Height(), uint32(i), stxos) if err != nil { return err } } } } for i, stx := range block.STransactions() { view.SetStakeViewpoint(thisNodeStakeViewpoint) err := view.fetchInputUtxos(b.db, block, parent) if err != nil { return err } err = view.connectTransaction(stx, block.Height(), uint32(i), stxos) if err != nil { return err } } // Update the best hash for view to include this block since all of its // transactions have been connected. view.SetBestHash(block.Sha()) return nil }
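Several of the functions here branch on whether the parent's regular transaction tree was approved by voters, signalled by the BlockValid bit in the header's VoteBits. A minimal sketch of that flag test; the flag is assumed to occupy the lowest bit, matching dcrutil.BlockValid:

package main

import "fmt"

// isFlagSet16 mirrors the dcrutil.IsFlagSet16 check used throughout: it
// reports whether every bit in flag is set in voteBits.
func isFlagSet16(voteBits, flag uint16) bool {
	return voteBits&flag == flag
}

func main() {
	const blockValid uint16 = 1 << 0 // assumed value of dcrutil.BlockValid

	fmt.Println(isFlagSet16(0x0001, blockValid)) // true: parent regular tree approved
	fmt.Println(isFlagSet16(0x0002, blockValid)) // false: parent regular tree rejected
}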
// NewMerkleBlock returns a new *wire.MsgMerkleBlock and an array of the matched // transaction hashes based on the passed block and filter. func NewMerkleBlock(block *dcrutil.Block, filter *Filter) (*wire.MsgMerkleBlock, []*chainhash.Hash) { numTx := uint32(len(block.Transactions())) mBlock := merkleBlock{ numTx: numTx, allHashes: make([]*chainhash.Hash, 0, numTx), matchedBits: make([]byte, 0, numTx), } // Find and keep track of any transactions that match the filter. var matchedHashes []*chainhash.Hash for _, tx := range block.Transactions() { if filter.MatchTxAndUpdate(tx) { mBlock.matchedBits = append(mBlock.matchedBits, 0x01) matchedHashes = append(matchedHashes, tx.Sha()) } else { mBlock.matchedBits = append(mBlock.matchedBits, 0x00) } mBlock.allHashes = append(mBlock.allHashes, tx.Sha()) } // Calculate the number of merkle branches (height) in the tree. height := uint32(0) for mBlock.calcTreeWidth(height) > 1 { height++ } // Build the depth-first partial merkle tree. mBlock.traverseAndBuild(height, 0) // Create and return the merkle block. msgMerkleBlock := wire.MsgMerkleBlock{ Header: block.MsgBlock().Header, Transactions: uint32(mBlock.numTx), Hashes: make([]*chainhash.Hash, 0, len(mBlock.finalHashes)), Flags: make([]byte, (len(mBlock.bits)+7)/8), } for _, sha := range mBlock.finalHashes { msgMerkleBlock.AddTxHash(sha) } for i := uint32(0); i < uint32(len(mBlock.bits)); i++ { msgMerkleBlock.Flags[i/8] |= mBlock.bits[i] << (i % 8) } return &msgMerkleBlock, matchedHashes }
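NewMerkleBlock packs the per-node traversal bits into bytes, least-significant bit first within each byte. The sketch below isolates that packing loop with illustrative bit values:

package main

import "fmt"

func main() {
	// Illustrative traversal bits from a partial merkle tree walk.
	bits := []byte{1, 1, 0, 1, 0, 0, 1, 0, 1, 1}

	// Pack them exactly as NewMerkleBlock does: bit i lands in byte i/8 at
	// position i%8.
	flags := make([]byte, (len(bits)+7)/8)
	for i := uint32(0); i < uint32(len(bits)); i++ {
		flags[i/8] |= bits[i] << (i % 8)
	}

	fmt.Printf("%08b\n", flags) // [01001011 00000011]
}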
// dbRemoveTxIndexEntries uses an existing database transaction to remove the // latest transaction entry for every transaction in the passed block. func dbRemoveTxIndexEntries(dbTx database.Tx, block, parent *dcrutil.Block) error { regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) if regularTxTreeValid { for _, tx := range parent.Transactions() { txSha := tx.Sha() err := dbRemoveTxIndexEntry(dbTx, *txSha) if err != nil { return err } } } for _, tx := range block.STransactions() { txSha := tx.Sha() err := dbRemoveTxIndexEntry(dbTx, *txSha) if err != nil { return err } } return nil }
// dbIndexConnectBlock adds all of the index entries associated with the // given block using the provided indexer and updates the tip of the indexer // accordingly. An error will be returned if the current tip for the indexer is // not the previous block for the passed block. func dbIndexConnectBlock(dbTx database.Tx, indexer Indexer, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error { // Assert that the block being connected properly connects to the // current tip of the index. idxKey := indexer.Key() curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey) if err != nil { return err } if !curTipHash.IsEqual(&block.MsgBlock().Header.PrevBlock) { return AssertError(fmt.Sprintf("dbIndexConnectBlock must be "+ "called with a block that extends the current index "+ "tip (%s, tip %s, block %s)", indexer.Name(), curTipHash, block.Sha())) } // Notify the indexer with the connected block so it can index it. if err := indexer.ConnectBlock(dbTx, block, parent, view); err != nil { return err } // Update the current index tip. return dbPutIndexerTip(dbTx, idxKey, block.Sha(), uint32(block.Height())) }
// CalculateAddedSubsidy calculates the amount of subsidy added by a block // and its parent. The blocks passed to this function MUST be valid blocks // that have already been confirmed to abide by the consensus rules of the // network, or the function might panic. func CalculateAddedSubsidy(block, parent *dcrutil.Block) int64 { var subsidy int64 regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) if regularTxTreeValid { subsidy += parent.MsgBlock().Transactions[0].TxIn[0].ValueIn } for _, stx := range block.MsgBlock().STransactions { if isSSGen, _ := stake.IsSSGen(stx); isSSGen { subsidy += stx.TxIn[0].ValueIn } } return subsidy }
// maybeAcceptBlock potentially accepts a block into the memory block chain. // It performs several validation checks which depend on its position within // the block chain before adding it. The block is expected to have already gone // through ProcessBlock before calling this function with it. // // The flags modify the behavior of this function as follows: // - BFDryRun: The memory chain index will not be pruned and no accept // notification will be sent since the block is not being accepted. func (b *BlockChain) maybeAcceptBlock(block *dcrutil.Block, flags BehaviorFlags) (bool, error) { dryRun := flags&BFDryRun == BFDryRun // Get a block node for the block previous to this one. Will be nil // if this is the genesis block. prevNode, err := b.getPrevNodeFromBlock(block) if err != nil { log.Debugf("getPrevNodeFromBlock: %v", err) return false, err } // The height of this block is one more than the referenced previous // block. blockHeight := int64(0) if prevNode != nil { blockHeight = prevNode.height + 1 } block.SetHeight(blockHeight) // The block must pass all of the validation rules which depend on the // position of the block within the block chain. err = b.checkBlockContext(block, prevNode, flags) if err != nil { return false, err } // Prune block nodes which are no longer needed before creating // a new node. if !dryRun { err = b.pruneBlockNodes() if err != nil { return false, err } } // Create a new block node for the block and add it to the in-memory // block chain (could be either a side chain or the main chain). blockHeader := &block.MsgBlock().Header var voteBitsStake []uint16 for _, stx := range block.STransactions() { if is, _ := stake.IsSSGen(stx); is { vb := stake.GetSSGenVoteBits(stx) voteBitsStake = append(voteBitsStake, vb) } } newNode := newBlockNode(blockHeader, block.Sha(), blockHeight, voteBitsStake) if prevNode != nil { newNode.parent = prevNode newNode.height = blockHeight newNode.workSum.Add(prevNode.workSum, newNode.workSum) } // Connect the passed block to the chain while respecting proper chain // selection according to the chain with the most proof of work. This // also handles validation of the transaction scripts. var onMainChain bool onMainChain, err = b.connectBestChain(newNode, block, flags) if err != nil { return false, err } // Notify the caller that the new block was accepted into the block // chain. The caller would typically want to react by relaying the // inventory to other peers. if !dryRun { b.sendNotification(NTBlockAccepted, &BlockAcceptedNtfnsData{onMainChain, block}) } return onMainChain, nil }
// insertBlock is the internal function which implements the public // InsertBlock. See the comment for InsertBlock for more details. // // This function MUST be called with the tmdb lock held (for writes). func (tmdb *TicketDB) insertBlock(block *dcrutil.Block) (SStxMemMap, SStxMemMap, SStxMemMap, error) { height := block.Height() if height < tmdb.StakeEnabledHeight { return nil, nil, nil, nil } // Sanity check: Does the number of tickets in ticketMap equal the number // of tickets indicated in the header? poolSizeBlock := int(block.MsgBlock().Header.PoolSize) poolSize := 0 for i := 0; i < BucketsSize; i++ { poolSize += len(tmdb.maps.ticketMap[i]) } if poolSize != poolSizeBlock { return nil, nil, nil, fmt.Errorf("ticketpoolsize in block %v not "+ "equal to the calculated ticketpoolsize, indicating database "+ "corruption (got %v, want %v)", block.Sha(), poolSizeBlock, poolSize) } // Create the block in the spentTicketMap. tmdb.maybeInsertBlock(block.Height()) // Iterate through all the SSGen (vote) tx in the block and add them to // a map of tickets that were actually used. The rest of the tickets in // the buckets were then considered missed --> missedTicketMap. // Note that it doesn't really matter what value you set usedTickets to, // it's just a map of tickets that were actually used in the block. It // would probably be more efficient to use an array. usedTickets := make(map[chainhash.Hash]struct{}) spendingHashes := make(map[chainhash.Hash]chainhash.Hash) revocations := make(map[chainhash.Hash]struct{}) for _, staketx := range block.STransactions() { if is, _ := IsSSGen(staketx); is { msgTx := staketx.MsgTx() sstxIn := msgTx.TxIn[1] // sstx input sstxHash := sstxIn.PreviousOutPoint.Hash usedTickets[sstxHash] = struct{}{} spendingHashes[sstxHash] = *staketx.Sha() } if is, _ := IsSSRtx(staketx); is { msgTx := staketx.MsgTx() sstxIn := msgTx.TxIn[0] // sstx input sstxHash := sstxIn.PreviousOutPoint.Hash revocations[sstxHash] = struct{}{} } } // Spend or miss all the necessary tickets and do some sanity checks. parentBlock, err := tmdb.database.FetchBlockBySha( &block.MsgBlock().Header.PrevBlock) if err != nil { return nil, nil, nil, err } spentAndMissedTickets, err := tmdb.spendTickets(parentBlock, usedTickets, spendingHashes) if err != nil { return nil, nil, nil, err } // Expire all old tickets, and stick them into the spent and missed ticket // map too. expiredTickets, err := tmdb.expireTickets(height) if err != nil { return nil, nil, nil, err } if len(expiredTickets) > 0 && len(spentAndMissedTickets) == 0 { return nil, nil, nil, fmt.Errorf("tried to expire tickets before " + "stake validation height! TicketExpiry may be too small") } if len(expiredTickets) > 0 { for hash, ticket := range expiredTickets { spentAndMissedTickets[hash] = ticket } } revokedTickets, err := tmdb.revokeTickets(revocations) if err != nil { return nil, nil, nil, err } newTickets, err := tmdb.pushMatureTicketsAtHeight(block.Height()) if err != nil { return nil, nil, nil, err } log.Debugf("Connected block %v (height %v) to the ticket database", block.Sha(), block.Height()) return cloneSStxMemMap(spentAndMissedTickets), cloneSStxMemMap(newTickets), cloneSStxMemMap(revokedTickets), nil }
// ProcessBlock is the main workhorse for handling insertion of new blocks into // the block chain. It includes functionality such as rejecting duplicate // blocks, ensuring blocks follow all rules, orphan handling, and insertion into // the block chain along with best chain selection and reorganization. // // It returns a first bool specifying whether or not the block is on the main // chain; true means it is on the main chain, while false means it is on a side // chain or fork. // // It returns a second bool which indicates whether or not the block is an // orphan, along with any errors that occurred during processing. The returned // bools are only valid when the error is nil. func (b *BlockChain) ProcessBlock(block *dcrutil.Block, timeSource MedianTimeSource, flags BehaviorFlags) (bool, bool, error) { fastAdd := flags&BFFastAdd == BFFastAdd dryRun := flags&BFDryRun == BFDryRun blockHash := block.Sha() log.Tracef("Processing block %v", blockHash) // The block must not already exist in the main chain or side chains. exists, err := b.blockExists(blockHash) if err != nil { return false, false, err } if exists { str := fmt.Sprintf("already have block %v", blockHash) return false, false, ruleError(ErrDuplicateBlock, str) } // The block must not already exist as an orphan. if _, exists := b.orphans[*blockHash]; exists { str := fmt.Sprintf("already have block (orphan) %v", blockHash) return false, false, ruleError(ErrDuplicateBlock, str) } // Perform preliminary sanity checks on the block and its transactions. err = checkBlockSanity(block, timeSource, flags, b.chainParams) if err != nil { return false, false, err } // Find the previous checkpoint and perform some additional checks based // on the checkpoint. This provides a few nice properties such as // preventing old side chain blocks before the last checkpoint, // rejecting easy to mine, but otherwise bogus, blocks that could be // used to eat memory, and ensuring expected (versus claimed) proof of // work requirements since the previous checkpoint are met. blockHeader := &block.MsgBlock().Header checkpointBlock, err := b.findPreviousCheckpoint() if err != nil { return false, false, err } if checkpointBlock != nil { // Ensure the block timestamp is after the checkpoint timestamp. checkpointHeader := &checkpointBlock.MsgBlock().Header checkpointTime := checkpointHeader.Timestamp if blockHeader.Timestamp.Before(checkpointTime) { str := fmt.Sprintf("block %v has timestamp %v before "+ "last checkpoint timestamp %v", blockHash, blockHeader.Timestamp, checkpointTime) return false, false, ruleError(ErrCheckpointTimeTooOld, str) } if !fastAdd { // Even though the checks prior to now have already ensured the // proof of work exceeds the claimed amount, the claimed amount // is a field in the block header which could be forged. This // check ensures the proof of work is at least the minimum // expected based on elapsed time since the last checkpoint and // maximum adjustment allowed by the retarget rules. duration := blockHeader.Timestamp.Sub(checkpointTime) requiredTarget := CompactToBig(b.calcEasiestDifficulty( checkpointHeader.Bits, duration)) currentTarget := CompactToBig(blockHeader.Bits) if currentTarget.Cmp(requiredTarget) > 0 { str := fmt.Sprintf("block target difficulty of %064x "+ "is too low when compared to the previous "+ "checkpoint", currentTarget) return false, false, ruleError(ErrDifficultyTooLow, str) } } } // Handle orphan blocks.
prevHash := &blockHeader.PrevBlock if !prevHash.IsEqual(zeroHash) { prevHashExists, err := b.blockExists(prevHash) if err != nil { return false, false, err } if !prevHashExists { if !dryRun { log.Infof("Adding orphan block %v with parent %v", blockHash, prevHash) b.addOrphanBlock(block) } return false, true, err } } // The block has passed all context independent checks and appears sane // enough to potentially accept it into the block chain. var onMainChain bool onMainChain, err = b.maybeAcceptBlock(block, flags) if err != nil { return false, false, err } // Don't process any orphans or log when the dry run flag is set. if !dryRun { // Accept any orphan blocks that depend on this block (they are // no longer orphans) and repeat for those accepted blocks until // there are no more. err := b.processOrphans(blockHash, flags) if err != nil { return false, false, err } log.Debugf("Accepted block %v", blockHash) } return onMainChain, false, err }
// indexBlockAddrs returns a populated index of all the transactions in the // passed block based on the addresses involved in each transaction. func (a *addrIndexer) indexBlockAddrs(blk *dcrutil.Block, parent *dcrutil.Block) (database.BlockAddrIndex, error) { var addrIndex database.BlockAddrIndex _, stxLocs, err := blk.TxLoc() if err != nil { return nil, err } txTreeRegularValid := dcrutil.IsFlagSet16(blk.MsgBlock().Header.VoteBits, dcrutil.BlockValid) // Add regular transactions iff the block was validated. if txTreeRegularValid { txLocs, _, err := parent.TxLoc() if err != nil { return nil, err } for txIdx, tx := range parent.Transactions() { // Tx's offset and length in the block. locInBlock := &txLocs[txIdx] // Coinbases don't have any inputs. if !blockchain.IsCoinBase(tx) { // Index the SPK's of each input's previous outpoint // transaction. for _, txIn := range tx.MsgTx().TxIn { prevOutTx, err := a.lookupTransaction( txIn.PreviousOutPoint.Hash, blk, parent) if err != nil { adxrLog.Tracef("Error looking up tx %v: %v", txIn.PreviousOutPoint.Hash, err) continue } inputOutPoint := prevOutTx.TxOut[txIn.PreviousOutPoint.Index] toAppend, err := convertToAddrIndex(inputOutPoint.Version, inputOutPoint.PkScript, parent.Height(), locInBlock) if err != nil { adxrLog.Tracef("Error converting tx txin %v: %v", txIn.PreviousOutPoint.Hash, err) continue } addrIndex = append(addrIndex, toAppend...) } } for _, txOut := range tx.MsgTx().TxOut { toAppend, err := convertToAddrIndex(txOut.Version, txOut.PkScript, parent.Height(), locInBlock) if err != nil { adxrLog.Tracef("Error converting tx txout %v: %v", tx.MsgTx().TxSha(), err) continue } addrIndex = append(addrIndex, toAppend...) } } } // Add stake transactions. for stxIdx, stx := range blk.STransactions() { // Tx's offset and length in the block. locInBlock := &stxLocs[stxIdx] isSSGen, _ := stake.IsSSGen(stx) // Index the SPK's of each input's previous outpoint // transaction. for i, txIn := range stx.MsgTx().TxIn { // Stakebases don't have any inputs. if isSSGen && i == 0 { continue } // Lookup and fetch the referenced output's tx. prevOutTx, err := a.lookupTransaction( txIn.PreviousOutPoint.Hash, blk, parent) if err != nil { adxrLog.Tracef("Error looking up stx %v: %v", txIn.PreviousOutPoint.Hash, err) continue } inputOutPoint := prevOutTx.TxOut[txIn.PreviousOutPoint.Index] toAppend, err := convertToAddrIndex(inputOutPoint.Version, inputOutPoint.PkScript, blk.Height(), locInBlock) if err != nil { adxrLog.Tracef("Error converting stx txin %v: %v", txIn.PreviousOutPoint.Hash, err) continue } addrIndex = append(addrIndex, toAppend...) } for _, txOut := range stx.MsgTx().TxOut { toAppend, err := convertToAddrIndex(txOut.Version, txOut.PkScript, blk.Height(), locInBlock) if err != nil { adxrLog.Tracef("Error converting stx txout %v: %v", stx.MsgTx().TxSha(), err) continue } addrIndex = append(addrIndex, toAppend...) } } return addrIndex, nil }
// IsCheckpointCandidate returns whether or not the passed block is a good // checkpoint candidate. // // The factors used to determine a good checkpoint are: // - The block must be in the main chain // - The block must be at least 'CheckpointConfirmations' blocks prior to the // current end of the main chain // - The timestamps for the blocks before and after the checkpoint must have // timestamps which are also before and after the checkpoint, respectively // (due to the median time allowance this is not always the case) // - The block must not contain any strange transaction such as those with // nonstandard scripts // // The intent is that candidates are reviewed by a developer to make the final // decision and then manually added to the list of checkpoints for a network. func (b *BlockChain) IsCheckpointCandidate(block *dcrutil.Block) (bool, error) { // Checkpoints must be enabled. if b.noCheckpoints { return false, fmt.Errorf("checkpoints are disabled") } // A checkpoint must be in the main chain. exists, err := b.db.ExistsSha(block.Sha()) if err != nil { return false, err } if !exists { return false, nil } // A checkpoint must be at least CheckpointConfirmations blocks before // the end of the main chain. blockHeight := block.Height() _, mainChainHeight, err := b.db.NewestSha() if err != nil { return false, err } if blockHeight > (mainChainHeight - CheckpointConfirmations) { return false, nil } // Get the previous block. prevHash := &block.MsgBlock().Header.PrevBlock prevBlock, err := b.db.FetchBlockBySha(prevHash) if err != nil { return false, err } // Get the next block. nextHash, err := b.db.FetchBlockShaByHeight(blockHeight + 1) if err != nil { return false, err } nextBlock, err := b.db.FetchBlockBySha(nextHash) if err != nil { return false, err } // A checkpoint must have timestamps for the block and the blocks on // either side of it in order (due to the median time allowance this is // not always the case). prevTime := prevBlock.MsgBlock().Header.Timestamp curTime := block.MsgBlock().Header.Timestamp nextTime := nextBlock.MsgBlock().Header.Timestamp if prevTime.After(curTime) || nextTime.Before(curTime) { return false, nil } // A checkpoint must have transactions that only contain standard // scripts. for _, tx := range block.Transactions() { if isNonstandardTransaction(tx) { return false, nil } } return true, nil }
// maybeAcceptBlock potentially accepts a block into the memory block chain. // It performs several validation checks which depend on its position within // the block chain before adding it. The block is expected to have already gone // through ProcessBlock before calling this function with it. // // The flags modify the behavior of this function as follows: // - BFDryRun: The memory chain index will not be pruned and no accept // notification will be sent since the block is not being accepted. // // This function MUST be called with the chain state lock held (for writes). func (b *BlockChain) maybeAcceptBlock(block *dcrutil.Block, flags BehaviorFlags) (bool, error) { dryRun := flags&BFDryRun == BFDryRun // Get a block node for the block previous to this one. Will be nil // if this is the genesis block. prevNode, err := b.getPrevNodeFromBlock(block) if err != nil { log.Debugf("getPrevNodeFromBlock: %v", err) return false, err } // The height of this block is one more than the referenced previous // block. blockHeight := int64(0) if prevNode != nil { blockHeight = prevNode.height + 1 } block.SetHeight(blockHeight) // The block must pass all of the validation rules which depend on the // position of the block within the block chain. err = b.checkBlockContext(block, prevNode, flags) if err != nil { return false, err } // Prune stake nodes and block nodes which are no longer needed before // creating a new node. if !dryRun { err := b.pruner.pruneChainIfNeeded() if err != nil { return false, err } } // Create a new block node for the block and add it to the in-memory // block chain (could be either a side chain or the main chain). blockHeader := &block.MsgBlock().Header newNode := newBlockNode(blockHeader, block.Sha(), blockHeight, ticketsSpentInBlock(block), ticketsRevokedInBlock(block), voteVersionsInBlock(block, b.chainParams)) if prevNode != nil { newNode.parent = prevNode newNode.height = blockHeight newNode.workSum.Add(prevNode.workSum, newNode.workSum) } // Fetching a stake node could enable a new DoS vector, so restrict // this only to blocks that are recent in history. if newNode.height < b.bestNode.height-minMemoryNodes { newNode.stakeNode, err = b.fetchStakeNode(newNode) if err != nil { return false, err } newNode.stakeUndoData = newNode.stakeNode.UndoData() } // Connect the passed block to the chain while respecting proper chain // selection according to the chain with the most proof of work. This // also handles validation of the transaction scripts. var onMainChain bool onMainChain, err = b.connectBestChain(newNode, block, flags) if err != nil { return false, err } // Notify the caller that the new block was accepted into the block // chain. The caller would typically want to react by relaying the // inventory to other peers. if !dryRun { b.chainLock.Unlock() b.sendNotification(NTBlockAccepted, &BlockAcceptedNtfnsData{onMainChain, block}) b.chainLock.Lock() } return onMainChain, nil }
// IsCheckpointCandidate returns whether or not the passed block is a good // checkpoint candidate. // // The factors used to determine a good checkpoint are: // - The block must be in the main chain // - The block must be at least 'CheckpointConfirmations' blocks prior to the // current end of the main chain // - The timestamps for the blocks before and after the checkpoint must have // timestamps which are also before and after the checkpoint, respectively // (due to the median time allowance this is not always the case) // - The block must not contain any strange transaction such as those with // nonstandard scripts // // The intent is that candidates are reviewed by a developer to make the final // decision and then manually added to the list of checkpoints for a network. // // This function is safe for concurrent access. func (b *BlockChain) IsCheckpointCandidate(block *dcrutil.Block) (bool, error) { b.chainLock.RLock() defer b.chainLock.RUnlock() // Checkpoints must be enabled. if b.noCheckpoints { return false, fmt.Errorf("checkpoints are disabled") } var isCandidate bool err := b.db.View(func(dbTx database.Tx) error { // A checkpoint must be in the main chain. blockHeight, err := dbFetchHeightByHash(dbTx, block.Sha()) if err != nil { // Only return an error if it's not due to the block not // being in the main chain. if !isNotInMainChainErr(err) { return err } return nil } // Ensure the height of the passed block and the entry for the // block in the main chain match. This should always be the // case unless the caller provided an invalid block. if blockHeight != block.Height() { return fmt.Errorf("passed block height of %d does not "+ "match the main chain height of %d", block.Height(), blockHeight) } // A checkpoint must be at least CheckpointConfirmations blocks // before the end of the main chain. mainChainHeight := b.bestNode.height if blockHeight > (mainChainHeight - CheckpointConfirmations) { return nil } // Get the previous block header. prevHash := &block.MsgBlock().Header.PrevBlock prevHeader, err := dbFetchHeaderByHash(dbTx, prevHash) if err != nil { return err } // Get the next block header. nextHeader, err := dbFetchHeaderByHeight(dbTx, blockHeight+1) if err != nil { return err } // A checkpoint must have timestamps for the block and the // blocks on either side of it in order (due to the median time // allowance this is not always the case). prevTime := prevHeader.Timestamp curTime := block.MsgBlock().Header.Timestamp nextTime := nextHeader.Timestamp if prevTime.After(curTime) || nextTime.Before(curTime) { return nil } // A checkpoint must have transactions that only contain // standard scripts. for _, tx := range block.Transactions() { if isNonstandardTransaction(tx) { return nil } } // All of the checks passed, so the block is a candidate. isCandidate = true return nil }) return isCandidate, err }
// ConnectBlock is invoked by the index manager when a new block has been // connected to the main chain. This indexer adds a key for each address // the transactions in the block involve. // // This is part of the Indexer interface. func (idx *ExistsAddrIndex) ConnectBlock(dbTx database.Tx, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error { regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) var parentTxs []*dcrutil.Tx if regularTxTreeValid && block.Height() > 1 { parentTxs = parent.Transactions() } blockTxns := block.STransactions() allTxns := append(parentTxs, blockTxns...) usedAddrs := make(map[[addrKeySize]byte]struct{}) for _, tx := range allTxns { msgTx := tx.MsgTx() isSStx, _ := stake.IsSStx(msgTx) for _, txIn := range msgTx.TxIn { if txscript.IsMultisigSigScript(txIn.SignatureScript) { rs, err := txscript.MultisigRedeemScriptFromScriptSig( txIn.SignatureScript) if err != nil { continue } class, addrs, _, err := txscript.ExtractPkScriptAddrs( txscript.DefaultScriptVersion, rs, idx.chainParams) if err != nil { // Non-standard outputs are skipped. continue } if class != txscript.MultiSigTy { // This should never happen, but be paranoid. continue } for _, addr := range addrs { k, err := addrToKey(addr, idx.chainParams) if err != nil { continue } usedAddrs[k] = struct{}{} } } } for _, txOut := range tx.MsgTx().TxOut { class, addrs, _, err := txscript.ExtractPkScriptAddrs( txOut.Version, txOut.PkScript, idx.chainParams) if err != nil { // Non-standard outputs are skipped. continue } if isSStx && class == txscript.NullDataTy { addr, err := stake.AddrFromSStxPkScrCommitment(txOut.PkScript, idx.chainParams) if err != nil { // Ignore unsupported address types. continue } addrs = append(addrs, addr) } for _, addr := range addrs { k, err := addrToKey(addr, idx.chainParams) if err != nil { // Ignore unsupported address types. continue } usedAddrs[k] = struct{}{} } } } // Write all the newly used addresses to the database, // skipping any keys that already exist. Write any // addresses we see in mempool at this time, too, // then remove them from the unconfirmed map by // dropping the old map and reassigning a new map. idx.unconfirmedLock.Lock() for k := range idx.mpExistsAddr { usedAddrs[k] = struct{}{} } idx.mpExistsAddr = make(map[[addrKeySize]byte]struct{}) idx.unconfirmedLock.Unlock() meta := dbTx.Metadata() existsAddrIndex := meta.Bucket(existsAddrIndexKey) newUsedAddrs := make(map[[addrKeySize]byte]struct{}) for k := range usedAddrs { if !idx.existsAddress(existsAddrIndex, k) { newUsedAddrs[k] = struct{}{} } } for k := range newUsedAddrs { err := dbPutExistsAddr(existsAddrIndex, k) if err != nil { return err } } return nil }
// DropAfterBlockBySha will remove any blocks from the database after // the given block. func (db *LevelDb) DropAfterBlockBySha(sha *chainhash.Hash) (rerr error) { db.dbLock.Lock() defer db.dbLock.Unlock() defer func() { if rerr == nil { rerr = db.processBatches() } else { db.lBatch().Reset() } }() startheight := db.nextBlock - 1 keepidx, err := db.getBlkLoc(sha) if err != nil { // should the error here be normalized ? log.Tracef("block loc failed %v ", sha) return err } for height := startheight; height > keepidx; height = height - 1 { var blk *dcrutil.Block blksha, buf, err := db.getBlkByHeight(height) if err != nil { return err } blk, err = dcrutil.NewBlockFromBytes(buf) if err != nil { return err } // Obtain previous block sha and buffer var blkprev *dcrutil.Block _, bufprev, errprev := db.getBlkByHeight(height - 1) // discard blkshaprev if errprev != nil { return errprev } // Do the same thing for the parent block blkprev, errprev = dcrutil.NewBlockFromBytes(bufprev) if errprev != nil { return errprev } // Unspend the stake tx in the current block for _, tx := range blk.MsgBlock().STransactions { err = db.unSpend(tx) if err != nil { return err } } // rather than iterate the list of tx backward, do it twice. for _, tx := range blk.STransactions() { var txUo txUpdateObj txUo.delete = true db.txUpdateMap[*tx.Sha()] = &txUo } // Check to see if the regular txs of the parent were even included; if // they are, unspend all of these regular tx too votebits := blk.MsgBlock().Header.VoteBits if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && height != 0 { // Unspend the regular tx in the current block for _, tx := range blkprev.MsgBlock().Transactions { err = db.unSpend(tx) if err != nil { return err } } // rather than iterate the list of tx backward, do it twice. for _, tx := range blkprev.Transactions() { var txUo txUpdateObj txUo.delete = true db.txUpdateMap[*tx.Sha()] = &txUo } } db.lBatch().Delete(shaBlkToKey(blksha)) db.lBatch().Delete(int64ToKey(height)) } // update the last block cache db.lastBlkShaCached = true db.lastBlkSha = *sha db.lastBlkIdx = keepidx db.nextBlock = keepidx + 1 return nil }
// InsertBlock inserts raw block and transaction data from a block into the // database. The first block inserted into the database will be treated as the // genesis block. Every subsequent block insert requires the referenced parent // block to already exist. func (db *LevelDb) InsertBlock(block *dcrutil.Block) (height int64, rerr error) { // Be careful with this function on syncs. It contains decred changes. // Obtain the previous block first so long as it's not the genesis block var blockPrev *dcrutil.Block // Decred: WARNING. This function assumes that all block insertion calls have // dcrutil.blocks passed to them with block.blockHeight set correctly. However, // loading the genesis block in btcd didn't do this (via block manager); pre- // production it should be established that all calls to this function pass // blocks with block.blockHeight set correctly. if block.Height() != 0 { var errBlockPrev error blockPrev, errBlockPrev = db.FetchBlockBySha(&block.MsgBlock().Header.PrevBlock) if errBlockPrev != nil { blockSha := block.Sha() log.Warnf("Failed to fetch parent block of block %v", blockSha) return 0, errBlockPrev } } db.dbLock.Lock() defer db.dbLock.Unlock() defer func() { if rerr == nil { rerr = db.processBatches() } else { db.lBatch().Reset() } }() blocksha := block.Sha() mblock := block.MsgBlock() rawMsg, err := block.Bytes() if err != nil { log.Warnf("Failed to obtain raw block sha %v", blocksha) return 0, err } _, sTxLoc, err := block.TxLoc() if err != nil { log.Warnf("Failed to obtain raw block sha %v, stxloc %v", blocksha, sTxLoc) return 0, err } // Insert block into database newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock, rawMsg) if err != nil { log.Warnf("Failed to insert block %v %v %v", blocksha, &mblock.Header.PrevBlock, err) return 0, err } // Get data necessary to process regular tx tree of parent block if it's not // the genesis block. var mBlockPrev *wire.MsgBlock var txLoc []wire.TxLoc if blockPrev != nil { blockShaPrev := blockPrev.Sha() mBlockPrev = blockPrev.MsgBlock() txLoc, _, err = blockPrev.TxLoc() if err != nil { log.Warnf("Failed to obtain raw block sha %v, txloc %v", blockShaPrev, txLoc) return 0, err } } // Insert the regular tx of the parent block into the tx database if the vote // bits enable it, and if it's not the genesis block. votebits := mblock.Header.VoteBits if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil { for txidx, tx := range mBlockPrev.Transactions { txsha, err := blockPrev.TxSha(txidx) if err != nil { log.Warnf("failed to compute tx name block %v idx %v err %v", blocksha, txidx, err) return 0, err } spentbuflen := (len(tx.TxOut) + 7) / 8 spentbuf := make([]byte, spentbuflen, spentbuflen) if len(tx.TxOut)%8 != 0 { for i := uint(len(tx.TxOut) % 8); i < 8; i++ { spentbuf[spentbuflen-1] |= (byte(1) << i) } } // newheight-1 instead of newheight below, as the tx is actually found // in the parent. //fmt.Printf("insert tx %v into db at height %v\n", txsha, newheight) err = db.insertTx(txsha, newheight-1, uint32(txidx), txLoc[txidx].TxStart, txLoc[txidx].TxLen, spentbuf) if err != nil { log.Warnf("block %v idx %v failed to insert tx %v %v err %v", blocksha, newheight-1, &txsha, txidx, err) return 0, err } err = db.doSpend(tx) if err != nil { log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, txsha, txidx, err) return 0, err } } } // Insert the stake tx of the current block into the tx database. 
if len(mblock.STransactions) != 0 { for txidx, tx := range mblock.STransactions { txsha, err := block.STxSha(txidx) if err != nil { log.Warnf("failed to compute stake tx name block %v idx %v err %v", blocksha, txidx, err) return 0, err } spentbuflen := (len(tx.TxOut) + 7) / 8 spentbuf := make([]byte, spentbuflen, spentbuflen) if len(tx.TxOut)%8 != 0 { for i := uint(len(tx.TxOut) % 8); i < 8; i++ { spentbuf[spentbuflen-1] |= (byte(1) << i) } } err = db.insertTx(txsha, newheight, uint32(txidx), sTxLoc[txidx].TxStart, sTxLoc[txidx].TxLen, spentbuf) if err != nil { log.Warnf("block %v idx %v failed to insert stake tx %v %v err %v", blocksha, newheight, &txsha, txidx, err) return 0, err } err = db.doSpend(tx) if err != nil { log.Warnf("block %v idx %v failed to spend stx %v %v err %v", blocksha, newheight, txsha, txidx, err) return 0, err } } } return newheight, nil }
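InsertBlock tracks spent outputs with a one-bit-per-output buffer and pre-marks the unused padding bits of the final byte. The following stdlib-only sketch shows that buffer construction for a hypothetical transaction with 11 outputs:

package main

import "fmt"

func main() {
	numTxOut := 11 // hypothetical output count

	// One bit per output, rounded up to whole bytes.
	spentbuflen := (numTxOut + 7) / 8
	spentbuf := make([]byte, spentbuflen)

	// Pre-mark the unused high bits of the last byte as spent, mirroring the
	// padding loop in InsertBlock, so the buffer reads fully spent once all
	// real outputs are spent.
	if numTxOut%8 != 0 {
		for i := uint(numTxOut % 8); i < 8; i++ {
			spentbuf[spentbuflen-1] |= byte(1) << i
		}
	}

	fmt.Printf("%08b\n", spentbuf) // [00000000 11111000]
}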
func connectTransactions(txStore TxStore, block *dcrutil.Block, parent *dcrutil.Block) error { // There is no regular tx from before the genesis block, so ignore the genesis // block for the next step. if parent != nil && block.Height() != 0 { mBlock := block.MsgBlock() votebits := mBlock.Header.VoteBits regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) // Only add the transactions in the event that the parent block's regular // tx were validated. if regularTxTreeValid { // Loop through all of the regular transactions in the block to see if // any of them are ones we need to update and spend based on the // results map. for i, tx := range parent.Transactions() { // Update the transaction store with the transaction information // if it's one of the requested transactions. msgTx := tx.MsgTx() if txD, exists := txStore[*tx.Sha()]; exists { txD.Tx = tx txD.BlockHeight = block.Height() - 1 txD.BlockIndex = uint32(i) txD.Spent = make([]bool, len(msgTx.TxOut)) txD.Err = nil } // Spend the origin transaction output. for _, txIn := range msgTx.TxIn { originHash := &txIn.PreviousOutPoint.Hash originIndex := txIn.PreviousOutPoint.Index if originTx, exists := txStore[*originHash]; exists { if originTx.Spent == nil { continue } if originIndex >= uint32(len(originTx.Spent)) { continue } originTx.Spent[originIndex] = true } } } } } // Loop through all of the stake transactions in the block to see if any of // them are ones we need to update and spend based on the results map. for i, tx := range block.STransactions() { // Update the transaction store with the transaction information // if it's one of the requested transactions. msgTx := tx.MsgTx() if txD, exists := txStore[*tx.Sha()]; exists { txD.Tx = tx txD.BlockHeight = block.Height() txD.BlockIndex = uint32(i) txD.Spent = make([]bool, len(msgTx.TxOut)) txD.Err = nil } // Spend the origin transaction output. for _, txIn := range msgTx.TxIn { originHash := &txIn.PreviousOutPoint.Hash originIndex := txIn.PreviousOutPoint.Index if originTx, exists := txStore[*originHash]; exists { if originTx.Spent == nil { continue } if originIndex >= uint32(len(originTx.Spent)) { continue } originTx.Spent[originIndex] = true } } } return nil }
// makeUtxoView creates a mock unspent transaction output view by using the // transaction index in order to look up all inputs referenced by the // transactions in the block. This is sometimes needed when catching indexes up // because many of the txouts could actually already be spent; however, the // associated scripts are still required to index them. func makeUtxoView(dbTx database.Tx, block, parent *dcrutil.Block) (*blockchain.UtxoViewpoint, error) { view := blockchain.NewUtxoViewpoint() regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) if regularTxTreeValid { for txIdx, tx := range parent.Transactions() { // Coinbases do not reference any inputs. Since the block is // required to have already gone through full validation, it has // already been proven that the first transaction in the block is // a coinbase. if txIdx == 0 { continue } // Use the transaction index to load all of the referenced // inputs and add their outputs to the view. for _, txIn := range tx.MsgTx().TxIn { // Skip already fetched outputs. originOut := &txIn.PreviousOutPoint if view.LookupEntry(&originOut.Hash) != nil { continue } originTx, err := dbFetchTx(dbTx, originOut.Hash) if err != nil { return nil, err } view.AddTxOuts(dcrutil.NewTx(originTx), int64(wire.NullBlockHeight), wire.NullBlockIndex) } } } for _, tx := range block.STransactions() { msgTx := tx.MsgTx() isSSGen, _ := stake.IsSSGen(msgTx) // Use the transaction index to load all of the referenced // inputs and add their outputs to the view. for i, txIn := range msgTx.TxIn { // Skip stakebases. if isSSGen && i == 0 { continue } originOut := &txIn.PreviousOutPoint if view.LookupEntry(&originOut.Hash) != nil { continue } originTx, err := dbFetchTx(dbTx, originOut.Hash) if err != nil { return nil, err } view.AddTxOuts(dcrutil.NewTx(originTx), int64(wire.NullBlockHeight), wire.NullBlockIndex) } } return view, nil }
// disconnectTransactions updates the passed map by undoing transaction and // spend information for all transactions in the passed block. Only // transactions in the passed map are updated. func disconnectTransactions(txStore TxStore, block *dcrutil.Block, parent *dcrutil.Block) error { // Loop through all of the stake transactions in the block to see if any of // them are ones that need to be undone based on the transaction store. for _, tx := range block.STransactions() { // Clear this transaction from the transaction store if needed. // Only clear it rather than deleting it because the transaction // connect code relies on its presence to decide whether or not // to update the store and any transactions which exist on both // sides of a fork would otherwise not be updated. if txD, exists := txStore[*tx.Sha()]; exists { txD.Tx = nil txD.BlockHeight = int64(wire.NullBlockHeight) txD.BlockIndex = wire.NullBlockIndex txD.Spent = nil txD.Err = database.ErrTxShaMissing } // Unspend the origin transaction output. for _, txIn := range tx.MsgTx().TxIn { originHash := &txIn.PreviousOutPoint.Hash originIndex := txIn.PreviousOutPoint.Index originTx, exists := txStore[*originHash] if exists && originTx.Tx != nil && originTx.Err == nil { if originTx.Spent == nil { continue } if originIndex >= uint32(len(originTx.Spent)) { continue } originTx.Spent[originIndex] = false } } } // There is no regular tx from before the genesis block, so ignore the genesis // block for the next step. if parent != nil && block.Height() != 0 { mBlock := block.MsgBlock() votebits := mBlock.Header.VoteBits regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) // Only bother to unspend transactions if the parent's tx tree was // validated. Otherwise, these transactions were never in the blockchain's // history in the first place. if regularTxTreeValid { // Loop through all of the regular transactions in the block to see if // any of them are ones that need to be undone based on the // transaction store. for _, tx := range parent.Transactions() { // Clear this transaction from the transaction store if needed. // Only clear it rather than deleting it because the transaction // connect code relies on its presence to decide whether or not // to update the store and any transactions which exist on both // sides of a fork would otherwise not be updated. if txD, exists := txStore[*tx.Sha()]; exists { txD.Tx = nil txD.BlockHeight = int64(wire.NullBlockHeight) txD.BlockIndex = wire.NullBlockIndex txD.Spent = nil txD.Err = database.ErrTxShaMissing } // Unspend the origin transaction output. for _, txIn := range tx.MsgTx().TxIn { originHash := &txIn.PreviousOutPoint.Hash originIndex := txIn.PreviousOutPoint.Index originTx, exists := txStore[*originHash] if exists && originTx.Tx != nil && originTx.Err == nil { if originTx.Spent == nil { continue } if originIndex >= uint32(len(originTx.Spent)) { continue } originTx.Spent[originIndex] = false } } } } } return nil }
// spendTickets transfers tickets from the ticketMap to the spentTicketMap. Useful // when connecting blocks. Also pushes missed tickets to the missed ticket map. // usedtickets is a map that contains all tickets that were actually used in SSGen // votes; all other tickets are considered missed. // // This function MUST be called with the tmdb lock held (for writes). func (tmdb *TicketDB) spendTickets(parentBlock *dcrutil.Block, usedTickets map[chainhash.Hash]struct{}, spendingHashes map[chainhash.Hash]chainhash.Hash) (SStxMemMap, error) { // If there is nothing being spent, break. if len(spendingHashes) < 1 { return nil, nil } // Make sure there's a bucket in the map for used tickets height := parentBlock.Height() + 1 tmdb.maybeInsertBlock(height) tempTickets := make(SStxMemMap) // Sort the entire list of tickets lexicographically by sorting // each bucket and then appending it to the list. totalTickets := 0 var sortedSlice []*TicketData for i := 0; i < BucketsSize; i++ { mapLen := len(tmdb.maps.ticketMap[i]) totalTickets += mapLen tempTdSlice := NewTicketDataSlice(mapLen) itr := 0 // Iterator for _, td := range tmdb.maps.ticketMap[i] { tempTdSlice[itr] = td itr++ } sort.Sort(tempTdSlice) sortedSlice = append(sortedSlice, tempTdSlice...) } // Use the parent block's header to seed a PRNG that picks the lottery winners. ticketsPerBlock := int(tmdb.chainParams.TicketsPerBlock) pbhB, err := parentBlock.MsgBlock().Header.Bytes() if err != nil { return nil, err } prng := NewHash256PRNG(pbhB) ts, err := FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng) if err != nil { return nil, err } ticketsToSpendOrMiss := make([]*TicketData, ticketsPerBlock, ticketsPerBlock) for i, idx := range ts { ticketsToSpendOrMiss[i] = sortedSlice[idx] } // Spend or miss these tickets by checking for their existence in the // passed usedtickets map. tixSpent := 0 tixMissed := 0 for _, ticket := range ticketsToSpendOrMiss { // Move the ticket from active tickets map into the used tickets map // if the ticket was spent. _, wasSpent := usedTickets[ticket.SStxHash] if wasSpent { ticket.Missed = false ticket.SpendHash = spendingHashes[ticket.SStxHash] err := tmdb.pushSpentTicket(height, ticket) if err != nil { return nil, err } err = tmdb.removeLiveTicket(ticket) if err != nil { return nil, err } tixSpent++ } else { // Ticket missed being spent and --> false or nil ticket.Missed = true // TODO fix test failure @ L150 due to this err := tmdb.pushSpentTicket(height, ticket) if err != nil { return nil, err } err = tmdb.pushMissedTicket(ticket) if err != nil { return nil, err } err = tmdb.removeLiveTicket(ticket) if err != nil { return nil, err } tixMissed++ } // Report on the spent and missed tickets for the block in debug. if ticket.Missed { log.Debugf("Ticket %v has been missed and expired from "+ "the lottery pool as a missed ticket", ticket.SStxHash) } else { log.Debugf("Ticket %v was spent and removed from "+ "the lottery pool", ticket.SStxHash) } // Add the ticket to the temporary tickets buffer for later use in // map restoration if needed. tempTickets[ticket.SStxHash] = ticket } // Some sanity checks. 
if tixSpent != len(usedTickets) { errStr := fmt.Sprintf("spendTickets error: an invalid number of "+ "tickets (%v) was spent, but %v tickets should "+ "have been spent", tixSpent, len(usedTickets)) return nil, errors.New(errStr) } if tixMissed != (ticketsPerBlock - len(usedTickets)) { errStr := fmt.Sprintf("spendTickets error: an invalid number of "+ "tickets (%v) was missed, but %v tickets should "+ "have been missed", tixMissed, ticketsPerBlock-len(usedTickets)) return nil, errors.New(errStr) } if (tixSpent + tixMissed) != ticketsPerBlock { errStr := fmt.Sprintf("spendTickets error: an invalid number of "+ "tickets (%v) was spent and missed, but TicketsPerBlock (%v) "+ "tickets should have been spent and missed", tixSpent+tixMissed, ticketsPerBlock) return nil, errors.New(errStr) } return tempTickets, nil }
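spendTickets sorts the live ticket pool, seeds a PRNG from the parent block header, and draws TicketsPerBlock winners. The sketch below is a simplified stand-in for the Hash256PRNG/FindTicketIdxs pair, using SHA-256 as the stream and hypothetical ticket hashes; it only illustrates the shape of the selection, not the consensus algorithm:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"sort"
)

// pickWinners draws n distinct indices from a pool of poolSize entries using
// a deterministic stream seeded from the parent header bytes. Simplified
// stand-in only; the real algorithm differs in detail (and avoids modulo bias).
func pickWinners(headerBytes []byte, poolSize, n int) []int {
	seed := sha256.Sum256(headerBytes)
	chosen := make(map[int]struct{})
	var winners []int
	for ctr := uint64(0); len(winners) < n; ctr++ {
		var buf [8]byte
		binary.LittleEndian.PutUint64(buf[:], ctr)
		h := sha256.Sum256(append(seed[:], buf[:]...))
		idx := int(binary.LittleEndian.Uint64(h[:8]) % uint64(poolSize))
		if _, ok := chosen[idx]; ok {
			continue
		}
		chosen[idx] = struct{}{}
		winners = append(winners, idx)
	}
	return winners
}

func main() {
	// Hypothetical live ticket hashes, sorted lexicographically as in
	// spendTickets.
	pool := []string{"a1", "b7", "c3", "d9", "e2", "f4", "0a", "17"}
	sort.Strings(pool)

	header := []byte("illustrative parent block header bytes")
	for _, idx := range pickWinners(header, len(pool), 5) {
		fmt.Println("selected ticket:", pool[idx])
	}
}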
// DebugBlockHeaderString dumps a verbose message containing information about
// the block header of a block.
func DebugBlockHeaderString(chainParams *chaincfg.Params, block *dcrutil.Block) string {
	bh := block.MsgBlock().Header

	var buffer bytes.Buffer

	str := fmt.Sprintf("Version: %v\n", bh.Version)
	buffer.WriteString(str)
	str = fmt.Sprintf("Previous block: %v\n", bh.PrevBlock)
	buffer.WriteString(str)
	str = fmt.Sprintf("Merkle root (reg): %v\n", bh.MerkleRoot)
	buffer.WriteString(str)
	str = fmt.Sprintf("Merkle root (stk): %v\n", bh.StakeRoot)
	buffer.WriteString(str)
	str = fmt.Sprintf("VoteBits: %v\n", bh.VoteBits)
	buffer.WriteString(str)
	str = fmt.Sprintf("FinalState: %v\n", bh.FinalState)
	buffer.WriteString(str)
	str = fmt.Sprintf("Voters: %v\n", bh.Voters)
	buffer.WriteString(str)
	str = fmt.Sprintf("FreshStake: %v\n", bh.FreshStake)
	buffer.WriteString(str)
	str = fmt.Sprintf("Revocations: %v\n", bh.Revocations)
	buffer.WriteString(str)
	str = fmt.Sprintf("PoolSize: %v\n", bh.PoolSize)
	buffer.WriteString(str)
	str = fmt.Sprintf("Timestamp: %v\n", bh.Timestamp)
	buffer.WriteString(str)

	bitsBig := CompactToBig(bh.Bits)
	if bitsBig.Cmp(bigZero) != 0 {
		bitsBig.Div(chainParams.PowLimit, bitsBig)
	}
	diff := bitsBig.Int64()
	str = fmt.Sprintf("Bits: %v (Difficulty: %v)\n", bh.Bits, diff)
	buffer.WriteString(str)

	str = fmt.Sprintf("SBits: %v (In coins: %v)\n", bh.SBits,
		float64(bh.SBits)/dcrutil.AtomsPerCoin)
	buffer.WriteString(str)
	str = fmt.Sprintf("Nonce: %v\n", bh.Nonce)
	buffer.WriteString(str)
	str = fmt.Sprintf("Height: %v\n", bh.Height)
	buffer.WriteString(str)
	str = fmt.Sprintf("Size: %v\n", bh.Size)
	buffer.WriteString(str)

	return buffer.String()
}
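// The following sketch shows how the "Bits (Difficulty: ...)" line in
// DebugBlockHeaderString is derived.  compactToBig mirrors the standard
// compact-target encoding used by Bitcoin-family chains (low 23 bits are the
// mantissa, bit 23 is the sign, top byte is a base-256 exponent); the PowLimit
// value and the sample bits below are illustrative assumptions only, not
// chain parameters taken from this codebase.
package main

import (
	"fmt"
	"math/big"
)

// compactToBig expands the 32-bit compact representation of a target into a
// big integer.
func compactToBig(compact uint32) *big.Int {
	mantissa := compact & 0x007fffff
	isNegative := compact&0x00800000 != 0
	exponent := uint(compact >> 24)

	var bn *big.Int
	if exponent <= 3 {
		mantissa >>= 8 * (3 - exponent)
		bn = big.NewInt(int64(mantissa))
	} else {
		bn = big.NewInt(int64(mantissa))
		bn.Lsh(bn, 8*(exponent-3))
	}
	if isNegative {
		bn.Neg(bn)
	}
	return bn
}

func main() {
	// Hypothetical proof-of-work limit (2^224 - 1, purely for the demo).
	powLimit := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 224), big.NewInt(1))

	// Difficulty is the ratio of the easiest allowed target to the
	// current target, exactly as the debug dump computes it.
	bits := uint32(0x1b01ffff)
	target := compactToBig(bits)
	difficulty := new(big.Int).Div(powLimit, target)
	fmt.Printf("bits %08x -> difficulty %v\n", bits, difficulty)
}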
// InsertBlock inserts raw block and transaction data from a block into the
// database.  The first block inserted into the database will be treated as the
// genesis block.  Every subsequent block insert requires the referenced parent
// block to already exist.  This is part of the database.Db interface
// implementation.
func (db *MemDb) InsertBlock(block *dcrutil.Block) (int64, error) {
	db.Lock()
	defer db.Unlock()

	if db.closed {
		return 0, ErrDbClosed
	}

	// Reject the insert if the previously referenced block does not exist,
	// except in the case there are no blocks inserted yet, where the first
	// inserted block is assumed to be a genesis block.
	msgBlock := block.MsgBlock()
	if _, exists := db.blocksBySha[msgBlock.Header.PrevBlock]; !exists {
		if len(db.blocks) > 0 {
			return 0, database.ErrPrevShaMissing
		}
	}

	var blockPrev *dcrutil.Block

	// Decred: WARNING.  This function assumes that all block insertion
	// calls have dcrutil.blocks passed to them with block.blockHeight set
	// correctly.  However, loading the genesis block in dcrd didn't do this
	// (via block manager); pre-production it should be established that all
	// calls to this function pass blocks with block.blockHeight set
	// correctly.
	if len(db.blocks) > 0 {
		var errBlockPrev error
		blockPrev, errBlockPrev = db.fetchBlockBySha(&msgBlock.Header.PrevBlock)
		if errBlockPrev != nil {
			blockSha := block.Sha()
			log.Warnf("Failed to fetch parent block of block %v", blockSha)
			return 0, errBlockPrev
		}
	}

	// Build a map of in-flight transactions because some of the inputs in
	// this block could be referencing other transactions earlier in this
	// block which are not yet in the chain.
	newHeight := int64(len(db.blocks))
	txInFlight := map[chainhash.Hash]int{}

	// Loop through all transactions and inputs to ensure there are no error
	// conditions that would prevent them from being inserted into the db.
	// Although these checks could be done in the loop below, checking for
	// error conditions up front means the code below doesn't have to deal
	// with rollback on errors.
	votebits := block.MsgBlock().Header.VoteBits
	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		transactions := blockPrev.Transactions()
		for i, tx := range transactions {
			txInFlight[*tx.Sha()] = i
		}
		for i, tx := range transactions {
			for _, txIn := range tx.MsgTx().TxIn {
				if isCoinbaseInput(txIn) {
					continue
				}

				// It is acceptable for a transaction input to
				// reference the output of another transaction in
				// this block only if the referenced transaction
				// comes before the current one in this block.
				prevOut := &txIn.PreviousOutPoint
				if inFlightIndex, ok := txInFlight[prevOut.Hash]; ok {
					if i <= inFlightIndex {
						log.Warnf("InsertBlock: requested hash "+
							"of %s does not exist in-flight",
							tx.Sha())
						return 0, database.ErrTxShaMissing
					}
				} else {
					originTxns, exists := db.txns[prevOut.Hash]
					if !exists {
						log.Warnf("InsertBlock: requested hash "+
							"of %s by %s does not exist",
							prevOut.Hash, tx.Sha())
						return 0, database.ErrTxShaMissing
					}
					originTxD := originTxns[len(originTxns)-1]
					if prevOut.Index > uint32(len(originTxD.spentBuf)) {
						log.Warnf("InsertBlock: requested hash "+
							"of %s with index %d does not exist",
							tx.Sha(), prevOut.Index)
						return 0, database.ErrTxShaMissing
					}
				}
			}

			// Prevent duplicate transactions in the same block.
			if inFlightIndex, exists := txInFlight[*tx.Sha()]; exists &&
				inFlightIndex < i {
				log.Warnf("Block contains duplicate transaction %s",
					tx.Sha())
				return 0, database.ErrDuplicateSha
			}

			// Prevent duplicate transactions unless the old one is
			// fully spent.
			if txns, exists := db.txns[*tx.Sha()]; exists {
				txD := txns[len(txns)-1]
				if !isFullySpent(txD) {
					log.Warnf("Attempt to insert duplicate "+
						"transaction %s", tx.Sha())
					return 0, database.ErrDuplicateSha
				}
			}
		}
	}

	db.blocks = append(db.blocks, msgBlock)
	db.blocksBySha[msgBlock.Header.BlockSha()] = newHeight

	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		// Insert information about each transaction and spend all of the
		// outputs referenced by the inputs to the transactions.
		for i, tx := range blockPrev.Transactions() {
			// Insert the transaction data.
			txD := tTxInsertData{
				tree:        dcrutil.TxTreeRegular,
				blockHeight: newHeight - 1,
				offset:      i,
				spentBuf:    make([]bool, len(tx.MsgTx().TxOut)),
			}
			db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)

			// Spend all of the inputs.
			for _, txIn := range tx.MsgTx().TxIn {
				// Coinbase transaction has no inputs.
				if isCoinbaseInput(txIn) {
					continue
				}

				// Already checked for existing and valid ranges above.
				prevOut := &txIn.PreviousOutPoint
				originTxns := db.txns[prevOut.Hash]
				originTxD := originTxns[len(originTxns)-1]
				originTxD.spentBuf[prevOut.Index] = true
			}
		}
	}

	for i, tx := range block.STransactions() {
		// Insert the transaction data.
		txD := tTxInsertData{
			tree:        dcrutil.TxTreeStake,
			blockHeight: newHeight,
			offset:      i,
			spentBuf:    make([]bool, len(tx.MsgTx().TxOut)),
		}
		db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)

		// Spend all of the inputs.
		for _, txIn := range tx.MsgTx().TxIn {
			// Coinbase transaction has no inputs.
			if isCoinbaseInput(txIn) {
				continue
			}

			// Already checked for existing and valid ranges above.
			prevOut := &txIn.PreviousOutPoint
			originTxns := db.txns[prevOut.Hash]
			originTxD := originTxns[len(originTxns)-1]
			originTxD.spentBuf[prevOut.Index] = true
		}
	}

	return newHeight, nil
}
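// The in-flight map used by InsertBlock above is a common pattern when
// validating a block's transactions against each other: record the index of
// every transaction in the block, then allow an input to reference another
// transaction in the same block only if that transaction appears earlier.
// The sketch below is illustrative only; txRef and the simplified transaction
// shape are hypothetical and not the dcrutil/wire types.
package main

import "fmt"

type txRef struct {
	id     string   // stand-in for a transaction hash
	spends []string // ids of transactions whose outputs this tx spends
}

// checkInFlightOrder returns an error if any transaction spends an output of
// a transaction that appears at the same position or later in the block.
func checkInFlightOrder(txs []txRef) error {
	inFlight := make(map[string]int, len(txs))
	for i, tx := range txs {
		inFlight[tx.id] = i
	}
	for i, tx := range txs {
		for _, ref := range tx.spends {
			idx, ok := inFlight[ref]
			if !ok {
				// Not in this block; expected to already be in the chain.
				continue
			}
			if i <= idx {
				return fmt.Errorf("tx %s spends %s which does not come "+
					"before it in the block", tx.id, ref)
			}
		}
	}
	return nil
}

func main() {
	block := []txRef{
		{id: "coinbase"},
		{id: "a", spends: []string{"coinbase"}},
		{id: "b", spends: []string{"a"}},
	}
	fmt.Println(checkInFlightOrder(block)) // <nil>

	bad := []txRef{
		{id: "a", spends: []string{"b"}}, // spends a later tx in the same block
		{id: "b"},
	}
	fmt.Println(checkInFlightOrder(bad))
}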
// Init initializes the enabled indexes.  This is called during chain
// initialization and primarily consists of catching up all indexes to the
// current best chain tip.  This is necessary since each index can be disabled
// and re-enabled at any time and attempting to catch up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) Init(chain *blockchain.BlockChain) error {
	// Nothing to do when no indexes are enabled.
	if len(m.enabledIndexes) == 0 {
		return nil
	}

	// Finish any drops that were previously interrupted.
	if err := m.maybeFinishDrops(); err != nil {
		return err
	}

	// Create the initial state for the indexes as needed.
	err := m.db.Update(func(dbTx database.Tx) error {
		// Create the bucket for the current tips as needed.
		meta := dbTx.Metadata()
		_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
		if err != nil {
			return err
		}

		return m.maybeCreateIndexes(dbTx)
	})
	if err != nil {
		return err
	}

	// Initialize each of the enabled indexes.
	for _, indexer := range m.enabledIndexes {
		if err := indexer.Init(); err != nil {
			return err
		}
	}

	// Roll back indexes to the main chain if their tip is an orphaned fork.
	// This is fairly unlikely, but it can happen if the chain is
	// reorganized while the index is disabled.  This has to be done in
	// reverse order because later indexes can depend on earlier ones.
	var cachedBlock *dcrutil.Block
	for i := len(m.enabledIndexes); i > 0; i-- {
		indexer := m.enabledIndexes[i-1]

		// Fetch the current tip for the index.
		var height uint32
		var hash *chainhash.Hash
		err := m.db.View(func(dbTx database.Tx) error {
			idxKey := indexer.Key()
			hash, height, err = dbFetchIndexerTip(dbTx, idxKey)
			if err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return err
		}

		// Nothing to do if the index does not have any entries yet.
		if height == 0 {
			continue
		}

		// Loop until the tip is a block that exists in the main chain.
		initialHeight := height
		err = m.db.Update(func(dbTx database.Tx) error {
			for {
				if blockchain.DBMainChainHasBlock(dbTx, hash) {
					break
				}

				// Get the block, unless it's already cached.
				var block *dcrutil.Block
				if cachedBlock == nil && height > 0 {
					block, err = blockchain.DBFetchBlockByHeight(dbTx,
						int64(height))
					if err != nil {
						return err
					}
				} else {
					block = cachedBlock
				}

				// Load the parent block for the height since it is
				// required to remove it.
				parent, err := blockchain.DBFetchBlockByHeight(dbTx,
					int64(height)-1)
				if err != nil {
					return err
				}
				cachedBlock = parent

				// When the index requires all of the referenced
				// txouts they need to be retrieved from the
				// transaction index.
				var view *blockchain.UtxoViewpoint
				if indexNeedsInputs(indexer) {
					var err error
					view, err = makeUtxoView(dbTx, block, parent)
					if err != nil {
						return err
					}
				}

				// Remove all of the index entries associated
				// with the block and update the indexer tip.
				err = dbIndexDisconnectBlock(dbTx, indexer, block,
					parent, view)
				if err != nil {
					return err
				}

				// Update the tip to the previous block.
				hash = &block.MsgBlock().Header.PrevBlock
				height--
			}

			return nil
		})
		if err != nil {
			return err
		}

		if initialHeight != height {
			log.Infof("Removed %d orphaned blocks from %s "+
				"(heights %d to %d)", initialHeight-height,
				indexer.Name(), height+1, initialHeight)
		}
	}

	// Fetch the current tip heights for each index along with tracking the
	// lowest one so the catch-up code only needs to start at the earliest
	// block and can skip connecting the block for the indexes that don't
	// need it.
	bestHeight := chain.BestSnapshot().Height
	lowestHeight := bestHeight
	indexerHeights := make([]uint32, len(m.enabledIndexes))
	err = m.db.View(func(dbTx database.Tx) error {
		for i, indexer := range m.enabledIndexes {
			idxKey := indexer.Key()
			hash, height, err := dbFetchIndexerTip(dbTx, idxKey)
			if err != nil {
				return err
			}

			log.Debugf("Current %s tip (height %d, hash %v)",
				indexer.Name(), height, hash)
			indexerHeights[i] = height
			if height < uint32(lowestHeight) {
				lowestHeight = int64(height)
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Nothing to index if all of the indexes are caught up.
	if lowestHeight == bestHeight {
		return nil
	}

	// Create a progress logger for the indexing process below.
	progressLogger := progresslog.NewBlockProgressLogger("Indexed", log)

	// At this point, one or more indexes are behind the current best chain
	// tip and need to be caught up, so log the details and loop through
	// each block that needs to be indexed.
	log.Infof("Catching up indexes from height %d to %d", lowestHeight,
		bestHeight)
	var cachedParent *dcrutil.Block
	for height := lowestHeight + 1; height <= bestHeight; height++ {
		var block, parent *dcrutil.Block
		err = m.db.Update(func(dbTx database.Tx) error {
			// Get the parent of the block, unless it's already cached.
			if cachedParent == nil && height > 0 {
				parent, err = blockchain.DBFetchBlockByHeight(dbTx,
					height-1)
				if err != nil {
					return err
				}
			} else {
				parent = cachedParent
			}

			// Load the block for the height since it is required to
			// index it.
			block, err = blockchain.DBFetchBlockByHeight(dbTx, height)
			if err != nil {
				return err
			}
			cachedParent = block

			// Connect the block for all indexes that need it.
			var view *blockchain.UtxoViewpoint
			for i, indexer := range m.enabledIndexes {
				// Skip indexes that don't need to be updated with
				// this block.
				if indexerHeights[i] >= uint32(height) {
					continue
				}

				// When the index requires all of the referenced
				// txouts and they haven't been loaded yet, they
				// need to be retrieved from the transaction index.
				if view == nil && indexNeedsInputs(indexer) {
					var errMakeView error
					view, errMakeView = makeUtxoView(dbTx, block, parent)
					if errMakeView != nil {
						return errMakeView
					}
				}
				errLocal := dbIndexConnectBlock(dbTx, indexer, block,
					parent, view)
				if errLocal != nil {
					return errLocal
				}

				indexerHeights[i] = uint32(height)
			}
			return nil
		})
		if err != nil {
			return err
		}

		progressLogger.LogBlockHeight(block, parent)
	}

	log.Infof("Indexes caught up to height %d", bestHeight)
	return nil
}
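// The catch-up loop in Init above follows a simple pattern: record each
// index's current tip height, start from the lowest of those tips, and for
// every block connect it only to the indexes whose tip is still below that
// height.  The sketch below is a stripped-down illustration of that pattern
// using a hypothetical miniIndexer interface and an in-memory "chain"; it is
// not the dcrd indexers API.
package main

import "fmt"

type miniIndexer interface {
	Name() string
	Tip() int64
	ConnectBlock(height int64, block string) error
	SetTip(height int64)
}

type memIndexer struct {
	name string
	tip  int64
}

func (m *memIndexer) Name() string { return m.name }
func (m *memIndexer) Tip() int64   { return m.tip }
func (m *memIndexer) ConnectBlock(height int64, block string) error {
	fmt.Printf("%s: indexed block %q at height %d\n", m.name, block, height)
	return nil
}
func (m *memIndexer) SetTip(height int64) { m.tip = height }

// catchUp connects every missing block to every index that is behind,
// starting from the lowest index tip so no index is skipped.
func catchUp(indexers []miniIndexer, chain []string, bestHeight int64) error {
	lowest := bestHeight
	for _, idx := range indexers {
		if idx.Tip() < lowest {
			lowest = idx.Tip()
		}
	}
	for height := lowest + 1; height <= bestHeight; height++ {
		block := chain[height]
		for _, idx := range indexers {
			if idx.Tip() >= height {
				continue // this index already has the block
			}
			if err := idx.ConnectBlock(height, block); err != nil {
				return err
			}
			idx.SetTip(height)
		}
	}
	return nil
}

func main() {
	chain := []string{"genesis", "b1", "b2", "b3"}
	indexers := []miniIndexer{
		&memIndexer{name: "txindex", tip: 1},
		&memIndexer{name: "addrindex", tip: 3},
	}
	if err := catchUp(indexers, chain, 3); err != nil {
		fmt.Println("catch-up failed:", err)
	}
}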