// dbIndexDisconnectBlock removes all of the index entries associated with the // given block using the provided indexer and updates the tip of the indexer // accordingly. An error will be returned if the current tip for the indexer is // not the passed block. func dbIndexDisconnectBlock(dbTx database.Tx, indexer Indexer, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { // Assert that the block being disconnected is the current tip of the // index. idxKey := indexer.Key() curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey) if err != nil { return err } if !curTipHash.IsEqual(block.Sha()) { return AssertError(fmt.Sprintf("dbIndexDisconnectBlock must "+ "be called with the block at the current index tip "+ "(%s, tip %s, block %s)", indexer.Name(), curTipHash, block.Sha())) } // Notify the indexer with the disconnected block so it can remove all // of the appropriate entries. if err := indexer.DisconnectBlock(dbTx, block, view); err != nil { return err } // Update the current index tip. prevHash := &block.MsgBlock().Header.PrevBlock return dbPutIndexerTip(dbTx, idxKey, prevHash, block.Height()-1) }
// ConnectBlock is invoked by the index manager when a new block has been // connected to the main chain. This indexer adds a mapping for each address // the transactions in the block involve. // // This is part of the Indexer interface. func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { // The offset and length of the transactions within the serialized // block. txLocs, err := block.TxLoc() if err != nil { return err } // Get the internal block ID associated with the block. blockID, err := dbFetchBlockIDByHash(dbTx, block.Sha()) if err != nil { return err } // Build all of the address to transaction mappings in a local map. addrsToTxns := make(writeIndexData) idx.indexBlock(addrsToTxns, block, view) // Add all of the index entries for each address. addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey) for addrKey, txIdxs := range addrsToTxns { for _, txIdx := range txIdxs { err := dbPutAddrIndexEntry(addrIdxBucket, addrKey, blockID, txLocs[txIdx]) if err != nil { return err } } } return nil }
// checkProofOfWork ensures the target difficulty indicated by the block header
// bits is within the valid min/max range and that the block hash is less than
// the target difficulty as claimed.
//
// The flags modify the behavior of this function as follows:
//  - BFNoPoWCheck: The check to ensure the block hash is less than the target
//    difficulty is not performed.
func checkProofOfWork(block *btcutil.Block, powLimit *big.Int, flags BehaviorFlags) error {
	// The target difficulty must be larger than zero.
	target := CompactToBig(block.MsgBlock().Header.Bits)
	if target.Sign() <= 0 {
		str := fmt.Sprintf("block target difficulty of %064x is too low",
			target)
		return ruleError(ErrUnexpectedDifficulty, str)
	}

	// The target difficulty must be less than the maximum allowed.
	if target.Cmp(powLimit) > 0 {
		str := fmt.Sprintf("block target difficulty of %064x is "+
			"higher than max of %064x", target, powLimit)
		return ruleError(ErrUnexpectedDifficulty, str)
	}

	// The block hash must be less than the claimed target unless the flag
	// to avoid proof of work checks is set.
	if flags&BFNoPoWCheck != BFNoPoWCheck {
		// The block hash must be less than the claimed target.
		blockHash, err := block.Sha()
		if err != nil {
			return err
		}
		hashNum := ShaHashToBig(blockHash)
		if hashNum.Cmp(target) > 0 {
			str := fmt.Sprintf("block hash of %064x is higher than "+
				"expected max of %064x", hashNum, target)
			return ruleError(ErrHighHash, str)
		}
	}

	return nil
}
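// compactToBigSketch is an illustrative, standalone sketch of the compact
// "bits" decoding that CompactToBig performs above, assuming the standard
// Bitcoin compact target format.  It is not the package's implementation and
// ignores the sign bit, which is never set for valid difficulty targets.  For
// example, the mainnet genesis bits 0x1d00ffff decode to the target
// 0xffff shifted left by 208 bits.
func compactToBigSketch(compact uint32) *big.Int {
	// The high byte is the exponent: the number of base-256 digits in the
	// full target.  The low 23 bits are the mantissa.
	exponent := uint(compact >> 24)
	mantissa := int64(compact & 0x007fffff)

	// When the exponent is 3 or less the mantissa already contains the
	// entire value, shifted down accordingly.
	if exponent <= 3 {
		mantissa >>= 8 * (3 - exponent)
		return big.NewInt(mantissa)
	}

	// Otherwise shift the mantissa up by the remaining base-256 digits.
	target := big.NewInt(mantissa)
	return target.Lsh(target, 8*(exponent-3))
}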
// CheckConnectBlock performs several checks to confirm connecting the passed // block to the main chain does not violate any rules. An example of some of // the checks performed are ensuring connecting the block would not cause any // duplicate transaction hashes for old transactions that aren't already fully // spent, double spends, exceeding the maximum allowed signature operations // per block, invalid values in relation to the expected block subsidy, or fail // transaction script validation. // // This function is NOT safe for concurrent access. func (b *BlockChain) CheckConnectBlock(block *btcutil.Block) error { prevNode := b.bestChain newNode := newBlockNode(&block.MsgBlock().Header, block.Sha(), block.Height()) if prevNode != nil { newNode.parent = prevNode newNode.workSum.Add(prevNode.workSum, newNode.workSum) } return b.checkConnectBlock(newNode, block) }
// connectTransactions updates the view by adding all new utxos created by all // of the transactions in the passed block, marking all utxos the transactions // spend as spent, and setting the best hash for the view to the passed block. // In addition, when the 'stxos' argument is not nil, it will be updated to // append an entry for each spent txout. func (view *UtxoViewpoint) connectTransactions(block *btcutil.Block, stxos *[]spentTxOut) error { for _, tx := range block.Transactions() { err := view.connectTransaction(tx, block.Height(), stxos) if err != nil { return err } } // Update the best hash for view to include this block since all of its // transactions have been connected. view.SetBestHash(block.Sha()) return nil }
// DisconnectBlock is invoked by the index manager when a block has been // disconnected from the main chain. This indexer removes the // hash-to-transaction mapping for every transaction in the block. // // This is part of the Indexer interface. func (idx *TxIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { // Remove all of the transactions in the block from the index. if err := dbRemoveTxIndexEntries(dbTx, block); err != nil { return err } // Remove the block ID index entry for the block being disconnected and // decrement the current internal block ID to account for it. if err := dbRemoveBlockIDIndexEntry(dbTx, block.Sha()); err != nil { return err } idx.curBlockID-- return nil }
// CheckConnectBlock performs several checks to confirm connecting the passed // block to the main chain does not violate any rules. An example of some of // the checks performed are ensuring connecting the block would not cause any // duplicate transaction hashes for old transactions that aren't already fully // spent, double spends, exceeding the maximum allowed signature operations // per block, invalid values in relation to the expected block subsidy, or fail // transaction script validation. // // This function is safe for concurrent access. func (b *BlockChain) CheckConnectBlock(block *btcutil.Block) error { b.chainLock.Lock() defer b.chainLock.Unlock() prevNode := b.bestNode newNode := newBlockNode(&block.MsgBlock().Header, block.Sha(), prevNode.height+1) newNode.parent = prevNode newNode.workSum.Add(prevNode.workSum, newNode.workSum) // Leave the spent txouts entry nil in the state since the information // is not needed and thus extra work can be avoided. view := NewUtxoViewpoint() view.SetBestHash(prevNode.hash) return b.checkConnectBlock(newNode, block, view, nil) }
// addOrphanBlock adds the passed block (which is already determined to be
// an orphan prior to calling this function) to the orphan pool. It lazily
// cleans up any expired blocks so a separate cleanup poller doesn't need to be
// run. It also imposes a maximum limit on the number of outstanding orphan
// blocks and will remove the oldest received orphan block if the limit is
// exceeded.
func (b *BlockChain) addOrphanBlock(block *btcutil.Block) {
	// Remove expired orphan blocks.
	for _, oBlock := range b.orphans {
		if time.Now().After(oBlock.expiration) {
			b.removeOrphanBlock(oBlock)
			continue
		}

		// Update the oldest orphan block pointer so it can be discarded
		// in case the orphan pool fills up.
		if b.oldestOrphan == nil || oBlock.expiration.Before(b.oldestOrphan.expiration) {
			b.oldestOrphan = oBlock
		}
	}

	// Limit orphan blocks to prevent memory exhaustion.
	if len(b.orphans)+1 > maxOrphanBlocks {
		// Remove the oldest orphan to make room for the new one.
		b.removeOrphanBlock(b.oldestOrphan)
		b.oldestOrphan = nil
	}

	// Get the block sha. It is safe to ignore the error here since any
	// errors would've been caught prior to calling this function.
	blockSha, _ := block.Sha()

	// Protect concurrent access. This is intentionally done here instead
	// of near the top since removeOrphanBlock does its own locking and
	// the range iterator is not invalidated by removing map entries.
	b.orphanLock.Lock()
	defer b.orphanLock.Unlock()

	// Insert the block into the orphan map with an expiration time
	// 1 hour from now.
	expiration := time.Now().Add(time.Hour)
	oBlock := &orphanBlock{
		block:      block,
		expiration: expiration,
	}
	b.orphans[*blockSha] = oBlock

	// Add to previous hash lookup index for faster dependency lookups.
	prevHash := &block.MsgBlock().Header.PrevBlock
	b.prevOrphans[*prevHash] = append(b.prevOrphans[*prevHash], oBlock)
}
// ConnectBlock is invoked by the index manager when a new block has been // connected to the main chain. This indexer adds a hash-to-transaction mapping // for every transaction in the passed block. // // This is part of the Indexer interface. func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { // Increment the internal block ID to use for the block being connected // and add all of the transactions in the block to the index. newBlockID := idx.curBlockID + 1 if err := dbAddTxIndexEntries(dbTx, block, newBlockID); err != nil { return err } // Add the new block ID index entry for the block being connected and // update the current internal block ID accordingly. err := dbPutBlockIDIndexEntry(dbTx, block.Sha(), newBlockID) if err != nil { return err } idx.curBlockID = newBlockID return nil }
// submitBlock submits the passed block to network after ensuring it passes all // of the consensus validation rules. func (m *CPUMiner) submitBlock(block *btcutil.Block) bool { m.submitBlockLock.Lock() defer m.submitBlockLock.Unlock() // Ensure the block is not stale since a new block could have shown up // while the solution was being found. Typically that condition is // detected and all work on the stale block is halted to start work on // a new block, but the check only happens periodically, so it is // possible a block was found and submitted in between. latestHash, _ := m.server.blockManager.chainState.Best() msgBlock := block.MsgBlock() if !msgBlock.Header.PrevBlock.IsEqual(latestHash) { minrLog.Debugf("Block submitted via CPU miner with previous "+ "block %s is stale", msgBlock.Header.PrevBlock) return false } // Process this block using the same rules as blocks coming from other // nodes. This will in turn relay it to the network like normal. isOrphan, err := m.server.blockManager.ProcessBlock(block, blockchain.BFNone) if err != nil { // Anything other than a rule violation is an unexpected error, // so log that error as an internal error. if _, ok := err.(blockchain.RuleError); !ok { minrLog.Errorf("Unexpected error while processing "+ "block submitted via CPU miner: %v", err) return false } minrLog.Debugf("Block submitted via CPU miner rejected: %v", err) return false } if isOrphan { minrLog.Debugf("Block submitted via CPU miner is an orphan") return false } // The block was accepted. blockSha, _ := block.Sha() coinbaseTx := block.MsgBlock().Transactions[0].TxOut[0] minrLog.Infof("Block submitted via CPU miner accepted (hash %s, "+ "amount %v)", blockSha, btcutil.Amount(coinbaseTx.Value)) return true }
// dbFetchSpendJournalEntry fetches the spend journal entry for the passed // block and deserializes it into a slice of spent txout entries. The provided // view MUST have the utxos referenced by all of the transactions available for // the passed block since that information is required to reconstruct the spent // txouts. func dbFetchSpendJournalEntry(dbTx database.Tx, block *btcutil.Block, view *UtxoViewpoint) ([]spentTxOut, error) { // Exclude the coinbase transaction since it can't spend anything. spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) serialized := spendBucket.Get(block.Sha()[:]) blockTxns := block.MsgBlock().Transactions[1:] stxos, err := deserializeSpendJournalEntry(serialized, blockTxns, view) if err != nil { // Ensure any deserialization errors are returned as database // corruption errors. if isDeserializeErr(err) { return nil, database.Error{ ErrorCode: database.ErrCorruption, Description: fmt.Sprintf("corrupt spend "+ "information for %v: %v", block.Sha(), err), } } return nil, err } return stxos, nil }
// dbIndexConnectBlock adds all of the index entries associated with the // given block using the provided indexer and updates the tip of the indexer // accordingly. An error will be returned if the current tip for the indexer is // not the previous block for the passed block. func dbIndexConnectBlock(dbTx database.Tx, indexer Indexer, block *btcutil.Block, view *blockchain.UtxoViewpoint) error { // Assert that the block being connected properly connects to the // current tip of the index. idxKey := indexer.Key() curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey) if err != nil { return err } if !curTipHash.IsEqual(&block.MsgBlock().Header.PrevBlock) { return AssertError(fmt.Sprintf("dbIndexConnectBlock must be "+ "called with a block that extends the current index "+ "tip (%s, tip %s, block %s)", indexer.Name(), curTipHash, block.Sha())) } // Notify the indexer with the connected block so it can index it. if err := indexer.ConnectBlock(dbTx, block, view); err != nil { return err } // Update the current index tip. return dbPutIndexerTip(dbTx, idxKey, block.Sha(), block.Height()) }
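// exampleIndexManager and exampleConnectBlock below are a hypothetical,
// illustrative sketch (not this package's actual manager code) of how a caller
// could apply a connected block to every enabled indexer.  It assumes the
// database.DB Update method, which runs the provided function within a managed
// read-write transaction.
type exampleIndexManager struct {
	db             database.DB
	enabledIndexes []Indexer
}

// exampleConnectBlock updates every enabled index with the passed block inside
// a single database transaction so a failure in any one indexer leaves all of
// the index tips unchanged.
func (m *exampleIndexManager) exampleConnectBlock(block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {

	return m.db.Update(func(dbTx database.Tx) error {
		for _, indexer := range m.enabledIndexes {
			err := dbIndexConnectBlock(dbTx, indexer, block, view)
			if err != nil {
				return err
			}
		}
		return nil
	})
}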
// IsCheckpointCandidate returns whether or not the passed block is a good // checkpoint candidate. // // The factors used to determine a good checkpoint are: // - The block must be in the main chain // - The block must be at least 'CheckpointConfirmations' blocks prior to the // current end of the main chain // - The timestamps for the blocks before and after the checkpoint must have // timestamps which are also before and after the checkpoint, respectively // (due to the median time allowance this is not always the case) // - The block must not contain any strange transaction such as those with // nonstandard scripts // // The intent is that candidates are reviewed by a developer to make the final // decision and then manually added to the list of checkpoints for a network. func (b *BlockChain) IsCheckpointCandidate(block *btcutil.Block) (bool, error) { // Checkpoints must be enabled. if b.noCheckpoints { return false, fmt.Errorf("checkpoints are disabled") } // A checkpoint must be in the main chain. exists, err := b.db.ExistsSha(block.Sha()) if err != nil { return false, err } if !exists { return false, nil } // A checkpoint must be at least CheckpointConfirmations blocks before // the end of the main chain. blockHeight := block.Height() _, mainChainHeight, err := b.db.NewestSha() if err != nil { return false, err } if blockHeight > (mainChainHeight - CheckpointConfirmations) { return false, nil } // Get the previous block. prevHash := &block.MsgBlock().Header.PrevBlock prevBlock, err := b.db.FetchBlockBySha(prevHash) if err != nil { return false, err } // Get the next block. nextHash, err := b.db.FetchBlockShaByHeight(blockHeight + 1) if err != nil { return false, err } nextBlock, err := b.db.FetchBlockBySha(nextHash) if err != nil { return false, err } // A checkpoint must have timestamps for the block and the blocks on // either side of it in order (due to the median time allowance this is // not always the case). prevTime := prevBlock.MsgBlock().Header.Timestamp curTime := block.MsgBlock().Header.Timestamp nextTime := nextBlock.MsgBlock().Header.Timestamp if prevTime.After(curTime) || nextTime.Before(curTime) { return false, nil } // A checkpoint must have transactions that only contain standard // scripts. for _, tx := range block.Transactions() { if isNonstandardTransaction(tx) { return false, nil } } return true, nil }
// connectBestChain handles connecting the passed block to the chain while // respecting proper chain selection according to the chain with the most // proof of work. In the typical case, the new block simply extends the main // chain. However, it may also be extending (or creating) a side chain (fork) // which may or may not end up becoming the main chain depending on which fork // cumulatively has the most proof of work. // // The flags modify the behavior of this function as follows: // - BFFastAdd: Avoids the call to checkConnectBlock which does several // expensive transaction validation operations. // - BFDryRun: Prevents the block from being connected and avoids modifying the // state of the memory chain index. Also, any log messages related to // modifying the state are avoided. func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, flags BehaviorFlags) error { fastAdd := flags&BFFastAdd == BFFastAdd dryRun := flags&BFDryRun == BFDryRun // We haven't selected a best chain yet or we are extending the main // (best) chain with a new block. This is the most common case. if b.bestChain == nil || node.parent.hash.IsEqual(b.bestChain.hash) { // Perform several checks to verify the block can be connected // to the main chain (including whatever reorganization might // be necessary to get this node to the main chain) without // violating any rules and without actually connecting the // block. if !fastAdd { err := b.checkConnectBlock(node, block) if err != nil { return err } } // Don't connect the block if performing a dry run. if dryRun { return nil } // Connect the block to the main chain. err := b.connectBlock(node, block) if err != nil { return err } // Connect the parent node to this node. if node.parent != nil { node.parent.children = append(node.parent.children, node) } return nil } if fastAdd { log.Warnf("fastAdd set in the side chain case? %v\n", block.Sha()) } // We're extending (or creating) a side chain which may or may not // become the main chain, but in either case we need the block stored // for future processing, so add the block to the side chain holding // cache. if !dryRun { log.Debugf("Adding block %v to side chain cache", node.hash) } b.blockCache[*node.hash] = block b.index[*node.hash] = node // Connect the parent node to this node. node.inMainChain = false node.parent.children = append(node.parent.children, node) // Remove the block from the side chain cache and disconnect it from the // parent node when the function returns when running in dry run mode. if dryRun { defer func() { children := node.parent.children children = removeChildNode(children, node) node.parent.children = children delete(b.index, *node.hash) delete(b.blockCache, *node.hash) }() } // We're extending (or creating) a side chain, but the cumulative // work for this new side chain is not enough to make it the new chain. if node.workSum.Cmp(b.bestChain.workSum) <= 0 { // Skip Logging info when the dry run flag is set. if dryRun { return nil } // Find the fork point. fork := node for ; fork.parent != nil; fork = fork.parent { if fork.inMainChain { break } } // Log information about how the block is forking the chain. 
		if fork.hash.IsEqual(node.parent.hash) {
			log.Infof("FORK: Block %v forks the chain at height %d"+
				"/block %v, but does not cause a reorganize",
				node.hash, fork.height, fork.hash)
		} else {
			log.Infof("EXTEND FORK: Block %v extends a side chain "+
				"which forks the chain at height %d/block %v",
				node.hash, fork.height, fork.hash)
		}

		return nil
	}

	// We're extending (or creating) a side chain and the cumulative work
	// for this new side chain is more than the old best chain, so this side
	// chain needs to become the main chain. In order to accomplish that,
	// find the common ancestor of both sides of the fork, disconnect the
	// blocks that form the (now) old fork from the main chain, and attach
	// the blocks that form the new chain to the main chain starting at the
	// common ancestor (the point where the chain forked).
	detachNodes, attachNodes := b.getReorganizeNodes(node)

	// Reorganize the chain.
	if !dryRun {
		log.Infof("REORGANIZE: Block %v is causing a reorganize.",
			node.hash)
	}
	err := b.reorganizeChain(detachNodes, attachNodes, flags)
	if err != nil {
		return err
	}

	return nil
}
// IsCheckpointCandidate returns whether or not the passed block is a good // checkpoint candidate. // // The factors used to determine a good checkpoint are: // - The block must be in the main chain // - The block must be at least 'CheckpointConfirmations' blocks prior to the // current end of the main chain // - The timestamps for the blocks before and after the checkpoint must have // timestamps which are also before and after the checkpoint, respectively // (due to the median time allowance this is not always the case) // - The block must not contain any strange transaction such as those with // nonstandard scripts // // The intent is that candidates are reviewed by a developer to make the final // decision and then manually added to the list of checkpoints for a network. // // This function is safe for concurrent access. func (b *BlockChain) IsCheckpointCandidate(block *btcutil.Block) (bool, error) { b.chainLock.RLock() defer b.chainLock.RUnlock() // Checkpoints must be enabled. if b.noCheckpoints { return false, fmt.Errorf("checkpoints are disabled") } var isCandidate bool err := b.db.View(func(dbTx database.Tx) error { // A checkpoint must be in the main chain. blockHeight, err := dbFetchHeightByHash(dbTx, block.Sha()) if err != nil { // Only return an error if it's not due to the block not // being in the main chain. if !isNotInMainChainErr(err) { return err } return nil } // Ensure the height of the passed block and the entry for the // block in the main chain match. This should always be the // case unless the caller provided an invalid block. if blockHeight != block.Height() { return fmt.Errorf("passed block height of %d does not "+ "match the main chain height of %d", block.Height(), blockHeight) } // A checkpoint must be at least CheckpointConfirmations blocks // before the end of the main chain. mainChainHeight := b.bestNode.height if blockHeight > (mainChainHeight - CheckpointConfirmations) { return nil } // Get the previous block header. prevHash := &block.MsgBlock().Header.PrevBlock prevHeader, err := dbFetchHeaderByHash(dbTx, prevHash) if err != nil { return err } // Get the next block header. nextHeader, err := dbFetchHeaderByHeight(dbTx, blockHeight+1) if err != nil { return err } // A checkpoint must have timestamps for the block and the // blocks on either side of it in order (due to the median time // allowance this is not always the case). prevTime := prevHeader.Timestamp curTime := block.MsgBlock().Header.Timestamp nextTime := nextHeader.Timestamp if prevTime.After(curTime) || nextTime.Before(curTime) { return nil } // A checkpoint must have transactions that only contain // standard scripts. for _, tx := range block.Transactions() { if isNonstandardTransaction(tx) { return nil } } // All of the checks passed, so the block is a candidate. isCandidate = true return nil }) return isCandidate, err }
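// printCheckpointCandidates is a hypothetical usage sketch of the candidate
// review flow described above: given main chain blocks the caller has already
// fetched, it reports the ones IsCheckpointCandidate approves so a developer
// can review them before manually adding checkpoints for the network.
func printCheckpointCandidates(chain *BlockChain, blocks []*btcutil.Block) error {
	for _, block := range blocks {
		isCandidate, err := chain.IsCheckpointCandidate(block)
		if err != nil {
			return err
		}
		if isCandidate {
			fmt.Printf("checkpoint candidate: height %d hash %v\n",
				block.Height(), block.Sha())
		}
	}
	return nil
}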
// ProcessBlock is the main workhorse for handling insertion of new blocks into // the block chain. It includes functionality such as rejecting duplicate // blocks, ensuring blocks follow all rules, orphan handling, and insertion into // the block chain along with best chain selection and reorganization. // // It returns a bool which indicates whether or not the block is an orphan and // any errors that occurred during processing. The returned bool is only valid // when the error is nil. func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSource, flags BehaviorFlags) (bool, error) { fastAdd := flags&BFFastAdd == BFFastAdd dryRun := flags&BFDryRun == BFDryRun blockHash := block.Sha() log.Tracef("Processing block %v", blockHash) // The block must not already exist in the main chain or side chains. exists, err := b.blockExists(blockHash) if err != nil { return false, err } if exists { str := fmt.Sprintf("already have block %v", blockHash) return false, ruleError(ErrDuplicateBlock, str) } // The block must not already exist as an orphan. if _, exists := b.orphans[*blockHash]; exists { str := fmt.Sprintf("already have block (orphan) %v", blockHash) return false, ruleError(ErrDuplicateBlock, str) } // Perform preliminary sanity checks on the block and its transactions. err = checkBlockSanity(block, b.chainParams.PowLimit, timeSource, flags) if err != nil { return false, err } // Find the previous checkpoint and perform some additional checks based // on the checkpoint. This provides a few nice properties such as // preventing old side chain blocks before the last checkpoint, // rejecting easy to mine, but otherwise bogus, blocks that could be // used to eat memory, and ensuring expected (versus claimed) proof of // work requirements since the previous checkpoint are met. blockHeader := &block.MsgBlock().Header checkpointBlock, err := b.findPreviousCheckpoint() if err != nil { return false, err } if checkpointBlock != nil { // Ensure the block timestamp is after the checkpoint timestamp. checkpointHeader := &checkpointBlock.MsgBlock().Header checkpointTime := checkpointHeader.Timestamp if blockHeader.Timestamp.Before(checkpointTime) { str := fmt.Sprintf("block %v has timestamp %v before "+ "last checkpoint timestamp %v", blockHash, blockHeader.Timestamp, checkpointTime) return false, ruleError(ErrCheckpointTimeTooOld, str) } if !fastAdd { // Even though the checks prior to now have already ensured the // proof of work exceeds the claimed amount, the claimed amount // is a field in the block header which could be forged. This // check ensures the proof of work is at least the minimum // expected based on elapsed time since the last checkpoint and // maximum adjustment allowed by the retarget rules. duration := blockHeader.Timestamp.Sub(checkpointTime) requiredTarget := CompactToBig(b.calcEasiestDifficulty( checkpointHeader.Bits, duration)) currentTarget := CompactToBig(blockHeader.Bits) if currentTarget.Cmp(requiredTarget) > 0 { str := fmt.Sprintf("block target difficulty of %064x "+ "is too low when compared to the previous "+ "checkpoint", currentTarget) return false, ruleError(ErrDifficultyTooLow, str) } } } // Handle orphan blocks. 
prevHash := &blockHeader.PrevBlock if !prevHash.IsEqual(zeroHash) { prevHashExists, err := b.blockExists(prevHash) if err != nil { return false, err } if !prevHashExists { if !dryRun { log.Infof("Adding orphan block %v with parent %v", blockHash, prevHash) b.addOrphanBlock(block) } return true, nil } } // The block has passed all context independent checks and appears sane // enough to potentially accept it into the block chain. err = b.maybeAcceptBlock(block, flags) if err != nil { return false, err } // Don't process any orphans or log when the dry run flag is set. if !dryRun { // Accept any orphan blocks that depend on this block (they are // no longer orphans) and repeat for those accepted blocks until // there are no more. err := b.processOrphans(blockHash, flags) if err != nil { return false, err } log.Debugf("Accepted block %v", blockHash) } return false, nil }
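// exampleProcessBlock is a hypothetical caller-side sketch (mirroring how the
// CPU miner submits blocks) of the ProcessBlock contract documented above: a
// rule violation surfaces as a RuleError, any other error is unexpected, and
// the returned bool reports whether the block was accepted as an orphan.  The
// chain, block, and timeSource values are assumed to be prepared by the
// caller.
func exampleProcessBlock(chain *BlockChain, block *btcutil.Block,
	timeSource MedianTimeSource) {

	isOrphan, err := chain.ProcessBlock(block, timeSource, BFNone)
	if err != nil {
		// Rule violations mean the block itself is invalid; anything
		// else indicates an unexpected processing failure.
		if _, ok := err.(RuleError); ok {
			log.Infof("Block rejected: %v", err)
			return
		}
		log.Errorf("Unexpected error while processing block: %v", err)
		return
	}
	if isOrphan {
		log.Infof("Block is an orphan awaiting its parent")
	}
}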
// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block chain. It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
// the block chain along with best chain selection and reorganization.
//
// It returns a bool which indicates whether or not the block is an orphan and
// any errors that occurred during processing. The returned bool is only valid
// when the error is nil.
func (b *BlockChain) ProcessBlock(block *btcutil.Block, timeSource MedianTimeSource, flags BehaviorFlags) (bool, error) {
	dryRun := flags&BFDryRun == BFDryRun

	blockHash := block.Sha()

	// The block must not already exist in the main chain or side chains.
	exists, err := b.blockExists(blockHash)
	if err != nil {
		return false, err
	}
	if exists {
		str := fmt.Sprintf("already have block %v", blockHash)
		return false, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := b.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %v", blockHash)
		return false, ruleError(ErrDuplicateBlock, str)
	}

	// Perform preliminary sanity checks on the block and its transactions.
	err = checkBlockSanity(block, b.chainParams.PowLimit, timeSource, flags)
	if err != nil {
		return false, err
	}

	blockHeader := &block.MsgBlock().Header

	// Fast path: when the block directly extends the latest known
	// checkpoint, its height is already known, so accept it immediately
	// with the fast-add flag to skip the more expensive validation.
	latestCheckpoint := b.LatestCheckpoint()
	if latestCheckpoint != nil && blockHeader.PrevBlock.IsEqual(latestCheckpoint.Hash) {
		block.SetHeight(latestCheckpoint.Height + 1)
		err = b.maybeAcceptBlock(block, flags|BFFastAdd)
		if err != nil {
			log.Errorf("Failed to accept block extending the "+
				"latest checkpoint: %v", err)
			return false, err
		}

		log.Infof("Checkpoint (+1) block inserted: %v", block.Sha())
		return false, nil
	}

	// Handle orphan blocks.
	prevHash := &blockHeader.PrevBlock
	if !prevHash.IsEqual(zeroHash) {
		prevHashExists, err := b.blockExists(prevHash)
		if err != nil {
			return false, err
		}
		if !prevHashExists {
			if !dryRun {
				log.Infof("Adding orphan block %v with parent %v",
					blockHash, prevHash)
				b.addOrphanBlock(block)
			}

			return true, nil
		}
	}

	// The block has passed all context independent checks and appears sane
	// enough to potentially accept it into the block chain.
	err = b.maybeAcceptBlock(block, flags)
	if err != nil {
		return false, err
	}

	// Don't process any orphans or log when the dry run flag is set.
	if !dryRun {
		// Accept any orphan blocks that depend on this block (they are
		// no longer orphans) and repeat for those accepted blocks until
		// there are no more.
		err := b.processOrphans(blockHash, flags)
		if err != nil {
			return false, err
		}

		log.Debugf("Accepted block %v", blockHash)
	}

	return false, nil
}
// InsertBlock inserts raw block and transaction data from a block into the
// database. The first block inserted into the database will be treated as the
// genesis block. Every subsequent block insert requires the referenced parent
// block to already exist.
func (db *LevelDb) InsertBlock(block *btcutil.Block) (height int64, rerr error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	blocksha := block.Sha()
	mblock := block.MsgBlock()
	rawMsg, err := block.Bytes()
	if err != nil {
		log.Warnf("Failed to obtain raw bytes for block %v", blocksha)
		return 0, err
	}
	txloc, err := block.TxLoc()
	if err != nil {
		log.Warnf("Failed to obtain transaction locations for block %v", blocksha)
		return 0, err
	}

	// Insert block into database
	newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock,
		rawMsg)
	if err != nil {
		log.Warnf("Failed to insert block %v %v %v", blocksha,
			&mblock.Header.PrevBlock, err)
		return 0, err
	}

	// At least two blocks in the distant past were generated by faulty
	// miners such that the sha of a transaction duplicates one in a
	// previous block.  Detect this condition and 'accept' the block.
	for txidx, tx := range mblock.Transactions {
		txsha, err := block.TxSha(txidx)
		if err != nil {
			log.Warnf("failed to compute tx sha for block %v idx %v err %v",
				blocksha, txidx, err)
			return 0, err
		}
		spentbuflen := (len(tx.TxOut) + 7) / 8
		spentbuf := make([]byte, spentbuflen)
		if len(tx.TxOut)%8 != 0 {
			for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
				spentbuf[spentbuflen-1] |= (byte(1) << i)
			}
		}

		err = db.insertTx(txsha, newheight, txloc[txidx].TxStart, txloc[txidx].TxLen, spentbuf)
		if err != nil {
			log.Warnf("block %v idx %v failed to insert tx %v %v err %v",
				blocksha, newheight, &txsha, txidx, err)
			return 0, err
		}

		// Some old blocks contain duplicate transactions.
		// Attempt to cleanly bypass this problem by marking the
		// first as fully spent.
		// http://blockexplorer.com/b/91812 dup in 91842
		// http://blockexplorer.com/b/91722 dup in 91880
		if newheight == 91812 {
			dupsha, err := wire.NewShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599")
			if err != nil {
				panic("invalid sha string in source")
			}
			if txsha.IsEqual(dupsha) {
				// marking TxOut[0] as spent
				po := wire.NewOutPoint(dupsha, 0)
				txI := wire.NewTxIn(po, []byte("garbage"))

				var spendtx wire.MsgTx
				spendtx.AddTxIn(txI)
				err = db.doSpend(&spendtx)
				if err != nil {
					log.Warnf("block %v idx %v failed to spend tx %v %v err %v",
						blocksha, newheight, &txsha, txidx, err)
				}
			}
		}
		if newheight == 91722 {
			dupsha, err := wire.NewShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468")
			if err != nil {
				panic("invalid sha string in source")
			}
			if txsha.IsEqual(dupsha) {
				// marking TxOut[0] as spent
				po := wire.NewOutPoint(dupsha, 0)
				txI := wire.NewTxIn(po, []byte("garbage"))

				var spendtx wire.MsgTx
				spendtx.AddTxIn(txI)
				err = db.doSpend(&spendtx)
				if err != nil {
					log.Warnf("block %v idx %v failed to spend tx %v %v err %v",
						blocksha, newheight, &txsha, txidx, err)
				}
			}
		}

		err = db.doSpend(tx)
		if err != nil {
			log.Warnf("block %v idx %v failed to spend tx %v %v err %v",
				blocksha, newheight, txsha, txidx, err)
			return 0, err
		}
	}
	return newheight, nil
}
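// outputSpentSketch illustrates the spent-buffer convention implied by
// InsertBlock above: spentbuf holds one bit per transaction output, least
// significant bit first within each byte, and the unused padding bits of the
// final byte are pre-set so a fully spent transaction becomes an all-ones
// buffer.  This helper is illustrative only; the actual spend tracking is
// handled by doSpend and related code.
func outputSpentSketch(spentbuf []byte, outputIdx uint32) bool {
	byteIdx := outputIdx / 8
	bitIdx := outputIdx % 8
	return spentbuf[byteIdx]&(byte(1)<<bitIdx) != 0
}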
// maybeAcceptBlock potentially accepts a block into the memory block chain. // It performs several validation checks which depend on its position within // the block chain before adding it. The block is expected to have already gone // through ProcessBlock before calling this function with it. // // The flags modify the behavior of this function as follows: // - BFFastAdd: The somewhat expensive BIP0034 validation is not performed. // - BFDryRun: The memory chain index will not be pruned and no accept // notification will be sent since the block is not being accepted. func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) error { fastAdd := flags&BFFastAdd == BFFastAdd dryRun := flags&BFDryRun == BFDryRun // Get a block node for the block previous to this one. Will be nil // if this is the genesis block. prevNode, err := b.getPrevNodeFromBlock(block) if err != nil { log.Errorf("getPrevNodeFromBlock: %v", err) return err } // The height of this block is one more than the referenced previous // block. blockHeight := int64(0) if prevNode != nil { blockHeight = prevNode.height + 1 } block.SetHeight(blockHeight) blockHeader := &block.MsgBlock().Header if !fastAdd { // Ensure the difficulty specified in the block header matches // the calculated difficulty based on the previous block and // difficulty retarget rules. expectedDifficulty, err := b.calcNextRequiredDifficulty(prevNode, block.MsgBlock().Header.Timestamp) if err != nil { return err } blockDifficulty := blockHeader.Bits if blockDifficulty != expectedDifficulty { str := "block difficulty of %d is not the expected value of %d" str = fmt.Sprintf(str, blockDifficulty, expectedDifficulty) return ruleError(ErrUnexpectedDifficulty, str) } // Ensure the timestamp for the block header is after the // median time of the last several blocks (medianTimeBlocks). medianTime, err := b.calcPastMedianTime(prevNode) if err != nil { log.Errorf("calcPastMedianTime: %v", err) return err } if !blockHeader.Timestamp.After(medianTime) { str := "block timestamp of %v is not after expected %v" str = fmt.Sprintf(str, blockHeader.Timestamp, medianTime) return ruleError(ErrTimeTooOld, str) } // Ensure all transactions in the block are finalized. for _, tx := range block.Transactions() { if !IsFinalizedTransaction(tx, blockHeight, blockHeader.Timestamp) { str := fmt.Sprintf("block contains "+ "unfinalized transaction %v", tx.Sha()) return ruleError(ErrUnfinalizedTx, str) } } } // Ensure chain matches up to predetermined checkpoints. // It's safe to ignore the error on Sha since it's already cached. blockHash, _ := block.Sha() if !b.verifyCheckpoint(blockHeight, blockHash) { str := fmt.Sprintf("block at height %d does not match "+ "checkpoint hash", blockHeight) return ruleError(ErrBadCheckpoint, str) } // Find the previous checkpoint and prevent blocks which fork the main // chain before it. This prevents storage of new, otherwise valid, // blocks which build off of old blocks that are likely at a much easier // difficulty and therefore could be used to waste cache and disk space. checkpointBlock, err := b.findPreviousCheckpoint() if err != nil { return err } if checkpointBlock != nil && blockHeight < checkpointBlock.Height() { str := fmt.Sprintf("block at height %d forks the main chain "+ "before the previous checkpoint at height %d", blockHeight, checkpointBlock.Height()) return ruleError(ErrForkTooOld, str) } if !fastAdd { // Reject version 2 blocks once a majority of the network has // upgraded. This is part of BIP0066. 
if blockHeader.Version < 3 && b.isMajorityVersion(3, prevNode, b.chainParams.BlockRejectNumRequired) { str := "new blocks with version %d are no longer valid" str = fmt.Sprintf(str, blockHeader.Version) return ruleError(ErrBlockVersionTooOld, str) } // Reject version 1 blocks once a majority of the network has // upgraded. This is part of BIP0034. if blockHeader.Version < 2 && b.isMajorityVersion(2, prevNode, b.chainParams.BlockRejectNumRequired) { str := "new blocks with version %d are no longer valid" str = fmt.Sprintf(str, blockHeader.Version) return ruleError(ErrBlockVersionTooOld, str) } // Ensure coinbase starts with serialized block heights for // blocks whose version is the serializedHeightVersion or // newer once a majority of the network has upgraded. This is // part of BIP0034. if blockHeader.Version >= serializedHeightVersion && b.isMajorityVersion(serializedHeightVersion, prevNode, b.chainParams.BlockEnforceNumRequired) { expectedHeight := int64(0) if prevNode != nil { expectedHeight = prevNode.height + 1 } coinbaseTx := block.Transactions()[0] err := checkSerializedHeight(coinbaseTx, expectedHeight) if err != nil { return err } } } // Prune block nodes which are no longer needed before creating // a new node. if !dryRun { err = b.pruneBlockNodes() if err != nil { return err } } // Create a new block node for the block and add it to the in-memory // block chain (could be either a side chain or the main chain). newNode := newBlockNode(blockHeader, blockHash, blockHeight) if prevNode != nil { newNode.parent = prevNode newNode.height = blockHeight newNode.workSum.Add(prevNode.workSum, newNode.workSum) } // Connect the passed block to the chain while respecting proper chain // selection according to the chain with the most proof of work. This // also handles validation of the transaction scripts. err = b.connectBestChain(newNode, block, flags) if err != nil { return err } // Notify the caller that the new block was accepted into the block // chain. The caller would typically want to react by relaying the // inventory to other peers. if !dryRun { b.sendNotification(NTBlockAccepted, block) } return nil }
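// serializedHeightSketch is a simplified, hypothetical illustration of the
// BIP0034 rule enforced by checkSerializedHeight above: the coinbase signature
// script must begin with a data push of the block height encoded as a
// little-endian script number.  It is not the package's implementation and
// ignores corner cases such as the small-integer opcodes.
func serializedHeightSketch(coinbaseSigScript []byte, wantHeight int64) error {
	if len(coinbaseSigScript) == 0 {
		return fmt.Errorf("coinbase signature script is empty")
	}

	// The first opcode is expected to be a small direct data push whose
	// value is the number of height bytes that follow.
	pushLen := int(coinbaseSigScript[0])
	if pushLen < 1 || pushLen > 8 || len(coinbaseSigScript) < 1+pushLen {
		return fmt.Errorf("coinbase script does not start with a " +
			"valid height push")
	}

	// Decode the pushed bytes as a little-endian integer.
	var height int64
	for i, b := range coinbaseSigScript[1 : 1+pushLen] {
		height |= int64(b) << uint(8*i)
	}

	if height != wantHeight {
		return fmt.Errorf("coinbase height %d does not match expected "+
			"height %d", height, wantHeight)
	}
	return nil
}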
// InsertBlock inserts raw block and transaction data from a block into the
// database. The first block inserted into the database will be treated as the
// genesis block. Every subsequent block insert requires the referenced parent
// block to already exist. This is part of the database.Db interface
// implementation.
func (db *MemDb) InsertBlock(block *btcutil.Block) (int64, error) {
	db.Lock()
	defer db.Unlock()

	if db.closed {
		return 0, ErrDbClosed
	}

	// Reject the insert if the previously referenced block does not exist
	// except in the case there are no blocks inserted yet where the first
	// inserted block is assumed to be a genesis block.
	msgBlock := block.MsgBlock()
	if _, exists := db.blocksBySha[msgBlock.Header.PrevBlock]; !exists {
		if len(db.blocks) > 0 {
			return 0, database.ErrPrevShaMissing
		}
	}

	// Build a map of in-flight transactions because some of the inputs in
	// this block could be referencing other transactions earlier in this
	// block which are not yet in the chain.
	txInFlight := map[wire.ShaHash]int{}
	transactions := block.Transactions()
	for i, tx := range transactions {
		txInFlight[*tx.Sha()] = i
	}

	// Loop through all transactions and inputs to ensure there are no
	// error conditions that would prevent them from being inserted into
	// the db.  Although these checks could be done in the loop below,
	// checking for error conditions up front means the code below doesn't
	// have to deal with rollback on errors.
	newHeight := int64(len(db.blocks))
	for i, tx := range transactions {
		// Two old blocks contain duplicate transactions due to being
		// mined by faulty miners and accepted by the origin Satoshi
		// client. Rules have since been added to ensure this problem
		// can no longer happen, but the two duplicate transactions
		// which were originally accepted are forever in the block
		// chain history and must be dealt with specially.
		// http://blockexplorer.com/b/91842
		// http://blockexplorer.com/b/91880
		if newHeight == 91842 && tx.Sha().IsEqual(dupTxHash91842) {
			continue
		}

		if newHeight == 91880 && tx.Sha().IsEqual(dupTxHash91880) {
			continue
		}

		for _, txIn := range tx.MsgTx().TxIn {
			if isCoinbaseInput(txIn) {
				continue
			}

			// It is acceptable for a transaction input to reference
			// the output of another transaction in this block only
			// if the referenced transaction comes before the
			// current one in this block.
			prevOut := &txIn.PreviousOutPoint
			if inFlightIndex, ok := txInFlight[prevOut.Hash]; ok {
				if i <= inFlightIndex {
					log.Warnf("InsertBlock: requested hash "+
						"of %s does not exist in-flight",
						tx.Sha())
					return 0, database.ErrTxShaMissing
				}
			} else {
				originTxns, exists := db.txns[prevOut.Hash]
				if !exists {
					log.Warnf("InsertBlock: requested hash "+
						"of %s by %s does not exist",
						prevOut.Hash, tx.Sha())
					return 0, database.ErrTxShaMissing
				}
				originTxD := originTxns[len(originTxns)-1]
				if prevOut.Index >= uint32(len(originTxD.spentBuf)) {
					log.Warnf("InsertBlock: requested hash "+
						"of %s with index %d does not "+
						"exist", tx.Sha(), prevOut.Index)
					return 0, database.ErrTxShaMissing
				}
			}
		}

		// Prevent duplicate transactions in the same block.
		if inFlightIndex, exists := txInFlight[*tx.Sha()]; exists &&
			inFlightIndex < i {
			log.Warnf("Block contains duplicate transaction %s",
				tx.Sha())
			return 0, database.ErrDuplicateSha
		}

		// Prevent duplicate transactions unless the old one is fully
		// spent.
		if txns, exists := db.txns[*tx.Sha()]; exists {
			txD := txns[len(txns)-1]
			if !isFullySpent(txD) {
				log.Warnf("Attempt to insert duplicate "+
					"transaction %s", tx.Sha())
				return 0, database.ErrDuplicateSha
			}
		}
	}

	db.blocks = append(db.blocks, msgBlock)
	db.blocksBySha[*block.Sha()] = newHeight

	// Insert information about each transaction and spend all of the
	// outputs referenced by the inputs to the transactions.
	for i, tx := range block.Transactions() {
		// Insert the transaction data.
		txD := tTxInsertData{
			blockHeight: newHeight,
			offset:      i,
			spentBuf:    make([]bool, len(tx.MsgTx().TxOut)),
		}
		db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)

		// Spend all of the inputs.
		for _, txIn := range tx.MsgTx().TxIn {
			// The coinbase input does not reference a previous
			// output, so there is nothing to spend.
			if isCoinbaseInput(txIn) {
				continue
			}

			// Already checked for existing and valid ranges above.
			prevOut := &txIn.PreviousOutPoint
			originTxns := db.txns[prevOut.Hash]
			originTxD := originTxns[len(originTxns)-1]
			originTxD.spentBuf[prevOut.Index] = true
		}
	}

	return newHeight, nil
}
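// isFullySpentSketch illustrates the notion of "fully spent" relied on above:
// a transaction sha may only be duplicated once every output recorded in its
// spent buffer has been marked spent.  This is an illustrative sketch under
// the assumption that spentBuf holds one bool per output (as populated in
// InsertBlock above); the package's own isFullySpent helper may differ in
// detail.
func isFullySpentSketch(txD *tTxInsertData) bool {
	for _, spent := range txD.spentBuf {
		if !spent {
			return false
		}
	}
	return true
}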
// maybeAcceptBlock potentially accepts a block into the memory block chain. // It performs several validation checks which depend on its position within // the block chain before adding it. The block is expected to have already gone // through ProcessBlock before calling this function with it. // // The flags modify the behavior of this function as follows: // - BFDryRun: The memory chain index will not be pruned and no accept // notification will be sent since the block is not being accepted. // // The flags are also passed to checkBlockContext and connectBestChain. See // their documentation for how the flags modify their behavior. // // This function MUST be called with the chain state lock held (for writes). func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) error { dryRun := flags&BFDryRun == BFDryRun // Get a block node for the block previous to this one. Will be nil // if this is the genesis block. prevNode, err := b.getPrevNodeFromBlock(block) if err != nil { log.Errorf("getPrevNodeFromBlock: %v", err) return err } // The height of this block is one more than the referenced previous // block. blockHeight := int32(0) if prevNode != nil { blockHeight = prevNode.height + 1 } block.SetHeight(blockHeight) // The block must pass all of the validation rules which depend on the // position of the block within the block chain. err = b.checkBlockContext(block, prevNode, flags) if err != nil { return err } // Prune block nodes which are no longer needed before creating // a new node. if !dryRun { err = b.pruneBlockNodes() if err != nil { return err } } // Create a new block node for the block and add it to the in-memory // block chain (could be either a side chain or the main chain). blockHeader := &block.MsgBlock().Header newNode := newBlockNode(blockHeader, block.Sha(), blockHeight) if prevNode != nil { newNode.parent = prevNode newNode.height = blockHeight newNode.workSum.Add(prevNode.workSum, newNode.workSum) } // Connect the passed block to the chain while respecting proper chain // selection according to the chain with the most proof of work. This // also handles validation of the transaction scripts. err = b.connectBestChain(newNode, block, flags) if err != nil { return err } // Notify the caller that the new block was accepted into the block // chain. The caller would typically want to react by relaying the // inventory to other peers. if !dryRun { b.chainLock.Unlock() b.sendNotification(NTBlockAccepted, block) b.chainLock.Lock() } return nil }