// dbIndexDisconnectBlock removes all of the index entries associated with the
// given block using the provided indexer and updates the tip of the indexer
// accordingly.  An error will be returned if the current tip for the indexer
// is not the passed block.
func dbIndexDisconnectBlock(dbTx database.Tx, indexer Indexer,
	block *btcutil.Block, view *blockchain.UtxoViewpoint) error {

	// Assert that the block being disconnected is the current tip of the
	// index.
	idxKey := indexer.Key()
	curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey)
	if err != nil {
		return err
	}
	if !curTipHash.IsEqual(block.Hash()) {
		return AssertError(fmt.Sprintf("dbIndexDisconnectBlock must "+
			"be called with the block at the current index tip "+
			"(%s, tip %s, block %s)", indexer.Name(),
			curTipHash, block.Hash()))
	}

	// Notify the indexer with the disconnected block so it can remove all
	// of the appropriate entries.
	if err := indexer.DisconnectBlock(dbTx, block, view); err != nil {
		return err
	}

	// Update the current index tip.
	prevHash := &block.MsgBlock().Header.PrevBlock
	return dbPutIndexerTip(dbTx, idxKey, prevHash, block.Height()-1)
}
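// exampleDisconnectIndex is a minimal illustrative sketch of how a caller,
// such as the index manager, might drive dbIndexDisconnectBlock from a
// read-write database transaction during a reorganization.  The db, idx,
// block, and view arguments are assumed here to be an open database.DB, a
// registered Indexer, the block being detached from the main chain, and the
// unspent output view for that block.
func exampleDisconnectIndex(db database.DB, idx Indexer, block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {

	return db.Update(func(dbTx database.Tx) error {
		return dbIndexDisconnectBlock(dbTx, idx, block, view)
	})
}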
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain.  This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {

	// The offset and length of the transactions within the serialized
	// block.
	txLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// Get the internal block ID associated with the block.
	blockID, err := dbFetchBlockIDByHash(dbTx, block.Hash())
	if err != nil {
		return err
	}

	// Build all of the address to transaction mappings in a local map.
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, view)

	// Add all of the index entries for each address.
	addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		for _, txIdx := range txIdxs {
			err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
				blockID, txLocs[txIdx])
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// createBlock creates a new block building from the previous block.
func createBlock(prevBlock *btcutil.Block, inclusionTxs []*btcutil.Tx,
	blockVersion int32, blockTime time.Time,
	miningAddr btcutil.Address, net *chaincfg.Params) (*btcutil.Block, error) {

	prevHash := prevBlock.Hash()
	blockHeight := prevBlock.Height() + 1

	// If a target block time was specified, then use that as the header's
	// timestamp.  Otherwise, add one second to the previous block's
	// timestamp.
	var ts time.Time
	switch {
	case !blockTime.IsZero():
		ts = blockTime
	default:
		ts = prevBlock.MsgBlock().Header.Timestamp.Add(time.Second)
	}

	extraNonce := uint64(0)
	coinbaseScript, err := standardCoinbaseScript(blockHeight, extraNonce)
	if err != nil {
		return nil, err
	}
	coinbaseTx, err := createCoinbaseTx(coinbaseScript, blockHeight,
		miningAddr, net)
	if err != nil {
		return nil, err
	}

	// Create a new block ready to be solved.
	blockTxns := []*btcutil.Tx{coinbaseTx}
	if inclusionTxs != nil {
		blockTxns = append(blockTxns, inclusionTxs...)
	}
	merkles := blockchain.BuildMerkleTreeStore(blockTxns)
	var block wire.MsgBlock
	block.Header = wire.BlockHeader{
		Version:    blockVersion,
		PrevBlock:  *prevHash,
		MerkleRoot: *merkles[len(merkles)-1],
		Timestamp:  ts,
		Bits:       net.PowLimitBits,
	}
	for _, tx := range blockTxns {
		if err := block.AddTransaction(tx.MsgTx()); err != nil {
			return nil, err
		}
	}

	found := solveBlock(&block.Header, net.PowLimit)
	if !found {
		return nil, errors.New("unable to solve block")
	}

	utilBlock := btcutil.NewBlock(&block)
	utilBlock.SetHeight(blockHeight)
	return utilBlock, nil
}
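// exampleCreateBlock is a minimal illustrative sketch of how a test harness
// might use createBlock to extend a chain by a single empty block.  The
// prevBlock and miningAddr arguments are assumed to come from the harness, the
// zero time.Time lets createBlock derive the timestamp from the previous
// block, block version 4 is chosen purely for illustration, and the simulation
// network parameters keep the proof of work trivial to solve.
func exampleCreateBlock(prevBlock *btcutil.Block,
	miningAddr btcutil.Address) (*btcutil.Block, error) {

	return createBlock(prevBlock, nil, 4, time.Time{}, miningAddr,
		&chaincfg.SimNetParams)
}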
// maybeAcceptBlock potentially accepts a block into the block chain and, if
// accepted, returns whether or not it is on the main chain.  It performs
// several validation checks which depend on its position within the block
// chain before adding it.  The block is expected to have already gone through
// ProcessBlock before calling this function with it.
//
// The flags modify the behavior of this function as follows:
//  - BFDryRun: The memory chain index will not be pruned and no accept
//    notification will be sent since the block is not being accepted.
//
// The flags are also passed to checkBlockContext and connectBestChain.  See
// their documentation for how the flags modify their behavior.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) (bool, error) {
	dryRun := flags&BFDryRun == BFDryRun

	// Get a block node for the block previous to this one.  Will be nil
	// if this is the genesis block.
	prevNode, err := b.getPrevNodeFromBlock(block)
	if err != nil {
		log.Errorf("getPrevNodeFromBlock: %v", err)
		return false, err
	}

	// The height of this block is one more than the referenced previous
	// block.
	blockHeight := int32(0)
	if prevNode != nil {
		blockHeight = prevNode.height + 1
	}
	block.SetHeight(blockHeight)

	// The block must pass all of the validation rules which depend on the
	// position of the block within the block chain.
	err = b.checkBlockContext(block, prevNode, flags)
	if err != nil {
		return false, err
	}

	// Create a new block node for the block and add it to the in-memory
	// block chain (could be either a side chain or the main chain).
	blockHeader := &block.MsgBlock().Header
	newNode := newBlockNode(blockHeader, block.Hash(), blockHeight)
	if prevNode != nil {
		newNode.parent = prevNode
		newNode.height = blockHeight
		newNode.workSum.Add(prevNode.workSum, newNode.workSum)
	}

	// Connect the passed block to the chain while respecting proper chain
	// selection according to the chain with the most proof of work.  This
	// also handles validation of the transaction scripts.
	isMainChain, err := b.connectBestChain(newNode, block, flags)
	if err != nil {
		return false, err
	}

	// Notify the caller that the new block was accepted into the block
	// chain.  The caller would typically want to react by relaying the
	// inventory to other peers.
	if !dryRun {
		b.chainLock.Unlock()
		b.sendNotification(NTBlockAccepted, block)
		b.chainLock.Lock()
	}

	return isMainChain, nil
}
// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain.  This indexer removes the
// hash-to-transaction mapping for every transaction in the block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {

	// Remove all of the transactions in the block from the index.
	if err := dbRemoveTxIndexEntries(dbTx, block); err != nil {
		return err
	}

	// Remove the block ID index entry for the block being disconnected and
	// decrement the current internal block ID to account for it.
	if err := dbRemoveBlockIDIndexEntry(dbTx, block.Hash()); err != nil {
		return err
	}
	idx.curBlockID--
	return nil
}
// CheckConnectBlock performs several checks to confirm connecting the passed
// block to the main chain does not violate any rules.  Examples of the checks
// performed are ensuring that connecting the block would not cause any
// duplicate transaction hashes for old transactions that aren't already fully
// spent, that there are no double spends, that the maximum allowed number of
// signature operations per block is not exceeded, that the values are valid in
// relation to the expected block subsidy, and that the transaction scripts
// validate.
//
// This function is safe for concurrent access.
func (b *BlockChain) CheckConnectBlock(block *btcutil.Block) error {
	b.chainLock.Lock()
	defer b.chainLock.Unlock()

	prevNode := b.bestNode
	newNode := newBlockNode(&block.MsgBlock().Header, block.Hash(),
		prevNode.height+1)
	newNode.parent = prevNode
	newNode.workSum.Add(prevNode.workSum, newNode.workSum)

	// Leave the spent txouts entry nil in the state since the information
	// is not needed and thus extra work can be avoided.
	view := NewUtxoViewpoint()
	view.SetBestHash(prevNode.hash)
	return b.checkConnectBlock(newNode, block, view, nil)
}
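// exampleCheckCandidate is a minimal illustrative sketch of how a caller, such
// as block template generation, might use CheckConnectBlock to validate a
// candidate block against the current best chain without mutating any chain
// state.  The chain argument is assumed to be an initialized *BlockChain.
func exampleCheckCandidate(chain *BlockChain, block *btcutil.Block) bool {
	if err := chain.CheckConnectBlock(block); err != nil {
		log.Debugf("Candidate block %v failed connect checks: %v",
			block.Hash(), err)
		return false
	}
	return true
}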
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain.  This indexer adds a hash-to-transaction
// mapping for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {

	// Increment the internal block ID to use for the block being connected
	// and add all of the transactions in the block to the index.
	newBlockID := idx.curBlockID + 1
	if err := dbAddTxIndexEntries(dbTx, block, newBlockID); err != nil {
		return err
	}

	// Add the new block ID index entry for the block being connected and
	// update the current internal block ID accordingly.
	err := dbPutBlockIDIndexEntry(dbTx, block.Hash(), newBlockID)
	if err != nil {
		return err
	}
	idx.curBlockID = newBlockID
	return nil
}
// submitBlock submits the passed block to the network after ensuring it passes
// all of the consensus validation rules.
func (m *CPUMiner) submitBlock(block *btcutil.Block) bool {
	m.submitBlockLock.Lock()
	defer m.submitBlockLock.Unlock()

	// Ensure the block is not stale since a new block could have shown up
	// while the solution was being found.  Typically that condition is
	// detected and all work on the stale block is halted to start work on
	// a new block, but the check only happens periodically, so it is
	// possible a block was found and submitted in between.
	latestHash, _ := m.server.blockManager.chainState.Best()
	msgBlock := block.MsgBlock()
	if !msgBlock.Header.PrevBlock.IsEqual(latestHash) {
		minrLog.Debugf("Block submitted via CPU miner with previous "+
			"block %s is stale", msgBlock.Header.PrevBlock)
		return false
	}

	// Process this block using the same rules as blocks coming from other
	// nodes.  This will in turn relay it to the network like normal.
	isOrphan, err := m.server.blockManager.ProcessBlock(block,
		blockchain.BFNone)
	if err != nil {
		// Anything other than a rule violation is an unexpected error,
		// so log that error as an internal error.
		if _, ok := err.(blockchain.RuleError); !ok {
			minrLog.Errorf("Unexpected error while processing "+
				"block submitted via CPU miner: %v", err)
			return false
		}

		minrLog.Debugf("Block submitted via CPU miner rejected: %v",
			err)
		return false
	}
	if isOrphan {
		minrLog.Debugf("Block submitted via CPU miner is an orphan")
		return false
	}

	// The block was accepted.  Log the hash and the value of the first
	// coinbase output.
	coinbaseTxOut := block.MsgBlock().Transactions[0].TxOut[0]
	minrLog.Infof("Block submitted via CPU miner accepted (hash %s, "+
		"amount %v)", block.Hash(), btcutil.Amount(coinbaseTxOut.Value))
	return true
}
// dbFetchSpendJournalEntry fetches the spend journal entry for the passed
// block and deserializes it into a slice of spent txout entries.  The provided
// view MUST have the utxos referenced by all of the transactions available for
// the passed block since that information is required to reconstruct the spent
// txouts.
func dbFetchSpendJournalEntry(dbTx database.Tx, block *btcutil.Block,
	view *UtxoViewpoint) ([]spentTxOut, error) {

	// Exclude the coinbase transaction since it can't spend anything.
	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)
	serialized := spendBucket.Get(block.Hash()[:])
	blockTxns := block.MsgBlock().Transactions[1:]
	stxos, err := deserializeSpendJournalEntry(serialized, blockTxns, view)
	if err != nil {
		// Ensure any deserialization errors are returned as database
		// corruption errors.
		if isDeserializeErr(err) {
			return nil, database.Error{
				ErrorCode: database.ErrCorruption,
				Description: fmt.Sprintf("corrupt spend "+
					"information for %v: %v", block.Hash(),
					err),
			}
		}

		return nil, err
	}

	return stxos, nil
}
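// exampleFetchSpentTxOuts is a minimal illustrative sketch of how
// dbFetchSpendJournalEntry might be called from a read-only database
// transaction.  The view is assumed to already contain the utxos referenced by
// the transactions in the block, as the function's contract requires.
func exampleFetchSpentTxOuts(db database.DB, block *btcutil.Block,
	view *UtxoViewpoint) ([]spentTxOut, error) {

	var stxos []spentTxOut
	err := db.View(func(dbTx database.Tx) error {
		var err error
		stxos, err = dbFetchSpendJournalEntry(dbTx, block, view)
		return err
	})
	return stxos, err
}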
// dbIndexConnectBlock adds all of the index entries associated with the
// given block using the provided indexer and updates the tip of the indexer
// accordingly.  An error will be returned if the current tip for the indexer
// is not the previous block for the passed block.
func dbIndexConnectBlock(dbTx database.Tx, indexer Indexer,
	block *btcutil.Block, view *blockchain.UtxoViewpoint) error {

	// Assert that the block being connected properly connects to the
	// current tip of the index.
	idxKey := indexer.Key()
	curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey)
	if err != nil {
		return err
	}
	if !curTipHash.IsEqual(&block.MsgBlock().Header.PrevBlock) {
		return AssertError(fmt.Sprintf("dbIndexConnectBlock must be "+
			"called with a block that extends the current index "+
			"tip (%s, tip %s, block %s)", indexer.Name(),
			curTipHash, block.Hash()))
	}

	// Notify the indexer with the connected block so it can index it.
	if err := indexer.ConnectBlock(dbTx, block, view); err != nil {
		return err
	}

	// Update the current index tip.
	return dbPutIndexerTip(dbTx, idxKey, block.Hash(), block.Height())
}
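// exampleConnectIndex is a minimal illustrative sketch, the counterpart to
// exampleDisconnectIndex above, showing how the index manager might drive
// dbIndexConnectBlock from a read-write database transaction when a block
// extends the main chain.  The db, idx, block, and view arguments carry the
// same assumptions as in the disconnect sketch.
func exampleConnectIndex(db database.DB, idx Indexer, block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {

	return db.Update(func(dbTx database.Tx) error {
		return dbIndexConnectBlock(dbTx, idx, block, view)
	})
}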
// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block chain.  It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, orphan handling, and insertion
// into the block chain along with best chain selection and reorganization.
//
// When no errors occurred during processing, the first return value indicates
// whether or not the block is on the main chain and the second indicates
// whether or not the block is an orphan.
//
// This function is safe for concurrent access.
func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bool, bool, error) {
	b.chainLock.Lock()
	defer b.chainLock.Unlock()

	fastAdd := flags&BFFastAdd == BFFastAdd
	dryRun := flags&BFDryRun == BFDryRun

	blockHash := block.Hash()
	log.Tracef("Processing block %v", blockHash)

	// The block must not already exist in the main chain or side chains.
	exists, err := b.blockExists(blockHash)
	if err != nil {
		return false, false, err
	}
	if exists {
		str := fmt.Sprintf("already have block %v", blockHash)
		return false, false, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := b.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %v", blockHash)
		return false, false, ruleError(ErrDuplicateBlock, str)
	}

	// Perform preliminary sanity checks on the block and its transactions.
	err = checkBlockSanity(block, b.chainParams.PowLimit, b.timeSource,
		flags)
	if err != nil {
		return false, false, err
	}

	// Find the previous checkpoint and perform some additional checks
	// based on the checkpoint.  This provides a few nice properties such
	// as preventing old side chain blocks before the last checkpoint,
	// rejecting easy to mine, but otherwise bogus, blocks that could be
	// used to eat memory, and ensuring expected (versus claimed) proof of
	// work requirements since the previous checkpoint are met.
	blockHeader := &block.MsgBlock().Header
	checkpointBlock, err := b.findPreviousCheckpoint()
	if err != nil {
		return false, false, err
	}
	if checkpointBlock != nil {
		// Ensure the block timestamp is after the checkpoint timestamp.
		checkpointHeader := &checkpointBlock.MsgBlock().Header
		checkpointTime := checkpointHeader.Timestamp
		if blockHeader.Timestamp.Before(checkpointTime) {
			str := fmt.Sprintf("block %v has timestamp %v before "+
				"last checkpoint timestamp %v", blockHash,
				blockHeader.Timestamp, checkpointTime)
			return false, false, ruleError(ErrCheckpointTimeTooOld,
				str)
		}
		if !fastAdd {
			// Even though the checks prior to now have already
			// ensured the proof of work exceeds the claimed
			// amount, the claimed amount is a field in the block
			// header which could be forged.  This check ensures
			// the proof of work is at least the minimum expected
			// based on elapsed time since the last checkpoint and
			// maximum adjustment allowed by the retarget rules.
			duration := blockHeader.Timestamp.Sub(checkpointTime)
			requiredTarget := CompactToBig(b.calcEasiestDifficulty(
				checkpointHeader.Bits, duration))
			currentTarget := CompactToBig(blockHeader.Bits)
			if currentTarget.Cmp(requiredTarget) > 0 {
				str := fmt.Sprintf("block target difficulty "+
					"of %064x is too low when compared "+
					"to the previous checkpoint",
					currentTarget)
				return false, false, ruleError(
					ErrDifficultyTooLow, str)
			}
		}
	}

	// Handle orphan blocks.
	prevHash := &blockHeader.PrevBlock
	prevHashExists, err := b.blockExists(prevHash)
	if err != nil {
		return false, false, err
	}
	if !prevHashExists {
		if !dryRun {
			log.Infof("Adding orphan block %v with parent %v",
				blockHash, prevHash)
			b.addOrphanBlock(block)
		}

		return false, true, nil
	}

	// The block has passed all context independent checks and appears sane
	// enough to potentially accept it into the block chain.
	isMainChain, err := b.maybeAcceptBlock(block, flags)
	if err != nil {
		return false, false, err
	}

	// Don't process any orphans or log when the dry run flag is set.
	if !dryRun {
		// Accept any orphan blocks that depend on this block (they are
		// no longer orphans) and repeat for those accepted blocks
		// until there are no more.
		err := b.processOrphans(blockHash, flags)
		if err != nil {
			return false, false, err
		}

		log.Debugf("Accepted block %v", blockHash)
	}

	return isMainChain, false, nil
}
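// exampleProcessBlock is a minimal illustrative sketch of how a caller might
// invoke ProcessBlock and interpret its two boolean return values.  The chain
// argument is assumed to be an initialized *BlockChain.
func exampleProcessBlock(chain *BlockChain, block *btcutil.Block) {
	isMainChain, isOrphan, err := chain.ProcessBlock(block, BFNone)
	if err != nil {
		log.Errorf("Failed to process block %v: %v", block.Hash(), err)
		return
	}

	log.Infof("Processed block %v (main chain: %v, orphan: %v)",
		block.Hash(), isMainChain, isOrphan)
}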
// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The blocks before and after the checkpoint must have timestamps which are
//    respectively before and after the checkpoint's timestamp (due to the
//    median time allowance this is not always the case)
//  - The block must not contain any strange transactions such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (b *BlockChain) IsCheckpointCandidate(block *btcutil.Block) (bool, error) {
	b.chainLock.RLock()
	defer b.chainLock.RUnlock()

	// Checkpoints must be enabled.
	if b.noCheckpoints {
		return false, fmt.Errorf("checkpoints are disabled")
	}

	var isCandidate bool
	err := b.db.View(func(dbTx database.Tx) error {
		// A checkpoint must be in the main chain.
		blockHeight, err := dbFetchHeightByHash(dbTx, block.Hash())
		if err != nil {
			// Only return an error if it's not due to the block
			// not being in the main chain.
			if !isNotInMainChainErr(err) {
				return err
			}
			return nil
		}

		// Ensure the height of the passed block and the entry for the
		// block in the main chain match.  This should always be the
		// case unless the caller provided an invalid block.
		if blockHeight != block.Height() {
			return fmt.Errorf("passed block height of %d does not "+
				"match the main chain height of %d",
				block.Height(), blockHeight)
		}

		// A checkpoint must be at least CheckpointConfirmations blocks
		// before the end of the main chain.
		mainChainHeight := b.bestNode.height
		if blockHeight > (mainChainHeight - CheckpointConfirmations) {
			return nil
		}

		// Get the previous block header.
		prevHash := &block.MsgBlock().Header.PrevBlock
		prevHeader, err := dbFetchHeaderByHash(dbTx, prevHash)
		if err != nil {
			return err
		}

		// Get the next block header.
		nextHeader, err := dbFetchHeaderByHeight(dbTx, blockHeight+1)
		if err != nil {
			return err
		}

		// A checkpoint must have timestamps for the block and the
		// blocks on either side of it in order (due to the median time
		// allowance this is not always the case).
		prevTime := prevHeader.Timestamp
		curTime := block.MsgBlock().Header.Timestamp
		nextTime := nextHeader.Timestamp
		if prevTime.After(curTime) || nextTime.Before(curTime) {
			return nil
		}

		// A checkpoint must have transactions that only contain
		// standard scripts.
		for _, tx := range block.Transactions() {
			if isNonstandardTransaction(tx) {
				return nil
			}
		}

		// All of the checks passed, so the block is a candidate.
		isCandidate = true
		return nil
	})
	return isCandidate, err
}
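// exampleCheckpointScan is a minimal illustrative sketch of how a developer
// tool might use IsCheckpointCandidate to flag a block for manual review
// before it is added to the hard-coded checkpoint list for a network.
func exampleCheckpointScan(chain *BlockChain, block *btcutil.Block) error {
	isCandidate, err := chain.IsCheckpointCandidate(block)
	if err != nil {
		return err
	}
	if isCandidate {
		log.Infof("Block %v (height %d) is a checkpoint candidate",
			block.Hash(), block.Height())
	}
	return nil
}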