// LogBlockHeight logs a new block height as an information message to show
// progress to the user.  In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockProgressLogger) LogBlockHeight(block *dcrutil.Block) {
	b.Lock()
	defer b.Unlock()

	b.receivedLogBlocks++
	b.receivedLogTx += int64(len(block.MsgBlock().Transactions))

	now := time.Now()
	duration := now.Sub(b.lastBlockLogTime)
	if duration < time.Second*10 {
		return
	}

	// Truncate the duration to 10s of milliseconds.
	durationMillis := int64(duration / time.Millisecond)
	tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)

	// Log information about new block height.
	blockStr := "blocks"
	if b.receivedLogBlocks == 1 {
		blockStr = "block"
	}
	txStr := "transactions"
	if b.receivedLogTx == 1 {
		txStr = "transaction"
	}
	b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)",
		b.progressAction, b.receivedLogBlocks, blockStr, tDuration,
		b.receivedLogTx, txStr, block.Height(),
		block.MsgBlock().Header.Timestamp)

	b.receivedLogBlocks = 0
	b.receivedLogTx = 0
	b.lastBlockLogTime = now
}
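// For context, a minimal sketch of what the receiver type and a constructor
// plausibly look like, inferred purely from the fields the method above uses.
// The progressLogger interface name is an assumption standing in for the
// package's actual logger type; this is a sketch, not the real definition.
type progressLogger interface {
	Infof(format string, params ...interface{})
}

type blockProgressLogger struct {
	sync.Mutex
	receivedLogBlocks int64
	receivedLogTx     int64
	lastBlockLogTime  time.Time
	subsystemLogger   progressLogger
	progressAction    string // e.g. "Processed"
}

// newBlockProgressLogger seeds the log timer so the first report happens
// roughly 10 seconds after startup (sketch only).
func newBlockProgressLogger(progressAction string, logger progressLogger) *blockProgressLogger {
	return &blockProgressLogger{
		lastBlockLogTime: time.Now(),
		progressAction:   progressAction,
		subsystemLogger:  logger,
	}
}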
// dbIndexDisconnectBlock removes all of the index entries associated with the
// given block using the provided indexer and updates the tip of the indexer
// accordingly.  An error will be returned if the current tip for the indexer
// is not the passed block.
func dbIndexDisconnectBlock(dbTx database.Tx, indexer Indexer,
	block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// Assert that the block being disconnected is the current tip of the
	// index.
	idxKey := indexer.Key()
	curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey)
	if err != nil {
		return err
	}
	if !curTipHash.IsEqual(block.Sha()) {
		return AssertError(fmt.Sprintf("dbIndexDisconnectBlock must "+
			"be called with the block at the current index tip "+
			"(%s, tip %s, block %s)", indexer.Name(),
			curTipHash, block.Sha()))
	}

	// Notify the indexer with the disconnected block so it can remove all
	// of the appropriate entries.
	if err := indexer.DisconnectBlock(dbTx, block, parent, view); err != nil {
		return err
	}

	// Update the current index tip.
	prevHash := &block.MsgBlock().Header.PrevBlock
	return dbPutIndexerTip(dbTx, idxKey, prevHash, uint32(block.Height())-1)
}
// submitBlock submits the passed block to network after ensuring it passes all
// of the consensus validation rules.
func (m *CPUMiner) submitBlock(block *dcrutil.Block) bool {
	m.submitBlockLock.Lock()
	defer m.submitBlockLock.Unlock()

	_, latestHeight := m.server.blockManager.chainState.Best()

	// Be sure to set this so ProcessBlock doesn't fail! - Decred
	block.SetHeight(latestHeight + 1)

	// Process this block using the same rules as blocks coming from other
	// nodes.  This will in turn relay it to the network like normal.
	isOrphan, err := m.server.blockManager.ProcessBlock(block, blockchain.BFNone)
	if err != nil {
		// Anything other than a rule violation is an unexpected error,
		// so log that error as an internal error.
		if rErr, ok := err.(blockchain.RuleError); !ok {
			minrLog.Errorf("Unexpected error while processing "+
				"block submitted via CPU miner: %v", err)
			return false
		} else {
			// Occasionally errors are reported for timing issues with
			// ResetMinDifficulty and for block hashes that are above
			// the target.  Log these at debug level.
			if m.server.chainParams.ResetMinDifficulty &&
				rErr.ErrorCode == blockchain.ErrHighHash {
				minrLog.Debugf("Block submitted via CPU miner rejected "+
					"because of ResetMinDifficulty time sync failure: %v",
					err)
				return false
			} else {
				// Other rule errors should be reported.
				minrLog.Errorf("Block submitted via CPU miner rejected: %v",
					err)
				return false
			}
		}
	}
	if isOrphan {
		minrLog.Errorf("Block submitted via CPU miner is an orphan building "+
			"on parent %v", block.MsgBlock().Header.PrevBlock)
		return false
	}

	// The block was accepted.
	coinbaseTxOuts := block.MsgBlock().Transactions[0].TxOut
	coinbaseTxGenerated := int64(0)
	for _, out := range coinbaseTxOuts {
		coinbaseTxGenerated += out.Value
	}
	minrLog.Infof("Block submitted via CPU miner accepted (hash %s, "+
		"height %v, amount %v)", block.Sha(), block.Height(),
		dcrutil.Amount(coinbaseTxGenerated))
	return true
}
// DebugBlockString dumps a verbose message containing information about
// the transactions of a block.
func DebugBlockString(block *dcrutil.Block) string {
	if block == nil {
		return "block pointer nil"
	}

	var buffer bytes.Buffer

	hash := block.Sha()
	str := fmt.Sprintf("Block Header: %v Height: %v \n", hash, block.Height())
	buffer.WriteString(str)

	str = fmt.Sprintf("Block contains %v regular transactions "+
		"and %v stake transactions \n", len(block.Transactions()),
		len(block.STransactions()))
	buffer.WriteString(str)

	str = fmt.Sprintf("List of regular transactions \n")
	buffer.WriteString(str)

	for i, tx := range block.Transactions() {
		str = fmt.Sprintf("Index: %v, Hash: %v \n", i, tx.Sha())
		buffer.WriteString(str)
	}

	if len(block.STransactions()) == 0 {
		return buffer.String()
	}

	str = fmt.Sprintf("List of stake transactions \n")
	buffer.WriteString(str)

	for i, stx := range block.STransactions() {
		txTypeStr := ""
		txType := stake.DetermineTxType(stx)
		switch txType {
		case stake.TxTypeSStx:
			txTypeStr = "SStx"
		case stake.TxTypeSSGen:
			txTypeStr = "SSGen"
		case stake.TxTypeSSRtx:
			txTypeStr = "SSRtx"
		default:
			txTypeStr = "Error"
		}

		str = fmt.Sprintf("Index: %v, Type: %v, Hash: %v \n", i, txTypeStr,
			stx.Sha())
		buffer.WriteString(str)
	}

	return buffer.String()
}
// InsertBlock synchronously queues a newly solved block to have its
// transactions indexed by address.
func (a *addrIndexer) InsertBlock(block *dcrutil.Block, parent *dcrutil.Block) error {
	addrIndex, err := a.indexBlockAddrs(block, parent)
	if err != nil {
		return fmt.Errorf("Unable to index transactions of"+
			" block: %v", err)
	}
	err = a.server.db.UpdateAddrIndexForBlock(block.Sha(), block.Height(),
		addrIndex)
	if err != nil {
		return fmt.Errorf("Unable to insert block: %v", err.Error())
	}
	return nil
}
// connectTxTree lets you connect an arbitrary TxTree to a txStore to push it
// forward in history.
// TxTree true == TxTreeRegular
// TxTree false == TxTreeStake
func connectTxTree(txStore TxStore, block *dcrutil.Block, txTree bool) {
	var transactions []*dcrutil.Tx
	if txTree {
		transactions = block.Transactions()
	} else {
		transactions = block.STransactions()
	}

	// Loop through all of the transactions in the block to see if any of
	// them are ones we need to update and spend based on the results map.
	for i, tx := range transactions {
		// Update the transaction store with the transaction information
		// if it's one of the requested transactions.
		msgTx := tx.MsgTx()
		if txD, exists := txStore[*tx.Sha()]; exists {
			txD.Tx = tx
			txD.BlockHeight = block.Height()
			txD.BlockIndex = uint32(i)
			txD.Spent = make([]bool, len(msgTx.TxOut))
			txD.Err = nil
		}

		// Spend the origin transaction output.
		for _, txIn := range msgTx.TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			if originTx, exists := txStore[*originHash]; exists {
				if originTx.Spent == nil {
					continue
				}
				if originIndex >= uint32(len(originTx.Spent)) {
					continue
				}
				originTx.Spent[originIndex] = true
			}
		}
	}
}
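// A hedged usage sketch of the true/false convention documented above; the
// wrapper name is illustrative only and not part of the package.
func connectBothTxTrees(txStore TxStore, block *dcrutil.Block) {
	connectTxTree(txStore, block, true)  // TxTreeRegular
	connectTxTree(txStore, block, false) // TxTreeStake
}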
// dbIndexConnectBlock adds all of the index entries associated with the
// given block using the provided indexer and updates the tip of the indexer
// accordingly.  An error will be returned if the current tip for the indexer
// is not the previous block for the passed block.
func dbIndexConnectBlock(dbTx database.Tx, indexer Indexer,
	block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// Assert that the block being connected properly connects to the
	// current tip of the index.
	idxKey := indexer.Key()
	curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey)
	if err != nil {
		return err
	}
	if !curTipHash.IsEqual(&block.MsgBlock().Header.PrevBlock) {
		return AssertError(fmt.Sprintf("dbIndexConnectBlock must be "+
			"called with a block that extends the current index "+
			"tip (%s, tip %s, block %s)", indexer.Name(),
			curTipHash, block.Sha()))
	}

	// Notify the indexer with the connected block so it can index it.
	if err := indexer.ConnectBlock(dbTx, block, parent, view); err != nil {
		return err
	}

	// Update the current index tip.
	return dbPutIndexerTip(dbTx, idxKey, block.Sha(), uint32(block.Height()))
}
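// Illustrative sketch only: roughly how an index manager might drive the
// helper above when the chain connects a block, assuming the database
// package's Update method and a manager type holding an enabledIndexes
// slice.  The Manager type and field names here are assumptions for the
// example, not the package's actual definitions.
func (m *Manager) connectBlock(db database.DB, block, parent *dcrutil.Block,
	view *blockchain.UtxoViewpoint) error {
	return db.Update(func(dbTx database.Tx) error {
		// Run every enabled indexer inside the same database transaction so
		// either all index tips advance or none do.
		for _, indexer := range m.enabledIndexes {
			err := dbIndexConnectBlock(dbTx, indexer, block, parent, view)
			if err != nil {
				return err
			}
		}
		return nil
	})
}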
// connectTransactions updates the view by adding all new utxos created by all
// of the transactions in the passed block, marking all utxos the transactions
// spend as spent, and setting the best hash for the view to the passed block.
// In addition, when the 'stxos' argument is not nil, it will be updated to
// append an entry for each spent txout.
func (b *BlockChain) connectTransactions(view *UtxoViewpoint,
	block *dcrutil.Block, parent *dcrutil.Block, stxos *[]spentTxOut) error {
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	thisNodeStakeViewpoint := ViewpointPrevInvalidStake
	if regularTxTreeValid {
		thisNodeStakeViewpoint = ViewpointPrevValidStake
	}

	if parent != nil && block.Height() != 0 {
		view.SetStakeViewpoint(ViewpointPrevValidInitial)
		err := view.fetchInputUtxos(b.db, block, parent)
		if err != nil {
			return err
		}

		mBlock := block.MsgBlock()
		votebits := mBlock.Header.VoteBits
		regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid)
		if regularTxTreeValid {
			for i, tx := range parent.Transactions() {
				err := view.connectTransaction(tx, parent.Height(),
					uint32(i), stxos)
				if err != nil {
					return err
				}
			}
		}
	}

	for i, stx := range block.STransactions() {
		view.SetStakeViewpoint(thisNodeStakeViewpoint)
		err := view.fetchInputUtxos(b.db, block, parent)
		if err != nil {
			return err
		}
		err = view.connectTransaction(stx, block.Height(), uint32(i), stxos)
		if err != nil {
			return err
		}
	}

	// Update the best hash for view to include this block since all of its
	// transactions have been connected.
	view.SetBestHash(block.Sha())
	return nil
}
// InsertBlock inserts raw block and transaction data from a block into the
// database.  The first block inserted into the database will be treated as the
// genesis block.  Every subsequent block insert requires the referenced parent
// block to already exist.
func (db *LevelDb) InsertBlock(block *dcrutil.Block) (height int64, rerr error) {
	// Be careful with this function on syncs.  It contains decred changes.

	// Obtain the previous block first so long as it's not the genesis block.
	var blockPrev *dcrutil.Block

	// Decred: WARNING.  This function assumes that all block insertion calls
	// have dcrutil.Blocks passed to them with block.blockHeight set correctly.
	// However, loading the genesis block in btcd didn't do this (via block
	// manager); pre-production it should be established that all calls to
	// this function pass blocks with block.blockHeight set correctly.
	if block.Height() != 0 {
		var errBlockPrev error
		blockPrev, errBlockPrev = db.FetchBlockBySha(&block.MsgBlock().Header.PrevBlock)
		if errBlockPrev != nil {
			blockSha := block.Sha()
			log.Warnf("Failed to fetch parent block of block %v", blockSha)
			return 0, errBlockPrev
		}
	}

	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	blocksha := block.Sha()
	mblock := block.MsgBlock()
	rawMsg, err := block.Bytes()
	if err != nil {
		log.Warnf("Failed to obtain raw block sha %v", blocksha)
		return 0, err
	}
	_, sTxLoc, err := block.TxLoc()
	if err != nil {
		log.Warnf("Failed to obtain raw block sha %v, stxloc %v", blocksha,
			sTxLoc)
		return 0, err
	}

	// Insert block into database.
	newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock,
		rawMsg)
	if err != nil {
		log.Warnf("Failed to insert block %v %v %v", blocksha,
			&mblock.Header.PrevBlock, err)
		return 0, err
	}

	// Get data necessary to process the regular tx tree of the parent block
	// if it's not the genesis block.
	var mBlockPrev *wire.MsgBlock
	var txLoc []wire.TxLoc
	if blockPrev != nil {
		blockShaPrev := blockPrev.Sha()
		mBlockPrev = blockPrev.MsgBlock()
		txLoc, _, err = blockPrev.TxLoc()
		if err != nil {
			log.Warnf("Failed to obtain raw block sha %v, txloc %v",
				blockShaPrev, txLoc)
			return 0, err
		}
	}

	// Insert the regular tx of the parent block into the tx database if the
	// vote bits enable it, and if it's not the genesis block.
	votebits := mblock.Header.VoteBits
	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		for txidx, tx := range mBlockPrev.Transactions {
			txsha, err := blockPrev.TxSha(txidx)
			if err != nil {
				log.Warnf("failed to compute tx name block %v idx %v err %v",
					blocksha, txidx, err)
				return 0, err
			}

			spentbuflen := (len(tx.TxOut) + 7) / 8
			spentbuf := make([]byte, spentbuflen)
			if len(tx.TxOut)%8 != 0 {
				for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
					spentbuf[spentbuflen-1] |= (byte(1) << i)
				}
			}

			// newheight-1 instead of newheight below, as the tx is actually
			// found in the parent.
			//fmt.Printf("insert tx %v into db at height %v\n", txsha, newheight)
			err = db.insertTx(txsha, newheight-1, uint32(txidx),
				txLoc[txidx].TxStart, txLoc[txidx].TxLen, spentbuf)
			if err != nil {
				log.Warnf("block %v idx %v failed to insert tx %v %v err %v",
					blocksha, newheight-1, &txsha, txidx, err)
				return 0, err
			}

			err = db.doSpend(tx)
			if err != nil {
				log.Warnf("block %v idx %v failed to spend tx %v %v err %v",
					blocksha, newheight, txsha, txidx, err)
				return 0, err
			}
		}
	}

	// Insert the stake tx of the current block into the tx database.
	if len(mblock.STransactions) != 0 {
		for txidx, tx := range mblock.STransactions {
			txsha, err := block.STxSha(txidx)
			if err != nil {
				log.Warnf("failed to compute stake tx name block %v idx %v "+
					"err %v", blocksha, txidx, err)
				return 0, err
			}

			spentbuflen := (len(tx.TxOut) + 7) / 8
			spentbuf := make([]byte, spentbuflen)
			if len(tx.TxOut)%8 != 0 {
				for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
					spentbuf[spentbuflen-1] |= (byte(1) << i)
				}
			}

			err = db.insertTx(txsha, newheight, uint32(txidx),
				sTxLoc[txidx].TxStart, sTxLoc[txidx].TxLen, spentbuf)
			if err != nil {
				log.Warnf("block %v idx %v failed to insert stake tx %v %v "+
					"err %v", blocksha, newheight, &txsha, txidx, err)
				return 0, err
			}

			err = db.doSpend(tx)
			if err != nil {
				log.Warnf("block %v idx %v failed to spend stx %v %v err %v",
					blocksha, newheight, txsha, txidx, err)
				return 0, err
			}
		}
	}

	return newheight, nil
}
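// The spent-output bitmap built above packs one bit per TxOut and pre-marks
// the unused padding bits of the final byte as spent so they can never be
// mistaken for unspent outputs.  A small self-contained illustration of that
// pattern, independent of the database code (the helper name is made up):
func newSpentBuf(numOutputs int) []byte {
	spentbuflen := (numOutputs + 7) / 8
	spentbuf := make([]byte, spentbuflen)
	if numOutputs%8 != 0 {
		for i := uint(numOutputs % 8); i < 8; i++ {
			spentbuf[spentbuflen-1] |= byte(1) << i
		}
	}
	return spentbuf
}

// For example, newSpentBuf(3) returns []byte{0xf8}: bits 0-2 track the three
// outputs and bits 3-7 are the pre-set padding.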
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block, parent *dcrutil.Block,
	blockID uint32) error {
	// The offset and length of the transactions within the serialized
	// block, for the regular transactions of the parent (if added)
	// and the stake transactions of the current block.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var parentRegularTxs []*dcrutil.Tx
	var parentTxLocs []wire.TxLoc
	var parentBlockID uint32
	if regularTxTreeValid && block.Height() > 1 {
		var err error
		parentRegularTxs = parent.Transactions()
		parentTxLocs, _, err = parent.TxLoc()
		if err != nil {
			return err
		}

		parentSha := parent.Sha()
		parentBlockID, err = dbFetchBlockIDByHash(dbTx, *parentSha)
		if err != nil {
			return err
		}
	}
	_, blockStxLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	allTxs := append(parentRegularTxs, block.STransactions()...)
	allTxsLocs := append(parentTxLocs, blockStxLocs...)
	stakeTxStartIdx := len(parentRegularTxs)

	// As an optimization, allocate a single slice big enough to hold all
	// of the serialized transaction index entries for the block and
	// serialize them directly into the slice.  Then, pass the appropriate
	// subslice to the database to be written.  This approach significantly
	// cuts down on the number of required allocations.
	offset := 0
	serializedValues := make([]byte, len(allTxs)*txEntrySize)
	blockIDToUse := parentBlockID
	for i, tx := range allTxs {
		// Switch to using the newest block ID for the stake transactions,
		// since these are not from the parent.
		if i == stakeTxStartIdx {
			blockIDToUse = blockID
		}

		putTxIndexEntry(serializedValues[offset:], blockIDToUse, allTxsLocs[i])
		endOffset := offset + txEntrySize

		txSha := tx.Sha()
		err := dbPutTxIndexEntry(dbTx, *txSha,
			serializedValues[offset:endOffset:endOffset])
		if err != nil {
			return err
		}
		offset += txEntrySize
	}

	return nil
}
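// The single backing buffer plus the full slice expression
// serializedValues[offset:endOffset:endOffset] above is a general
// allocation-reduction pattern: every entry is serialized into one slab and
// handed out as a capacity-capped subslice so a consumer cannot grow it into
// its neighbor.  A stand-alone sketch of the same idea; the 12-byte entry
// layout is made up purely for illustration and uses encoding/binary.
func packEntries(ids []uint32, offsets []uint64) [][]byte {
	const entrySize = 12 // hypothetical layout: uint32 id + uint64 offset

	// One allocation for all entries rather than one per entry.
	buf := make([]byte, len(ids)*entrySize)
	entries := make([][]byte, 0, len(ids))
	for i := range ids {
		start := i * entrySize
		end := start + entrySize
		binary.LittleEndian.PutUint32(buf[start:], ids[i])
		binary.LittleEndian.PutUint64(buf[start+4:], offsets[i])

		// The three-index slice caps capacity, so a later append cannot
		// clobber the next entry in the shared buffer.
		entries = append(entries, buf[start:end:end])
	}
	return entries
}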
// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block chain.  It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
// the block chain along with best chain selection and reorganization.
//
// It returns two booleans: the first indicates whether or not the block ended
// up on the main chain (true) as opposed to a side chain or fork, and the
// second indicates whether or not the block is an orphan.
//
// This function is safe for concurrent access.
func (b *BlockChain) ProcessBlock(block *dcrutil.Block, flags BehaviorFlags) (bool, bool, error) {
	b.chainLock.Lock()
	defer b.chainLock.Unlock()

	fastAdd := flags&BFFastAdd == BFFastAdd
	dryRun := flags&BFDryRun == BFDryRun

	blockHash := block.Sha()
	log.Tracef("Processing block %v", blockHash)
	currentTime := time.Now()
	defer func() {
		elapsedTime := time.Since(currentTime)
		log.Debugf("Block %v (height %v) finished processing in %s",
			blockHash, block.Height(), elapsedTime)
	}()

	// The block must not already exist in the main chain or side chains.
	exists, err := b.blockExists(blockHash)
	if err != nil {
		return false, false, err
	}
	if exists {
		str := fmt.Sprintf("already have block %v", blockHash)
		return false, false, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := b.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %v", blockHash)
		return false, false, ruleError(ErrDuplicateBlock, str)
	}

	// Perform preliminary sanity checks on the block and its transactions.
	err = checkBlockSanity(block, b.timeSource, flags, b.chainParams)
	if err != nil {
		return false, false, err
	}

	// Find the previous checkpoint and perform some additional checks based
	// on the checkpoint.  This provides a few nice properties such as
	// preventing old side chain blocks before the last checkpoint,
	// rejecting easy to mine, but otherwise bogus, blocks that could be
	// used to eat memory, and ensuring expected (versus claimed) proof of
	// work requirements since the previous checkpoint are met.
	blockHeader := &block.MsgBlock().Header
	checkpointBlock, err := b.findPreviousCheckpoint()
	if err != nil {
		return false, false, err
	}
	if checkpointBlock != nil {
		// Ensure the block timestamp is after the checkpoint timestamp.
		checkpointHeader := &checkpointBlock.MsgBlock().Header
		checkpointTime := checkpointHeader.Timestamp
		if blockHeader.Timestamp.Before(checkpointTime) {
			str := fmt.Sprintf("block %v has timestamp %v before "+
				"last checkpoint timestamp %v", blockHash,
				blockHeader.Timestamp, checkpointTime)
			return false, false, ruleError(ErrCheckpointTimeTooOld, str)
		}

		if !fastAdd {
			// Even though the checks prior to now have already ensured the
			// proof of work exceeds the claimed amount, the claimed amount
			// is a field in the block header which could be forged.  This
			// check ensures the proof of work is at least the minimum
			// expected based on elapsed time since the last checkpoint and
			// maximum adjustment allowed by the retarget rules.
			duration := blockHeader.Timestamp.Sub(checkpointTime)
			requiredTarget := CompactToBig(b.calcEasiestDifficulty(
				checkpointHeader.Bits, duration))
			currentTarget := CompactToBig(blockHeader.Bits)
			if currentTarget.Cmp(requiredTarget) > 0 {
				str := fmt.Sprintf("block target difficulty of %064x "+
					"is too low when compared to the previous "+
					"checkpoint", currentTarget)
				return false, false, ruleError(ErrDifficultyTooLow, str)
			}
		}
	}

	// Handle orphan blocks.
	prevHash := &blockHeader.PrevBlock
	prevHashExists, err := b.blockExists(prevHash)
	if err != nil {
		return false, false, err
	}
	if !prevHashExists {
		if !dryRun {
			log.Infof("Adding orphan block %v with parent %v",
				blockHash, prevHash)
			b.addOrphanBlock(block)
		}

		return false, true, err
	}

	// The block has passed all context independent checks and appears sane
	// enough to potentially accept it into the block chain.
	var onMainChain bool
	onMainChain, err = b.maybeAcceptBlock(block, flags)
	if err != nil {
		return false, false, err
	}

	// Don't process any orphans or log when the dry run flag is set.
	if !dryRun {
		// Accept any orphan blocks that depend on this block (they are
		// no longer orphans) and repeat for those accepted blocks until
		// there are no more.
		err := b.processOrphans(blockHash, flags)
		if err != nil {
			return false, false, err
		}

		log.Debugf("Accepted block %v", blockHash)
	}

	return onMainChain, false, err
}
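// A minimal sketch of how a caller might interpret the two boolean results;
// the function and its logic are illustrative only and not part of the
// package, though BFNone and the return contract come from ProcessBlock above.
func handleNewBlock(chain *BlockChain, block *dcrutil.Block) {
	onMainChain, isOrphan, err := chain.ProcessBlock(block, BFNone)
	if err != nil {
		log.Errorf("rejected block %v: %v", block.Sha(), err)
		return
	}
	switch {
	case isOrphan:
		log.Infof("block %v stored as an orphan pending its parent", block.Sha())
	case onMainChain:
		log.Infof("block %v extended the main chain", block.Sha())
	default:
		log.Infof("block %v accepted on a side chain", block.Sha())
	}
}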
// findWhereDoubleSpent determines where a tx was previously double spent.
// VERY INTENSIVE BLOCKCHAIN SCANNING, USE TO DEBUG SIMULATED BLOCKCHAINS
// ONLY.
func (b *BlockChain) findWhereDoubleSpent(block *dcrutil.Block) error {
	height := int64(1)
	heightEnd := block.Height()

	hashes, err := b.db.FetchHeightRange(height, heightEnd)
	if err != nil {
		return err
	}

	var allTxs []*dcrutil.Tx
	txs := block.Transactions()[1:]
	stxs := block.STransactions()
	allTxs = append(txs, stxs...)

	for _, hash := range hashes {
		curBlock, err := b.getBlockFromHash(&hash)
		if err != nil {
			return err
		}
		log.Errorf("Cur block %v", curBlock.Height())

		for _, localTx := range allTxs {
			for _, localTxIn := range localTx.MsgTx().TxIn {
				for _, tx := range curBlock.Transactions()[1:] {
					for _, txIn := range tx.MsgTx().TxIn {
						if txIn.PreviousOutPoint == localTxIn.PreviousOutPoint {
							log.Errorf("Double spend of {hash: %v, idx: %v,"+
								" tree: %b}, previously found in tx %v "+
								"of block %v txtree regular",
								txIn.PreviousOutPoint.Hash,
								txIn.PreviousOutPoint.Index,
								txIn.PreviousOutPoint.Tree,
								tx.Sha(), hash)
						}
					}
				}

				for _, tx := range curBlock.STransactions() {
					for _, txIn := range tx.MsgTx().TxIn {
						if txIn.PreviousOutPoint == localTxIn.PreviousOutPoint {
							log.Errorf("Double spend of {hash: %v, idx: %v,"+
								" tree: %b}, previously found in tx %v "+
								"of block %v txtree stake\n",
								txIn.PreviousOutPoint.Hash,
								txIn.PreviousOutPoint.Index,
								txIn.PreviousOutPoint.Tree,
								tx.Sha(), hash)
						}
					}
				}
			}
		}
	}

	for _, localTx := range stxs {
		for _, localTxIn := range localTx.MsgTx().TxIn {
			for _, tx := range txs {
				for _, txIn := range tx.MsgTx().TxIn {
					if txIn.PreviousOutPoint == localTxIn.PreviousOutPoint {
						log.Errorf("Double spend of {hash: %v, idx: %v,"+
							" tree: %b}, previously found in tx %v "+
							"of cur block stake txtree\n",
							txIn.PreviousOutPoint.Hash,
							txIn.PreviousOutPoint.Index,
							txIn.PreviousOutPoint.Tree,
							tx.Sha())
					}
				}
			}
		}
	}

	return nil
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain.  This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block,
	parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// The offset and length of the transactions within the serialized
	// block for the regular transactions of the previous block, if
	// applicable.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var parentTxLocs []wire.TxLoc
	var parentBlockID uint32
	if regularTxTreeValid && block.Height() > 1 {
		var err error
		parentTxLocs, _, err = parent.TxLoc()
		if err != nil {
			return err
		}

		parentSha := parent.Sha()
		parentBlockID, err = dbFetchBlockIDByHash(dbTx, *parentSha)
		if err != nil {
			return err
		}
	}

	// The offset and length of the transactions within the serialized
	// block for the added stake transactions.
	_, blockStxLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// Nothing to index, just return.
	if len(parentTxLocs)+len(blockStxLocs) == 0 {
		return nil
	}

	// Get the internal block ID associated with the block.
	blockSha := block.Sha()
	blockID, err := dbFetchBlockIDByHash(dbTx, *blockSha)
	if err != nil {
		return err
	}

	// Build all of the address to transaction mappings in a local map.
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, parent, view)

	// Add all of the index entries for each address.
	stakeIdxsStart := len(parentTxLocs)
	allTxLocs := append(parentTxLocs, blockStxLocs...)
	addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		for _, txIdx := range txIdxs {
			// Switch to using the newest block ID for the stake
			// transactions, since these are not from the parent.  Offset
			// the index to be correct for the location in this given block.
			blockIDToUse := parentBlockID
			if txIdx >= stakeIdxsStart {
				blockIDToUse = blockID
			}

			err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
				blockIDToUse, allTxLocs[txIdx])
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// indexBlockAddrs returns a populated index of all the transactions in the
// passed block based on the addresses involved in each transaction.
func (a *addrIndexer) indexBlockAddrs(blk *dcrutil.Block,
	parent *dcrutil.Block) (database.BlockAddrIndex, error) {
	var addrIndex database.BlockAddrIndex
	_, stxLocs, err := blk.TxLoc()
	if err != nil {
		return nil, err
	}

	txTreeRegularValid := dcrutil.IsFlagSet16(blk.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)

	// Add regular transactions iff the block was validated.
	if txTreeRegularValid {
		txLocs, _, err := parent.TxLoc()
		if err != nil {
			return nil, err
		}
		for txIdx, tx := range parent.Transactions() {
			// Tx's offset and length in the block.
			locInBlock := &txLocs[txIdx]

			// Coinbases don't have any inputs.
			if !blockchain.IsCoinBase(tx) {
				// Index the SPK's of each input's previous outpoint
				// transaction.
				for _, txIn := range tx.MsgTx().TxIn {
					// Lookup and fetch the referenced output's tx.
					prevOutTx, err := a.lookupTransaction(
						txIn.PreviousOutPoint.Hash, blk, parent)
					if err != nil {
						return nil, err
					}
					inputOutPoint := prevOutTx.TxOut[txIn.PreviousOutPoint.Index]

					toAppend, err := convertToAddrIndex(inputOutPoint.Version,
						inputOutPoint.PkScript, parent.Height(), locInBlock)
					if err != nil {
						adxrLog.Tracef("Error converting tx txin %v: %v",
							txIn.PreviousOutPoint.Hash, err)
						continue
					}
					addrIndex = append(addrIndex, toAppend...)
				}
			}

			for _, txOut := range tx.MsgTx().TxOut {
				toAppend, err := convertToAddrIndex(txOut.Version,
					txOut.PkScript, parent.Height(), locInBlock)
				if err != nil {
					adxrLog.Tracef("Error converting tx txout %v: %v",
						tx.MsgTx().TxSha(), err)
					continue
				}
				addrIndex = append(addrIndex, toAppend...)
			}
		}
	}

	// Add stake transactions.
	for stxIdx, stx := range blk.STransactions() {
		// Tx's offset and length in the block.
		locInBlock := &stxLocs[stxIdx]

		isSSGen, _ := stake.IsSSGen(stx)

		// Index the SPK's of each input's previous outpoint transaction.
		for i, txIn := range stx.MsgTx().TxIn {
			// Stakebases don't have any inputs.
			if isSSGen && i == 0 {
				continue
			}

			// Lookup and fetch the referenced output's tx.
			prevOutTx, err := a.lookupTransaction(
				txIn.PreviousOutPoint.Hash, blk, parent)
			if err != nil {
				return nil, err
			}
			inputOutPoint := prevOutTx.TxOut[txIn.PreviousOutPoint.Index]

			toAppend, err := convertToAddrIndex(inputOutPoint.Version,
				inputOutPoint.PkScript, blk.Height(), locInBlock)
			if err != nil {
				adxrLog.Tracef("Error converting stx txin %v: %v",
					txIn.PreviousOutPoint.Hash, err)
				continue
			}
			addrIndex = append(addrIndex, toAppend...)
		}

		for _, txOut := range stx.MsgTx().TxOut {
			toAppend, err := convertToAddrIndex(txOut.Version, txOut.PkScript,
				blk.Height(), locInBlock)
			if err != nil {
				adxrLog.Tracef("Error converting stx txout %v: %v",
					stx.MsgTx().TxSha(), err)
				continue
			}
			addrIndex = append(addrIndex, toAppend...)
		}
	}

	return addrIndex, nil
}
// insertBlock is the internal function which implements the public
// InsertBlock.  See the comment for InsertBlock for more details.
//
// This function MUST be called with the tmdb lock held (for writes).
func (tmdb *TicketDB) insertBlock(block *dcrutil.Block) (SStxMemMap,
	SStxMemMap, SStxMemMap, error) {
	height := block.Height()
	if height < tmdb.StakeEnabledHeight {
		return nil, nil, nil, nil
	}

	// Sanity check: Does the number of tickets in ticketMap equal the number
	// of tickets indicated in the header?
	poolSizeBlock := int(block.MsgBlock().Header.PoolSize)
	poolSize := 0
	for i := 0; i < BucketsSize; i++ {
		poolSize += len(tmdb.maps.ticketMap[i])
	}
	if poolSize != poolSizeBlock {
		return nil, nil, nil, fmt.Errorf("ticketpoolsize in block %v not "+
			"equal to the calculated ticketpoolsize, indicating database "+
			"corruption (got %v, want %v)", block.Sha(), poolSizeBlock,
			poolSize)
	}

	// Create the block in the spentTicketMap.
	tmdb.maybeInsertBlock(block.Height())

	// Iterate through all the SSGen (vote) tx in the block and add them to
	// a map of tickets that were actually used.  The rest of the tickets in
	// the buckets were then considered missed --> missedTicketMap.
	// Note that it doesn't really matter what value you set usedTickets to,
	// it's just a map of tickets that were actually used in the block.  It
	// would probably be more efficient to use an array.
	usedTickets := make(map[chainhash.Hash]struct{})
	spendingHashes := make(map[chainhash.Hash]chainhash.Hash)
	revocations := make(map[chainhash.Hash]struct{})
	for _, staketx := range block.STransactions() {
		if is, _ := IsSSGen(staketx); is {
			msgTx := staketx.MsgTx()
			sstxIn := msgTx.TxIn[1] // sstx input
			sstxHash := sstxIn.PreviousOutPoint.Hash

			usedTickets[sstxHash] = struct{}{}
			spendingHashes[sstxHash] = *staketx.Sha()
		}

		if is, _ := IsSSRtx(staketx); is {
			msgTx := staketx.MsgTx()
			sstxIn := msgTx.TxIn[0] // sstx input
			sstxHash := sstxIn.PreviousOutPoint.Hash

			revocations[sstxHash] = struct{}{}
		}
	}

	// Spend or miss all the necessary tickets and do some sanity checks.
	parentBlock, err := tmdb.database.FetchBlockBySha(
		&block.MsgBlock().Header.PrevBlock)
	if err != nil {
		return nil, nil, nil, err
	}
	spentAndMissedTickets, err := tmdb.spendTickets(parentBlock, usedTickets,
		spendingHashes)
	if err != nil {
		return nil, nil, nil, err
	}

	// Expire all old tickets, and stick them into the spent and missed
	// ticket map too.
	expiredTickets, err := tmdb.expireTickets(height)
	if err != nil {
		return nil, nil, nil, err
	}
	if len(expiredTickets) > 0 && len(spentAndMissedTickets) == 0 {
		return nil, nil, nil, fmt.Errorf("tried to expire tickets before " +
			"stake validation height! TicketExpiry may be too small")
	}
	if len(expiredTickets) > 0 {
		for hash, ticket := range expiredTickets {
			spentAndMissedTickets[hash] = ticket
		}
	}

	revokedTickets, err := tmdb.revokeTickets(revocations)
	if err != nil {
		return nil, nil, nil, err
	}

	newTickets, err := tmdb.pushMatureTicketsAtHeight(block.Height())
	if err != nil {
		return nil, nil, nil, err
	}

	log.Debugf("Connected block %v (height %v) to the ticket database",
		block.Sha(), block.Height())

	return cloneSStxMemMap(spentAndMissedTickets), cloneSStxMemMap(newTickets),
		cloneSStxMemMap(revokedTickets), nil
}
// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The timestamps for the blocks before and after the checkpoint must have
//    timestamps which are also before and after the checkpoint, respectively
//    (due to the median time allowance this is not always the case)
//  - The block must not contain any strange transaction such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (b *BlockChain) IsCheckpointCandidate(block *dcrutil.Block) (bool, error) {
	b.chainLock.RLock()
	defer b.chainLock.RUnlock()

	// Checkpoints must be enabled.
	if b.noCheckpoints {
		return false, fmt.Errorf("checkpoints are disabled")
	}

	var isCandidate bool
	err := b.db.View(func(dbTx database.Tx) error {
		// A checkpoint must be in the main chain.
		blockHeight, err := dbFetchHeightByHash(dbTx, block.Sha())
		if err != nil {
			// Only return an error if it's not due to the block not
			// being in the main chain.
			if !isNotInMainChainErr(err) {
				return err
			}
			return nil
		}

		// Ensure the height of the passed block and the entry for the
		// block in the main chain match.  This should always be the
		// case unless the caller provided an invalid block.
		if blockHeight != block.Height() {
			return fmt.Errorf("passed block height of %d does not "+
				"match the main chain height of %d", block.Height(),
				blockHeight)
		}

		// A checkpoint must be at least CheckpointConfirmations blocks
		// before the end of the main chain.
		mainChainHeight := b.bestNode.height
		if blockHeight > (mainChainHeight - CheckpointConfirmations) {
			return nil
		}

		// Get the previous block header.
		prevHash := &block.MsgBlock().Header.PrevBlock
		prevHeader, err := dbFetchHeaderByHash(dbTx, prevHash)
		if err != nil {
			return err
		}

		// Get the next block header.
		nextHeader, err := dbFetchHeaderByHeight(dbTx, blockHeight+1)
		if err != nil {
			return err
		}

		// A checkpoint must have timestamps for the block and the
		// blocks on either side of it in order (due to the median time
		// allowance this is not always the case).
		prevTime := prevHeader.Timestamp
		curTime := block.MsgBlock().Header.Timestamp
		nextTime := nextHeader.Timestamp
		if prevTime.After(curTime) || nextTime.Before(curTime) {
			return nil
		}

		// A checkpoint must have transactions that only contain
		// standard scripts.
		for _, tx := range block.Transactions() {
			if isNonstandardTransaction(tx) {
				return nil
			}
		}

		// All of the checks passed, so the block is a candidate.
		isCandidate = true
		return nil
	})
	return isCandidate, err
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain.  This indexer adds a key for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *ExistsAddrIndex) ConnectBlock(dbTx database.Tx, block,
	parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var parentTxs []*dcrutil.Tx
	if regularTxTreeValid && block.Height() > 1 {
		parentTxs = parent.Transactions()
	}
	blockTxns := block.STransactions()
	allTxns := append(parentTxs, blockTxns...)

	usedAddrs := make(map[[addrKeySize]byte]struct{})

	for _, tx := range allTxns {
		msgTx := tx.MsgTx()
		isSStx, _ := stake.IsSStx(msgTx)
		for _, txIn := range msgTx.TxIn {
			if txscript.IsMultisigSigScript(txIn.SignatureScript) {
				rs, err := txscript.MultisigRedeemScriptFromScriptSig(
					txIn.SignatureScript)
				if err != nil {
					continue
				}

				class, addrs, _, err := txscript.ExtractPkScriptAddrs(
					txscript.DefaultScriptVersion, rs, idx.chainParams)
				if err != nil {
					// Non-standard outputs are skipped.
					continue
				}
				if class != txscript.MultiSigTy {
					// This should never happen, but be paranoid.
					continue
				}

				for _, addr := range addrs {
					k, err := addrToKey(addr, idx.chainParams)
					if err != nil {
						continue
					}

					usedAddrs[k] = struct{}{}
				}
			}
		}

		for _, txOut := range tx.MsgTx().TxOut {
			class, addrs, _, err := txscript.ExtractPkScriptAddrs(
				txOut.Version, txOut.PkScript, idx.chainParams)
			if err != nil {
				// Non-standard outputs are skipped.
				continue
			}

			if isSStx && class == txscript.NullDataTy {
				addr, err := stake.AddrFromSStxPkScrCommitment(txOut.PkScript,
					idx.chainParams)
				if err != nil {
					// Ignore unsupported address types.
					continue
				}

				addrs = append(addrs, addr)
			}

			for _, addr := range addrs {
				k, err := addrToKey(addr, idx.chainParams)
				if err != nil {
					// Ignore unsupported address types.
					continue
				}

				usedAddrs[k] = struct{}{}
			}
		}
	}

	// Write all of the newly used addresses to the database, skipping any
	// keys that already exist.  Also fold in any addresses currently seen
	// in the mempool, then clear the unconfirmed map by dropping the old
	// map and reassigning a new one.
	idx.unconfirmedLock.Lock()
	for k := range idx.mpExistsAddr {
		usedAddrs[k] = struct{}{}
	}
	idx.mpExistsAddr = make(map[[addrKeySize]byte]struct{})
	idx.unconfirmedLock.Unlock()

	meta := dbTx.Metadata()
	existsAddrIndex := meta.Bucket(existsAddrIndexKey)
	newUsedAddrs := make(map[[addrKeySize]byte]struct{})
	for k := range usedAddrs {
		if !idx.existsAddress(existsAddrIndex, k) {
			newUsedAddrs[k] = struct{}{}
		}
	}

	for k := range newUsedAddrs {
		err := dbPutExistsAddr(existsAddrIndex, k)
		if err != nil {
			return err
		}
	}

	return nil
}
// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The timestamps for the blocks before and after the checkpoint must have
//    timestamps which are also before and after the checkpoint, respectively
//    (due to the median time allowance this is not always the case)
//  - The block must not contain any strange transaction such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
func (b *BlockChain) IsCheckpointCandidate(block *dcrutil.Block) (bool, error) {
	// Checkpoints must be enabled.
	if b.noCheckpoints {
		return false, fmt.Errorf("checkpoints are disabled")
	}

	// A checkpoint must be in the main chain.
	exists, err := b.db.ExistsSha(block.Sha())
	if err != nil {
		return false, err
	}
	if !exists {
		return false, nil
	}

	// A checkpoint must be at least CheckpointConfirmations blocks before
	// the end of the main chain.
	blockHeight := block.Height()
	_, mainChainHeight, err := b.db.NewestSha()
	if err != nil {
		return false, err
	}
	if blockHeight > (mainChainHeight - CheckpointConfirmations) {
		return false, nil
	}

	// Get the previous block.
	prevHash := &block.MsgBlock().Header.PrevBlock
	prevBlock, err := b.db.FetchBlockBySha(prevHash)
	if err != nil {
		return false, err
	}

	// Get the next block.
	nextHash, err := b.db.FetchBlockShaByHeight(blockHeight + 1)
	if err != nil {
		return false, err
	}
	nextBlock, err := b.db.FetchBlockBySha(nextHash)
	if err != nil {
		return false, err
	}

	// A checkpoint must have timestamps for the block and the blocks on
	// either side of it in order (due to the median time allowance this is
	// not always the case).
	prevTime := prevBlock.MsgBlock().Header.Timestamp
	curTime := block.MsgBlock().Header.Timestamp
	nextTime := nextBlock.MsgBlock().Header.Timestamp
	if prevTime.After(curTime) || nextTime.Before(curTime) {
		return false, nil
	}

	// A checkpoint must have transactions that only contain standard
	// scripts.
	for _, tx := range block.Transactions() {
		if isNonstandardTransaction(tx) {
			return false, nil
		}
	}

	return true, nil
}
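// A hedged sketch of how a maintainer tool might scan a height range for
// candidates using the method above.  The findCandidates function and the
// fetchMainChainBlock callback are hypothetical helpers for illustration,
// not part of the package.
func findCandidates(chain *BlockChain, startHeight, endHeight int64,
	fetchMainChainBlock func(int64) (*dcrutil.Block, error)) ([]*dcrutil.Block, error) {
	var candidates []*dcrutil.Block
	for h := startHeight; h <= endHeight; h++ {
		blk, err := fetchMainChainBlock(h)
		if err != nil {
			return nil, err
		}
		ok, err := chain.IsCheckpointCandidate(blk)
		if err != nil {
			return nil, err
		}
		if ok {
			candidates = append(candidates, blk)
		}
	}
	return candidates, nil
}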
// fetchInputUtxos loads utxo details about the input transactions referenced
// by the transactions in the given block into the view from the database as
// needed.  In particular, referenced entries that are earlier in the block are
// added to the view and entries that are already in the view are not modified.
func (view *UtxoViewpoint) fetchInputUtxos(db database.DB, block,
	parent *dcrutil.Block) error {
	viewpoint := view.StakeViewpoint()

	// Build a map of in-flight transactions because some of the inputs in
	// this block could be referencing other transactions earlier in this
	// block which are not yet in the chain.
	txInFlight := map[chainhash.Hash]int{}
	txNeededSet := make(map[chainhash.Hash]struct{})

	// Case 1: ViewpointPrevValidInitial.  We need the viewpoint of the
	// current chain without the TxTreeRegular of the previous block
	// added so we can validate that.
	if viewpoint == ViewpointPrevValidInitial {
		transactions := parent.Transactions()
		for i, tx := range transactions {
			txInFlight[*tx.Sha()] = i
		}

		// Loop through all of the transaction inputs (except for the
		// coinbase which has no inputs) collecting them into sets of what
		// is needed and what is already known (in-flight).
		for i, tx := range transactions[1:] {
			for _, txIn := range tx.MsgTx().TxIn {
				// It is acceptable for a transaction input to reference
				// the output of another transaction in this block only
				// if the referenced transaction comes before the
				// current one in this block.  Add the outputs of the
				// referenced transaction as available utxos when this
				// is the case.  Otherwise, the utxo details are still
				// needed.
				//
				// NOTE: The >= is correct here because i is one less
				// than the actual position of the transaction within
				// the block due to skipping the coinbase.
				originHash := &txIn.PreviousOutPoint.Hash
				if inFlightIndex, ok := txInFlight[*originHash]; ok &&
					i >= inFlightIndex {
					originTx := transactions[inFlightIndex]
					view.AddTxOuts(originTx, block.Height(), uint32(i))
					continue
				}

				// Don't request entries that are already in the view
				// from the database.
				if _, ok := view.entries[*originHash]; ok {
					continue
				}

				txNeededSet[*originHash] = struct{}{}
			}
		}

		// Request the input utxos from the database.
		return view.fetchUtxosMain(db, txNeededSet)
	}

	// Case 2+3: ViewpointPrevValidStake and ViewpointPrevInvalidStake.
	// For ViewpointPrevValidStake, we need the viewpoint of the
	// current chain with the TxTreeRegular of the previous block
	// added so we can validate the TxTreeStake of the current block.
	// For ViewpointPrevInvalidStake, we need the viewpoint of the
	// current chain with the TxTreeRegular of the previous block
	// missing so we can validate the TxTreeStake of the current block.
	if viewpoint == ViewpointPrevValidStake ||
		viewpoint == ViewpointPrevInvalidStake {
		// We need all of the stake tx txins.  None of these are considered
		// in-flight in relation to the regular tx tree or to other tx in
		// the stake tx tree, so don't do any of those expensive checks and
		// just append it to the tx slice.
		for _, tx := range block.MsgBlock().STransactions {
			isSSGen, _ := stake.IsSSGen(tx)

			for i, txIn := range tx.TxIn {
				// Ignore stakebases.
				if isSSGen && i == 0 {
					continue
				}

				// Add an entry to the transaction store for the needed
				// transaction with it set to missing by default.
				originHash := &txIn.PreviousOutPoint.Hash

				// Don't request entries that are already in the view
				// from the database.
				if _, ok := view.entries[*originHash]; ok {
					continue
				}

				txNeededSet[*originHash] = struct{}{}
			}
		}

		// Request the input utxos from the database.
		return view.fetchUtxosMain(db, txNeededSet)
	}

	// Case 4+5: ViewpointPrevValidRegular and ViewpointPrevInvalidRegular.
	// For ViewpointPrevValidRegular, we need the viewpoint of the
	// current chain with the TxTreeRegular of the previous block
	// and the TxTreeStake of the current block added so we can
	// validate the TxTreeRegular of the current block.
	// For ViewpointPrevInvalidRegular, we need the viewpoint of the
	// current chain with the TxTreeRegular of the previous block
	// missing and the TxTreeStake of the current block added so we
	// can validate the TxTreeRegular of the current block.
	if viewpoint == ViewpointPrevValidRegular ||
		viewpoint == ViewpointPrevInvalidRegular {
		transactions := block.Transactions()
		for i, tx := range transactions {
			txInFlight[*tx.Sha()] = i
		}

		// Loop through all of the transaction inputs (except for the
		// coinbase which has no inputs) collecting them into sets of what
		// is needed and what is already known (in-flight).
		txNeededSet := make(map[chainhash.Hash]struct{})
		for i, tx := range transactions[1:] {
			for _, txIn := range tx.MsgTx().TxIn {
				// It is acceptable for a transaction input to reference
				// the output of another transaction in this block only
				// if the referenced transaction comes before the
				// current one in this block.  Add the outputs of the
				// referenced transaction as available utxos when this
				// is the case.  Otherwise, the utxo details are still
				// needed.
				//
				// NOTE: The >= is correct here because i is one less
				// than the actual position of the transaction within
				// the block due to skipping the coinbase.
				originHash := &txIn.PreviousOutPoint.Hash
				if inFlightIndex, ok := txInFlight[*originHash]; ok &&
					i >= inFlightIndex {
					originTx := transactions[inFlightIndex]
					view.AddTxOuts(originTx, block.Height(), uint32(i))
					continue
				}

				// Don't request entries that are already in the view
				// from the database.
				if _, ok := view.entries[*originHash]; ok {
					continue
				}

				txNeededSet[*originHash] = struct{}{}
			}
		}

		// Request the input utxos from the database.
		return view.fetchUtxosMain(db, txNeededSet)
	}

	// TODO: return an actual blockchain error here.
	return fmt.Errorf("invalid stake viewpoint")
}
// disconnectTransactions updates the view by removing all of the transactions
// created by the passed block, restoring all utxos the transactions spent by
// using the provided spent txo information, and setting the best hash for the
// view to the block before the passed block.
//
// This function will ONLY work correctly for a single transaction tree at a
// time because of index tracking.
func (b *BlockChain) disconnectTransactions(view *UtxoViewpoint,
	block *dcrutil.Block, parent *dcrutil.Block, stxos []spentTxOut) error {
	// Sanity check the correct number of stxos are provided.
	if len(stxos) != countSpentOutputs(block, parent) {
		return AssertError(fmt.Sprintf("disconnectTransactions "+
			"called with bad spent transaction out information "+
			"(len stxos %v, count is %v)", len(stxos),
			countSpentOutputs(block, parent)))
	}

	// Loop backwards through all transactions so everything is unspent in
	// reverse order.  This is necessary since transactions later in a block
	// can spend from previous ones.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	thisNodeStakeViewpoint := ViewpointPrevInvalidStake
	if regularTxTreeValid {
		thisNodeStakeViewpoint = ViewpointPrevValidStake
	}
	view.SetStakeViewpoint(thisNodeStakeViewpoint)
	err := view.fetchInputUtxos(b.db, block, parent)
	if err != nil {
		return err
	}

	stxoIdx := len(stxos) - 1
	transactions := block.STransactions()
	for txIdx := len(transactions) - 1; txIdx > -1; txIdx-- {
		tx := transactions[txIdx]
		msgTx := tx.MsgTx()
		tt := stake.DetermineTxType(msgTx)

		// Clear this transaction from the view if it already exists or
		// create a new empty entry for when it does not.  This is done
		// because the code relies on its existence in the view in order
		// to signal modifications have happened.
		entry := view.entries[*tx.Sha()]
		if entry == nil {
			entry = newUtxoEntry(msgTx.Version, uint32(block.Height()),
				uint32(txIdx), IsCoinBaseTx(msgTx), msgTx.Expiry != 0, tt)
			if tt == stake.TxTypeSStx {
				stakeExtra := make([]byte,
					serializeSizeForMinimalOutputs(tx))
				putTxToMinimalOutputs(stakeExtra, tx)
				entry.stakeExtra = stakeExtra
			}
			view.entries[*tx.Sha()] = entry
		}
		entry.modified = true
		entry.sparseOutputs = make(map[uint32]*utxoOutput)

		// Loop backwards through all of the transaction inputs (except
		// for the coinbase which has no inputs) and unspend the
		// referenced txos.  This is necessary to match the order of the
		// spent txout entries.
		for txInIdx := len(tx.MsgTx().TxIn) - 1; txInIdx > -1; txInIdx-- {
			// Skip empty vote stakebases.
			if txInIdx == 0 && (tt == stake.TxTypeSSGen) {
				continue
			}

			// Ensure the spent txout index is decremented to stay
			// in sync with the transaction input.
			stxo := &stxos[stxoIdx]
			stxoIdx--

			// When there is not already an entry for the referenced
			// transaction in the view, it means it was fully spent,
			// so create a new utxo entry in order to resurrect it.
			txIn := tx.MsgTx().TxIn[txInIdx]
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			entry := view.LookupEntry(originHash)
			if entry == nil {
				if !stxo.txFullySpent {
					return AssertError(fmt.Sprintf("tried to revive utx "+
						"%v from non-fully spent stx entry", originHash))
				}
				entry = newUtxoEntry(tx.MsgTx().Version, stxo.height,
					stxo.index, stxo.isCoinBase, stxo.hasExpiry,
					stxo.txType)
				if stxo.txType == stake.TxTypeSStx {
					entry.stakeExtra = stxo.stakeExtra
				}
				view.entries[*originHash] = entry
			}

			// Mark the entry as modified since it is either new
			// or will be changed below.
			entry.modified = true

			// Restore the specific utxo using the stxo data from
			// the spend journal if it doesn't already exist in the
			// view.
			output, ok := entry.sparseOutputs[originIndex]
			if !ok {
				// Add the unspent transaction output.
				entry.sparseOutputs[originIndex] = &utxoOutput{
					compressed:    stxo.compressed,
					spent:         false,
					amount:        txIn.ValueIn,
					scriptVersion: stxo.scriptVersion,
					pkScript:      stxo.pkScript,
				}
				continue
			}

			// Mark the existing referenced transaction output as
			// unspent.
			output.spent = false
		}
	}

	// There is no regular tx from before the genesis block, so ignore the
	// genesis block for the next step.
	if parent != nil && block.Height() != 0 {
		// Only bother to unspend transactions if the parent's tx tree was
		// validated.  Otherwise, these transactions were never in the
		// blockchain's history in the first place.
		if regularTxTreeValid {
			view.SetStakeViewpoint(ViewpointPrevValidInitial)
			err = view.fetchInputUtxos(b.db, block, parent)
			if err != nil {
				return err
			}

			transactions := parent.Transactions()
			for txIdx := len(transactions) - 1; txIdx > -1; txIdx-- {
				tx := transactions[txIdx]

				// Clear this transaction from the view if it already
				// exists or create a new empty entry for when it does
				// not.  This is done because the code relies on its
				// existence in the view in order to signal modifications
				// have happened.
				isCoinbase := txIdx == 0
				entry := view.entries[*tx.Sha()]
				if entry == nil {
					entry = newUtxoEntry(tx.MsgTx().Version,
						uint32(parent.Height()), uint32(txIdx), isCoinbase,
						tx.MsgTx().Expiry != 0, stake.TxTypeRegular)
					view.entries[*tx.Sha()] = entry
				}
				entry.modified = true
				entry.sparseOutputs = make(map[uint32]*utxoOutput)

				// Loop backwards through all of the transaction inputs
				// (except for the coinbase which has no inputs) and
				// unspend the referenced txos.  This is necessary to
				// match the order of the spent txout entries.
				if isCoinbase {
					continue
				}
				for txInIdx := len(tx.MsgTx().TxIn) - 1; txInIdx > -1; txInIdx-- {
					// Ensure the spent txout index is decremented to stay
					// in sync with the transaction input.
					stxo := &stxos[stxoIdx]
					stxoIdx--

					// When there is not already an entry for the
					// referenced transaction in the view, it means it was
					// fully spent, so create a new utxo entry in order to
					// resurrect it.
					txIn := tx.MsgTx().TxIn[txInIdx]
					originHash := &txIn.PreviousOutPoint.Hash
					originIndex := txIn.PreviousOutPoint.Index
					entry := view.entries[*originHash]
					if entry == nil {
						if !stxo.txFullySpent {
							return AssertError(fmt.Sprintf("tried to "+
								"revive utx %v from non-fully spent stx "+
								"entry", originHash))
						}

						entry = newUtxoEntry(tx.MsgTx().Version,
							stxo.height, stxo.index, stxo.isCoinBase,
							stxo.hasExpiry, stxo.txType)
						if stxo.txType == stake.TxTypeSStx {
							entry.stakeExtra = stxo.stakeExtra
						}
						view.entries[*originHash] = entry
					}

					// Mark the entry as modified since it is either new
					// or will be changed below.
					entry.modified = true

					// Restore the specific utxo using the stxo data from
					// the spend journal if it doesn't already exist in the
					// view.
					output, ok := entry.sparseOutputs[originIndex]
					if !ok {
						// Add the unspent transaction output.
						entry.sparseOutputs[originIndex] = &utxoOutput{
							compressed:    stxo.compressed,
							spent:         false,
							amount:        txIn.ValueIn,
							scriptVersion: stxo.scriptVersion,
							pkScript:      stxo.pkScript,
						}
						continue
					}

					// Mark the existing referenced transaction output as
					// unspent.
					output.spent = false
				}
			}
		}
	}

	// Update the best hash for view to the previous block since all of the
	// transactions for the current block have been disconnected.
	view.SetBestHash(parent.Sha())
	return nil
}
// connectTransactions updates the passed map by applying transaction and
// spend information for the stake transactions in the passed block and, when
// the parent's regular tx tree was validated, for the regular transactions of
// the parent.  Only transactions in the passed map are updated.
func connectTransactions(txStore TxStore, block *dcrutil.Block,
	parent *dcrutil.Block) error {
	// There is no regular tx from before the genesis block, so ignore the
	// genesis block for the next step.
	if parent != nil && block.Height() != 0 {
		mBlock := block.MsgBlock()
		votebits := mBlock.Header.VoteBits
		regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid)

		// Only add the transactions in the event that the parent block's
		// regular tx were validated.
		if regularTxTreeValid {
			// Loop through all of the regular transactions in the block to
			// see if any of them are ones we need to update and spend based
			// on the results map.
			for i, tx := range parent.Transactions() {
				// Update the transaction store with the transaction
				// information if it's one of the requested transactions.
				msgTx := tx.MsgTx()
				if txD, exists := txStore[*tx.Sha()]; exists {
					txD.Tx = tx
					txD.BlockHeight = block.Height() - 1
					txD.BlockIndex = uint32(i)
					txD.Spent = make([]bool, len(msgTx.TxOut))
					txD.Err = nil
				}

				// Spend the origin transaction output.
				for _, txIn := range msgTx.TxIn {
					originHash := &txIn.PreviousOutPoint.Hash
					originIndex := txIn.PreviousOutPoint.Index
					if originTx, exists := txStore[*originHash]; exists {
						if originTx.Spent == nil {
							continue
						}
						if originIndex >= uint32(len(originTx.Spent)) {
							continue
						}
						originTx.Spent[originIndex] = true
					}
				}
			}
		}
	}

	// Loop through all of the stake transactions in the block to see if any
	// of them are ones we need to update and spend based on the results map.
	for i, tx := range block.STransactions() {
		// Update the transaction store with the transaction information
		// if it's one of the requested transactions.
		msgTx := tx.MsgTx()
		if txD, exists := txStore[*tx.Sha()]; exists {
			txD.Tx = tx
			txD.BlockHeight = block.Height()
			txD.BlockIndex = uint32(i)
			txD.Spent = make([]bool, len(msgTx.TxOut))
			txD.Err = nil
		}

		// Spend the origin transaction output.
		for _, txIn := range msgTx.TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			if originTx, exists := txStore[*originHash]; exists {
				if originTx.Spent == nil {
					continue
				}
				if originIndex >= uint32(len(originTx.Spent)) {
					continue
				}
				originTx.Spent[originIndex] = true
			}
		}
	}

	return nil
}
// disconnectTransactions updates the passed map by undoing transaction and
// spend information for all transactions in the passed block.  Only
// transactions in the passed map are updated.
func disconnectTransactions(txStore TxStore, block *dcrutil.Block,
	parent *dcrutil.Block) error {
	// Loop through all of the stake transactions in the block to see if any
	// of them are ones that need to be undone based on the transaction store.
	for _, tx := range block.STransactions() {
		// Clear this transaction from the transaction store if needed.
		// Only clear it rather than deleting it because the transaction
		// connect code relies on its presence to decide whether or not
		// to update the store and any transactions which exist on both
		// sides of a fork would otherwise not be updated.
		if txD, exists := txStore[*tx.Sha()]; exists {
			txD.Tx = nil
			txD.BlockHeight = int64(wire.NullBlockHeight)
			txD.BlockIndex = wire.NullBlockIndex
			txD.Spent = nil
			txD.Err = database.ErrTxShaMissing
		}

		// Unspend the origin transaction output.
		for _, txIn := range tx.MsgTx().TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			originTx, exists := txStore[*originHash]
			if exists && originTx.Tx != nil && originTx.Err == nil {
				if originTx.Spent == nil {
					continue
				}
				if originIndex >= uint32(len(originTx.Spent)) {
					continue
				}
				originTx.Spent[originIndex] = false
			}
		}
	}

	// There is no regular tx from before the genesis block, so ignore the
	// genesis block for the next step.
	if parent != nil && block.Height() != 0 {
		mBlock := block.MsgBlock()
		votebits := mBlock.Header.VoteBits
		regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid)

		// Only bother to unspend transactions if the parent's tx tree was
		// validated.  Otherwise, these transactions were never in the
		// blockchain's history in the first place.
		if regularTxTreeValid {
			// Loop through all of the regular transactions in the block to
			// see if any of them are ones that need to be undone based on
			// the transaction store.
			for _, tx := range parent.Transactions() {
				// Clear this transaction from the transaction store if
				// needed.  Only clear it rather than deleting it because
				// the transaction connect code relies on its presence to
				// decide whether or not to update the store and any
				// transactions which exist on both sides of a fork would
				// otherwise not be updated.
				if txD, exists := txStore[*tx.Sha()]; exists {
					txD.Tx = nil
					txD.BlockHeight = int64(wire.NullBlockHeight)
					txD.BlockIndex = wire.NullBlockIndex
					txD.Spent = nil
					txD.Err = database.ErrTxShaMissing
				}

				// Unspend the origin transaction output.
				for _, txIn := range tx.MsgTx().TxIn {
					originHash := &txIn.PreviousOutPoint.Hash
					originIndex := txIn.PreviousOutPoint.Index
					originTx, exists := txStore[*originHash]
					if exists && originTx.Tx != nil && originTx.Err == nil {
						if originTx.Spent == nil {
							continue
						}
						if originIndex >= uint32(len(originTx.Spent)) {
							continue
						}
						originTx.Spent[originIndex] = false
					}
				}
			}
		}
	}

	return nil
}
// spendTickets transfers tickets from the ticketMap to the spentTicketMap.
// Useful when connecting blocks.  Also pushes missed tickets to the missed
// ticket map.  usedTickets is a map that contains all tickets that were
// actually used in SSGen votes; all other tickets are considered missed.
//
// This function MUST be called with the tmdb lock held (for writes).
func (tmdb *TicketDB) spendTickets(parentBlock *dcrutil.Block,
	usedTickets map[chainhash.Hash]struct{},
	spendingHashes map[chainhash.Hash]chainhash.Hash) (SStxMemMap, error) {
	// If there is nothing being spent, break.
	if len(spendingHashes) < 1 {
		return nil, nil
	}

	// Make sure there's a bucket in the map for used tickets.
	height := parentBlock.Height() + 1
	tmdb.maybeInsertBlock(height)

	tempTickets := make(SStxMemMap)

	// Sort the entire list of tickets lexicographically by sorting
	// each bucket and then appending it to the list.
	totalTickets := 0
	var sortedSlice []*TicketData
	for i := 0; i < BucketsSize; i++ {
		mapLen := len(tmdb.maps.ticketMap[i])
		totalTickets += mapLen
		tempTdSlice := NewTicketDataSlice(mapLen)
		itr := 0 // Iterator
		for _, td := range tmdb.maps.ticketMap[i] {
			tempTdSlice[itr] = td
			itr++
		}
		sort.Sort(tempTdSlice)
		sortedSlice = append(sortedSlice, tempTdSlice...)
	}

	// Use the parent block's header to seed a PRNG that picks the lottery
	// winners.
	ticketsPerBlock := int(tmdb.chainParams.TicketsPerBlock)
	pbhB, err := parentBlock.MsgBlock().Header.Bytes()
	if err != nil {
		return nil, err
	}
	prng := NewHash256PRNG(pbhB)
	ts, err := FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng)
	if err != nil {
		return nil, err
	}

	ticketsToSpendOrMiss := make([]*TicketData, ticketsPerBlock)
	for i, idx := range ts {
		ticketsToSpendOrMiss[i] = sortedSlice[idx]
	}

	// Spend or miss these tickets by checking for their existence in the
	// passed usedTickets map.
	tixSpent := 0
	tixMissed := 0
	for _, ticket := range ticketsToSpendOrMiss {
		// Move the ticket from the live tickets map into the used tickets
		// map if the ticket was spent.
		_, wasSpent := usedTickets[ticket.SStxHash]
		if wasSpent {
			ticket.Missed = false
			ticket.SpendHash = spendingHashes[ticket.SStxHash]
			err := tmdb.pushSpentTicket(height, ticket)
			if err != nil {
				return nil, err
			}
			err = tmdb.removeLiveTicket(ticket)
			if err != nil {
				return nil, err
			}
			tixSpent++
		} else {
			// The ticket was selected but not spent by a vote, so mark
			// it missed.
			ticket.Missed = true // TODO fix test failure @ L150 due to this

			err := tmdb.pushSpentTicket(height, ticket)
			if err != nil {
				return nil, err
			}
			err = tmdb.pushMissedTicket(ticket)
			if err != nil {
				return nil, err
			}
			err = tmdb.removeLiveTicket(ticket)
			if err != nil {
				return nil, err
			}
			tixMissed++
		}

		// Report on the spent and missed tickets for the block in debug.
		if ticket.Missed {
			log.Debugf("Ticket %v has been missed and expired from "+
				"the lottery pool as a missed ticket", ticket.SStxHash)
		} else {
			log.Debugf("Ticket %v was spent and removed from "+
				"the lottery pool", ticket.SStxHash)
		}

		// Add the ticket to the temporary tickets buffer for later use in
		// map restoration if needed.
		tempTickets[ticket.SStxHash] = ticket
	}

	// Some sanity checks.
	if tixSpent != len(usedTickets) {
		errStr := fmt.Sprintf("spendTickets error: %v tickets were spent, "+
			"but %v tickets should have been spent", tixSpent,
			len(usedTickets))
		return nil, errors.New(errStr)
	}

	if tixMissed != (ticketsPerBlock - len(usedTickets)) {
		errStr := fmt.Sprintf("spendTickets error: %v tickets were missed, "+
			"but %v tickets should have been missed", tixMissed,
			ticketsPerBlock-len(usedTickets))
		return nil, errors.New(errStr)
	}

	if (tixSpent + tixMissed) != ticketsPerBlock {
		errStr := fmt.Sprintf("spendTickets error: %v tickets were spent "+
			"and missed, but exactly TicketsPerBlock (%v) should have been",
			tixSpent+tixMissed, ticketsPerBlock)
		return nil, errors.New(errStr)
	}

	return tempTickets, nil
}
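// The selection above sorts the live tickets deterministically, seeds a PRNG
// from the serialized parent block header, and draws TicketsPerBlock indices
// into that sorted list.  A toy, self-contained sketch of the same shape; the
// real code uses Hash256PRNG and FindTicketIdxs, while the SHA-256-seeded
// math/rand source below is only for illustration (imports: crypto/sha256,
// encoding/binary, math/rand, sort).
func pickWinners(parentHeader []byte, total, n int) []int {
	// Derive a deterministic seed from the parent header bytes.
	seedHash := sha256.Sum256(parentHeader)
	seed := int64(binary.LittleEndian.Uint64(seedHash[:8]))
	prng := rand.New(rand.NewSource(seed))

	// Draw n distinct indices in [0, total).
	chosen := make(map[int]struct{})
	var winners []int
	for len(winners) < n && len(winners) < total {
		idx := prng.Intn(total)
		if _, dup := chosen[idx]; dup {
			continue
		}
		chosen[idx] = struct{}{}
		winners = append(winners, idx)
	}
	sort.Ints(winners)
	return winners
}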
// indexBlock extracts all of the standard addresses from all of the
// transactions in the passed block and maps each of them to the associated
// transaction using the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block,
	parent *dcrutil.Block, view *blockchain.UtxoViewpoint) {
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var stakeStartIdx int
	if regularTxTreeValid {
		for txIdx, tx := range parent.Transactions() {
			// Coinbases do not reference any inputs.  Since the block is
			// required to have already gone through full validation, it has
			// already been proven that the first transaction in the block
			// is a coinbase.
			if txIdx != 0 {
				for _, txIn := range tx.MsgTx().TxIn {
					// The view should always have the input since
					// the index contract requires it, however, be
					// safe and simply ignore any missing entries.
					origin := &txIn.PreviousOutPoint
					entry := view.LookupEntry(&origin.Hash)
					if entry == nil {
						log.Warnf("Missing input %v for tx %v while "+
							"indexing block %v (height %v)\n", origin.Hash,
							tx.Sha(), block.Sha(), block.Height())
						continue
					}

					version := entry.ScriptVersionByIndex(origin.Index)
					pkScript := entry.PkScriptByIndex(origin.Index)
					txType := entry.TransactionType()
					idx.indexPkScript(data, version, pkScript, txIdx,
						txType == stake.TxTypeSStx)
				}
			}

			for _, txOut := range tx.MsgTx().TxOut {
				idx.indexPkScript(data, txOut.Version, txOut.PkScript, txIdx,
					false)
			}
		}
		stakeStartIdx = len(parent.Transactions())
	}

	for txIdx, tx := range block.STransactions() {
		msgTx := tx.MsgTx()
		thisTxOffset := txIdx + stakeStartIdx

		isSSGen, _ := stake.IsSSGen(msgTx)
		for i, txIn := range msgTx.TxIn {
			// Skip stakebases.
			if isSSGen && i == 0 {
				continue
			}

			// The view should always have the input since
			// the index contract requires it, however, be
			// safe and simply ignore any missing entries.
			origin := &txIn.PreviousOutPoint
			entry := view.LookupEntry(&origin.Hash)
			if entry == nil {
				log.Warnf("Missing input %v for tx %v while "+
					"indexing block %v (height %v)\n", origin.Hash,
					tx.Sha(), block.Sha(), block.Height())
				continue
			}

			version := entry.ScriptVersionByIndex(origin.Index)
			pkScript := entry.PkScriptByIndex(origin.Index)
			txType := entry.TransactionType()
			idx.indexPkScript(data, version, pkScript, thisTxOffset,
				txType == stake.TxTypeSStx)
		}

		isSStx, _ := stake.IsSStx(msgTx)
		for _, txOut := range msgTx.TxOut {
			idx.indexPkScript(data, txOut.Version, txOut.PkScript,
				thisTxOffset, isSStx)
		}
	}
}