// connectTransactions updates the view by adding all new utxos created by all
// of the transactions in the passed block, marking all utxos the transactions
// spend as spent, and setting the best hash for the view to the passed block.
// In addition, when the 'stxos' argument is not nil, it will be updated to
// append an entry for each spent txout.
func (b *BlockChain) connectTransactions(view *UtxoViewpoint, block *dcrutil.Block, parent *dcrutil.Block, stxos *[]spentTxOut) error {
	// The stake viewpoint for this block's stake transactions depends on
	// whether the vote bits declare the parent's regular tree valid.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	thisNodeStakeViewpoint := ViewpointPrevInvalidStake
	if regularTxTreeValid {
		thisNodeStakeViewpoint = ViewpointPrevValidStake
	}

	// Connect the parent's regular transactions when the vote bits approve
	// them.  The genesis block has no parent, so it is skipped here.
	if parent != nil && block.Height() != 0 {
		view.SetStakeViewpoint(ViewpointPrevValidInitial)
		err := view.fetchInputUtxos(b.db, block, parent)
		if err != nil {
			return err
		}
		mBlock := block.MsgBlock()
		votebits := mBlock.Header.VoteBits
		// NOTE(review): this shadows the outer regularTxTreeValid with an
		// identical recomputation; harmless but redundant.
		regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid)
		if regularTxTreeValid {
			// The parent's regular transactions are connected at the
			// parent's height.
			for i, tx := range parent.Transactions() {
				err := view.connectTransaction(tx, parent.Height(),
					uint32(i), stxos)
				if err != nil {
					return err
				}
			}
		}
	}

	// Connect this block's stake transactions under the stake viewpoint
	// chosen above.  The input utxos are refetched for each stake
	// transaction after the viewpoint is (re)applied.
	for i, stx := range block.STransactions() {
		view.SetStakeViewpoint(thisNodeStakeViewpoint)
		err := view.fetchInputUtxos(b.db, block, parent)
		if err != nil {
			return err
		}
		err = view.connectTransaction(stx, block.Height(), uint32(i), stxos)
		if err != nil {
			return err
		}
	}

	// Update the best hash for view to include this block since all of its
	// transactions have been connected.
	view.SetBestHash(block.Sha())
	return nil
}
// lookupTransaction is a special transaction lookup function that searches
// the database, the block, and its parent for a transaction. This is needed
// because indexBlockAddrs is called AFTER a block is added/removed in the
// blockchain in blockManager, necessitating that the blocks internally be
// searched for inputs for any given transaction too. Additionally, it's faster
// to get the tx from the blocks here since they're already loaded into
// memory.
func (a *addrIndexer) lookupTransaction(txHash chainhash.Hash, blk *dcrutil.Block, parent *dcrutil.Block) (*wire.MsgTx, error) {
	// Search the previous block and parent first.
	txTreeRegularValid := dcrutil.IsFlagSet16(blk.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)

	// Search the regular tx tree of this and the last block if the
	// tx tree regular was validated.
	if txTreeRegularValid {
		// Parent stake tree, then parent regular tree, then this
		// block's regular tree.
		for _, stx := range parent.STransactions() {
			if stx.Sha().IsEqual(&txHash) {
				return stx.MsgTx(), nil
			}
		}
		for _, tx := range parent.Transactions() {
			if tx.Sha().IsEqual(&txHash) {
				return tx.MsgTx(), nil
			}
		}
		for _, tx := range blk.Transactions() {
			if tx.Sha().IsEqual(&txHash) {
				return tx.MsgTx(), nil
			}
		}
	} else {
		// Just search this block's regular tx tree and the previous
		// block's stake tx tree.
		for _, stx := range parent.STransactions() {
			if stx.Sha().IsEqual(&txHash) {
				return stx.MsgTx(), nil
			}
		}
		for _, tx := range blk.Transactions() {
			if tx.Sha().IsEqual(&txHash) {
				return tx.MsgTx(), nil
			}
		}
	}

	// Lookup and fetch the referenced output's tx in the database.
	txList, err := a.server.db.FetchTxBySha(&txHash)
	if err != nil {
		adxrLog.Errorf("Error fetching tx %v: %v", txHash, err)
		return nil, err
	}
	if len(txList) == 0 {
		return nil, fmt.Errorf("transaction %v not found", txHash)
	}

	// Use the most recent entry when multiple versions of the tx exist.
	return txList[len(txList)-1].Tx, nil
}
// DropAfterBlockBySha removes any blocks from the database after the given // block. This is different than a simple truncate since the spend information // for each block must also be unwound. This is part of the database.Db interface // implementation. func (db *MemDb) DropAfterBlockBySha(sha *chainhash.Hash) error { db.Lock() defer db.Unlock() if db.closed { return ErrDbClosed } // Begin by attempting to find the height associated with the passed // hash. height, exists := db.blocksBySha[*sha] if !exists { return fmt.Errorf("block %v does not exist in the database", sha) } // The spend information has to be undone in reverse order, so loop // backwards from the last block through the block just after the passed // block. While doing this unspend all transactions in each block and // remove the block. endHeight := int64(len(db.blocks) - 1) for i := endHeight; i > height; i-- { blk := db.blocks[i] blkprev := db.blocks[i-1] // Unspend the stake tx in the current block for _, tx := range blk.STransactions { txSha := tx.TxSha() db.removeTx(tx, &txSha) } // Check to see if the regular txs of the parent were even included; if // they are, unspend all of these regular tx too votebits := blk.Header.VoteBits if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && height != 0 { // Unspend the regular tx in the previous block for _, tx := range blkprev.Transactions { txSha := tx.TxSha() db.removeTx(tx, &txSha) } } db.blocks[i] = nil db.blocks = db.blocks[:i] } return nil }
// CalculateAddedSubsidy calculates the amount of subsidy added by a block // and its parent. The blocks passed to this function MUST be valid blocks // that have already been confirmed to abide by the consensus rules of the // network, or the function might panic. func CalculateAddedSubsidy(block, parent *dcrutil.Block) int64 { var subsidy int64 regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) if regularTxTreeValid { subsidy += parent.MsgBlock().Transactions[0].TxIn[0].ValueIn } for _, stx := range block.MsgBlock().STransactions { if isSSGen, _ := stake.IsSSGen(stx); isSSGen { subsidy += stx.TxIn[0].ValueIn } } return subsidy }
// testExistsTxSha ensures ExistsTxSha conforms to the interface contract.
func testExistsTxSha(tc *testContext) bool {
	var blockPrev *dcrutil.Block = nil
	// Decred: WARNING. This function assumes that all block insertion calls have
	// dcrutil.blocks passed to them with block.blockHeight set correctly. However,
	// loading the genesis block in dcrd didn't do this (via block manager); pre-
	// production it should be established that all calls to this function pass
	// blocks with block.blockHeight set correctly.
	if tc.block.Height() != 0 {
		var errBlockPrev error
		blockPrev, errBlockPrev = tc.db.FetchBlockBySha(&tc.block.MsgBlock().Header.PrevBlock)
		if errBlockPrev != nil {
			blockSha := tc.block.Sha()
			tc.t.Errorf("Failed to fetch parent block of block %v", blockSha)
		}
	}

	// Only check the parent's regular transactions, and only when this
	// block's vote bits declare the parent's regular tree valid.
	votebits := tc.block.MsgBlock().Header.VoteBits
	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		for i, tx := range blockPrev.Transactions() {
			// The transaction must exist in the database.
			txHash := tx.Sha()
			exists, err := tc.db.ExistsTxSha(txHash)
			if err != nil {
				tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) tx #%d "+
					"(%s) unexpected error: %v", tc.dbType,
					tc.blockHeight, tc.blockHash, i, txHash, err)
				return false
			}
			if !exists {
				// Double-check with a fetch before reporting the miss.
				_, err := tc.db.FetchTxBySha(txHash)
				if err != nil {
					tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) "+
						"tx #%d (%s) does not exist", tc.dbType,
						tc.blockHeight, tc.blockHash, i, txHash)
				}
				return false
			}
		}
	}
	return true
}
// LogBlockHeight logs a new block height as an information message to show // progress to the user. In order to prevent spam, it limits logging to one // message every 10 seconds with duration and totals included. func (b *BlockProgressLogger) LogBlockHeight(block, parent *dcrutil.Block) { b.Lock() defer b.Unlock() b.receivedLogBlocks++ regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) if regularTxTreeValid { b.receivedLogTx += int64(len(parent.MsgBlock().Transactions)) } b.receivedLogTx += int64(len(block.MsgBlock().STransactions)) now := time.Now() duration := now.Sub(b.lastBlockLogTime) if duration < time.Second*10 { return } // Truncate the duration to 10s of milliseconds. durationMillis := int64(duration / time.Millisecond) tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10) // Log information about new block height. blockStr := "blocks" if b.receivedLogBlocks == 1 { blockStr = "block" } txStr := "transactions" if b.receivedLogTx == 1 { txStr = "transaction" } b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)", b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx, txStr, block.Height(), block.MsgBlock().Header.Timestamp) b.receivedLogBlocks = 0 b.receivedLogTx = 0 b.lastBlockLogTime = now }
// dbRemoveTxIndexEntries uses an existing database transaction to remove the // latest transaction entry for every transaction in the passed block. func dbRemoveTxIndexEntries(dbTx database.Tx, block, parent *dcrutil.Block) error { regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) if regularTxTreeValid { for _, tx := range parent.Transactions() { txSha := tx.Sha() err := dbRemoveTxIndexEntry(dbTx, *txSha) if err != nil { return err } } } for _, tx := range block.STransactions() { txSha := tx.Sha() err := dbRemoveTxIndexEntry(dbTx, *txSha) if err != nil { return err } } return nil }
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// The offset and length of the transactions within the serialized
	// block for the regular transactions of the previous block, if
	// applicable.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var parentTxLocs []wire.TxLoc
	var parentBlockID uint32
	if regularTxTreeValid && block.Height() > 1 {
		var err error
		parentTxLocs, _, err = parent.TxLoc()
		if err != nil {
			return err
		}

		parentSha := parent.Sha()
		parentBlockID, err = dbFetchBlockIDByHash(dbTx, *parentSha)
		if err != nil {
			return err
		}
	}

	// The offset and length of the transactions within the serialized
	// block for the added stake transactions.
	_, blockStxLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// Nothing to index, just return.
	if len(parentTxLocs)+len(blockStxLocs) == 0 {
		return nil
	}

	// Get the internal block ID associated with the block.
	blockSha := block.Sha()
	blockID, err := dbFetchBlockIDByHash(dbTx, *blockSha)
	if err != nil {
		return err
	}

	// Build all of the address to transaction mappings in a local map.
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, parent, view)

	// Add all of the index entries for each address.  Transaction indexes
	// below stakeIdxsStart refer to the parent's regular tree; indexes at
	// or above it refer to this block's stake tree.
	stakeIdxsStart := len(parentTxLocs)
	allTxLocs := append(parentTxLocs, blockStxLocs...)
	addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		for _, txIdx := range txIdxs {
			// Switch to using the newest block ID for the stake transactions,
			// since these are not from the parent. Offset the index to be
			// correct for the location in this given block.
			blockIDToUse := parentBlockID
			if txIdx >= stakeIdxsStart {
				blockIDToUse = blockID
			}

			err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
				blockIDToUse, allTxLocs[txIdx])
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// InsertBlock inserts raw block and transaction data from a block into the
// database. The first block inserted into the database will be treated as the
// genesis block. Every subsequent block insert requires the referenced parent
// block to already exist.
func (db *LevelDb) InsertBlock(block *dcrutil.Block) (height int64, rerr error) {
	// Be careful with this function on syncs. It contains decred changes.

	// Obtain the previous block first so long as it's not the genesis block
	var blockPrev *dcrutil.Block

	// Decred: WARNING. This function assumes that all block insertion calls have
	// dcrutil.blocks passed to them with block.blockHeight set correctly. However,
	// loading the genesis block in btcd didn't do this (via block manager); pre-
	// production it should be established that all calls to this function pass
	// blocks with block.blockHeight set correctly.
	if block.Height() != 0 {
		var errBlockPrev error
		blockPrev, errBlockPrev = db.FetchBlockBySha(&block.MsgBlock().Header.PrevBlock)
		if errBlockPrev != nil {
			blockSha := block.Sha()
			log.Warnf("Failed to fetch parent block of block %v", blockSha)
			return 0, errBlockPrev
		}
	}

	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	// Flush the accumulated batch on success; otherwise discard any partial
	// writes queued by this call.
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	blocksha := block.Sha()
	mblock := block.MsgBlock()
	rawMsg, err := block.Bytes()
	if err != nil {
		log.Warnf("Failed to obtain raw block sha %v", blocksha)
		return 0, err
	}
	_, sTxLoc, err := block.TxLoc()
	if err != nil {
		log.Warnf("Failed to obtain raw block sha %v, stxloc %v", blocksha, sTxLoc)
		return 0, err
	}

	// Insert block into database
	newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock,
		rawMsg)
	if err != nil {
		log.Warnf("Failed to insert block %v %v %v", blocksha,
			&mblock.Header.PrevBlock, err)
		return 0, err
	}

	// Get data necessary to process regular tx tree of parent block if it's not
	// the genesis block.
	var mBlockPrev *wire.MsgBlock
	var txLoc []wire.TxLoc

	if blockPrev != nil {
		blockShaPrev := blockPrev.Sha()

		mBlockPrev = blockPrev.MsgBlock()

		txLoc, _, err = blockPrev.TxLoc()
		if err != nil {
			log.Warnf("Failed to obtain raw block sha %v, txloc %v",
				blockShaPrev, txLoc)
			return 0, err
		}
	}

	// Insert the regular tx of the parent block into the tx database if the vote
	// bits enable it, and if it's not the genesis block.
	votebits := mblock.Header.VoteBits
	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		for txidx, tx := range mBlockPrev.Transactions {
			txsha, err := blockPrev.TxSha(txidx)

			if err != nil {
				log.Warnf("failed to compute tx name block %v idx %v err %v",
					blocksha, txidx, err)
				return 0, err
			}
			// One bit per output; pad bits beyond the last real output are
			// pre-set so the tx reads as fully spent once all real outputs
			// are spent.
			spentbuflen := (len(tx.TxOut) + 7) / 8
			spentbuf := make([]byte, spentbuflen, spentbuflen)
			if len(tx.TxOut)%8 != 0 {
				for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
					spentbuf[spentbuflen-1] |= (byte(1) << i)
				}
			}
			// newheight-1 instead of newheight below, as the tx is actually found
			// in the parent.
			//fmt.Printf("insert tx %v into db at height %v\n", txsha, newheight)
			err = db.insertTx(txsha, newheight-1, uint32(txidx),
				txLoc[txidx].TxStart, txLoc[txidx].TxLen, spentbuf)
			if err != nil {
				log.Warnf("block %v idx %v failed to insert tx %v %v err %v",
					blocksha, newheight-1, &txsha, txidx, err)
				return 0, err
			}

			// Mark the outputs this transaction spends as spent.
			err = db.doSpend(tx)
			if err != nil {
				log.Warnf("block %v idx %v failed to spend tx %v %v err %v",
					blocksha, newheight, txsha, txidx, err)
				return 0, err
			}
		}
	}

	// Insert the stake tx of the current block into the tx database.
	if len(mblock.STransactions) != 0 {
		for txidx, tx := range mblock.STransactions {
			txsha, err := block.STxSha(txidx)

			if err != nil {
				log.Warnf("failed to compute stake tx name block %v idx %v err %v",
					blocksha, txidx, err)
				return 0, err
			}
			// Same spent-bitmap padding scheme as for regular transactions
			// above.
			spentbuflen := (len(tx.TxOut) + 7) / 8
			spentbuf := make([]byte, spentbuflen, spentbuflen)
			if len(tx.TxOut)%8 != 0 {
				for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
					spentbuf[spentbuflen-1] |= (byte(1) << i)
				}
			}

			err = db.insertTx(txsha, newheight, uint32(txidx),
				sTxLoc[txidx].TxStart, sTxLoc[txidx].TxLen, spentbuf)
			if err != nil {
				log.Warnf("block %v idx %v failed to insert stake tx %v %v err %v",
					blocksha, newheight, &txsha, txidx, err)
				return 0, err
			}

			// Mark the outputs this stake transaction spends as spent.
			err = db.doSpend(tx)
			if err != nil {
				log.Warnf("block %v idx %v failed to spend stx %v %v err %v",
					blocksha, newheight, txsha, txidx, err)
				return 0, err
			}
		}
	}

	return newheight, nil
}
// DropAfterBlockBySha will remove any blocks from the database after
// the given block.
func (db *LevelDb) DropAfterBlockBySha(sha *chainhash.Hash) (rerr error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	// Flush the accumulated batch on success; otherwise discard any partial
	// writes queued by this call.
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	startheight := db.nextBlock - 1

	keepidx, err := db.getBlkLoc(sha)
	if err != nil {
		// should the error here be normalized ?
		log.Tracef("block loc failed %v ", sha)
		return err
	}

	// Walk backwards from the chain tip down to (but not including) the
	// block to keep, unwinding spend information and deleting each block.
	for height := startheight; height > keepidx; height = height - 1 {
		var blk *dcrutil.Block
		blksha, buf, err := db.getBlkByHeight(height)
		if err != nil {
			return err
		}
		blk, err = dcrutil.NewBlockFromBytes(buf)
		if err != nil {
			return err
		}

		// Obtain previous block sha and buffer
		var blkprev *dcrutil.Block
		_, bufprev, errprev := db.getBlkByHeight(height - 1) // discard blkshaprev
		if errprev != nil {
			return errprev
		}

		// Do the same thing for the parent block
		blkprev, errprev = dcrutil.NewBlockFromBytes(bufprev)
		if errprev != nil {
			return errprev
		}

		// Unspend the stake tx in the current block
		for _, tx := range blk.MsgBlock().STransactions {
			err = db.unSpend(tx)
			if err != nil {
				return err
			}
		}
		// rather than iterate the list of tx backward, do it twice.
		for _, tx := range blk.STransactions() {
			var txUo txUpdateObj
			txUo.delete = true
			db.txUpdateMap[*tx.Sha()] = &txUo
		}

		// Check to see if the regular txs of the parent were even included; if
		// they are, unspend all of these regular tx too
		votebits := blk.MsgBlock().Header.VoteBits
		if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && height != 0 {
			// Unspend the regular tx in the previous (parent) block
			for _, tx := range blkprev.MsgBlock().Transactions {
				err = db.unSpend(tx)
				if err != nil {
					return err
				}
			}
			// rather than iterate the list of tx backward, do it twice.
			for _, tx := range blkprev.Transactions() {
				var txUo txUpdateObj
				txUo.delete = true
				db.txUpdateMap[*tx.Sha()] = &txUo
			}
		}

		// Remove the block's hash and height index entries.
		db.lBatch().Delete(shaBlkToKey(blksha))
		db.lBatch().Delete(int64ToKey(height))
	}

	// update the last block cache
	db.lastBlkShaCached = true
	db.lastBlkSha = *sha
	db.lastBlkIdx = keepidx
	db.nextBlock = keepidx + 1

	return nil
}
// makeUtxoView creates a mock unspent transaction output view by using the
// transaction index in order to look up all inputs referenced by the
// transactions in the block. This is sometimes needed when catching indexes up
// because many of the txouts could actually already be spent however the
// associated scripts are still required to index them.
func makeUtxoView(dbTx database.Tx, block, parent *dcrutil.Block) (*blockchain.UtxoViewpoint, error) {
	view := blockchain.NewUtxoViewpoint()

	// Only include the parent's regular transactions when this block's
	// vote bits declare the parent's regular tree valid.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	if regularTxTreeValid {
		for txIdx, tx := range parent.Transactions() {
			// Coinbases do not reference any inputs. Since the block is
			// required to have already gone through full validation, it has
			// already been proven that the first transaction in the block is
			// a coinbase.
			if txIdx == 0 {
				continue
			}

			// Use the transaction index to load all of the referenced
			// inputs and add their outputs to the view.
			for _, txIn := range tx.MsgTx().TxIn {
				// Skip already fetched outputs.
				originOut := &txIn.PreviousOutPoint
				if view.LookupEntry(&originOut.Hash) != nil {
					continue
				}

				originTx, err := dbFetchTx(dbTx, originOut.Hash)
				if err != nil {
					return nil, err
				}

				view.AddTxOuts(dcrutil.NewTx(originTx),
					int64(wire.NullBlockHeight), wire.NullBlockIndex)
			}
		}
	}

	for _, tx := range block.STransactions() {
		msgTx := tx.MsgTx()
		isSSGen, _ := stake.IsSSGen(msgTx)

		// Use the transaction index to load all of the referenced
		// inputs and add their outputs to the view.
		for i, txIn := range msgTx.TxIn {
			// Skip stakebases.
			if isSSGen && i == 0 {
				continue
			}

			// Skip already fetched outputs.
			originOut := &txIn.PreviousOutPoint
			if view.LookupEntry(&originOut.Hash) != nil {
				continue
			}

			originTx, err := dbFetchTx(dbTx, originOut.Hash)
			if err != nil {
				return nil, err
			}

			view.AddTxOuts(dcrutil.NewTx(originTx),
				int64(wire.NullBlockHeight), wire.NullBlockIndex)
		}
	}

	return view, nil
}
// disconnectTransactions updates the passed map by undoing transaction and
// spend information for all transactions in the passed block. Only
// transactions in the passed map are updated.
func disconnectTransactions(txStore TxStore, block *dcrutil.Block, parent *dcrutil.Block) error {
	// Loop through all of the stake transactions in the block to see if any of
	// them are ones that need to be undone based on the transaction store.
	for _, tx := range block.STransactions() {
		// Clear this transaction from the transaction store if needed.
		// Only clear it rather than deleting it because the transaction
		// connect code relies on its presence to decide whether or not
		// to update the store and any transactions which exist on both
		// sides of a fork would otherwise not be updated.
		if txD, exists := txStore[*tx.Sha()]; exists {
			txD.Tx = nil
			txD.BlockHeight = int64(wire.NullBlockHeight)
			txD.BlockIndex = wire.NullBlockIndex
			txD.Spent = nil
			txD.Err = database.ErrTxShaMissing
		}

		// Unspend the origin transaction output.
		for _, txIn := range tx.MsgTx().TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			originTx, exists := txStore[*originHash]
			if exists && originTx.Tx != nil && originTx.Err == nil {
				// Missing or out-of-range spent data is tolerated since
				// only tracked transactions are updated.
				if originTx.Spent == nil {
					continue
				}
				if originIndex >= uint32(len(originTx.Spent)) {
					continue
				}
				originTx.Spent[originIndex] = false
			}
		}
	}

	// There is no regular tx from before the genesis block, so ignore the genesis
	// block for the next step.
	if parent != nil && block.Height() != 0 {
		mBlock := block.MsgBlock()
		votebits := mBlock.Header.VoteBits
		regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid)

		// Only bother to unspend transactions if the parent's tx tree was
		// validated. Otherwise, these transactions were never in the blockchain's
		// history in the first place.
		if regularTxTreeValid {
			// Loop through all of the regular transactions in the block to see if
			// any of them are ones that need to be undone based on the
			// transaction store.
			for _, tx := range parent.Transactions() {
				// Clear this transaction from the transaction store if needed.
				// Only clear it rather than deleting it because the transaction
				// connect code relies on its presence to decide whether or not
				// to update the store and any transactions which exist on both
				// sides of a fork would otherwise not be updated.
				if txD, exists := txStore[*tx.Sha()]; exists {
					txD.Tx = nil
					txD.BlockHeight = int64(wire.NullBlockHeight)
					txD.BlockIndex = wire.NullBlockIndex
					txD.Spent = nil
					txD.Err = database.ErrTxShaMissing
				}

				// Unspend the origin transaction output.
				for _, txIn := range tx.MsgTx().TxIn {
					originHash := &txIn.PreviousOutPoint.Hash
					originIndex := txIn.PreviousOutPoint.Index
					originTx, exists := txStore[*originHash]
					if exists && originTx.Tx != nil && originTx.Err == nil {
						if originTx.Spent == nil {
							continue
						}
						if originIndex >= uint32(len(originTx.Spent)) {
							continue
						}
						originTx.Spent[originIndex] = false
					}
				}
			}
		}
	}

	return nil
}
// connectTransactions updates the passed map by applying transaction and
// spend information for all transactions in the passed block (and, when the
// vote bits validate it, the parent's regular tree). Only transactions in the
// passed map are updated.
func connectTransactions(txStore TxStore, block *dcrutil.Block, parent *dcrutil.Block) error {
	// There is no regular tx from before the genesis block, so ignore the genesis
	// block for the next step.
	if parent != nil && block.Height() != 0 {
		mBlock := block.MsgBlock()
		votebits := mBlock.Header.VoteBits
		regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid)

		// Only add the transactions in the event that the parent block's regular
		// tx were validated.
		if regularTxTreeValid {
			// Loop through all of the regular transactions in the block to see if
			// any of them are ones we need to update and spend based on the
			// results map.
			for i, tx := range parent.Transactions() {
				// Update the transaction store with the transaction information
				// if it's one of the requested transactions.
				msgTx := tx.MsgTx()
				if txD, exists := txStore[*tx.Sha()]; exists {
					txD.Tx = tx
					// The parent's regular transactions live at the
					// previous height.
					txD.BlockHeight = block.Height() - 1
					txD.BlockIndex = uint32(i)
					txD.Spent = make([]bool, len(msgTx.TxOut))
					txD.Err = nil
				}

				// Spend the origin transaction output.
				for _, txIn := range msgTx.TxIn {
					originHash := &txIn.PreviousOutPoint.Hash
					originIndex := txIn.PreviousOutPoint.Index
					if originTx, exists := txStore[*originHash]; exists {
						if originTx.Spent == nil {
							continue
						}
						if originIndex >= uint32(len(originTx.Spent)) {
							continue
						}
						originTx.Spent[originIndex] = true
					}
				}
			}
		}
	}

	// Loop through all of the stake transactions in the block to see if any of
	// them are ones we need to update and spend based on the results map.
	for i, tx := range block.STransactions() {
		// Update the transaction store with the transaction information
		// if it's one of the requested transactions.
		msgTx := tx.MsgTx()
		if txD, exists := txStore[*tx.Sha()]; exists {
			txD.Tx = tx
			txD.BlockHeight = block.Height()
			txD.BlockIndex = uint32(i)
			txD.Spent = make([]bool, len(msgTx.TxOut))
			txD.Err = nil
		}

		// Spend the origin transaction output.
		for _, txIn := range msgTx.TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			if originTx, exists := txStore[*originHash]; exists {
				if originTx.Spent == nil {
					continue
				}
				if originIndex >= uint32(len(originTx.Spent)) {
					continue
				}
				originTx.Spent[originIndex] = true
			}
		}
	}

	return nil
}
// insert every block in the test chain // after each insert, fetch all the tx affected by the latest // block and verify that the the tx is spent/unspent // new tx should be fully unspent, referenced tx should have // the associated txout set to spent. // checks tx tree stake only func testUnspentInsertStakeTree(t *testing.T) { // Ignore db remove errors since it means we didn't have an old one. dbname := fmt.Sprintf("tstdbuspnt1") dbnamever := dbname + ".ver" _ = os.RemoveAll(dbname) _ = os.RemoveAll(dbnamever) db, err := database.CreateDB("leveldb", dbname) if err != nil { t.Errorf("Failed to open test database %v", err) return } defer os.RemoveAll(dbname) defer os.RemoveAll(dbnamever) defer func() { if err := db.Close(); err != nil { t.Errorf("Close: unexpected error: %v", err) } }() blocks := loadblocks(t) endtest: for height := int64(0); height < int64(len(blocks))-1; height++ { block := blocks[height] var txneededList []*chainhash.Hash var txlookupList []*chainhash.Hash var txOutList []*chainhash.Hash var txInList []*wire.OutPoint spentFromParent := make(map[wire.OutPoint]struct{}) for _, tx := range block.MsgBlock().STransactions { for _, txin := range tx.TxIn { if txin.PreviousOutPoint.Index == uint32(4294967295) { continue } origintxsha := &txin.PreviousOutPoint.Hash exists, err := db.ExistsTxSha(origintxsha) if err != nil { t.Errorf("ExistsTxSha: unexpected error %v ", err) } if !exists { // Check and see if the outpoint references txtreeregular of // the previous block. If it does, make sure nothing in tx // treeregular spends it in flight. Then check make sure it's // not currently spent for this block. If it isn't, mark it // spent and skip lookup in the db below, since the db won't // yet be able to add it as it's still to be inserted. 
spentFromParentReg := false parent := blocks[height-1] parentValid := dcrutil.IsFlagSet16(dcrutil.BlockValid, block.MsgBlock().Header.VoteBits) if parentValid { for _, prtx := range parent.Transactions() { // Check and make sure it's not being spent in this tx // tree first by an in flight tx. Mark it spent if it // is so it fails the check below. for _, prtxCheck := range parent.Transactions() { for _, prTxIn := range prtxCheck.MsgTx().TxIn { if prTxIn.PreviousOutPoint == txin.PreviousOutPoint { spentFromParent[txin.PreviousOutPoint] = struct{}{} } } } // If it is in the tree, make sure it's not already spent // somewhere else and mark it spent. Set the flag below // so we skip lookup. if prtx.Sha().IsEqual(origintxsha) { if _, spent := spentFromParent[txin.PreviousOutPoint]; !spent { spentFromParent[txin.PreviousOutPoint] = struct{}{} spentFromParentReg = true } } } } if !spentFromParentReg { t.Errorf("referenced tx not found %v %v", origintxsha, height) } else { continue } } txInList = append(txInList, &txin.PreviousOutPoint) txneededList = append(txneededList, origintxsha) txlookupList = append(txlookupList, origintxsha) } txshaname := tx.TxSha() txlookupList = append(txlookupList, &txshaname) txOutList = append(txOutList, &txshaname) } txneededmap := map[chainhash.Hash]*database.TxListReply{} txlist := db.FetchUnSpentTxByShaList(txneededList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v", txe.Sha, txe.Err) break endtest } txneededmap[*txe.Sha] = txe } for _, spend := range txInList { itxe := txneededmap[spend.Hash] if itxe.TxSpent[spend.Index] == true { t.Errorf("txin %v:%v is already spent", spend.Hash, spend.Index) } } newheight, err := db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) break endtest } if newheight != height { t.Errorf("height mismatch expect %v returned %v", height, newheight) break endtest } // only check transactions if current block is valid 
txlookupmap := map[chainhash.Hash]*database.TxListReply{} txlist = db.FetchTxByShaList(txlookupList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err, height) break endtest } txlookupmap[*txe.Sha] = txe } for _, spend := range txInList { itxe := txlookupmap[spend.Hash] if itxe.TxSpent[spend.Index] == false { t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent) } } for _, txo := range txOutList { itxe := txlookupmap[*txo] for i, spent := range itxe.TxSpent { if spent == true { t.Errorf("height: %v freshly inserted tx %v already spent %v", height, txo, i) } } } if len(txInList) == 0 { continue } dropblock := blocks[height-1] err = db.DropAfterBlockBySha(dropblock.Sha()) if err != nil { t.Errorf("failed to drop block %v err %v", height, err) break endtest } txlookupmap = map[chainhash.Hash]*database.TxListReply{} txlist = db.FetchUnSpentTxByShaList(txlookupList) for _, txe := range txlist { if txe.Err != nil { if _, ok := txneededmap[*txe.Sha]; ok { t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) break endtest } } txlookupmap[*txe.Sha] = txe } for _, spend := range txInList { itxe := txlookupmap[spend.Hash] if itxe.TxSpent[spend.Index] == true { t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent) } } newheight, err = db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) break endtest } txlookupmap = map[chainhash.Hash]*database.TxListReply{} txlist = db.FetchTxByShaList(txlookupList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) break endtest } txlookupmap[*txe.Sha] = txe } for _, spend := range txInList { itxe := txlookupmap[spend.Hash] if itxe.TxSpent[spend.Index] == false { t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent) } } } }
// indexBlock extracts all of the standard addresses from all of the
// transactions in the passed block and maps each of them to the associated
// transaction using the passed map.
//
// The regular transactions indexed come from the PARENT block (they are only
// committed once this block votes them valid), while the stake transactions
// come from the current block. Stake transactions are offset by the number of
// regular transactions indexed so the combined ordering is unambiguous.
func (idx *AddrIndex) indexBlock(data writeIndexData, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) {
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var stakeStartIdx int
	if regularTxTreeValid {
		for txIdx, tx := range parent.Transactions() {
			// Coinbases do not reference any inputs.  Since the block is
			// required to have already gone through full validation, it has
			// already been proven that the first transaction in the block is
			// a coinbase.
			if txIdx != 0 {
				for _, txIn := range tx.MsgTx().TxIn {
					// The view should always have the input since
					// the index contract requires it, however, be
					// safe and simply ignore any missing entries.
					origin := &txIn.PreviousOutPoint
					entry := view.LookupEntry(&origin.Hash)
					if entry == nil {
						log.Warnf("Missing input %v for tx %v while "+
							"indexing block %v (height %v)\n", origin.Hash,
							tx.Sha(), block.Sha(), block.Height())
						continue
					}

					version := entry.ScriptVersionByIndex(origin.Index)
					pkScript := entry.PkScriptByIndex(origin.Index)
					txType := entry.TransactionType()
					idx.indexPkScript(data, version, pkScript, txIdx,
						txType == stake.TxTypeSStx)
				}
			}

			// Index every output script of the regular transaction.
			for _, txOut := range tx.MsgTx().TxOut {
				idx.indexPkScript(data, txOut.Version, txOut.PkScript, txIdx,
					false)
			}
		}

		// Stake transactions are indexed after the parent's regular ones.
		stakeStartIdx = len(parent.Transactions())
	}

	for txIdx, tx := range block.STransactions() {
		msgTx := tx.MsgTx()
		thisTxOffset := txIdx + stakeStartIdx

		isSSGen, _ := stake.IsSSGen(msgTx)
		for i, txIn := range msgTx.TxIn {
			// Skip stakebases (the first input of a vote is not a real
			// outpoint).
			if isSSGen && i == 0 {
				continue
			}

			// The view should always have the input since
			// the index contract requires it, however, be
			// safe and simply ignore any missing entries.
			origin := &txIn.PreviousOutPoint
			entry := view.LookupEntry(&origin.Hash)
			if entry == nil {
				log.Warnf("Missing input %v for tx %v while "+
					"indexing block %v (height %v)\n", origin.Hash,
					tx.Sha(), block.Sha(), block.Height())
				continue
			}

			version := entry.ScriptVersionByIndex(origin.Index)
			pkScript := entry.PkScriptByIndex(origin.Index)
			txType := entry.TransactionType()
			idx.indexPkScript(data, version, pkScript, thisTxOffset,
				txType == stake.TxTypeSStx)
		}

		// Ticket purchase outputs are flagged so their commitment scripts
		// are handled specially by indexPkScript.
		isSStx, _ := stake.IsSStx(msgTx)
		for _, txOut := range msgTx.TxOut {
			idx.indexPkScript(data, txOut.Version, txOut.PkScript,
				thisTxOffset, isSStx)
		}
	}
}
// dbAddTxIndexEntries uses an existing database transaction to add a // transaction index entry for every transaction in the passed block. func dbAddTxIndexEntries(dbTx database.Tx, block, parent *dcrutil.Block, blockID uint32) error { // The offset and length of the transactions within the serialized // block, for the regular transactions of the parent (if added) // and the stake transactions of the current block. regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) var parentRegularTxs []*dcrutil.Tx var parentTxLocs []wire.TxLoc var parentBlockID uint32 if regularTxTreeValid && block.Height() > 1 { var err error parentRegularTxs = parent.Transactions() parentTxLocs, _, err = parent.TxLoc() if err != nil { return err } parentSha := parent.Sha() parentBlockID, err = dbFetchBlockIDByHash(dbTx, *parentSha) if err != nil { return err } } _, blockStxLocs, err := block.TxLoc() if err != nil { return err } allTxs := append(parentRegularTxs, block.STransactions()...) allTxsLocs := append(parentTxLocs, blockStxLocs...) stakeTxStartIdx := len(parentRegularTxs) // As an optimization, allocate a single slice big enough to hold all // of the serialized transaction index entries for the block and // serialize them directly into the slice. Then, pass the appropriate // subslice to the database to be written. This approach significantly // cuts down on the number of required allocations. offset := 0 serializedValues := make([]byte, len(allTxs)*txEntrySize) blockIDToUse := parentBlockID for i, tx := range allTxs { // Switch to using the newest block ID for the stake transactions, // since these are not from the parent. if i == stakeTxStartIdx { blockIDToUse = blockID } putTxIndexEntry(serializedValues[offset:], blockIDToUse, allTxsLocs[i]) endOffset := offset + txEntrySize txSha := tx.Sha() err := dbPutTxIndexEntry(dbTx, *txSha, serializedValues[offset:endOffset:endOffset]) if err != nil { return err } offset += txEntrySize } return nil }
// testFetchTxBySha ensures FetchTxBySha conforms to the interface contract. func testFetchTxBySha(tc *testContext) bool { var blockPrev *dcrutil.Block = nil if tc.block.Height() != 0 { var errBlockPrev error blockPrev, errBlockPrev = tc.db.FetchBlockBySha(&tc.block.MsgBlock().Header.PrevBlock) if errBlockPrev != nil { blockSha := tc.block.Sha() tc.t.Errorf("Failed to fetch parent block of block %v", blockSha) } } votebits := tc.block.MsgBlock().Header.VoteBits if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil { for i, tx := range blockPrev.Transactions() { txHash := tx.Sha() txReplyList, err := tc.db.FetchTxBySha(txHash) if err != nil { tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ "tx #%d (%s) err: %v", tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, err) return false } if len(txReplyList) == 0 { tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ "tx #%d (%s) did not return reply data", tc.dbType, tc.blockHeight, tc.blockHash, i, txHash) return false } txFromDb := txReplyList[len(txReplyList)-1].Tx if !reflect.DeepEqual(tx.MsgTx(), txFromDb) { tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ "tx #%d (%s, %s) does not match stored tx\n"+ "got: %v\nwant: %v", tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, txFromDb.TxSha(), spew.Sdump(txFromDb), spew.Sdump(tx.MsgTx())) return false } } } for i, tx := range tc.block.MsgBlock().STransactions { txHash := tx.TxSha() txReplyList, err := tc.db.FetchTxBySha(&txHash) if err != nil { tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ "sstx #%d (%s) err: %v", tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, err) return false } if len(txReplyList) == 0 { tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ "sstx #%d (%s) did not return reply data", tc.dbType, tc.blockHeight, tc.blockHash, i, txHash) return false } txFromDb := txReplyList[len(txReplyList)-1].Tx if !reflect.DeepEqual(tx, txFromDb) { tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+ "sstx #%d (%s) does not match stored sstx\n"+ 
"got: %v\nwant: %v", tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, spew.Sdump(txFromDb), spew.Sdump(tx)) return false } } return true }
// connectTickets updates the passed map by removing any tickets from the
// ticket pool that have been considered spent or missed in this block
// according to the block header.  Then, it connects all the newly mature
// tickets to the passed map.
//
// The work happens in three parts: (1) spend/miss the lottery winners for
// this block, (2) remove missed tickets that are now revoked, and (3) add
// tickets that mature at this height.  Parts 1 and 2 only apply once chain
// voting is required (at/after StakeValidationHeight).
func (b *BlockChain) connectTickets(tixStore TicketStore, node *blockNode, block *dcrutil.Block) error {
	if tixStore == nil {
		return fmt.Errorf("nil ticket store!")
	}

	// Nothing to do if tickets haven't yet possibly matured.
	height := node.height
	if height < b.chainParams.StakeEnabledHeight {
		return nil
	}

	parentBlock, err := b.GetBlockFromHash(node.parentHash)
	if err != nil {
		return err
	}

	revocations := node.header.Revocations

	tM := int64(b.chainParams.TicketMaturity)

	// Skip a number of validation steps before we require chain voting.
	if node.height >= b.chainParams.StakeValidationHeight {
		regularTxTreeValid := dcrutil.IsFlagSet16(node.header.VoteBits,
			dcrutil.BlockValid)
		thisNodeStakeViewpoint := ViewpointPrevInvalidStake
		if regularTxTreeValid {
			thisNodeStakeViewpoint = ViewpointPrevValidStake
		}

		// We need the missed tickets bucket from the original perspective of
		// the node.
		missedTickets, err := b.GenerateMissedTickets(tixStore)
		if err != nil {
			return err
		}

		// TxStore at blockchain HEAD + TxTreeRegular of prevBlock (if
		// validated) for this node.
		txInputStoreStake, err := b.fetchInputTransactions(node, block,
			thisNodeStakeViewpoint)
		if err != nil {
			errStr := fmt.Sprintf("fetchInputTransactions failed for incoming "+
				"node %v; error given: %v", node.hash, err)
			return errors.New(errStr)
		}

		// PART 1: Spend/miss winner tickets

		// Iterate through all the SSGen (vote) tx in the block and add them to
		// a map of tickets that were actually used.
		spentTicketsFromBlock := make(map[chainhash.Hash]bool)
		numberOfSSgen := 0
		for _, staketx := range block.STransactions() {
			if is, _ := stake.IsSSGen(staketx); is {
				msgTx := staketx.MsgTx()
				// Input 0 of a vote is the stakebase; input 1 spends the
				// ticket (sstx).
				sstxIn := msgTx.TxIn[1] // sstx input
				sstxHash := sstxIn.PreviousOutPoint.Hash

				originTx, exists := txInputStoreStake[sstxHash]
				if !exists {
					str := fmt.Sprintf("unable to find input transaction "+
						"%v for transaction %v", sstxHash, staketx.Sha())
					return ruleError(ErrMissingTx, str)
				}

				sstxHeight := originTx.BlockHeight

				// Check maturity of ticket; we can only spend the ticket after it
				// hits maturity at height + tM + 1.
				if (height - sstxHeight) < (tM + 1) {
					blockSha := block.Sha()
					errStr := fmt.Sprintf("Error: A ticket spend as an SSGen in "+
						"block height %v was immature! Block sha %v",
						height, blockSha)
					return errors.New(errStr)
				}

				// Fill out the ticket data.
				spentTicketsFromBlock[sstxHash] = true
				numberOfSSgen++
			}
		}

		// Obtain the TicketsPerBlock many tickets that were selected this round,
		// then check these against the tickets that were actually used to make
		// sure that any SSGen actually match the selected tickets.  Commit the
		// spent or missed tickets to the ticket store after.
		spentAndMissedTickets := make(TicketStore)
		tixSpent := 0
		tixMissed := 0

		// Sort the entire list of tickets lexicographically by sorting
		// each bucket and then appending it to the list.  Start by generating
		// a prefix matched map of tickets to speed up the lookup.
		tpdBucketMap := make(map[uint8][]*TicketPatchData)
		for _, tpd := range tixStore {
			// Bucket does not exist.
			if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok {
				tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1)
				tpdBucketMap[tpd.td.Prefix][0] = tpd
			} else {
				// Bucket exists.
				data := tpdBucketMap[tpd.td.Prefix]
				tpdBucketMap[tpd.td.Prefix] = append(data, tpd)
			}
		}
		totalTickets := 0
		sortedSlice := make([]*stake.TicketData, 0)
		for i := 0; i < stake.BucketsSize; i++ {
			ltb, err := b.GenerateLiveTicketBucket(tixStore, tpdBucketMap,
				uint8(i))
			if err != nil {
				h := node.hash
				str := fmt.Sprintf("Failed to generate live ticket bucket "+
					"%v for node %v, height %v! Error: %v",
					i, h, node.height, err.Error())
				return fmt.Errorf(str)
			}
			mapLen := len(ltb)

			tempTdSlice := stake.NewTicketDataSlice(mapLen)
			itr := 0 // Iterator
			for _, td := range ltb {
				tempTdSlice[itr] = td
				itr++
				totalTickets++
			}
			sort.Sort(tempTdSlice)
			sortedSlice = append(sortedSlice, tempTdSlice...)
		}

		// Use the parent block's header to seed a PRNG that picks the
		// lottery winners.
		ticketsPerBlock := int(b.chainParams.TicketsPerBlock)
		pbhB, err := parentBlock.MsgBlock().Header.Bytes()
		if err != nil {
			return err
		}
		prng := stake.NewHash256PRNG(pbhB)
		ts, err := stake.FindTicketIdxs(int64(totalTickets), ticketsPerBlock,
			prng)
		if err != nil {
			return err
		}

		ticketsToSpendOrMiss := make([]*stake.TicketData, ticketsPerBlock,
			ticketsPerBlock)
		for i, idx := range ts {
			ticketsToSpendOrMiss[i] = sortedSlice[idx]
		}

		// Spend or miss these tickets by checking for their existence in the
		// passed spentTicketsFromBlock map.
		for _, ticket := range ticketsToSpendOrMiss {
			// Move the ticket from active tickets map into the used tickets
			// map if the ticket was spent.
			wasSpent, _ := spentTicketsFromBlock[ticket.SStxHash]

			if wasSpent {
				tpd := NewTicketPatchData(ticket, TiSpent, nil)
				spentAndMissedTickets[ticket.SStxHash] = tpd
				tixSpent++
			} else { // Ticket missed being spent and --> false or nil
				tpd := NewTicketPatchData(ticket, TiMissed, nil)
				spentAndMissedTickets[ticket.SStxHash] = tpd
				tixMissed++
			}
		}

		// This error is thrown if for some reason there exists an SSGen in
		// the block that doesn't spend a ticket from the eligible list of
		// tickets, thus making it invalid.
		if tixSpent != numberOfSSgen {
			errStr := fmt.Sprintf("an invalid number %v "+
				"tickets was spent, but %v many tickets should "+
				"have been spent!", tixSpent, numberOfSSgen)
			return errors.New(errStr)
		}

		if tixMissed != (ticketsPerBlock - numberOfSSgen) {
			errStr := fmt.Sprintf("an invalid number %v "+
				"tickets was missed, but %v many tickets should "+
				"have been missed!", tixMissed, ticketsPerBlock-numberOfSSgen)
			return errors.New(errStr)
		}

		if (tixSpent + tixMissed) != int(b.chainParams.TicketsPerBlock) {
			errStr := fmt.Sprintf("an invalid number %v "+
				"tickets was spent and missed, but TicketsPerBlock %v many "+
				"tickets should have been spent!", tixSpent, ticketsPerBlock)
			return errors.New(errStr)
		}

		// Calculate all the tickets expiring this block and mark them as missed.
		tpdBucketMap = make(map[uint8][]*TicketPatchData)
		for _, tpd := range tixStore {
			// Bucket does not exist.
			if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok {
				tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1)
				tpdBucketMap[tpd.td.Prefix][0] = tpd
			} else {
				// Bucket exists.
				data := tpdBucketMap[tpd.td.Prefix]
				tpdBucketMap[tpd.td.Prefix] = append(data, tpd)
			}
		}
		toExpireHeight := node.height - int64(b.chainParams.TicketExpiry)
		if !(toExpireHeight < int64(b.chainParams.StakeEnabledHeight)) {
			for i := 0; i < stake.BucketsSize; i++ {
				// Generate the live ticket bucket.
				ltb, err := b.GenerateLiveTicketBucket(tixStore, tpdBucketMap,
					uint8(i))
				if err != nil {
					return err
				}

				for _, ticket := range ltb {
					if ticket.BlockHeight == toExpireHeight {
						tpd := NewTicketPatchData(ticket, TiMissed, nil)
						spentAndMissedTickets[ticket.SStxHash] = tpd
					}
				}
			}
		}

		// Merge the ticket store patch containing the spent and missed tickets
		// with the ticket store.
		for hash, tpd := range spentAndMissedTickets {
			tixStore[hash] = tpd
		}

		// At this point our tixStore now contains all the spent and missed tx
		// as per this block.

		// PART 2: Remove tickets that were missed and are now revoked.

		// Iterate through all the SSRtx (revocation) tx in the block and add
		// them to a map of tickets that were actually used.
		revocationsFromBlock := make(map[chainhash.Hash]struct{})
		numberOfSSRtx := 0
		for _, staketx := range block.STransactions() {
			if is, _ := stake.IsSSRtx(staketx); is {
				msgTx := staketx.MsgTx()
				sstxIn := msgTx.TxIn[0] // sstx input
				sstxHash := sstxIn.PreviousOutPoint.Hash

				// Fill out the ticket data.
				revocationsFromBlock[sstxHash] = struct{}{}
				numberOfSSRtx++
			}
		}

		if numberOfSSRtx != int(revocations) {
			errStr := fmt.Sprintf("an invalid revocations %v was calculated "+
				"the block header indicates %v instead", numberOfSSRtx,
				revocations)
			return errors.New(errStr)
		}

		// Lookup the missed ticket.  If we find it in the patch data,
		// modify the patch data so that it doesn't exist.
		// Otherwise, just modify load the missed ticket data from
		// the ticket db and create patch data based on that.
		for hash, _ := range revocationsFromBlock {
			ticketWasMissed := false
			if td, is := missedTickets[hash]; is {
				maturedHeight := td.BlockHeight

				// Check maturity of ticket; we can only spend the ticket after it
				// hits maturity at height + tM + 2.
				if height < maturedHeight+2 {
					blockSha := block.Sha()
					errStr := fmt.Sprintf("Error: A ticket spend as an "+
						"SSRtx in block height %v was immature! Block sha %v",
						height, blockSha)
					return errors.New(errStr)
				}

				ticketWasMissed = true
			}

			// A revocation may only spend a ticket that actually missed.
			if !ticketWasMissed {
				errStr := fmt.Sprintf("SSRtx spent missed sstx %v, "+
					"but that missed sstx could not be found!", hash)
				return errors.New(errStr)
			}
		}
	}

	// PART 3: Add newly maturing tickets
	// This is the only chunk we need to do for blocks appearing before
	// stake validation height.

	// Calculate block number for where new tickets are maturing from and
	// retrieve this block from db.

	// Get the block that is maturing.
	matureNode, err := b.getNodeAtHeightFromTopNode(node, tM)
	if err != nil {
		return err
	}

	matureBlock, errBlock := b.getBlockFromHash(matureNode.hash)
	if errBlock != nil {
		return errBlock
	}

	// Maturing tickets are from the maturingBlock; fill out the ticket patch
	// data and then push them to the tixStore.
	for _, stx := range matureBlock.STransactions() {
		if is, _ := stake.IsSStx(stx); is {
			// Calculate the prefix for pre-sort.
			sstxHash := *stx.Sha()
			prefix := uint8(sstxHash[0])

			// Fill out the ticket data.
			td := stake.NewTicketData(sstxHash,
				prefix,
				chainhash.Hash{},
				height,
				false, // not missed
				false) // not expired
			tpd := NewTicketPatchData(td,
				TiAvailable,
				nil)
			tixStore[*stx.Sha()] = tpd
		}
	}

	return nil
}
// disconnectTransactions updates the view by removing all of the transactions
// created by the passed block, restoring all utxos the transactions spent by
// using the provided spent txo information, and setting the best hash for the
// view to the block before the passed block.
//
// This function will ONLY work correctly for a single transaction tree at a
// time because of index tracking.
//
// The stxos slice is consumed strictly back-to-front via stxoIdx: first the
// stake tree of this block, then (if this block voted its parent valid) the
// regular tree of the parent.  That ordering must mirror how stxos were
// appended during connect, so statement order here is load-bearing.
func (b *BlockChain) disconnectTransactions(view *UtxoViewpoint, block *dcrutil.Block, parent *dcrutil.Block, stxos []spentTxOut) error {
	// Sanity check the correct number of stxos are provided.
	if len(stxos) != countSpentOutputs(block, parent) {
		return AssertError(fmt.Sprintf("disconnectTransactions "+
			"called with bad spent transaction out information "+
			"(len stxos %v, count is %v)", len(stxos),
			countSpentOutputs(block, parent)))
	}

	// Loop backwards through all transactions so everything is unspent in
	// reverse order.  This is necessary since transactions later in a block
	// can spend from previous ones.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	thisNodeStakeViewpoint := ViewpointPrevInvalidStake
	if regularTxTreeValid {
		thisNodeStakeViewpoint = ViewpointPrevValidStake
	}
	view.SetStakeViewpoint(thisNodeStakeViewpoint)
	err := view.fetchInputUtxos(b.db, block, parent)
	if err != nil {
		return err
	}
	stxoIdx := len(stxos) - 1
	transactions := block.STransactions()
	for txIdx := len(transactions) - 1; txIdx > -1; txIdx-- {
		tx := transactions[txIdx]
		msgTx := tx.MsgTx()
		tt := stake.DetermineTxType(msgTx)

		// Clear this transaction from the view if it already exists or
		// create a new empty entry for when it does not.  This is done
		// because the code relies on its existence in the view in order
		// to signal modifications have happened.
		entry := view.entries[*tx.Sha()]
		if entry == nil {
			entry = newUtxoEntry(msgTx.Version, uint32(block.Height()),
				uint32(txIdx), IsCoinBaseTx(msgTx), msgTx.Expiry != 0, tt)
			if tt == stake.TxTypeSStx {
				// Tickets carry a minimal-outputs blob used for stake
				// commitment lookups.
				stakeExtra := make([]byte,
					serializeSizeForMinimalOutputs(tx))
				putTxToMinimalOutputs(stakeExtra, tx)
				entry.stakeExtra = stakeExtra
			}
			view.entries[*tx.Sha()] = entry
		}
		entry.modified = true
		entry.sparseOutputs = make(map[uint32]*utxoOutput)

		// Loop backwards through all of the transaction inputs (except
		// for the coinbase which has no inputs) and unspend the
		// referenced txos.  This is necessary to match the order of the
		// spent txout entries.
		for txInIdx := len(tx.MsgTx().TxIn) - 1; txInIdx > -1; txInIdx-- {
			// Skip empty vote stakebases.
			if txInIdx == 0 && (tt == stake.TxTypeSSGen) {
				continue
			}

			// Ensure the spent txout index is decremented to stay
			// in sync with the transaction input.
			stxo := &stxos[stxoIdx]
			stxoIdx--

			// When there is not already an entry for the referenced
			// transaction in the view, it means it was fully spent,
			// so create a new utxo entry in order to resurrect it.
			txIn := tx.MsgTx().TxIn[txInIdx]
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			entry := view.LookupEntry(originHash)
			if entry == nil {
				if !stxo.txFullySpent {
					return AssertError(fmt.Sprintf("tried to revive utx %v from "+
						"non-fully spent stx entry", originHash))
				}
				entry = newUtxoEntry(tx.MsgTx().Version, stxo.height,
					stxo.index, stxo.isCoinBase, stxo.hasExpiry,
					stxo.txType)
				if stxo.txType == stake.TxTypeSStx {
					entry.stakeExtra = stxo.stakeExtra
				}
				view.entries[*originHash] = entry
			}

			// Mark the entry as modified since it is either new
			// or will be changed below.
			entry.modified = true

			// Restore the specific utxo using the stxo data from
			// the spend journal if it doesn't already exist in the
			// view.
			output, ok := entry.sparseOutputs[originIndex]
			if !ok {
				// Add the unspent transaction output.
				entry.sparseOutputs[originIndex] = &utxoOutput{
					compressed:    stxo.compressed,
					spent:         false,
					amount:        txIn.ValueIn,
					scriptVersion: stxo.scriptVersion,
					pkScript:      stxo.pkScript,
				}
				continue
			}

			// Mark the existing referenced transaction output as
			// unspent.
			output.spent = false
		}
	}

	// There is no regular tx from before the genesis block, so ignore the
	// genesis block for the next step.
	if parent != nil && block.Height() != 0 {
		// Only bother to unspend transactions if the parent's tx tree was
		// validated.  Otherwise, these transactions were never in the
		// blockchain's history in the first place.
		if regularTxTreeValid {
			view.SetStakeViewpoint(ViewpointPrevValidInitial)
			err = view.fetchInputUtxos(b.db, block, parent)
			if err != nil {
				return err
			}

			transactions := parent.Transactions()
			for txIdx := len(transactions) - 1; txIdx > -1; txIdx-- {
				tx := transactions[txIdx]

				// Clear this transaction from the view if it already exists or
				// create a new empty entry for when it does not.  This is done
				// because the code relies on its existence in the view in order
				// to signal modifications have happened.
				isCoinbase := txIdx == 0
				entry := view.entries[*tx.Sha()]
				if entry == nil {
					entry = newUtxoEntry(tx.MsgTx().Version,
						uint32(parent.Height()), uint32(txIdx), isCoinbase,
						tx.MsgTx().Expiry != 0, stake.TxTypeRegular)
					view.entries[*tx.Sha()] = entry
				}
				entry.modified = true
				entry.sparseOutputs = make(map[uint32]*utxoOutput)

				// Loop backwards through all of the transaction inputs (except
				// for the coinbase which has no inputs) and unspend the
				// referenced txos.  This is necessary to match the order of the
				// spent txout entries.
				if isCoinbase {
					continue
				}
				for txInIdx := len(tx.MsgTx().TxIn) - 1; txInIdx > -1; txInIdx-- {
					// Ensure the spent txout index is decremented to stay
					// in sync with the transaction input.
					stxo := &stxos[stxoIdx]
					stxoIdx--

					// When there is not already an entry for the referenced
					// transaction in the view, it means it was fully spent,
					// so create a new utxo entry in order to resurrect it.
					txIn := tx.MsgTx().TxIn[txInIdx]
					originHash := &txIn.PreviousOutPoint.Hash
					originIndex := txIn.PreviousOutPoint.Index
					entry := view.entries[*originHash]
					if entry == nil {
						if !stxo.txFullySpent {
							return AssertError(fmt.Sprintf("tried to "+
								"revive utx %v from non-fully spent stx entry",
								originHash))
						}
						entry = newUtxoEntry(tx.MsgTx().Version,
							stxo.height, stxo.index, stxo.isCoinBase,
							stxo.hasExpiry, stxo.txType)
						if stxo.txType == stake.TxTypeSStx {
							entry.stakeExtra = stxo.stakeExtra
						}
						view.entries[*originHash] = entry
					}

					// Mark the entry as modified since it is either new
					// or will be changed below.
					entry.modified = true

					// Restore the specific utxo using the stxo data from
					// the spend journal if it doesn't already exist in the
					// view.
					output, ok := entry.sparseOutputs[originIndex]
					if !ok {
						// Add the unspent transaction output.
						entry.sparseOutputs[originIndex] = &utxoOutput{
							compressed:    stxo.compressed,
							spent:         false,
							amount:        txIn.ValueIn,
							scriptVersion: stxo.scriptVersion,
							pkScript:      stxo.pkScript,
						}
						continue
					}

					// Mark the existing referenced transaction output as
					// unspent.
					output.spent = false
				}
			}
		}
	}

	// Update the best hash for view to the previous block since all of the
	// transactions for the current block have been disconnected.
	view.SetBestHash(parent.Sha())
	return nil
}
// indexBlockAddrs returns a populated index of the all the transactions in the // passed block based on the addresses involved in each transaction. func (a *addrIndexer) indexBlockAddrs(blk *dcrutil.Block, parent *dcrutil.Block) (database.BlockAddrIndex, error) { var addrIndex database.BlockAddrIndex _, stxLocs, err := blk.TxLoc() if err != nil { return nil, err } txTreeRegularValid := dcrutil.IsFlagSet16(blk.MsgBlock().Header.VoteBits, dcrutil.BlockValid) // Add regular transactions iff the block was validated. if txTreeRegularValid { txLocs, _, err := parent.TxLoc() if err != nil { return nil, err } for txIdx, tx := range parent.Transactions() { // Tx's offset and length in the block. locInBlock := &txLocs[txIdx] // Coinbases don't have any inputs. if !blockchain.IsCoinBase(tx) { // Index the SPK's of each input's previous outpoint // transaction. for _, txIn := range tx.MsgTx().TxIn { prevOutTx, err := a.lookupTransaction( txIn.PreviousOutPoint.Hash, blk, parent) inputOutPoint := prevOutTx.TxOut[txIn.PreviousOutPoint.Index] toAppend, err := convertToAddrIndex(inputOutPoint.Version, inputOutPoint.PkScript, parent.Height(), locInBlock) if err != nil { adxrLog.Tracef("Error converting tx txin %v: %v", txIn.PreviousOutPoint.Hash, err) continue } addrIndex = append(addrIndex, toAppend...) } } for _, txOut := range tx.MsgTx().TxOut { toAppend, err := convertToAddrIndex(txOut.Version, txOut.PkScript, parent.Height(), locInBlock) if err != nil { adxrLog.Tracef("Error converting tx txout %v: %v", tx.MsgTx().TxSha(), err) continue } addrIndex = append(addrIndex, toAppend...) } } } // Add stake transactions. for stxIdx, stx := range blk.STransactions() { // Tx's offset and length in the block. locInBlock := &stxLocs[stxIdx] isSSGen, _ := stake.IsSSGen(stx) // Index the SPK's of each input's previous outpoint // transaction. for i, txIn := range stx.MsgTx().TxIn { // Stakebases don't have any inputs. 
if isSSGen && i == 0 { continue } // Lookup and fetch the referenced output's tx. prevOutTx, err := a.lookupTransaction( txIn.PreviousOutPoint.Hash, blk, parent) inputOutPoint := prevOutTx.TxOut[txIn.PreviousOutPoint.Index] toAppend, err := convertToAddrIndex(inputOutPoint.Version, inputOutPoint.PkScript, blk.Height(), locInBlock) if err != nil { adxrLog.Tracef("Error converting stx txin %v: %v", txIn.PreviousOutPoint.Hash, err) continue } addrIndex = append(addrIndex, toAppend...) } for _, txOut := range stx.MsgTx().TxOut { toAppend, err := convertToAddrIndex(txOut.Version, txOut.PkScript, blk.Height(), locInBlock) if err != nil { adxrLog.Tracef("Error converting stx txout %v: %v", stx.MsgTx().TxSha(), err) continue } addrIndex = append(addrIndex, toAppend...) } } return addrIndex, nil }
// testReorganization performs reorganization tests for the passed DB type. // Much of the setup is copied from the blockchain package, but the test looks // to see if each TX in each block in the best chain can be fetched using // FetchTxBySha. If not, then there's a bug. func testReorganization(t *testing.T, dbType string) { db, teardown, err := createDB(dbType, "reorganization", true) if err != nil { t.Fatalf("Failed to create test database (%s) %v", dbType, err) } defer teardown() blocks, err := loadReorgBlocks("reorgto179.bz2") if err != nil { t.Fatalf("Error loading file: %v", err) } blocksReorg, err := loadReorgBlocks("reorgto180.bz2") if err != nil { t.Fatalf("Error loading file: %v", err) } // Find where chain forks var forkHash chainhash.Hash var forkHeight int64 for i, _ := range blocks { if blocks[i].Sha().IsEqual(blocksReorg[i].Sha()) { blkHash := blocks[i].Sha() forkHash = *blkHash forkHeight = int64(i) } } // Insert all blocks from chain 1 for i := int64(0); i < int64(len(blocks)); i++ { blkHash := blocks[i].Sha() if err != nil { t.Fatalf("Error getting SHA for block %dA: %v", i-2, err) } _, err = db.InsertBlock(blocks[i]) if err != nil { t.Fatalf("Error inserting block %dA (%v): %v", i-2, blkHash, err) } } // Remove blocks to fork point db.DropAfterBlockBySha(&forkHash) if err != nil { t.Errorf("couldn't DropAfterBlockBySha: %v", err.Error()) } // Insert blocks from the other chain to simulate a reorg for i := forkHeight + 1; i < int64(len(blocksReorg)); i++ { blkHash := blocksReorg[i].Sha() if err != nil { t.Fatalf("Error getting SHA for block %dA: %v", i-2, err) } _, err = db.InsertBlock(blocksReorg[i]) if err != nil { t.Fatalf("Error inserting block %dA (%v): %v", i-2, blkHash, err) } } _, maxHeight, err := db.NewestSha() if err != nil { t.Fatalf("Error getting newest block info") } for i := int64(0); i <= maxHeight; i++ { blkHash, err := db.FetchBlockShaByHeight(i) if err != nil { t.Fatalf("Error fetching SHA for block %d: %v", i, err) } block, 
err := db.FetchBlockBySha(blkHash) if err != nil { t.Fatalf("Error fetching block %d (%v): %v", i, blkHash, err) } prevBlockSha := block.MsgBlock().Header.PrevBlock prevBlock, _ := db.FetchBlockBySha(&prevBlockSha) votebits := blocksReorg[i].MsgBlock().Header.VoteBits if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && prevBlock != nil { for _, tx := range prevBlock.Transactions() { _, err := db.FetchTxBySha(tx.Sha()) if err != nil { t.Fatalf("Error fetching transaction %v: %v", tx.Sha(), err) } } } for _, tx := range block.STransactions() { _, err := db.FetchTxBySha(tx.Sha()) if err != nil { t.Fatalf("Error fetching transaction %v: %v", tx.Sha(), err) } } } }
func Test_dupTx(t *testing.T) { // Ignore db remove errors since it means we didn't have an old one. dbname := fmt.Sprintf("tstdbdup0") dbnamever := dbname + ".ver" _ = os.RemoveAll(dbname) _ = os.RemoveAll(dbnamever) db, err := database.CreateDB("leveldb", dbname) if err != nil { t.Errorf("Failed to open test database %v", err) return } defer os.RemoveAll(dbname) defer os.RemoveAll(dbnamever) defer func() { if err := db.Close(); err != nil { t.Errorf("Close: unexpected error: %v", err) } }() testdatafile := filepath.Join("../", "../blockchain/testdata", "blocks0to168.bz2") blocks, err := loadBlocks(t, testdatafile) if err != nil { t.Errorf("Unable to load blocks from test data for: %v", err) return } var lastSha *chainhash.Hash // Populate with the fisrt 256 blocks, so we have blocks to 'mess with' err = nil out: for height := int64(0); height < int64(len(blocks)); height++ { block := blocks[height] if height != 0 { // except for NoVerify which does not allow lookups check inputs mblock := block.MsgBlock() //t.Errorf("%v", blockchain.DebugBlockString(block)) parentBlock := blocks[height-1] mParentBlock := parentBlock.MsgBlock() var txneededList []*chainhash.Hash opSpentInBlock := make(map[wire.OutPoint]struct{}) if dcrutil.IsFlagSet16(dcrutil.BlockValid, mParentBlock.Header.VoteBits) { for _, tx := range mParentBlock.Transactions { for _, txin := range tx.TxIn { if txin.PreviousOutPoint.Index == uint32(4294967295) { continue } if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { _, used := opSpentInBlock[txin.PreviousOutPoint] if !used { // Origin tx is in the block and so hasn't been // added yet, continue opSpentInBlock[txin.PreviousOutPoint] = struct{}{} continue } else { t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) } } origintxsha := &txin.PreviousOutPoint.Hash txneededList = append(txneededList, origintxsha) exists, err := db.ExistsTxSha(origintxsha) if err != nil { t.Errorf("ExistsTxSha: 
unexpected error %v ", err) } if !exists { t.Errorf("referenced tx not found %v (height %v)", origintxsha, height) } _, err = db.FetchTxBySha(origintxsha) if err != nil { t.Errorf("referenced tx not found %v err %v ", origintxsha, err) } } } } for _, stx := range mblock.STransactions { for _, txin := range stx.TxIn { if txin.PreviousOutPoint.Index == uint32(4294967295) { continue } if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { _, used := opSpentInBlock[txin.PreviousOutPoint] if !used { // Origin tx is in the block and so hasn't been // added yet, continue opSpentInBlock[txin.PreviousOutPoint] = struct{}{} continue } else { t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) } } origintxsha := &txin.PreviousOutPoint.Hash txneededList = append(txneededList, origintxsha) exists, err := db.ExistsTxSha(origintxsha) if err != nil { t.Errorf("ExistsTxSha: unexpected error %v ", err) } if !exists { t.Errorf("referenced tx not found %v", origintxsha) } _, err = db.FetchTxBySha(origintxsha) if err != nil { t.Errorf("referenced tx not found %v err %v ", origintxsha, err) } } } txlist := db.FetchUnSpentTxByShaList(txneededList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) break out } } } newheight, err := db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) break out } if newheight != height { t.Errorf("height mismatch expect %v returned %v", height, newheight) break out } newSha, blkid, err := db.NewestSha() if err != nil { t.Errorf("failed to obtain latest sha %v %v", height, err) } if blkid != height { t.Errorf("height doe not match latest block height %v %v %v", blkid, height, err) } blkSha := block.Sha() if *newSha != *blkSha { t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err) } lastSha = blkSha } // generate a new block based on the last sha // 
these block are not verified, so there are a bunch of garbage fields // in the 'generated' block. var bh wire.BlockHeader bh.Version = 0 bh.PrevBlock = *lastSha // Bits, Nonce are not filled in mblk := wire.NewMsgBlock(&bh) hash, _ := chainhash.NewHashFromStr("c23953c56cb2ef8e4698e3ed3b0fc4c837754d3cd16485192d893e35f32626b4") po := wire.NewOutPoint(hash, 0, dcrutil.TxTreeRegular) txI := wire.NewTxIn(po, []byte("garbage")) txO := wire.NewTxOut(50000000, []byte("garbageout")) var tx wire.MsgTx tx.AddTxIn(txI) tx.AddTxOut(txO) mblk.AddTransaction(&tx) blk := dcrutil.NewBlock(mblk) fetchList := []*chainhash.Hash{hash} listReply := db.FetchUnSpentTxByShaList(fetchList) for _, lr := range listReply { if lr.Err != nil { t.Errorf("sha %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } } _, err = db.InsertBlock(blk) if err != nil { t.Errorf("failed to insert phony block %v", err) } // ok, did it 'spend' the tx ? listReply = db.FetchUnSpentTxByShaList(fetchList) for _, lr := range listReply { if lr.Err != nil && lr.Err != database.ErrTxShaMissing { t.Errorf("sha %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } } txlist := blk.Transactions() for _, tx := range txlist { txsha := tx.Sha() txReply, err := db.FetchTxBySha(txsha) if err != nil { t.Errorf("fully spent lookup %v err %v\n", hash, err) } else { for _, lr := range txReply { if lr.Err != nil { t.Errorf("stx %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } } } } err = db.DropAfterBlockBySha(lastSha) if err != nil { t.Errorf("failed to drop spending block %v", err) } }
// insert every block in the test chain // after each insert, fetch all the tx affected by the latest // block and verify that the the tx is spent/unspent // new tx should be fully unspent, referenced tx should have // the associated txout set to spent. // checks tx tree regular only func testUnspentInsertRegTree(t *testing.T) { // Ignore db remove errors since it means we didn't have an old one. dbname := fmt.Sprintf("tstdbuspnt1") dbnamever := dbname + ".ver" _ = os.RemoveAll(dbname) _ = os.RemoveAll(dbnamever) db, err := database.CreateDB("leveldb", dbname) if err != nil { t.Errorf("Failed to open test database %v", err) return } defer os.RemoveAll(dbname) defer os.RemoveAll(dbnamever) defer func() { if err := db.Close(); err != nil { t.Errorf("Close: unexpected error: %v", err) } }() blocks := loadblocks(t) endtest: for height := int64(0); height < int64(len(blocks))-1; height++ { block := blocks[height] // jam in genesis block if height == 0 { _, err := db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) break endtest } continue } var txneededList []*chainhash.Hash var txlookupList []*chainhash.Hash var txOutList []*chainhash.Hash var txInList []*wire.OutPoint parent := blocks[height-1] unspentStakeOps := unspendStakeTxTree(block) // Check regular tree of parent and make sure it's ok for txIdx, tx := range parent.MsgBlock().Transactions { for txinIdx, txin := range tx.TxIn { if txin.PreviousOutPoint.Index == uint32(4294967295) { continue } origintxsha := &txin.PreviousOutPoint.Hash exists, err := db.ExistsTxSha(origintxsha) if err != nil { t.Errorf("ExistsTxSha: unexpected error %v ", err) } if !exists { // Check and see if something in flight spends it from this // tx tree. We can skip looking for this transaction OP // if that's the case. 
spentFromParentReg := false alreadySpentOps := getRegTreeOpsSpentBeforeThisOp(parent, txIdx, txinIdx) _, alreadySpent := alreadySpentOps[txin.PreviousOutPoint] if !alreadySpent { spentFromParentReg = true } if !spentFromParentReg { t.Errorf("referenced tx not found %v %v", origintxsha, height) } else { continue } } txInList = append(txInList, &txin.PreviousOutPoint) txneededList = append(txneededList, origintxsha) txlookupList = append(txlookupList, origintxsha) } txshaname := tx.TxSha() txlookupList = append(txlookupList, &txshaname) txOutList = append(txOutList, &txshaname) } txneededmap := map[chainhash.Hash]*database.TxListReply{} txlist := db.FetchUnSpentTxByShaList(txneededList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v", txe.Sha, txe.Err) break endtest } txneededmap[*txe.Sha] = txe } for _, spend := range txInList { itxe := txneededmap[spend.Hash] if itxe.TxSpent[spend.Index] == true { t.Errorf("txin %v:%v is already spent", spend.Hash, spend.Index) } } newheight, err := db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) break endtest } if newheight != height { t.Errorf("height mismatch expect %v returned %v", height, newheight) break endtest } // only check transactions if current block validates parent block if !dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits, dcrutil.BlockValid) { continue } txlookupmap := map[chainhash.Hash]*database.TxListReply{} txlist = db.FetchTxByShaList(txlookupList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v (height %v)", txe.Sha, txe.Err, height) break endtest } txlookupmap[*txe.Sha] = txe } for _, spend := range txInList { itxe := txlookupmap[spend.Hash] if itxe.TxSpent[spend.Index] == false { t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent) } } alreadySpentOps := unspendInflightTxTree(parent) for _, txo := range txOutList { itxe := txlookupmap[*txo] for i, 
spent := range itxe.TxSpent { if spent == true { // If this was spent in flight, skip thisOP := wire.OutPoint{*txo, uint32(i), dcrutil.TxTreeRegular} _, alreadySpent := alreadySpentOps[thisOP] if alreadySpent { continue } // If it was spent in the stake tree it's actually unspent too _, wasSpentInStakeTree := unspentStakeOps[thisOP] if wasSpentInStakeTree { continue } t.Errorf("height: %v freshly inserted tx %v already spent %v", height, txo, i) } } } if len(txInList) == 0 { continue } dropblock := blocks[height-1] err = db.DropAfterBlockBySha(dropblock.Sha()) if err != nil { t.Errorf("failed to drop block %v err %v", height, err) break endtest } txlookupmap = map[chainhash.Hash]*database.TxListReply{} txlist = db.FetchUnSpentTxByShaList(txlookupList) for _, txe := range txlist { if txe.Err != nil { if _, ok := txneededmap[*txe.Sha]; ok { t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) break endtest } } txlookupmap[*txe.Sha] = txe } for _, spend := range txInList { itxe := txlookupmap[spend.Hash] if itxe.TxSpent[spend.Index] == true { t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent) } } newheight, err = db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) break endtest } txlookupmap = map[chainhash.Hash]*database.TxListReply{} txlist = db.FetchTxByShaList(txlookupList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) break endtest } txlookupmap[*txe.Sha] = txe } for _, spend := range txInList { itxe := txlookupmap[spend.Hash] if itxe.TxSpent[spend.Index] == false { t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent) } } } }
func testFetchTxByShaListCommon(tc *testContext, includeSpent bool) bool { var blockPrev *dcrutil.Block = nil if tc.block.Height() != 0 { var errBlockPrev error blockPrev, errBlockPrev = tc.db.FetchBlockBySha(&tc.block.MsgBlock().Header.PrevBlock) if errBlockPrev != nil { blockSha := tc.block.Sha() tc.t.Errorf("Failed to fetch parent block of block %v", blockSha) } } unspentFromTxTreeStake := unspendStakeTxTree(tc.block) votebits := tc.block.MsgBlock().Header.VoteBits if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil { fetchFunc := tc.db.FetchUnSpentTxByShaList funcName := "FetchUnSpentTxByShaList" if includeSpent { fetchFunc = tc.db.FetchTxByShaList funcName = "FetchTxByShaList" } transactions := blockPrev.Transactions() txHashes := make([]*chainhash.Hash, len(transactions)) for i, tx := range transactions { txHashes[i] = tx.Sha() } txReplyList := fetchFunc(txHashes) if len(txReplyList) != len(txHashes) { tc.t.Errorf("%s (%s): block #%d (%s) tx reply list does not "+ " match expected length - got: %v, want: %v", funcName, tc.dbType, tc.blockHeight, tc.blockHash, len(txReplyList), len(txHashes)) return false } for i, tx := range transactions { txHash := tx.Sha() txD := txReplyList[i] // The transaction hash in the reply must be the expected value. if !txD.Sha.IsEqual(txHash) { tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ "hash does not match expected value - got %v", funcName, tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, txD.Sha) return false } // The reply must not indicate any errors. if txD.Err != nil { tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ "returned unexpected error - got %v, want nil", funcName, tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, txD.Err) return false } // The transaction in the reply fetched from the database must // be the same MsgTx that was stored. 
if !reflect.DeepEqual(tx.MsgTx(), txD.Tx) { tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) does "+ "not match stored tx\ngot: %v\nwant: %v", funcName, tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, spew.Sdump(txD.Tx), spew.Sdump(tx.MsgTx())) return false } // The block hash in the reply from the database must be the // expected value. if txD.BlkSha == nil { tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ "returned nil block hash", funcName, tc.dbType, tc.blockHeight, tc.blockHash, i, txHash) return false } if !txD.BlkSha.IsEqual(&tc.block.MsgBlock().Header.PrevBlock) { tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s)"+ "returned unexpected block hash - got %v", funcName, tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, txD.BlkSha) return false } // The block height in the reply from the database must be the // expected value. if txD.Height != tc.blockHeight-1 { tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ "returned unexpected block height - got %v", funcName, tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, txD.Height) return false } // The spend data in the reply from the database must not // indicate any of the transactions that were just inserted are // spent. if txD.TxSpent == nil { tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ "returned nil spend data", funcName, tc.dbType, tc.blockHeight, tc.blockHash, i, txHash) return false } spentBuf := expectedSpentBuf(tc, i) if !reflect.DeepEqual(txD.TxSpent, spentBuf) { stakeInChecksDontPass := false for txoIdx, _ := range spentBuf { if txD.TxSpent[txoIdx] != spentBuf[txoIdx] { op := wire.OutPoint{ *txHash, uint32(txoIdx), dcrutil.TxTreeRegular, } if _, unspent := unspentFromTxTreeStake[op]; !unspent { stakeInChecksDontPass = true } } } if stakeInChecksDontPass { tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+ "returned unexpected spend data - got %v, "+ "want %v", funcName, tc.dbType, tc.blockHeight, tc.blockHash, i, txHash, txD.TxSpent, spentBuf) return false } } } } return true }
func testOperationalMode(t *testing.T) { // simplified basic operation is: // 1) fetch block from remote server // 2) look up all txin (except coinbase in db) // 3) insert block // 4) exercise the optional addridex testDb, err := setUpTestDb(t, "tstdbopmode") if err != nil { t.Errorf("Failed to open test database %v", err) return } defer testDb.cleanUpFunc() err = nil out: for height := int64(0); height < int64(len(testDb.blocks)); height++ { block := testDb.blocks[height] if height != 0 { // except for NoVerify which does not allow lookups check inputs mblock := block.MsgBlock() //t.Errorf("%v", blockchain.DebugBlockString(block)) parentBlock := testDb.blocks[height-1] mParentBlock := parentBlock.MsgBlock() var txneededList []*chainhash.Hash opSpentInBlock := make(map[wire.OutPoint]struct{}) if dcrutil.IsFlagSet16(dcrutil.BlockValid, mParentBlock.Header.VoteBits) { for _, tx := range mParentBlock.Transactions { for _, txin := range tx.TxIn { if txin.PreviousOutPoint.Index == uint32(4294967295) { continue } if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { _, used := opSpentInBlock[txin.PreviousOutPoint] if !used { // Origin tx is in the block and so hasn't been // added yet, continue opSpentInBlock[txin.PreviousOutPoint] = struct{}{} continue } else { t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) } } origintxsha := &txin.PreviousOutPoint.Hash txneededList = append(txneededList, origintxsha) exists, err := testDb.db.ExistsTxSha(origintxsha) if err != nil { t.Errorf("ExistsTxSha: unexpected error %v ", err) } if !exists { t.Errorf("referenced tx not found %v (height %v)", origintxsha, height) } _, err = testDb.db.FetchTxBySha(origintxsha) if err != nil { t.Errorf("referenced tx not found %v err %v ", origintxsha, err) } } } } for _, stx := range mblock.STransactions { for _, txin := range stx.TxIn { if txin.PreviousOutPoint.Index == uint32(4294967295) { continue } if 
existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { _, used := opSpentInBlock[txin.PreviousOutPoint] if !used { // Origin tx is in the block and so hasn't been // added yet, continue opSpentInBlock[txin.PreviousOutPoint] = struct{}{} continue } else { t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) } } origintxsha := &txin.PreviousOutPoint.Hash txneededList = append(txneededList, origintxsha) exists, err := testDb.db.ExistsTxSha(origintxsha) if err != nil { t.Errorf("ExistsTxSha: unexpected error %v ", err) } if !exists { t.Errorf("referenced tx not found %v", origintxsha) } _, err = testDb.db.FetchTxBySha(origintxsha) if err != nil { t.Errorf("referenced tx not found %v err %v ", origintxsha, err) } } } txlist := testDb.db.FetchUnSpentTxByShaList(txneededList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) break out } } } newheight, err := testDb.db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) break out } if newheight != height { t.Errorf("height mismatch expect %v returned %v", height, newheight) break out } newSha, blkid, err := testDb.db.NewestSha() if err != nil { t.Errorf("failed to obtain latest sha %v %v", height, err) } if blkid != height { t.Errorf("height does not match latest block height %v %v %v", blkid, height, err) } blkSha := block.Sha() if *newSha != *blkSha { t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err) } } // now that the db is populated, do some additional tests testFetchHeightRange(t, testDb.db, testDb.blocks) // Ensure all operations dealing with the optional address index behave // correctly. newSha, blkid, err := testDb.db.NewestSha() testAddrIndexOperations(t, testDb.db, testDb.blocks[len(testDb.blocks)-1], newSha, blkid) }
// InsertBlock inserts raw block and transaction data from a block into the
// database. The first block inserted into the database will be treated as the
// genesis block. Every subsequent block insert requires the referenced parent
// block to already exist. This is part of the database.Db interface
// implementation.
//
// When the block's votebits validate the parent's regular transaction tree,
// the parent's regular transactions are recorded and their referenced
// outputs spent; the block's own stake transactions are always recorded.
func (db *MemDb) InsertBlock(block *dcrutil.Block) (int64, error) {
	// Serialize all access to the in-memory structures below.
	db.Lock()
	defer db.Unlock()

	if db.closed {
		return 0, ErrDbClosed
	}

	// Reject the insert if the previously referenced block does not exist
	// except in the case there are no blocks inserted yet where the first
	// inserted block is assumed to be a genesis block.
	msgBlock := block.MsgBlock()
	if _, exists := db.blocksBySha[msgBlock.Header.PrevBlock]; !exists {
		if len(db.blocks) > 0 {
			return 0, database.ErrPrevShaMissing
		}
	}

	var blockPrev *dcrutil.Block = nil
	// Decred: WARNING. This function assumes that all block insertion calls have
	// dcrutil.blocks passed to them with block.blockHeight set correctly. However,
	// loading the genesis block in dcrd didn't do this (via block manager); pre-
	// production it should be established that all calls to this function pass
	// blocks with block.blockHeight set correctly.
	if len(db.blocks) > 0 {
		// Fetch the parent so its regular tx tree can be connected below.
		var errBlockPrev error
		blockPrev, errBlockPrev = db.fetchBlockBySha(&msgBlock.Header.PrevBlock)
		if errBlockPrev != nil {
			blockSha := block.Sha()
			log.Warnf("Failed to fetch parent block of block %v", blockSha)
			return 0, errBlockPrev
		}
	}

	// Build a map of in-flight transactions because some of the inputs in
	// this block could be referencing other transactions earlier in this
	// block which are not yet in the chain.
	newHeight := int64(len(db.blocks))
	txInFlight := map[chainhash.Hash]int{}

	// Loop through all transactions and inputs to ensure there are no error
	// conditions that would prevent them from being inserted into the db.
	// Although these checks could be done in the loop below, checking
	// for error conditions up front means the code below doesn't have to
	// deal with rollback on errors.
	votebits := block.MsgBlock().Header.VoteBits
	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		transactions := blockPrev.Transactions()
		for i, tx := range transactions {
			txInFlight[*tx.Sha()] = i
		}
		for i, tx := range transactions {
			for _, txIn := range tx.MsgTx().TxIn {
				if isCoinbaseInput(txIn) {
					continue
				}

				// It is acceptable for a transaction input to reference
				// the output of another transaction in this block only
				// if the referenced transaction comes before the
				// current one in this block.
				prevOut := &txIn.PreviousOutPoint
				if inFlightIndex, ok := txInFlight[prevOut.Hash]; ok {
					if i <= inFlightIndex {
						log.Warnf("InsertBlock: requested hash "+
							" of %s does not exist in-flight",
							tx.Sha())
						return 0, database.ErrTxShaMissing
					}
				} else {
					originTxns, exists := db.txns[prevOut.Hash]
					if !exists {
						log.Warnf("InsertBlock: requested hash "+
							"of %s by %s does not exist",
							prevOut.Hash, tx.Sha())
						return 0, database.ErrTxShaMissing
					}
					originTxD := originTxns[len(originTxns)-1]
					// NOTE(review): this bounds check uses '>' so an
					// index equal to len(spentBuf) passes here but would
					// panic in the spend loop below — looks like it
					// should be '>='; confirm against callers.
					if prevOut.Index > uint32(len(originTxD.spentBuf)) {
						log.Warnf("InsertBlock: requested hash "+
							"of %s with index %d does not "+
							"exist", tx.Sha(), prevOut.Index)
						return 0, database.ErrTxShaMissing
					}
				}
			}

			// Prevent duplicate transactions in the same block.
			if inFlightIndex, exists := txInFlight[*tx.Sha()]; exists &&
				inFlightIndex < i {
				log.Warnf("Block contains duplicate transaction %s",
					tx.Sha())
				return 0, database.ErrDuplicateSha
			}

			// Prevent duplicate transactions unless the old one is fully
			// spent.
			if txns, exists := db.txns[*tx.Sha()]; exists {
				txD := txns[len(txns)-1]
				if !isFullySpent(txD) {
					log.Warnf("Attempt to insert duplicate "+
						"transaction %s", tx.Sha())
					return 0, database.ErrDuplicateSha
				}
			}
		}
	}

	// All checks passed; commit the block itself.
	db.blocks = append(db.blocks, msgBlock)
	db.blocksBySha[msgBlock.Header.BlockSha()] = newHeight

	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		// Insert information about each transaction and spend all of the
		// outputs referenced by the inputs to the transactions.
		for i, tx := range blockPrev.Transactions() {
			// Insert the transaction data.  Note the parent's regular
			// transactions are recorded at the parent's height.
			txD := tTxInsertData{
				tree:        dcrutil.TxTreeRegular,
				blockHeight: newHeight - 1,
				offset:      i,
				spentBuf:    make([]bool, len(tx.MsgTx().TxOut)),
			}
			db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)

			// Spend all of the inputs.
			for _, txIn := range tx.MsgTx().TxIn {
				// Coinbase transaction has no inputs.
				if isCoinbaseInput(txIn) {
					continue
				}

				// Already checked for existing and valid ranges above.
				prevOut := &txIn.PreviousOutPoint
				originTxns := db.txns[prevOut.Hash]
				originTxD := originTxns[len(originTxns)-1]
				originTxD.spentBuf[prevOut.Index] = true
			}
		}
	}

	// The stake transactions of this block are always recorded at the new
	// height, regardless of the votebits.
	for i, tx := range block.STransactions() {
		// Insert the transaction data.
		txD := tTxInsertData{
			tree:        dcrutil.TxTreeStake,
			blockHeight: newHeight,
			offset:      i,
			spentBuf:    make([]bool, len(tx.MsgTx().TxOut)),
		}
		db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)

		// Spend all of the inputs.
		for _, txIn := range tx.MsgTx().TxIn {
			// Coinbase transaction has no inputs.
			if isCoinbaseInput(txIn) {
				continue
			}

			// Already checked for existing and valid ranges above.
			prevOut := &txIn.PreviousOutPoint
			originTxns := db.txns[prevOut.Hash]
			originTxD := originTxns[len(originTxns)-1]
			originTxD.spentBuf[prevOut.Index] = true
		}
	}

	return newHeight, nil
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a key for each address
// the transactions in the block involve.
//
// The transactions examined are the parent's regular tree (only when this
// block's votebits validate it and the height is past the genesis/first
// block) plus this block's stake tree.
//
// This is part of the Indexer interface.
func (idx *ExistsAddrIndex) ConnectBlock(dbTx database.Tx, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var parentTxs []*dcrutil.Tx
	if regularTxTreeValid && block.Height() > 1 {
		parentTxs = parent.Transactions()
	}
	blockTxns := block.STransactions()
	// NOTE(review): append may reuse the backing array of the slice
	// returned by parent.Transactions() if it has spare capacity —
	// confirm callers tolerate that, or copy into a fresh slice.
	allTxns := append(parentTxs, blockTxns...)

	// Collect every address key touched by the inputs and outputs of the
	// gathered transactions.
	usedAddrs := make(map[[addrKeySize]byte]struct{})
	for _, tx := range allTxns {
		msgTx := tx.MsgTx()
		isSStx, _ := stake.IsSStx(msgTx)
		for _, txIn := range msgTx.TxIn {
			// Inputs only reveal addresses when the signature script is a
			// multisig script whose redeem script can be extracted.
			if txscript.IsMultisigSigScript(txIn.SignatureScript) {
				rs, err := txscript.MultisigRedeemScriptFromScriptSig(
					txIn.SignatureScript)
				if err != nil {
					continue
				}
				class, addrs, _, err := txscript.ExtractPkScriptAddrs(
					txscript.DefaultScriptVersion, rs, idx.chainParams)
				if err != nil {
					// Non-standard outputs are skipped.
					continue
				}
				if class != txscript.MultiSigTy {
					// This should never happen, but be paranoid.
					continue
				}
				for _, addr := range addrs {
					k, err := addrToKey(addr, idx.chainParams)
					if err != nil {
						continue
					}
					usedAddrs[k] = struct{}{}
				}
			}
		}
		for _, txOut := range tx.MsgTx().TxOut {
			class, addrs, _, err := txscript.ExtractPkScriptAddrs(
				txOut.Version, txOut.PkScript, idx.chainParams)
			if err != nil {
				// Non-standard outputs are skipped.
				continue
			}
			// Ticket purchases commit to an address inside a null-data
			// output; extract and index that address as well.
			if isSStx && class == txscript.NullDataTy {
				addr, err := stake.AddrFromSStxPkScrCommitment(txOut.PkScript,
					idx.chainParams)
				if err != nil {
					// Ignore unsupported address types.
					continue
				}
				addrs = append(addrs, addr)
			}
			for _, addr := range addrs {
				k, err := addrToKey(addr, idx.chainParams)
				if err != nil {
					// Ignore unsupported address types.
					continue
				}
				usedAddrs[k] = struct{}{}
			}
		}
	}

	// Write all the newly used addresses to the database, skipping any
	// keys that already exist. Write any addresses we see in mempool at
	// this time, too, then remove them from the unconfirmed map by
	// dropping the old map and reassigning a new map.
	idx.unconfirmedLock.Lock()
	for k := range idx.mpExistsAddr {
		usedAddrs[k] = struct{}{}
	}
	idx.mpExistsAddr = make(map[[addrKeySize]byte]struct{})
	idx.unconfirmedLock.Unlock()

	meta := dbTx.Metadata()
	existsAddrIndex := meta.Bucket(existsAddrIndexKey)
	// Filter out keys already present in the index before writing.
	newUsedAddrs := make(map[[addrKeySize]byte]struct{})
	for k := range usedAddrs {
		if !idx.existsAddress(existsAddrIndex, k) {
			newUsedAddrs[k] = struct{}{}
		}
	}
	for k := range newUsedAddrs {
		err := dbPutExistsAddr(existsAddrIndex, k)
		if err != nil {
			return err
		}
	}

	return nil
}