// Receive waits for the response promised by the future and returns the raw // block requested from the server given its hash. func (r FutureGetBlockResult) Receive() (*btcutil.Block, error) { res, err := receiveFuture(r) if err != nil { return nil, err } // Unmarshal result as a string. var blockHex string err = json.Unmarshal(res, &blockHex) if err != nil { return nil, err } // Decode the serialized block hex to raw bytes. serializedBlock, err := hex.DecodeString(blockHex) if err != nil { return nil, err } // Deserialize the block and return it. var msgBlock btcwire.MsgBlock err = msgBlock.Deserialize(bytes.NewReader(serializedBlock)) if err != nil { return nil, err } return btcutil.NewBlock(&msgBlock), nil }
// Receive waits for the response promised by the future and returns the raw // block requested from the server given its hash. func (r FutureGetBlockResult) Receive() (*btcutil.Block, error) { reply, err := receiveFuture(r) if err != nil { return nil, err } // Ensure the returned data is the expected type. blockHex, ok := reply.(string) if !ok { return nil, fmt.Errorf("unexpected response type for "+ "getblock (verbose=0): %T\n", reply) } // Decode the serialized block hex to raw bytes. serializedBlock, err := hex.DecodeString(blockHex) if err != nil { return nil, err } // Deserialize the block and return it. var msgBlock btcwire.MsgBlock err = msgBlock.Deserialize(bytes.NewReader(serializedBlock)) if err != nil { return nil, err } return btcutil.NewBlock(&msgBlock), nil }
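A minimal sketch of how a caller might drive the future above, assuming a btcrpcclient-style client. The blockFetcher interface is declared here purely for illustration, and GetBlockAsync is the assumed asynchronous counterpart of the getblock call.

// blockFetcher is an illustrative interface; an RPC client exposing a
// GetBlockAsync method that returns a FutureGetBlockResult satisfies it.
type blockFetcher interface {
	GetBlockAsync(hash *btcwire.ShaHash) FutureGetBlockResult
}

// fetchBlock issues an asynchronous getblock request and then waits on the
// returned future. Other work can be done between the two calls.
func fetchBlock(c blockFetcher, hashStr string) (*btcutil.Block, error) {
	hash, err := btcwire.NewShaHashFromStr(hashStr)
	if err != nil {
		return nil, err
	}
	future := c.GetBlockAsync(hash)
	return future.Receive()
}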
// loadBlockDB opens the block database and returns a handle to it. func loadBlockDB() (btcdb.Db, error) { db, err := setupBlockDB() if err != nil { return nil, err } // Get the latest block height from the database. _, height, err := db.NewestSha() if err != nil { db.Close() return nil, err } // Insert the appropriate genesis block for the bitcoin network being // connected to if needed. if height == -1 { genesis := btcutil.NewBlock(activeNetParams.GenesisBlock) _, err := db.InsertBlock(genesis) if err != nil { db.Close() return nil, err } btcdLog.Infof("Inserted genesis block %v", activeNetParams.GenesisHash) height = 0 } btcdLog.Infof("Block database loaded with block height %d", height) return db, nil }
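setupBlockDB is referenced above but not included in this listing. A sketch modeled on the open-or-create pattern of the older loadBlockDB variant shown later in this section might look like the following; blockDbNamePrefix and the cfg fields are borrowed from that variant and are assumptions here.

// setupBlockDB opens the block database, creating it first if it does not
// already exist. This is a sketch only; the real function is not shown here.
func setupBlockDB() (btcdb.Db, error) {
	dbPath := filepath.Join(cfg.DataDir, blockDbNamePrefix+"_"+cfg.DbType)
	db, err := btcdb.OpenDB(cfg.DbType, dbPath)
	if err != nil {
		// Only attempt creation when the database does not exist yet.
		if err != btcdb.DbDoesNotExist {
			return nil, err
		}
		if err := os.MkdirAll(cfg.DataDir, 0700); err != nil {
			return nil, err
		}
		db, err = btcdb.CreateDB(cfg.DbType, dbPath)
		if err != nil {
			return nil, err
		}
	}
	return db, nil
}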
// This example demonstrates creating a new database and inserting the genesis // block into it. func ExampleCreateDB() { // Notice in these example imports that the memdb driver is loaded. // Ordinarily this would be whatever driver(s) your application // requires. // import ( // "github.com/conformal/btcdb" // _ "github.com/conformal/btcdb/memdb" // ) // Create a database and schedule it to be closed on exit. This example // uses a memory-only database to avoid needing to write anything to // the disk. Typically, you would specify a persistent database driver // such as "leveldb" and give it a database name as the second // parameter. db, err := btcdb.CreateDB("memdb") if err != nil { fmt.Println(err) return } defer db.Close() // Insert the main network genesis block. genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock) newHeight, err := db.InsertBlock(genesis) if err != nil { fmt.Println(err) return } fmt.Println("New height:", newHeight) // Output: // New height: 0 }
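As the comment above notes, a persistent driver takes a database path as its second argument. A sketch of the same genesis-block setup against the leveldb driver (the driver name also appears in the duplicate-transaction test later in this section) might be:

// createPersistentDB is a sketch of the example above using an on-disk
// leveldb database instead of the memory-only backend. The dbPath value is
// chosen by the caller, and the corresponding driver package must be
// imported for its side effects.
func createPersistentDB(dbPath string) (btcdb.Db, error) {
	db, err := btcdb.CreateDB("leveldb", dbPath)
	if err != nil {
		return nil, err
	}

	// Insert the main network genesis block so the database starts at
	// height 0, mirroring the memdb example.
	genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)
	if _, err := db.InsertBlock(genesis); err != nil {
		db.Close()
		return nil, err
	}
	return db, nil
}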
// generateBlocks is a worker that is controlled by the miningWorkerController. // It is self contained in that it creates block templates and attempts to solve // them while detecting when it is performing stale work and reacting // accordingly by generating a new block template. When a block is solved, it // is submitted. // // It must be run as a goroutine. func (m *CPUMiner) generateBlocks(quit chan struct{}) { minrLog.Tracef("Starting generate blocks worker") // Start a ticker which is used to signal checks for stale work and // updates to the speed monitor. ticker := time.NewTicker(time.Second * hashUpdateSecs) out: for { // Quit when the miner is stopped. select { case <-quit: break out default: // Non-blocking select to fall through } // No point in searching for a solution before the chain is // synced. Also, grab the same lock as used for block // submission, since the current block will be changing and // this would otherwise end up building a new block template on // a block that is in the process of becoming stale. m.submitBlockLock.Lock() _, curHeight := m.server.blockManager.chainState.Best() if curHeight != 0 && !m.server.blockManager.IsCurrent() { m.submitBlockLock.Unlock() time.Sleep(time.Second) continue } // Choose a payment address at random. rand.Seed(time.Now().UnixNano()) payToAddr := cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))] // Create a new block template using the available transactions // in the memory pool as a source of transactions to potentially // include in the block. template, err := NewBlockTemplate(payToAddr, m.server.txMemPool) m.submitBlockLock.Unlock() if err != nil { errStr := fmt.Sprintf("Failed to create new block "+ "template: %v", err) minrLog.Errorf(errStr) continue } // Attempt to solve the block. The function will exit early // with false when conditions that trigger a stale block, so // a new block template can be generated. When the return is // true a solution was found, so submit the solved block. if m.solveBlock(template.block, curHeight+1, ticker, quit) { block := btcutil.NewBlock(template.block) m.submitBlock(block) } } ticker.Stop() m.workerWg.Done() minrLog.Tracef("Generate blocks worker done") }
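The miningWorkerController mentioned above is not part of this listing. A rough sketch of how such a controller might fan out workers over a shared quit channel, assuming a numWorkers argument and the workerWg field used by the worker, follows.

// startWorkersSketch launches the requested number of generateBlocks workers
// and returns the quit channel that stops them. This is a sketch; the real
// controller also handles dynamic worker count changes and speed monitoring.
func (m *CPUMiner) startWorkersSketch(numWorkers int) chan struct{} {
	quit := make(chan struct{})
	for i := 0; i < numWorkers; i++ {
		m.workerWg.Add(1)
		go m.generateBlocks(quit)
	}

	// To stop mining: close(quit), then m.workerWg.Wait() for the workers
	// to finish their current iteration and exit.
	return quit
}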
func TestCheckBlockSanity(t *testing.T) { powLimit := btcchain.ChainParams(btcwire.MainNet).PowLimit block := btcutil.NewBlock(&Block100000) err := btcchain.CheckBlockSanity(block, powLimit) if err != nil { t.Errorf("CheckBlockSanity: %v", err) } }
// loadBlockDB opens the block database and returns a handle to it. func loadBlockDB() (btcdb.Db, error) { // The database name is based on the database type. dbName := blockDbNamePrefix + "_" + cfg.DbType if cfg.DbType == "sqlite" { dbName = dbName + ".db" } dbPath := filepath.Join(cfg.DataDir, dbName) // The regression test is special in that it needs a clean database for // each run, so remove it now if it already exists. removeRegressionDB(dbPath) log.Infof("[BMGR] Loading block database from '%s'", dbPath) db, err := btcdb.OpenDB(cfg.DbType, dbPath) if err != nil { // Return the error if it's not because the database doesn't // exist. if err != btcdb.DbDoesNotExist { return nil, err } // Create the db if it does not exist. err = os.MkdirAll(cfg.DataDir, 0700) if err != nil { return nil, err } db, err = btcdb.CreateDB(cfg.DbType, dbPath) if err != nil { return nil, err } } // Get the latest block height from the database. _, height, err := db.NewestSha() if err != nil { db.Close() return nil, err } // Insert the appropriate genesis block for the bitcoin network being // connected to if needed. if height == -1 { genesis := btcutil.NewBlock(activeNetParams.genesisBlock) _, err := db.InsertBlock(genesis) if err != nil { db.Close() return nil, err } log.Infof("[BMGR] Inserted genesis block %v", activeNetParams.genesisHash) height = 0 } log.Infof("[BMGR] Block database loaded with block height %d", height) return db, nil }
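removeRegressionDB is called above but not shown. Given the comment that the regression test needs a clean database on each run, a sketch along these lines seems plausible; the cfg.RegressionTest flag is an assumption.

// removeRegressionDB removes the existing regression test database so each
// run starts from a clean slate. Sketch only; cfg.RegressionTest is assumed.
func removeRegressionDB(dbPath string) error {
	// Nothing to do when not running in regression test mode.
	if !cfg.RegressionTest {
		return nil
	}

	// Remove the old database, whether it is a directory (leveldb) or a
	// single file (sqlite).
	if fi, err := os.Stat(dbPath); err == nil {
		log.Infof("[BMGR] Removing regression test database (%s)", dbPath)
		if fi.IsDir() {
			return os.RemoveAll(dbPath)
		}
		return os.Remove(dbPath)
	}
	return nil
}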
// TestMerkle tests the BuildMerkleTreeStore API. func TestMerkle(t *testing.T) { block := btcutil.NewBlock(&Block100000) merkles := btcchain.BuildMerkleTreeStore(block) calculatedMerkleRoot := merkles[len(merkles)-1] wantMerkle := &Block100000.Header.MerkleRoot if !wantMerkle.IsEqual(calculatedMerkleRoot) { t.Errorf("BuildMerkleTreeStore: merkle root mismatch - "+ "got %v, want %v", calculatedMerkleRoot, wantMerkle) } }
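A small sketch of using BuildMerkleTreeStore the same way to validate a block against its own header; note that other snippets in this listing pass block.Transactions() instead of the block itself, depending on the package version.

// validMerkleRoot recomputes a block's merkle root and compares it with the
// value committed to in the header. It follows the call form used by the
// test above.
func validMerkleRoot(block *btcutil.Block) bool {
	merkles := btcchain.BuildMerkleTreeStore(block)
	calculated := merkles[len(merkles)-1]
	want := &block.MsgBlock().Header.MerkleRoot
	return want.IsEqual(calculated)
}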
// exampleLoadDB is used in the example to elide the setup code. func exampleLoadDB() (btcdb.Db, error) { db, err := btcdb.CreateDB("memdb") if err != nil { return nil, err } // Insert the main network genesis block. genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock) _, err = db.InsertBlock(genesis) if err != nil { return nil, err } return db, err }
// FetchBlockBySha returns a btcutil.Block. The implementation may cache the // underlying data if desired. This is part of the btcdb.Db interface // implementation. // // This implementation does not use any additional cache since the entire // database is already in memory. func (db *MemDb) FetchBlockBySha(sha *btcwire.ShaHash) (*btcutil.Block, error) { db.Lock() defer db.Unlock() if db.closed { return nil, ErrDbClosed } if blockHeight, exists := db.blocksBySha[*sha]; exists { block := btcutil.NewBlock(db.blocks[int(blockHeight)]) block.SetHeight(blockHeight) return block, nil } return nil, fmt.Errorf("block %v is not in database", sha) }
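A sketch of a round trip through the interface above: create a memdb instance, insert the genesis block, and fetch it back by its hash. All of the calls used appear elsewhere in this listing.

// fetchGenesisRoundTrip inserts the main network genesis block into a fresh
// memory database and then fetches it back via FetchBlockBySha.
func fetchGenesisRoundTrip() (*btcutil.Block, error) {
	db, err := btcdb.CreateDB("memdb")
	if err != nil {
		return nil, err
	}
	defer db.Close()

	genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)
	if _, err := db.InsertBlock(genesis); err != nil {
		return nil, err
	}

	// Fetch the block back out by its hash.
	return db.FetchBlockBySha(btcnet.MainNetParams.GenesisHash)
}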
func TestCheckBlockSanity(t *testing.T) { powLimit := btcnet.MainNetParams.PowLimit block := btcutil.NewBlock(&Block100000) err := btcchain.CheckBlockSanity(block, powLimit) if err != nil { t.Errorf("CheckBlockSanity: %v", err) } // Ensure a block that has a timestamp with a precision higher than one // second fails. timestamp := block.MsgBlock().Header.Timestamp block.MsgBlock().Header.Timestamp = timestamp.Add(time.Nanosecond) err = btcchain.CheckBlockSanity(block, powLimit) if err == nil { t.Errorf("CheckBlockSanity: error is nil when it shouldn't be") } }
// setupDB is used to create a new db instance with the genesis block already // inserted. In addition to the new db instance, it returns a teardown function // the caller should invoke when done testing to clean up. func setupDB(dbType, dbName string) (btcdb.Db, func(), error) { db, teardown, err := createDB(dbType, dbName, true) if err != nil { return nil, nil, err } // Insert the main network genesis block. This is part of the initial // database setup. genesisBlock := btcutil.NewBlock(&btcwire.GenesisBlock) _, err = db.InsertBlock(genesisBlock) if err != nil { teardown() err := fmt.Errorf("failed to insert genesis block: %v", err) return nil, nil, err } return db, teardown, nil }
// UpdateExtraNonce updates the extra nonce in the coinbase script of the passed // block by regenerating the coinbase script with the passed value and block // height. It also recalculates and updates the new merkle root that results // from changing the coinbase script. func UpdateExtraNonce(msgBlock *btcwire.MsgBlock, blockHeight int64, extraNonce uint64) error { coinbaseScript := standardCoinbaseScript(blockHeight, extraNonce) if len(coinbaseScript) < btcchain.MinCoinbaseScriptLen || len(coinbaseScript) > btcchain.MaxCoinbaseScriptLen { return fmt.Errorf("coinbase transaction script length "+ "of %d is out of range (min: %d, max: %d)", len(coinbaseScript), btcchain.MinCoinbaseScriptLen, btcchain.MaxCoinbaseScriptLen) } msgBlock.Transactions[0].TxIn[0].SignatureScript = coinbaseScript // TODO(davec): A btcutil.Block should be saved in the state to avoid // recalculating all of the other transaction hashes. // block.Transactions[0].InvalidateCache() // Recalculate the merkle root with the updated extra nonce. block := btcutil.NewBlock(msgBlock) merkles := btcchain.BuildMerkleTreeStore(block.Transactions()) msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1] return nil }
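A brief sketch of how a miner might use UpdateExtraNonce once the 32-bit header nonce space is exhausted; the helper name is illustrative.

// bumpExtraNonce rolls the extra nonce, regenerates the coinbase script and
// merkle root via UpdateExtraNonce, and resets the header nonce so hashing
// can resume against the updated block.
func bumpExtraNonce(msgBlock *btcwire.MsgBlock, height int64, extraNonce *uint64) error {
	*extraNonce++
	if err := UpdateExtraNonce(msgBlock, height, *extraNonce); err != nil {
		return err
	}
	msgBlock.Header.Nonce = 0
	return nil
}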
// This example demonstrates how to create a new chain instance and use // ProcessBlock to attempt to add a block to the chain. As the package // overview documentation describes, this includes all of the Bitcoin consensus // rules. This example intentionally attempts to insert a duplicate genesis // block to illustrate how an invalid block is handled. func ExampleBlockChain_ProcessBlock() { // Create a new database to store the accepted blocks into. Typically // this would be opening an existing database and would not use memdb // which is a memory-only database backend, but we create a new db // here so this is a complete working example. db, err := btcdb.CreateDB("memdb") if err != nil { fmt.Printf("Failed to create database: %v\n", err) return } defer db.Close() // Insert the main network genesis block. This is part of the initial // database setup. Like above, this typically would not be needed when // opening an existing database. genesisBlock := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock) _, err = db.InsertBlock(genesisBlock) if err != nil { fmt.Printf("Failed to insert genesis block: %v\n", err) return } // Create a new BlockChain instance using the underlying database for // the main bitcoin network and ignore notifications. chain := btcchain.New(db, &btcnet.MainNetParams, nil) // Process a block. For this example, we are going to intentionally // cause an error by trying to process the genesis block which already // exists. isOrphan, err := chain.ProcessBlock(genesisBlock, btcchain.BFNone) if err != nil { fmt.Printf("Failed to process block: %v\n", err) return } fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan) // Output: // Failed to process block: already have block 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f }
// TestCheckConnectBlock tests the CheckConnectBlock function to ensure it // fails as expected when given a block that already exists in the chain. func TestCheckConnectBlock(t *testing.T) { // Create a new database and chain instance to run tests against. chain, teardownFunc, err := chainSetup("checkconnectblock") if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return } defer teardownFunc() err = chain.GenerateInitialIndex() if err != nil { t.Errorf("GenerateInitialIndex: %v", err) } // The genesis block should fail to connect since it's already // inserted. genesisBlock := btcnet.MainNetParams.GenesisBlock err = chain.CheckConnectBlock(btcutil.NewBlock(genesisBlock)) if err == nil { t.Errorf("CheckConnectBlock: did not receive expected error") } }
// TestClosed ensure calling the interface functions on a closed database // returns appropriate errors for the interface functions that return errors // and does not panic or otherwise misbehave for functions which do not return // errors. func TestClosed(t *testing.T) { db, err := btcdb.CreateDB("memdb") if err != nil { t.Errorf("Failed to open test database %v", err) return } _, err = db.InsertBlock(btcutil.NewBlock(&btcwire.GenesisBlock)) if err != nil { t.Errorf("InsertBlock: %v", err) } db.Close() genesisHash := &btcwire.GenesisHash if err := db.DropAfterBlockBySha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("DropAfterBlockBySha: unexpected error %v", err) } if exists := db.ExistsSha(genesisHash); exists != false { t.Errorf("ExistsSha: genesis hash exists after close") } if _, err := db.FetchBlockBySha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("FetchBlockBySha: unexpected error %v", err) } if _, err := db.FetchBlockShaByHeight(0); err != memdb.ErrDbClosed { t.Errorf("FetchBlockShaByHeight: unexpected error %v", err) } if _, err := db.FetchHeightRange(0, 1); err != memdb.ErrDbClosed { t.Errorf("FetchHeightRange: unexpected error %v", err) } genesisMerkleRoot := &btcwire.GenesisMerkleRoot if exists := db.ExistsTxSha(genesisMerkleRoot); exists != false { t.Errorf("ExistsTxSha: hash %v exists when it shouldn't", genesisMerkleRoot) } if _, err := db.FetchTxBySha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("FetchTxBySha: unexpected error %v", err) } requestHashes := []*btcwire.ShaHash{genesisHash} reply := db.FetchTxByShaList(requestHashes) if len(reply) != len(requestHashes) { t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+ "got: %d, want: %d", len(reply), len(requestHashes)) } for i, txLR := range reply { wantReply := &btcdb.TxListReply{ Sha: requestHashes[i], Err: memdb.ErrDbClosed, } if !reflect.DeepEqual(wantReply, txLR) { t.Errorf("FetchTxByShaList unexpected reply\ngot: %v\n"+ "want: %v", txLR, wantReply) } } reply = db.FetchUnSpentTxByShaList(requestHashes) if len(reply) != len(requestHashes) { t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+ "got: %d, want: %d", len(reply), len(requestHashes)) } for i, txLR := range reply { wantReply := &btcdb.TxListReply{ Sha: requestHashes[i], Err: memdb.ErrDbClosed, } if !reflect.DeepEqual(wantReply, txLR) { t.Errorf("FetchUnSpentTxByShaList unexpected reply\n"+ "got: %v\nwant: %v", txLR, wantReply) } } if _, _, err := db.NewestSha(); err != memdb.ErrDbClosed { t.Errorf("NewestSha: unexpected error %v", err) } // The following calls don't return errors from the interface to be able // to detect a closed database, so just call them to ensure there are no // panics. db.Sync() db.RollbackClose() }
// NewBlockTemplate returns a new block template that is ready to be solved // using the transactions from the passed transaction memory pool and a coinbase // that either pays to the passed address if it is not nil, or a coinbase that // is redeemable by anyone if the passed address is nil. The nil address // functionality is useful since there are cases such as the getblocktemplate // RPC where external mining software is responsible for creating their own // coinbase which will replace the one generated for the block template. Thus // the need to have configured address can be avoided. // // The transactions selected and included are prioritized according to several // factors. First, each transaction has a priority calculated based on its // value, age of inputs, and size. Transactions which consist of larger // amounts, older inputs, and small sizes have the highest priority. Second, a // fee per kilobyte is calculated for each transaction. Transactions with a // higher fee per kilobyte are preferred. Finally, the block generation related // configuration options are all taken into account. // // Transactions which only spend outputs from other transactions already in the // block chain are immediately added to a priority queue which either // prioritizes based on the priority (then fee per kilobyte) or the fee per // kilobyte (then priority) depending on whether or not the BlockPrioritySize // configuration option allots space for high-priority transactions. // Transactions which spend outputs from other transactions in the memory pool // are added to a dependency map so they can be added to the priority queue once // the transactions they depend on have been included. // // Once the high-priority area (if configured) has been filled with transactions, // or the priority falls below what is considered high-priority, the priority // queue is updated to prioritize by fees per kilobyte (then priority). // // When the fees per kilobyte drop below the TxMinFreeFee configuration option, // the transaction will be skipped unless there is a BlockMinSize set, in which // case the block will be filled with the low-fee/free transactions until the // block size reaches that minimum size. // // Any transactions which would cause the block to exceed the BlockMaxSize // configuration option, exceed the maximum allowed signature operations per // block, or otherwise cause the block to be invalid are skipped. // // Given the above, a block generated by this function is of the following form: // // ----------------------------------- -- -- // | Coinbase Transaction | | | // |-----------------------------------| | | // | | | | ----- cfg.BlockPrioritySize // | High-priority Transactions | | | // | | | | // |-----------------------------------| | -- // | | | // | | | // | | |--- cfg.BlockMaxSize // | Transactions prioritized by fee | | // | until <= cfg.TxMinFreeFee | | // | | | // | | | // | | | // |-----------------------------------| | // | Low-fee/Non high-priority (free) | | // | transactions (while block size | | // | <= cfg.BlockMinSize) | | // ----------------------------------- -- func NewBlockTemplate(mempool *txMemPool, payToAddress btcutil.Address) (*BlockTemplate, error) { blockManager := mempool.server.blockManager chainState := &blockManager.chainState chain := blockManager.blockChain // Extend the most recently known best block. 
chainState.Lock() prevHash := chainState.newestHash nextBlockHeight := chainState.newestHeight + 1 chainState.Unlock() // Create a standard coinbase transaction paying to the provided // address. NOTE: The coinbase value will be updated to include the // fees from the selected transactions later after they have actually // been selected. It is created here to detect any errors early // before potentially doing a lot of work below. The extra nonce helps // ensure the transaction is not a duplicate transaction (paying the // same value to the same public key address would otherwise be an // identical transaction for block version 1). extraNonce := uint64(0) coinbaseScript := standardCoinbaseScript(nextBlockHeight, extraNonce) coinbaseTx, err := createCoinbaseTx(coinbaseScript, nextBlockHeight, payToAddress) if err != nil { return nil, err } numCoinbaseSigOps := int64(btcchain.CountSigOps(coinbaseTx)) // Get the current memory pool transactions and create a priority queue // to hold the transactions which are ready for inclusion into a block // along with some priority related and fee metadata. Reserve the same // number of items that are in the memory pool for the priority queue. // Also, choose the initial sort order for the priority queue based on // whether or not there is an area allocated for high-priority // transactions. mempoolTxns := mempool.TxDescs() sortedByFee := cfg.BlockPrioritySize == 0 priorityQueue := newTxPriorityQueue(len(mempoolTxns), sortedByFee) // Create a slice to hold the transactions to be included in the // generated block with reserved space. Also create a transaction // store to house all of the input transactions so multiple lookups // can be avoided. blockTxns := make([]*btcutil.Tx, 0, len(mempoolTxns)) blockTxns = append(blockTxns, coinbaseTx) blockTxStore := make(btcchain.TxStore) // dependers is used to track transactions which depend on another // transaction in the memory pool. This, in conjunction with the // dependsOn map kept with each dependent transaction helps quickly // determine which dependent transactions are now eligible for inclusion // in the block once each transaction has been included. dependers := make(map[btcwire.ShaHash]*list.List) // Create slices to hold the fees and number of signature operations // for each of the selected transactions and add an entry for the // coinbase. This allows the code below to simply append details about // a transaction as it is selected for inclusion in the final block. // However, since the total fees aren't known yet, use a dummy value for // the coinbase fee which will be updated later. txFees := make([]int64, 0, len(mempoolTxns)) txSigOpCounts := make([]int64, 0, len(mempoolTxns)) txFees = append(txFees, -1) // Updated once known txSigOpCounts = append(txSigOpCounts, numCoinbaseSigOps) minrLog.Debugf("Considering %d mempool transactions for inclusion to "+ "new block", len(mempoolTxns)) mempoolLoop: for _, txDesc := range mempoolTxns { // A block can't have more than one coinbase or contain // non-finalized transactions. tx := txDesc.Tx if btcchain.IsCoinBase(tx) { minrLog.Tracef("Skipping coinbase tx %s", tx.Sha()) continue } if !btcchain.IsFinalizedTransaction(tx, nextBlockHeight, time.Now()) { minrLog.Tracef("Skipping non-finalized tx %s", tx.Sha()) continue } // Fetch all of the transactions referenced by the inputs to // this transaction. 
NOTE: This intentionally does not fetch // inputs from the mempool since a transaction which depends on // other transactions in the mempool must come after those // dependencies in the final generated block. txStore, err := chain.FetchTransactionStore(tx) if err != nil { minrLog.Warnf("Unable to fetch transaction store for "+ "tx %s: %v", tx.Sha(), err) continue } // Calculate the input value age sum for the transaction. This // is comprised of the sum all of input amounts multiplied by // their respective age (number of confirmations since the // referenced input transaction). While doing the above, also // setup dependencies for any transactions which reference other // transactions in the mempool so they can be properly ordered // below. prioItem := &txPrioItem{tx: txDesc.Tx} inputValueAge := float64(0.0) for _, txIn := range tx.MsgTx().TxIn { originHash := &txIn.PreviousOutPoint.Hash originIndex := txIn.PreviousOutPoint.Index txData, exists := txStore[*originHash] if !exists || txData.Err != nil || txData.Tx == nil { if !mempool.HaveTransaction(originHash) { minrLog.Tracef("Skipping tx %s because "+ "it references tx %s which is "+ "not available", tx.Sha, originHash) continue mempoolLoop } // The transaction is referencing another // transaction in the memory pool, so setup an // ordering dependency. depList, exists := dependers[*originHash] if !exists { depList = list.New() dependers[*originHash] = depList } depList.PushBack(prioItem) if prioItem.dependsOn == nil { prioItem.dependsOn = make( map[btcwire.ShaHash]struct{}) } prioItem.dependsOn[*originHash] = struct{}{} // No need to calculate or sum input value age // for this input since it's zero due to // the input age multiplier of 0. continue } // Ensure the output index in the referenced transaction // is available. msgTx := txData.Tx.MsgTx() if originIndex > uint32(len(msgTx.TxOut)) { minrLog.Tracef("Skipping tx %s because "+ "it references output %d of tx %s "+ "which is out of bounds", tx.Sha, originIndex, originHash) continue mempoolLoop } // Sum the input value times age. originTxOut := txData.Tx.MsgTx().TxOut[originIndex] inputValue := originTxOut.Value inputAge := nextBlockHeight - txData.BlockHeight inputValueAge += float64(inputValue * inputAge) } // Calculate the final transaction priority using the input // value age sum as well as the adjusted transaction size. The // formula is: sum(inputValue * inputAge) / adjustedTxSize txSize := tx.MsgTx().SerializeSize() prioItem.priority = calcPriority(tx, txSize, inputValueAge) // Calculate the fee in Satoshi/KB. // NOTE: This is a more precise value than the one calculated // during calcMinRelayFee which rounds up to the nearest full // kilobyte boundary. This is beneficial since it provides an // incentive to create smaller transactions. prioItem.feePerKB = float64(txDesc.Fee) / (float64(txSize) / 1000) prioItem.fee = txDesc.Fee // Add the transaction to the priority queue to mark it ready // for inclusion in the block unless it has dependencies. if prioItem.dependsOn == nil { heap.Push(priorityQueue, prioItem) } // Merge the store which contains all of the input transactions // for this transaction into the input transaction store. This // allows the code below to avoid a second lookup. mergeTxStore(blockTxStore, txStore) } minrLog.Tracef("Priority queue len %d, dependers len %d", priorityQueue.Len(), len(dependers)) // The starting block size is the size of the block header plus the max // possible transaction count size, plus the size of the coinbase // transaction. 
blockSize := blockHeaderOverhead + uint32(coinbaseTx.MsgTx().SerializeSize()) blockSigOps := numCoinbaseSigOps totalFees := int64(0) // Choose which transactions make it into the block. for priorityQueue.Len() > 0 { // Grab the highest priority (or highest fee per kilobyte // depending on the sort order) transaction. prioItem := heap.Pop(priorityQueue).(*txPrioItem) tx := prioItem.tx // Grab the list of transactions which depend on this one (if // any) and remove the entry for this transaction as it will // either be included or skipped, but in either case the deps // are no longer needed. deps := dependers[*tx.Sha()] delete(dependers, *tx.Sha()) // Enforce maximum block size. Also check for overflow. txSize := uint32(tx.MsgTx().SerializeSize()) blockPlusTxSize := blockSize + txSize if blockPlusTxSize < blockSize || blockPlusTxSize >= cfg.BlockMaxSize { minrLog.Tracef("Skipping tx %s because it would exceed "+ "the max block size", tx.Sha()) logSkippedDeps(tx, deps) continue } // Enforce maximum signature operations per block. Also check // for overflow. numSigOps := int64(btcchain.CountSigOps(tx)) if blockSigOps+numSigOps < blockSigOps || blockSigOps+numSigOps > btcchain.MaxSigOpsPerBlock { minrLog.Tracef("Skipping tx %s because it would "+ "exceed the maximum sigops per block", tx.Sha()) logSkippedDeps(tx, deps) continue } numP2SHSigOps, err := btcchain.CountP2SHSigOps(tx, false, blockTxStore) if err != nil { minrLog.Tracef("Skipping tx %s due to error in "+ "CountP2SHSigOps: %v", tx.Sha(), err) logSkippedDeps(tx, deps) continue } numSigOps += int64(numP2SHSigOps) if blockSigOps+numSigOps < blockSigOps || blockSigOps+numSigOps > btcchain.MaxSigOpsPerBlock { minrLog.Tracef("Skipping tx %s because it would "+ "exceed the maximum sigops per block (p2sh)", tx.Sha()) logSkippedDeps(tx, deps) continue } // Skip free transactions once the block is larger than the // minimum block size. if sortedByFee && prioItem.feePerKB < minTxRelayFee && blockPlusTxSize >= cfg.BlockMinSize { minrLog.Tracef("Skipping tx %s with feePerKB %.2f "+ "< minTxRelayFee %d and block size %d >= "+ "minBlockSize %d", tx.Sha(), prioItem.feePerKB, minTxRelayFee, blockPlusTxSize, cfg.BlockMinSize) logSkippedDeps(tx, deps) continue } // Prioritize by fee per kilobyte once the block is larger than // the priority size or there are no more high-priority // transactions. if !sortedByFee && (blockPlusTxSize >= cfg.BlockPrioritySize || prioItem.priority <= minHighPriority) { minrLog.Tracef("Switching to sort by fees per "+ "kilobyte blockSize %d >= BlockPrioritySize "+ "%d || priority %.2f <= minHighPriority %.2f", blockPlusTxSize, cfg.BlockPrioritySize, prioItem.priority, minHighPriority) sortedByFee = true priorityQueue.SetLessFunc(txPQByFee) // Put the transaction back into the priority queue and // skip it so it is re-priortized by fees if it won't // fit into the high-priority section or the priority is // too low. Otherwise this transaction will be the // final one in the high-priority section, so just fall // though to the code below so it is added now. if blockPlusTxSize > cfg.BlockPrioritySize || prioItem.priority < minHighPriority { heap.Push(priorityQueue, prioItem) continue } } // Ensure the transaction inputs pass all of the necessary // preconditions before allowing it to be added to the block. 
_, err = btcchain.CheckTransactionInputs(tx, nextBlockHeight, blockTxStore) if err != nil { minrLog.Tracef("Skipping tx %s due to error in "+ "CheckTransactionInputs: %v", tx.Sha(), err) logSkippedDeps(tx, deps) continue } err = btcchain.ValidateTransactionScripts(tx, blockTxStore, standardScriptVerifyFlags) if err != nil { minrLog.Tracef("Skipping tx %s due to error in "+ "ValidateTransactionScripts: %v", tx.Sha(), err) logSkippedDeps(tx, deps) continue } // Spend the transaction inputs in the block transaction store // and add an entry for it to ensure any transactions which // reference this one have it available as an input and can // ensure they aren't double spending. spendTransaction(blockTxStore, tx, nextBlockHeight) // Add the transaction to the block, increment counters, and // save the fees and signature operation counts to the block // template. blockTxns = append(blockTxns, tx) blockSize += txSize blockSigOps += numSigOps totalFees += prioItem.fee txFees = append(txFees, prioItem.fee) txSigOpCounts = append(txSigOpCounts, numSigOps) minrLog.Tracef("Adding tx %s (priority %.2f, feePerKB %.2f)", prioItem.tx.Sha(), prioItem.priority, prioItem.feePerKB) // Add transactions which depend on this one (and also do not // have any other unsatisified dependencies) to the priority // queue. if deps != nil { for e := deps.Front(); e != nil; e = e.Next() { // Add the transaction to the priority queue if // there are no more dependencies after this // one. item := e.Value.(*txPrioItem) delete(item.dependsOn, *tx.Sha()) if len(item.dependsOn) == 0 { heap.Push(priorityQueue, item) } } } } // Now that the actual transactions have been selected, update the // block size for the real transaction count and coinbase value with // the total fees accordingly. blockSize -= btcwire.MaxVarIntPayload - uint32(btcwire.VarIntSerializeSize(uint64(len(blockTxns)))) coinbaseTx.MsgTx().TxOut[0].Value += totalFees txFees[0] = -totalFees // Calculate the required difficulty for the block. The timestamp // is potentially adjusted to ensure it comes after the median time of // the last several blocks per the chain consensus rules. ts, err := medianAdjustedTime(chainState) if err != nil { return nil, err } requiredDifficulty, err := blockManager.CalcNextRequiredDifficulty(ts) if err != nil { return nil, err } // Create a new block ready to be solved. merkles := btcchain.BuildMerkleTreeStore(blockTxns) var msgBlock btcwire.MsgBlock msgBlock.Header = btcwire.BlockHeader{ Version: generatedBlockVersion, PrevBlock: *prevHash, MerkleRoot: *merkles[len(merkles)-1], Timestamp: ts, Bits: requiredDifficulty, } for _, tx := range blockTxns { if err := msgBlock.AddTransaction(tx.MsgTx()); err != nil { return nil, err } } // Finally, perform a full check on the created block against the chain // consensus rules to ensure it properly connects to the current best // chain with no issues. block := btcutil.NewBlock(&msgBlock) block.SetHeight(nextBlockHeight) if err := blockManager.CheckConnectBlock(block); err != nil { return nil, err } minrLog.Debugf("Created new block template (%d transactions, %d in "+ "fees, %d signature operations, %d bytes, target difficulty "+ "%064x)", len(msgBlock.Transactions), totalFees, blockSigOps, blockSize, btcchain.CompactToBig(msgBlock.Header.Bits)) return &BlockTemplate{ block: &msgBlock, fees: txFees, sigOpCounts: txSigOpCounts, height: nextBlockHeight, validPayAddress: payToAddress != nil, }, nil }
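A simplified, single-threaded sketch of how the returned template might be consumed by a solver from within the same package. It omits the stale-work detection, ticker updates, and extra nonce rolling that the real generateBlocks/solveBlock path performs, and it assumes btcchain.ShaHashToBig for the proof-of-work comparison.

// solveTemplateSketch searches the header nonce space for a hash at or below
// the template's target difficulty and returns the solved block if found.
func solveTemplateSketch(template *BlockTemplate) (*btcutil.Block, bool) {
	msgBlock := template.block
	header := &msgBlock.Header
	targetDifficulty := btcchain.CompactToBig(header.Bits)

	for nonce := uint32(0); nonce < ^uint32(0); nonce++ {
		header.Nonce = nonce
		hash, err := header.BlockSha()
		if err != nil {
			return nil, false
		}

		// The block is solved when its hash, interpreted as a big
		// integer, is no greater than the target difficulty.
		if btcchain.ShaHashToBig(&hash).Cmp(targetDifficulty) <= 0 {
			block := btcutil.NewBlock(msgBlock)
			block.SetHeight(template.height)
			return block, true
		}
	}
	return nil, false
}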
// TestBlock tests the API for Block. func TestBlock(t *testing.T) { b := btcutil.NewBlock(&Block100000) // Ensure we get the same data back out. if msgBlock := b.MsgBlock(); !reflect.DeepEqual(msgBlock, &Block100000) { t.Errorf("MsgBlock: mismatched MsgBlock - got %v, want %v", spew.Sdump(msgBlock), spew.Sdump(&Block100000)) } // Ensure block height set and get work properly. wantHeight := int64(100000) b.SetHeight(wantHeight) if gotHeight := b.Height(); gotHeight != wantHeight { t.Errorf("Height: mismatched height - got %v, want %v", gotHeight, wantHeight) } // Hash for block 100,000. wantShaStr := "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" wantSha, err := btcwire.NewShaHashFromStr(wantShaStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Request the sha multiple times to test generation and caching. for i := 0; i < 2; i++ { sha, err := b.Sha() if err != nil { t.Errorf("Sha: %v", err) continue } if !sha.IsEqual(wantSha) { t.Errorf("Sha #%d mismatched sha - got %v, want %v", i, sha, wantSha) } } // Shas for the transactions in Block100000. wantTxShas := []string{ "8c14f0db3df150123e6f3dbbf30f8b955a8249b62ac1d1ff16284aefa3d06d87", "fff2525b8931402dd09222c50775608f75787bd2b87e56995a7bdd30f79702c4", "6359f0868171b1d194cbee1af2f16ea598ae8fad666d9b012c8ed2b79a236ec4", "e9a66845e05d5abc0ad04ec80f774a7e585c6e8db975962d069a522137b80c1d", } // Request sha for all transactions one at a time via TxSha. for i, txSha := range wantTxShas { wantSha, err := btcwire.NewShaHashFromStr(txSha) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Request the sha multiple times to test generation and caching. for j := 0; j < 2; j++ { sha, err := b.TxSha(i) if err != nil { t.Errorf("TxSha: %v", err) continue } if !sha.IsEqual(wantSha) { t.Errorf("TxSha #%d mismatched sha - got %v, "+ "want %v", j, sha, wantSha) continue } } } // Create a new block to nuke all cached data. b = btcutil.NewBlock(&Block100000) // Request sha for all transactions one at a time via Tx. for i, txSha := range wantTxShas { wantSha, err := btcwire.NewShaHashFromStr(txSha) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Request the sha multiple times to test generation and caching. for j := 0; j < 2; j++ { tx, err := b.Tx(i) if err != nil { t.Errorf("Tx #%d: %v", i, err) continue } sha := tx.Sha() if !sha.IsEqual(wantSha) { t.Errorf("Sha #%d mismatched sha - got %v, "+ "want %v", j, sha, wantSha) continue } } } // Create a new block to nuke all cached data. b = btcutil.NewBlock(&Block100000) // Request slice of all transaction shas multiple times to test // generation and caching. for i := 0; i < 2; i++ { txShas, err := b.TxShas() if err != nil { t.Errorf("TxShas: %v", err) continue } // Ensure we get the expected number of transaction shas. if len(txShas) != len(wantTxShas) { t.Errorf("TxShas #%d mismatched number of shas -"+ "got %d, want %d", i, len(txShas), len(wantTxShas)) continue } // Ensure all of the shas match. for j, txSha := range wantTxShas { wantSha, err := btcwire.NewShaHashFromStr(txSha) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } if !txShas[j].IsEqual(wantSha) { t.Errorf("TxShas #%d mismatched shas - "+ "got %v, want %v", j, spew.Sdump(txShas), spew.Sdump(wantTxShas)) continue } } } // Create a new block to nuke all cached data. b = btcutil.NewBlock(&Block100000) // Request slice of all transactions multiple times to test generation // and caching. for i := 0; i < 2; i++ { transactions := b.Transactions() // Ensure we get the expected number of transactions. 
if len(transactions) != len(wantTxShas) { t.Errorf("Transactions #%d mismatched number of "+ "transactions - got %d, want %d", i, len(transactions), len(wantTxShas)) continue } // Ensure all of the shas match. for j, tx := range transactions { wantSha, err := btcwire.NewShaHashFromStr(wantTxShas[j]) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } sha := tx.Sha() if !sha.IsEqual(wantSha) { t.Errorf("Transactions #%d mismatched shas - "+ "got %v, want %v", j, sha, wantSha) continue } } } // Serialize the test block. var block100000Buf bytes.Buffer err = Block100000.Serialize(&block100000Buf) if err != nil { t.Errorf("Serialize: %v", err) } block100000Bytes := block100000Buf.Bytes() // Request serialized bytes multiple times to test generation and // caching. for i := 0; i < 2; i++ { serializedBytes, err := b.Bytes() if err != nil { t.Errorf("Bytes: %v", err) continue } if !bytes.Equal(serializedBytes, block100000Bytes) { t.Errorf("Bytes #%d wrong bytes - got %v, want %v", i, spew.Sdump(serializedBytes), spew.Sdump(block100000Bytes)) continue } } // Transaction offsets and length for the transaction in Block100000. wantTxLocs := []btcwire.TxLoc{ btcwire.TxLoc{TxStart: 81, TxLen: 135}, btcwire.TxLoc{TxStart: 216, TxLen: 259}, btcwire.TxLoc{TxStart: 475, TxLen: 257}, btcwire.TxLoc{TxStart: 732, TxLen: 225}, } // Ensure the transaction location information is accurate. txLocs, err := b.TxLoc() if err != nil { t.Errorf("TxLoc: %v", err) return } if !reflect.DeepEqual(txLocs, wantTxLocs) { t.Errorf("TxLoc: mismatched transaction location information "+ "- got %v, want %v", spew.Sdump(txLocs), spew.Sdump(wantTxLocs)) } }
// chainSetup is used to create a new db and chain instance with the genesis // block already inserted. In addition to the new chain instnce, it returns // a teardown function the caller should invoke when done testing to clean up. func chainSetup(dbName string) (*btcchain.BlockChain, func(), error) { if !isSupportedDbType(testDbType) { return nil, nil, fmt.Errorf("unsupported db type %v", testDbType) } // Handle memory database specially since it doesn't need the disk // specific handling. var db btcdb.Db var teardown func() if testDbType == "memdb" { ndb, err := btcdb.CreateDB(testDbType) if err != nil { return nil, nil, fmt.Errorf("error creating db: %v", err) } db = ndb // Setup a teardown function for cleaning up. This function is // returned to the caller to be invoked when it is done testing. teardown = func() { db.Close() } } else { // Create the root directory for test databases. if !fileExists(testDbRoot) { if err := os.MkdirAll(testDbRoot, 0700); err != nil { err := fmt.Errorf("unable to create test db "+ "root: %v", err) return nil, nil, err } } // Create a new database to store the accepted blocks into. dbPath := filepath.Join(testDbRoot, dbName) _ = os.RemoveAll(dbPath) ndb, err := btcdb.CreateDB(testDbType, dbPath) if err != nil { return nil, nil, fmt.Errorf("error creating db: %v", err) } db = ndb // Setup a teardown function for cleaning up. This function is // returned to the caller to be invoked when it is done testing. teardown = func() { dbVersionPath := filepath.Join(testDbRoot, dbName+".ver") db.Sync() db.Close() os.RemoveAll(dbPath) os.Remove(dbVersionPath) os.RemoveAll(testDbRoot) } } // Insert the main network genesis block. This is part of the initial // database setup. genesisBlock := btcutil.NewBlock(&btcwire.GenesisBlock) _, err := db.InsertBlock(genesisBlock) if err != nil { teardown() err := fmt.Errorf("failed to insert genesis block: %v", err) return nil, nil, err } chain := btcchain.New(db, btcwire.MainNet, nil) return chain, teardown, nil }
// TestHaveBlock tests the HaveBlock API to ensure proper functionality. func TestHaveBlock(t *testing.T) { // Load up blocks such that there is a side chain. // (genesis block) -> 1 -> 2 -> 3 -> 4 // \-> 3a testFiles := []string{ "blk_0_to_4.dat.bz2", "blk_3A.dat.bz2", } var blocks []*btcutil.Block for _, file := range testFiles { blockTmp, err := loadBlocks(file) if err != nil { t.Errorf("Error loading file: %v\n", err) return } for _, block := range blockTmp { blocks = append(blocks, block) } } // Create a new database and chain instance to run tests against. chain, teardownFunc, err := chainSetup("haveblock") if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return } defer teardownFunc() // Since we're not dealing with the real block chain, disable // checkpoints and set the coinbase maturity to 1. chain.DisableCheckpoints(true) btcchain.TstSetCoinbaseMaturity(1) for i := 1; i < len(blocks); i++ { isOrphan, err := chain.ProcessBlock(blocks[i], btcchain.BFNone) if err != nil { t.Errorf("ProcessBlock fail on block %v: %v\n", i, err) return } if isOrphan { t.Errorf("ProcessBlock incorrectly returned block %v "+ "is an orphan\n", i) return } } // Insert an orphan block. isOrphan, err := chain.ProcessBlock(btcutil.NewBlock(&Block100000), btcchain.BFNone) if err != nil { t.Errorf("Unable to process block: %v", err) return } if !isOrphan { t.Errorf("ProcessBlock indicated block is not an orphan when " + "it should be\n") return } tests := []struct { hash string want bool }{ // Genesis block should be present (in the main chain). {hash: btcnet.MainNetParams.GenesisHash.String(), want: true}, // Block 3a should be present (on a side chain). {hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true}, // Block 100000 should be present (as an orphan). {hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true}, // Random hashes should not be available. {hash: "123", want: false}, } for i, test := range tests { hash, err := btcwire.NewShaHashFromStr(test.hash) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) continue } result, err := chain.HaveBlock(hash) if err != nil { t.Errorf("HaveBlock #%d unexpected error: %v", i, err) return } if result != test.want { t.Errorf("HaveBlock #%d got %v want %v", i, result, test.want) continue } } }
func loadBlocks(t *testing.T, file string) (blocks []*btcutil.Block, err error) { if len(savedblocks) != 0 { blocks = savedblocks return } // Use the test data file supplied by the caller. testdatafile := file var dr io.Reader var fi io.ReadCloser fi, err = os.Open(testdatafile) if err != nil { t.Errorf("failed to open file %v, err %v", testdatafile, err) return } if strings.HasSuffix(testdatafile, ".bz2") { z := bzip2.NewReader(fi) dr = z } else { dr = fi } defer func() { if err := fi.Close(); err != nil { t.Errorf("failed to close file %v %v", testdatafile, err) } }() // Set the first block as the genesis block. genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock) blocks = append(blocks, genesis) var block *btcutil.Block err = nil for height := int64(1); err == nil; height++ { var rintbuf uint32 err = binary.Read(dr, binary.LittleEndian, &rintbuf) if err == io.EOF { // hit end of file at expected offset: no warning height-- err = nil break } if err != nil { t.Errorf("failed to load network type, err %v", err) break } if rintbuf != uint32(network) { t.Errorf("Block doesn't match network: %v expects %v", rintbuf, network) break } err = binary.Read(dr, binary.LittleEndian, &rintbuf) blocklen := rintbuf rbytes := make([]byte, blocklen) // read block dr.Read(rbytes) block, err = btcutil.NewBlockFromBytes(rbytes) if err != nil { t.Errorf("failed to parse block %v", height) return } blocks = append(blocks, block) } savedblocks = blocks return }
// loadBlocks loads the blocks contained in the testdata directory and returns // a slice of them. func loadBlocks(t *testing.T) ([]*btcutil.Block, error) { if len(savedBlocks) != 0 { return savedBlocks, nil } var dr io.Reader fi, err := os.Open(blockDataFile) if err != nil { t.Errorf("failed to open file %v, err %v", blockDataFile, err) return nil, err } if strings.HasSuffix(blockDataFile, ".bz2") { z := bzip2.NewReader(fi) dr = z } else { dr = fi } defer func() { if err := fi.Close(); err != nil { t.Errorf("failed to close file %v %v", blockDataFile, err) } }() // Set the first block as the genesis block. blocks := make([]*btcutil.Block, 0, 256) genesis := btcutil.NewBlock(&btcwire.GenesisBlock) blocks = append(blocks, genesis) for height := int64(1); err == nil; height++ { var rintbuf uint32 err := binary.Read(dr, binary.LittleEndian, &rintbuf) if err == io.EOF { // hit end of file at expected offset: no warning height-- err = nil break } if err != nil { t.Errorf("failed to load network type, err %v", err) break } if rintbuf != uint32(network) { t.Errorf("Block doesn't match network: %v expects %v", rintbuf, network) break } err = binary.Read(dr, binary.LittleEndian, &rintbuf) blocklen := rintbuf rbytes := make([]byte, blocklen) // read block dr.Read(rbytes) block, err := btcutil.NewBlockFromBytes(rbytes) if err != nil { t.Errorf("failed to parse block %v", height) return nil, err } blocks = append(blocks, block) } savedBlocks = blocks return blocks, nil }
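The loaders above imply the layout of the test data files: one record per block (excluding genesis, which the loaders prepend themselves), each record being a little-endian uint32 network magic, a little-endian uint32 length, and the serialized block. A sketch of a writer for that format, useful when regenerating test data, might be:

// writeTestBlockFile writes blocks in the record format consumed by the
// loadBlocks helpers above. The genesis block should be omitted by the
// caller since the loaders add it themselves.
func writeTestBlockFile(w io.Writer, net btcwire.BitcoinNet, blocks []*btcutil.Block) error {
	for _, block := range blocks {
		blockBytes, err := block.Bytes()
		if err != nil {
			return err
		}
		if err := binary.Write(w, binary.LittleEndian, uint32(net)); err != nil {
			return err
		}
		if err := binary.Write(w, binary.LittleEndian, uint32(len(blockBytes))); err != nil {
			return err
		}
		if _, err := w.Write(blockBytes); err != nil {
			return err
		}
	}
	return nil
}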
func Test_dupTx(t *testing.T) { // Ignore db remove errors since it means we didn't have an old one. dbname := fmt.Sprintf("tstdbdup0") dbnamever := dbname + ".ver" _ = os.RemoveAll(dbname) _ = os.RemoveAll(dbnamever) db, err := btcdb.CreateDB("leveldb", dbname) if err != nil { t.Errorf("Failed to open test database %v", err) return } defer os.RemoveAll(dbname) defer os.RemoveAll(dbnamever) defer db.Close() testdatafile := filepath.Join("testdata", "blocks1-256.bz2") blocks, err := loadBlocks(t, testdatafile) if err != nil { t.Errorf("Unable to load blocks from test data for: %v", err) return } var lastSha *btcwire.ShaHash // Populate with the fisrt 256 blocks, so we have blocks to 'mess with' err = nil out: for height := int64(0); height < int64(len(blocks)); height++ { block := blocks[height] // except for NoVerify which does not allow lookups check inputs mblock := block.MsgBlock() var txneededList []*btcwire.ShaHash for _, tx := range mblock.Transactions { for _, txin := range tx.TxIn { if txin.PreviousOutpoint.Index == uint32(4294967295) { continue } origintxsha := &txin.PreviousOutpoint.Hash txneededList = append(txneededList, origintxsha) if !db.ExistsTxSha(origintxsha) { t.Errorf("referenced tx not found %v ", origintxsha) } _, err = db.FetchTxBySha(origintxsha) if err != nil { t.Errorf("referenced tx not found %v err %v ", origintxsha, err) } } } txlist := db.FetchUnSpentTxByShaList(txneededList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) break out } } newheight, err := db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) break out } if newheight != height { t.Errorf("height mismatch expect %v returned %v", height, newheight) break out } newSha, blkid, err := db.NewestSha() if err != nil { t.Errorf("failed to obtain latest sha %v %v", height, err) } if blkid != height { t.Errorf("height doe not match latest block height %v %v %v", blkid, height, err) } blkSha, _ := block.Sha() if *newSha != *blkSha { t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err) } lastSha = blkSha } // genrate a new block based on the last sha // these block are not verified, so there are a bunch of garbage fields // in the 'generated' block. var bh btcwire.BlockHeader bh.Version = 2 bh.PrevBlock = *lastSha // Bits, Nonce are not filled in mblk := btcwire.NewMsgBlock(&bh) hash, _ := btcwire.NewShaHashFromStr("df2b060fa2e5e9c8ed5eaf6a45c13753ec8c63282b2688322eba40cd98ea067a") po := btcwire.NewOutPoint(hash, 0) txI := btcwire.NewTxIn(po, []byte("garbage")) txO := btcwire.NewTxOut(50000000, []byte("garbageout")) var tx btcwire.MsgTx tx.AddTxIn(txI) tx.AddTxOut(txO) mblk.AddTransaction(&tx) blk := btcutil.NewBlock(mblk) fetchList := []*btcwire.ShaHash{hash} listReply := db.FetchUnSpentTxByShaList(fetchList) for _, lr := range listReply { if lr.Err != nil { t.Errorf("sha %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } } _, err = db.InsertBlock(blk) if err != nil { t.Errorf("failed to insert phony block %v", err) } // ok, did it 'spend' the tx ? 
listReply = db.FetchUnSpentTxByShaList(fetchList) for _, lr := range listReply { if lr.Err != btcdb.TxShaMissing { t.Errorf("sha %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } } txshalist, _ := blk.TxShas() for _, txsha := range txshalist { txReply, err := db.FetchTxBySha(txsha) if err != nil { t.Errorf("fully spent lookup %v err %v\n", txsha, err) } else { for _, lr := range txReply { if lr.Err != nil { t.Errorf("stx %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } } } } t.Logf("Dropping block") err = db.DropAfterBlockBySha(lastSha) if err != nil { t.Errorf("failed to drop spending block %v", err) } }
// TestClosed ensure calling the interface functions on a closed database // returns appropriate errors for the interface functions that return errors // and does not panic or otherwise misbehave for functions which do not return // errors. func TestClosed(t *testing.T) { db, err := btcdb.CreateDB("memdb") if err != nil { t.Errorf("Failed to open test database %v", err) return } _, err = db.InsertBlock(btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)) if err != nil { t.Errorf("InsertBlock: %v", err) } if err := db.Close(); err != nil { t.Errorf("Close: unexpected error %v", err) } genesisHash := btcnet.MainNetParams.GenesisHash if err := db.DropAfterBlockBySha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("DropAfterBlockBySha: unexpected error %v", err) } if _, err := db.ExistsSha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("ExistsSha: Unexpected error: %v", err) } if _, err := db.FetchBlockBySha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("FetchBlockBySha: unexpected error %v", err) } if _, err := db.FetchBlockShaByHeight(0); err != memdb.ErrDbClosed { t.Errorf("FetchBlockShaByHeight: unexpected error %v", err) } if _, err := db.FetchHeightRange(0, 1); err != memdb.ErrDbClosed { t.Errorf("FetchHeightRange: unexpected error %v", err) } genesisCoinbaseTx := btcnet.MainNetParams.GenesisBlock.Transactions[0] coinbaseHash, err := genesisCoinbaseTx.TxSha() if err != nil { t.Errorf("TxSha: unexpected error %v", err) } if _, err := db.ExistsTxSha(&coinbaseHash); err != memdb.ErrDbClosed { t.Errorf("ExistsTxSha: unexpected error %v", err) } if _, err := db.FetchTxBySha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("FetchTxBySha: unexpected error %v", err) } requestHashes := []*btcwire.ShaHash{genesisHash} reply := db.FetchTxByShaList(requestHashes) if len(reply) != len(requestHashes) { t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+ "got: %d, want: %d", len(reply), len(requestHashes)) } for i, txLR := range reply { wantReply := &btcdb.TxListReply{ Sha: requestHashes[i], Err: memdb.ErrDbClosed, } if !reflect.DeepEqual(wantReply, txLR) { t.Errorf("FetchTxByShaList unexpected reply\ngot: %v\n"+ "want: %v", txLR, wantReply) } } reply = db.FetchUnSpentTxByShaList(requestHashes) if len(reply) != len(requestHashes) { t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+ "got: %d, want: %d", len(reply), len(requestHashes)) } for i, txLR := range reply { wantReply := &btcdb.TxListReply{ Sha: requestHashes[i], Err: memdb.ErrDbClosed, } if !reflect.DeepEqual(wantReply, txLR) { t.Errorf("FetchUnSpentTxByShaList unexpected reply\n"+ "got: %v\nwant: %v", txLR, wantReply) } } if _, _, err := db.NewestSha(); err != memdb.ErrDbClosed { t.Errorf("NewestSha: unexpected error %v", err) } if err := db.Sync(); err != memdb.ErrDbClosed { t.Errorf("Sync: unexpected error %v", err) } if err := db.RollbackClose(); err != memdb.ErrDbClosed { t.Errorf("RollbackClose: unexpected error %v", err) } if err := db.Close(); err != memdb.ErrDbClosed { t.Errorf("Close: unexpected error %v", err) } }