// Receive waits for the response promised by the future and returns the raw
// block requested from the server given its hash.
func (r FutureGetBlockResult) Receive() (*wire.MsgBlock, error) {
	res, err := receiveFuture(r)
	if err != nil {
		return nil, err
	}

	// Unmarshal result as a string.
	var blockHex string
	err = json.Unmarshal(res, &blockHex)
	if err != nil {
		return nil, err
	}

	// Decode the serialized block hex to raw bytes.
	serializedBlock, err := hex.DecodeString(blockHex)
	if err != nil {
		return nil, err
	}

	// Deserialize the block and return it.
	var msgBlock wire.MsgBlock
	err = msgBlock.Deserialize(bytes.NewReader(serializedBlock))
	if err != nil {
		return nil, err
	}
	return &msgBlock, nil
}
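// exampleGetBlock is an illustrative sketch (not part of the original source)
// showing how the future above is typically used: obtain it from the async
// variant of the block RPC and then call Receive.  The client parameter, the
// GetBlockAsync call, and the hash type are assumptions about the surrounding
// RPC client API rather than confirmed signatures.
func exampleGetBlock(client *Client, hash *wire.ShaHash) (*wire.MsgBlock, error) {
	// Issue the request asynchronously and block until the reply arrives.
	future := client.GetBlockAsync(hash)
	return future.Receive()
}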
// createBlock creates a new block building from the previous block.
func createBlock(prevBlock *btcutil.Block, inclusionTxs []*btcutil.Tx,
	blockVersion int32, blockTime time.Time,
	miningAddr btcutil.Address, net *chaincfg.Params) (*btcutil.Block, error) {

	prevHash := prevBlock.Hash()
	blockHeight := prevBlock.Height() + 1

	// If a target block time was specified, then use that as the header's
	// timestamp.  Otherwise, add one second to the previous block's
	// timestamp.
	var ts time.Time
	switch {
	case !blockTime.IsZero():
		ts = blockTime

	default:
		ts = prevBlock.MsgBlock().Header.Timestamp.Add(time.Second)
	}

	extraNonce := uint64(0)
	coinbaseScript, err := standardCoinbaseScript(blockHeight, extraNonce)
	if err != nil {
		return nil, err
	}
	coinbaseTx, err := createCoinbaseTx(coinbaseScript, blockHeight,
		miningAddr, net)
	if err != nil {
		return nil, err
	}

	// Create a new block ready to be solved.
	blockTxns := []*btcutil.Tx{coinbaseTx}
	if inclusionTxs != nil {
		blockTxns = append(blockTxns, inclusionTxs...)
	}
	merkles := blockchain.BuildMerkleTreeStore(blockTxns)
	var block wire.MsgBlock
	block.Header = wire.BlockHeader{
		Version:    blockVersion,
		PrevBlock:  *prevHash,
		MerkleRoot: *merkles[len(merkles)-1],
		Timestamp:  ts,
		Bits:       net.PowLimitBits,
	}
	for _, tx := range blockTxns {
		if err := block.AddTransaction(tx.MsgTx()); err != nil {
			return nil, err
		}
	}

	found := solveBlock(&block.Header, net.PowLimit)
	if !found {
		return nil, errors.New("unable to solve block")
	}

	utilBlock := btcutil.NewBlock(&block)
	utilBlock.SetHeight(blockHeight)
	return utilBlock, nil
}
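// exampleExtendChain is an illustrative sketch (not part of the original
// source) of the intended call pattern for createBlock: build one block on
// top of a known tip with no extra transactions, passing a zero time.Time so
// the header timestamp defaults to the tip's timestamp plus one second.  The
// tip and addr values are assumed to be supplied by the caller; blockVersion 2
// is an arbitrary choice for the sketch.
func exampleExtendChain(tip *btcutil.Block, addr btcutil.Address,
	net *chaincfg.Params) (*btcutil.Block, error) {

	return createBlock(tip, nil, 2, time.Time{}, addr, net)
}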
// TestBlockWireErrors performs negative tests against wire encode and decode
// of MsgBlock to confirm error paths work correctly.
func TestBlockWireErrors(t *testing.T) {
	// Use protocol version 60002 specifically here instead of the latest
	// because the test data is using bytes encoded with that protocol
	// version.
	pver := uint32(60002)

	tests := []struct {
		in       *wire.MsgBlock // Value to encode
		buf      []byte         // Wire encoding
		pver     uint32         // Protocol version for wire encoding
		max      int            // Max size of fixed buffer to induce errors
		writeErr error          // Expected write error
		readErr  error          // Expected read error
	}{
		// Force error in version.
		{&blockOne, blockOneBytes, pver, 0, io.ErrShortWrite, io.EOF},
		// Force error in prev block hash.
		{&blockOne, blockOneBytes, pver, 4, io.ErrShortWrite, io.EOF},
		// Force error in merkle root.
		{&blockOne, blockOneBytes, pver, 36, io.ErrShortWrite, io.EOF},
		// Force error in timestamp.
		{&blockOne, blockOneBytes, pver, 68, io.ErrShortWrite, io.EOF},
		// Force error in difficulty bits.
		{&blockOne, blockOneBytes, pver, 72, io.ErrShortWrite, io.EOF},
		// Force error in header nonce.
		{&blockOne, blockOneBytes, pver, 76, io.ErrShortWrite, io.EOF},
		// Force error in transaction count.
		{&blockOne, blockOneBytes, pver, 80, io.ErrShortWrite, io.EOF},
		// Force error in transactions.
		{&blockOne, blockOneBytes, pver, 81, io.ErrShortWrite, io.EOF},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode to wire format.
		w := newFixedWriter(test.max)
		err := test.in.BtcEncode(w, test.pver)
		if err != test.writeErr {
			t.Errorf("BtcEncode #%d wrong error got: %v, want: %v",
				i, err, test.writeErr)
			continue
		}

		// Decode from wire format.
		var msg wire.MsgBlock
		r := newFixedReader(test.max, test.buf)
		err = msg.BtcDecode(r, test.pver)
		if err != test.readErr {
			t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}
	}
}
// NewBlockFromReader returns a new instance of a bitcoin block given a
// Reader to deserialize the block.  See Block.
func NewBlockFromReader(r io.Reader) (*Block, error) {
	// Deserialize the bytes into a MsgBlock.
	var msgBlock wire.MsgBlock
	err := msgBlock.Deserialize(r)
	if err != nil {
		return nil, err
	}

	b := Block{
		msgBlock:    &msgBlock,
		blockHeight: BlockHeightUnknown,
	}
	return &b, nil
}
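// exampleBlockFromBytes is an illustrative sketch (not part of the original
// source): callers that already hold a fully serialized block can wrap it in
// a bytes.Reader and let NewBlockFromReader handle the deserialization.  The
// serializedBlock argument is assumed to contain a valid serialized block.
func exampleBlockFromBytes(serializedBlock []byte) (*Block, error) {
	return NewBlockFromReader(bytes.NewReader(serializedBlock))
}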
// TxLoc returns the offsets and lengths of each transaction in a raw block.
// It is used to allow fast indexing into transactions within the raw byte
// stream.
func (b *Block) TxLoc() ([]wire.TxLoc, error) {
	rawMsg, err := b.Bytes()
	if err != nil {
		return nil, err
	}
	rbuf := bytes.NewBuffer(rawMsg)

	var mblock wire.MsgBlock
	txLocs, err := mblock.DeserializeTxLoc(rbuf)
	if err != nil {
		return nil, err
	}
	return txLocs, err
}
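// exampleRawTxBytes is an illustrative sketch (not part of the original
// source) of what the returned offsets are for: slicing the serialized bytes
// of a single transaction directly out of the raw block without
// re-serializing it.  It assumes txIndex is a valid index into the block's
// transactions.
func exampleRawTxBytes(b *Block, txIndex int) ([]byte, error) {
	rawBlock, err := b.Bytes()
	if err != nil {
		return nil, err
	}
	txLocs, err := b.TxLoc()
	if err != nil {
		return nil, err
	}

	// Each wire.TxLoc records the start offset and serialized length of
	// one transaction within the raw block bytes.
	loc := txLocs[txIndex]
	return rawBlock[loc.TxStart : loc.TxStart+loc.TxLen], nil
}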
// NewBlockRecord creates a new block record from the passed block message and
// the remote and local addresses of the connection it was observed on, filling
// in one details record per transaction in the block.
func NewBlockRecord(msg *wire.MsgBlock, ra *net.TCPAddr, la *net.TCPAddr) *BlockRecord {
	record := &BlockRecord{
		Record: Record{
			stamp: time.Now(),
			ra:    ra,
			la:    la,
			cmd:   msg.Command(),
		},
		hdr:     NewHeaderRecord(&msg.Header),
		details: make([]*DetailsRecord, len(msg.Transactions)),
	}

	for i, tx := range msg.Transactions {
		record.details[i] = NewDetailsRecord(tx)
	}

	return record
}
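// exampleRecordBlock is an illustrative sketch (not part of the original
// source) of where NewBlockRecord fits: when a block message is observed on a
// connection, build a record from the message and the connection's remote and
// local TCP addresses.  The conn parameter and the address type assertions
// are assumptions for the sketch.
func exampleRecordBlock(msg *wire.MsgBlock, conn net.Conn) *BlockRecord {
	ra, _ := conn.RemoteAddr().(*net.TCPAddr)
	la, _ := conn.LocalAddr().(*net.TCPAddr)
	return NewBlockRecord(msg, ra, la)
}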
// TestBlockSerializeErrors performs negative tests against wire encode and
// decode of MsgBlock to confirm error paths work correctly.
func TestBlockSerializeErrors(t *testing.T) {
	tests := []struct {
		in       *wire.MsgBlock // Value to encode
		buf      []byte         // Serialized data
		max      int            // Max size of fixed buffer to induce errors
		writeErr error          // Expected write error
		readErr  error          // Expected read error
	}{
		// Force error in version.
		{&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF},
		// Force error in prev block hash.
		{&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF},
		// Force error in merkle root.
		{&blockOne, blockOneBytes, 36, io.ErrShortWrite, io.EOF},
		// Force error in timestamp.
		{&blockOne, blockOneBytes, 68, io.ErrShortWrite, io.EOF},
		// Force error in difficulty bits.
		{&blockOne, blockOneBytes, 72, io.ErrShortWrite, io.EOF},
		// Force error in header nonce.
		{&blockOne, blockOneBytes, 76, io.ErrShortWrite, io.EOF},
		// Force error in transaction count.
		{&blockOne, blockOneBytes, 80, io.ErrShortWrite, io.EOF},
		// Force error in transactions.
		{&blockOne, blockOneBytes, 81, io.ErrShortWrite, io.EOF},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Serialize the block.
		w := newFixedWriter(test.max)
		err := test.in.Serialize(w)
		if err != test.writeErr {
			t.Errorf("Serialize #%d wrong error got: %v, want: %v",
				i, err, test.writeErr)
			continue
		}

		// Deserialize the block.
		var block wire.MsgBlock
		r := newFixedReader(test.max, test.buf)
		err = block.Deserialize(r)
		if err != test.readErr {
			t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}

		var txLocBlock wire.MsgBlock
		br := bytes.NewBuffer(test.buf[0:test.max])
		_, err = txLocBlock.DeserializeTxLoc(br)
		if err != test.readErr {
			t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+
				"want: %v", i, err, test.readErr)
			continue
		}
	}
}
// initChainState attempts to load and initialize the chain state from the
// database.  When the db does not yet contain any chain state, both it and the
// chain state are initialized to the genesis block.
func (b *BlockChain) initChainState() error {
	// Attempt to load the chain state from the database.
	var isStateInitialized bool
	err := b.db.View(func(dbTx database.Tx) error {
		// Fetch the stored chain state from the database metadata.
		// When it doesn't exist, it means the database hasn't been
		// initialized for use with chain yet, so break out now to allow
		// that to happen under a writable database transaction.
		serializedData := dbTx.Metadata().Get(chainStateKeyName)
		if serializedData == nil {
			return nil
		}
		log.Tracef("Serialized chain state: %x", serializedData)
		state, err := deserializeBestChainState(serializedData)
		if err != nil {
			return err
		}

		// Load the raw block bytes for the best block.
		blockBytes, err := dbTx.FetchBlock(&state.hash)
		if err != nil {
			return err
		}
		var block wire.MsgBlock
		err = block.Deserialize(bytes.NewReader(blockBytes))
		if err != nil {
			return err
		}

		// Create a new node and set it as the best node.  The preceding
		// nodes will be loaded on demand as needed.
		header := &block.Header
		node := newBlockNode(header, &state.hash, int32(state.height))
		node.inMainChain = true
		node.workSum = state.workSum
		b.bestNode = node

		// Add the new node to the indices for faster lookups.
		prevHash := node.parentHash
		b.index[*node.hash] = node
		b.depNodes[*prevHash] = append(b.depNodes[*prevHash], node)

		// Initialize the state related to the best block.
		blockSize := uint64(len(blockBytes))
		numTxns := uint64(len(block.Transactions))
		b.stateSnapshot = newBestState(b.bestNode, blockSize, numTxns,
			state.totalTxns)

		isStateInitialized = true
		return nil
	})
	if err != nil {
		return err
	}

	// There is nothing more to do if the chain state was initialized.
	if isStateInitialized {
		return nil
	}

	// At this point the database has not already been initialized, so
	// initialize both it and the chain state to the genesis block.
	return b.createChainState()
}
// NewBlockTemplate returns a new block template that is ready to be solved
// using the transactions from the passed transaction memory pool and a coinbase
// that either pays to the passed address if it is not nil, or a coinbase that
// is redeemable by anyone if the passed address is nil.  The nil address
// functionality is useful since there are cases such as the getblocktemplate
// RPC where external mining software is responsible for creating their own
// coinbase which will replace the one generated for the block template.  Thus
// the need to have a configured address can be avoided.
//
// The transactions selected and included are prioritized according to several
// factors.  First, each transaction has a priority calculated based on its
// value, age of inputs, and size.  Transactions which consist of larger
// amounts, older inputs, and small sizes have the highest priority.  Second, a
// fee per kilobyte is calculated for each transaction.  Transactions with a
// higher fee per kilobyte are preferred.  Finally, the block generation related
// configuration options are all taken into account.
//
// Transactions which only spend outputs from other transactions already in the
// block chain are immediately added to a priority queue which either
// prioritizes based on the priority (then fee per kilobyte) or the fee per
// kilobyte (then priority) depending on whether or not the BlockPrioritySize
// configuration option allots space for high-priority transactions.
// Transactions which spend outputs from other transactions in the memory pool
// are added to a dependency map so they can be added to the priority queue once
// the transactions they depend on have been included.
//
// Once the high-priority area (if configured) has been filled with
// transactions, or the priority falls below what is considered high-priority,
// the priority queue is updated to prioritize by fees per kilobyte (then
// priority).
//
// When the fees per kilobyte drop below the TxMinFreeFee configuration option,
// the transaction will be skipped unless there is a BlockMinSize set, in which
// case the block will be filled with the low-fee/free transactions until the
// block size reaches that minimum size.
//
// Any transactions which would cause the block to exceed the BlockMaxSize
// configuration option, exceed the maximum allowed signature operations per
// block, or otherwise cause the block to be invalid are skipped.
//
// Given the above, a block generated by this function is of the following form:
//
//   -----------------------------------  --  --
//  |      Coinbase Transaction         |   |   |
//  |-----------------------------------|   |   |
//  |                                   |   |   | ----- cfg.BlockPrioritySize
//  |   High-priority Transactions      |   |   |
//  |                                   |   |   |
//  |-----------------------------------|   | --
//  |                                   |   |
//  |                                   |   |
//  |                                   |   |--- cfg.BlockMaxSize
//  |  Transactions prioritized by fee  |   |
//  |  until <= cfg.TxMinFreeFee        |   |
//  |                                   |   |
//  |                                   |   |
//  |                                   |   |
//  |-----------------------------------|   |
//  | Low-fee/Non high-priority (free)  |   |
//  | transactions (while block size    |   |
//  |  <= cfg.BlockMinSize)             |   |
//   -----------------------------------  --
func NewBlockTemplate(server *server, payToAddress btcutil.Address) (*BlockTemplate, error) {
	blockManager := server.blockManager
	timeSource := server.timeSource
	chainState := &blockManager.chainState

	// Extend the most recently known best block.
	chainState.Lock()
	prevHash := chainState.newestHash
	nextBlockHeight := chainState.newestHeight + 1
	chainState.Unlock()

	// Create a standard coinbase transaction paying to the provided
	// address.  NOTE: The coinbase value will be updated to include the
	// fees from the selected transactions later after they have actually
	// been selected.  It is created here to detect any errors early
	// before potentially doing a lot of work below.  The extra nonce helps
	// ensure the transaction is not a duplicate transaction (paying the
	// same value to the same public key address would otherwise be an
	// identical transaction for block version 1).
	extraNonce := uint64(0)
	coinbaseScript, err := standardCoinbaseScript(nextBlockHeight, extraNonce)
	if err != nil {
		return nil, err
	}
	coinbaseTx, err := createCoinbaseTx(coinbaseScript, nextBlockHeight,
		payToAddress)
	if err != nil {
		return nil, err
	}
	numCoinbaseSigOps := int64(blockchain.CountSigOps(coinbaseTx))

	// Get the current memory pool transactions and create a priority queue
	// to hold the transactions which are ready for inclusion into a block
	// along with some priority related and fee metadata.  Reserve the same
	// number of items that are in the memory pool for the priority queue.
	// Also, choose the initial sort order for the priority queue based on
	// whether or not there is an area allocated for high-priority
	// transactions.
	mempoolTxns := server.txMemPool.TxDescs()
	sortedByFee := cfg.BlockPrioritySize == 0
	priorityQueue := newTxPriorityQueue(len(mempoolTxns), sortedByFee)

	// Create a slice to hold the transactions to be included in the
	// generated block with reserved space.  Also create a transaction
	// store to house all of the input transactions so multiple lookups
	// can be avoided.
	blockTxns := make([]*btcutil.Tx, 0, len(mempoolTxns))
	blockTxns = append(blockTxns, coinbaseTx)
	blockTxStore := make(blockchain.TxStore)

	// dependers is used to track transactions which depend on another
	// transaction in the memory pool.  This, in conjunction with the
	// dependsOn map kept with each dependent transaction helps quickly
	// determine which dependent transactions are now eligible for inclusion
	// in the block once each transaction has been included.
	dependers := make(map[wire.ShaHash]*list.List)

	// Create slices to hold the fees and number of signature operations
	// for each of the selected transactions and add an entry for the
	// coinbase.  This allows the code below to simply append details about
	// a transaction as it is selected for inclusion in the final block.
	// However, since the total fees aren't known yet, use a dummy value for
	// the coinbase fee which will be updated later.
	txFees := make([]int64, 0, len(mempoolTxns))
	txSigOpCounts := make([]int64, 0, len(mempoolTxns))
	txFees = append(txFees, -1) // Updated once known
	txSigOpCounts = append(txSigOpCounts, numCoinbaseSigOps)

	minrLog.Debugf("Considering %d mempool transactions for inclusion to "+
		"new block", len(mempoolTxns))

mempoolLoop:
	for _, txDesc := range mempoolTxns {
		// A block can't have more than one coinbase or contain
		// non-finalized transactions.
		tx := txDesc.Tx
		if blockchain.IsCoinBase(tx) {
			minrLog.Tracef("Skipping coinbase tx %s", tx.Sha())
			continue
		}
		if !blockchain.IsFinalizedTransaction(tx, nextBlockHeight,
			timeSource.AdjustedTime()) {

			minrLog.Tracef("Skipping non-finalized tx %s", tx.Sha())
			continue
		}

		// Fetch all of the transactions referenced by the inputs to
		// this transaction.  NOTE: This intentionally does not fetch
		// inputs from the mempool since a transaction which depends on
		// other transactions in the mempool must come after those
		// dependencies in the final generated block.
		txStore, err := blockManager.FetchTransactionStore(tx)
		if err != nil {
			minrLog.Warnf("Unable to fetch transaction store for "+
				"tx %s: %v", tx.Sha(), err)
			continue
		}

		// Setup dependencies for any transactions which reference
		// other transactions in the mempool so they can be properly
		// ordered below.
		prioItem := &txPrioItem{tx: txDesc.Tx}
		for _, txIn := range tx.MsgTx().TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			txData, exists := txStore[*originHash]
			if !exists || txData.Err != nil || txData.Tx == nil {
				if !server.txMemPool.HaveTransaction(originHash) {
					minrLog.Tracef("Skipping tx %s because "+
						"it references tx %s which is "+
						"not available", tx.Sha(),
						originHash)
					continue mempoolLoop
				}

				// The transaction is referencing another
				// transaction in the memory pool, so setup an
				// ordering dependency.
				depList, exists := dependers[*originHash]
				if !exists {
					depList = list.New()
					dependers[*originHash] = depList
				}
				depList.PushBack(prioItem)
				if prioItem.dependsOn == nil {
					prioItem.dependsOn = make(
						map[wire.ShaHash]struct{})
				}
				prioItem.dependsOn[*originHash] = struct{}{}

				// Skip the check below.  We already know the
				// referenced transaction is available.
				continue
			}

			// Ensure the output index in the referenced transaction
			// is available.
			msgTx := txData.Tx.MsgTx()
			if originIndex > uint32(len(msgTx.TxOut)) {
				minrLog.Tracef("Skipping tx %s because "+
					"it references output %d of tx %s "+
					"which is out of bounds", tx.Sha(),
					originIndex, originHash)
				continue mempoolLoop
			}
		}

		// Calculate the final transaction priority using the input
		// value age sum as well as the adjusted transaction size.  The
		// formula is: sum(inputValue * inputAge) / adjustedTxSize
		prioItem.priority = txDesc.CurrentPriority(txStore, nextBlockHeight)

		// Calculate the fee in Satoshi/KB.
		// NOTE: This is a more precise value than the one calculated
		// during calcMinRelayFee which rounds up to the nearest full
		// kilobyte boundary.  This is beneficial since it provides an
		// incentive to create smaller transactions.
		txSize := tx.MsgTx().SerializeSize()
		prioItem.feePerKB = float64(txDesc.Fee) / (float64(txSize) / 1000)
		prioItem.fee = txDesc.Fee

		// Add the transaction to the priority queue to mark it ready
		// for inclusion in the block unless it has dependencies.
		if prioItem.dependsOn == nil {
			heap.Push(priorityQueue, prioItem)
		}

		// Merge the store which contains all of the input transactions
		// for this transaction into the input transaction store.  This
		// allows the code below to avoid a second lookup.
		mergeTxStore(blockTxStore, txStore)
	}

	minrLog.Tracef("Priority queue len %d, dependers len %d",
		priorityQueue.Len(), len(dependers))

	// The starting block size is the size of the block header plus the max
	// possible transaction count size, plus the size of the coinbase
	// transaction.
	blockSize := blockHeaderOverhead + uint32(coinbaseTx.MsgTx().SerializeSize())
	blockSigOps := numCoinbaseSigOps
	totalFees := int64(0)

	// Choose which transactions make it into the block.
	for priorityQueue.Len() > 0 {
		// Grab the highest priority (or highest fee per kilobyte
		// depending on the sort order) transaction.
		prioItem := heap.Pop(priorityQueue).(*txPrioItem)
		tx := prioItem.tx

		// Grab the list of transactions which depend on this one (if
		// any) and remove the entry for this transaction as it will
		// either be included or skipped, but in either case the deps
		// are no longer needed.
		deps := dependers[*tx.Sha()]
		delete(dependers, *tx.Sha())

		// Enforce maximum block size.  Also check for overflow.
		txSize := uint32(tx.MsgTx().SerializeSize())
		blockPlusTxSize := blockSize + txSize
		if blockPlusTxSize < blockSize || blockPlusTxSize >= cfg.BlockMaxSize {
			minrLog.Tracef("Skipping tx %s because it would exceed "+
				"the max block size", tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}

		// Enforce maximum signature operations per block.  Also check
		// for overflow.
		numSigOps := int64(blockchain.CountSigOps(tx))
		if blockSigOps+numSigOps < blockSigOps ||
			blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock {
			minrLog.Tracef("Skipping tx %s because it would "+
				"exceed the maximum sigops per block", tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}
		numP2SHSigOps, err := blockchain.CountP2SHSigOps(tx, false,
			blockTxStore)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"CountP2SHSigOps: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}
		numSigOps += int64(numP2SHSigOps)
		if blockSigOps+numSigOps < blockSigOps ||
			blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock {
			minrLog.Tracef("Skipping tx %s because it would "+
				"exceed the maximum sigops per block (p2sh)",
				tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}

		// Skip free transactions once the block is larger than the
		// minimum block size.
		if sortedByFee &&
			prioItem.feePerKB < float64(cfg.minRelayTxFee) &&
			blockPlusTxSize >= cfg.BlockMinSize {

			minrLog.Tracef("Skipping tx %s with feePerKB %.2f "+
				"< minTxRelayFee %d and block size %d >= "+
				"minBlockSize %d", tx.Sha(), prioItem.feePerKB,
				cfg.minRelayTxFee, blockPlusTxSize,
				cfg.BlockMinSize)
			logSkippedDeps(tx, deps)
			continue
		}

		// Prioritize by fee per kilobyte once the block is larger than
		// the priority size or there are no more high-priority
		// transactions.
		if !sortedByFee && (blockPlusTxSize >= cfg.BlockPrioritySize ||
			prioItem.priority <= minHighPriority) {

			minrLog.Tracef("Switching to sort by fees per "+
				"kilobyte blockSize %d >= BlockPrioritySize "+
				"%d || priority %.2f <= minHighPriority %.2f",
				blockPlusTxSize, cfg.BlockPrioritySize,
				prioItem.priority, minHighPriority)

			sortedByFee = true
			priorityQueue.SetLessFunc(txPQByFee)

			// Put the transaction back into the priority queue and
			// skip it so it is re-prioritized by fees if it won't
			// fit into the high-priority section or the priority is
			// too low.  Otherwise this transaction will be the
			// final one in the high-priority section, so just fall
			// through to the code below so it is added now.
			if blockPlusTxSize > cfg.BlockPrioritySize ||
				prioItem.priority < minHighPriority {

				heap.Push(priorityQueue, prioItem)
				continue
			}
		}

		// Ensure the transaction inputs pass all of the necessary
		// preconditions before allowing it to be added to the block.
		_, err = blockchain.CheckTransactionInputs(tx, nextBlockHeight,
			blockTxStore)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"CheckTransactionInputs: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}
		err = blockchain.ValidateTransactionScripts(tx, blockTxStore,
			txscript.StandardVerifyFlags, server.sigCache)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"ValidateTransactionScripts: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}

		// Spend the transaction inputs in the block transaction store
		// and add an entry for it to ensure any transactions which
		// reference this one have it available as an input and can
		// ensure they aren't double spending.
		spendTransaction(blockTxStore, tx, nextBlockHeight)

		// Add the transaction to the block, increment counters, and
		// save the fees and signature operation counts to the block
		// template.
		blockTxns = append(blockTxns, tx)
		blockSize += txSize
		blockSigOps += numSigOps
		totalFees += prioItem.fee
		txFees = append(txFees, prioItem.fee)
		txSigOpCounts = append(txSigOpCounts, numSigOps)

		minrLog.Tracef("Adding tx %s (priority %.2f, feePerKB %.2f)",
			prioItem.tx.Sha(), prioItem.priority, prioItem.feePerKB)

		// Add transactions which depend on this one (and also do not
		// have any other unsatisfied dependencies) to the priority
		// queue.
		if deps != nil {
			for e := deps.Front(); e != nil; e = e.Next() {
				// Add the transaction to the priority queue if
				// there are no more dependencies after this
				// one.
				item := e.Value.(*txPrioItem)
				delete(item.dependsOn, *tx.Sha())
				if len(item.dependsOn) == 0 {
					heap.Push(priorityQueue, item)
				}
			}
		}
	}

	// Now that the actual transactions have been selected, update the
	// block size for the real transaction count and coinbase value with
	// the total fees accordingly.
	blockSize -= wire.MaxVarIntPayload -
		uint32(wire.VarIntSerializeSize(uint64(len(blockTxns))))
	coinbaseTx.MsgTx().TxOut[0].Value += totalFees
	txFees[0] = -totalFees

	// Calculate the required difficulty for the block.  The timestamp
	// is potentially adjusted to ensure it comes after the median time of
	// the last several blocks per the chain consensus rules.
	ts, err := medianAdjustedTime(chainState, timeSource)
	if err != nil {
		return nil, err
	}
	requiredDifficulty, err := blockManager.CalcNextRequiredDifficulty(ts)
	if err != nil {
		return nil, err
	}

	// Create a new block ready to be solved.
	merkles := blockchain.BuildMerkleTreeStore(blockTxns)
	var msgBlock wire.MsgBlock
	msgBlock.Header = wire.BlockHeader{
		Version:    generatedBlockVersion,
		PrevBlock:  *prevHash,
		MerkleRoot: *merkles[len(merkles)-1],
		Timestamp:  ts,
		Bits:       requiredDifficulty,
	}
	for _, tx := range blockTxns {
		if err := msgBlock.AddTransaction(tx.MsgTx()); err != nil {
			return nil, err
		}
	}

	// Finally, perform a full check on the created block against the chain
	// consensus rules to ensure it properly connects to the current best
	// chain with no issues.
	block := btcutil.NewBlock(&msgBlock)
	block.SetHeight(nextBlockHeight)
	if err := blockManager.CheckConnectBlock(block); err != nil {
		return nil, err
	}

	minrLog.Debugf("Created new block template (%d transactions, %d in "+
		"fees, %d signature operations, %d bytes, target difficulty "+
		"%064x)", len(msgBlock.Transactions), totalFees, blockSigOps,
		blockSize, blockchain.CompactToBig(msgBlock.Header.Bits))

	return &BlockTemplate{
		block:           &msgBlock,
		fees:            txFees,
		sigOpCounts:     txSigOpCounts,
		height:          nextBlockHeight,
		validPayAddress: payToAddress != nil,
	}, nil
}
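// The selection order above is driven by two per-transaction metrics: the
// priority formula sum(inputValue * inputAge) / adjustedTxSize and the fee in
// Satoshi/KB, feePerKB = fee / (serializedSize / 1000).  The helper below is
// an illustrative sketch (not part of the original source) of those two
// calculations using plain values; it does not use the real txDesc or txStore
// types and assumes inputValues and inputAges have the same length.
func exampleTxMetrics(inputValues, inputAges []int64, adjustedTxSize int64,
	fee int64, serializedSize int) (priority, feePerKB float64) {

	// Priority: each input's value weighted by how long it has been
	// confirmed, summed and divided by the adjusted transaction size.
	var valueAgeSum int64
	for i, value := range inputValues {
		valueAgeSum += value * inputAges[i]
	}
	priority = float64(valueAgeSum) / float64(adjustedTxSize)

	// Fee per kilobyte, using the exact serialized size rather than
	// rounding up to a full kilobyte boundary.
	feePerKB = float64(fee) / (float64(serializedSize) / 1000)
	return priority, feePerKB
}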
// TestBlockOverflowErrors performs tests to ensure deserializing blocks which
// are intentionally crafted to use large values for the number of transactions
// are handled properly.  This could otherwise potentially be used as an attack
// vector.
func TestBlockOverflowErrors(t *testing.T) {
	// Use protocol version 70001 specifically here instead of the latest
	// protocol version because the test data is using bytes encoded with
	// that version.
	pver := uint32(70001)

	tests := []struct {
		buf  []byte // Wire encoding
		pver uint32 // Protocol version for wire encoding
		err  error  // Expected error
	}{
		// Block that claims to have ~uint64(0) transactions.
		{
			[]byte{
				0x01, 0x00, 0x00, 0x00, // Version 1
				0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
				0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
				0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
				0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
				0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
				0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
				0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
				0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot
				0x61, 0xbc, 0x66, 0x49, // Timestamp
				0xff, 0xff, 0x00, 0x1d, // Bits
				0x01, 0xe3, 0x62, 0x99, // Nonce
				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
				0xff, // TxnCount
			}, pver, &wire.MessageError{},
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Decode from wire format.
		var msg wire.MsgBlock
		r := bytes.NewReader(test.buf)
		err := msg.BtcDecode(r, test.pver)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
				i, err, reflect.TypeOf(test.err))
			continue
		}

		// Deserialize from wire format.
		r = bytes.NewReader(test.buf)
		err = msg.Deserialize(r)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
				i, err, reflect.TypeOf(test.err))
			continue
		}

		// Deserialize with transaction location info from wire format.
		br := bytes.NewBuffer(test.buf)
		_, err = msg.DeserializeTxLoc(br)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+
				"want: %v", i, err, reflect.TypeOf(test.err))
			continue
		}
	}
}
// TestBlockSerialize tests MsgBlock serialize and deserialize.
func TestBlockSerialize(t *testing.T) {
	tests := []struct {
		in     *wire.MsgBlock // Message to encode
		out    *wire.MsgBlock // Expected decoded message
		buf    []byte         // Serialized data
		txLocs []wire.TxLoc   // Expected transaction locations
	}{
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Serialize the block.
		var buf bytes.Buffer
		err := test.in.Serialize(&buf)
		if err != nil {
			t.Errorf("Serialize #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("Serialize #%d\n got: %s want: %s", i,
				spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Deserialize the block.
		var block wire.MsgBlock
		rbuf := bytes.NewReader(test.buf)
		err = block.Deserialize(rbuf)
		if err != nil {
			t.Errorf("Deserialize #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&block, test.out) {
			t.Errorf("Deserialize #%d\n got: %s want: %s", i,
				spew.Sdump(&block), spew.Sdump(test.out))
			continue
		}

		// Deserialize the block while gathering transaction location
		// information.
		var txLocBlock wire.MsgBlock
		br := bytes.NewBuffer(test.buf)
		txLocs, err := txLocBlock.DeserializeTxLoc(br)
		if err != nil {
			t.Errorf("DeserializeTxLoc #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&txLocBlock, test.out) {
			t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
				spew.Sdump(&txLocBlock), spew.Sdump(test.out))
			continue
		}
		if !reflect.DeepEqual(txLocs, test.txLocs) {
			t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
				spew.Sdump(txLocs), spew.Sdump(test.txLocs))
			continue
		}
	}
}
// TestBlockWire tests the MsgBlock wire encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestBlockWire(t *testing.T) {
	tests := []struct {
		in     *wire.MsgBlock // Message to encode
		out    *wire.MsgBlock // Expected decoded message
		buf    []byte         // Wire encoding
		txLocs []wire.TxLoc   // Expected transaction locations
		pver   uint32         // Protocol version for wire encoding
	}{
		// Latest protocol version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.ProtocolVersion,
		},

		// Protocol version BIP0035Version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.BIP0035Version,
		},

		// Protocol version BIP0031Version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.BIP0031Version,
		},

		// Protocol version NetAddressTimeVersion.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.NetAddressTimeVersion,
		},

		// Protocol version MultipleAddressVersion.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.MultipleAddressVersion,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode the message to wire format.
		var buf bytes.Buffer
		err := test.in.BtcEncode(&buf, test.pver)
		if err != nil {
			t.Errorf("BtcEncode #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("BtcEncode #%d\n got: %s want: %s", i,
				spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Decode the message from wire format.
		var msg wire.MsgBlock
		rbuf := bytes.NewReader(test.buf)
		err = msg.BtcDecode(rbuf, test.pver)
		if err != nil {
			t.Errorf("BtcDecode #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&msg, test.out) {
			t.Errorf("BtcDecode #%d\n got: %s want: %s", i,
				spew.Sdump(&msg), spew.Sdump(test.out))
			continue
		}
	}
}
// TestFullBlocks ensures all tests generated by the fullblocktests package
// have the expected result when processed via ProcessBlock.
func TestFullBlocks(t *testing.T) {
	tests, err := fullblocktests.Generate(false)
	if err != nil {
		t.Fatalf("failed to generate tests: %v", err)
	}

	// Create a new database and chain instance to run tests against.
	chain, teardownFunc, err := chainSetup("fullblocktest",
		&chaincfg.RegressionNetParams)
	if err != nil {
		t.Errorf("Failed to setup chain instance: %v", err)
		return
	}
	defer teardownFunc()

	// testAcceptedBlock attempts to process the block in the provided test
	// instance and ensures that it was accepted according to the flags
	// specified in the test.
	testAcceptedBlock := func(item fullblocktests.AcceptedBlock) {
		blockHeight := item.Height
		block := btcutil.NewBlock(item.Block)
		block.SetHeight(blockHeight)
		t.Logf("Testing block %s (hash %s, height %d)",
			item.Name, block.Hash(), blockHeight)

		isMainChain, isOrphan, err := chain.ProcessBlock(block,
			blockchain.BFNone)
		if err != nil {
			t.Fatalf("block %q (hash %s, height %d) should "+
				"have been accepted: %v", item.Name,
				block.Hash(), blockHeight, err)
		}

		// Ensure the main chain and orphan flags match the values
		// specified in the test.
		if isMainChain != item.IsMainChain {
			t.Fatalf("block %q (hash %s, height %d) unexpected main "+
				"chain flag -- got %v, want %v", item.Name,
				block.Hash(), blockHeight, isMainChain,
				item.IsMainChain)
		}
		if isOrphan != item.IsOrphan {
			t.Fatalf("block %q (hash %s, height %d) unexpected "+
				"orphan flag -- got %v, want %v", item.Name,
				block.Hash(), blockHeight, isOrphan,
				item.IsOrphan)
		}
	}

	// testRejectedBlock attempts to process the block in the provided test
	// instance and ensures that it was rejected with the reject code
	// specified in the test.
	testRejectedBlock := func(item fullblocktests.RejectedBlock) {
		blockHeight := item.Height
		block := btcutil.NewBlock(item.Block)
		block.SetHeight(blockHeight)
		t.Logf("Testing block %s (hash %s, height %d)",
			item.Name, block.Hash(), blockHeight)

		_, _, err := chain.ProcessBlock(block, blockchain.BFNone)
		if err == nil {
			t.Fatalf("block %q (hash %s, height %d) should not "+
				"have been accepted", item.Name, block.Hash(),
				blockHeight)
		}

		// Ensure the error code is of the expected type and the reject
		// code matches the value specified in the test instance.
		rerr, ok := err.(blockchain.RuleError)
		if !ok {
			t.Fatalf("block %q (hash %s, height %d) returned "+
				"unexpected error type -- got %T, want "+
				"blockchain.RuleError", item.Name, block.Hash(),
				blockHeight, err)
		}
		if rerr.ErrorCode != item.RejectCode {
			t.Fatalf("block %q (hash %s, height %d) does not have "+
				"expected reject code -- got %v, want %v",
				item.Name, block.Hash(), blockHeight,
				rerr.ErrorCode, item.RejectCode)
		}
	}

	// testRejectedNonCanonicalBlock attempts to decode the block in the
	// provided test instance and ensures that it failed to decode with a
	// message error.
	testRejectedNonCanonicalBlock := func(item fullblocktests.RejectedNonCanonicalBlock) {
		headerLen := len(item.RawBlock)
		if headerLen > 80 {
			headerLen = 80
		}
		blockHash := chainhash.DoubleHashH(item.RawBlock[0:headerLen])
		blockHeight := item.Height
		t.Logf("Testing block %s (hash %s, height %d)", item.Name,
			blockHash, blockHeight)

		// Ensure there is an error due to deserializing the block.
		var msgBlock wire.MsgBlock
		err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0)
		if _, ok := err.(*wire.MessageError); !ok {
			t.Fatalf("block %q (hash %s, height %d) should have "+
				"failed to decode", item.Name, blockHash,
				blockHeight)
		}
	}

	// testOrphanOrRejectedBlock attempts to process the block in the
	// provided test instance and ensures that it was either accepted as an
	// orphan or rejected with a rule violation.
	testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) {
		blockHeight := item.Height
		block := btcutil.NewBlock(item.Block)
		block.SetHeight(blockHeight)
		t.Logf("Testing block %s (hash %s, height %d)",
			item.Name, block.Hash(), blockHeight)

		_, isOrphan, err := chain.ProcessBlock(block, blockchain.BFNone)
		if err != nil {
			// Ensure the error code is of the expected type.
			if _, ok := err.(blockchain.RuleError); !ok {
				t.Fatalf("block %q (hash %s, height %d) "+
					"returned unexpected error type -- "+
					"got %T, want blockchain.RuleError",
					item.Name, block.Hash(), blockHeight,
					err)
			}
		}

		if !isOrphan {
			t.Fatalf("block %q (hash %s, height %d) was accepted, "+
				"but is not considered an orphan", item.Name,
				block.Hash(), blockHeight)
		}
	}

	// testExpectedTip ensures the current tip of the blockchain is the
	// block specified in the provided test instance.
	testExpectedTip := func(item fullblocktests.ExpectedTip) {
		blockHeight := item.Height
		block := btcutil.NewBlock(item.Block)
		block.SetHeight(blockHeight)
		t.Logf("Testing tip for block %s (hash %s, height %d)",
			item.Name, block.Hash(), blockHeight)

		// Ensure hash and height match.
		best := chain.BestSnapshot()
		if *best.Hash != item.Block.BlockHash() ||
			best.Height != blockHeight {

			t.Fatalf("block %q (hash %s, height %d) should be "+
				"the current tip -- got (hash %s, height %d)",
				item.Name, block.Hash(), blockHeight, best.Hash,
				best.Height)
		}
	}

	for testNum, test := range tests {
		for itemNum, item := range test {
			switch item := item.(type) {
			case fullblocktests.AcceptedBlock:
				testAcceptedBlock(item)
			case fullblocktests.RejectedBlock:
				testRejectedBlock(item)
			case fullblocktests.RejectedNonCanonicalBlock:
				testRejectedNonCanonicalBlock(item)
			case fullblocktests.OrphanOrRejectedBlock:
				testOrphanOrRejectedBlock(item)
			case fullblocktests.ExpectedTip:
				testExpectedTip(item)
			default:
				t.Fatalf("test #%d, item #%d is not one of "+
					"the supported test instance types -- "+
					"got type: %T", testNum, itemNum, item)
			}
		}
	}
}