// TestBlockWireErrors performs negative tests against wire encode and decode
// of MsgBlock to confirm error paths work correctly.
func TestBlockWireErrors(t *testing.T) {
	// Use protocol version 60002 specifically here instead of the latest
	// because the test data is using bytes encoded with that protocol
	// version.
	pver := uint32(60002)

	tests := []struct {
		in       *wire.MsgBlock // Value to encode
		buf      []byte         // Wire encoding
		pver     uint32         // Protocol version for wire encoding
		max      int            // Max size of fixed buffer to induce errors
		writeErr error          // Expected write error
		readErr  error          // Expected read error
	}{
		// Force error in version.
		{&blockOne, blockOneBytes, pver, 0, io.ErrShortWrite, io.EOF},
		// Force error in prev block hash.
		{&blockOne, blockOneBytes, pver, 4, io.ErrShortWrite, io.EOF},
		// Force error in merkle root.
		{&blockOne, blockOneBytes, pver, 36, io.ErrShortWrite, io.EOF},
		// Force error in timestamp.
		{&blockOne, blockOneBytes, pver, 68, io.ErrShortWrite, io.EOF},
		// Force error in difficulty bits.
		{&blockOne, blockOneBytes, pver, 72, io.ErrShortWrite, io.EOF},
		// Force error in header nonce.
		{&blockOne, blockOneBytes, pver, 76, io.ErrShortWrite, io.EOF},
		// Force error in transaction count.
		{&blockOne, blockOneBytes, pver, 80, io.ErrShortWrite, io.EOF},
		// Force error in transactions.
		{&blockOne, blockOneBytes, pver, 81, io.ErrShortWrite, io.EOF},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode to wire format.
		w := newFixedWriter(test.max)
		err := test.in.BtcEncode(w, test.pver)
		if err != test.writeErr {
			t.Errorf("BtcEncode #%d wrong error got: %v, want: %v",
				i, err, test.writeErr)
			continue
		}

		// Decode from wire format.
		var msg wire.MsgBlock
		r := newFixedReader(test.max, test.buf)
		err = msg.BtcDecode(r, test.pver)
		if err != test.readErr {
			t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}
	}
}
// TestBlockSerializeErrors performs negative tests against serialize and
// deserialize of MsgBlock to confirm error paths work correctly.
func TestBlockSerializeErrors(t *testing.T) {
	tests := []struct {
		in       *wire.MsgBlock // Value to encode
		buf      []byte         // Serialized data
		max      int            // Max size of fixed buffer to induce errors
		writeErr error          // Expected write error
		readErr  error          // Expected read error
	}{
		// Force error in version.
		{&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF},
		// Force error in prev block hash.
		{&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF},
		// Force error in merkle root.
		{&blockOne, blockOneBytes, 36, io.ErrShortWrite, io.EOF},
		// Force error in timestamp.
		{&blockOne, blockOneBytes, 68, io.ErrShortWrite, io.EOF},
		// Force error in difficulty bits.
		{&blockOne, blockOneBytes, 72, io.ErrShortWrite, io.EOF},
		// Force error in header nonce.
		{&blockOne, blockOneBytes, 76, io.ErrShortWrite, io.EOF},
		// Force error in transaction count.
		{&blockOne, blockOneBytes, 80, io.ErrShortWrite, io.EOF},
		// Force error in transactions.
		{&blockOne, blockOneBytes, 81, io.ErrShortWrite, io.EOF},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Serialize the block.
		w := newFixedWriter(test.max)
		err := test.in.Serialize(w)
		if err != test.writeErr {
			t.Errorf("Serialize #%d wrong error got: %v, want: %v",
				i, err, test.writeErr)
			continue
		}

		// Deserialize the block.
		var block wire.MsgBlock
		r := newFixedReader(test.max, test.buf)
		err = block.Deserialize(r)
		if err != test.readErr {
			t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
				i, err, test.readErr)
			continue
		}

		// Deserialize the block while gathering transaction location
		// information from a buffer truncated to the same max size.
		var txLocBlock wire.MsgBlock
		br := bytes.NewBuffer(test.buf[0:test.max])
		_, err = txLocBlock.DeserializeTxLoc(br)
		if err != test.readErr {
			t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+
				"want: %v", i, err, test.readErr)
			continue
		}
	}
}
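// The two error-path tests above drive encode and decode failures through
// newFixedWriter and newFixedReader, helpers defined elsewhere in this
// package's test fixtures.  The sketch below (using hypothetical names, and
// assuming this file's existing bytes and io imports) only illustrates the
// behavior those helpers need and is not the package's actual implementation:
// the writer accepts at most max bytes before failing with io.ErrShortWrite,
// and the reader exposes at most max bytes of the supplied buffer so reads
// past that point return io.EOF.
type sketchFixedWriter struct {
	b   []byte // fixed-capacity destination
	pos int    // number of bytes written so far
}

// Write copies p into the fixed buffer and fails with io.ErrShortWrite once
// the capacity would be exceeded.
func (w *sketchFixedWriter) Write(p []byte) (int, error) {
	if w.pos+len(p) > len(w.b) {
		return 0, io.ErrShortWrite
	}
	copy(w.b[w.pos:], p)
	w.pos += len(p)
	return len(p), nil
}

// sketchNewFixedWriter returns a writer that fails after max bytes.
func sketchNewFixedWriter(max int) io.Writer {
	return &sketchFixedWriter{b: make([]byte, max)}
}

// sketchNewFixedReader returns a reader limited to at most max bytes of buf,
// so decode paths encounter io.EOF at the desired offset.
func sketchNewFixedReader(max int, buf []byte) io.Reader {
	if max > len(buf) {
		max = len(buf)
	}
	return bytes.NewReader(buf[:max])
}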
// TestBlockOverflowErrors performs tests to ensure deserializing blocks which
// are intentionally crafted to use large values for the number of transactions
// are handled properly.  This could otherwise potentially be used as an attack
// vector.
func TestBlockOverflowErrors(t *testing.T) {
	// Use protocol version 70001 specifically here instead of the latest
	// protocol version because the test data is using bytes encoded with
	// that version.
	pver := uint32(70001)

	tests := []struct {
		buf  []byte // Wire encoding
		pver uint32 // Protocol version for wire encoding
		err  error  // Expected error
	}{
		// Block that claims to have ~uint64(0) transactions.
		{
			[]byte{
				0x01, 0x00, 0x00, 0x00, // Version 1
				0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
				0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
				0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
				0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
				0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44,
				0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67,
				0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1,
				0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot
				0x61, 0xbc, 0x66, 0x49, // Timestamp
				0xff, 0xff, 0x00, 0x1d, // Bits
				0x01, 0xe3, 0x62, 0x99, // Nonce
				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
				0xff, // TxnCount
			}, pver, &wire.MessageError{},
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Decode from wire format.
		var msg wire.MsgBlock
		r := bytes.NewReader(test.buf)
		err := msg.BtcDecode(r, test.pver)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
				i, err, reflect.TypeOf(test.err))
			continue
		}

		// Deserialize from wire format.
		r = bytes.NewReader(test.buf)
		err = msg.Deserialize(r)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
				i, err, reflect.TypeOf(test.err))
			continue
		}

		// Deserialize with transaction location info from wire format.
		br := bytes.NewBuffer(test.buf)
		_, err = msg.DeserializeTxLoc(br)
		if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
			t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+
				"want: %v", i, err, reflect.TypeOf(test.err))
			continue
		}
	}
}
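// The overflow vector above claims roughly 1.8e19 transactions by encoding
// the count as the maximal nine-byte var-int (a 0xff discriminant followed by
// eight 0xff bytes).  The hypothetical helper below simply decodes that count
// with the wire package's exported var-int reader to show the claimed value;
// the MsgBlock decode paths are expected to reject such a count with a
// MessageError rather than attempting to allocate for it.
func sketchReadClaimedTxnCount(pver uint32) (uint64, error) {
	claimed := []byte{
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	}
	return wire.ReadVarInt(bytes.NewReader(claimed), pver)
}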
// TestBlockSerialize tests MsgBlock serialize and deserialize.
func TestBlockSerialize(t *testing.T) {
	tests := []struct {
		in     *wire.MsgBlock // Message to encode
		out    *wire.MsgBlock // Expected decoded message
		buf    []byte         // Serialized data
		txLocs []wire.TxLoc   // Expected transaction locations
	}{
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Serialize the block.
		var buf bytes.Buffer
		err := test.in.Serialize(&buf)
		if err != nil {
			t.Errorf("Serialize #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("Serialize #%d\n got: %s want: %s", i,
				spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Deserialize the block.
		var block wire.MsgBlock
		rbuf := bytes.NewReader(test.buf)
		err = block.Deserialize(rbuf)
		if err != nil {
			t.Errorf("Deserialize #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&block, test.out) {
			t.Errorf("Deserialize #%d\n got: %s want: %s", i,
				spew.Sdump(&block), spew.Sdump(test.out))
			continue
		}

		// Deserialize the block while gathering transaction location
		// information.
		var txLocBlock wire.MsgBlock
		br := bytes.NewBuffer(test.buf)
		txLocs, err := txLocBlock.DeserializeTxLoc(br)
		if err != nil {
			t.Errorf("DeserializeTxLoc #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&txLocBlock, test.out) {
			t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
				spew.Sdump(&txLocBlock), spew.Sdump(test.out))
			continue
		}
		if !reflect.DeepEqual(txLocs, test.txLocs) {
			t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
				spew.Sdump(txLocs), spew.Sdump(test.txLocs))
			continue
		}
	}
}
// TestBlockWire tests the MsgBlock wire encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestBlockWire(t *testing.T) {
	tests := []struct {
		in     *wire.MsgBlock // Message to encode
		out    *wire.MsgBlock // Expected decoded message
		buf    []byte         // Wire encoding
		txLocs []wire.TxLoc   // Expected transaction locations
		pver   uint32         // Protocol version for wire encoding
	}{
		// Latest protocol version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.ProtocolVersion,
		},

		// Protocol version BIP0035Version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.BIP0035Version,
		},

		// Protocol version BIP0031Version.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.BIP0031Version,
		},

		// Protocol version NetAddressTimeVersion.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.NetAddressTimeVersion,
		},

		// Protocol version MultipleAddressVersion.
		{
			&blockOne,
			&blockOne,
			blockOneBytes,
			blockOneTxLocs,
			wire.MultipleAddressVersion,
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Encode the message to wire format.
		var buf bytes.Buffer
		err := test.in.BtcEncode(&buf, test.pver)
		if err != nil {
			t.Errorf("BtcEncode #%d error %v", i, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.buf) {
			t.Errorf("BtcEncode #%d\n got: %s want: %s", i,
				spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
			continue
		}

		// Decode the message from wire format.
		var msg wire.MsgBlock
		rbuf := bytes.NewReader(test.buf)
		err = msg.BtcDecode(rbuf, test.pver)
		if err != nil {
			t.Errorf("BtcDecode #%d error %v", i, err)
			continue
		}
		if !reflect.DeepEqual(&msg, test.out) {
			t.Errorf("BtcDecode #%d\n got: %s want: %s", i,
				spew.Sdump(&msg), spew.Sdump(test.out))
			continue
		}
	}
}
// NewBlockTemplate returns a new block template that is ready to be solved
// using the transactions from the passed transaction source pool and a coinbase
// that either pays to the passed address if it is not nil, or a coinbase that
// is redeemable by anyone if the passed address is nil.  The nil address
// functionality is useful since there are cases such as the getblocktemplate
// RPC where external mining software is responsible for creating its own
// coinbase which will replace the one generated for the block template.  Thus
// the need to have a configured address can be avoided.
//
// The transactions selected and included are prioritized according to several
// factors.  First, each transaction has a priority calculated based on its
// value, age of inputs, and size.  Transactions which consist of larger
// amounts, older inputs, and small sizes have the highest priority.  Second, a
// fee per kilobyte is calculated for each transaction.  Transactions with a
// higher fee per kilobyte are preferred.  Finally, the block generation related
// policy settings are all taken into account.
//
// Transactions which only spend outputs from other transactions already in the
// block chain are immediately added to a priority queue which either
// prioritizes based on the priority (then fee per kilobyte) or the fee per
// kilobyte (then priority) depending on whether or not the BlockPrioritySize
// policy setting allots space for high-priority transactions.  Transactions
// which spend outputs from other transactions in the source pool are added to a
// dependency map so they can be added to the priority queue once the
// transactions they depend on have been included.
//
// Once the high-priority area (if configured) has been filled with
// transactions, or the priority falls below what is considered high-priority,
// the priority queue is updated to prioritize by fees per kilobyte (then
// priority).
//
// When the fees per kilobyte drop below the TxMinFreeFee policy setting, the
// transaction will be skipped unless the BlockMinSize policy setting is
// nonzero, in which case the block will be filled with the low-fee/free
// transactions until the block size reaches that minimum size.
//
// Any transactions which would cause the block to exceed the BlockMaxSize
// policy setting, exceed the maximum allowed signature operations per block, or
// otherwise cause the block to be invalid are skipped.
//
// Given the above, a block generated by this function is of the following form:
//
//  -----------------------------------  --  --
//  |      Coinbase Transaction         |   |   |
//  |-----------------------------------|   |   |
//  |                                   |   |   | ----- policy.BlockPrioritySize
//  |   High-priority Transactions      |   |   |
//  |                                   |   |   |
//  |-----------------------------------|   | --
//  |                                   |   |
//  |                                   |   |
//  |                                   |   |--- policy.BlockMaxSize
//  |  Transactions prioritized by fee  |   |
//  |  until <= policy.TxMinFreeFee     |   |
//  |                                   |   |
//  |                                   |   |
//  |                                   |   |
//  |-----------------------------------|   |
//  |  Low-fee/Non high-priority (free) |   |
//  |  transactions (while block size   |   |
//  |  <= policy.BlockMinSize)          |   |
//  -----------------------------------  --  --
func NewBlockTemplate(policy *mining.Policy, server *server,
	payToAddress btcutil.Address) (*BlockTemplate, error) {

	var txSource mining.TxSource = server.txMemPool
	blockManager := server.blockManager
	timeSource := server.timeSource
	chainState := &blockManager.chainState

	// Extend the most recently known best block.
	chainState.Lock()
	prevHash := chainState.newestHash
	nextBlockHeight := chainState.newestHeight + 1
	chainState.Unlock()

	// Create a standard coinbase transaction paying to the provided
	// address.  NOTE: The coinbase value will be updated to include the
	// fees from the selected transactions later after they have actually
	// been selected.  It is created here to detect any errors early
	// before potentially doing a lot of work below.  The extra nonce helps
	// ensure the transaction is not a duplicate transaction (paying the
	// same value to the same public key address would otherwise be an
	// identical transaction for block version 1).
	extraNonce := uint64(0)
	coinbaseScript, err := standardCoinbaseScript(nextBlockHeight, extraNonce)
	if err != nil {
		return nil, err
	}
	coinbaseTx, err := createCoinbaseTx(coinbaseScript, nextBlockHeight,
		payToAddress)
	if err != nil {
		return nil, err
	}
	numCoinbaseSigOps := int64(blockchain.CountSigOps(coinbaseTx))

	// Get the current source transactions and create a priority queue to
	// hold the transactions which are ready for inclusion into a block
	// along with some priority related and fee metadata.  Reserve the same
	// number of items that are available for the priority queue.  Also,
	// choose the initial sort order for the priority queue based on whether
	// or not there is an area allocated for high-priority transactions.
	sourceTxns := txSource.MiningDescs()
	sortedByFee := policy.BlockPrioritySize == 0
	priorityQueue := newTxPriorityQueue(len(sourceTxns), sortedByFee)

	// Create a slice to hold the transactions to be included in the
	// generated block with reserved space.  Also create a transaction
	// store to house all of the input transactions so multiple lookups
	// can be avoided.
	blockTxns := make([]*btcutil.Tx, 0, len(sourceTxns))
	blockTxns = append(blockTxns, coinbaseTx)
	blockTxStore := make(blockchain.TxStore)

	// dependers is used to track transactions which depend on another
	// transaction in the source pool.  This, in conjunction with the
	// dependsOn map kept with each dependent transaction, helps quickly
	// determine which dependent transactions are now eligible for inclusion
	// in the block once each transaction has been included.
	dependers := make(map[wire.ShaHash]*list.List)

	// Create slices to hold the fees and number of signature operations
	// for each of the selected transactions and add an entry for the
	// coinbase.  This allows the code below to simply append details about
	// a transaction as it is selected for inclusion in the final block.
	// However, since the total fees aren't known yet, use a dummy value for
	// the coinbase fee which will be updated later.
	txFees := make([]int64, 0, len(sourceTxns))
	txSigOpCounts := make([]int64, 0, len(sourceTxns))
	txFees = append(txFees, -1) // Updated once known
	txSigOpCounts = append(txSigOpCounts, numCoinbaseSigOps)

	minrLog.Debugf("Considering %d transactions for inclusion to new block",
		len(sourceTxns))

mempoolLoop:
	for _, txDesc := range sourceTxns {
		// A block can't have more than one coinbase or contain
		// non-finalized transactions.
		tx := txDesc.Tx
		if blockchain.IsCoinBase(tx) {
			minrLog.Tracef("Skipping coinbase tx %s", tx.Sha())
			continue
		}
		if !blockchain.IsFinalizedTransaction(tx, nextBlockHeight,
			timeSource.AdjustedTime()) {

			minrLog.Tracef("Skipping non-finalized tx %s", tx.Sha())
			continue
		}

		// Fetch all of the transactions referenced by the inputs to
		// this transaction.
		// NOTE: This intentionally does not fetch inputs from the
		// mempool since a transaction which depends on other
		// transactions in the mempool must come after those
		// dependencies in the final generated block.
		txStore, err := blockManager.FetchTransactionStore(tx)
		if err != nil {
			minrLog.Warnf("Unable to fetch transaction store for "+
				"tx %s: %v", tx.Sha(), err)
			continue
		}

		// Setup dependencies for any transactions which reference
		// other transactions in the mempool so they can be properly
		// ordered below.
		prioItem := &txPrioItem{tx: tx}
		for _, txIn := range tx.MsgTx().TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			txData, exists := txStore[*originHash]
			if !exists || txData.Err != nil || txData.Tx == nil {
				if !txSource.HaveTransaction(originHash) {
					minrLog.Tracef("Skipping tx %s because "+
						"it references tx %s which is "+
						"not available", tx.Sha(),
						originHash)
					continue mempoolLoop
				}

				// The transaction is referencing another
				// transaction in the source pool, so setup an
				// ordering dependency.
				depList, exists := dependers[*originHash]
				if !exists {
					depList = list.New()
					dependers[*originHash] = depList
				}
				depList.PushBack(prioItem)
				if prioItem.dependsOn == nil {
					prioItem.dependsOn = make(
						map[wire.ShaHash]struct{})
				}
				prioItem.dependsOn[*originHash] = struct{}{}

				// Skip the check below.  We already know the
				// referenced transaction is available.
				continue
			}

			// Ensure the output index in the referenced transaction
			// is available.
			msgTx := txData.Tx.MsgTx()
			if originIndex >= uint32(len(msgTx.TxOut)) {
				minrLog.Tracef("Skipping tx %s because "+
					"it references output %d of tx %s "+
					"which is out of bounds", tx.Sha(),
					originIndex, originHash)
				continue mempoolLoop
			}
		}

		// Calculate the final transaction priority using the input
		// value age sum as well as the adjusted transaction size.  The
		// formula is: sum(inputValue * inputAge) / adjustedTxSize
		prioItem.priority = calcPriority(tx.MsgTx(), txStore,
			nextBlockHeight)

		// Calculate the fee in Satoshi/kB.
		txSize := tx.MsgTx().SerializeSize()
		prioItem.feePerKB = (txDesc.Fee * 1000) / int64(txSize)
		prioItem.fee = txDesc.Fee

		// Add the transaction to the priority queue to mark it ready
		// for inclusion in the block unless it has dependencies.
		if prioItem.dependsOn == nil {
			heap.Push(priorityQueue, prioItem)
		}

		// Merge the store which contains all of the input transactions
		// for this transaction into the input transaction store.  This
		// allows the code below to avoid a second lookup.
		mergeTxStore(blockTxStore, txStore)
	}

	minrLog.Tracef("Priority queue len %d, dependers len %d",
		priorityQueue.Len(), len(dependers))

	// The starting block size is the size of the block header plus the max
	// possible transaction count size, plus the size of the coinbase
	// transaction.
	blockSize := blockHeaderOverhead +
		uint32(coinbaseTx.MsgTx().SerializeSize())
	blockSigOps := numCoinbaseSigOps
	totalFees := int64(0)

	// Choose which transactions make it into the block.
	for priorityQueue.Len() > 0 {
		// Grab the highest priority (or highest fee per kilobyte
		// depending on the sort order) transaction.
		prioItem := heap.Pop(priorityQueue).(*txPrioItem)
		tx := prioItem.tx

		// Grab the list of transactions which depend on this one (if
		// any) and remove the entry for this transaction as it will
		// either be included or skipped, but in either case the deps
		// are no longer needed.
		deps := dependers[*tx.Sha()]
		delete(dependers, *tx.Sha())

		// Enforce maximum block size.  Also check for overflow.
		txSize := uint32(tx.MsgTx().SerializeSize())
		blockPlusTxSize := blockSize + txSize
		if blockPlusTxSize < blockSize ||
			blockPlusTxSize >= policy.BlockMaxSize {

			minrLog.Tracef("Skipping tx %s because it would exceed "+
				"the max block size", tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}

		// Enforce maximum signature operations per block.  Also check
		// for overflow.
		numSigOps := int64(blockchain.CountSigOps(tx))
		if blockSigOps+numSigOps < blockSigOps ||
			blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock {

			minrLog.Tracef("Skipping tx %s because it would "+
				"exceed the maximum sigops per block", tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}
		numP2SHSigOps, err := blockchain.CountP2SHSigOps(tx, false,
			blockTxStore)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"CountP2SHSigOps: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}
		numSigOps += int64(numP2SHSigOps)
		if blockSigOps+numSigOps < blockSigOps ||
			blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock {

			minrLog.Tracef("Skipping tx %s because it would "+
				"exceed the maximum sigops per block (p2sh)",
				tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}

		// Skip free transactions once the block is larger than the
		// minimum block size.
		if sortedByFee &&
			prioItem.feePerKB < int64(policy.TxMinFreeFee) &&
			blockPlusTxSize >= policy.BlockMinSize {

			minrLog.Tracef("Skipping tx %s with feePerKB %d "+
				"< TxMinFreeFee %d and block size %d >= "+
				"minBlockSize %d", tx.Sha(), prioItem.feePerKB,
				policy.TxMinFreeFee, blockPlusTxSize,
				policy.BlockMinSize)
			logSkippedDeps(tx, deps)
			continue
		}

		// Prioritize by fee per kilobyte once the block is larger than
		// the priority size or there are no more high-priority
		// transactions.
		if !sortedByFee && (blockPlusTxSize >= policy.BlockPrioritySize ||
			prioItem.priority <= minHighPriority) {

			minrLog.Tracef("Switching to sort by fees per "+
				"kilobyte blockSize %d >= BlockPrioritySize "+
				"%d || priority %.2f <= minHighPriority %.2f",
				blockPlusTxSize, policy.BlockPrioritySize,
				prioItem.priority, minHighPriority)
			sortedByFee = true
			priorityQueue.SetLessFunc(txPQByFee)

			// Put the transaction back into the priority queue and
			// skip it so it is re-prioritized by fees if it won't
			// fit into the high-priority section or the priority is
			// too low.  Otherwise this transaction will be the
			// final one in the high-priority section, so just fall
			// through to the code below so it is added now.
			if blockPlusTxSize > policy.BlockPrioritySize ||
				prioItem.priority < minHighPriority {

				heap.Push(priorityQueue, prioItem)
				continue
			}
		}

		// Ensure the transaction inputs pass all of the necessary
		// preconditions before allowing it to be added to the block.
		_, err = blockchain.CheckTransactionInputs(tx, nextBlockHeight,
			blockTxStore)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"CheckTransactionInputs: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}
		err = blockchain.ValidateTransactionScripts(tx, blockTxStore,
			txscript.StandardVerifyFlags, server.sigCache)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"ValidateTransactionScripts: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}

		// Spend the transaction inputs in the block transaction store
		// and add an entry for it to ensure any transactions which
		// reference this one have it available as an input and can
		// ensure they aren't double spending.
		spendTransaction(blockTxStore, tx, nextBlockHeight)

		// Add the transaction to the block, increment counters, and
		// save the fees and signature operation counts to the block
		// template.
		blockTxns = append(blockTxns, tx)
		blockSize += txSize
		blockSigOps += numSigOps
		totalFees += prioItem.fee
		txFees = append(txFees, prioItem.fee)
		txSigOpCounts = append(txSigOpCounts, numSigOps)

		minrLog.Tracef("Adding tx %s (priority %.2f, feePerKB %d)",
			prioItem.tx.Sha(), prioItem.priority, prioItem.feePerKB)

		// Add transactions which depend on this one (and also do not
		// have any other unsatisfied dependencies) to the priority
		// queue.
		if deps != nil {
			for e := deps.Front(); e != nil; e = e.Next() {
				// Add the transaction to the priority queue if
				// there are no more dependencies after this
				// one.
				item := e.Value.(*txPrioItem)
				delete(item.dependsOn, *tx.Sha())
				if len(item.dependsOn) == 0 {
					heap.Push(priorityQueue, item)
				}
			}
		}
	}

	// Now that the actual transactions have been selected, update the
	// block size for the real transaction count and update the coinbase
	// value with the total fees accordingly.
	blockSize -= wire.MaxVarIntPayload -
		uint32(wire.VarIntSerializeSize(uint64(len(blockTxns))))
	coinbaseTx.MsgTx().TxOut[0].Value += totalFees
	txFees[0] = -totalFees

	// Calculate the required difficulty for the block.  The timestamp
	// is potentially adjusted to ensure it comes after the median time of
	// the last several blocks per the chain consensus rules.
	ts, err := medianAdjustedTime(chainState, timeSource)
	if err != nil {
		return nil, err
	}
	requiredDifficulty, err := blockManager.CalcNextRequiredDifficulty(ts)
	if err != nil {
		return nil, err
	}

	// Create a new block ready to be solved.
	merkles := blockchain.BuildMerkleTreeStore(blockTxns)
	var msgBlock wire.MsgBlock
	msgBlock.Header = wire.BlockHeader{
		Version:    generatedBlockVersion,
		PrevBlock:  *prevHash,
		MerkleRoot: *merkles[len(merkles)-1],
		Timestamp:  ts,
		Bits:       requiredDifficulty,
	}
	for _, tx := range blockTxns {
		if err := msgBlock.AddTransaction(tx.MsgTx()); err != nil {
			return nil, err
		}
	}

	// Finally, perform a full check on the created block against the chain
	// consensus rules to ensure it properly connects to the current best
	// chain with no issues.
	block := btcutil.NewBlock(&msgBlock)
	block.SetHeight(nextBlockHeight)
	if err := blockManager.CheckConnectBlock(block); err != nil {
		return nil, err
	}

	minrLog.Debugf("Created new block template (%d transactions, %d in "+
		"fees, %d signature operations, %d bytes, target difficulty "+
		"%064x)", len(msgBlock.Transactions), totalFees, blockSigOps,
		blockSize, blockchain.CompactToBig(msgBlock.Header.Bits))

	return &BlockTemplate{
		block:           &msgBlock,
		fees:            txFees,
		sigOpCounts:     txSigOpCounts,
		height:          nextBlockHeight,
		validPayAddress: payToAddress != nil,
	}, nil
}
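// sketchTxnCountRefund is a hypothetical helper (not part of the original
// file) illustrating the var-int accounting NewBlockTemplate performs above:
// the starting block size reserves wire.MaxVarIntPayload (nine bytes) for the
// transaction count, and once the final count is known the unused portion is
// subtracted back out.  For example, a template with 2,000 transactions
// encodes its count in three bytes, so six reserved bytes are returned.
func sketchTxnCountRefund(numBlockTxns int) uint32 {
	actual := uint32(wire.VarIntSerializeSize(uint64(numBlockTxns)))
	return uint32(wire.MaxVarIntPayload) - actual
}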