// getDifficultyRatio returns the proof-of-work difficulty as a multiple of the
// minimum difficulty using the passed bits field from the header of a block.
func getDifficultyRatio(bits uint32) float64 {
	// The minimum difficulty is the max possible proof-of-work limit bits
	// converted back to a number.  Note this is not the same as the
	// proof of work limit directly because the block difficulty is encoded
	// in a block with the compact form which loses precision.
	max := btcchain.CompactToBig(activeNetParams.powLimitBits)
	target := btcchain.CompactToBig(bits)

	difficulty := new(big.Rat).SetFrac(max, target)
	outString := difficulty.FloatString(2)
	diff, err := strconv.ParseFloat(outString, 64)
	if err != nil {
		rpcsLog.Errorf("Cannot get difficulty: %v", err)
		return 0
	}
	return diff
}
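// exampleDifficultyRatio is a minimal, hypothetical sketch (not part of the
// RPC server) of the same calculation getDifficultyRatio performs, with the
// proof-of-work limit bits passed in explicitly instead of read from
// activeNetParams.  For the main network the limit bits would be 0x1d00ffff.
func exampleDifficultyRatio(powLimitBits, bits uint32) float64 {
	max := btcchain.CompactToBig(powLimitBits)
	target := btcchain.CompactToBig(bits)

	// difficulty ratio = powLimit / target, as a float.
	ratio, _ := new(big.Rat).SetFrac(max, target).Float64()
	return ratio
}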
// This example demonstrates how to convert the compact "bits" in a block header
// which represent the target difficulty to a big integer and display it using
// the typical hex notation.
func ExampleCompactToBig() {
	// Convert the bits from block 300000 in the main block chain.
	bits := uint32(419465580)
	targetDifficulty := btcchain.CompactToBig(bits)

	// Display it in hex.
	fmt.Printf("%064x\n", targetDifficulty.Bytes())

	// Output:
	// 0000000000000000896c00000000000000000000000000000000000000000000
}
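// The following example is an addition that shows the conversion also
// round-trips through the complementary btcchain.BigToCompact function,
// assuming no example of the same name already exists in the package.
func ExampleBigToCompact() {
	// Convert the bits from block 300000 to a big integer and back.
	bits := uint32(419465580)
	targetDifficulty := btcchain.CompactToBig(bits)

	// The compact representation is recovered exactly.
	fmt.Println(btcchain.BigToCompact(targetDifficulty) == bits)

	// Output:
	// true
}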
func TestCompactToBig(t *testing.T) {
	tests := []struct {
		in  uint32
		out int64
	}{
		{10000000, 0},
	}

	for x, test := range tests {
		n := btcchain.CompactToBig(test.in)
		want := big.NewInt(test.out)
		if n.Cmp(want) != 0 {
			t.Errorf("TestCompactToBig test #%d failed: got %d want %d\n",
				x, n.Int64(), want.Int64())
			return
		}
	}
}
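// compactToBigSketch is an illustrative helper, not used by the test above,
// that sketches the decoding rule being exercised: the high byte of the
// compact value is a base-256 exponent and the low 23 bits are the mantissa.
// The real btcchain.CompactToBig additionally applies the sign bit, which
// this sketch ignores.
func compactToBigSketch(compact uint32) *big.Int {
	mantissa := compact & 0x007fffff
	exponent := uint(compact >> 24)
	if exponent <= 3 {
		// Small exponents shift the mantissa right, which is why an
		// input such as 10000000 (exponent byte 0x00) decodes to zero.
		return big.NewInt(int64(mantissa >> (8 * (3 - exponent))))
	}
	return new(big.Int).Lsh(big.NewInt(int64(mantissa)), 8*(exponent-3))
}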
// NewBlockTemplate returns a new block template that is ready to be solved
// using the transactions from the passed transaction memory pool and a coinbase
// that either pays to the passed address if it is not nil, or a coinbase that
// is redeemable by anyone if the passed address is nil.  The nil address
// functionality is useful since there are cases such as the getblocktemplate
// RPC where external mining software is responsible for creating their own
// coinbase which will replace the one generated for the block template.  Thus
// the need to have a configured address can be avoided.
//
// The transactions selected and included are prioritized according to several
// factors.  First, each transaction has a priority calculated based on its
// value, age of inputs, and size.  Transactions which consist of larger
// amounts, older inputs, and small sizes have the highest priority.  Second, a
// fee per kilobyte is calculated for each transaction.  Transactions with a
// higher fee per kilobyte are preferred.  Finally, the block generation related
// configuration options are all taken into account.
//
// Transactions which only spend outputs from other transactions already in the
// block chain are immediately added to a priority queue which either
// prioritizes based on the priority (then fee per kilobyte) or the fee per
// kilobyte (then priority) depending on whether or not the BlockPrioritySize
// configuration option allots space for high-priority transactions.
// Transactions which spend outputs from other transactions in the memory pool
// are added to a dependency map so they can be added to the priority queue once
// the transactions they depend on have been included.
//
// Once the high-priority area (if configured) has been filled with
// transactions, or the priority falls below what is considered high-priority,
// the priority queue is updated to prioritize by fees per kilobyte (then
// priority).
//
// When the fees per kilobyte drop below the TxMinFreeFee configuration option,
// the transaction will be skipped unless there is a BlockMinSize set, in which
// case the block will be filled with the low-fee/free transactions until the
// block size reaches that minimum size.
//
// Any transactions which would cause the block to exceed the BlockMaxSize
// configuration option, exceed the maximum allowed signature operations per
// block, or otherwise cause the block to be invalid are skipped.
//
// Given the above, a block generated by this function is of the following form:
//
//   -----------------------------------  --  --
//   |      Coinbase Transaction         |   |   |
//   |-----------------------------------|   |   |
//   |                                   |   |   | ----- cfg.BlockPrioritySize
//   |   High-priority Transactions      |   |   |
//   |                                   |   |   |
//   |-----------------------------------|   | --
//   |                                   |   |
//   |                                   |   |
//   |                                   |   |--- cfg.BlockMaxSize
//   |  Transactions prioritized by fee  |   |
//   |  until <= cfg.TxMinFreeFee        |   |
//   |                                   |   |
//   |                                   |   |
//   |                                   |   |
//   |-----------------------------------|   |
//   |  Low-fee/Non high-priority (free) |   |
//   |  transactions (while block size   |   |
//   |  <= cfg.BlockMinSize)             |   |
//   -----------------------------------  --
func NewBlockTemplate(mempool *txMemPool, payToAddress btcutil.Address) (*BlockTemplate, error) {
	blockManager := mempool.server.blockManager
	chainState := &blockManager.chainState
	chain := blockManager.blockChain

	// Extend the most recently known best block.
	chainState.Lock()
	prevHash := chainState.newestHash
	nextBlockHeight := chainState.newestHeight + 1
	chainState.Unlock()

	// Create a standard coinbase transaction paying to the provided
	// address.  NOTE: The coinbase value will be updated to include the
	// fees from the selected transactions later after they have actually
	// been selected.  It is created here to detect any errors early
	// before potentially doing a lot of work below.  The extra nonce helps
	// ensure the transaction is not a duplicate transaction (paying the
	// same value to the same public key address would otherwise be an
	// identical transaction for block version 1).
	extraNonce := uint64(0)
	coinbaseScript := standardCoinbaseScript(nextBlockHeight, extraNonce)
	coinbaseTx, err := createCoinbaseTx(coinbaseScript, nextBlockHeight,
		payToAddress)
	if err != nil {
		return nil, err
	}
	numCoinbaseSigOps := int64(btcchain.CountSigOps(coinbaseTx))

	// Get the current memory pool transactions and create a priority queue
	// to hold the transactions which are ready for inclusion into a block
	// along with some priority related and fee metadata.  Reserve the same
	// number of items that are in the memory pool for the priority queue.
	// Also, choose the initial sort order for the priority queue based on
	// whether or not there is an area allocated for high-priority
	// transactions.
	mempoolTxns := mempool.TxDescs()
	sortedByFee := cfg.BlockPrioritySize == 0
	priorityQueue := newTxPriorityQueue(len(mempoolTxns), sortedByFee)

	// Create a slice to hold the transactions to be included in the
	// generated block with reserved space.  Also create a transaction
	// store to house all of the input transactions so multiple lookups
	// can be avoided.
	blockTxns := make([]*btcutil.Tx, 0, len(mempoolTxns))
	blockTxns = append(blockTxns, coinbaseTx)
	blockTxStore := make(btcchain.TxStore)

	// dependers is used to track transactions which depend on another
	// transaction in the memory pool.  This, in conjunction with the
	// dependsOn map kept with each dependent transaction, helps quickly
	// determine which dependent transactions are now eligible for
	// inclusion in the block once each transaction has been included.
	dependers := make(map[btcwire.ShaHash]*list.List)

	// Create slices to hold the fees and number of signature operations
	// for each of the selected transactions and add an entry for the
	// coinbase.  This allows the code below to simply append details about
	// a transaction as it is selected for inclusion in the final block.
	// However, since the total fees aren't known yet, use a dummy value for
	// the coinbase fee which will be updated later.
	txFees := make([]int64, 0, len(mempoolTxns))
	txSigOpCounts := make([]int64, 0, len(mempoolTxns))
	txFees = append(txFees, -1) // Updated once known
	txSigOpCounts = append(txSigOpCounts, numCoinbaseSigOps)

	minrLog.Debugf("Considering %d mempool transactions for inclusion to "+
		"new block", len(mempoolTxns))

mempoolLoop:
	for _, txDesc := range mempoolTxns {
		// A block can't have more than one coinbase or contain
		// non-finalized transactions.
		tx := txDesc.Tx
		if btcchain.IsCoinBase(tx) {
			minrLog.Tracef("Skipping coinbase tx %s", tx.Sha())
			continue
		}
		if !btcchain.IsFinalizedTransaction(tx, nextBlockHeight, time.Now()) {
			minrLog.Tracef("Skipping non-finalized tx %s", tx.Sha())
			continue
		}

		// Fetch all of the transactions referenced by the inputs to
		// this transaction.
		// NOTE: This intentionally does not fetch inputs from the
		// mempool since a transaction which depends on other
		// transactions in the mempool must come after those
		// dependencies in the final generated block.
		txStore, err := chain.FetchTransactionStore(tx)
		if err != nil {
			minrLog.Warnf("Unable to fetch transaction store for "+
				"tx %s: %v", tx.Sha(), err)
			continue
		}

		// Calculate the input value age sum for the transaction.  This
		// is comprised of the sum of all input amounts multiplied by
		// their respective age (number of confirmations since the
		// referenced input transaction).  While doing the above, also
		// set up dependencies for any transactions which reference
		// other transactions in the mempool so they can be properly
		// ordered below.
		prioItem := &txPrioItem{tx: txDesc.Tx}
		inputValueAge := float64(0.0)
		for _, txIn := range tx.MsgTx().TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			txData, exists := txStore[*originHash]
			if !exists || txData.Err != nil || txData.Tx == nil {
				if !mempool.HaveTransaction(originHash) {
					minrLog.Tracef("Skipping tx %s because "+
						"it references tx %s which is "+
						"not available", tx.Sha(),
						originHash)
					continue mempoolLoop
				}

				// The transaction is referencing another
				// transaction in the memory pool, so set up an
				// ordering dependency.
				depList, exists := dependers[*originHash]
				if !exists {
					depList = list.New()
					dependers[*originHash] = depList
				}
				depList.PushBack(prioItem)
				if prioItem.dependsOn == nil {
					prioItem.dependsOn = make(
						map[btcwire.ShaHash]struct{})
				}
				prioItem.dependsOn[*originHash] = struct{}{}

				// No need to calculate or sum input value age
				// for this input since it's zero due to the
				// input age multiplier of 0.
				continue
			}

			// Ensure the output index in the referenced transaction
			// is available.
			msgTx := txData.Tx.MsgTx()
			if originIndex >= uint32(len(msgTx.TxOut)) {
				minrLog.Tracef("Skipping tx %s because it "+
					"references output %d of tx %s which "+
					"is out of bounds", tx.Sha(),
					originIndex, originHash)
				continue mempoolLoop
			}

			// Sum the input value times age.
			originTxOut := txData.Tx.MsgTx().TxOut[originIndex]
			inputValue := originTxOut.Value
			inputAge := nextBlockHeight - txData.BlockHeight
			inputValueAge += float64(inputValue * inputAge)
		}

		// Calculate the final transaction priority using the input
		// value age sum as well as the adjusted transaction size.  The
		// formula is: sum(inputValue * inputAge) / adjustedTxSize
		txSize := tx.MsgTx().SerializeSize()
		prioItem.priority = calcPriority(tx, txSize, inputValueAge)

		// Calculate the fee in Satoshi/KB.
		// NOTE: This is a more precise value than the one calculated
		// during calcMinRelayFee which rounds up to the nearest full
		// kilobyte boundary.  This is beneficial since it provides an
		// incentive to create smaller transactions.
		prioItem.feePerKB = float64(txDesc.Fee) / (float64(txSize) / 1000)
		prioItem.fee = txDesc.Fee

		// Add the transaction to the priority queue to mark it ready
		// for inclusion in the block unless it has dependencies.
		if prioItem.dependsOn == nil {
			heap.Push(priorityQueue, prioItem)
		}

		// Merge the store which contains all of the input transactions
		// for this transaction into the input transaction store.  This
		// allows the code below to avoid a second lookup.
		mergeTxStore(blockTxStore, txStore)
	}

	minrLog.Tracef("Priority queue len %d, dependers len %d",
		priorityQueue.Len(), len(dependers))

	// The starting block size is the size of the block header plus the max
	// possible transaction count size, plus the size of the coinbase
	// transaction.
	blockSize := blockHeaderOverhead + uint32(coinbaseTx.MsgTx().SerializeSize())
	blockSigOps := numCoinbaseSigOps
	totalFees := int64(0)

	// Choose which transactions make it into the block.
	for priorityQueue.Len() > 0 {
		// Grab the highest priority (or highest fee per kilobyte
		// depending on the sort order) transaction.
		prioItem := heap.Pop(priorityQueue).(*txPrioItem)
		tx := prioItem.tx

		// Grab the list of transactions which depend on this one (if
		// any) and remove the entry for this transaction as it will
		// either be included or skipped, but in either case the deps
		// are no longer needed.
		deps := dependers[*tx.Sha()]
		delete(dependers, *tx.Sha())

		// Enforce maximum block size.  Also check for overflow.
		txSize := uint32(tx.MsgTx().SerializeSize())
		blockPlusTxSize := blockSize + txSize
		if blockPlusTxSize < blockSize ||
			blockPlusTxSize >= cfg.BlockMaxSize {

			minrLog.Tracef("Skipping tx %s because it would exceed "+
				"the max block size", tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}

		// Enforce maximum signature operations per block.  Also check
		// for overflow.
		numSigOps := int64(btcchain.CountSigOps(tx))
		if blockSigOps+numSigOps < blockSigOps ||
			blockSigOps+numSigOps > btcchain.MaxSigOpsPerBlock {

			minrLog.Tracef("Skipping tx %s because it would "+
				"exceed the maximum sigops per block", tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}
		numP2SHSigOps, err := btcchain.CountP2SHSigOps(tx, false,
			blockTxStore)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"CountP2SHSigOps: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}
		numSigOps += int64(numP2SHSigOps)
		if blockSigOps+numSigOps < blockSigOps ||
			blockSigOps+numSigOps > btcchain.MaxSigOpsPerBlock {

			minrLog.Tracef("Skipping tx %s because it would "+
				"exceed the maximum sigops per block (p2sh)",
				tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}

		// Skip free transactions once the block is larger than the
		// minimum block size.
		if sortedByFee && prioItem.feePerKB < minTxRelayFee &&
			blockPlusTxSize >= cfg.BlockMinSize {

			minrLog.Tracef("Skipping tx %s with feePerKB %.2f "+
				"< minTxRelayFee %d and block size %d >= "+
				"minBlockSize %d", tx.Sha(), prioItem.feePerKB,
				minTxRelayFee, blockPlusTxSize,
				cfg.BlockMinSize)
			logSkippedDeps(tx, deps)
			continue
		}

		// Prioritize by fee per kilobyte once the block is larger than
		// the priority size or there are no more high-priority
		// transactions.
		if !sortedByFee && (blockPlusTxSize >= cfg.BlockPrioritySize ||
			prioItem.priority <= minHighPriority) {

			minrLog.Tracef("Switching to sort by fees per "+
				"kilobyte blockSize %d >= BlockPrioritySize "+
				"%d || priority %.2f <= minHighPriority %.2f",
				blockPlusTxSize, cfg.BlockPrioritySize,
				prioItem.priority, minHighPriority)
			sortedByFee = true
			priorityQueue.SetLessFunc(txPQByFee)

			// Put the transaction back into the priority queue and
			// skip it so it is re-prioritized by fees if it won't
			// fit into the high-priority section or the priority
			// is too low.  Otherwise this transaction will be the
			// final one in the high-priority section, so just fall
			// through to the code below so it is added now.
			if blockPlusTxSize > cfg.BlockPrioritySize ||
				prioItem.priority < minHighPriority {

				heap.Push(priorityQueue, prioItem)
				continue
			}
		}

		// Ensure the transaction inputs pass all of the necessary
		// preconditions before allowing it to be added to the block.
		_, err = btcchain.CheckTransactionInputs(tx, nextBlockHeight,
			blockTxStore)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"CheckTransactionInputs: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}
		err = btcchain.ValidateTransactionScripts(tx, blockTxStore,
			standardScriptVerifyFlags)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"ValidateTransactionScripts: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}

		// Spend the transaction inputs in the block transaction store
		// and add an entry for it to ensure any transactions which
		// reference this one have it available as an input and can
		// ensure they aren't double spending.
		spendTransaction(blockTxStore, tx, nextBlockHeight)

		// Add the transaction to the block, increment counters, and
		// save the fees and signature operation counts to the block
		// template.
		blockTxns = append(blockTxns, tx)
		blockSize += txSize
		blockSigOps += numSigOps
		totalFees += prioItem.fee
		txFees = append(txFees, prioItem.fee)
		txSigOpCounts = append(txSigOpCounts, numSigOps)

		minrLog.Tracef("Adding tx %s (priority %.2f, feePerKB %.2f)",
			prioItem.tx.Sha(), prioItem.priority, prioItem.feePerKB)

		// Add transactions which depend on this one (and also do not
		// have any other unsatisfied dependencies) to the priority
		// queue.
		if deps != nil {
			for e := deps.Front(); e != nil; e = e.Next() {
				// Add the transaction to the priority queue if
				// there are no more dependencies after this
				// one.
				item := e.Value.(*txPrioItem)
				delete(item.dependsOn, *tx.Sha())
				if len(item.dependsOn) == 0 {
					heap.Push(priorityQueue, item)
				}
			}
		}
	}

	// Now that the actual transactions have been selected, update the
	// block size for the real transaction count and coinbase value with
	// the total fees accordingly.
	blockSize -= btcwire.MaxVarIntPayload -
		uint32(btcwire.VarIntSerializeSize(uint64(len(blockTxns))))
	coinbaseTx.MsgTx().TxOut[0].Value += totalFees
	txFees[0] = -totalFees

	// Calculate the required difficulty for the block.  The timestamp
	// is potentially adjusted to ensure it comes after the median time of
	// the last several blocks per the chain consensus rules.
	ts, err := medianAdjustedTime(chainState)
	if err != nil {
		return nil, err
	}
	requiredDifficulty, err := blockManager.CalcNextRequiredDifficulty(ts)
	if err != nil {
		return nil, err
	}

	// Create a new block ready to be solved.
	merkles := btcchain.BuildMerkleTreeStore(blockTxns)
	var msgBlock btcwire.MsgBlock
	msgBlock.Header = btcwire.BlockHeader{
		Version:    generatedBlockVersion,
		PrevBlock:  *prevHash,
		MerkleRoot: *merkles[len(merkles)-1],
		Timestamp:  ts,
		Bits:       requiredDifficulty,
	}
	for _, tx := range blockTxns {
		if err := msgBlock.AddTransaction(tx.MsgTx()); err != nil {
			return nil, err
		}
	}

	// Finally, perform a full check on the created block against the chain
	// consensus rules to ensure it properly connects to the current best
	// chain with no issues.
	block := btcutil.NewBlock(&msgBlock)
	block.SetHeight(nextBlockHeight)
	if err := blockManager.CheckConnectBlock(block); err != nil {
		return nil, err
	}

	minrLog.Debugf("Created new block template (%d transactions, %d in "+
		"fees, %d signature operations, %d bytes, target difficulty "+
		"%064x)", len(msgBlock.Transactions), totalFees, blockSigOps,
		blockSize, btcchain.CompactToBig(msgBlock.Header.Bits))

	return &BlockTemplate{
		block:           &msgBlock,
		fees:            txFees,
		sigOpCounts:     txSigOpCounts,
		height:          nextBlockHeight,
		validPayAddress: payToAddress != nil,
	}, nil
}
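// exampleOrderingMetrics is a worked, hypothetical sketch of the two ordering
// metrics NewBlockTemplate uses when selecting transactions.  The numbers are
// made up, and the priority shown here skips the per-input size adjustment
// the real calcPriority applies to the serialized transaction size.
func exampleOrderingMetrics() (feePerKB, priority float64) {
	txSize := 250                   // serialized size in bytes
	fee := int64(10000)             // fee in satoshi
	inputValue := int64(500000000)  // one 5 BTC input, in satoshi
	inputAge := int64(10)           // confirmations of that input

	// Matches prioItem.feePerKB: 10000 / (250/1000) = 40000 satoshi/KB.
	feePerKB = float64(fee) / (float64(txSize) / 1000)

	// Matches sum(inputValue * inputAge) / txSize, without the size
	// adjustment: (500000000 * 10) / 250 = 20000000.
	priority = float64(inputValue*inputAge) / float64(txSize)
	return feePerKB, priority
}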
// solveBlock attempts to find some combination of a nonce, extra nonce, and
// current timestamp which makes the passed block hash to a value less than the
// target difficulty.  The timestamp is updated periodically and the passed
// block is modified with all tweaks during this process.  This means that
// when the function returns true, the block is ready for submission.
//
// This function returns false early when a condition that would make the block
// stale occurs, such as a new block arriving at the best chain tip, or
// periodically when new transactions have entered the memory pool and enough
// time has elapsed without finding a solution.
func (m *CPUMiner) solveBlock(msgBlock *btcwire.MsgBlock, blockHeight int64,
	ticker *time.Ticker, quit chan struct{}) bool {

	// Choose a random extra nonce offset for this block template and
	// worker.
	enOffset, err := btcwire.RandomUint64()
	if err != nil {
		minrLog.Errorf("Unexpected error while generating random "+
			"extra nonce offset: %v", err)
		enOffset = 0
	}

	// Create a couple of convenience variables.
	header := &msgBlock.Header
	targetDifficulty := btcchain.CompactToBig(header.Bits)

	// Initial state.
	lastGenerated := time.Now()
	lastTxUpdate := m.server.txMemPool.LastUpdated()
	hashesCompleted := uint64(0)

	// Note that the entire extra nonce range is iterated and the offset is
	// added relying on the fact that overflow will wrap around 0 as
	// provided by the Go spec.
	for extraNonce := uint64(0); extraNonce < maxExtraNonce; extraNonce++ {
		// Update the extra nonce in the block template with the new
		// value by regenerating the coinbase script and setting the
		// merkle root to the new value.
		UpdateExtraNonce(msgBlock, blockHeight, extraNonce+enOffset)

		// Search through the entire nonce range for a solution while
		// periodically checking for early quit and stale block
		// conditions along with updates to the speed monitor.
		for i := uint32(0); i <= maxNonce; i++ {
			select {
			case <-quit:
				return false

			case <-ticker.C:
				m.updateHashes <- hashesCompleted
				hashesCompleted = 0

				// The current block is stale if the best block
				// has changed.
				bestHash, _ := m.server.blockManager.chainState.Best()
				if !header.PrevBlock.IsEqual(bestHash) {
					return false
				}

				// The current block is stale if the memory pool
				// has been updated since the block template was
				// generated and it has been at least one
				// minute.
				if lastTxUpdate != m.server.txMemPool.LastUpdated() &&
					time.Now().After(lastGenerated.Add(time.Minute)) {

					return false
				}

				UpdateBlockTime(msgBlock, m.server.blockManager)

			default:
				// Non-blocking select to fall through
			}

			// Update the nonce and hash the block header.  Each
			// hash is actually a double sha256 (two hashes), so
			// increment the number of hashes completed for each
			// attempt accordingly.
			header.Nonce = i
			hash, _ := header.BlockSha()
			hashesCompleted += 2

			// The block is solved when the new block hash is less
			// than the target difficulty.  Yay!
			if btcchain.ShaHashToBig(&hash).Cmp(targetDifficulty) <= 0 {
				m.updateHashes <- hashesCompleted
				return true
			}
		}
	}

	return false
}
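// hashMeetsTarget is an illustrative helper, not part of the miner, that
// isolates the solution test solveBlock performs: a header hash solves the
// block when, interpreted as a big integer, it does not exceed the target
// encoded by the compact bits field of the header.
func hashMeetsTarget(hash *btcwire.ShaHash, bits uint32) bool {
	target := btcchain.CompactToBig(bits)
	return btcchain.ShaHashToBig(hash).Cmp(target) <= 0
}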