Example #1
// BenchmarkIsCoinBase performs a simple benchmark against the IsCoinBase
// function.
func BenchmarkIsCoinBase(b *testing.B) {
	tx, _ := coinutil.NewBlock(&Block100000).Tx(1)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		blockchain.IsCoinBase(tx)
	}
}
Example #2
// This example demonstrates creating a new database and inserting the genesis
// block into it.
func ExampleCreateDB() {
	// Notice in these example imports that the memdb driver is loaded.
	// Ordinarily this would be whatever driver(s) your application
	// requires.
	// import (
	//	"github.com/conseweb/stcd/database"
	// 	_ "github.com/conseweb/stcd/database/memdb"
	// )

	// Create a database and schedule it to be closed on exit.  This example
	// uses a memory-only database to avoid needing to write anything to
	// the disk.  Typically, you would specify a persistent database driver
	// such as "leveldb" and give it a database name as the second
	// parameter.
	db, err := database.CreateDB("memdb")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()

	// Insert the main network genesis block.
	genesis := coinutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	newHeight, err := db.InsertBlock(genesis)
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println("New height:", newHeight)

	// Output:
	// New height: 0
}
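As the comments above note, a persistent backend takes a path as its second parameter. A minimal sketch of that variant, assuming the "leveldb" driver has been registered through a blank import analogous to the memdb one (the exact driver package path and the "exampledb" path are illustrative assumptions):

// createPersistentDB mirrors ExampleCreateDB but with the persistent
// "leveldb" backend described in the comments above.  The caller is
// responsible for closing the returned database.
func createPersistentDB() (database.Db, error) {
	db, err := database.CreateDB("leveldb", "exampledb")
	if err != nil {
		return nil, err
	}
	return db, nil
}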
Example #3
// TestMerkle tests the BuildMerkleTreeStore API.
func TestMerkle(t *testing.T) {
	block := coinutil.NewBlock(&Block100000)
	merkles := blockchain.BuildMerkleTreeStore(block.Transactions())
	calculatedMerkleRoot := merkles[len(merkles)-1]
	wantMerkle := &Block100000.Header.MerkleRoot
	if !wantMerkle.IsEqual(calculatedMerkleRoot) {
		t.Errorf("BuildMerkleTreeStore: merkle root mismatch - "+
			"got %v, want %v", calculatedMerkleRoot, wantMerkle)
	}
}
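The test relies on the convention that the last entry of the store returned by BuildMerkleTreeStore is the merkle root. A small helper distilled from the test above (the name merkleRoot is only illustrative):

// merkleRoot returns the merkle root of the block's transactions, which
// BuildMerkleTreeStore places in the final slot of the returned store.
func merkleRoot(block *coinutil.Block) *wire.ShaHash {
	merkles := blockchain.BuildMerkleTreeStore(block.Transactions())
	return merkles[len(merkles)-1]
}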
Example #4
// exampleLoadDB is used in the example to elide the setup code.
func exampleLoadDB() (database.Db, error) {
	db, err := database.CreateDB("memdb")
	if err != nil {
		return nil, err
	}

	// Insert the main network genesis block.
	genesis := coinutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	_, err = db.InsertBlock(genesis)
	if err != nil {
		return nil, err
	}

	return db, err
}
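A sketch of the kind of example function this helper supports, using the NewestSha API exercised elsewhere on this page; the function name and output formatting are illustrative only:

// exampleNewestSha loads the example database and reports its current tip.
func exampleNewestSha() {
	db, err := exampleLoadDB()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()

	latestHash, latestHeight, err := db.NewestSha()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Latest height:", latestHeight, "hash:", latestHash)
}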
Example #5
// FetchBlockBySha returns a coinutil.Block.  The implementation may cache the
// underlying data if desired.  This is part of the database.Db interface
// implementation.
//
// This implementation does not use any additional cache since the entire
// database is already in memory.
func (db *MemDb) FetchBlockBySha(sha *wire.ShaHash) (*coinutil.Block, error) {
	db.Lock()
	defer db.Unlock()

	if db.closed {
		return nil, ErrDbClosed
	}

	if blockHeight, exists := db.blocksBySha[*sha]; exists {
		block := coinutil.NewBlock(db.blocks[int(blockHeight)])
		block.SetHeight(blockHeight)
		return block, nil
	}

	return nil, fmt.Errorf("block %v is not in database", sha)
}
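A minimal caller-side sketch for this method, assuming a database that already contains the genesis block (for example one returned by exampleLoadDB above); the helper name is illustrative:

// fetchGenesis looks the genesis block back up by its hash.
func fetchGenesis(db database.Db) (*coinutil.Block, error) {
	genesisHash := chaincfg.MainNetParams.GenesisHash
	block, err := db.FetchBlockBySha(genesisHash)
	if err != nil {
		return nil, err
	}
	fmt.Println("Fetched block at height", block.Height())
	return block, nil
}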
Example #6
// This example demonstrates how to create a new chain instance and use
// ProcessBlock to attempt to add a block to the chain.  As the package
// overview documentation describes, this includes all of the Bitcoin consensus
// rules.  This example intentionally attempts to insert a duplicate genesis
// block to illustrate how an invalid block is handled.
func ExampleBlockChain_ProcessBlock() {
	// Create a new database to store the accepted blocks into.  Typically
	// this would be opening an existing database and would not use memdb
	// which is a memory-only database backend, but we create a new db
	// here so this is a complete working example.
	db, err := database.CreateDB("memdb")
	if err != nil {
		fmt.Printf("Failed to create database: %v\n", err)
		return
	}
	defer db.Close()

	// Insert the main network genesis block.  This is part of the initial
	// database setup.  Like above, this typically would not be needed when
	// opening an existing database.
	genesisBlock := coinutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	_, err = db.InsertBlock(genesisBlock)
	if err != nil {
		fmt.Printf("Failed to insert genesis block: %v\n", err)
		return
	}

	// Create a new BlockChain instance without an initialized signature
	// verification cache, using the underlying database for the main
	// bitcoin network and ignore notifications.
	chain := blockchain.New(db, &chaincfg.MainNetParams, nil, nil)

	// Create a new median time source that is required by the upcoming
	// call to ProcessBlock.  Ordinarily this would also add time values
	// obtained from other peers on the network so the local time is
	// adjusted to be in agreement with other peers.
	timeSource := blockchain.NewMedianTime()

	// Process a block.  For this example, we are going to intentionally
	// cause an error by trying to process the genesis block which already
	// exists.
	isOrphan, err := chain.ProcessBlock(genesisBlock, timeSource, blockchain.BFNone)
	if err != nil {
		fmt.Printf("Failed to process block: %v\n", err)
		return
	}
	fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan)

	// Output:
	// Failed to process block: already have block 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
}
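A hedged sketch of how a caller might avoid the duplicate-block error shown above by consulting HaveBlock (demonstrated later on this page) before processing; processIfNew is an illustrative name:

// processIfNew processes the block only when the chain does not already
// know about it.  It returns true when the block was accepted into the
// main chain or a side chain (i.e. it is not an orphan).
func processIfNew(chain *blockchain.BlockChain, block *coinutil.Block) (bool, error) {
	have, err := chain.HaveBlock(block.Sha())
	if err != nil || have {
		return false, err
	}
	// A fresh median time source; ordinarily the caller would reuse one
	// that has been fed time samples from peers.
	isOrphan, err := chain.ProcessBlock(block, blockchain.NewMedianTime(), blockchain.BFNone)
	if err != nil {
		return false, err
	}
	return !isOrphan, nil
}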
Example #7
// setupDB is used to create a new db instance with the genesis block already
// inserted.  In addition to the new db instance, it returns a teardown function
// the caller should invoke when done testing to clean up.
func setupDB(dbType, dbName string) (database.Db, func(), error) {
	db, teardown, err := createDB(dbType, dbName, true)
	if err != nil {
		return nil, nil, err
	}

	// Insert the main network genesis block.  This is part of the initial
	// database setup.
	genesisBlock := coinutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	_, err = db.InsertBlock(genesisBlock)
	if err != nil {
		teardown()
		err := fmt.Errorf("failed to insert genesis block: %v", err)
		return nil, nil, err
	}

	return db, teardown, nil
}
Example #8
// TestCheckBlockSanity tests the CheckBlockSanity function to ensure it works
// as expected.
func TestCheckBlockSanity(t *testing.T) {
	powLimit := chaincfg.MainNetParams.PowLimit
	block := coinutil.NewBlock(&Block100000)
	timeSource := blockchain.NewMedianTime()
	err := blockchain.CheckBlockSanity(block, powLimit, timeSource)
	if err != nil {
		t.Errorf("CheckBlockSanity: %v", err)
	}

	// Ensure a block that has a timestamp with a precision higher than one
	// second fails.
	timestamp := block.MsgBlock().Header.Timestamp
	block.MsgBlock().Header.Timestamp = timestamp.Add(time.Nanosecond)
	err = blockchain.CheckBlockSanity(block, powLimit, timeSource)
	if err == nil {
		t.Errorf("CheckBlockSanity: error is nil when it shouldn't be")
	}
}
Example #9
// TestCheckConnectBlock tests the CheckConnectBlock function to ensure it
// fails as expected.
func TestCheckConnectBlock(t *testing.T) {
	// Create a new database and chain instance to run tests against.
	chain, teardownFunc, err := chainSetup("checkconnectblock")
	if err != nil {
		t.Errorf("Failed to setup chain instance: %v", err)
		return
	}
	defer teardownFunc()

	err = chain.GenerateInitialIndex()
	if err != nil {
		t.Errorf("GenerateInitialIndex: %v", err)
	}

	// The genesis block should fail to connect since it's already
	// inserted.
	genesisBlock := chaincfg.MainNetParams.GenesisBlock
	err = chain.CheckConnectBlock(coinutil.NewBlock(genesisBlock))
	if err == nil {
		t.Errorf("CheckConnectBlock: Did not received expected error")
	}
}
Example #10
// UpdateExtraNonce updates the extra nonce in the coinbase script of the passed
// block by regenerating the coinbase script with the passed value and block
// height.  It also recalculates and updates the new merkle root that results
// from changing the coinbase script.
func UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight int32, extraNonce uint64) error {
	coinbaseScript, err := standardCoinbaseScript(blockHeight, extraNonce)
	if err != nil {
		return err
	}
	if len(coinbaseScript) > blockchain.MaxCoinbaseScriptLen {
		return fmt.Errorf("coinbase transaction script length "+
			"of %d is out of range (min: %d, max: %d)",
			len(coinbaseScript), blockchain.MinCoinbaseScriptLen,
			blockchain.MaxCoinbaseScriptLen)
	}
	msgBlock.Transactions[0].TxIn[0].SignatureScript = coinbaseScript

	// TODO(davec): A coinutil.Block should be saved in the state to avoid
	// recalculating all of the other transaction hashes.
	// block.Transactions[0].InvalidateCache()

	// Recalculate the merkle root with the updated extra nonce.
	block := coinutil.NewBlock(msgBlock)
	merkles := blockchain.BuildMerkleTreeStore(block.Transactions())
	msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1]
	return nil
}
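A sketch of how a miner might roll the extra nonce with this function after exhausting the 32-bit header nonce; the loop bound and the comment about header hashing are placeholders, not part of the original code:

// rollExtraNonce regenerates the coinbase script and merkle root for a few
// successive extra nonce values.
func rollExtraNonce(msgBlock *wire.MsgBlock, blockHeight int32) error {
	for extraNonce := uint64(0); extraNonce < 10; extraNonce++ {
		if err := UpdateExtraNonce(msgBlock, blockHeight, extraNonce); err != nil {
			return err
		}
		// ... hash the updated header and test it against the target
		// difficulty here ...
	}
	return nil
}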
Example #11
// TestHaveBlock tests the HaveBlock API to ensure proper functionality.
func TestHaveBlock(t *testing.T) {
	// Load up blocks such that there is a side chain.
	// (genesis block) -> 1 -> 2 -> 3 -> 4
	//                          \-> 3a
	testFiles := []string{
		"blk_0_to_4.dat.bz2",
		"blk_3A.dat.bz2",
	}

	var blocks []*coinutil.Block
	for _, file := range testFiles {
		blockTmp, err := loadBlocks(file)
		if err != nil {
			t.Errorf("Error loading file: %v\n", err)
			return
		}
		for _, block := range blockTmp {
			blocks = append(blocks, block)
		}
	}

	// Create a new database and chain instance to run tests against.
	chain, teardownFunc, err := chainSetup("haveblock")
	if err != nil {
		t.Errorf("Failed to setup chain instance: %v", err)
		return
	}
	defer teardownFunc()

	// Since we're not dealing with the real block chain, disable
	// checkpoints and set the coinbase maturity to 1.
	chain.DisableCheckpoints(true)
	blockchain.TstSetCoinbaseMaturity(1)

	timeSource := blockchain.NewMedianTime()
	for i := 1; i < len(blocks); i++ {
		isOrphan, err := chain.ProcessBlock(blocks[i], timeSource,
			blockchain.BFNone)
		if err != nil {
			t.Errorf("ProcessBlock fail on block %v: %v\n", i, err)
			return
		}
		if isOrphan {
			t.Errorf("ProcessBlock incorrectly returned block %v "+
				"is an orphan\n", i)
			return
		}
	}

	// Insert an orphan block.
	isOrphan, err := chain.ProcessBlock(coinutil.NewBlock(&Block100000),
		timeSource, blockchain.BFNone)
	if err != nil {
		t.Errorf("Unable to process block: %v", err)
		return
	}
	if !isOrphan {
		t.Errorf("ProcessBlock indicated block is an not orphan when " +
			"it should be\n")
		return
	}

	tests := []struct {
		hash string
		want bool
	}{
		// Genesis block should be present (in the main chain).
		{hash: chaincfg.MainNetParams.GenesisHash.String(), want: true},

		// Block 3a should be present (on a side chain).
		{hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true},

		// Block 100000 should be present (as an orphan).
		{hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true},

		// Random hashes should not be available.
		{hash: "123", want: false},
	}

	for i, test := range tests {
		hash, err := wire.NewShaHashFromStr(test.hash)
		if err != nil {
			t.Errorf("NewShaHashFromStr: %v", err)
			continue
		}

		result, err := chain.HaveBlock(hash)
		if err != nil {
			t.Errorf("HaveBlock #%d unexpected error: %v", i, err)
			return
		}
		if result != test.want {
			t.Errorf("HaveBlock #%d got %v want %v", i, result,
				test.want)
			continue
		}
	}
}
Example #12
// TestClosed ensures calling the interface functions on a closed database
// returns appropriate errors for the interface functions that return errors
// and does not panic or otherwise misbehave for functions which do not return
// errors.
func TestClosed(t *testing.T) {
	db, err := database.CreateDB("memdb")
	if err != nil {
		t.Errorf("Failed to open test database %v", err)
		return
	}
	_, err = db.InsertBlock(coinutil.NewBlock(chaincfg.MainNetParams.GenesisBlock))
	if err != nil {
		t.Errorf("InsertBlock: %v", err)
	}
	if err := db.Close(); err != nil {
		t.Errorf("Close: unexpected error %v", err)
	}

	genesisHash := chaincfg.MainNetParams.GenesisHash
	if err := db.DropAfterBlockBySha(genesisHash); err != memdb.ErrDbClosed {
		t.Errorf("DropAfterBlockBySha: unexpected error %v", err)
	}

	if _, err := db.ExistsSha(genesisHash); err != memdb.ErrDbClosed {
		t.Errorf("ExistsSha: Unexpected error: %v", err)
	}

	if _, err := db.FetchBlockBySha(genesisHash); err != memdb.ErrDbClosed {
		t.Errorf("FetchBlockBySha: unexpected error %v", err)
	}

	if _, err := db.FetchBlockShaByHeight(0); err != memdb.ErrDbClosed {
		t.Errorf("FetchBlockShaByHeight: unexpected error %v", err)
	}

	if _, err := db.FetchHeightRange(0, 1); err != memdb.ErrDbClosed {
		t.Errorf("FetchHeightRange: unexpected error %v", err)
	}

	genesisCoinbaseTx := chaincfg.MainNetParams.GenesisBlock.Transactions[0]
	coinbaseHash := genesisCoinbaseTx.TxSha()
	if _, err := db.ExistsTxSha(&coinbaseHash); err != memdb.ErrDbClosed {
		t.Errorf("ExistsTxSha: unexpected error %v", err)
	}

	if _, err := db.FetchTxBySha(genesisHash); err != memdb.ErrDbClosed {
		t.Errorf("FetchTxBySha: unexpected error %v", err)
	}

	requestHashes := []*wire.ShaHash{genesisHash}
	reply := db.FetchTxByShaList(requestHashes)
	if len(reply) != len(requestHashes) {
		t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+
			"got: %d, want: %d", len(reply), len(requestHashes))
	}
	for i, txLR := range reply {
		wantReply := &database.TxListReply{
			Sha: requestHashes[i],
			Err: memdb.ErrDbClosed,
		}
		if !reflect.DeepEqual(wantReply, txLR) {
			t.Errorf("FetchTxByShaList unexpected reply\ngot: %v\n"+
				"want: %v", txLR, wantReply)
		}
	}

	reply = db.FetchUnSpentTxByShaList(requestHashes)
	if len(reply) != len(requestHashes) {
		t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+
			"got: %d, want: %d", len(reply), len(requestHashes))
	}
	for i, txLR := range reply {
		wantReply := &database.TxListReply{
			Sha: requestHashes[i],
			Err: memdb.ErrDbClosed,
		}
		if !reflect.DeepEqual(wantReply, txLR) {
			t.Errorf("FetchUnSpentTxByShaList unexpected reply\n"+
				"got: %v\nwant: %v", txLR, wantReply)
		}
	}

	if _, _, err := db.NewestSha(); err != memdb.ErrDbClosed {
		t.Errorf("NewestSha: unexpected error %v", err)
	}

	if err := db.Sync(); err != memdb.ErrDbClosed {
		t.Errorf("Sync: unexpected error %v", err)
	}

	if err := db.RollbackClose(); err != memdb.ErrDbClosed {
		t.Errorf("RollbackClose: unexpected error %v", err)
	}

	if err := db.Close(); err != memdb.ErrDbClosed {
		t.Errorf("Close: unexpected error %v", err)
	}
}
Example #13
// NewBlockTemplate returns a new block template that is ready to be solved
// using the transactions from the passed transaction source pool and a coinbase
// that either pays to the passed address if it is not nil, or a coinbase that
// is redeemable by anyone if the passed address is nil.  The nil address
// functionality is useful since there are cases such as the getblocktemplate
// RPC where external mining software is responsible for creating their own
// coinbase which will replace the one generated for the block template.  Thus
// the need to have a configured address can be avoided.
//
// The transactions selected and included are prioritized according to several
// factors.  First, each transaction has a priority calculated based on its
// value, age of inputs, and size.  Transactions which consist of larger
// amounts, older inputs, and small sizes have the highest priority.  Second, a
// fee per kilobyte is calculated for each transaction.  Transactions with a
// higher fee per kilobyte are preferred.  Finally, the block generation related
// policy settings are all taken into account.
//
// Transactions which only spend outputs from other transactions already in the
// block chain are immediately added to a priority queue which either
// prioritizes based on the priority (then fee per kilobyte) or the fee per
// kilobyte (then priority) depending on whether or not the BlockPrioritySize
// policy setting allots space for high-priority transactions.  Transactions
// which spend outputs from other transactions in the source pool are added to a
// dependency map so they can be added to the priority queue once the
// transactions they depend on have been included.
//
// Once the high-priority area (if configured) has been filled with
// transactions, or the priority falls below what is considered high-priority,
// the priority queue is updated to prioritize by fees per kilobyte (then
// priority).
//
// When the fees per kilobyte drop below the TxMinFreeFee policy setting, the
// transaction will be skipped unless the BlockMinSize policy setting is
// nonzero, in which case the block will be filled with the low-fee/free
// transactions until the block size reaches that minimum size.
//
// Any transactions which would cause the block to exceed the BlockMaxSize
// policy setting, exceed the maximum allowed signature operations per block, or
// otherwise cause the block to be invalid are skipped.
//
// Given the above, a block generated by this function is of the following form:
//
//   -----------------------------------  --  --
//  |      Coinbase Transaction         |   |   |
//  |-----------------------------------|   |   |
//  |                                   |   |   | ----- policy.BlockPrioritySize
//  |   High-priority Transactions      |   |   |
//  |                                   |   |   |
//  |-----------------------------------|   | --
//  |                                   |   |
//  |                                   |   |
//  |                                   |   |--- policy.BlockMaxSize
//  |  Transactions prioritized by fee  |   |
//  |  until <= policy.TxMinFreeFee     |   |
//  |                                   |   |
//  |                                   |   |
//  |                                   |   |
//  |-----------------------------------|   |
//  |  Low-fee/Non high-priority (free) |   |
//  |  transactions (while block size   |   |
//  |  <= policy.BlockMinSize)          |   |
//   -----------------------------------  --
func NewBlockTemplate(policy *mining.Policy, server *server, payToAddress coinutil.Address) (*BlockTemplate, error) {
	var txSource mining.TxSource = server.txMemPool
	blockManager := server.blockManager
	timeSource := server.timeSource
	chainState := &blockManager.chainState

	// Extend the most recently known best block.
	chainState.Lock()
	prevHash := chainState.newestHash
	nextBlockHeight := chainState.newestHeight + 1
	chainState.Unlock()

	// Create a standard coinbase transaction paying to the provided
	// address.  NOTE: The coinbase value will be updated to include the
	// fees from the selected transactions later after they have actually
	// been selected.  It is created here to detect any errors early
	// before potentially doing a lot of work below.  The extra nonce helps
	// ensure the transaction is not a duplicate transaction (paying the
	// same value to the same public key address would otherwise be an
	// identical transaction for block version 1).
	extraNonce := uint64(0)
	coinbaseScript, err := standardCoinbaseScript(nextBlockHeight, extraNonce)
	if err != nil {
		return nil, err
	}
	coinbaseTx, err := createCoinbaseTx(coinbaseScript, nextBlockHeight,
		payToAddress)
	if err != nil {
		return nil, err
	}
	numCoinbaseSigOps := int64(blockchain.CountSigOps(coinbaseTx))

	// Get the current source transactions and create a priority queue to
	// hold the transactions which are ready for inclusion into a block
	// along with some priority related and fee metadata.  Reserve the same
	// number of items that are available for the priority queue.  Also,
	// choose the initial sort order for the priority queue based on whether
	// or not there is an area allocated for high-priority transactions.
	sourceTxns := txSource.MiningDescs()
	sortedByFee := policy.BlockPrioritySize == 0
	priorityQueue := newTxPriorityQueue(len(sourceTxns), sortedByFee)

	// Create a slice to hold the transactions to be included in the
	// generated block with reserved space.  Also create a transaction
	// store to house all of the input transactions so multiple lookups
	// can be avoided.
	blockTxns := make([]*coinutil.Tx, 0, len(sourceTxns))
	blockTxns = append(blockTxns, coinbaseTx)
	blockTxStore := make(blockchain.TxStore)

	// dependers is used to track transactions which depend on another
	// transaction in the source pool.  This, in conjunction with the
	// dependsOn map kept with each dependent transaction helps quickly
	// determine which dependent transactions are now eligible for inclusion
	// in the block once each transaction has been included.
	dependers := make(map[wire.ShaHash]*list.List)

	// Create slices to hold the fees and number of signature operations
	// for each of the selected transactions and add an entry for the
	// coinbase.  This allows the code below to simply append details about
	// a transaction as it is selected for inclusion in the final block.
	// However, since the total fees aren't known yet, use a dummy value for
	// the coinbase fee which will be updated later.
	txFees := make([]int64, 0, len(sourceTxns))
	txSigOpCounts := make([]int64, 0, len(sourceTxns))
	txFees = append(txFees, -1) // Updated once known
	txSigOpCounts = append(txSigOpCounts, numCoinbaseSigOps)

	minrLog.Debugf("Considering %d transactions for inclusion to new block",
		len(sourceTxns))

mempoolLoop:
	for _, txDesc := range sourceTxns {
		// A block can't have more than one coinbase or contain
		// non-finalized transactions.
		tx := txDesc.Tx
		if blockchain.IsCoinBase(tx) {
			minrLog.Tracef("Skipping coinbase tx %s", tx.Sha())
			continue
		}
		if !blockchain.IsFinalizedTransaction(tx, nextBlockHeight,
			timeSource.AdjustedTime()) {

			minrLog.Tracef("Skipping non-finalized tx %s", tx.Sha())
			continue
		}

		// Fetch all of the transactions referenced by the inputs to
		// this transaction.  NOTE: This intentionally does not fetch
		// inputs from the mempool since a transaction which depends on
		// other transactions in the mempool must come after those
		// dependencies in the final generated block.
		txStore, err := blockManager.FetchTransactionStore(tx)
		if err != nil {
			minrLog.Warnf("Unable to fetch transaction store for "+
				"tx %s: %v", tx.Sha(), err)
			continue
		}

		// Setup dependencies for any transactions which reference
		// other transactions in the mempool so they can be properly
		// ordered below.
		prioItem := &txPrioItem{tx: tx}
		for _, txIn := range tx.MsgTx().TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			txData, exists := txStore[*originHash]
			if !exists || txData.Err != nil || txData.Tx == nil {
				if !txSource.HaveTransaction(originHash) {
					minrLog.Tracef("Skipping tx %s because "+
						"it references tx %s which is "+
						"not available", tx.Sha,
						originHash)
					continue mempoolLoop
				}

				// The transaction is referencing another
				// transaction in the source pool, so setup an
				// ordering dependency.
				depList, exists := dependers[*originHash]
				if !exists {
					depList = list.New()
					dependers[*originHash] = depList
				}
				depList.PushBack(prioItem)
				if prioItem.dependsOn == nil {
					prioItem.dependsOn = make(
						map[wire.ShaHash]struct{})
				}
				prioItem.dependsOn[*originHash] = struct{}{}

				// Skip the check below. We already know the
				// referenced transaction is available.
				continue
			}

			// Ensure the output index in the referenced transaction
			// is available.
			msgTx := txData.Tx.MsgTx()
			if originIndex >= uint32(len(msgTx.TxOut)) {
				minrLog.Tracef("Skipping tx %s because "+
					"it references output %d of tx %s "+
					"which is out of bounds", tx.Sha,
					originIndex, originHash)
				continue mempoolLoop
			}
		}

		// Calculate the final transaction priority using the input
		// value age sum as well as the adjusted transaction size.  The
		// formula is: sum(inputValue * inputAge) / adjustedTxSize
		prioItem.priority = calcPriority(tx.MsgTx(), txStore, nextBlockHeight)

		// Calculate the fee in Satoshi/kB.
		txSize := tx.MsgTx().SerializeSize()
		prioItem.feePerKB = (txDesc.Fee * 1000) / int64(txSize)
		prioItem.fee = txDesc.Fee

		// Add the transaction to the priority queue to mark it ready
		// for inclusion in the block unless it has dependencies.
		if prioItem.dependsOn == nil {
			heap.Push(priorityQueue, prioItem)
		}

		// Merge the store which contains all of the input transactions
		// for this transaction into the input transaction store.  This
		// allows the code below to avoid a second lookup.
		mergeTxStore(blockTxStore, txStore)
	}

	minrLog.Tracef("Priority queue len %d, dependers len %d",
		priorityQueue.Len(), len(dependers))

	// The starting block size is the size of the block header plus the max
	// possible transaction count size, plus the size of the coinbase
	// transaction.
	blockSize := blockHeaderOverhead + uint32(coinbaseTx.MsgTx().SerializeSize())
	blockSigOps := numCoinbaseSigOps
	totalFees := int64(0)

	// Choose which transactions make it into the block.
	for priorityQueue.Len() > 0 {
		// Grab the highest priority (or highest fee per kilobyte
		// depending on the sort order) transaction.
		prioItem := heap.Pop(priorityQueue).(*txPrioItem)
		tx := prioItem.tx

		// Grab the list of transactions which depend on this one (if
		// any) and remove the entry for this transaction as it will
		// either be included or skipped, but in either case the deps
		// are no longer needed.
		deps := dependers[*tx.Sha()]
		delete(dependers, *tx.Sha())

		// Enforce maximum block size.  Also check for overflow.
		txSize := uint32(tx.MsgTx().SerializeSize())
		blockPlusTxSize := blockSize + txSize
		if blockPlusTxSize < blockSize || blockPlusTxSize >= policy.BlockMaxSize {
			minrLog.Tracef("Skipping tx %s because it would exceed "+
				"the max block size", tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}

		// Enforce maximum signature operations per block.  Also check
		// for overflow.
		numSigOps := int64(blockchain.CountSigOps(tx))
		if blockSigOps+numSigOps < blockSigOps ||
			blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock {
			minrLog.Tracef("Skipping tx %s because it would "+
				"exceed the maximum sigops per block", tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}
		numP2SHSigOps, err := blockchain.CountP2SHSigOps(tx, false,
			blockTxStore)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"CountP2SHSigOps: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}
		numSigOps += int64(numP2SHSigOps)
		if blockSigOps+numSigOps < blockSigOps ||
			blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock {
			minrLog.Tracef("Skipping tx %s because it would "+
				"exceed the maximum sigops per block (p2sh)",
				tx.Sha())
			logSkippedDeps(tx, deps)
			continue
		}

		// Skip free transactions once the block is larger than the
		// minimum block size.
		if sortedByFee &&
			prioItem.feePerKB < int64(policy.TxMinFreeFee) &&
			blockPlusTxSize >= policy.BlockMinSize {

			minrLog.Tracef("Skipping tx %s with feePerKB %.2f "+
				"< TxMinFreeFee %d and block size %d >= "+
				"minBlockSize %d", tx.Sha(), prioItem.feePerKB,
				policy.TxMinFreeFee, blockPlusTxSize,
				policy.BlockMinSize)
			logSkippedDeps(tx, deps)
			continue
		}

		// Prioritize by fee per kilobyte once the block is larger than
		// the priority size or there are no more high-priority
		// transactions.
		if !sortedByFee && (blockPlusTxSize >= policy.BlockPrioritySize ||
			prioItem.priority <= minHighPriority) {

			minrLog.Tracef("Switching to sort by fees per "+
				"kilobyte blockSize %d >= BlockPrioritySize "+
				"%d || priority %.2f <= minHighPriority %.2f",
				blockPlusTxSize, policy.BlockPrioritySize,
				prioItem.priority, minHighPriority)

			sortedByFee = true
			priorityQueue.SetLessFunc(txPQByFee)

			// Put the transaction back into the priority queue and
			// skip it so it is re-prioritized by fees if it won't
			// fit into the high-priority section or the priority is
			// too low.  Otherwise this transaction will be the
			// final one in the high-priority section, so just fall
			// through to the code below so it is added now.
			if blockPlusTxSize > policy.BlockPrioritySize ||
				prioItem.priority < minHighPriority {

				heap.Push(priorityQueue, prioItem)
				continue
			}
		}

		// Ensure the transaction inputs pass all of the necessary
		// preconditions before allowing it to be added to the block.
		_, err = blockchain.CheckTransactionInputs(tx, nextBlockHeight,
			blockTxStore)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"CheckTransactionInputs: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}
		err = blockchain.ValidateTransactionScripts(tx, blockTxStore,
			txscript.StandardVerifyFlags, server.sigCache)
		if err != nil {
			minrLog.Tracef("Skipping tx %s due to error in "+
				"ValidateTransactionScripts: %v", tx.Sha(), err)
			logSkippedDeps(tx, deps)
			continue
		}

		// Spend the transaction inputs in the block transaction store
		// and add an entry for it to ensure any transactions which
		// reference this one have it available as an input and can
		// ensure they aren't double spending.
		spendTransaction(blockTxStore, tx, nextBlockHeight)

		// Add the transaction to the block, increment counters, and
		// save the fees and signature operation counts to the block
		// template.
		blockTxns = append(blockTxns, tx)
		blockSize += txSize
		blockSigOps += numSigOps
		totalFees += prioItem.fee
		txFees = append(txFees, prioItem.fee)
		txSigOpCounts = append(txSigOpCounts, numSigOps)

		minrLog.Tracef("Adding tx %s (priority %.2f, feePerKB %.2f)",
			prioItem.tx.Sha(), prioItem.priority, prioItem.feePerKB)

		// Add transactions which depend on this one (and also do not
		// have any other unsatisfied dependencies) to the priority
		// queue.
		if deps != nil {
			for e := deps.Front(); e != nil; e = e.Next() {
				// Add the transaction to the priority queue if
				// there are no more dependencies after this
				// one.
				item := e.Value.(*txPrioItem)
				delete(item.dependsOn, *tx.Sha())
				if len(item.dependsOn) == 0 {
					heap.Push(priorityQueue, item)
				}
			}
		}
	}

	// Now that the actual transactions have been selected, update the
	// block size for the real transaction count and coinbase value with
	// the total fees accordingly.
	blockSize -= wire.MaxVarIntPayload -
		uint32(wire.VarIntSerializeSize(uint64(len(blockTxns))))
	coinbaseTx.MsgTx().TxOut[0].Value += totalFees
	txFees[0] = -totalFees

	// Calculate the required difficulty for the block.  The timestamp
	// is potentially adjusted to ensure it comes after the median time of
	// the last several blocks per the chain consensus rules.
	ts, err := medianAdjustedTime(chainState, timeSource)
	if err != nil {
		return nil, err
	}
	requiredDifficulty, err := blockManager.CalcNextRequiredDifficulty(ts)
	if err != nil {
		return nil, err
	}

	// Create a new block ready to be solved.
	merkles := blockchain.BuildMerkleTreeStore(blockTxns)
	var msgBlock wire.MsgBlock
	msgBlock.Header = wire.BlockHeader{
		Version:    generatedBlockVersion,
		PrevBlock:  *prevHash,
		MerkleRoot: *merkles[len(merkles)-1],
		Timestamp:  ts,
		Bits:       requiredDifficulty,
	}
	for _, tx := range blockTxns {
		if err := msgBlock.AddTransaction(tx.MsgTx()); err != nil {
			return nil, err
		}
	}

	// Finally, perform a full check on the created block against the chain
	// consensus rules to ensure it properly connects to the current best
	// chain with no issues.
	block := coinutil.NewBlock(&msgBlock)
	block.SetHeight(nextBlockHeight)
	if err := blockManager.CheckConnectBlock(block); err != nil {
		return nil, err
	}

	minrLog.Debugf("Created new block template (%d transactions, %d in "+
		"fees, %d signature operations, %d bytes, target difficulty "+
		"%064x)", len(msgBlock.Transactions), totalFees, blockSigOps,
		blockSize, blockchain.CompactToBig(msgBlock.Header.Bits))

	return &BlockTemplate{
		block:           &msgBlock,
		fees:            txFees,
		sigOpCounts:     txSigOpCounts,
		height:          nextBlockHeight,
		validPayAddress: payToAddress != nil,
	}, nil
}
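A short sketch of how a caller in this package might turn the returned template into a coinutil.Block for validation or submission, assuming s is the running *server and addr a mining address; the helper name is illustrative:

// buildTemplateBlock requests a block template and wraps the resulting
// wire.MsgBlock so higher-level APIs can consume it.
func buildTemplateBlock(policy *mining.Policy, s *server, addr coinutil.Address) (*coinutil.Block, error) {
	template, err := NewBlockTemplate(policy, s, addr)
	if err != nil {
		return nil, err
	}
	block := coinutil.NewBlock(template.block)
	block.SetHeight(template.height)
	return block, nil
}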
Example #14
// TestBlock tests the API for Block.
func TestBlock(t *testing.T) {
	b := coinutil.NewBlock(&Block100000)

	// Ensure we get the same data back out.
	if msgBlock := b.MsgBlock(); !reflect.DeepEqual(msgBlock, &Block100000) {
		t.Errorf("MsgBlock: mismatched MsgBlock - got %v, want %v",
			spew.Sdump(msgBlock), spew.Sdump(&Block100000))
	}

	// Ensure block height set and get work properly.
	wantHeight := int32(100000)
	b.SetHeight(wantHeight)
	if gotHeight := b.Height(); gotHeight != wantHeight {
		t.Errorf("Height: mismatched height - got %v, want %v",
			gotHeight, wantHeight)
	}

	// Hash for block 100,000.
	wantShaStr := "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
	wantSha, err := wire.NewShaHashFromStr(wantShaStr)
	if err != nil {
		t.Errorf("NewShaHashFromStr: %v", err)
	}

	// Request the sha multiple times to test generation and caching.
	for i := 0; i < 2; i++ {
		sha := b.Sha()
		if !sha.IsEqual(wantSha) {
			t.Errorf("Sha #%d mismatched sha - got %v, want %v", i,
				sha, wantSha)
		}
	}

	// Shas for the transactions in Block100000.
	wantTxShas := []string{
		"8c14f0db3df150123e6f3dbbf30f8b955a8249b62ac1d1ff16284aefa3d06d87",
		"fff2525b8931402dd09222c50775608f75787bd2b87e56995a7bdd30f79702c4",
		"6359f0868171b1d194cbee1af2f16ea598ae8fad666d9b012c8ed2b79a236ec4",
		"e9a66845e05d5abc0ad04ec80f774a7e585c6e8db975962d069a522137b80c1d",
	}

	// Create a new block to nuke all cached data.
	b = coinutil.NewBlock(&Block100000)

	// Request sha for all transactions one at a time via Tx.
	for i, txSha := range wantTxShas {
		wantSha, err := wire.NewShaHashFromStr(txSha)
		if err != nil {
			t.Errorf("NewShaHashFromStr: %v", err)
		}

		// Request the sha multiple times to test generation and caching.
		for j := 0; j < 2; j++ {
			tx, err := b.Tx(i)
			if err != nil {
				t.Errorf("Tx #%d: %v", i, err)
				continue
			}

			sha := tx.Sha()
			if !sha.IsEqual(wantSha) {
				t.Errorf("Sha #%d mismatched sha - got %v, "+
					"want %v", j, sha, wantSha)
				continue
			}
		}
	}

	// Create a new block to nuke all cached data.
	b = coinutil.NewBlock(&Block100000)

	// Request slice of all transactions multiple times to test generation
	// and caching.
	for i := 0; i < 2; i++ {
		transactions := b.Transactions()

		// Ensure we get the expected number of transactions.
		if len(transactions) != len(wantTxShas) {
			t.Errorf("Transactions #%d mismatched number of "+
				"transactions - got %d, want %d", i,
				len(transactions), len(wantTxShas))
			continue
		}

		// Ensure all of the shas match.
		for j, tx := range transactions {
			wantSha, err := wire.NewShaHashFromStr(wantTxShas[j])
			if err != nil {
				t.Errorf("NewShaHashFromStr: %v", err)
			}

			sha := tx.Sha()
			if !sha.IsEqual(wantSha) {
				t.Errorf("Transactions #%d mismatched shas - "+
					"got %v, want %v", j, sha, wantSha)
				continue
			}
		}
	}

	// Serialize the test block.
	var block100000Buf bytes.Buffer
	err = Block100000.Serialize(&block100000Buf)
	if err != nil {
		t.Errorf("Serialize: %v", err)
	}
	block100000Bytes := block100000Buf.Bytes()

	// Request serialized bytes multiple times to test generation and
	// caching.
	for i := 0; i < 2; i++ {
		serializedBytes, err := b.Bytes()
		if err != nil {
			t.Errorf("Bytes: %v", err)
			continue
		}
		if !bytes.Equal(serializedBytes, block100000Bytes) {
			t.Errorf("Bytes #%d wrong bytes - got %v, want %v", i,
				spew.Sdump(serializedBytes),
				spew.Sdump(block100000Bytes))
			continue
		}
	}

	// Transaction offsets and lengths for the transactions in Block100000.
	wantTxLocs := []wire.TxLoc{
		{TxStart: 81, TxLen: 135},
		{TxStart: 216, TxLen: 259},
		{TxStart: 475, TxLen: 257},
		{TxStart: 732, TxLen: 225},
	}

	// Ensure the transaction location information is accurate.
	txLocs, err := b.TxLoc()
	if err != nil {
		t.Errorf("TxLoc: %v", err)
		return
	}
	if !reflect.DeepEqual(txLocs, wantTxLocs) {
		t.Errorf("TxLoc: mismatched transaction location information "+
			"- got %v, want %v", spew.Sdump(txLocs),
			spew.Sdump(wantTxLocs))
	}
}
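A small round-trip sketch built from the APIs exercised above: serialize a block with Bytes and parse it back with NewBlockFromBytes (used in a later example on this page); the helper name is illustrative:

// roundTripBlock serializes a block and re-parses it, which should yield
// an equivalent block.
func roundTripBlock(b *coinutil.Block) (*coinutil.Block, error) {
	serialized, err := b.Bytes()
	if err != nil {
		return nil, err
	}
	return coinutil.NewBlockFromBytes(serialized)
}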
Example #15
// chainSetup is used to create a new db and chain instance with the genesis
// block already inserted.  In addition to the new chain instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func chainSetup(dbName string) (*blockchain.BlockChain, func(), error) {
	if !isSupportedDbType(testDbType) {
		return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
	}

	// Handle memory database specially since it doesn't need the disk
	// specific handling.
	var db database.Db
	var teardown func()
	if testDbType == "memdb" {
		ndb, err := database.CreateDB(testDbType)
		if err != nil {
			return nil, nil, fmt.Errorf("error creating db: %v", err)
		}
		db = ndb

		// Setup a teardown function for cleaning up.  This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			db.Close()
		}
	} else {
		// Create the root directory for test databases.
		if !fileExists(testDbRoot) {
			if err := os.MkdirAll(testDbRoot, 0700); err != nil {
				err := fmt.Errorf("unable to create test db "+
					"root: %v", err)
				return nil, nil, err
			}
		}

		// Create a new database to store the accepted blocks into.
		dbPath := filepath.Join(testDbRoot, dbName)
		_ = os.RemoveAll(dbPath)
		ndb, err := database.CreateDB(testDbType, dbPath)
		if err != nil {
			return nil, nil, fmt.Errorf("error creating db: %v", err)
		}
		db = ndb

		// Setup a teardown function for cleaning up.  This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			dbVersionPath := filepath.Join(testDbRoot, dbName+".ver")
			db.Sync()
			db.Close()
			os.RemoveAll(dbPath)
			os.Remove(dbVersionPath)
			os.RemoveAll(testDbRoot)
		}
	}

	// Insert the main network genesis block.  This is part of the initial
	// database setup.
	genesisBlock := coinutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	_, err := db.InsertBlock(genesisBlock)
	if err != nil {
		teardown()
		err := fmt.Errorf("failed to insert genesis block: %v", err)
		return nil, nil, err
	}

	chain := blockchain.New(db, &chaincfg.MainNetParams, nil, nil)
	return chain, teardown, nil
}
Example #16
func Test_dupTx(t *testing.T) {

	// Ignore db remove errors since it means we didn't have an old one.
	dbname := fmt.Sprintf("tstdbdup0")
	dbnamever := dbname + ".ver"
	_ = os.RemoveAll(dbname)
	_ = os.RemoveAll(dbnamever)
	db, err := database.CreateDB("leveldb", dbname)
	if err != nil {
		t.Errorf("Failed to open test database %v", err)
		return
	}
	defer os.RemoveAll(dbname)
	defer os.RemoveAll(dbnamever)
	defer func() {
		if err := db.Close(); err != nil {
			t.Errorf("Close: unexpected error: %v", err)
		}
	}()

	testdatafile := filepath.Join("testdata", "blocks1-256.bz2")
	blocks, err := loadBlocks(t, testdatafile)
	if err != nil {
		t.Errorf("Unable to load blocks from test data for: %v",
			err)
		return
	}

	var lastSha *wire.ShaHash

	// Populate with the first 256 blocks, so we have blocks to 'mess with'
	err = nil
out:
	for height := int32(0); height < int32(len(blocks)); height++ {
		block := blocks[height]

		// Except when using NoVerify, which does not allow lookups, check inputs.
		mblock := block.MsgBlock()
		var txneededList []*wire.ShaHash
		for _, tx := range mblock.Transactions {
			for _, txin := range tx.TxIn {
				if txin.PreviousOutPoint.Index == uint32(4294967295) {
					continue
				}
				origintxsha := &txin.PreviousOutPoint.Hash
				txneededList = append(txneededList, origintxsha)

				exists, err := db.ExistsTxSha(origintxsha)
				if err != nil {
					t.Errorf("ExistsTxSha: unexpected error %v ", err)
				}
				if !exists {
					t.Errorf("referenced tx not found %v ", origintxsha)
				}

				_, err = db.FetchTxBySha(origintxsha)
				if err != nil {
					t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
				}
			}
		}
		txlist := db.FetchUnSpentTxByShaList(txneededList)
		for _, txe := range txlist {
			if txe.Err != nil {
				t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
				break out
			}
		}

		newheight, err := db.InsertBlock(block)
		if err != nil {
			t.Errorf("failed to insert block %v err %v", height, err)
			break out
		}
		if newheight != height {
			t.Errorf("height mismatch expect %v returned %v", height, newheight)
			break out
		}

		newSha, blkid, err := db.NewestSha()
		if err != nil {
			t.Errorf("failed to obtain latest sha %v %v", height, err)
		}

		if blkid != height {
			t.Errorf("height doe not match latest block height %v %v %v", blkid, height, err)
		}

		blkSha := block.Sha()
		if *newSha != *blkSha {
			t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err)
		}
		lastSha = blkSha
	}

	// generate a new block based on the last sha
	// these blocks are not verified, so there are a bunch of garbage fields
	// in the 'generated' block.

	var bh wire.BlockHeader

	bh.Version = 2
	bh.PrevBlock = *lastSha
	// Bits, Nonce are not filled in

	mblk := wire.NewMsgBlock(&bh)

	hash, _ := wire.NewShaHashFromStr("df2b060fa2e5e9c8ed5eaf6a45c13753ec8c63282b2688322eba40cd98ea067a")

	po := wire.NewOutPoint(hash, 0)
	txI := wire.NewTxIn(po, []byte("garbage"))
	txO := wire.NewTxOut(50000000, []byte("garbageout"))

	var tx wire.MsgTx
	tx.AddTxIn(txI)
	tx.AddTxOut(txO)

	mblk.AddTransaction(&tx)

	blk := coinutil.NewBlock(mblk)

	fetchList := []*wire.ShaHash{hash}
	listReply := db.FetchUnSpentTxByShaList(fetchList)
	for _, lr := range listReply {
		if lr.Err != nil {
			t.Errorf("sha %v spent %v err %v\n", lr.Sha,
				lr.TxSpent, lr.Err)
		}
	}

	_, err = db.InsertBlock(blk)
	if err != nil {
		t.Errorf("failed to insert phony block %v", err)
	}

	// ok, did it 'spend' the tx ?

	listReply = db.FetchUnSpentTxByShaList(fetchList)
	for _, lr := range listReply {
		if lr.Err != database.ErrTxShaMissing {
			t.Errorf("sha %v spent %v err %v\n", lr.Sha,
				lr.TxSpent, lr.Err)
		}
	}

	txlist := blk.Transactions()
	for _, tx := range txlist {
		txsha := tx.Sha()
		txReply, err := db.FetchTxBySha(txsha)
		if err != nil {
			t.Errorf("fully spent lookup %v err %v\n", hash, err)
		} else {
			for _, lr := range txReply {
				if lr.Err != nil {
					t.Errorf("stx %v spent %v err %v\n", lr.Sha,
						lr.TxSpent, lr.Err)
				}
			}
		}
	}

	t.Logf("Dropping block")

	err = db.DropAfterBlockBySha(lastSha)
	if err != nil {
		t.Errorf("failed to drop spending block %v", err)
	}
}
Example #17
func TestLimitAndSkipFetchTxsForAddr(t *testing.T) {
	testDb, err := setUpTestDb(t, "tstdbtxaddr")
	if err != nil {
		t.Errorf("Failed to open test database %v", err)
		return
	}
	defer testDb.cleanUpFunc()

	// Insert a block with some fake test transactions. The block will have
	// 10 copies of a fake transaction involving the same address.
	addrString := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
	targetAddr, err := coinutil.DecodeAddress(addrString, &chaincfg.MainNetParams)
	if err != nil {
		t.Fatalf("Unable to decode test address: %v", err)
	}
	outputScript, err := txscript.PayToAddrScript(targetAddr)
	if err != nil {
		t.Fatalf("Unable make test pkScript %v", err)
	}
	fakeTxOut := wire.NewTxOut(10, outputScript)
	var emptyHash wire.ShaHash
	fakeHeader := wire.NewBlockHeader(&emptyHash, &emptyHash, 1, 1)
	msgBlock := wire.NewMsgBlock(fakeHeader)
	for i := 0; i < 10; i++ {
		mtx := wire.NewMsgTx()
		mtx.AddTxOut(fakeTxOut)
		msgBlock.AddTransaction(mtx)
	}

	// Insert the test block into the DB.
	testBlock := coinutil.NewBlock(msgBlock)
	newheight, err := testDb.db.InsertBlock(testBlock)
	if err != nil {
		t.Fatalf("Unable to insert block into db: %v", err)
	}

	// Create and insert an address index for our test addr.
	txLoc, _ := testBlock.TxLoc()
	index := make(database.BlockAddrIndex)
	for i := range testBlock.Transactions() {
		var hash160 [ripemd160.Size]byte
		scriptAddr := targetAddr.ScriptAddress()
		copy(hash160[:], scriptAddr[:])
		index[hash160] = append(index[hash160], &txLoc[i])
	}
	blkSha := testBlock.Sha()
	err = testDb.db.UpdateAddrIndexForBlock(blkSha, newheight, index)
	if err != nil {
		t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
			" addrs for block #%d (%s) "+
			"err %v", newheight, blkSha, err)
		return
	}

	// Try skipping the first 4 results, should get 6 in return.
	txReply, txSkipped, err := testDb.db.FetchTxsForAddr(targetAddr, 4, 100000, false)
	if err != nil {
		t.Fatalf("Unable to fetch transactions for address: %v", err)
	}
	if txSkipped != 4 {
		t.Fatalf("Did not correctly return skipped amount"+
			" got %v txs, expected %v", txSkipped, 4)
	}
	if len(txReply) != 6 {
		t.Fatalf("Did not correctly skip forward in txs for address reply"+
			" got %v txs, expected %v", len(txReply), 6)
	}

	// Limit the number of results to 3.
	txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 0, 3, false)
	if err != nil {
		t.Fatalf("Unable to fetch transactions for address: %v", err)
	}
	if txSkipped != 0 {
		t.Fatalf("Did not correctly return skipped amount"+
			" got %v txs, expected %v", txSkipped, 0)
	}
	if len(txReply) != 3 {
		t.Fatalf("Did not correctly limit in txs for address reply"+
			" got %v txs, expected %v", len(txReply), 3)
	}

	// Skip 1, limit 5.
	txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 1, 5, false)
	if err != nil {
		t.Fatalf("Unable to fetch transactions for address: %v", err)
	}
	if txSkipped != 1 {
		t.Fatalf("Did not correctly return skipped amount"+
			" got %v txs, expected %v", txSkipped, 1)
	}
	if len(txReply) != 5 {
		t.Fatalf("Did not correctly limit in txs for address reply"+
			" got %v txs, expected %v", len(txReply), 5)
	}
}
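A minimal paging sketch over the same FetchTxsForAddr API shown in the test above, assuming the caller only needs the count of results for one page; all names here are illustrative:

// countAddrTxsPage fetches one page of transactions for an address and
// returns how many were found along with how many were skipped.
func countAddrTxsPage(db database.Db, addr coinutil.Address, skip, limit int) (int, int, error) {
	txReplies, skipped, err := db.FetchTxsForAddr(addr, skip, limit, false)
	if err != nil {
		return 0, 0, err
	}
	return len(txReplies), skipped, nil
}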
Example #18
func loadBlocks(t *testing.T, file string) (blocks []*coinutil.Block, err error) {
	if len(savedblocks) != 0 {
		blocks = savedblocks
		return
	}
	testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2")
	var dr io.Reader
	var fi io.ReadCloser
	fi, err = os.Open(testdatafile)
	if err != nil {
		t.Errorf("failed to open file %v, err %v", testdatafile, err)
		return
	}
	if strings.HasSuffix(testdatafile, ".bz2") {
		z := bzip2.NewReader(fi)
		dr = z
	} else {
		dr = fi
	}

	defer func() {
		if err := fi.Close(); err != nil {
			t.Errorf("failed to close file %v %v", testdatafile, err)
		}
	}()

	// Set the first block as the genesis block.
	genesis := coinutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	blocks = append(blocks, genesis)

	var block *coinutil.Block
	err = nil
	for height := int32(1); err == nil; height++ {
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// hit end of file at expected offset: no warning
			height--
			err = nil
			break
		}
		if err != nil {
			t.Errorf("failed to load network type, err %v", err)
			break
		}
		if rintbuf != uint32(network) {
			t.Errorf("Block doesn't match network: %v expects %v",
				rintbuf, network)
			break
		}
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		blocklen := rintbuf

		rbytes := make([]byte, blocklen)

		// read block
		dr.Read(rbytes)

		block, err = coinutil.NewBlockFromBytes(rbytes)
		if err != nil {
			t.Errorf("failed to parse block %v", height)
			return
		}
		blocks = append(blocks, block)
	}
	savedblocks = blocks
	return
}
Example #19
// GenerateNBlocks generates the requested number of blocks. It is self
// contained in that it creates block templates and attempts to solve them while
// detecting when it is performing stale work and reacting accordingly by
// generating a new block template.  When a block is solved, it is submitted.
// The function returns a list of the hashes of generated blocks.
func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*wire.ShaHash, error) {
	m.Lock()

	// Respond with an error if there's virtually 0 chance of CPU-mining a block.
	if !m.server.chainParams.GenerateSupported {
		m.Unlock()
		return nil, errors.New("No support for `generate` on the current " +
			"network, " + m.server.chainParams.Net.String() +
			", as it's unlikely to be possible to CPU-mine a block.")
	}

	// Respond with an error if server is already mining.
	if m.started || m.discreteMining {
		m.Unlock()
		return nil, errors.New("Server is already CPU mining. Please call " +
			"`setgenerate 0` before calling discrete `generate` commands.")
	}

	m.started = true
	m.discreteMining = true

	m.speedMonitorQuit = make(chan struct{})
	m.wg.Add(1)
	go m.speedMonitor()

	m.Unlock()

	minrLog.Tracef("Generating %d blocks", n)

	i := uint32(0)
	blockHashes := make([]*wire.ShaHash, n)

	// Start a ticker which is used to signal checks for stale work and
	// updates to the speed monitor.
	ticker := time.NewTicker(time.Second * hashUpdateSecs)
	defer ticker.Stop()

	for {
		// Read updateNumWorkers in case someone tries a `setgenerate` while
		// we're generating. We can ignore it as the `generate` RPC call only
		// uses 1 worker.
		select {
		case <-m.updateNumWorkers:
		default:
		}

		// Grab the lock used for block submission, since the current block will
		// be changing and this would otherwise end up building a new block
		// template on a block that is in the process of becoming stale.
		m.submitBlockLock.Lock()
		_, curHeight := m.server.blockManager.chainState.Best()

		// Choose a payment address at random.
		rand.Seed(time.Now().UnixNano())
		payToAddr := cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))]

		// Create a new block template using the available transactions
		// in the memory pool as a source of transactions to potentially
		// include in the block.
		template, err := NewBlockTemplate(m.policy, m.server, payToAddr)
		m.submitBlockLock.Unlock()
		if err != nil {
			errStr := fmt.Sprintf("Failed to create new block "+
				"template: %v", err)
			minrLog.Errorf(errStr)
			continue
		}

		// Attempt to solve the block.  The function will exit early
		// with false when conditions arise that trigger a stale block,
		// so a new block template can be generated.  When the return is
		// true a solution was found, so submit the solved block.
		if m.solveBlock(template.block, curHeight+1, ticker, nil) {
			block := coinutil.NewBlock(template.block)
			m.submitBlock(block)
			blockHashes[i] = block.Sha()
			i++
			if i == n {
				minrLog.Tracef("Generated %d blocks", i)
				m.Lock()
				close(m.speedMonitorQuit)
				m.wg.Wait()
				m.started = false
				m.discreteMining = false
				m.Unlock()
				return blockHashes, nil
			}
		}
	}
}
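A sketch of a caller (for instance the handler behind the discrete `generate` RPC mentioned in the code) asking the CPU miner for a fixed number of blocks; generateAndReport is an illustrative name:

// generateAndReport mines n blocks and prints the resulting hashes.
func generateAndReport(miner *CPUMiner, n uint32) error {
	hashes, err := miner.GenerateNBlocks(n)
	if err != nil {
		return err
	}
	for i, hash := range hashes {
		fmt.Printf("block %d: %v\n", i, hash)
	}
	return nil
}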
Example #20
// generateBlocks is a worker that is controlled by the miningWorkerController.
// It is self contained in that it creates block templates and attempts to solve
// them while detecting when it is performing stale work and reacting
// accordingly by generating a new block template.  When a block is solved, it
// is submitted.
//
// It must be run as a goroutine.
func (m *CPUMiner) generateBlocks(quit chan struct{}) {
	minrLog.Tracef("Starting generate blocks worker")

	// Start a ticker which is used to signal checks for stale work and
	// updates to the speed monitor.
	ticker := time.NewTicker(time.Second * hashUpdateSecs)
	defer ticker.Stop()
out:
	for {
		// Quit when the miner is stopped.
		select {
		case <-quit:
			break out
		default:
			// Non-blocking select to fall through
		}

		// Wait until there is a connection to at least one other peer
		// since there is no way to relay a found block or receive
		// transactions to work on when there are no connected peers.
		if m.server.ConnectedCount() == 0 {
			time.Sleep(time.Second)
			continue
		}

		// No point in searching for a solution before the chain is
		// synced.  Also, grab the same lock as used for block
		// submission, since the current block will be changing and
		// this would otherwise end up building a new block template on
		// a block that is in the process of becoming stale.
		m.submitBlockLock.Lock()
		_, curHeight := m.server.blockManager.chainState.Best()
		if curHeight != 0 && !m.server.blockManager.IsCurrent() {
			m.submitBlockLock.Unlock()
			time.Sleep(time.Second)
			continue
		}

		// Choose a payment address at random.
		rand.Seed(time.Now().UnixNano())
		payToAddr := cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))]

		// Create a new block template using the available transactions
		// in the memory pool as a source of transactions to potentially
		// include in the block.
		template, err := NewBlockTemplate(m.policy, m.server, payToAddr)
		m.submitBlockLock.Unlock()
		if err != nil {
			errStr := fmt.Sprintf("Failed to create new block "+
				"template: %v", err)
			minrLog.Errorf(errStr)
			continue
		}

		// Attempt to solve the block.  The function will exit early
		// with false when conditions arise that trigger a stale block,
		// so a new block template can be generated.  When the return is
		// true a solution was found, so submit the solved block.
		if m.solveBlock(template.block, curHeight+1, ticker, quit) {
			block := coinutil.NewBlock(template.block)
			m.submitBlock(block)
		}
	}

	m.workerWg.Done()
	minrLog.Tracef("Generate blocks worker done")
}