Example #1
// checkBlockScripts executes and validates the scripts for all transactions in
// the passed block.
func checkBlockScripts(block *coinutil.Block, txStore TxStore,
	scriptFlags txscript.ScriptFlags, sigCache *txscript.SigCache) error {

	// Collect all of the transaction inputs and required information for
	// validation for all transactions in the block into a single slice.
	numInputs := 0
	for _, tx := range block.Transactions() {
		numInputs += len(tx.MsgTx().TxIn)
	}
	txValItems := make([]*txValidateItem, 0, numInputs)
	for _, tx := range block.Transactions() {
		for txInIdx, txIn := range tx.MsgTx().TxIn {
			// Skip coinbases.
			if txIn.PreviousOutPoint.Index == math.MaxUint32 {
				continue
			}

			txVI := &txValidateItem{
				txInIndex: txInIdx,
				txIn:      txIn,
				tx:        tx,
			}
			txValItems = append(txValItems, txVI)
		}
	}

	// Validate all of the inputs.
	validator := newTxValidator(txStore, scriptFlags, sigCache)
	if err := validator.Validate(txValItems); err != nil {
		return err
	}

	return nil
}
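checkBlockScripts uses a two-pass pattern: it first counts the inputs so the slice of validation items can be allocated with its final capacity up front. A minimal, self-contained sketch of that pre-allocation idiom, using illustrative types rather than the chain package's own:

package main

import "fmt"

// item stands in for a txValidateItem; the type is illustrative only.
type item struct{ txIdx, inIdx int }

// collect counts the inputs first so the result slice is allocated exactly
// once with the required capacity, then fills it in a second pass.
func collect(inputsPerTx [][]string) []item {
	numInputs := 0
	for _, inputs := range inputsPerTx {
		numInputs += len(inputs)
	}
	items := make([]item, 0, numInputs)
	for txIdx, inputs := range inputsPerTx {
		for inIdx := range inputs {
			items = append(items, item{txIdx: txIdx, inIdx: inIdx})
		}
	}
	return items
}

func main() {
	fmt.Println(len(collect([][]string{{"a", "b"}, {"c"}}))) // 3
}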
Example #2
// connectTransactions updates the passed map by applying transaction and
// spend information for all the transactions in the passed block.  Only
// transactions in the passed map are updated.
func connectTransactions(txStore TxStore, block *coinutil.Block) error {
	// Loop through all of the transactions in the block to see if any of
	// them are ones we need to update and spend based on the results map.
	for _, tx := range block.Transactions() {
		// Update the transaction store with the transaction information
		// if it's one of the requested transactions.
		msgTx := tx.MsgTx()
		if txD, exists := txStore[*tx.Sha()]; exists {
			txD.Tx = tx
			txD.BlockHeight = block.Height()
			txD.Spent = make([]bool, len(msgTx.TxOut))
			txD.Err = nil
		}

		// Spend the origin transaction output.
		for _, txIn := range msgTx.TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			if originTx, exists := txStore[*originHash]; exists {
				if originIndex >= uint32(len(originTx.Spent)) {
					continue
				}
				originTx.Spent[originIndex] = true
			}
		}
	}

	return nil
}
Example #3
// connectBlock handles connecting the passed node/block to the end of the main
// (best) chain.
func (b *BlockChain) connectBlock(node *blockNode, block *coinutil.Block) error {
	// Make sure it's extending the end of the best chain.
	prevHash := &block.MsgBlock().Header.PrevBlock
	if b.bestChain != nil && !prevHash.IsEqual(b.bestChain.hash) {
		return fmt.Errorf("connectBlock must be called with a block " +
			"that extends the main chain")
	}

	// Insert the block into the database which houses the main chain.
	_, err := b.db.InsertBlock(block)
	if err != nil {
		return err
	}

	// Add the new node to the memory main chain indices for faster
	// lookups.
	node.inMainChain = true
	b.index[*node.hash] = node
	b.depNodes[*prevHash] = append(b.depNodes[*prevHash], node)

	// This node is now the end of the best chain.
	b.bestChain = node

	// Notify the caller that the block was connected to the main chain.
	// The caller would typically want to react with actions such as
	// updating wallets.
	b.sendNotification(NTBlockConnected, block)

	return nil
}
Example #4
// disconnectTransactions updates the passed map by undoing transaction and
// spend information for all transactions in the passed block.  Only
// transactions in the passed map are updated.
func disconnectTransactions(txStore TxStore, block *coinutil.Block) error {
	// Loop through all of the transactions in the block to see if any of
	// them are ones that need to be undone based on the transaction store.
	for _, tx := range block.Transactions() {
		// Clear this transaction from the transaction store if needed.
		// Only clear it rather than deleting it because the transaction
		// connect code relies on its presence to decide whether or not
		// to update the store and any transactions which exist on both
		// sides of a fork would otherwise not be updated.
		if txD, exists := txStore[*tx.Sha()]; exists {
			txD.Tx = nil
			txD.BlockHeight = 0
			txD.Spent = nil
			txD.Err = database.ErrTxShaMissing
		}

		// Unspend the origin transaction output.
		for _, txIn := range tx.MsgTx().TxIn {
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			originTx, exists := txStore[*originHash]
			if exists && originTx.Tx != nil && originTx.Err == nil {
				if originIndex >= uint32(len(originTx.Spent)) {
					continue
				}
				originTx.Spent[originIndex] = false
			}
		}
	}

	return nil
}
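connectTransactions and disconnectTransactions are mirror operations over the per-output Spent slice, and both guard the referenced output index before touching it. A small standalone sketch of that mark/unmark bookkeeping with a plain []bool (the TxStore/TxData types above are not used here):

package main

import "fmt"

// spend marks output idx of the entry as spent; unspend reverses it.
// Both skip indices that are out of range rather than panicking.
func spend(spent []bool, idx int) {
	if idx >= len(spent) {
		return
	}
	spent[idx] = true
}

func unspend(spent []bool, idx int) {
	if idx >= len(spent) {
		return
	}
	spent[idx] = false
}

func main() {
	spentBuf := make([]bool, 3) // one flag per transaction output
	spend(spentBuf, 1)
	fmt.Println(spentBuf) // [false true false]
	unspend(spentBuf, 1)
	fmt.Println(spentBuf) // [false false false]
}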
Example #5
// DropAfterBlockBySha will remove any blocks from the database after
// the given block.
func (db *LevelDb) DropAfterBlockBySha(sha *wire.ShaHash) (rerr error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	startheight := db.nextBlock - 1

	keepidx, err := db.getBlkLoc(sha)
	if err != nil {
		// Should the error here be normalized?
		log.Tracef("block loc failed %v", sha)
		return err
	}

	for height := startheight; height > keepidx; height-- {
		var blk *coinutil.Block
		blksha, buf, err := db.getBlkByHeight(height)
		if err != nil {
			return err
		}
		blk, err = coinutil.NewBlockFromBytes(buf)
		if err != nil {
			return err
		}

		for _, tx := range blk.MsgBlock().Transactions {
			err = db.unSpend(tx)
			if err != nil {
				return err
			}
		}
		// Rather than iterating the list of transactions backward, iterate it twice.
		for _, tx := range blk.Transactions() {
			var txUo txUpdateObj
			txUo.delete = true
			db.txUpdateMap[*tx.Sha()] = &txUo
		}
		db.lBatch().Delete(shaBlkToKey(blksha))
		db.lBatch().Delete(int64ToKey(int64(height)))
	}

	// update the last block cache
	db.lastBlkShaCached = true
	db.lastBlkSha = *sha
	db.lastBlkIdx = keepidx
	db.nextBlock = keepidx + 1

	return nil
}
Example #6
// addOrphanBlock adds the passed block (which is already determined to be
// an orphan prior to calling this function) to the orphan pool.  It lazily cleans
// up any expired blocks so a separate cleanup poller doesn't need to be run.
// It also imposes a maximum limit on the number of outstanding orphan
// blocks and will remove the oldest received orphan block if the limit is
// exceeded.
func (b *BlockChain) addOrphanBlock(block *coinutil.Block) {
	// Remove expired orphan blocks.
	for _, oBlock := range b.orphans {
		if time.Now().After(oBlock.expiration) {
			b.removeOrphanBlock(oBlock)
			continue
		}

		// Update the oldest orphan block pointer so it can be discarded
		// in case the orphan pool fills up.
		if b.oldestOrphan == nil || oBlock.expiration.Before(b.oldestOrphan.expiration) {
			b.oldestOrphan = oBlock
		}
	}

	// Limit orphan blocks to prevent memory exhaustion.
	if len(b.orphans)+1 > maxOrphanBlocks {
		// Remove the oldest orphan to make room for the new one.
		b.removeOrphanBlock(b.oldestOrphan)
		b.oldestOrphan = nil
	}

	// Protect concurrent access.  This is intentionally done here instead
	// of near the top since removeOrphanBlock does its own locking and
	// the range iterator is not invalidated by removing map entries.
	b.orphanLock.Lock()
	defer b.orphanLock.Unlock()

	// Insert the block into the orphan map with an expiration time
	// 1 hour from now.
	expiration := time.Now().Add(time.Hour)
	oBlock := &orphanBlock{
		block:      block,
		expiration: expiration,
	}
	b.orphans[*block.Sha()] = oBlock

	// Add to previous hash lookup index for faster dependency lookups.
	prevHash := &block.MsgBlock().Header.PrevBlock
	b.prevOrphans[*prevHash] = append(b.prevOrphans[*prevHash], oBlock)

	return
}
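addOrphanBlock relies on lazy expiration: stale entries are evicted on the next insert instead of by a background poller, and the oldest entry is evicted when the pool is full. A self-contained sketch of the same pattern over a generic map; the pool type and field names here are illustrative, not taken from the chain package:

package main

import (
	"fmt"
	"time"
)

type entry struct {
	value      string
	expiration time.Time
}

type pool struct {
	entries map[string]entry
	maxSize int
}

// add lazily drops expired entries, evicts the oldest entry if the pool is
// full, and then inserts the new value with a fresh expiration.
func (p *pool) add(key, value string, ttl time.Duration) {
	now := time.Now()
	var oldestKey string
	var oldestExp time.Time
	for k, e := range p.entries {
		if now.After(e.expiration) {
			delete(p.entries, k)
			continue
		}
		if oldestKey == "" || e.expiration.Before(oldestExp) {
			oldestKey, oldestExp = k, e.expiration
		}
	}
	if len(p.entries)+1 > p.maxSize && oldestKey != "" {
		delete(p.entries, oldestKey)
	}
	p.entries[key] = entry{value: value, expiration: now.Add(ttl)}
}

func main() {
	p := &pool{entries: make(map[string]entry), maxSize: 2}
	p.add("a", "1", time.Hour)
	p.add("b", "2", time.Hour)
	p.add("c", "3", time.Hour) // evicts "a", the oldest entry
	fmt.Println(len(p.entries)) // 2
}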
Example #7
// getPrevNodeFromBlock returns a block node for the block previous to the
// passed block (the passed block's parent).  When it is already in the memory
// block chain, it simply returns it.  Otherwise, it loads the previous block
// from the block database, creates a new block node from it, and returns it.
// The returned node will be nil if the genesis block is passed.
func (b *BlockChain) getPrevNodeFromBlock(block *coinutil.Block) (*blockNode, error) {
	// Genesis block.
	prevHash := &block.MsgBlock().Header.PrevBlock
	if prevHash.IsEqual(zeroHash) {
		return nil, nil
	}

	// Return the existing previous block node if it's already there.
	if bn, ok := b.index[*prevHash]; ok {
		return bn, nil
	}

	// Dynamically load the previous block from the block database, create
	// a new block node for it, and update the memory chain accordingly.
	prevBlockNode, err := b.loadBlockNode(prevHash)
	if err != nil {
		return nil, err
	}
	return prevBlockNode, nil
}
Example #8
// indexBlockAddrs returns a populated index of all the transactions in the
// passed block based on the addresses involved in each transaction.
func (a *addrIndexer) indexBlockAddrs(blk *coinutil.Block) (database.BlockAddrIndex, error) {
	addrIndex := make(database.BlockAddrIndex)
	txLocs, err := blk.TxLoc()
	if err != nil {
		return nil, err
	}

	for txIdx, tx := range blk.Transactions() {
		// Tx's offset and length in the block.
		locInBlock := &txLocs[txIdx]

		// Coinbases don't have any inputs.
		if !blockchain.IsCoinBase(tx) {
			// Index the SPK's of each input's previous outpoint
			// transaction.
			for _, txIn := range tx.MsgTx().TxIn {
				// Lookup and fetch the referenced output's tx.
				prevOut := txIn.PreviousOutPoint
				txList, err := a.server.db.FetchTxBySha(&prevOut.Hash)
				if err != nil {
					adxrLog.Errorf("Error fetching tx %v: %v",
						prevOut.Hash, err)
					return nil, err
				}
				if len(txList) == 0 {
					return nil, fmt.Errorf("transaction %v not found",
						prevOut.Hash)
				}
				prevOutTx := txList[len(txList)-1]
				inputOutPoint := prevOutTx.Tx.TxOut[prevOut.Index]

				indexScriptPubKey(addrIndex, inputOutPoint.PkScript, locInBlock)
			}
		}

		for _, txOut := range tx.MsgTx().TxOut {
			indexScriptPubKey(addrIndex, txOut.PkScript, locInBlock)
		}
	}
	return addrIndex, nil
}
Example #9
// LogBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockProgressLogger) LogBlockHeight(block *coinutil.Block) {
	b.Lock()
	defer b.Unlock()

	b.receivedLogBlocks++
	b.receivedLogTx += int64(len(block.MsgBlock().Transactions))

	now := time.Now()
	duration := now.Sub(b.lastBlockLogTime)
	if duration < time.Second*10 {
		return
	}

	// Truncate the duration to 10 millisecond granularity.
	durationMillis := int64(duration / time.Millisecond)
	tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)

	// Log information about new block height.
	blockStr := "blocks"
	if b.receivedLogBlocks == 1 {
		blockStr = "block"
	}
	txStr := "transactions"
	if b.receivedLogTx == 1 {
		txStr = "transaction"
	}
	b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)",
		b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
		txStr, block.Height(), block.MsgBlock().Header.Timestamp)

	b.receivedLogBlocks = 0
	b.receivedLogTx = 0
	b.lastBlockLogTime = now
}
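The truncation step above drops the elapsed time to 10 ms granularity so the logged duration stays readable. A tiny standalone illustration of the same arithmetic:

package main

import (
	"fmt"
	"time"
)

// truncate10ms rounds a duration down to the nearest 10 milliseconds,
// mirroring the arithmetic used by LogBlockHeight.
func truncate10ms(d time.Duration) time.Duration {
	millis := int64(d / time.Millisecond)
	return 10 * time.Millisecond * time.Duration(millis/10)
}

func main() {
	d := 12345678 * time.Microsecond // 12.345678s
	fmt.Println(truncate10ms(d))     // prints 12.34s
}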
Example #10
// checkBIP0030 ensures blocks do not contain duplicate transactions which
// 'overwrite' older transactions that are not fully spent.  This prevents an
// attack where a coinbase and all of its dependent transactions could be
// duplicated to effectively revert the overwritten transactions to a single
// confirmation thereby making them vulnerable to a double spend.
//
// For more details, see https://en.bitcoin.it/wiki/BIP_0030 and
// http://r6.ca/blog/20120206T005236Z.html.
func (b *BlockChain) checkBIP0030(node *blockNode, block *coinutil.Block) error {
	// Attempt to fetch duplicate transactions for all of the transactions
	// in this block from the point of view of the parent node.
	fetchSet := make(map[wire.ShaHash]struct{})
	for _, tx := range block.Transactions() {
		fetchSet[*tx.Sha()] = struct{}{}
	}
	txResults, err := b.fetchTxStore(node, fetchSet)
	if err != nil {
		return err
	}

	// Examine the resulting data about the requested transactions.
	for _, txD := range txResults {
		switch txD.Err {
		// A duplicate transaction was not found.  This is the most
		// common case.
		case database.ErrTxShaMissing:
			continue

		// A duplicate transaction was found.  This is only allowed if
		// the duplicate transaction is fully spent.
		case nil:
			if !isTransactionSpent(txD) {
				str := fmt.Sprintf("tried to overwrite "+
					"transaction %v at block height %d "+
					"that is not fully spent", txD.Hash,
					txD.BlockHeight)
				return ruleError(ErrOverwriteTx, str)
			}

		// Some other unexpected error occurred.  Return it now.
		default:
			return txD.Err
		}
	}

	return nil
}
Example #11
// CheckConnectBlock performs several checks to confirm connecting the passed
// block to the main chain does not violate any rules.  Examples of the checks
// performed include ensuring connecting the block would not cause any
// duplicate transaction hashes for old transactions that aren't already fully
// spent, double spends, exceeding the maximum allowed signature operations
// per block, invalid values in relation to the expected block subsidy, or
// failing transaction script validation.
//
// This function is NOT safe for concurrent access.
func (b *BlockChain) CheckConnectBlock(block *coinutil.Block) error {
	prevNode := b.bestChain
	newNode := newBlockNode(&block.MsgBlock().Header, block.Sha(),
		block.Height())
	if prevNode != nil {
		newNode.parent = prevNode
		newNode.workSum.Add(prevNode.workSum, newNode.workSum)
	}

	return b.checkConnectBlock(newNode, block)
}
Example #12
// checkBlockContext performs several validation checks on the block which depend
// on its position within the block chain.
//
// The flags modify the behavior of this function as follows:
//  - BFFastAdd: The transactions are not checked to see if they are finalized
//    and the somewhat expensive BIP0034 validation is not performed.
//
// The flags are also passed to checkBlockHeaderContext.  See its documentation
// for how the flags modify its behavior.
func (b *BlockChain) checkBlockContext(block *coinutil.Block, prevNode *blockNode, flags BehaviorFlags) error {
	// The genesis block is valid by definition.
	if prevNode == nil {
		return nil
	}

	// Perform all block header related validation checks.
	header := &block.MsgBlock().Header
	err := b.checkBlockHeaderContext(header, prevNode, flags)
	if err != nil {
		return err
	}

	fastAdd := flags&BFFastAdd == BFFastAdd
	if !fastAdd {
		// The height of this block is one more than the referenced
		// previous block.
		blockHeight := prevNode.height + 1

		// Ensure all transactions in the block are finalized.
		for _, tx := range block.Transactions() {
			if !IsFinalizedTransaction(tx, blockHeight,
				header.Timestamp) {

				str := fmt.Sprintf("block contains unfinalized "+
					"transaction %v", tx.Sha())
				return ruleError(ErrUnfinalizedTx, str)
			}
		}

		// Ensure coinbase starts with serialized block heights for
		// blocks whose version is the serializedHeightVersion or newer
		// once a majority of the network has upgraded.  This is part of
		// BIP0034.
		if ShouldHaveSerializedBlockHeight(header) &&
			b.isMajorityVersion(serializedHeightVersion, prevNode,
				b.chainParams.BlockEnforceNumRequired) {

			coinbaseTx := block.Transactions()[0]
			err := checkSerializedHeight(coinbaseTx, blockHeight)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
Example #13
// NewMerkleBlock returns a new *wire.MsgMerkleBlock and an array of the matched
// transaction index numbers based on the passed block and filter.
func NewMerkleBlock(block *coinutil.Block, filter *Filter) (*wire.MsgMerkleBlock, []uint32) {
	numTx := uint32(len(block.Transactions()))
	mBlock := merkleBlock{
		numTx:       numTx,
		allHashes:   make([]*wire.ShaHash, 0, numTx),
		matchedBits: make([]byte, 0, numTx),
	}

	// Find and keep track of any transactions that match the filter.
	var matchedIndices []uint32
	for txIndex, tx := range block.Transactions() {
		if filter.MatchTxAndUpdate(tx) {
			mBlock.matchedBits = append(mBlock.matchedBits, 0x01)
			matchedIndices = append(matchedIndices, uint32(txIndex))
		} else {
			mBlock.matchedBits = append(mBlock.matchedBits, 0x00)
		}
		mBlock.allHashes = append(mBlock.allHashes, tx.Sha())
	}

	// Calculate the number of merkle branches (height) in the tree.
	height := uint32(0)
	for mBlock.calcTreeWidth(height) > 1 {
		height++
	}

	// Build the depth-first partial merkle tree.
	mBlock.traverseAndBuild(height, 0)

	// Create and return the merkle block.
	msgMerkleBlock := wire.MsgMerkleBlock{
		Header:       block.MsgBlock().Header,
		Transactions: uint32(mBlock.numTx),
		Hashes:       make([]*wire.ShaHash, 0, len(mBlock.finalHashes)),
		Flags:        make([]byte, (len(mBlock.bits)+7)/8),
	}
	for _, sha := range mBlock.finalHashes {
		msgMerkleBlock.AddTxHash(sha)
	}
	for i := uint32(0); i < uint32(len(mBlock.bits)); i++ {
		msgMerkleBlock.Flags[i/8] |= mBlock.bits[i] << (i % 8)
	}
	return &msgMerkleBlock, matchedIndices
}
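The final loop above packs one match bit per tree node into the Flags byte slice, least-significant bit first within each byte. A standalone sketch of that packing plus the reverse operation, independent of the wire types:

package main

import "fmt"

// packBits packs bits (one per slice entry, 0 or 1) LSB-first into bytes.
func packBits(bits []byte) []byte {
	flags := make([]byte, (len(bits)+7)/8)
	for i := uint32(0); i < uint32(len(bits)); i++ {
		flags[i/8] |= bits[i] << (i % 8)
	}
	return flags
}

// unpackBits reverses packBits given the original bit count.
func unpackBits(flags []byte, n int) []byte {
	bits := make([]byte, n)
	for i := 0; i < n; i++ {
		bits[i] = (flags[i/8] >> (uint(i) % 8)) & 1
	}
	return bits
}

func main() {
	bits := []byte{1, 0, 1, 1, 0, 0, 0, 0, 1}
	flags := packBits(bits)
	fmt.Printf("%08b\n", flags) // [00001101 00000001]
	fmt.Println(unpackBits(flags, len(bits)))
}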
Example #14
// submitBlock submits the passed block to the network after ensuring it
// passes all of the consensus validation rules.
func (m *CPUMiner) submitBlock(block *coinutil.Block) bool {
	m.submitBlockLock.Lock()
	defer m.submitBlockLock.Unlock()

	// Ensure the block is not stale since a new block could have shown up
	// while the solution was being found.  Typically that condition is
	// detected and all work on the stale block is halted to start work on
	// a new block, but the check only happens periodically, so it is
	// possible a block was found and submitted in between.
	latestHash, _ := m.server.blockManager.chainState.Best()
	msgBlock := block.MsgBlock()
	if !msgBlock.Header.PrevBlock.IsEqual(latestHash) {
		minrLog.Debugf("Block submitted via CPU miner with previous "+
			"block %s is stale", msgBlock.Header.PrevBlock)
		return false
	}

	// Process this block using the same rules as blocks coming from other
	// nodes.  This will in turn relay it to the network like normal.
	isOrphan, err := m.server.blockManager.ProcessBlock(block, blockchain.BFNone)
	if err != nil {
		// Anything other than a rule violation is an unexpected error,
		// so log that error as an internal error.
		if _, ok := err.(blockchain.RuleError); !ok {
			minrLog.Errorf("Unexpected error while processing "+
				"block submitted via CPU miner: %v", err)
			return false
		}

		minrLog.Debugf("Block submitted via CPU miner rejected: %v", err)
		return false
	}
	if isOrphan {
		minrLog.Debugf("Block submitted via CPU miner is an orphan")
		return false
	}

	// The block was accepted.
	coinbaseTx := block.MsgBlock().Transactions[0].TxOut[0]
	minrLog.Infof("Block submitted via CPU miner accepted (hash %s, "+
		"amount %v)", block.Sha(), coinutil.Amount(coinbaseTx.Value))
	return true
}
Example #15
// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block chain.  It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
// the block chain along with best chain selection and reorganization.
//
// It returns a bool which indicates whether or not the block is an orphan and
// any errors that occurred during processing.  The returned bool is only valid
// when the error is nil.
func (b *BlockChain) ProcessBlock(block *coinutil.Block, timeSource MedianTimeSource, flags BehaviorFlags) (bool, error) {
	fastAdd := flags&BFFastAdd == BFFastAdd
	dryRun := flags&BFDryRun == BFDryRun

	blockHash := block.Sha()
	log.Tracef("Processing block %v", blockHash)

	// The block must not already exist in the main chain or side chains.
	exists, err := b.blockExists(blockHash)
	if err != nil {
		return false, err
	}
	if exists {
		str := fmt.Sprintf("already have block %v", blockHash)
		return false, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := b.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %v", blockHash)
		return false, ruleError(ErrDuplicateBlock, str)
	}

	// Perform preliminary sanity checks on the block and its transactions.
	err = checkBlockSanity(block, b.chainParams.PowLimit, timeSource, flags)
	if err != nil {
		return false, err
	}

	// Find the previous checkpoint and perform some additional checks based
	// on the checkpoint.  This provides a few nice properties such as
	// preventing old side chain blocks before the last checkpoint,
	// rejecting easy to mine, but otherwise bogus, blocks that could be
	// used to eat memory, and ensuring expected (versus claimed) proof of
	// work requirements since the previous checkpoint are met.
	blockHeader := &block.MsgBlock().Header
	checkpointBlock, err := b.findPreviousCheckpoint()
	if err != nil {
		return false, err
	}
	if checkpointBlock != nil {
		// Ensure the block timestamp is after the checkpoint timestamp.
		checkpointHeader := &checkpointBlock.MsgBlock().Header
		checkpointTime := checkpointHeader.Timestamp
		if blockHeader.Timestamp.Before(checkpointTime) {
			str := fmt.Sprintf("block %v has timestamp %v before "+
				"last checkpoint timestamp %v", blockHash,
				blockHeader.Timestamp, checkpointTime)
			return false, ruleError(ErrCheckpointTimeTooOld, str)
		}
		if !fastAdd {
			// Even though the checks prior to now have already ensured the
			// proof of work exceeds the claimed amount, the claimed amount
			// is a field in the block header which could be forged.  This
			// check ensures the proof of work is at least the minimum
			// expected based on elapsed time since the last checkpoint and
			// maximum adjustment allowed by the retarget rules.
			duration := blockHeader.Timestamp.Sub(checkpointTime)
			requiredTarget := CompactToBig(b.calcEasiestDifficulty(
				checkpointHeader.Bits, duration))
			currentTarget := CompactToBig(blockHeader.Bits)
			if currentTarget.Cmp(requiredTarget) > 0 {
				str := fmt.Sprintf("block target difficulty of %064x "+
					"is too low when compared to the previous "+
					"checkpoint", currentTarget)
				return false, ruleError(ErrDifficultyTooLow, str)
			}
		}
	}

	// Handle orphan blocks.
	prevHash := &blockHeader.PrevBlock
	if !prevHash.IsEqual(zeroHash) {
		prevHashExists, err := b.blockExists(prevHash)
		if err != nil {
			return false, err
		}
		if !prevHashExists {
			if !dryRun {
				log.Infof("Adding orphan block %v with parent %v",
					blockHash, prevHash)
				b.addOrphanBlock(block)
			}

			return true, nil
		}
	}

	// The block has passed all context independent checks and appears sane
	// enough to potentially accept it into the block chain.
	err = b.maybeAcceptBlock(block, flags)
	if err != nil {
		return false, err
	}

	// Don't process any orphans or log when the dry run flag is set.
	if !dryRun {
		// Accept any orphan blocks that depend on this block (they are
		// no longer orphans) and repeat for those accepted blocks until
		// there are no more.
		err := b.processOrphans(blockHash, flags)
		if err != nil {
			return false, err
		}

		log.Debugf("Accepted block %v", blockHash)
	}

	return false, nil
}
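ProcessBlock reads BFFastAdd and BFDryRun with the usual Go bit-flag idiom: mask with AND and compare against the flag itself. A minimal self-contained sketch of that behavior-flag pattern; the constants mirror the names used above but the type and values are declared locally for the example:

package main

import "fmt"

// BehaviorFlags is a bit field; each flag toggles an optional behavior.
type BehaviorFlags uint32

const (
	BFFastAdd BehaviorFlags = 1 << iota
	BFDryRun
	BFNone BehaviorFlags = 0
)

func describe(flags BehaviorFlags) string {
	fastAdd := flags&BFFastAdd == BFFastAdd
	dryRun := flags&BFDryRun == BFDryRun
	return fmt.Sprintf("fastAdd=%v dryRun=%v", fastAdd, dryRun)
}

func main() {
	fmt.Println(describe(BFNone))               // fastAdd=false dryRun=false
	fmt.Println(describe(BFFastAdd | BFDryRun)) // fastAdd=true dryRun=true
}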
Example #16
// InsertBlock inserts raw block and transaction data from a block into the
// database.  The first block inserted into the database will be treated as the
// genesis block.  Every subsequent block insert requires the referenced parent
// block to already exist.  This is part of the database.Db interface
// implementation.
func (db *MemDb) InsertBlock(block *coinutil.Block) (int32, error) {
	db.Lock()
	defer db.Unlock()

	if db.closed {
		return 0, ErrDbClosed
	}

	// Reject the insert if the previously referenced block does not exist,
	// except in the case where there are no blocks inserted yet and the
	// first inserted block is assumed to be the genesis block.
	msgBlock := block.MsgBlock()
	if _, exists := db.blocksBySha[msgBlock.Header.PrevBlock]; !exists {
		if len(db.blocks) > 0 {
			return 0, database.ErrPrevShaMissing
		}
	}

	// Build a map of in-flight transactions because some of the inputs in
	// this block could be referencing other transactions earlier in this
	// block which are not yet in the chain.
	txInFlight := map[wire.ShaHash]int{}
	transactions := block.Transactions()
	for i, tx := range transactions {
		txInFlight[*tx.Sha()] = i
	}

	// Loop through all transactions and inputs to ensure there are no error
	// conditions that would prevent them from being inserted into the db.
	// Although these checks could be done in the loop below, checking for
	// error conditions up front means the code below doesn't have to deal
	// with rollback on errors.
	newHeight := int32(len(db.blocks))
	for i, tx := range transactions {
		// Two old blocks contain duplicate transactions due to being
		// mined by faulty miners and accepted by the original Satoshi
		// client.  Rules have since been added to ensure this problem
		// can no longer happen, but the two duplicate transactions
		// which were originally accepted are forever in the block
		// chain history and must be dealt with specially.
		// http://blockexplorer.com/b/91842
		// http://blockexplorer.com/b/91880
		if newHeight == 91842 && tx.Sha().IsEqual(dupTxHash91842) {
			continue
		}

		if newHeight == 91880 && tx.Sha().IsEqual(dupTxHash91880) {
			continue
		}

		for _, txIn := range tx.MsgTx().TxIn {
			if isCoinbaseInput(txIn) {
				continue
			}

			// It is acceptable for a transaction input to reference
			// the output of another transaction in this block only
			// if the referenced transaction comes before the
			// current one in this block.
			prevOut := &txIn.PreviousOutPoint
			if inFlightIndex, ok := txInFlight[prevOut.Hash]; ok {
				if i <= inFlightIndex {
					log.Warnf("InsertBlock: requested hash "+
						" of %s does not exist in-flight",
						tx.Sha())
					return 0, database.ErrTxShaMissing
				}
			} else {
				originTxns, exists := db.txns[prevOut.Hash]
				if !exists {
					log.Warnf("InsertBlock: requested hash "+
						"of %s by %s does not exist",
						prevOut.Hash, tx.Sha())
					return 0, database.ErrTxShaMissing
				}
				originTxD := originTxns[len(originTxns)-1]
				if prevOut.Index >= uint32(len(originTxD.spentBuf)) {
					log.Warnf("InsertBlock: requested hash "+
						"of %s with index %d does not "+
						"exist", tx.Sha(), prevOut.Index)
					return 0, database.ErrTxShaMissing
				}
			}
		}

		// Prevent duplicate transactions in the same block.
		if inFlightIndex, exists := txInFlight[*tx.Sha()]; exists &&
			inFlightIndex < i {
			log.Warnf("Block contains duplicate transaction %s",
				tx.Sha())
			return 0, database.ErrDuplicateSha
		}

		// Prevent duplicate transactions unless the old one is fully
		// spent.
		if txns, exists := db.txns[*tx.Sha()]; exists {
			txD := txns[len(txns)-1]
			if !isFullySpent(txD) {
				log.Warnf("Attempt to insert duplicate "+
					"transaction %s", tx.Sha())
				return 0, database.ErrDuplicateSha
			}
		}
	}

	db.blocks = append(db.blocks, msgBlock)
	db.blocksBySha[*block.Sha()] = newHeight

	// Insert information about each transaction and spend all of the
	// outputs referenced by the inputs to the transactions.
	for i, tx := range block.Transactions() {
		// Insert the transaction data.
		txD := tTxInsertData{
			blockHeight: newHeight,
			offset:      i,
			spentBuf:    make([]bool, len(tx.MsgTx().TxOut)),
		}
		db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)

		// Spend all of the inputs.
		for _, txIn := range tx.MsgTx().TxIn {
			// Coinbase transaction has no inputs.
			if isCoinbaseInput(txIn) {
				continue
			}

			// Already checked for existing and valid ranges above.
			prevOut := &txIn.PreviousOutPoint
			originTxns := db.txns[prevOut.Hash]
			originTxD := originTxns[len(originTxns)-1]
			originTxD.spentBuf[prevOut.Index] = true
		}
	}

	return newHeight, nil
}
Example #17
// checkBlockSanity performs some preliminary checks on a block to ensure it is
// sane before continuing with block processing.  These checks are context free.
//
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to checkBlockHeaderSanity.
func checkBlockSanity(block *coinutil.Block, powLimit *big.Int, timeSource MedianTimeSource, flags BehaviorFlags) error {
	msgBlock := block.MsgBlock()
	header := &msgBlock.Header
	err := checkBlockHeaderSanity(header, powLimit, timeSource, flags)
	if err != nil {
		return err
	}

	// A block must have at least one transaction.
	numTx := len(msgBlock.Transactions)
	if numTx == 0 {
		return ruleError(ErrNoTransactions, "block does not contain "+
			"any transactions")
	}

	// A block must not have more transactions than the max block payload.
	if numTx > wire.MaxBlockPayload {
		str := fmt.Sprintf("block contains too many transactions - "+
			"got %d, max %d", numTx, wire.MaxBlockPayload)
		return ruleError(ErrTooManyTransactions, str)
	}

	// A block must not exceed the maximum allowed block payload when
	// serialized.
	serializedSize := msgBlock.SerializeSize()
	if serializedSize > wire.MaxBlockPayload {
		str := fmt.Sprintf("serialized block is too big - got %d, "+
			"max %d", serializedSize, wire.MaxBlockPayload)
		return ruleError(ErrBlockTooBig, str)
	}

	// The first transaction in a block must be a coinbase.
	transactions := block.Transactions()
	if !IsCoinBase(transactions[0]) {
		return ruleError(ErrFirstTxNotCoinbase, "first transaction in "+
			"block is not a coinbase")
	}

	// A block must not have more than one coinbase.
	for i, tx := range transactions[1:] {
		if IsCoinBase(tx) {
			str := fmt.Sprintf("block contains second coinbase at "+
				"index %d", i)
			return ruleError(ErrMultipleCoinbases, str)
		}
	}

	// Do some preliminary checks on each transaction to ensure they are
	// sane before continuing.
	for _, tx := range transactions {
		err := CheckTransactionSanity(tx)
		if err != nil {
			return err
		}
	}

	// Build merkle tree and ensure the calculated merkle root matches the
	// entry in the block header.  This also has the effect of caching all
	// of the transaction hashes in the block to speed up future hash
	// checks.  Bitcoind builds the tree here and checks the merkle root
	// after the following checks, but there is no reason not to check the
	// merkle root matches here.
	merkles := BuildMerkleTreeStore(block.Transactions())
	calculatedMerkleRoot := merkles[len(merkles)-1]
	if !header.MerkleRoot.IsEqual(calculatedMerkleRoot) {
		str := fmt.Sprintf("block merkle root is invalid - block "+
			"header indicates %v, but calculated value is %v",
			header.MerkleRoot, calculatedMerkleRoot)
		return ruleError(ErrBadMerkleRoot, str)
	}

	// Check for duplicate transactions.  This check will be fairly quick
	// since the transaction hashes are already cached due to building the
	// merkle tree above.
	existingTxHashes := make(map[wire.ShaHash]struct{})
	for _, tx := range transactions {
		hash := tx.Sha()
		if _, exists := existingTxHashes[*hash]; exists {
			str := fmt.Sprintf("block contains duplicate "+
				"transaction %v", hash)
			return ruleError(ErrDuplicateTx, str)
		}
		existingTxHashes[*hash] = struct{}{}
	}

	// The number of signature operations must be less than the maximum
	// allowed per block.
	totalSigOps := 0
	for _, tx := range transactions {
		// We could potentially overflow the accumulator so check for
		// overflow.
		lastSigOps := totalSigOps
		totalSigOps += CountSigOps(tx)
		if totalSigOps < lastSigOps || totalSigOps > MaxSigOpsPerBlock {
			str := fmt.Sprintf("block contains too many signature "+
				"operations - got %v, max %v", totalSigOps,
				MaxSigOpsPerBlock)
			return ruleError(ErrTooManySigOps, str)
		}
	}

	return nil
}
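The signature-operation loop above protects the running total by remembering the previous value and rejecting the block if the sum wrapped around or exceeded the per-block limit. A minimal standalone sketch of that accumulate-and-check pattern:

package main

import (
	"errors"
	"fmt"
	"math"
)

// sumWithLimit accumulates counts, failing if the running total overflows
// (wraps below the previous value) or exceeds limit.
func sumWithLimit(counts []int, limit int) (int, error) {
	total := 0
	for _, c := range counts {
		last := total
		total += c
		if total < last || total > limit {
			return 0, errors.New("total exceeds limit or overflows")
		}
	}
	return total, nil
}

func main() {
	fmt.Println(sumWithLimit([]int{10, 20, 30}, 100)) // 60 <nil>
	fmt.Println(sumWithLimit([]int{math.MaxInt, 1}, 100))
}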
Example #18
// fetchInputTransactions fetches the input transactions referenced by the
// transactions in the given block from its point of view.  See fetchTxList
// for more details on what the point of view entails.
func (b *BlockChain) fetchInputTransactions(node *blockNode, block *coinutil.Block) (TxStore, error) {
	// Build a map of in-flight transactions because some of the inputs in
	// this block could be referencing other transactions earlier in this
	// block which are not yet in the chain.
	txInFlight := map[wire.ShaHash]int{}
	transactions := block.Transactions()
	for i, tx := range transactions {
		txInFlight[*tx.Sha()] = i
	}

	// Loop through all of the transaction inputs (except for the coinbase
	// which has no inputs) collecting them into sets of what is needed and
	// what is already known (in-flight).
	txNeededSet := make(map[wire.ShaHash]struct{})
	txStore := make(TxStore)
	for i, tx := range transactions[1:] {
		for _, txIn := range tx.MsgTx().TxIn {
			// Add an entry to the transaction store for the needed
			// transaction with it set to missing by default.
			originHash := &txIn.PreviousOutPoint.Hash
			txD := &TxData{Hash: originHash, Err: database.ErrTxShaMissing}
			txStore[*originHash] = txD

			// It is acceptable for a transaction input to reference
			// the output of another transaction in this block only
			// if the referenced transaction comes before the
			// current one in this block.  Update the transaction
			// store accordingly when this is the case.  Otherwise,
			// we still need the transaction.
			//
			// NOTE: The >= is correct here because i is one less
			// than the actual position of the transaction within
			// the block due to skipping the coinbase.
			if inFlightIndex, ok := txInFlight[*originHash]; ok &&
				i >= inFlightIndex {

				originTx := transactions[inFlightIndex]
				txD.Tx = originTx
				txD.BlockHeight = node.height
				txD.Spent = make([]bool, len(originTx.MsgTx().TxOut))
				txD.Err = nil
			} else {
				txNeededSet[*originHash] = struct{}{}
			}
		}
	}

	// Request the input transactions from the point of view of the node.
	txNeededStore, err := b.fetchTxStore(node, txNeededSet)
	if err != nil {
		return nil, err
	}

	// Merge the results of the requested transactions and the in-flight
	// transactions.
	for _, txD := range txNeededStore {
		txStore[*txD.Hash] = txD
	}

	return txStore, nil
}
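The in-flight map built above lets an input reference an output created earlier in the same block without hitting the database: the reference only resolves in-block when the referenced transaction sits at a lower index than the spender. A self-contained sketch of that ordering rule over plain string IDs (nothing here comes from the chain package):

package main

import "fmt"

// resolveInBlock reports, for each (spenderIdx, referencedID) pair, whether
// the reference can be satisfied in-block, i.e. the referenced ID occurs
// strictly before the spender in the same ordered list of IDs.
func resolveInBlock(ids []string, refs map[int]string) map[int]bool {
	inFlight := make(map[string]int, len(ids))
	for i, id := range ids {
		inFlight[id] = i
	}

	resolved := make(map[int]bool, len(refs))
	for spenderIdx, refID := range refs {
		refIdx, ok := inFlight[refID]
		resolved[spenderIdx] = ok && refIdx < spenderIdx
	}
	return resolved
}

func main() {
	ids := []string{"coinbase", "txA", "txB"}
	refs := map[int]string{
		2: "txA", // txB spends txA: allowed, txA comes first
		1: "txB", // txA spends txB: not allowed, txB comes later
	}
	fmt.Println(resolveInBlock(ids, refs)) // map[1:false 2:true]
}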
Example #19
// InsertBlock inserts raw block and transaction data from a block into the
// database.  The first block inserted into the database will be treated as the
// genesis block.  Every subsequent block insert requires the referenced parent
// block to already exist.
func (db *LevelDb) InsertBlock(block *coinutil.Block) (height int32, rerr error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	blocksha := block.Sha()
	mblock := block.MsgBlock()
	rawMsg, err := block.Bytes()
	if err != nil {
		log.Warnf("Failed to obtain raw block sha %v", blocksha)
		return 0, err
	}
	txloc, err := block.TxLoc()
	if err != nil {
		log.Warnf("Failed to obtain raw block sha %v", blocksha)
		return 0, err
	}

	// Insert block into database
	newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock,
		rawMsg)
	if err != nil {
		log.Warnf("Failed to insert block %v %v %v", blocksha,
			&mblock.Header.PrevBlock, err)
		return 0, err
	}

	// At least two blocks in the distant past were generated by faulty
	// miners such that the sha of a transaction already exists in a
	// previous block.  Detect this condition and 'accept' the block anyway.
	for txidx, tx := range mblock.Transactions {
		txsha, err := block.TxSha(txidx)
		if err != nil {
			log.Warnf("failed to compute tx name block %v idx %v err %v", blocksha, txidx, err)
			return 0, err
		}
		spentbuflen := (len(tx.TxOut) + 7) / 8
		spentbuf := make([]byte, spentbuflen)
		if len(tx.TxOut)%8 != 0 {
			for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
				spentbuf[spentbuflen-1] |= (byte(1) << i)
			}
		}

		err = db.insertTx(txsha, newheight, txloc[txidx].TxStart, txloc[txidx].TxLen, spentbuf)
		if err != nil {
			log.Warnf("block %v idx %v failed to insert tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
			return 0, err
		}

		// Some old blocks contain duplicate transactions.  Attempt to
		// cleanly bypass this problem by marking the first instance as
		// fully spent.
		// http://blockexplorer.com/b/91812 dup in 91842
		// http://blockexplorer.com/b/91722 dup in 91880
		if newheight == 91812 {
			dupsha, err := wire.NewShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599")
			if err != nil {
				panic("invalid sha string in source")
			}
			if txsha.IsEqual(dupsha) {
				// marking TxOut[0] as spent
				po := wire.NewOutPoint(dupsha, 0)
				txI := wire.NewTxIn(po, []byte("garbage"))

				var spendtx wire.MsgTx
				spendtx.AddTxIn(txI)
				err = db.doSpend(&spendtx)
				if err != nil {
					log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
				}
			}
		}
		if newheight == 91722 {
			dupsha, err := wire.NewShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468")
			if err != nil {
				panic("invalid sha string in source")
			}
			if txsha.IsEqual(dupsha) {
				// marking TxOut[0] as spent
				po := wire.NewOutPoint(dupsha, 0)
				txI := wire.NewTxIn(po, []byte("garbage"))

				var spendtx wire.MsgTx
				spendtx.AddTxIn(txI)
				err = db.doSpend(&spendtx)
				if err != nil {
					log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
				}
			}
		}

		err = db.doSpend(tx)
		if err != nil {
			log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, txsha, txidx, err)
			return 0, err
		}
	}
	return newheight, nil
}
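The spent buffer built above is a bitmask with one bit per output, rounded up to whole bytes, and the unused padding bits of the last byte are pre-set, presumably so a fully spent transaction reads as an all-ones buffer. A standalone sketch of that construction together with a per-output marker and a fully-spent check (the helper names are illustrative):

package main

import "fmt"

// newSpentBuf allocates one bit per output and pre-sets the unused padding
// bits in the last byte, mirroring the construction in InsertBlock.
func newSpentBuf(numOutputs int) []byte {
	buflen := (numOutputs + 7) / 8
	buf := make([]byte, buflen)
	if numOutputs%8 != 0 {
		for i := uint(numOutputs % 8); i < 8; i++ {
			buf[buflen-1] |= byte(1) << i
		}
	}
	return buf
}

func markSpent(buf []byte, idx int) { buf[idx/8] |= byte(1) << (uint(idx) % 8) }

func fullySpent(buf []byte) bool {
	for _, b := range buf {
		if b != 0xff {
			return false
		}
	}
	return true
}

func main() {
	buf := newSpentBuf(3) // 3 outputs -> 1 byte, top 5 bits padded
	fmt.Printf("%08b %v\n", buf, fullySpent(buf)) // [11111000] false
	markSpent(buf, 0)
	markSpent(buf, 1)
	markSpent(buf, 2)
	fmt.Printf("%08b %v\n", buf, fullySpent(buf)) // [11111111] true
}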
Example #20
// checkConnectBlock performs several checks to confirm connecting the passed
// block to the main chain (including whatever reorganization might be necessary
// to get this node to the main chain) does not violate any rules.
//
// The CheckConnectBlock function makes use of this function to perform the
// bulk of its work.  The only difference is this function accepts a node which
// may or may not require reorganization to connect it to the main chain whereas
// CheckConnectBlock creates a new node which specifically connects to the end
// of the current main chain and then calls this function with that node.
//
// See the comments for CheckConnectBlock for some examples of the type of
// checks performed by this function.
func (b *BlockChain) checkConnectBlock(node *blockNode, block *coinutil.Block) error {
	// If the side chain blocks end up in the database, a call to
	// CheckBlockSanity should be done here in case a previous version
	// allowed a block that is no longer valid.  However, since the
	// implementation only currently uses memory for the side chain blocks,
	// it isn't currently necessary.

	// The coinbase for the Genesis block is not spendable, so just return
	// now.
	if node.hash.IsEqual(b.chainParams.GenesisHash) && b.bestChain == nil {
		return nil
	}

	// BIP0030 added a rule to prevent blocks which contain duplicate
	// transactions that 'overwrite' older transactions which are not fully
	// spent.  See the documentation for checkBIP0030 for more details.
	//
	// There are two blocks in the chain which violate this
	// rule, so the check must be skipped for those blocks. The
	// isBIP0030Node function is used to determine if this block is one
	// of the two blocks that must be skipped.
	enforceBIP0030 := !isBIP0030Node(node)
	if enforceBIP0030 {
		err := b.checkBIP0030(node, block)
		if err != nil {
			return err
		}
	}

	// Request a map that contains all input transactions for the block from
	// the point of view of its position within the block chain.  These
	// transactions are needed for verification of things such as
	// transaction inputs, counting pay-to-script-hashes, and scripts.
	txInputStore, err := b.fetchInputTransactions(node, block)
	if err != nil {
		return err
	}

	// BIP0016 describes a pay-to-script-hash type that is considered a
	// "standard" type.  The rules for this BIP only apply to transactions
	// after the timestamp defined by txscript.Bip16Activation.  See
	// https://en.bitcoin.it/wiki/BIP_0016 for more details.
	enforceBIP0016 := false
	if node.timestamp.After(txscript.Bip16Activation) {
		enforceBIP0016 = true
	}

	// The number of signature operations must be less than the maximum
	// allowed per block.  Note that the preliminary sanity checks on a
	// block also include a check similar to this one, but this check
	// expands the count to include a precise count of pay-to-script-hash
	// signature operations in each of the input transaction public key
	// scripts.
	transactions := block.Transactions()
	totalSigOps := 0
	for i, tx := range transactions {
		numsigOps := CountSigOps(tx)
		if enforceBIP0016 {
			// Since the first (and only the first) transaction has
			// already been verified to be a coinbase transaction,
			// use i == 0 as an optimization for the flag to
			// countP2SHSigOps for whether or not the transaction is
			// a coinbase transaction rather than having to do a
			// full coinbase check again.
			numP2SHSigOps, err := CountP2SHSigOps(tx, i == 0,
				txInputStore)
			if err != nil {
				return err
			}
			numsigOps += numP2SHSigOps
		}

		// Check for overflow or going over the limits.  We have to do
		// this on every loop iteration to avoid overflow.
		lastSigops := totalSigOps
		totalSigOps += numsigOps
		if totalSigOps < lastSigops || totalSigOps > MaxSigOpsPerBlock {
			str := fmt.Sprintf("block contains too many "+
				"signature operations - got %v, max %v",
				totalSigOps, MaxSigOpsPerBlock)
			return ruleError(ErrTooManySigOps, str)
		}
	}

	// Perform several checks on the inputs for each transaction.  Also
	// accumulate the total fees.  This could technically be combined with
	// the loop above instead of running another loop over the transactions,
	// but by separating it we can avoid running the more expensive (though
	// still relatively cheap as compared to running the scripts) checks
	// against all the inputs when the signature operations are out of
	// bounds.
	var totalFees int64
	for _, tx := range transactions {
		txFee, err := CheckTransactionInputs(tx, node.height, txInputStore)
		if err != nil {
			return err
		}

		// Sum the total fees and ensure we don't overflow the
		// accumulator.
		lastTotalFees := totalFees
		totalFees += txFee
		if totalFees < lastTotalFees {
			return ruleError(ErrBadFees, "total fees for block "+
				"overflows accumulator")
		}
	}

	// The total output values of the coinbase transaction must not exceed
	// the expected subsidy value plus total transaction fees gained from
	// mining the block.  It is safe to ignore overflow and out of range
	// errors here because those error conditions would have already been
	// caught by checkTransactionSanity.
	var totalSatoshiOut int64
	for _, txOut := range transactions[0].MsgTx().TxOut {
		totalSatoshiOut += txOut.Value
	}
	expectedSatoshiOut := CalcBlockSubsidy(node.height, b.chainParams) +
		totalFees
	if totalSatoshiOut > expectedSatoshiOut {
		str := fmt.Sprintf("coinbase transaction for block pays %v "+
			"which is more than expected value of %v",
			totalSatoshiOut, expectedSatoshiOut)
		return ruleError(ErrBadCoinbaseValue, str)
	}

	// Don't run scripts if this node is before the latest known good
	// checkpoint since the validity is verified via the checkpoints (all
	// transactions are included in the merkle root hash and any changes
	// will therefore be detected by the next checkpoint).  This is a huge
	// optimization because running the scripts is the most time consuming
	// portion of block handling.
	checkpoint := b.LatestCheckpoint()
	runScripts := !b.noVerify
	if checkpoint != nil && node.height <= checkpoint.Height {
		runScripts = false
	}

	// Get the previous block node.  This function is used over simply
	// accessing node.parent directly as it will dynamically create previous
	// block nodes as needed.  This helps allow only the pieces of the chain
	// that are needed to remain in memory.
	prevNode, err := b.getPrevNodeFromNode(node)
	if err != nil {
		log.Errorf("getPrevNodeFromNode: %v", err)
		return err
	}

	// Blocks created after the BIP0016 activation time need to have the
	// pay-to-script-hash checks enabled.
	var scriptFlags txscript.ScriptFlags
	if block.MsgBlock().Header.Timestamp.After(txscript.Bip16Activation) {
		scriptFlags |= txscript.ScriptBip16
	}

	// Enforce DER signatures for block versions 3+ once the majority of the
	// network has upgraded to the enforcement threshold.  This is part of
	// BIP0066.
	blockHeader := &block.MsgBlock().Header
	if blockHeader.Version >= 3 && b.isMajorityVersion(3, prevNode,
		b.chainParams.BlockEnforceNumRequired) {

		scriptFlags |= txscript.ScriptVerifyDERSignatures
	}

	// Enforce CHECKLOCKTIMEVERIFY for block versions 4+ once the majority
	// of the network has upgraded to the enforcement threshold.  This is
	// part of BIP0065.
	if blockHeader.Version >= 4 && b.isMajorityVersion(4, prevNode,
		b.chainParams.BlockEnforceNumRequired) {

		scriptFlags |= txscript.ScriptVerifyCheckLockTimeVerify
	}

	// Now that the inexpensive checks are done and have passed, verify the
	// transactions are actually allowed to spend the coins by running the
	// expensive ECDSA signature check scripts.  Doing this last helps
	// prevent CPU exhaustion attacks.
	if runScripts {
		err := checkBlockScripts(block, txInputStore, scriptFlags, b.sigCache)
		if err != nil {
			return err
		}
	}

	return nil
}
Example #21
// testAddrIndexOperations ensures that all normal operations concerning
// the optional address index function correctly.
func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *coinutil.Block, newestSha *wire.ShaHash, newestBlockIdx int32) {
	// Metadata about the current addr index state should be unset.
	sha, height, err := db.FetchAddrIndexTip()
	if err != database.ErrAddrIndexDoesNotExist {
		t.Fatalf("Address index metadata shouldn't be in db, hasn't been built up yet.")
	}

	var zeroHash wire.ShaHash
	if !sha.IsEqual(&zeroHash) {
		t.Fatalf("AddrIndexTip wrong hash got: %s, want %s", sha, &zeroHash)

	}

	if height != -1 {
		t.Fatalf("Addrindex not built up, yet a block index tip has been set to: %d.", height)
	}

	// Test enforcement of constraints for "limit" and "skip"
	var fakeAddr coinutil.Address
	_, _, err = db.FetchTxsForAddr(fakeAddr, -1, 0, false)
	if err == nil {
		t.Fatalf("Negative value for skip passed, should return an error")
	}

	_, _, err = db.FetchTxsForAddr(fakeAddr, 0, -1, false)
	if err == nil {
		t.Fatalf("Negative value for limit passed, should return an error")
	}

	// Simple test to index the output(s) of the first tx.
	testIndex := make(database.BlockAddrIndex)
	testTx, err := newestBlock.Tx(0)
	if err != nil {
		t.Fatalf("Block has no transactions, unable to test addr "+
			"indexing, err %v", err)
	}

	// Extract the dest addr from the tx.
	_, testAddrs, _, err := txscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams)
	if err != nil {
		t.Fatalf("Unable to decode tx output, err %v", err)
	}

	// Extract the hash160 from the output script.
	var hash160Bytes [ripemd160.Size]byte
	testHash160 := testAddrs[0].(*coinutil.AddressPubKey).AddressPubKeyHash().ScriptAddress()
	copy(hash160Bytes[:], testHash160[:])

	// Create a fake index.
	blktxLoc, _ := newestBlock.TxLoc()
	testIndex[hash160Bytes] = []*wire.TxLoc{&blktxLoc[0]}

	// Insert our test addr index into the DB.
	err = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex)
	if err != nil {
		t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
			" addrs for block #%d (%s) "+
			"err %v", newestBlockIdx, newestSha, err)
	}

	// Chain Tip of address should've been updated.
	assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)

	// Check index retrieval.
	txReplies, _, err := db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
	if err != nil {
		t.Fatalf("FetchTxsForAddr failed to correctly fetch txs for an "+
			"address, err %v", err)
	}
	// Should have one reply.
	if len(txReplies) != 1 {
		t.Fatalf("Failed to properly index tx by address.")
	}

	// Our test tx and indexed tx should have the same sha.
	indexedTx := txReplies[0]
	if !bytes.Equal(indexedTx.Sha.Bytes(), testTx.Sha().Bytes()) {
		t.Fatalf("Failed to fetch proper indexed tx. Expected sha %v, "+
			"fetched %v", testTx.Sha(), indexedTx.Sha)
	}

	// Shut down DB.
	db.Sync()
	db.Close()

	// Re-Open, tip still should be updated to current height and sha.
	db, err = database.OpenDB("leveldb", "tstdbopmode")
	if err != nil {
		t.Fatalf("Unable to re-open created db, err %v", err)
	}
	assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)

	// Delete the entire index.
	err = db.DeleteAddrIndex()
	if err != nil {
		t.Fatalf("Couldn't delete address index, err %v", err)
	}

	// Former index should no longer exist.
	txReplies, _, err = db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
	if err != nil {
		t.Fatalf("Unable to fetch transactions for address: %v", err)
	}
	if len(txReplies) != 0 {
		t.Fatalf("Address index was not successfully deleted. "+
			"Should have 0 tx's indexed, %v were returned.",
			len(txReplies))
	}

	// Tip should be blanked out.
	if _, _, err := db.FetchAddrIndexTip(); err != database.ErrAddrIndexDoesNotExist {
		t.Fatalf("Address index was not fully deleted.")
	}

}
Example #22
// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The blocks before and after the checkpoint must have timestamps which
//    are also before and after the checkpoint, respectively (due to the
//    median time allowance this is not always the case)
//  - The block must not contain any strange transactions such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
func (b *BlockChain) IsCheckpointCandidate(block *coinutil.Block) (bool, error) {
	// Checkpoints must be enabled.
	if b.noCheckpoints {
		return false, fmt.Errorf("checkpoints are disabled")
	}

	// A checkpoint must be in the main chain.
	exists, err := b.db.ExistsSha(block.Sha())
	if err != nil {
		return false, err
	}
	if !exists {
		return false, nil
	}

	// A checkpoint must be at least CheckpointConfirmations blocks before
	// the end of the main chain.
	blockHeight := block.Height()
	_, mainChainHeight, err := b.db.NewestSha()
	if err != nil {
		return false, err
	}
	if blockHeight > (mainChainHeight - CheckpointConfirmations) {
		return false, nil
	}

	// Get the previous block.
	prevHash := &block.MsgBlock().Header.PrevBlock
	prevBlock, err := b.db.FetchBlockBySha(prevHash)
	if err != nil {
		return false, err
	}

	// Get the next block.
	nextHash, err := b.db.FetchBlockShaByHeight(blockHeight + 1)
	if err != nil {
		return false, err
	}
	nextBlock, err := b.db.FetchBlockBySha(nextHash)
	if err != nil {
		return false, err
	}

	// A checkpoint must have timestamps for the block and the blocks on
	// either side of it in order (due to the median time allowance this is
	// not always the case).
	prevTime := prevBlock.MsgBlock().Header.Timestamp
	curTime := block.MsgBlock().Header.Timestamp
	nextTime := nextBlock.MsgBlock().Header.Timestamp
	if prevTime.After(curTime) || nextTime.Before(curTime) {
		return false, nil
	}

	// A checkpoint must have transactions that only contain standard
	// scripts.
	for _, tx := range block.Transactions() {
		if isNonstandardTransaction(tx) {
			return false, nil
		}
	}

	return true, nil
}
Example #23
// connectBestChain handles connecting the passed block to the chain while
// respecting proper chain selection according to the chain with the most
// proof of work.  In the typical case, the new block simply extends the main
// chain.  However, it may also be extending (or creating) a side chain (fork)
// which may or may not end up becoming the main chain depending on which fork
// cumulatively has the most proof of work.
//
// The flags modify the behavior of this function as follows:
//  - BFFastAdd: Avoids the call to checkConnectBlock which does several
//    expensive transaction validation operations.
//  - BFDryRun: Prevents the block from being connected and avoids modifying the
//    state of the memory chain index.  Also, any log messages related to
//    modifying the state are avoided.
func (b *BlockChain) connectBestChain(node *blockNode, block *coinutil.Block, flags BehaviorFlags) error {
	fastAdd := flags&BFFastAdd == BFFastAdd
	dryRun := flags&BFDryRun == BFDryRun

	// We haven't selected a best chain yet or we are extending the main
	// (best) chain with a new block.  This is the most common case.
	if b.bestChain == nil || node.parent.hash.IsEqual(b.bestChain.hash) {
		// Perform several checks to verify the block can be connected
		// to the main chain (including whatever reorganization might
		// be necessary to get this node to the main chain) without
		// violating any rules and without actually connecting the
		// block.
		if !fastAdd {
			err := b.checkConnectBlock(node, block)
			if err != nil {
				return err
			}
		}

		// Don't connect the block if performing a dry run.
		if dryRun {
			return nil
		}

		// Connect the block to the main chain.
		err := b.connectBlock(node, block)
		if err != nil {
			return err
		}

		// Connect the parent node to this node.
		if node.parent != nil {
			node.parent.children = append(node.parent.children, node)
		}

		return nil
	}
	if fastAdd {
		log.Warnf("fastAdd set in the side chain case? %v\n",
			block.Sha())
	}

	// We're extending (or creating) a side chain which may or may not
	// become the main chain, but in either case we need the block stored
	// for future processing, so add the block to the side chain holding
	// cache.
	if !dryRun {
		log.Debugf("Adding block %v to side chain cache", node.hash)
	}
	b.blockCache[*node.hash] = block
	b.index[*node.hash] = node

	// Connect the parent node to this node.
	node.inMainChain = false
	node.parent.children = append(node.parent.children, node)

	// Remove the block from the side chain cache and disconnect it from the
	// parent node when the function returns when running in dry run mode.
	if dryRun {
		defer func() {
			children := node.parent.children
			children = removeChildNode(children, node)
			node.parent.children = children

			delete(b.index, *node.hash)
			delete(b.blockCache, *node.hash)
		}()
	}

	// We're extending (or creating) a side chain, but the cumulative
	// work for this new side chain is not enough to make it the new chain.
	if node.workSum.Cmp(b.bestChain.workSum) <= 0 {
		// Skip logging info when the dry run flag is set.
		if dryRun {
			return nil
		}

		// Find the fork point.
		fork := node
		for ; fork.parent != nil; fork = fork.parent {
			if fork.inMainChain {
				break
			}
		}

		// Log information about how the block is forking the chain.
		if fork.hash.IsEqual(node.parent.hash) {
			log.Infof("FORK: Block %v forks the chain at height %d"+
				"/block %v, but does not cause a reorganize",
				node.hash, fork.height, fork.hash)
		} else {
			log.Infof("EXTEND FORK: Block %v extends a side chain "+
				"which forks the chain at height %d/block %v",
				node.hash, fork.height, fork.hash)
		}

		return nil
	}

	// We're extending (or creating) a side chain and the cumulative work
	// for this new side chain is more than the old best chain, so this side
	// chain needs to become the main chain.  In order to accomplish that,
	// find the common ancestor of both sides of the fork, disconnect the
	// blocks that form the (now) old fork from the main chain, and attach
	// the blocks that form the new chain to the main chain starting at the
	// common ancestor (the point where the chain forked).
	detachNodes, attachNodes := b.getReorganizeNodes(node)

	// Reorganize the chain.
	if !dryRun {
		log.Infof("REORGANIZE: Block %v is causing a reorganize.",
			node.hash)
	}
	err := b.reorganizeChain(detachNodes, attachNodes, flags)
	if err != nil {
		return err
	}

	return nil
}
Example #24
// CheckProofOfWork ensures the block header bits which indicate the target
// difficulty are in the min/max range and that the block hash is less than
// the target difficulty as claimed.
func CheckProofOfWork(block *coinutil.Block, powLimit *big.Int) error {
	return checkProofOfWork(&block.MsgBlock().Header, powLimit, BFNone)
}
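The underlying checkProofOfWork compares the block hash, interpreted as a large integer, against the target encoded by the header bits; the compact-bits decoding isn't shown here, so the sketch below only illustrates the final comparison step with math/big, using hypothetical byte values purely for illustration:

package main

import (
	"fmt"
	"math/big"
)

// hashMeetsTarget reports whether a hash, interpreted as a big-endian
// unsigned integer, is less than or equal to the target difficulty.
func hashMeetsTarget(hash []byte, target *big.Int) bool {
	hashNum := new(big.Int).SetBytes(hash)
	return hashNum.Cmp(target) <= 0
}

func main() {
	// Hypothetical 4-byte values; real targets and hashes are 256 bits.
	target := new(big.Int).SetBytes([]byte{0x00, 0x00, 0xff, 0xff})
	fmt.Println(hashMeetsTarget([]byte{0x00, 0x00, 0x12, 0x34}, target)) // true
	fmt.Println(hashMeetsTarget([]byte{0x00, 0x01, 0x00, 0x00}, target)) // false
}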
Example #25
// maybeAcceptBlock potentially accepts a block into the memory block chain.
// It performs several validation checks which depend on its position within
// the block chain before adding it.  The block is expected to have already gone
// through ProcessBlock before calling this function with it.
//
// The flags modify the behavior of this function as follows:
//  - BFDryRun: The memory chain index will not be pruned and no accept
//    notification will be sent since the block is not being accepted.
//
// The flags are also passed to checkBlockContext and connectBestChain.  See
// their documentation for how the flags modify their behavior.
func (b *BlockChain) maybeAcceptBlock(block *coinutil.Block, flags BehaviorFlags) error {
	dryRun := flags&BFDryRun == BFDryRun

	// Get a block node for the block previous to this one.  Will be nil
	// if this is the genesis block.
	prevNode, err := b.getPrevNodeFromBlock(block)
	if err != nil {
		log.Errorf("getPrevNodeFromBlock: %v", err)
		return err
	}

	// The height of this block is one more than the referenced previous
	// block.
	blockHeight := int32(0)
	if prevNode != nil {
		blockHeight = prevNode.height + 1
	}
	block.SetHeight(blockHeight)

	// The block must pass all of the validation rules which depend on the
	// position of the block within the block chain.
	err = b.checkBlockContext(block, prevNode, flags)
	if err != nil {
		return err
	}

	// Prune block nodes which are no longer needed before creating
	// a new node.
	if !dryRun {
		err = b.pruneBlockNodes()
		if err != nil {
			return err
		}
	}

	// Create a new block node for the block and add it to the in-memory
	// block chain (could be either a side chain or the main chain).
	blockHeader := &block.MsgBlock().Header
	newNode := newBlockNode(blockHeader, block.Sha(), blockHeight)
	if prevNode != nil {
		newNode.parent = prevNode
		newNode.height = blockHeight
		newNode.workSum.Add(prevNode.workSum, newNode.workSum)
	}

	// Connect the passed block to the chain while respecting proper chain
	// selection according to the chain with the most proof of work.  This
	// also handles validation of the transaction scripts.
	err = b.connectBestChain(newNode, block, flags)
	if err != nil {
		return err
	}

	// Notify the caller that the new block was accepted into the block
	// chain.  The caller would typically want to react by relaying the
	// inventory to other peers.
	if !dryRun {
		b.sendNotification(NTBlockAccepted, block)
	}

	return nil
}