Example #1
// dbIndexDisconnectBlock removes all of the index entries associated with the
// given block using the provided indexer and updates the tip of the indexer
// accordingly.  An error will be returned if the current tip for the indexer is
// not the passed block.
func dbIndexDisconnectBlock(dbTx database.Tx, indexer Indexer, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// Assert that the block being disconnected is the current tip of the
	// index.
	idxKey := indexer.Key()
	curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey)
	if err != nil {
		return err
	}
	if !curTipHash.IsEqual(block.Sha()) {
		return AssertError(fmt.Sprintf("dbIndexDisconnectBlock must "+
			"be called with the block at the current index tip "+
			"(%s, tip %s, block %s)", indexer.Name(),
			curTipHash, block.Sha()))
	}

	// Notify the indexer with the disconnected block so it can remove all
	// of the appropriate entries.
	if err := indexer.DisconnectBlock(dbTx, block, parent, view); err != nil {
		return err
	}

	// Update the current index tip.
	prevHash := &block.MsgBlock().Header.PrevBlock
	return dbPutIndexerTip(dbTx, idxKey, prevHash, uint32(block.Height())-1)
}
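
A minimal caller-side sketch of how this helper might be driven. It assumes the dcrd database package's database.DB interface with its Update method, and that the indexer, block, parent, and view values are supplied by the surrounding index manager; the helper name rollBackIndexTip is hypothetical. The same pattern applies to dbIndexConnectBlock when rolling the index forward.

// rollBackIndexTip is a hypothetical helper that disconnects the current
// index tip inside a single read-write database transaction.
func rollBackIndexTip(db database.DB, indexer Indexer, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	return db.Update(func(dbTx database.Tx) error {
		return dbIndexDisconnectBlock(dbTx, indexer, block, parent, view)
	})
}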
Example #2
// submitBlock submits the passed block to the network after ensuring it passes all
// of the consensus validation rules.
func (m *CPUMiner) submitBlock(block *dcrutil.Block) bool {
	m.submitBlockLock.Lock()
	defer m.submitBlockLock.Unlock()

	_, latestHeight := m.server.blockManager.chainState.Best()

	// Be sure to set this so ProcessBlock doesn't fail! - Decred
	block.SetHeight(latestHeight + 1)

	// Process this block using the same rules as blocks coming from other
	// nodes. This will in turn relay it to the network like normal.
	isOrphan, err := m.server.blockManager.ProcessBlock(block, blockchain.BFNone)
	if err != nil {
		// Anything other than a rule violation is an unexpected error,
		// so log that error as an internal error.
		rErr, ok := err.(blockchain.RuleError)
		if !ok {
			minrLog.Errorf("Unexpected error while processing "+
				"block submitted via CPU miner: %v", err)
			return false
		}

		// Occasionally errors arise from timing issues with
		// ResetMinDifficulty and block hashes that are above the
		// target.  Log these at the debug level.
		if m.server.chainParams.ResetMinDifficulty &&
			rErr.ErrorCode == blockchain.ErrHighHash {
			minrLog.Debugf("Block submitted via CPU miner rejected "+
				"because of ResetMinDifficulty time sync failure: %v",
				err)
			return false
		}

		// Other rule errors should be reported.
		minrLog.Errorf("Block submitted via CPU miner rejected: %v", err)
		return false
	}
	if isOrphan {
		minrLog.Errorf("Block submitted via CPU miner is an orphan building "+
			"on parent %v", block.MsgBlock().Header.PrevBlock)
		return false
	}

	// The block was accepted.
	coinbaseTxOuts := block.MsgBlock().Transactions[0].TxOut
	coinbaseTxGenerated := int64(0)
	for _, out := range coinbaseTxOuts {
		coinbaseTxGenerated += out.Value
	}
	minrLog.Infof("Block submitted via CPU miner accepted (hash %s, "+
		"height %v, amount %v)",
		block.Sha(),
		block.Height(),
		dcrutil.Amount(coinbaseTxGenerated))
	return true
}
Example #3
// DebugBlockString dumps a verbose message containing information about
// the transactions of a block.
func DebugBlockString(block *dcrutil.Block) string {
	if block == nil {
		return "block pointer nil"
	}

	var buffer bytes.Buffer

	hash := block.Sha()

	str := fmt.Sprintf("Block Header: %v Height: %v \n",
		hash, block.Height())
	buffer.WriteString(str)

	str = fmt.Sprintf("Block contains %v regular transactions "+
		"and %v stake transactions \n",
		len(block.Transactions()),
		len(block.STransactions()))
	buffer.WriteString(str)

	str = fmt.Sprintf("List of regular transactions \n")
	buffer.WriteString(str)

	for i, tx := range block.Transactions() {
		str = fmt.Sprintf("Index: %v, Hash: %v \n", i, tx.Sha())
		buffer.WriteString(str)
	}

	if len(block.STransactions()) == 0 {
		return buffer.String()
	}

	str = fmt.Sprintf("List of stake transactions \n")
	buffer.WriteString(str)

	for i, stx := range block.STransactions() {
		txTypeStr := ""
		txType := stake.DetermineTxType(stx)
		switch txType {
		case stake.TxTypeSStx:
			txTypeStr = "SStx"
		case stake.TxTypeSSGen:
			txTypeStr = "SSGen"
		case stake.TxTypeSSRtx:
			txTypeStr = "SSRtx"
		default:
			txTypeStr = "Error"
		}

		str = fmt.Sprintf("Index: %v, Type: %v, Hash: %v \n",
			i, txTypeStr, stx.Sha())
		buffer.WriteString(str)
	}

	return buffer.String()
}
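
A small usage sketch, assuming only that a *dcrutil.Block is at hand; the helper name dumpBlock is hypothetical and simply prints the verbose description produced above while debugging.

// dumpBlock is a hypothetical helper that writes the verbose block
// description to standard output.
func dumpBlock(block *dcrutil.Block) {
	fmt.Println(DebugBlockString(block))
}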
Example #4
// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain.  This indexer removes the
// hash-to-transaction mapping for every transaction in the block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) DisconnectBlock(dbTx database.Tx, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// Remove all of the transactions in the block from the index.
	if err := dbRemoveTxIndexEntries(dbTx, block, parent); err != nil {
		return err
	}

	// Remove the block ID index entry for the block being disconnected and
	// decrement the current internal block ID to account for it.
	blockSha := block.Sha()
	if err := dbRemoveBlockIDIndexEntry(dbTx, *blockSha); err != nil {
		return err
	}
	idx.curBlockID--
	return nil
}
Example #5
// InsertBlock synchronously queues a newly solved block to have its
// transactions indexed by address.
func (a *addrIndexer) InsertBlock(block *dcrutil.Block, parent *dcrutil.Block) error {
	addrIndex, err := a.indexBlockAddrs(block, parent)
	if err != nil {
		return fmt.Errorf("Unable to index transactions of"+
			" block: %v", err)
	}
	err = a.server.db.UpdateAddrIndexForBlock(block.Sha(),
		block.Height(),
		addrIndex)
	if err != nil {
		return fmt.Errorf("Unable to insert block: %v", err.Error())
	}

	return nil
}
Example #6
// checkCoinbaseUniqueHeight checks that, for all blocks with height > 1, the
// coinbase contains the height encoding, which makes coinbase hash collisions
// impossible.
func checkCoinbaseUniqueHeight(blockHeight int64, block *dcrutil.Block) error {
	if !(len(block.MsgBlock().Transactions) > 0) {
		str := fmt.Sprintf("block %v has no coinbase", block.Sha())
		return ruleError(ErrNoTransactions, str)
	}

	// Coinbase TxOut[0] is always tax, TxOut[1] is always
	// height + extranonce, so at least two outputs must
	// exist.
	if !(len(block.MsgBlock().Transactions[0].TxOut) > 1) {
		str := fmt.Sprintf("block %v is missing necessary coinbase "+
			"outputs", block.Sha())
		return ruleError(ErrFirstTxNotCoinbase, str)
	}

	// The first 4 bytes of the NullData output must be the
	// encoded height of the block, so that every coinbase
	// created has a unique transaction hash.
	nullData, err := txscript.GetNullDataContent(
		block.MsgBlock().Transactions[0].TxOut[1].Version,
		block.MsgBlock().Transactions[0].TxOut[1].PkScript)
	if err != nil {
		str := fmt.Sprintf("block %v txOut 1 has wrong pkScript "+
			"type", block.Sha())
		return ruleError(ErrFirstTxNotCoinbase, str)
	}

	if len(nullData) < 4 {
		str := fmt.Sprintf("block %v txOut 1 has too short nullData "+
			"push to contain height", block.Sha())
		return ruleError(ErrFirstTxNotCoinbase, str)
	}

	// Check the height and ensure it is correct.
	cbHeight := binary.LittleEndian.Uint32(nullData[0:4])
	if cbHeight != uint32(blockHeight) {
		prevBlock := block.MsgBlock().Header.PrevBlock
		str := fmt.Sprintf("block %v txOut 1 has wrong height in "+
			"coinbase; want %v, got %v; prevBlock %v, header height %v",
			block.Sha(), blockHeight, cbHeight, prevBlock,
			block.MsgBlock().Header.Height)
		return ruleError(ErrCoinbaseHeight, str)
	}

	return nil
}
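
For reference, a minimal sketch of the encoding this check expects: the block height serialized little-endian into the first four bytes of the coinbase's TxOut[1] null-data payload. The helper name and the 4-byte extranonce width are assumptions for illustration; only the leading four height bytes are what the check above verifies.

// encodeCoinbaseHeight builds an illustrative null-data payload with the
// block height in the first four bytes, matching what
// checkCoinbaseUniqueHeight verifies; the 4-byte extranonce width is an
// assumption for this sketch.
func encodeCoinbaseHeight(blockHeight int64, extraNonce uint32) []byte {
	nullData := make([]byte, 8)
	binary.LittleEndian.PutUint32(nullData[0:4], uint32(blockHeight))
	binary.LittleEndian.PutUint32(nullData[4:8], extraNonce)
	return nullData
}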
Example #7
// connectTransactions updates the view by adding all new utxos created by all
// of the transactions in the passed block, marking all utxos the transactions
// spend as spent, and setting the best hash for the view to the passed block.
// In addition, when the 'stxos' argument is not nil, it will be updated to
// append an entry for each spent txout.
func (b *BlockChain) connectTransactions(view *UtxoViewpoint, block *dcrutil.Block,
	parent *dcrutil.Block, stxos *[]spentTxOut) error {
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	thisNodeStakeViewpoint := ViewpointPrevInvalidStake
	if regularTxTreeValid {
		thisNodeStakeViewpoint = ViewpointPrevValidStake
	}

	if parent != nil && block.Height() != 0 {
		view.SetStakeViewpoint(ViewpointPrevValidInitial)
		err := view.fetchInputUtxos(b.db, block, parent)
		if err != nil {
			return err
		}
		mBlock := block.MsgBlock()
		votebits := mBlock.Header.VoteBits
		regularTxTreeValid := dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid)
		if regularTxTreeValid {
			for i, tx := range parent.Transactions() {
				err := view.connectTransaction(tx, parent.Height(), uint32(i),
					stxos)
				if err != nil {
					return err
				}
			}
		}
	}

	for i, stx := range block.STransactions() {
		view.SetStakeViewpoint(thisNodeStakeViewpoint)
		err := view.fetchInputUtxos(b.db, block, parent)
		if err != nil {
			return err
		}
		err = view.connectTransaction(stx, block.Height(), uint32(i), stxos)
		if err != nil {
			return err
		}
	}

	// Update the best hash for view to include this block since all of its
	// transactions have been connected.
	view.SetBestHash(block.Sha())
	return nil
}
Example #8
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain.  This indexer adds a hash-to-transaction mapping
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// Increment the internal block ID to use for the block being connected
	// and add all of the transactions in the block to the index.
	newBlockID := idx.curBlockID + 1
	if err := dbAddTxIndexEntries(dbTx, block, parent, newBlockID); err != nil {
		return err
	}

	// Add the new block ID index entry for the block being connected and
	// update the current internal block ID accordingly.
	blockSha := block.Sha()
	err := dbPutBlockIDIndexEntry(dbTx, *blockSha, newBlockID)
	if err != nil {
		return err
	}
	idx.curBlockID = newBlockID
	return nil
}
Example #9
// dbIndexConnectBlock adds all of the index entries associated with the
// given block using the provided indexer and updates the tip of the indexer
// accordingly.  An error will be returned if the current tip for the indexer is
// not the previous block for the passed block.
func dbIndexConnectBlock(dbTx database.Tx, indexer Indexer, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// Assert that the block being connected properly connects to the
	// current tip of the index.
	idxKey := indexer.Key()
	curTipHash, _, err := dbFetchIndexerTip(dbTx, idxKey)
	if err != nil {
		return err
	}
	if !curTipHash.IsEqual(&block.MsgBlock().Header.PrevBlock) {
		return AssertError(fmt.Sprintf("dbIndexConnectBlock must be "+
			"called with a block that extends the current index "+
			"tip (%s, tip %s, block %s)", indexer.Name(),
			curTipHash, block.Sha()))
	}

	// Notify the indexer with the connected block so it can index it.
	if err := indexer.ConnectBlock(dbTx, block, parent, view); err != nil {
		return err
	}

	// Update the current index tip.
	return dbPutIndexerTip(dbTx, idxKey, block.Sha(), uint32(block.Height()))
}
Example #10
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block, parent *dcrutil.Block, blockID uint32) error {
	// The offset and length of the transactions within the serialized
	// block, for the regular transactions of the parent (if added)
	// and the stake transactions of the current block.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var parentRegularTxs []*dcrutil.Tx
	var parentTxLocs []wire.TxLoc
	var parentBlockID uint32
	if regularTxTreeValid && block.Height() > 1 {
		var err error
		parentRegularTxs = parent.Transactions()

		parentTxLocs, _, err = parent.TxLoc()
		if err != nil {
			return err
		}

		parentSha := parent.Sha()
		parentBlockID, err = dbFetchBlockIDByHash(dbTx, *parentSha)
		if err != nil {
			return err
		}
	}
	_, blockStxLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	allTxs := append(parentRegularTxs, block.STransactions()...)
	allTxsLocs := append(parentTxLocs, blockStxLocs...)
	stakeTxStartIdx := len(parentRegularTxs)

	// As an optimization, allocate a single slice big enough to hold all
	// of the serialized transaction index entries for the block and
	// serialize them directly into the slice.  Then, pass the appropriate
	// subslice to the database to be written.  This approach significantly
	// cuts down on the number of required allocations.
	offset := 0
	serializedValues := make([]byte, len(allTxs)*txEntrySize)
	blockIDToUse := parentBlockID
	for i, tx := range allTxs {
		// Switch to using the newest block ID for the stake transactions,
		// since these are not from the parent.
		if i == stakeTxStartIdx {
			blockIDToUse = blockID
		}

		putTxIndexEntry(serializedValues[offset:], blockIDToUse, allTxsLocs[i])
		endOffset := offset + txEntrySize
		txSha := tx.Sha()
		err := dbPutTxIndexEntry(dbTx, *txSha,
			serializedValues[offset:endOffset:endOffset])
		if err != nil {
			return err
		}
		offset += txEntrySize
	}

	return nil
}
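
A standalone sketch of the allocation pattern used above, with an illustrative 8-byte entry size: serialize every fixed-size entry into one backing slice and hand out capped (three-index) subslices so a consumer appending to one entry cannot clobber its neighbours. The names illustrativeEntrySize and serializeEntries are not part of the original code.

const illustrativeEntrySize = 8

// serializeEntries packs each value into a single shared backing slice and
// returns one capacity-limited (three-index) subslice per entry, mirroring
// the single-allocation strategy above.
func serializeEntries(values []uint64) [][]byte {
	backing := make([]byte, len(values)*illustrativeEntrySize)
	entries := make([][]byte, 0, len(values))
	offset := 0
	for _, v := range values {
		end := offset + illustrativeEntrySize
		binary.LittleEndian.PutUint64(backing[offset:end], v)
		entries = append(entries, backing[offset:end:end])
		offset = end
	}
	return entries
}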
Example #11
// fetchInputTransactions fetches the input transactions referenced by the
// transactions in the given block from its point of view.  See fetchTxList
// for more details on what the point of view entails.
// Decred: This function is for verifying the validity of the regular tx tree
// in this block for the case that it does get accepted in the next block.
func (b *BlockChain) fetchInputTransactions(node *blockNode, block *dcrutil.Block, viewpoint int8) (TxStore, error) {
	// Verify we have the same node as we do block.
	blockHash := block.Sha()
	if !node.hash.IsEqual(blockHash) {
		return nil, fmt.Errorf("node and block hash are different!")
	}

	// If we need the previous block, grab it.
	var parentBlock *dcrutil.Block
	if viewpoint == ViewpointPrevValidInitial ||
		viewpoint == ViewpointPrevValidStake ||
		viewpoint == ViewpointPrevValidRegular {
		var errFetchBlock error
		parentBlock, errFetchBlock = b.getBlockFromHash(node.parentHash)
		if errFetchBlock != nil {
			return nil, errFetchBlock
		}
	}

	txInFlight := map[chainhash.Hash]int{}
	txNeededSet := make(map[chainhash.Hash]struct{})
	txStore := make(TxStore)

	// Case 1: ViewpointPrevValidInitial. We need the viewpoint of the
	// current chain without the TxTreeRegular of the previous block
	// added so we can validate that.
	if viewpoint == ViewpointPrevValidInitial {
		// Build a map of in-flight transactions because some of the inputs in
		// this block could be referencing other transactions earlier in this
		// block which are not yet in the chain.
		transactions := parentBlock.Transactions()
		for i, tx := range transactions {
			txInFlight[*tx.Sha()] = i
		}

		// Loop through all of the transaction inputs (except for the coinbase
		// which has no inputs) collecting them into sets of what is needed and
		// what is already known (in-flight).
		for i, tx := range transactions[1:] {
			for _, txIn := range tx.MsgTx().TxIn {
				// Add an entry to the transaction store for the needed
				// transaction with it set to missing by default.
				originHash := &txIn.PreviousOutPoint.Hash
				txD := &TxData{Hash: originHash, Err: database.ErrTxShaMissing}
				txStore[*originHash] = txD

				// It is acceptable for a transaction input to reference
				// the output of another transaction in this block only
				// if the referenced transaction comes before the
				// current one in this block.  Update the transaction
				// store accordingly when this is the case.  Otherwise,
				// we still need the transaction.
				//
				// NOTE: The >= is correct here because i is one less
				// than the actual position of the transaction within
				// the block due to skipping the coinbase.
				if inFlightIndex, ok := txInFlight[*originHash]; ok &&
					i >= inFlightIndex {

					originTx := transactions[inFlightIndex]
					txD.Tx = originTx
					txD.BlockHeight = node.height - 1
					txD.BlockIndex = uint32(inFlightIndex)
					txD.Spent = make([]bool, len(originTx.MsgTx().TxOut))
					txD.Err = nil
				} else {
					txNeededSet[*originHash] = struct{}{}
				}
			}
		}

		// Request the input transactions from the point of view of the node.
		txNeededStore, err := b.fetchTxStore(node, block, txNeededSet, viewpoint)
		if err != nil {
			return nil, err
		}

		// Merge the results of the requested transactions and the in-flight
		// transactions.
		for _, txD := range txNeededStore {
			txStore[*txD.Hash] = txD
		}

		return txStore, nil
	}

	// Case 2+3: ViewpointPrevValidStake and ViewpointPrevInvalidStake.
	// For ViewpointPrevValidStake, we need the viewpoint of the
	// current chain with the TxTreeRegular of the previous block
	// added so we can validate the TxTreeStake of the current block.
	// For ViewpointPrevInvalidStake, we need the viewpoint of the
	// current chain with the TxTreeRegular of the previous block
	// missing so we can validate the TxTreeStake of the current block.
	if viewpoint == ViewpointPrevValidStake ||
		viewpoint == ViewpointPrevInvalidStake {
		// We need all of the stake tx txins. None of these are considered
		// in-flight in relation to the regular tx tree or to other tx in
		// the stake tx tree, so don't do any of those expensive checks and
		// just append it to the tx slice.
		stransactions := block.STransactions()
		for _, tx := range stransactions {
			isSSGen, _ := stake.IsSSGen(tx)

			for i, txIn := range tx.MsgTx().TxIn {
				// Ignore stakebases.
				if isSSGen && i == 0 {
					continue
				}

				// Add an entry to the transaction store for the needed
				// transaction with it set to missing by default.
				originHash := &txIn.PreviousOutPoint.Hash
				txD := &TxData{Hash: originHash, Err: database.ErrTxShaMissing}
				txStore[*originHash] = txD

				txNeededSet[*originHash] = struct{}{}
			}
		}

		// Request the input transactions from the point of view of the node.
		txNeededStore, err := b.fetchTxStore(node, block, txNeededSet, viewpoint)
		if err != nil {
			return nil, err
		}

		return txNeededStore, nil
	}

	// Case 4+5: ViewpointPrevValidRegular and
	// ViewpointPrevInvalidRegular.
	// For ViewpointPrevValidRegular, we need the viewpoint of the
	// current chain with the TxTreeRegular of the previous block
	// and the TxTreeStake of the current block added so we can
	// validate the TxTreeRegular of the current block.
	// For ViewpointPrevInvalidRegular, we need the viewpoint of the
	// current chain with the TxTreeRegular of the previous block
	// missing and the TxTreeStake of the current block added so we
	// can validate the TxTreeRegular of the current block.
	if viewpoint == ViewpointPrevValidRegular ||
		viewpoint == ViewpointPrevInvalidRegular {
		transactions := block.Transactions()
		for i, tx := range transactions {
			txInFlight[*tx.Sha()] = i
		}

		// Loop through all of the transaction inputs (except for the coinbase
		// which has no inputs) collecting them into sets of what is needed and
		// what is already known (in-flight).
		txNeededSet := make(map[chainhash.Hash]struct{})
		txStore = make(TxStore)
		for i, tx := range transactions[1:] {
			for _, txIn := range tx.MsgTx().TxIn {
				// Add an entry to the transaction store for the needed
				// transaction with it set to missing by default.
				originHash := &txIn.PreviousOutPoint.Hash
				txD := &TxData{Hash: originHash, Err: database.ErrTxShaMissing}
				txStore[*originHash] = txD

				// It is acceptable for a transaction input to reference
				// the output of another transaction in this block only
				// if the referenced transaction comes before the
				// current one in this block.  Update the transaction
				// store accordingly when this is the case.  Otherwise,
				// we still need the transaction.
				//
				// NOTE: The >= is correct here because i is one less
				// than the actual position of the transaction within
				// the block due to skipping the coinbase.
				if inFlightIndex, ok := txInFlight[*originHash]; ok &&
					i >= inFlightIndex {

					originTx := transactions[inFlightIndex]
					txD.Tx = originTx
					txD.BlockHeight = node.height
					txD.BlockIndex = uint32(inFlightIndex)
					txD.Spent = make([]bool, len(originTx.MsgTx().TxOut))
					txD.Err = nil
				} else {
					txNeededSet[*originHash] = struct{}{}
				}
			}
		}

		// Request the input transactions from the point of view of the node.
		txNeededStore, err := b.fetchTxStore(node, block, txNeededSet, viewpoint)
		if err != nil {
			return nil, err
		}

		// Merge the results of the requested transactions and the in-flight
		// transactions.
		for _, txD := range txNeededStore {
			txStore[*txD.Hash] = txD
		}

		return txStore, nil
	}

	return nil, fmt.Errorf("Invalid viewpoint passed to fetchInputTransactions")
}
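
A tiny sketch isolating the in-flight comparison used in the loops above: because the range skips the coinbase (transactions[1:]), the loop index is one less than the transaction's true position, so the >= test admits spends of strictly earlier transactions in the same block while rejecting forward references. The helper name dependsOnEarlierTx is hypothetical.

// dependsOnEarlierTx reports whether the referenced output at loop index i
// (relative to transactions[1:]) comes from a transaction that appears
// earlier in the same block.
func dependsOnEarlierTx(txInFlight map[chainhash.Hash]int, originHash chainhash.Hash, i int) bool {
	inFlightIndex, ok := txInFlight[originHash]
	return ok && i >= inFlightIndex
}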
Example #12
// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The timestamps for the blocks before and after the checkpoint must have
//    timestamps which are also before and after the checkpoint, respectively
//    (due to the median time allowance this is not always the case)
//  - The block must not contain any strange transaction such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (b *BlockChain) IsCheckpointCandidate(block *dcrutil.Block) (bool, error) {
	b.chainLock.RLock()
	defer b.chainLock.RUnlock()

	// Checkpoints must be enabled.
	if b.noCheckpoints {
		return false, fmt.Errorf("checkpoints are disabled")
	}

	var isCandidate bool
	err := b.db.View(func(dbTx database.Tx) error {
		// A checkpoint must be in the main chain.
		blockHeight, err := dbFetchHeightByHash(dbTx, block.Sha())
		if err != nil {
			// Only return an error if it's not due to the block not
			// being in the main chain.
			if !isNotInMainChainErr(err) {
				return err
			}
			return nil
		}

		// Ensure the height of the passed block and the entry for the
		// block in the main chain match.  This should always be the
		// case unless the caller provided an invalid block.
		if blockHeight != block.Height() {
			return fmt.Errorf("passed block height of %d does not "+
				"match the main chain height of %d",
				block.Height(), blockHeight)
		}

		// A checkpoint must be at least CheckpointConfirmations blocks
		// before the end of the main chain.
		mainChainHeight := b.bestNode.height
		if blockHeight > (mainChainHeight - CheckpointConfirmations) {
			return nil
		}

		// Get the previous block header.
		prevHash := &block.MsgBlock().Header.PrevBlock
		prevHeader, err := dbFetchHeaderByHash(dbTx, prevHash)
		if err != nil {
			return err
		}

		// Get the next block header.
		nextHeader, err := dbFetchHeaderByHeight(dbTx, blockHeight+1)
		if err != nil {
			return err
		}

		// A checkpoint must have timestamps for the block and the
		// blocks on either side of it in order (due to the median time
		// allowance this is not always the case).
		prevTime := prevHeader.Timestamp
		curTime := block.MsgBlock().Header.Timestamp
		nextTime := nextHeader.Timestamp
		if prevTime.After(curTime) || nextTime.Before(curTime) {
			return nil
		}

		// A checkpoint must have transactions that only contain
		// standard scripts.
		for _, tx := range block.Transactions() {
			if isNonstandardTransaction(tx) {
				return nil
			}
		}

		// All of the checks passed, so the block is a candidate.
		isCandidate = true
		return nil
	})
	return isCandidate, err
}
Example #13
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain.  This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block, parent *dcrutil.Block,
	view *blockchain.UtxoViewpoint) error {
	// The offset and length of the transactions within the serialized
	// block for the regular transactions of the previous block, if
	// applicable.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var parentTxLocs []wire.TxLoc
	var parentBlockID uint32
	if regularTxTreeValid && block.Height() > 1 {
		var err error
		parentTxLocs, _, err = parent.TxLoc()
		if err != nil {
			return err
		}

		parentSha := parent.Sha()
		parentBlockID, err = dbFetchBlockIDByHash(dbTx, *parentSha)
		if err != nil {
			return err
		}
	}

	// The offset and length of the transactions within the serialized
	// block for the added stake transactions.
	_, blockStxLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// Nothing to index, just return.
	if len(parentTxLocs)+len(blockStxLocs) == 0 {
		return nil
	}

	// Get the internal block ID associated with the block.
	blockSha := block.Sha()
	blockID, err := dbFetchBlockIDByHash(dbTx, *blockSha)
	if err != nil {
		return err
	}

	// Build all of the address to transaction mappings in a local map.
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, parent, view)

	// Add all of the index entries for each address.
	stakeIdxsStart := len(parentTxLocs)
	allTxLocs := append(parentTxLocs, blockStxLocs...)
	addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		for _, txIdx := range txIdxs {
			// Switch to using the newest block ID for the stake transactions,
			// since these are not from the parent. Offset the index to be
			// correct for the location in this given block.
			blockIDToUse := parentBlockID
			if txIdx >= stakeIdxsStart {
				blockIDToUse = blockID
			}

			err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
				blockIDToUse, allTxLocs[txIdx])
			if err != nil {
				return err
			}
		}
	}

	return nil
}
Example #14
// insertBlock is the internal function which implements the public
// InsertBlock.  See the comment for InsertBlock for more details.
//
// This function MUST be called with the tmdb lock held (for writes).
func (tmdb *TicketDB) insertBlock(block *dcrutil.Block) (SStxMemMap,
	SStxMemMap, SStxMemMap, error) {

	height := block.Height()
	if height < tmdb.StakeEnabledHeight {
		return nil, nil, nil, nil
	}

	// Sanity check: Does the number of tickets in ticketMap equal the number
	// of tickets indicated in the header?
	poolSizeBlock := int(block.MsgBlock().Header.PoolSize)
	poolSize := 0
	for i := 0; i < BucketsSize; i++ {
		poolSize += len(tmdb.maps.ticketMap[i])
	}
	if poolSize != poolSizeBlock {
		return nil, nil, nil, fmt.Errorf("ticketpoolsize in block %v not "+
			"equal to the calculated ticketpoolsize, indicating database "+
			"corruption (got %v, want %v)",
			block.Sha(),
			poolSizeBlock,
			poolSize)
	}

	// Create the block in the spentTicketMap.
	tmdb.maybeInsertBlock(block.Height())

	// Iterate through all the SSGen (vote) transactions in the block and add
	// the tickets they spend to a map of tickets that were actually used; the
	// remaining tickets in the buckets are then considered missed
	// (missedTicketMap).  usedTickets is used purely as a set, so the value
	// stored for each ticket is irrelevant (an array would likely be more
	// efficient).
	usedTickets := make(map[chainhash.Hash]struct{})
	spendingHashes := make(map[chainhash.Hash]chainhash.Hash)
	revocations := make(map[chainhash.Hash]struct{})

	for _, staketx := range block.STransactions() {
		if is, _ := IsSSGen(staketx); is {
			msgTx := staketx.MsgTx()
			sstxIn := msgTx.TxIn[1] // sstx input
			sstxHash := sstxIn.PreviousOutPoint.Hash

			usedTickets[sstxHash] = struct{}{}
			spendingHashes[sstxHash] = *staketx.Sha()
		}

		if is, _ := IsSSRtx(staketx); is {
			msgTx := staketx.MsgTx()
			sstxIn := msgTx.TxIn[0] // sstx input
			sstxHash := sstxIn.PreviousOutPoint.Hash

			revocations[sstxHash] = struct{}{}
		}
	}

	// Spend or miss all the necessary tickets and do some sanity checks.
	parentBlock, err := tmdb.database.FetchBlockBySha(
		&block.MsgBlock().Header.PrevBlock)
	if err != nil {
		return nil, nil, nil, err
	}
	spentAndMissedTickets, err := tmdb.spendTickets(parentBlock,
		usedTickets,
		spendingHashes)
	if err != nil {
		return nil, nil, nil, err
	}

	// Expire all old tickets, and stick them into the spent and missed ticket
	// map too.
	expiredTickets, err := tmdb.expireTickets(height)
	if err != nil {
		return nil, nil, nil, err
	}
	if len(expiredTickets) > 0 && len(spentAndMissedTickets) == 0 {
		return nil, nil, nil, fmt.Errorf("tried to expire tickets before " +
			"stake validation height! TicketExpiry may be too small")
	}
	if len(expiredTickets) > 0 {
		for hash, ticket := range expiredTickets {
			spentAndMissedTickets[hash] = ticket
		}
	}

	revokedTickets, err := tmdb.revokeTickets(revocations)
	if err != nil {
		return nil, nil, nil, err
	}

	newTickets, err := tmdb.pushMatureTicketsAtHeight(block.Height())
	if err != nil {
		return nil, nil, nil, err
	}

	log.Debugf("Connected block %v (height %v) to the ticket database",
		block.Sha(), block.Height())

	return cloneSStxMemMap(spentAndMissedTickets), cloneSStxMemMap(newTickets),
		cloneSStxMemMap(revokedTickets), nil
}
Example #15
// maybeAcceptBlock potentially accepts a block into the memory block chain.
// It performs several validation checks which depend on its position within
// the block chain before adding it.  The block is expected to have already gone
// through ProcessBlock before calling this function with it.
//
// The flags modify the behavior of this function as follows:
//  - BFDryRun: The memory chain index will not be pruned and no accept
//    notification will be sent since the block is not being accepted.
//
// This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) maybeAcceptBlock(block *dcrutil.Block,
	flags BehaviorFlags) (bool, error) {
	dryRun := flags&BFDryRun == BFDryRun

	// Get a block node for the block previous to this one.  Will be nil
	// if this is the genesis block.
	prevNode, err := b.getPrevNodeFromBlock(block)
	if err != nil {
		log.Debugf("getPrevNodeFromBlock: %v", err)
		return false, err
	}

	// The height of this block is one more than the referenced previous
	// block.
	blockHeight := int64(0)
	if prevNode != nil {
		blockHeight = prevNode.height + 1
	}
	block.SetHeight(blockHeight)

	// The block must pass all of the validation rules which depend on the
	// position of the block within the block chain.
	err = b.checkBlockContext(block, prevNode, flags)
	if err != nil {
		return false, err
	}

	// Prune stake nodes and block nodes which are no longer needed before
	// creating a new node.
	if !dryRun {
		err := b.pruner.pruneChainIfNeeded()
		if err != nil {
			return false, err
		}
	}

	// Create a new block node for the block and add it to the in-memory
	// block chain (could be either a side chain or the main chain).
	blockHeader := &block.MsgBlock().Header
	newNode := newBlockNode(blockHeader, block.Sha(), blockHeight,
		ticketsSpentInBlock(block), ticketsRevokedInBlock(block),
		voteVersionsInBlock(block, b.chainParams))
	if prevNode != nil {
		newNode.parent = prevNode
		newNode.height = blockHeight
		newNode.workSum.Add(prevNode.workSum, newNode.workSum)
	}

	// Fetching a stake node could enable a new DoS vector, so restrict
	// this only to blocks that are recent in history.
	if newNode.height < b.bestNode.height-minMemoryNodes {
		newNode.stakeNode, err = b.fetchStakeNode(newNode)
		if err != nil {
			return false, err
		}
		newNode.stakeUndoData = newNode.stakeNode.UndoData()
	}

	// Connect the passed block to the chain while respecting proper chain
	// selection according to the chain with the most proof of work.  This
	// also handles validation of the transaction scripts.
	var onMainChain bool
	onMainChain, err = b.connectBestChain(newNode, block, flags)
	if err != nil {
		return false, err
	}

	// Notify the caller that the new block was accepted into the block
	// chain.  The caller would typically want to react by relaying the
	// inventory to other peers.
	if !dryRun {
		b.chainLock.Unlock()
		b.sendNotification(NTBlockAccepted,
			&BlockAcceptedNtfnsData{onMainChain, block})
		b.chainLock.Lock()
	}

	return onMainChain, nil
}
Example #16
// maybeAcceptBlock potentially accepts a block into the memory block chain.
// It performs several validation checks which depend on its position within
// the block chain before adding it.  The block is expected to have already gone
// through ProcessBlock before calling this function with it.
//
// The flags modify the behavior of this function as follows:
//  - BFDryRun: The memory chain index will not be pruned and no accept
//    notification will be sent since the block is not being accepted.
func (b *BlockChain) maybeAcceptBlock(block *dcrutil.Block,
	flags BehaviorFlags) (bool, error) {
	dryRun := flags&BFDryRun == BFDryRun

	// Get a block node for the block previous to this one.  Will be nil
	// if this is the genesis block.
	prevNode, err := b.getPrevNodeFromBlock(block)
	if err != nil {
		log.Debugf("getPrevNodeFromBlock: %v", err)
		return false, err
	}

	// The height of this block is one more than the referenced previous
	// block.
	blockHeight := int64(0)
	if prevNode != nil {
		blockHeight = prevNode.height + 1
	}
	block.SetHeight(blockHeight)

	// The block must pass all of the validation rules which depend on the
	// position of the block within the block chain.
	err = b.checkBlockContext(block, prevNode, flags)
	if err != nil {
		return false, err
	}

	// Prune block nodes which are no longer needed before creating
	// a new node.
	if !dryRun {
		err = b.pruneBlockNodes()
		if err != nil {
			return false, err
		}
	}

	// Create a new block node for the block and add it to the in-memory
	// block chain (could be either a side chain or the main chain).
	blockHeader := &block.MsgBlock().Header
	var voteBitsStake []uint16
	for _, stx := range block.STransactions() {
		if is, _ := stake.IsSSGen(stx); is {
			vb := stake.GetSSGenVoteBits(stx)
			voteBitsStake = append(voteBitsStake, vb)
		}
	}

	newNode := newBlockNode(blockHeader, block.Sha(), blockHeight, voteBitsStake)
	if prevNode != nil {
		newNode.parent = prevNode
		newNode.height = blockHeight
		newNode.workSum.Add(prevNode.workSum, newNode.workSum)
	}

	// Connect the passed block to the chain while respecting proper chain
	// selection according to the chain with the most proof of work.  This
	// also handles validation of the transaction scripts.
	var onMainChain bool
	onMainChain, err = b.connectBestChain(newNode, block, flags)
	if err != nil {
		return false, err
	}

	// Notify the caller that the new block was accepted into the block
	// chain.  The caller would typically want to react by relaying the
	// inventory to other peers.
	if !dryRun {
		b.sendNotification(NTBlockAccepted,
			&BlockAcceptedNtfnsData{onMainChain, block})
	}

	return onMainChain, nil
}
Example #17
// disconnectTransactions updates the view by removing all of the transactions
// created by the passed block, restoring all utxos the transactions spent by
// using the provided spent txo information, and setting the best hash for the
// view to the block before the passed block.
//
// This function will ONLY work correctly for a single transaction tree at a
// time because of index tracking.
func (b *BlockChain) disconnectTransactions(view *UtxoViewpoint,
	block *dcrutil.Block, parent *dcrutil.Block, stxos []spentTxOut) error {
	// Sanity check the correct number of stxos are provided.
	if len(stxos) != countSpentOutputs(block, parent) {
		return AssertError(fmt.Sprintf("disconnectTransactions "+
			"called with bad spent transaction out information "+
			"(len stxos %v, count is %v)", len(stxos),
			countSpentOutputs(block, parent)))
	}

	// Loop backwards through all transactions so everything is unspent in
	// reverse order.  This is necessary since transactions later in a block
	// can spend from previous ones.
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	thisNodeStakeViewpoint := ViewpointPrevInvalidStake
	if regularTxTreeValid {
		thisNodeStakeViewpoint = ViewpointPrevValidStake
	}
	view.SetStakeViewpoint(thisNodeStakeViewpoint)
	err := view.fetchInputUtxos(b.db, block, parent)
	if err != nil {
		return err
	}
	stxoIdx := len(stxos) - 1
	transactions := block.STransactions()
	for txIdx := len(transactions) - 1; txIdx > -1; txIdx-- {
		tx := transactions[txIdx]
		msgTx := tx.MsgTx()
		tt := stake.DetermineTxType(msgTx)

		// Clear this transaction from the view if it already exists or
		// create a new empty entry for when it does not.  This is done
		// because the code relies on its existence in the view in order
		// to signal modifications have happened.
		entry := view.entries[*tx.Sha()]
		if entry == nil {
			entry = newUtxoEntry(msgTx.Version, uint32(block.Height()),
				uint32(txIdx), IsCoinBaseTx(msgTx), msgTx.Expiry != 0, tt)
			if tt == stake.TxTypeSStx {
				stakeExtra := make([]byte, serializeSizeForMinimalOutputs(tx))
				putTxToMinimalOutputs(stakeExtra, tx)
				entry.stakeExtra = stakeExtra
			}
			view.entries[*tx.Sha()] = entry
		}
		entry.modified = true
		entry.sparseOutputs = make(map[uint32]*utxoOutput)

		// Loop backwards through all of the transaction inputs (except
		// for the coinbase which has no inputs) and unspend the
		// referenced txos.  This is necessary to match the order of the
		// spent txout entries.
		for txInIdx := len(tx.MsgTx().TxIn) - 1; txInIdx > -1; txInIdx-- {
			// Skip empty vote stakebases.
			if txInIdx == 0 && (tt == stake.TxTypeSSGen) {
				continue
			}

			// Ensure the spent txout index is decremented to stay
			// in sync with the transaction input.
			stxo := &stxos[stxoIdx]
			stxoIdx--

			// When there is not already an entry for the referenced
			// transaction in the view, it means it was fully spent,
			// so create a new utxo entry in order to resurrect it.
			txIn := tx.MsgTx().TxIn[txInIdx]
			originHash := &txIn.PreviousOutPoint.Hash
			originIndex := txIn.PreviousOutPoint.Index
			entry := view.LookupEntry(originHash)
			if entry == nil {
				if !stxo.txFullySpent {
					return AssertError(fmt.Sprintf("tried to revive utx %v from "+
						"non-fully spent stx entry", originHash))
				}

				entry = newUtxoEntry(tx.MsgTx().Version, stxo.height,
					stxo.index, stxo.isCoinBase, stxo.hasExpiry,
					stxo.txType)
				if stxo.txType == stake.TxTypeSStx {
					entry.stakeExtra = stxo.stakeExtra
				}
				view.entries[*originHash] = entry
			}

			// Mark the entry as modified since it is either new
			// or will be changed below.
			entry.modified = true

			// Restore the specific utxo using the stxo data from
			// the spend journal if it doesn't already exist in the
			// view.
			output, ok := entry.sparseOutputs[originIndex]
			if !ok {
				// Add the unspent transaction output.
				entry.sparseOutputs[originIndex] = &utxoOutput{
					compressed:    stxo.compressed,
					spent:         false,
					amount:        txIn.ValueIn,
					scriptVersion: stxo.scriptVersion,
					pkScript:      stxo.pkScript,
				}
				continue
			}

			// Mark the existing referenced transaction output as
			// unspent.
			output.spent = false
		}
	}

	// There is no regular tx from before the genesis block, so ignore the genesis
	// block for the next step.
	if parent != nil && block.Height() != 0 {
		// Only bother to unspend transactions if the parent's tx tree was
		// validated. Otherwise, these transactions were never in the blockchain's
		// history in the first place.
		if regularTxTreeValid {
			view.SetStakeViewpoint(ViewpointPrevValidInitial)
			err = view.fetchInputUtxos(b.db, block, parent)
			if err != nil {
				return err
			}

			transactions := parent.Transactions()
			for txIdx := len(transactions) - 1; txIdx > -1; txIdx-- {
				tx := transactions[txIdx]

				// Clear this transaction from the view if it already exists or
				// create a new empty entry for when it does not.  This is done
				// because the code relies on its existence in the view in order
				// to signal modifications have happened.
				isCoinbase := txIdx == 0
				entry := view.entries[*tx.Sha()]
				if entry == nil {
					entry = newUtxoEntry(tx.MsgTx().Version,
						uint32(parent.Height()), uint32(txIdx), isCoinbase,
						tx.MsgTx().Expiry != 0, stake.TxTypeRegular)
					view.entries[*tx.Sha()] = entry
				}
				entry.modified = true
				entry.sparseOutputs = make(map[uint32]*utxoOutput)

				// Loop backwards through all of the transaction inputs (except
				// for the coinbase which has no inputs) and unspend the
				// referenced txos.  This is necessary to match the order of the
				// spent txout entries.
				if isCoinbase {
					continue
				}
				for txInIdx := len(tx.MsgTx().TxIn) - 1; txInIdx > -1; txInIdx-- {
					// Ensure the spent txout index is decremented to stay
					// in sync with the transaction input.
					stxo := &stxos[stxoIdx]
					stxoIdx--

					// When there is not already an entry for the referenced
					// transaction in the view, it means it was fully spent,
					// so create a new utxo entry in order to resurrect it.
					txIn := tx.MsgTx().TxIn[txInIdx]
					originHash := &txIn.PreviousOutPoint.Hash
					originIndex := txIn.PreviousOutPoint.Index
					entry := view.entries[*originHash]
					if entry == nil {
						if !stxo.txFullySpent {
							return AssertError(fmt.Sprintf("tried to "+
								"revive utx %v from non-fully spent stx entry",
								originHash))
						}
						entry = newUtxoEntry(tx.MsgTx().Version,
							stxo.height, stxo.index, stxo.isCoinBase,
							stxo.hasExpiry, stxo.txType)
						if stxo.txType == stake.TxTypeSStx {
							entry.stakeExtra = stxo.stakeExtra
						}
						view.entries[*originHash] = entry
					}

					// Mark the entry as modified since it is either new
					// or will be changed below.
					entry.modified = true

					// Restore the specific utxo using the stxo data from
					// the spend journal if it doesn't already exist in the
					// view.
					output, ok := entry.sparseOutputs[originIndex]
					if !ok {
						// Add the unspent transaction output.
						entry.sparseOutputs[originIndex] = &utxoOutput{
							compressed:    stxo.compressed,
							spent:         false,
							amount:        txIn.ValueIn,
							scriptVersion: stxo.scriptVersion,
							pkScript:      stxo.pkScript,
						}
						continue
					}

					// Mark the existing referenced transaction output as
					// unspent.
					output.spent = false
				}
			}
		}
	}

	// Update the best hash for view to the previous block since all of the
	// transactions for the current block have been disconnected.
	view.SetBestHash(parent.Sha())
	return nil
}
Example #18
// InsertBlock inserts raw block and transaction data from a block into the
// database.  The first block inserted into the database will be treated as the
// genesis block.  Every subsequent block insert requires the referenced parent
// block to already exist.
func (db *LevelDb) InsertBlock(block *dcrutil.Block) (height int64, rerr error) {
	// Be careful with this function on syncs.  It contains decred changes.

	// Obtain the previous block first so long as it's not the genesis block
	var blockPrev *dcrutil.Block

	// Decred: WARNING. This function assumes that all block insertion calls pass
	// dcrutil.Blocks with block.blockHeight set correctly. However, loading the
	// genesis block in btcd did not do this (via the block manager); before
	// production it should be verified that all calls to this function pass
	// blocks with block.blockHeight set correctly.
	if block.Height() != 0 {
		var errBlockPrev error
		blockPrev, errBlockPrev = db.FetchBlockBySha(&block.MsgBlock().Header.PrevBlock)
		if errBlockPrev != nil {
			blockSha := block.Sha()
			log.Warnf("Failed to fetch parent block of block %v", blockSha)
			return 0, errBlockPrev
		}
	}

	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	blocksha := block.Sha()
	mblock := block.MsgBlock()
	rawMsg, err := block.Bytes()
	if err != nil {
		log.Warnf("Failed to obtain raw block sha %v", blocksha)
		return 0, err
	}
	_, sTxLoc, err := block.TxLoc()
	if err != nil {
		log.Warnf("Failed to obtain raw block sha %v, stxloc %v", blocksha, sTxLoc)
		return 0, err
	}

	// Insert block into database
	newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock,
		rawMsg)
	if err != nil {
		log.Warnf("Failed to insert block %v %v %v", blocksha,
			&mblock.Header.PrevBlock, err)
		return 0, err
	}

	// Get data necessary to process regular tx tree of parent block if it's not
	// the genesis block.
	var mBlockPrev *wire.MsgBlock
	var txLoc []wire.TxLoc

	if blockPrev != nil {
		blockShaPrev := blockPrev.Sha()

		mBlockPrev = blockPrev.MsgBlock()

		txLoc, _, err = blockPrev.TxLoc()
		if err != nil {
			log.Warnf("Failed to obtain raw block sha %v, txloc %v", blockShaPrev, txLoc)
			return 0, err
		}
	}

	// Insert the regular tx of the parent block into the tx database if the vote
	// bits enable it, and if it's not the genesis block.
	votebits := mblock.Header.VoteBits
	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		for txidx, tx := range mBlockPrev.Transactions {
			txsha, err := blockPrev.TxSha(txidx)

			if err != nil {
				log.Warnf("failed to compute tx name block %v idx %v err %v", blocksha, txidx, err)
				return 0, err
			}
			spentbuflen := (len(tx.TxOut) + 7) / 8
			spentbuf := make([]byte, spentbuflen, spentbuflen)
			if len(tx.TxOut)%8 != 0 {
				for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
					spentbuf[spentbuflen-1] |= (byte(1) << i)
				}
			}

			// newheight-1 instead of newheight below, as the tx is actually found
			// in the parent.
			//fmt.Printf("insert tx %v into db at height %v\n", txsha, newheight)
			err = db.insertTx(txsha, newheight-1, uint32(txidx), txLoc[txidx].TxStart, txLoc[txidx].TxLen, spentbuf)
			if err != nil {
				log.Warnf("block %v idx %v failed to insert tx %v %v err %v", blocksha, newheight-1, &txsha, txidx, err)
				return 0, err
			}

			err = db.doSpend(tx)
			if err != nil {
				log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, txsha, txidx, err)
				return 0, err
			}
		}
	}

	// Insert the stake tx of the current block into the tx database.
	if len(mblock.STransactions) != 0 {
		for txidx, tx := range mblock.STransactions {
			txsha, err := block.STxSha(txidx)

			if err != nil {
				log.Warnf("failed to compute stake tx name block %v idx %v err %v", blocksha, txidx, err)
				return 0, err
			}
			spentbuflen := (len(tx.TxOut) + 7) / 8
			spentbuf := make([]byte, spentbuflen, spentbuflen)
			if len(tx.TxOut)%8 != 0 {
				for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
					spentbuf[spentbuflen-1] |= (byte(1) << i)
				}
			}

			err = db.insertTx(txsha, newheight, uint32(txidx), sTxLoc[txidx].TxStart, sTxLoc[txidx].TxLen, spentbuf)
			if err != nil {
				log.Warnf("block %v idx %v failed to insert stake tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
				return 0, err
			}

			err = db.doSpend(tx)
			if err != nil {
				log.Warnf("block %v idx %v failed to spend stx %v %v err %v", blocksha, newheight, txsha, txidx, err)
				return 0, err
			}
		}
	}

	return newheight, nil
}
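
A standalone sketch of the spent-output bitmap built inline above: one bit per TxOut, with the unused high bits of the final byte pre-set so that only bits corresponding to real outputs remain clear (unspent). The helper name newSpentBuf is hypothetical.

// newSpentBuf allocates a per-output spent bitmap for a transaction with
// numTxOut outputs and marks the padding bits of the last byte as spent.
func newSpentBuf(numTxOut int) []byte {
	spentbuflen := (numTxOut + 7) / 8
	spentbuf := make([]byte, spentbuflen)
	if numTxOut%8 != 0 {
		for i := uint(numTxOut % 8); i < 8; i++ {
			spentbuf[spentbuflen-1] |= byte(1) << i
		}
	}
	return spentbuf
}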
Example #19
// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block chain.  It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
// the block chain along with best chain selection and reorganization.
//
// It returns a first bool specifying whether or not the block ended up on the
// main chain rather than on a fork or side chain; true means it is on the
// main chain.
//
// It returns a second bool which indicates whether or not the block is an
// orphan, along with any errors that occurred during processing.  The returned
// bools are only valid when the error is nil.
func (b *BlockChain) ProcessBlock(block *dcrutil.Block,
	timeSource MedianTimeSource, flags BehaviorFlags) (bool, bool, error) {
	fastAdd := flags&BFFastAdd == BFFastAdd
	dryRun := flags&BFDryRun == BFDryRun

	blockHash := block.Sha()
	log.Tracef("Processing block %v", blockHash)

	// The block must not already exist in the main chain or side chains.
	exists, err := b.blockExists(blockHash)
	if err != nil {
		return false, false, err
	}
	if exists {
		str := fmt.Sprintf("already have block %v", blockHash)
		return false, false, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := b.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %v", blockHash)
		return false, false, ruleError(ErrDuplicateBlock, str)
	}

	// Perform preliminary sanity checks on the block and its transactions.
	err = checkBlockSanity(block, timeSource, flags, b.chainParams)
	if err != nil {
		return false, false, err
	}

	// Find the previous checkpoint and perform some additional checks based
	// on the checkpoint.  This provides a few nice properties such as
	// preventing old side chain blocks before the last checkpoint,
	// rejecting easy to mine, but otherwise bogus, blocks that could be
	// used to eat memory, and ensuring expected (versus claimed) proof of
	// work requirements since the previous checkpoint are met.
	blockHeader := &block.MsgBlock().Header
	checkpointBlock, err := b.findPreviousCheckpoint()
	if err != nil {
		return false, false, err
	}
	if checkpointBlock != nil {
		// Ensure the block timestamp is after the checkpoint timestamp.
		checkpointHeader := &checkpointBlock.MsgBlock().Header
		checkpointTime := checkpointHeader.Timestamp
		if blockHeader.Timestamp.Before(checkpointTime) {
			str := fmt.Sprintf("block %v has timestamp %v before "+
				"last checkpoint timestamp %v", blockHash,
				blockHeader.Timestamp, checkpointTime)
			return false, false, ruleError(ErrCheckpointTimeTooOld, str)
		}
		if !fastAdd {
			// Even though the checks prior to now have already ensured the
			// proof of work exceeds the claimed amount, the claimed amount
			// is a field in the block header which could be forged.  This
			// check ensures the proof of work is at least the minimum
			// expected based on elapsed time since the last checkpoint and
			// maximum adjustment allowed by the retarget rules.
			duration := blockHeader.Timestamp.Sub(checkpointTime)
			requiredTarget := CompactToBig(b.calcEasiestDifficulty(
				checkpointHeader.Bits, duration))
			currentTarget := CompactToBig(blockHeader.Bits)
			if currentTarget.Cmp(requiredTarget) > 0 {
				str := fmt.Sprintf("block target difficulty of %064x "+
					"is too low when compared to the previous "+
					"checkpoint", currentTarget)
				return false, false, ruleError(ErrDifficultyTooLow, str)
			}
		}
	}

	// Handle orphan blocks.
	prevHash := &blockHeader.PrevBlock
	if !prevHash.IsEqual(zeroHash) {
		prevHashExists, err := b.blockExists(prevHash)
		if err != nil {
			return false, false, err
		}
		if !prevHashExists {
			if !dryRun {
				log.Infof("Adding orphan block %v with parent %v",
					blockHash, prevHash)
				b.addOrphanBlock(block)
			}

			return false, true, err
		}
	}

	// The block has passed all context independent checks and appears sane
	// enough to potentially accept it into the block chain.
	var onMainChain bool
	onMainChain, err = b.maybeAcceptBlock(block, flags)
	if err != nil {
		return false, false, err
	}

	// Don't process any orphans or log when the dry run flag is set.
	if !dryRun {
		// Accept any orphan blocks that depend on this block (they are
		// no longer orphans) and repeat for those accepted blocks until
		// there are no more.
		err := b.processOrphans(blockHash, flags)
		if err != nil {
			return false, false, err
		}

		log.Debugf("Accepted block %v", blockHash)
	}

	return onMainChain, false, err
}
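
A hedged caller-side sketch for this three-return ProcessBlock variant, assuming the chain, block, and timeSource values come from the surrounding node setup; the wrapper name and log message are illustrative only.

// processNewBlock is a hypothetical wrapper that feeds a block into the
// chain and reports how it was handled.
func processNewBlock(chain *BlockChain, block *dcrutil.Block, timeSource MedianTimeSource) error {
	onMainChain, isOrphan, err := chain.ProcessBlock(block, timeSource, BFNone)
	if err != nil {
		return err
	}
	log.Debugf("block %v processed (main chain: %v, orphan: %v)",
		block.Sha(), onMainChain, isOrphan)
	return nil
}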
Example #20
// connectTickets updates the passed map by removing any tickets from the
// ticket pool that have been considered spent or missed in this block
// according to the block header.  Then, it connects all the newly mature
// tickets to the passed map.
func (b *BlockChain) connectTickets(tixStore TicketStore,
	node *blockNode,
	block *dcrutil.Block) error {
	if tixStore == nil {
		return fmt.Errorf("nil ticket store!")
	}

	// Nothing to do if tickets haven't yet possibly matured.
	height := node.height
	if height < b.chainParams.StakeEnabledHeight {
		return nil
	}

	parentBlock, err := b.GetBlockFromHash(node.parentHash)
	if err != nil {
		return err
	}

	revocations := node.header.Revocations

	tM := int64(b.chainParams.TicketMaturity)

	// Skip a number of validation steps before chain voting is
	// required.
	if node.height >= b.chainParams.StakeValidationHeight {
		regularTxTreeValid := dcrutil.IsFlagSet16(node.header.VoteBits,
			dcrutil.BlockValid)
		thisNodeStakeViewpoint := ViewpointPrevInvalidStake
		if regularTxTreeValid {
			thisNodeStakeViewpoint = ViewpointPrevValidStake
		}

		// We need the missed tickets bucket from the original perspective of
		// the node.
		missedTickets, err := b.GenerateMissedTickets(tixStore)
		if err != nil {
			return err
		}

		// TxStore at blockchain HEAD + TxTreeRegular of prevBlock (if
		// validated) for this node.
		txInputStoreStake, err := b.fetchInputTransactions(node, block,
			thisNodeStakeViewpoint)
		if err != nil {
			errStr := fmt.Sprintf("fetchInputTransactions failed for incoming "+
				"node %v; error given: %v", node.hash, err)
			return errors.New(errStr)
		}

		// PART 1: Spend/miss winner tickets

		// Iterate through all the SSGen (vote) tx in the block and add them to
		// a map of tickets that were actually used.
		spentTicketsFromBlock := make(map[chainhash.Hash]bool)
		numberOfSSgen := 0
		for _, staketx := range block.STransactions() {
			if is, _ := stake.IsSSGen(staketx); is {
				msgTx := staketx.MsgTx()
				sstxIn := msgTx.TxIn[1] // sstx input
				sstxHash := sstxIn.PreviousOutPoint.Hash

				originTx, exists := txInputStoreStake[sstxHash]
				if !exists {
					str := fmt.Sprintf("unable to find input transaction "+
						"%v for transaction %v", sstxHash, staketx.Sha())
					return ruleError(ErrMissingTx, str)
				}

				sstxHeight := originTx.BlockHeight

				// Check maturity of ticket; we can only spend the ticket after it
				// hits maturity at height + tM + 1.
				if (height - sstxHeight) < (tM + 1) {
					blockSha := block.Sha()
					errStr := fmt.Sprintf("Error: A ticket spend as an SSGen in "+
						"block height %v was immature! Block sha %v",
						height,
						blockSha)
					return errors.New(errStr)
				}

				// Fill out the ticket data.
				spentTicketsFromBlock[sstxHash] = true
				numberOfSSgen++
			}
		}

		// Obtain the TicketsPerBlock many tickets that were selected this round,
		// then check these against the tickets that were actually used to make
		// sure that any SSGen actually match the selected tickets. Commit the
		// spent or missed tickets to the ticket store after.
		spentAndMissedTickets := make(TicketStore)
		tixSpent := 0
		tixMissed := 0

		// Sort the entire list of tickets lexicographically by sorting
		// each bucket and then appending it to the list. Start by generating
		// a prefix matched map of tickets to speed up the lookup.
		tpdBucketMap := make(map[uint8][]*TicketPatchData)
		for _, tpd := range tixStore {
			// Bucket does not exist.
			if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok {
				tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1)
				tpdBucketMap[tpd.td.Prefix][0] = tpd
			} else {
				// Bucket exists.
				data := tpdBucketMap[tpd.td.Prefix]
				tpdBucketMap[tpd.td.Prefix] = append(data, tpd)
			}
		}
		totalTickets := 0
		sortedSlice := make([]*stake.TicketData, 0)
		for i := 0; i < stake.BucketsSize; i++ {
			ltb, err := b.GenerateLiveTicketBucket(tixStore, tpdBucketMap,
				uint8(i))
			if err != nil {
				h := node.hash
				str := fmt.Sprintf("Failed to generate live ticket bucket "+
					"%v for node %v, height %v! Error: %v",
					i,
					h,
					node.height,
					err.Error())
				return fmt.Errorf(str)
			}
			mapLen := len(ltb)

			tempTdSlice := stake.NewTicketDataSlice(mapLen)
			itr := 0 // Iterator
			for _, td := range ltb {
				tempTdSlice[itr] = td
				itr++
				totalTickets++
			}
			sort.Sort(tempTdSlice)
			sortedSlice = append(sortedSlice, tempTdSlice...)
		}

		// Use the parent block's header to seed a PRNG that picks the
		// lottery winners.
		ticketsPerBlock := int(b.chainParams.TicketsPerBlock)
		pbhB, err := parentBlock.MsgBlock().Header.Bytes()
		if err != nil {
			return err
		}
		prng := stake.NewHash256PRNG(pbhB)
		ts, err := stake.FindTicketIdxs(int64(totalTickets), ticketsPerBlock, prng)
		if err != nil {
			return err
		}

		ticketsToSpendOrMiss := make([]*stake.TicketData, ticketsPerBlock)
		for i, idx := range ts {
			ticketsToSpendOrMiss[i] = sortedSlice[idx]
		}

		// Spend or miss these tickets by checking for their existence in the
		// passed spentTicketsFromBlock map.
		for _, ticket := range ticketsToSpendOrMiss {
			// Move the ticket from active tickets map into the used tickets
			// map if the ticket was spent.
			wasSpent := spentTicketsFromBlock[ticket.SStxHash]

			if wasSpent {
				tpd := NewTicketPatchData(ticket, TiSpent, nil)
				spentAndMissedTickets[ticket.SStxHash] = tpd
				tixSpent++
			} else { // Ticket was not spent in this block, so it was missed.
				tpd := NewTicketPatchData(ticket, TiMissed, nil)
				spentAndMissedTickets[ticket.SStxHash] = tpd
				tixMissed++
			}
		}

		// This error is returned if for some reason there exists an SSGen in
		// the block that doesn't spend a ticket from the eligible list of
		// tickets, thus making it invalid.
		if tixSpent != numberOfSSgen {
			errStr := fmt.Sprintf("an invalid number of tickets was "+
				"spent: %v were spent, but %v should have been spent",
				tixSpent, numberOfSSgen)
			return errors.New(errStr)
		}

		if tixMissed != (ticketsPerBlock - numberOfSSgen) {
			errStr := fmt.Sprintf("an invalid number of tickets was "+
				"missed: %v were missed, but %v should have been missed",
				tixMissed, ticketsPerBlock-numberOfSSgen)
			return errors.New(errStr)
		}

		if (tixSpent + tixMissed) != int(b.chainParams.TicketsPerBlock) {
			errStr := fmt.Sprintf("an invalid number of tickets was "+
				"spent and missed: %v total, but TicketsPerBlock is %v",
				tixSpent+tixMissed, ticketsPerBlock)
			return errors.New(errStr)
		}

		// Calculate all the tickets expiring this block and mark them as missed.
		tpdBucketMap = make(map[uint8][]*TicketPatchData)
		for _, tpd := range tixStore {
			// Bucket does not exist.
			if _, ok := tpdBucketMap[tpd.td.Prefix]; !ok {
				tpdBucketMap[tpd.td.Prefix] = make([]*TicketPatchData, 1)
				tpdBucketMap[tpd.td.Prefix][0] = tpd
			} else {
				// Bucket exists.
				data := tpdBucketMap[tpd.td.Prefix]
				tpdBucketMap[tpd.td.Prefix] = append(data, tpd)
			}
		}
		toExpireHeight := node.height - int64(b.chainParams.TicketExpiry)
		if toExpireHeight >= int64(b.chainParams.StakeEnabledHeight) {
			for i := 0; i < stake.BucketsSize; i++ {
				// Generate the live ticket bucket.
				ltb, err := b.GenerateLiveTicketBucket(tixStore,
					tpdBucketMap, uint8(i))
				if err != nil {
					return err
				}

				for _, ticket := range ltb {
					if ticket.BlockHeight == toExpireHeight {
						tpd := NewTicketPatchData(ticket, TiMissed, nil)
						spentAndMissedTickets[ticket.SStxHash] = tpd
					}
				}
			}
		}

		// Merge the ticket store patch containing the spent and missed tickets
		// with the ticket store.
		for hash, tpd := range spentAndMissedTickets {
			tixStore[hash] = tpd
		}

		// At this point our tixStore contains all of the tickets spent and
		// missed as of this block.

		// PART 2: Remove tickets that were missed and are now revoked.

		// Iterate through all the SSRtx (revocation) tx in the block and add
		// them to a map of tickets that were revoked.
		revocationsFromBlock := make(map[chainhash.Hash]struct{})
		numberOfSSRtx := 0
		for _, staketx := range block.STransactions() {
			if is, _ := stake.IsSSRtx(staketx); is {
				msgTx := staketx.MsgTx()
				sstxIn := msgTx.TxIn[0] // sstx input
				sstxHash := sstxIn.PreviousOutPoint.Hash

				// Fill out the ticket data.
				revocationsFromBlock[sstxHash] = struct{}{}
				numberOfSSRtx++
			}
		}

		if numberOfSSRtx != int(revocations) {
			errStr := fmt.Sprintf("an invalid revocations %v was calculated "+
				"the block header indicates %v instead", numberOfSSRtx,
				revocations)
			return errors.New(errStr)
		}

		// Look up each missed ticket referenced by a revocation.  If it is
		// found in the missed tickets map, ensure the revocation is mature;
		// otherwise the SSRtx spends a ticket that was never missed, which
		// is an error.
		for hash := range revocationsFromBlock {
			ticketWasMissed := false
			if td, is := missedTickets[hash]; is {
				maturedHeight := td.BlockHeight

				// Check maturity of ticket; we can only spend the ticket after it
				// hits maturity at height + tM + 2.
				if height < maturedHeight+2 {
					blockSha := block.Sha()
					errStr := fmt.Sprintf("Error: A ticket spend as an "+
						"SSRtx in block height %v was immature! Block sha %v",
						height,
						blockSha)
					return errors.New(errStr)
				}

				ticketWasMissed = true
			}

			if !ticketWasMissed {
				errStr := fmt.Sprintf("SSRtx spent missed sstx %v, "+
					"but that missed sstx could not be found!",
					hash)
				return errors.New(errStr)
			}
		}
	}

	// PART 3: Add newly maturing tickets
	// This is the only chunk we need to do for blocks appearing before
	// stake validation height.

	// Calculate the height that newly maturing tickets come from and
	// retrieve that block from the db.

	// Get the block that is maturing.
	matureNode, err := b.getNodeAtHeightFromTopNode(node, tM)
	if err != nil {
		return err
	}

	matureBlock, errBlock := b.getBlockFromHash(matureNode.hash)
	if errBlock != nil {
		return errBlock
	}

	// Maturing tickets are from matureBlock; fill out the ticket patch data
	// and then push them to the tixStore.
	for _, stx := range matureBlock.STransactions() {
		if is, _ := stake.IsSStx(stx); is {
			// Calculate the prefix for pre-sort.
			sstxHash := *stx.Sha()
			prefix := uint8(sstxHash[0])

			// Fill out the ticket data.
			td := stake.NewTicketData(sstxHash,
				prefix,
				chainhash.Hash{},
				height,
				false, // not missed
				false) // not expired

			tpd := NewTicketPatchData(td,
				TiAvailable,
				nil)
			tixStore[*stx.Sha()] = tpd
		}
	}

	return nil
}
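
The winner-selection step in connectTickets sorts the live tickets lexicographically, seeds a PRNG from the serialized parent block header, and draws TicketsPerBlock distinct indices via stake.FindTicketIdxs. The following is a minimal, self-contained sketch of that pattern using only the standard library; it is not the actual stake.Hash256PRNG algorithm, and the seeding and drawing logic here are illustrative assumptions.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"math/rand"
)

// pickWinners deterministically selects count distinct indices in [0, total)
// from a seed derived from the serialized parent block header.  Illustrative
// only; the real chain uses stake.NewHash256PRNG and stake.FindTicketIdxs.
func pickWinners(headerBytes []byte, total, count int) []int {
	seed := sha256.Sum256(headerBytes)
	prng := rand.New(rand.NewSource(int64(binary.LittleEndian.Uint64(seed[:8]))))

	chosen := make(map[int]struct{}, count)
	winners := make([]int, 0, count)
	for len(winners) < count {
		idx := prng.Intn(total)
		if _, dup := chosen[idx]; dup {
			continue // winners must be distinct tickets
		}
		chosen[idx] = struct{}{}
		winners = append(winners, idx)
	}
	return winners
}

func main() {
	header := []byte("serialized parent header bytes would go here")
	// Pick 5 winners out of a hypothetical pool of 40960 live tickets.
	fmt.Println(pickWinners(header, 40960, 5))
}
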
Example #21
0
// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The timestamps for the blocks before and after the checkpoint must have
//    timestamps which are also before and after the checkpoint, respectively
//    (due to the median time allowance this is not always the case)
//  - The block must not contain any strange transaction such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
func (b *BlockChain) IsCheckpointCandidate(block *dcrutil.Block) (bool, error) {
	// Checkpoints must be enabled.
	if b.noCheckpoints {
		return false, fmt.Errorf("checkpoints are disabled")
	}

	// A checkpoint must be in the main chain.
	exists, err := b.db.ExistsSha(block.Sha())
	if err != nil {
		return false, err
	}
	if !exists {
		return false, nil
	}

	// A checkpoint must be at least CheckpointConfirmations blocks before
	// the end of the main chain.
	blockHeight := block.Height()
	_, mainChainHeight, err := b.db.NewestSha()
	if err != nil {
		return false, err
	}
	if blockHeight > (mainChainHeight - CheckpointConfirmations) {
		return false, nil
	}

	// Get the previous block.
	prevHash := &block.MsgBlock().Header.PrevBlock
	prevBlock, err := b.db.FetchBlockBySha(prevHash)
	if err != nil {
		return false, err
	}

	// Get the next block.
	nextHash, err := b.db.FetchBlockShaByHeight(blockHeight + 1)
	if err != nil {
		return false, err
	}
	nextBlock, err := b.db.FetchBlockBySha(nextHash)
	if err != nil {
		return false, err
	}

	// A checkpoint must have timestamps for the block and the blocks on
	// either side of it in order (due to the median time allowance this is
	// not always the case).
	prevTime := prevBlock.MsgBlock().Header.Timestamp
	curTime := block.MsgBlock().Header.Timestamp
	nextTime := nextBlock.MsgBlock().Header.Timestamp
	if prevTime.After(curTime) || nextTime.Before(curTime) {
		return false, nil
	}

	// A checkpoint must have transactions that only contain standard
	// scripts.
	for _, tx := range block.Transactions() {
		if isNonstandardTransaction(tx) {
			return false, nil
		}
	}

	return true, nil
}
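
One of the checkpoint criteria above, that the candidate's timestamp must fall between the timestamps of the blocks on either side of it, is easy to isolate. Below is a minimal, self-contained sketch of just that check; the sample timestamps are made up for illustration.

package main

import (
	"fmt"
	"time"
)

// timestampsOrdered mirrors the neighbour-timestamp criterion: the candidate
// only qualifies when the previous block is not after it and the next block
// is not before it (median-time rules can otherwise reorder timestamps).
func timestampsOrdered(prev, cur, next time.Time) bool {
	return !prev.After(cur) && !next.Before(cur)
}

func main() {
	base := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
	inOrder := timestampsOrdered(base, base.Add(5*time.Minute), base.Add(10*time.Minute))
	outOfOrder := timestampsOrdered(base.Add(5*time.Minute), base, base.Add(10*time.Minute))
	fmt.Println(inOrder, outOfOrder) // true false
}
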
Example #22
0
// InsertBlock inserts raw block and transaction data from a block into the
// database.  The first block inserted into the database will be treated as the
// genesis block.  Every subsequent block insert requires the referenced parent
// block to already exist.  This is part of the database.Db interface
// implementation.
func (db *MemDb) InsertBlock(block *dcrutil.Block) (int64, error) {
	db.Lock()
	defer db.Unlock()

	if db.closed {
		return 0, ErrDbClosed
	}

	// Reject the insert if the previously referenced block does not exist,
	// except in the case where there are no blocks inserted yet and the
	// first inserted block is assumed to be the genesis block.
	msgBlock := block.MsgBlock()
	if _, exists := db.blocksBySha[msgBlock.Header.PrevBlock]; !exists {
		if len(db.blocks) > 0 {
			return 0, database.ErrPrevShaMissing
		}
	}
	var blockPrev *dcrutil.Block
	// Decred: WARNING. This function assumes that all block insertion calls have
	// dcrutil.blocks passed to them with block.blockHeight set correctly. However,
	// loading the genesis block in dcrd didn't do this (via block manager); pre-
	// production it should be established that all calls to this function pass
	// blocks with block.blockHeight set correctly.
	if len(db.blocks) > 0 {
		var errBlockPrev error
		blockPrev, errBlockPrev = db.fetchBlockBySha(&msgBlock.Header.PrevBlock)
		if errBlockPrev != nil {
			blockSha := block.Sha()
			log.Warnf("Failed to fetch parent block of block %v", blockSha)
			return 0, errBlockPrev
		}
	}

	// Build a map of in-flight transactions because some of the inputs in
	// this block could be referencing other transactions earlier in this
	// block which are not yet in the chain.
	newHeight := int64(len(db.blocks))
	txInFlight := map[chainhash.Hash]int{}
	// Loop through all transactions and inputs to ensure there are no error
	// conditions that would prevent them from being inserted into the db.
	// Although these checks could be done in the loop below, checking
	// for error conditions up front means the code below doesn't have to
	// deal with rollback on errors.
	votebits := block.MsgBlock().Header.VoteBits
	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		transactions := blockPrev.Transactions()
		for i, tx := range transactions {
			txInFlight[*tx.Sha()] = i
		}
		for i, tx := range transactions {
			for _, txIn := range tx.MsgTx().TxIn {
				if isCoinbaseInput(txIn) {
					continue
				}

				// It is acceptable for a transaction input to reference
				// the output of another transaction in this block only
				// if the referenced transaction comes before the
				// current one in this block.
				prevOut := &txIn.PreviousOutPoint
				if inFlightIndex, ok := txInFlight[prevOut.Hash]; ok {
					if i <= inFlightIndex {
						log.Warnf("InsertBlock: requested hash "+
							" of %s does not exist in-flight",
							tx.Sha())
						return 0, database.ErrTxShaMissing
					}
				} else {
					originTxns, exists := db.txns[prevOut.Hash]
					if !exists {
						log.Warnf("InsertBlock: requested hash "+
							"of %s by %s does not exist",
							prevOut.Hash, tx.Sha())
						return 0, database.ErrTxShaMissing
					}
					originTxD := originTxns[len(originTxns)-1]
					if prevOut.Index >= uint32(len(originTxD.spentBuf)) {
						log.Warnf("InsertBlock: requested hash "+
							"of %s with index %d does not "+
							"exist", tx.Sha(), prevOut.Index)
						return 0, database.ErrTxShaMissing
					}
				}
			}

			// Prevent duplicate transactions in the same block.
			if inFlightIndex, exists := txInFlight[*tx.Sha()]; exists &&
				inFlightIndex < i {
				log.Warnf("Block contains duplicate transaction %s",
					tx.Sha())
				return 0, database.ErrDuplicateSha
			}

			// Prevent duplicate transactions unless the old one is fully
			// spent.
			if txns, exists := db.txns[*tx.Sha()]; exists {
				txD := txns[len(txns)-1]
				if !isFullySpent(txD) {
					log.Warnf("Attempt to insert duplicate "+
						"transaction %s", tx.Sha())
					return 0, database.ErrDuplicateSha
				}
			}
		}
	}

	db.blocks = append(db.blocks, msgBlock)
	db.blocksBySha[msgBlock.Header.BlockSha()] = newHeight
	if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && blockPrev != nil {
		// Insert information about each transaction and spend all of the
		// outputs referenced by the inputs to the transactions.
		for i, tx := range blockPrev.Transactions() {
			// Insert the transaction data.
			txD := tTxInsertData{
				tree:        dcrutil.TxTreeRegular,
				blockHeight: newHeight - 1,
				offset:      i,
				spentBuf:    make([]bool, len(tx.MsgTx().TxOut)),
			}
			db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)
			// Spend all of the inputs.
			for _, txIn := range tx.MsgTx().TxIn {
				// Coinbase transaction has no inputs.
				if isCoinbaseInput(txIn) {
					continue
				}

				// Already checked for existing and valid ranges above.
				prevOut := &txIn.PreviousOutPoint
				originTxns := db.txns[prevOut.Hash]
				originTxD := originTxns[len(originTxns)-1]
				originTxD.spentBuf[prevOut.Index] = true
			}
		}
	}
	for i, tx := range block.STransactions() {
		// Insert the transaction data.
		txD := tTxInsertData{
			tree:        dcrutil.TxTreeStake,
			blockHeight: newHeight,
			offset:      i,
			spentBuf:    make([]bool, len(tx.MsgTx().TxOut)),
		}
		db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)

		// Spend all of the inputs.
		for _, txIn := range tx.MsgTx().TxIn {
			// Coinbase transaction has no inputs.
			if isCoinbaseInput(txIn) {
				continue
			}

			// Already checked for existing and valid ranges above.
			prevOut := &txIn.PreviousOutPoint
			originTxns := db.txns[prevOut.Hash]
			originTxD := originTxns[len(originTxns)-1]
			originTxD.spentBuf[prevOut.Index] = true
		}

	}
	return newHeight, nil
}
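
The txInFlight map in InsertBlock enforces that a transaction may only spend an output created by a transaction appearing strictly earlier in the same block. A minimal, self-contained sketch of that ordering rule follows; fakeTx and its string ids are stand-ins for real transactions and hashes, not types from the database package.

package main

import "fmt"

// fakeTx is a stand-in for a real transaction: an id plus the ids of the
// transactions whose outputs it spends.
type fakeTx struct {
	id     string
	spends []string
}

// checkInFlightOrder returns an error if any transaction spends another
// transaction in the same block that does not appear strictly before it,
// mirroring the txInFlight check in InsertBlock.
func checkInFlightOrder(txs []fakeTx) error {
	position := make(map[string]int, len(txs))
	for i, tx := range txs {
		position[tx.id] = i
	}
	for i, tx := range txs {
		for _, dep := range tx.spends {
			if depIdx, inBlock := position[dep]; inBlock && i <= depIdx {
				return fmt.Errorf("tx %s (index %d) spends %s (index %d), "+
					"which does not come before it", tx.id, i, dep, depIdx)
			}
		}
	}
	return nil
}

func main() {
	good := []fakeTx{{id: "a"}, {id: "b", spends: []string{"a"}}}
	bad := []fakeTx{{id: "b", spends: []string{"a"}}, {id: "a"}}
	fmt.Println(checkInFlightOrder(good)) // <nil>
	fmt.Println(checkInFlightOrder(bad))  // error
}
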
Example #23
0
// indexBlock extracts all of the standard addresses from all of the
// transactions in the passed block and maps each of them to the associated
// transaction using the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) {
	regularTxTreeValid := dcrutil.IsFlagSet16(block.MsgBlock().Header.VoteBits,
		dcrutil.BlockValid)
	var stakeStartIdx int
	if regularTxTreeValid {
		for txIdx, tx := range parent.Transactions() {
			// Coinbases do not reference any inputs.  Since the block is
			// required to have already gone through full validation, it has
			// already been proven that the first transaction in the block is
			// a coinbase.
			if txIdx != 0 {
				for _, txIn := range tx.MsgTx().TxIn {
					// The view should always have the input since
					// the index contract requires it, however, be
					// safe and simply ignore any missing entries.
					origin := &txIn.PreviousOutPoint
					entry := view.LookupEntry(&origin.Hash)
					if entry == nil {
						log.Warnf("Missing input %v for tx %v while "+
							"indexing block %v (height %v)\n", origin.Hash,
							tx.Sha(), block.Sha(), block.Height())
						continue
					}

					version := entry.ScriptVersionByIndex(origin.Index)
					pkScript := entry.PkScriptByIndex(origin.Index)
					txType := entry.TransactionType()
					idx.indexPkScript(data, version, pkScript, txIdx,
						txType == stake.TxTypeSStx)
				}
			}

			for _, txOut := range tx.MsgTx().TxOut {
				idx.indexPkScript(data, txOut.Version, txOut.PkScript, txIdx,
					false)
			}
		}

		stakeStartIdx = len(parent.Transactions())
	}

	for txIdx, tx := range block.STransactions() {
		msgTx := tx.MsgTx()
		thisTxOffset := txIdx + stakeStartIdx

		isSSGen, _ := stake.IsSSGen(msgTx)
		for i, txIn := range msgTx.TxIn {
			// Skip stakebases.
			if isSSGen && i == 0 {
				continue
			}

			// The view should always have the input since
			// the index contract requires it, however, be
			// safe and simply ignore any missing entries.
			origin := &txIn.PreviousOutPoint
			entry := view.LookupEntry(&origin.Hash)
			if entry == nil {
				log.Warnf("Missing input %v for tx %v while "+
					"indexing block %v (height %v)\n", origin.Hash,
					tx.Sha(), block.Sha(), block.Height())
				continue
			}

			version := entry.ScriptVersionByIndex(origin.Index)
			pkScript := entry.PkScriptByIndex(origin.Index)
			txType := entry.TransactionType()
			idx.indexPkScript(data, version, pkScript, thisTxOffset,
				txType == stake.TxTypeSStx)
		}

		isSStx, _ := stake.IsSStx(msgTx)
		for _, txOut := range msgTx.TxOut {
			idx.indexPkScript(data, txOut.Version, txOut.PkScript,
				thisTxOffset, isSStx)
		}
	}
}
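
To make the shape of the data that indexBlock accumulates a bit more concrete, here is a minimal, self-contained sketch: a map from an address-like key to the block-relative offsets of every transaction that touches it. The writeIndex type, indexEntry helper, and sample keys are illustrative assumptions standing in for the real writeIndexData and the key derivation that indexPkScript performs.

package main

import "fmt"

// writeIndex is an illustrative stand-in for the real writeIndexData: it maps
// an address-like key to the block-relative offsets of transactions that
// involve that key.
type writeIndex map[string][]int

// indexEntry records that the transaction at txOffset touched addrKey,
// skipping consecutive duplicates for the same transaction and key.
func indexEntry(data writeIndex, addrKey string, txOffset int) {
	offsets := data[addrKey]
	if n := len(offsets); n > 0 && offsets[n-1] == txOffset {
		return // same transaction already recorded for this key
	}
	data[addrKey] = append(data[addrKey], txOffset)
}

func main() {
	data := make(writeIndex)
	indexEntry(data, "addr-1", 0) // hypothetical keys and offsets
	indexEntry(data, "addr-1", 0) // duplicate within the same transaction
	indexEntry(data, "addr-2", 3)
	fmt.Println(data) // map[addr-1:[0] addr-2:[3]]
}
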