Example No. 1
// updateSSRtxRecord updates an SSRtx record in the SSRtx records bucket.
func updateSSRtxRecord(tx walletdb.Tx, hash *chainhash.Hash,
	record *ssrtxRecord) error {
	// Fetch the current content of the key. The error is deliberately
	// ignored: any fetch failure (including a deserialization error,
	// which cannot be distinguished from ErrSSRtxsNotFound here) is
	// treated as "no existing records".
	oldRecords, _ := fetchSSRtxRecords(tx, hash)

	// Don't reinsert records we already have.
	if ssrtxRecordExistsInRecords(record, oldRecords) {
		return nil
	}

	bucket := tx.RootBucket().Bucket(ssrtxRecordsBucketName)

	var records []*ssrtxRecord
	// Either create a slice if currently nothing exists for this
	// key in the db, or append the entry to the slice.
	if oldRecords == nil {
		records = make([]*ssrtxRecord, 1)
		records[0] = record
	} else {
		records = append(oldRecords, record)
	}

	// Write the serialized SSRtxs keyed by the sstx hash.
	serializedSSRtxsRecords := serializeSSRtxRecords(records)

	err := bucket.Put(hash.Bytes(), serializedSSRtxsRecords)
	if err != nil {
		str := fmt.Sprintf("failed to store ssrtx records '%s'", hash)
		return stakeStoreError(ErrDatabase, str, err)
	}
	return nil
}
Example No. 2
// FetchHeightRange looks up a range of blocks by the start and ending
// heights.  Fetch is inclusive of the start height and exclusive of the
// ending height. To fetch all hashes from the start height until no
// more are present, use the special id `AllShas'.
func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []chainhash.Hash, err error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	var endidx int64
	if endHeight == database.AllShas {
		endidx = startHeight + 500
	} else {
		endidx = endHeight
	}

	shalist := make([]chainhash.Hash, 0, endidx-startHeight)
	for height := startHeight; height < endidx; height++ {
		// TODO(drahn) fix blkFile from height

		key := int64ToKey(height)
		blkVal, lerr := db.lDb.Get(key, db.ro)
		if lerr != nil {
			break
		}

		var sha chainhash.Hash
		sha.SetBytes(blkVal[0:32])
		shalist = append(shalist, sha)
	}

	//log.Tracef("FetchIdxRange idx %v %v returned %v shas err %v", startHeight, endHeight, len(shalist), err)

	return shalist, nil
}
Example No. 3
func parsesha(argstr string) (argtype int, height int64, psha *chainhash.Hash, err error) {
	var sha chainhash.Hash

	var hashbuf string

	switch len(argstr) {
	case 64:
		hashbuf = argstr
	case 66:
		if argstr[0:2] != "0x" {
			log.Infof("prefix is %v", argstr[0:2])
			err = ErrBadShaPrefix
			return
		}
		hashbuf = argstr[2:]
	default:
		if len(argstr) <= 16 {
			// assume value is height
			argtype = ArgHeight
			var h int
			h, err = strconv.Atoi(argstr)
			if err == nil {
				height = int64(h)
				return
			}
			log.Infof("Unable to parse height %v, err %v", height, err)
		}
		err = ErrBadShaLen
		return
	}

	var buf [32]byte
	for idx, ch := range hashbuf {
		var val rune

		switch {
		case ch >= '0' && ch <= '9':
			val = ch - '0'
		case ch >= 'a' && ch <= 'f':
			val = ch - 'a' + rune(10)
		case ch >= 'A' && ch <= 'F':
			val = ch - 'A' + rune(10)
		default:
			err = ErrBadShaChar
			return
		}
		b := buf[31-idx/2]
		if idx&1 == 1 {
			b |= byte(val)
		} else {
			b |= (byte(val) << 4)
		}
		buf[31-idx/2] = b
	}
	sha.SetBytes(buf[0:32])
	psha = &sha
	return
}
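
The nibble loop above is easy to misread, so here is a minimal standalone sketch of the same technique using only the standard library. parseHash64 is a hypothetical helper, not part of the codebase: it packs each hex pair back to front, so the leftmost byte of the display string lands at buf[31].

package main

import "fmt"

// parseHash64 converts a 64-character hex string into a 32-byte array using
// the same reversed, nibble-at-a-time accumulation as parsesha above.
func parseHash64(s string) ([32]byte, error) {
	var buf [32]byte
	if len(s) != 64 {
		return buf, fmt.Errorf("want 64 hex chars, got %d", len(s))
	}
	for idx, ch := range s {
		var val byte
		switch {
		case ch >= '0' && ch <= '9':
			val = byte(ch - '0')
		case ch >= 'a' && ch <= 'f':
			val = byte(ch-'a') + 10
		case ch >= 'A' && ch <= 'F':
			val = byte(ch-'A') + 10
		default:
			return buf, fmt.Errorf("bad hex char %q", ch)
		}
		if idx&1 == 1 {
			buf[31-idx/2] |= val // low nibble
		} else {
			buf[31-idx/2] |= val << 4 // high nibble
		}
	}
	return buf, nil
}

func main() {
	h, err := parseHash64("00000000000000000000000000000000000000000000000000000000000000ff")
	fmt.Printf("%x %v\n", h[0], err) // the last hex pair ends up in buf[0]: ff <nil>
}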
Example No. 4
// GetBlockAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on the
// returned instance.
//
// See GetBlock for the blocking version and more details.
func (c *Client) GetBlockAsync(blockHash *chainhash.Hash) FutureGetBlockResult {
	hash := ""
	if blockHash != nil {
		hash = blockHash.String()
	}

	cmd := dcrjson.NewGetBlockCmd(hash, dcrjson.Bool(false), nil)
	return c.sendCmd(cmd)
}
Example No. 5
// hashInSlice returns whether a hash exists in a slice.
func hashInSlice(h *chainhash.Hash, list []*chainhash.Hash) bool {
	for _, hash := range list {
		if h.IsEqual(hash) {
			return true
		}
	}

	return false
}
Example No. 6
// GetHeadersAsync returns an instance of a type that can be used to get the result
// of the RPC at some future time by invoking the Receive function on the returned instance.
//
// See GetHeaders for the blocking version and more details.
func (c *Client) GetHeadersAsync(blockLocators []chainhash.Hash, hashStop *chainhash.Hash) FutureGetHeadersResult {
	concatenatedLocators := make([]byte, chainhash.HashSize*len(blockLocators))
	for i := range blockLocators {
		copy(concatenatedLocators[i*chainhash.HashSize:], blockLocators[i][:])
	}
	cmd := dcrjson.NewGetHeadersCmd(hex.EncodeToString(concatenatedLocators),
		hashStop.String())
	return c.sendCmd(cmd)
}
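
GetHeadersAsync flattens the locator hashes into one byte slice before hex-encoding them for the getheaders command. A minimal standalone sketch of that flattening, with hashSize standing in for chainhash.HashSize and concatLocators as a hypothetical helper:

package main

import (
	"encoding/hex"
	"fmt"
)

const hashSize = 32 // stands in for chainhash.HashSize

// concatLocators packs the fixed-size hashes back to back and hex-encodes
// the result as a single string, mirroring the loop in GetHeadersAsync.
func concatLocators(locators [][hashSize]byte) string {
	buf := make([]byte, hashSize*len(locators))
	for i := range locators {
		copy(buf[i*hashSize:], locators[i][:])
	}
	return hex.EncodeToString(buf)
}

func main() {
	var a, b [hashSize]byte
	a[0], b[0] = 0x01, 0x02
	s := concatLocators([][hashSize]byte{a, b})
	fmt.Println(len(s), s[:4]) // 128 hex chars; starts with "0100"
}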
Example No. 7
// GetRawTransactionAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See GetRawTransaction for the blocking version and more details.
func (c *Client) GetRawTransactionAsync(txHash *chainhash.Hash) FutureGetRawTransactionResult {
	hash := ""
	if txHash != nil {
		hash = txHash.String()
	}

	cmd := dcrjson.NewGetRawTransactionCmd(hash, dcrjson.Int(0))
	return c.sendCmd(cmd)
}
Example No. 8
// GetTxOutAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See GetTxOut for the blocking version and more details.
func (c *Client) GetTxOutAsync(txHash *chainhash.Hash, index uint32, mempool bool) FutureGetTxOutResult {
	hash := ""
	if txHash != nil {
		hash = txHash.String()
	}

	cmd := dcrjson.NewGetTxOutCmd(hash, index, &mempool)
	return c.sendCmd(cmd)
}
Example No. 9
// GetBlockVerboseAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See GetBlockVerbose for the blocking version and more details.
func (c *Client) GetBlockVerboseAsync(blockHash *chainhash.Hash, verboseTx bool) FutureGetBlockVerboseResult {
	hash := ""
	if blockHash != nil {
		hash = blockHash.String()
	}

	cmd := dcrjson.NewGetBlockCmd(hash, dcrjson.Bool(true), &verboseTx)
	return c.sendCmd(cmd)
}
Example No. 10
// fetchSStxRecord retrieves a tx record from the sstx records bucket
// with the given hash.
func fetchSStxRecord(tx walletdb.Tx, hash *chainhash.Hash) (*sstxRecord, error) {
	bucket := tx.RootBucket().Bucket(sstxRecordsBucketName)

	key := hash.Bytes()
	val := bucket.Get(key)
	if val == nil {
		str := fmt.Sprintf("missing sstx record for hash '%s'", hash.String())
		return nil, stakeStoreError(ErrSStxNotFound, str, nil)
	}

	return deserializeSStxRecord(val)
}
Example No. 11
func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *chainhash.Hash, newestBlockIdx int64) {
	// Safe to ignore error, since height will be < 0 in "error" case.
	sha, height, _ := db.FetchAddrIndexTip()
	if newestBlockIdx != height {
		t.Fatalf("Height of address index tip failed to update, "+
			"expected %v, got %v", newestBlockIdx, height)
	}
	if !bytes.Equal(newestSha.Bytes(), sha.Bytes()) {
		t.Fatalf("Sha of address index tip failed to update, "+
			"expected %v, got %v", newestSha, sha)
	}
}
Example No. 12
// fetchSSRtxRecords retrieves SSRtx records from the SSRtxRecords bucket with
// the given hash.
func fetchSSRtxRecords(ns walletdb.ReadBucket, hash *chainhash.Hash) ([]*ssrtxRecord,
	error) {

	bucket := ns.NestedReadBucket(ssrtxRecordsBucketName)

	key := hash.Bytes()
	val := bucket.Get(key)
	if val == nil {
		str := fmt.Sprintf("missing ssrtx records for hash '%s'", hash.String())
		return nil, stakeStoreError(ErrSSRtxsNotFound, str, nil)
	}

	return deserializeSSRtxRecords(val)
}
Example No. 13
// fetchSStxRecordSStxTicketHash160 retrieves the hash160 of a ticket's 0th
// output (either a pubkey hash or a script hash) from the sstx records
// bucket with the given hash.
func fetchSStxRecordSStxTicketHash160(ns walletdb.ReadBucket,
	hash *chainhash.Hash) (hash160 []byte, p2sh bool, err error) {

	bucket := ns.NestedReadBucket(sstxRecordsBucketName)

	key := hash.Bytes()
	val := bucket.Get(key)
	if val == nil {
		str := fmt.Sprintf("missing sstx record for hash '%s'", hash.String())
		return nil, false, stakeStoreError(ErrSStxNotFound, str, nil)
	}

	return deserializeSStxTicketHash160(val)
}
Example No. 14
// fetchBlockShaByHeight returns a block hash based on its height in the
// block chain.
func (db *LevelDb) fetchBlockShaByHeight(height int64) (rsha *chainhash.Hash, err error) {
	key := int64ToKey(height)

	blkVal, err := db.lDb.Get(key, db.ro)
	if err != nil {
		log.Tracef("failed to find height %v", height)
		return // rsha is nil; err carries the lookup failure
	}

	var sha chainhash.Hash
	sha.SetBytes(blkVal[0:32])

	return &sha, nil
}
Example No. 15
// fetchAddrIndexTip returns the last block height and block sha to be indexed.
// Meta-data about the address tip is currently cached in memory, and will be
// updated accordingly by functions that modify the state. This function is
// used on start up to load the info into memory. Callers will use the public
// version of this function below, which returns our cached copy.
func (db *LevelDb) fetchAddrIndexTip() (*chainhash.Hash, int64, error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	data, err := db.lDb.Get(addrIndexMetaDataKey, db.ro)
	if err != nil {
		return &chainhash.Hash{}, -1, database.ErrAddrIndexDoesNotExist
	}

	var blkSha chainhash.Hash
	blkSha.SetBytes(data[0:32])

	blkHeight := binary.LittleEndian.Uint64(data[32:])

	return &blkSha, int64(blkHeight), nil
}
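
The read path above implies a fixed metadata layout: 32 bytes of block sha followed by the height as a little-endian uint64. The following standalone round trip is an assumption inferred from that read path (encodeTip and decodeTip are hypothetical helpers):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeTip lays out the 40-byte record: sha first, then the height.
func encodeTip(sha [32]byte, height int64) []byte {
	buf := make([]byte, 40)
	copy(buf[0:32], sha[:])
	binary.LittleEndian.PutUint64(buf[32:], uint64(height))
	return buf
}

// decodeTip reverses encodeTip, matching what fetchAddrIndexTip does.
func decodeTip(data []byte) ([32]byte, int64) {
	var sha [32]byte
	copy(sha[:], data[0:32])
	return sha, int64(binary.LittleEndian.Uint64(data[32:]))
}

func main() {
	var sha [32]byte
	sha[0] = 0xab
	_, h := decodeTip(encodeTip(sha, 123456))
	fmt.Println(h) // 123456
}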
Example No. 16
// newDummyCredit creates a new credit with the given hash and outpointIdx,
// locked to the votingpool address identified by the given
// series/index/branch.
func newDummyCredit(t *testing.T, pool *Pool, series uint32, index Index, branch Branch,
	txSha []byte, outpointIdx uint32) credit {
	var hash chainhash.Hash
	if err := hash.SetBytes(txSha); err != nil {
		t.Fatal(err)
	}
	// Ensure the address defined by the given series/branch/index is present on
	// the set of used addresses as that's a requirement of WithdrawalAddress.
	TstEnsureUsedAddr(t, pool, series, branch, index)
	addr := TstNewWithdrawalAddress(t, pool, series, branch, index)
	c := wtxmgr.Credit{
		OutPoint: wire.OutPoint{
			Hash:  hash,
			Index: outpointIdx,
		},
	}
	return newCredit(c, *addr)
}
Example No. 17
// removeStakePoolInvalUserTickets removes the ticket hash from the inval
// ticket bucket.
func removeStakePoolInvalUserTickets(ns walletdb.ReadWriteBucket, scriptHash [20]byte,
	record *chainhash.Hash) error {

	// Fetch the current content of the key. The error is deliberately
	// ignored: any fetch failure (including a deserialization error,
	// which cannot be distinguished from ErrPoolUserInvalTcktsNotFound
	// here) is treated as "no existing records".
	oldRecords, _ := fetchStakePoolUserInvalTickets(ns, scriptHash)

	// Don't need to remove records that don't exist.
	if !duplicateExistsInInvalTickets(record, oldRecords) {
		return nil
	}

	var newRecords []*chainhash.Hash
	for i := range oldRecords {
		if record.IsEqual(oldRecords[i]) {
			newRecords = append(oldRecords[:i:i], oldRecords[i+1:]...)
		}
	}

	if newRecords == nil {
		return nil
	}

	bucket := ns.NestedReadWriteBucket(metaBucketName)
	key := make([]byte, stakePoolInvalidPrefixSize+scriptHashSize)
	copy(key[0:stakePoolInvalidPrefixSize], stakePoolInvalidPrefix)
	copy(key[stakePoolInvalidPrefixSize:stakePoolInvalidPrefixSize+scriptHashSize],
		scriptHash[:])

	// Write the serialized invalid user ticket hashes.
	serializedRecords := serializeUserInvalTickets(newRecords)

	err := bucket.Put(key, serializedRecords)
	if err != nil {
		str := fmt.Sprintf("failed to store pool user invalid ticket "+
			"records '%x'", scriptHash)
		return stakeStoreError(ErrDatabase, str, err)
	}

	return nil
}
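
The removal loop relies on the full slice expression oldRecords[:i:i], which caps the prefix so append allocates a fresh backing array instead of overwriting elements inside oldRecords. A minimal standalone illustration with ints (removeAt is a hypothetical helper):

package main

import "fmt"

// removeAt drops the element at index i without touching the original
// slice's backing array, thanks to the capped three-index slice.
func removeAt(old []int, i int) []int {
	return append(old[:i:i], old[i+1:]...)
}

func main() {
	old := []int{10, 20, 30, 40}
	fresh := removeAt(old, 1)
	fmt.Println(fresh) // [10 30 40]
	fmt.Println(old)   // [10 20 30 40] -- unchanged because the prefix was capped
}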
Example No. 18
// updateSStxRecordVoteBits updates an individual ticket's intended voteBits
// which are used to override the default voteBits when voting.
func updateSStxRecordVoteBits(ns walletdb.ReadWriteBucket, hash *chainhash.Hash,
	voteBits stake.VoteBits) error {
	if len(voteBits.ExtendedBits) > stake.MaxSingleBytePushLength-2 {
		str := fmt.Sprintf("voteBitsExt too long (got %v bytes, want max %v)",
			len(voteBits.ExtendedBits), stake.MaxSingleBytePushLength-2)
		return stakeStoreError(ErrData, str, nil)
	}

	bucket := ns.NestedReadWriteBucket(sstxRecordsBucketName)

	key := hash.Bytes()
	val := bucket.Get(key)
	if val == nil {
		str := fmt.Sprintf("missing sstx record for hash '%s'", hash.String())
		return stakeStoreError(ErrSStxNotFound, str, nil)
	}
	valLen := len(val)
	valCopy := make([]byte, valLen)
	copy(valCopy, val)

	// Move the cursor to the voteBits position and rewrite it.
	curPos := 0
	curPos += int64Size
	curPos += int32Size

	// Write the intended votebits length (uint8).
	valCopy[curPos] = byte(int16Size + len(voteBits.ExtendedBits))
	curPos += int8Size

	// Write the first two bytes for the intended votebits.
	byteOrder.PutUint16(valCopy[curPos:curPos+int16Size], voteBits.Bits)
	curPos += int16Size

	// Copy the remaining data from voteBitsExt.
	copy(valCopy[curPos:], voteBits.ExtendedBits[:])

	err := bucket.Put(key, valCopy)
	if err != nil {
		str := fmt.Sprintf("failed to update sstxrecord votebits for '%s'", hash)
		return stakeStoreError(ErrDatabase, str, err)
	}
	return nil
}
Example No. 19
// fetchSStxRecordVoteBits fetches an individual ticket's intended voteBits
// which are used to override the default voteBits when voting.
func fetchSStxRecordVoteBits(ns walletdb.ReadBucket, hash *chainhash.Hash) (bool,
	stake.VoteBits, error) {

	bucket := ns.NestedReadBucket(sstxRecordsBucketName)

	key := hash.Bytes()
	val := bucket.Get(key)
	if val == nil {
		str := fmt.Sprintf("missing sstx record for hash '%s'", hash.String())
		return false, stake.VoteBits{}, stakeStoreError(ErrSStxNotFound, str, nil)
	}
	valLen := len(val)
	valCopy := make([]byte, valLen)
	copy(valCopy, val)

	// Move the cursor to the voteBits position and read it.
	curPos := 0
	curPos += int64Size
	curPos += int32Size

	// Read the intended votebits length (uint8). If it is unset, return
	// now; otherwise check it for sanity.
	voteBitsLen := uint8(val[curPos])
	if voteBitsLen == 0 {
		return false, stake.VoteBits{}, nil
	}
	if voteBitsLen < 2 || voteBitsLen > stake.MaxSingleBytePushLength {
		str := fmt.Sprintf("corrupt votebits length '%v'", voteBitsLen)
		return false, stake.VoteBits{}, stakeStoreError(ErrData, str, nil)
	}
	curPos += int8Size

	// Read the first two bytes for the intended votebits.
	voteBits := byteOrder.Uint16(valCopy[curPos : curPos+int16Size])
	curPos += int16Size

	// Retrieve the extended vote bits.
	voteBitsExt := make([]byte, voteBitsLen-int16Size)
	copy(voteBitsExt[:], valCopy[curPos:(curPos+int(voteBitsLen)-int16Size)])

	return true, stake.VoteBits{Bits: voteBits, ExtendedBits: voteBitsExt}, nil
}
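
Both updateSStxRecordVoteBits and fetchSStxRecordVoteBits walk the same record layout: skip int64Size + int32Size bytes, then a one-byte length (two votebits bytes plus the extension), the 16-bit votebits, and finally the extended bits. The standalone sketch below assumes int64Size=8, int32Size=4, int16Size=2 and a little-endian byteOrder; those values are inferred from the names, not stated in the snippets:

package main

import (
	"encoding/binary"
	"fmt"
)

const voteBitsOffset = 8 + 4 // assumed int64Size + int32Size

// putVoteBits writes the length byte, the 16-bit votebits, and the extension.
func putVoteBits(rec []byte, bits uint16, ext []byte) {
	cur := voteBitsOffset
	rec[cur] = byte(2 + len(ext)) // two votebits bytes plus the extension
	cur++
	binary.LittleEndian.PutUint16(rec[cur:cur+2], bits)
	cur += 2
	copy(rec[cur:], ext)
}

// getVoteBits reads the same region back.
func getVoteBits(rec []byte) (uint16, []byte) {
	cur := voteBitsOffset
	n := int(rec[cur])
	cur++
	bits := binary.LittleEndian.Uint16(rec[cur : cur+2])
	cur += 2
	ext := make([]byte, n-2)
	copy(ext, rec[cur:cur+n-2])
	return bits, ext
}

func main() {
	rec := make([]byte, 64)
	putVoteBits(rec, 0x0001, []byte{0xaa, 0xbb})
	bits, ext := getVoteBits(rec)
	fmt.Printf("%04x %x\n", bits, ext) // 0001 aabb
}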
Example No. 20
// GenerateSSGenBlockRef generates an OP_RETURN push for the block header hash and
// height which the block votes on.
func GenerateSSGenBlockRef(blockHash chainhash.Hash, height uint32) ([]byte,
	error) {
	// Prefix
	dataPushes := []byte{
		0x6a, // OP_RETURN
		0x24, // OP_DATA_36
	}

	// Serialize the block hash and height
	blockHashBytes := blockHash.Bytes()
	blockHeightBytes := make([]byte, 4)
	binary.LittleEndian.PutUint32(blockHeightBytes, height)

	blockData := append(blockHashBytes, blockHeightBytes...)

	// Concatenate the prefix and block data
	blockDataOut := append(dataPushes, blockData...)

	return blockDataOut, nil
}
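
The push built by GenerateSSGenBlockRef is a fixed 38-byte layout: OP_RETURN (0x6a), OP_DATA_36 (0x24), the 32-byte block hash, and the vote height as 4 little-endian bytes. A standalone sketch that reproduces just that layout (ssgenBlockRef is a hypothetical stand-in):

package main

import (
	"encoding/binary"
	"fmt"
)

// ssgenBlockRef assembles the 38-byte OP_RETURN push described above.
func ssgenBlockRef(hash [32]byte, height uint32) []byte {
	out := make([]byte, 0, 38)
	out = append(out, 0x6a, 0x24) // OP_RETURN, OP_DATA_36
	out = append(out, hash[:]...)
	var h [4]byte
	binary.LittleEndian.PutUint32(h[:], height)
	return append(out, h[:]...)
}

func main() {
	var hash [32]byte
	ref := ssgenBlockRef(hash, 150000)
	fmt.Println(len(ref), ref[0] == 0x6a, ref[1] == 0x24) // 38 true true
}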
Example No. 21
func (db *LevelDb) getBlkByHeight(blkHeight int64) (rsha *chainhash.Hash, rbuf []byte, err error) {
	var blkVal []byte

	key := int64ToKey(blkHeight)

	blkVal, err = db.lDb.Get(key, db.ro)
	if err != nil {
		log.Tracef("failed to find height %v", blkHeight)
		return // rsha and rbuf are nil; err carries the lookup failure
	}

	var sha chainhash.Hash

	sha.SetBytes(blkVal[0:32])

	blockdata := make([]byte, len(blkVal[32:]))
	copy(blockdata[:], blkVal[32:])

	return &sha, blockdata, nil
}
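
getBlkByHeight and fetchBlockShaByHeight both treat the stored value as a 32-byte sha followed by the serialized block. A standalone sketch of that split (splitBlockValue is a hypothetical helper):

package main

import "fmt"

// splitBlockValue copies the first 32 bytes out as the sha and the rest as
// the raw block, just as getBlkByHeight does with blkVal.
func splitBlockValue(val []byte) (sha [32]byte, block []byte) {
	copy(sha[:], val[0:32])
	block = make([]byte, len(val)-32)
	copy(block, val[32:])
	return sha, block
}

func main() {
	val := make([]byte, 32+3)
	val[0], val[32] = 0xaa, 0x01
	sha, blk := splitBlockValue(val)
	fmt.Printf("%x %d\n", sha[0], len(blk)) // aa 3
}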
Example No. 22
// BlockLocatorFromHash returns a block locator for the passed block hash.
// See BlockLocator for details on the algorithm used to create a block locator.
//
// In addition to the general algorithm referenced above, there are a couple of
// special cases which are handled:
//
//  - If the genesis hash is passed, there are no previous hashes to add and
//    therefore the block locator will only consist of the genesis hash
//  - If the passed hash is not currently known, the block locator will only
//    consist of the passed hash
func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator {
	// The locator contains the requested hash at the very least.
	locator := make(BlockLocator, 0, wire.MaxBlockLocatorsPerMsg)
	locator = append(locator, hash)

	// Nothing more to do if a locator for the genesis hash was requested.
	if hash.IsEqual(b.chainParams.GenesisHash) {
		return locator
	}

	// Attempt to find the height of the block that corresponds to the
	// passed hash, and if it's on a side chain, also find the height at
	// which it forks from the main chain.
	blockHeight := int64(-1)
	forkHeight := int64(-1)
	node, exists := b.index[*hash]
	if !exists {
		// Try to look up the height for passed block hash.  Assume an
		// error means it doesn't exist and just return the locator for
		// the block itself.
		height, err := b.db.FetchBlockHeightBySha(hash)
		if err != nil {
			return locator
		}

		blockHeight = height
	} else {
		blockHeight = node.height

		// Find the height at which this node forks from the main chain
		// if the node is on a side chain.
		if !node.inMainChain {
			for n := node; n.parent != nil; n = n.parent {
				if n.inMainChain {
					forkHeight = n.height
					break
				}
			}
		}
	}

	// Generate the block locators according to the algorithm described
	// in the BlockLocator comment and make sure to leave room for the
	// final genesis hash.
	iterNode := node
	increment := int64(1)
	for len(locator) < wire.MaxBlockLocatorsPerMsg-1 {
		// Once there are 10 locators, exponentially increase the
		// distance between each block locator.
		if len(locator) > 10 {
			increment *= 2
		}
		blockHeight -= increment
		if blockHeight < 1 {
			break
		}

		// As long as this is still on the side chain, walk backwards
		// along the side chain nodes to each block height.
		if forkHeight != -1 && blockHeight > forkHeight {
			// Intentionally use parent field instead of the
			// getPrevNodeFromNode function since we don't want to
			// dynamically load nodes when building block locators.
			// Side chain blocks should always be in memory already,
			// and if they aren't for some reason it's ok to skip
			// them.
			for iterNode != nil && blockHeight > iterNode.height {
				iterNode = iterNode.parent
			}
			if iterNode != nil && iterNode.height == blockHeight {
				locator = append(locator, iterNode.hash)
			}
			continue
		}

		// The desired block height is in the main chain, so look it up
		// from the main chain database.
		h, err := b.db.FetchBlockShaByHeight(blockHeight)
		if err != nil {
			// This shouldn't happen and it's ok to ignore block
			// locators, so just continue to the next one.
			log.Warnf("Lookup of known valid height failed %v",
				blockHeight)
			continue
		}
		locator = append(locator, h)
	}

	// Append the appropriate genesis block.
	locator = append(locator, b.chainParams.GenesisHash)
	return locator
}
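
The height schedule in BlockLocatorFromHash steps back one block at a time for the first locators, then doubles the stride, and always finishes at the genesis block. A standalone sketch of just that schedule, with maxLocators standing in for wire.MaxBlockLocatorsPerMsg (500 is used purely for illustration):

package main

import "fmt"

// locatorHeights mirrors the stepping logic: decrement by 1 until more than
// 10 entries exist, then double the stride each iteration, and append the
// genesis height (0) at the end.
func locatorHeights(tip int64, maxLocators int) []int64 {
	heights := []int64{tip}
	step := int64(1)
	h := tip
	for len(heights) < maxLocators-1 {
		if len(heights) > 10 {
			step *= 2
		}
		h -= step
		if h < 1 {
			break
		}
		heights = append(heights, h)
	}
	return append(heights, 0) // genesis
}

func main() {
	hs := locatorHeights(200000, 500)
	fmt.Println(hs[:14], hs[len(hs)-1])
	// First entries step by 1, then the stride doubles; the final entry is 0.
}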
Example No. 23
// calcSignatureHash will, given a script and hash type for the current script
// engine instance, calculate the signature hash to be used for signing and
// verification.
func calcSignatureHash(script []parsedOpcode, hashType SigHashType,
	tx *wire.MsgTx, idx int, cachedPrefix *chainhash.Hash) ([]byte, error) {
	// The SigHashSingle signature type signs only the corresponding input
	// and output (the output with the same index number as the input).
	//
	// Since transactions can have more inputs than outputs, this means it
	// is improper to use SigHashSingle on input indices that don't have a
	// corresponding output.
	//
	// A bug in the original Satoshi client implementation means specifying
	// an index that is out of range results in a signature hash of 1 (as a
	// uint256 little endian).  The original intent appeared to be to
	// indicate failure, but unfortunately, it was never checked and thus is
	// treated as the actual signature hash.  This buggy behavior is now
	// part of the consensus and a hard fork would be required to fix it.
	//
	// Due to this, care must be taken by software that creates transactions
	// which make use of SigHashSingle because it can lead to an extremely
	// dangerous situation where the invalid inputs will end up signing a
	// hash of 1.  This in turn presents an opportunity for attackers to
	// cleverly construct transactions which can steal those coins provided
	// they can reuse signatures.
	//
	// Decred mitigates this by actually returning an error instead.
	if hashType&sigHashMask == SigHashSingle && idx >= len(tx.TxOut) {
		return nil, ErrSighashSingleIdx
	}

	// Remove all instances of OP_CODESEPARATOR from the script.
	script = removeOpcode(script, OP_CODESEPARATOR)

	// Make a deep copy of the transaction, zeroing out the script for all
	// inputs that are not currently being processed.
	txCopy := tx.Copy()
	for i := range txCopy.TxIn {
		if i == idx {
			// UnparseScript cannot fail here because removeOpcode
			// above only returns a valid script.
			sigScript, _ := unparseScript(script)
			txCopy.TxIn[idx].SignatureScript = sigScript
		} else {
			txCopy.TxIn[i].SignatureScript = nil
		}
	}

	switch hashType & sigHashMask {
	case SigHashNone:
		txCopy.TxOut = txCopy.TxOut[0:0] // Empty slice.
		for i := range txCopy.TxIn {
			if i != idx {
				txCopy.TxIn[i].Sequence = 0
			}
		}

	case SigHashSingle:
		// Resize output array to up to and including requested index.
		txCopy.TxOut = txCopy.TxOut[:idx+1]

		// All but current output get zeroed out.
		for i := 0; i < idx; i++ {
			txCopy.TxOut[i].Value = -1
			txCopy.TxOut[i].PkScript = nil
		}

		// Sequence on all other inputs is 0, too.
		for i := range txCopy.TxIn {
			if i != idx {
				txCopy.TxIn[i].Sequence = 0
			}
		}

	default:
		// Consensus treats undefined hashtypes like normal SigHashAll
		// for purposes of hash generation.
		fallthrough
	case SigHashOld:
		fallthrough
	case SigHashAllValue:
		fallthrough
	case SigHashAll:
		// Nothing special here.
	}
	if hashType&SigHashAnyOneCanPay != 0 {
		txCopy.TxIn = txCopy.TxIn[idx : idx+1]
		idx = 0
	}

	// The final hash (message to sign) is the hash of:
	// 1) hash of the prefix ||
	// 2) hash of the witness for signing ||
	// 3) the hash type (encoded as a 4-byte little-endian value)
	var wbuf bytes.Buffer
	binary.Write(&wbuf, binary.LittleEndian, uint32(hashType))

	// Optimization for SIGHASH_ALL. In this case, the prefix hash is
	// the same as the transaction hash because only the inputs have
	// been modified, so don't bother to do the wasteful O(N^2) extra
	// hash here.
	// The caching only works if the "anyone can pay flag" is also
	// disabled.
	var prefixHash chainhash.Hash
	if cachedPrefix != nil &&
		(hashType&sigHashMask == SigHashAll) &&
		(hashType&SigHashAnyOneCanPay == 0) &&
		chaincfg.SigHashOptimization {
		prefixHash = *cachedPrefix
	} else {
		prefixHash = txCopy.TxSha()
	}

	// If the ValueIn is to be included in what we're signing, sign
	// the witness hash that includes it. Otherwise, just sign the
	// prefix and signature scripts.
	var witnessHash chainhash.Hash
	if hashType&sigHashMask != SigHashAllValue {
		witnessHash = txCopy.TxShaWitnessSigning()
	} else {
		witnessHash = txCopy.TxShaWitnessValueSigning()
	}
	wbuf.Write(prefixHash.Bytes())
	wbuf.Write(witnessHash.Bytes())
	return chainhash.HashFuncB(wbuf.Bytes()), nil
}
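
The message that finally gets hashed is the 4-byte little-endian hash type followed by the prefix hash and the witness hash. A standalone sketch of that assembly; sha256 is only a stand-in here for chainhash.HashFuncB, whose actual hash function the snippet does not show, and the value 1 for SigHashAll is assumed for illustration:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// sigHashMessage concatenates the hash type, prefix hash, and witness hash
// exactly as the buffer is assembled at the end of calcSignatureHash.
func sigHashMessage(hashType uint32, prefix, witness [32]byte) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.LittleEndian, hashType)
	buf.Write(prefix[:])
	buf.Write(witness[:])
	return buf.Bytes()
}

func main() {
	var prefix, witness [32]byte
	msg := sigHashMessage(1, prefix, witness) // 1 assumed to mean SigHashAll
	digest := sha256.Sum256(msg)              // stand-in for chainhash.HashFuncB
	fmt.Println(len(msg), len(digest))        // 68 32
}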
Example No. 24
// ExistsLiveTicketAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on the
// returned instance.
func (c *Client) ExistsLiveTicketAsync(hash *chainhash.Hash) FutureExistsLiveTicketResult {
	cmd := dcrjson.NewExistsLiveTicketCmd(hash.String())
	return c.sendCmd(cmd)
}
Example No. 25
// AddShaHash adds the passed chainhash.Hash to the Filter.
//
// This function is safe for concurrent access.
func (bf *Filter) AddShaHash(sha *chainhash.Hash) {
	bf.mtx.Lock()
	bf.add(sha.Bytes())
	bf.mtx.Unlock()
}