// newChild creates a blockNode from a block and adds it to the parent's set of
// children. The new node is also returned. It necessarily modifies the database.
//
// TODO: newChild has a fair amount of room for optimization.
func (cs *ConsensusSet) newChild(pb *processedBlock, b types.Block) *processedBlock {
	// Create the child node.
	childID := b.ID()
	child := &processedBlock{
		Block:  b,
		Parent: b.ParentID,

		Height: pb.Height + 1,
		Depth:  pb.childDepth(),
	}
	err := cs.db.Update(func(tx *bolt.Tx) error {
		blockMap := tx.Bucket(BlockMap)
		err := cs.setChildTarget(blockMap, child)
		if err != nil {
			return err
		}
		pb.Children = append(pb.Children, childID)
		err = blockMap.Put(child.Block.ParentID[:], encoding.Marshal(*pb))
		if err != nil {
			return err
		}
		return blockMap.Put(childID[:], encoding.Marshal(*child))
	})
	if build.DEBUG && err != nil {
		panic(err)
	}
	return child
}
Example #2
// managedSubmitBlock takes a solved block and submits it to the blockchain.
// managedSubmitBlock should not be called with a lock.
func (m *Miner) managedSubmitBlock(b types.Block) error {
	// Give the block to the consensus set.
	err := m.cs.AcceptBlock(b)
	// Add the block to the miner's blocks list if the only problem is that it's stale.
	if err == modules.ErrNonExtendingBlock {
		m.mu.Lock()
		m.persist.BlocksFound = append(m.persist.BlocksFound, b.ID())
		m.mu.Unlock()
		m.log.Println("Mined a stale block - block appears valid but does not extend the blockchain")
		return err
	}
	if err == modules.ErrBlockUnsolved {
		m.log.Println("Mined an unsolved block - header submission appears to be incorrect")
		return err
	}
	if err != nil {
		m.tpool.PurgeTransactionPool()
		m.log.Critical("ERROR: an invalid block was submitted:", err)
		return err
	}
	m.mu.Lock()
	defer m.mu.Unlock()

	// Grab a new address for the miner. Call may fail if the wallet is locked
	// or if the wallet addresses have been exhausted.
	m.persist.BlocksFound = append(m.persist.BlocksFound, b.ID())
	var uc types.UnlockConditions
	uc, err = m.wallet.NextAddress()
	if err != nil {
		return err
	}
	m.persist.Address = uc.UnlockHash()
	return m.saveSync()
}
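The sentinel errors above form a small contract with callers. A minimal caller sketch, not taken from the source and assuming it lives in the same miner package as managedSubmitBlock:

func handleSolvedBlock(m *Miner, b types.Block) {
	switch err := m.managedSubmitBlock(b); err {
	case nil:
		// Block accepted; its ID was recorded in BlocksFound.
	case modules.ErrNonExtendingBlock:
		// Stale but valid; the ID was still recorded in BlocksFound.
	case modules.ErrBlockUnsolved:
		// The header did not meet the target; keep grinding.
	default:
		// Invalid block; the transaction pool has been purged.
	}
}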
Example #3
// SubmitBlock takes a solved block and submits it to the blockchain.
// SubmitBlock should not be called with a lock.
func (m *Miner) SubmitBlock(b types.Block) error {
	// Give the block to the consensus set.
	err := m.cs.AcceptBlock(b)
	// Add the block to the miner's blocks list if the only problem is that it's stale.
	if err == modules.ErrNonExtendingBlock {
		m.mu.Lock()
		m.blocksFound = append(m.blocksFound, b.ID())
		m.mu.Unlock()
	}
	if err != nil {
		m.tpool.PurgeTransactionPool()
		m.log.Println("ERROR: an invalid block was submitted:", err)
		return err
	}
	m.mu.Lock()
	defer m.mu.Unlock()

	// Grab a new address for the miner. Call may fail if the wallet is locked
	// or if the wallet addresses have been exhausted.
	m.blocksFound = append(m.blocksFound, b.ID())
	var uc types.UnlockConditions
	uc, err = m.wallet.NextAddress()
	if err == nil { // Only update the address if there was no error.
		m.address = uc.UnlockHash()
	}
	return err
}
Example #4
// validateHeader does some early, low computation verification on the block.
// Callers should not assume that validation will happen in a particular order.
func (cs *ConsensusSet) validateHeader(tx dbTx, b types.Block) error {
	// See if the block is known already.
	id := b.ID()
	_, exists := cs.dosBlocks[id]
	if exists {
		return errDoSBlock
	}

	// Check if the block is already known.
	blockMap := tx.Bucket(BlockMap)
	if blockMap == nil {
		return errNoBlockMap
	}
	if blockMap.Get(id[:]) != nil {
		return modules.ErrBlockKnown
	}

	// Check for the parent.
	parentID := b.ParentID
	parentBytes := blockMap.Get(parentID[:])
	if parentBytes == nil {
		return errOrphan
	}

	var parent processedBlock
	err := cs.marshaler.Unmarshal(parentBytes, &parent)
	if err != nil {
		return err
	}
	// Check that the timestamp is not too far in the past to be acceptable.
	minTimestamp := cs.blockRuleHelper.minimumValidChildTimestamp(blockMap, &parent)

	return cs.blockValidator.ValidateBlock(b, minTimestamp, parent.ChildTarget, parent.Height+1)
}
Example #5
File: accept.go Project: mm3/Sia
// addBlockToTree inserts a block into the blockNode tree by adding it to its
// parent's list of children. If the new blockNode is heavier than the current
// node, the blockchain is forked to put the new block and its parents at the
// tip. An error will be returned if block verification fails or if the block
// does not extend the longest fork.
func (cs *State) addBlockToTree(b types.Block) (revertedNodes, appliedNodes []*blockNode, err error) {
	parentNode := cs.blockMap[b.ParentID]
	newNode := parentNode.newChild(b)
	cs.blockMap[b.ID()] = newNode
	if newNode.heavierThan(cs.currentBlockNode()) {
		return cs.forkBlockchain(newNode)
	}
	return nil, nil, modules.ErrNonExtendingBlock
}
Example #6
// HeaderForWork returns a block that is ready for nonce grinding, along with
// the root hash of the block.
func (m *Miner) HeaderForWork() (types.BlockHeader, types.Target, error) {
	if !m.wallet.Unlocked() {
		return types.BlockHeader{}, types.Target{}, modules.ErrLockedWallet
	}
	lockID := m.mu.Lock()
	defer m.mu.Unlock(lockID)
	err := m.checkAddress()
	if err != nil {
		return types.BlockHeader{}, types.Target{}, err
	}

	if time.Since(m.lastBlock).Seconds() > secondsBetweenBlocks {
		m.prepareNewBlock()
	}

	// The header that will be returned for nonce grinding.
	// The header is constructed from a block and some arbitrary data. The
	// arbitrary data allows for multiple unique blocks to be generated from
	// a single block in memory. A block pointer is used in order to avoid
	// storing multiple copies of the same block in memory.
	var header types.BlockHeader
	var arbData []byte
	var block *types.Block

	if m.memProgress%(headerForWorkMemory/blockForWorkMemory) == 0 {
		// Grab a new block. Allocate space for the pointer to store it as well
		block = new(types.Block)
		*block, _ = m.blockForWork()
		header = block.Header()
		arbData = block.Transactions[0].ArbitraryData[0]

		m.lastBlock = time.Now()
	} else {
		// Set block to previous block, but create new arbData
		block = m.blockMem[m.headerMem[m.memProgress-1]]
		arbData, _ = crypto.RandBytes(types.SpecifierLen)
		block.Transactions[0].ArbitraryData[0] = arbData
		header = block.Header()
	}

	// Save a mapping from the header to its block as well as from the
	// header to its arbitrary data, replacing the block that was
	// stored 'headerForWorkMemory' requests ago.
	delete(m.blockMem, m.headerMem[m.memProgress])
	delete(m.arbDataMem, m.headerMem[m.memProgress])
	m.blockMem[header] = block
	m.arbDataMem[header] = arbData
	m.headerMem[m.memProgress] = header
	m.memProgress++
	if m.memProgress == headerForWorkMemory {
		m.memProgress = 0
	}

	// Return the header and target.
	return header, m.target, nil
}
Example #7
// checkMinerPayouts compares a block's miner payouts to the block's subsidy and
// returns true if they are equal.
func checkMinerPayouts(b types.Block, height types.BlockHeight) bool {
	// Add up the payouts and check that all values are legal.
	var payoutSum types.Currency
	for _, payout := range b.MinerPayouts {
		if payout.Value.IsZero() {
			return false
		}
		payoutSum = payoutSum.Add(payout.Value)
	}
	return b.CalculateSubsidy(height).Cmp(payoutSum) == 0
}
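For reference, a payout set that passes this check can be built straight from the coinbase. A minimal sketch, not taken from the source, assuming the block carries no miner fees so the subsidy equals types.CalculateCoinbase at that height:

// buildMinerPayout assigns the entire coinbase to a single payout so that
// checkMinerPayouts(b, height) returns true for a block with no miner fees.
func buildMinerPayout(b types.Block, height types.BlockHeight, addr types.UnlockHash) types.Block {
	b.MinerPayouts = []types.SiacoinOutput{{
		Value:      types.CalculateCoinbase(height), // assumption: no fees in b
		UnlockHash: addr,
	}}
	return b
}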
Example #8
File: accept.go Project: mm3/Sia
// validHeader does some early, low computation verification on the block.
func (cs *State) validHeader(b types.Block) error {
	// Grab the parent of the block and verify the ID of the child meets the
	// target. This is done as early as possible to enforce that any
	// block-related DoS must use blocks that have sufficient work.
	parent, exists := cs.blockMap[b.ParentID]
	if !exists {
		return ErrOrphan
	}
	if !b.CheckTarget(parent.childTarget) {
		return ErrMissedTarget
	}

	// Check that the block is below the size limit.
	if uint64(len(encoding.Marshal(b))) > types.BlockSizeLimit {
		return ErrLargeBlock
	}

	// Check that the timestamp is not in 'the past', where the past is defined
	// by earliestChildTimestamp.
	if parent.earliestChildTimestamp() > b.Timestamp {
		return ErrEarlyTimestamp
	}

	// If the block is in the extreme future, return an error and do nothing
	// more with the block. There is an assumption that by the time the extreme
	// future arrives, this block will no longer be a part of the longest fork
	// because it will have been ignored by all of the miners.
	if b.Timestamp > types.CurrentTimestamp()+types.ExtremeFutureThreshold {
		return ErrExtremeFutureTimestamp
	}

	// Verify that the miner payouts are valid.
	if !b.CheckMinerPayouts(parent.height + 1) {
		return ErrBadMinerPayouts
	}

	// If the block is in the near future, but too far to be acceptable, then
	// the block will be saved and added to the consensus set after it is no
	// longer too far in the future. This is the last check because it's an
	// expensive check, and not worth performing if the payouts are incorrect.
	if b.Timestamp > types.CurrentTimestamp()+types.FutureThreshold {
		go func() {
			time.Sleep(time.Duration(b.Timestamp-(types.CurrentTimestamp()+types.FutureThreshold)) * time.Second)
			lockID := cs.mu.Lock()
			defer cs.mu.Unlock(lockID)
			cs.acceptBlock(b) // NOTE: Error is not handled.
		}()
		return ErrFutureTimestamp
	}

	return nil
}
Example #9
// managedBroadcastBlock will broadcast a block to the consensus set's peers.
func (cs *ConsensusSet) managedBroadcastBlock(b types.Block) {
	// COMPATv0.5.1 - broadcast the block to all peers <= v0.5.1 and block header to all peers > v0.5.1.
	var relayBlockPeers, relayHeaderPeers []modules.Peer
	for _, p := range cs.gateway.Peers() {
		if build.VersionCmp(p.Version, "0.5.1") <= 0 {
			relayBlockPeers = append(relayBlockPeers, p)
		} else {
			relayHeaderPeers = append(relayHeaderPeers, p)
		}
	}
	go cs.gateway.Broadcast("RelayBlock", b, relayBlockPeers)
	go cs.gateway.Broadcast("RelayHeader", b.Header(), relayHeaderPeers)
}
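The version split above rests on build.VersionCmp. A small sketch of the same predicate, assuming VersionCmp orders version strings and returns a value <= 0 when the first argument is not newer than the second:

// wantsFullBlock reports whether a peer should receive the whole block rather
// than just the header, mirroring the condition used in managedBroadcastBlock.
func wantsFullBlock(p modules.Peer) bool {
	return build.VersionCmp(p.Version, "0.5.1") <= 0
}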
Example #10
// SubmitHeader accepts a block header.
func (m *Miner) SubmitHeader(bh types.BlockHeader) error {
	if err := m.tg.Add(); err != nil {
		return err
	}
	defer m.tg.Done()

	// Because a call to managedSubmitBlock is required at the end of this
	// function, the first part needs to be wrapped in an anonymous function
	// for lock safety.
	var b types.Block
	err := func() error {
		m.mu.Lock()
		defer m.mu.Unlock()

		// Lookup the block that corresponds to the provided header.
		nonce := bh.Nonce
		bh.Nonce = [8]byte{}
		bPointer, bExists := m.blockMem[bh]
		arbData, arbExists := m.arbDataMem[bh]
		if !bExists || !arbExists {
			return errLateHeader
		}

		// Block is going to be passed to external memory, but the memory pointed
		// to by the transactions slice is still being modified - needs to be
		// copied. Same with the memory being pointed to by the arb data slice.
		b = *bPointer
		txns := make([]types.Transaction, len(b.Transactions))
		copy(txns, b.Transactions)
		b.Transactions = txns
		b.Transactions[0].ArbitraryData = [][]byte{arbData[:]}
		b.Nonce = nonce

		// Sanity check - block should have same id as header.
		bh.Nonce = nonce
		if types.BlockID(crypto.HashObject(bh)) != b.ID() {
			m.log.Critical("block reconstruction failed")
		}
		return nil
	}()
	if err != nil {
		m.log.Println("ERROR during call to SubmitHeader, pre SubmitBlock:", err)
		return err
	}
	err = m.managedSubmitBlock(b)
	if err != nil {
		m.log.Println("ERROR returned by managedSubmitBlock:", err)
		return err
	}
	return nil
}
Example #11
// TestCheckTarget probes the checkTarget function.
func TestCheckTarget(t *testing.T) {
	var b types.Block
	lowTarget := types.RootDepth
	highTarget := types.Target{}
	sameTarget := types.Target(b.ID())

	if !checkTarget(b, lowTarget) {
		t.Error("CheckTarget failed for a low target")
	}
	if checkTarget(b, highTarget) {
		t.Error("CheckTarget passed for a high target")
	}
	if !checkTarget(b, sameTarget) {
		t.Error("CheckTarget failed for a same target")
	}
}
Example #12
// newChild creates a blockNode from a block and adds it to the parent's set of
// children. The new node is also returned. It necessarily modifies the database.
func (cs *ConsensusSet) newChild(tx *bolt.Tx, pb *processedBlock, b types.Block) *processedBlock {
	// Create the child node.
	childID := b.ID()
	child := &processedBlock{
		Block:  b,
		Height: pb.Height + 1,
		Depth:  pb.childDepth(),
	}
	blockMap := tx.Bucket(BlockMap)
	cs.setChildTarget(blockMap, child)
	err := blockMap.Put(childID[:], encoding.Marshal(*child))
	if build.DEBUG && err != nil {
		panic(err)
	}
	return child
}
Example #13
// TestBuriedBadFork creates a block with an invalid transaction that's not on
// the longest fork. The consensus set will not validate that block. Then valid
// blocks are added on top of it to make it the longest fork. When it becomes
// the longest fork, all the blocks should be fully validated and thrown out
// because a parent is invalid.
func TestBuriedBadFork(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestBuriedBadFork")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()
	pb := cst.cs.dbCurrentProcessedBlock()

	// Create a bad block that builds on a parent, so that it is not part of
	// the longest fork.
	badBlock := types.Block{
		ParentID:     pb.Block.ParentID,
		Timestamp:    types.CurrentTimestamp(),
		MinerPayouts: []types.SiacoinOutput{{Value: types.CalculateCoinbase(pb.Height)}},
		Transactions: []types.Transaction{{
			SiacoinInputs: []types.SiacoinInput{{}}, // Will trigger an error on full verification but not partial verification.
		}},
	}
	parent, err := cst.cs.dbGetBlockMap(pb.Block.ParentID)
	if err != nil {
		t.Fatal(err)
	}
	badBlock, _ = cst.miner.SolveBlock(badBlock, parent.ChildTarget)
	err = cst.cs.AcceptBlock(badBlock)
	if err != modules.ErrNonExtendingBlock {
		t.Fatal(err)
	}

	// Build another block on top of the bad block that is fully valid. This
	// will cause a fork and full validation of the bad block; both the bad
	// block and this block should be thrown away.
	block := types.Block{
		ParentID:     badBlock.ID(),
		Timestamp:    types.CurrentTimestamp(),
		MinerPayouts: []types.SiacoinOutput{{Value: types.CalculateCoinbase(pb.Height + 1)}},
	}
	block, _ = cst.miner.SolveBlock(block, parent.ChildTarget) // okay because the target will not change
	err = cst.cs.AcceptBlock(block)
	if err == nil {
		t.Fatal("a bad block failed to cause an error")
	}
}
Example #14
// SolveBlock takes a block, target, and number of iterations as input and
// tries to find a block that meets the target. This function can take a long
// time to complete, and should not be called with a lock.
func (m *Miner) SolveBlock(b types.Block, target types.Target) (types.Block, bool) {
	// Assemble the header.
	merkleRoot := b.MerkleRoot()
	header := make([]byte, 80)
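	// Header layout (80 bytes): parent ID in bytes [0:32), nonce in [32:40),
	// timestamp in [40:48), and Merkle root in [48:80).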
	copy(header, b.ParentID[:])
	binary.LittleEndian.PutUint64(header[40:48], uint64(b.Timestamp))
	copy(header[48:], merkleRoot[:])

	nonce := (*uint64)(unsafe.Pointer(&header[32]))
	for i := 0; i < iterationsPerAttempt; i++ {
		id := crypto.HashBytes(header)
		if bytes.Compare(target[:], id[:]) >= 0 {
			copy(b.Nonce[:], header[32:40])
			return b, true
		}
		*nonce++
	}
	return b, false
}
Example #15
// SubmitBlock takes a solved block and submits it to the blockchain.
// SubmitBlock should not be called with a lock.
func (m *Miner) SubmitBlock(b types.Block) error {
	// Give the block to the consensus set.
	err := m.cs.AcceptBlock(b)
	if err != nil {
		m.tpool.PurgeTransactionPool()
		m.log.Println("ERROR: an invalid block was submitted:", err)
		return err
	}

	// Grab a new address for the miner.
	lockID := m.mu.Lock()
	m.blocksFound = append(m.blocksFound, b.ID())
	var addr types.UnlockHash
	addr, _, err = m.wallet.CoinAddress(false) // false indicates that the address should not be visible to the user.
	if err == nil {                            // Special case: only update the address if there was no error.
		m.address = addr
	}
	m.mu.Unlock(lockID)
	return err
}
Example #16
File: accept.go Project: mm3/Sia
// acceptBlock is the internal consensus function for adding blocks. There is
// no block relaying. The speed of 'acceptBlock' is affected by the value of
// 'cs.verificationRigor'. If rigor is set to 'fullVerification', all of the
// transactions will be checked and verified. This is a requirement when
// receiving blocks from untrusted sources. When set to 'partialVerification',
// verification of transactions is skipped. This is acceptable when receiving
// blocks from a trusted source, such as blocks that were previously verified and
// saved to disk. The value of 'cs.verificationRigor' should be set before
// 'acceptBlock' is called.
func (cs *State) acceptBlock(b types.Block) error {
	// See if the block is known already.
	_, exists := cs.dosBlocks[b.ID()]
	if exists {
		return ErrDoSBlock
	}
	_, exists = cs.blockMap[b.ID()]
	if exists {
		return ErrBlockKnown
	}

	// Check that the header is valid. The header is checked first because it
	// is not computationally expensive to verify, but it is computationally
	// expensive to create.
	err := cs.validHeader(b)
	if err != nil {
		return err
	}

	// Try adding the block to the block tree. This call will perform
	// verification on the block before adding the block to the block tree. An
	// error is returned if verification fails or if the block does not extend
	// the longest fork.
	revertedNodes, appliedNodes, err := cs.addBlockToTree(b)
	if err != nil {
		return err
	}
	if len(appliedNodes) > 0 {
		cs.updateSubscribers(revertedNodes, appliedNodes)
	}

	// Sanity check - if applied nodes is len 0, revertedNodes should also be
	// len 0.
	if build.DEBUG {
		if len(appliedNodes) == 0 && len(revertedNodes) != 0 {
			panic("appliedNodes and revertedNodes are mismatched!")
		}
	}

	return nil
}
Example #17
// newChild creates a blockNode from a block and adds it to the parent's set of
// children. The new node is also returned. It necessarily modifies the database.
//
// TODO: newChild has a fair amount of room for optimization.
func (cs *ConsensusSet) newChild(tx *bolt.Tx, pb *processedBlock, b types.Block) (*processedBlock, error) {
	// Create the child node.
	childID := b.ID()
	child := &processedBlock{
		Block:  b,
		Parent: b.ParentID,

		Height: pb.Height + 1,
		Depth:  pb.childDepth(),
	}
	blockMap := tx.Bucket(BlockMap)
	err := cs.setChildTarget(blockMap, child)
	if err != nil {
		return nil, err
	}
	err = blockMap.Put(childID[:], encoding.Marshal(*child))
	if err != nil {
		return nil, err
	}
	return child, nil
}
Example #18
// SubmitBlock takes a solved block and submits it to the blockchain.
// SubmitBlock should not be called with a lock.
func (m *Miner) SubmitBlock(b types.Block) error {
	// Give the block to the consensus set.
	err := m.cs.AcceptBlock(b)
	if err != nil {
		m.tpool.PurgeTransactionPool()
		m.log.Println("ERROR: an invalid block was submitted:", err)
		return err
	}
	lockID := m.mu.Lock()
	defer m.mu.Unlock(lockID)

	// Grab a new address for the miner. Call may fail if the wallet is locked
	// or if the wallet addresses have been exhausted.
	m.blocksFound = append(m.blocksFound, b.ID())
	var uc types.UnlockConditions
	uc, err = m.wallet.NextAddress()
	if err == nil { // Special case: only update the address if there was no error.
		m.address = uc.UnlockHash()
	}
	return err
}
Example #19
// SubmitHeader accepts a block header.
func (m *Miner) SubmitHeader(bh types.BlockHeader) error {
	m.mu.Lock()

	// Lookup the block that corresponds to the provided header.
	var b types.Block
	nonce := bh.Nonce
	bh.Nonce = [8]byte{}
	bPointer, bExists := m.blockMem[bh]
	arbData, arbExists := m.arbDataMem[bh]
	if !bExists || !arbExists {
		m.log.Println("ERROR:", errLateHeader)
		m.mu.Unlock()
		return errLateHeader
	}

	// Block is going to be passed to external memory, but the memory pointed
	// to by the transactions slice is still being modified - needs to be
	// copied. Same with the memory being pointed to by the arb data slice.
	b = *bPointer
	txns := make([]types.Transaction, len(b.Transactions))
	copy(txns, b.Transactions)
	b.Transactions = txns
	b.Transactions[0].ArbitraryData = [][]byte{arbData[:]}
	b.Nonce = nonce

	// Sanity check - block should have same id as header.
	if build.DEBUG {
		bh.Nonce = nonce
		if types.BlockID(crypto.HashObject(bh)) != b.ID() {
			panic("block reconstruction failed")
		}
	}

	m.mu.Unlock()
	return m.SubmitBlock(b)
}
Example #20
// buildExplorerBlock takes a block and its height and uses it to construct an
// explorer block.
func (srv *Server) buildExplorerBlock(height types.BlockHeight, block types.Block) ExplorerBlock {
	var mpoids []types.SiacoinOutputID
	for i := range block.MinerPayouts {
		mpoids = append(mpoids, block.MinerPayoutID(uint64(i)))
	}

	var etxns []ExplorerTransaction
	for _, txn := range block.Transactions {
		etxns = append(etxns, srv.buildExplorerTransaction(height, block.ID(), txn))
	}

	facts, exists := srv.explorer.BlockFacts(height)
	if build.DEBUG && !exists {
		panic("incorrect request to buildExplorerBlock - block does not exist")
	}

	return ExplorerBlock{
		MinerPayoutIDs: mpoids,
		Transactions:   etxns,
		RawBlock:       block,

		BlockFacts: facts,
	}
}
Example #21
// mockDbTx is an implementation of dbTx for unit testing. It uses an in-memory
// key/value store to mock a database.
type mockDbTx struct {
	buckets map[string]dbBucket
}

// Bucket returns a mock dbBucket object associated with the given bucket name.
func (db mockDbTx) Bucket(name []byte) dbBucket {
	return db.buckets[string(name)]
}
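// mockDbBucket is referenced below but its definition is not part of this
// excerpt. A minimal map-backed sketch, assuming the dbBucket interface only
// requires a Get method with bolt-like semantics (nil for missing keys):
type mockDbBucket map[string][]byte

// Get returns the value stored under key, or nil if the key is absent.
func (b mockDbBucket) Get(key []byte) []byte {
	return b[key]
}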

var (
	// validBlock is a mock valid Block.
	validBlock = types.Block{Timestamp: 100}
	// emptyDosBlocks is a dosBlocks map where no block is marked bad.
	emptyDosBlocks = map[types.BlockID]struct{}{}
	// emptyTx is a transaction associated with an empty block map.
	emptyTx = mockDbTx{map[string]dbBucket{
		string(BlockMap): mockDbBucket{},
	}}
)

var validateHeaderTests = []struct {
	block     types.Block
	dosBlocks map[types.BlockID]struct{}
	tx        mockDbTx
	errWant   error
	msg       string
}{
	{
		block:     validBlock,
		dosBlocks: nil,
Example #22
// testRewinding adds transactions in a block, then removes the block and
// verifies that the transaction pool re-adds the block's transactions to the
// unconfirmed set.
func (tpt *tpoolTester) testRewinding() {
	// Put some transactions into the unconfirmed set.
	tpt.addSiacoinTransactionToPool()
	if len(tpt.tpool.TransactionSet()) == 0 {
		tpt.t.Fatal("transaction pool has no transactions")
	}

	// Prepare an empty block to cause a rewind (by forking).
	target, exists := tpt.cs.ChildTarget(tpt.cs.CurrentBlock().ID())
	if !exists {
		tpt.t.Fatal("unable to recover child target")
	}
	forkStart := types.Block{
		ParentID:  tpt.cs.CurrentBlock().ID(),
		Timestamp: types.Timestamp(time.Now().Unix()),
		MinerPayouts: []types.SiacoinOutput{
			types.SiacoinOutput{Value: types.CalculateCoinbase(tpt.cs.Height() + 1)},
		},
	}
	for {
		var found bool
		forkStart, found = tpt.miner.SolveBlock(forkStart, target)
		if found {
			break
		}
	}

	// Mine a block with the transaction.
	b, _ := tpt.miner.FindBlock()
	err := tpt.cs.AcceptBlock(b)
	if err != nil {
		tpt.t.Fatal(err)
	}
	tpt.csUpdateWait()
	if len(tpt.tpool.TransactionSet()) != 0 {
		tpt.t.Fatal("tset should be empty after FindBlock()")
	}

	// Fork around the block with the transaction.
	err = tpt.cs.AcceptBlock(forkStart)
	if err != nil && err != modules.ErrNonExtendingBlock {
		tpt.t.Fatal(err)
	}
	target, exists = tpt.cs.ChildTarget(tpt.cs.CurrentBlock().ID())
	if !exists {
		tpt.t.Fatal("unable to recover child target")
	}
	forkCommit := types.Block{
		ParentID:  forkStart.ID(),
		Timestamp: types.Timestamp(time.Now().Unix()),
		MinerPayouts: []types.SiacoinOutput{
			types.SiacoinOutput{Value: types.CalculateCoinbase(tpt.cs.Height() + 1)},
		},
	}
	for {
		var found bool
		forkCommit, found = tpt.miner.SolveBlock(forkCommit, target)
		if found {
			tpt.cs.AcceptBlock(forkCommit)
			break
		}
	}
	tpt.csUpdateWait()

	// Check that the transaction which was confirmed on the now-reverted fork
	// has returned to the unconfirmed set.
	if len(tpt.tpool.TransactionSet()) == 0 {
		tpt.t.Error("tset should contain transactions that used to be confirmed but no longer are")
	}
}
Example #23
// checkTarget returns true if the block's ID meets the given target.
func checkTarget(b types.Block, target types.Target) bool {
	blockHash := b.ID()
	return bytes.Compare(target[:], blockHash[:]) >= 0
}
Example #24
// TestSendBlk probes the ConsensusSet.rpcSendBlk method and tests that it
// correctly receives block ids and writes out the corresponding blocks.
func TestSendBlk(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := blankConsensusSetTester("TestSendBlk")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	p1, p2 := net.Pipe()
	mockP1 := mockPeerConn{p1}
	fnErr := make(chan error)

	tests := []struct {
		id      types.BlockID
		conn    modules.PeerConn
		fn      func() // handle reading and writing over the pipe to the mock conn.
		errWant error
		msg     string
	}{
		// TODO: Test with a failing database.
		// Test with a failing reader.
		{
			conn:    mockPeerConnFailingReader{mockP1},
			fn:      func() { fnErr <- nil },
			errWant: errFailingReader,
			msg:     "expected rpcSendBlk to error with a failing reader conn",
		},
		// Test with a block id not found in the blockmap.
		{
			conn: mockP1,
			fn: func() {
				// Write a block id to the conn.
				fnErr <- encoding.WriteObject(p2, types.BlockID{})
			},
			errWant: errNilItem,
			msg:     "expected rpcSendBlk to error with a nonexistent block id",
		},
		// Test with a failing writer.
		{
			conn: mockPeerConnFailingWriter{mockP1},
			fn: func() {
				// Write a valid block id to the conn.
				fnErr <- encoding.WriteObject(p2, types.GenesisID)
			},
			errWant: errFailingWriter,
			msg:     "expected rpcSendBlk to error with a failing writer conn",
		},
		// Test with a valid conn and valid block.
		{
			conn: mockP1,
			fn: func() {
				// Write a valid block id to the conn.
				if err := encoding.WriteObject(p2, types.GenesisID); err != nil {
					fnErr <- err
				}

				// Read the block written to the conn.
				var block types.Block
				if err := encoding.ReadObject(p2, &block, types.BlockSizeLimit); err != nil {
					fnErr <- err
				}
				// Verify the block is the expected block.
				if block.ID() != types.GenesisID {
					fnErr <- fmt.Errorf("rpcSendBlk wrote a different block to conn than the block requested. requested block id: %v, received block id: %v", types.GenesisID, block.ID())
				}

				fnErr <- nil
			},
			errWant: nil,
			msg:     "expected rpcSendBlk to succeed with a valid conn and valid block",
		},
	}
	for _, tt := range tests {
		go tt.fn()
		err := cst.cs.rpcSendBlk(tt.conn)
		if err != tt.errWant {
			t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err)
		}
		err = <-fnErr
		if err != nil {
			t.Fatal(err)
		}
	}
}
Example #25
// Add a couple of blocks to the database, then perform lookups to see if
// they were added and cross-referenced correctly.
func (et *explorerTester) testAddBlock(t *testing.T) error {
	// This block will *NOT* be valid, but should contain
	// addresses that can cross-reference each other.
	b1 := types.Block{
		ParentID:  types.BlockID(genHashNum(1)),
		Nonce:     [8]byte{2, 2, 2, 2, 2, 2, 2, 2},
		Timestamp: 3,
		MinerPayouts: []types.SiacoinOutput{types.SiacoinOutput{
			Value:      types.NewCurrency64(4),
			UnlockHash: types.UnlockHash(genHashNum(5)),
		}},
		Transactions: nil,
	}

	// This should not error at least...
	lockID := et.explorer.mu.Lock()
	err := et.explorer.addBlockDB(b1)
	et.explorer.mu.Unlock(lockID)
	if err != nil {
		return errors.New("Error inserting basic block: " + err.Error())
	}

	// Again, not a valid block at all.
	b2 := types.Block{
		ParentID:     b1.ID(),
		Nonce:        [8]byte{7, 7, 7, 7, 7, 7, 7, 7},
		Timestamp:    8,
		MinerPayouts: nil,
		Transactions: []types.Transaction{types.Transaction{
			SiacoinInputs: []types.SiacoinInput{types.SiacoinInput{
				ParentID: b1.MinerPayoutID(0),
			}},
			FileContracts: []types.FileContract{types.FileContract{
				UnlockHash: types.UnlockHash(genHashNum(10)),
			}},
		}},
	}

	lockID = et.explorer.mu.Lock()
	err = et.explorer.addBlockDB(b2)
	et.explorer.mu.Unlock(lockID)
	if err != nil {
		return errors.New("Error inserting block 2: " + err.Error())
	}

	// Now query the database to see if it has been linked properly
	lockID = et.explorer.mu.RLock()
	bytes, err := et.explorer.db.GetFromBucket("Blocks", encoding.Marshal(b1.ID()))
	et.explorer.mu.RUnlock(lockID)
	if err != nil {
		return errors.New("Error loading block 1: " + err.Error())
	}
	var b types.Block
	err = encoding.Unmarshal(bytes, &b)
	if err != nil {
		return errors.New("Could not decode loaded block")
	}
	if b.ID() != b1.ID() {
		return errors.New("Block 1 not stored properly")
	}

	// Query to see if the input is added to the output field
	lockID = et.explorer.mu.RLock()
	bytes, err = et.explorer.db.GetFromBucket("SiacoinOutputs", encoding.Marshal(b1.MinerPayoutID(0)))
	et.explorer.mu.RUnlock(lockID)
	if err != nil {
		t.Fatal(err.Error())
	}
	if bytes == nil {
		return errors.New("Output is nil")
	}
	var ot outputTransactions
	err = encoding.Unmarshal(bytes, &ot)
	if err != nil {
		return errors.New("Could not decode loaded block")
	}
	if ot.InputTx == (types.TransactionID{}) {
		return errors.New("Input not added as output")
	}
	return nil
}
Example #26
// New returns a new State, containing at least the genesis block. If there is
// an existing block database present in saveDir, it will be loaded. Otherwise,
// a new database will be created.
func New(gateway modules.Gateway, saveDir string) (*State, error) {
	if gateway == nil {
		return nil, ErrNilGateway
	}

	// Create the State object.
	cs := &State{
		blockMap:  make(map[types.BlockID]*blockNode),
		dosBlocks: make(map[types.BlockID]struct{}),

		currentPath: make([]types.BlockID, 1),

		siacoinOutputs:        make(map[types.SiacoinOutputID]types.SiacoinOutput),
		fileContracts:         make(map[types.FileContractID]types.FileContract),
		siafundOutputs:        make(map[types.SiafundOutputID]types.SiafundOutput),
		delayedSiacoinOutputs: make(map[types.BlockHeight]map[types.SiacoinOutputID]types.SiacoinOutput),

		gateway: gateway,

		mu: sync.New(modules.SafeMutexDelay, 1),
	}

	// Create the genesis block and add it as the BlockRoot.
	genesisBlock := types.Block{
		Timestamp: types.GenesisTimestamp,
		Transactions: []types.Transaction{
			{SiafundOutputs: types.GenesisSiafundAllocation},
		},
	}
	cs.blockRoot = &blockNode{
		block:       genesisBlock,
		childTarget: types.RootTarget,
		depth:       types.RootDepth,

		diffsGenerated: true,
	}
	cs.blockMap[genesisBlock.ID()] = cs.blockRoot

	// Fill out the consensus information for the genesis block.
	cs.currentPath[0] = genesisBlock.ID()
	cs.siacoinOutputs[genesisBlock.MinerPayoutID(0)] = types.SiacoinOutput{
		Value:      types.CalculateCoinbase(0),
		UnlockHash: types.ZeroUnlockHash,
	}

	// Allocate the Siafund addresses by putting them all in a big transaction
	// and applying the diffs.
	for i, siafundOutput := range genesisBlock.Transactions[0].SiafundOutputs {
		sfid := genesisBlock.Transactions[0].SiafundOutputID(i)
		sfod := modules.SiafundOutputDiff{
			Direction:     modules.DiffApply,
			ID:            sfid,
			SiafundOutput: siafundOutput,
		}
		cs.commitSiafundOutputDiff(sfod, modules.DiffApply)
		cs.blockRoot.siafundOutputDiffs = append(cs.blockRoot.siafundOutputDiffs, sfod)
	}

	// Send out genesis block update.
	cs.updateSubscribers(nil, []*blockNode{cs.blockRoot})

	// Create the consensus directory.
	err := os.MkdirAll(saveDir, 0700)
	if err != nil {
		return nil, err
	}

	// During short tests, use an in-memory database.
	if build.Release == "testing" && testing.Short() {
		cs.db = persist.NilDB
	} else {
		// Otherwise, try to load an existing database from disk.
		err = cs.load(saveDir)
		if err != nil {
			return nil, err
		}
	}

	// Register RPCs
	gateway.RegisterRPC("SendBlocks", cs.sendBlocks)
	gateway.RegisterRPC("RelayBlock", cs.RelayBlock)
	gateway.RegisterConnectCall("SendBlocks", cs.receiveBlocks)

	// Spawn resynchronize loop.
	go cs.threadedResynchronize()

	return cs, nil
}
Example #27
// addBlockDB parses a block and adds it to the database
func (e *Explorer) addBlockDB(b types.Block) error {
	// Special case for the genesis block, which does not have a
	// valid parent, and for testing, as tests will not always use
	// blocks in consensus
	var blocktarget types.Target
	if b.ID() == e.genesisBlockID {
		blocktarget = types.RootDepth
		e.blockchainHeight = 0
	} else {
		var exists bool
		blocktarget, exists = e.cs.ChildTarget(b.ParentID)
		if build.DEBUG {
			if build.Release == "testing" {
				blocktarget = types.RootDepth
			} else if !exists {
				panic("Applied block not in consensus")
			}
		}
	}

	// Check if the block exists.
	var exists bool
	dbErr := e.db.View(func(tx *bolt.Tx) error {
		id := b.ID()
		block := tx.Bucket([]byte("Blocks")).Get(id[:])
		exists = block != nil
		return nil
	})
	if dbErr != nil {
		return dbErr
	}
	if exists {
		return nil
	}

	tx, err := newBoltTx(e.db)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Construct the struct that will be inside the heights map
	blockStruct := blockData{
		Block:  b,
		Height: e.blockchainHeight,
	}

	tx.addNewHash("Blocks", hashBlock, crypto.Hash(b.ID()), blockStruct)

	bSum := modules.ExplorerBlockData{
		ID:        b.ID(),
		Timestamp: b.Timestamp,
		Target:    blocktarget,
		Size:      uint64(len(encoding.Marshal(b))),
	}

	tx.putObject("Heights", e.blockchainHeight, bSum)
	tx.putObject("Hashes", crypto.Hash(b.ID()), hashBlock)

	// Insert the miner payouts as new outputs
	for i, payout := range b.MinerPayouts {
		tx.addAddress(payout.UnlockHash, types.TransactionID(b.ID()))
		tx.addNewOutput(b.MinerPayoutID(uint64(i)), types.TransactionID(b.ID()))
	}

	// Insert each transaction
	for i, txn := range b.Transactions {
		tx.addNewHash("Transactions", hashTransaction, crypto.Hash(txn.ID()), txInfo{b.ID(), i})
		tx.addTransaction(txn)
	}

	return tx.commit()
}
Example #28
// validHeader does some early, low computation verification on the block.
func (cs *ConsensusSet) validHeader(tx *bolt.Tx, b types.Block) error {
	// See if the block is known already.
	id := b.ID()
	_, exists := cs.dosBlocks[id]
	if exists {
		return errDoSBlock
	}

	// Check if the block is already known.
	blockMap := tx.Bucket(BlockMap)
	if blockMap.Get(id[:]) != nil {
		return modules.ErrBlockKnown
	}

	// Check for the parent.
	parentBytes := blockMap.Get(b.ParentID[:])
	if parentBytes == nil {
		return errOrphan
	}
	var parent processedBlock
	err := encoding.Unmarshal(parentBytes, &parent)
	if err != nil {
		return err
	}

	// Check that the target of the new block is sufficient.
	if !b.CheckTarget(parent.ChildTarget) {
		return modules.ErrBlockUnsolved
	}

	// Check that the timestamp is not too far in the past to be
	// acceptable.
	if earliestChildTimestamp(blockMap, &parent) > b.Timestamp {
		return errEarlyTimestamp
	}

	// Check that the block is below the size limit.
	if uint64(len(encoding.Marshal(b))) > types.BlockSizeLimit {
		return errLargeBlock
	}

	// If the block is in the extreme future, return an error and do nothing
	// more with the block. There is an assumption that by the time the extreme
	// future arrives, this block will no longer be a part of the longest fork
	// because it will have been ignored by all of the miners.
	if b.Timestamp > types.CurrentTimestamp()+types.ExtremeFutureThreshold {
		return errExtremeFutureTimestamp
	}

	// Verify that the miner payouts are valid.
	if !b.CheckMinerPayouts(parent.Height + 1) {
		return errBadMinerPayouts
	}

	// If the block is in the near future, but too far to be acceptable, then
	// the block will be saved and added to the consensus set after it is no
	// longer too far in the future. This is the last check because it's an
	// expensive check, and not worth performing if the payouts are incorrect.
	if b.Timestamp > types.CurrentTimestamp()+types.FutureThreshold {
		go func() {
			time.Sleep(time.Duration(b.Timestamp-(types.CurrentTimestamp()+types.FutureThreshold)) * time.Second)
			cs.AcceptBlock(b) // NOTE: Error is not handled.
		}()
		return errFutureTimestamp
	}
	return nil
}
Example #29
func dbCalculateBlockFacts(tx *bolt.Tx, cs modules.ConsensusSet, block types.Block) blockFacts {
	// get the parent block facts
	var bf blockFacts
	err := dbGetAndDecode(bucketBlockFacts, block.ParentID, &bf)(tx)
	assertNil(err)

	// get target
	target, exists := cs.ChildTarget(block.ParentID)
	if !exists {
		panic(fmt.Sprint("ConsensusSet is missing target of known block", block.ParentID))
	}

	// update fields
	bf.BlockID = block.ID()
	bf.Height++
	bf.Difficulty = target.Difficulty()
	bf.Target = target
	bf.Timestamp = block.Timestamp
	bf.TotalCoins = types.CalculateNumSiacoins(bf.Height)

	// calculate maturity timestamp
	var maturityTimestamp types.Timestamp
	if bf.Height > types.MaturityDelay {
		oldBlock, exists := cs.BlockAtHeight(bf.Height - types.MaturityDelay)
		if !exists {
			panic(fmt.Sprint("ConsensusSet is missing block at height", bf.Height-types.MaturityDelay))
		}
		maturityTimestamp = oldBlock.Timestamp
	}
	bf.MaturityTimestamp = maturityTimestamp

	// calculate hashrate by averaging last 'hashrateEstimationBlocks' blocks
	var estimatedHashrate types.Currency
	if bf.Height > hashrateEstimationBlocks {
		var totalDifficulty = bf.Target
		var oldestTimestamp types.Timestamp
		for i := types.BlockHeight(1); i < hashrateEstimationBlocks; i++ {
			b, exists := cs.BlockAtHeight(bf.Height - i)
			if !exists {
				panic(fmt.Sprint("ConsensusSet is missing block at height", bf.Height-hashrateEstimationBlocks))
			}
			target, exists := cs.ChildTarget(b.ParentID)
			if !exists {
				panic(fmt.Sprint("ConsensusSet is missing target of known block", b.ParentID))
			}
			totalDifficulty = totalDifficulty.AddDifficulties(target)
			oldestTimestamp = b.Timestamp
		}
		secondsPassed := bf.Timestamp - oldestTimestamp
		estimatedHashrate = totalDifficulty.Difficulty().Div64(uint64(secondsPassed))
	}
	bf.EstimatedHashrate = estimatedHashrate

	bf.MinerPayoutCount += uint64(len(block.MinerPayouts))
	bf.TransactionCount += uint64(len(block.Transactions))
	for _, txn := range block.Transactions {
		bf.SiacoinInputCount += uint64(len(txn.SiacoinInputs))
		bf.SiacoinOutputCount += uint64(len(txn.SiacoinOutputs))
		bf.FileContractCount += uint64(len(txn.FileContracts))
		bf.FileContractRevisionCount += uint64(len(txn.FileContractRevisions))
		bf.StorageProofCount += uint64(len(txn.StorageProofs))
		bf.SiafundInputCount += uint64(len(txn.SiafundInputs))
		bf.SiafundOutputCount += uint64(len(txn.SiafundOutputs))
		bf.MinerFeeCount += uint64(len(txn.MinerFees))
		bf.ArbitraryDataCount += uint64(len(txn.ArbitraryData))
		bf.TransactionSignatureCount += uint64(len(txn.TransactionSignatures))

		for _, fc := range txn.FileContracts {
			bf.TotalContractCost = bf.TotalContractCost.Add(fc.Payout)
			bf.TotalContractSize = bf.TotalContractSize.Add(types.NewCurrency64(fc.FileSize))
		}
		for _, fcr := range txn.FileContractRevisions {
			bf.TotalContractSize = bf.TotalContractSize.Add(types.NewCurrency64(fcr.NewFileSize))
			bf.TotalRevisionVolume = bf.TotalRevisionVolume.Add(types.NewCurrency64(fcr.NewFileSize))
		}
	}

	return bf
}
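The hashrate estimate above reduces to summed difficulty over elapsed time. A minimal sketch of that last step, assuming the window's difficulties were already accumulated and secondsPassed is nonzero:

// estimateHashrate mirrors the final division in dbCalculateBlockFacts:
// hashes per second is roughly the window's total difficulty divided by the
// number of seconds the window spans.
func estimateHashrate(totalDifficulty types.Target, secondsPassed types.Timestamp) types.Currency {
	return totalDifficulty.Difficulty().Div64(uint64(secondsPassed))
}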