Example #1
File: accept.go Project: mm3/Sia
// validHeader does some early, low computation verification on the block.
func (cs *State) validHeader(b types.Block) error {
	// Grab the parent of the block and verify the ID of the child meets the
	// target. This is done as early as possible to enforce that any
	// block-related DoS must use blocks that have sufficient work.
	parent, exists := cs.blockMap[b.ParentID]
	if !exists {
		return ErrOrphan
	}
	if !b.CheckTarget(parent.childTarget) {
		return ErrMissedTarget
	}

	// Check that the block is below the size limit.
	if uint64(len(encoding.Marshal(b))) > types.BlockSizeLimit {
		return ErrLargeBlock
	}

	// Check that the timestamp is not in 'the past', where the past is defined
	// by earliestChildTimestamp.
	if parent.earliestChildTimestamp() > b.Timestamp {
		return ErrEarlyTimestamp
	}

	// If the block is in the extreme future, return an error and do nothing
	// more with the block. There is an assumption that by the time the extreme
	// future arrives, this block will no longer be a part of the longest fork
	// because it will have been ignored by all of the miners.
	if b.Timestamp > types.CurrentTimestamp()+types.ExtremeFutureThreshold {
		return ErrExtremeFutureTimestamp
	}

	// Verify that the miner payouts are valid.
	if !b.CheckMinerPayouts(parent.height + 1) {
		return ErrBadMinerPayouts
	}

	// If the block is in the near future, but too far to be acceptable, then
	// the block will be saved and added to the consensus set after it is no
	// longer too far in the future. This is the last check because it's an
	// expensive check, and not worth performing if the payouts are incorrect.
	if b.Timestamp > types.CurrentTimestamp()+types.FutureThreshold {
		go func() {
			time.Sleep(time.Duration(b.Timestamp-(types.CurrentTimestamp()+types.FutureThreshold)) * time.Second)
			lockID := cs.mu.Lock()
			defer cs.mu.Unlock(lockID)
			cs.acceptBlock(b) // NOTE: Error is not handled.
		}()
		return ErrFutureTimestamp
	}

	return nil
}
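The sleep duration in the goroutine above is exactly the gap between the block's timestamp and the top of the acceptance window: after b.Timestamp - (now + FutureThreshold) seconds, the same future-timestamp check passes. A small illustrative helper (hypothetical, not part of the Sia codebase) that computes that wait:

// waitBeforeAccept returns how long a future block must be held before it
// falls inside the acceptance window. Arguments are Unix-second timestamps,
// matching types.Timestamp semantics.
func waitBeforeAccept(blockTS, now, futureThreshold types.Timestamp) time.Duration {
	if blockTS <= now+futureThreshold {
		return 0 // already acceptable
	}
	return time.Duration(blockTS-(now+futureThreshold)) * time.Second
}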
Example #2
// TestFutureTimestampHandling checks that blocks in the future (but not
// extreme future) are handled correctly.
func TestFutureTimestampHandling(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := createConsensusSetTester("TestFutureTimestampHandling")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.closeCst()

	// Submit a block with a timestamp in the future, but not the extreme
	// future.
	block, target, err := cst.miner.BlockForWork()
	if err != nil {
		t.Fatal(err)
	}
	block.Timestamp = types.CurrentTimestamp() + 2 + types.FutureThreshold
	solvedBlock, _ := cst.miner.SolveBlock(block, target)
	err = cst.cs.AcceptBlock(solvedBlock)
	if err != errFutureTimestamp {
		t.Fatalf("expected %v, got %v", errFutureTimestamp, err)
	}

	// Check that after waiting until the block is no longer too far in the
	// future, the block gets added to the consensus set.
	time.Sleep(time.Second * 3) // 3 seconds, as the block was originally 2 seconds too far into the future.
	_, err = cst.cs.dbGetBlockMap(solvedBlock.ID())
	if err == errNilItem {
		t.Fatalf("future block was not added to the consensus set after waiting the appropriate amount of time")
	}
}
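The fixed three-second sleep makes the final assertion sensitive to scheduler and clock jitter; a later variant of this test polls instead. A polling helper in that spirit might look like the following sketch (waitForBlock is a hypothetical name; dbGetBlockMap is the accessor used above):

// waitForBlock polls the consensus set until the block appears or the
// deadline passes, returning true if the block was found.
func waitForBlock(cst *consensusSetTester, id types.BlockID, deadline time.Duration) bool {
	for end := time.Now().Add(deadline); time.Now().Before(end); {
		if _, err := cst.cs.dbGetBlockMap(id); err == nil {
			return true
		}
		time.Sleep(100 * time.Millisecond)
	}
	return false
}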
Example #3
// TestExtremeFutureTimestampHandling checks that blocks with extreme future
// timestamps are handled correctly.
func TestExtremeFutureTimestampHandling(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := createConsensusSetTester("TestExtremeFutureTimestampHandling")
	if err != nil {
		t.Fatal(err)
	}

	// Submit a block with a timestamp in the extreme future.
	block, _, target := cst.miner.BlockForWork()
	block.Timestamp = types.CurrentTimestamp() + 2 + types.ExtremeFutureThreshold
	solvedBlock, _ := cst.miner.SolveBlock(block, target)
	err = cst.cs.acceptBlock(solvedBlock)
	if err != ErrExtremeFutureTimestamp {
		t.Error("Expecting ErrExtremeFutureTimestamp", err)
	}

	// Check that after waiting until the block is no longer in the future, the
	// block still has not been added to the consensus set (prove that the
	// block was correctly discarded).
	time.Sleep(time.Second * time.Duration(3+types.ExtremeFutureThreshold))
	lockID := cst.cs.mu.RLock()
	defer cst.cs.mu.RUnlock(lockID)
	_, exists := cst.cs.blockMap[solvedBlock.ID()]
	if exists {
		t.Error("extreme future block made it into the consensus set after waiting")
	}
}
Example #4
// TestEarlyTimestampHandling checks that blocks too far in the past are
// rejected.
func TestEarlyTimestampHandling(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestEarlyTimestampHandling")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()
	minTimestamp := types.CurrentTimestamp()
	cst.cs.blockRuleHelper = mockBlockRuleHelper{
		minTimestamp: minTimestamp,
	}

	// Submit a block with a timestamp in the past, before minTimestamp.
	block, target, err := cst.miner.BlockForWork()
	if err != nil {
		t.Fatal(err)
	}
	block.Timestamp = minTimestamp - 1
	solvedBlock, _ := cst.miner.SolveBlock(block, target)
	err = cst.cs.AcceptBlock(solvedBlock)
	if err != errEarlyTimestamp {
		t.Fatalf("expected %v, got %v", errEarlyTimestamp, err)
	}
}
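mockBlockRuleHelper is not defined in these excerpts. Judging from its use here and in the validateHeader example below, a minimal sketch would satisfy the blockRuleHelper interface by returning a canned minimum timestamp:

// mockBlockRuleHelper is a test double that ignores the block database and
// always reports the same minimum child timestamp (a sketch; the real
// interface is assumed to expose minimumValidChildTimestamp).
type mockBlockRuleHelper struct {
	minTimestamp types.Timestamp
}

func (brh mockBlockRuleHelper) minimumValidChildTimestamp(blockMap dbBucket, pb *processedBlock) types.Timestamp {
	return brh.minTimestamp
}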
Example #5
// TestExtremeFutureTimestampHandling checks that blocks with extreme future
// timestamps are handled correctly.
func TestExtremeFutureTimestampHandling(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := createConsensusSetTester("TestExtremeFutureTimestampHandling")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.closeCst()

	// Submit a block with a timestamp in the extreme future.
	block, target, err := cst.miner.BlockForWork()
	if err != nil {
		t.Fatal(err)
	}
	block.Timestamp = types.CurrentTimestamp() + 2 + types.ExtremeFutureThreshold
	solvedBlock, _ := cst.miner.SolveBlock(block, target)
	err = cst.cs.AcceptBlock(solvedBlock)
	if err != errExtremeFutureTimestamp {
		t.Fatalf("expected %v, got %v", errExtremeFutureTimestamp, err)
	}

	// Check that after waiting until the block is no longer in the future, the
	// block still has not been added to the consensus set (prove that the
	// block was correctly discarded).
	time.Sleep(time.Second * time.Duration(3+types.ExtremeFutureThreshold))
	_, err = cst.cs.dbGetBlockMap(solvedBlock.ID())
	if err != errNilItem {
		t.Error("extreme future block made it into the consensus set after waiting")
	}
}
Example #6
// TestBuriedBadFork creates a block with an invalid transaction that's not on
// the longest fork. The consensus set will not validate that block. Then valid
// blocks are added on top of it to make it the longest fork. When it becomes
// the longest fork, all the blocks should be fully validated and thrown out
// because a parent is invalid.
func TestBuriedBadFork(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestBuriedBadFork")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()
	pb := cst.cs.dbCurrentProcessedBlock()

	// Create a bad block that builds on a parent block, so that it is not
	// part of the longest fork.
	badBlock := types.Block{
		ParentID:     pb.Block.ParentID,
		Timestamp:    types.CurrentTimestamp(),
		MinerPayouts: []types.SiacoinOutput{{Value: types.CalculateCoinbase(pb.Height)}},
		Transactions: []types.Transaction{{
			SiacoinInputs: []types.SiacoinInput{{}}, // Will trigger an error on full verification but not partial verification.
		}},
	}
	parent, err := cst.cs.dbGetBlockMap(pb.Block.ParentID)
	if err != nil {
		t.Fatal(err)
	}
	badBlock, _ = cst.miner.SolveBlock(badBlock, parent.ChildTarget)
	err = cst.cs.AcceptBlock(badBlock)
	if err != modules.ErrNonExtendingBlock {
		t.Fatal(err)
	}

	// Build another block on top of the bad block. This block is fully valid,
	// so it will cause a fork and full validation of the bad block; both the
	// bad block and this block should be thrown away.
	block := types.Block{
		ParentID:     badBlock.ID(),
		Timestamp:    types.CurrentTimestamp(),
		MinerPayouts: []types.SiacoinOutput{{Value: types.CalculateCoinbase(pb.Height + 1)}},
	}
	block, _ = cst.miner.SolveBlock(block, parent.ChildTarget) // okay because the target will not change
	err = cst.cs.AcceptBlock(block)
	if err == nil {
		t.Fatal("a bad block failed to cause an error")
	}
}
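SolveBlock itself is not shown on this page. Conceptually it grinds nonces until the block's ID meets the target; a rough sketch, assuming the uint64 Nonce field that older versions of types.Block carried:

// solveBlock increments the nonce until the block's ID meets the target,
// returning the block and whether a solution was found within the attempt
// budget (bounded here so the sketch always terminates).
func solveBlock(b types.Block, target types.Target) (types.Block, bool) {
	for i := uint64(0); i < 1<<24; i++ {
		b.Nonce = i
		if b.CheckTarget(target) {
			return b, true
		}
	}
	return b, false
}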
Example #7
// validateHeader does some early, low computation verification on the header
// to determine if the block should be downloaded. Callers should not assume
// that validation will happen in a particular order.
func (cs *ConsensusSet) validateHeader(tx dbTx, h types.BlockHeader) error {
	// Check if the block is a DoS block - a known invalid block that is expensive
	// to validate.
	id := h.ID()
	_, exists := cs.dosBlocks[id]
	if exists {
		return errDoSBlock
	}

	// Check if the block is already known.
	blockMap := tx.Bucket(BlockMap)
	if blockMap == nil {
		return errNoBlockMap
	}
	if blockMap.Get(id[:]) != nil {
		return modules.ErrBlockKnown
	}

	// Check for the parent.
	parentID := h.ParentID
	parentBytes := blockMap.Get(parentID[:])
	if parentBytes == nil {
		return errOrphan
	}
	var parent processedBlock
	err := cs.marshaler.Unmarshal(parentBytes, &parent)
	if err != nil {
		return err
	}

	// Check that the target of the new block is sufficient.
	if !checkHeaderTarget(h, parent.ChildTarget) {
		return modules.ErrBlockUnsolved
	}

	// TODO: check if the block is a non extending block once headers-first
	// downloads are implemented.

	// Check that the timestamp is not too far in the past to be acceptable.
	minTimestamp := cs.blockRuleHelper.minimumValidChildTimestamp(blockMap, &parent)
	if minTimestamp > h.Timestamp {
		return errEarlyTimestamp
	}

	// Check if the block is in the extreme future. We make a distinction between
	// future and extreme future because there is an assumption that by the time
	// the extreme future arrives, this block will no longer be a part of the
	// longest fork because it will have been ignored by all of the miners.
	if h.Timestamp > types.CurrentTimestamp()+types.ExtremeFutureThreshold {
		return errExtremeFutureTimestamp
	}

	// We do not check if the header is in the near future here, because we want
	// to get the corresponding block as soon as possible, even if the block is in
	// the near future.

	return nil
}
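validateHeader only reads from the database, so callers are expected to run it inside a read-only bolt transaction, much as the AcceptBlock example later on this page does for full blocks. A minimal usage sketch (boltTxWrapper is the adapter used elsewhere in the module):

// Validate a relayed header before deciding whether to fetch its block.
err := cs.db.View(func(tx *bolt.Tx) error {
	return cs.validateHeader(boltTxWrapper{tx}, header)
})
if err != nil {
	// Orphans, known blocks, and DoS blocks are all rejected here without
	// ever downloading the block body.
	return err
}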
Example #8
// blockForWork returns a block that is ready for nonce grinding, including
// correct miner payouts and a random transaction to prevent collisions and
// overlapping work with other blocks being mined in parallel or for different
// forks (during testing).
func (m *Miner) blockForWork() types.Block {
	b := m.unsolvedBlock

	// Update the timestamp.
	if b.Timestamp < types.CurrentTimestamp() {
		b.Timestamp = types.CurrentTimestamp()
	}

	// Update the address + payouts.
	_ = m.checkAddress() // Err is ignored - address generation failed but can't do anything about it (log maybe).
	b.MinerPayouts = []types.SiacoinOutput{{Value: b.CalculateSubsidy(m.height + 1), UnlockHash: m.address}}

	// Add an arb-data txn to the block to create a unique merkle root.
	randBytes, _ := crypto.RandBytes(types.SpecifierLen)
	randTxn := types.Transaction{
		ArbitraryData: [][]byte{append(modules.PrefixNonSia[:], randBytes...)},
	}
	b.Transactions = append([]types.Transaction{randTxn}, b.Transactions...)

	return b
}
Example #9
// blockForWork returns a block that is ready for nonce grinding, including
// correct miner payouts and a random transaction to prevent collisions and
// overlapping work with other blocks being mined in parallel or for different
// forks (during testing).
func (m *Miner) blockForWork() types.Block {
	b := m.persist.UnsolvedBlock

	// Update the timestamp.
	if b.Timestamp < types.CurrentTimestamp() {
		b.Timestamp = types.CurrentTimestamp()
	}

	// Update the address + payouts.
	err := m.checkAddress()
	if err != nil {
		m.log.Println(err)
	}
	b.MinerPayouts = []types.SiacoinOutput{{Value: b.CalculateSubsidy(m.persist.Height + 1), UnlockHash: m.persist.Address}}

	// Add an arb-data txn to the block to create a unique merkle root.
	randBytes, _ := crypto.RandBytes(types.SpecifierLen)
	randTxn := types.Transaction{
		ArbitraryData: [][]byte{append(modules.PrefixNonSia[:], randBytes...)},
	}
	b.Transactions = append([]types.Transaction{randTxn}, b.Transactions...)

	return b
}
Example #10
// TestBuriedBadTransaction tries submitting a block with a bad transaction
// that is buried under good transactions.
func TestBuriedBadTransaction(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestBuriedBadTransaction")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()
	pb := cst.cs.dbCurrentProcessedBlock()

	// Create a good transaction using the wallet.
	txnValue := types.NewCurrency64(1200)
	txnBuilder := cst.wallet.StartTransaction()
	err = txnBuilder.FundSiacoins(txnValue)
	if err != nil {
		t.Fatal(err)
	}
	txnBuilder.AddSiacoinOutput(types.SiacoinOutput{Value: txnValue})
	txnSet, err := txnBuilder.Sign(true)
	if err != nil {
		t.Fatal(err)
	}
	err = cst.tpool.AcceptTransactionSet(txnSet)
	if err != nil {
		t.Fatal(err)
	}

	// Create a bad transaction.
	badTxn := types.Transaction{
		SiacoinInputs: []types.SiacoinInput{{}},
	}
	txns := append(cst.tpool.TransactionList(), badTxn)

	// Create a block with a buried bad transaction.
	block := types.Block{
		ParentID:     pb.Block.ID(),
		Timestamp:    types.CurrentTimestamp(),
		MinerPayouts: []types.SiacoinOutput{{Value: types.CalculateCoinbase(pb.Height + 1)}},
		Transactions: txns,
	}
	block, _ = cst.miner.SolveBlock(block, pb.ChildTarget)
	err = cst.cs.AcceptBlock(block)
	if err == nil {
		t.Error("buried transaction didn't cause an error")
	}
}
Example #11
// blockForWork creates a block ready for nonce grinding, also returning the
// target of the block. Getting the MerkleRoot of a block requires encoding
// and hashing in a specific way, which are implementation details we didn't
// want external miners to have to worry about. All blocks returned are
// unique, which means all miners can safely start at the '0' nonce.
func (m *Miner) blockForWork() (types.Block, types.Target) {
	// Determine the timestamp.
	blockTimestamp := types.CurrentTimestamp()
	if blockTimestamp < m.earliestTimestamp {
		blockTimestamp = m.earliestTimestamp
	}

	// Create the miner payouts.
	subsidy := types.CalculateCoinbase(m.height)
	for _, txn := range m.transactions {
		for _, fee := range txn.MinerFees {
			subsidy = subsidy.Add(fee)
		}
	}
	blockPayouts := []types.SiacoinOutput{{Value: subsidy, UnlockHash: m.address}}

	// Create the list of transactions, including the randomized transaction.
	// The transactions are assembled by calling append(singleElem,
	// existingSlice) because doing it the reverse way has some side effects,
	// creating a race condition and ultimately changing the block hash for
	// other parts of the program. This is related to the fact that slices are
	// pointers, and not immutable objects. Use of the builtin `copy` function
	// when passing objects like blocks around may fix this problem.
	randBytes := make([]byte, types.SpecifierLen)
	rand.Read(randBytes)
	randTxn := types.Transaction{
		ArbitraryData: [][]byte{append(modules.PrefixNonSia[:], randBytes...)},
	}
	blockTransactions := append([]types.Transaction{randTxn}, m.transactions...)

	// Assemble the block
	b := types.Block{
		ParentID:     m.parent,
		Timestamp:    blockTimestamp,
		MinerPayouts: blockPayouts,
		Transactions: blockTransactions,
	}
	return b, m.target
}
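The hazard described in the comment above is ordinary append aliasing: appending to a slice that has spare capacity writes into the shared backing array. A self-contained demonstration of why prepending with a fresh slice literal avoids it:

package main

import "fmt"

func main() {
	shared := make([]int, 1, 4) // spare capacity, as long-lived slices often have
	shared[0] = 1

	// Both appends fit in the existing backing array, so they alias the
	// same memory; the second append silently overwrites the first.
	a := append(shared, 2)
	b := append(shared, 3)
	fmt.Println(a[1], b[1]) // 3 3

	// Prepending with append([]int{x}, shared...) always allocates a fresh
	// backing array, which is why blockForWork assembles the transaction
	// list this way.
	c := append([]int{2}, shared...)
	d := append([]int{3}, shared...)
	fmt.Println(c[0], d[0]) // 2 3
}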
Example #12
// testFutureTimestampHandling checks that blocks in the future (but not
// extreme future) are handled correctly.
func (cst *consensusSetTester) testFutureTimestampHandling() error {
	// Submit a block with a timestamp in the future, but not the extreme
	// future.
	block, _, target := cst.miner.BlockForWork()
	block.Timestamp = types.CurrentTimestamp() + 2 + types.FutureThreshold
	solvedBlock, _ := cst.miner.SolveBlock(block, target)
	err := cst.cs.acceptBlock(solvedBlock)
	if err != ErrFutureTimestamp {
		return fmt.Errorf("expected ErrFutureTimestamp, got %v", err)
	}

	// Check that after waiting until the block is no longer too far in the
	// future, the block gets added to the consensus set.
	time.Sleep(time.Second * 3) // 3 seconds, as the block was originally 2 seconds too far into the future.
	lockID := cst.cs.mu.RLock()
	defer cst.cs.mu.RUnlock(lockID)
	_, exists := cst.cs.blockMap[solvedBlock.ID()]
	if !exists {
		return errors.New("future block was not added to the consensus set after waiting the appropriate amount of time.")
	}
	return nil
}
Example #13
// TestExtremeFutureTimestampHandling checks that blocks in the extreme future
// are rejected.
func TestExtremeFutureTimestampHandling(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestExtremeFutureTimestampHandling")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	// Submit a block with a timestamp in the extreme future.
	block, target, err := cst.miner.BlockForWork()
	if err != nil {
		t.Fatal(err)
	}
	block.Timestamp = types.CurrentTimestamp() + 2 + types.ExtremeFutureThreshold
	solvedBlock, _ := cst.miner.SolveBlock(block, target)
	err = cst.cs.AcceptBlock(solvedBlock)
	if err != errExtremeFutureTimestamp {
		t.Fatalf("expected %v, got %v", errExtremeFutureTimestamp, err)
	}
}
Example #14
// TestFutureTimestampHandling checks that blocks in the future (but not
// extreme future) are handled correctly.
func TestFutureTimestampHandling(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	cst, err := createConsensusSetTester("TestFutureTimestampHandling")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	// Submit a block with a timestamp in the future, but not the extreme
	// future.
	block, target, err := cst.miner.BlockForWork()
	if err != nil {
		t.Fatal(err)
	}
	block.Timestamp = types.CurrentTimestamp() + 2 + types.FutureThreshold
	solvedBlock, _ := cst.miner.SolveBlock(block, target)
	err = cst.cs.AcceptBlock(solvedBlock)
	if err != errFutureTimestamp {
		t.Fatalf("expected %v, got %v", errFutureTimestamp, err)
	}

	// Poll the consensus set until the future block appears.
	for i := 0; i < 30; i++ {
		time.Sleep(time.Second * 3)
		_, err = cst.cs.dbGetBlockMap(solvedBlock.ID())
		if err == nil {
			break
		}
	}
	_, err = cst.cs.dbGetBlockMap(solvedBlock.ID())
	if err != nil {
		t.Errorf("Future block not added to consensus set.\nCurrent Timestamp %v\nFutureThreshold: %v\nBlock Timestamp %v\n", types.CurrentTimestamp(), types.FutureThreshold, block.Timestamp)
	}
}
Example #15
// TestRelayHeader tests that rpcRelayHeader requests the corresponding blocks
// for valid headers with known parents, and requests the block history for
// orphan headers.
func TestRelayHeader(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst, err := blankConsensusSetTester("TestRelayHeader")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	mg := &mockGatewayCallsRPC{
		Gateway:   cst.cs.gateway,
		rpcCalled: make(chan string),
	}
	cst.cs.gateway = mg

	p1, p2 := net.Pipe()
	mockP2 := mockPeerConn{p2}

	// Valid block that rpcRelayHeader should accept.
	validBlock, err := cst.miner.FindBlock()
	if err != nil {
		t.Fatal(err)
	}

	// A block in the near future that rpcRelayHeader should return an error
	// for, but still request the corresponding block.
	block, target, err := cst.miner.BlockForWork()
	if err != nil {
		t.Fatal(err)
	}
	block.Timestamp = types.CurrentTimestamp() + 2 + types.FutureThreshold
	futureBlock, _ := cst.miner.SolveBlock(block, target)

	tests := []struct {
		header  types.BlockHeader
		errWant error
		errMSG  string
		rpcWant string
		rpcMSG  string
	}{
		// Test that rpcRelayHeader rejects known blocks.
		{
			header:  types.GenesisBlock.Header(),
			errWant: modules.ErrBlockKnown,
			errMSG:  "rpcRelayHeader should reject headers to known blocks",
		},
		// Test that rpcRelayHeader requests the parent blocks of orphan headers.
		{
			header:  types.BlockHeader{},
			errWant: nil,
			errMSG:  "rpcRelayHeader should not return an error for orphan headers",
			rpcWant: "SendBlocks",
			rpcMSG:  "rpcRelayHeader should request blocks when the relayed header is an orphan",
		},
		// Test that rpcRelayHeader accepts a valid header that extends the longest chain.
		{
			header:  validBlock.Header(),
			errWant: nil,
			errMSG:  "rpcRelayHeader should accept a valid header",
			rpcWant: "SendBlk",
			rpcMSG:  "rpcRelayHeader should request the block of a valid header",
		},
		// Test that rpcRelayHeader requests a future, but otherwise valid block.
		{
			header:  futureBlock.Header(),
			errWant: nil,
			errMSG:  "rpcRelayHeader should not return an error for a future header",
			rpcWant: "SendBlk",
			rpcMSG:  "rpcRelayHeader should request the corresponding block to a future, but otherwise valid header",
		},
	}
	errChan := make(chan error)
	for _, tt := range tests {
		// Pass the header by value so the goroutine does not race on the
		// loop variable.
		go func(header types.BlockHeader) {
			errChan <- encoding.WriteObject(p1, header)
		}(tt.header)
		err = cst.cs.threadedRPCRelayHeader(mockP2)
		if err != tt.errWant {
			t.Errorf("%s: expected '%v', got '%v'", tt.errMSG, tt.errWant, err)
		}
		err = <-errChan
		if err != nil {
			t.Fatal(err)
		}
		if tt.rpcWant == "" {
			select {
			case rpc := <-mg.rpcCalled:
				t.Errorf("no RPC call expected, but '%v' was called", rpc)
			case <-time.After(10 * time.Millisecond):
			}
		} else {
			select {
			case rpc := <-mg.rpcCalled:
				if rpc != tt.rpcWant {
					t.Errorf("%s: expected '%v', got '%v'", tt.rpcMSG, tt.rpcWant, rpc)
				}
			case <-time.After(10 * time.Millisecond):
				t.Errorf("%s: expected '%v', but no RPC was called", tt.rpcMSG, tt.rpcWant)
			}
		}
	}
}
Example #16
// TestUnitValidateHeader runs a series of unit tests for validateHeader.
func TestUnitValidateHeader(t *testing.T) {
	mockValidBlockID := mockValidBlock.ID()

	var tests = []struct {
		header                 types.BlockHeader
		dosBlocks              map[types.BlockID]struct{}
		blockMapPairs          []blockMapPair
		earliestValidTimestamp types.Timestamp
		marshaler              mockBlockMarshaler
		useNilBlockMap         bool
		errWant                error
		msg                    string
	}{
		// Test that known dos blocks are rejected.
		{
			header: mockValidBlock.Header(),
			// Create a dosBlocks map where mockValidBlock is marked as a bad block.
			dosBlocks: map[types.BlockID]struct{}{
				mockValidBlock.ID(): {},
			},
			blockMapPairs:          serializedParentBlockMap,
			earliestValidTimestamp: mockValidBlock.Timestamp,
			marshaler:              parentBlockUnmarshaler,
			errWant:                errDoSBlock,
			msg:                    "validateHeader should reject known bad blocks",
		},
		// Test that blocks are rejected if a block map doesn't exist.
		{
			header:                 mockValidBlock.Header(),
			dosBlocks:              make(map[types.BlockID]struct{}),
			blockMapPairs:          serializedParentBlockMap,
			earliestValidTimestamp: mockValidBlock.Timestamp,
			marshaler:              parentBlockUnmarshaler,
			useNilBlockMap:         true,
			errWant:                errNoBlockMap,
			msg:                    "validateHeader should fail when no block map is found in the database",
		},
		// Test that known blocks are rejected.
		{
			header:                 mockValidBlock.Header(),
			dosBlocks:              make(map[types.BlockID]struct{}),
			blockMapPairs:          []blockMapPair{{mockValidBlockID[:], []byte{}}},
			earliestValidTimestamp: mockValidBlock.Timestamp,
			marshaler:              parentBlockUnmarshaler,
			errWant:                modules.ErrBlockKnown,
			msg:                    "validateHeader should fail when the block has been seen before",
		},
		// Test that blocks with unknown parents (orphans) are rejected.
		{
			header:                 mockValidBlock.Header(),
			dosBlocks:              make(map[types.BlockID]struct{}),
			earliestValidTimestamp: mockValidBlock.Timestamp,
			marshaler:              parentBlockUnmarshaler,
			errWant:                errOrphan,
			msg:                    "validateHeader should reject a block if its parent block does not appear in the block database",
		},
		// Test that blocks whose parents don't unmarshal are rejected.
		{
			header:                 mockValidBlock.Header(),
			dosBlocks:              make(map[types.BlockID]struct{}),
			blockMapPairs:          serializedParentBlockMap,
			earliestValidTimestamp: mockValidBlock.Timestamp,
			marshaler:              failingBlockUnmarshaler,
			errWant:                unmarshalFailedErr,
			msg:                    "validateHeader should fail when unmarshaling the parent block fails",
		},
		// Test that blocks with too early of a timestamp are rejected.
		{
			header:                 mockValidBlock.Header(),
			dosBlocks:              make(map[types.BlockID]struct{}),
			blockMapPairs:          serializedParentBlockMap,
			earliestValidTimestamp: mockValidBlock.Timestamp + 1,
			marshaler:              parentBlockHighTargetUnmarshaler,
			errWant:                errEarlyTimestamp,
			msg:                    "validateHeader should fail when the header's timestamp is too early",
		},
		// Test that headers in the extreme future are rejected.
		{
			header: types.BlockHeader{
				Timestamp: types.CurrentTimestamp() + types.ExtremeFutureThreshold + 2,
				ParentID:  mockParentID(),
			},
			dosBlocks:     make(map[types.BlockID]struct{}),
			blockMapPairs: serializedParentBlockMap,
			marshaler:     parentBlockHighTargetUnmarshaler,
			errWant:       errExtremeFutureTimestamp,
			msg:           "validateHeader should fail when the header's timestamp is in the extreme future",
		},
		// Test that headers in the near future are not rejected.
		{
			header: types.BlockHeader{
				Timestamp: types.CurrentTimestamp() + types.FutureThreshold + 2,
				ParentID:  mockParentID(),
			},
			dosBlocks:     make(map[types.BlockID]struct{}),
			blockMapPairs: serializedParentBlockMap,
			marshaler:     parentBlockHighTargetUnmarshaler,
			errWant:       nil,
			msg:           "validateHeader should not reject headers whose timestamps are in the near future",
		},
		// Test that blocks with too large of a target are rejected.
		{
			header:                 mockValidBlock.Header(),
			dosBlocks:              make(map[types.BlockID]struct{}),
			blockMapPairs:          serializedParentBlockMap,
			earliestValidTimestamp: mockValidBlock.Timestamp,
			marshaler:              parentBlockLowTargetUnmarshaler,
			errWant:                modules.ErrBlockUnsolved,
			msg:                    "validateHeader should reject blocks with an insufficiently low target",
		},
		// Test that valid blocks are accepted.
		{
			header:                 mockValidBlock.Header(),
			dosBlocks:              make(map[types.BlockID]struct{}),
			blockMapPairs:          serializedParentBlockMap,
			earliestValidTimestamp: mockValidBlock.Timestamp,
			marshaler:              parentBlockHighTargetUnmarshaler,
			errWant:                nil,
			msg:                    "validateHeader should accept a valid block",
		},
	}
	for _, tt := range tests {
		// Initialize the blockmap in the tx.
		bucket := mockDbBucket{map[string][]byte{}}
		for _, mapPair := range tt.blockMapPairs {
			bucket.Set(mapPair.key, mapPair.val)
		}
		dbBucketMap := map[string]dbBucket{}
		if tt.useNilBlockMap {
			dbBucketMap[string(BlockMap)] = nil
		} else {
			dbBucketMap[string(BlockMap)] = bucket
		}
		tx := mockDbTx{dbBucketMap}

		cs := ConsensusSet{
			dosBlocks: tt.dosBlocks,
			marshaler: tt.marshaler,
			blockRuleHelper: mockBlockRuleHelper{
				minTimestamp: tt.earliestValidTimestamp,
			},
		}
		err := cs.validateHeader(tx, tt.header)
		if err != tt.errWant {
			t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err)
		}
	}
}
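The mockDbBucket and mockDbTx types used above are simple in-memory stand-ins. A sketch consistent with how they are constructed and called here (positional struct literals, Get/Set on the bucket, Bucket on the tx):

// mockDbBucket backs a dbBucket with a plain map; a missing key yields a
// nil slice, like a real bolt bucket's Get.
type mockDbBucket struct {
	values map[string][]byte
}

func (b mockDbBucket) Get(key []byte) []byte {
	return b.values[string(key)]
}

func (b mockDbBucket) Set(key, value []byte) {
	b.values[string(key)] = value
}

// mockDbTx serves named buckets from a map; a nil entry models a missing
// block map, which validateHeader reports as errNoBlockMap.
type mockDbTx struct {
	buckets map[string]dbBucket
}

func (tx mockDbTx) Bucket(name []byte) dbBucket {
	return tx.buckets[string(name)]
}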
Example #17
// AcceptBlock will add a block to the state, forking the blockchain if it is
// on a fork that is heavier than the current fork. If the block is accepted,
// it will be relayed to connected peers. This function should only be called
// for new, untrusted blocks.
func (cs *ConsensusSet) AcceptBlock(b types.Block) error {
	// Grab a lock on the consensus set. The lock is demoted later in the
	// function; failure to unlock before returning an error will cause a
	// deadlock.
	cs.mu.Lock()

	// Start verification inside of a bolt View tx.
	err := cs.db.View(func(tx *bolt.Tx) error {
		// Do not accept a block if the database is inconsistent.
		if inconsistencyDetected(tx) {
			return errors.New("inconsistent database")
		}

		// Check that the header is valid. The header is checked first because it
		// is not computationally expensive to verify, but it is computationally
		// expensive to create.
		err := cs.validateHeader(boltTxWrapper{tx}, b)
		if err != nil {
			// If the block is in the near future, but too far to be acceptable, then
			// save the block and add it to the consensus set after it is no longer
			// too far in the future.
			if err == errFutureTimestamp {
				go func() {
					time.Sleep(time.Duration(b.Timestamp-(types.CurrentTimestamp()+types.FutureThreshold)) * time.Second)
					cs.AcceptBlock(b) // NOTE: Error is not handled.
				}()
			}
			return err
		}
		return nil
	})
	if err != nil {
		cs.mu.Unlock()
		return err
	}

	// Try adding the block to the block tree. This call will perform
	// verification on the block before adding the block to the block tree. An
	// error is returned if verification fails or if the block does not extend
	// the longest fork.
	changeEntry, err := cs.addBlockToTree(b)
	if err != nil {
		cs.mu.Unlock()
		return err
	}
	// If appliedBlocks is 0, revertedBlocks will also be 0.
	if build.DEBUG && len(changeEntry.AppliedBlocks) == 0 && len(changeEntry.RevertedBlocks) != 0 {
		panic("appliedBlocks and revertedBlocks are mismatched!")
	}

	// Updates complete, demote the lock.
	cs.mu.Demote()
	defer cs.mu.DemotedUnlock()
	if len(changeEntry.AppliedBlocks) > 0 {
		cs.readlockUpdateSubscribers(changeEntry)
	}

	// Broadcast the new block to all peers.
	go cs.gateway.Broadcast("RelayBlock", b)

	return nil
}
Example #18
// validHeader does some early, low computation verification on the block.
func (cs *ConsensusSet) validHeader(tx *bolt.Tx, b types.Block) error {
	// See if the block is known already.
	id := b.ID()
	_, exists := cs.dosBlocks[id]
	if exists {
		return errDoSBlock
	}

	// Check if the block is already known.
	blockMap := tx.Bucket(BlockMap)
	if blockMap.Get(id[:]) != nil {
		return modules.ErrBlockKnown
	}

	// Check for the parent.
	parentBytes := blockMap.Get(b.ParentID[:])
	if parentBytes == nil {
		return errOrphan
	}
	var parent processedBlock
	err := encoding.Unmarshal(parentBytes, &parent)
	if err != nil {
		return err
	}

	// Check that the target of the new block is sufficient.
	if !b.CheckTarget(parent.ChildTarget) {
		return modules.ErrBlockUnsolved
	}

	// Check that the timestamp is not too far in the past to be
	// acceptable.
	if earliestChildTimestamp(blockMap, &parent) > b.Timestamp {
		return errEarlyTimestamp
	}

	// Check that the block is below the size limit.
	if uint64(len(encoding.Marshal(b))) > types.BlockSizeLimit {
		return errLargeBlock
	}

	// If the block is in the extreme future, return an error and do nothing
	// more with the block. There is an assumption that by the time the extreme
	// future arrives, this block will no longer be a part of the longest fork
	// because it will have been ignored by all of the miners.
	if b.Timestamp > types.CurrentTimestamp()+types.ExtremeFutureThreshold {
		return errExtremeFutureTimestamp
	}

	// Verify that the miner payouts are valid.
	if !b.CheckMinerPayouts(parent.Height + 1) {
		return errBadMinerPayouts
	}

	// If the block is in the near future, but too far to be acceptable, then
	// the block will be saved and added to the consensus set after it is no
	// longer too far in the future. This is the last check because it's an
	// expensive check, and not worth performing if the payouts are incorrect.
	if b.Timestamp > types.CurrentTimestamp()+types.FutureThreshold {
		go func() {
			time.Sleep(time.Duration(b.Timestamp-(types.CurrentTimestamp()+types.FutureThreshold)) * time.Second)
			cs.AcceptBlock(b) // NOTE: Error is not handled.
		}()
		return errFutureTimestamp
	}
	return nil
}
Example #19
func BenchmarkAcceptBigTxBlocks(b *testing.B) {
	b.ReportAllocs()

	numSigs := 7

	cst, err := createConsensusSetTester("BenchmarkEmptyBlocksA")
	if err != nil {
		b.Fatal(err)
	}
	defer cst.closeCst()

	// Mine until the wallet has numSigs mature outputs to spend.
	for cst.cs.height() < (types.BlockHeight(numSigs) + types.MaturityDelay) {
		_, err := cst.miner.AddBlock()
		if err != nil {
			b.Fatal(err)
		}
	}

	// Create an alternate testing consensus set, which does not
	// have any subscribers
	testdir := build.TempDir(modules.ConsensusDir, "BenchmarkEmptyBlocksB")
	g, err := gateway.New(":0", filepath.Join(testdir, modules.GatewayDir))
	if err != nil {
		b.Fatal(err)
	}
	cs, err := New(g, filepath.Join(testdir, modules.ConsensusDir))
	if err != nil {
		b.Fatal("Error creating consensus: " + err.Error())
	}
	defer cs.Close()
	h := cst.cs.db.pathHeight()
	for i := types.BlockHeight(1); i < h; i++ {
		err = cs.AcceptBlock(cst.cs.db.getBlockMap(cst.cs.db.getPath(i)).Block)
		if err != nil {
			b.Fatal(err)
		}
	}

	// Construct a transaction using numSigs utxos, signed numSigs times.
	outputValues := make([]types.Currency, numSigs)
	txValue := types.ZeroCurrency
	for i := 1; i <= numSigs; i++ {
		outputValues[i-1] = types.CalculateCoinbase(types.BlockHeight(i))
		txValue = txValue.Add(outputValues[i-1])
	}

	b.ResetTimer()
	b.StopTimer()
	for j := 0; j < b.N; j++ {
		txnBuilder := cst.wallet.StartTransaction()
		err = txnBuilder.FundSiacoins(txValue)
		if err != nil {
			b.Fatal(err)
		}

		for i := 0; i < numSigs; i++ {
			unlockConditions, err := cst.wallet.NextAddress()
			if err != nil {
				b.Fatal(err)
			}
			txnBuilder.AddSiacoinOutput(types.SiacoinOutput{Value: outputValues[i], UnlockHash: unlockConditions.UnlockHash()})
		}

		txnSet, err := txnBuilder.Sign(true)
		if err != nil {
			b.Fatal(err)
		}

		outputVolume := types.ZeroCurrency
		for _, out := range txnSet[0].SiacoinOutputs {
			outputVolume = outputVolume.Add(out.Value)
		}

		blk := types.Block{
			ParentID:  cst.cs.CurrentBlock().ID(),
			Timestamp: types.CurrentTimestamp(),
			MinerPayouts: []types.SiacoinOutput{
				{Value: types.CalculateCoinbase(cst.cs.height())},
			},
			Transactions: txnSet,
		}

		target, _ := cst.cs.ChildTarget(cst.cs.CurrentBlock().ID())
		block, _ := cst.miner.SolveBlock(blk, target)
		// Submit it to the first consensus set to check validity.
		err = cst.cs.AcceptBlock(block)
		if err != nil {
			b.Fatal(err)
		}
		b.StartTimer()
		// Time the consensus set without subscribers
		err = cs.AcceptBlock(block)
		if err != nil {
			b.Fatal(err)
		}
		b.StopTimer()
	}
}
Example #20
// managedAcceptBlock will try to add a block to the consensus set. If the
// block does not extend the longest currently known chain, an error is
// returned but the block is still kept in memory. If the block extends a fork
// such that the fork becomes the longest currently known chain, the consensus
// set will reorganize itself to recognize the new longest fork. Accepted
// blocks are not relayed.
//
// Typically AcceptBlock should be used so that the accepted block is relayed.
// This method should typically only be used when there would otherwise be
// multiple
// consecutive calls to AcceptBlock with each successive call accepting the
// child block of the previous call.
func (cs *ConsensusSet) managedAcceptBlock(b types.Block) error {
	// Grab a lock on the consensus set. The lock is demoted later in the
	// function; failure to unlock before returning an error will cause a
	// deadlock.
	cs.mu.Lock()

	// Start verification inside of a bolt View tx.
	err := cs.db.View(func(tx *bolt.Tx) error {
		// Do not accept a block if the database is inconsistent.
		if inconsistencyDetected(tx) {
			return errInconsistentSet
		}

		// Do some relatively inexpensive checks to validate the header and block.
		// Validation generally occurs in the order of least expensive validation
		// first.
		err := cs.validateHeaderAndBlock(boltTxWrapper{tx}, b)
		if err != nil {
			// If the block is in the near future, but too far to be acceptable, then
			// save the block and add it to the consensus set after it is no longer
			// too far in the future.
			//
			// TODO: an attacker could mine many blocks off the genesis block all in the
			// future and we would spawn a goroutine per each block. To fix this, either
			// ban peers that send lots of future blocks or stop spawning goroutines
			// after we are already waiting on a large number of future blocks.
			//
			// TODO: an attacker could broadcast a future block many times and we would
			// spawn a goroutine for each broadcast. To fix this we should create a
			// cache of future blocks, like we already do for DoS blocks, and only spawn
			// a goroutine if we haven't already spawned one for that block. To limit
			// the size of the cache of future blocks, make it a constant size (say 50)
			// over which we would evict the block furthest in the future before adding
			// a new block to the cache.
			if err == errFutureTimestamp {
				go func() {
					time.Sleep(time.Duration(b.Timestamp-(types.CurrentTimestamp()+types.FutureThreshold)) * time.Second)
					err := cs.managedAcceptBlock(b)
					if err != nil {
						cs.log.Debugln("WARN: failed to accept a future block:", err)
					}
					cs.managedBroadcastBlock(b)
				}()
			}
			return err
		}
		return nil
	})
	if err != nil {
		cs.mu.Unlock()
		return err
	}

	// Try adding the block to the block tree. This call will perform
	// verification on the block before adding the block to the block tree. An
	// error is returned if verification fails or if the block does not extend
	// the longest fork.
	changeEntry, err := cs.addBlockToTree(b)
	if err != nil {
		cs.mu.Unlock()
		return err
	}
	// If appliedBlocks is 0, revertedBlocks will also be 0.
	if build.DEBUG && len(changeEntry.AppliedBlocks) == 0 && len(changeEntry.RevertedBlocks) != 0 {
		panic("appliedBlocks and revertedBlocks are mismatched!")
	}

	// Updates complete, demote the lock.
	cs.mu.Demote()
	defer cs.mu.DemotedUnlock()
	if len(changeEntry.AppliedBlocks) > 0 {
		cs.readlockUpdateSubscribers(changeEntry)
	}
	return nil
}
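The second TODO above suggests a bounded cache of future blocks so that repeated broadcasts cannot spawn unbounded goroutines. A sketch of that idea under the stated constraints (all names here are hypothetical, not part of the real module):

const maxFutureBlocks = 50

// futureBlockCache remembers pending future blocks, evicting the block
// furthest in the future once the cache is full.
type futureBlockCache struct {
	mu     sync.Mutex
	blocks map[types.BlockID]types.Block
}

func newFutureBlockCache() *futureBlockCache {
	return &futureBlockCache{blocks: make(map[types.BlockID]types.Block)}
}

// add reports whether the block was newly cached; callers would only spawn
// a waiting goroutine when add returns true.
func (c *futureBlockCache) add(b types.Block) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	id := b.ID()
	if _, exists := c.blocks[id]; exists {
		return false // already waiting on this block
	}
	if len(c.blocks) >= maxFutureBlocks {
		// Find the cached block furthest in the future.
		var farID types.BlockID
		var farTS types.Timestamp
		for cid, cb := range c.blocks {
			if cb.Timestamp >= farTS {
				farID, farTS = cid, cb.Timestamp
			}
		}
		if b.Timestamp >= farTS {
			return false // even further out than everything cached; drop it
		}
		delete(c.blocks, farID)
	}
	c.blocks[id] = b
	return true
}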