Example #1
// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending on what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send to a subset of the peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.blockchain.GetBlock(block.ParentHash()); parent != nil {
			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash()))
		} else {
			glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
			return
		}
		// Send the block to a subset of our peers
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.SendNewBlock(block, td)
		}
		glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if pm.blockchain.HasBlock(hash) {
		for _, peer := range peers {
			if peer.version < eth62 {
				peer.SendNewBlockHashes61([]common.Hash{hash})
			} else {
				peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
			}
		}
		glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
	}
}
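
The interesting detail above is the fan-out: only a square-root-sized subset of peers receives the full block, while the rest merely get the hash announced, which bounds bandwidth while still flooding the network quickly. Below is a minimal, self-contained sketch of that selection; the Peer type and the send callback are hypothetical stand-ins, not the eth package's types.

package main

import (
	"fmt"
	"math"
)

type Peer struct{ ID string }

// propagateToSubset sends the full payload to roughly sqrt(n) of the peers and
// reports how many were contacted; the remaining peers would only receive a
// hash announcement.
func propagateToSubset(peers []Peer, send func(Peer)) int {
	n := int(math.Sqrt(float64(len(peers))))
	for _, p := range peers[:n] {
		send(p)
	}
	return n
}

func main() {
	peers := []Peer{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}}
	sent := propagateToSubset(peers, func(p Peer) { fmt.Println("sending full block to", p.ID) })
	fmt.Printf("propagated to %d of %d peers\n", sent, len(peers))
}
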
Example #2
// NewBlock creates a new QML Block from a chain block
func NewBlock(block *types.Block) *Block {
	if block == nil {
		return &Block{}
	}

	ptxs := make([]*Transaction, len(block.Transactions()))
	/*
		for i, tx := range block.Transactions() {
			ptxs[i] = NewTx(tx)
		}
	*/
	txlist := common.NewList(ptxs)

	puncles := make([]*Block, len(block.Uncles()))
	/*
		for i, uncle := range block.Uncles() {
			puncles[i] = NewBlock(types.NewBlockWithHeader(uncle))
		}
	*/
	ulist := common.NewList(puncles)

	return &Block{
		ref: block, Size: block.Size().String(),
		Number: int(block.NumberU64()), GasUsed: block.GasUsed().String(),
		GasLimit: block.GasLimit().String(), Hash: block.Hash().Hex(),
		Transactions: txlist, Uncles: ulist,
		Time:     block.Time(),
		Coinbase: block.Coinbase().Hex(),
		PrevHash: block.ParentHash().Hex(),
		Bloom:    common.ToHex(block.Bloom().Bytes()),
		Raw:      block.String(),
	}
}
Example #3
// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", peer, block.NumberU64(), hash[:4])
	go func() {
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			return
		}
		// Quickly validate the header and propagate the block if it passes
		if err := f.validateBlock(block, parent); err != nil {
			glog.V(logger.Debug).Infof("Peer %s: block #%d [%x] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
			f.dropPeer(peer)
			return
		}
		go f.broadcastBlock(block, true)

		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			glog.V(logger.Warn).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
			return
		}
		// If import succeeded, broadcast the block
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}
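
The shape worth noting here is the deferred send on f.done: whichever path the goroutine takes (unknown parent, failed validation, failed import, or success), the fetcher always learns that this block is finished. A small, self-contained sketch of that pattern follows; the function and names are illustrative assumptions, not the fetcher's real API.

package main

import "fmt"

// importAsync runs work on its own goroutine and always signals completion on
// the done channel via defer, even when it bails out early on an error.
func importAsync(hash string, done chan<- string, work func() error) {
	go func() {
		defer func() { done <- hash }()
		if err := work(); err != nil {
			fmt.Println("import failed:", err)
			return
		}
		fmt.Println("imported", hash)
	}()
}

func main() {
	done := make(chan string)
	importAsync("0xabcd", done, func() error { return nil })
	fmt.Println("done:", <-done)
}
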
Example #4
// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = self.GetBlock(block.ParentHash())
	}

	return
}
Example #5
// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = self.GetBlock(block.ParentHash())
	}
	return uncles
}
Example #6
// GetAncestors returns the given block's ancestors, walking the chain backwards
// until at most length blocks have been collected.
func (self *ChainManager) GetAncestors(block *types.Block, length int) (blocks []*types.Block) {
	for i := 0; i < length; i++ {
		block = self.GetBlock(block.ParentHash())
		if block == nil {
			break
		}

		blocks = append(blocks, block)
	}

	return
}
Example #7
// diff takes an old and a new block and reconstructs the blocks that need to be
// inserted to become part of the new canonical chain.
func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, error) {
	var (
		newChain    types.Blocks
		commonBlock *types.Block
		oldStart    = oldBlock
		newStart    = newBlock
	)

	// first reduce whichever chain is higher
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return nil, fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return nil, fmt.Errorf("Invalid new chain")
	}

	numSplit := newBlock.Number()
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		newChain = append(newChain, newBlock)

		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
		if oldBlock == nil {
			return nil, fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return nil, fmt.Errorf("Invalid new chain")
		}
	}

	if glog.V(logger.Debug) {
		commonHash := commonBlock.Hash()
		glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
	}

	return newChain, nil
}
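
The search above is a two-phase fork-point walk: first the higher chain is reduced to the other's height, then both chains step back in lockstep until the hashes match. The following self-contained sketch shows the same idea over a toy in-memory chain; the types and lookup map are assumptions for illustration, not go-ethereum's.

package main

import "fmt"

type toyBlock struct {
	hash, parent string
	number       uint64
}

// commonAncestor walks parent links through the lookup table until both
// branches point at the same block, returning nil if either branch dangles.
func commonAncestor(chain map[string]*toyBlock, a, b *toyBlock) *toyBlock {
	// First reduce whichever branch is higher to the same height.
	for a != nil && b != nil && a.number > b.number {
		a = chain[a.parent]
	}
	for a != nil && b != nil && b.number > a.number {
		b = chain[b.parent]
	}
	// Then step both back in lockstep until the hashes match.
	for a != nil && b != nil && a.hash != b.hash {
		a, b = chain[a.parent], chain[b.parent]
	}
	if a == nil || b == nil {
		return nil
	}
	return a
}

func main() {
	chain := map[string]*toyBlock{
		"g":  {hash: "g", number: 0},
		"a1": {hash: "a1", parent: "g", number: 1},
		"a2": {hash: "a2", parent: "a1", number: 2},
		"b1": {hash: "b1", parent: "g", number: 1},
		"b2": {hash: "b2", parent: "b1", number: 2},
		"b3": {hash: "b3", parent: "b2", number: 3},
	}
	fmt.Println(commonAncestor(chain, chain["a2"], chain["b3"]).hash) // g
}
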
Example #8
// WriteBlock writes the block to the chain (or pending queue)
func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status writeStatus, err error) {
	self.wg.Add(1)
	defer self.wg.Done()

	cblock := self.currentBlock
	// Compare the TD of the last known block in the canonical chain to make sure it's greater.
	// At this point it's possible that a different chain (fork) becomes the new canonical chain.
	if block.Td.Cmp(self.Td()) > 0 {
		// chain fork
		if block.ParentHash() != cblock.Hash() {
			// during split we merge two different chains and create the new canonical chain
			err := self.merge(cblock, block)
			if err != nil {
				return nonStatTy, err
			}

			status = splitStatTy
		}

		self.mu.Lock()
		self.setTotalDifficulty(block.Td)
		self.insert(block)
		self.mu.Unlock()

		self.setTransState(state.New(block.Root(), self.stateDb))
		self.txState.SetState(state.New(block.Root(), self.stateDb))

		status = canonStatTy
	} else {
		status = sideStatTy
	}

	if queued {
		// Write block to database. Eventually we'll have to improve on this and throw away blocks that are
		// not in the canonical chain.
		self.mu.Lock()
		self.enqueueForWrite(block)
		self.mu.Unlock()
	} else {
		self.write(block)
	}
	// Delete from future blocks
	self.futureBlocks.Remove(block.Hash())

	return
}
Example #9
// Process will attempt to process the given block's transactions and apply them
// on top of the block's parent state (given it exists), returning whether it was
// successful or not.
func (sm *BlockProcessor) Process(block *types.Block) (logs vm.Logs, receipts types.Receipts, err error) {
	// Processing blocks may never happen simultaneously
	sm.mutex.Lock()
	defer sm.mutex.Unlock()

	if sm.bc.HasBlock(block.Hash()) {
		if _, err := state.New(block.Root(), sm.chainDb); err == nil {
			return nil, nil, &KnownBlockError{block.Number(), block.Hash()}
		}
	}
	if parent := sm.bc.GetBlock(block.ParentHash()); parent != nil {
		if _, err := state.New(parent.Root(), sm.chainDb); err == nil {
			return sm.processWithParent(block, parent)
		}
	}
	return nil, nil, ParentError(block.ParentHash())
}
Example #10
// VerifyUncles verifies the given block's uncles and applies the Ethereum
// consensus rules to the various block headers included; it will return an
// error if any of the included uncle headers were invalid.
func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
	// validate that there are at most 2 uncles included in this block
	if len(block.Uncles()) > 2 {
		return ValidationError("Block can only contain maximum 2 uncles (contained %v)", len(block.Uncles()))
	}

	uncles := set.New()
	ancestors := make(map[common.Hash]*types.Block)
	for _, ancestor := range v.bc.GetBlocksFromHash(block.ParentHash(), 7) {
		ancestors[ancestor.Hash()] = ancestor
		// Include the ancestors' uncles in the uncle set. Uncles must be unique.
		for _, uncle := range ancestor.Uncles() {
			uncles.Add(uncle.Hash())
		}
	}
	ancestors[block.Hash()] = block
	uncles.Add(block.Hash())

	for i, uncle := range block.Uncles() {
		hash := uncle.Hash()
		if uncles.Has(hash) {
			// Error not unique
			return UncleError("uncle[%d](%x) not unique", i, hash[:4])
		}
		uncles.Add(hash)

		if ancestors[hash] != nil {
			branch := fmt.Sprintf("  O - %x\n  |\n", block.Hash())
			for h := range ancestors {
				branch += fmt.Sprintf("  O - %x\n  |\n", h)
			}
			glog.Infoln(branch)
			return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
		}

		if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
			return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
		}

		if err := ValidateHeader(v.config, v.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
			return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
		}
	}

	return nil
}
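
The loop above enforces three rules per uncle: it must not have been seen before (in this block or the last seven ancestors), it must not itself be an ancestor, and its parent must be a known ancestor other than the block's own parent. A toy version of those checks follows, with plain maps standing in for the set package; all names are illustrative, not go-ethereum's.

package main

import "fmt"

type toyUncle struct{ hash, parentHash string }

// checkUncle applies the uniqueness and ancestry rules to a single uncle.
// ancestors holds the hashes of the block's recent ancestors, seen holds every
// uncle hash already accounted for, and blockParent is the block's own parent.
func checkUncle(u toyUncle, blockParent string, ancestors, seen map[string]bool) error {
	if seen[u.hash] {
		return fmt.Errorf("uncle %s not unique", u.hash)
	}
	seen[u.hash] = true
	if ancestors[u.hash] {
		return fmt.Errorf("uncle %s is an ancestor", u.hash)
	}
	if !ancestors[u.parentHash] || u.parentHash == blockParent {
		return fmt.Errorf("uncle %s's parent is not a usable ancestor", u.hash)
	}
	return nil
}

func main() {
	ancestors := map[string]bool{"a1": true, "a2": true, "a3": true}
	seen := map[string]bool{}
	fmt.Println(checkUncle(toyUncle{"u1", "a2"}, "a3", ancestors, seen)) // ok
	fmt.Println(checkUncle(toyUncle{"u1", "a2"}, "a3", ancestors, seen)) // not unique
	fmt.Println(checkUncle(toyUncle{"u2", "a3"}, "a3", ancestors, seen)) // parent is the block's own parent
}
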
Example #11
// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", peer, block.NumberU64(), hash[:4])
	go func() {
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.validateBlock(block, parent); err {
		case nil:
			// All ok, quickly propagate to our peers
			broadcastTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case core.BlockFutureErr:
			futureMeter.Mark(1)
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			glog.V(logger.Debug).Infof("Peer %s: block #%d [%x] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			glog.V(logger.Warn).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
			return
		}
		// If import succeeded, broadcast the block
		announceTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}
Example #12
func NewBlockRes(block *types.Block, td *big.Int, fullTx bool) *BlockRes {
	if block == nil {
		return nil
	}

	res := new(BlockRes)
	res.fullTx = fullTx
	res.BlockNumber = newHexNum(block.Number())
	res.BlockHash = newHexData(block.Hash())
	res.ParentHash = newHexData(block.ParentHash())
	res.Nonce = newHexData(block.Nonce())
	res.Sha3Uncles = newHexData(block.UncleHash())
	res.LogsBloom = newHexData(block.Bloom())
	res.TransactionRoot = newHexData(block.TxHash())
	res.StateRoot = newHexData(block.Root())
	res.ReceiptRoot = newHexData(block.ReceiptHash())
	res.Miner = newHexData(block.Coinbase())
	res.Difficulty = newHexNum(block.Difficulty())
	res.TotalDifficulty = newHexNum(td)
	res.Size = newHexNum(block.Size().Int64())
	res.ExtraData = newHexData(block.Extra())
	res.GasLimit = newHexNum(block.GasLimit())
	res.GasUsed = newHexNum(block.GasUsed())
	res.UnixTimestamp = newHexNum(block.Time())

	txs := block.Transactions()
	res.Transactions = make([]*TransactionRes, len(txs))
	for i, tx := range txs {
		res.Transactions[i] = NewTransactionRes(tx)
		res.Transactions[i].BlockHash = res.BlockHash
		res.Transactions[i].BlockNumber = res.BlockNumber
		res.Transactions[i].TxIndex = newHexNum(i)
	}

	uncles := block.Uncles()
	res.Uncles = make([]*UncleRes, len(uncles))
	for i, uncle := range uncles {
		res.Uncles[i] = NewUncleRes(uncle)
	}

	return res
}
Example #13
// GetLogs returns the logs of the given block. This method uses a two-step approach:
// it first tries to retrieve the logs from the block's receipts and, failing that,
// falls back to the deprecated way of re-processing the block.
func (sm *BlockProcessor) GetLogs(block *types.Block) (logs state.Logs, err error) {
	receipts := GetReceiptsFromBlock(sm.extraDb, block)
	if len(receipts) > 0 {
		// coalesce logs
		for _, receipt := range receipts {
			logs = append(logs, receipt.Logs()...)
		}
		return
	}

	// TODO: remove backward compatibility
	var (
		parent = sm.bc.GetBlock(block.ParentHash())
		state  = state.New(parent.Root(), sm.db)
	)

	sm.TransitionState(state, parent, block, true)

	return state.Logs(), nil
}
Example #14
func (self *ImportMaster) importBlock(block *types.Block) {
	blockHash := block.Header().Hash().Hex()
	txAmount := uint64(len(block.Transactions()))

	glog.V(logger.Info).Infoln("Importing block", blockHash, "Hash with ", txAmount, "transactions")
	extData := string(block.Header().Extra[:])

	err := self.blockCollection.Insert(&Block{
		blockHash, block.ParentHash().Hex(), block.Header().UncleHash.Hex(),
		block.Header().Coinbase.Hex(), block.Header().Root.Hex(), block.Header().TxHash.Hex(),
		block.Header().ReceiptHash.Hex(), block.Header().Number.String(), block.Header().Difficulty.String(),
		block.Header().GasLimit.String(), block.Header().GasUsed.String(), block.Header().Time,
		txAmount, extData, string(block.Nonce()), block.Size().String(),
		block.Header().MixDigest.Hex(), false, nil,
	})
	if err != nil {
		clilogger.Infoln(err)
	}
	result := Block{}
	err = self.blockCollection.Find(bson.M{"block_hash": blockHash}).One(&result)
	if err != nil {
		utils.Fatalf("Could not find the block we just added, saving faild: %v", err)
	}
	for _, tx := range block.Transactions() {
		self.importTx(tx, result.Id)
	}
}
Example #15
// WriteBlock writes the block to the chain (or pending queue)
func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status writeStatus, err error) {
	self.wg.Add(1)
	defer self.wg.Done()

	cblock := self.currentBlock
	// Compare the TD of the last known block in the canonical chain to make sure it's greater.
	// At this point it's possible that a different chain (fork) becomes the new canonical chain.
	if block.Td.Cmp(self.Td()) > 0 {
		// chain fork
		if block.ParentHash() != cblock.Hash() {
			// during split we merge two different chains and create the new canonical chain
			err := self.merge(cblock, block)
			if err != nil {
				return NonStatTy, err
			}

			status = SplitStatTy
		}

		self.mu.Lock()
		self.setTotalDifficulty(block.Td)
		self.insert(block)
		self.mu.Unlock()

		self.setTransState(state.New(block.Root(), self.stateDb))
		self.txState.SetState(state.New(block.Root(), self.stateDb))

		status = CanonStatTy
	} else {
		status = SideStatTy
	}

	err = WriteBlock(self.blockDb, block)
	if err != nil {
		glog.Fatalln("db err:", err)
	}
	// Delete from future blocks
	self.futureBlocks.Remove(block.Hash())

	return
}
Example #16
// VerifyUncles verifies the given block's uncles and applies the Ethereum
// consensus rules to the various block headers included; it will return an
// error if any of the included uncle headers were invalid.
func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *types.Block) error {
	uncles := set.New()
	ancestors := make(map[common.Hash]*types.Block)
	for _, ancestor := range sm.bc.GetBlocksFromHash(block.ParentHash(), 7) {
		ancestors[ancestor.Hash()] = ancestor
		// Include the ancestors' uncles in the uncle set. Uncles must be unique.
		for _, uncle := range ancestor.Uncles() {
			uncles.Add(uncle.Hash())
		}
	}
	ancestors[block.Hash()] = block
	uncles.Add(block.Hash())

	for i, uncle := range block.Uncles() {
		hash := uncle.Hash()
		if uncles.Has(hash) {
			// Error not unique
			return UncleError("uncle[%d](%x) not unique", i, hash[:4])
		}
		uncles.Add(hash)

		if ancestors[hash] != nil {
			branch := fmt.Sprintf("  O - %x\n  |\n", block.Hash())
			for h := range ancestors {
				branch += fmt.Sprintf("  O - %x\n  |\n", h)
			}
			glog.Infoln(branch)
			return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
		}

		if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
			return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
		}

		if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
			return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
		}
	}

	return nil
}
Example #17
// diff takes an old and a new block and reconstructs the blocks that need to be
// inserted to become part of the new canonical chain.
func (self *ChainManager) diff(oldBlock, newBlock *types.Block) types.Blocks {
	glog.V(logger.Debug).Infof("Applying diff to %x & %x\n", oldBlock.Hash().Bytes()[:4], newBlock.Hash().Bytes()[:4])

	var newChain types.Blocks
	// first find common number
	for ; newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
		newChain = append(newChain, newBlock)
	}

	glog.V(logger.Debug).Infoln("Found common number", newBlock.Number())
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			break
		}
		newChain = append(newChain, newBlock)

		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
	}

	return newChain
}
Example #18
// Process will attempt to process the given block's transactions and apply them
// on top of the block's parent state (given it exists), returning whether it was
// successful or not.
func (sm *BlockProcessor) Process(block *types.Block) (logs state.Logs, receipts types.Receipts, err error) {
	// Processing blocks may never happen simultaneously
	sm.mutex.Lock()
	defer sm.mutex.Unlock()

	if sm.bc.HasBlock(block.Hash()) {
		return nil, nil, &KnownBlockError{block.Number(), block.Hash()}
	}

	if !sm.bc.HasBlock(block.ParentHash()) {
		return nil, nil, ParentError(block.ParentHash())
	}
	parent := sm.bc.GetBlock(block.ParentHash())
	return sm.processWithParent(block, parent)
}
Example #19
// WriteBlock writes the block to the chain.
func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, err error) {
	self.wg.Add(1)
	defer self.wg.Done()

	// Calculate the total difficulty of the block
	ptd := self.GetTd(block.ParentHash())
	if ptd == nil {
		return NonStatTy, ParentError(block.ParentHash())
	}
	td := new(big.Int).Add(block.Difficulty(), ptd)

	self.mu.RLock()
	cblock := self.currentBlock
	self.mu.RUnlock()

	// Compare the TD of the last known block in the canonical chain to make sure it's greater.
	// At this point it's possible that a different chain (fork) becomes the new canonical chain.
	if td.Cmp(self.Td()) > 0 {
		// chain fork
		if block.ParentHash() != cblock.Hash() {
			// during split we merge two different chains and create the new canonical chain
			err := self.reorg(cblock, block)
			if err != nil {
				return NonStatTy, err
			}
		}
		status = CanonStatTy

		self.mu.Lock()
		self.setTotalDifficulty(td)
		self.insert(block)
		self.mu.Unlock()
	} else {
		status = SideStatTy
	}

	if err := WriteTd(self.chainDb, block.Hash(), td); err != nil {
		glog.Fatalf("failed to write block total difficulty: %v", err)
	}
	if err := WriteBlock(self.chainDb, block); err != nil {
		glog.Fatalf("filed to write block contents: %v", err)
	}
	// Delete from future blocks
	self.futureBlocks.Remove(block.Hash())

	return
}
Example #20
// WriteBlock writes the block to the chain.
func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err error) {
	self.wg.Add(1)
	defer self.wg.Done()

	// Calculate the total difficulty of the block
	ptd := self.GetTd(block.ParentHash())
	if ptd == nil {
		return NonStatTy, ParentError(block.ParentHash())
	}

	localTd := self.GetTd(self.currentBlock.Hash())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Make sure no inconsistent state is leaked during insertion
	self.mu.Lock()
	defer self.mu.Unlock()

	// If the total difficulty is higher than our known total difficulty, add the block to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != self.currentBlock.Hash() {
			if err := self.reorg(self.currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Insert the block as the new head of the chain
		self.insert(block)
		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	// Irrespective of the canonical status, write the block itself to the database
	if err := self.hc.WriteTd(block.Hash(), externTd); err != nil {
		glog.Fatalf("failed to write block total difficulty: %v", err)
	}
	if err := WriteBlock(self.chainDb, block); err != nil {
		glog.Fatalf("failed to write block contents: %v", err)
	}

	self.futureBlocks.Remove(block.Hash())

	return
}
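
The head-selection rule above only reorganises when the incoming block carries strictly more total difficulty, breaking exact ties with a coin flip to blunt the selfish-mining strategy described in the referenced paper. A self-contained sketch of just that decision follows; the function signature is a toy, not the BlockChain API.

package main

import (
	"fmt"
	"math/big"
	mrand "math/rand"
)

// shouldReorg reports whether a block with external total difficulty externTd
// should displace the current head whose total difficulty is localTd.
func shouldReorg(localTd, externTd *big.Int) bool {
	switch externTd.Cmp(localTd) {
	case 1:
		return true
	case 0:
		// Equal work: accept roughly half the time so a selfish miner cannot
		// reliably win ties by withholding blocks.
		return mrand.Float64() < 0.5
	default:
		return false
	}
}

func main() {
	local := big.NewInt(1000)
	fmt.Println(shouldReorg(local, big.NewInt(1001))) // true
	fmt.Println(shouldReorg(local, big.NewInt(999)))  // false
	fmt.Println(shouldReorg(local, big.NewInt(1000))) // true or false, at random
}
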
Example #21
func (sm *BlockProcessor) RetryProcess(block *types.Block) (logs state.Logs, err error) {
	// Processing blocks may never happen simultaneously
	sm.mutex.Lock()
	defer sm.mutex.Unlock()

	if !sm.bc.HasBlock(block.ParentHash()) {
		return nil, ParentError(block.ParentHash())
	}
	parent := sm.bc.GetBlock(block.ParentHash())

	// FIXME Change to full header validation. See #1225
	errch := make(chan bool)
	go func() { errch <- sm.Pow.Verify(block) }()

	logs, _, err = sm.processWithParent(block, parent)
	if !<-errch {
		return nil, ValidationError("Block's nonce is invalid (= %x)", block.Nonce())
	}

	return logs, err
}
Example #22
// WriteBlock writes the block to the chain.
func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) {
	self.wg.Add(1)
	defer self.wg.Done()

	// Calculate the total difficulty of the block
	ptd := self.GetTd(block.ParentHash())
	if ptd == nil {
		return NonStatTy, ParentError(block.ParentHash())
	}
	td := new(big.Int).Add(block.Difficulty(), ptd)

	// Make sure no inconsistent state is leaked during insertion
	self.mu.Lock()
	defer self.mu.Unlock()

	// If the total difficulty is higher than our known total difficulty, add the block to the canonical chain
	if td.Cmp(self.GetTd(self.currentBlock.Hash())) > 0 {
		// Reorganize the chain if the parent is not the head block
		if block.ParentHash() != self.currentBlock.Hash() {
			if err := self.reorg(self.currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Insert the block as the new head of the chain
		self.insert(block)
		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	// Irrespective of the canonical status, write the block itself to the database
	if err := WriteTd(self.chainDb, block.Hash(), td); err != nil {
		glog.Fatalf("failed to write block total difficulty: %v", err)
	}
	if err := WriteBlock(self.chainDb, block); err != nil {
		glog.Fatalf("filed to write block contents: %v", err)
	}
	self.futureBlocks.Remove(block.Hash())

	return
}
Example #23
// ValidateBlock validates the given block's header and uncles and verifies the
// block header's transaction and uncle roots.
//
// ValidateBlock does not validate the header's pow. The pow is validated
// separately so that blocks can be processed in parallel.
//
// ValidateBlock also checks that any previous (or present) state referenced by
// the block is actually available, to make sure that fast sync has done its job
// properly. This prevents the block validator from accepting false positives
// where a header is present but the state is not.
func (v *BlockValidator) ValidateBlock(block *types.Block) error {
	if v.bc.HasBlock(block.Hash()) {
		if _, err := state.New(block.Root(), v.bc.chainDb); err == nil {
			return &KnownBlockError{block.Number(), block.Hash()}
		}
	}
	parent := v.bc.GetBlock(block.ParentHash())
	if parent == nil {
		return ParentError(block.ParentHash())
	}
	if _, err := state.New(parent.Root(), v.bc.chainDb); err != nil {
		return ParentError(block.ParentHash())
	}

	header := block.Header()
	// validate the block header
	if err := ValidateHeader(v.config, v.Pow, header, parent.Header(), false, false); err != nil {
		return err
	}
	// verify the uncles are correctly rewarded
	if err := v.VerifyUncles(block, parent); err != nil {
		return err
	}

	// Verify UncleHash before running other uncle validations
	unclesSha := types.CalcUncleHash(block.Uncles())
	if unclesSha != header.UncleHash {
		return fmt.Errorf("invalid uncles root hash. received=%x calculated=%x", header.UncleHash, unclesSha)
	}

	// The transactions Trie's root (R = (Tr [[i, RLP(T1)], [i, RLP(T2)], ... [n, RLP(Tn)]]))
	// can be used by light clients to make sure they've received the correct Txs
	txSha := types.DeriveSha(block.Transactions())
	if txSha != header.TxHash {
		return fmt.Errorf("invalid transaction root hash. received=%x calculated=%x", header.TxHash, txSha)
	}

	return nil
}
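
The two root checks at the end share one shape: recompute a commitment over the block's contents (DeriveSha over the transactions, CalcUncleHash over the uncles) and require that it equals the value committed in the header. The simplified sketch below illustrates that pattern with a plain hash instead of a trie root; the names and hashing scheme are assumptions for illustration only.

package main

import (
	"crypto/sha256"
	"fmt"
)

// contentDigest computes a single digest over all items, standing in for a
// trie-root computation.
func contentDigest(items []string) [32]byte {
	h := sha256.New()
	for _, it := range items {
		h.Write([]byte(it))
	}
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

// verifyCommitment recomputes the digest and compares it against the value the
// header claims, mirroring the TxHash/UncleHash checks above.
func verifyCommitment(headerRoot [32]byte, items []string) error {
	if got := contentDigest(items); got != headerRoot {
		return fmt.Errorf("invalid root: header=%x calculated=%x", headerRoot, got)
	}
	return nil
}

func main() {
	txs := []string{"tx1", "tx2"}
	root := contentDigest(txs)
	fmt.Println(verifyCommitment(root, txs))                         // <nil>
	fmt.Println(verifyCommitment(root, []string{"tx1", "tampered"})) // error
}
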
Example #24
// reorg takes an old and a new block, reconstructs the blocks that need to be
// inserted to become part of the new canonical chain, accumulates potentially
// missing transactions and posts an event about them.
func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		commonBlock *types.Block
		oldStart    = oldBlock
		newStart    = newBlock
		deletedTxs  types.Transactions
	)

	// first reduce whichever chain is higher
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}

	numSplit := newBlock.Number()
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)

		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}

	if glog.V(logger.Debug) {
		commonHash := commonBlock.Hash()
		glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
	}

	var addedTxs types.Transactions
	// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
	for _, block := range newChain {
		// insert the block in the canonical way, re-writing history
		self.insert(block)
		// write canonical receipts and transactions
		if err := PutTransactions(self.chainDb, block, block.Transactions()); err != nil {
			return err
		}
		receipts := GetBlockReceipts(self.chainDb, block.Hash())
		// write receipts
		if err := PutReceipts(self.chainDb, receipts); err != nil {
			return err
		}
		// Write mipmap bloom filters
		if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
			return err
		}

		addedTxs = append(addedTxs, block.Transactions()...)
	}

	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	for _, tx := range diff {
		DeleteReceipt(self.chainDb, tx.Hash())
		DeleteTransaction(self.chainDb, tx.Hash())
	}
	// Must be posted in a goroutine because of the transaction pool trying
	// to acquire the chain manager lock
	go self.eventMux.Post(RemovedTransactionEvent{diff})

	return nil
}
Example #25
// reorg takes an old and a new block, reconstructs the blocks that need to be
// inserted to become part of the new canonical chain, accumulates potentially
// missing transactions and posts an event about them.
func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain          types.Blocks
		oldChain          types.Blocks
		commonBlock       *types.Block
		oldStart          = oldBlock
		newStart          = newBlock
		deletedTxs        types.Transactions
		deletedLogs       vm.Logs
		deletedLogsByHash = make(map[common.Hash]vm.Logs)
		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted.
		collectLogs = func(h common.Hash) {
			// Coalesce logs
			receipts := GetBlockReceipts(self.chainDb, h)
			for _, receipt := range receipts {
				deletedLogs = append(deletedLogs, receipt.Logs...)

				deletedLogsByHash[h] = receipt.Logs
			}
		}
	)

	// first reduce whichever chain is higher
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)

			collectLogs(oldBlock.Hash())
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}

	numSplit := newBlock.Number()
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}

		oldChain = append(oldChain, oldBlock)
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash())

		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}

	if glog.V(logger.Debug) {
		commonHash := commonBlock.Hash()
		glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
	}

	var addedTxs types.Transactions
	// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
	for _, block := range newChain {
		// insert the block in the canonical way, re-writing history
		self.insert(block)
		// write canonical receipts and transactions
		if err := WriteTransactions(self.chainDb, block); err != nil {
			return err
		}
		receipts := GetBlockReceipts(self.chainDb, block.Hash())
		// write receipts
		if err := WriteReceipts(self.chainDb, receipts); err != nil {
			return err
		}
		// Write mipmap bloom filters
		if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
			return err
		}
		addedTxs = append(addedTxs, block.Transactions()...)
	}

	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	for _, tx := range diff {
		DeleteReceipt(self.chainDb, tx.Hash())
		DeleteTransaction(self.chainDb, tx.Hash())
	}
	// Must be posted in a goroutine because of the transaction pool trying
	// to acquire the chain manager lock
	if len(diff) > 0 {
		go self.eventMux.Post(RemovedTransactionEvent{diff})
	}
	if len(deletedLogs) > 0 {
		go self.eventMux.Post(RemovedLogsEvent{deletedLogs})
	}

	if len(oldChain) > 0 {
		go func() {
			for _, block := range oldChain {
				self.eventMux.Post(ChainSideEvent{Block: block, Logs: deletedLogsByHash[block.Hash()]})
			}
		}()
	}

	return nil
}
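
After the new chain is written, types.TxDifference determines which transactions existed only on the losing fork so that their receipts and lookup entries can be dropped. A minimal sketch of that set difference over toy transactions follows; the types are illustrative, keyed on hashes much like the real helper.

package main

import "fmt"

type toyTx struct{ hash string }

// txDifference returns the transactions in a that are not present in b.
func txDifference(a, b []toyTx) []toyTx {
	keep := make(map[string]bool, len(b))
	for _, tx := range b {
		keep[tx.hash] = true
	}
	var diff []toyTx
	for _, tx := range a {
		if !keep[tx.hash] {
			diff = append(diff, tx)
		}
	}
	return diff
}

func main() {
	deleted := []toyTx{{"t1"}, {"t2"}, {"t3"}}
	added := []toyTx{{"t2"}}
	// t1 and t3 only existed on the losing fork; their receipts and lookup
	// entries would be removed from the database.
	fmt.Println(txDifference(deleted, added))
}
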