Example #1
// lowestPrice returns the lowest possible price with which a tx was or could have been included
func (self *GasPriceOracle) lowestPrice(block *types.Block) *big.Int {
	gasUsed := big.NewInt(0)

	receipts := self.eth.BlockProcessor().GetBlockReceipts(block.Hash())
	if len(receipts) > 0 {
		if cgu := receipts[len(receipts)-1].CumulativeGasUsed; cgu != nil {
			gasUsed = cgu
		}
	}

	if new(big.Int).Mul(gasUsed, big.NewInt(100)).Cmp(new(big.Int).Mul(block.GasLimit(),
		big.NewInt(int64(self.eth.GpoFullBlockRatio)))) < 0 {
		// block is not full, could have posted a tx with MinGasPrice
		return big.NewInt(0)
	}

	txs := block.Transactions()
	if len(txs) == 0 {
		return big.NewInt(0)
	}
	// block is full, find smallest gasPrice
	minPrice := txs[0].GasPrice()
	for i := 1; i < len(txs); i++ {
		price := txs[i].GasPrice()
		if price.Cmp(minPrice) < 0 {
			minPrice = price
		}
	}
	return minPrice
}
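The fullness test above cross-multiplies to avoid fractions: the block counts as full once gasUsed*100 >= gasLimit*GpoFullBlockRatio. A minimal, self-contained sketch of that arithmetic; the ratio of 80 percent is an assumed example value, not necessarily the oracle's configured GpoFullBlockRatio:

package main

import (
	"fmt"
	"math/big"
)

// isFull reports whether gasUsed/gasLimit >= ratio percent, using the same
// cross-multiplication as the oracle: gasUsed*100 >= gasLimit*ratio.
func isFull(gasUsed, gasLimit *big.Int, ratio int64) bool {
	lhs := new(big.Int).Mul(gasUsed, big.NewInt(100))
	rhs := new(big.Int).Mul(gasLimit, big.NewInt(ratio))
	return lhs.Cmp(rhs) >= 0
}

func main() {
	used, limit := big.NewInt(2500000), big.NewInt(3141592)
	fmt.Println(isFull(used, limit, 80)) // ~79.6% full -> false, lowest price would be 0
}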
Example #2
func (self *BlockProcessor) ApplyTransactions(gp GasPool, statedb *state.StateDB, block *types.Block, txs types.Transactions, transientProcess bool) (types.Receipts, error) {
	var (
		receipts      types.Receipts
		totalUsedGas  = big.NewInt(0)
		err           error
		cumulativeSum = new(big.Int)
		header        = block.Header()
	)

	for i, tx := range txs {
		statedb.StartRecord(tx.Hash(), block.Hash(), i)

		receipt, txGas, err := self.ApplyTransaction(gp, statedb, header, tx, totalUsedGas, transientProcess)
		if err != nil {
			return nil, err
		}

		receipts = append(receipts, receipt)

		cumulativeSum.Add(cumulativeSum, new(big.Int).Mul(txGas, tx.GasPrice()))
	}

	if block.GasUsed().Cmp(totalUsedGas) != 0 {
		return nil, ValidationError(fmt.Sprintf("gas used error (%v / %v)", block.GasUsed(), totalUsedGas))
	}

	if transientProcess {
		go self.eventMux.Post(PendingBlockEvent{block, statedb.Logs()})
	}

	return receipts, err
}
Example #3
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
		discardMeter.Mark(1)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -float32(block.NumberU64()))

		if glog.V(logger.Debug) {
			glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
		}
	}
}
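The priority in f.queue.Push(op, -float32(block.NumberU64())) negates the block number, so in what appears to be a max-priority queue the lowest-numbered block pops first. A self-contained min-heap sketch (standard container/heap, not the fetcher's actual queue type) showing the same ordering:

package main

import (
	"container/heap"
	"fmt"
)

// blockHeap is a min-heap keyed on block number; pushing -number into a
// max-priority queue achieves the same "lowest number first" order.
type blockHeap []uint64

func (h blockHeap) Len() int            { return len(h) }
func (h blockHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h blockHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *blockHeap) Push(x interface{}) { *h = append(*h, x.(uint64)) }
func (h *blockHeap) Pop() interface{} {
	old := *h
	n := len(old)
	v := old[n-1]
	*h = old[:n-1]
	return v
}

func main() {
	h := &blockHeap{}
	for _, n := range []uint64{105, 101, 103} {
		heap.Push(h, n)
	}
	for h.Len() > 0 {
		fmt.Println(heap.Pop(h)) // 101, 103, 105: lowest block number imports first
	}
}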
Example #4
// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending on what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send to a subset of the peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.chainman.GetBlock(block.ParentHash()); parent != nil {
			td = new(big.Int).Add(parent.Td, block.Difficulty())
		} else {
			glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
			return
		}
		// Send the block to a subset of our peers
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.SendNewBlock(block, td)
		}
		glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if pm.chainman.HasBlock(hash) {
		for _, peer := range peers {
			peer.SendNewBlockHashes([]common.Hash{hash})
		}
		glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
	}
}
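The transfer slice above limits full-block propagation to the first ⌊√n⌋ of the n eligible peers; the rest only receive the hash announcement. A tiny sketch of that arithmetic:

package main

import (
	"fmt"
	"math"
)

func main() {
	// For n eligible peers the full block goes to floor(sqrt(n)) of them;
	// the remainder only receive the hash announcement.
	for _, n := range []int{1, 4, 10, 25, 100} {
		fmt.Printf("%3d peers -> propagate block to %d\n", n, int(math.Sqrt(float64(n))))
	}
}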
Example #5
// SendNewBlock propagates an entire block to a remote peer.
func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
	propBlockOutPacketsMeter.Mark(1)
	propBlockOutTrafficMeter.Mark(block.Size().Int64())

	p.knownBlocks.Add(block.Hash())
	return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
}
Example #6
// makeCurrent creates a new environment for the current cycle.
func (self *worker) makeCurrent(parent *types.Block, header *types.Header) {
	state := state.New(parent.Root(), self.eth.ChainDb())
	work := &Work{
		state:     state,
		ancestors: set.New(),
		family:    set.New(),
		uncles:    set.New(),
		header:    header,
		coinbase:  state.GetOrNewStateObject(self.coinbase),
		createdAt: time.Now(),
	}

	// when 08 is processed ancestors contain 07 (quick block)
	for _, ancestor := range self.chain.GetBlocksFromHash(parent.Hash(), 7) {
		for _, uncle := range ancestor.Uncles() {
			work.family.Add(uncle.Hash())
		}
		work.family.Add(ancestor.Hash())
		work.ancestors.Add(ancestor.Hash())
	}
	accounts, _ := self.eth.AccountManager().Accounts()

	// Keep track of transactions which return errors so they can be removed
	work.remove = set.New()
	work.tcount = 0
	work.ignoredTransactors = set.New()
	work.lowGasTransactors = set.New()
	work.ownedAccounts = accountAddressesSet(accounts)
	if self.current != nil {
		work.localMinedBlocks = self.current.localMinedBlocks
	}
	self.current = work
}
Example #7
func blockRecovery(ctx *cli.Context) {
	utils.CheckLegalese(ctx.GlobalString(utils.DataDirFlag.Name))

	arg := ctx.Args().First()
	if len(ctx.Args()) < 1 {
		glog.Fatal("recover requires block number or hash")
	}

	cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
	utils.CheckLegalese(cfg.DataDir)

	blockDb, err := ethdb.NewLDBDatabase(filepath.Join(cfg.DataDir, "blockchain"), cfg.DatabaseCache)
	if err != nil {
		glog.Fatalln("could not open db:", err)
	}

	var block *types.Block
	if arg[0] == '#' {
		block = core.GetBlockByNumber(blockDb, common.String2Big(arg[1:]).Uint64())
	} else {
		block = core.GetBlockByHash(blockDb, common.HexToHash(arg))
	}

	if block == nil {
		glog.Fatalln("block not found. Recovery failed")
	}

	err = core.WriteHead(blockDb, block)
	if err != nil {
		glog.Fatalln("block write err", err)
	}
	glog.Infof("Recovery succesful. New HEAD %x\n", block.Hash())
}
Example #8
// Creates a new QML Block from a chain block
func NewBlock(block *types.Block) *Block {
	if block == nil {
		return &Block{}
	}

	ptxs := make([]*Transaction, len(block.Transactions()))
	/*
		for i, tx := range block.Transactions() {
			ptxs[i] = NewTx(tx)
		}
	*/
	txlist := common.NewList(ptxs)

	puncles := make([]*Block, len(block.Uncles()))
	/*
		for i, uncle := range block.Uncles() {
			puncles[i] = NewBlock(types.NewBlockWithHeader(uncle))
		}
	*/
	ulist := common.NewList(puncles)

	return &Block{
		ref: block, Size: block.Size().String(),
		Number: int(block.NumberU64()), GasUsed: block.GasUsed().String(),
		GasLimit: block.GasLimit().String(), Hash: block.Hash().Hex(),
		Transactions: txlist, Uncles: ulist,
		Time:     block.Time(),
		Coinbase: block.Coinbase().Hex(),
		PrevHash: block.ParentHash().Hex(),
		Bloom:    common.ToHex(block.Bloom().Bytes()),
		Raw:      block.String(),
	}
}
Example #9
func (bc *ChainManager) removeBlock(block *types.Block) {
	if bc.sqlDB != nil {
		bc.sqlDB.DeleteBlock(block)
	}

	bc.chainDb.Delete(append(blockHashPre, block.Hash().Bytes()...))
}
Example #10
// WriteCanonNumber writes the canonical hash for the given block
func WriteCanonNumber(db common.Database, block *types.Block) error {
	key := append(blockNumPre, block.Number().Bytes()...)
	err := db.Put(key, block.Hash().Bytes())
	if err != nil {
		return err
	}
	return nil
}
Example #11
// GetLogs returns the logs of the given block. This method uses a two-step
// approach: it tries to get the logs from the (updated) method which reads them
// from the receipts, or falls back to the deprecated way of re-processing the
// block.
func (sm *BlockProcessor) GetLogs(block *types.Block) (logs state.Logs, err error) {
	receipts := GetBlockReceipts(sm.chainDb, block.Hash())
	// coalesce logs
	for _, receipt := range receipts {
		logs = append(logs, receipt.Logs()...)
	}
	return logs, nil
}
Example #12
// WriteHead force writes the current head
func WriteHead(db common.Database, block *types.Block) error {
	err := WriteCanonNumber(db, block)
	if err != nil {
		return err
	}
	err = db.Put([]byte("LastBlock"), block.Hash().Bytes())
	if err != nil {
		return err
	}
	return nil
}
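WriteCanonNumber and WriteHead together maintain two simple key/value mappings: prefix+blockNumber -> blockHash, and the literal "LastBlock" key -> blockHash. A sketch of that key layout; the prefix bytes here are an illustrative stand-in, not the actual blockNumPre constant:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Illustrative prefix; the real blockNumPre constant may differ.
	blockNumPre := []byte("block-num-")
	number := big.NewInt(314159)

	canonKey := append(append([]byte{}, blockNumPre...), number.Bytes()...)
	headKey := []byte("LastBlock")

	fmt.Printf("canonical number key: %x -> <block hash>\n", canonKey)
	fmt.Printf("head pointer key:     %x -> <block hash>\n", headKey)
}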
Example #13
func (self *SQLDB) InsertBlock(block *types.Block) {
	tx, err := self.db.Begin()
	if err != nil {
		glog.V(logger.Error).Infoln("SQL DB Begin:", err)
		return
	}

	stmtBlock, err := tx.Prepare(`insert or replace into shift_blocks(number, hash) values(?, ?)`)
	if err != nil {
		glog.V(logger.Error).Infoln("SQL DB:", err)
		tx.Rollback()
		return
	}
	defer stmtBlock.Close()

	stmtTrans, err := tx.Prepare(`insert or replace into shift_transactions(hash, blocknumber, sender, receiver) values(?, ?, ?, ?)`)
	if err != nil {
		glog.V(logger.Error).Infoln("SQL DB:", err)
		tx.Rollback()
		return
	}
	defer stmtTrans.Close()

	// block
	_, err = stmtBlock.Exec(block.Number().Uint64(), block.Hash().Hex())
	if err != nil {
		glog.V(logger.Error).Infoln("SQL DB:", err)
		tx.Rollback()
		return
	}
	// transactions

	for _, trans := range block.Transactions() {
		sender, err := trans.From()
		if err != nil {
			glog.V(logger.Error).Infoln("SQL DB:", err)
			continue
		}
		senderHex := sender.Hex()
		receiver := trans.To()
		receiverHex := ""
		if receiver != nil {
			receiverHex = receiver.Hex()
		}

		_, err = stmtTrans.Exec(trans.Hash().Hex(), block.Number().Uint64(), senderHex, receiverHex)
		if err != nil {
			glog.V(logger.Error).Infoln("SQL DB:", err)
			tx.Rollback()
			return
		}
	}

	if err := tx.Commit(); err != nil {
		glog.V(logger.Error).Infoln("SQL DB Commit:", err)
	}
}
Example #14
// makeChain creates a chain of n blocks starting at but not including
// parent. The returned hash chain is ordered head->parent.
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
	blocks := core.GenerateChain(parent, testdb, n, func(i int, gen *core.BlockGen) {
		gen.SetCoinbase(common.Address{seed})
	})
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()
	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent
	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		blockm[b.Hash()] = b
	}
	return hashes, blockm
}
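The index arithmetic in makeChain orders the hashes head-first: with n generated blocks the slice holds n+1 entries, the parent hash occupies the last slot, and block i (oldest first) lands at len-i-2. A tiny check of that layout with strings standing in for hashes:

package main

import "fmt"

func main() {
	n := 3
	hashes := make([]string, n+1)
	hashes[len(hashes)-1] = "parent"
	blocks := []string{"b1", "b2", "b3"} // oldest to newest
	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b
	}
	fmt.Println(hashes) // [b3 b2 b1 parent]: ordered head -> parent
}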
Example #15
// Process will attempt to process the given block's transactions and apply them
// on top of the block's parent state (given it exists) and will return whether
// it was successful or not.
func (sm *BlockProcessor) Process(block *types.Block) (logs state.Logs, receipts types.Receipts, err error) {
	// Processing blocks may never happen simultaneously
	sm.mutex.Lock()
	defer sm.mutex.Unlock()

	if sm.bc.HasBlock(block.Hash()) {
		return nil, nil, &KnownBlockError{block.Number(), block.Hash()}
	}

	if !sm.bc.HasBlock(block.ParentHash()) {
		return nil, nil, ParentError(block.ParentHash())
	}
	parent := sm.bc.GetBlock(block.ParentHash())
	return sm.processWithParent(block, parent)
}
Example #16
func (bc *ChainManager) SetHead(head *types.Block) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
		bc.removeBlock(block)
	}

	bc.cache, _ = lru.New(blockCacheLimit)
	bc.currentBlock = head
	bc.makeCache()

	bc.setTotalDifficulty(head.Td)
	bc.insert(head)
	bc.setLastState()
}
Example #17
// WriteBlock writes a block to the database
func WriteBlock(db common.Database, block *types.Block) error {
	tstart := time.Now()

	enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
	key := append(blockHashPre, block.Hash().Bytes()...)
	err := db.Put(key, enc)
	if err != nil {
		glog.Fatal("db write fail:", err)
		return err
	}

	if glog.V(logger.Debug) {
		glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
	}

	return nil
}
Example #18
// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", peer, block.NumberU64(), hash[:4])
	go func() {
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.validateBlock(block, parent); err {
		case nil:
			// All ok, quickly propagate to our peers
			broadcastTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case core.BlockFutureErr:
			futureMeter.Mark(1)
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			glog.V(logger.Debug).Infof("Peer %s: block #%d [%x] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			glog.V(logger.Warn).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
			return
		}
		// If import succeeded, broadcast the block
		announceTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}
Example #19
func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
	var time *big.Int
	if parent.Time() == nil {
		time = big.NewInt(10)
	} else {
		time = new(big.Int).Add(parent.Time(), big.NewInt(25)) // block time is fixed at 25 seconds
	}
	return &types.Header{
		Root:       state.Root(),
		ParentHash: parent.Hash(),
		Coinbase:   parent.Coinbase(),
		Difficulty: CalcDifficulty(time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
		GasLimit:   CalcGasLimit(parent),
		GasUsed:    new(big.Int),
		Number:     new(big.Int).Add(parent.Number(), common.Big1),
		Time:       time,
	}
}
Example #20
func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
	var chain []*types.Block
	for i, difficulty := range d {
		header := &types.Header{
			Coinbase:   common.Address{seed},
			Number:     big.NewInt(int64(i + 1)),
			Difficulty: big.NewInt(int64(difficulty)),
		}
		if i == 0 {
			header.ParentHash = genesis.Hash()
		} else {
			header.ParentHash = chain[i-1].Hash()
		}
		block := types.NewBlockWithHeader(header)
		chain = append(chain, block)
	}
	return chain
}
Example #21
// PutBlockReceipts stores the receipts associated with the block's transactions
// in a single slice, keyed by block hash. This is required for forks and chain
// reorgs.
func PutBlockReceipts(db common.Database, block *types.Block, receipts types.Receipts) error {
	rs := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		rs[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(rs)
	if err != nil {
		return err
	}

	hash := block.Hash()
	err = db.Put(append(blockReceiptsPre, hash[:]...), bytes)
	if err != nil {
		return err
	}

	return nil
}
Example #22
// PutTransactions stores the transactions in the given database
func PutTransactions(db common.Database, block *types.Block, txs types.Transactions) {
	batch := new(leveldb.Batch)
	_, batchWrite := db.(*ethdb.LDBDatabase)

	for i, tx := range block.Transactions() {
		rlpEnc, err := rlp.EncodeToBytes(tx)
		if err != nil {
			glog.V(logger.Debug).Infoln("Failed encoding tx", err)
			return
		}

		if batchWrite {
			batch.Put(tx.Hash().Bytes(), rlpEnc)
		} else {
			db.Put(tx.Hash().Bytes(), rlpEnc)
		}

		var txExtra struct {
			BlockHash  common.Hash
			BlockIndex uint64
			Index      uint64
		}
		txExtra.BlockHash = block.Hash()
		txExtra.BlockIndex = block.NumberU64()
		txExtra.Index = uint64(i)
		rlpMeta, err := rlp.EncodeToBytes(txExtra)
		if err != nil {
			glog.V(logger.Debug).Infoln("Failed encoding tx meta data", err)
			return
		}

		if batchWrite {
			batch.Put(append(tx.Hash().Bytes(), 0x0001), rlpMeta)
		} else {
			db.Put(append(tx.Hash().Bytes(), 0x0001), rlpMeta)
		}
	}

	if db, ok := db.(*ethdb.LDBDatabase); ok {
		if err := db.LDB().Write(batch, nil); err != nil {
			glog.V(logger.Error).Infoln("db write err:", err)
		}
	}
}
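Each transaction ends up under two keys: the RLP-encoded body under its hash, and the lookup metadata under the hash plus a single trailing byte (the untyped constant 0x0001 fits in one byte, so it appends 0x01). A small sketch of the key construction with a stand-in hash:

package main

import "fmt"

func main() {
	txHash := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in for a 32-byte tx hash

	bodyKey := txHash
	metaKey := append(append([]byte{}, txHash...), 0x01)

	fmt.Printf("tx body key: %x\n", bodyKey) // deadbeef
	fmt.Printf("tx meta key: %x\n", metaKey) // deadbeef01
}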
Example #23
// insert injects a block into the current block chain. Note, this function
// assumes that the `mu` mutex is held!
func (bc *ChainManager) insert(block *types.Block) {
	err := WriteHead(bc.chainDb, block)
	if err != nil {
		glog.Fatal("db write fail:", err)
	}

	bc.checkpoint++
	if bc.checkpoint > checkpointLimit {
		err = bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes())
		if err != nil {
			glog.Fatal("db write fail:", err)
		}

		bc.checkpoint = 0
	}

	bc.currentBlock = block
	bc.lastBlockHash = block.Hash()
}
Example #24
func NewBlockRes(block *types.Block, fullTx bool) *BlockRes {
	if block == nil {
		return nil
	}

	res := new(BlockRes)
	res.fullTx = fullTx
	res.BlockNumber = newHexNum(block.Number())
	res.BlockHash = newHexData(block.Hash())
	res.ParentHash = newHexData(block.ParentHash())
	res.Nonce = newHexData(block.Nonce())
	res.Sha3Uncles = newHexData(block.UncleHash())
	res.LogsBloom = newHexData(block.Bloom())
	res.TransactionRoot = newHexData(block.TxHash())
	res.StateRoot = newHexData(block.Root())
	res.Miner = newHexData(block.Coinbase())
	res.Difficulty = newHexNum(block.Difficulty())
	res.TotalDifficulty = newHexNum(block.Td)
	res.Size = newHexNum(block.Size().Int64())
	res.ExtraData = newHexData(block.Extra())
	res.GasLimit = newHexNum(block.GasLimit())
	res.GasUsed = newHexNum(block.GasUsed())
	res.UnixTimestamp = newHexNum(block.Time())

	txs := block.Transactions()
	res.Transactions = make([]*TransactionRes, len(txs))
	for i, tx := range txs {
		res.Transactions[i] = NewTransactionRes(tx)
		res.Transactions[i].BlockHash = res.BlockHash
		res.Transactions[i].BlockNumber = res.BlockNumber
		res.Transactions[i].TxIndex = newHexNum(i)
	}

	uncles := block.Uncles()
	res.Uncles = make([]*UncleRes, len(uncles))
	for i, uncle := range uncles {
		res.Uncles[i] = NewUncleRes(uncle)
	}

	return res
}
Example #25
func (bc *BlockCache) Push(block *types.Block) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	if len(bc.hashes) == bc.size {
		delete(bc.blocks, bc.hashes[0])

		// XXX There are a few other options on solving this
		// 1) use a poller / GC like mechanism to clean up untracked objects
		// 2) copy as below
		// re-use the slice and remove the reference to bc.hashes[0]
		// this will allow the element to be garbage collected.
		copy(bc.hashes, bc.hashes[1:])
	} else {
		bc.hashes = append(bc.hashes, common.Hash{})
	}

	hash := block.Hash()
	bc.blocks[hash] = block
	bc.hashes[len(bc.hashes)-1] = hash
}
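Push treats bc.hashes as a fixed-size FIFO window: once full, every element shifts left by one and the freed last slot takes the new hash, so the oldest entry is the one evicted from bc.blocks. A self-contained sketch of that eviction with plain strings and an assumed size of 3:

package main

import "fmt"

func push(window []string, size int, v string) []string {
	if len(window) == size {
		copy(window, window[1:]) // drop the oldest, re-use the backing array
	} else {
		window = append(window, "")
	}
	window[len(window)-1] = v
	return window
}

func main() {
	var w []string
	for _, h := range []string{"a", "b", "c", "d", "e"} {
		w = push(w, 3, h)
		fmt.Println(w)
	}
	// ends with [c d e]: the two oldest entries were evicted
}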
Example #26
// WriteBlock writes the block to the chain (or pending queue)
func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status writeStatus, err error) {
	self.wg.Add(1)
	defer self.wg.Done()

	cblock := self.currentBlock
	// Compare the TD of the last known block in the canonical chain to make sure it's greater.
	// At this point it's possible that a different chain (fork) becomes the new canonical chain.
	if block.Td.Cmp(self.Td()) > 0 {
		// chain fork
		if block.ParentHash() != cblock.Hash() {
			// during split we merge two different chains and create the new canonical chain
			err := self.merge(cblock, block)
			if err != nil {
				return NonStatTy, err
			}

			status = SplitStatTy
		}

		self.mu.Lock()
		self.setTotalDifficulty(block.Td)
		self.insert(block)
		self.mu.Unlock()

		status = CanonStatTy
	} else {
		status = SideStatTy
	}

	err = WriteBlock(self.chainDb, block)
	if err != nil {
		glog.Fatalln("db err:", err)
	}
	// Delete from future blocks
	self.futureBlocks.Remove(block.Hash())

	return
}
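The branch on block.Td encodes the fork-choice rule: a block only becomes the new canonical head when its total difficulty exceeds the current chain's, otherwise it is written as a side-chain block. A minimal sketch of that decision with made-up numbers:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	currentTd := big.NewInt(1000) // TD of the current canonical head
	blockTd := big.NewInt(1010)   // TD of the newly written block

	if blockTd.Cmp(currentTd) > 0 {
		fmt.Println("canonical head (possibly after merging a fork)")
	} else {
		fmt.Println("side chain block")
	}
}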
Example #27
func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *types.Block) error {
	uncles := set.New()
	ancestors := make(map[common.Hash]*types.Block)
	for _, ancestor := range sm.bc.GetBlocksFromHash(block.ParentHash(), 7) {
		ancestors[ancestor.Hash()] = ancestor
		// Include ancestors' uncles in the uncle set. Uncles must be unique.
		for _, uncle := range ancestor.Uncles() {
			uncles.Add(uncle.Hash())
		}
	}
	ancestors[block.Hash()] = block
	uncles.Add(block.Hash())

	for i, uncle := range block.Uncles() {
		hash := uncle.Hash()
		if uncles.Has(hash) {
			// Error not unique
			return UncleError("uncle[%d](%x) not unique", i, hash[:4])
		}
		uncles.Add(hash)

		if ancestors[hash] != nil {
			branch := fmt.Sprintf("  O - %x\n  |\n", block.Hash())
			for h := range ancestors {
				branch += fmt.Sprintf("  O - %x\n  |\n", h)
			}
			glog.Infoln(branch)
			return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
		}

		if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
			return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
		}

		if err := ValidateHeader(sm.Pow, uncle, ancestors[uncle.ParentHash], true, true); err != nil {
			return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
		}
	}

	return nil
}
Example #28
// diff takes two blocks, an old chain head and a new chain head, and reconstructs
// the blocks that need to be inserted to become part of the new canonical chain.
func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, error) {
	var (
		newChain    types.Blocks
		commonBlock *types.Block
		oldStart    = oldBlock
		newStart    = newBlock
	)

	// first reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return nil, fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return nil, fmt.Errorf("Invalid new chain")
	}

	numSplit := newBlock.Number()
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		newChain = append(newChain, newBlock)

		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
		if oldBlock == nil {
			return nil, fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return nil, fmt.Errorf("Invalid new chain")
		}
	}

	if glog.V(logger.Debug) {
		commonHash := commonBlock.Hash()
		glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
	}

	return newChain, nil
}
Example #29
// reorg takes two blocks, an old chain head and a new chain head, reconstructs the
// blocks and inserts them to be part of the new canonical chain, and accumulates
// potential missing transactions and posts an event about them.
func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error {
	self.mu.Lock()
	defer self.mu.Unlock()

	var (
		newChain    types.Blocks
		commonBlock *types.Block
		oldStart    = oldBlock
		newStart    = newBlock
		deletedTxs  types.Transactions
		addedTxs    types.Transactions
	)

	// first reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}

	numSplit := newBlock.Number()
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		newChain = append(newChain, newBlock)

		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
	}

	if glog.V(logger.Debug) {
		commonHash := commonBlock.Hash()
		glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
	}

	// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
	for _, block := range newChain {
		// insert the block in the canonical way, re-writing history
		self.insert(block)
		// write canonical receipts and transactions
		PutTransactions(self.chainDb, block, block.Transactions())
		PutReceipts(self.chainDb, GetBlockReceipts(self.chainDb, block.Hash()))

		addedTxs = append(addedTxs, block.Transactions()...)
	}

	var diff types.Transactions
	diff.Difference(deletedTxs, addedTxs)
	self.eventMux.Post(RemovedTransactionEvent{diff})

	return nil
}
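The walk in reorg (and in diff above) has two phases: rewind whichever chain is longer until both heads sit at the same height, then step both back in lockstep until the hashes agree, which is the common ancestor. A self-contained sketch of that search using a toy block type rather than the real types.Block:

package main

import "fmt"

type toyBlock struct {
	hash   string
	number uint64
	parent *toyBlock
}

// commonAncestor mirrors reorg's two-phase walk: equalize heights first,
// then step both chains back together until their hashes match.
func commonAncestor(oldB, newB *toyBlock) *toyBlock {
	for oldB != nil && newB != nil && oldB.number > newB.number {
		oldB = oldB.parent
	}
	for oldB != nil && newB != nil && newB.number > oldB.number {
		newB = newB.parent
	}
	for oldB != nil && newB != nil {
		if oldB.hash == newB.hash {
			return oldB
		}
		oldB, newB = oldB.parent, newB.parent
	}
	return nil // no common ancestor: the chains are unrelated
}

func main() {
	g := &toyBlock{hash: "genesis", number: 0}
	a1 := &toyBlock{hash: "a1", number: 1, parent: g}
	a2 := &toyBlock{hash: "a2", number: 2, parent: a1}
	b1 := &toyBlock{hash: "b1", number: 1, parent: g}
	b2 := &toyBlock{hash: "b2", number: 2, parent: b1}
	b3 := &toyBlock{hash: "b3", number: 3, parent: b2}

	fmt.Println(commonAncestor(a2, b3).hash) // genesis
}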