Example 1
// insert injects a block into the current block chain. Note, this function
// assumes that the `mu` mutex is held!
func (bc *ChainManager) insert(block *types.Block) {
	key := append(blockNumPre, block.Number().Bytes()...)
	err := bc.blockDb.Put(key, block.Hash().Bytes())
	if err != nil {
		glog.Fatal("db write fail:", err)
	}
	err = bc.blockDb.Put([]byte("LastBlock"), block.Hash().Bytes())
	if err != nil {
		glog.Fatal("db write fail:", err)
	}

	bc.currentBlock = block
	bc.lastBlockHash = block.Hash()
}
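For reference, a minimal read-back sketch (not part of the example): the readHead helper below is hypothetical and assumes the block database exposes the usual Get(key []byte) ([]byte, error) method and that the head hash is stored under the same "LastBlock" key used by insert above.

// readHead is a hypothetical helper that looks up the "LastBlock" key written
// by insert above and returns it as a hash.
func readHead(blockDb common.Database) (common.Hash, error) {
	data, err := blockDb.Get([]byte("LastBlock"))
	if err != nil {
		return common.Hash{}, err
	}
	return common.BytesToHash(data), nil
}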
Example 2
func blockRecovery(ctx *cli.Context) {
	utils.CheckLegalese(ctx.GlobalString(utils.DataDirFlag.Name))

	arg := ctx.Args().First()
	if len(arg) == 0 {
		glog.Fatal("recover requires block number or hash")
	}

	cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
	utils.CheckLegalese(cfg.DataDir)

	blockDb, err := ethdb.NewLDBDatabase(filepath.Join(cfg.DataDir, "blockchain"), cfg.DatabaseCache)
	if err != nil {
		glog.Fatalln("could not open db:", err)
	}

	var block *types.Block
	if arg[0] == '#' {
		block = core.GetBlockByNumber(blockDb, common.String2Big(arg[1:]).Uint64())
	} else {
		block = core.GetBlockByHash(blockDb, common.HexToHash(arg))
	}

	if block == nil {
		glog.Fatalln("block not found. Recovery failed")
	}

	err = core.WriteHead(blockDb, block)
	if err != nil {
		glog.Fatalln("block write err", err)
	}
	glog.Infof("Recovery succesful. New HEAD %x\n", block.Hash())
}
Example 3
// insert injects a block into the current block chain. Note, this function
// assumes that the `mu` mutex is held!
func (bc *ChainManager) insert(block *types.Block) {
	err := WriteHead(bc.chainDb, block)
	if err != nil {
		glog.Fatal("db write fail:", err)
	}

	bc.checkpoint++
	if bc.checkpoint > checkpointLimit {
		err = bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes())
		if err != nil {
			glog.Fatal("db write fail:", err)
		}

		bc.checkpoint = 0
	}

	bc.currentBlock = block
	bc.lastBlockHash = block.Hash()
}
Example 4
// Flush flushes the trie to the backing layer. If this is a leveldb instance
// we'll use a batched write, otherwise we'll use regular put.
func (self *Cache) Flush() {
	if db, ok := self.backend.(*ethdb.LDBDatabase); ok {
		if err := db.LDB().Write(self.batch, nil); err != nil {
			glog.Fatal("db write err:", err)
		}
	} else {
		for k, v := range self.store {
			self.backend.Put([]byte(k), v)
		}
	}
}
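To make the two branches of Flush clearer, here is a hypothetical companion write path (the Put method below is a sketch, not taken from the source). It mirrors Flush's branching: writes are queued in a leveldb batch when the backend is an *ethdb.LDBDatabase, and buffered in the store map otherwise, which is exactly what Flush then drains.

// Put is a hypothetical companion to Flush above: leveldb backends get a
// batched write, everything else is buffered in the map.
func (self *Cache) Put(key, value []byte) {
	if _, ok := self.backend.(*ethdb.LDBDatabase); ok {
		self.batch.Put(key, value) // drained later by db.LDB().Write(self.batch, nil)
	} else {
		self.store[string(key)] = value // drained later by self.backend.Put
	}
}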
Example 5
func (self *Cache) Flush() {
	for k, v := range self.store {
		if err := self.backend.Put([]byte(k), v); err != nil {
			glog.Fatal("db write err:", err)
		}
	}

	// This will eventually grow too large. We could cap the store size
	// and evict not-so-popular nodes.
	//self.Reset()
}
Example 6
func (bc *ChainManager) write(block *types.Block) {
	enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
	key := append(blockHashPre, block.Hash().Bytes()...)
	err := bc.blockDb.Put(key, enc)
	if err != nil {
		glog.Fatal("db write fail:", err)
	}

	// Push block to cache
	bc.cache.Push(block)
}
Example 7
func NewChainManager(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
	headerCache, _ := lru.New(headerCacheLimit)
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	tdCache, _ := lru.New(tdCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)

	bc := &ChainManager{
		chainDb:      chainDb,
		eventMux:     mux,
		quit:         make(chan struct{}),
		headerCache:  headerCache,
		bodyCache:    bodyCache,
		bodyRLPCache: bodyRLPCache,
		tdCache:      tdCache,
		blockCache:   blockCache,
		futureBlocks: futureBlocks,
		pow:          pow,
	}

	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		reader, err := NewDefaultGenesisReader()
		if err != nil {
			return nil, err
		}
		bc.genesisBlock, err = WriteGenesisBlock(chainDb, reader)
		if err != nil {
			return nil, err
		}
		glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
	}
	if err := bc.setLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if block := bc.GetBlock(hash); block != nil {
			glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4])
			block = bc.GetBlock(block.ParentHash())
			if block == nil {
				glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?")
			}
			bc.SetHead(block)

			glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}
Example 8
func (bc *ChainManager) write(block *types.Block) {
	tstart := time.Now()

	enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
	key := append(blockHashPre, block.Hash().Bytes()...)
	err := bc.blockDb.Put(key, enc)
	if err != nil {
		glog.Fatal("db write fail:", err)
	}

	if glog.V(logger.Debug) {
		glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
	}
}
Example 9
// WriteBlock writes a block to the database
func WriteBlock(db common.Database, block *types.Block) error {
	tstart := time.Now()

	enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
	key := append(blockHashPre, block.Hash().Bytes()...)
	err := db.Put(key, enc)
	if err != nil {
		glog.Fatal("db write fail:", err)
		return err
	}

	if glog.V(logger.Debug) {
		glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
	}

	return nil
}
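As a read-side counterpart to WriteBlock, a minimal sketch follows. The readBlock helper is hypothetical; it assumes the same blockHashPre + hash key scheme, RLP-encoded StorageBlock values, and that types.StorageBlock is convertible back to types.Block.

// readBlock is a hypothetical counterpart to WriteBlock above, assuming blocks
// are stored under blockHashPre + hash as RLP-encoded StorageBlocks.
func readBlock(db common.Database, hash common.Hash) (*types.Block, error) {
	data, err := db.Get(append(blockHashPre, hash.Bytes()...))
	if err != nil {
		return nil, err
	}
	var block types.StorageBlock
	if err := rlp.DecodeBytes(data, &block); err != nil {
		return nil, err
	}
	return (*types.Block)(&block), nil
}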
Example 10
func NewChainManager(blockDb, stateDb, extraDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
	cache, _ := lru.New(blockCacheLimit)
	bc := &ChainManager{
		blockDb:  blockDb,
		stateDb:  stateDb,
		extraDb:  extraDb,
		eventMux: mux,
		quit:     make(chan struct{}),
		cache:    cache,
		pow:      pow,
	}

	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}

	if err := bc.setLastState(); err != nil {
		return nil, err
	}

	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if block := bc.GetBlock(hash); block != nil {
			glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4])
			block = bc.GetBlock(block.ParentHash())
			if block == nil {
				glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?")
			}
			bc.SetHead(block)

			glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
		}
	}

	bc.transState = bc.State().Copy()
	// Take ownership of this particular state
	bc.txState = state.ManageState(bc.State().Copy())

	bc.futureBlocks, _ = lru.New(maxFutureBlocks)
	bc.makeCache()

	go bc.update()

	return bc, nil
}
Example 11
func NewChainManager(genesis *types.Block, blockDb, stateDb, extraDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
	cache, _ := lru.New(blockCacheLimit)
	bc := &ChainManager{
		blockDb:      blockDb,
		stateDb:      stateDb,
		extraDb:      extraDb,
		genesisBlock: GenesisBlock(42, stateDb),
		eventMux:     mux,
		quit:         make(chan struct{}),
		cache:        cache,
		pow:          pow,
	}
	// Check the genesis block given to the chain manager. If a block number 0 is already
	// stored and it does not match the given genesis block, return an error. If no block
	// is stored, or the stored block matches, continue.
	if g := bc.GetBlockByNumber(0); g != nil && g.Hash() != genesis.Hash() {
		return nil, fmt.Errorf("Genesis mismatch. Maybe different nonce (%d vs %d)? %x / %x", g.Nonce(), genesis.Nonce(), g.Hash().Bytes()[:4], genesis.Hash().Bytes()[:4])
	}
	bc.genesisBlock = genesis
	bc.setLastState()

	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if block := bc.GetBlock(hash); block != nil {
			glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4])
			block = bc.GetBlock(block.ParentHash())
			if block == nil {
				glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?")
			}
			bc.SetHead(block)

			glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
		}
	}

	bc.transState = bc.State().Copy()
	// Take ownership of this particular state
	bc.txState = state.ManageState(bc.State().Copy())

	bc.futureBlocks, _ = lru.New(maxFutureBlocks)
	bc.makeCache()

	go bc.update()

	return bc, nil
}
Example 12
func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *ChainManager {
	bc := &ChainManager{
		blockDb:      blockDb,
		stateDb:      stateDb,
		genesisBlock: GenesisBlock(stateDb),
		eventMux:     mux,
		quit:         make(chan struct{}),
		cache:        NewBlockCache(blockCacheLimit),
	}
	bc.setLastState()

	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for _, hash := range badHashes {
		if block := bc.GetBlock(hash); block != nil {
			glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4])
			block = bc.GetBlock(block.ParentHash())
			if block == nil {
				glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?")
			}
			bc.SetHead(block)

			glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
		}
	}

	bc.transState = bc.State().Copy()
	// Take ownership of this particular state
	bc.txState = state.ManageState(bc.State().Copy())

	bc.futureBlocks = NewBlockCache(maxFutureBlocks)
	bc.makeCache()

	go bc.update()

	return bc
}
Example 13
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	self.wg.Add(1)
	defer self.wg.Done()

	// Collect some import statistics to report on
	stats := struct{ processed, ignored int32 }{}
	start := time.Now()

	// Create the block importing task queue and worker functions
	tasks := make(chan int, len(blockChain))
	for i := 0; i < len(blockChain) && i < len(receiptChain); i++ {
		tasks <- i
	}
	close(tasks)

	errs, failed := make([]error, len(tasks)), int32(0)
	process := func(worker int) {
		for index := range tasks {
			block, receipts := blockChain[index], receiptChain[index]

			// Short circuit insertion if shutting down or processing failed
			if atomic.LoadInt32(&self.procInterrupt) == 1 {
				return
			}
			if atomic.LoadInt32(&failed) > 0 {
				return
			}
			// Short circuit if the owner header is unknown
			if !self.HasHeader(block.Hash()) {
				errs[index] = fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
				atomic.AddInt32(&failed, 1)
				return
			}
			// Skip if the entire data is already known
			if self.HasBlock(block.Hash()) {
				atomic.AddInt32(&stats.ignored, 1)
				continue
			}
			// Compute all the non-consensus fields of the receipts
			transactions, logIndex := block.Transactions(), uint(0)
			for j := 0; j < len(receipts); j++ {
				// The transaction hash can be retrieved from the transaction itself
				receipts[j].TxHash = transactions[j].Hash()

				// The contract address can be derived from the transaction itself
				if MessageCreatesContract(transactions[j]) {
					from, _ := transactions[j].From()
					receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
				}
				// The used gas can be calculated based on previous receipts
				if j == 0 {
					receipts[j].GasUsed = new(big.Int).Set(receipts[j].CumulativeGasUsed)
				} else {
					receipts[j].GasUsed = new(big.Int).Sub(receipts[j].CumulativeGasUsed, receipts[j-1].CumulativeGasUsed)
				}
				// The derived log fields can simply be set from the block and transaction
				for k := 0; k < len(receipts[j].Logs); k++ {
					receipts[j].Logs[k].BlockNumber = block.NumberU64()
					receipts[j].Logs[k].BlockHash = block.Hash()
					receipts[j].Logs[k].TxHash = receipts[j].TxHash
					receipts[j].Logs[k].TxIndex = uint(j)
					receipts[j].Logs[k].Index = logIndex
					logIndex++
				}
			}
			// Write all the data out into the database
			if err := WriteBody(self.chainDb, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
				errs[index] = fmt.Errorf("failed to write block body: %v", err)
				atomic.AddInt32(&failed, 1)
				glog.Fatal(errs[index])
				return
			}
			if err := PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
				errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
				atomic.AddInt32(&failed, 1)
				glog.Fatal(errs[index])
				return
			}
			if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
				errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
				atomic.AddInt32(&failed, 1)
				glog.Fatal(errs[index])
				return
			}
			atomic.AddInt32(&stats.processed, 1)
		}
	}
	// Start as many worker threads as goroutines allowed
	pending := new(sync.WaitGroup)
	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
		pending.Add(1)
		go func(id int) {
			defer pending.Done()
			process(id)
		}(i)
	}
	pending.Wait()

	// If anything failed, report
	if failed > 0 {
		for i, err := range errs {
			if err != nil {
				return i, err
			}
		}
	}
	if atomic.LoadInt32(&self.procInterrupt) == 1 {
		glog.V(logger.Debug).Infoln("premature abort during receipt chain processing")
		return 0, nil
	}
	// Update the head fast sync block if better
	self.mu.Lock()
	head := blockChain[len(errs)-1]
	if self.GetTd(self.currentFastBlock.Hash()).Cmp(self.GetTd(head.Hash())) < 0 {
		if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
			glog.Fatalf("failed to update head fast block hash: %v", err)
		}
		self.currentFastBlock = head
	}
	self.mu.Unlock()

	// Report some public statistics so the user has a clue what's going on
	first, last := blockChain[0], blockChain[len(blockChain)-1]
	glog.V(logger.Info).Infof("imported %d receipt(s) (%d ignored) in %v. #%d [%x… / %x…]", stats.processed, stats.ignored,
		time.Since(start), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])

	return 0, nil
}
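The import loop above follows a common Go fan-out pattern: a closed, buffered channel of task indices, one worker goroutine per GOMAXPROCS, a per-index error slot, and a shared failure counter so workers can bail out early. Below is a stripped-down sketch of just that pattern, with illustrative names not taken from the source, using only the standard library (runtime, sync, sync/atomic).

// processAll fans n task indices out to GOMAXPROCS workers, records one error
// per index, and stops scheduling new work once any task fails.
func processAll(n int, work func(i int) error) []error {
	tasks := make(chan int, n)
	for i := 0; i < n; i++ {
		tasks <- i
	}
	close(tasks)

	errs, failed := make([]error, n), int32(0)
	var wg sync.WaitGroup
	for w := 0; w < runtime.GOMAXPROCS(0); w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := range tasks {
				// Short circuit if another worker already hit an error.
				if atomic.LoadInt32(&failed) > 0 {
					return
				}
				if err := work(i); err != nil {
					errs[i] = err
					atomic.AddInt32(&failed, 1)
					return
				}
			}
		}()
	}
	wg.Wait()
	return errs
}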