func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
	// Create the database in memory or in a temporary directory.
	var db ethdb.Database
	if !disk {
		db, _ = ethdb.NewMemDatabase()
	} else {
		dir, err := ioutil.TempDir("", "exp-core-bench")
		if err != nil {
			b.Fatalf("cannot create temporary directory: %v", err)
		}
		defer os.RemoveAll(dir)

		db, err = ethdb.NewLDBDatabase(dir, 0)
		if err != nil {
			b.Fatalf("cannot create temporary database: %v", err)
		}
		defer db.Close()
	}
	// Generate a chain of b.N blocks using the supplied block
	// generator function.
	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
	chain, _ := GenerateChain(genesis, db, b.N, gen)

	// Time the insertion of the new chain.
	// State and blocks are stored in the same DB.
	evmux := new(event.TypeMux)
	chainman, _ := NewBlockChain(db, FakePow{}, evmux)
	defer chainman.Stop()

	b.ReportAllocs()
	b.ResetTimer()
	if i, err := chainman.InsertChain(chain); err != nil {
		b.Fatalf("insert error (block %d): %v\n", i, err)
	}
}
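// A minimal sketch of how benchInsertChain might be driven from the standard Go
// benchmark harness, assuming GenerateChain accepts a nil generator and then
// produces empty blocks; the benchmark names below are illustrative only.
func BenchmarkInsertChainEmptyMemDB(b *testing.B) {
	benchInsertChain(b, false, nil) // in-memory database, empty blocks
}

func BenchmarkInsertChainEmptyDiskDB(b *testing.B) {
	benchInsertChain(b, true, nil) // on-disk LevelDB in a temporary directory
}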
func NewStateObjectFromBytes(address common.Address, data []byte, db ethdb.Database) *StateObject {
	var extobject struct {
		Nonce    uint64
		Balance  *big.Int
		Root     common.Hash
		CodeHash []byte
	}
	err := rlp.Decode(bytes.NewReader(data), &extobject)
	if err != nil {
		glog.Errorf("can't decode state object %x: %v", address, err)
		return nil
	}
	trie, err := trie.NewSecure(extobject.Root, db)
	if err != nil {
		// TODO: bubble this up or panic
		glog.Errorf("can't create account trie with root %x: %v", extobject.Root[:], err)
		return nil
	}

	object := &StateObject{address: address, db: db}
	object.nonce = extobject.Nonce
	object.balance = extobject.Balance
	object.codeHash = extobject.CodeHash
	object.trie = trie
	object.storage = make(map[string]common.Hash)
	object.code, _ = db.Get(extobject.CodeHash)
	return object
}
// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
// fast synchronization. The difference between this and GetHeadBlockHash is that
// whereas the last block hash is only updated upon a full block import, the last
// fast hash is updated when importing pre-processed blocks.
func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
	data, _ := db.Get(headFastKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
	data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
// WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
	if err := db.Put(headFastKey, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store last fast block's hash into database: %v", err)
		return err
	}
	return nil
}
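// A minimal round-trip sketch for the fast-sync head pointer, assuming an
// in-memory database created with ethdb.NewMemDatabase; the hash value is
// purely illustrative.
func exampleHeadFastBlockHash() bool {
	db, _ := ethdb.NewMemDatabase()
	hash := common.BytesToHash([]byte("example fast head")) // hypothetical hash
	if err := WriteHeadFastBlockHash(db, hash); err != nil {
		return false
	}
	return GetHeadFastBlockHash(db) == hash // true: the stored hash round-trips
}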
// GetTransaction retrieves a specific transaction from the database, along with
// its added positional metadata.
func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
	// Retrieve the transaction itself from the database
	data, _ := db.Get(hash.Bytes())
	if len(data) == 0 {
		return nil, common.Hash{}, 0, 0
	}
	var tx types.Transaction
	if err := rlp.DecodeBytes(data, &tx); err != nil {
		return nil, common.Hash{}, 0, 0
	}
	// Retrieve the blockchain positional metadata
	data, _ = db.Get(append(hash.Bytes(), txMetaSuffix...))
	if len(data) == 0 {
		return nil, common.Hash{}, 0, 0
	}
	var meta struct {
		BlockHash  common.Hash
		BlockIndex uint64
		Index      uint64
	}
	if err := rlp.DecodeBytes(data, &meta); err != nil {
		return nil, common.Hash{}, 0, 0
	}
	return &tx, meta.BlockHash, meta.BlockIndex, meta.Index
}
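// A minimal lookup sketch showing how the four return values of GetTransaction
// are typically interpreted; the transaction hash is assumed to come from the
// caller and may well be unknown to the database.
func exampleTransactionLookup(db ethdb.Database, txHash common.Hash) {
	tx, blockHash, blockNumber, index := GetTransaction(db, txHash)
	if tx == nil {
		return // unknown transaction: all metadata values are zero
	}
	_ = blockHash   // hash of the block that included the transaction
	_ = blockNumber // number of that block (stored as BlockIndex)
	_ = index       // position of the transaction within the block
}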
func saveBlockchainVersion(db ethdb.Database, bcVersion int) {
	d, _ := db.Get([]byte("BlockchainVersion"))
	blockchainVersion := common.NewValue(d).Uint()

	if blockchainVersion == 0 {
		db.Put([]byte("BlockchainVersion"), common.NewValue(bcVersion).Bytes())
	}
}
// WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
	key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
	if err := db.Put(key, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store number to hash mapping into database: %v", err)
		return err
	}
	return nil
}
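// A minimal round-trip sketch for the number-to-hash mapping, assuming an
// in-memory database; the hash is an illustrative placeholder rather than a
// real block hash.
func exampleCanonicalHash() bool {
	db, _ := ethdb.NewMemDatabase()
	hash := common.BytesToHash([]byte("example canonical block")) // hypothetical hash
	if err := WriteCanonicalHash(db, hash, 42); err != nil {
		return false
	}
	return GetCanonicalHash(db, 42) == hash // true: number 42 maps back to the hash
}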
// storeProof stores the new trie nodes obtained from a merkle proof in the database
func storeProof(db ethdb.Database, proof []rlp.RawValue) {
	for _, buf := range proof {
		hash := crypto.Keccak256(buf)
		val, _ := db.Get(hash)
		if val == nil {
			db.Put(hash, buf)
		}
	}
}
// GetReceipt returns a receipt by hash
func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
	data, _ := db.Get(append(receiptsPrefix, txHash[:]...))
	if len(data) == 0 {
		return nil
	}
	var receipt types.ReceiptForStorage
	err := rlp.DecodeBytes(data, &receipt)
	if err != nil {
		glog.V(logger.Core).Infoln("GetReceipt err:", err)
	}
	return (*types.Receipt)(&receipt)
}
// [deprecated by the header/block split, remove eventually]
// GetBlockByHashOld returns the old combined block corresponding to the hash
// or nil if not found. This method is only used by the upgrade mechanism to
// access the old combined block representation. It will be dropped after the
// network transitions to eth/63.
func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
	data, _ := db.Get(append(blockHashPrefix, hash[:]...))
	if len(data) == 0 {
		return nil
	}
	var block types.StorageBlock
	if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
		glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
		return nil
	}
	return (*types.Block)(&block)
}
// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
// none found.
func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
	data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err)
		return nil
	}
	return td
}
// WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		return err
	}
	key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store block total difficulty into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
	return nil
}
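// A minimal round-trip sketch for total difficulty storage, assuming an
// in-memory database; both the hash and the difficulty value are illustrative.
func exampleTotalDifficulty() bool {
	db, _ := ethdb.NewMemDatabase()
	hash := common.BytesToHash([]byte("example block")) // hypothetical hash
	if err := WriteTd(db, hash, big.NewInt(131072)); err != nil {
		return false
	}
	td := GetTd(db, hash) // nil would mean the hash is unknown
	return td != nil && td.Cmp(big.NewInt(131072)) == 0
}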
// GetChainConfig will fetch the network settings based on the given hash.
func GetChainConfig(db ethdb.Database, hash common.Hash) (*ChainConfig, error) {
	jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
	if len(jsonChainConfig) == 0 {
		return nil, ChainConfigNotFoundErr
	}

	var config ChainConfig
	if err := json.Unmarshal(jsonChainConfig, &config); err != nil {
		return nil, err
	}

	return &config, nil
}
// WriteBody serializes the body of a block into the database.
func WriteBody(db ethdb.Database, hash common.Hash, body *types.Body) error {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		return err
	}
	key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store block body into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
	return nil
}
// WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Database, header *types.Header) error {
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		return err
	}
	key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store header into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
	return nil
}
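// A minimal sketch of persisting a header, assuming an in-memory database; only
// the number is populated and every other header field is left at its zero
// value, which is enough for RLP encoding but is not a meaningful chain header.
func exampleWriteHeader() error {
	db, _ := ethdb.NewMemDatabase()
	header := &types.Header{Number: big.NewInt(1)} // hypothetical, mostly empty header
	return WriteHeader(db, header)
}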
// checkStateConsistency checks that all nodes in a state trie are indeed present.
func checkStateConsistency(db ethdb.Database, root common.Hash) error {
	// Create and iterate a state trie rooted in a sub-node
	if _, err := db.Get(root.Bytes()); err != nil {
		return nil // Consider a non existent state consistent
	}
	state, err := New(root, db)
	if err != nil {
		return err
	}
	it := NewNodeIterator(state)
	for it.Next() {
	}
	return it.Error
}
// WriteChainConfig writes the chain config settings to the database.
func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *ChainConfig) error {
	// short circuit and ignore a nil config; GetChainConfig will report
	// ChainConfigNotFoundErr and the caller can fall back to a default.
	if cfg == nil {
		return nil
	}

	jsonChainConfig, err := json.Marshal(cfg)
	if err != nil {
		return err
	}

	return db.Put(append(configPrefix, hash[:]...), jsonChainConfig)
}
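// A minimal round-trip sketch for the chain configuration, assuming an
// in-memory database; the genesis hash and the empty config value are
// illustrative placeholders.
func exampleChainConfig() error {
	db, _ := ethdb.NewMemDatabase()
	genesisHash := common.BytesToHash([]byte("example genesis")) // hypothetical hash
	if err := WriteChainConfig(db, genesisHash, &ChainConfig{}); err != nil {
		return err
	}
	_, err := GetChainConfig(db, genesisHash) // ChainConfigNotFoundErr only if never written
	return err
}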
// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
	data, _ := db.Get(append(blockReceiptsPrefix, hash[:]...))
	if len(data) == 0 {
		return nil
	}
	storageReceipts := []*types.ReceiptForStorage{}
	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
		glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err)
		return nil
	}
	receipts := make(types.Receipts, len(storageReceipts))
	for i, receipt := range storageReceipts {
		receipts[i] = (*types.Receipt)(receipt)
	}
	return receipts
}
// WriteMipmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
	batch := db.NewBatch()
	for _, level := range MIPMapLevels {
		key := mipmapKey(number, level)

		bloomDat, _ := db.Get(key)
		bloom := types.BytesToBloom(bloomDat)
		for _, receipt := range receipts {
			for _, log := range receipt.Logs {
				bloom.Add(log.Address.Big())
			}
		}
		batch.Put(key, bloom.Bytes())
	}
	if err := batch.Write(); err != nil {
		return fmt.Errorf("mipmap write fail for: %d: %v", number, err)
	}
	return nil
}
// WriteBlockReceipts stores all the transaction receipts belonging to a block
// as a single receipt slice. This is used during chain reorganisations for
// rescheduling dropped transactions.
func WriteBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
	// Convert the receipts into their storage form and serialize them
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		return err
	}
	// Store the flattened receipt slice
	if err := db.Put(append(blockReceiptsPrefix, hash.Bytes()...), bytes); err != nil {
		glog.Fatalf("failed to store block receipts into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored block receipts [%x…]", hash.Bytes()[:4])
	return nil
}
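// A minimal sketch of persisting and re-reading the per-block receipt slice;
// the receipts are assumed to have been produced by block processing elsewhere,
// and the block hash is supplied by the caller.
func exampleBlockReceipts(db ethdb.Database, blockHash common.Hash, receipts types.Receipts) types.Receipts {
	if err := WriteBlockReceipts(db, blockHash, receipts); err != nil {
		return nil
	}
	return GetBlockReceipts(db, blockHash) // same receipts, decoded from their storage form
}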
func addMipmapBloomBins(db ethdb.Database) (err error) {
	const mipmapVersion uint = 2

	// Check whether the version marker is already set. The stored data itself
	// is ignored since there is only one version so far.
	var data []byte
	data, _ = db.Get([]byte("setting-mipmap-version"))
	if len(data) > 0 {
		var version uint
		if err := rlp.DecodeBytes(data, &version); err == nil && version == mipmapVersion {
			return nil
		}
	}

	defer func() {
		if err == nil {
			var val []byte
			val, err = rlp.EncodeToBytes(mipmapVersion)
			if err == nil {
				err = db.Put([]byte("setting-mipmap-version"), val)
			}
			return
		}
	}()
	latestBlock := core.GetBlock(db, core.GetHeadBlockHash(db))
	if latestBlock == nil { // clean database
		return
	}

	tstart := time.Now()
	glog.V(logger.Info).Infoln("upgrading db log bloom bins")
	for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
		hash := core.GetCanonicalHash(db, i)
		if (hash == common.Hash{}) {
			return fmt.Errorf("chain db corrupted. Could not find block %d.", i)
		}
		core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash))
	}
	glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart))
	return nil
}
// WriteReceipts stores a batch of transaction receipts into the database.
func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
	batch := db.NewBatch()

	// Iterate over all the receipts and queue them for database injection
	for _, receipt := range receipts {
		storageReceipt := (*types.ReceiptForStorage)(receipt)
		data, err := rlp.EncodeToBytes(storageReceipt)
		if err != nil {
			return err
		}
		if err := batch.Put(append(receiptsPrefix, receipt.TxHash.Bytes()...), data); err != nil {
			return err
		}
	}
	// Write the scheduled data into the database
	if err := batch.Write(); err != nil {
		glog.Fatalf("failed to store receipts into database: %v", err)
		return err
	}
	return nil
}
// WriteTransactions stores the transactions associated with a specific block
// into the given database. Besides writing the transaction itself, the function
// also stores a metadata entry alongside it, detailing its position within the
// blockchain.
func WriteTransactions(db ethdb.Database, block *types.Block) error {
	batch := db.NewBatch()

	// Iterate over each transaction and encode it with its metadata
	for i, tx := range block.Transactions() {
		// Encode and queue up the transaction for storage
		data, err := rlp.EncodeToBytes(tx)
		if err != nil {
			return err
		}
		if err := batch.Put(tx.Hash().Bytes(), data); err != nil {
			return err
		}
		// Encode and queue up the transaction metadata for storage
		meta := struct {
			BlockHash  common.Hash
			BlockIndex uint64
			Index      uint64
		}{
			BlockHash:  block.Hash(),
			BlockIndex: block.NumberU64(),
			Index:      uint64(i),
		}
		data, err = rlp.EncodeToBytes(meta)
		if err != nil {
			return err
		}
		if err := batch.Put(append(tx.Hash().Bytes(), txMetaSuffix...), data); err != nil {
			return err
		}
	}
	// Write the scheduled data into the database
	if err := batch.Write(); err != nil {
		glog.Fatalf("failed to store transactions into database: %v", err)
		return err
	}
	return nil
}
// upgradeChainDatabase ensures that the chain database stores blocks split into
// separate header and body entries.
func upgradeChainDatabase(db ethdb.Database) error {
	// Short circuit if the head block is already stored as separate header and body
	data, err := db.Get([]byte("LastBlock"))
	if err != nil {
		return nil
	}
	head := common.BytesToHash(data)

	if block := core.GetBlockByHashOld(db, head); block == nil {
		return nil
	}
	// At least some of the database is still the old format, upgrade (skip the head block!)
	glog.V(logger.Info).Info("Old database detected, upgrading...")

	if db, ok := db.(*ethdb.LDBDatabase); ok {
		blockPrefix := []byte("block-hash-")
		for it := db.NewIterator(); it.Next(); {
			// Skip anything other than a combined block
			if !bytes.HasPrefix(it.Key(), blockPrefix) {
				continue
			}
			// Skip the head block (merge last to signal upgrade completion)
			if bytes.HasSuffix(it.Key(), head.Bytes()) {
				continue
			}
			// Load the block, split and serialize (order!)
			block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix)))

			if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil {
				return err
			}
			if err := core.WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
				return err
			}
			if err := core.WriteHeader(db, block.Header()); err != nil {
				return err
			}
			if err := db.Delete(it.Key()); err != nil {
				return err
			}
		}
		// Lastly, upgrade the head block, disabling the upgrade mechanism
		current := core.GetBlockByHashOld(db, head)

		if err := core.WriteTd(db, current.Hash(), current.DeprecatedTd()); err != nil {
			return err
		}
		if err := core.WriteBody(db, current.Hash(), &types.Body{current.Transactions(), current.Uncles()}); err != nil {
			return err
		}
		if err := core.WriteHeader(db, current.Header()); err != nil {
			return err
		}
	}
	return nil
}
// DeleteReceipt removes all receipt data associated with a transaction hash.
func DeleteReceipt(db ethdb.Database, hash common.Hash) {
	db.Delete(append(receiptsPrefix, hash.Bytes()...))
}
// DeleteTransaction removes all transaction data associated with a hash.
func DeleteTransaction(db ethdb.Database, hash common.Hash) {
	db.Delete(hash.Bytes())
	db.Delete(append(hash.Bytes(), txMetaSuffix...))
}
// GetMipmapBloom returns a bloom filter using the number and level as input
// parameters. For available levels see MIPMapLevels.
func GetMipmapBloom(db ethdb.Database, number, level uint64) types.Bloom {
	bloomDat, _ := db.Get(mipmapKey(number, level))
	return types.BytesToBloom(bloomDat)
}
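// A minimal lookup sketch for the mipmap bins: a negative bloom test rules the
// address out for the covered block range, a positive one only means the
// individual blocks are worth scanning. The block number and level choice are
// illustrative, and the bloom Test method taking the big.Int form of the
// address is an assumption mirroring the Add call in WriteMipmapBloom above.
func exampleMipmapLookup(db ethdb.Database, addr common.Address) bool {
	bloom := GetMipmapBloom(db, 1000, MIPMapLevels[0]) // hypothetical block number, first configured level
	return bloom.Test(addr.Big())
}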
// GetBlockChainVersion reads the version number from db.
func GetBlockChainVersion(db ethdb.Database) int {
	var vsn uint
	enc, _ := db.Get([]byte("BlockchainVersion"))
	rlp.DecodeBytes(enc, &vsn)
	return int(vsn)
}
// WriteBlockChainVersion writes vsn as the version number to db.
func WriteBlockChainVersion(db ethdb.Database, vsn int) {
	enc, _ := rlp.EncodeToBytes(uint(vsn))
	db.Put([]byte("BlockchainVersion"), enc)
}
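// A minimal round-trip sketch for the blockchain version marker, assuming an
// in-memory database; version 3 is an arbitrary illustrative value.
func exampleBlockChainVersion() bool {
	db, _ := ethdb.NewMemDatabase()
	WriteBlockChainVersion(db, 3)
	return GetBlockChainVersion(db) == 3 // true: the version round-trips
}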