// NewStateObjectFromBytes decodes an RLP-encoded state object and rebuilds the
// in-memory StateObject, including its storage trie and contract code.
func NewStateObjectFromBytes(address common.Address, data []byte, db ethdb.Database) *StateObject {
	var extobject struct {
		Nonce    uint64
		Balance  *big.Int
		Root     common.Hash
		CodeHash []byte
	}
	err := rlp.Decode(bytes.NewReader(data), &extobject)
	if err != nil {
		glog.Errorf("can't decode state object %x: %v", address, err)
		return nil
	}
	trie, err := trie.NewSecure(extobject.Root, db)
	if err != nil {
		// TODO: bubble this up or panic
		glog.Errorf("can't create account trie with root %x: %v", extobject.Root[:], err)
		return nil
	}

	object := &StateObject{address: address, db: db}
	object.nonce = extobject.Nonce
	object.balance = extobject.Balance
	object.codeHash = extobject.CodeHash
	object.trie = trie
	object.storage = make(map[string]common.Hash)
	object.code, _ = db.Get(extobject.CodeHash)
	return object
}
func NewStateObjectFromBytes(address common.Address, data []byte, db ethdb.Database) *StateObject {
	// TODO clean me up
	var extobject struct {
		Nonce    uint64
		Balance  *big.Int
		Root     common.Hash
		CodeHash []byte
	}
	err := rlp.Decode(bytes.NewReader(data), &extobject)
	if err != nil {
		fmt.Println(err)
		return nil
	}

	object := &StateObject{address: address, db: db}
	object.nonce = extobject.Nonce
	object.balance = extobject.Balance
	object.codeHash = extobject.CodeHash
	object.trie = trie.NewSecure(extobject.Root[:], db)
	object.storage = make(map[string]common.Hash)
	object.gasPool = new(big.Int)
	object.code, _ = db.Get(extobject.CodeHash)
	return object
}
// benchInsertChain times the insertion of b.N generated blocks into a fresh
// blockchain, backed by either an in-memory or an on-disk database.
func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
	// Create the database in memory or in a temporary directory.
	var db ethdb.Database
	if !disk {
		db, _ = ethdb.NewMemDatabase()
	} else {
		dir, err := ioutil.TempDir("", "eth-core-bench")
		if err != nil {
			b.Fatalf("cannot create temporary directory: %v", err)
		}
		defer os.RemoveAll(dir)
		db, err = ethdb.NewLDBDatabase(dir, 0)
		if err != nil {
			b.Fatalf("cannot create temporary database: %v", err)
		}
		defer db.Close()
	}

	// Generate a chain of b.N blocks using the supplied block
	// generator function.
	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
	chain, _ := GenerateChain(genesis, db, b.N, gen)

	// Time the insertion of the new chain.
	// State and blocks are stored in the same DB.
	evmux := new(event.TypeMux)
	chainman, _ := NewBlockChain(db, FakePow{}, evmux)
	chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
	defer chainman.Stop()
	b.ReportAllocs()
	b.ResetTimer()
	if i, err := chainman.InsertChain(chain); err != nil {
		b.Fatalf("insert error (block %d): %v\n", i, err)
	}
}
// WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
	if err := db.Put(headFastKey, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store last fast block's hash into database: %v", err)
		return err
	}
	return nil
}
// PutTransactions stores the transactions in the given database
func PutTransactions(db ethdb.Database, block *types.Block, txs types.Transactions) error {
	batch := db.NewBatch()

	for i, tx := range block.Transactions() {
		rlpEnc, err := rlp.EncodeToBytes(tx)
		if err != nil {
			return fmt.Errorf("failed encoding tx: %v", err)
		}

		batch.Put(tx.Hash().Bytes(), rlpEnc)

		var txExtra struct {
			BlockHash  common.Hash
			BlockIndex uint64
			Index      uint64
		}
		txExtra.BlockHash = block.Hash()
		txExtra.BlockIndex = block.NumberU64()
		txExtra.Index = uint64(i)
		rlpMeta, err := rlp.EncodeToBytes(txExtra)
		if err != nil {
			return fmt.Errorf("failed encoding tx meta data: %v", err)
		}

		batch.Put(append(tx.Hash().Bytes(), 0x0001), rlpMeta)
	}

	if err := batch.Write(); err != nil {
		return fmt.Errorf("failed writing tx to db: %v", err)
	}
	return nil
}
// PutReceipts stores the receipts in the current database
func PutReceipts(db ethdb.Database, receipts types.Receipts) error {
	batch := new(leveldb.Batch)
	_, batchWrite := db.(*ethdb.LDBDatabase)

	for _, receipt := range receipts {
		storageReceipt := (*types.ReceiptForStorage)(receipt)
		bytes, err := rlp.EncodeToBytes(storageReceipt)
		if err != nil {
			return err
		}

		if batchWrite {
			batch.Put(append(receiptsPre, receipt.TxHash[:]...), bytes)
		} else {
			err = db.Put(append(receiptsPre, receipt.TxHash[:]...), bytes)
			if err != nil {
				return err
			}
		}
	}
	if db, ok := db.(*ethdb.LDBDatabase); ok {
		if err := db.LDB().Write(batch, nil); err != nil {
			return err
		}
	}

	return nil
}
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
	data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
// GetTransaction retrieves a specific transaction from the database, along with
// its added positional metadata.
func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
	// Retrieve the transaction itself from the database
	data, _ := db.Get(hash.Bytes())
	if len(data) == 0 {
		return nil, common.Hash{}, 0, 0
	}
	var tx types.Transaction
	if err := rlp.DecodeBytes(data, &tx); err != nil {
		return nil, common.Hash{}, 0, 0
	}
	// Retrieve the blockchain positional metadata
	data, _ = db.Get(append(hash.Bytes(), txMetaSuffix...))
	if len(data) == 0 {
		return nil, common.Hash{}, 0, 0
	}
	var meta struct {
		BlockHash  common.Hash
		BlockIndex uint64
		Index      uint64
	}
	if err := rlp.DecodeBytes(data, &meta); err != nil {
		return nil, common.Hash{}, 0, 0
	}
	return &tx, meta.BlockHash, meta.BlockIndex, meta.Index
}
// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
// fast synchronization. The difference between this and GetHeadBlockHash is that
// whereas the last block hash is only updated upon a full block import, the last
// fast hash is updated when importing pre-processed blocks.
func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
	data, _ := db.Get(headFastKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
// WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
	key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
	if err := db.Put(key, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store number to hash mapping into database: %v", err)
		return err
	}
	return nil
}
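// Hedged sketch (not part of the original sources): a minimal round trip through
// WriteCanonicalHash and GetCanonicalHash above, assuming the in-memory ethdb backend
// used by the benchmarks. The function name and the sample number/hash are illustrative.
func canonicalHashRoundTripSketch() bool {
	db, _ := ethdb.NewMemDatabase()

	hash := common.BytesToHash([]byte("illustrative canonical hash"))
	if err := WriteCanonicalHash(db, hash, 42); err != nil {
		return false
	}
	// A stored number resolves to its hash; a missing one yields the zero hash.
	return GetCanonicalHash(db, 42) == hash && GetCanonicalHash(db, 43) == (common.Hash{})
}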
// saveBlockchainVersion writes the blockchain version to the database if no
// version has been stored yet.
func saveBlockchainVersion(db ethdb.Database, bcVersion int) {
	d, _ := db.Get([]byte("BlockchainVersion"))
	blockchainVersion := common.NewValue(d).Uint()

	if blockchainVersion == 0 {
		db.Put([]byte("BlockchainVersion"), common.NewValue(bcVersion).Bytes())
	}
}
// storeProof stores the new trie nodes obtained from a merkle proof in the database
func storeProof(db ethdb.Database, proof []rlp.RawValue) {
	for _, buf := range proof {
		hash := crypto.Keccak256(buf)
		val, _ := db.Get(hash)
		if val == nil {
			db.Put(hash, buf)
		}
	}
}
// GetTransaction returns the transaction stored under the given hash, or nil
// if it is not present or cannot be decoded.
func GetTransaction(db ethdb.Database, txhash common.Hash) *types.Transaction {
	data, _ := db.Get(txhash[:])
	if len(data) != 0 {
		var tx types.Transaction
		if err := rlp.DecodeBytes(data, &tx); err != nil {
			return nil
		}
		return &tx
	}
	return nil
}
// GetTd retrieves the total difficulty of the block corresponding to the given
// hash, or nil if none is found.
func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
	data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err)
		return nil
	}
	return td
}
// GetReceipt returns a receipt by hash
func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
	data, _ := db.Get(append(receiptsPrefix, txHash[:]...))
	if len(data) == 0 {
		return nil
	}
	var receipt types.ReceiptForStorage
	err := rlp.DecodeBytes(data, &receipt)
	if err != nil {
		glog.V(logger.Core).Infoln("GetReceipt err:", err)
	}
	return (*types.Receipt)(&receipt)
}
// [deprecated by the header/block split, remove eventually]
// GetBlockByHashOld returns the old combined block corresponding to the hash
// or nil if not found. This method is only used by the upgrade mechanism to
// access the old combined block representation. It will be dropped after the
// network transitions to eth/63.
func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
	data, _ := db.Get(append(blockHashPrefix, hash[:]...))
	if len(data) == 0 {
		return nil
	}
	var block types.StorageBlock
	if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
		glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
		return nil
	}
	return (*types.Block)(&block)
}
// GetBlockReceipts returns the receipts generated by the transactions included
// in the block with the given hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
	data, _ := db.Get(append(blockReceiptsPre, hash[:]...))
	if len(data) == 0 {
		return nil
	}
	var receipts types.Receipts
	err := rlp.DecodeBytes(data, &receipts)
	if err != nil {
		glog.V(logger.Core).Infoln("GetBlockReceipts err:", err)
	}
	return receipts
}
// GetChainConfig will fetch the network settings based on the given hash.
func GetChainConfig(db ethdb.Database, hash common.Hash) (*ChainConfig, error) {
	jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
	if len(jsonChainConfig) == 0 {
		return nil, ChainConfigNotFoundErr
	}

	var config ChainConfig
	if err := json.Unmarshal(jsonChainConfig, &config); err != nil {
		return nil, err
	}

	return &config, nil
}
// WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		return err
	}
	key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store block total difficulty into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
	return nil
}
// WriteBody serializes the body of a block into the database.
func WriteBody(db ethdb.Database, hash common.Hash, body *types.Body) error {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		return err
	}
	key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store block body into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
	return nil
}
// WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Database, header *types.Header) error {
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		return err
	}
	key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store header into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
	return nil
}
// WriteChainConfig writes the chain config settings to the database.
func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *ChainConfig) error {
	// short circuit and ignore if nil config. GetChainConfig
	// will return a default.
	if cfg == nil {
		return nil
	}

	jsonChainConfig, err := json.Marshal(cfg)
	if err != nil {
		return err
	}

	return db.Put(append(configPrefix, hash[:]...), jsonChainConfig)
}
// PutBlockReceipts stores the receipts associated with a block's transactions,
// keyed by block hash, in a single slice. This is required for forks and chain
// reorgs.
func PutBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
	rs := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		rs[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(rs)
	if err != nil {
		return err
	}

	err = db.Put(append(blockReceiptsPre, hash[:]...), bytes)
	if err != nil {
		return err
	}

	return nil
}
// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
	data, _ := db.Get(append(blockReceiptsPrefix, hash[:]...))
	if len(data) == 0 {
		return nil
	}
	storageReceipts := []*types.ReceiptForStorage{}
	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
		glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err)
		return nil
	}
	receipts := make(types.Receipts, len(storageReceipts))
	for i, receipt := range storageReceipts {
		receipts[i] = (*types.Receipt)(receipt)
	}
	return receipts
}
// checkStateConsistency checks that all nodes in a state trie are indeed present.
func checkStateConsistency(db ethdb.Database, root common.Hash) error {
	// Remove any potentially cached data from the test state creation or previous checks
	trie.ClearGlobalCache()

	// Create and iterate a state trie rooted in a sub-node
	if _, err := db.Get(root.Bytes()); err != nil {
		return nil // Consider a non existent state consistent
	}
	state, err := New(root, db)
	if err != nil {
		return err
	}
	it := NewNodeIterator(state)
	for it.Next() {
	}
	return it.Error
}
// WriteMipmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
	batch := db.NewBatch()
	for _, level := range MIPMapLevels {
		key := mipmapKey(number, level)
		bloomDat, _ := db.Get(key)
		bloom := types.BytesToBloom(bloomDat)
		for _, receipt := range receipts {
			for _, log := range receipt.Logs {
				bloom.Add(log.Address.Big())
			}
		}
		batch.Put(key, bloom.Bytes())
	}
	if err := batch.Write(); err != nil {
		return fmt.Errorf("mipmap write fail for: %d: %v", number, err)
	}
	return nil
}
// WriteBlockReceipts stores all the transaction receipts belonging to a block
// as a single receipt slice. This is used during chain reorganisations for
// rescheduling dropped transactions.
func WriteBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
	// Convert the receipts into their storage form and serialize them
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		return err
	}
	// Store the flattened receipt slice
	if err := db.Put(append(blockReceiptsPrefix, hash.Bytes()...), bytes); err != nil {
		glog.Fatalf("failed to store block receipts into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored block receipts [%x…]", hash.Bytes()[:4])
	return nil
}
// addMipmapBloomBins rebuilds the MIP-mapped bloom bins for the whole chain if
// the database has not yet been upgraded to the current mipmap version.
func addMipmapBloomBins(db ethdb.Database) (err error) {
	const mipmapVersion uint = 2

	// check if the version is set. We ignore the data for now since there's
	// only one version, so we can easily ignore it
	var data []byte
	data, _ = db.Get([]byte("setting-mipmap-version"))
	if len(data) > 0 {
		var version uint
		if err := rlp.DecodeBytes(data, &version); err == nil && version == mipmapVersion {
			return nil
		}
	}

	defer func() {
		if err == nil {
			var val []byte
			val, err = rlp.EncodeToBytes(mipmapVersion)
			if err == nil {
				err = db.Put([]byte("setting-mipmap-version"), val)
			}
			return
		}
	}()

	latestBlock := core.GetBlock(db, core.GetHeadBlockHash(db))
	if latestBlock == nil { // clean database
		return
	}

	tstart := time.Now()
	glog.V(logger.Info).Infoln("upgrading db log bloom bins")
	for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
		hash := core.GetCanonicalHash(db, i)
		if (hash == common.Hash{}) {
			return fmt.Errorf("chain db corrupted. Could not find block %d.", i)
		}
		core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash))
	}
	glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart))
	return nil
}
// WriteReceipts stores a batch of transaction receipts into the database.
func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
	batch := db.NewBatch()

	// Iterate over all the receipts and queue them for database injection
	for _, receipt := range receipts {
		storageReceipt := (*types.ReceiptForStorage)(receipt)
		data, err := rlp.EncodeToBytes(storageReceipt)
		if err != nil {
			return err
		}
		if err := batch.Put(append(receiptsPrefix, receipt.TxHash.Bytes()...), data); err != nil {
			return err
		}
	}
	// Write the scheduled data into the database
	if err := batch.Write(); err != nil {
		glog.Fatalf("failed to store receipts into database: %v", err)
		return err
	}
	return nil
}
// WriteTransactions stores the transactions associated with a specific block
// into the given database. Besides writing the transactions, the function also
// stores a metadata entry for each transaction, detailing its position within
// the blockchain.
func WriteTransactions(db ethdb.Database, block *types.Block) error {
	batch := db.NewBatch()

	// Iterate over each transaction and encode it with its metadata
	for i, tx := range block.Transactions() {
		// Encode and queue up the transaction for storage
		data, err := rlp.EncodeToBytes(tx)
		if err != nil {
			return err
		}
		if err := batch.Put(tx.Hash().Bytes(), data); err != nil {
			return err
		}
		// Encode and queue up the transaction metadata for storage
		meta := struct {
			BlockHash  common.Hash
			BlockIndex uint64
			Index      uint64
		}{
			BlockHash:  block.Hash(),
			BlockIndex: block.NumberU64(),
			Index:      uint64(i),
		}
		data, err = rlp.EncodeToBytes(meta)
		if err != nil {
			return err
		}
		if err := batch.Put(append(tx.Hash().Bytes(), txMetaSuffix...), data); err != nil {
			return err
		}
	}
	// Write the scheduled data into the database
	if err := batch.Write(); err != nil {
		glog.Fatalf("failed to store transactions into database: %v", err)
		return err
	}
	return nil
}
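// Hedged sketch (not part of the original sources): shows how the positional metadata
// written by WriteTransactions is read back with the four-return-value GetTransaction
// variant above. It assumes the caller already holds a fully assembled *types.Block;
// the helper name is illustrative only.
func verifyTransactionLookups(db ethdb.Database, block *types.Block) error {
	// Persist every transaction of the block together with its positional metadata.
	if err := WriteTransactions(db, block); err != nil {
		return err
	}
	for i, tx := range block.Transactions() {
		// Each transaction should now resolve to the block hash, block number and
		// index it was stored under.
		_, blockHash, blockNumber, index := GetTransaction(db, tx.Hash())
		if blockHash != block.Hash() || blockNumber != block.NumberU64() || index != uint64(i) {
			return fmt.Errorf("transaction %x: unexpected positional metadata", tx.Hash())
		}
	}
	return nil
}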