Example #1
// Tests that receipts can be stored and retrieved.
func TestReceiptStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	receipt1 := &types.Receipt{
		PostState:         []byte{0x01},
		CumulativeGasUsed: big.NewInt(1),
		Logs: vm.Logs{
			&vm.Log{Address: common.BytesToAddress([]byte{0x11})},
			&vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
		},
		TxHash:          common.BytesToHash([]byte{0x11, 0x11}),
		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
		GasUsed:         big.NewInt(111111),
	}
	receipt2 := &types.Receipt{
		PostState:         []byte{0x02},
		CumulativeGasUsed: big.NewInt(2),
		Logs: vm.Logs{
			&vm.Log{Address: common.BytesToAddress([]byte{0x22})},
			&vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
		},
		TxHash:          common.BytesToHash([]byte{0x22, 0x22}),
		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
		GasUsed:         big.NewInt(222222),
	}
	receipts := []*types.Receipt{receipt1, receipt2}

	// Check that no receipt entries are in a pristine database
	for i, receipt := range receipts {
		if r := GetReceipt(db, receipt.TxHash); r != nil {
			t.Fatalf("receipt #%d [%x]: non existent receipt returned: %v", i, receipt.TxHash, r)
		}
	}
	// Insert all the receipts into the database, and verify contents
	if err := WriteReceipts(db, receipts); err != nil {
		t.Fatalf("failed to write receipts: %v", err)
	}
	for i, receipt := range receipts {
		if r := GetReceipt(db, receipt.TxHash); r == nil {
			t.Fatalf("receipt #%d [%x]: receipt not found", i, receipt.TxHash)
		} else {
			rlpHave, _ := rlp.EncodeToBytes(r)
			rlpWant, _ := rlp.EncodeToBytes(receipt)

			if !bytes.Equal(rlpHave, rlpWant) {
				t.Fatalf("receipt #%d [%x]: receipt mismatch: have %v, want %v", i, receipt.TxHash, r, receipt)
			}
		}
	}
	// Delete the receipts and check purge
	for i, receipt := range receipts {
		DeleteReceipt(db, receipt.TxHash)
		if r := GetReceipt(db, receipt.TxHash); r != nil {
			t.Fatalf("receipt #%d [%x]: deleted receipt returned: %v", i, receipt.TxHash, r)
		}
	}
}
Example #2
// use testing instead of checker because checker does not support
// printing/logging in tests (-check.vv does not work)
func TestSnapshot2(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	state, _ := New(common.Hash{}, db)

	stateobjaddr0 := toAddr([]byte("so0"))
	stateobjaddr1 := toAddr([]byte("so1"))
	var storageaddr common.Hash

	data0 := common.BytesToHash([]byte{17})
	data1 := common.BytesToHash([]byte{18})

	state.SetState(stateobjaddr0, storageaddr, data0)
	state.SetState(stateobjaddr1, storageaddr, data1)

	// db, trie are already non-empty values
	so0 := state.GetStateObject(stateobjaddr0)
	so0.SetBalance(big.NewInt(42))
	so0.SetNonce(43)
	so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'})
	so0.suicided = false
	so0.deleted = false
	state.setStateObject(so0)

	root, _ := state.Commit()
	state.Reset(root)

	// and one with deleted == true
	so1 := state.GetStateObject(stateobjaddr1)
	so1.SetBalance(big.NewInt(52))
	so1.SetNonce(53)
	so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'})
	so1.suicided = true
	so1.deleted = true
	state.setStateObject(so1)

	so1 = state.GetStateObject(stateobjaddr1)
	if so1 != nil {
		t.Fatalf("deleted object not nil when getting")
	}

	snapshot := state.Snapshot()
	state.RevertToSnapshot(snapshot)

	so0Restored := state.GetStateObject(stateobjaddr0)
	// Update lazily-loaded values before comparing.
	so0Restored.GetState(db, storageaddr)
	so0Restored.Code(db)
	// non-deleted is equal (restored)
	compareStateObjects(so0Restored, so0, t)

	// deleted should be nil, both before and after restore of state copy
	so1Restored := state.GetStateObject(stateobjaddr1)
	if so1Restored != nil {
		t.Fatalf("deleted object not nil after restoring snapshot: %+v", so1Restored)
	}
}
Example #3
// upgradeChainDatabase ensures that the chain database stores blocks split into
// separate header and body entries.
func upgradeChainDatabase(db ethdb.Database) error {
	// Short circuit if the head block is stored already as separate header and body
	data, err := db.Get([]byte("LastBlock"))
	if err != nil {
		return nil
	}
	head := common.BytesToHash(data)

	if block := core.GetBlockByHashOld(db, head); block == nil {
		return nil
	}
	// At least some of the database is still the old format, upgrade (skip the head block!)
	glog.V(logger.Info).Info("Old database detected, upgrading...")

	if db, ok := db.(*ethdb.LDBDatabase); ok {
		blockPrefix := []byte("block-hash-")
		for it := db.NewIterator(); it.Next(); {
			// Skip anything other than a combined block
			if !bytes.HasPrefix(it.Key(), blockPrefix) {
				continue
			}
			// Skip the head block (merge last to signal upgrade completion)
			if bytes.HasSuffix(it.Key(), head.Bytes()) {
				continue
			}
			// Load the block, split and serialize (order!)
			block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix)))

			if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil {
				return err
			}
			if err := core.WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
				return err
			}
			if err := core.WriteHeader(db, block.Header()); err != nil {
				return err
			}
			if err := db.Delete(it.Key()); err != nil {
				return err
			}
		}
		// Lastly, upgrade the head block, disabling the upgrade mechanism
		current := core.GetBlockByHashOld(db, head)

		if err := core.WriteTd(db, current.Hash(), current.DeprecatedTd()); err != nil {
			return err
		}
		if err := core.WriteBody(db, current.Hash(), &types.Body{current.Transactions(), current.Uncles()}); err != nil {
			return err
		}
		if err := core.WriteHeader(db, current.Header()); err != nil {
			return err
		}
	}
	return nil
}
Example #4
// Tests that receipts associated with a single block can be stored and retrieved.
func TestBlockReceiptStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	receipt1 := &types.Receipt{
		PostState:         []byte{0x01},
		CumulativeGasUsed: big.NewInt(1),
		Logs: vm.Logs{
			&vm.Log{Address: common.BytesToAddress([]byte{0x11})},
			&vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
		},
		TxHash:          common.BytesToHash([]byte{0x11, 0x11}),
		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
		GasUsed:         big.NewInt(111111),
	}
	receipt2 := &types.Receipt{
		PostState:         []byte{0x02},
		CumulativeGasUsed: big.NewInt(2),
		Logs: vm.Logs{
			&vm.Log{Address: common.BytesToAddress([]byte{0x22})},
			&vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
		},
		TxHash:          common.BytesToHash([]byte{0x22, 0x22}),
		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
		GasUsed:         big.NewInt(222222),
	}
	receipts := []*types.Receipt{receipt1, receipt2}

	// Check that no receipt entries are in a pristine database
	hash := common.BytesToHash([]byte{0x03, 0x14})
	if rs := GetBlockReceipts(db, hash); len(rs) != 0 {
		t.Fatalf("non existent receipts returned: %v", rs)
	}
	// Insert the receipt slice into the database and check presence
	if err := WriteBlockReceipts(db, hash, receipts); err != nil {
		t.Fatalf("failed to write block receipts: %v", err)
	}
	if rs := GetBlockReceipts(db, hash); len(rs) == 0 {
		t.Fatalf("no receipts returned")
	} else {
		for i := 0; i < len(receipts); i++ {
			rlpHave, _ := rlp.EncodeToBytes(rs[i])
			rlpWant, _ := rlp.EncodeToBytes(receipts[i])

			if !bytes.Equal(rlpHave, rlpWant) {
				t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i])
			}
		}
	}
	// Delete the receipt slice and check purge
	DeleteBlockReceipts(db, hash)
	if rs := GetBlockReceipts(db, hash); len(rs) != 0 {
		t.Fatalf("deleted receipts returned: %v", rs)
	}
}
Example #5
// use testing instead of checker because checker does not support
// printing/logging in tests (-check.vv does not work)
func TestSnapshot2(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	state, _ := New(common.Hash{}, db)

	stateobjaddr0 := toAddr([]byte("so0"))
	stateobjaddr1 := toAddr([]byte("so1"))
	var storageaddr common.Hash

	data0 := common.BytesToHash([]byte{17})
	data1 := common.BytesToHash([]byte{18})

	state.SetState(stateobjaddr0, storageaddr, data0)
	state.SetState(stateobjaddr1, storageaddr, data1)

	// db, trie are already non-empty values
	so0 := state.GetStateObject(stateobjaddr0)
	so0.balance = big.NewInt(42)
	so0.nonce = 43
	so0.code = []byte{'c', 'a', 'f', 'e'}
	so0.codeHash = so0.CodeHash()
	so0.remove = true
	so0.deleted = false
	so0.dirty = false
	state.SetStateObject(so0)

	// and one with deleted == true
	so1 := state.GetStateObject(stateobjaddr1)
	so1.balance = big.NewInt(52)
	so1.nonce = 53
	so1.code = []byte{'c', 'a', 'f', 'e', '2'}
	so1.codeHash = so1.CodeHash()
	so1.remove = true
	so1.deleted = true
	so1.dirty = true
	state.SetStateObject(so1)

	so1 = state.GetStateObject(stateobjaddr1)
	if so1 != nil {
		t.Fatalf("deleted object not nil when getting")
	}

	snapshot := state.Copy()
	state.Set(snapshot)

	so0Restored := state.GetStateObject(stateobjaddr0)
	so1Restored := state.GetStateObject(stateobjaddr1)
	// non-deleted is equal (restored)
	compareStateObjects(so0Restored, so0, t)
	// deleted should be nil, both before and after restore of state copy
	if so1Restored != nil {
		t.Fatalf("deleted object not nil after restoring snapshot")
	}
}
Example #6
// checkTrieContents cross references a reconstructed trie with an expected data
// content map.
func checkTrieContents(t *testing.T, db Database, root []byte, content map[string][]byte) {
	// Check root availability and trie contents
	trie, err := New(common.BytesToHash(root), db)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}
func (self *StateObject) ForEachStorage(cb func(key, value common.Hash) bool) {
	// When iterating over the storage check the cache first
	for h, value := range self.cachedStorage {
		cb(h, value)
	}

	it := self.getTrie(self.db.db).Iterator()
	for it.Next() {
		// ignore cached values
		key := common.BytesToHash(self.trie.GetKey(it.Key))
		if _, ok := self.cachedStorage[key]; !ok {
			cb(key, common.BytesToHash(it.Value))
		}
	}
}
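ForEachStorage above visits cached slots first and then any trie-backed slots not already cached. A hedged helper shows the typical way to consume it; collectStorage is hypothetical and not part of the codebase:
func collectStorage(so *StateObject) map[common.Hash]common.Hash {
	storage := make(map[common.Hash]common.Hash)
	so.ForEachStorage(func(key, value common.Hash) bool {
		storage[key] = value
		return true // the bool is part of the callback signature; the iterator above never stops early
	})
	return storage
}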
// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
// fast synchronization. The difference between this and GetHeadBlockHash is that
// whereas the last block hash is only updated upon a full block import, the last
// fast hash is updated when importing pre-processed blocks.
func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
	data, _ := db.Get(headFastKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
	data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
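Both accessors above fall back to the zero hash when the underlying key is missing, which a quick hedged test can demonstrate. TestEmptyAccessors is a hypothetical addition, assuming only the ethdb.NewMemDatabase constructor used throughout these examples:
func TestEmptyAccessors(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	// A pristine database has neither a head fast block nor canonical number mappings.
	if hash := GetHeadFastBlockHash(db); hash != (common.Hash{}) {
		t.Fatalf("unexpected head fast block hash: %x", hash)
	}
	if hash := GetCanonicalHash(db, 42); hash != (common.Hash{}) {
		t.Fatalf("unexpected canonical hash for block 42: %x", hash)
	}
}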
Example #10
func TestEthashConcurrentVerify(t *testing.T) {
	exp, err := NewForTesting()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(exp.Full.Dir)

	block := &testBlock{difficulty: big.NewInt(10)}
	nonce, md := exp.Search(block, nil)
	block.nonce = nonce
	block.mixDigest = common.BytesToHash(md)

	// Verify the block concurrently to check for data races.
	var wg sync.WaitGroup
	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			if !exp.Verify(block) {
				t.Error("Block could not be verified")
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
Example #11
func (bc *ChainManager) setLastState() error {
	data, _ := bc.chainDb.Get([]byte("LastBlock"))
	if len(data) != 0 {
		block := bc.GetBlock(common.BytesToHash(data))
		if block != nil {
			bc.currentBlock = block
			bc.lastBlockHash = block.Hash()
		} else {
			glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
			if bc.recover() {
				glog.Infof("Recover successful")
			} else {
				glog.Fatalf("Recover failed. Please report")
			}
		}
	} else {
		bc.Reset()
	}
	bc.td = bc.currentBlock.Td
	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)

	if glog.V(logger.Info) {
		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
	}

	return nil
}
// Tests that the node iterator indeed walks over the entire database contents.
func TestNodeIteratorCoverage(t *testing.T) {
	// Create some arbitrary test state to iterate
	db, root, _ := makeTestState()

	state, err := New(root, db)
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	// Gather all the node hashes found by the iterator
	hashes := make(map[common.Hash]struct{})
	for it := NewNodeIterator(state); it.Next(); {
		if it.Hash != (common.Hash{}) {
			hashes[it.Hash] = struct{}{}
		}
	}
	// Cross check the hashes and the database itself
	for hash := range hashes {
		if _, err := db.Get(hash.Bytes()); err != nil {
			t.Errorf("failed to retrieve reported node %x: %v", hash, err)
		}
	}
	for _, key := range db.(*ethdb.MemDatabase).Keys() {
		if bytes.HasPrefix(key, []byte("secure-key-")) {
			continue
		}
		if _, ok := hashes[common.BytesToHash(key)]; !ok {
			t.Errorf("state entry not reported %x", key)
		}
	}
}
Example #13
// setDefaults sets defaults on the config
func setDefaults(cfg *Config) {
	if cfg.RuleSet == nil {
		cfg.RuleSet = ruleSet{}
	}

	if cfg.Difficulty == nil {
		cfg.Difficulty = new(big.Int)
	}
	if cfg.Time == nil {
		cfg.Time = big.NewInt(time.Now().Unix())
	}
	if cfg.GasLimit == nil {
		cfg.GasLimit = new(big.Int).Set(common.MaxBig)
	}
	if cfg.GasPrice == nil {
		cfg.GasPrice = new(big.Int)
	}
	if cfg.Value == nil {
		cfg.Value = new(big.Int)
	}
	if cfg.BlockNumber == nil {
		cfg.BlockNumber = new(big.Int)
	}
	if cfg.GetHashFn == nil {
		cfg.GetHashFn = func(n uint64) common.Hash {
			return common.BytesToHash(crypto.Keccak256([]byte(new(big.Int).SetUint64(n).String())))
		}
	}
}
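As a hedged usage sketch (exampleDefaults is hypothetical), a zero-valued Config passed through setDefaults comes back with every nil field shown above populated, so callers can rely on non-nil big.Ints and a usable GetHashFn:
func exampleDefaults() {
	cfg := new(Config)
	setDefaults(cfg)

	fmt.Println(cfg.GasLimit.Cmp(common.MaxBig) == 0) // true: defaulted to the maximum
	fmt.Println(cfg.GetHashFn(1))                     // deterministic placeholder hash for block 1
}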
Example #14
// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	// Create a test header to move around the database and make sure it's really new
	header := &types.Header{Extra: []byte("test header")}
	if entry := GetHeader(db, header.Hash()); entry != nil {
		t.Fatalf("Non existent header returned: %v", entry)
	}
	// Write and verify the header in the database
	if err := WriteHeader(db, header); err != nil {
		t.Fatalf("Failed to write header into database: %v", err)
	}
	if entry := GetHeader(db, header.Hash()); entry == nil {
		t.Fatalf("Stored header not found")
	} else if entry.Hash() != header.Hash() {
		t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
	}
	if entry := GetHeaderRLP(db, header.Hash()); entry == nil {
		t.Fatalf("Stored header RLP not found")
	} else {
		hasher := sha3.NewKeccak256()
		hasher.Write(entry)

		if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
			t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
		}
	}
	// Delete the header and verify the execution
	DeleteHeader(db, header.Hash())
	if entry := GetHeader(db, header.Hash()); entry != nil {
		t.Fatalf("Deleted header returned: %v", entry)
	}
}
Example #15
// Tests that a trie sync will not request nodes multiple times, even if they
// are referenced from multiple places in the trie.
func TestDuplicateAvoidanceTrieSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)

	queue := append([]common.Hash{}, sched.Missing(0)...)
	requested := make(map[common.Hash]struct{})

	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			if _, ok := requested[hash]; ok {
				t.Errorf("hash %x already requested once", hash)
			}
			requested[hash] = struct{}{}

			results[i] = SyncResult{hash, data}
		}
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(0)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
Example #16
func (self *StateDB) GetCodeHash(addr common.Address) common.Hash {
	stateObject := self.GetStateObject(addr)
	if stateObject == nil {
		return common.Hash{}
	}
	return common.BytesToHash(stateObject.CodeHash())
}
Example #17
func testIterativeTrieSync(t *testing.T, batch int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)

	queue := append([]common.Hash{}, sched.Missing(batch)...)
	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(batch)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
func mustConvertHash(in string) common.Hash {
	out, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
	if err != nil {
		panic(fmt.Errorf("invalid hex: %q", in))
	}
	return common.BytesToHash(out)
}
Example #19
func testIterativeRandomTrieSync(t *testing.T, batch int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(batch) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})
		}
		// Feed the retrieved results back and queue new tasks
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(batch) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
Example #20
// GetBlockByNumber returns the canonical block by number or nil if not found
func GetBlockByNumber(db common.Database, number uint64) *types.Block {
	key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
	if len(key) == 0 {
		return nil
	}

	return GetBlockByHash(db, common.BytesToHash(key))
}
Example #21
// CommitTo writes all nodes to the given database.
// Nodes are stored with their sha3 hash as the key.
//
// Committing flushes nodes from memory. Subsequent Get calls will
// load nodes from the trie's database. Calling code must ensure that
// the changes made to db are written back to the trie's attached
// database before using the trie.
func (t *Trie) CommitTo(db DatabaseWriter) (root common.Hash, err error) {
	n, err := t.hashRoot(db)
	if err != nil {
		return (common.Hash{}), err
	}
	t.root = n
	return common.BytesToHash(n.(hashNode)), nil
}
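A minimal round-trip sketch of the contract described above, assuming the New/Update/Get signatures used in the other trie examples here; exampleCommitAndReopen is hypothetical:
func exampleCommitAndReopen() ([]byte, error) {
	db, _ := ethdb.NewMemDatabase()

	tr, err := New(common.Hash{}, db) // fresh trie backed by an in-memory database
	if err != nil {
		return nil, err
	}
	tr.Update([]byte("answer"), []byte{42})

	root, err := tr.CommitTo(db) // flush nodes into db, keyed by their sha3 hash
	if err != nil {
		return nil, err
	}
	reopened, err := New(root, db) // subsequent Gets resolve nodes from db on demand
	if err != nil {
		return nil, err
	}
	return reopened.Get([]byte("answer")), nil // []byte{42}
}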
Example #22
// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
func (s *TrieSync) children(req *request) ([]*request, error) {
	// Gather all the children of the node, regardless of whether they are already known
	type child struct {
		node  *node
		depth int
	}
	children := []child{}

	switch node := (*req.object).(type) {
	case shortNode:
		children = []child{{
			node:  &node.Val,
			depth: req.depth + len(node.Key),
		}}
	case fullNode:
		for i := 0; i < 17; i++ {
			if node[i] != nil {
				children = append(children, child{
					node:  &node[i],
					depth: req.depth + 1,
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children, and request all unknown ones
	requests := make([]*request, 0, len(children))
	for _, child := range children {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (*child.node).(valueNode); ok {
				if err := req.callback(node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, resolve or schedule
		if node, ok := (*child.node).(hashNode); ok {
			// Try to resolve the node from the local database
			blob, _ := s.database.Get(node)
			if local, err := decodeNode(blob); local != nil && err == nil {
				*child.node = local
				continue
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				object:   child.node,
				hash:     common.BytesToHash(node),
				parents:  []*request{req},
				depth:    child.depth,
				callback: req.callback,
			})
		}
	}
	return requests, nil
}
Example #23
func SaveInfo(info *ContractInfo, filename string) (contenthash common.Hash, err error) {
	infojson, err := json.Marshal(info)
	if err != nil {
		return
	}
	contenthash = common.BytesToHash(crypto.Keccak256(infojson))
	err = ioutil.WriteFile(filename, infojson, 0600)
	return
}
Example #24
// Id returns the canonical representation of the event's signature used by the
// abi definition to identify event names and types.
func (e Event) Id() common.Hash {
	types := make([]string, len(e.Inputs))
	i := 0
	for _, input := range e.Inputs {
		types[i] = input.Type.String()
		i++
	}
	return common.BytesToHash(crypto.Keccak256([]byte(fmt.Sprintf("%v(%v)", e.Name, strings.Join(types, ",")))))
}
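To make the scheme concrete: the id is just the Keccak-256 hash of the canonical "Name(type1,type2,...)" string. A hedged illustration using the well-known ERC-20 Transfer signature as a worked input (exampleTransferTopic is hypothetical):
func exampleTransferTopic() common.Hash {
	// Expected: 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef
	return common.BytesToHash(crypto.Keccak256([]byte("Transfer(address,address,uint256)")))
}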
// getAddr gets the storage value at the given address from the trie
func (c *StateObject) getAddr(ctx context.Context, addr common.Hash) (common.Hash, error) {
	var ret []byte
	val, err := c.trie.Get(ctx, addr[:])
	if err != nil {
		return common.Hash{}, err
	}
	rlp.DecodeBytes(val, &ret)
	return common.BytesToHash(ret), nil
}
Example #26
func (self *StateDB) GetCode(addr common.Address) []byte {
	stateObject := self.GetStateObject(addr)
	if stateObject != nil {
		code := stateObject.Code(self.db)
		key := common.BytesToHash(stateObject.CodeHash())
		self.codeSizeCache.Add(key, len(code))
		return code
	}
	return nil
}
Example #27
func DeriveSha(list DerivableList) common.Hash {
	db, _ := ethdb.NewMemDatabase()
	trie := trie.New(nil, db)
	for i := 0; i < list.Len(); i++ {
		key, _ := rlp.EncodeToBytes(uint(i))
		trie.Update(key, list.GetRlp(i))
	}

	return common.BytesToHash(trie.Root())
}
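DeriveSha only needs the two methods it calls on DerivableList: Len and the i-th element's RLP encoding. A hypothetical implementation over pre-encoded blobs (rlpList is not part of the codebase) makes that contract explicit:
type rlpList [][]byte

func (l rlpList) Len() int            { return len(l) }
func (l rlpList) GetRlp(i int) []byte { return l[i] }

// Usage sketch: root := DeriveSha(rlpList{encodedItem0, encodedItem1})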
Example #28
// DeliverNodeData injects a node state data retrieval response into the queue.
// The method returns the number of node state entries originally requested, and
// the number of them actually accepted from the delivery.
func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, int)) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.statePendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	stateReqTimer.UpdateSince(request.Time)
	delete(q.statePendPool, id)

	// If no data was retrieved, mark the requested hashes as unavailable for the origin peer
	if len(data) == 0 {
		for hash := range request.Hashes {
			request.Peer.MarkLacking(hash)
		}
	}
	// Iterate over the downloaded data and verify each of them
	accepted, errs := 0, make([]error, 0)
	process := []trie.SyncResult{}
	for _, blob := range data {
		// Skip any state trie entries that were not requested
		hash := common.BytesToHash(crypto.Sha3(blob))
		if _, ok := request.Hashes[hash]; !ok {
			errs = append(errs, fmt.Errorf("non-requested state data %x", hash))
			continue
		}
		// Inject the next state trie item into the processing queue
		process = append(process, trie.SyncResult{hash, blob})
		accepted++

		delete(request.Hashes, hash)
		delete(q.stateTaskPool, hash)
	}
	// Start the asynchronous node state data injection
	atomic.AddInt32(&q.stateProcessors, 1)
	go func() {
		defer atomic.AddInt32(&q.stateProcessors, -1)
		q.deliverNodeData(process, callback)
	}()
	// Return all failed or missing fetches to the queue
	for hash, index := range request.Hashes {
		q.stateTaskQueue.Push(hash, float32(index))
	}
	// If none of the data items were good, it's a stale delivery
	switch {
	case len(errs) == 0:
		return accepted, nil
	case len(errs) == len(request.Hashes):
		return accepted, errStaleDelivery
	default:
		return accepted, fmt.Errorf("multiple failures: %v", errs)
	}
}
Example #29
// Tests that an empty trie is not scheduled for syncing.
func TestEmptyTrieSync(t *testing.T) {
	emptyA, _ := New(common.Hash{}, nil)
	emptyB, _ := New(emptyRoot, nil)

	for i, trie := range []*Trie{emptyA, emptyB} {
		db, _ := ethdb.NewMemDatabase()
		if req := NewTrieSync(common.BytesToHash(trie.Root()), db, nil).Missing(1); len(req) != 0 {
			t.Errorf("test %d: content requested for empty trie: %v", i, req)
		}
	}
}
Example #30
func (s *StateSuite) TestSnapshot(c *checker.C) {
	stateobjaddr := toAddr([]byte("aa"))
	var storageaddr common.Hash
	data1 := common.BytesToHash([]byte{42})
	data2 := common.BytesToHash([]byte{43})

	// set initial state object value
	s.state.SetState(stateobjaddr, storageaddr, data1)
	// get snapshot of current state
	snapshot := s.state.Copy()

	// set new state object value
	s.state.SetState(stateobjaddr, storageaddr, data2)
	// restore snapshot
	s.state.Set(snapshot)

	// get state storage value
	res := s.state.GetState(stateobjaddr, storageaddr)

	c.Assert(data1, checker.DeepEquals, res)
}