// Tests that receipts can be stored and retrieved.
func TestReceiptStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	receipt1 := &types.Receipt{
		PostState:         []byte{0x01},
		CumulativeGasUsed: big.NewInt(1),
		Logs: vm.Logs{
			&vm.Log{Address: common.BytesToAddress([]byte{0x11})},
			&vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
		},
		TxHash:          common.BytesToHash([]byte{0x11, 0x11}),
		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
		GasUsed:         big.NewInt(111111),
	}
	receipt2 := &types.Receipt{
		PostState:         []byte{0x02},
		CumulativeGasUsed: big.NewInt(2),
		Logs: vm.Logs{
			&vm.Log{Address: common.BytesToAddress([]byte{0x22})},
			&vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
		},
		TxHash:          common.BytesToHash([]byte{0x22, 0x22}),
		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
		GasUsed:         big.NewInt(222222),
	}
	receipts := []*types.Receipt{receipt1, receipt2}

	// Check that no receipt entries are in a pristine database
	for i, receipt := range receipts {
		if r := GetReceipt(db, receipt.TxHash); r != nil {
			t.Fatalf("receipt #%d [%x]: non existent receipt returned: %v", i, receipt.TxHash, r)
		}
	}
	// Insert all the receipts into the database, and verify contents
	if err := WriteReceipts(db, receipts); err != nil {
		t.Fatalf("failed to write receipts: %v", err)
	}
	for i, receipt := range receipts {
		if r := GetReceipt(db, receipt.TxHash); r == nil {
			t.Fatalf("receipt #%d [%x]: receipt not found", i, receipt.TxHash)
		} else {
			rlpHave, _ := rlp.EncodeToBytes(r)
			rlpWant, _ := rlp.EncodeToBytes(receipt)

			if !bytes.Equal(rlpHave, rlpWant) {
				t.Fatalf("receipt #%d [%x]: receipt mismatch: have %v, want %v", i, receipt.TxHash, r, receipt)
			}
		}
	}
	// Delete the receipts and check purge
	for i, receipt := range receipts {
		DeleteReceipt(db, receipt.TxHash)
		if r := GetReceipt(db, receipt.TxHash); r != nil {
			t.Fatalf("receipt #%d [%x]: deleted receipt returned: %v", i, receipt.TxHash, r)
		}
	}
}
Example #2
// upgradeChainDatabase ensures that the chain database stores blocks split into
// separate header and body entries.
func upgradeChainDatabase(db ethdb.Database) error {
	// Short circuit if the head block is stored already as separate header and body
	data, err := db.Get([]byte("LastBlock"))
	if err != nil {
		return nil
	}
	head := common.BytesToHash(data)

	if block := core.GetBlockByHashOld(db, head); block == nil {
		return nil
	}
	// At least some of the database is still in the old format, upgrade (skip the head block!)
	glog.V(logger.Info).Info("Old database detected, upgrading...")

	if db, ok := db.(*ethdb.LDBDatabase); ok {
		blockPrefix := []byte("block-hash-")
		for it := db.NewIterator(); it.Next(); {
			// Skip anything other than a combined block
			if !bytes.HasPrefix(it.Key(), blockPrefix) {
				continue
			}
			// Skip the head block (merge last to signal upgrade completion)
			if bytes.HasSuffix(it.Key(), head.Bytes()) {
				continue
			}
			// Load the block, split and serialize (order!)
			block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix)))

			if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil {
				return err
			}
			if err := core.WriteBody(db, block.Hash(), block.Body()); err != nil {
				return err
			}
			if err := core.WriteHeader(db, block.Header()); err != nil {
				return err
			}
			if err := db.Delete(it.Key()); err != nil {
				return err
			}
		}
		// Lastly, upgrade the head block, disabling the upgrade mechanism
		current := core.GetBlockByHashOld(db, head)

		if err := core.WriteTd(db, current.Hash(), current.DeprecatedTd()); err != nil {
			return err
		}
		if err := core.WriteBody(db, current.Hash(), current.Body()); err != nil {
			return err
		}
		if err := core.WriteHeader(db, current.Header()); err != nil {
			return err
		}
	}
	return nil
}
func TestNatspecE2E(t *testing.T) {
	t.Skip()

	tf := testInit(t)
	defer tf.ethereum.Stop()

	resolver.CreateContracts(tf.xeth, testAccount)
	t.Logf("URLHint contract registered at %v", resolver.URLHintContractAddress)
	t.Logf("HashReg contract registered at %v", resolver.HashRegContractAddress)
	tf.applyTxs()

	ioutil.WriteFile("/tmp/"+testFileName, []byte(testDocs), os.ModePerm)
	dochash := common.BytesToHash(crypto.Sha3([]byte(testDocs)))

	codehex := tf.xeth.CodeAt(resolver.HashRegContractAddress)
	codehash := common.BytesToHash(crypto.Sha3(common.Hex2Bytes(codehex[2:])))

	tf.setOwner()
	tf.registerNatSpec(codehash, dochash)
	tf.registerURL(dochash, "file:///"+testFileName)
	tf.applyTxs()

	chash, err := tf.testResolver().KeyToContentHash(codehash)
	if err != nil {
		t.Errorf("Can't find content hash")
	}
	t.Logf("chash = %x  err = %v", chash, err)
	url, err2 := tf.testResolver().ContentHashToUrl(dochash)
	if err2 != nil {
		t.Errorf("Can't find URL hint")
	}
	t.Logf("url = %v  err = %v", url, err2)

	// NatSpec info for the register method of the HashReg contract is installed;
	// now reuse the same transactions to check the confirmation messages

	tf.makeNatSpec = true
	tf.registerNatSpec(codehash, dochash)
	t.Logf("Confirm message: %v\n", tf.lastConfirm)
	if tf.lastConfirm != testExpNotice {
		t.Errorf("Wrong confirm message, expected '%v', got '%v'", testExpNotice, tf.lastConfirm)
	}

	tf.setOwner()
	t.Logf("Confirm message for unknown method: %v\n", tf.lastConfirm)
	if tf.lastConfirm != testExpNotice2 {
		t.Errorf("Wrong confirm message, expected '%v', got '%v'", testExpNotice2, tf.lastConfirm)
	}

	tf.registerURL(dochash, "file:///test.content")
	t.Logf("Confirm message for unknown contract: %v\n", tf.lastConfirm)
	if tf.lastConfirm != testExpNotice3 {
		t.Errorf("Wrong confirm message, expected '%v', got '%v'", testExpNotice3, tf.lastConfirm)
	}

}
// Tests that receipts associated with a single block can be stored and retrieved.
func TestBlockReceiptStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	receipt1 := &types.Receipt{
		PostState:         []byte{0x01},
		CumulativeGasUsed: big.NewInt(1),
		Logs: vm.Logs{
			&vm.Log{Address: common.BytesToAddress([]byte{0x11})},
			&vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
		},
		TxHash:          common.BytesToHash([]byte{0x11, 0x11}),
		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
		GasUsed:         big.NewInt(111111),
	}
	receipt2 := &types.Receipt{
		PostState:         []byte{0x02},
		CumulativeGasUsed: big.NewInt(2),
		Logs: vm.Logs{
			&vm.Log{Address: common.BytesToAddress([]byte{0x22})},
			&vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
		},
		TxHash:          common.BytesToHash([]byte{0x22, 0x22}),
		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
		GasUsed:         big.NewInt(222222),
	}
	receipts := []*types.Receipt{receipt1, receipt2}

	// Check that no receipt entries are in a pristine database
	hash := common.BytesToHash([]byte{0x03, 0x14})
	if rs := GetBlockReceipts(db, hash); len(rs) != 0 {
		t.Fatalf("non existent receipts returned: %v", rs)
	}
	// Insert the receipt slice into the database and check presence
	if err := WriteBlockReceipts(db, hash, receipts); err != nil {
		t.Fatalf("failed to write block receipts: %v", err)
	}
	if rs := GetBlockReceipts(db, hash); len(rs) == 0 {
		t.Fatalf("no receipts returned")
	} else {
		for i := 0; i < len(receipts); i++ {
			rlpHave, _ := rlp.EncodeToBytes(rs[i])
			rlpWant, _ := rlp.EncodeToBytes(receipts[i])

			if !bytes.Equal(rlpHave, rlpWant) {
				t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i])
			}
		}
	}
	// Delete the receipt slice and check purge
	DeleteBlockReceipts(db, hash)
	if rs := GetBlockReceipts(db, hash); len(rs) != 0 {
		t.Fatalf("deleted receipts returned: %v", rs)
	}
}
Example #5
// use testing instead of checker because checker does not support
// printing/logging in tests (-check.vv does not work)
func TestSnapshot2(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	state, _ := New(common.Hash{}, db)

	stateobjaddr0 := toAddr([]byte("so0"))
	stateobjaddr1 := toAddr([]byte("so1"))
	var storageaddr common.Hash

	data0 := common.BytesToHash([]byte{17})
	data1 := common.BytesToHash([]byte{18})

	state.SetState(stateobjaddr0, storageaddr, data0)
	state.SetState(stateobjaddr1, storageaddr, data1)

	// db, trie are already non-empty values
	so0 := state.GetStateObject(stateobjaddr0)
	so0.balance = big.NewInt(42)
	so0.nonce = 43
	so0.code = []byte{'c', 'a', 'f', 'e'}
	so0.codeHash = so0.CodeHash()
	so0.remove = true
	so0.deleted = false
	so0.dirty = false
	state.SetStateObject(so0)

	// and one with deleted == true
	so1 := state.GetStateObject(stateobjaddr1)
	so1.balance = big.NewInt(52)
	so1.nonce = 53
	so1.code = []byte{'c', 'a', 'f', 'e', '2'}
	so1.codeHash = so1.CodeHash()
	so1.remove = true
	so1.deleted = true
	so1.dirty = true
	state.SetStateObject(so1)

	so1 = state.GetStateObject(stateobjaddr1)
	if so1 != nil {
		t.Fatalf("deleted object not nil when getting")
	}

	snapshot := state.Copy()
	state.Set(snapshot)

	so0Restored := state.GetStateObject(stateobjaddr0)
	so1Restored := state.GetStateObject(stateobjaddr1)
	// non-deleted is equal (restored)
	compareStateObjects(so0Restored, so0, t)
	// deleted should be nil, both before and after restore of state copy
	if so1Restored != nil {
		t.Fatalf("deleted object not nil after restoring snapshot")
	}
}
Example #6
func (self *StateObject) ForEachStorage(cb func(key, value common.Hash) bool) {
	// When iterating over the storage check the cache first
	for h, value := range self.storage {
		cb(h, value)
	}

	it := self.trie.Iterator()
	for it.Next() {
		// ignore cached values
		key := common.BytesToHash(self.trie.GetKey(it.Key))
		if _, ok := self.storage[key]; !ok {
			cb(key, common.BytesToHash(it.Value))
		}
	}
}
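
For illustration, the callback above can be used to walk a contract's storage without touching the trie directly. A minimal sketch (not part of the original source), assuming a *StateObject obtained the same way as in the snapshot test above:

// countStorage is a hypothetical helper: it counts the storage slots reported by
// ForEachStorage. Note that the implementation above ignores the callback's
// boolean return value, so returning true or false makes no difference here.
func countStorage(so *StateObject) int {
	count := 0
	so.ForEachStorage(func(key, value common.Hash) bool {
		count++
		return true
	})
	return count
}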
Example #7
// Tests that the node iterator indeed walks over the entire database contents.
func TestNodeIteratorCoverage(t *testing.T) {
	// Create some arbitrary test state to iterate
	db, root, _ := makeTestState()

	state, err := New(root, db)
	if err != nil {
		t.Fatalf("failed to create state trie at %x: %v", root, err)
	}
	// Gather all the node hashes found by the iterator
	hashes := make(map[common.Hash]struct{})
	for it := NewNodeIterator(state); it.Next(); {
		if it.Hash != (common.Hash{}) {
			hashes[it.Hash] = struct{}{}
		}
	}
	// Cross check the hashes and the database itself
	for hash := range hashes {
		if _, err := db.Get(hash.Bytes()); err != nil {
			t.Errorf("failed to retrieve reported node %x: %v", hash, err)
		}
	}
	for _, key := range db.(*ethdb.MemDatabase).Keys() {
		if bytes.HasPrefix(key, []byte("secure-key-")) {
			continue
		}
		if _, ok := hashes[common.BytesToHash(key)]; !ok {
			t.Errorf("state entry not reported %x", key)
		}
	}
}
Example #8
// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
// fast synchronization. The difference between this and GetHeadBlockHash is that
// whereas the last block hash is only updated upon a full block import, the last
// fast hash is updated when importing pre-processed blocks.
func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
	data, _ := db.Get(headFastKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
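
To put the comment above in context, the fast-sync head marker is typically read alongside the regular head marker. A minimal sketch (illustrative, not original source), assuming a GetHeadBlockHash accessor of the same shape exists next to the function above:

// headStatus is a hypothetical helper comparing the two head markers. It assumes
// a GetHeadBlockHash(db) accessor analogous to GetHeadFastBlockHash above.
func headStatus(db ethdb.Database) (full, fast common.Hash, caughtUp bool) {
	full = GetHeadBlockHash(db)     // advanced only by full block imports
	fast = GetHeadFastBlockHash(db) // advanced when importing pre-processed blocks
	return full, fast, full == fast
}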
Example #9
// SubscriptionId returns the canonical channel name for transactor and beneficiary
func (c *Contract) SubscriptionId(from common.Address, serviceId *big.Int) common.Hash {
	var id []byte
	if err := c.Call(&id, "makeSubscriptionId", from, serviceId); err != nil {
		log15.Warn("makeSubscriptionId", "error", err)
	}
	return common.BytesToHash(id)
}
Example #10
func TestEthashConcurrentVerify(t *testing.T) {
	eth, err := NewForTesting()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(eth.Full.Dir)

	block := &testBlock{difficulty: big.NewInt(10)}
	nonce, md := eth.Search(block, nil)
	block.nonce = nonce
	block.mixDigest = common.BytesToHash(md)

	// Verify the block concurrently to check for data races.
	var wg sync.WaitGroup
	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			if !eth.Verify(block) {
				t.Error("Block could not be verified")
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
Example #11
func testIterativeTrieSync(t *testing.T, batch int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)

	queue := append([]common.Hash{}, sched.Missing(batch)...)
	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data}
		}
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(batch)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
func mustConvertHash(in string) common.Hash {
	out, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
	if err != nil {
		panic(fmt.Errorf("invalid hex: %q", in))
	}
	return common.BytesToHash(out)
}
Example #13
func testIterativeRandomTrieSync(t *testing.T, batch int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(batch) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data})
		}
		// Feed the retrieved results back and queue new tasks
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(batch) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
Example #14
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
	data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
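
A canonical-number lookup is usually the first half of a number-based fetch: resolve the number to a hash, then load the data by hash. A minimal sketch (not from the original source), assuming the GetHeader accessor that appears in the header storage test further down:

// getCanonicalHeader is a hypothetical helper: it maps a block number to its
// canonical hash via GetCanonicalHash and then loads the header by hash,
// returning nil when the number has no canonical entry.
func getCanonicalHeader(db ethdb.Database, number uint64) *types.Header {
	hash := GetCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return GetHeader(db, hash)
}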
Example #15
// sets defaults on the config
func setDefaults(cfg *Config) {
	if cfg.Difficulty == nil {
		cfg.Difficulty = new(big.Int)
	}
	if cfg.Time == nil {
		cfg.Time = big.NewInt(time.Now().Unix())
	}
	if cfg.GasLimit == nil {
		cfg.GasLimit = new(big.Int).Set(common.MaxBig)
	}
	if cfg.GasPrice == nil {
		cfg.GasPrice = new(big.Int)
	}
	if cfg.Value == nil {
		cfg.Value = new(big.Int)
	}
	if cfg.BlockNumber == nil {
		cfg.BlockNumber = new(big.Int)
	}
	if cfg.GetHashFn == nil {
		cfg.GetHashFn = func(n uint64) common.Hash {
			return common.BytesToHash(crypto.Keccak256([]byte(new(big.Int).SetUint64(n).String())))
		}
	}
}
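
The only non-obvious default above is GetHashFn: with no real chain attached, the hash for block number n is simply the Keccak-256 hash of n's decimal string. A standalone restatement of that computation (illustrative only):

// defaultBlockHash mirrors the GetHashFn default above: it hashes the decimal
// string representation of the block number with Keccak-256, so block 1 maps to
// crypto.Keccak256([]byte("1")).
func defaultBlockHash(n uint64) common.Hash {
	return common.BytesToHash(crypto.Keccak256([]byte(new(big.Int).SetUint64(n).String())))
}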
Example #16
// also called by admin.contractInfo.get
func FetchDocsForContract(contractAddress string, xeth *xeth.XEth, http *docserver.DocServer) (content []byte, err error) {
	// retrieve contract hash from state
	codehex := xeth.CodeAt(contractAddress)
	codeb := xeth.CodeAtBytes(contractAddress)

	if codehex == "0x" {
		err = fmt.Errorf("contract (%v) not found", contractAddress)
		return
	}
	codehash := common.BytesToHash(crypto.Sha3(codeb))
	// set up nameresolver with natspecreg + urlhint contract addresses
	res := resolver.New(xeth)

	// resolve host via HashReg/UrlHint Resolver
	uri, hash, err := res.KeyToUrl(codehash)
	if err != nil {
		return
	}

	// get content via http client and authenticate content using hash
	content, err = http.GetAuthContent(uri, hash)
	if err != nil {
		return
	}

	return
}
// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	// Create a test header to move around the database and make sure it's really new
	header := &types.Header{Extra: []byte("test header")}
	if entry := GetHeader(db, header.Hash()); entry != nil {
		t.Fatalf("Non existent header returned: %v", entry)
	}
	// Write and verify the header in the database
	if err := WriteHeader(db, header); err != nil {
		t.Fatalf("Failed to write header into database: %v", err)
	}
	if entry := GetHeader(db, header.Hash()); entry == nil {
		t.Fatalf("Stored header not found")
	} else if entry.Hash() != header.Hash() {
		t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
	}
	if entry := GetHeaderRLP(db, header.Hash()); entry == nil {
		t.Fatalf("Stored header RLP not found")
	} else {
		hasher := sha3.NewKeccak256()
		hasher.Write(entry)

		if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
			t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
		}
	}
	// Delete the header and verify the execution
	DeleteHeader(db, header.Hash())
	if entry := GetHeader(db, header.Hash()); entry != nil {
		t.Fatalf("Deleted header returned: %v", entry)
	}
}
Example #18
func (bc *ChainManager) setLastState() error {
	data, _ := bc.chainDb.Get([]byte("LastBlock"))
	if len(data) != 0 {
		block := bc.GetBlock(common.BytesToHash(data))
		if block != nil {
			bc.currentBlock = block
			bc.lastBlockHash = block.Hash()
		} else {
			glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
			if bc.recover() {
				glog.Infof("Recover successful")
			} else {
				glog.Fatalf("Recover failed. Please report")
			}
		}
	} else {
		bc.Reset()
	}
	bc.td = bc.currentBlock.Td
	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)

	if glog.V(logger.Info) {
		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
	}

	return nil
}
Example #19
// Tests that a trie sync will not request nodes multiple times, even if they
// are referenced by more than one parent.
func TestDuplicateAvoidanceTrieSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)

	queue := append([]common.Hash{}, sched.Missing(0)...)
	requested := make(map[common.Hash]struct{})

	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			if _, ok := requested[hash]; ok {
				t.Errorf("hash %x already requested once", hash)
			}
			requested[hash] = struct{}{}

			results[i] = SyncResult{hash, data}
		}
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(0)...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
Example #20
// CommitTo writes all nodes to the given database.
// Nodes are stored with their sha3 hash as the key.
//
// Committing flushes nodes from memory. Subsequent Get calls will
// load nodes from the trie's database. Calling code must ensure that
// the changes made to db are written back to the trie's attached
// database before using the trie.
func (t *Trie) CommitTo(db DatabaseWriter) (root common.Hash, err error) {
	n, err := t.hashRoot(db)
	if err != nil {
		return (common.Hash{}), err
	}
	t.root = n
	return common.BytesToHash(n.(hashNode)), nil
}
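
Putting the comment above into practice: build a trie in memory, insert a value and flush the nodes to a database. A minimal sketch; the New constructor matches its use in checkTrieContents further down, while the Update(key, value []byte) signature and the use of an in-memory ethdb as the DatabaseWriter are assumptions:

// commitExample is a hypothetical helper: it creates an empty trie backed by an
// in-memory database, inserts a single key/value pair and commits the nodes,
// returning the resulting root hash.
func commitExample() (common.Hash, error) {
	db, _ := ethdb.NewMemDatabase()
	tr, err := New(common.Hash{}, db)
	if err != nil {
		return common.Hash{}, err
	}
	tr.Update([]byte("key"), []byte("value"))
	return tr.CommitTo(db)
}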
Example #21
// GetBlockByNumber returns the canonical block by number or nil if not found
func GetBlockByNumber(db common.Database, number uint64) *types.Block {
	key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
	if len(key) == 0 {
		return nil
	}

	return GetBlockByHash(db, common.BytesToHash(key))
}
Example #22
// non-blocking version
func (self *ChainManager) getBlockByNumber(num uint64) *types.Block {
	key, _ := self.blockDb.Get(append(blockNumPre, big.NewInt(int64(num)).Bytes()...))
	if len(key) == 0 {
		return nil
	}

	return self.GetBlock(common.BytesToHash(key))
}
Example #23
// checkTrieContents cross references a reconstructed trie with an expected data
// content map.
func checkTrieContents(t *testing.T, db Database, root []byte, content map[string][]byte) {
	// Remove any potentially cached data from the trie synchronisation
	globalCache.Clear()

	// Check root availability and trie contents
	trie, err := New(common.BytesToHash(root), db)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}
Example #24
// Id returns the canonical representation of the event's signature used by the
// abi definition to identify event names and types.
func (e Event) Id() common.Hash {
	types := make([]string, len(e.Inputs))
	for i, input := range e.Inputs {
		types[i] = input.Type.String()
	}
	return common.BytesToHash(crypto.Keccak256([]byte(fmt.Sprintf("%v(%v)", e.Name, strings.Join(types, ",")))))
}
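
In other words, an event identifier is just the Keccak-256 hash of its canonical "Name(type1,type2,...)" signature string. A minimal sketch of the same computation for an explicitly written-out signature (illustrative, not part of the abi package):

// eventIdFromSignature is a hypothetical helper mirroring Event.Id above: given a
// canonical signature such as "Transfer(address,uint256)", it returns the
// Keccak-256 hash used to identify the event in log topics.
func eventIdFromSignature(signature string) common.Hash {
	return common.BytesToHash(crypto.Keccak256([]byte(signature)))
}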
Example #25
func ExtractInfo(contract *Contract, filename string) (contenthash common.Hash, err error) {
	contractInfo, err := json.Marshal(contract.Info)
	if err != nil {
		return
	}
	contenthash = common.BytesToHash(crypto.Sha3(contractInfo))
	err = ioutil.WriteFile(filename, contractInfo, 0600)
	return
}
Example #26
// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
func (s *TrieSync) children(req *request) ([]*request, error) {
	// Gather all the children of the node, irrespective of whether they are known or not
	type child struct {
		node  *node
		depth int
	}
	children := []child{}

	switch node := (*req.object).(type) {
	case shortNode:
		children = []child{{
			node:  &node.Val,
			depth: req.depth + len(node.Key),
		}}
	case fullNode:
		for i := 0; i < 17; i++ {
			if node[i] != nil {
				children = append(children, child{
					node:  &node[i],
					depth: req.depth + 1,
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children, and request all unknown ones
	requests := make([]*request, 0, len(children))
	for _, child := range children {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (*child.node).(valueNode); ok {
				if err := req.callback(node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, resolve or schedule
		if node, ok := (*child.node).(hashNode); ok {
			// Try to resolve the node from the local database
			blob, _ := s.database.Get(node)
			if local, err := decodeNode(blob); local != nil && err == nil {
				*child.node = local
				continue
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				object:   child.node,
				hash:     common.BytesToHash(node),
				parents:  []*request{req},
				depth:    child.depth,
				callback: req.callback,
			})
		}
	}
	return requests, nil
}
Example #27
// getAddr gets the storage value at the given address from the trie
func (c *StateObject) getAddr(ctx context.Context, addr common.Hash) (common.Hash, error) {
	var ret []byte
	val, err := c.trie.Get(ctx, addr[:])
	if err != nil {
		return common.Hash{}, err
	}
	rlp.DecodeBytes(val, &ret)
	return common.BytesToHash(ret), nil
}
Example #28
func New(xeth *xeth.XEth, tx string, http *docserver.DocServer) (self *NatSpec, err error) {

	// extract contract address from tx

	var obj map[string]json.RawMessage
	err = json.Unmarshal([]byte(tx), &obj)
	if err != nil {
		return
	}
	var tmp []map[string]string
	err = json.Unmarshal(obj["params"], &tmp)
	if err != nil {
		return
	}
	contractAddress := tmp[0]["to"]

	// retrieve contract hash from state
	if !xeth.IsContract(contractAddress) {
		err = fmt.Errorf("NatSpec error: contract not found")
		return
	}
	codehex := xeth.CodeAt(contractAddress)
	codeHash := common.BytesToHash(crypto.Sha3(common.Hex2Bytes(codehex[2:])))
	// parse out host/domain

	// set up nameresolver with natspecreg + urlhint contract addresses
	res := resolver.New(
		xeth,
		resolver.URLHintContractAddress,
		resolver.HashRegContractAddress,
	)

	// resolve host via HashReg/UrlHint Resolver
	uri, hash, err := res.KeyToUrl(codeHash)
	if err != nil {
		return
	}

	// get content via http client and authenticate content using hash
	content, err := http.GetAuthContent(uri, hash)
	if err != nil {
		return
	}

	// get abi, userdoc
	var obj2 map[string]json.RawMessage
	err = json.Unmarshal(content, &obj2)
	if err != nil {
		return
	}

	abi := []byte(obj2["abi"])
	userdoc := []byte(obj2["userdoc"])

	self, err = NewWithDocs(abi, userdoc, tx)
	return
}
Example #29
func SaveInfo(info *ContractInfo, filename string) (contenthash common.Hash, err error) {
	infojson, err := json.Marshal(info)
	if err != nil {
		return
	}
	contenthash = common.BytesToHash(crypto.Keccak256(infojson))
	err = ioutil.WriteFile(filename, infojson, 0600)
	return
}
Example #30
// DeliverNodeData injects a node state data retrieval response into the queue.
func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, int)) error {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the data was never requested
	request := q.statePendPool[id]
	if request == nil {
		return errNoFetchesPending
	}
	stateReqTimer.UpdateSince(request.Time)
	delete(q.statePendPool, id)

	// If no data was retrieved, mark their hashes as unavailable for the origin peer
	if len(data) == 0 {
		for hash := range request.Hashes {
			request.Peer.ignored.Add(hash)
		}
	}
	// Iterate over the downloaded data and verify each of them
	errs := make([]error, 0)
	process := []trie.SyncResult{}
	for _, blob := range data {
		// Skip any state entries that were not requested
		hash := common.BytesToHash(crypto.Sha3(blob))
		if _, ok := request.Hashes[hash]; !ok {
			errs = append(errs, fmt.Errorf("non-requested state data %x", hash))
			continue
		}
		// Inject the next state trie item into the processing queue
		process = append(process, trie.SyncResult{hash, blob})

		delete(request.Hashes, hash)
		delete(q.stateTaskPool, hash)
	}
	// Start the asynchronous node state data injection
	atomic.AddInt32(&q.stateProcessors, 1)
	go func() {
		defer atomic.AddInt32(&q.stateProcessors, -1)
		q.deliverNodeData(process, callback)
	}()
	// Return all failed or missing fetches to the queue
	for hash, index := range request.Hashes {
		q.stateTaskQueue.Push(hash, float32(index))
	}
	// If none of the data items were good, it's a stale delivery
	switch {
	case len(errs) == 0:
		return nil

	case len(errs) == len(request.Hashes):
		return errStaleDelivery

	default:
		return fmt.Errorf("multiple failures: %v", errs)
	}
}