func (bc *ChainManager) setLastState() error {
	head := GetHeadBlockHash(bc.chainDb)
	if head != (common.Hash{}) {
		block := bc.GetBlock(head)
		if block != nil {
			bc.currentBlock = block
		} else {
			glog.Infof("LastBlock (%x) not found. Recovering...\n", head)
			if bc.recover() {
				glog.Infof("Recover successful")
			} else {
				glog.Fatalf("Recover failed. Please report")
			}
		}
	} else {
		bc.Reset()
	}
	bc.td = bc.GetTd(bc.currentBlock.Hash())
	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)

	if glog.V(logger.Info) {
		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
	}

	return nil
}
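GetHeadBlockHash is not shown in this listing; a minimal sketch of what it presumably does, assuming the head hash is stored under the fixed "LastBlock" key that Example #2 below reads directly (the body is illustrative, not the core package's actual implementation):

func GetHeadBlockHash(db ethdb.Database) common.Hash {
	// Illustrative only: resolve the head pointer from a fixed key and return
	// the zero hash when no head has been written yet.
	data, _ := db.Get([]byte("LastBlock"))
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}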
Example #2
func (bc *ChainManager) setLastState() error {
	data, _ := bc.chainDb.Get([]byte("LastBlock"))
	if len(data) != 0 {
		block := bc.GetBlock(common.BytesToHash(data))
		if block != nil {
			bc.currentBlock = block
			bc.lastBlockHash = block.Hash()
		} else {
			glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
			if bc.recover() {
				glog.Infof("Recover successful")
			} else {
				glog.Fatalf("Recover failed. Please report")
			}
		}
	} else {
		bc.Reset()
	}
	bc.td = bc.currentBlock.Td
	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)

	if glog.V(logger.Info) {
		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
	}

	return nil
}
Example #3
func runProgram(program *Program, pcstart uint64, mem *Memory, stack *stack, env Environment, contract *Contract, input []byte) ([]byte, error) {
	contract.Input = input

	var (
		pc         uint64 = program.mapping[pcstart]
		instrCount        = 0
	)

	if glog.V(logger.Debug) {
		glog.Infof("running JIT program %x\n", program.Id[:4])
		tstart := time.Now()
		defer func() {
			glog.Infof("JIT program %x done. time: %v instrc: %v\n", program.Id[:4], time.Since(tstart), instrCount)
		}()
	}

	for pc < uint64(len(program.instructions)) {
		instrCount++

		instr := program.instructions[pc]

		ret, err := instr.do(program, &pc, env, contract, mem, stack)
		if err != nil {
			return nil, err
		}

		if instr.halts() {
			return contract.Return(ret), nil
		}
	}

	contract.Input = nil

	return contract.Return(nil), nil
}
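The Program type is not shown here; runProgram starts at program.mapping[pcstart] because the JIT presumably flattens bytecode into an instruction slice and records, for each byte offset, the index of the compiled instruction. A hypothetical sketch of building such a mapping (buildMapping is illustrative, not part of the jit package):

func buildMapping(code []byte) map[uint64]uint64 {
	// Illustrative only: map each bytecode offset to the index of its compiled
	// instruction so a bytecode pc can be translated into an instruction index.
	mapping := make(map[uint64]uint64)
	var instrIdx uint64
	for pc := uint64(0); pc < uint64(len(code)); pc++ {
		mapping[pc] = instrIdx
		if op := OpCode(code[pc]); op >= PUSH1 && op <= PUSH32 {
			pc += uint64(op - PUSH1 + 1) // skip the push argument bytes
		}
		instrIdx++
	}
	return mapping
}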
// validatePool removes invalid and processed transactions from the main pool.
// If a transaction is removed for being invalid (e.g. out of funds), all sub-
// sequent (still valid) transactions are moved back into the future queue. This
// is important to prevent a drained account from DoSing the network with non-
// executable transactions.
func (pool *TxPool) validatePool() {
	state, err := pool.currentState()
	if err != nil {
		glog.V(logger.Info).Infof("failed to get current state: %v", err)
		return
	}
	balanceCache := make(map[common.Address]*big.Int)

	// Clean up the pending pool, accumulating invalid nonces
	gaps := make(map[common.Address]uint64)

	for hash, tx := range pool.pending {
		sender, _ := tx.From() // err already checked

		// Perform light nonce and balance validation
		balance := balanceCache[sender]
		if balance == nil {
			balance = state.GetBalance(sender)
			balanceCache[sender] = balance
		}
		if past := state.GetNonce(sender) > tx.Nonce(); past || balance.Cmp(tx.Cost()) < 0 {
			// Drop the transaction: its nonce is already in the past or the account can no longer cover its cost
			if glog.V(logger.Core) {
				glog.Infof("removed tx (%v) from pool: low tx nonce or out of funds\n", tx)
			}
			delete(pool.pending, hash)

			// Track the smallest invalid nonce to postpone subsequent transactions
			if !past {
				if prev, ok := gaps[sender]; !ok || tx.Nonce() < prev {
					gaps[sender] = tx.Nonce()
				}
			}
		}
	}
	// Move all transactions after a gap back to the future queue
	if len(gaps) > 0 {
		for hash, tx := range pool.pending {
			sender, _ := tx.From()
			if gap, ok := gaps[sender]; ok && tx.Nonce() >= gap {
				if glog.V(logger.Core) {
					glog.Infof("postponed tx (%v) due to introduced gap\n", tx)
				}
				pool.queueTx(hash, tx)
				delete(pool.pending, hash)
			}
		}
	}
}
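To make the gap rule concrete: if an account has pending nonces 5, 6 and 7 and the nonce-5 transaction can no longer cover its cost, it is deleted and 5 becomes the gap, so the second loop moves 6 and 7 back to the future queue. A standalone toy version of that second pass (postponedNonces is hypothetical, not part of TxPool):

func postponedNonces(pendingNonces []uint64, gap uint64) []uint64 {
	// Illustrative only: everything at or above the smallest invalid nonce
	// would be re-queued via queueTx in the loop above.
	var postponed []uint64
	for _, nonce := range pendingNonces {
		if nonce >= gap {
			postponed = append(postponed, nonce)
		}
	}
	return postponed
}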
Example #5
func blockRecovery(ctx *cli.Context) {
	utils.CheckLegalese(ctx.GlobalString(utils.DataDirFlag.Name))

	arg := ctx.Args().First()
	if len(arg) == 0 {
		glog.Fatal("recover requires block number or hash")
	}

	cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
	utils.CheckLegalese(cfg.DataDir)

	blockDb, err := ethdb.NewLDBDatabase(filepath.Join(cfg.DataDir, "blockchain"), cfg.DatabaseCache)
	if err != nil {
		glog.Fatalln("could not open db:", err)
	}

	var block *types.Block
	if arg[0] == '#' {
		block = core.GetBlockByNumber(blockDb, common.String2Big(arg[1:]).Uint64())
	} else {
		block = core.GetBlockByHash(blockDb, common.HexToHash(arg))
	}

	if block == nil {
		glog.Fatalln("block not found. Recovery failed")
	}

	err = core.WriteHead(blockDb, block)
	if err != nil {
		glog.Fatalln("block write err", err)
	}
	glog.Infof("Recovery succesful. New HEAD %x\n", block.Hash())
}
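core.WriteHead is assumed to repoint the head so that the next startup resolves it again via the "LastBlock" key seen in Example #2; a hypothetical sketch under that assumption (the real helper may also persist the block and further metadata):

func WriteHead(db ethdb.Database, block *types.Block) error {
	// Illustrative only: store the new head hash under the fixed key.
	return db.Put([]byte("LastBlock"), block.Hash().Bytes())
}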
// validate and queue transactions.
func (self *TxPool) add(tx *types.Transaction) error {
	hash := tx.Hash()

	if self.pending[hash] != nil {
		return fmt.Errorf("Known transaction (%x)", hash[:4])
	}
	err := self.validateTx(tx)
	if err != nil {
		return err
	}
	self.queueTx(hash, tx)

	if glog.V(logger.Debug) {
		var toname string
		if to := tx.To(); to != nil {
			toname = common.Bytes2Hex(to[:4])
		} else {
			toname = "[NEW_CONTRACT]"
		}
		// we can ignore the error here because From is
		// verified in validateTx.
		f, _ := tx.From()
		from := common.Bytes2Hex(f[:4])
		glog.Infof("(t) %x => %s (%v) %x\n", from, toname, tx.Value, hash)
	}

	return nil
}
Example #7
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -float32(block.NumberU64()))

		if glog.V(logger.Debug) {
			glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
		}
	}
}
func (self *StateObject) SetGasLimit(gasLimit *big.Int) {
	self.gasPool = new(big.Int).Set(gasLimit)

	if glog.V(logger.Core) {
		glog.Infof("%x: gas (+ %v)", self.Address(), self.gasPool)
	}
}
Example #9
func (c *StateObject) SubBalance(amount *big.Int) {
	c.SetBalance(new(big.Int).Sub(c.balance, amount))

	if glog.V(logger.Core) {
		glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
	}
}
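Two logging styles recur throughout these examples. The guarded if glog.V(...) form above skips evaluating the format arguments entirely when the level is disabled, while the one-line form still evaluates them before the (then no-op) call. Side by side, for illustration (logStylesExample is a hypothetical wrapper):

func logStylesExample(c *StateObject, amount *big.Int) {
	// Guarded: Address(), nonce, balance and amount are only evaluated when
	// logger.Core verbosity is enabled.
	if glog.V(logger.Core) {
		glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
	}

	// One-liner: glog.V returns a Verbose value whose Infof is a no-op when the
	// level is disabled, but the arguments are still evaluated at the call site.
	glog.V(logger.Core).Infof("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
}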
Example #10
func (env *Work) commitTransactions(transactions types.Transactions, gasPrice *big.Int, proc *core.BlockProcessor) {
	gp := new(core.GasPool).AddGas(env.header.GasLimit)

	for _, tx := range transactions {
		// We can skip err. It has already been validated in the tx pool
		from, _ := tx.From()

		// Check if it falls within margin. Txs from owned accounts are always processed.
		if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
			// ignore the transaction and transactor. We ignore the transactor
			// because nonce will fail after ignoring this transaction so there's
			// no point
			env.lowGasTransactors.Add(from)

			glog.V(logger.Info).Infof("transaction(%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4])
		}

		// Continue with the next transaction if the transaction sender is included in
		// the low gas tx set. This will also remove the tx and all sequential transactions
		// from this transactor.
		if env.lowGasTransactors.Has(from) {
			// add tx to the low gas set. This will be removed at the end of the run
			// owned accounts are ignored
			if !env.ownedAccounts.Has(from) {
				env.lowGasTxs = append(env.lowGasTxs, tx)
			}
			continue
		}

		// Move on to the next transaction when the transactor is in the ignored transactors set.
		// This may occur when a transaction hits the gas limit. When a gas limit is hit and
		// the transaction is processed (that could potentially be included in the block) it
		// will throw a nonce error because the previous transaction hasn't been processed.
		// Therefore we need to ignore any transaction after the ignored one.
		if env.ignoredTransactors.Has(from) {
			continue
		}

		env.state.StartRecord(tx.Hash(), common.Hash{}, 0)

		err := env.commitTransaction(tx, proc, gp)
		switch {
		case core.IsGasLimitErr(err):
			// ignore the transactor so no nonce errors will be thrown for this account.
			// Next time the worker runs, its transactions will be picked up again.
			env.ignoredTransactors.Add(from)

			glog.V(logger.Detail).Infof("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4])
		case err != nil:
			env.remove.Add(tx.Hash())

			if glog.V(logger.Detail) {
				glog.Infof("TX (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err)
			}
		default:
			env.tcount++
		}
	}
}
Example #11
func (self *StateObject) MarkForDeletion() {
	self.remove = true
	self.dirty = true

	if glog.V(logger.Core) {
		glog.Infof("%x: #%d %v X\n", self.Address(), self.nonce, self.balance)
	}
}
// checkQueue moves transactions that have become processable to main pool.
func (pool *TxPool) checkQueue() {
	// init delayed since tx pool could have been started before any state sync
	if pool.pendingState == nil {
		pool.resetState()
	}

	var addq txQueue
	for address, txs := range pool.queue {
		// guessed nonce is the nonce currently kept by the tx pool (pending state)
		guessedNonce := pool.pendingState.GetNonce(address)
		// true nonce is the nonce known by the last state
		currentState, err := pool.currentState()
		if err != nil {
			glog.Errorf("could not get current state: %v", err)
			return
		}
		trueNonce := currentState.GetNonce(address)
		addq := addq[:0]
		for hash, tx := range txs {
			if tx.Nonce() < trueNonce {
				// Drop queued transactions whose nonce is lower than
				// the account nonce because they have been processed.
				delete(txs, hash)
			} else {
				// Collect the remaining transactions for the next pass.
				addq = append(addq, txQueueEntry{hash, address, tx})
			}
		}
		// Find the next consecutive nonce range starting at the
		// current account nonce.
		sort.Sort(addq)
		for i, e := range addq {
			// start deleting the transactions from the queue if they exceed the limit
			if i > maxQueued {
				delete(pool.queue[address], e.hash)
				continue
			}

			if e.Nonce() > guessedNonce {
				if len(addq)-i > maxQueued {
					if glog.V(logger.Debug) {
						glog.Infof("Queued tx limit exceeded for %s. Tx %s removed\n", common.PP(address[:]), common.PP(e.hash[:]))
					}
					for j := i + maxQueued; j < len(addq); j++ {
						delete(txs, addq[j].hash)
					}
				}
				break
			}
			delete(txs, e.hash)
			pool.addTx(e.hash, address, e.Transaction)
		}
		// Delete the entire queue entry if it became empty.
		if len(txs) == 0 {
			delete(pool.queue, address)
		}
	}
}
Example #13
func sendJSON(w io.Writer, v interface{}) {
	if glog.V(logger.Detail) {
		if payload, err := json.MarshalIndent(v, "", "\t"); err == nil {
			glog.Infof("Sending payload: %s", payload)
		}
	}
	if err := json.NewEncoder(w).Encode(v); err != nil {
		glog.V(logger.Error).Infoln("Error sending JSON:", err)
	}
}
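A minimal caller sketch for sendJSON (exampleSendJSON is hypothetical; bytes and fmt are assumed to be imported), showing that any io.Writer works:

func exampleSendJSON() {
	var buf bytes.Buffer
	// Hypothetical JSON-RPC-shaped payload, purely for illustration.
	sendJSON(&buf, map[string]interface{}{"jsonrpc": "2.0", "id": 1, "result": "0x1"})
	fmt.Print(buf.String())
}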
Example #14
// newStateObject creates a state object whether it exists in the trie or not
func (self *StateDB) newStateObject(addr common.Address) *StateObject {
	if glog.V(logger.Core) {
		glog.Infof("(+) %x\n", addr)
	}

	stateObject := NewStateObject(addr, self.db)
	self.stateObjects[addr.Str()] = stateObject

	return stateObject
}
Example #15
// newStateObject creates a state object whether it exists in the state or not
func (self *LightState) newStateObject(addr common.Address) *StateObject {
	if glog.V(logger.Core) {
		glog.Infof("(+) %x\n", addr)
	}

	stateObject := NewStateObject(addr, self.odr)
	stateObject.SetNonce(StartingNonce)
	self.stateObjects[addr.Str()] = stateObject

	return stateObject
}
Example #16
func runProgram(program *Program, pcstart uint64, mem *Memory, stack *stack, env Environment, contract *Contract, input []byte) ([]byte, error) {
	contract.Input = input

	var (
		pc         uint64 = program.mapping[pcstart]
		instrCount        = 0
	)

	if glog.V(logger.Debug) {
		glog.Infof("running JIT program %x\n", program.Id[:4])
		tstart := time.Now()
		defer func() {
			glog.Infof("JIT program %x done. time: %v instrc: %v\n", program.Id[:4], time.Since(tstart), instrCount)
		}()
	}

	homestead := env.RuleSet().IsHomestead(env.BlockNumber())
	for pc < uint64(len(program.instructions)) {
		instrCount++

		instr := program.instructions[pc]
		if instr.Op() == DELEGATECALL && !homestead {
			return nil, fmt.Errorf("Invalid opcode 0x%x", instr.Op())
		}

		ret, err := instr.do(program, &pc, env, contract, mem, stack)
		if err != nil {
			return nil, err
		}

		if instr.halts() {
			return ret, nil
		}
	}

	contract.Input = nil

	return nil, nil
}
// validatePool removes invalid and processed transactions from the main pool.
func (pool *TxPool) validatePool() {
	state := pool.currentState()
	for hash, tx := range pool.pending {
		from, _ := tx.From() // err already checked
		// perform light nonce validation
		if state.GetNonce(from) > tx.Nonce() {
			if glog.V(logger.Core) {
				glog.Infof("removed tx (%x) from pool: low tx nonce\n", hash[:4])
			}
			delete(pool.pending, hash)
		}
	}
}
func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block *types.Block, transientProcess bool) (receipts types.Receipts, err error) {
	gp := new(GasPool).AddGas(block.GasLimit())
	if glog.V(logger.Core) {
		glog.Infof("%x: gas (+ %v)", block.Coinbase(), gp)
	}

	// Process the transactions on to parent state
	receipts, err = sm.ApplyTransactions(gp, statedb, block, block.Transactions(), transientProcess)
	if err != nil {
		return nil, err
	}

	return receipts, nil
}
Example #19
func (bc *ChainManager) write(block *types.Block) {
	tstart := time.Now()

	enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
	key := append(blockHashPre, block.Hash().Bytes()...)
	err := bc.blockDb.Put(key, enc)
	if err != nil {
		glog.Fatal("db write fail:", err)
	}

	if glog.V(logger.Debug) {
		glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
	}
}
func (pool *TxPool) validatePool() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	for hash, tx := range pool.txs {
		if err := pool.ValidateTransaction(tx); err != nil {
			if glog.V(logger.Info) {
				glog.Infof("removed tx (%x) from pool: %v\n", hash[:4], err)
			}

			pool.removeTx(hash)
		}
	}
}
Example #21
// diff takes the head blocks of an old and a new chain, walks both back to their
// common ancestor and returns the new-chain blocks that must be inserted to make
// the new chain canonical.
func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, error) {
	var (
		newChain    types.Blocks
		commonBlock *types.Block
		oldStart    = oldBlock
		newStart    = newBlock
	)

	// first reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return nil, fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return nil, fmt.Errorf("Invalid new chain")
	}

	numSplit := newBlock.Number()
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		newChain = append(newChain, newBlock)

		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
		if oldBlock == nil {
			return nil, fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return nil, fmt.Errorf("Invalid new chain")
		}
	}

	if glog.V(logger.Debug) {
		commonHash := commonBlock.Hash()
		glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
	}

	return newChain, nil
}
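The traversal can be summarised independently of the chain manager: reduce whichever side is higher to the other side's block number, then walk both sides back in lockstep until the hashes meet. A self-contained toy model of that walk (toyBlock and commonAncestor are hypothetical, for illustration only):

type toyBlock struct {
	hash   string
	number uint64
	parent *toyBlock
}

// commonAncestor mirrors the reduce-then-lockstep walk of diff above, without
// the error handling or the collection of new-chain blocks.
func commonAncestor(a, b *toyBlock) *toyBlock {
	for a != nil && b != nil && a.number > b.number {
		a = a.parent
	}
	for a != nil && b != nil && b.number > a.number {
		b = b.parent
	}
	for a != nil && b != nil && a.hash != b.hash {
		a, b = a.parent, b.parent
	}
	if a == nil || b == nil {
		return nil // the chains never converge
	}
	return a
}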
Example #22
// doRefresh performs a lookup for a random target to keep buckets
// full. Seed nodes are inserted if the table is empty (initial
// bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a lookup with a random target instead.
	var target NodeID
	rand.Read(target[:])
	result := tab.lookup(target, false)
	if len(result) > 0 {
		return
	}

	// The table is empty. Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	seeds := tab.db.querySeeds(seedCount, seedMaxAge)
	seeds = tab.bondall(append(seeds, tab.nursery...))
	if glog.V(logger.Debug) {
		if len(seeds) == 0 {
			glog.Infof("no seed nodes found")
		}
		for _, n := range seeds {
			age := time.Since(tab.db.lastPong(n.ID))
			glog.Infof("seed node (age %v): %v", age, n)
		}
	}
	tab.mutex.Lock()
	tab.stuff(seeds)
	tab.mutex.Unlock()

	// Finally, do a self lookup to fill up the buckets.
	tab.lookup(tab.self.ID, false)
}
func Start() {
	go func() {
		for range time.Tick(15 * time.Second) {
			mutex.Lock()
			var sum, tracked = 0, []string{}
			for what, n := range all {
				sum += n
				tracked = append(tracked, fmt.Sprintf("%s:%d", what, n))
			}
			mutex.Unlock()
			used, _ := fdusage()
			sort.Strings(tracked)
			glog.Infof("fd usage %d/%d, tracked %d %v", used, fdlimit(), sum, tracked)
		}
	}()
}
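The monitor above relies on package-level state that is not shown; a minimal sketch of what it presumably looks like (names taken from the snippet, types and the fdusage/fdlimit bodies are assumptions):

var (
	mutex sync.Mutex
	all   = make(map[string]int) // open-resource counts, keyed by a caller-supplied label
)

// fdusage and fdlimit are assumed to report the process file-descriptor usage
// and limit; the real implementations would be platform specific (e.g. counting
// /proc/self/fd entries and reading RLIMIT_NOFILE on Linux).
func fdusage() (int, error) { return 0, nil }
func fdlimit() int          { return 0 }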
Example #24
func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
	// make sure that the hashes being added actually come from the current
	// active peer. Hashes received from other peers are dropped and ignored.
	if d.activePeer != id {
		return fmt.Errorf("received hashes from %s while active peer is %s", id, d.activePeer)
	}

	if glog.V(logger.Detail) && len(hashes) != 0 {
		from, to := hashes[0], hashes[len(hashes)-1]
		glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.hashPool.Size(), from[:4], to[:4], id)
	}

	d.hashCh <- hashPack{id, hashes}

	return nil
}
// WriteBlock writes a block to the database
func WriteBlock(db common.Database, block *types.Block) error {
	tstart := time.Now()

	enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
	key := append(blockHashPre, block.Hash().Bytes()...)
	err := db.Put(key, enc)
	if err != nil {
		glog.Fatal("db write fail:", err)
		return err
	}

	if glog.V(logger.Debug) {
		glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
	}

	return nil
}
// checkQueue moves transactions that have become processable to main pool.
func (pool *TxPool) checkQueue() {
	state := pool.pendingState

	var addq txQueue
	for address, txs := range pool.queue {
		// guessed nonce is the nonce currently kept by the tx pool (pending state)
		guessedNonce := state.GetNonce(address)
		// true nonce is the nonce known by the last state
		trueNonce := pool.currentState().GetNonce(address)
		addq := addq[:0]
		for hash, tx := range txs {
			if tx.AccountNonce < trueNonce {
				// Drop queued transactions whose nonce is lower than
				// the account nonce because they have been processed.
				delete(txs, hash)
			} else {
				// Collect the remaining transactions for the next pass.
				addq = append(addq, txQueueEntry{hash, address, tx})
			}
		}
		// Find the next consecutive nonce range starting at the
		// current account nonce.
		sort.Sort(addq)
		for i, e := range addq {
			// start deleting the transactions from the queue if they exceed the limit
			if i > maxQueued {
				if glog.V(logger.Debug) {
					glog.Infof("Queued tx limit exceeded for %s. Tx %s removed\n", common.PP(address[:]), common.PP(e.hash[:]))
				}
				delete(pool.queue[address], e.hash)
				continue
			}

			if e.AccountNonce > guessedNonce {
				break
			}
			delete(txs, e.hash)
			pool.addTx(e.hash, address, e.Transaction)
		}
		// Delete the entire queue entry if it became empty.
		if len(txs) == 0 {
			delete(pool.queue, address)
		}
	}
}
func (bc *ChainManager) setLastState() {
	data, _ := bc.blockDb.Get([]byte("LastBlock"))
	if len(data) != 0 {
		block := bc.GetBlock(common.BytesToHash(data))
		bc.currentBlock = block
		bc.lastBlockHash = block.Hash()

		// Set the last known difficulty (might be 0x0 as initial value, Genesis)
		bc.td = common.BigD(bc.blockDb.LastKnownTD())
	} else {
		bc.Reset()
	}
	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)

	if glog.V(logger.Info) {
		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
	}
}
Example #28
func (bc *ChainManager) setLastState() {
	data, _ := bc.blockDb.Get([]byte("LastBlock"))
	if len(data) != 0 {
		block := bc.GetBlock(common.BytesToHash(data))
		if block != nil {
			bc.currentBlock = block
			bc.lastBlockHash = block.Hash()
		} else {
			glog.Fatalf("Fatal. LastBlock not found. Please run removedb and resync")
		}
	} else {
		bc.Reset()
	}
	bc.td = bc.currentBlock.Td
	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)

	if glog.V(logger.Info) {
		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
	}
}
func (self *TxPool) add(tx *types.Transaction) error {
	hash := tx.Hash()

	/* XXX I'm unsure about this. This is extremely dangerous and may result
	 in total black listing of certain transactions
	if self.invalidHashes.Has(hash) {
		return fmt.Errorf("Invalid transaction (%x)", hash[:4])
	}
	*/
	if self.txs[hash] != nil {
		return fmt.Errorf("Known transaction (%x)", hash[:4])
	}
	err := self.ValidateTransaction(tx)
	if err != nil {
		return err
	}

	self.queueTx(tx)

	var toname string
	if to := tx.To(); to != nil {
		toname = common.Bytes2Hex(to[:4])
	} else {
		toname = "[NEW_CONTRACT]"
	}
	// we can ignore the error here because From is
	// verified in ValidateTransaction.
	f, _ := tx.From()
	from := common.Bytes2Hex(f[:4])

	if glog.V(logger.Debug) {
		glog.Infof("(t) %x => %s (%v) %x\n", from, toname, tx.Value, tx.Hash())
	}

	return nil
}
Example #30
// Run loops and evaluates the contract's code with the given input data
func (evm *EVM) Run(contract *Contract, input []byte) (ret []byte, err error) {
	evm.env.SetDepth(evm.env.Depth() + 1)
	defer evm.env.SetDepth(evm.env.Depth() - 1)

	if contract.CodeAddr != nil {
		if p := Precompiled[contract.CodeAddr.Str()]; p != nil {
			return evm.RunPrecompiled(p, input, contract)
		}
	}

	// Don't bother with the execution if there's no code.
	if len(contract.Code) == 0 {
		return nil, nil
	}

	var (
		codehash = crypto.Keccak256Hash(contract.Code) // codehash is used when doing jump dest caching
		program  *Program
	)
	if evm.cfg.EnableJit {
		// If the JIT is enabled, check the status of the JIT program:
		// if it doesn't exist yet, compile a new program in a separate
		// goroutine, or compile synchronously and wait for the result
		// when the JIT is forced.
		switch GetProgramStatus(codehash) {
		case progReady:
			return RunProgram(GetProgram(codehash), evm.env, contract, input)
		case progUnknown:
			if evm.cfg.ForceJit {
				// Create and compile program
				program = NewProgram(contract.Code)
				perr := CompileProgram(program)
				if perr == nil {
					return RunProgram(program, evm.env, contract, input)
				}
				glog.V(logger.Info).Infoln("error compiling program", err)
			} else {
				// create and compile the program. Compilation
				// is done in a separate goroutine
				program = NewProgram(contract.Code)
				go func() {
					err := CompileProgram(program)
					if err != nil {
						glog.V(logger.Info).Infoln("error compiling program", err)
						return
					}
				}()
			}
		}
	}

	var (
		caller     = contract.caller
		code       = contract.Code
		instrCount = 0

		op      OpCode         // current opcode
		mem     = NewMemory()  // bound memory
		stack   = newstack()   // local stack
		statedb = evm.env.Db() // current state
		// For optimisation reasons we're using uint64 as the program counter.
		// The YP defines the PC to be a uint256, so it is theoretically possible
		// to go above 2^64, but in practice that is not feasible.
		pc = uint64(0) // program counter

		// jump checks whether the given jump destination is a valid one;
		// if it is, it moves the `pc`, otherwise it returns an error.
		jump = func(from uint64, to *big.Int) error {
			if !contract.jumpdests.has(codehash, code, to) {
				nop := contract.GetOp(to.Uint64())
				return fmt.Errorf("invalid jump destination (%v) %v", nop, to)
			}

			pc = to.Uint64()

			return nil
		}

		newMemSize *big.Int
		cost       *big.Int
	)
	contract.Input = input

	// Defer pattern: if execution ended with an error and debugging is enabled,
	// capture the final state for the tracer.
	defer func() {
		if err != nil && evm.cfg.Debug {
			evm.logger.captureState(pc, op, contract.Gas, cost, mem, stack, contract, err)
		}
	}()

	if glog.V(logger.Debug) {
		glog.Infof("running byte VM %x\n", codehash[:4])
		tstart := time.Now()
		defer func() {
			glog.Infof("byte VM %x done. time: %v instrc: %v\n", codehash[:4], time.Since(tstart), instrCount)
		}()
	}

	for ; ; instrCount++ {
		/*
			if EnableJit && it%100 == 0 {
				if program != nil && progStatus(atomic.LoadInt32(&program.status)) == progReady {
					// move execution
					fmt.Println("moved", it)
					glog.V(logger.Info).Infoln("Moved execution to JIT")
					return runProgram(program, pc, mem, stack, evm.env, contract, input)
				}
			}
		*/

		// Get the memory location of pc
		op = contract.GetOp(pc)
		// calculate the new memory size and gas price for the current executing opcode
		newMemSize, cost, err = calculateGasAndSize(evm.env, contract, caller, op, statedb, mem, stack)
		if err != nil {
			return nil, err
		}

		// Use the calculated gas. When insufficient gas is present, use all gas and return an
		// Out Of Gas error
		if !contract.UseGas(cost) {
			return nil, OutOfGasError
		}

		// Resize the memory calculated previously
		mem.Resize(newMemSize.Uint64())
		// Add a log message
		if evm.cfg.Debug {
			evm.logger.captureState(pc, op, contract.Gas, cost, mem, stack, contract, nil)
		}

		if opPtr := evm.jumpTable[op]; opPtr.valid {
			if opPtr.fn != nil {
				opPtr.fn(instruction{}, &pc, evm.env, contract, mem, stack)
			} else {
				switch op {
				case PC:
					opPc(instruction{data: new(big.Int).SetUint64(pc)}, &pc, evm.env, contract, mem, stack)
				case JUMP:
					if err := jump(pc, stack.pop()); err != nil {
						return nil, err
					}

					continue
				case JUMPI:
					pos, cond := stack.pop(), stack.pop()

					if cond.Cmp(common.BigTrue) >= 0 {
						if err := jump(pc, pos); err != nil {
							return nil, err
						}

						continue
					}
				case RETURN:
					offset, size := stack.pop(), stack.pop()
					ret := mem.GetPtr(offset.Int64(), size.Int64())

					return ret, nil
				case SUICIDE:
					opSuicide(instruction{}, nil, evm.env, contract, mem, stack)

					fallthrough
				case STOP: // Stop the contract
					return nil, nil
				}
			}
		} else {
			return nil, fmt.Errorf("Invalid opcode %x", op)
		}

		pc++

	}
}
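The jump closure above defers destination checks to contract.jumpdests; the underlying rule is that a destination is valid only if the byte at that offset is JUMPDEST and is not part of a PUSH argument. A simplified, hypothetical version of that analysis (the vm package's own destinations cache is not shown here):

func analyseJumpdests(code []byte) map[uint64]bool {
	// Illustrative only: collect the byte offsets that are legal JUMP targets.
	dests := make(map[uint64]bool)
	for pc := uint64(0); pc < uint64(len(code)); pc++ {
		switch op := OpCode(code[pc]); {
		case op == JUMPDEST:
			dests[pc] = true
		case op >= PUSH1 && op <= PUSH32:
			pc += uint64(op - PUSH1 + 1) // argument bytes can never be jump targets
		}
	}
	return dests
}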