Example no. 1
0
func runProgram(program *Program, pcstart uint64, mem *Memory, stack *stack, env Environment, contract *Contract, input []byte) ([]byte, error) {
	contract.Input = input

	var (
		pc         uint64 = program.mapping[pcstart]
		instrCount        = 0
	)

	if glog.V(logger.Debug) {
		glog.Infof("running JIT program %x\n", program.Id[:4])
		tstart := time.Now()
		defer func() {
			glog.Infof("JIT program %x done. time: %v instrc: %v\n", program.Id[:4], time.Since(tstart), instrCount)
		}()
	}

	for pc < uint64(len(program.instructions)) {
		instrCount++

		instr := program.instructions[pc]

		ret, err := instr.do(program, &pc, env, contract, mem, stack)
		if err != nil {
			return nil, err
		}

		if instr.halts() {
			return contract.Return(ret), nil
		}
	}

	contract.Input = nil

	return contract.Return(nil), nil
}
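
Nearly every snippet in this collection gates its logging on a verbosity check, either wrapping a block in an if glog.V(level) guard or chaining glog.V(level).Infof(...). Below is a minimal, self-contained sketch of that pattern; the tiny verbose/v/infof shim and the level constants are stand-ins for the go-ethereum glog and logger packages, not the real API.

package main

import "log"

// Stand-in verbosity levels; the real logger package defines its own constants.
const (
	levelError = iota + 1
	levelInfo
	levelCore
	levelDebug
	levelDetail
)

var verbosity = levelDebug // globally configured verbosity threshold

type verbose bool

// v reports whether the given level is enabled, mirroring glog.V.
func v(level int) verbose { return verbose(level <= verbosity) }

// infof logs only when the receiver says the level is enabled, mirroring glog.V(...).Infof.
func (vb verbose) infof(format string, args ...interface{}) {
	if vb {
		log.Printf(format, args...)
	}
}

func main() {
	id := []byte{0xde, 0xad, 0xbe, 0xef}

	// Guarded form: skip the whole block (and any expensive formatting) when disabled.
	if v(levelDebug) {
		log.Printf("running JIT program %x", id[:4])
	}
	// Chained form: level check and log call in a single expression.
	v(levelDetail).infof("sending payload: %x", id) // suppressed: Detail > Debug
}
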
Example no. 2
0
func (bc *ChainManager) setLastState() error {
	data, _ := bc.chainDb.Get([]byte("LastBlock"))
	if len(data) != 0 {
		block := bc.GetBlock(common.BytesToHash(data))
		if block != nil {
			bc.currentBlock = block
			bc.lastBlockHash = block.Hash()
		} else {
			glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
			if bc.recover() {
				glog.Infof("Recover successful")
			} else {
				glog.Fatalf("Recover failed. Please report")
			}
		}
	} else {
		bc.Reset()
	}
	bc.td = bc.currentBlock.Td
	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)

	if glog.V(logger.Info) {
		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
	}

	return nil
}
Example no. 3
0
// validatePool removes invalid and processed transactions from the main pool.
// If a transaction is removed for being invalid (e.g. out of funds), all sub-
// sequent (still valid) transactions are moved back into the future queue. This
// is important to prevent a drained account from DOSing the network with non-
// executable transactions.
func (pool *TxPool) validatePool() {
	state, err := pool.currentState()
	if err != nil {
		glog.V(logger.Info).Infof("failed to get current state: %v", err)
		return
	}
	balanceCache := make(map[common.Address]*big.Int)

	// Clean up the pending pool, accumulating invalid nonces
	gaps := make(map[common.Address]uint64)

	for hash, tx := range pool.pending {
		sender, _ := tx.From() // err already checked

		// Perform light nonce and balance validation
		balance := balanceCache[sender]
		if balance == nil {
			balance = state.GetBalance(sender)
			balanceCache[sender] = balance
		}
		if past := state.GetNonce(sender) > tx.Nonce(); past || balance.Cmp(tx.Cost()) < 0 {
			// Remove a transaction that is already past (low nonce) or invalidated (out of funds)
			if glog.V(logger.Core) {
				glog.Infof("removed tx (%v) from pool: low tx nonce or out of funds\n", tx)
			}
			delete(pool.pending, hash)

			// Track the smallest invalid nonce to postpone subsequent transactions
			if !past {
				if prev, ok := gaps[sender]; !ok || tx.Nonce() < prev {
					gaps[sender] = tx.Nonce()
				}
			}
		}
	}
	// Move all transactions after a gap back to the future queue
	if len(gaps) > 0 {
		for hash, tx := range pool.pending {
			sender, _ := tx.From()
			if gap, ok := gaps[sender]; ok && tx.Nonce() >= gap {
				if glog.V(logger.Core) {
					glog.Infof("postponed tx (%v) due to introduced gap\n", tx)
				}
				pool.queueTx(hash, tx)
				delete(pool.pending, hash)
			}
		}
	}
}
Example no. 4
0
func blockRecovery(ctx *cli.Context) {
	if len(ctx.Args()) < 1 {
		glog.Fatal("recover requires block number or hash")
	}
	arg := ctx.Args().First()

	cfg := utils.MakeEthConfig(ClientIdentifier, nodeNameVersion, ctx)
	blockDb, err := ethdb.NewLDBDatabase(filepath.Join(cfg.DataDir, "blockchain"), cfg.DatabaseCache)
	if err != nil {
		glog.Fatalln("could not open db:", err)
	}

	var block *types.Block
	if arg[0] == '#' {
		block = core.GetBlock(blockDb, core.GetCanonicalHash(blockDb, common.String2Big(arg[1:]).Uint64()))
	} else {
		block = core.GetBlock(blockDb, common.HexToHash(arg))
	}

	if block == nil {
		glog.Fatalln("block not found. Recovery failed")
	}

	if err = core.WriteHeadBlockHash(blockDb, block.Hash()); err != nil {
		glog.Fatalln("block write err", err)
	}
	glog.Infof("Recovery succesful. New HEAD %x\n", block.Hash())
}
Example no. 5
0
// add validates and queues transactions.
func (self *TxPool) add(tx *types.Transaction) error {
	hash := tx.Hash()

	if self.pending[hash] != nil {
		return fmt.Errorf("Known transaction (%x)", hash[:4])
	}
	err := self.validateTx(tx)
	if err != nil {
		return err
	}
	self.queueTx(hash, tx)

	if glog.V(logger.Debug) {
		var toname string
		if to := tx.To(); to != nil {
			toname = common.Bytes2Hex(to[:4])
		} else {
			toname = "[NEW_CONTRACT]"
		}
		// we can ignore the error here because From is
		// verified in ValidateTransaction.
		f, _ := tx.From()
		from := common.Bytes2Hex(f[:4])
		glog.Infof("(t) %x => %s (%v) %x\n", from, toname, tx.Value, hash)
	}

	return nil
}
Example no. 6
0
func (self *StateObject) SetGasLimit(gasLimit *big.Int) {
	self.gasPool = new(big.Int).Set(gasLimit)

	if glog.V(logger.Core) {
		glog.Infof("%x: gas (+ %v)", self.Address(), self.gasPool)
	}
}
Example no. 7
0
func (c *StateObject) SubBalance(amount *big.Int) {
	c.SetBalance(new(big.Int).Sub(c.balance, amount))

	if glog.V(logger.Core) {
		glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
	}
}
Example no. 8
0
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
		discardMeter.Mark(1)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -float32(block.NumberU64()))

		if glog.V(logger.Debug) {
			glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
		}
	}
}
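
The DoS guard above reduces to a per-peer counter that is only committed once the announced block is actually kept. A stripped-down sketch of that accounting follows; blockLimit and the queues map are hypothetical stand-ins for the fetcher's real fields.

package main

import "fmt"

const blockLimit = 3 // hypothetical per-peer allowance

type fetcher struct {
	queues map[string]int // number of blocks queued per peer
}

// enqueue admits a block announcement from peer unless its allowance is exhausted.
func (f *fetcher) enqueue(peer string) bool {
	count := f.queues[peer] + 1
	if count > blockLimit {
		return false // discard: peer exceeded its allowance
	}
	f.queues[peer] = count // commit the count only when the block is kept
	return true
}

func main() {
	f := &fetcher{queues: make(map[string]int)}
	for i := 0; i < 5; i++ {
		fmt.Println(f.enqueue("peer-1")) // true, true, true, false, false
	}
}
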
Example no. 9
0
func (env *Work) commitTransactions(transactions types.Transactions, gasPrice *big.Int, bc *core.BlockChain) {
	gp := new(core.GasPool).AddGas(env.header.GasLimit)
	for _, tx := range transactions {
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance in the transaction pool.
		from, _ := tx.From()

		// Check if it falls within margin. Txs from owned accounts are always processed.
		if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
			// Ignore the transaction and the transactor. We ignore the transactor
			// because the nonce check would fail for every later transaction from
			// this account once this one is skipped, so there's no point.
			env.lowGasTransactors.Add(from)

			glog.V(logger.Info).Infof("transaction(%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4])
		}

		// Continue with the next transaction if the transaction sender is included in
		// the low gas tx set. This will also remove the tx and all sequential transaction
		// from this transactor
		if env.lowGasTransactors.Has(from) {
			// add tx to the low gas set. This will be removed at the end of the run
			// owned accounts are ignored
			if !env.ownedAccounts.Has(from) {
				env.lowGasTxs = append(env.lowGasTxs, tx)
			}
			continue
		}

		// Move on to the next transaction when the transactor is in the ignored transactions set.
		// This may occur when a transaction hits the gas limit. If a later transaction from the
		// same account (one that could potentially be included in the block) were processed, it
		// would throw a nonce error because the previous transaction hasn't been processed.
		// Therefore we need to ignore any transaction after the ignored one.
		if env.ignoredTransactors.Has(from) {
			continue
		}

		env.state.StartRecord(tx.Hash(), common.Hash{}, 0)

		err := env.commitTransaction(tx, bc, gp)
		switch {
		case core.IsGasLimitErr(err):
			// ignore the transactor so no nonce errors will be thrown for this account
			// next time the worker is run, they'll be picked up again.
			env.ignoredTransactors.Add(from)

			glog.V(logger.Detail).Infof("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4])
		case err != nil:
			env.remove.Add(tx.Hash())

			if glog.V(logger.Detail) {
				glog.Infof("TX (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err)
			}
		default:
			env.tcount++
		}
	}
}
Example no. 10
0
func (self *StateObject) MarkForDeletion() {
	self.remove = true
	self.dirty = true

	if glog.V(logger.Core) {
		glog.Infof("%x: #%d %v X\n", self.Address(), self.nonce, self.balance)
	}
}
Example no. 11
0
func (self *StateObject) markSuicided() {
	self.suicided = true
	if self.onDirty != nil {
		self.onDirty(self.Address())
		self.onDirty = nil
	}
	if glog.V(logger.Core) {
		glog.Infof("%x: #%d %v X\n", self.Address(), self.Nonce(), self.Balance())
	}
}
Example no. 12
0
func sendJSON(w io.Writer, v interface{}) {
	if glog.V(logger.Detail) {
		if payload, err := json.MarshalIndent(v, "", "\t"); err == nil {
			glog.Infof("Sending payload: %s", payload)
		}
	}
	if err := json.NewEncoder(w).Encode(v); err != nil {
		glog.V(logger.Error).Infoln("Error sending JSON:", err)
	}
}
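
A hypothetical caller of a sendJSON-style helper follows. Any io.Writer works, so a bytes.Buffer stands in for the network connection, and plain log output replaces the glog calls to keep the sketch self-contained.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
)

// sendJSON mirrors the helper above, minus the verbosity-gated debug logging.
func sendJSON(w io.Writer, v interface{}) {
	if err := json.NewEncoder(w).Encode(v); err != nil {
		log.Println("Error sending JSON:", err)
	}
}

func main() {
	var buf bytes.Buffer
	sendJSON(&buf, map[string]interface{}{"jsonrpc": "2.0", "id": 1, "result": "0x1"})
	fmt.Print(buf.String()) // {"id":1,"jsonrpc":"2.0","result":"0x1"}
}
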
Example no. 13
0
func (c *StateObject) SubBalance(amount *big.Int) {
	if amount.Cmp(common.Big0) == 0 {
		return
	}
	c.SetBalance(new(big.Int).Sub(c.Balance(), amount))

	if glog.V(logger.Core) {
		glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
	}
}
Example no. 14
0
// newStateObject creates a state object whether it exists in the trie or not
func (self *StateDB) newStateObject(addr common.Address) *StateObject {
	if glog.V(logger.Core) {
		glog.Infof("(+) %x\n", addr)
	}

	stateObject := NewStateObject(addr, self.db)
	self.stateObjects[addr.Str()] = stateObject

	return stateObject
}
Example no. 15
0
// newStateObject creates a state object whether it exists in the state or not
func (self *LightState) newStateObject(addr common.Address) *StateObject {
	if glog.V(logger.Core) {
		glog.Infof("(+) %x\n", addr)
	}

	stateObject := NewStateObject(addr, self.odr)
	stateObject.SetNonce(StartingNonce)
	self.stateObjects[addr.Str()] = stateObject

	return stateObject
}
Example no. 16
0
func runProgram(program *Program, pcstart uint64, mem *Memory, stack *stack, env Environment, contract *Contract, input []byte) ([]byte, error) {
	contract.Input = input

	var (
		pc         uint64 = program.mapping[pcstart]
		instrCount        = 0
	)

	if glog.V(logger.Debug) {
		glog.Infof("running JIT program %x\n", program.Id[:4])
		tstart := time.Now()
		defer func() {
			glog.Infof("JIT program %x done. time: %v instrc: %v\n", program.Id[:4], time.Since(tstart), instrCount)
		}()
	}

	homestead := params.IsHomestead(env.BlockNumber())
	for pc < uint64(len(program.instructions)) {
		instrCount++

		instr := program.instructions[pc]
		if instr.Op() == DELEGATECALL && !homestead {
			return nil, fmt.Errorf("Invalid opcode 0x%x", instr.Op())
		}

		ret, err := instr.do(program, &pc, env, contract, mem, stack)
		if err != nil {
			return nil, err
		}

		if instr.halts() {
			return ret, nil
		}
	}

	contract.Input = nil

	return nil, nil
}
Example no. 17
0
// validatePool removes invalid and processed transactions from the main pool.
func (pool *TxPool) validatePool() {
	state := pool.currentState()
	for hash, tx := range pool.pending {
		from, _ := tx.From() // err already checked
		// perform light nonce validation
		if state.GetNonce(from) > tx.Nonce() {
			if glog.V(logger.Core) {
				glog.Infof("removed tx (%x) from pool: low tx nonce\n", hash[:4])
			}
			delete(pool.pending, hash)
		}
	}
}
Example no. 18
0
// demoteUnexecutables removes invalid and processed transactions from the pool's
// executable/pending queue; any subsequent transactions that become unexecutable
// are moved back into the future queue.
func (pool *TxPool) demoteUnexecutables() {
	// Retrieve the current state to allow nonce and balance checking
	state, err := pool.currentState()
	if err != nil {
		glog.V(logger.Info).Infof("failed to get current state: %v", err)
		return
	}
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := state.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		for _, tx := range list.Forward(nonce) {
			if glog.V(logger.Core) {
				glog.Infof("Removed old pending transaction: %v", tx)
			}
			delete(pool.all, tx.Hash())
		}
		// Drop all transactions that are too costly (low balance), and queue any invalids back for later
		drops, invalids := list.Filter(state.GetBalance(addr))
		for _, tx := range drops {
			if glog.V(logger.Core) {
				glog.Infof("Removed unpayable pending transaction: %v", tx)
			}
			delete(pool.all, tx.Hash())
		}
		for _, tx := range invalids {
			if glog.V(logger.Core) {
				glog.Infof("Demoting pending transaction: %v", tx)
			}
			pool.enqueueTx(tx.Hash(), tx)
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
			delete(pool.beats, addr)
		}
	}
}
Example no. 19
0
// diff takes two blocks, the head of an old chain and the head of a new chain, and
// reconstructs the blocks that need to be inserted to become part of the new canonical chain.
func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, error) {
	var (
		newChain    types.Blocks
		commonBlock *types.Block
		oldStart    = oldBlock
		newStart    = newBlock
	)

	// first reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for newBlock = newBlock; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return nil, fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return nil, fmt.Errorf("Invalid new chain")
	}

	numSplit := newBlock.Number()
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		newChain = append(newChain, newBlock)

		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
		if oldBlock == nil {
			return nil, fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return nil, fmt.Errorf("Invalid new chain")
		}
	}

	if glog.V(logger.Debug) {
		commonHash := commonBlock.Hash()
		glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
	}

	return newChain, nil
}
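
The fork-resolution walk in diff (and in the later reorg variant) can be illustrated on a toy structure: first bring both heads to the same height, then step both chains back in lockstep until their hashes meet. The Block type and parent lookup below are hypothetical stand-ins, not the real core/types definitions.

package main

import "fmt"

type Block struct {
	Hash   string
	Number uint64
	Parent string
}

// commonAncestor returns the split point plus the new-side blocks that would
// need to be inserted to make newHead the canonical head.
func commonAncestor(chain map[string]*Block, oldHead, newHead *Block) (*Block, []*Block) {
	var newChain []*Block

	// First reduce whichever side is higher until both heads share a height.
	for oldHead != nil && newHead != nil && oldHead.Number > newHead.Number {
		oldHead = chain[oldHead.Parent]
	}
	for oldHead != nil && newHead != nil && newHead.Number > oldHead.Number {
		newChain = append(newChain, newHead)
		newHead = chain[newHead.Parent]
	}
	// Then step both sides back together until the hashes meet.
	for oldHead != nil && newHead != nil && oldHead.Hash != newHead.Hash {
		newChain = append(newChain, newHead)
		oldHead, newHead = chain[oldHead.Parent], chain[newHead.Parent]
	}
	if oldHead == nil || newHead == nil {
		return nil, nil // one of the chains is broken
	}
	return oldHead, newChain
}

func main() {
	// a <- b <- c (old head c) competing with a <- x <- y (new head y).
	chain := map[string]*Block{
		"a": {"a", 1, ""},
		"b": {"b", 2, "a"}, "c": {"c", 3, "b"},
		"x": {"x", 2, "a"}, "y": {"y", 3, "x"},
	}
	split, toInsert := commonAncestor(chain, chain["c"], chain["y"])
	fmt.Println(split.Hash, len(toInsert)) // a 2
}
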
Example no. 20
0
// checkQueue moves transactions that have become processable to the main pool.
func (pool *TxPool) checkQueue() {
	state := pool.pendingState

	var addq txQueue
	for address, txs := range pool.queue {
		// guessed nonce is the nonce currently kept by the tx pool (pending state)
		guessedNonce := state.GetNonce(address)
		// true nonce is the nonce known by the last state
		trueNonce := pool.currentState().GetNonce(address)
		addq = addq[:0]
		for hash, tx := range txs {
			if tx.Nonce() < trueNonce {
				// Drop queued transactions whose nonce is lower than
				// the account nonce because they have been processed.
				delete(txs, hash)
			} else {
				// Collect the remaining transactions for the next pass.
				addq = append(addq, txQueueEntry{hash, address, tx})
			}
		}
		// Find the next consecutive nonce range starting at the
		// current account nonce.
		sort.Sort(addq)
		for i, e := range addq {
			// start deleting the transactions from the queue if they exceed the limit
			if i > maxQueued {
				delete(pool.queue[address], e.hash)
				continue
			}

			if e.Nonce() > guessedNonce {
				if len(addq)-i > maxQueued {
					if glog.V(logger.Debug) {
						glog.Infof("Queued tx limit exceeded for %s. Tx %s removed\n", common.PP(address[:]), common.PP(e.hash[:]))
					}
					for j := i + maxQueued; j < len(addq); j++ {
						delete(txs, addq[j].hash)
					}
				}
				break
			}
			delete(txs, e.hash)
			pool.addTx(e.hash, address, e.Transaction)
		}
		// Delete the entire queue entry if it became empty.
		if len(txs) == 0 {
			delete(pool.queue, address)
		}
	}
}
Example no. 21
0
// doRefresh performs a lookup for a random target to keep buckets
// full. Seed nodes are inserted if the table is empty (initial
// bootstrap or discarded faulty peers).
func (tab *Table) doRefresh(done chan struct{}) {
	defer close(done)

	// The Kademlia paper specifies that the bucket refresh should
	// perform a lookup in the least recently used bucket. We cannot
	// adhere to this because the findnode target is a 512-bit value
	// (not hash-sized) and it is not easily possible to generate a
	// sha3 preimage that falls into a chosen bucket.
	// We perform a lookup with a random target instead.
	var target NodeID
	rand.Read(target[:])
	result := tab.Lookup(target)
	if len(result) > 0 {
		return
	}

	// The table is empty. Load nodes from the database and insert
	// them. This should yield a few previously seen nodes that are
	// (hopefully) still alive.
	seeds := tab.db.querySeeds(seedCount, seedMaxAge)
	seeds = tab.bondall(append(seeds, tab.nursery...))
	if glog.V(logger.Debug) {
		if len(seeds) == 0 {
			glog.Infof("no seed nodes found")
		}
		for _, n := range seeds {
			age := time.Since(tab.db.lastPong(n.ID))
			glog.Infof("seed node (age %v): %v", age, n)
		}
	}
	tab.mutex.Lock()
	tab.stuff(seeds)
	tab.mutex.Unlock()

	// Finally, do a self lookup to fill up the buckets.
	tab.Lookup(tab.self.ID)
}
Example no. 22
0
// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.
func (self *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) {
	prev = self.GetStateObject(addr)
	newobj = newObject(self, addr, Account{}, self.MarkStateObjectDirty)
	newobj.setNonce(StartingNonce) // sets the object to dirty
	if prev == nil {
		if glog.V(logger.Core) {
			glog.Infof("(+) %x\n", addr)
		}
		self.journal = append(self.journal, createObjectChange{account: &addr})
	} else {
		self.journal = append(self.journal, resetObjectChange{prev: prev})
	}
	self.setStateObject(newobj)
	return newobj, prev
}
Example no. 23
0
func Start() {
	go func() {
		for range time.Tick(15 * time.Second) {
			mutex.Lock()
			var sum, tracked = 0, []string{}
			for what, n := range all {
				sum += n
				tracked = append(tracked, fmt.Sprintf("%s:%d", what, n))
			}
			mutex.Unlock()
			used, _ := fdusage()
			sort.Strings(tracked)
			glog.Infof("fd usage %d/%d, tracked %d %v", used, fdlimit(), sum, tracked)
		}
	}()
}
Example no. 24
0
// WriteBlock writes a block to the database
func WriteBlock(db common.Database, block *types.Block) error {
	tstart := time.Now()

	enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
	key := append(blockHashPre, block.Hash().Bytes()...)
	err := db.Put(key, enc)
	if err != nil {
		glog.Fatal("db write fail:", err)
		return err
	}

	if glog.V(logger.Debug) {
		glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
	}

	return nil
}
Example no. 25
0
// add validates a transaction and inserts it into the non-executable queue for
// later pending promotion and execution.
func (pool *TxPool) add(tx *types.Transaction) error {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all[hash] != nil {
		return fmt.Errorf("Known transaction: %x", hash[:4])
	}
	// Otherwise ensure basic validation passes and queue it up
	if err := pool.validateTx(tx); err != nil {
		return err
	}
	pool.enqueueTx(hash, tx)

	// Print a log message if low enough level is set
	if glog.V(logger.Debug) {
		rcpt := "[NEW_CONTRACT]"
		if to := tx.To(); to != nil {
			rcpt = common.Bytes2Hex(to[:4])
		}
		from, _ := tx.From() // from already verified during tx validation
		glog.Infof("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value, hash)
	}
	return nil
}
Example no. 26
0
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables() {
	// Init delayed since tx pool could have been started before any state sync
	if pool.pendingState == nil {
		pool.resetState()
	}
	// Retrieve the current state to allow nonce and balance checking
	state, err := pool.currentState()
	if err != nil {
		glog.Errorf("Could not get current state: %v", err)
		return
	}
	// Iterate over all accounts and promote any executable transactions
	queued := uint64(0)

	for addr, list := range pool.queue {
		// Drop all transactions that are deemed too old (low nonce)
		for _, tx := range list.Forward(state.GetNonce(addr)) {
			if glog.V(logger.Core) {
				glog.Infof("Removed old queued transaction: %v", tx)
			}
			delete(pool.all, tx.Hash())
		}
		// Drop all transactions that are too costly (low balance)
		drops, _ := list.Filter(state.GetBalance(addr))
		for _, tx := range drops {
			if glog.V(logger.Core) {
				glog.Infof("Removed unpayable queued transaction: %v", tx)
			}
			delete(pool.all, tx.Hash())
		}
		// Gather all executable transactions and promote them
		for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
			if glog.V(logger.Core) {
				glog.Infof("Promoting queued transaction: %v", tx)
			}
			pool.promoteTx(addr, tx.Hash(), tx)
		}
		// Drop all transactions over the allowed limit
		for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
			if glog.V(logger.Core) {
				glog.Infof("Removed cap-exceeding queued transaction: %v", tx)
			}
			delete(pool.all, tx.Hash())
		}
		queued += uint64(list.Len())

		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
		}
	}
	// If we've queued more transactions than the hard limit, drop oldest ones
	if queued > maxQueuedInTotal {
		// Sort all accounts with queued transactions by heartbeat
		addresses := make(addresssByHeartbeat, 0, len(pool.queue))
		for addr, _ := range pool.queue {
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
		sort.Sort(addresses)

		// Drop transactions until the total is below the limit
		for drop := queued - maxQueuedInTotal; drop > 0; {
			addr := addresses[len(addresses)-1]
			list := pool.queue[addr.address]

			addresses = addresses[:len(addresses)-1]

			// Drop all transactions if they are less than the overflow
			if size := uint64(list.Len()); size <= drop {
				for _, tx := range list.Flatten() {
					pool.removeTx(tx.Hash())
				}
				drop -= size
				continue
			}
			// Otherwise drop only last few transactions
			txs := list.Flatten()
			for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
				pool.removeTx(txs[i].Hash())
				drop--
			}
		}
	}
}
Example no. 27
0
// checkQueue moves transactions that have become processable to the main pool.
func (pool *TxPool) checkQueue() {
	// init delayed since tx pool could have been started before any state sync
	if pool.pendingState == nil {
		pool.resetState()
	}

	var promote txQueue
	for address, txs := range pool.queue {
		currentState, err := pool.currentState()
		if err != nil {
			glog.Errorf("could not get current state: %v", err)
			return
		}
		balance := currentState.GetBalance(address)

		var (
			guessedNonce = pool.pendingState.GetNonce(address) // nonce currently kept by the tx pool (pending state)
			trueNonce    = currentState.GetNonce(address)      // nonce known by the last state
		)
		promote = promote[:0]
		for hash, tx := range txs {
			// Drop processed or out of fund transactions
			if tx.Nonce() < trueNonce || balance.Cmp(tx.Cost()) < 0 {
				if glog.V(logger.Core) {
					glog.Infof("removed tx (%v) from pool queue: low tx nonce or out of funds\n", tx)
				}
				delete(txs, hash)
				continue
			}
			// Collect the remaining transactions for the next pass.
			promote = append(promote, txQueueEntry{hash, address, tx})
		}
		// Find the next consecutive nonce range starting at the current account nonce,
		// pushing the guessed nonce forward if we add consecutive transactions.
		sort.Sort(promote)
		for i, entry := range promote {
			// If we reached a gap in the nonces, enforce transaction limit and stop
			if entry.Nonce() > guessedNonce {
				if len(promote)-i > maxQueued {
					if glog.V(logger.Debug) {
						glog.Infof("Queued tx limit exceeded for %s. Tx %s removed\n", common.PP(address[:]), common.PP(entry.hash[:]))
					}
					for _, drop := range promote[i+maxQueued:] {
						delete(txs, drop.hash)
					}
				}
				break
			}
			// Otherwise promote the transaction and move the guess nonce if needed
			pool.addTx(entry.hash, address, entry.Transaction)
			delete(txs, entry.hash)

			if entry.Nonce() == guessedNonce {
				guessedNonce++
			}
		}
		// Delete the entire queue entry if it became empty.
		if len(txs) == 0 {
			delete(pool.queue, address)
		}
	}
}
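
The promotion rule in both checkQueue variants reduces to: sort a sender's queued transactions by nonce and promote only the consecutive run starting at the account's next expected nonce; anything after the first gap stays queued. A small standalone sketch of that rule, using bare nonces instead of real transactions:

package main

import (
	"fmt"
	"sort"
)

// promotable splits queued nonces into the consecutive run that can be
// executed next (ready) and everything stuck behind a gap (waiting).
func promotable(nonces []uint64, next uint64) (ready, waiting []uint64) {
	sort.Slice(nonces, func(i, j int) bool { return nonces[i] < nonces[j] })
	for _, n := range nonces {
		switch {
		case n < next:
			// Already executed; the pool would simply drop it.
		case n == next:
			ready = append(ready, n)
			next++
		default:
			waiting = append(waiting, n)
		}
	}
	return ready, waiting
}

func main() {
	ready, waiting := promotable([]uint64{7, 3, 4, 5}, 3)
	fmt.Println(ready, waiting) // [3 4 5] [7]
}
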
Example no. 28
0
// reorg takes two blocks, the head of an old chain and the head of a new chain, reconstructs
// the blocks that need to be inserted to become part of the new canonical chain, accumulates
// the transactions removed in the switch and posts an event about them.
func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		commonBlock *types.Block
		oldStart    = oldBlock
		newStart    = newBlock
		deletedTxs  types.Transactions
	)

	// first reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for newBlock = newBlock; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}

	numSplit := newBlock.Number()
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)

		oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}

	if glog.V(logger.Debug) {
		commonHash := commonBlock.Hash()
		glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
	}

	var addedTxs types.Transactions
	// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
	for _, block := range newChain {
		// insert the block in the canonical way, re-writing history
		self.insert(block)
		// write canonical receipts and transactions
		if err := WriteTransactions(self.chainDb, block); err != nil {
			return err
		}
		receipts := GetBlockReceipts(self.chainDb, block.Hash())
		// write receipts
		if err := WriteReceipts(self.chainDb, receipts); err != nil {
			return err
		}
		// Write mipmap bloom filters
		if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
			return err
		}

		addedTxs = append(addedTxs, block.Transactions()...)
	}

	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	for _, tx := range diff {
		DeleteReceipt(self.chainDb, tx.Hash())
		DeleteTransaction(self.chainDb, tx.Hash())
	}
	// Must be posted in a goroutine because of the transaction pool trying
	// to acquire the chain manager lock
	go self.eventMux.Post(RemovedTransactionEvent{diff})

	return nil
}
Example no. 29
0
// InsertChain will attempt to insert the given chain into the canonical chain or, otherwise, create a fork. If an error is returned
// it will return the index number of the failing block as well as an error describing what went wrong (for possible errors see core/errors.go).
func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	self.wg.Add(1)
	defer self.wg.Done()

	self.chainmu.Lock()
	defer self.chainmu.Unlock()

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         struct{ queued, processed, ignored int }
		events        = make([]interface{}, 0, len(chain))
		coalescedLogs vm.Logs
		tstart        = time.Now()

		nonceChecked = make([]bool, len(chain))
	)

	// Start the parallel nonce verifier.
	nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain)
	defer close(nonceAbort)

	txcount := 0
	for i, block := range chain {
		if atomic.LoadInt32(&self.procInterrupt) == 1 {
			glog.V(logger.Debug).Infoln("Premature abort during block chain processing")
			break
		}

		bstart := time.Now()
		// Wait for block i's nonce to be verified before processing
		// its state transition.
		for !nonceChecked[i] {
			r := <-nonceResults
			nonceChecked[r.index] = true
			if !r.valid {
				block := chain[r.index]
				return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
			}
		}

		if BadHashes[block.Hash()] {
			err := BadHashError(block.Hash())
			reportBlock(block, err)
			return i, err
		}
		// Stage 1 validation of the block using the chain's validator
		// interface.
		err := self.Validator().ValidateBlock(block)
		if err != nil {
			if IsKnownBlockErr(err) {
				stats.ignored++
				continue
			}

			if err == BlockFutureErr {
				// Allow blocks up to maxTimeFutureBlocks seconds in the future. If this
				// limit is exceeded the import is aborted; otherwise the block is queued
				// and processed at a later time.
				max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
				if block.Time().Cmp(max) == 1 {
					return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
				}

				self.futureBlocks.Add(block.Hash(), block)
				stats.queued++
				continue
			}

			if IsParentErr(err) && self.futureBlocks.Contains(block.ParentHash()) {
				self.futureBlocks.Add(block.Hash(), block)
				stats.queued++
				continue
			}

			reportBlock(block, err)

			return i, err
		}

		// Create a new statedb using the parent block and report an
		// error if it fails.
		statedb, err := state.New(self.GetBlock(block.ParentHash()).Root(), self.chainDb)
		if err != nil {
			reportBlock(block, err)
			return i, err
		}
		// Process block using the parent state as reference point.
		receipts, logs, usedGas, err := self.processor.Process(block, statedb)
		if err != nil {
			reportBlock(block, err)
			return i, err
		}
		// Validate the state using the default validator
		err = self.Validator().ValidateState(block, self.GetBlock(block.ParentHash()), statedb, receipts, usedGas)
		if err != nil {
			reportBlock(block, err)
			return i, err
		}
		// Write state changes to database
		_, err = statedb.Commit()
		if err != nil {
			return i, err
		}

		// coalesce logs for later processing
		coalescedLogs = append(coalescedLogs, logs...)

		if err := WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
			return i, err
		}

		txcount += len(block.Transactions())
		// write the block to the chain and get the status
		status, err := self.WriteBlock(block)
		if err != nil {
			return i, err
		}

		switch status {
		case CanonStatTy:
			if glog.V(logger.Debug) {
				glog.Infof("[%v] inserted block #%d (%d TXs %v G %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), block.GasUsed(), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
			}
			events = append(events, ChainEvent{block, block.Hash(), logs})

			// This puts transactions in an extra db for RPC
			if err := WriteTransactions(self.chainDb, block); err != nil {
				return i, err
			}
			// store the receipts
			if err := WriteReceipts(self.chainDb, receipts); err != nil {
				return i, err
			}
			// Write mipmap bloom filters
			if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
				return i, err
			}
		case SideStatTy:
			if glog.V(logger.Detail) {
				glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
			}
			events = append(events, ChainSideEvent{block, logs})

		case SplitStatTy:
			events = append(events, ChainSplitEvent{block, logs})
		}
		stats.processed++
	}

	if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) {
		tend := time.Since(tstart)
		start, end := chain[0], chain[len(chain)-1]
		glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
	}
	go self.postChainEvents(events, coalescedLogs)

	return 0, nil
}
Example no. 30
0
// Run loops and evaluates the contract's code with the given input data
func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
	self.env.SetDepth(self.env.Depth() + 1)
	defer self.env.SetDepth(self.env.Depth() - 1)

	// Use a defer pattern to check for an error and, based on the error being nil or not, use all gas and return.
	defer func() {
		if err != nil {
			// In case of a VM exception (known exceptions) all gas is consumed (panics NOT included).
			contract.UseGas(contract.Gas)

			ret = contract.Return(nil)
		}
	}()

	if contract.CodeAddr != nil {
		if p := Precompiled[contract.CodeAddr.Str()]; p != nil {
			return self.RunPrecompiled(p, input, contract)
		}
	}

	// Don't bother with the execution if there's no code.
	if len(contract.Code) == 0 {
		return contract.Return(nil), nil
	}

	var (
		codehash = crypto.Sha3Hash(contract.Code) // codehash is used when doing jump dest caching
		program  *Program
	)
	if EnableJit {
		// If the JIT is enabled check the status of the JIT program,
		// if it doesn't exist compile a new program in a separate
		// goroutine or wait for compilation to finish if the JIT is
		// forced.
		switch GetProgramStatus(codehash) {
		case progReady:
			return RunProgram(GetProgram(codehash), self.env, contract, input)
		case progUnknown:
			if ForceJit {
				// Create and compile program
				program = NewProgram(contract.Code)
				perr := CompileProgram(program)
				if perr == nil {
					return RunProgram(program, self.env, contract, input)
				}
				glog.V(logger.Info).Infoln("error compiling program", err)
			} else {
				// create and compile the program. Compilation
				// is done in a separate goroutine
				program = NewProgram(contract.Code)
				go func() {
					err := CompileProgram(program)
					if err != nil {
						glog.V(logger.Info).Infoln("error compiling program", err)
						return
					}
				}()
			}
		}
	}

	var (
		caller     = contract.caller
		code       = contract.Code
		instrCount = 0

		op      OpCode          // current opcode
		mem     = NewMemory()   // bound memory
		stack   = newstack()    // local stack
		statedb = self.env.Db() // current state
		// For optimisation reasons we're using uint64 as the program counter.
		// It's theoretically possible to go above 2^64. The YP defines the PC
		// to be uint256, but in practice far smaller values are all that is feasible.
		pc = uint64(0) // program counter

		// jump evaluates and checks whether the given jump destination is a valid one
		// if valid move the `pc` otherwise return an error.
		jump = func(from uint64, to *big.Int) error {
			if !contract.jumpdests.has(codehash, code, to) {
				nop := contract.GetOp(to.Uint64())
				return fmt.Errorf("invalid jump destination (%v) %v", nop, to)
			}

			pc = to.Uint64()

			return nil
		}

		newMemSize *big.Int
		cost       *big.Int
	)
	contract.Input = input

	// Use a defer pattern to check for an error and, based on the error being nil or not, use all gas and return.
	defer func() {
		if err != nil {
			self.log(pc, op, contract.Gas, cost, mem, stack, contract, err)
		}
	}()

	if glog.V(logger.Debug) {
		glog.Infof("running byte VM %x\n", codehash[:4])
		tstart := time.Now()
		defer func() {
			glog.Infof("byte VM %x done. time: %v instrc: %v\n", codehash[:4], time.Since(tstart), instrCount)
		}()
	}

	for ; ; instrCount++ {
		/*
			if EnableJit && it%100 == 0 {
				if program != nil && progStatus(atomic.LoadInt32(&program.status)) == progReady {
					// move execution
					fmt.Println("moved", it)
					glog.V(logger.Info).Infoln("Moved execution to JIT")
					return runProgram(program, pc, mem, stack, self.env, contract, input)
				}
			}
		*/

		// Get the memory location of pc
		op = contract.GetOp(pc)

		// calculate the new memory size and gas price for the current executing opcode
		newMemSize, cost, err = calculateGasAndSize(self.env, contract, caller, op, statedb, mem, stack)
		if err != nil {
			return nil, err
		}

		// Use the calculated gas. When insufficient gas is present, use all gas and return an
		// Out Of Gas error
		if !contract.UseGas(cost) {
			return nil, OutOfGasError
		}

		// Resize the memory calculated previously
		mem.Resize(newMemSize.Uint64())
		// Add a log message
		self.log(pc, op, contract.Gas, cost, mem, stack, contract, nil)

		if opPtr := jumpTable[op]; opPtr.valid {
			if opPtr.fn != nil {
				opPtr.fn(instruction{}, &pc, self.env, contract, mem, stack)
			} else {
				switch op {
				case PC:
					opPc(instruction{data: new(big.Int).SetUint64(pc)}, &pc, self.env, contract, mem, stack)
				case JUMP:
					if err := jump(pc, stack.pop()); err != nil {
						return nil, err
					}

					continue
				case JUMPI:
					pos, cond := stack.pop(), stack.pop()

					if cond.Cmp(common.BigTrue) >= 0 {
						if err := jump(pc, pos); err != nil {
							return nil, err
						}

						continue
					}
				case RETURN:
					offset, size := stack.pop(), stack.pop()
					ret := mem.GetPtr(offset.Int64(), size.Int64())

					return contract.Return(ret), nil
				case SUICIDE:
					opSuicide(instruction{}, nil, self.env, contract, mem, stack)

					fallthrough
				case STOP: // Stop the contract
					return contract.Return(nil), nil
				}
			}
		} else {
			return nil, fmt.Errorf("Invalid opcode %x", op)
		}

		pc++

	}
}
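
To make the shape of the Run loop easier to see in isolation, here is a stripped-down fetch/charge/dispatch loop over a tiny made-up instruction set. The opcodes, the flat gas cost and the stack handling are simplifications, not the real EVM rules.

package main

import (
	"errors"
	"fmt"
)

type op byte

const (
	opPush op = iota // push the immediate operand for this position
	opAdd            // pop two values, push their sum
	opStop           // halt and return the top of the stack
)

// run executes code with a flat gas cost per instruction; args holds the
// immediate operand for each program position (only opPush uses it).
func run(code []op, args []int64, gas int64) (int64, error) {
	var stack []int64
	for pc := 0; pc < len(code); pc++ {
		// Charge gas up front; the real VM derives the cost (and any memory
		// growth) from the opcode and its operands before executing it.
		if gas--; gas < 0 {
			return 0, errors.New("out of gas")
		}
		switch code[pc] {
		case opPush:
			stack = append(stack, args[pc])
		case opAdd:
			n := len(stack)
			stack[n-2] += stack[n-1]
			stack = stack[:n-1]
		case opStop:
			return stack[len(stack)-1], nil
		default:
			return 0, fmt.Errorf("invalid opcode %d", code[pc])
		}
	}
	return 0, nil
}

func main() {
	out, err := run([]op{opPush, opPush, opAdd, opStop}, []int64{2, 3, 0, 0}, 10)
	fmt.Println(out, err) // 5 <nil>
}
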