Example #1
// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending on what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send to a subset of the peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.blockchain.GetBlock(block.ParentHash()); parent != nil {
			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash()))
		} else {
			glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
			return
		}
		// Send the block to a subset of our peers
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.SendNewBlock(block, td)
		}
		glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if pm.blockchain.HasBlock(hash) {
		for _, peer := range peers {
			if peer.version < eth62 {
				peer.SendNewBlockHashes61([]common.Hash{hash})
			} else {
				peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
			}
		}
		glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
	}
}
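
The square-root fan-out above is easy to isolate. A minimal sketch, assuming plain string peer IDs in place of the real peer objects (splitPeers and its names are hypothetical): the first sqrt(N) peers receive the full block, the rest only get the hash announcement, which keeps propagation bandwidth bounded.

package main

import (
	"fmt"
	"math"
)

// splitPeers isolates BroadcastBlock's fan-out idea: the first sqrt(N)
// peers get the full block, the remainder only an announcement.
func splitPeers(peers []string) (transfer, announce []string) {
	n := int(math.Sqrt(float64(len(peers))))
	return peers[:n], peers[n:]
}

func main() {
	peers := []string{"p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9"}
	transfer, announce := splitPeers(peers)
	fmt.Println(transfer) // [p1 p2 p3]
	fmt.Println(announce) // [p4 p5 p6 p7 p8 p9]
}
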
Example #2
func (self *Registrar) SetHashReg(hashreg string, addr common.Address) (txhash string, err error) {
	if hashreg != "" {
		HashRegAddr = hashreg
	} else {
		if !zero.MatchString(HashRegAddr) {
			return
		}
		nameHex, extra := encodeName(HashRegName, 2)
		hashRegAbi := resolveAbi + nameHex + extra
		glog.V(logger.Detail).Infof("\ncall HashRegAddr %v with %v\n", GlobalRegistrarAddr, hashRegAbi)
		var res string
		res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", hashRegAbi)
		if len(res) >= 40 {
			HashRegAddr = "0x" + res[len(res)-40:len(res)]
		}
		if err != nil || zero.MatchString(HashRegAddr) {
			if (addr == common.Address{}) {
				err = fmt.Errorf("HashReg address not found and sender for creation not given")
				return
			}

			txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "", "", HashRegCode)
			if err != nil {
				err = fmt.Errorf("HashReg address not found and sender for creation failed: %v", err)
			}
			glog.V(logger.Detail).Infof("created HashRegAddr @ txhash %v\n", txhash)
		} else {
			glog.V(logger.Detail).Infof("HashRegAddr found at @ %v\n", HashRegAddr)
			return
		}
	}

	return
}
Example #3
func (self *Registrar) SetUrlHint(urlhint string, addr common.Address) (txhash string, err error) {
	if urlhint != "" {
		UrlHintAddr = urlhint
	} else {
		if !zero.MatchString(UrlHintAddr) {
			return
		}
		nameHex, extra := encodeName(UrlHintName, 2)
		urlHintAbi := resolveAbi + nameHex + extra
		glog.V(logger.Detail).Infof("UrlHint address query data: %s to %s", urlHintAbi, GlobalRegistrarAddr)
		var res string
		res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", urlHintAbi)
		if len(res) >= 40 {
			UrlHintAddr = "0x" + res[len(res)-40:len(res)]
		}
		if err != nil || zero.MatchString(UrlHintAddr) {
			if (addr == common.Address{}) {
				err = fmt.Errorf("UrlHint address not found and sender for creation not given")
				return
			}
			txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "210000", "", UrlHintCode)
			if err != nil {
				err = fmt.Errorf("UrlHint address not found and sender for creation failed: %v", err)
			}
			glog.V(logger.Detail).Infof("created UrlHint @ txhash %v\n", txhash)
		} else {
			glog.V(logger.Detail).Infof("UrlHint found @ %v\n", HashRegAddr)
			return
		}
	}

	return
}
Example #4
// wsHandshakeValidator returns a handler that verifies the origin during the
// websocket upgrade process. When '*' is specified as an allowed origin, all
// connections are accepted.
func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http.Request) error {
	origins := set.New()
	allowAllOrigins := false

	for _, origin := range allowedOrigins {
		if origin == "*" {
			allowAllOrigins = true
		}
		if origin != "" {
			origins.Add(origin)
		}
	}

	// allow localhost if no allowedOrigins are specified
	if len(origins.List()) == 0 {
		origins.Add("http://localhost")
		if hostname, err := os.Hostname(); err == nil {
			origins.Add("http://" + hostname)
		}
	}

	glog.V(logger.Debug).Infof("Allowed origin(s) for WS RPC interface %v\n", origins.List())

	f := func(cfg *websocket.Config, req *http.Request) error {
		origin := req.Header.Get("Origin")
		if allowAllOrigins || origins.Has(origin) {
			return nil
		}
		glog.V(logger.Debug).Infof("origin '%s' not allowed on WS-RPC interface\n", origin)
		return fmt.Errorf("origin %s not allowed", origin)
	}

	return f
}
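
The same origin-check idea can be sketched without the set package, using a plain map[string]bool. The originChecker helper below is hypothetical and validates an *http.Request directly rather than the (*websocket.Config, *http.Request) pair used above:

package main

import (
	"fmt"
	"net/http"
)

// originChecker builds a closure over the allowed-origins map, exactly
// like the validator above builds one over the set.
func originChecker(allowed []string) func(*http.Request) error {
	allowAll := false
	origins := make(map[string]bool)
	for _, o := range allowed {
		if o == "*" {
			allowAll = true
		}
		if o != "" {
			origins[o] = true
		}
	}
	return func(req *http.Request) error {
		origin := req.Header.Get("Origin")
		if allowAll || origins[origin] {
			return nil
		}
		return fmt.Errorf("origin %s not allowed", origin)
	}
}

func main() {
	check := originChecker([]string{"http://localhost"})
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	req.Header.Set("Origin", "http://evil.example")
	fmt.Println(check(req)) // origin http://evil.example not allowed
}
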
Example #5
func ecrecoverFunc(in []byte) []byte {
	in = common.RightPadBytes(in, 128)
	// "in" is (hash, v, r, s), each 32 bytes
	// but for ecrecover we want (r, s, v)

	r := common.BytesToBig(in[64:96])
	s := common.BytesToBig(in[96:128])
	// Treat V as a 256-bit integer
	vbig := common.Bytes2Big(in[32:64])
	v := byte(vbig.Uint64())

	if !crypto.ValidateSignatureValues(v, r, s) {
		glog.V(logger.Debug).Infof("EC RECOVER FAIL: v, r or s value invalid")
		return nil
	}

	// v needs to be at the end and normalized for libsecp256k1
	vbignormal := new(big.Int).Sub(vbig, big.NewInt(27))
	vnormal := byte(vbignormal.Uint64())
	rsv := append(in[64:128], vnormal)
	pubKey, err := crypto.Ecrecover(in[:32], rsv)
	// make sure the public key is a valid one
	if err != nil {
		glog.V(logger.Error).Infof("EC RECOVER FAIL: ", err)
		return nil
	}

	// skip the first byte of the pubkey, the uncompressed-point prefix (Bitcoin heritage)
	return common.LeftPadBytes(crypto.Sha3(pubKey[1:])[12:], 32)
}
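
The fixed 128-byte input layout (hash | v | r | s, 32 bytes each) is the crux here. A self-contained sketch of the padding and slicing, with rightPad standing in for common.RightPadBytes (an assumption, not the real helper):

package main

import "fmt"

// rightPad sketches common.RightPadBytes: extend b with zero bytes up to
// length n, so all four 32-byte fields always exist.
func rightPad(b []byte, n int) []byte {
	if len(b) >= n {
		return b
	}
	padded := make([]byte, n)
	copy(padded, b)
	return padded
}

func main() {
	// Even a short input is padded out to the full 128-byte layout.
	in := rightPad([]byte{0xde, 0xad}, 128)
	hash, v, r, s := in[:32], in[32:64], in[64:96], in[96:128]
	fmt.Println(len(hash), len(v), len(r), len(s)) // 32 32 32 32
}
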
Example #6
// parsePersistentNodes parses a list of discovery node URLs loaded from a .json
// file from within the data directory.
func (c *Config) parsePersistentNodes(file string) []*discover.Node {
	// Short circuit if no node config is present
	if c.DataDir == "" {
		return nil
	}
	path := filepath.Join(c.DataDir, file)
	if _, err := os.Stat(path); err != nil {
		return nil
	}
	// Load the nodes from the config file
	blob, err := ioutil.ReadFile(path)
	if err != nil {
		glog.V(logger.Error).Infof("Failed to access nodes: %v", err)
		return nil
	}
	nodelist := []string{}
	if err := json.Unmarshal(blob, &nodelist); err != nil {
		glog.V(logger.Error).Infof("Failed to load nodes: %v", err)
		return nil
	}
	// Interpret the list as a discovery node array
	var nodes []*discover.Node
	for _, url := range nodelist {
		if url == "" {
			continue
		}
		node, err := discover.ParseNode(url)
		if err != nil {
			glog.V(logger.Error).Infof("Node URL %s: %v\n", url, err)
			continue
		}
		nodes = append(nodes, node)
	}
	return nodes
}
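
The load-and-filter pattern reduces to JSON unmarshalling plus an empty-string skip. A minimal sketch against an inline JSON blob; the enode URLs are placeholders and the real discover.ParseNode validation is elided:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Placeholder node URLs; the real list is read from the data directory.
	blob := []byte(`["enode://aa@10.0.0.1:30303", "", "enode://bb@10.0.0.2:30303"]`)

	nodelist := []string{}
	if err := json.Unmarshal(blob, &nodelist); err != nil {
		fmt.Println("Failed to load nodes:", err)
		return
	}
	var nodes []string
	for _, url := range nodelist {
		if url == "" {
			continue // skip blank entries, as the real parser does
		}
		// discover.ParseNode would validate the URL here; elided.
		nodes = append(nodes, url)
	}
	fmt.Println(nodes)
}
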
Example #7
// GetTransactionByHash returns the transaction for the given hash
func (s *PublicTransactionPoolAPI) GetTransactionByHash(txHash common.Hash) (*RPCTransaction, error) {
	var tx *types.Transaction
	var isPending bool
	var err error

	if tx, isPending, err = getTransaction(s.chainDb, s.txPool, txHash); err != nil {
		glog.V(logger.Debug).Infof("%v\n", err)
		return nil, nil
	} else if tx == nil {
		return nil, nil
	}

	if isPending {
		return newRPCPendingTransaction(tx), nil
	}

	blockHash, _, _, err := getTransactionBlockData(s.chainDb, txHash)
	if err != nil {
		glog.V(logger.Debug).Infof("%v\n", err)
		return nil, nil
	}

	if block := s.bc.GetBlock(blockHash); block != nil {
		return newRPCTransaction(block, txHash)
	}

	return nil, nil
}
Example #8
// Map adds a port mapping on m and keeps it alive until c is closed.
// This function is typically invoked in its own goroutine.
func Map(m Interface, c chan struct{}, protocol string, extport, intport int, name string) {
	refresh := time.NewTimer(mapUpdateInterval)
	defer func() {
		refresh.Stop()
		glog.V(logger.Debug).Infof("deleting port mapping: %s %d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
		m.DeleteMapping(protocol, extport, intport)
	}()
	if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
		glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
	} else {
		glog.V(logger.Info).Infof("mapped network port %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
	}
	for {
		select {
		case _, ok := <-c:
			if !ok {
				return
			}
		case <-refresh.C:
			glog.V(logger.Detail).Infof("refresh port mapping %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
			if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
				glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
			}
			refresh.Reset(mapUpdateInterval)
		}
	}
}
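
The refresh loop above is a general keepalive pattern: do the work once, then re-arm a timer until the control channel is closed. A self-contained sketch with a shortened interval and a hypothetical work callback in place of AddMapping:

package main

import (
	"fmt"
	"time"
)

// keepAlive runs work once, then repeats it on every timer tick until the
// control channel is closed, mirroring the structure of Map above.
func keepAlive(c chan struct{}, interval time.Duration, work func()) {
	refresh := time.NewTimer(interval)
	defer refresh.Stop()
	work()
	for {
		select {
		case _, ok := <-c:
			if !ok {
				return // channel closed: stop refreshing
			}
		case <-refresh.C:
			work()
			refresh.Reset(interval)
		}
	}
}

func main() {
	c := make(chan struct{})
	go keepAlive(c, 10*time.Millisecond, func() { fmt.Println("mapping refreshed") })
	time.Sleep(35 * time.Millisecond)
	close(c) // ends the loop, like closing the channel passed to Map
}
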
Example #9
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
		propBroadcastDOSMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
		propBroadcastDropMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -float32(block.NumberU64()))
		if f.queueChangeHook != nil {
			f.queueChangeHook(op.block.Hash(), true)
		}
		if glog.V(logger.Debug) {
			glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
		}
	}
}
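
The distance check can be read in isolation: anything more than maxUncleDist behind or maxQueueDist ahead of the local head is dropped. A sketch; the constant values here are assumptions for illustration only:

package main

import "fmt"

// Assumed values for illustration; the real constants live with the Fetcher.
const (
	maxUncleDist = 7
	maxQueueDist = 32
)

// withinRange reports whether a block at blockNum is close enough to the
// local chain height to be worth queueing.
func withinRange(blockNum, chainHeight uint64) bool {
	dist := int64(blockNum) - int64(chainHeight)
	return dist >= -maxUncleDist && dist <= maxQueueDist
}

func main() {
	fmt.Println(withinRange(100, 100)) // true: at the head
	fmt.Println(withinRange(90, 100))  // false: too far in the past
	fmt.Println(withinRange(150, 100)) // false: too far in the future
}
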
Example #10
// add inserts a new envelope into the message pool to be distributed within the
// whisper network. It also inserts the envelope into the expiration pool at the
// appropriate timestamp.
func (self *Whisper) add(envelope *Envelope) error {
	self.poolMu.Lock()
	defer self.poolMu.Unlock()

	// short circuit when a received envelope has already expired
	if envelope.Expiry <= uint32(time.Now().Unix()) {
		return nil
	}

	// Insert the message into the tracked pool
	hash := envelope.Hash()
	if _, ok := self.messages[hash]; ok {
		glog.V(logger.Detail).Infof("whisper envelope already cached: %x\n", envelope)
		return nil
	}
	self.messages[hash] = envelope

	// Insert the message into the expiration pool for later removal
	if self.expirations[envelope.Expiry] == nil {
		self.expirations[envelope.Expiry] = set.NewNonTS()
	}
	if !self.expirations[envelope.Expiry].Has(hash) {
		self.expirations[envelope.Expiry].Add(hash)

		// Notify the local node of a message arrival
		go self.postEvent(envelope)
	}
	glog.V(logger.Detail).Infof("cached whisper envelope %x\n", envelope)

	return nil
}
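
The expiration bookkeeping buckets envelope hashes by expiry timestamp, so a later sweep can drop a whole second's worth at once. A sketch with string hashes standing in for the real common.Hash values:

package main

import "fmt"

func main() {
	// Hashes bucketed by expiry second, like self.expirations above.
	expirations := make(map[uint32]map[string]bool)
	add := func(expiry uint32, hash string) {
		if expirations[expiry] == nil {
			expirations[expiry] = make(map[string]bool)
		}
		expirations[expiry][hash] = true
	}
	add(1000, "envA")
	add(1000, "envB")
	add(1005, "envC")

	// An expiry sweep drops every bucket at or before the current time.
	now := uint32(1001)
	for expiry, hashes := range expirations {
		if expiry <= now {
			fmt.Println("expiring", len(hashes), "envelopes at", expiry)
			delete(expirations, expiry)
		}
	}
	fmt.Println("buckets left:", len(expirations)) // 1
}
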
Example #11
func (pool *TxPool) resetState() {
	currentState, err := pool.currentState()
	if err != nil {
		glog.V(logger.Info).Infoln("failed to get current state: %v", err)
		return
	}
	pool.pendingState = state.ManageState(currentState)

	// validate the pool of pending transactions, this will remove
	// any transactions that have been included in the block or
	// have been invalidated because of another transaction (e.g.
	// higher gas price)
	pool.validatePool()

	// Loop over the pending transactions and derive the nonces for the new
	// pending transaction set.
	for _, tx := range pool.pending {
		if addr, err := tx.From(); err == nil {
			// Set the nonce. Transaction nonce can never be lower
			// than the state nonce; validatePool took care of that.
			if pool.pendingState.GetNonce(addr) <= tx.Nonce() {
				pool.pendingState.SetNonce(addr, tx.Nonce()+1)
			}
		}
	}
	// Check the queue and move transactions over to the pending if possible
	// or remove those that have become invalid
	pool.checkQueue()
}
Example #12
// resolve attempts to find the current endpoint for the destination
// using discovery.
//
// Resolve operations are throttled with backoff to avoid flooding the
// discovery network with useless queries for nodes that don't exist.
// The backoff delay resets when the node is found.
func (t *dialTask) resolve(srv *Server) bool {
	if srv.ntab == nil {
		glog.V(logger.Debug).Infof("can't resolve node %x: discovery is disabled", t.dest.ID[:6])
		return false
	}
	if t.resolveDelay == 0 {
		t.resolveDelay = initialResolveDelay
	}
	if time.Since(t.lastResolved) < t.resolveDelay {
		return false
	}
	resolved := srv.ntab.Resolve(t.dest.ID)
	t.lastResolved = time.Now()
	if resolved == nil {
		t.resolveDelay *= 2
		if t.resolveDelay > maxResolveDelay {
			t.resolveDelay = maxResolveDelay
		}
		glog.V(logger.Debug).Infof("resolving node %x failed (new delay: %v)", t.dest.ID[:6], t.resolveDelay)
		return false
	}
	// The node was found.
	t.resolveDelay = initialResolveDelay
	t.dest = resolved
	glog.V(logger.Debug).Infof("resolved node %x: %v:%d", t.dest.ID[:6], t.dest.IP, t.dest.TCP)
	return true
}
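
The throttle is plain exponential backoff with a cap. A self-contained sketch; the delay constants mirror the names used above, but their values here are assumptions:

package main

import (
	"fmt"
	"time"
)

// Assumed values; the real constants are defined next to dialTask.
const (
	initialResolveDelay = 60 * time.Second
	maxResolveDelay     = time.Hour
)

type task struct {
	resolveDelay time.Duration
}

// backoff doubles the delay after a failed resolve, capped at the maximum;
// a successful resolve would reset it to initialResolveDelay.
func (t *task) backoff() {
	if t.resolveDelay == 0 {
		t.resolveDelay = initialResolveDelay
	}
	t.resolveDelay *= 2
	if t.resolveDelay > maxResolveDelay {
		t.resolveDelay = maxResolveDelay
	}
}

func main() {
	t := new(task)
	for i := 0; i < 7; i++ {
		t.backoff()
		fmt.Println(t.resolveDelay) // 2m, 4m, 8m, 16m, 32m, then capped at 1h
	}
}
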
Example #13
func (env *Work) commitTransactions(transactions types.Transactions, gasPrice *big.Int, bc *core.BlockChain) {
	gp := new(core.GasPool).AddGas(env.header.GasLimit)
	for _, tx := range transactions {
		// We can skip err. It has already been validated in the tx pool
		from, _ := tx.From()

		// Check if it falls within margin. Txs from owned accounts are always processed.
		if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
			// Ignore both the transaction and the transactor: once this transaction
			// is skipped, every subsequent nonce from this account would fail anyway,
			// so there's no point in processing them.
			env.lowGasTransactors.Add(from)

			glog.V(logger.Info).Infof("transaction(%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4])
		}

		// Continue with the next transaction if the transaction sender is included in
		// the low gas tx set. This will also remove the tx and all sequential transactions
		// from this transactor
		if env.lowGasTransactors.Has(from) {
			// add tx to the low gas set. This will be removed at the end of the run
			// owned accounts are ignored
			if !env.ownedAccounts.Has(from) {
				env.lowGasTxs = append(env.lowGasTxs, tx)
			}
			continue
		}

		// Move on to the next transaction when the transactor is in the ignored transactors set.
		// This may occur when a transaction hits the gas limit: if the next transaction were
		// processed (and potentially included in the block), it would throw a nonce error
		// because the previous transaction hasn't been processed yet.
		// Therefore we need to ignore any transaction after the ignored one.
		if env.ignoredTransactors.Has(from) {
			continue
		}

		env.state.StartRecord(tx.Hash(), common.Hash{}, 0)

		err := env.commitTransaction(tx, bc, gp)
		switch {
		case core.IsGasLimitErr(err):
			// ignore the transactor so no nonce errors will be thrown for this account
			// next time the worker is run, they'll be picked up again.
			env.ignoredTransactors.Add(from)

			glog.V(logger.Detail).Infof("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4])
		case err != nil:
			env.remove.Add(tx.Hash())

			if glog.V(logger.Detail) {
				glog.Infof("TX (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err)
			}
		default:
			env.tcount++
		}
	}
}
Example #14
func (p *Peer) run() DiscReason {
	var (
		writeStart = make(chan struct{}, 1)
		writeErr   = make(chan error, 1)
		readErr    = make(chan error, 1)
		reason     DiscReason
		requested  bool
	)
	p.wg.Add(2)
	go p.readLoop(readErr)
	go p.pingLoop()

	// Start all protocol handlers.
	writeStart <- struct{}{}
	p.startProtocols(writeStart, writeErr)

	// Wait for an error or disconnect.
loop:
	for {
		select {
		case err := <-writeErr:
			// A write finished. Allow the next write to start if
			// there was no error.
			if err != nil {
				glog.V(logger.Detail).Infof("%v: write error: %v\n", p, err)
				reason = DiscNetworkError
				break loop
			}
			writeStart <- struct{}{}
		case err := <-readErr:
			if r, ok := err.(DiscReason); ok {
				glog.V(logger.Debug).Infof("%v: remote requested disconnect: %v\n", p, r)
				requested = true
				reason = r
			} else {
				glog.V(logger.Detail).Infof("%v: read error: %v\n", p, err)
				reason = DiscNetworkError
			}
			break loop
		case err := <-p.protoErr:
			reason = discReasonForError(err)
			glog.V(logger.Debug).Infof("%v: protocol error: %v (%v)\n", p, err, reason)
			break loop
		case reason = <-p.disc:
			glog.V(logger.Debug).Infof("%v: locally requested disconnect: %v\n", p, reason)
			break loop
		}
	}

	close(p.closed)
	p.rw.close(reason)
	p.wg.Wait()
	if requested {
		reason = DiscRequested
	}
	return reason
}
Example #15
func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req interface{}) error {
	packet, err := encodePacket(t.priv, ptype, req)
	if err != nil {
		return err
	}
	glog.V(logger.Detail).Infof(">>> %v %T\n", toaddr, req)
	if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
		glog.V(logger.Detail).Infoln("UDP send failed:", err)
	}
	return err
}
Example #16
// dial performs the actual connection attempt.
func (t *dialTask) dial(srv *Server, dest *discover.Node) bool {
	addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)}
	glog.V(logger.Debug).Infof("dial tcp %v (%x)\n", addr, dest.ID[:6])
	fd, err := srv.Dialer.Dial("tcp", addr.String())
	if err != nil {
		glog.V(logger.Detail).Infof("%v", err)
		return false
	}
	mfd := newMeteredConn(fd, false)
	srv.setupConn(mfd, t.flags, dest)
	return true
}
Example #17
func (self *StateTransition) transitionDb() (ret []byte, usedGas *big.Int, err error) {
	if err = self.preCheck(); err != nil {
		return
	}

	msg := self.msg
	sender, _ := self.from() // err checked in preCheck

	// Pay intrinsic gas
	if err = self.useGas(IntrinsicGas(self.data)); err != nil {
		return nil, nil, InvalidTxError(err)
	}

	vmenv := self.env
	var addr common.Address
	if MessageCreatesContract(msg) {
		ret, addr, err = vmenv.Create(sender, self.data, self.gas, self.gasPrice, self.value)
		if err == nil {
			dataGas := big.NewInt(int64(len(ret)))
			dataGas.Mul(dataGas, params.CreateDataGas)
			if err := self.useGas(dataGas); err == nil {
				self.state.SetCode(addr, ret)
			} else {
				ret = nil // does not affect consensus but useful for StateTests validations
				glog.V(logger.Core).Infoln("Insufficient gas for creating code. Require", dataGas, "and have", self.gas)
			}
		}
		glog.V(logger.Core).Infoln("VM create err:", err)
	} else {
		// Increment the nonce for the next transaction
		self.state.SetNonce(sender.Address(), self.state.GetNonce(sender.Address())+1)
		ret, err = vmenv.Call(sender, self.to().Address(), self.data, self.gas, self.gasPrice, self.value)
		glog.V(logger.Core).Infoln("VM call err:", err)
	}

	if err != nil && IsValueTransferErr(err) {
		return nil, nil, InvalidTxError(err)
	}

	// We aren't interested in errors here. Errors returned by the VM are non-consensus errors and therefore shouldn't bubble up
	if err != nil {
		err = nil
	}

	if vm.Debug {
		vm.StdErrFormat(vmenv.StructLogs())
	}

	self.refundGas()
	self.state.AddBalance(self.env.Coinbase(), new(big.Int).Mul(self.gasUsed(), self.gasPrice))

	return ret, self.gasUsed(), err
}
Example #18
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*BlockChain, error) {
	headerCache, _ := lru.New(headerCacheLimit)
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	tdCache, _ := lru.New(tdCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)

	bc := &BlockChain{
		chainDb:      chainDb,
		eventMux:     mux,
		quit:         make(chan struct{}),
		headerCache:  headerCache,
		bodyCache:    bodyCache,
		bodyRLPCache: bodyRLPCache,
		tdCache:      tdCache,
		blockCache:   blockCache,
		futureBlocks: futureBlocks,
		pow:          pow,
	}
	// Seed a fast random generator from a crypto-strength source
	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		return nil, err
	}
	bc.rand = mrand.New(mrand.NewSource(seed.Int64()))
	bc.SetValidator(NewBlockValidator(bc, pow))
	bc.SetProcessor(NewStateProcessor(bc))

	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		bc.genesisBlock, err = WriteDefaultGenesisBlock(chainDb)
		if err != nil {
			return nil, err
		}
		glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeader(hash); header != nil {
			glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
			bc.SetHead(header.Number.Uint64() - 1)
			glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}
Example #19
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
	packet, fromID, hash, err := decodePacket(buf)
	if err != nil {
		glog.V(logger.Debug).Infof("Bad packet from %v: %v\n", from, err)
		return err
	}
	status := "ok"
	if err = packet.handle(t, from, fromID, hash); err != nil {
		status = err.Error()
	}
	glog.V(logger.Detail).Infof("<<< %v %T: %s\n", from, packet, status)
	return err
}
Example #20
// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
	blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil)
	if err != nil {
		glog.V(logger.Detail).Infof("failed to retrieve node %v: %v", id, err)
		return nil
	}
	node := new(Node)
	if err := rlp.DecodeBytes(blob, node); err != nil {
		glog.V(logger.Warn).Infof("failed to decode node RLP: %v", err)
		return nil
	}
	node.sha = crypto.Sha3Hash(node.ID[:])
	return node
}
Example #21
// generate creates the actual cache. It can be called from multiple
// goroutines. The first call will generate the cache; subsequent
// calls wait until it is generated.
func (cache *cache) generate() {
	cache.gen.Do(func() {
		started := time.Now()
		seedHash := makeSeedHash(cache.epoch)
		glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
		size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
		if cache.test {
			size = cacheSizeForTesting
		}
		cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
		runtime.SetFinalizer(cache, freeCache)
		glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
	})
}
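
The "first call generates, later calls wait" behaviour comes entirely from sync.Once. A self-contained sketch of the same pattern without the cgo calls (onceCache and its sizes are illustrative):

package main

import (
	"fmt"
	"sync"
)

type onceCache struct {
	gen  sync.Once
	data []byte
}

// generate runs its body exactly once; concurrent callers block until the
// first call completes, which is the guarantee the ethash cache relies on.
func (c *onceCache) generate() {
	c.gen.Do(func() {
		fmt.Println("generating cache") // printed exactly once
		c.data = make([]byte, 1024)
	})
}

func main() {
	c := new(onceCache)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.generate() // safe from multiple goroutines
		}()
	}
	wg.Wait()
	fmt.Println(len(c.data)) // 1024
}
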
Example #22
// validatePool removes invalid and processed transactions from the main pool.
// If a transaction is removed for being invalid (e.g. out of funds), all
// subsequent (still valid) transactions are moved back into the future queue.
// This is important to prevent a drained account from DOSing the network with
// non-executable transactions.
func (pool *TxPool) validatePool() {
	state, err := pool.currentState()
	if err != nil {
		glog.V(logger.Info).Infoln("failed to get current state: %v", err)
		return
	}
	balanceCache := make(map[common.Address]*big.Int)

	// Clean up the pending pool, accumulating invalid nonces
	gaps := make(map[common.Address]uint64)

	for hash, tx := range pool.pending {
		sender, _ := tx.From() // err already checked

		// Perform light nonce and balance validation
		balance := balanceCache[sender]
		if balance == nil {
			balance = state.GetBalance(sender)
			balanceCache[sender] = balance
		}
		if past := state.GetNonce(sender) > tx.Nonce(); past || balance.Cmp(tx.Cost()) < 0 {
			// Remove any transaction invalidated by a past nonce or insufficient balance
			if glog.V(logger.Core) {
				glog.Infof("removed tx (%v) from pool: low tx nonce or out of funds\n", tx)
			}
			delete(pool.pending, hash)

			// Track the smallest invalid nonce to postpone subsequent transactions
			if !past {
				if prev, ok := gaps[sender]; !ok || tx.Nonce() < prev {
					gaps[sender] = tx.Nonce()
				}
			}
		}
	}
	// Move all transactions after a gap back to the future queue
	if len(gaps) > 0 {
		for hash, tx := range pool.pending {
			sender, _ := tx.From()
			if gap, ok := gaps[sender]; ok && tx.Nonce() >= gap {
				if glog.V(logger.Core) {
					glog.Infof("postponed tx (%v) due to introduced gap\n", tx)
				}
				pool.queueTx(hash, tx)
				delete(pool.pending, hash)
			}
		}
	}
}
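
The gap handling is the subtle part: dropping one transaction invalidates every later nonce from the same sender, so only the smallest dropped nonce per sender needs tracking. A sketch with plain types (the tx struct and sample data are hypothetical):

package main

import "fmt"

type tx struct {
	sender string
	nonce  uint64
}

func main() {
	pending := []tx{{"alice", 3}, {"alice", 4}, {"alice", 5}, {"bob", 1}}
	gaps := map[string]uint64{}

	// Suppose alice's nonce-4 transaction was dropped as unpayable: track
	// the smallest invalid nonce per sender, as validatePool does.
	dropped := tx{"alice", 4}
	if prev, ok := gaps[dropped.sender]; !ok || dropped.nonce < prev {
		gaps[dropped.sender] = dropped.nonce
	}

	// Everything at or after the gap gets postponed; earlier nonces and
	// other senders stay pending.
	for _, t := range pending {
		if gap, ok := gaps[t.sender]; ok && t.nonce >= gap {
			fmt.Println("postponed:", t.sender, t.nonce) // alice 4, alice 5
		}
	}
}
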
Example #23
// GetTransactionReceipt returns the transaction receipt for the given transaction hash.
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) {
	receipt := core.GetReceipt(s.chainDb, txHash)
	if receipt == nil {
		glog.V(logger.Debug).Infof("receipt not found for transaction %s", txHash.Hex())
		return nil, nil
	}

	tx, _, err := getTransaction(s.chainDb, s.txPool, txHash)
	if err != nil {
		glog.V(logger.Debug).Infof("%v\n", err)
		return nil, nil
	}

	txBlock, blockIndex, index, err := getTransactionBlockData(s.chainDb, txHash)
	if err != nil {
		glog.V(logger.Debug).Infof("%v\n", err)
		return nil, nil
	}

	from, err := tx.From()
	if err != nil {
		glog.V(logger.Debug).Infof("%v\n", err)
		return nil, nil
	}

	fields := map[string]interface{}{
		"blockHash":         txBlock,
		"blockNumber":       rpc.NewHexNumber(blockIndex),
		"transactionHash":   txHash,
		"transactionIndex":  rpc.NewHexNumber(index),
		"from":              from,
		"to":                tx.To(),
		"gasUsed":           rpc.NewHexNumber(receipt.GasUsed),
		"cumulativeGasUsed": rpc.NewHexNumber(receipt.CumulativeGasUsed),
		"contractAddress":   nil,
		"logs":              receipt.Logs,
	}

	if receipt.Logs == nil {
		fields["logs"] = []vm.Logs{}
	}

	// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
	if !bytes.Equal(receipt.ContractAddress.Bytes(), bytes.Repeat([]byte{0}, 20)) {
		fields["contractAddress"] = receipt.ContractAddress
	}

	return fields, nil
}
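
The 20-zero-byte test at the end reads more directly with bytes.Equal. A minimal sketch; isContractCreation is a hypothetical helper operating on raw bytes rather than the receipt type:

package main

import (
	"bytes"
	"fmt"
)

// isContractCreation: a receipt whose contract address is all zero bytes
// is treated as a plain value transfer, not a contract creation.
func isContractCreation(contractAddress []byte) bool {
	return !bytes.Equal(contractAddress, make([]byte, 20))
}

func main() {
	fmt.Println(isContractCreation(make([]byte, 20)))              // false
	fmt.Println(isContractCreation(append(make([]byte, 19), 0x1))) // true
}
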
Example #24
func (pm *ProtocolManager) Stop() {
	// Show a log message. During download / processing this could actually
	// take between 5 and 10 seconds and therefore feedback is required.
	glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")

	pm.quit = true
	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
	close(pm.quitSync)             // quits syncer, fetcher, txsyncLoop

	// Wait for any process action
	pm.wg.Wait()

	glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
}
Example #25
// Get returns the value for key stored in the trie.
// The value bytes must not be modified by the caller.
func (t *Trie) Get(key []byte) []byte {
	res, err := t.TryGet(key)
	if err != nil && glog.V(logger.Error) {
		glog.Errorf("Unhandled trie error: %v", err)
	}
	return res
}
Example #26
// NewInProcRPCClient will start a new RPC server for the given node and returns a client to interact with it.
func NewInProcRPCClient(stack *node.Node) *inProcClient {
	server := rpc.NewServer()

	offered := stack.APIs()
	for _, api := range offered {
		server.RegisterName(api.Namespace, api.Service)
	}

	web3 := node.NewPublicWeb3API(stack)
	server.RegisterName("web3", web3)

	var ethereum *eth.Ethereum
	if err := stack.Service(&ethereum); err == nil {
		net := eth.NewPublicNetAPI(stack.Server(), ethereum.NetVersion())
		server.RegisterName("net", net)
	} else {
		glog.V(logger.Warn).Infof("%v\n", err)
	}

	buf := &buf{
		requests:  make(chan []byte),
		responses: make(chan []byte),
	}
	client := &inProcClient{
		server: server,
		buf:    buf,
	}

	go func() {
		server.ServeCodec(rpc.NewJSONCodec(client.buf))
	}()

	return client
}
Example #27
// Schedule61 adds a set of hashes to the download queue for scheduling, returning
// the new hashes encountered.
func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the hashes prioritised in the arrival order
	inserts := make([]common.Hash, 0, len(hashes))
	for _, hash := range hashes {
		// Skip anything we already have
		if old, ok := q.hashPool[hash]; ok {
			glog.V(logger.Warn).Infof("Hash %x already scheduled at index %v", hash, old)
			continue
		}
		// Update the counters and insert the hash
		q.hashCounter++
		inserts = append(inserts, hash)

		q.hashPool[hash] = q.hashCounter
		if fifo {
			q.hashQueue.Push(hash, -float32(q.hashCounter)) // Lowest gets scheduled first
		} else {
			q.hashQueue.Push(hash, float32(q.hashCounter)) // Highest gets scheduled first
		}
	}
	return inserts
}
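
The sign flip on the priority is what switches between FIFO and LIFO in a max-priority queue: pushing -counter makes the oldest item (lowest counter) surface first. A sketch that emulates the pop order with a sort instead of the real prque:

package main

import (
	"fmt"
	"sort"
)

type item struct {
	hash     string
	priority float32
}

// popOrder emulates draining a max-priority queue: highest priority first.
func popOrder(items []item) []string {
	sort.Slice(items, func(i, j int) bool { return items[i].priority > items[j].priority })
	order := make([]string, len(items))
	for i, it := range items {
		order[i] = it.hash
	}
	return order
}

func main() {
	var fifo, lifo []item
	for counter := 1; counter <= 3; counter++ {
		hash := fmt.Sprintf("hash%d", counter)
		fifo = append(fifo, item{hash, -float32(counter)}) // negated: oldest first
		lifo = append(lifo, item{hash, float32(counter)})  // raw: newest first
	}
	fmt.Println(popOrder(fifo)) // [hash1 hash2 hash3]
	fmt.Println(popOrder(lifo)) // [hash3 hash2 hash1]
}
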
Example #28
// update keeps track of the downloader events. Please be aware that this is a one-shot
// type of update loop. It's entered once and as soon as `Done` or `Failed` has been
// broadcast, the events are unregistered and the loop is exited. This is to prevent a
// major security vulnerability where external parties can DOS you with blocks and halt
// your mining operation for as long as the DOS continues.
func (self *Miner) update() {
	events := self.mux.Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})
out:
	for ev := range events.Chan() {
		switch ev.Data.(type) {
		case downloader.StartEvent:
			atomic.StoreInt32(&self.canStart, 0)
			if self.Mining() {
				self.Stop()
				atomic.StoreInt32(&self.shouldStart, 1)
				glog.V(logger.Info).Infoln("Mining operation aborted due to sync operation")
			}
		case downloader.DoneEvent, downloader.FailedEvent:
			shouldStart := atomic.LoadInt32(&self.shouldStart) == 1

			atomic.StoreInt32(&self.canStart, 1)
			atomic.StoreInt32(&self.shouldStart, 0)
			if shouldStart {
				self.Start(self.coinbase, self.threads)
			}
			// unsubscribe. we're only interested in this event once
			events.Unsubscribe()
			// stop immediately and ignore all further pending events
			break out
		}
	}
}
Example #29
func (self *Iterator) key(node interface{}) []byte {
	switch node := node.(type) {
	case shortNode:
		// Leaf node
		k := remTerm(node.Key)
		if vnode, ok := node.Val.(valueNode); ok {
			self.Value = vnode
			return k
		}
		return append(k, self.key(node.Val)...)
	case fullNode:
		if node[16] != nil {
			self.Value = node[16].(valueNode)
			return []byte{16}
		}
		for i := 0; i < 16; i++ {
			k := self.key(node[i])
			if k != nil {
				return append([]byte{byte(i)}, k...)
			}
		}
	case hashNode:
		rn, err := self.trie.resolveHash(node, nil, nil)
		if err != nil && glog.V(logger.Error) {
			glog.Errorf("Unhandled trie error: %v", err)
		}
		return self.key(rn)
	}

	return nil
}
Example #30
func New(solcPath string) (sol *Solidity, err error) {
	// set default solc
	if len(solcPath) == 0 {
		solcPath = "solc"
	}
	solcPath, err = exec.LookPath(solcPath)
	if err != nil {
		return
	}

	cmd := exec.Command(solcPath, "--version")
	var out bytes.Buffer
	cmd.Stdout = &out
	err = cmd.Run()
	if err != nil {
		return
	}

	fullVersion := out.String()
	version := versionRegexp.FindString(fullVersion)
	legacy := legacyRegexp.MatchString(version)

	sol = &Solidity{
		solcPath:    solcPath,
		version:     version,
		fullVersion: fullVersion,
		legacy:      legacy,
	}
	glog.V(logger.Info).Infoln(sol.Info())
	return
}