Example 1
func ecrecoverFunc(in []byte) []byte {
	in = common.RightPadBytes(in, 128)
	// "in" is (hash, v, r, s), each 32 bytes
	// but for ecrecover we want (r, s, v)

	r := common.BytesToBig(in[64:96])
	s := common.BytesToBig(in[96:128])
	// Treat V as a 256bit integer
	vbig := common.Bytes2Big(in[32:64])
	v := byte(vbig.Uint64())

	if !crypto.ValidateSignatureValues(v, r, s) {
		glog.V(logger.Debug).Infof("EC RECOVER FAIL: v, r or s value invalid")
		return nil
	}

	// v needs to be at the end and normalized for libsecp256k1
	vbignormal := new(big.Int).Sub(vbig, big.NewInt(27))
	vnormal := byte(vbignormal.Uint64())
	rsv := append(in[64:128], vnormal)
	pubKey, err := crypto.Ecrecover(in[:32], rsv)
	// make sure the public key is a valid one
	if err != nil {
		glog.V(logger.Error).Infof("EC RECOVER FAIL: ", err)
		return nil
	}

	// the first byte of pubkey is bitcoin heritage
	return common.LeftPadBytes(crypto.Sha3(pubKey[1:])[12:], 32)
}
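
For reference, a minimal sketch of how a caller could pack the 128-byte input consumed above. The helper name and the 65-byte [R || S || V] signature layout (with V in {0, 1}) are assumptions for illustration, not code from the original project.

// Illustrative only: builds the (hash, v, r, s) layout expected by ecrecoverFunc.
func packEcrecoverInput(hash, sig []byte) []byte {
	in := make([]byte, 128)
	copy(in[0:32], hash)         // word 0: message hash
	in[63] = sig[64] + 27        // word 1: V as a 32-byte big-endian integer
	copy(in[64:96], sig[0:32])   // word 2: R
	copy(in[96:128], sig[32:64]) // word 3: S
	return in
}
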
Example 2
// add inserts a new envelope into the message pool to be distributed within the
// whisper network. It also inserts the envelope into the expiration pool at the
// appropriate time-stamp.
func (self *Whisper) add(envelope *Envelope) error {
	self.poolMu.Lock()
	defer self.poolMu.Unlock()

	// Insert the message into the tracked pool
	hash := envelope.Hash()
	if _, ok := self.messages[hash]; ok {
		glog.V(logger.Detail).Infof("whisper envelope already cached: %x\n", envelope)
		return nil
	}
	self.messages[hash] = envelope

	// Insert the message into the expiration pool for later removal
	if self.expirations[envelope.Expiry] == nil {
		self.expirations[envelope.Expiry] = set.NewNonTS()
	}
	if !self.expirations[envelope.Expiry].Has(hash) {
		self.expirations[envelope.Expiry].Add(hash)

		// Notify the local node of a message arrival
		go self.postEvent(envelope)
	}
	glog.V(logger.Detail).Infof("cached whisper envelope %x\n", envelope)

	return nil
}
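
The expiration index above maps an expiry timestamp to a set of envelope hashes, so the sweeper only has to drop whole buckets. A minimal standalone sketch of that pattern, with plain maps standing in for the set package and illustrative type and helper names:

// Illustrative sketch: envelopes expiring in the same UNIX second share one bucket.
type hash [32]byte

var expirations = make(map[uint32]map[hash]struct{})

func track(expiry uint32, h hash) {
	if expirations[expiry] == nil {
		expirations[expiry] = make(map[hash]struct{})
	}
	expirations[expiry][h] = struct{}{}
}

func expire(now uint32) {
	for ts := range expirations {
		if ts <= now {
			delete(expirations, ts) // removes every hash scheduled for that second
		}
	}
}
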
Example 3
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
		discardMeter.Mark(1)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -float32(block.NumberU64()))

		if glog.V(logger.Debug) {
			glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
		}
	}
}
Example 4
// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
	glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())

	// Execute the Shift handshake
	td, head, genesis := pm.chainman.Status()
	if err := p.Handshake(td, head, genesis); err != nil {
		glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
		return err
	}
	// Register the peer locally
	glog.V(logger.Detail).Infof("%v: adding peer", p)
	if err := pm.peers.Register(p); err != nil {
		glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
		return err
	}
	defer pm.removePeer(p.id)

	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(), p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks); err != nil {
		return err
	}
	// Propagate existing transactions. New transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(p)

	// main loop. handle incoming messages.
	for {
		if err := pm.handleMsg(p); err != nil {
			glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
			return err
		}
	}
}
Example 5
func (self *Registrar) SetHashReg(hashreg string, addr common.Address) (txhash string, err error) {
	if hashreg != "" {
		HashRegAddr = hashreg
	} else {
		if !zero.MatchString(HashRegAddr) {
			return
		}
		nameHex, extra := encodeName(HashRegName, 2)
		hashRegAbi := resolveAbi + nameHex + extra
		glog.V(logger.Detail).Infof("\ncall HashRegAddr %v with %v\n", GlobalRegistrarAddr, hashRegAbi)
		var res string
		res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", hashRegAbi)
		if len(res) >= 40 {
			HashRegAddr = "0x" + res[len(res)-40:len(res)]
		}
		if err != nil || zero.MatchString(HashRegAddr) {
			if (addr == common.Address{}) {
				err = fmt.Errorf("HashReg address not found and sender for creation not given")
				return
			}

			txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "", "", HashRegCode)
			if err != nil {
				err = fmt.Errorf("HashReg address not found and sender for creation failed: %v", err)
			}
			glog.V(logger.Detail).Infof("created HashRegAddr @ txhash %v\n", txhash)
		} else {
			glog.V(logger.Detail).Infof("HashRegAddr found at @ %v\n", HashRegAddr)
			return
		}
	}

	return
}
Example 6
// UnlockAccount asks the user agent for the user password and tries to unlock the account.
// It will try 3 attempts before giving up.
func (fe *RemoteFrontend) UnlockAccount(address []byte) bool {
	if !fe.enabled {
		return false
	}

	err := fe.send(AskPasswordMethod, common.Bytes2Hex(address))
	if err != nil {
		glog.V(logger.Error).Infof("Unable to send password request to agent - %v\n", err)
		return false
	}

	passwdRes, err := fe.recv()
	if err != nil {
		glog.V(logger.Error).Infof("Unable to recv password response from agent - %v\n", err)
		return false
	}

	passwd, ok := passwdRes.Result.(string)
	if !ok {
		glog.V(logger.Error).Infoln("Invalid password response from agent")
		return false
	}

	if err := fe.mgr.Unlock(common.BytesToAddress(address), passwd); err == nil {
		return true
	}

	glog.V(logger.Debug).Infoln("3 invalid account unlock attempts")
	return false
}
Example 7
func (self *Registrar) SetUrlHint(urlhint string, addr common.Address) (txhash string, err error) {
	if urlhint != "" {
		UrlHintAddr = urlhint
	} else {
		if !zero.MatchString(UrlHintAddr) {
			return
		}
		nameHex, extra := encodeName(UrlHintName, 2)
		urlHintAbi := resolveAbi + nameHex + extra
		glog.V(logger.Detail).Infof("UrlHint address query data: %s to %s", urlHintAbi, GlobalRegistrarAddr)
		var res string
		res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", urlHintAbi)
		if len(res) >= 40 {
			UrlHintAddr = "0x" + res[len(res)-40:len(res)]
		}
		if err != nil || zero.MatchString(UrlHintAddr) {
			if (addr == common.Address{}) {
				err = fmt.Errorf("UrlHint address not found and sender for creation not given")
				return
			}
			txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "210000", "", UrlHintCode)
			if err != nil {
				err = fmt.Errorf("UrlHint address not found and sender for creation failed: %v", err)
			}
			glog.V(logger.Detail).Infof("created UrlHint @ txhash %v\n", txhash)
		} else {
			glog.V(logger.Detail).Infof("UrlHint found @ %v\n", HashRegAddr)
			return
		}
	}

	return
}
Example 8
// parseNodes parses a list of discovery node URLs loaded from a .json file.
func (cfg *Config) parseNodes(file string) []*discover.Node {
	// Short circuit if no node config is present
	path := filepath.Join(cfg.DataDir, file)
	if _, err := os.Stat(path); err != nil {
		return nil
	}
	// Load the nodes from the config file
	blob, err := ioutil.ReadFile(path)
	if err != nil {
		glog.V(logger.Error).Infof("Failed to access nodes: %v", err)
		return nil
	}
	nodelist := []string{}
	if err := json.Unmarshal(blob, &nodelist); err != nil {
		glog.V(logger.Error).Infof("Failed to load nodes: %v", err)
		return nil
	}
	// Interpret the list as a discovery node array
	var nodes []*discover.Node
	for _, url := range nodelist {
		if url == "" {
			continue
		}
		node, err := discover.ParseNode(url)
		if err != nil {
			glog.V(logger.Error).Infof("Node URL %s: %v\n", url, err)
			continue
		}
		nodes = append(nodes, node)
	}
	return nodes
}
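
For context, the parser above expects the file to hold a plain JSON array of enode URL strings. A hypothetical example follows; the file name, addresses and node-ID placeholders are illustrative, not values from the original project.

[
	"enode://<128-hex-char-node-id>@10.0.0.1:30303",
	"enode://<128-hex-char-node-id>@10.0.0.2:30303"
]
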
Example 9
// Map adds a port mapping on m and keeps it alive until c is closed.
// This function is typically invoked in its own goroutine.
func Map(m Interface, c chan struct{}, protocol string, extport, intport int, name string) {
	refresh := time.NewTimer(mapUpdateInterval)
	defer func() {
		refresh.Stop()
		glog.V(logger.Debug).Infof("deleting port mapping: %s %d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
		m.DeleteMapping(protocol, extport, intport)
	}()
	if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
		glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
	} else {
		glog.V(logger.Info).Infof("mapped network port %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
	}
	for {
		select {
		case _, ok := <-c:
			if !ok {
				return
			}
		case <-refresh.C:
			glog.V(logger.Detail).Infof("refresh port mapping %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
			if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
				glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
			}
			refresh.Reset(mapUpdateInterval)
		}
	}
}
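
A sketch of how a caller typically drives Map: start it in its own goroutine and close the channel to tear the mapping down. The nat.Parse helper, the mechanism string, the port and the mapping name are assumptions for illustration.

// Illustrative caller (assumes the Map above lives in a nat-style package that
// also exposes Parse; values are made up).
func mapListenerPort() chan struct{} {
	quit := make(chan struct{})
	if m, err := nat.Parse("upnp"); err == nil {
		go nat.Map(m, quit, "tcp", 30303, 30303, "shift p2p")
	}
	// close(quit) later to make Map return; its deferred DeleteMapping removes the mapping
	return quit
}
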
Example 10
// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending on what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send to a subset of the peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.chainman.GetBlock(block.ParentHash()); parent != nil {
			td = new(big.Int).Add(parent.Td, block.Difficulty())
		} else {
			glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
			return
		}
		// Send the block to a subset of our peers
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.SendNewBlock(block, td)
		}
		glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if pm.chainman.HasBlock(hash) {
		for _, peer := range peers {
			peer.SendNewBlockHashes([]common.Hash{hash})
		}
		glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
	}
}
Example 11
func (self *XEth) PushTx(encodedTx string) (string, error) {
	tx := new(types.Transaction)
	err := rlp.DecodeBytes(common.FromHex(encodedTx), tx)
	if err != nil {
		glog.V(logger.Error).Infoln(err)
		return "", err
	}

	err = self.backend.TxPool().Add(tx, true)
	if err != nil {
		return "", err
	}

	if tx.To() == nil {
		from, err := tx.From()
		if err != nil {
			return "", err
		}

		addr := crypto.CreateAddress(from, tx.Nonce())
		glog.V(logger.Info).Infof("Tx(%x) created: %x\n", tx.Hash(), addr)
	} else {
		glog.V(logger.Info).Infof("Tx(%x) to: %x\n", tx.Hash(), tx.To())
	}

	return tx.Hash().Hex(), nil
}
Example 12
// UnregisterPeer removes a peer from the known list, preventing any action from
// the specified peer.
func (d *Downloader) UnregisterPeer(id string) error {
	glog.V(logger.Detail).Infoln("Unregistering peer", id)
	if err := d.peers.Unregister(id); err != nil {
		glog.V(logger.Error).Infoln("Unregister failed:", err)
		return err
	}
	return nil
}
Example 13
func (p *Peer) run() DiscReason {
	var (
		writeStart = make(chan struct{}, 1)
		writeErr   = make(chan error, 1)
		readErr    = make(chan error, 1)
		reason     DiscReason
		requested  bool
	)
	p.wg.Add(2)
	go p.readLoop(readErr)
	go p.pingLoop()

	// Start all protocol handlers.
	writeStart <- struct{}{}
	p.startProtocols(writeStart, writeErr)

	// Wait for an error or disconnect.
loop:
	for {
		select {
		case err := <-writeErr:
			// A write finished. Allow the next write to start if
			// there was no error.
			if err != nil {
				glog.V(logger.Detail).Infof("%v: write error: %v\n", p, err)
				reason = DiscNetworkError
				break loop
			}
			writeStart <- struct{}{}
		case err := <-readErr:
			if r, ok := err.(DiscReason); ok {
				glog.V(logger.Debug).Infof("%v: remote requested disconnect: %v\n", p, r)
				requested = true
				reason = r
			} else {
				glog.V(logger.Detail).Infof("%v: read error: %v\n", p, err)
				reason = DiscNetworkError
			}
			break loop
		case err := <-p.protoErr:
			reason = discReasonForError(err)
			glog.V(logger.Debug).Infof("%v: protocol error: %v (%v)\n", p, err, reason)
			break loop
		case reason = <-p.disc:
			glog.V(logger.Debug).Infof("%v: locally requested disconnect: %v\n", p, reason)
			break loop
		}
	}

	close(p.closed)
	p.rw.close(reason)
	p.wg.Wait()
	if requested {
		reason = DiscRequested
	}
	return reason
}
Example 14
func (env *Work) commitTransactions(transactions types.Transactions, gasPrice *big.Int, proc *core.BlockProcessor) {
	for _, tx := range transactions {
		// We can skip err. It has already been validated in the tx pool
		from, _ := tx.From()

		// Check if it falls within margin. Txs from owned accounts are always processed.
		if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
			// Ignore the transaction and the transactor. We ignore the transactor
			// because every subsequent transaction from this account would fail the
			// nonce check anyway, so there is no point in processing them.
			env.lowGasTransactors.Add(from)

			glog.V(logger.Info).Infof("transaction(%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4])
		}

		// Continue with the next transaction if the transaction sender is included in
		// the low gas tx set. This will also remove the tx and all sequential transactions
		// from this transactor
		if env.lowGasTransactors.Has(from) {
			// add tx to the low gas set. This will be removed at the end of the run
			// owned accounts are ignored
			if !env.ownedAccounts.Has(from) {
				env.lowGasTxs = append(env.lowGasTxs, tx)
			}
			continue
		}

		// Move on to the next transaction when the transactor is in ignored transactions set
		// This may occur when a transaction hits the gas limit. When a gas limit is hit and
		// the transaction is processed (that could potentially be included in the block) it
		// will throw a nonce error because the previous transaction hasn't been processed.
		// Therefore we need to ignore any transaction after the ignored one.
		if env.ignoredTransactors.Has(from) {
			continue
		}

		env.state.StartRecord(tx.Hash(), common.Hash{}, 0)

		err := env.commitTransaction(tx, proc)
		switch {
		case state.IsGasLimitErr(err):
			// ignore the transactor so no nonce errors will be thrown for this account
			// next time the worker is run, they'll be picked up again.
			env.ignoredTransactors.Add(from)

			glog.V(logger.Detail).Infof("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4])
		case err != nil:
			env.remove.Add(tx.Hash())

			if glog.V(logger.Detail) {
				glog.Infof("TX (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err)
			}
		default:
			env.tcount++
		}
	}
}
Example 15
func sendJSON(w io.Writer, v interface{}) {
	if glog.V(logger.Detail) {
		if payload, err := json.MarshalIndent(v, "", "\t"); err == nil {
			glog.Infof("Sending payload: %s", payload)
		}
	}
	if err := json.NewEncoder(w).Encode(v); err != nil {
		glog.V(logger.Error).Infoln("Error sending JSON:", err)
	}
}
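
A minimal usage sketch for the helper above, assuming it is called from the same package; the buffer and payload are illustrative.

// Illustrative only: assumes "bytes" is imported alongside the packages used above.
func encodeExample() string {
	var buf bytes.Buffer
	sendJSON(&buf, map[string]interface{}{"jsonrpc": "2.0", "id": 1, "result": "0x1"})
	// buf now holds the newline-terminated JSON document; with logger.Detail
	// enabled, an indented copy of the same payload is also logged.
	return buf.String()
}
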
Example 16
func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req interface{}) error {
	packet, err := encodePacket(t.priv, ptype, req)
	if err != nil {
		return err
	}
	glog.V(logger.Detail).Infof(">>> %v %T\n", toaddr, req)
	if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
		glog.V(logger.Detail).Infoln("UDP send failed:", err)
	}
	return err
}
Example 17
func (self *StateTransition) transitionState() (ret []byte, usedGas *big.Int, err error) {
	if err = self.preCheck(); err != nil {
		return
	}

	msg := self.msg
	sender, _ := self.From() // err checked in preCheck

	// Pay intrinsic gas
	if err = self.UseGas(IntrinsicGas(self.data)); err != nil {
		return nil, nil, InvalidTxError(err)
	}

	vmenv := self.env
	var ref vm.ContextRef
	if MessageCreatesContract(msg) {
		ret, err, ref = vmenv.Create(sender, self.data, self.gas, self.gasPrice, self.value)
		if err == nil {
			dataGas := big.NewInt(int64(len(ret)))
			dataGas.Mul(dataGas, params.CreateDataGas)
			if err := self.UseGas(dataGas); err == nil {
				ref.SetCode(ret)
			} else {
				ret = nil // does not affect consensus but useful for StateTests validations
				glog.V(logger.Core).Infoln("Insufficient gas for creating code. Require", dataGas, "and have", self.gas)
			}
		}
		glog.V(logger.Core).Infoln("VM create err:", err)
	} else {
		// Increment the nonce for the next transaction
		self.state.SetNonce(sender.Address(), sender.Nonce()+1)
		ret, err = vmenv.Call(sender, self.To().Address(), self.data, self.gas, self.gasPrice, self.value)
		glog.V(logger.Core).Infoln("VM call err:", err)
	}

	if err != nil && IsValueTransferErr(err) {
		return nil, nil, InvalidTxError(err)
	}

	// We aren't interested in errors here. Errors returned by the VM are non-consensus errors and therefore shouldn't bubble up
	if err != nil {
		err = nil
	}

	if vm.Debug {
		vm.StdErrFormat(vmenv.StructLogs())
	}

	self.refundGas()
	self.state.AddBalance(self.env.Coinbase(), new(big.Int).Mul(self.gasUsed(), self.gasPrice))

	return ret, self.gasUsed(), err
}
Example 18
func (t *dialTask) Do(srv *Server) {
	addr := &net.TCPAddr{IP: t.dest.IP, Port: int(t.dest.TCP)}
	glog.V(logger.Debug).Infof("dialing %v\n", t.dest)
	fd, err := srv.Dialer.Dial("tcp", addr.String())
	if err != nil {
		glog.V(logger.Detail).Infof("dial error: %v", err)
		return
	}
	mfd := newMeteredConn(fd, false)

	srv.setupConn(mfd, t.flags, t.dest)
}
Example 19
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
	packet, fromID, hash, err := decodePacket(buf)
	if err != nil {
		glog.V(logger.Debug).Infof("Bad packet from %v: %v\n", from, err)
		return err
	}
	status := "ok"
	if err = packet.handle(t, from, fromID, hash); err != nil {
		status = err.Error()
	}
	glog.V(logger.Detail).Infof("<<< %v %T: %s\n", from, packet, status)
	return err
}
Example 20
func NewChainManager(chainDb common.Database, sqlDB types.SQLDatabase, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
	cache, _ := lru.New(blockCacheLimit)
	bc := &ChainManager{
		chainDb:  chainDb,
		sqlDB:    sqlDB,
		eventMux: mux,
		quit:     make(chan struct{}),
		cache:    cache,
		pow:      pow,
	}

	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		reader, err := NewDefaultGenesisReader()
		if err != nil {
			return nil, err
		}
		bc.genesisBlock, err = WriteGenesisBlock(chainDb, reader)
		if err != nil {
			return nil, err
		}
		glog.V(logger.Info).Infoln("WARNING: Wrote default shift genesis block")
	}

	if err := bc.setLastState(); err != nil {
		return nil, err
	}

	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if block := bc.GetBlock(hash); block != nil {
			glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4])
			block = bc.GetBlock(block.ParentHash())
			if block == nil {
				glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?")
			}
			bc.SetHead(block)

			glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
		}
	}

	// Take ownership of this particular state

	bc.futureBlocks, _ = lru.New(maxFutureBlocks)
	bc.makeCache()

	go bc.update()

	return bc, nil
}
Example 21
// RegisterPeer injects a new download peer into the set of block sources used
// for fetching hashes and blocks.
func (d *Downloader) RegisterPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn) error {
	// If the peer wants to send a banned hash, reject
	if d.banned.Has(head) {
		glog.V(logger.Debug).Infoln("Register rejected, head hash banned:", id)
		return errBannedHead
	}
	// Otherwise try to construct and register the peer
	glog.V(logger.Detail).Infoln("Registering peer", id)
	if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks)); err != nil {
		glog.V(logger.Error).Infoln("Register failed:", err)
		return err
	}
	return nil
}
Example 22
// generate creates the actual cache. It can be called from multiple
// goroutines. The first call will generate the cache; subsequent
// calls wait until it is generated.
func (cache *cache) generate() {
	cache.gen.Do(func() {
		started := time.Now()
		seedHash := makeSeedHash(cache.epoch)
		glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
		size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
		if cache.test {
			size = cacheSizeForTesting
		}
		cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
		runtime.SetFinalizer(cache, freeCache)
		glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
	})
}
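
The concurrency guarantee above comes from sync.Once: every goroutine calls generate, the first caller does the work, and the rest block inside Do until it finishes. A standalone sketch of that behaviour follows; it uses only the sync package, and the payload stands in for the ethash cache.

// Standalone sketch of the sync.Once pattern above (payload is illustrative).
type lazyCache struct {
	once sync.Once
	data []byte
}

func (c *lazyCache) generate() {
	c.once.Do(func() {
		c.data = make([]byte, 1024) // expensive work runs exactly once
	})
}

func useConcurrently(c *lazyCache) {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.generate() // later callers block inside Do until the first finishes
		}()
	}
	wg.Wait()
	// every goroutine now observes the same fully generated c.data
}
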
Example 23
// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
	blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil)
	if err != nil {
		glog.V(logger.Detail).Infof("failed to retrieve node %v: %v", id, err)
		return nil
	}
	node := new(Node)
	if err := rlp.DecodeBytes(blob, node); err != nil {
		glog.V(logger.Warn).Infof("failed to decode node RLP: %v", err)
		return nil
	}
	node.sha = crypto.Sha3Hash(node.ID[:])
	return node
}
Example 24
// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.cancel()
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()

	glog.V(logger.Debug).Infof("Synchronizing with the network using: %s, eth/%d", p.id, p.version)
	switch p.version {
	case eth60:
		// Old eth/60 version, use reverse hash retrieval algorithm
		if err = d.fetchHashes60(p, hash); err != nil {
			return err
		}
		if err = d.fetchBlocks60(); err != nil {
			return err
		}
	case eth61:
		// New eth/61, use forward, concurrent hash and block retrieval algorithm
		number, err := d.findAncestor(p)
		if err != nil {
			return err
		}
		errc := make(chan error, 2)
		go func() { errc <- d.fetchHashes(p, td, number+1) }()
		go func() { errc <- d.fetchBlocks(number + 1) }()

		// If any fetcher fails, cancel the other
		if err := <-errc; err != nil {
			d.cancel()
			<-errc
			return err
		}
		return <-errc

	default:
		// Something very wrong, stop right here
		glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
		return errBadPeer
	}
	glog.V(logger.Debug).Infoln("Synchronization completed")

	return nil
}
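
The eth/61 branch above uses a small fan-in pattern: both fetchers report into one buffered channel, and the first error cancels and drains the other so no goroutine leaks. A standalone sketch of that pattern, with fetchA, fetchB and cancel as illustrative stand-ins:

// Illustrative sketch of the two-fetcher cancellation pattern used above.
func runPair(fetchA, fetchB func() error, cancel func()) error {
	errc := make(chan error, 2) // buffered so neither sender can block forever
	go func() { errc <- fetchA() }()
	go func() { errc <- fetchB() }()

	if err := <-errc; err != nil {
		cancel() // stop the surviving fetcher
		<-errc   // drain its result so the goroutine can exit
		return err
	}
	return <-errc // first fetcher succeeded; report the second one's outcome
}
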
Example 25
func (pm *ProtocolManager) Stop() {
	// Show a log message. During download/processing this could actually
	// take 5 to 10 seconds, so feedback is required.
	glog.V(logger.Info).Infoln("Stopping Shift protocol handler...")

	pm.quit = true
	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
	close(pm.quitSync)             // quits syncer, fetcher, txsyncLoop

	// Wait for any process action
	pm.wg.Wait()

	glog.V(logger.Info).Infoln("Shift protocol handler stopped")
}
Example 26
// GetTransaction returns a transaction if it is contained in the pool
// and nil otherwise.
func (tp *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
	// check the txs first
	if tx, ok := tp.pending[hash]; ok {
		glog.V(logger.Debug).Infoln("debug gettransaction: %v", tx.Transaction)
		return tx.Transaction
	}
	// check queue
	for _, txs := range tp.queue {
		if tx, ok := txs[hash]; ok {
			glog.V(logger.Debug).Infoln("debug tp.queue: %v", tx.Transaction)
			return tx.Transaction
		}
	}
	return nil
}
Example 27
func (bc *ChainManager) setLastState() error {
	data, _ := bc.chainDb.Get([]byte("LastBlock"))
	if len(data) != 0 {
		block := bc.GetBlock(common.BytesToHash(data))
		if block != nil {
			bc.currentBlock = block
			bc.lastBlockHash = block.Hash()
		} else {
			glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
			if bc.recover() {
				glog.Infof("Recover successful")
			} else {
				glog.Fatalf("Recover failed. Please report")
			}
		}
	} else {
		bc.Reset()
	}
	bc.td = bc.currentBlock.Td
	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)

	if glog.V(logger.Info) {
		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
	}

	return nil
}
Example 28
func (self *BlockProcessor) ApplyTransaction(gp GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
	_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, header), tx, gp)
	if err != nil {
		return nil, nil, err
	}

	// Update the state with pending changes
	statedb.SyncIntermediate()

	usedGas.Add(usedGas, gas)
	receipt := types.NewReceipt(statedb.Root().Bytes(), usedGas)
	receipt.TxHash = tx.Hash()
	receipt.GasUsed = new(big.Int).Set(gas)
	if MessageCreatesContract(tx) {
		from, _ := tx.From()
		receipt.ContractAddress = crypto.CreateAddress(from, tx.Nonce())
	}

	logs := statedb.GetLogs(tx.Hash())
	receipt.SetLogs(logs)
	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})

	glog.V(logger.Debug).Infoln(receipt)

	// Notify all subscribers
	if !transientProcess {
		go self.eventMux.Post(TxPostEvent{tx})
		go self.eventMux.Post(logs)
	}

	return receipt, gas, err
}
Example 29
// Insert adds a set of hashes to the download queue for scheduling, returning
// the new hashes encountered.
func (q *queue) Insert(hashes []common.Hash, fifo bool) []common.Hash {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the hashes prioritized in the arrival order
	inserts := make([]common.Hash, 0, len(hashes))
	for _, hash := range hashes {
		// Skip anything we already have
		if old, ok := q.hashPool[hash]; ok {
			glog.V(logger.Warn).Infof("Hash %x already scheduled at index %v", hash, old)
			continue
		}
		// Update the counters and insert the hash
		q.hashCounter = q.hashCounter + 1
		inserts = append(inserts, hash)

		q.hashPool[hash] = q.hashCounter
		if fifo {
			q.hashQueue.Push(hash, -float32(q.hashCounter)) // Lowest gets scheduled first
		} else {
			q.hashQueue.Push(hash, float32(q.hashCounter)) // Highest gets scheduled first
		}
	}
	return inserts
}
Example 30
func (self *SQLDB) DeleteBlock(block *types.Block) {
	query := `DELETE FROM blocks WHERE number = ?`
	_, err := self.db.Exec(query, block.Number().Uint64())
	if err != nil {
		glog.V(logger.Error).Infoln("Error creating SQL tables", err, query)
	}
}
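
A standalone sketch of the same parameterized delete written directly against database/sql; the driver, the table layout and the helper name are assumptions, not details from the original project.

// Illustrative only: assumes database/sql, log and a registered driver such as
// github.com/mattn/go-sqlite3 ("sqlite3") are imported.
func deleteBlockByNumber(db *sql.DB, number uint64) {
	if _, err := db.Exec(`DELETE FROM blocks WHERE number = ?`, number); err != nil {
		log.Printf("failed to delete block %d: %v", number, err)
	}
}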