Example No. 1
0
func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) {
	d.activePeer = p.id
	defer func() {
		// reset on error
		if err != nil {
			d.queue.reset()
		}
	}()

	glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
	// Start the fetcher. This will block the update entirely;
	// interrupts need to be sent to the appropriate channels
	// respectively.
	if err = d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
		return err
	}

	// Start fetching blocks in parallel. The strategy is simple:
	// take any available peers, reserve a chunk for each peer available,
	// let the peer deliver the chunk and periodically check if a peer
	// has timed out.
	if err = d.startFetchingBlocks(p); err != nil {
		return err
	}

	glog.V(logger.Detail).Infoln("Sync completed")

	return nil
}
Example No. 2
0
func (self *Registrar) SetUrlHint(urlhint string, addr common.Address) (txhash string, err error) {
	if urlhint != "" {
		UrlHintAddr = urlhint
	} else {
		if !zero.MatchString(UrlHintAddr) {
			return
		}
		nameHex, extra := encodeName(UrlHintName, 2)
		urlHintAbi := resolveAbi + nameHex + extra
		glog.V(logger.Detail).Infof("UrlHint address query data: %s to %s", urlHintAbi, GlobalRegistrarAddr)
		var res string
		res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", urlHintAbi)
		if len(res) >= 40 {
			UrlHintAddr = "0x" + res[len(res)-40:len(res)]
		}
		if err != nil || zero.MatchString(UrlHintAddr) {
			if (addr == common.Address{}) {
				err = fmt.Errorf("UrlHint address not found and sender for creation not given")
				return
			}
			txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "210000", "", UrlHintCode)
			if err != nil {
				err = fmt.Errorf("UrlHint address not found and sender for creation failed: %v", err)
			}
			glog.V(logger.Detail).Infof("created UrlHint @ txhash %v\n", txhash)
		} else {
			glog.V(logger.Detail).Infof("UrlHint found @ %v\n", HashRegAddr)
			return
		}
	}

	return
}
Example No. 3
0
func (pool *TxPool) resetState() {
	currentState, err := pool.currentState()
	if err != nil {
		glog.V(logger.Info).Infoln("failed to get current state: %v", err)
		return
	}
	pool.pendingState = state.ManageState(currentState)

	// validate the pool of pending transactions, this will remove
	// any transactions that have been included in the block or
	// have been invalidated because of another transaction (e.g.
	// higher gas price)
	pool.validatePool()

	// Loop over the pending transactions and update the pending state's
	// nonces so the new pending transaction set builds on top of them.
	for _, tx := range pool.pending {
		if addr, err := tx.From(); err == nil {
			// Set the nonce. Transaction nonce can never be lower
			// than the state nonce; validatePool took care of that.
			if pool.pendingState.GetNonce(addr) <= tx.Nonce() {
				pool.pendingState.SetNonce(addr, tx.Nonce()+1)
			}
		}
	}

	// Check the queue and move transactions over to the pending if possible
	// or remove those that have become invalid
	pool.checkQueue()
}
Example No. 4
0
// parseNodes parses a list of discovery node URLs loaded from a .json file.
func (cfg *Config) parseNodes(file string) []*discover.Node {
	// Short circuit if no node config is present
	path := filepath.Join(cfg.DataDir, file)
	if _, err := os.Stat(path); err != nil {
		return nil
	}
	// Load the nodes from the config file
	blob, err := ioutil.ReadFile(path)
	if err != nil {
		glog.V(logger.Error).Infof("Failed to access nodes: %v", err)
		return nil
	}
	nodelist := []string{}
	if err := json.Unmarshal(blob, &nodelist); err != nil {
		glog.V(logger.Error).Infof("Failed to load nodes: %v", err)
		return nil
	}
	// Interpret the list as a discovery node array
	var nodes []*discover.Node
	for _, url := range nodelist {
		if url == "" {
			continue
		}
		node, err := discover.ParseNode(url)
		if err != nil {
			glog.V(logger.Error).Infof("Node URL %s: %v\n", url, err)
			continue
		}
		nodes = append(nodes, node)
	}
	return nodes
}
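Below is a minimal, standalone sketch of the same load-and-filter pattern: read a JSON array of strings from a file and drop empty entries. The file name and helper are illustrative, not part of geth's API.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// loadStringList reads a JSON array of strings from path, returning nil on any
// error, and filters out empty entries the same way the URL loop above does.
func loadStringList(path string) []string {
	blob, err := os.ReadFile(path)
	if err != nil {
		return nil // a missing or unreadable file is not fatal
	}
	var list []string
	if err := json.Unmarshal(blob, &list); err != nil {
		return nil
	}
	out := list[:0] // filter in place
	for _, s := range list {
		if s != "" {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	fmt.Println(loadStringList("static-nodes.json"))
}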
Example No. 5
0
// add inserts a new envelope into the message pool to be distributed within the
// whisper network. It also inserts the envelope into the expiration pool at the
// appropriate time-stamp.
func (self *Whisper) add(envelope *Envelope) error {
	self.poolMu.Lock()
	defer self.poolMu.Unlock()

	// short circuit when a received envelope has already expired
	if envelope.Expiry < uint32(time.Now().Unix()) {
		return nil
	}

	// Insert the message into the tracked pool
	hash := envelope.Hash()
	if _, ok := self.messages[hash]; ok {
		glog.V(logger.Detail).Infof("whisper envelope already cached: %x\n", envelope)
		return nil
	}
	self.messages[hash] = envelope

	// Insert the message into the expiration pool for later removal
	if self.expirations[envelope.Expiry] == nil {
		self.expirations[envelope.Expiry] = set.NewNonTS()
	}
	if !self.expirations[envelope.Expiry].Has(hash) {
		self.expirations[envelope.Expiry].Add(hash)

		// Notify the local node of a message arrival
		go self.postEvent(envelope)
	}
	glog.V(logger.Detail).Infof("cached whisper envelope %x\n", envelope)
	return nil
}
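The expiration bookkeeping above groups envelope hashes by expiry timestamp so whole buckets can be dropped at once. A minimal sketch of that data structure, with illustrative types and names rather than Whisper's own:

package main

import (
	"fmt"
	"time"
)

type hash [32]byte

// expiryPool indexes item hashes by expiry timestamp so a periodic sweeper can
// drop entire buckets in one pass.
type expiryPool struct {
	buckets map[uint32]map[hash]struct{}
}

func newExpiryPool() *expiryPool {
	return &expiryPool{buckets: make(map[uint32]map[hash]struct{})}
}

// add records h under the given expiry and reports whether it was newly inserted.
func (p *expiryPool) add(expiry uint32, h hash) bool {
	if expiry < uint32(time.Now().Unix()) {
		return false // already expired, nothing to track
	}
	bucket := p.buckets[expiry]
	if bucket == nil {
		bucket = make(map[hash]struct{})
		p.buckets[expiry] = bucket
	}
	if _, ok := bucket[h]; ok {
		return false
	}
	bucket[h] = struct{}{}
	return true
}

// expire removes every bucket whose timestamp is in the past.
func (p *expiryPool) expire() {
	now := uint32(time.Now().Unix())
	for ts := range p.buckets {
		if ts < now {
			delete(p.buckets, ts)
		}
	}
}

func main() {
	p := newExpiryPool()
	fmt.Println(p.add(uint32(time.Now().Unix())+30, hash{1})) // true: newly tracked
	p.expire()
}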
Example No. 6
0
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -float32(block.NumberU64()))

		if glog.V(logger.Debug) {
			glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
		}
	}
}
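The distance check above only queues blocks inside a window around the current chain height: not so old they are useless, not so new they cannot be verified soon. A tiny sketch of that arithmetic with illustrative constant values:

package main

import "fmt"

const (
	maxUncleDist = 7  // illustrative window bounds
	maxQueueDist = 32
)

// acceptable reports whether a block at blockNum is worth queueing given the
// current chain height.
func acceptable(blockNum, chainHeight uint64) bool {
	dist := int64(blockNum) - int64(chainHeight)
	return dist >= -maxUncleDist && dist <= maxQueueDist
}

func main() {
	fmt.Println(acceptable(105, 100)) // true: 5 blocks ahead
	fmt.Println(acceptable(60, 100))  // false: 40 blocks behind
}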
Example No. 7
0
// listenLoop runs in its own goroutine and accepts
// inbound connections.
func (srv *Server) listenLoop() {
	defer srv.loopWG.Done()
	glog.V(logger.Info).Infoln("Listening on", srv.listener.Addr())

	// This channel acts as a semaphore limiting
	// active inbound connections that are lingering pre-handshake.
	// If all slots are taken, no further connections are accepted.
	tokens := maxAcceptConns
	if srv.MaxPendingPeers > 0 {
		tokens = srv.MaxPendingPeers
	}
	slots := make(chan struct{}, tokens)
	for i := 0; i < tokens; i++ {
		slots <- struct{}{}
	}

	for {
		<-slots
		fd, err := srv.listener.Accept()
		if err != nil {
			return
		}
		glog.V(logger.Debug).Infof("Accepted conn %v\n", fd.RemoteAddr())
		go func() {
			srv.setupConn(fd, inboundConn, nil)
			slots <- struct{}{}
		}()
	}
}
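The slots channel above is a buffered channel used as a counting semaphore to bound how many pre-handshake connections are in flight. A self-contained sketch of the same pattern over plain TCP, with a made-up handler rather than geth's setupConn:

package main

import (
	"log"
	"net"
	"time"
)

// acceptLoop bounds concurrent connection setup with a buffered channel used
// as a counting semaphore: a slot is taken before Accept and returned when the
// handler finishes.
func acceptLoop(ln net.Listener, maxPending int) {
	slots := make(chan struct{}, maxPending)
	for i := 0; i < maxPending; i++ {
		slots <- struct{}{}
	}
	for {
		<-slots // wait for a free slot
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		go func(c net.Conn) {
			defer func() { slots <- struct{}{} }() // release the slot
			c.SetDeadline(time.Now().Add(5 * time.Second))
			// ... handshake / setup would go here ...
			c.Close()
		}(conn)
	}
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	go acceptLoop(ln, 50)
	time.Sleep(100 * time.Millisecond)
}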
Example No. 8
0
// tries unlocking the specified account a few times.
func unlockAccount(ctx *cli.Context, accman *accounts.Manager, address string, i int, passwords []string) (accounts.Account, string) {
	account, err := utils.MakeAddress(accman, address)
	if err != nil {
		utils.Fatalf("Could not list accounts: %v", err)
	}
	for trials := 0; trials < 3; trials++ {
		prompt := fmt.Sprintf("Unlocking account %s | Attempt %d/%d", address, trials+1, 3)
		password := getPassPhrase(prompt, false, i, passwords)
		err = accman.Unlock(account, password)
		if err == nil {
			glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
			return account, password
		}
		if err, ok := err.(*accounts.AmbiguousAddrError); ok {
			glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
			return ambiguousAddrRecovery(accman, err, password), password
		}
		if err != accounts.ErrDecrypt {
			// No need to prompt again if the error is not decryption-related.
			break
		}
	}
	// All trials expended to unlock account, bail out
	utils.Fatalf("Failed to unlock account %s (%v)", address, err)
	return accounts.Account{}, ""
}
Example No. 9
0
// main loop for adding connections via listening
func (srv *Server) listenLoop() {
	defer srv.loopWG.Done()

	// This channel acts as a semaphore limiting
	// active inbound connections that are lingering pre-handshake.
	// If all slots are taken, no further connections are accepted.
	slots := make(chan struct{}, maxAcceptConns)
	for i := 0; i < maxAcceptConns; i++ {
		slots <- struct{}{}
	}

	glog.V(logger.Info).Infoln("Listening on", srv.listener.Addr())
	for {
		<-slots
		conn, err := srv.listener.Accept()
		if err != nil {
			return
		}
		glog.V(logger.Debug).Infof("Accepted conn %v\n", conn.RemoteAddr())
		srv.peerWG.Add(1)
		go func() {
			srv.startPeer(conn, nil)
			slots <- struct{}{}
		}()
	}
}
Example No. 10
0
func (p *Peer) run() DiscReason {
	readErr := make(chan error, 1)
	p.wg.Add(2)
	go p.readLoop(readErr)
	go p.pingLoop()

	p.startProtocols()

	// Wait for an error or disconnect.
	var reason DiscReason
	select {
	case err := <-readErr:
		if r, ok := err.(DiscReason); ok {
			reason = r
		} else {
			// Note: We rely on protocols to abort if there is a write
			// error. It might be more robust to handle them here as well.
			glog.V(logger.Detail).Infof("%v: Read error: %v\n", p, err)
			reason = DiscNetworkError
		}
	case err := <-p.protoErr:
		reason = discReasonForError(err)
	case reason = <-p.disc:
	}

	close(p.closed)
	p.politeDisconnect(reason)
	p.wg.Wait()
	glog.V(logger.Debug).Infof("%v: Disconnected: %v\n", p, reason)
	return reason
}
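The select above waits for whichever of several failure sources fires first and derives the disconnect reason from it. A small, generic sketch of that fan-in, with illustrative channel names:

package main

import (
	"errors"
	"fmt"
)

// firstError blocks until one of the supplied channels delivers; whichever
// source fails (or requests a disconnect) first decides the outcome.
func firstError(readErr, protoErr <-chan error, disc <-chan string) string {
	select {
	case err := <-readErr:
		return fmt.Sprintf("read error: %v", err)
	case err := <-protoErr:
		return fmt.Sprintf("protocol error: %v", err)
	case reason := <-disc:
		return "requested disconnect: " + reason
	}
}

func main() {
	readErr := make(chan error, 1)
	protoErr := make(chan error, 1)
	disc := make(chan string, 1)
	readErr <- errors.New("connection reset")
	fmt.Println(firstError(readErr, protoErr, disc))
}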
Example No. 11
0
// Map adds a port mapping on m and keeps it alive until c is closed.
// This function is typically invoked in its own goroutine.
func Map(m Interface, c chan struct{}, protocol string, extport, intport int, name string) {
	refresh := time.NewTimer(mapUpdateInterval)
	defer func() {
		refresh.Stop()
		glog.V(logger.Debug).Infof("deleting port mapping: %s %d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
		m.DeleteMapping(protocol, extport, intport)
	}()
	if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
		glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
	} else {
		glog.V(logger.Info).Infof("mapped network port %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
	}
	for {
		select {
		case _, ok := <-c:
			if !ok {
				return
			}
		case <-refresh.C:
			glog.V(logger.Detail).Infof("refresh port mapping %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
			if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
				glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
			}
			refresh.Reset(mapUpdateInterval)
		}
	}
}
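The loop above is a generic keep-alive pattern: do the work immediately, redo it on a timer, and stop when the control channel closes. A standalone sketch with an arbitrary callback in place of the NAT mapper:

package main

import (
	"fmt"
	"time"
)

// keepAlive calls refresh immediately and then on every interval tick, until
// the quit channel is closed.
func keepAlive(quit <-chan struct{}, interval time.Duration, refresh func() error) {
	timer := time.NewTimer(interval)
	defer timer.Stop()

	if err := refresh(); err != nil {
		fmt.Println("initial refresh failed:", err)
	}
	for {
		select {
		case _, ok := <-quit:
			if !ok {
				return // channel closed: tear down and exit
			}
		case <-timer.C:
			if err := refresh(); err != nil {
				fmt.Println("refresh failed:", err)
			}
			timer.Reset(interval)
		}
	}
}

func main() {
	quit := make(chan struct{})
	go keepAlive(quit, 50*time.Millisecond, func() error { fmt.Println("refreshed"); return nil })
	time.Sleep(120 * time.Millisecond)
	close(quit)
}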
Example No. 12
0
func startIpc(cfg IpcConfig, codec codec.Codec, api shared.EthereumApi) error {
	os.Remove(cfg.Endpoint) // in case it still exists from a previous run

	l, err := net.ListenUnix("unix", &net.UnixAddr{Name: cfg.Endpoint, Net: "unix"})
	if err != nil {
		return err
	}
	os.Chmod(cfg.Endpoint, 0600)

	go func() {
		for {
			conn, err := l.AcceptUnix()
			if err != nil {
				glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err)
				continue
			}

			go handle(conn, api, codec)
		}

		os.Remove(cfg.Endpoint)
	}()

	glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint)

	return nil
}
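A minimal sketch of serving a Unix domain socket the same way: remove a stale socket file, listen, tighten permissions, and accept in a background goroutine. The path and handler are illustrative, not geth's IPC types.

package main

import (
	"log"
	"net"
	"os"
)

// serveUnix listens on a Unix domain socket and dispatches each connection to
// handler in its own goroutine.
func serveUnix(path string, handler func(net.Conn)) (net.Listener, error) {
	os.Remove(path) // clear a stale socket left by a previous run
	l, err := net.Listen("unix", path)
	if err != nil {
		return nil, err
	}
	os.Chmod(path, 0600) // restrict access to the owner

	go func() {
		for {
			conn, err := l.Accept()
			if err != nil {
				return // listener closed
			}
			go handler(conn)
		}
	}()
	return l, nil
}

func main() {
	l, err := serveUnix("/tmp/example.ipc", func(c net.Conn) { c.Close() })
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
}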
Example No. 13
0
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Make sure the peer's TD is higher than our own. If not drop.
	if peer.td.Cmp(pm.chainman.Td()) <= 0 {
		return
	}
	// Check if the downloader is busy so we don't show the sync message
	// for every attempt
	if pm.downloader.IsBusy() {
		return
	}

	// FIXME if we have the hash in our chain and the TD of the peer is
	// much higher than ours, something is wrong with us or the peer.
	// Check if the hash is on our own chain
	if pm.chainman.HasBlock(peer.recentHash) {
		return
	}

	// Get the hashes from the peer (synchronously)
	err := pm.downloader.Synchronise(peer.id, peer.recentHash)
	if err != nil && err == downloader.ErrBadPeer {
		glog.V(logger.Debug).Infoln("removed peer from peer set due to bad action")
		pm.removePeer(peer)
	} else if err != nil {
		// handle error
		glog.V(logger.Detail).Infoln("error downloading:", err)
	}
}
Example No. 14
0
// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
	glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())

	// Execute the Ethereum handshake
	td, head, genesis := pm.chainman.Status()
	if err := p.Handshake(td, head, genesis); err != nil {
		glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
		return err
	}
	// Register the peer locally
	glog.V(logger.Detail).Infof("%v: adding peer", p)
	if err := pm.peers.Register(p); err != nil {
		glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
		return err
	}
	defer pm.removePeer(p.id)

	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(), p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks); err != nil {
		return err
	}
	// Propagate existing transactions. New transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(p)

	// Main loop: handle incoming messages.
	for {
		if err := pm.handleMsg(p); err != nil {
			glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
			return err
		}
	}
	return nil
}
Example No. 15
0
func (srv *Server) startPeer(fd net.Conn, dest *discover.Node) {
	// TODO: handle/store session token

	// Run setupFunc, which should create an authenticated connection
	// and run the capability exchange. Note that any early error
	// returns during that exchange need to call peerWG.Done because
	// the callers of startPeer added the peer to the wait group already.
	fd.SetDeadline(time.Now().Add(handshakeTimeout))

	conn, err := srv.setupFunc(fd, srv.PrivateKey, srv.ourHandshake, dest, srv.keepconn)
	if err != nil {
		fd.Close()
		glog.V(logger.Debug).Infof("Handshake with %v failed: %v", fd.RemoteAddr(), err)
		srv.peerWG.Done()
		return
	}
	conn.MsgReadWriter = &netWrapper{
		wrapped: conn.MsgReadWriter,
		conn:    fd, rtimeout: frameReadTimeout, wtimeout: frameWriteTimeout,
	}
	p := newPeer(fd, conn, srv.Protocols)
	if ok, reason := srv.addPeer(conn, p); !ok {
		glog.V(logger.Detail).Infof("Not adding %v (%v)\n", p, reason)
		p.politeDisconnect(reason)
		srv.peerWG.Done()
		return
	}
	// The handshakes are done and it passed all checks.
	// Spawn the Peer loops.
	go srv.runPeer(p)
}
Example No. 16
0
func (self *LDBDatabase) Close() {
	if err := self.Flush(); err != nil {
		glog.V(logger.Error).Infof("error: flush '%s': %v\n", self.fn, err)
	}
	self.db.Close()
	glog.V(logger.Error).Infoln("flushed and closed db:", self.fn)
}
Example No. 17
0
func (self *XEth) PushTx(encodedTx string) (string, error) {
	tx := new(types.Transaction)
	err := rlp.DecodeBytes(common.FromHex(encodedTx), tx)
	if err != nil {
		glog.V(logger.Error).Infoln(err)
		return "", err
	}

	err = self.backend.TxPool().Add(tx)
	if err != nil {
		return "", err
	}

	if tx.To() == nil {
		from, err := tx.From()
		if err != nil {
			return "", err
		}

		addr := crypto.CreateAddress(from, tx.Nonce())
		glog.V(logger.Info).Infof("Tx(%x) created: %x\n", tx.Hash(), addr)
	} else {
		glog.V(logger.Info).Infof("Tx(%x) to: %x\n", tx.Hash(), tx.To())
	}

	return tx.Hash().Hex(), nil
}
Example No. 18
0
// resolve attempts to find the current endpoint for the destination
// using discovery.
//
// Resolve operations are throttled with backoff to avoid flooding the
// discovery network with useless queries for nodes that don't exist.
// The backoff delay resets when the node is found.
func (t *dialTask) resolve(srv *Server) bool {
	if srv.ntab == nil {
		glog.V(logger.Debug).Infof("can't resolve node %x: discovery is disabled", t.dest.ID[:6])
		return false
	}
	if t.resolveDelay == 0 {
		t.resolveDelay = initialResolveDelay
	}
	if time.Since(t.lastResolved) < t.resolveDelay {
		return false
	}
	resolved := srv.ntab.Resolve(t.dest.ID)
	t.lastResolved = time.Now()
	if resolved == nil {
		t.resolveDelay *= 2
		if t.resolveDelay > maxResolveDelay {
			t.resolveDelay = maxResolveDelay
		}
		glog.V(logger.Debug).Infof("resolving node %x failed (new delay: %v)", t.dest.ID[:6], t.resolveDelay)
		return false
	}
	// The node was found.
	t.resolveDelay = initialResolveDelay
	t.dest = resolved
	glog.V(logger.Debug).Infof("resolved node %x: %v:%d", t.dest.ID[:6], t.dest.IP, t.dest.TCP)
	return true
}
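The resolve throttling above is exponential backoff with a cap that resets on success. A small standalone sketch of that bookkeeping, with illustrative delay constants:

package main

import (
	"fmt"
	"time"
)

const (
	initialDelay = 10 * time.Second // illustrative values
	maxDelay     = 10 * time.Minute
)

// backoff tracks when an operation may next be retried, doubling the delay on
// failure up to a cap and resetting it on success.
type backoff struct {
	delay time.Duration
	last  time.Time
}

func (b *backoff) ready() bool {
	if b.delay == 0 {
		b.delay = initialDelay
	}
	return time.Since(b.last) >= b.delay
}

func (b *backoff) failure() {
	b.last = time.Now()
	b.delay *= 2
	if b.delay > maxDelay {
		b.delay = maxDelay
	}
}

func (b *backoff) success() {
	b.last = time.Now()
	b.delay = initialDelay
}

func main() {
	var b backoff
	fmt.Println(b.ready()) // true on the first attempt
	b.failure()
	fmt.Println(b.delay) // 20s after one failure
}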
Example No. 19
0
// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", peer, block.NumberU64(), hash[:4])
	go func() {
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			return
		}
		// Quickly validate the header and propagate the block if it passes
		if err := f.validateBlock(block, parent); err != nil {
			glog.V(logger.Debug).Infof("Peer %s: block #%d [%x] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
			f.dropPeer(peer)
			return
		}
		go f.broadcastBlock(block, true)

		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			glog.V(logger.Warn).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
			return
		}
		// If import succeeded, broadcast the block
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}
Example No. 20
0
func (pm *ProtocolManager) handle(p *peer) error {
	// Execute the Ethereum handshake.
	if err := p.handleStatus(); err != nil {
		return err
	}

	// Register the peer locally.
	glog.V(logger.Detail).Infoln("Adding peer", p.id)
	if err := pm.peers.Register(p); err != nil {
		glog.V(logger.Error).Infoln("Addition failed:", err)
		return err
	}
	defer pm.removePeer(p.id)

	// Register the peer in the downloader. If the downloader
	// considers it banned, we disconnect.
	if err := pm.downloader.RegisterPeer(p.id, p.Head(), p.requestHashes, p.requestBlocks); err != nil {
		return err
	}

	// Propagate existing transactions. New transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(p)

	// Main loop: handle incoming messages.
	for {
		if err := pm.handleMsg(p); err != nil {
			return err
		}
	}
	return nil
}
Example No. 21
0
func sendBadBlockReport(block *types.Block, err error) {
	if !EnableBadBlockReporting {
		return
	}

	var (
		blockRLP, _ = rlp.EncodeToBytes(block)
		params      = map[string]interface{}{
			"block":     common.Bytes2Hex(blockRLP),
			"blockHash": block.Hash().Hex(),
			"errortype": err.Error(),
			"client":    "go",
		}
	)
	if !block.ReceivedAt.IsZero() {
		params["receivedAt"] = block.ReceivedAt.UTC().String()
	}
	if p, ok := block.ReceivedFrom.(*peer); ok {
		params["receivedFrom"] = map[string]interface{}{
			"enode":           fmt.Sprintf("enode://%x@%v", p.ID(), p.RemoteAddr()),
			"name":            p.Name(),
			"protocolVersion": p.version,
		}
	}
	jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "id": "1", "jsonrpc": "2.0", "params": []interface{}{params}})
	client := http.Client{Timeout: 8 * time.Second}
	resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
	if err != nil {
		glog.V(logger.Debug).Infoln(err)
		return
	}
	glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode)
	resp.Body.Close()
}
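The report above is a JSON-RPC style POST guarded by a hard client timeout so a slow endpoint cannot stall the caller. A minimal sketch of the same call shape using only the standard library; the URL and method are placeholders:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// postReport marshals a JSON-RPC style request and POSTs it with a bounded
// client timeout.
func postReport(url, method string, params ...interface{}) error {
	payload, err := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      "1",
		"method":  method,
		"params":  params,
	})
	if err != nil {
		return err
	}
	client := http.Client{Timeout: 8 * time.Second}
	resp, err := client.Post(url, "application/json", bytes.NewReader(payload))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	_ = postReport("http://localhost:8080/report", "eth_badBlock", map[string]string{"client": "go"})
}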
Example No. 22
0
// wsHandshakeValidator returns a handler that verifies the origin during the
// websocket upgrade process. When '*' is specified as an allowed origin, all
// connections are accepted.
func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http.Request) error {
	origins := set.New()
	allowAllOrigins := false

	for _, origin := range allowedOrigins {
		if origin == "*" {
			allowAllOrigins = true
		}
		if origin != "" {
			origins.Add(origin)
		}
	}

	// allow localhost if no allowedOrigins are specified
	if len(origins.List()) == 0 {
		origins.Add("http://localhost")
		if hostname, err := os.Hostname(); err == nil {
			origins.Add("http://" + hostname)
		}
	}

	glog.V(logger.Debug).Infof("Allowed origin(s) for WS RPC interface %v\n", origins.List())

	f := func(cfg *websocket.Config, req *http.Request) error {
		origin := req.Header.Get("Origin")
		if allowAllOrigins || origins.Has(origin) {
			return nil
		}
		glog.V(logger.Debug).Infof("origin '%s' not allowed on WS-RPC interface\n", origin)
		return fmt.Errorf("origin %s not allowed", origin)
	}

	return f
}
Example No. 23
0
// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending on what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send to a subset of the peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.blockchain.GetBlock(block.ParentHash()); parent != nil {
			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash()))
		} else {
			glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
			return
		}
		// Send the block to a subset of our peers
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.SendNewBlock(block, td)
		}
		glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if pm.blockchain.HasBlock(hash) {
		for _, peer := range peers {
			if peer.version < eth62 {
				peer.SendNewBlockHashes61([]common.Hash{hash})
			} else {
				peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
			}
		}
		glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
	}
}
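Propagation above sends the full block to roughly the square root of the known peers and only announces the hash to the rest. A tiny sketch of that split, with a stand-in Peer type:

package main

import (
	"fmt"
	"math"
)

// Peer is a stand-in type for illustration.
type Peer struct{ id string }

// splitPeers returns the subset of peers that should receive the full payload
// (roughly the square root of the total) and the remainder, which only get a
// hash announcement.
func splitPeers(peers []Peer) (full, announceOnly []Peer) {
	n := int(math.Sqrt(float64(len(peers))))
	return peers[:n], peers[n:]
}

func main() {
	peers := make([]Peer, 25)
	full, rest := splitPeers(peers)
	fmt.Println(len(full), len(rest)) // 5 20
}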
Example No. 24
0
// newErrorResponse creates a JSON RPC error response for a specific request id,
// containing the specified error code and error message. Besides returning the
// error to the caller, it also sets the ret_error and ret_response JavaScript
// variables.
func newErrorResponse(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) {
	// Bundle the error into a JSON RPC call response
	res := rpc.JSONErrResponse{
		Version: rpc.JSONRPCVersion,
		Id:      id,
		Error: rpc.JSONError{
			Code:    code,
			Message: msg,
		},
	}
	// Serialize the error response into JavaScript variables
	errObj, err := json.Marshal(res.Error)
	if err != nil {
		glog.V(logger.Error).Infof("Failed to serialize JSON RPC error: %v", err)
	}
	resObj, err := json.Marshal(res)
	if err != nil {
		glog.V(logger.Error).Infof("Failed to serialize JSON RPC error response: %v", err)
	}

	if _, err = call.Otto.Run("ret_error = " + string(errObj)); err != nil {
		glog.V(logger.Error).Infof("Failed to set `ret_error` to the occurred error: %v", err)
	}
	resVal, err := call.Otto.Run("ret_response = " + string(resObj))
	if err != nil {
		glog.V(logger.Error).Infof("Failed to set `ret_response` to the JSON RPC response: %v", err)
	}
	return resVal
}
Example No. 25
0
func (self *Registrar) SetHashReg(hashreg string, addr common.Address) (txhash string, err error) {
	if hashreg != "" {
		HashRegAddr = hashreg
	} else {
		if !zero.MatchString(HashRegAddr) {
			return
		}
		nameHex, extra := encodeName(HashRegName, 2)
		hashRegAbi := resolveAbi + nameHex + extra
		glog.V(logger.Detail).Infof("\ncall HashRegAddr %v with %v\n", GlobalRegistrarAddr, hashRegAbi)
		var res string
		res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", hashRegAbi)
		if len(res) >= 40 {
			HashRegAddr = "0x" + res[len(res)-40:len(res)]
		}
		if err != nil || zero.MatchString(HashRegAddr) {
			if (addr == common.Address{}) {
				err = fmt.Errorf("HashReg address not found and sender for creation not given")
				return
			}

			txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "", "", HashRegCode)
			if err != nil {
				err = fmt.Errorf("HashReg address not found and sender for creation failed: %v", err)
			}
			glog.V(logger.Detail).Infof("created HashRegAddr @ txhash %v\n", txhash)
		} else {
			glog.V(logger.Detail).Infof("HashRegAddr found at @ %v\n", HashRegAddr)
			return
		}
	}

	return
}
Example No. 26
0
func (self *worker) wait() {
	for {
		for block := range self.recv {
			atomic.AddInt32(&self.atWork, -1)

			if block == nil {
				continue
			}

			_, err := self.chain.WriteBlock(block, false)
			if err != nil {
				glog.V(logger.Error).Infoln("error writing block to chain", err)
				continue
			}

			// check staleness and display confirmation
			var stale, confirm string
			canonBlock := self.chain.GetBlockByNumber(block.NumberU64())
			if canonBlock != nil && canonBlock.Hash() != block.Hash() {
				stale = "stale "
			} else {
				confirm = "Wait 5 blocks for confirmation"
				self.current.localMinedBlocks = newLocalMinedBlock(block.Number().Uint64(), self.current.localMinedBlocks)
			}

			glog.V(logger.Info).Infof("🔨  Mined %sblock (#%v / %x). %s", stale, block.Number(), block.Hash().Bytes()[:4], confirm)

			// broadcast before waiting for validation
			go self.mux.Post(core.NewMinedBlockEvent{block})

			self.commitNewWork()
		}
	}
}
Example No. 27
0
// UnlockAccount asks the user agent for the user password and tries to unlock the account.
// It will try 3 attempts before giving up.
func (fe *RemoteFrontend) UnlockAccount(address []byte) bool {
	if !fe.enabled {
		return false
	}

	err := fe.send(AskPasswordMethod, common.Bytes2Hex(address))
	if err != nil {
		glog.V(logger.Error).Infof("Unable to send password request to agent - %v\n", err)
		return false
	}

	passwdRes, err := fe.recv()
	if err != nil {
		glog.V(logger.Error).Infof("Unable to recv password response from agent - %v\n", err)
		return false
	}

	passwd, ok := passwdRes.Result.(string)
	if !ok {
		glog.V(logger.Error).Infoln("Unexpected password response type from agent")
		return false
	}

	if err := fe.mgr.Unlock(common.BytesToAddress(address), passwd); err == nil {
		return true
	}

	glog.V(logger.Debug).Infoln("3 invalid account unlock attempts")
	return false
}
Example No. 28
0
func ecrecoverFunc(in []byte) []byte {
	in = common.RightPadBytes(in, 128)
	// "in" is (hash, v, r, s), each 32 bytes
	// but for ecrecover we want (r, s, v)

	r := common.BytesToBig(in[64:96])
	s := common.BytesToBig(in[96:128])
	// Treat V as a 256bit integer
	vbig := common.Bytes2Big(in[32:64])
	v := byte(vbig.Uint64())

	if !crypto.ValidateSignatureValues(v, r, s) {
		glog.V(logger.Error).Infof("EC RECOVER FAIL: v, r or s value invalid")
		return nil
	}

	// v needs to be at the end and normalized for libsecp256k1
	vbignormal := new(big.Int).Sub(vbig, big.NewInt(27))
	vnormal := byte(vbignormal.Uint64())
	rsv := append(in[64:128], vnormal)
	pubKey, err := crypto.Ecrecover(in[:32], rsv)
	// make sure the public key is a valid one
	if err != nil {
		glog.V(logger.Error).Infof("EC RECOVER FAIL: ", err)
		return nil
	}

	// the first byte of pubkey is bitcoin heritage
	return common.LeftPadBytes(crypto.Sha3(pubKey[1:])[12:], 32)
}
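The precompile input above is four 32-byte words (hash, v, r, s), and v must be reduced from 27/28 to the 0/1 recovery id before handing r||s||v to libsecp256k1. A standard-library-only sketch of that slicing and normalization (signature validation omitted, helper name is illustrative):

package main

import (
	"fmt"
	"math/big"
)

// splitEcrecoverInput right-pads the precompile input to 128 bytes and returns
// the hash, the normalized recovery id, and the 65-byte r||s||v blob.
func splitEcrecoverInput(in []byte) (hash []byte, v byte, rsv []byte) {
	buf := make([]byte, 128)
	copy(buf, in) // right-pad with zeros, mirroring common.RightPadBytes

	hash = buf[0:32]
	vbig := new(big.Int).SetBytes(buf[32:64]) // V as a 256-bit integer
	v = byte(new(big.Int).Sub(vbig, big.NewInt(27)).Uint64())
	rsv = append(append([]byte{}, buf[64:128]...), v) // r || s || v
	return hash, v, rsv
}

func main() {
	in := make([]byte, 128)
	in[63] = 27 // v = 27, encoded as the last byte of its 32-byte word
	_, v, rsv := splitEcrecoverInput(in)
	fmt.Println(v, len(rsv)) // 0 65
}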
Example No. 29
0
func startIpc(cfg IpcConfig, codec codec.Codec, api shared.EthereumApi) error {
	os.Remove(cfg.Endpoint) // in case it still exists from a previous run

	l, err := net.Listen("unix", cfg.Endpoint)
	if err != nil {
		return err
	}
	l = fdtrack.WrapListener("ipc", l)
	os.Chmod(cfg.Endpoint, 0600)

	go func() {
		for {
			conn, err := l.Accept()
			if err != nil {
				glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err)
				continue
			}

			id := newIpcConnId()
			glog.V(logger.Debug).Infof("New IPC connection with id %06d started\n", id)

			go handle(id, conn, api, codec)
		}

		os.Remove(cfg.Endpoint)
	}()

	glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint)

	return nil
}
Example No. 30
0
func (s *Ethereum) syncDatabases() {
	ticker := time.NewTicker(1 * time.Minute)
done:
	for {
		select {
		case <-ticker.C:
			// don't change the order of database flushes
			if err := s.extraDb.Flush(); err != nil {
				glog.V(logger.Error).Infof("error: flush extraDb: %v\n", err)
			}
			if err := s.stateDb.Flush(); err != nil {
				glog.V(logger.Error).Infof("error: flush stateDb: %v\n", err)
			}
			if err := s.blockDb.Flush(); err != nil {
				glog.V(logger.Error).Infof("error: flush blockDb: %v\n", err)
			}
		case <-s.shutdownChan:
			break done
		}
	}

	s.blockDb.Close()
	s.stateDb.Close()
	s.extraDb.Close()

	close(s.databasesClosed)
}
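The loop above combines a ticker with a shutdown channel and uses a labeled break so the cleanup after the loop still runs exactly once. A standalone sketch of that pattern with a generic flush callback:

package main

import (
	"fmt"
	"time"
)

// runUntil flushes on every tick and exits cleanly when shutdown is closed,
// using a labeled break so the cleanup after the loop still executes.
func runUntil(shutdown <-chan struct{}, flush func()) {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
done:
	for {
		select {
		case <-ticker.C:
			flush()
		case <-shutdown:
			break done
		}
	}
	fmt.Println("closing databases") // cleanup runs after leaving the loop
}

func main() {
	shutdown := make(chan struct{})
	go runUntil(shutdown, func() { fmt.Println("flushed") })
	time.Sleep(120 * time.Millisecond)
	close(shutdown)
	time.Sleep(10 * time.Millisecond)
}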