func (self *XEth) PushTx(encodedTx string) (string, error) { tx := new(types.Transaction) err := rlp.DecodeBytes(common.FromHex(encodedTx), tx) if err != nil { glog.V(logger.Error).Infoln(err) return "", err } err = self.backend.TxPool().Add(tx) if err != nil { return "", err } if tx.To() == nil { from, err := tx.From() if err != nil { return "", err } addr := crypto.CreateAddress(from, tx.Nonce()) glog.V(logger.Info).Infof("Tx(%x) created: %x\n", tx.Hash(), addr) } else { glog.V(logger.Info).Infof("Tx(%x) to: %x\n", tx.Hash(), tx.To()) } return tx.Hash().Hex(), nil }
// wsHandshakeValidator returns a handler that verifies the origin during the // websocket upgrade process. When a '*' is specified as an allowed origins all // connections are accepted. func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http.Request) error { origins := set.New() allowAllOrigins := false for _, origin := range allowedOrigins { if origin == "*" { allowAllOrigins = true } if origin != "" { origins.Add(strings.ToLower(origin)) } } // allow localhost if no allowedOrigins are specified. if len(origins.List()) == 0 { origins.Add("http://localhost") if hostname, err := os.Hostname(); err == nil { origins.Add("http://" + strings.ToLower(hostname)) } } glog.V(logger.Debug).Infof("Allowed origin(s) for WS RPC interface %v\n", origins.List()) f := func(cfg *websocket.Config, req *http.Request) error { origin := strings.ToLower(req.Header.Get("Origin")) if allowAllOrigins || origins.Has(origin) { return nil } glog.V(logger.Debug).Infof("origin '%s' not allowed on WS-RPC interface\n", origin) return fmt.Errorf("origin %s not allowed", origin) } return f }
func (pool *TxPool) resetState() { currentState, err := pool.currentState() if err != nil { glog.V(logger.Error).Infof("Failed to get current state: %v", err) return } managedState := state.ManageState(currentState) if err != nil { glog.V(logger.Error).Infof("Failed to get managed state: %v", err) return } pool.pendingState = managedState // validate the pool of pending transactions, this will remove // any transactions that have been included in the block or // have been invalidated because of another transaction (e.g. // higher gas price) pool.demoteUnexecutables() // Update all accounts to the latest known pending nonce for addr, list := range pool.pending { txs := list.Flatten() // Heavy but will be cached and is needed by the miner anyway pool.pendingState.SetNonce(addr, txs[len(txs)-1].Nonce()+1) } // Check the queue and move transactions over to the pending if possible // or remove those that have become invalid pool.promoteExecutables() }
func (pool *TxPool) resetState() { currentState, err := pool.currentState() if err != nil { glog.V(logger.Info).Infoln("failed to get current state: %v", err) return } managedState := state.ManageState(currentState) if err != nil { glog.V(logger.Info).Infoln("failed to get managed state: %v", err) return } pool.pendingState = managedState // validate the pool of pending transactions, this will remove // any transactions that have been included in the block or // have been invalidated because of another transaction (e.g. // higher gas price) pool.validatePool() // Loop over the pending transactions and base the nonce of the new // pending transaction set. for _, tx := range pool.pending { if addr, err := tx.From(); err == nil { // Set the nonce. Transaction nonce can never be lower // than the state nonce; validatePool took care of that. if pool.pendingState.GetNonce(addr) <= tx.Nonce() { pool.pendingState.SetNonce(addr, tx.Nonce()+1) } } } // Check the queue and move transactions over to the pending if possible // or remove those that have become invalid pool.checkQueue() }
// resolve attempts to find the current endpoint for the destination // using discovery. // // Resolve operations are throttled with backoff to avoid flooding the // discovery network with useless queries for nodes that don't exist. // The backoff delay resets when the node is found. func (t *dialTask) resolve(srv *Server) bool { if srv.ntab == nil { glog.V(logger.Debug).Infof("can't resolve node %x: discovery is disabled", t.dest.ID[:6]) return false } if t.resolveDelay == 0 { t.resolveDelay = initialResolveDelay } if time.Since(t.lastResolved) < t.resolveDelay { return false } resolved := srv.ntab.Resolve(t.dest.ID) t.lastResolved = time.Now() if resolved == nil { t.resolveDelay *= 2 if t.resolveDelay > maxResolveDelay { t.resolveDelay = maxResolveDelay } glog.V(logger.Debug).Infof("resolving node %x failed (new delay: %v)", t.dest.ID[:6], t.resolveDelay) return false } // The node was found. t.resolveDelay = initialResolveDelay t.dest = resolved glog.V(logger.Debug).Infof("resolved node %x: %v:%d", t.dest.ID[:6], t.dest.IP, t.dest.TCP) return true }
// enqueue schedules a new future import operation, if the block to be imported // has not yet been seen. func (f *Fetcher) enqueue(peer string, block *types.Block) { hash := block.Hash() // Ensure the peer isn't DOSing us count := f.queues[peer] + 1 if count > blockLimit { glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit) return } // Discard any past or too distant blocks if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist) discardMeter.Mark(1) return } // Schedule the block for future importing if _, ok := f.queued[hash]; !ok { op := &inject{ origin: peer, block: block, } f.queues[peer] = count f.queued[hash] = op f.queue.Push(op, -float32(block.NumberU64())) if glog.V(logger.Debug) { glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size()) } } }
// SetHashReg sets the HashReg contract address used by the registrar. When
// hashreg is non-empty it is used verbatim; otherwise the address is looked
// up via the global registrar and, if not found, a new HashReg contract
// deployment is attempted from addr.
func (self *Registrar) SetHashReg(hashreg string, addr common.Address) (txhash string, err error) {
	if hashreg != "" {
		HashRegAddr = hashreg
	} else {
		// A non-zero address is already cached; nothing to do.
		if !zero.MatchString(HashRegAddr) {
			return
		}
		nameHex, extra := encodeName(HashRegName, 2)
		hashRegAbi := resolveAbi + nameHex + extra
		glog.V(logger.Detail).Infof("\ncall HashRegAddr %v with %v\n", GlobalRegistrarAddr, hashRegAbi)
		var res string
		res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", hashRegAbi)
		if len(res) >= 40 {
			// The address is the trailing 40 hex characters of the return value.
			HashRegAddr = "0x" + res[len(res)-40:len(res)]
		}
		if err != nil || zero.MatchString(HashRegAddr) {
			// Not registered yet: deploy a fresh HashReg contract, which
			// requires a sender account.
			if (addr == common.Address{}) {
				err = fmt.Errorf("HashReg address not found and sender for creation not given")
				return
			}
			txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "", "", HashRegCode)
			if err != nil {
				err = fmt.Errorf("HashReg address not found and sender for creation failed: %v", err)
			}
			// NOTE(review): this logs a "created" txhash even when Transact
			// failed above — confirm whether that is intended.
			glog.V(logger.Detail).Infof("created HashRegAddr @ txhash %v\n", txhash)
		} else {
			glog.V(logger.Detail).Infof("HashRegAddr found at @ %v\n", HashRegAddr)
			return
		}
	}
	return
}
func ecrecoverFunc(in []byte) []byte { in = common.RightPadBytes(in, 128) // "in" is (hash, v, r, s), each 32 bytes // but for ecrecover we want (r, s, v) r := common.BytesToBig(in[64:96]) s := common.BytesToBig(in[96:128]) // Treat V as a 256bit integer vbig := common.Bytes2Big(in[32:64]) v := byte(vbig.Uint64()) if !crypto.ValidateSignatureValues(v, r, s) { glog.V(logger.Debug).Infof("EC RECOVER FAIL: v, r or s value invalid") return nil } // v needs to be at the end and normalized for libsecp256k1 vbignormal := new(big.Int).Sub(vbig, big.NewInt(27)) vnormal := byte(vbignormal.Uint64()) rsv := append(in[64:128], vnormal) pubKey, err := crypto.Ecrecover(in[:32], rsv) // make sure the public key is a valid one if err != nil { glog.V(logger.Error).Infof("EC RECOVER FAIL: ", err) return nil } // the first byte of pubkey is bitcoin heritage return common.LeftPadBytes(crypto.Sha3(pubKey[1:])[12:], 32) }
// parseNodes parses a list of discovery node URLs loaded from a .json file. func (cfg *Config) parseNodes(file string) []*discover.Node { // Short circuit if no node config is present path := filepath.Join(cfg.DataDir, file) if _, err := os.Stat(path); err != nil { return nil } // Load the nodes from the config file blob, err := ioutil.ReadFile(path) if err != nil { glog.V(logger.Error).Infof("Failed to access nodes: %v", err) return nil } nodelist := []string{} if err := json.Unmarshal(blob, &nodelist); err != nil { glog.V(logger.Error).Infof("Failed to load nodes: %v", err) return nil } // Interpret the list as a discovery node array var nodes []*discover.Node for _, url := range nodelist { if url == "" { continue } node, err := discover.ParseNode(url) if err != nil { glog.V(logger.Error).Infof("Node URL %s: %v\n", url, err) continue } nodes = append(nodes, node) } return nodes }
func (self *Registrar) SetUrlHint(urlhint string, addr common.Address) (txhash string, err error) { if urlhint != "" { UrlHintAddr = urlhint } else { if !zero.MatchString(UrlHintAddr) { return } nameHex, extra := encodeName(UrlHintName, 2) urlHintAbi := resolveAbi + nameHex + extra glog.V(logger.Detail).Infof("UrlHint address query data: %s to %s", urlHintAbi, GlobalRegistrarAddr) var res string res, _, err = self.backend.Call("", GlobalRegistrarAddr, "", "", "", urlHintAbi) if len(res) >= 40 { UrlHintAddr = "0x" + res[len(res)-40:len(res)] } if err != nil || zero.MatchString(UrlHintAddr) { if (addr == common.Address{}) { err = fmt.Errorf("UrlHint address not found and sender for creation not given") return } txhash, err = self.backend.Transact(addr.Hex(), "", "", "", "210000", "", UrlHintCode) if err != nil { err = fmt.Errorf("UrlHint address not found and sender for creation failed: %v", err) } glog.V(logger.Detail).Infof("created UrlHint @ txhash %v\n", txhash) } else { glog.V(logger.Detail).Infof("UrlHint found @ %v\n", HashRegAddr) return } } return }
// add inserts a new envelope into the message pool to be distributed within the // whisper network. It also inserts the envelope into the expiration pool at the // appropriate time-stamp. func (self *Whisper) add(envelope *Envelope) error { self.poolMu.Lock() defer self.poolMu.Unlock() // Insert the message into the tracked pool hash := envelope.Hash() if _, ok := self.messages[hash]; ok { glog.V(logger.Detail).Infof("whisper envelope already cached: %x\n", envelope) return nil } self.messages[hash] = envelope // Insert the message into the expiration pool for later removal if self.expirations[envelope.Expiry] == nil { self.expirations[envelope.Expiry] = set.NewNonTS() } if !self.expirations[envelope.Expiry].Has(hash) { self.expirations[envelope.Expiry].Add(hash) // Notify the local node of a message arrival go self.postEvent(envelope) } glog.V(logger.Detail).Infof("cached whisper envelope %x\n", envelope) return nil }
// newErrorResponse creates a JSON RPC error response for a specific request id, // containing the specified error code and error message. Beside returning the // error to the caller, it also sets the ret_error and ret_response JavaScript // variables. func newErrorResponse(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) { // Bundle the error into a JSON RPC call response res := rpc.JSONErrResponse{ Version: rpc.JSONRPCVersion, Id: id, Error: rpc.JSONError{ Code: code, Message: msg, }, } // Serialize the error response into JavaScript variables errObj, err := json.Marshal(res.Error) if err != nil { glog.V(logger.Error).Infof("Failed to serialize JSON RPC error: %v", err) } resObj, err := json.Marshal(res) if err != nil { glog.V(logger.Error).Infof("Failed to serialize JSON RPC error response: %v", err) } if _, err = call.Otto.Run("ret_error = " + string(errObj)); err != nil { glog.V(logger.Error).Infof("Failed to set `ret_error` to the occurred error: %v", err) } resVal, err := call.Otto.Run("ret_response = " + string(resObj)) if err != nil { glog.V(logger.Error).Infof("Failed to set `ret_response` to the JSON RPC response: %v", err) } return resVal }
// UnlockAccount asks the user agent for the user password and tries to unlock the account. // It will try 3 attempts before giving up. func (fe *RemoteFrontend) UnlockAccount(address []byte) bool { if !fe.enabled { return false } err := fe.send(AskPasswordMethod, common.Bytes2Hex(address)) if err != nil { glog.V(logger.Error).Infof("Unable to send password request to agent - %v\n", err) return false } passwdRes, err := fe.recv() if err != nil { glog.V(logger.Error).Infof("Unable to recv password response from agent - %v\n", err) return false } if passwd, ok := passwdRes.Result.(string); ok { err = fe.mgr.Unlock(common.BytesToAddress(address), passwd) } if err == nil { return true } glog.V(logger.Debug).Infoln("3 invalid account unlock attempts") return false }
// BroadcastBlock will either propagate a block to a subset of it's peers, or
// will only announce it's availability (depending what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	// Only consider peers that have not yet seen this block.
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send to a subset of the peer
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.chainman.GetBlock(block.ParentHash()); parent != nil {
			td = new(big.Int).Add(parent.Td, block.Difficulty())
		} else {
			// Unknown parent: the TD cannot be computed, refuse to propagate.
			glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
			return
		}
		// Send the block to a subset of our peers (sqrt of the peer count).
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.SendNewBlock(block, td)
		}
		glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if pm.chainman.HasBlock(hash) {
		for _, peer := range peers {
			peer.SendNewBlockHashes([]common.Hash{hash})
		}
		glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
	}
}
// Map adds a port mapping on m and keeps it alive until c is closed.
// This function is typically invoked in its own goroutine.
func Map(m Interface, c chan struct{}, protocol string, extport, intport int, name string) {
	// Timer used to refresh the mapping before the NAT device expires it.
	refresh := time.NewTimer(mapUpdateInterval)
	defer func() {
		// Tear the mapping down once the control channel closes.
		refresh.Stop()
		glog.V(logger.Debug).Infof("deleting port mapping: %s %d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
		m.DeleteMapping(protocol, extport, intport)
	}()
	if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
		glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
	} else {
		glog.V(logger.Info).Infof("mapped network port %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
	}
	for {
		select {
		case _, ok := <-c:
			if !ok {
				// Channel closed: exit; the deferred cleanup above removes
				// the mapping.
				return
			}
		case <-refresh.C:
			// Re-establish the mapping and re-arm the refresh timer.
			glog.V(logger.Detail).Infof("refresh port mapping %s:%d -> %d (%s) using %s\n", protocol, extport, intport, name, m)
			if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil {
				glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v\n", protocol, intport, err)
			}
			refresh.Reset(mapUpdateInterval)
		}
	}
}
// handle is the callback invoked to manage the life cycle of an exp peer. When // this function terminates, the peer is disconnected. func (pm *ProtocolManager) handle(p *peer) error { glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name()) // Execute the Expanse handshake td, head, genesis := pm.chainman.Status() if err := p.Handshake(td, head, genesis); err != nil { glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err) return err } // Register the peer locally glog.V(logger.Detail).Infof("%v: adding peer", p) if err := pm.peers.Register(p); err != nil { glog.V(logger.Error).Infof("%v: addition failed: %v", p, err) return err } defer pm.removePeer(p.id) // Register the peer in the downloader. If the downloader considers it banned, we disconnect if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(), p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks); err != nil { return err } // Propagate existing transactions. new transactions appearing // after this will be sent via broadcasts. pm.syncTransactions(p) // main loop. handle incoming messages. for { if err := pm.handleMsg(p); err != nil { glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err) return err } } return nil }
func sendBadBlockReport(block *types.Block, err error) { if !EnableBadBlockReporting { return } var ( blockRLP, _ = rlp.EncodeToBytes(block) params = map[string]interface{}{ "block": common.Bytes2Hex(blockRLP), "blockHash": block.Hash().Hex(), "errortype": err.Error(), "client": "go", } ) if !block.ReceivedAt.IsZero() { params["receivedAt"] = block.ReceivedAt.UTC().String() } if p, ok := block.ReceivedFrom.(*peer); ok { params["receivedFrom"] = map[string]interface{}{ "enode": fmt.Sprintf("enode://%x@%v", p.ID(), p.RemoteAddr()), "name": p.Name(), "protocolVersion": p.version, } } jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "id": "1", "jsonrpc": "2.0", "params": []interface{}{params}}) client := http.Client{Timeout: 8 * time.Second} resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr)) if err != nil { glog.V(logger.Debug).Infoln(err) return } glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode) resp.Body.Close() }
// commitTransactions attempts to include each of the given transactions in
// the work-in-progress block, sharing a single block-wide gas pool and
// skipping the remaining transactions of any sender that is under-priced or
// has hit the block gas limit.
func (env *Work) commitTransactions(transactions types.Transactions, gasPrice *big.Int, bc *core.BlockChain) {
	// Gas pool for the whole block, seeded from the header gas limit.
	gp := new(core.GasPool).AddGas(env.header.GasLimit)
	for _, tx := range transactions {
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance is the transaction pool.
		from, _ := tx.From()

		// Check if it falls within margin. Txs from owned accounts are always processed.
		if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
			// ignore the transaction and transactor. We ignore the transactor
			// because nonce will fail after ignoring this transaction so there's
			// no point
			env.lowGasTransactors.Add(from)
			glog.V(logger.Info).Infof("transaction(%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4])
		}
		// Continue with the next transaction if the transaction sender is included in
		// the low gas tx set. This will also remove the tx and all sequential transaction
		// from this transactor
		if env.lowGasTransactors.Has(from) {
			// add tx to the low gas set. This will be removed at the end of the run
			// owned accounts are ignored
			if !env.ownedAccounts.Has(from) {
				env.lowGasTxs = append(env.lowGasTxs, tx)
			}
			continue
		}
		// Move on to the next transaction when the transactor is in ignored transactions set
		// This may occur when a transaction hits the gas limit. When a gas limit is hit and
		// the transaction is processed (that could potentially be included in the block) it
		// will throw a nonce error because the previous transaction hasn't been processed.
		// Therefor we need to ignore any transaction after the ignored one.
		if env.ignoredTransactors.Has(from) {
			continue
		}
		env.state.StartRecord(tx.Hash(), common.Hash{}, 0)
		err := env.commitTransaction(tx, bc, gp)
		switch {
		case core.IsGasLimitErr(err):
			// ignore the transactor so no nonce errors will be thrown for this account
			// next time the worker is run, they'll be picked up again.
			env.ignoredTransactors.Add(from)
			glog.V(logger.Detail).Infof("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4])
		case err != nil:
			// Any other failure: schedule the transaction for removal from the pool.
			env.remove.Add(tx.Hash())
			if glog.V(logger.Detail) {
				glog.Infof("TX (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err)
			}
		default:
			// Committed successfully, bump the included-transaction counter.
			env.tcount++
		}
	}
}
// UnregisterPeer remove a peer from the known list, preventing any action from // the specified peer. func (d *Downloader) UnregisterPeer(id string) error { glog.V(logger.Detail).Infoln("Unregistering peer", id) if err := d.peers.Unregister(id); err != nil { glog.V(logger.Error).Infoln("Unregister failed:", err) return err } return nil }
// MustMakeChainConfigFromDb reads the chain configuration from the given database.
func MustMakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *core.ChainConfig {
	// If the chain is already initialized, use any existing chain configs
	config := new(core.ChainConfig)
	genesis := core.GetBlock(db, core.GetCanonicalHash(db, 0))
	if genesis != nil {
		storedConfig, err := core.GetChainConfig(db, genesis.Hash())
		switch err {
		case nil:
			config = storedConfig
		case core.ChainConfigNotFoundErr:
			// No configs found, use empty, will populate below
		default:
			Fatalf("Could not make chain configuration: %v", err)
		}
	}
	// Set any missing fields due to them being unset or system upgrade
	if config.HomesteadBlock == nil {
		if ctx.GlobalBool(TestNetFlag.Name) {
			config.HomesteadBlock = params.TestNetHomesteadBlock
		} else {
			config.HomesteadBlock = params.MainNetHomesteadBlock
		}
	}
	if config.DAOForkBlock == nil {
		if ctx.GlobalBool(TestNetFlag.Name) {
			config.DAOForkBlock = params.TestNetDAOForkBlock
		} else {
			config.DAOForkBlock = params.MainNetDAOForkBlock
		}
		config.DAOForkSupport = true
	}
	// Force override any existing configs if explicitly requested
	switch {
	case ctx.GlobalBool(SupportDAOFork.Name):
		config.DAOForkSupport = true
	case ctx.GlobalBool(OpposeDAOFork.Name):
		config.DAOForkSupport = false
	}
	// Temporarily display a proper message so the user knows which fork it is on.
	// Only shown for the main network (genesis missing or matching the known
	// main-net genesis hash).
	if !ctx.GlobalBool(TestNetFlag.Name) && (genesis == nil || genesis.Hash() == common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")) {
		choice := "SUPPORT"
		if !config.DAOForkSupport {
			choice = "OPPOSE"
		}
		current := fmt.Sprintf(" is currently configured to %s the DAO hard-fork!", choice)
		howtoswap := fmt.Sprintf("You can change your choice prior to block #%v with --support-dao-fork or --oppose-dao-fork.", config.DAOForkBlock)
		howtosync := fmt.Sprintf("After the hard-fork block #%v passed, changing chains requires a resync from scratch!", config.DAOForkBlock)
		separator := strings.Repeat("-", len(howtoswap))

		glog.V(logger.Warn).Info(separator)
		glog.V(logger.Warn).Info(current)
		glog.V(logger.Warn).Info(howtoswap)
		glog.V(logger.Warn).Info(howtosync)
		glog.V(logger.Warn).Info(separator)
	}
	return config
}
// run drives the peer's main event loop: it starts the read, ping and
// protocol goroutines, waits for the first error or disconnect request and
// returns the reason the peer was torn down.
func (p *Peer) run() DiscReason {
	var (
		writeStart = make(chan struct{}, 1) // token gating access to the write path
		writeErr   = make(chan error, 1)
		readErr    = make(chan error, 1)
		reason     DiscReason
		requested  bool // set when the remote side asked for the disconnect
	)
	p.wg.Add(2)
	go p.readLoop(readErr)
	go p.pingLoop()

	// Start all protocol handlers.
	writeStart <- struct{}{}
	p.startProtocols(writeStart, writeErr)

	// Wait for an error or disconnect.
loop:
	for {
		select {
		case err := <-writeErr:
			// A write finished. Allow the next write to start if
			// there was no error.
			if err != nil {
				glog.V(logger.Detail).Infof("%v: write error: %v\n", p, err)
				reason = DiscNetworkError
				break loop
			}
			writeStart <- struct{}{}
		case err := <-readErr:
			// A read error that is itself a DiscReason means the remote
			// peer requested the disconnect; anything else is a network error.
			if r, ok := err.(DiscReason); ok {
				glog.V(logger.Debug).Infof("%v: remote requested disconnect: %v\n", p, r)
				requested = true
				reason = r
			} else {
				glog.V(logger.Detail).Infof("%v: read error: %v\n", p, err)
				reason = DiscNetworkError
			}
			break loop
		case err := <-p.protoErr:
			reason = discReasonForError(err)
			glog.V(logger.Debug).Infof("%v: protocol error: %v (%v)\n", p, err, reason)
			break loop
		case reason = <-p.disc:
			glog.V(logger.Debug).Infof("%v: locally requested disconnect: %v\n", p, reason)
			break loop
		}
	}
	// Shut everything down and wait for the helper goroutines to exit.
	close(p.closed)
	p.rw.close(reason)
	p.wg.Wait()
	if requested {
		reason = DiscRequested
	}
	return reason
}
// NewBlockChain returns a fully initialised block chain using information // available in the database. It initialiser the default Ethereum Validator and // Processor. func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*BlockChain, error) { headerCache, _ := lru.New(headerCacheLimit) bodyCache, _ := lru.New(bodyCacheLimit) bodyRLPCache, _ := lru.New(bodyCacheLimit) tdCache, _ := lru.New(tdCacheLimit) blockCache, _ := lru.New(blockCacheLimit) futureBlocks, _ := lru.New(maxFutureBlocks) bc := &BlockChain{ chainDb: chainDb, eventMux: mux, quit: make(chan struct{}), headerCache: headerCache, bodyCache: bodyCache, bodyRLPCache: bodyRLPCache, tdCache: tdCache, blockCache: blockCache, futureBlocks: futureBlocks, pow: pow, } // Seed a fast but crypto originating random generator seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) if err != nil { return nil, err } bc.rand = mrand.New(mrand.NewSource(seed.Int64())) bc.SetValidator(NewBlockValidator(bc, pow)) bc.SetProcessor(NewStateProcessor(bc)) bc.genesisBlock = bc.GetBlockByNumber(0) if bc.genesisBlock == nil { reader, err := NewDefaultGenesisReader() if err != nil { return nil, err } bc.genesisBlock, err = WriteGenesisBlock(chainDb, reader) if err != nil { return nil, err } glog.V(logger.Info).Infoln("WARNING: Wrote default expanse genesis block") } if err := bc.loadLastState(); err != nil { return nil, err } // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain for hash, _ := range BadHashes { if header := bc.GetHeader(hash); header != nil { glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]) bc.SetHead(header.Number.Uint64() - 1) glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation") } } // Take ownership of this particular state go bc.update() return bc, nil }
func sendJSON(w io.Writer, v interface{}) { if glog.V(logger.Detail) { if payload, err := json.MarshalIndent(v, "", "\t"); err == nil { glog.Infof("Sending payload: %s", payload) } } if err := json.NewEncoder(w).Encode(v); err != nil { glog.V(logger.Error).Infoln("Error sending JSON:", err) } }
// loop watches the keystore directory for filesystem changes and reloads the
// account cache, debouncing bursts of events into a single reload.
func (w *watcher) loop() {
	defer func() {
		// Mark the watcher as stopped regardless of how the loop exits.
		w.ac.mu.Lock()
		w.running = false
		w.starting = false
		w.ac.mu.Unlock()
	}()
	err := notify.Watch(w.ac.keydir, w.ev, notify.All)
	if err != nil {
		glog.V(logger.Detail).Infof("can't watch %s: %v", w.ac.keydir, err)
		return
	}
	defer notify.Stop(w.ev)
	glog.V(logger.Detail).Infof("now watching %s", w.ac.keydir)
	defer glog.V(logger.Detail).Infof("no longer watching %s", w.ac.keydir)

	w.ac.mu.Lock()
	w.running = true
	w.ac.mu.Unlock()

	// Wait for file system events and reload.
	// When an event occurs, the reload call is delayed a bit so that
	// multiple events arriving quickly only cause a single reload.
	var (
		debounce          = time.NewTimer(0)
		debounceDuration  = 500 * time.Millisecond
		inCycle, hadEvent bool
	)
	defer debounce.Stop()
	for {
		select {
		case <-w.quit:
			return
		case <-w.ev:
			if !inCycle {
				// First event of a burst: arm the debounce timer.
				debounce.Reset(debounceDuration)
				inCycle = true
			} else {
				// Further events within the window are coalesced.
				hadEvent = true
			}
		case <-debounce.C:
			w.ac.mu.Lock()
			w.ac.reload()
			w.ac.mu.Unlock()
			if hadEvent {
				// Events arrived while reloading; schedule another cycle.
				debounce.Reset(debounceDuration)
				inCycle, hadEvent = true, false
			} else {
				inCycle, hadEvent = false, false
			}
		}
	}
}
func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req interface{}) error { packet, err := encodePacket(t.priv, ptype, req) if err != nil { return err } glog.V(logger.Detail).Infof(">>> %v %T\n", toaddr, req) if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil { glog.V(logger.Detail).Infoln("UDP send failed:", err) } return err }
// dial performs the actual connection attempt. func (t *dialTask) dial(srv *Server, dest *discover.Node) bool { addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)} glog.V(logger.Debug).Infof("dial tcp %v (%x)\n", addr, dest.ID[:6]) fd, err := srv.Dialer.Dial("tcp", addr.String()) if err != nil { glog.V(logger.Detail).Infof("%v", err) return false } mfd := newMeteredConn(fd, false) srv.setupConn(mfd, t.flags, dest) return true }
func (self *StateTransition) transitionState() (ret []byte, usedGas *big.Int, err error) { if err = self.preCheck(); err != nil { return } msg := self.msg sender, _ := self.From() // err checked in preCheck // Pay intrinsic gas if err = self.UseGas(IntrinsicGas(self.data)); err != nil { return nil, nil, InvalidTxError(err) } vmenv := self.env var ref vm.ContextRef if MessageCreatesContract(msg) { ret, err, ref = vmenv.Create(sender, self.data, self.gas, self.gasPrice, self.value) if err == nil { dataGas := big.NewInt(int64(len(ret))) dataGas.Mul(dataGas, params.CreateDataGas) if err := self.UseGas(dataGas); err == nil { ref.SetCode(ret) } else { ret = nil // does not affect consensus but useful for StateTests validations glog.V(logger.Core).Infoln("Insufficient gas for creating code. Require", dataGas, "and have", self.gas) } } glog.V(logger.Core).Infoln("VM create err:", err) } else { // Increment the nonce for the next transaction self.state.SetNonce(sender.Address(), sender.Nonce()+1) ret, err = vmenv.Call(sender, self.To().Address(), self.data, self.gas, self.gasPrice, self.value) glog.V(logger.Core).Infoln("VM call err:", err) } if err != nil && IsValueTransferErr(err) { return nil, nil, InvalidTxError(err) } // We aren't interested in errors here. Errors returned by the VM are non-consensus errors and therefor shouldn't bubble up if err != nil { err = nil } if vm.Debug { vm.StdErrFormat(vmenv.StructLogs()) } self.refundGas() self.state.AddBalance(self.env.Coinbase(), new(big.Int).Mul(self.gasUsed(), self.gasPrice)) return ret, self.gasUsed(), err }
func (t *dialTask) Do(srv *Server) { addr := &net.TCPAddr{IP: t.dest.IP, Port: int(t.dest.TCP)} glog.V(logger.Debug).Infof("dialing %v\n", t.dest) fd, err := srv.Dialer.Dial("tcp", addr.String()) if err != nil { glog.V(logger.Detail).Infof("dial error: %v", err) return } mfd := newMeteredConn(fd, false) srv.setupConn(mfd, t.flags, t.dest) }
// transitionDb applies the state transition for the wrapped message:
// precondition checks, intrinsic gas payment, EVM execution (contract
// creation or call), gas refund and coinbase payment.
func (self *StateTransition) transitionDb() (ret []byte, usedGas *big.Int, err error) {
	if err = self.preCheck(); err != nil {
		return
	}
	msg := self.msg
	sender, _ := self.from() // err checked in preCheck

	homestead := params.IsHomestead(self.env.BlockNumber())
	contractCreation := MessageCreatesContract(msg)
	// Pay intrinsic gas
	if err = self.useGas(IntrinsicGas(self.data, contractCreation, homestead)); err != nil {
		return nil, nil, InvalidTxError(err)
	}

	vmenv := self.env
	//var addr common.Address
	if contractCreation {
		ret, _, err = vmenv.Create(sender, self.data, self.gas, self.gasPrice, self.value)
		if homestead && err == vm.CodeStoreOutOfGasError {
			// Homestead rule: failing to store code consumes all remaining gas.
			self.gas = Big0
		}
		if err != nil {
			ret = nil
			glog.V(logger.Core).Infoln("VM create err:", err)
		}
	} else {
		// Increment the nonce for the next transaction
		self.state.SetNonce(sender.Address(), self.state.GetNonce(sender.Address())+1)
		ret, err = vmenv.Call(sender, self.to().Address(), self.data, self.gas, self.gasPrice, self.value)
		if err != nil {
			glog.V(logger.Core).Infoln("VM call err:", err)
		}
	}
	if err != nil && IsValueTransferErr(err) {
		// Value transfer failures invalidate the whole transaction.
		return nil, nil, InvalidTxError(err)
	}

	// We aren't interested in errors here. Errors returned by the VM are non-consensus errors and therefor shouldn't bubble up
	if err != nil {
		err = nil
	}

	if vm.Debug {
		vm.StdErrFormat(vmenv.StructLogs())
	}

	// Refund unused gas to the sender and pay the coinbase for the gas used.
	self.refundGas()
	self.state.AddBalance(self.env.Coinbase(), new(big.Int).Mul(self.gasUsed(), self.gasPrice))

	return ret, self.gasUsed(), err
}
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error { packet, fromID, hash, err := decodePacket(buf) if err != nil { glog.V(logger.Debug).Infof("Bad packet from %v: %v\n", from, err) return err } status := "ok" if err = packet.handle(t, from, fromID, hash); err != nil { status = err.Error() } glog.V(logger.Detail).Infof("<<< %v %T: %s\n", from, packet, status) return err }