// reportBlock reports the given block and error using the canonical block
// reporting tool. Reporting the block to the service is handled in a separate
// goroutine.
func reportBlock(block *types.Block, err error) {
	if glog.V(logger.Error) {
		glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex())
		glog.Errorf(" %v", err)
	}
	go ReportBlock(block, err)
}
// Get returns the value for key stored in the trie.
// The value bytes must not be modified by the caller.
func (t *Trie) Get(key []byte) []byte {
	res, err := t.TryGet(key)
	if err != nil && glog.V(logger.Error) {
		glog.Errorf("Unhandled trie error: %v", err)
	}
	return res
}
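// Hedged usage sketch: Get logs and swallows trie errors, so callers that need
// to react to a failed node lookup can use TryGet directly. The helper below is
// illustrative only and not part of the package; it assumes fmt is imported.
func getOrError(t *Trie, key []byte) ([]byte, error) {
	val, err := t.TryGet(key)
	if err != nil {
		// Surface the error instead of only logging it.
		return nil, fmt.Errorf("trie lookup of %x failed: %v", key, err)
	}
	return val, nil
}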
// key returns the key of the first value node at or below the given node,
// storing the value itself in self.Value.
func (self *Iterator) key(node interface{}) []byte {
	switch node := node.(type) {
	case shortNode:
		// Leaf node
		k := remTerm(node.Key)
		if vnode, ok := node.Val.(valueNode); ok {
			self.Value = vnode
			return k
		}
		return append(k, self.key(node.Val)...)
	case fullNode:
		if node[16] != nil {
			self.Value = node[16].(valueNode)
			return []byte{16}
		}
		for i := 0; i < 16; i++ {
			k := self.key(node[i])
			if k != nil {
				return append([]byte{byte(i)}, k...)
			}
		}
	case hashNode:
		rn, err := self.trie.resolveHash(node, nil, nil)
		if err != nil && glog.V(logger.Error) {
			glog.Errorf("Unhandled trie error: %v", err)
		}
		return self.key(rn)
	}
	return nil
}
// Prove constructs a merkle proof for key. The result contains all
// encoded nodes on the path to the value at key. The value itself is
// also included in the last node and can be retrieved by verifying
// the proof.
//
// If the trie does not contain a value for key, the returned proof
// contains all nodes of the longest existing prefix of the key
// (at least the root node), ending with the node that proves the
// absence of the key.
func (t *Trie) Prove(key []byte) []rlp.RawValue {
	// Collect all nodes on the path to key.
	key = compactHexDecode(key)
	nodes := []node{}
	tn := t.root
	for len(key) > 0 && tn != nil {
		switch n := tn.(type) {
		case shortNode:
			if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
				// The trie doesn't contain the key.
				tn = nil
			} else {
				tn = n.Val
				key = key[len(n.Key):]
			}
			nodes = append(nodes, n)
		case fullNode:
			tn = n[key[0]]
			key = key[1:]
			nodes = append(nodes, n)
		case hashNode:
			var err error
			tn, err = t.resolveHash(n, nil, nil)
			if err != nil {
				if glog.V(logger.Error) {
					glog.Errorf("Unhandled trie error: %v", err)
				}
				return nil
			}
		default:
			panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
		}
	}
	if t.hasher == nil {
		t.hasher = newHasher()
	}
	proof := make([]rlp.RawValue, 0, len(nodes))
	for i, n := range nodes {
		// Don't bother checking for errors here since hasher panics
		// if encoding doesn't work and we're not writing to any database.
		n, _ = t.hasher.replaceChildren(n, nil)
		hn, _ := t.hasher.store(n, nil, false)
		if _, ok := hn.(hashNode); ok || i == 0 {
			// If the node's database encoding is a hash (or is the
			// root node), it becomes a proof element.
			enc, _ := rlp.EncodeToBytes(n)
			proof = append(proof, enc)
		}
	}
	return proof
}
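// Hedged usage sketch for Prove: build a proof for a key and check it against
// a known root hash. It assumes this package version also ships a
// VerifyProof(rootHash, key, proof) helper returning the proven value; adjust
// to the actual API if it differs. proveAndCheck is a hypothetical helper.
func proveAndCheck(t *Trie, root common.Hash, key []byte) ([]byte, error) {
	proof := t.Prove(key)
	if proof == nil {
		// Prove only returns nil when a path node could not be resolved.
		return nil, fmt.Errorf("could not build proof for key %x", key)
	}
	// The proof is an ordered list of RLP-encoded nodes from the root down;
	// verification re-hashes them and yields the value stored at key.
	return VerifyProof(root, key, proof)
}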
// reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
	for end := false; !end; end = !it.Next() {
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		var n Node
		if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
			if glog.V(logger.Warn) {
				glog.Errorf("invalid node %x: %v", id, err)
			}
			continue
		}
		return &n
	}
	return nil
}
// Close stops the metrics collection goroutine and closes the underlying
// database, logging the outcome.
func (self *LDBDatabase) Close() {
	// Stop the metrics collection to avoid internal database races
	self.quitLock.Lock()
	defer self.quitLock.Unlock()

	if self.quitChan != nil {
		errc := make(chan error)
		self.quitChan <- errc
		if err := <-errc; err != nil {
			glog.V(logger.Error).Infof("metrics failure in '%s': %v\n", self.fn, err)
		}
	}
	err := self.db.Close()
	if glog.V(logger.Error) {
		if err == nil {
			glog.Infoln("closed db:", self.fn)
		} else {
			glog.Errorf("error closing db %s: %v", self.fn, err)
		}
	}
}
// GetStateObject retrieves a state object given by the address. Returns nil
// if the object is not found or has been deleted.
func (self *StateDB) GetStateObject(addr common.Address) (stateObject *StateObject) {
	stateObject = self.stateObjects[addr.Str()]
	if stateObject != nil {
		if stateObject.deleted {
			stateObject = nil
		}
		return stateObject
	}
	data := self.trie.Get(addr[:])
	if len(data) == 0 {
		return nil
	}
	stateObject, err := DecodeObject(addr, self.db, data)
	if err != nil {
		glog.Errorf("can't decode object at %x: %v", addr[:], err)
		return nil
	}
	self.SetStateObject(stateObject)
	return stateObject
}
// Delete removes any existing value for key from the trie.
func (t *Trie) Delete(key []byte) {
	if err := t.TryDelete(key); err != nil && glog.V(logger.Error) {
		glog.Errorf("Unhandled trie error: %v", err)
	}
}
// Update associates key with value in the trie. Subsequent calls to
// Get will return value. If value has length zero, any existing value
// is deleted from the trie and calls to Get will return nil.
//
// The value bytes must not be modified by the caller while they are
// stored in the trie.
func (t *Trie) Update(key, value []byte) {
	if err := t.TryUpdate(key, value); err != nil && glog.V(logger.Error) {
		glog.Errorf("Unhandled trie error: %v", err)
	}
}
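// Illustrative sketch (hypothetical helper, not part of the package) of the
// Update semantics documented above: writing a zero-length value is the same
// as deleting the key, and Get then returns nil.
func exampleUpdateSemantics(t *Trie) {
	t.Update([]byte("dog"), []byte("puppy"))
	_ = t.Get([]byte("dog")) // []byte("puppy")

	t.Update([]byte("dog"), nil) // zero-length value: removes the key
	_ = t.Get([]byte("dog"))     // nil
}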
// next returns the key of the value node that follows key under node. When
// isIterStart is true, a value stored at key itself also qualifies. The
// matching value is stored in self.Value.
func (self *Iterator) next(node interface{}, key []byte, isIterStart bool) []byte {
	if node == nil {
		return nil
	}
	switch node := node.(type) {
	case fullNode:
		if len(key) > 0 {
			k := self.next(node[key[0]], key[1:], isIterStart)
			if k != nil {
				return append([]byte{key[0]}, k...)
			}
		}
		var r byte
		if len(key) > 0 {
			r = key[0] + 1
		}
		for i := r; i < 16; i++ {
			k := self.key(node[i])
			if k != nil {
				return append([]byte{i}, k...)
			}
		}
	case shortNode:
		k := remTerm(node.Key)
		if vnode, ok := node.Val.(valueNode); ok {
			switch bytes.Compare([]byte(k), key) {
			case 0:
				if isIterStart {
					self.Value = vnode
					return k
				}
			case 1:
				self.Value = vnode
				return k
			}
		} else {
			cnode := node.Val
			var ret []byte
			skey := key[len(k):]
			if bytes.HasPrefix(key, k) {
				ret = self.next(cnode, skey, isIterStart)
			} else if bytes.Compare(k, key[:len(k)]) > 0 {
				return self.key(node)
			}
			if ret != nil {
				return append(k, ret...)
			}
		}
	case hashNode:
		rn, err := self.trie.resolveHash(node, nil, nil)
		if err != nil && glog.V(logger.Error) {
			glog.Errorf("Unhandled trie error: %v", err)
		}
		return self.next(rn, key, isIterStart)
	}
	return nil
}
// checkQueue moves transactions that have become processable to the main pool.
func (pool *TxPool) checkQueue() {
	// init delayed since tx pool could have been started before any state sync
	if pool.pendingState == nil {
		pool.resetState()
	}

	var promote txQueue
	for address, txs := range pool.queue {
		currentState, err := pool.currentState()
		if err != nil {
			glog.Errorf("could not get current state: %v", err)
			return
		}
		balance := currentState.GetBalance(address)

		var (
			guessedNonce = pool.pendingState.GetNonce(address) // nonce currently kept by the tx pool (pending state)
			trueNonce    = currentState.GetNonce(address)      // nonce known by the last state
		)
		promote = promote[:0]
		for hash, tx := range txs {
			// Drop processed or out of fund transactions
			if tx.Nonce() < trueNonce || balance.Cmp(tx.Cost()) < 0 {
				if glog.V(logger.Core) {
					glog.Infof("removed tx (%v) from pool queue: low tx nonce or out of funds\n", tx)
				}
				delete(txs, hash)
				continue
			}
			// Collect the remaining transactions for the next pass.
			promote = append(promote, txQueueEntry{hash, address, tx})
		}
		// Find the next consecutive nonce range starting at the current account nonce,
		// pushing the guessed nonce forward if we add consecutive transactions.
		sort.Sort(promote)
		for i, entry := range promote {
			// If we reached a gap in the nonces, enforce transaction limit and stop
			if entry.Nonce() > guessedNonce {
				if len(promote)-i > maxQueued {
					if glog.V(logger.Debug) {
						glog.Infof("Queued tx limit exceeded for %s. Tx %s removed\n", common.PP(address[:]), common.PP(entry.hash[:]))
					}
					for _, drop := range promote[i+maxQueued:] {
						delete(txs, drop.hash)
					}
				}
				break
			}
			// Otherwise promote the transaction and move the guess nonce if needed
			pool.addTx(entry.hash, address, entry.Transaction)
			delete(txs, entry.hash)
			if entry.Nonce() == guessedNonce {
				guessedNonce++
			}
		}
		// Delete the entire queue entry if it became empty.
		if len(txs) == 0 {
			delete(pool.queue, address)
		}
	}
}
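// Illustrative sketch (standalone, not part of TxPool) of the promotion rule
// above: given one account's queued nonces in ascending order and the
// pending-state nonce, only the consecutive run starting at that nonce is
// promoted; the first gap stops promotion and leaves the rest queued.
// For example, promotable([]uint64{5, 6, 7, 9}, 5) returns [5 6 7]; nonce 9
// stays queued because 8 is missing.
func promotable(sortedNonces []uint64, guessedNonce uint64) []uint64 {
	var promote []uint64
	for _, n := range sortedNonces {
		if n > guessedNonce {
			break // nonce gap: stop promoting
		}
		promote = append(promote, n)
		if n == guessedNonce {
			guessedNonce++
		}
	}
	return promote
}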