// Try a new transaction in the mempool.
// Potentially blocking if we're blocking on Update() or Reap().
func (mem *Mempool) AddTx(tx types.Tx) (err error) {
	mem.mtx.Lock()
	defer mem.mtx.Unlock()

	// CACHE
	if _, exists := mem.cacheMap[mem.TxID(tx)]; exists {
		return nil
	}
	if mem.cacheList.Len() >= cacheSize {
		popped := mem.cacheList.Front()
		poppedTx := popped.Value.(types.Tx)
		delete(mem.cacheMap, mem.TxID(poppedTx))
		mem.cacheList.Remove(popped)
	}
	mem.cacheMap[mem.TxID(tx)] = struct{}{}
	mem.cacheList.PushBack(tx)
	// END CACHE

	err = sm.ExecTx(mem.cache, tx, false, nil)
	if err != nil {
		log.Info("AddTx() error", "tx", tx, "error", err)
		return err
	} else {
		log.Info("AddTx() success", "tx", tx)
		mem.counter++
		memTx := &mempoolTx{
			counter: mem.counter,
			height:  int64(mem.height),
			tx:      tx,
		}
		mem.txs.PushBack(memTx)
		return nil
	}
}
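// The AddTx above depends on declarations that are outside this listing. The
// sketch below is an inferred reconstruction based only on how the names are
// used: the field names come from the code above, while the concrete types
// (a string-keyed cache map, a container/list FIFO, the cacheSize bound, a
// clist-style mem.txs) are assumptions rather than the real definitions.
//
//	const cacheSize = 100000 // assumed bound on the dedup cache
//
//	type mempoolTx struct {
//		counter int64    // unique, monotonically increasing sequence number
//		height  int64    // height at which the tx entered the mempool
//		tx      types.Tx // the raw transaction
//	}
//
//	// Assumed Mempool fields used by AddTx:
//	//   cacheMap  map[string]struct{} // keyed by TxID for O(1) duplicate checks
//	//   cacheList *list.List          // FIFO (container/list) for oldest-first eviction
//	//   counter   int64               // sequence number assigned to each accepted tx
//	//   txs       *clist.CList        // the mempool proper; AddTx appends *mempoolTx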
// NOTE: pass in goodTxs because mem.txs can mutate concurrently.
func (mem *Mempool) recheckTxs(goodTxs []types.Tx) {
	if len(goodTxs) == 0 {
		return
	}
	atomic.StoreInt32(&mem.rechecking, 1)
	mem.recheckCursor = mem.txs.Front()
	mem.recheckEnd = mem.txs.Back()

	for _, tx := range goodTxs {
		err := sm.ExecTx(mem.cache, tx, false, nil)
		if err != nil {
			// Tx became invalidated due to newly committed block.
			mem.txs.Remove(mem.recheckCursor)
			mem.recheckCursor.DetachPrev()
		}
		if mem.recheckCursor == mem.recheckEnd {
			mem.recheckCursor = nil
		} else {
			mem.recheckCursor = mem.recheckCursor.Next()
		}
		if mem.recheckCursor == nil {
			// Done!
			atomic.StoreInt32(&mem.rechecking, 0)
		}
	}
}
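// recheckTxs above drives recheck state that is declared elsewhere on the
// Mempool. The following is an inferred sketch, not the actual definitions:
// the element type is assumed to be a clist-style concurrent linked list
// (DetachPrev is not part of the standard container/list API), and the flag
// width is assumed from the atomic.StoreInt32 calls.
//
//	rechecking    int32           // 1 while a recheck pass is in flight
//	recheckCursor *clist.CElement // next mem.txs element to re-validate
//	recheckEnd    *clist.CElement // last element present when the recheck began
//
// The cursor walks mem.txs in lockstep with goodTxs: each iteration keeps or
// removes the element under the cursor, advances, and the pass ends (the
// rechecking flag is cleared) once the cursor passes recheckEnd.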
// Apply tx to the state and remember it.
func (mem *Mempool) AddTx(tx types.Tx) (err error) {
	mem.mtx.Lock()
	defer mem.mtx.Unlock()

	err = sm.ExecTx(mem.cache, tx, false, nil)
	if err != nil {
		log.Info("AddTx() error", "tx", tx, "error", err)
		return err
	} else {
		log.Info("AddTx() success", "tx", tx)
		mem.txs = append(mem.txs, tx)
		return nil
	}
}
// "block" is the new block being committed.
// "state" is the result of state.AppendBlock("block").
// Txs that are present in "block" are discarded from mempool.
// Txs that have become invalid in the new "state" are also discarded.
func (mem *Mempool) ResetForBlockAndState(block *types.Block, state *sm.State) ResetInfo {
	mem.mtx.Lock()
	defer mem.mtx.Unlock()
	mem.state = state.Copy()
	mem.cache = sm.NewBlockCache(mem.state)

	// First, create a lookup map of txns in new block.
	blockTxsMap := make(map[string]struct{})
	for _, tx := range block.Data.Txs {
		blockTxsMap[string(types.TxID(state.ChainID, tx))] = struct{}{}
	}

	// Now we filter all txs from mem.txs that are in blockTxsMap,
	// and ExecTx on what remains. Only valid txs are kept.
	// We track the ranges of txs included in the block and invalidated by it
	// so we can report them back to peer routines in the returned ResetInfo.
	ri := ResetInfo{Height: block.Height}
	var validTxs []types.Tx
	includedStart, invalidStart := -1, -1
	for i, tx := range mem.txs {
		txID := types.TxID(state.ChainID, tx)
		if _, ok := blockTxsMap[string(txID)]; ok {
			startRange(&includedStart, i)           // start counting included txs
			endRange(&invalidStart, i, &ri.Invalid) // stop counting invalid txs
			log.Info("Filter out, already committed", "tx", tx, "txID", txID)
		} else {
			endRange(&includedStart, i, &ri.Included) // stop counting included txs
			err := sm.ExecTx(mem.cache, tx, false, nil)
			if err != nil {
				startRange(&invalidStart, i) // start counting invalid txs
				log.Info("Filter out, no longer valid", "tx", tx, "error", err)
			} else {
				endRange(&invalidStart, i, &ri.Invalid) // stop counting invalid txs
				log.Info("Filter in, new, valid", "tx", tx, "txID", txID)
				validTxs = append(validTxs, tx)
			}
		}
	}
	endRange(&includedStart, len(mem.txs)-1, &ri.Included) // stop counting included txs
	endRange(&invalidStart, len(mem.txs)-1, &ri.Invalid)   // stop counting invalid txs

	// We're done!
	log.Info("New txs", "txs", validTxs, "oldTxs", mem.txs)
	mem.txs = validTxs
	return ri
}
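// ResetForBlockAndState above returns a ResetInfo and uses startRange/endRange
// helpers that are not shown in this listing. The definitions below are a
// minimal inferred sketch: the type and field names follow the usage above,
// and endRange assumes i is the first index past the run (consistent with the
// calls made inside the loop); the real helpers may use a different convention,
// particularly for the trailing calls after the loop.

// Range describes a contiguous run of indices into the old mem.txs slice.
type Range struct {
	Start  int // index of the first tx in the run
	Length int // number of txs in the run
}

// ResetInfo reports which spans of the old mempool were included in the block
// and which became invalid, so peer broadcast routines can adjust their
// positions without rescanning the whole mempool.
type ResetInfo struct {
	Height   int
	Included []Range
	Invalid  []Range
}

// startRange opens a run at index i unless one is already open.
func startRange(start *int, i int) {
	if *start < 0 {
		*start = i
	}
}

// endRange closes an open run that ended just before index i and records it.
func endRange(start *int, i int, ranges *[]Range) {
	if *start >= 0 {
		*ranges = append(*ranges, Range{Start: *start, Length: i - *start})
		*start = -1
	}
}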
// "block" is the new block being committed.
// "state" is the result of state.AppendBlock("block").
// Txs that are present in "block" are discarded from mempool.
// Txs that have become invalid in the new "state" are also discarded.
func (mem *Mempool) ResetForBlockAndState(block *types.Block, state *sm.State) {
	mem.mtx.Lock()
	defer mem.mtx.Unlock()
	mem.state = state.Copy()
	mem.cache = sm.NewBlockCache(mem.state)

	// First, create a lookup map of txns in new block.
	blockTxsMap := make(map[string]struct{})
	for _, tx := range block.Data.Txs {
		blockTxsMap[string(types.TxID(state.ChainID, tx))] = struct{}{}
	}

	// Next, filter all txs from mem.txs that are in blockTxsMap.
	txs := []types.Tx{}
	for _, tx := range mem.txs {
		txID := types.TxID(state.ChainID, tx)
		if _, ok := blockTxsMap[string(txID)]; ok {
			log.Debug("Filter out, already committed", "tx", tx, "txID", txID)
			continue
		} else {
			log.Debug("Filter in, still new", "tx", tx, "txID", txID)
			txs = append(txs, tx)
		}
	}

	// Next, filter all txs that aren't valid given new state.
	validTxs := []types.Tx{}
	for _, tx := range txs {
		err := sm.ExecTx(mem.cache, tx, false, nil)
		if err == nil {
			log.Debug("Filter in, valid", "tx", tx)
			validTxs = append(validTxs, tx)
		} else {
			// tx is no longer valid.
			log.Debug("Filter out, no longer valid", "tx", tx, "error", err)
		}
	}

	// We're done!
	log.Debug("New txs", "txs", validTxs, "oldTxs", mem.txs)
	mem.txs = validTxs
}
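// Hypothetical call-site sketch (not taken from the original code): once a
// block has been committed and applied to produce the new state, the mempool
// is reset against both so it drops committed txs and re-validates the rest.
//
//	// block has just been committed; state already reflects it
//	// (per the doc comment: state is the result of state.AppendBlock(block)).
//	mem.ResetForBlockAndState(block, state)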