// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain.  This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {

	// The offset and length of the transactions within the serialized
	// block.
	txLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// Get the internal block ID associated with the block.
	blockID, err := dbFetchBlockIDByHash(dbTx, block.Hash())
	if err != nil {
		return err
	}

	// Build all of the address to transaction mappings in a local map.
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, view)

	// Add all of the index entries for each address.
	addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		for _, txIdx := range txIdxs {
			err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
				blockID, txLocs[txIdx])
			if err != nil {
				return err
			}
		}
	}

	return nil
}
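// The index manager drives ConnectBlock from inside a single database
// transaction, so the bucket writes above either all land or all roll back.
// Below is a minimal sketch of how such a call could be wrapped, assuming a
// db value satisfying database.DB; the manager's real code differs and the
// surrounding variables (block, view, addrIdx) are placeholders.
func connectBlockSketch(db database.DB, addrIdx *AddrIndex, block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {

	// Update runs the closure in one atomic transaction, so a failure in
	// the indexer leaves no partial address index entries behind.
	return db.Update(func(dbTx database.Tx) error {
		return addrIdx.ConnectBlock(dbTx, block, view)
	})
}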
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *btcutil.Block, blockID uint32) error {
	// The offset and length of the transactions within the serialized
	// block.
	txLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// As an optimization, allocate a single slice big enough to hold all
	// of the serialized transaction index entries for the block and
	// serialize them directly into the slice.  Then, pass the appropriate
	// subslice to the database to be written.  This approach significantly
	// cuts down on the number of required allocations.
	offset := 0
	serializedValues := make([]byte, len(block.Transactions())*txEntrySize)
	for i, tx := range block.Transactions() {
		putTxIndexEntry(serializedValues[offset:], blockID, txLocs[i])
		endOffset := offset + txEntrySize
		err := dbPutTxIndexEntry(dbTx, tx.Hash(),
			serializedValues[offset:endOffset:endOffset])
		if err != nil {
			return err
		}
		offset += txEntrySize
	}

	return nil
}
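// The full slice expression serializedValues[offset:endOffset:endOffset] above
// is what makes sharing one backing array safe: it caps the subslice's
// capacity at the entry boundary so nothing appended to it can spill into the
// next entry's bytes.  A small standalone illustration of the difference
// (not part of the index code):
func sliceCapSketch() {
	const entrySize = 4
	buf := make([]byte, 3*entrySize)

	// A two-index slice keeps capacity out to the end of buf, so an append
	// on it could overwrite the following entry in place.
	loose := buf[0:entrySize]
	_ = cap(loose) // 12

	// The three-index form caps capacity at the entry boundary, forcing
	// any growth to reallocate instead of clobbering shared memory.
	capped := buf[0:entrySize:entrySize]
	_ = cap(capped) // 4
}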
// indexBlockAddrs returns a populated index of all the transactions in the
// passed block based on the addresses involved in each transaction.
func (a *addrIndexer) indexBlockAddrs(blk *btcutil.Block) (database.BlockAddrIndex, error) {
	addrIndex := make(database.BlockAddrIndex)
	txLocs, err := blk.TxLoc()
	if err != nil {
		return nil, err
	}

	for txIdx, tx := range blk.Transactions() {
		// Tx's offset and length in the block.
		locInBlock := &txLocs[txIdx]

		// Coinbases don't have any inputs.
		if !blockchain.IsCoinBase(tx) {
			// Index the SPK's of each input's previous outpoint
			// transaction.
			for _, txIn := range tx.MsgTx().TxIn {
				// Lookup and fetch the referenced output's tx.
				prevOut := txIn.PreviousOutPoint
				txList, err := a.server.db.FetchTxBySha(&prevOut.Hash)
				if err != nil {
					adxrLog.Errorf("Error fetching tx %v: %v",
						prevOut.Hash, err)
					return nil, err
				}
				if len(txList) == 0 {
					return nil, fmt.Errorf("transaction %v not found",
						prevOut.Hash)
				}

				prevOutTx := txList[len(txList)-1]
				inputOutPoint := prevOutTx.Tx.TxOut[prevOut.Index]

				indexScriptPubKey(addrIndex, inputOutPoint.PkScript, locInBlock)
			}
		}

		for _, txOut := range tx.MsgTx().TxOut {
			indexScriptPubKey(addrIndex, txOut.PkScript, locInBlock)
		}
	}
	return addrIndex, nil
}
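// For reference, indexScriptPubKey conceptually turns a script into index
// entries keyed by hash160.  The sketch below is illustrative only and makes
// assumptions: the legacy database.BlockAddrIndex map type (20-byte key to
// tx locations), mainnet params, and imports of txscript, chaincfg, btcutil,
// wire, and golang.org/x/crypto/ripemd160.  The real helper may differ.
func indexScriptPubKeySketch(idx database.BlockAddrIndex, pkScript []byte,
	loc *wire.TxLoc) {

	// Decode every address the output script pays to.  Non-standard or
	// unparsable scripts simply contribute no index entries.
	_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript,
		&chaincfg.MainNetParams)
	if err != nil {
		return
	}

	for _, addr := range addrs {
		// Key the index by the 20-byte hash160 so every encoding of
		// the same key collapses to a single entry.  Pay-to-pubkey
		// outputs are first converted to their pubkey-hash form.
		data := addr.ScriptAddress()
		if pk, ok := addr.(*btcutil.AddressPubKey); ok {
			data = pk.AddressPubKeyHash().ScriptAddress()
		}

		var key [ripemd160.Size]byte
		copy(key[:], data)
		idx[key] = append(idx[key], loc)
	}
}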
// InsertBlock inserts raw block and transaction data from a block into the
// database.  The first block inserted into the database will be treated as the
// genesis block.  Every subsequent block insert requires the referenced parent
// block to already exist.
func (db *LevelDb) InsertBlock(block *btcutil.Block) (height int64, rerr error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	blocksha := block.Sha()
	mblock := block.MsgBlock()
	rawMsg, err := block.Bytes()
	if err != nil {
		log.Warnf("Failed to obtain raw bytes for block %v", blocksha)
		return 0, err
	}
	txloc, err := block.TxLoc()
	if err != nil {
		log.Warnf("Failed to obtain tx locations for block %v", blocksha)
		return 0, err
	}

	// Insert block into database.
	newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock,
		rawMsg)
	if err != nil {
		log.Warnf("Failed to insert block %v %v %v", blocksha,
			&mblock.Header.PrevBlock, err)
		return 0, err
	}

	// At least two blocks in the past were generated by faulty miners in
	// which the sha of a transaction duplicates that of a transaction in an
	// earlier block.  Detect this condition and 'accept' the block.
	for txidx, tx := range mblock.Transactions {
		txsha, err := block.TxSha(txidx)
		if err != nil {
			log.Warnf("failed to compute tx name block %v idx %v err %v",
				blocksha, txidx, err)
			return 0, err
		}

		spentbuflen := (len(tx.TxOut) + 7) / 8
		spentbuf := make([]byte, spentbuflen)
		if len(tx.TxOut)%8 != 0 {
			for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
				spentbuf[spentbuflen-1] |= (byte(1) << i)
			}
		}

		err = db.insertTx(txsha, newheight, txloc[txidx].TxStart,
			txloc[txidx].TxLen, spentbuf)
		if err != nil {
			log.Warnf("block %v idx %v failed to insert tx %v %v err %v",
				blocksha, newheight, &txsha, txidx, err)
			return 0, err
		}

		// Some old blocks contain duplicate transactions.  Attempt to
		// cleanly bypass this problem by marking the first as fully
		// spent.
		// http://blockexplorer.com/b/91812 dup in 91842
		// http://blockexplorer.com/b/91722 dup in 91880
		if newheight == 91812 {
			dupsha, err := wire.NewShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599")
			if err != nil {
				panic("invalid sha string in source")
			}
			if txsha.IsEqual(dupsha) {
				// Mark TxOut[0] as spent.
				po := wire.NewOutPoint(dupsha, 0)
				txI := wire.NewTxIn(po, []byte("garbage"))

				var spendtx wire.MsgTx
				spendtx.AddTxIn(txI)
				err = db.doSpend(&spendtx)
				if err != nil {
					log.Warnf("block %v idx %v failed to spend tx %v %v err %v",
						blocksha, newheight, &txsha, txidx, err)
				}
			}
		}
		if newheight == 91722 {
			dupsha, err := wire.NewShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468")
			if err != nil {
				panic("invalid sha string in source")
			}
			if txsha.IsEqual(dupsha) {
				// Mark TxOut[0] as spent.
				po := wire.NewOutPoint(dupsha, 0)
				txI := wire.NewTxIn(po, []byte("garbage"))

				var spendtx wire.MsgTx
				spendtx.AddTxIn(txI)
				err = db.doSpend(&spendtx)
				if err != nil {
					log.Warnf("block %v idx %v failed to spend tx %v %v err %v",
						blocksha, newheight, &txsha, txidx, err)
				}
			}
		}

		err = db.doSpend(tx)
		if err != nil {
			log.Warnf("block %v idx %v failed to spend tx %v %v err %v",
				blocksha, newheight, &txsha, txidx, err)
			return 0, err
		}
	}
	return newheight, nil
}
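// The spent-output bitmap built above allocates one bit per output, rounded
// up to whole bytes, and pre-sets the unused high bits of the final byte so
// the transaction reads as fully spent once every real output bit is set.
// A tiny self-contained illustration of that padding (plain Go, independent
// of the database code):
func spentBitmapSketch(numOutputs int) []byte {
	spentbuflen := (numOutputs + 7) / 8
	spentbuf := make([]byte, spentbuflen)
	if numOutputs%8 != 0 {
		// Mark the padding bits as spent.  With 5 outputs the single
		// byte becomes 0b11100000: bits 0-4 track real outputs, bits
		// 5-7 are padding.
		for i := uint(numOutputs % 8); i < 8; i++ {
			spentbuf[spentbuflen-1] |= byte(1) << i
		}
	}
	return spentbuf
}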
// testAddrIndexOperations ensures that all normal operations concerning
// the optional address index function correctly.
func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *btcutil.Block,
	newestSha *wire.ShaHash, newestBlockIdx int32) {

	// Metadata about the current addr index state should be unset.
	sha, height, err := db.FetchAddrIndexTip()
	if err != database.ErrAddrIndexDoesNotExist {
		t.Fatalf("Address index metadata shouldn't be in db, hasn't been built up yet.")
	}

	var zeroHash wire.ShaHash
	if !sha.IsEqual(&zeroHash) {
		t.Fatalf("AddrIndexTip wrong hash got: %s, want %s", sha, &zeroHash)
	}

	if height != -1 {
		t.Fatalf("Addrindex not built up, yet a block index tip has been set to: %d.", height)
	}

	// Test enforcement of constraints for "limit" and "skip".
	var fakeAddr btcutil.Address
	_, err = db.FetchTxsForAddr(fakeAddr, -1, 0)
	if err == nil {
		t.Fatalf("Negative value for skip passed, should return an error")
	}

	_, err = db.FetchTxsForAddr(fakeAddr, 0, -1)
	if err == nil {
		t.Fatalf("Negative value for limit passed, should return an error")
	}

	// Simple test to index the output(s) of the first tx.
	testIndex := make(database.BlockAddrIndex)
	testTx, err := newestBlock.Tx(0)
	if err != nil {
		t.Fatalf("Block has no transactions, unable to test addr "+
			"indexing, err %v", err)
	}

	// Extract the dest addr from the tx.
	_, testAddrs, _, err := txscript.ExtractPkScriptAddrs(
		testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams)
	if err != nil {
		t.Fatalf("Unable to decode tx output, err %v", err)
	}

	// Extract the hash160 from the output script.
	var hash160Bytes [ripemd160.Size]byte
	testHash160 := testAddrs[0].(*btcutil.AddressPubKey).AddressPubKeyHash().ScriptAddress()
	copy(hash160Bytes[:], testHash160[:])

	// Create a fake index.
	blktxLoc, _ := newestBlock.TxLoc()
	testIndex[hash160Bytes] = []*wire.TxLoc{&blktxLoc[0]}

	// Insert our test addr index into the DB.
	err = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex)
	if err != nil {
		t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
			" addrs for block #%d (%s) "+
			"err %v", newestBlockIdx, newestSha, err)
	}

	// Chain tip of the address index should've been updated.
	assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)

	// Check index retrieval.
	txReplies, err := db.FetchTxsForAddr(testAddrs[0], 0, 1000)
	if err != nil {
		t.Fatalf("FetchTxsForAddr failed to correctly fetch txs for an "+
			"address, err %v", err)
	}
	// Should have one reply.
	if len(txReplies) != 1 {
		t.Fatalf("Failed to properly index tx by address.")
	}

	// Our test tx and indexed tx should have the same sha.
	indexedTx := txReplies[0]
	if !bytes.Equal(indexedTx.Sha.Bytes(), testTx.Sha().Bytes()) {
		t.Fatalf("Failed to fetch proper indexed tx. Expected sha %v, "+
			"fetched %v", testTx.Sha(), indexedTx.Sha)
	}

	// Shut down DB.
	db.Sync()
	db.Close()

	// Re-open; the tip should still be updated to the current height and sha.
	db, err = database.OpenDB("leveldb", "tstdbopmode")
	if err != nil {
		t.Fatalf("Unable to re-open created db, err %v", err)
	}
	assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)

	// Delete the entire index.
	err = db.DeleteAddrIndex()
	if err != nil {
		t.Fatalf("Couldn't delete address index, err %v", err)
	}

	// Former index should no longer exist.
	txReplies, err = db.FetchTxsForAddr(testAddrs[0], 0, 1000)
	if err != nil {
		t.Fatalf("Unable to fetch transactions for address: %v", err)
	}
	if len(txReplies) != 0 {
		t.Fatalf("Address index was not successfully deleted. "+
			"Should have 0 tx's indexed, %v were returned.",
			len(txReplies))
	}

	// Tip should be blanked out.
	if _, _, err := db.FetchAddrIndexTip(); err != database.ErrAddrIndexDoesNotExist {
		t.Fatalf("Address index was not fully deleted.")
	}
}