func loadBlocks(t *testing.T, file string) (blocks []*dcrutil.Block, err error) {
	fi, err := os.Open(file)
	if err != nil {
		t.Errorf("failed to open file %v, err %v", file, err)
		return nil, err
	}
	bcStream := bzip2.NewReader(fi)
	defer fi.Close()

	// Create a buffer of the read file
	bcBuf := new(bytes.Buffer)
	bcBuf.ReadFrom(bcStream)

	// Create decoder from the buffer and a map to store the data
	bcDecoder := gob.NewDecoder(bcBuf)
	blockchain := make(map[int64][]byte)

	// Decode the blockchain into the map
	if err := bcDecoder.Decode(&blockchain); err != nil {
		t.Errorf("error decoding test blockchain")
	}

	blocks = make([]*dcrutil.Block, 0, len(blockchain))
	for height := int64(1); height < int64(len(blockchain)); height++ {
		block, err := dcrutil.NewBlockFromBytes(blockchain[height])
		if err != nil {
			t.Errorf("failed to parse block %v", height)
			return nil, err
		}
		block.SetHeight(height - 1)
		blocks = append(blocks, block)
	}

	return
}
// TestNewBlockFromBytes tests creation of a Block from serialized bytes.
func TestNewBlockFromBytes(t *testing.T) {
	// Serialize the test block.
	var block100000Buf bytes.Buffer
	err := Block100000.Serialize(&block100000Buf)
	if err != nil {
		t.Errorf("Serialize: %v", err)
	}
	block100000Bytes := block100000Buf.Bytes()

	// Create a new block from the serialized bytes.
	b, err := dcrutil.NewBlockFromBytes(block100000Bytes)
	if err != nil {
		t.Errorf("NewBlockFromBytes: %v", err)
		return
	}

	// Ensure we get the same data back out.
	serializedBytes, err := b.Bytes()
	if err != nil {
		t.Errorf("Bytes: %v", err)
		return
	}
	if !bytes.Equal(serializedBytes, block100000Bytes) {
		t.Errorf("Bytes: wrong bytes - got %v, want %v",
			spew.Sdump(serializedBytes),
			spew.Sdump(block100000Bytes))
	}

	// Ensure the generated MsgBlock is correct.
	if msgBlock := b.MsgBlock(); !reflect.DeepEqual(msgBlock, &Block100000) {
		t.Errorf("MsgBlock: mismatched MsgBlock - got %v, want %v",
			spew.Sdump(msgBlock), spew.Sdump(&Block100000))
	}
}
// processBlock potentially imports the block into the database. It first
// deserializes the raw block while checking for errors. Already known blocks
// are skipped and orphan blocks are considered errors. Finally, it runs the
// block through the chain rules to ensure it follows all rules and matches
// up to the known checkpoint. Returns whether the block was imported along
// with any potential errors.
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
	// Deserialize the block which includes checks for malformed blocks.
	block, err := dcrutil.NewBlockFromBytes(serializedBlock)
	if err != nil {
		return false, err
	}

	// Update progress statistics.
	bi.lastBlockTime = block.MsgBlock().Header.Timestamp
	bi.receivedLogTx += int64(len(block.MsgBlock().Transactions))

	// Skip blocks that already exist.
	blockSha := block.Sha()
	exists, err := bi.db.ExistsSha(blockSha)
	if err != nil {
		return false, err
	}
	if exists {
		return false, nil
	}

	// Don't bother trying to process orphans.
	prevHash := &block.MsgBlock().Header.PrevBlock
	if !prevHash.IsEqual(&zeroHash) {
		exists, err := bi.db.ExistsSha(prevHash)
		if err != nil {
			return false, err
		}
		if !exists {
			return false, fmt.Errorf("import file contains block "+
				"%v which does not link to the available "+
				"block chain", prevHash)
		}
	}

	// Ensure the block follows all of the chain rules and matches up to the
	// known checkpoints.
	_, isOrphan, err := bi.chain.ProcessBlock(block, bi.medianTime,
		blockchain.BFFastAdd)
	if err != nil {
		return false, err
	}
	if isOrphan {
		return false, fmt.Errorf("import file contains an orphan "+
			"block: %v", blockSha)
	}

	return true, nil
}
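// The following is a minimal, hypothetical sketch (not part of the source
// above) of how a caller might drive processBlock over a stream of serialized
// blocks read from an import file. The readSerializedBlock helper and the
// blocksImported counter are assumptions made purely for illustration.
func (bi *blockImporter) importLoop(r io.Reader) error {
	for {
		// readSerializedBlock is assumed to return (nil, nil) once the
		// input is exhausted and a raw serialized block otherwise.
		serializedBlock, err := readSerializedBlock(r)
		if err != nil {
			return err
		}
		if serializedBlock == nil {
			return nil
		}

		imported, err := bi.processBlock(serializedBlock)
		if err != nil {
			return err
		}
		if imported {
			bi.blocksImported++
		}
	}
}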
// fetchBlockBySha - return a dcrutil Block
// Must be called with db lock held.
func (db *LevelDb) fetchBlockBySha(sha *chainhash.Hash) (blk *dcrutil.Block, err error) {
	buf, height, err := db.fetchSha(sha)
	if err != nil {
		return
	}

	blk, err = dcrutil.NewBlockFromBytes(buf)
	if err != nil {
		return
	}
	blk.SetHeight(height)

	return
}
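// A hypothetical sketch, not taken from the source: because fetchBlockBySha
// must be called with the database lock held, an exported wrapper along these
// lines is assumed to acquire the lock before delegating to it.
func (db *LevelDb) FetchBlockBySha(sha *chainhash.Hash) (*dcrutil.Block, error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	return db.fetchBlockBySha(sha)
}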
func TestMerkleBlock3(t *testing.T) { blockStr := "0100000073cf056852529ffadc50b49589218795adc4d3f24170950d49f201000000000033fd46dda0acfa5c0651c58bee00362b04186c5b4d1045d37751b25779148649000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000ffff011b00c2eb0b00000000000100007e0100006614b956bee4fc44442bf144050552b3010000000000000000000000000000000000000000000000000000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff00ffffffff03fa1a981200000000000017a914f5916158e3e2c4551c1796708db8367207ed13bb8700000000000000000000266a24000100000000000000000000000000000000000000000000000000004dfb774daa8f3a76dea1906f0000000000001976a914b74b7476bdbcf03d18f12cca1766b0ddfd030cdd88ac000000000000000001d8bc28820000000000000000ffffffff0800002f646372642f00" blockBytes, err := hex.DecodeString(blockStr) if err != nil { t.Errorf("TestMerkleBlock3 DecodeString failed: %v", err) return } blk, err := dcrutil.NewBlockFromBytes(blockBytes) if err != nil { t.Errorf("TestMerkleBlock3 NewBlockFromBytes failed: %v", err) return } f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll) inputStr := "4986147957b25177d345104d5b6c18042b3600ee8bc551065cfaaca0dd46fd33" hash, err := chainhash.NewHashFromStr(inputStr) if err != nil { t.Errorf("TestMerkleBlock3 NewHashFromStr failed: %v", err) return } f.AddHash(hash) mBlock, _ := bloom.NewMerkleBlock(blk, f) wantStr := "0100000073cf056852529ffadc50b49589218795adc4d3f24170950d49f201000000000033fd46dda0acfa5c0651c58bee00362b04186c5b4d1045d37751b25779148649000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000ffff011b00c2eb0b00000000000100007e0100006614b956bee4fc44442bf144050552b3010000000000000000000000000000000000000000000000000000000100000001d0f51e5a4978d736eb3d4a8d615bee74943756b4745d29b27ab2943bc1307cc800000000000100" want, err := hex.DecodeString(wantStr) if err != nil { t.Errorf("TestMerkleBlock3 DecodeString failed: %v", err) return } got := bytes.NewBuffer(nil) err = mBlock.BtcEncode(got, wire.ProtocolVersion) if err != nil { t.Errorf("TestMerkleBlock3 BtcEncode failed: %v", err) return } if !bytes.Equal(want, got.Bytes()) { t.Errorf("TestMerkleBlock3 failed merkle block comparison: "+ "got %v want %v", got.Bytes(), want) return } }
// loadBlocks loads the blocks contained in the testdata directory and returns
// a slice of them.
func loadBlocks(t *testing.T, dataFile string, network wire.CurrencyNet) ([]*dcrutil.Block, error) {
	// Open the file that contains the blocks for reading.
	fi, err := os.Open(dataFile)
	if err != nil {
		t.Errorf("failed to open file %v, err %v", dataFile, err)
		return nil, err
	}
	defer func() {
		if err := fi.Close(); err != nil {
			t.Errorf("failed to close file %v %v", dataFile, err)
		}
	}()
	bcStream := bzip2.NewReader(fi)

	// Create a buffer of the read file.
	bcBuf := new(bytes.Buffer)
	bcBuf.ReadFrom(bcStream)

	// Create decoder from the buffer and a map to store the data.
	bcDecoder := gob.NewDecoder(bcBuf)
	blockChain := make(map[int64][]byte)

	// Decode the blockchain into the map.
	if err := bcDecoder.Decode(&blockChain); err != nil {
		t.Errorf("error decoding test blockchain: %v", err.Error())
	}

	// Deserialize blocks 0 through 168.
	blocks := make([]*dcrutil.Block, 169)
	for i := 0; i <= 168; i++ {
		bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)])
		if err != nil {
			t.Errorf("NewBlockFromBytes error: %v", err.Error())
		}
		bl.SetHeight(int64(i))
		blocks[i] = bl
	}

	return blocks, nil
}
// loadReorgBlocks reads files containing decred block data (bzipped but
// otherwise in the format bitcoind writes) from disk and returns them as an
// array of dcrutil.Block. This is copied from the blockchain package, which
// itself largely borrowed it from the test code in this package.
func loadReorgBlocks(filename string) ([]*dcrutil.Block, error) {
	filename = filepath.Join("../blockchain/testdata/", filename)
	fi, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	bcStream := bzip2.NewReader(fi)
	defer fi.Close()

	// Create a buffer of the read file
	bcBuf := new(bytes.Buffer)
	bcBuf.ReadFrom(bcStream)

	// Create decoder from the buffer and a map to store the data
	bcDecoder := gob.NewDecoder(bcBuf)
	blockchain := make(map[int64][]byte)

	// Decode the blockchain into the map
	if err := bcDecoder.Decode(&blockchain); err != nil {
		return nil, err
	}

	var block *dcrutil.Block
	blocks := make([]*dcrutil.Block, 0, len(blockchain))
	for height := int64(0); height < int64(len(blockchain)); height++ {
		block, err = dcrutil.NewBlockFromBytes(blockchain[height])
		if err != nil {
			return blocks, err
		}
		block.SetHeight(height)
		blocks = append(blocks, block)
	}

	return blocks, nil
}
// TestBlockErrors tests the error paths for the Block API.
func TestBlockErrors(t *testing.T) {
	// Ensure out of range errors are as expected.
	wantErr := "transaction index -1 is out of range - max 3"
	testErr := dcrutil.OutOfRangeError(wantErr)
	if testErr.Error() != wantErr {
		t.Errorf("OutOfRangeError: wrong error - got %v, want %v",
			testErr.Error(), wantErr)
	}

	// Serialize the test block.
	var block100000Buf bytes.Buffer
	err := Block100000.Serialize(&block100000Buf)
	if err != nil {
		t.Errorf("Serialize: %v", err)
	}
	block100000Bytes := block100000Buf.Bytes()

	// Create a new block from the serialized bytes.
	b, err := dcrutil.NewBlockFromBytes(block100000Bytes)
	if err != nil {
		t.Errorf("NewBlockFromBytes: %v", err)
		return
	}

	// Truncate the block byte buffer to force errors.
	shortBytes := block100000Bytes[:100]
	_, err = dcrutil.NewBlockFromBytes(shortBytes)
	if err != io.EOF {
		t.Errorf("NewBlockFromBytes: did not get expected error - "+
			"got %v, want %v", err, io.EOF)
	}

	// Ensure TxSha returns expected error on invalid indices.
	_, err = b.TxSha(-1)
	if _, ok := err.(dcrutil.OutOfRangeError); !ok {
		t.Errorf("TxSha: wrong error - got: %v <%T>, "+
			"want: <%T>", err, err, dcrutil.OutOfRangeError(""))
	}
	_, err = b.TxSha(len(Block100000.Transactions) + 1)
	if _, ok := err.(dcrutil.OutOfRangeError); !ok {
		t.Errorf("TxSha: wrong error - got: %v <%T>, "+
			"want: <%T>", err, err, dcrutil.OutOfRangeError(""))
	}

	// Ensure Tx returns expected error on invalid indices.
	_, err = b.Tx(-1)
	if _, ok := err.(dcrutil.OutOfRangeError); !ok {
		t.Errorf("Tx: wrong error - got: %v <%T>, "+
			"want: <%T>", err, err, dcrutil.OutOfRangeError(""))
	}
	_, err = b.Tx(len(Block100000.Transactions) + 1)
	if _, ok := err.(dcrutil.OutOfRangeError); !ok {
		t.Errorf("Tx: wrong error - got: %v <%T>, "+
			"want: <%T>", err, err, dcrutil.OutOfRangeError(""))
	}

	// Ensure TxLoc returns expected error with short byte buffer.
	// This makes use of the test package only function, SetBlockBytes, to
	// inject a short byte buffer.
	b.SetBlockBytes(shortBytes)
	_, _, err = b.TxLoc()
	if err != io.EOF {
		t.Errorf("TxLoc: did not get expected error - "+
			"got %v, want %v", err, io.EOF)
	}
}
func TestTicketDB(t *testing.T) { // Declare some useful variables testBCHeight := int64(168) // Set up a DB database, err := database.CreateDB("leveldb", "ticketdb_test") if err != nil { t.Errorf("Db create error: %v", err.Error()) } // Make a new tmdb to fill with dummy live and used tickets var tmdb stake.TicketDB tmdb.Initialize(simNetParams, database) filename := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2") fi, err := os.Open(filename) bcStream := bzip2.NewReader(fi) defer fi.Close() // Create a buffer of the read file bcBuf := new(bytes.Buffer) bcBuf.ReadFrom(bcStream) // Create decoder from the buffer and a map to store the data bcDecoder := gob.NewDecoder(bcBuf) blockchain := make(map[int64][]byte) // Decode the blockchain into the map if err := bcDecoder.Decode(&blockchain); err != nil { t.Errorf("error decoding test blockchain") } var CopyOfMapsAtBlock50, CopyOfMapsAtBlock168 stake.TicketMaps var ticketsToSpendIn167 []chainhash.Hash var sortedTickets167 []*stake.TicketData for i := int64(0); i <= testBCHeight; i++ { block, err := dcrutil.NewBlockFromBytes(blockchain[i]) if err != nil { t.Errorf("block deserialization error on block %v", i) } block.SetHeight(i) database.InsertBlock(block) tmdb.InsertBlock(block) if i == 50 { // Create snapshot of tmdb at block 50 CopyOfMapsAtBlock50, err = cloneTicketDB(&tmdb) if err != nil { t.Errorf("db cloning at block 50 failure! %v", err) } } // Test to make sure that ticket selection is working correctly. if i == 167 { // Sort the entire list of tickets lexicographically by sorting // each bucket and then appending it to the list. Then store it // to use in the next block. totalTickets := 0 sortedSlice := make([]*stake.TicketData, 0) for i := 0; i < stake.BucketsSize; i++ { tix, err := tmdb.DumpLiveTickets(uint8(i)) if err != nil { t.Errorf("error dumping live tickets") } mapLen := len(tix) totalTickets += mapLen tempTdSlice := stake.NewTicketDataSlice(mapLen) itr := 0 // Iterator for _, td := range tix { tempTdSlice[itr] = td itr++ } sort.Sort(tempTdSlice) sortedSlice = append(sortedSlice, tempTdSlice...) } sortedTickets167 = sortedSlice } if i == 168 { parentBlock, err := dcrutil.NewBlockFromBytes(blockchain[i-1]) if err != nil { t.Errorf("block deserialization error on block %v", i-1) } pbhB, err := parentBlock.MsgBlock().Header.Bytes() if err != nil { t.Errorf("block header serialization error") } prng := stake.NewHash256PRNG(pbhB) ts, err := stake.FindTicketIdxs(int64(len(sortedTickets167)), int(simNetParams.TicketsPerBlock), prng) if err != nil { t.Errorf("failure on FindTicketIdxs") } for _, idx := range ts { ticketsToSpendIn167 = append(ticketsToSpendIn167, sortedTickets167[idx].SStxHash) } // Make sure that the tickets that were supposed to be spent or // missed were. spentTix, err := tmdb.DumpSpentTickets(i) if err != nil { t.Errorf("DumpSpentTickets failure") } for _, h := range ticketsToSpendIn167 { if _, ok := spentTix[h]; !ok { t.Errorf("missing ticket %v that should have been missed "+ "or spent in block %v", h, i) } } // Create snapshot of tmdb at block 168 CopyOfMapsAtBlock168, err = cloneTicketDB(&tmdb) if err != nil { t.Errorf("db cloning at block 168 failure! 
%v", err) } } } // Remove five blocks from HEAD~1 _, _, _, err = tmdb.RemoveBlockToHeight(50) if err != nil { t.Errorf("error: %v", err) } // Test if the roll back was symmetric to the earlier snapshot if !reflect.DeepEqual(tmdb.DumpMapsPointer(), CopyOfMapsAtBlock50) { t.Errorf("The td did not restore to a previous block height correctly!") } // Test rescanning a ticket db err = tmdb.RescanTicketDB() if err != nil { t.Errorf("rescanticketdb err: %v", err.Error()) } // Test if the db file storage was symmetric to the earlier snapshot if !reflect.DeepEqual(tmdb.DumpMapsPointer(), CopyOfMapsAtBlock168) { t.Errorf("The td did not rescan to HEAD correctly!") } err = os.Mkdir("testdata/", os.FileMode(0700)) if err != nil { t.Error(err) } // Store the ticket db to disk err = tmdb.Store("testdata/", "testtmdb") if err != nil { t.Errorf("error: %v", err) } var tmdb2 stake.TicketDB err = tmdb2.LoadTicketDBs("testdata/", "testtmdb", simNetParams, database) if err != nil { t.Errorf("error: %v", err) } // Test if the db file storage was symmetric to previously rescanned one if !reflect.DeepEqual(tmdb.DumpMapsPointer(), tmdb2.DumpMapsPointer()) { t.Errorf("The td did not rescan to a previous block height correctly!") } tmdb2.Close() // Test dumping missing tickets from block 152 missedIn152, _ := chainhash.NewHashFromStr( "84f7f866b0af1cc278cb8e0b2b76024a07542512c76487c83628c14c650de4fa") tmdb.RemoveBlockToHeight(152) missedTix, err := tmdb.DumpMissedTickets() if err != nil { t.Errorf("err dumping missed tix: %v", err.Error()) } if _, exists := missedTix[*missedIn152]; !exists { t.Errorf("couldn't finding missed tx 1 %v in tmdb @ block 152!", missedIn152) } tmdb.RescanTicketDB() // Make sure that the revoked map contains the revoked tx revokedSlice := []*chainhash.Hash{missedIn152} revokedTix, err := tmdb.DumpRevokedTickets() if err != nil { t.Errorf("err dumping missed tix: %v", err.Error()) } if len(revokedTix) != 1 { t.Errorf("revoked ticket map is wrong len, got %v, want %v", len(revokedTix), 1) } _, wasMissedIn152 := revokedTix[*revokedSlice[0]] ticketsRevoked := wasMissedIn152 if !ticketsRevoked { t.Errorf("revoked ticket map did not include tickets missed in " + "block 152 and later revoked") } database.Close() tmdb.Close() os.RemoveAll("ticketdb_test") os.Remove("./ticketdb_test.ver") os.Remove("testdata/testtmdb") os.Remove("testdata") }
// TestReorganization loads a set of test blocks which force a chain
// reorganization to test the block chain handling code.
func TestReorganization(t *testing.T) {
	// Create a new database and chain instance to run tests against.
	chain, teardownFunc, err := chainSetup("reorgunittest", simNetParams)
	if err != nil {
		t.Errorf("Failed to setup chain instance: %v", err)
		return
	}
	defer teardownFunc()

	err = chain.GenerateInitialIndex()
	if err != nil {
		t.Errorf("GenerateInitialIndex: %v", err)
	}

	// The genesis block should fail to connect since it's already
	// inserted.
	genesisBlock := simNetParams.GenesisBlock
	err = chain.CheckConnectBlock(dcrutil.NewBlock(genesisBlock))
	if err == nil {
		t.Errorf("CheckConnectBlock: Did not receive expected error")
	}

	// Load up the rest of the blocks up to HEAD.
	filename := filepath.Join("testdata/", "reorgto179.bz2")
	fi, err := os.Open(filename)
	bcStream := bzip2.NewReader(fi)
	defer fi.Close()

	// Create a buffer of the read file
	bcBuf := new(bytes.Buffer)
	bcBuf.ReadFrom(bcStream)

	// Create decoder from the buffer and a map to store the data
	bcDecoder := gob.NewDecoder(bcBuf)
	blockChain := make(map[int64][]byte)

	// Decode the blockchain into the map
	if err := bcDecoder.Decode(&blockChain); err != nil {
		t.Errorf("error decoding test blockchain: %v", err.Error())
	}

	// Load up the short chain
	timeSource := blockchain.NewMedianTime()
	finalIdx1 := 179
	for i := 1; i < finalIdx1+1; i++ {
		bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)])
		if err != nil {
			t.Errorf("NewBlockFromBytes error: %v", err.Error())
		}
		bl.SetHeight(int64(i))

		_, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone)
		if err != nil {
			t.Errorf("ProcessBlock error: %v", err.Error())
		}
	}

	// Load the long chain and begin loading blocks from that too,
	// forcing a reorganization
	// Load up the rest of the blocks up to HEAD.
	filename = filepath.Join("testdata/", "reorgto180.bz2")
	fi, err = os.Open(filename)
	bcStream = bzip2.NewReader(fi)
	defer fi.Close()

	// Create a buffer of the read file
	bcBuf = new(bytes.Buffer)
	bcBuf.ReadFrom(bcStream)

	// Create decoder from the buffer and a map to store the data
	bcDecoder = gob.NewDecoder(bcBuf)
	blockChain = make(map[int64][]byte)

	// Decode the blockchain into the map
	if err := bcDecoder.Decode(&blockChain); err != nil {
		t.Errorf("error decoding test blockchain: %v", err.Error())
	}

	forkPoint := 131
	finalIdx2 := 180
	for i := forkPoint; i < finalIdx2+1; i++ {
		bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)])
		if err != nil {
			t.Errorf("NewBlockFromBytes error: %v", err.Error())
		}
		bl.SetHeight(int64(i))

		_, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone)
		if err != nil {
			t.Errorf("ProcessBlock error: %v", err.Error())
		}
	}

	// Ensure our blockchain is at the correct best tip
	topBlock, _ := chain.GetTopBlock()
	tipHash := topBlock.Sha()
	expected, _ := chainhash.NewHashFromStr("5ab969d0afd8295b6cd1506f2a310d" +
		"259322015c8bd5633f283a163ce0e50594")
	if *tipHash != *expected {
		t.Errorf("Failed to correctly reorg; expected tip %v, got tip %v",
			expected, tipHash)
	}
	have, err := chain.HaveBlock(expected)
	if !have {
		t.Errorf("missing tip block after reorganization test")
	}
	if err != nil {
		t.Errorf("unexpected error testing for presence of new tip block "+
			"after reorg test: %v", err)
	}

	return
}
func TestFilterInsertP2PubKeyOnly(t *testing.T) { blockStr := "000000004ad131bae9cb9f74b8bcd928" + "a60dfe4dadabeb31b1e79403385f9ac4" + "ccc28b7400429e56f7df2872aaaa0c16" + "221cb09059bd3ea897de156ff51202ff" + "72b2cd8d000000000000000000000000" + "00000000000000000000000000000000" + "00000000010000000000000000000000" + "22000000ffff7f20002d310100000000" + "640000007601000063a0815601000000" + "00000000000000000000000000000000" + "00000000000000000000000000000000" + "00000000010100000001000000000000" + "00000000000000000000000000000000" + "00000000000000000000ffffffff00ff" + "ffffff0380b2e60e00000000000017a9" + "144fa6cbd0dbe5ec407fe4c8ad374e66" + "7771fa0d448700000000000000000000" + "226a2000000000000000000000000000" + "0000009e0453a6ab10610e17a7a5fadc" + "f6c34f002f68590000000000001976a9" + "141b79e6496226f89ad4e049667c1344" + "c16a75815188ac000000000000000001" + "000000000000000000000000ffffffff" + "04deadbeef00" blockBytes, err := hex.DecodeString(blockStr) if err != nil { t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err) return } block, err := dcrutil.NewBlockFromBytes(blockBytes) if err != nil { t.Errorf("TestFilterInsertP2PubKeyOnly NewBlockFromBytes failed: %v", err) return } f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateP2PubkeyOnly) // Generation pubkey inputStr := "04eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c" + "876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a" + "2252247d97a46a91" inputBytes, err := hex.DecodeString(inputStr) if err != nil { t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err) return } f.Add(inputBytes) // Output address of 4th transaction inputStr = "b6efd80d99179f4f4ff6f4dd0a007d018c385d21" inputBytes, err = hex.DecodeString(inputStr) if err != nil { t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err) return } f.Add(inputBytes) // Ignore return value -- this is just used to update the filter. _, _ = bloom.NewMerkleBlock(block, f) // We should match the generation pubkey inputStr = "147caa76786596590baa4e98f5d9f48b86c7765e489f7a6ff3360fe5c674360b" sha, err := chainhash.NewHashFromStr(inputStr) if err != nil { t.Errorf("TestMerkleBlockP2PubKeyOnly NewShaHashFromStr failed: %v", err) return } outpoint := wire.NewOutPoint(sha, 0, dcrutil.TxTreeRegular) if !f.MatchesOutPoint(outpoint) { t.Errorf("TestMerkleBlockP2PubKeyOnly didn't match the generation "+ "outpoint %s", inputStr) return } // We should not match the 4th transaction, which is not p2pk inputStr = "02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041" sha, err = chainhash.NewHashFromStr(inputStr) if err != nil { t.Errorf("TestMerkleBlockP2PubKeyOnly NewShaHashFromStr failed: %v", err) return } outpoint = wire.NewOutPoint(sha, 0, dcrutil.TxTreeRegular) if f.MatchesOutPoint(outpoint) { t.Errorf("TestMerkleBlockP2PubKeyOnly matched outpoint %s", inputStr) return } }
// reorgTestForced tests a forced reorganization of a single block at HEAD.
func reorgTestForced(t *testing.T) {
	// Create a new database and chain instance to run tests against.
	chain, teardownFunc, err := chainSetup("reorgunittest", simNetParams)
	if err != nil {
		t.Errorf("Failed to setup chain instance: %v", err)
		return
	}
	defer teardownFunc()

	// The genesis block should fail to connect since it's already
	// inserted.
	genesisBlock := simNetParams.GenesisBlock
	err = chain.CheckConnectBlock(dcrutil.NewBlock(genesisBlock))
	if err == nil {
		t.Errorf("CheckConnectBlock: Did not receive expected error")
	}

	// Load up the rest of the blocks up to HEAD.
	filename := filepath.Join("testdata/", "reorgto179.bz2")
	fi, err := os.Open(filename)
	bcStream := bzip2.NewReader(fi)
	defer fi.Close()

	// Create a buffer of the read file
	bcBuf := new(bytes.Buffer)
	bcBuf.ReadFrom(bcStream)

	// Create decoder from the buffer and a map to store the data
	bcDecoder := gob.NewDecoder(bcBuf)
	blockChain := make(map[int64][]byte)

	// Decode the blockchain into the map
	if err := bcDecoder.Decode(&blockChain); err != nil {
		t.Errorf("error decoding test blockchain: %v", err.Error())
	}

	// Load up the short chain
	finalIdx1 := 131
	var oldBestHash *chainhash.Hash
	for i := 1; i < finalIdx1+1; i++ {
		bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)])
		if err != nil {
			t.Fatalf("NewBlockFromBytes error: %v", err.Error())
		}
		bl.SetHeight(int64(i))
		if i == finalIdx1 {
			oldBestHash = bl.Sha()
		}

		_, _, err = chain.ProcessBlock(bl, blockchain.BFNone)
		if err != nil {
			t.Fatalf("ProcessBlock error at height %v: %v", i, err.Error())
		}
	}

	// Load the long chain and begin loading blocks from that too,
	// forcing a reorganization
	// Load up the rest of the blocks up to HEAD.
	filename = filepath.Join("testdata/", "reorgto180.bz2")
	fi, err = os.Open(filename)
	bcStream = bzip2.NewReader(fi)
	defer fi.Close()

	// Create a buffer of the read file
	bcBuf = new(bytes.Buffer)
	bcBuf.ReadFrom(bcStream)

	// Create decoder from the buffer and a map to store the data
	bcDecoder = gob.NewDecoder(bcBuf)
	blockChain = make(map[int64][]byte)

	// Decode the blockchain into the map
	if err := bcDecoder.Decode(&blockChain); err != nil {
		t.Errorf("error decoding test blockchain: %v", err.Error())
	}

	forkPoint := int64(131)
	forkBl, err := dcrutil.NewBlockFromBytes(blockChain[forkPoint])
	if err != nil {
		t.Fatalf("NewBlockFromBytes error: %v", err.Error())
	}
	forkBl.SetHeight(forkPoint)
	_, _, err = chain.ProcessBlock(forkBl, blockchain.BFNone)
	if err != nil {
		t.Fatalf("ProcessBlock error: %v", err.Error())
	}
	newBestHash := forkBl.Sha()

	err = chain.ForceHeadReorganization(*oldBestHash, *newBestHash)
	if err != nil {
		t.Fatalf("failed forced reorganization: %v", err.Error())
	}

	// Ensure our blockchain is at the correct best tip for our forced
	// reorganization
	topBlock, _ := chain.GetTopBlock()
	tipHash := topBlock.Sha()
	expected, _ := chainhash.NewHashFromStr("0df603f434be1dca22d706c7c47be16a8" +
		"edcef2f151bcf08b51138aa1cda26e2")
	if *tipHash != *expected {
		t.Errorf("Failed to correctly reorg; expected tip %v, got tip %v",
			expected, tipHash)
	}
	have, err := chain.HaveBlock(expected)
	if !have {
		t.Errorf("missing tip block after reorganization test")
	}
	if err != nil {
		t.Errorf("unexpected error testing for presence of new tip block "+
			"after reorg test: %v", err)
	}

	return
}
func TestTicketDBGeneral(t *testing.T) { // Declare some useful variables. testBCHeight := int64(168) filename := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2") fi, err := os.Open(filename) bcStream := bzip2.NewReader(fi) defer fi.Close() // Create a buffer of the read file. bcBuf := new(bytes.Buffer) bcBuf.ReadFrom(bcStream) // Create decoder from the buffer and a map to store the data. bcDecoder := gob.NewDecoder(bcBuf) testBlockchainBytes := make(map[int64][]byte) // Decode the blockchain into the map. if err := bcDecoder.Decode(&testBlockchainBytes); err != nil { t.Errorf("error decoding test blockchain") } testBlockchain := make(map[int64]*dcrutil.Block, len(testBlockchainBytes)) for k, v := range testBlockchainBytes { bl, err := dcrutil.NewBlockFromBytes(v) if err != nil { t.Fatalf("couldn't decode block") } testBlockchain[k] = bl } // Create a new database to store the accepted stake node data into. dbName := "ffldb_staketest" dbPath := filepath.Join(testDbRoot, dbName) _ = os.RemoveAll(dbPath) testDb, err := database.Create(testDbType, dbPath, simNetParams.Net) if err != nil { t.Fatalf("error creating db: %v", err) } // Setup a teardown. defer os.RemoveAll(dbPath) defer os.RemoveAll(testDbRoot) defer testDb.Close() // Load the genesis block and begin testing exported functions. var bestNode *Node err = testDb.Update(func(dbTx database.Tx) error { var errLocal error bestNode, errLocal = InitDatabaseState(dbTx, simNetParams) if errLocal != nil { return errLocal } return nil }) if err != nil { t.Fatalf(err.Error()) } // Cache all of our nodes so that we can check them when we start // disconnecting and going backwards through the blockchain. nodesForward := make([]*Node, testBCHeight+1) loadedNodesForward := make([]*Node, testBCHeight+1) nodesForward[0] = bestNode loadedNodesForward[0] = bestNode err = testDb.Update(func(dbTx database.Tx) error { for i := int64(1); i <= testBCHeight; i++ { block := testBlockchain[i] ticketsToAdd := make([]chainhash.Hash, 0) if i >= simNetParams.StakeEnabledHeight { matureHeight := (i - int64(simNetParams.TicketMaturity)) ticketsToAdd = ticketsInBlock(testBlockchain[matureHeight]) } header := block.MsgBlock().Header if int(header.PoolSize) != len(bestNode.LiveTickets()) { t.Errorf("bad number of live tickets: want %v, got %v", header.PoolSize, len(bestNode.LiveTickets())) } if header.FinalState != bestNode.FinalState() { t.Errorf("bad final state: want %x, got %x", header.FinalState, bestNode.FinalState()) } // In memory addition test. bestNode, err = bestNode.ConnectNode(header, ticketsSpentInBlock(block), revokedTicketsInBlock(block), ticketsToAdd) if err != nil { return fmt.Errorf("couldn't connect node: %v", err.Error()) } // Write the new node to db. nodesForward[i] = bestNode blockSha := block.Sha() err := WriteConnectedBestNode(dbTx, bestNode, *blockSha) if err != nil { return fmt.Errorf("failure writing the best node: %v", err.Error()) } // Reload the node from DB and make sure it's the same. 
blockHash := block.Sha() loadedNode, err := LoadBestNode(dbTx, bestNode.Height(), *blockHash, header, simNetParams) if err != nil { return fmt.Errorf("failed to load the best node: %v", err.Error()) } err = nodesEqual(loadedNode, bestNode) if err != nil { return fmt.Errorf("loaded best node was not same as "+ "in memory best node: %v", err.Error()) } loadedNodesForward[i] = loadedNode } return nil }) if err != nil { t.Fatalf(err.Error()) } nodesBackward := make([]*Node, testBCHeight+1) nodesBackward[testBCHeight] = bestNode for i := testBCHeight; i >= int64(1); i-- { parentBlock := testBlockchain[i-1] ticketsToAdd := make([]chainhash.Hash, 0) if i >= simNetParams.StakeEnabledHeight { matureHeight := (i - 1 - int64(simNetParams.TicketMaturity)) ticketsToAdd = ticketsInBlock(testBlockchain[matureHeight]) } header := parentBlock.MsgBlock().Header blockUndoData := nodesForward[i-1].UndoData() formerBestNode := bestNode // In memory disconnection test. bestNode, err = bestNode.DisconnectNode(header, blockUndoData, ticketsToAdd, nil) if err != nil { t.Fatalf(err.Error()) } err = nodesEqual(bestNode, nodesForward[i-1]) if err != nil { t.Errorf("non-equiv stake nodes at height %v: %v", i-1, err.Error()) } // Try again using the database instead of the in memory // data to disconnect the node, too. var bestNodeUsingDB *Node err = testDb.View(func(dbTx database.Tx) error { // Negative test. bestNodeUsingDB, err = formerBestNode.DisconnectNode(header, nil, nil, nil) if err == nil && formerBestNode.height > 1 { return fmt.Errorf("expected error when no in memory data " + "or dbtx is passed") } bestNodeUsingDB, err = formerBestNode.DisconnectNode(header, nil, nil, dbTx) if err != nil { return err } return nil }) if err != nil { t.Errorf("couldn't disconnect using the database: %v", err.Error()) } err = nodesEqual(bestNode, bestNodeUsingDB) if err != nil { t.Errorf("non-equiv stake nodes using db when disconnecting: %v", err.Error()) } // Write the new best node to the database. nodesBackward[i-1] = bestNode err = testDb.Update(func(dbTx database.Tx) error { nodesForward[i] = bestNode parentBlockSha := parentBlock.Sha() err := WriteDisconnectedBestNode(dbTx, bestNode, *parentBlockSha, formerBestNode.UndoData()) if err != nil { return fmt.Errorf("failure writing the best node: %v", err.Error()) } return nil }) if err != nil { t.Errorf("%s", err.Error()) } // Check the best node against the loaded best node from // the database after. err = testDb.View(func(dbTx database.Tx) error { parentBlockHash := parentBlock.Sha() loadedNode, err := LoadBestNode(dbTx, bestNode.Height(), *parentBlockHash, header, simNetParams) if err != nil { return fmt.Errorf("failed to load the best node: %v", err.Error()) } err = nodesEqual(loadedNode, bestNode) if err != nil { return fmt.Errorf("loaded best node was not same as "+ "in memory best node: %v", err.Error()) } err = nodesEqual(loadedNode, loadedNodesForward[i-1]) if err != nil { return fmt.Errorf("loaded best node was not same as "+ "previously cached node: %v", err.Error()) } return nil }) if err != nil { t.Errorf("%s", err.Error()) } } // Unit testing the in-memory implementation negatively. b161 := testBlockchain[161] b162 := testBlockchain[162] n162Test := copyNode(nodesForward[162]) // No node. _, err = connectNode(nil, b162.MsgBlock().Header, n162Test.SpentByBlock(), revokedTicketsInBlock(b162), n162Test.NewTickets()) if err == nil { t.Errorf("expect error for no node") } // Best node missing ticket in live ticket bucket to spend. 
n161Copy := copyNode(nodesForward[161]) n161Copy.liveTickets.Delete(tickettreap.Key(n162Test.SpentByBlock()[0])) _, err = n161Copy.ConnectNode(b162.MsgBlock().Header, n162Test.SpentByBlock(), revokedTicketsInBlock(b162), n162Test.NewTickets()) if err == nil || err.(RuleError).GetCode() != ErrMissingTicket { t.Errorf("unexpected wrong or no error for "+ "Best node missing ticket in live ticket bucket to spend: %v", err) } // Duplicate best winners. n161Copy = copyNode(nodesForward[161]) n162Copy := copyNode(nodesForward[162]) n161Copy.nextWinners[0] = n161Copy.nextWinners[1] spentInBlock := n162Copy.SpentByBlock() spentInBlock[0] = spentInBlock[1] _, err = n161Copy.ConnectNode(b162.MsgBlock().Header, spentInBlock, revokedTicketsInBlock(b162), n162Test.NewTickets()) if err == nil || err.(RuleError).GetCode() != ErrMissingTicket { t.Errorf("unexpected wrong or no error for "+ "Best node missing ticket in live ticket bucket to spend: %v", err) } // Test for corrupted spentInBlock. someHash := chainhash.HashFuncH([]byte{0x00}) spentInBlock = n162Test.SpentByBlock() spentInBlock[4] = someHash _, err = nodesForward[161].ConnectNode(b162.MsgBlock().Header, spentInBlock, revokedTicketsInBlock(b162), n162Test.NewTickets()) if err == nil || err.(RuleError).GetCode() != ErrUnknownTicketSpent { t.Errorf("unexpected wrong or no error for "+ "Test for corrupted spentInBlock: %v", err) } // Corrupt winners. n161Copy = copyNode(nodesForward[161]) n161Copy.nextWinners[4] = someHash _, err = n161Copy.ConnectNode(b162.MsgBlock().Header, spentInBlock, revokedTicketsInBlock(b162), n162Test.NewTickets()) if err == nil || err.(RuleError).GetCode() != ErrMissingTicket { t.Errorf("unexpected wrong or no error for "+ "Corrupt winners: %v", err) } // Unknown missed ticket. n162Copy = copyNode(nodesForward[162]) spentInBlock = n162Copy.SpentByBlock() _, err = nodesForward[161].ConnectNode(b162.MsgBlock().Header, spentInBlock, append(revokedTicketsInBlock(b162), someHash), n162Copy.NewTickets()) if err == nil || err.(RuleError).GetCode() != ErrMissingTicket { t.Errorf("unexpected wrong or no error for "+ "Unknown missed ticket: %v", err) } // Insert a duplicate new ticket. spentInBlock = n162Test.SpentByBlock() newTicketsDup := []chainhash.Hash{someHash, someHash} _, err = nodesForward[161].ConnectNode(b162.MsgBlock().Header, spentInBlock, revokedTicketsInBlock(b162), newTicketsDup) if err == nil || err.(RuleError).GetCode() != ErrDuplicateTicket { t.Errorf("unexpected wrong or no error for "+ "Insert a duplicate new ticket: %v", err) } // Impossible undo data for disconnecting. n161Copy = copyNode(nodesForward[161]) n162Copy = copyNode(nodesForward[162]) n162Copy.databaseUndoUpdate[0].Expired = false n162Copy.databaseUndoUpdate[0].Missed = false n162Copy.databaseUndoUpdate[0].Spent = false n162Copy.databaseUndoUpdate[0].Revoked = true _, err = n162Copy.DisconnectNode(b161.MsgBlock().Header, n161Copy.UndoData(), n161Copy.NewTickets(), nil) if err == nil { t.Errorf("unexpected wrong or no error for "+ "Impossible undo data for disconnecting: %v", err) } // Missing undo data for disconnecting. n161Copy = copyNode(nodesForward[161]) n162Copy = copyNode(nodesForward[162]) n162Copy.databaseUndoUpdate = n162Copy.databaseUndoUpdate[0:3] _, err = n162Copy.DisconnectNode(b161.MsgBlock().Header, n161Copy.UndoData(), n161Copy.NewTickets(), nil) if err == nil { t.Errorf("unexpected wrong or no error for "+ "Missing undo data for disconnecting: %v", err) } // Unknown undo data hash when disconnecting (missing). 
n161Copy = copyNode(nodesForward[161]) n162Copy = copyNode(nodesForward[162]) n162Copy.databaseUndoUpdate[0].TicketHash = someHash n162Copy.databaseUndoUpdate[0].Expired = false n162Copy.databaseUndoUpdate[0].Missed = true n162Copy.databaseUndoUpdate[0].Spent = false n162Copy.databaseUndoUpdate[0].Revoked = false _, err = n162Copy.DisconnectNode(b161.MsgBlock().Header, n161Copy.UndoData(), n161Copy.NewTickets(), nil) if err == nil || err.(RuleError).GetCode() != ErrMissingTicket { t.Errorf("unexpected wrong or no error for "+ "Unknown undo data for disconnecting (missing): %v", err) } // Unknown undo data hash when disconnecting (revoked). n161Copy = copyNode(nodesForward[161]) n162Copy = copyNode(nodesForward[162]) n162Copy.databaseUndoUpdate[0].TicketHash = someHash n162Copy.databaseUndoUpdate[0].Expired = false n162Copy.databaseUndoUpdate[0].Missed = true n162Copy.databaseUndoUpdate[0].Spent = false n162Copy.databaseUndoUpdate[0].Revoked = true _, err = n162Copy.DisconnectNode(b161.MsgBlock().Header, n161Copy.UndoData(), n161Copy.NewTickets(), nil) if err == nil || err.(RuleError).GetCode() != ErrMissingTicket { t.Errorf("unexpected wrong or no error for "+ "Unknown undo data for disconnecting (revoked): %v", err) } }
func TestTicketDBLongChain(t *testing.T) { // Declare some useful variables. testBCHeight := int64(1001) filename := filepath.Join("..", "/../blockchain/testdata", "testexpiry.bz2") fi, err := os.Open(filename) bcStream := bzip2.NewReader(fi) defer fi.Close() // Create a buffer of the read file. bcBuf := new(bytes.Buffer) bcBuf.ReadFrom(bcStream) // Create decoder from the buffer and a map to store the data. bcDecoder := gob.NewDecoder(bcBuf) testBlockchainBytes := make(map[int64][]byte) // Decode the blockchain into the map. if err := bcDecoder.Decode(&testBlockchainBytes); err != nil { t.Errorf("error decoding test blockchain") } testBlockchain := make(map[int64]*dcrutil.Block, len(testBlockchainBytes)) for k, v := range testBlockchainBytes { bl, err := dcrutil.NewBlockFromBytes(v) if err != nil { t.Fatalf("couldn't decode block") } testBlockchain[k] = bl } // Connect to the best block (1001). bestNode := genesisNode(simNetParams) nodesForward := make([]*Node, testBCHeight+1) nodesForward[0] = bestNode for i := int64(1); i <= testBCHeight; i++ { block := testBlockchain[i] ticketsToAdd := make([]chainhash.Hash, 0) if i >= simNetParams.StakeEnabledHeight { matureHeight := (i - int64(simNetParams.TicketMaturity)) ticketsToAdd = ticketsInBlock(testBlockchain[matureHeight]) } header := block.MsgBlock().Header if int(header.PoolSize) != len(bestNode.LiveTickets()) { t.Errorf("bad number of live tickets: want %v, got %v", header.PoolSize, len(bestNode.LiveTickets())) } if header.FinalState != bestNode.FinalState() { t.Errorf("bad final state: want %x, got %x", header.FinalState, bestNode.FinalState()) } // In memory addition test. bestNode, err = bestNode.ConnectNode(header, ticketsSpentInBlock(block), revokedTicketsInBlock(block), ticketsToAdd) if err != nil { t.Fatalf("couldn't connect node: %v", err.Error()) } nodesForward[i] = bestNode } // Disconnect all the way back to the genesis block. for i := testBCHeight; i >= int64(1); i-- { parentBlock := testBlockchain[i-1] ticketsToAdd := make([]chainhash.Hash, 0) if i >= simNetParams.StakeEnabledHeight { matureHeight := (i - 1 - int64(simNetParams.TicketMaturity)) ticketsToAdd = ticketsInBlock(testBlockchain[matureHeight]) } header := parentBlock.MsgBlock().Header blockUndoData := nodesForward[i-1].UndoData() // In memory disconnection test. bestNode, err = bestNode.DisconnectNode(header, blockUndoData, ticketsToAdd, nil) if err != nil { t.Errorf(err.Error()) } } // Test some accessory functions. 
accessoryTestNode := nodesForward[450] exists := accessoryTestNode.ExistsLiveTicket(accessoryTestNode.nextWinners[0]) if !exists { t.Errorf("expected winner to exist in node live tickets") } missedExp := make([]chainhash.Hash, 0) accessoryTestNode.missedTickets.ForEach(func(k tickettreap.Key, v *tickettreap.Value) bool { if v.Expired { missedExp = append(missedExp, chainhash.Hash(k)) } return true }) revokedExp := make([]chainhash.Hash, 0) accessoryTestNode.revokedTickets.ForEach(func(k tickettreap.Key, v *tickettreap.Value) bool { if v.Expired { revokedExp = append(revokedExp, chainhash.Hash(k)) } return true }) exists = accessoryTestNode.ExistsMissedTicket(missedExp[0]) if !exists { t.Errorf("expected expired and missed ticket to be missed") } exists = accessoryTestNode.ExistsExpiredTicket(missedExp[0]) if !exists { t.Errorf("expected expired and missed ticket to be expired") } exists = accessoryTestNode.ExistsRevokedTicket(revokedExp[0]) if !exists { t.Errorf("expected expired and revoked ticket to be revoked") } exists = accessoryTestNode.ExistsExpiredTicket(revokedExp[0]) if !exists { t.Errorf("expected expired and revoked ticket to be expired") } exists = accessoryTestNode.ExistsExpiredTicket( accessoryTestNode.nextWinners[0]) if exists { t.Errorf("live ticket was expired") } // ---------------------------------------------------------------------------- // A longer, more strenuous test is given below. Uncomment to execute it. // ---------------------------------------------------------------------------- /* // Create a new database to store the accepted stake node data into. dbName := "ffldb_staketest" dbPath := filepath.Join(testDbRoot, dbName) _ = os.RemoveAll(dbPath) testDb, err := database.Create(testDbType, dbPath, simNetParams.Net) if err != nil { t.Fatalf("error creating db: %v", err) } // Setup a teardown. defer os.RemoveAll(dbPath) defer os.RemoveAll(testDbRoot) defer testDb.Close() // Load the genesis block and begin testing exported functions. err = testDb.Update(func(dbTx database.Tx) error { var errLocal error bestNode, errLocal = InitDatabaseState(dbTx, simNetParams) if errLocal != nil { return errLocal } return nil }) if err != nil { t.Fatalf(err.Error()) } // Cache all of our nodes so that we can check them when we start // disconnecting and going backwards through the blockchain. nodesForward = make([]*Node, testBCHeight+1) loadedNodesForward := make([]*Node, testBCHeight+1) nodesForward[0] = bestNode loadedNodesForward[0] = bestNode err = testDb.Update(func(dbTx database.Tx) error { for i := int64(1); i <= testBCHeight; i++ { block := testBlockchain[i] ticketsToAdd := make([]chainhash.Hash, 0) if i >= simNetParams.StakeEnabledHeight { matureHeight := (i - int64(simNetParams.TicketMaturity)) ticketsToAdd = ticketsInBlock(testBlockchain[matureHeight]) } header := block.MsgBlock().Header if int(header.PoolSize) != len(bestNode.LiveTickets()) { t.Errorf("bad number of live tickets: want %v, got %v", header.PoolSize, len(bestNode.LiveTickets())) } if header.FinalState != bestNode.FinalState() { t.Errorf("bad final state: want %x, got %x", header.FinalState, bestNode.FinalState()) } // In memory addition test. bestNode, err = bestNode.ConnectNode(header, ticketsSpentInBlock(block), revokedTicketsInBlock(block), ticketsToAdd) if err != nil { return fmt.Errorf("couldn't connect node: %v", err.Error()) } // Write the new node to db. 
nodesForward[i] = bestNode blockSha := block.Sha() err := WriteConnectedBestNode(dbTx, bestNode, *blockSha) if err != nil { return fmt.Errorf("failure writing the best node: %v", err.Error()) } // Reload the node from DB and make sure it's the same. blockHash := block.Sha() loadedNode, err := LoadBestNode(dbTx, bestNode.Height(), *blockHash, header, simNetParams) if err != nil { return fmt.Errorf("failed to load the best node: %v", err.Error()) } err = nodesEqual(loadedNode, bestNode) if err != nil { return fmt.Errorf("loaded best node was not same as "+ "in memory best node: %v", err.Error()) } loadedNodesForward[i] = loadedNode } return nil }) if err != nil { t.Fatalf(err.Error()) } nodesBackward := make([]*Node, testBCHeight+1) nodesBackward[testBCHeight] = bestNode for i := testBCHeight; i >= int64(1); i-- { parentBlock := testBlockchain[i-1] ticketsToAdd := make([]chainhash.Hash, 0) if i >= simNetParams.StakeEnabledHeight { matureHeight := (i - 1 - int64(simNetParams.TicketMaturity)) ticketsToAdd = ticketsInBlock(testBlockchain[matureHeight]) } header := parentBlock.MsgBlock().Header blockUndoData := nodesForward[i-1].UndoData() formerBestNode := bestNode // In memory disconnection test. bestNode, err = bestNode.DisconnectNode(header, blockUndoData, ticketsToAdd, nil) if err != nil { t.Errorf(err.Error()) } err = nodesEqual(bestNode, nodesForward[i-1]) if err != nil { t.Errorf("non-equiv stake nodes at height %v: %v", i-1, err.Error()) } // Try again using the database instead of the in memory // data to disconnect the node, too. var bestNodeUsingDB *Node err = testDb.View(func(dbTx database.Tx) error { bestNodeUsingDB, err = formerBestNode.DisconnectNode(header, nil, nil, dbTx) if err != nil { return err } return nil }) if err != nil { t.Errorf("couldn't disconnect using the database: %v", err.Error()) } err = nodesEqual(bestNode, bestNodeUsingDB) if err != nil { t.Errorf("non-equiv stake nodes using db when disconnecting: %v", err.Error()) } // Write the new best node to the database. nodesBackward[i-1] = bestNode err = testDb.Update(func(dbTx database.Tx) error { nodesForward[i] = bestNode parentBlockSha := parentBlock.Sha() err := WriteDisconnectedBestNode(dbTx, bestNode, *parentBlockSha, formerBestNode.UndoData()) if err != nil { return fmt.Errorf("failure writing the best node: %v", err.Error()) } return nil }) if err != nil { t.Errorf("%s", err.Error()) } // Check the best node against the loaded best node from // the database after. err = testDb.View(func(dbTx database.Tx) error { parentBlockHash := parentBlock.Sha() loadedNode, err := LoadBestNode(dbTx, bestNode.Height(), *parentBlockHash, header, simNetParams) if err != nil { return fmt.Errorf("failed to load the best node: %v", err.Error()) } err = nodesEqual(loadedNode, bestNode) if err != nil { return fmt.Errorf("loaded best node %v was not same as "+ "in memory best node: %v", loadedNode.Height(), err.Error()) } err = nodesEqual(loadedNode, loadedNodesForward[i-1]) if err != nil { return fmt.Errorf("loaded best node %v was not same as "+ "cached best node: %v", loadedNode.Height(), err.Error()) } return nil }) if err != nil { t.Errorf("%s", err.Error()) } } */ }
func TestMerkleBlock3(t *testing.T) {
	blockStr := "000000004ad131bae9cb9f74b8bcd928" +
		"a60dfe4dadabeb31b1e79403385f9ac4" +
		"ccc28b7400429e56f7df2872aaaa0c16" +
		"221cb09059bd3ea897de156ff51202ff" +
		"72b2cd8d000000000000000000000000" +
		"00000000000000000000000000000000" +
		"00000000010000000000000000000000" +
		"22000000ffff7f20002d310100000000" +
		"640000007601000063a0815601000000" +
		"00000000000000000000000000000000" +
		"00000000000000000000000000000000" +
		"00000000010100000001000000000000" +
		"00000000000000000000000000000000" +
		"00000000000000000000ffffffff00ff" +
		"ffffff0380b2e60e00000000000017a9" +
		"144fa6cbd0dbe5ec407fe4c8ad374e66" +
		"7771fa0d448700000000000000000000" +
		"226a2000000000000000000000000000" +
		"0000009e0453a6ab10610e17a7a5fadc" +
		"f6c34f002f68590000000000001976a9" +
		"141b79e6496226f89ad4e049667c1344" +
		"c16a75815188ac000000000000000001" +
		"000000000000000000000000ffffffff" +
		"04deadbeef00"
	blockBytes, err := hex.DecodeString(blockStr)
	if err != nil {
		t.Errorf("TestMerkleBlock3 DecodeString failed: %v", err)
		return
	}
	blk, err := dcrutil.NewBlockFromBytes(blockBytes)
	if err != nil {
		t.Errorf("TestMerkleBlock3 NewBlockFromBytes failed: %v", err)
		return
	}

	f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)

	inputStr := "4797be83be7b4c4f833739c3542c2c1c403ffb01f0b721b5bc5dee3ff655a856"
	sha, err := chainhash.NewHashFromStr(inputStr)
	if err != nil {
		t.Errorf("TestMerkleBlock3 NewShaHashFromStr failed: %v", err)
		return
	}

	f.AddShaHash(sha)

	mBlock, _ := bloom.NewMerkleBlock(blk, f)

	wantStr := "0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4" +
		"b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc" +
		"96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50c" +
		"c069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196" +
		"30101"
	want, err := hex.DecodeString(wantStr)
	if err != nil {
		t.Errorf("TestMerkleBlock3 DecodeString failed: %v", err)
		return
	}

	got := bytes.NewBuffer(nil)
	err = mBlock.BtcEncode(got, wire.ProtocolVersion)
	if err != nil {
		t.Errorf("TestMerkleBlock3 BtcEncode failed: %v", err)
		return
	}

	if !bytes.Equal(want, got.Bytes()) {
		t.Errorf("TestMerkleBlock3 failed merkle block comparison: "+
			"got %v want %v", got.Bytes(), want)
		return
	}
}
func TestFilterInsertP2PubKeyOnly(t *testing.T) { blockStr := "000000003ffad804971ce6b8a13c8162287222d91395fa77b6bea6b4" + "626b4827000000004259bd8a4d5d5f8469f390903d27b9cab2ea03822fbe" + "616478756ada751a283be8e4aa58eeb75810e22031cc2756442b7f68c77b" + "3735522fc5490be1676253c301005f031703bb61050000005c1300006fb2" + "381c30dd35680000000091940000fc0900000cadf15600d9781d3a5f237b" + "083799410f00000000000000000000000000000000000000000000000000" + "000002010000000100000000000000000000000000000000000000000000" + "00000000000000000000ffffffff00ffffffff032727750c000000000000" + "17a9144fa6cbd0dbe5ec407fe4c8ad374e667771fa0d4487000000000000" + "00000000266a249194000000000000000000000000000000000000000000" + "0000000000b90c53023ee736e6d7eebe4a0000000000001976a914bfe0f6" + "3eda5d7db3ee05661051c026f5296ffc7888ac0000000000000000011512" + "34570000000000000000ffffffff0800002f646372642f01000000020c57" + "441e66eaa72bc76ab54faaa0ec87941f4423a463c5e34d7f23f247d65e9d" + "0200000001ffffffff31b7ffaec935e787dc03670d4d13ee0066717cb750" + "2cb4d925b09b7f692a73580400000001ffffffff0200ca9a3b0000000000" + "0023210351b64c01163b9184637671e27d6d29b3a205203710f6fbc5a6e1" + "b646a11984d6ac81001f060000000000001976a9144feba3e04d91d9dc90" + "ebb13ce91bf9bcfe32ff9288ac000000000000000002c29f493300000000" + "8f9400000a0000006a4730440220186741de60d6fe75d2206b62780edabe" + "8a0a13f823aaeca58f0d5a8dedfa99f202201dc84a1bd26d14723ddf2fe4" + "216d4a24d788597fa22255d144ce119d7544ce39012103879a3b3666bf03" + "88ab33e6e24e77c3c219044f7c5d778b71ac1837817095830da72e700e00" + "000000909400000e0000006b4830450221008cc441f58be0eedc713b867d" + "c8dfff21e2f6c2e38a906a0af5aa0625b2683f6b0220308bcaf6628b3c68" + "19da76ebb2ca5e5d1ddde6e5035193d75e3e667731deadc4012102a871fe" + "4e4f77121f1cbaf2db62190539687092b465e252a3bd732791ca442d5405" + "010000000200000000000000000000000000000000000000000000000000" + "00000000000000ffffffff00ffffffff91ed802c4a23cca0517dbc2d89f7" + "b6f8a2d1491a27ed25e3d3f621bac03316da0000000001ffffffff030000" + "0000000000000000266a243ffad804971ce6b8a13c8162287222d91395fa" + "77b6bea6b4626b4827000000009094000000000000000000000000046a02" + "010003d0db190100000000001abb76a914339f1f6a41d7ef65ffd80bbb8c" + "daaa215a16472d88ac000000000000000002e47d79070000000000000000" + "ffffffff04deadbeef1f52621201000000938b0000090000006a47304402" + "207184b4d7a95a559b1142f50cfde3e40bf460572aea4d5eeabad062e45c" + "9a8f5102202304acbcc9cc55e58770e51d4fc9ac81af9c790a65d251ea4d" + "556bc7ee96fb47012103c46bdbf6b24be97eac230ca9b23e928e2e76750d" + "3d3c8b29e15cebb64ba01f86010000000200000000000000000000000000" + "00000000000000000000000000000000000000ffffffff00ffffffff17be" + "9d003549030c20c018a71745af6f6a02d96637f49f5a548ee7fe0ac84420" + "0000000001ffffffff0300000000000000000000266a243ffad804971ce6" + "b8a13c8162287222d91395fa77b6bea6b4626b4827000000009094000000" + "000000000000000000046a02010071c339cf0000000000001abb76a9147f" + "1aaac9f04febbf9dadd262b1c710729970445988ac000000000000000002" + "e47d79070000000000000000ffffffff04deadbeef8d45c0c70000000099" + "8f0000070000006a473044022054a09af013f1a74960686bf5c36f3eb822" + "5ffc96e76a54dc1aac9fdea65cff2b022010359dfcd16ac77f3a6ab47309" + "e106a885fa2d031d5bac0824f4432a77ccdae001210389e445603d66ce44" + "92ec74d9b1974268a2e83413c84df87895184bc6f46ae1d7010000000200" + "000000000000000000000000000000000000000000000000000000000000" + "00ffffffff00ffffffff49a883465f82c0483b3ff7da057cab44a42ecbc8" + "27c344a960a2a33c95263f760000000001ffffffff030000000000000000" + 
"0000266a243ffad804971ce6b8a13c8162287222d91395fa77b6bea6b462" + "6b4827000000009094000000000000000000000000046a0201002c9d63bf" + "0000000000001abb76a91449d402e31fc08414f565febaa1b69eccc2fb8d" + "2688ac000000000000000002e47d79070000000000000000ffffffff04de" + "adbeef481feab70000000064870000120000006a473044022008daa49081" + "d7dc6c466a4d3fd4184927e8b77fd1f5721f206bf0d13a2c00b07802202d" + "89841ad5346de14db95c3e5b15a0d82e0df1f50a235f67aeec08b98f4b36" + "7a012102907261a4670a9d0ff918724af50f2ecb7947831dad30ac285afa" + "1b90549aa282010000000200000000000000000000000000000000000000" + "00000000000000000000000000ffffffff00ffffffffb6df4ea74c958f39" + "65ecc90b9226cfdc2d7986b1df30998cf3e827d7e9fbf4b80000000001ff" + "ffffff0300000000000000000000266a243ffad804971ce6b8a13c816228" + "7222d91395fa77b6bea6b4626b4827000000009094000000000000000000" + "000000046a020100f3a613b90000000000001abb76a9147f9321c65ee805" + "d87f67b0b0e9dac4f0779c1d3a88ac000000000000000002e47d79070000" + "000000000000ffffffff04deadbeef0f299ab100000000408d0000130000" + "006b4830450221008c5759dd9f0c40c61f61f5b67c0b3f860d4d59859fb2" + "451dbeab9956b3ceb05602203e81d5b88b83e279dc7feca13c8e5eb58676" + "158243058ec43d6366933397be4a0121031e6757ee86e94a02e567db7d4e" + "c4ba5088f6b5fdbfe9b2794b70b42b52b879200100000002000000000000" + "0000000000000000000000000000000000000000000000000000ffffffff" + "00ffffffff9e74ead771c313f0c0bb739860eba39d04a8cce201cda626dc" + "6a7dd4e83b3f770000000001ffffffff0400000000000000000000266a24" + "3ffad804971ce6b8a13c8162287222d91395fa77b6bea6b4626b48270000" + "00009094000000000000000000000000046a0201000793a0e00000000000" + "001abb76a914c8c5a2cbd183871718dcb14b5f198561450d157e88ac596e" + "8f250000000000001abb76a9147112f7d015baeb129e732b9c4805e492ef" + "d0a2fb88ac000000000000000002e47d79070000000000000000ffffffff" + "04deadbeef7d83b6fe0000000022920000070000006a4730440220160ff7" + "bd84190d8d837ac6bd782be475d23d50832872626453bd70ea63d9a6a002" + "20603afdd51fc8e3fead1fd9dbae410b8d0617969ad5119e6dc2b0855d7b" + "48e73501210297ab850cae270e9438693353e40444c7e714e179505e8b12" + "1c042f4869e42072" blockBytes, err := hex.DecodeString(blockStr) if err != nil { t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err) return } block, err := dcrutil.NewBlockFromBytes(blockBytes) if err != nil { t.Errorf("TestFilterInsertP2PubKeyOnly NewBlockFromBytes failed: %v", err) return } f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateP2PubkeyOnly) // Generation pubkey inputStr := "0351b64c01163b9184637671e27d6d29b3a205203710f6fbc5a6e1b646a11984d6" inputBytes, err := hex.DecodeString(inputStr) if err != nil { t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err) return } f.Add(inputBytes) // Output address of 2nd transaction inputStr = "4feba3e04d91d9dc90ebb13ce91bf9bcfe32ff92" inputBytes, err = hex.DecodeString(inputStr) if err != nil { t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err) return } f.Add(inputBytes) // Ignore return value -- this is just used to update the filter. 
_, _ = bloom.NewMerkleBlock(block, f) // We should match the generation pubkey inputStr = "8199f30ccc006056ed79cf0a3cd0b67a195ffd46903d42adc7babe2ed2f2e371" hash, err := chainhash.NewHashFromStr(inputStr) if err != nil { t.Errorf("TestMerkleBlockP2PubKeyOnly NewHashFromStr failed: %v", err) return } outpoint := wire.NewOutPoint(hash, 0, wire.TxTreeRegular) if !f.MatchesOutPoint(outpoint) { t.Errorf("TestMerkleBlockP2PubKeyOnly didn't match the generation "+ "outpoint %s", inputStr) return } // We should not match the 4th transaction, which is not p2pk inputStr = "d7314aaf54253c651e8258c7d22c574af8804611f0dcea79fd9a47f4565d85ad" hash, err = chainhash.NewHashFromStr(inputStr) if err != nil { t.Errorf("TestMerkleBlockP2PubKeyOnly NewHashFromStr failed: %v", err) return } outpoint = wire.NewOutPoint(hash, 0, wire.TxTreeRegular) if f.MatchesOutPoint(outpoint) { t.Errorf("TestMerkleBlockP2PubKeyOnly matched outpoint %s", inputStr) return } }
// DropAfterBlockBySha will remove any blocks from the database after
// the given block.
func (db *LevelDb) DropAfterBlockBySha(sha *chainhash.Hash) (rerr error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	startheight := db.nextBlock - 1

	keepidx, err := db.getBlkLoc(sha)
	if err != nil {
		// should the error here be normalized ?
		log.Tracef("block loc failed %v ", sha)
		return err
	}

	for height := startheight; height > keepidx; height = height - 1 {
		var blk *dcrutil.Block
		blksha, buf, err := db.getBlkByHeight(height)
		if err != nil {
			return err
		}
		blk, err = dcrutil.NewBlockFromBytes(buf)
		if err != nil {
			return err
		}

		// Obtain previous block sha and buffer
		var blkprev *dcrutil.Block
		_, bufprev, errprev := db.getBlkByHeight(height - 1) // discard blkshaprev
		if errprev != nil {
			return errprev
		}

		// Do the same thing for the parent block
		blkprev, errprev = dcrutil.NewBlockFromBytes(bufprev)
		if errprev != nil {
			return errprev
		}

		// Unspend the stake tx in the current block
		for _, tx := range blk.MsgBlock().STransactions {
			err = db.unSpend(tx)
			if err != nil {
				return err
			}
		}
		// rather than iterate the list of tx backward, do it twice.
		for _, tx := range blk.STransactions() {
			var txUo txUpdateObj
			txUo.delete = true
			db.txUpdateMap[*tx.Sha()] = &txUo
		}

		// Check to see if the regular txs of the parent were even included; if
		// they are, unspend all of these regular tx too
		votebits := blk.MsgBlock().Header.VoteBits
		if dcrutil.IsFlagSet16(votebits, dcrutil.BlockValid) && height != 0 {
			// Unspend the regular tx in the current block
			for _, tx := range blkprev.MsgBlock().Transactions {
				err = db.unSpend(tx)
				if err != nil {
					return err
				}
			}
			// rather than iterate the list of tx backward, do it twice.
			for _, tx := range blkprev.Transactions() {
				var txUo txUpdateObj
				txUo.delete = true
				db.txUpdateMap[*tx.Sha()] = &txUo
			}
		}

		db.lBatch().Delete(shaBlkToKey(blksha))
		db.lBatch().Delete(int64ToKey(height))
	}

	// update the last block cache
	db.lastBlkShaCached = true
	db.lastBlkSha = *sha
	db.lastBlkIdx = keepidx
	db.nextBlock = keepidx + 1

	return nil
}
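// A minimal usage sketch (an assumption for illustration, not from the source):
// roll the database back so the block identified by keepSha becomes the new
// tip. The NewestSha accessor used to report the resulting best block is also
// assumed here.
func rollBackTo(db *LevelDb, keepSha *chainhash.Hash) error {
	if err := db.DropAfterBlockBySha(keepSha); err != nil {
		return err
	}

	_, height, err := db.NewestSha()
	if err != nil {
		return err
	}
	log.Tracef("database rolled back to height %d", height)

	return nil
}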
// TestBlockchainFunctions tests the various blockchain APIs to ensure proper
// functionality.
func TestBlockchainFunctions(t *testing.T) {
	// Create a new database and chain instance to run tests against.
	chain, teardownFunc, err := chainSetup("validateunittests", simNetParams)
	if err != nil {
		t.Errorf("Failed to setup chain instance: %v", err)
		return
	}
	defer teardownFunc()

	// The genesis block should fail to connect since it's already inserted.
	genesisBlock := simNetParams.GenesisBlock
	err = chain.CheckConnectBlock(dcrutil.NewBlock(genesisBlock))
	if err == nil {
		t.Errorf("CheckConnectBlock: Did not receive expected error")
	}

	// Load up the rest of the blocks up to HEAD~1.
	filename := filepath.Join("testdata/", "blocks0to168.bz2")
	fi, err := os.Open(filename)
	bcStream := bzip2.NewReader(fi)
	defer fi.Close()

	// Create a buffer of the read file.
	bcBuf := new(bytes.Buffer)
	bcBuf.ReadFrom(bcStream)

	// Create decoder from the buffer and a map to store the data.
	bcDecoder := gob.NewDecoder(bcBuf)
	blockChain := make(map[int64][]byte)

	// Decode the blockchain into the map.
	if err := bcDecoder.Decode(&blockChain); err != nil {
		t.Errorf("error decoding test blockchain: %v", err.Error())
	}

	// Insert blocks 1 to 168 and perform various tests.
	for i := 1; i <= 168; i++ {
		bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)])
		if err != nil {
			t.Errorf("NewBlockFromBytes error: %v", err.Error())
		}
		bl.SetHeight(int64(i))

		_, _, err = chain.ProcessBlock(bl, blockchain.BFNone)
		if err != nil {
			t.Fatalf("ProcessBlock error at height %v: %v", i, err.Error())
		}
	}

	val, err := chain.TicketPoolValue()
	if err != nil {
		t.Errorf("Failed to get ticket pool value: %v", err)
	}
	expectedVal := dcrutil.Amount(3495091704)
	if val != expectedVal {
		t.Errorf("Failed to get correct result for ticket pool value; "+
			"want %v, got %v", expectedVal, val)
	}

	a, _ := dcrutil.DecodeNetworkAddress("SsbKpMkPnadDcZFFZqRPY8nvdFagrktKuzB")
	hs, err := chain.TicketsWithAddress(a)
	if err != nil {
		t.Errorf("Failed to do TicketsWithAddress: %v", err)
	}
	expectedLen := 223
	if len(hs) != expectedLen {
		t.Errorf("Failed to get correct number of tickets for "+
			"TicketsWithAddress; want %v, got %v", expectedLen, len(hs))
	}

	totalSubsidy := chain.TotalSubsidy()
	expectedSubsidy := int64(35783267326630)
	if expectedSubsidy != totalSubsidy {
		t.Errorf("Failed to get correct total subsidy for "+
			"TotalSubsidy; want %v, got %v", expectedSubsidy, totalSubsidy)
	}
}