// This example demonstrates creating a new database and inserting the genesis
// block into it.
func ExampleCreateDB() {
	// Notice in these example imports that the memdb driver is loaded.
	// Ordinarily this would be whatever driver(s) your application
	// requires.
	// import (
	//	"github.com/decred/dcrd/database"
	//	_ "github.com/decred/dcrd/database/memdb"
	// )

	// Create a database and schedule it to be closed on exit. This example
	// uses a memory-only database to avoid needing to write anything to
	// the disk. Typically, you would specify a persistent database driver
	// such as "leveldb" and give it a database name as the second
	// parameter.
	db, err := database.CreateDB("memdb")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()

	// Insert the test network genesis block.
	genesis := dcrutil.NewBlock(chaincfg.TestNetParams.GenesisBlock)
	genesis.SetHeight(0)
	newHeight, err := db.InsertBlock(genesis)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("New height:", newHeight)

	// Output:
	// New height: 0
}
// Receive waits for the response promised by the future and returns the raw // block requested from the server given its hash. func (r FutureGetBlockResult) Receive() (*dcrutil.Block, error) { res, err := receiveFuture(r) if err != nil { return nil, err } // Unmarshal result as a string. var blockHex string err = json.Unmarshal(res, &blockHex) if err != nil { return nil, err } // Decode the serialized block hex to raw bytes. serializedBlock, err := hex.DecodeString(blockHex) if err != nil { return nil, err } // Deserialize the block and return it. var msgBlock wire.MsgBlock err = msgBlock.Deserialize(bytes.NewReader(serializedBlock)) if err != nil { return nil, err } return dcrutil.NewBlock(&msgBlock), nil }
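// The following is a minimal sketch, not part of the original source, showing
// how the promise/future pattern above is typically consumed: the Async call
// queues the request and returns immediately, and Receive blocks until the
// response arrives. It assumes a client type with a GetBlockAsync method that
// returns a FutureGetBlockResult, as in the dcrrpcclient-style API; verify the
// exact method name against the client package in use.
func fetchBlocksConcurrently(c *Client, hashes []*chainhash.Hash) ([]*dcrutil.Block, error) {
	// Issue all requests up front so they can be serviced concurrently.
	futures := make([]FutureGetBlockResult, 0, len(hashes))
	for _, hash := range hashes {
		futures = append(futures, c.GetBlockAsync(hash))
	}

	// Receive the results in order, blocking on each outstanding request.
	blocks := make([]*dcrutil.Block, 0, len(futures))
	for _, f := range futures {
		block, err := f.Receive()
		if err != nil {
			return nil, err
		}
		blocks = append(blocks, block)
	}
	return blocks, nil
}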
func (db *MemDb) fetchBlockBySha(sha *chainhash.Hash) (*dcrutil.Block, error) { if db.closed { return nil, ErrDbClosed } if blockHeight, exists := db.blocksBySha[*sha]; exists { block := dcrutil.NewBlock(db.blocks[int(blockHeight)]) block.SetHeight(blockHeight) return block, nil } return nil, fmt.Errorf("block %v is not in database", sha) }
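// The lookup above depends on MemDb maintaining two parallel structures: a
// slice of blocks ordered by height and a map from block hash to height. The
// stripped-down sketch below (an illustration only, not the actual MemDb
// definition) shows the invariant an insert has to preserve for
// fetchBlockBySha to work.
type miniMemDb struct {
	closed      bool
	blocks      []*wire.MsgBlock         // index == block height
	blocksBySha map[chainhash.Hash]int64 // block hash -> height
}

func newMiniMemDb() *miniMemDb {
	return &miniMemDb{blocksBySha: make(map[chainhash.Hash]int64)}
}

// insertBlock appends the block at the next height and records its hash so a
// later lookup by hash can recover the height and, from it, the block.
func (db *miniMemDb) insertBlock(block *dcrutil.Block) int64 {
	height := int64(len(db.blocks))
	db.blocks = append(db.blocks, block.MsgBlock())
	db.blocksBySha[*block.Sha()] = height
	return height
}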
// TODO Make more elaborate tests for difficulty. The difficulty algorithms // have already been tested to death in simnet/testnet/mainnet simulations, // but we should really have a unit test for them that includes tests for // edge cases. func TestDiff(t *testing.T) { db, err := database.CreateDB("memdb") if err != nil { t.Errorf("Failed to create database: %v\n", err) return } defer db.Close() var tmdb *stake.TicketDB genesisBlock := dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) _, err = db.InsertBlock(genesisBlock) if err != nil { t.Errorf("Failed to insert genesis block: %v\n", err) return } chain := blockchain.New(db, tmdb, &chaincfg.MainNetParams, nil, nil) //timeSource := blockchain.NewMedianTime() // Grab some blocks // Build fake blockchain // Calc new difficulty ts := time.Now() d, err := chain.CalcNextRequiredDifficulty(ts) if err != nil { t.Errorf("Failed to get difficulty: %v\n", err) return } if d != 486604799 { // This is hardcoded in genesis block but not exported anywhere. t.Error("Failed to get initial difficulty.") } sd, err := chain.CalcNextRequiredStakeDifficulty() if err != nil { t.Errorf("Failed to get stake difficulty: %v\n", err) return } if sd != chaincfg.MainNetParams.MinimumStakeDiff { t.Error("Incorrect initial stake difficulty.") } // Compare // Repeat for a few more }
// This example demonstrates how to create a new chain instance and use
// ProcessBlock to attempt to add a block to the chain. As the package
// overview documentation describes, this includes all of the Decred consensus
// rules. This example intentionally attempts to insert a duplicate genesis
// block to illustrate how an invalid block is handled.
func ExampleBlockChain_ProcessBlock() {
	// Create a new database to store the accepted blocks into. Typically
	// this would be opening an existing database rather than deleting and
	// creating a new one like this, but it is done here so the example is
	// complete and does not leave temporary files lying around.
	dbPath := filepath.Join(os.TempDir(), "exampleprocessblock")
	_ = os.RemoveAll(dbPath)
	db, err := database.Create("ffldb", dbPath, chaincfg.MainNetParams.Net)
	if err != nil {
		fmt.Printf("Failed to create database: %v\n", err)
		return
	}
	defer os.RemoveAll(dbPath)
	defer db.Close()

	// Create a new BlockChain instance using the underlying database for
	// the main Decred network. This example does not demonstrate some
	// of the other available configuration options such as specifying a
	// notification callback and signature cache. Also, the caller would
	// ordinarily keep a reference to the median time source and add time
	// values obtained from other peers on the network so the local time is
	// adjusted to be in agreement with other peers.
	chain, err := blockchain.New(&blockchain.Config{
		DB:          db,
		ChainParams: &chaincfg.MainNetParams,
		TimeSource:  blockchain.NewMedianTime(),
	})
	if err != nil {
		fmt.Printf("Failed to create chain instance: %v\n", err)
		return
	}

	// Process a block. For this example, we are going to intentionally
	// cause an error by trying to process the genesis block which already
	// exists.
	genesisBlock := dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	_, isOrphan, err := chain.ProcessBlock(genesisBlock, blockchain.BFNone)
	if err != nil {
		fmt.Printf("Failed to process block: %v\n", err)
		return
	}
	fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan)

	// This output is dependent on the genesis block, and needs to be
	// updated if the mainnet genesis block is updated.
	// Output:
	// Failed to process block: already have block 267a53b5ee86c24a48ec37aee4f4e7c0c4004892b7259e695e9f5b321f1ab9d2
}
// This example demonstrates how to create a new chain instance and use
// ProcessBlock to attempt to add a block to the chain. As the package
// overview documentation describes, this includes all of the Decred consensus
// rules. This example intentionally attempts to insert a duplicate genesis
// block to illustrate how an invalid block is handled.
func ExampleBlockChain_ProcessBlock() {
	// Create a new database to store the accepted blocks into. Typically
	// this would be opening an existing database rather than using memdb,
	// which is a memory-only database backend, but a new database is
	// created here so this is a complete working example.
	db, err := database.CreateDB("memdb")
	if err != nil {
		fmt.Printf("Failed to create database: %v\n", err)
		return
	}
	defer db.Close()
	var tmdb *stake.TicketDB

	// Insert the main network genesis block. This is part of the initial
	// database setup. Like above, this typically would not be needed when
	// opening an existing database.
	genesisBlock := dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	_, err = db.InsertBlock(genesisBlock)
	if err != nil {
		fmt.Printf("Failed to insert genesis block: %v\n", err)
		return
	}

	// Create a new BlockChain instance without an initialized signature
	// verification cache, using the underlying database for the main
	// Decred network and ignoring notifications.
	chain := blockchain.New(db, tmdb, &chaincfg.MainNetParams, nil, nil)

	// Create a new median time source that is required by the upcoming
	// call to ProcessBlock. Ordinarily this would also add time values
	// obtained from other peers on the network so the local time is
	// adjusted to be in agreement with other peers.
	timeSource := blockchain.NewMedianTime()

	// Process a block. For this example, we are going to intentionally
	// cause an error by trying to process the genesis block which already
	// exists.
	isOrphan, _, err := chain.ProcessBlock(genesisBlock, timeSource, blockchain.BFNone)
	if err != nil {
		fmt.Printf("Failed to process block: %v\n", err)
		return
	}
	fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan)

	// This output is dependent on the genesis block, and needs to be
	// updated if the mainnet genesis block is updated.
	// Output:
	// Failed to process block: already have block 267a53b5ee86c24a48ec37aee4f4e7c0c4004892b7259e695e9f5b321f1ab9d2
}
// exampleLoadDB is used in the example to elide the setup code.
func exampleLoadDB() (database.Db, error) {
	db, err := database.CreateDB("memdb")
	if err != nil {
		return nil, err
	}

	// Insert the test network genesis block.
	genesis := dcrutil.NewBlock(chaincfg.TestNetParams.GenesisBlock)
	genesis.SetHeight(0)
	_, err = db.InsertBlock(genesis)
	if err != nil {
		return nil, err
	}

	return db, nil
}
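// The following is a minimal sketch, not one of the original examples, that
// shows how a caller might use exampleLoadDB to obtain a populated database
// and fetch the genesis block back out. It relies only on database.Db methods
// already exercised elsewhere in this collection (NewestSha and
// FetchBlockBySha); the function name is illustrative.
func exampleFetchGenesisSketch() {
	db, err := exampleLoadDB()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()

	// Look up the newest (and only) block hash, then fetch the block by it.
	sha, height, err := db.NewestSha()
	if err != nil {
		fmt.Println(err)
		return
	}
	block, err := db.FetchBlockBySha(sha)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Height:", height, "Hash:", block.Sha())
}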
// BenchmarkBlock benchmarks how long it takes to load the mainnet genesis
// block.
func BenchmarkBlock(b *testing.B) {
	// Start by creating a new database and populating it with the mainnet
	// genesis block.
	dbPath := filepath.Join(os.TempDir(), "ffldb-benchblk")
	_ = os.RemoveAll(dbPath)
	db, err := database.Create("ffldb", dbPath, blockDataNet)
	if err != nil {
		b.Fatal(err)
	}
	defer os.RemoveAll(dbPath)
	defer db.Close()
	err = db.Update(func(tx database.Tx) error {
		block := dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
		if err := tx.StoreBlock(block); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		b.Fatal(err)
	}

	b.ReportAllocs()
	b.ResetTimer()
	err = db.View(func(tx database.Tx) error {
		blockHash := chaincfg.MainNetParams.GenesisHash
		for i := 0; i < b.N; i++ {
			_, err := tx.FetchBlock(blockHash)
			if err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		b.Fatal(err)
	}

	// Don't benchmark teardown.
	b.StopTimer()
}
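// A minimal companion sketch (not from the original file) that benchmarks
// loading only the serialized header of the mainnet genesis block. It assumes
// the database.Tx interface exposes a FetchBlockHeader method mirroring
// FetchBlock; verify that against the driver in use before relying on it.
func BenchmarkBlockHeaderSketch(b *testing.B) {
	dbPath := filepath.Join(os.TempDir(), "ffldb-benchblkhdr")
	_ = os.RemoveAll(dbPath)
	db, err := database.Create("ffldb", dbPath, blockDataNet)
	if err != nil {
		b.Fatal(err)
	}
	defer os.RemoveAll(dbPath)
	defer db.Close()
	err = db.Update(func(tx database.Tx) error {
		return tx.StoreBlock(dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock))
	})
	if err != nil {
		b.Fatal(err)
	}

	b.ReportAllocs()
	b.ResetTimer()
	err = db.View(func(tx database.Tx) error {
		blockHash := chaincfg.MainNetParams.GenesisHash
		for i := 0; i < b.N; i++ {
			// FetchBlockHeader is assumed to return the raw
			// serialized header bytes for the given block hash.
			if _, err := tx.FetchBlockHeader(blockHash); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		b.Fatal(err)
	}

	// Don't benchmark teardown.
	b.StopTimer()
}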
// reorgTestsForced tests a forced reorganization of a single block at HEAD. func reorgTestForced(t *testing.T) { // Create a new database and chain instance to run tests against. chain, teardownFunc, err := chainSetup("reorgunittest", simNetParams) if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return } defer teardownFunc() // The genesis block should fail to connect since it's already // inserted. genesisBlock := simNetParams.GenesisBlock err = chain.CheckConnectBlock(dcrutil.NewBlock(genesisBlock)) if err == nil { t.Errorf("CheckConnectBlock: Did not receive expected error") } // Load up the rest of the blocks up to HEAD. filename := filepath.Join("testdata/", "reorgto179.bz2") fi, err := os.Open(filename) bcStream := bzip2.NewReader(fi) defer fi.Close() // Create a buffer of the read file bcBuf := new(bytes.Buffer) bcBuf.ReadFrom(bcStream) // Create decoder from the buffer and a map to store the data bcDecoder := gob.NewDecoder(bcBuf) blockChain := make(map[int64][]byte) // Decode the blockchain into the map if err := bcDecoder.Decode(&blockChain); err != nil { t.Errorf("error decoding test blockchain: %v", err.Error()) } // Load up the short chain finalIdx1 := 131 var oldBestHash *chainhash.Hash for i := 1; i < finalIdx1+1; i++ { bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) if err != nil { t.Fatalf("NewBlockFromBytes error: %v", err.Error()) } bl.SetHeight(int64(i)) if i == finalIdx1 { oldBestHash = bl.Sha() } _, _, err = chain.ProcessBlock(bl, blockchain.BFNone) if err != nil { t.Fatalf("ProcessBlock error at height %v: %v", i, err.Error()) } } // Load the long chain and begin loading blocks from that too, // forcing a reorganization // Load up the rest of the blocks up to HEAD. filename = filepath.Join("testdata/", "reorgto180.bz2") fi, err = os.Open(filename) bcStream = bzip2.NewReader(fi) defer fi.Close() // Create a buffer of the read file bcBuf = new(bytes.Buffer) bcBuf.ReadFrom(bcStream) // Create decoder from the buffer and a map to store the data bcDecoder = gob.NewDecoder(bcBuf) blockChain = make(map[int64][]byte) // Decode the blockchain into the map if err := bcDecoder.Decode(&blockChain); err != nil { t.Errorf("error decoding test blockchain: %v", err.Error()) } forkPoint := int64(131) forkBl, err := dcrutil.NewBlockFromBytes(blockChain[forkPoint]) if err != nil { t.Fatalf("NewBlockFromBytes error: %v", err.Error()) } forkBl.SetHeight(forkPoint) _, _, err = chain.ProcessBlock(forkBl, blockchain.BFNone) if err != nil { t.Fatalf("ProcessBlock error: %v", err.Error()) } newBestHash := forkBl.Sha() err = chain.ForceHeadReorganization(*oldBestHash, *newBestHash) if err != nil { t.Fatalf("failed forced reorganization: %v", err.Error()) } // Ensure our blockchain is at the correct best tip for our forced // reorganization topBlock, _ := chain.GetTopBlock() tipHash := topBlock.Sha() expected, _ := chainhash.NewHashFromStr("0df603f434be1dca22d706c7c47be16a8" + "edcef2f151bcf08b51138aa1cda26e2") if *tipHash != *expected { t.Errorf("Failed to correctly reorg; expected tip %v, got tip %v", expected, tipHash) } have, err := chain.HaveBlock(expected) if !have { t.Errorf("missing tip block after reorganization test") } if err != nil { t.Errorf("unexpected error testing for presence of new tip block "+ "after reorg test: %v", err) } return }
// TestBlock tests the API for Block. func TestBlock(t *testing.T) { b := dcrutil.NewBlock(&Block100000) // Ensure we get the same data back out. if msgBlock := b.MsgBlock(); !reflect.DeepEqual(msgBlock, &Block100000) { t.Errorf("MsgBlock: mismatched MsgBlock - got %v, want %v", spew.Sdump(msgBlock), spew.Sdump(&Block100000)) } // Ensure block height set and get work properly. wantHeight := int64(100000) b.SetHeight(wantHeight) if gotHeight := b.Height(); gotHeight != wantHeight { t.Errorf("Height: mismatched height - got %v, want %v", gotHeight, wantHeight) } // Hash for block 100,000. wantShaStr := "85457e2420d265386a84fc48aaee4f6dc98bac015dcc8d536ead20e2faf66a9d" wantSha, err := chainhash.NewHashFromStr(wantShaStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Request the sha multiple times to test generation and caching. for i := 0; i < 2; i++ { sha := b.Sha() if !sha.IsEqual(wantSha) { t.Errorf("Sha #%d mismatched sha - got %v, want %v", i, sha, wantSha) } } // Shas for the transactions in Block100000. wantTxShas := []string{ "1cbd9fe1a143a265cc819ff9d8132a7cbc4ca48eb68c0de39cfdf7ecf42cbbd1", "f3f9bc9473b6fe18d66e3ac2a1a95b6317b280f4e6687a074075b56aebf1eb53", "ba2ed6210a561a4dab34ec8668ad61ec97f126826dae893719dff7383b9d6928", "c5dde35b55b856cf73b2d85737c68b0dcfdaad01d0271ee509f3a7efacc025b3", } // Create a new block to nuke all cached data. b = dcrutil.NewBlock(&Block100000) // Request sha for all transactions one at a time via Tx. for i, txSha := range wantTxShas { wantSha, err := chainhash.NewHashFromStr(txSha) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Request the sha multiple times to test generation and caching. for j := 0; j < 2; j++ { tx, err := b.Tx(i) if err != nil { t.Errorf("Tx #%d: %v", i, err) continue } sha := tx.Sha() if !sha.IsEqual(wantSha) { t.Errorf("Sha #%d mismatched sha - got %v, "+ "want %v", j, sha, wantSha) continue } } } // Create a new block to nuke all cached data. b = dcrutil.NewBlock(&Block100000) // Request slice of all transactions multiple times to test generation // and caching. for i := 0; i < 2; i++ { transactions := b.Transactions() // Ensure we get the expected number of transactions. if len(transactions) != len(wantTxShas) { t.Errorf("Transactions #%d mismatched number of "+ "transactions - got %d, want %d", i, len(transactions), len(wantTxShas)) continue } // Ensure all of the shas match. for j, tx := range transactions { wantSha, err := chainhash.NewHashFromStr(wantTxShas[j]) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } sha := tx.Sha() if !sha.IsEqual(wantSha) { t.Errorf("Transactions #%d mismatched shas - "+ "got %v, want %v", j, sha, wantSha) continue } } } // Serialize the test block. var block100000Buf bytes.Buffer err = Block100000.Serialize(&block100000Buf) if err != nil { t.Errorf("Serialize: %v", err) } block100000Bytes := block100000Buf.Bytes() // Request serialized bytes multiple times to test generation and // caching. for i := 0; i < 2; i++ { serializedBytes, err := b.Bytes() if err != nil { t.Errorf("Bytes: %v", err) continue } if !bytes.Equal(serializedBytes, block100000Bytes) { t.Errorf("Bytes #%d wrong bytes - got %v, want %v", i, spew.Sdump(serializedBytes), spew.Sdump(block100000Bytes)) continue } } // Transaction offsets and length for the transaction in Block100000. wantTxLocs := []wire.TxLoc{ {TxStart: 181, TxLen: 159}, {TxStart: 340, TxLen: 285}, {TxStart: 625, TxLen: 283}, {TxStart: 908, TxLen: 249}, } // Ensure the transaction location information is accurate. 
txLocs, _, err := b.TxLoc() if err != nil { t.Errorf("TxLoc: %v", err) return } if !reflect.DeepEqual(txLocs, wantTxLocs) { t.Errorf("TxLoc: mismatched transaction location information "+ "- got %v, want %v", spew.Sdump(txLocs), spew.Sdump(wantTxLocs)) } }
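// A small supplementary sketch (not in the original test file) that round
// trips the same test block through its serialized form via
// dcrutil.NewBlockFromBytes, the constructor the other tests in this
// collection use when loading blocks from disk. The test name is illustrative.
func TestBlockRoundTripSketch(t *testing.T) {
	b := dcrutil.NewBlock(&Block100000)
	serialized, err := b.Bytes()
	if err != nil {
		t.Fatalf("Bytes: %v", err)
	}

	// Reparse the serialized bytes and ensure the hash is unchanged.
	reparsed, err := dcrutil.NewBlockFromBytes(serialized)
	if err != nil {
		t.Fatalf("NewBlockFromBytes: %v", err)
	}
	if !reparsed.Sha().IsEqual(b.Sha()) {
		t.Errorf("round trip changed block hash - got %v, want %v",
			reparsed.Sha(), b.Sha())
	}
}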
// GenerateNBlocks generates the requested number of blocks. It is self // contained in that it creates block templates and attempts to solve them while // detecting when it is performing stale work and reacting accordingly by // generating a new block template. When a block is solved, it is submitted. // The function returns a list of the hashes of generated blocks. func (m *CPUMiner) GenerateNBlocks(n uint32) ([]*chainhash.Hash, error) { m.Lock() // Respond with an error if there's virtually 0 chance of CPU-mining a block. if !m.server.chainParams.GenerateSupported { m.Unlock() return nil, errors.New("No support for `generate` on the current " + "network, " + m.server.chainParams.Net.String() + ", as it's unlikely to be possible to CPU-mine a block.") } // Respond with an error if server is already mining. if m.started || m.discreteMining { m.Unlock() return nil, errors.New("Server is already CPU mining. Please call " + "`setgenerate 0` before calling discrete `generate` commands.") } m.started = true m.discreteMining = true m.speedMonitorQuit = make(chan struct{}) m.wg.Add(1) go m.speedMonitor() m.Unlock() minrLog.Tracef("Generating %d blocks", n) i := uint32(0) blockHashes := make([]*chainhash.Hash, n, n) // Start a ticker which is used to signal checks for stale work and // updates to the speed monitor. ticker := time.NewTicker(time.Second * hashUpdateSecs) defer ticker.Stop() for { // Read updateNumWorkers in case someone tries a `setgenerate` while // we're generating. We can ignore it as the `generate` RPC call only // uses 1 worker. select { case <-m.updateNumWorkers: default: } // Grab the lock used for block submission, since the current block will // be changing and this would otherwise end up building a new block // template on a block that is in the process of becoming stale. m.submitBlockLock.Lock() // Choose a payment address at random. rand.Seed(time.Now().UnixNano()) payToAddr := cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))] // Create a new block template using the available transactions // in the memory pool as a source of transactions to potentially // include in the block. template, err := NewBlockTemplate(m.server.txMemPool, payToAddr) m.submitBlockLock.Unlock() if err != nil { errStr := fmt.Sprintf("Failed to create new block "+ "template: %v", err) minrLog.Errorf(errStr) continue } if template == nil { errStr := fmt.Sprintf("Not enough voters on parent block " + "and failed to pull parent template") minrLog.Debugf(errStr) continue } // Attempt to solve the block. The function will exit early // with false when conditions that trigger a stale block, so // a new block template can be generated. When the return is // true a solution was found, so submit the solved block. if m.solveBlock(template.block, ticker, nil) { block := dcrutil.NewBlock(template.block) m.submitBlock(block) blockHashes[i] = block.Sha() i++ if i == n { minrLog.Tracef("Generated %d blocks", i) m.Lock() close(m.speedMonitorQuit) m.wg.Wait() m.started = false m.discreteMining = false m.Unlock() return blockHashes, nil } } } }
// generateBlocks is a worker that is controlled by the miningWorkerController. // It is self contained in that it creates block templates and attempts to solve // them while detecting when it is performing stale work and reacting // accordingly by generating a new block template. When a block is solved, it // is submitted. // // It must be run as a goroutine. func (m *CPUMiner) generateBlocks(quit chan struct{}) { minrLog.Tracef("Starting generate blocks worker") // Start a ticker which is used to signal checks for stale work and // updates to the speed monitor. ticker := time.NewTicker(333 * time.Millisecond) defer ticker.Stop() out: for { // Quit when the miner is stopped. select { case <-quit: break out default: // Non-blocking select to fall through } // No point in searching for a solution before the chain is // synced. Also, grab the same lock as used for block // submission, since the current block will be changing and // this would otherwise end up building a new block template on // a block that is in the process of becoming stale. m.submitBlockLock.Lock() time.Sleep(100 * time.Millisecond) // Hacks to make dcr work with Decred PoC (simnet only) // TODO Remove before production. if cfg.SimNet { _, curHeight := m.server.blockManager.chainState.Best() if curHeight == 1 { time.Sleep(5500 * time.Millisecond) // let wallet reconn } else if curHeight > 100 && curHeight < 201 { // slow down to i time.Sleep(10 * time.Millisecond) // 2500 } else { // burn through the first pile of blocks time.Sleep(10 * time.Millisecond) } } // Choose a payment address at random. rand.Seed(time.Now().UnixNano()) payToAddr := cfg.miningAddrs[rand.Intn(len(cfg.miningAddrs))] // Create a new block template using the available transactions // in the memory pool as a source of transactions to potentially // include in the block. template, err := NewBlockTemplate(m.server.txMemPool, payToAddr) m.submitBlockLock.Unlock() if err != nil { errStr := fmt.Sprintf("Failed to create new block "+ "template: %v", err) minrLog.Errorf(errStr) continue } // Not enough voters. if template == nil { continue } // This prevents you from causing memory exhaustion issues // when mining aggressively in a simulation network. if cfg.SimNet { if m.minedOnParents[template.block.Header.PrevBlock] >= maxSimnetToMine { minrLog.Tracef("too many blocks mined on parent, stopping " + "until there are enough votes on these to make a new " + "block") continue } } // Attempt to solve the block. The function will exit early // with false when conditions that trigger a stale block, so // a new block template can be generated. When the return is // true a solution was found, so submit the solved block. if m.solveBlock(template.block, ticker, quit) { block := dcrutil.NewBlock(template.block) m.submitBlock(block) m.minedOnParents[template.block.Header.PrevBlock]++ } } m.workerWg.Done() minrLog.Tracef("Generate blocks worker done") }
// TestBlockchainFunction tests the various blockchain API to ensure proper // functionality. func TestBlockchainFunctions(t *testing.T) { // Create a new database and chain instance to run tests against. chain, teardownFunc, err := chainSetup("validateunittests", simNetParams) if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return } defer teardownFunc() // The genesis block should fail to connect since it's already inserted. genesisBlock := simNetParams.GenesisBlock err = chain.CheckConnectBlock(dcrutil.NewBlock(genesisBlock)) if err == nil { t.Errorf("CheckConnectBlock: Did not receive expected error") } // Load up the rest of the blocks up to HEAD~1. filename := filepath.Join("testdata/", "blocks0to168.bz2") fi, err := os.Open(filename) bcStream := bzip2.NewReader(fi) defer fi.Close() // Create a buffer of the read file. bcBuf := new(bytes.Buffer) bcBuf.ReadFrom(bcStream) // Create decoder from the buffer and a map to store the data. bcDecoder := gob.NewDecoder(bcBuf) blockChain := make(map[int64][]byte) // Decode the blockchain into the map. if err := bcDecoder.Decode(&blockChain); err != nil { t.Errorf("error decoding test blockchain: %v", err.Error()) } // Insert blocks 1 to 168 and perform various tests. for i := 1; i <= 168; i++ { bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) if err != nil { t.Errorf("NewBlockFromBytes error: %v", err.Error()) } bl.SetHeight(int64(i)) _, _, err = chain.ProcessBlock(bl, blockchain.BFNone) if err != nil { t.Fatalf("ProcessBlock error at height %v: %v", i, err.Error()) } } val, err := chain.TicketPoolValue() if err != nil { t.Errorf("Failed to get ticket pool value: %v", err) } expectedVal := dcrutil.Amount(3495091704) if val != expectedVal { t.Errorf("Failed to get correct result for ticket pool value; "+ "want %v, got %v", expectedVal, val) } a, _ := dcrutil.DecodeNetworkAddress("SsbKpMkPnadDcZFFZqRPY8nvdFagrktKuzB") hs, err := chain.TicketsWithAddress(a) if err != nil { t.Errorf("Failed to do TicketsWithAddress: %v", err) } expectedLen := 223 if len(hs) != expectedLen { t.Errorf("Failed to get correct number of tickets for "+ "TicketsWithAddress; want %v, got %v", expectedLen, len(hs)) } totalSubsidy := chain.TotalSubsidy() expectedSubsidy := int64(35783267326630) if expectedSubsidy != totalSubsidy { t.Errorf("Failed to get correct total subsidy for "+ "TotalSubsidy; want %v, got %v", expectedSubsidy, totalSubsidy) } }
// TestReorganization loads a set of test blocks which force a chain // reorganization to test the block chain handling code. func TestReorganization(t *testing.T) { // Create a new database and chain instance to run tests against. chain, teardownFunc, err := chainSetup("reorgunittest", simNetParams) if err != nil { t.Errorf("Failed to setup chain instance: %v", err) return } defer teardownFunc() err = chain.GenerateInitialIndex() if err != nil { t.Errorf("GenerateInitialIndex: %v", err) } // The genesis block should fail to connect since it's already // inserted. genesisBlock := simNetParams.GenesisBlock err = chain.CheckConnectBlock(dcrutil.NewBlock(genesisBlock)) if err == nil { t.Errorf("CheckConnectBlock: Did not receive expected error") } // Load up the rest of the blocks up to HEAD. filename := filepath.Join("testdata/", "reorgto179.bz2") fi, err := os.Open(filename) bcStream := bzip2.NewReader(fi) defer fi.Close() // Create a buffer of the read file bcBuf := new(bytes.Buffer) bcBuf.ReadFrom(bcStream) // Create decoder from the buffer and a map to store the data bcDecoder := gob.NewDecoder(bcBuf) blockChain := make(map[int64][]byte) // Decode the blockchain into the map if err := bcDecoder.Decode(&blockChain); err != nil { t.Errorf("error decoding test blockchain: %v", err.Error()) } // Load up the short chain timeSource := blockchain.NewMedianTime() finalIdx1 := 179 for i := 1; i < finalIdx1+1; i++ { bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) if err != nil { t.Errorf("NewBlockFromBytes error: %v", err.Error()) } bl.SetHeight(int64(i)) _, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone) if err != nil { t.Errorf("ProcessBlock error: %v", err.Error()) } } // Load the long chain and begin loading blocks from that too, // forcing a reorganization // Load up the rest of the blocks up to HEAD. filename = filepath.Join("testdata/", "reorgto180.bz2") fi, err = os.Open(filename) bcStream = bzip2.NewReader(fi) defer fi.Close() // Create a buffer of the read file bcBuf = new(bytes.Buffer) bcBuf.ReadFrom(bcStream) // Create decoder from the buffer and a map to store the data bcDecoder = gob.NewDecoder(bcBuf) blockChain = make(map[int64][]byte) // Decode the blockchain into the map if err := bcDecoder.Decode(&blockChain); err != nil { t.Errorf("error decoding test blockchain: %v", err.Error()) } forkPoint := 131 finalIdx2 := 180 for i := forkPoint; i < finalIdx2+1; i++ { bl, err := dcrutil.NewBlockFromBytes(blockChain[int64(i)]) if err != nil { t.Errorf("NewBlockFromBytes error: %v", err.Error()) } bl.SetHeight(int64(i)) _, _, err = chain.ProcessBlock(bl, timeSource, blockchain.BFNone) if err != nil { t.Errorf("ProcessBlock error: %v", err.Error()) } } // Ensure our blockchain is at the correct best tip topBlock, _ := chain.GetTopBlock() tipHash := topBlock.Sha() expected, _ := chainhash.NewHashFromStr("5ab969d0afd8295b6cd1506f2a310d" + "259322015c8bd5633f283a163ce0e50594") if *tipHash != *expected { t.Errorf("Failed to correctly reorg; expected tip %v, got tip %v", expected, tipHash) } have, err := chain.HaveBlock(expected) if !have { t.Errorf("missing tip block after reorganization test") } if err != nil { t.Errorf("unexpected error testing for presence of new tip block "+ "after reorg test: %v", err) } return }
func Test_dupTx(t *testing.T) { // Ignore db remove errors since it means we didn't have an old one. dbname := fmt.Sprintf("tstdbdup0") dbnamever := dbname + ".ver" _ = os.RemoveAll(dbname) _ = os.RemoveAll(dbnamever) db, err := database.CreateDB("leveldb", dbname) if err != nil { t.Errorf("Failed to open test database %v", err) return } defer os.RemoveAll(dbname) defer os.RemoveAll(dbnamever) defer func() { if err := db.Close(); err != nil { t.Errorf("Close: unexpected error: %v", err) } }() testdatafile := filepath.Join("../", "../blockchain/testdata", "blocks0to168.bz2") blocks, err := loadBlocks(t, testdatafile) if err != nil { t.Errorf("Unable to load blocks from test data for: %v", err) return } var lastSha *chainhash.Hash // Populate with the fisrt 256 blocks, so we have blocks to 'mess with' err = nil out: for height := int64(0); height < int64(len(blocks)); height++ { block := blocks[height] if height != 0 { // except for NoVerify which does not allow lookups check inputs mblock := block.MsgBlock() //t.Errorf("%v", blockchain.DebugBlockString(block)) parentBlock := blocks[height-1] mParentBlock := parentBlock.MsgBlock() var txneededList []*chainhash.Hash opSpentInBlock := make(map[wire.OutPoint]struct{}) if dcrutil.IsFlagSet16(dcrutil.BlockValid, mParentBlock.Header.VoteBits) { for _, tx := range mParentBlock.Transactions { for _, txin := range tx.TxIn { if txin.PreviousOutPoint.Index == uint32(4294967295) { continue } if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { _, used := opSpentInBlock[txin.PreviousOutPoint] if !used { // Origin tx is in the block and so hasn't been // added yet, continue opSpentInBlock[txin.PreviousOutPoint] = struct{}{} continue } else { t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) } } origintxsha := &txin.PreviousOutPoint.Hash txneededList = append(txneededList, origintxsha) exists, err := db.ExistsTxSha(origintxsha) if err != nil { t.Errorf("ExistsTxSha: unexpected error %v ", err) } if !exists { t.Errorf("referenced tx not found %v (height %v)", origintxsha, height) } _, err = db.FetchTxBySha(origintxsha) if err != nil { t.Errorf("referenced tx not found %v err %v ", origintxsha, err) } } } } for _, stx := range mblock.STransactions { for _, txin := range stx.TxIn { if txin.PreviousOutPoint.Index == uint32(4294967295) { continue } if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) { _, used := opSpentInBlock[txin.PreviousOutPoint] if !used { // Origin tx is in the block and so hasn't been // added yet, continue opSpentInBlock[txin.PreviousOutPoint] = struct{}{} continue } else { t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint) } } origintxsha := &txin.PreviousOutPoint.Hash txneededList = append(txneededList, origintxsha) exists, err := db.ExistsTxSha(origintxsha) if err != nil { t.Errorf("ExistsTxSha: unexpected error %v ", err) } if !exists { t.Errorf("referenced tx not found %v", origintxsha) } _, err = db.FetchTxBySha(origintxsha) if err != nil { t.Errorf("referenced tx not found %v err %v ", origintxsha, err) } } } txlist := db.FetchUnSpentTxByShaList(txneededList) for _, txe := range txlist { if txe.Err != nil { t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err) break out } } } newheight, err := db.InsertBlock(block) if err != nil { t.Errorf("failed to insert block %v err %v", height, err) break out } if newheight != height { t.Errorf("height mismatch expect %v returned %v", height, newheight) 
break out } newSha, blkid, err := db.NewestSha() if err != nil { t.Errorf("failed to obtain latest sha %v %v", height, err) } if blkid != height { t.Errorf("height doe not match latest block height %v %v %v", blkid, height, err) } blkSha := block.Sha() if *newSha != *blkSha { t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err) } lastSha = blkSha } // generate a new block based on the last sha // these block are not verified, so there are a bunch of garbage fields // in the 'generated' block. var bh wire.BlockHeader bh.Version = 0 bh.PrevBlock = *lastSha // Bits, Nonce are not filled in mblk := wire.NewMsgBlock(&bh) hash, _ := chainhash.NewHashFromStr("c23953c56cb2ef8e4698e3ed3b0fc4c837754d3cd16485192d893e35f32626b4") po := wire.NewOutPoint(hash, 0, dcrutil.TxTreeRegular) txI := wire.NewTxIn(po, []byte("garbage")) txO := wire.NewTxOut(50000000, []byte("garbageout")) var tx wire.MsgTx tx.AddTxIn(txI) tx.AddTxOut(txO) mblk.AddTransaction(&tx) blk := dcrutil.NewBlock(mblk) fetchList := []*chainhash.Hash{hash} listReply := db.FetchUnSpentTxByShaList(fetchList) for _, lr := range listReply { if lr.Err != nil { t.Errorf("sha %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } } _, err = db.InsertBlock(blk) if err != nil { t.Errorf("failed to insert phony block %v", err) } // ok, did it 'spend' the tx ? listReply = db.FetchUnSpentTxByShaList(fetchList) for _, lr := range listReply { if lr.Err != nil && lr.Err != database.ErrTxShaMissing { t.Errorf("sha %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } } txlist := blk.Transactions() for _, tx := range txlist { txsha := tx.Sha() txReply, err := db.FetchTxBySha(txsha) if err != nil { t.Errorf("fully spent lookup %v err %v\n", hash, err) } else { for _, lr := range txReply { if lr.Err != nil { t.Errorf("stx %v spent %v err %v\n", lr.Sha, lr.TxSpent, lr.Err) } } } } err = db.DropAfterBlockBySha(lastSha) if err != nil { t.Errorf("failed to drop spending block %v", err) } }
// TestClosed ensure calling the interface functions on a closed database // returns appropriate errors for the interface functions that return errors // and does not panic or otherwise misbehave for functions which do not return // errors. func TestClosed(t *testing.T) { db, err := database.CreateDB("memdb") if err != nil { t.Errorf("Failed to open test database %v", err) return } _, err = db.InsertBlock(dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)) if err != nil { t.Errorf("InsertBlock: %v", err) } if err := db.Close(); err != nil { t.Errorf("Close: unexpected error %v", err) } genesisHash := chaincfg.MainNetParams.GenesisHash if err := db.DropAfterBlockBySha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("DropAfterBlockBySha: unexpected error %v", err) } if _, err := db.ExistsSha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("ExistsSha: Unexpected error: %v", err) } if _, err := db.FetchBlockBySha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("FetchBlockBySha: unexpected error %v", err) } if _, err := db.FetchBlockShaByHeight(0); err != memdb.ErrDbClosed { t.Errorf("FetchBlockShaByHeight: unexpected error %v", err) } if _, err := db.FetchHeightRange(0, 1); err != memdb.ErrDbClosed { t.Errorf("FetchHeightRange: unexpected error %v", err) } genesisCoinbaseTx := chaincfg.MainNetParams.GenesisBlock.Transactions[0] coinbaseHash := genesisCoinbaseTx.TxSha() if _, err := db.ExistsTxSha(&coinbaseHash); err != memdb.ErrDbClosed { t.Errorf("ExistsTxSha: unexpected error %v", err) } if _, err := db.FetchTxBySha(genesisHash); err != memdb.ErrDbClosed { t.Errorf("FetchTxBySha: unexpected error %v", err) } requestHashes := []*chainhash.Hash{genesisHash} reply := db.FetchTxByShaList(requestHashes) if len(reply) != len(requestHashes) { t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+ "got: %d, want: %d", len(reply), len(requestHashes)) } for i, txLR := range reply { wantReply := &database.TxListReply{ Sha: requestHashes[i], Err: memdb.ErrDbClosed, } if !reflect.DeepEqual(wantReply, txLR) { t.Errorf("FetchTxByShaList unexpected reply\ngot: %v\n"+ "want: %v", txLR, wantReply) } } reply = db.FetchUnSpentTxByShaList(requestHashes) if len(reply) != len(requestHashes) { t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+ "got: %d, want: %d", len(reply), len(requestHashes)) } for i, txLR := range reply { wantReply := &database.TxListReply{ Sha: requestHashes[i], Err: memdb.ErrDbClosed, } if !reflect.DeepEqual(wantReply, txLR) { t.Errorf("FetchUnSpentTxByShaList unexpected reply\n"+ "got: %v\nwant: %v", txLR, wantReply) } } if _, _, err := db.NewestSha(); err != memdb.ErrDbClosed { t.Errorf("NewestSha: unexpected error %v", err) } if err := db.Sync(); err != memdb.ErrDbClosed { t.Errorf("Sync: unexpected error %v", err) } if err := db.RollbackClose(); err != memdb.ErrDbClosed { t.Errorf("RollbackClose: unexpected error %v", err) } if err := db.Close(); err != memdb.ErrDbClosed { t.Errorf("Close: unexpected error %v", err) } }
// This example demonstrates creating a new database, using a managed read-write
// transaction to store a block, and using a managed read-only transaction to
// fetch the block.
func Example_blockStorageAndRetrieval() {
	// This example assumes the ffldb driver is imported.
	//
	// import (
	//	"github.com/decred/dcrd/database"
	//	_ "github.com/decred/dcrd/database/ffldb"
	// )

	// Create a database and schedule it to be closed and removed on exit.
	// Typically you wouldn't want to remove the database right away like
	// this, nor put it in the temp directory, but it's done here to ensure
	// the example cleans up after itself.
	dbPath := filepath.Join(os.TempDir(), "exampleblkstorage")
	db, err := database.Create("ffldb", dbPath, wire.MainNet)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.RemoveAll(dbPath)
	defer db.Close()

	// Use the Update function of the database to perform a managed
	// read-write transaction and store the genesis block in the database
	// as an example.
	err = db.Update(func(tx database.Tx) error {
		genesisBlock := chaincfg.MainNetParams.GenesisBlock
		return tx.StoreBlock(dcrutil.NewBlock(genesisBlock))
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	// Use the View function of the database to perform a managed read-only
	// transaction and fetch the block stored above.
	var loadedBlockBytes []byte
	err = db.View(func(tx database.Tx) error {
		genesisHash := chaincfg.MainNetParams.GenesisHash
		blockBytes, err := tx.FetchBlock(genesisHash)
		if err != nil {
			return err
		}

		// As documented, all data fetched from the database is only
		// valid during a database transaction in order to support
		// zero-copy backends. Thus, make a copy of the data so it
		// can be used outside of the transaction.
		loadedBlockBytes = make([]byte, len(blockBytes))
		copy(loadedBlockBytes, blockBytes)
		return nil
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	// Typically at this point, the block could be deserialized via the
	// wire.MsgBlock.Deserialize function or used in its serialized form
	// depending on need. However, for this example, just display the
	// number of serialized bytes to show it was loaded as expected.
	fmt.Printf("Serialized block size: %d bytes\n", len(loadedBlockBytes))

	// Output:
	// Serialized block size: 300 bytes
}
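// The example above stops at the raw bytes. As its closing comment notes, the
// block could be deserialized from that form; the following minimal sketch
// (illustrative, not part of the original example) shows that step using
// wire.MsgBlock.Deserialize, the same call used by the RPC Receive method
// earlier in this collection.
func deserializeLoadedBlock(loadedBlockBytes []byte) (*dcrutil.Block, error) {
	var msgBlock wire.MsgBlock
	if err := msgBlock.Deserialize(bytes.NewReader(loadedBlockBytes)); err != nil {
		return nil, err
	}
	return dcrutil.NewBlock(&msgBlock), nil
}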
// chainSetup is used to create a new db and chain instance with the genesis
// block already inserted. In addition to the new chain instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func chainSetup(dbName string, params *chaincfg.Params) (*blockchain.BlockChain, func(), error) {
	if !isSupportedDbType(testDbType) {
		return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
	}

	// Handle memory database specially since it doesn't need the disk
	// specific handling.
	var db database.Db
	tmdb := new(stake.TicketDB)
	var teardown func()
	if testDbType == "memdb" {
		ndb, err := database.CreateDB(testDbType)
		if err != nil {
			return nil, nil, fmt.Errorf("error creating db: %v", err)
		}
		db = ndb

		// Setup a teardown function for cleaning up. This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			tmdb.Close()
			db.Close()
		}
	} else {
		// Create the root directory for test databases.
		if !fileExists(testDbRoot) {
			if err := os.MkdirAll(testDbRoot, 0700); err != nil {
				err := fmt.Errorf("unable to create test db "+
					"root: %v", err)
				return nil, nil, err
			}
		}

		// Create a new database to store the accepted blocks into.
		dbPath := filepath.Join(testDbRoot, dbName)
		_ = os.RemoveAll(dbPath)
		ndb, err := database.CreateDB(testDbType, dbPath)
		if err != nil {
			return nil, nil, fmt.Errorf("error creating db: %v", err)
		}
		db = ndb

		// Setup a teardown function for cleaning up. This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			dbVersionPath := filepath.Join(testDbRoot, dbName+".ver")
			tmdb.Close()
			db.Sync()
			db.Close()
			os.RemoveAll(dbPath)
			os.Remove(dbVersionPath)
			os.RemoveAll(testDbRoot)
		}
	}

	// Insert the genesis block for the passed network params. This is
	// part of the initial database setup.
	genesisBlock := dcrutil.NewBlock(params.GenesisBlock)
	genesisBlock.SetHeight(int64(0))
	_, err := db.InsertBlock(genesisBlock)
	if err != nil {
		teardown()
		err := fmt.Errorf("failed to insert genesis block: %v", err)
		return nil, nil, err
	}

	// Start the ticket database.
	tmdb.Initialize(params, db)
	tmdb.RescanTicketDB()

	chain := blockchain.New(db, tmdb, params, nil)
	return chain, teardown, nil
}
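// A minimal sketch (not one of the original tests) of how the tests in this
// collection use chainSetup: create the chain, defer the returned teardown,
// and verify that reconnecting the already-inserted genesis block is rejected.
// It assumes simNetParams is the package-level test parameters value used by
// the other tests here; the test name is illustrative.
func testChainSetupUsageSketch(t *testing.T) {
	chain, teardownFunc, err := chainSetup("setupusagesketch", simNetParams)
	if err != nil {
		t.Fatalf("Failed to setup chain instance: %v", err)
	}
	defer teardownFunc()

	// The genesis block is inserted by chainSetup, so attempting to
	// connect it again should fail.
	genesisBlock := simNetParams.GenesisBlock
	if err := chain.CheckConnectBlock(dcrutil.NewBlock(genesisBlock)); err == nil {
		t.Errorf("CheckConnectBlock: did not receive expected error")
	}
}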
// TestPersistence ensures that values stored are still valid after closing and // reopening the database. func TestPersistence(t *testing.T) { t.Parallel() // Create a new database to run tests against. dbPath := filepath.Join(os.TempDir(), "ffldb-persistencetest") _ = os.RemoveAll(dbPath) db, err := database.Create(dbType, dbPath, blockDataNet) if err != nil { t.Errorf("Failed to create test database (%s) %v", dbType, err) return } defer os.RemoveAll(dbPath) defer db.Close() // Create a bucket, put some values into it, and store a block so they // can be tested for existence on re-open. bucket1Key := []byte("bucket1") storeValues := map[string]string{ "b1key1": "foo1", "b1key2": "foo2", "b1key3": "foo3", } genesisBlock := dcrutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) genesisHash := chaincfg.MainNetParams.GenesisHash err = db.Update(func(tx database.Tx) error { metadataBucket := tx.Metadata() if metadataBucket == nil { return fmt.Errorf("Metadata: unexpected nil bucket") } bucket1, err := metadataBucket.CreateBucket(bucket1Key) if err != nil { return fmt.Errorf("CreateBucket: unexpected error: %v", err) } for k, v := range storeValues { err := bucket1.Put([]byte(k), []byte(v)) if err != nil { return fmt.Errorf("Put: unexpected error: %v", err) } } if err := tx.StoreBlock(genesisBlock); err != nil { return fmt.Errorf("StoreBlock: unexpected error: %v", err) } return nil }) if err != nil { t.Errorf("Update: unexpected error: %v", err) return } // Close and reopen the database to ensure the values persist. db.Close() db, err = database.Open(dbType, dbPath, blockDataNet) if err != nil { t.Errorf("Failed to open test database (%s) %v", dbType, err) return } defer db.Close() // Ensure the values previously stored in the 3rd namespace still exist // and are correct. err = db.View(func(tx database.Tx) error { metadataBucket := tx.Metadata() if metadataBucket == nil { return fmt.Errorf("Metadata: unexpected nil bucket") } bucket1 := metadataBucket.Bucket(bucket1Key) if bucket1 == nil { return fmt.Errorf("Bucket1: unexpected nil bucket") } for k, v := range storeValues { gotVal := bucket1.Get([]byte(k)) if !reflect.DeepEqual(gotVal, []byte(v)) { return fmt.Errorf("Get: key '%s' does not "+ "match expected value - got %s, want %s", k, gotVal, v) } } genesisBlockBytes, _ := genesisBlock.Bytes() gotBytes, err := tx.FetchBlock(genesisHash) if err != nil { return fmt.Errorf("FetchBlock: unexpected error: %v", err) } if !reflect.DeepEqual(gotBytes, genesisBlockBytes) { return fmt.Errorf("FetchBlock: stored block mismatch") } return nil }) if err != nil { t.Errorf("View: unexpected error: %v", err) return } }
func TestLimitAndSkipFetchTxsForAddr(t *testing.T) { testDb, err := setUpTestDb(t, "tstdbtxaddr") if err != nil { t.Errorf("Failed to open test database %v", err) return } defer testDb.cleanUpFunc() _, err = testDb.db.InsertBlock(testDb.blocks[0]) if err != nil { t.Fatalf("failed to insert initial block") } // Insert a block with some fake test transactions. The block will have // 10 copies of a fake transaction involving same address. addrString := "DsZEAobx6qJ7K2qaHZBA2vBn66Nor8KYAKk" targetAddr, err := dcrutil.DecodeAddress(addrString, &chaincfg.MainNetParams) if err != nil { t.Fatalf("Unable to decode test address: %v", err) } outputScript, err := txscript.PayToAddrScript(targetAddr) if err != nil { t.Fatalf("Unable make test pkScript %v", err) } fakeTxOut := wire.NewTxOut(10, outputScript) var emptyHash chainhash.Hash fakeHeader := wire.NewBlockHeader(0, &emptyHash, &emptyHash, &emptyHash, 1, [6]byte{}, 1, 1, 1, 1, 1, 1, 1, 1, 1, [36]byte{}) msgBlock := wire.NewMsgBlock(fakeHeader) for i := 0; i < 10; i++ { mtx := wire.NewMsgTx() mtx.AddTxOut(fakeTxOut) msgBlock.AddTransaction(mtx) } lastBlock := testDb.blocks[0] msgBlock.Header.PrevBlock = *lastBlock.Sha() // Insert the test block into the DB. testBlock := dcrutil.NewBlock(msgBlock) newheight, err := testDb.db.InsertBlock(testBlock) if err != nil { t.Fatalf("Unable to insert block into db: %v", err) } // Create and insert an address index for out test addr. txLoc, _, _ := testBlock.TxLoc() index := make(database.BlockAddrIndex, len(txLoc)) for i := range testBlock.Transactions() { var hash160 [ripemd160.Size]byte scriptAddr := targetAddr.ScriptAddress() copy(hash160[:], scriptAddr[:]) txAddrIndex := &database.TxAddrIndex{ Hash160: hash160, Height: uint32(newheight), TxOffset: uint32(txLoc[i].TxStart), TxLen: uint32(txLoc[i].TxLen), } index[i] = txAddrIndex } blkSha := testBlock.Sha() err = testDb.db.UpdateAddrIndexForBlock(blkSha, newheight, index) if err != nil { t.Fatalf("UpdateAddrIndexForBlock: failed to index"+ " addrs for block #%d (%s) "+ "err %v", newheight, blkSha, err) return } // Try skipping the first 4 results, should get 6 in return. txReply, err := testDb.db.FetchTxsForAddr(targetAddr, 4, 100000) if err != nil { t.Fatalf("Unable to fetch transactions for address: %v", err) } if len(txReply) != 6 { t.Fatalf("Did not correctly skip forward in txs for address reply"+ " got %v txs, expected %v", len(txReply), 6) } // Limit the number of results to 3. txReply, err = testDb.db.FetchTxsForAddr(targetAddr, 0, 3) if err != nil { t.Fatalf("Unable to fetch transactions for address: %v", err) } if len(txReply) != 3 { t.Fatalf("Did not correctly limit in txs for address reply"+ " got %v txs, expected %v", len(txReply), 3) } // Skip 1, limit 5. txReply, err = testDb.db.FetchTxsForAddr(targetAddr, 1, 5) if err != nil { t.Fatalf("Unable to fetch transactions for address: %v", err) } if len(txReply) != 5 { t.Fatalf("Did not correctly limit in txs for address reply"+ " got %v txs, expected %v", len(txReply), 5) } }
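// The skip/limit arguments exercised above form a simple pagination API. The
// sketch below (illustrative only, and assuming that a page shorter than the
// requested limit means no further results remain) shows how a caller might
// walk all transactions for an address in fixed-size pages using the same
// FetchTxsForAddr method; it merely counts the results rather than inspecting
// the reply entries.
func countTxsForAddr(db database.Db, addr dcrutil.Address) (int, error) {
	const pageSize = 100
	total := 0
	for skip := 0; ; skip += pageSize {
		txReply, err := db.FetchTxsForAddr(addr, skip, pageSize)
		if err != nil {
			return 0, err
		}
		total += len(txReply)

		// A short page is taken to mean there are no more results.
		if len(txReply) < pageSize {
			break
		}
	}
	return total, nil
}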
// TestFullBlocks ensures all tests generated by the fullblocktests package // have the expected result when processed via ProcessBlock. func TestFullBlocks(t *testing.T) { tests, err := fullblocktests.Generate() if err != nil { t.Fatalf("failed to generate tests: %v", err) } // Create a new database and chain instance to run tests against. chain, teardownFunc, err := chainSetup("fullblocktest", &chaincfg.SimNetParams) if err != nil { t.Fatalf("Failed to setup chain instance: %v", err) } defer teardownFunc() // testAcceptedBlock attempts to process the block in the provided test // instance and ensures that it was accepted according to the flags // specified in the test. testAcceptedBlock := func(item fullblocktests.AcceptedBlock) { blockHeight := item.Block.Header.Height block := dcrutil.NewBlock(item.Block) t.Logf("Testing block %s (hash %s, height %d)", item.Name, block.Sha(), blockHeight) isMainChain, isOrphan, err := chain.ProcessBlock(block, blockchain.BFNone) if err != nil { t.Fatalf("block %q (hash %s, height %d) should "+ "have been accepted: %v", item.Name, block.Sha(), blockHeight, err) } // Ensure the main chain and orphan flags match the values // specified in the test. if isMainChain != item.IsMainChain { t.Fatalf("block %q (hash %s, height %d) unexpected main "+ "chain flag -- got %v, want %v", item.Name, block.Sha(), blockHeight, isMainChain, item.IsMainChain) } if isOrphan != item.IsOrphan { t.Fatalf("block %q (hash %s, height %d) unexpected "+ "orphan flag -- got %v, want %v", item.Name, block.Sha(), blockHeight, isOrphan, item.IsOrphan) } } // testRejectedBlock attempts to process the block in the provided test // instance and ensures that it was rejected with the reject code // specified in the test. testRejectedBlock := func(item fullblocktests.RejectedBlock) { blockHeight := item.Block.Header.Height block := dcrutil.NewBlock(item.Block) t.Logf("Testing block %s (hash %s, height %d)", item.Name, block.Sha(), blockHeight) _, _, err := chain.ProcessBlock(block, blockchain.BFNone) if err == nil { t.Fatalf("block %q (hash %s, height %d) should not "+ "have been accepted", item.Name, block.Sha(), blockHeight) } // Ensure the error code is of the expected type and the reject // code matches the value specified in the test instance. rerr, ok := err.(blockchain.RuleError) if !ok { t.Fatalf("block %q (hash %s, height %d) returned "+ "unexpected error type -- got %T, want "+ "blockchain.RuleError", item.Name, block.Sha(), blockHeight, err) } if rerr.ErrorCode != item.RejectCode { t.Fatalf("block %q (hash %s, height %d) does not have "+ "expected reject code -- got %v, want %v", item.Name, block.Sha(), blockHeight, rerr.ErrorCode, item.RejectCode) } } // testOrphanOrRejectedBlock attempts to process the block in the // provided test instance and ensures that it was either accepted as an // orphan or rejected with a rule violation. testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) { blockHeight := item.Block.Header.Height block := dcrutil.NewBlock(item.Block) t.Logf("Testing block %s (hash %s, height %d)", item.Name, block.Sha(), blockHeight) _, isOrphan, err := chain.ProcessBlock(block, blockchain.BFNone) if err != nil { // Ensure the error code is of the expected type. 
if _, ok := err.(blockchain.RuleError); !ok { t.Fatalf("block %q (hash %s, height %d) "+ "returned unexpected error type -- "+ "got %T, want blockchain.RuleError", item.Name, block.Sha(), blockHeight, err) } } if !isOrphan { t.Fatalf("block %q (hash %s, height %d) was accepted, "+ "but is not considered an orphan", item.Name, block.Sha(), blockHeight) } } // testExpectedTip ensures the current tip of the blockchain is the // block specified in the provided test instance. testExpectedTip := func(item fullblocktests.ExpectedTip) { blockHeight := item.Block.Header.Height block := dcrutil.NewBlock(item.Block) t.Logf("Testing tip for block %s (hash %s, height %d)", item.Name, block.Sha(), blockHeight) // Ensure hash and height match. best := chain.BestSnapshot() if *best.Hash != item.Block.BlockSha() || best.Height != int64(item.Block.Header.Height) { t.Fatalf("block %q (hash %s, height %d) should be "+ "the current tip -- got (hash %s, height %d)", item.Name, block.Sha(), blockHeight, best.Hash, best.Height) } } for testNum, test := range tests { for itemNum, item := range test { switch item := item.(type) { case fullblocktests.AcceptedBlock: testAcceptedBlock(item) case fullblocktests.RejectedBlock: testRejectedBlock(item) case fullblocktests.OrphanOrRejectedBlock: testOrphanOrRejectedBlock(item) case fullblocktests.ExpectedTip: testExpectedTip(item) default: t.Fatalf("test #%d, item #%d is not one of "+ "the supported test instance types -- "+ "got type: %T", testNum, itemNum, item) } } } }