Example 1
// TestNewBlockFromBytes tests creation of a Block from serialized bytes.
func TestNewBlockFromBytes(t *testing.T) {
	// Serialize the test block.
	var block100000Buf bytes.Buffer
	err := Block100000.Serialize(&block100000Buf)
	if err != nil {
		t.Errorf("Serialize: %v", err)
	}
	block100000Bytes := block100000Buf.Bytes()

	// Create a new block from the serialized bytes.
	b, err := btcutil.NewBlockFromBytes(block100000Bytes)
	if err != nil {
		t.Errorf("NewBlockFromBytes: %v", err)
		return
	}

	// Ensure we get the same data back out.
	serializedBytes, err := b.Bytes()
	if err != nil {
		t.Errorf("Bytes: %v", err)
		return
	}
	if !bytes.Equal(serializedBytes, block100000Bytes) {
		t.Errorf("Bytes: wrong bytes - got %v, want %v",
			spew.Sdump(serializedBytes),
			spew.Sdump(block100000Bytes))
	}

	// Ensure the generated MsgBlock is correct.
	if msgBlock := b.MsgBlock(); !reflect.DeepEqual(msgBlock, &Block100000) {
		t.Errorf("MsgBlock: mismatched MsgBlock - got %v, want %v",
			spew.Sdump(msgBlock), spew.Sdump(&Block100000))
	}
}
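For reference, the round trip this test exercises looks roughly as follows in application code. This is a minimal sketch, not taken from any of the examples; it assumes the btcsuite import paths and the newer Hash/Transactions accessors used in later examples.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcutil"
)

func main() {
	// Wrap the main net genesis block and serialize it to raw bytes.
	genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	raw, err := genesis.Bytes()
	if err != nil {
		panic(err)
	}

	// Rebuild a Block from the serialized bytes; malformed data is
	// reported here as an error.
	blk, err := btcutil.NewBlockFromBytes(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(blk.Hash(), len(blk.Transactions()))
}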
Example 2
// processBlock potentially imports the block into the database.  It first
// deserializes the raw block while checking for errors.  Already known blocks
// are skipped and orphan blocks are considered errors.  Returns whether the
// block was imported along with any potential errors.
//
// NOTE: This is not a safe import as it does not verify chain rules.
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
	// Deserialize the block which includes checks for malformed blocks.
	block, err := btcutil.NewBlockFromBytes(serializedBlock)
	if err != nil {
		return false, err
	}

	// update progress statistics
	bi.lastBlockTime = block.MsgBlock().Header.Timestamp
	bi.receivedLogTx += int64(len(block.MsgBlock().Transactions))

	// Skip blocks that already exist.
	var exists bool
	err = bi.db.View(func(tx database.Tx) error {
		exists, err = tx.HasBlock(block.Sha())
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return false, err
	}
	if exists {
		return false, nil
	}

	// Don't bother trying to process orphans.
	prevHash := &block.MsgBlock().Header.PrevBlock
	if !prevHash.IsEqual(&zeroHash) {
		var exists bool
		err := bi.db.View(func(tx database.Tx) error {
			exists, err = tx.HasBlock(prevHash)
			if err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return false, err
		}
		if !exists {
			return false, fmt.Errorf("import file contains block "+
				"%v which does not link to the available "+
				"block chain", prevHash)
		}
	}

	// Put the blocks into the database with no checking of chain rules.
	err = bi.db.Update(func(tx database.Tx) error {
		return tx.StoreBlock(block)
	})
	if err != nil {
		return false, err
	}

	return true, nil
}
Example 3
func TestMerkleBlock3(t *testing.T) {
	blockStr := "0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b" +
		"4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdc" +
		"c96b2c3ff60abe184f196367291b4d4c86041b8fa45d630101000000010" +
		"00000000000000000000000000000000000000000000000000000000000" +
		"0000ffffffff08044c86041b020a02ffffffff0100f2052a01000000434" +
		"104ecd3229b0571c3be876feaac0442a9f13c5a572742927af1dc623353" +
		"ecf8c202225f64868137a18cdd85cbbb4c74fbccfd4f49639cf1bdc94a5" +
		"672bb15ad5d4cac00000000"
	blockBytes, err := hex.DecodeString(blockStr)
	if err != nil {
		t.Errorf("TestMerkleBlock3 DecodeString failed: %v", err)
		return
	}
	blk, err := btcutil.NewBlockFromBytes(blockBytes)
	if err != nil {
		t.Errorf("TestMerkleBlock3 NewBlockFromBytes failed: %v", err)
		return
	}

	f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)

	inputStr := "63194f18be0af63f2c6bc9dc0f777cbefed3d9415c4af83f3ee3a3d669c00cb5"
	sha, err := wire.NewShaHashFromStr(inputStr)
	if err != nil {
		t.Errorf("TestMerkleBlock3 NewShaHashFromStr failed: %v", err)
		return
	}

	f.AddShaHash(sha)

	mBlock, _ := bloom.NewMerkleBlock(blk, f)

	wantStr := "0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4" +
		"b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc" +
		"96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50c" +
		"c069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196" +
		"30101"
	want, err := hex.DecodeString(wantStr)
	if err != nil {
		t.Errorf("TestMerkleBlock3 DecodeString failed: %v", err)
		return
	}

	got := bytes.NewBuffer(nil)
	err = mBlock.BtcEncode(got, wire.ProtocolVersion)
	if err != nil {
		t.Errorf("TestMerkleBlock3 BtcEncode failed: %v", err)
		return
	}

	if !bytes.Equal(want, got.Bytes()) {
		t.Errorf("TestMerkleBlock3 failed merkle block comparison: "+
			"got %v want %v", got.Bytes, want)
		return
	}
}
Example 4
// DropAfterBlockBySha will remove any blocks from the database after
// the given block.
func (db *LevelDb) DropAfterBlockBySha(sha *wire.ShaHash) (rerr error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	startheight := db.nextBlock - 1

	keepidx, err := db.getBlkLoc(sha)
	if err != nil {
		// Should the error here be normalized?
		log.Tracef("block loc failed %v ", sha)
		return err
	}

	for height := startheight; height > keepidx; height = height - 1 {
		var blk *btcutil.Block
		blksha, buf, err := db.getBlkByHeight(height)
		if err != nil {
			return err
		}
		blk, err = btcutil.NewBlockFromBytes(buf)
		if err != nil {
			return err
		}

		for _, tx := range blk.MsgBlock().Transactions {
			err = db.unSpend(tx)
			if err != nil {
				return err
			}
		}
		// Rather than iterating the list of transactions backward, iterate it twice.
		for _, tx := range blk.Transactions() {
			var txUo txUpdateObj
			txUo.delete = true
			db.txUpdateMap[*tx.Sha()] = &txUo
		}
		db.lBatch().Delete(shaBlkToKey(blksha))
		db.lBatch().Delete(int64ToKey(height))
	}

	// update the last block cache
	db.lastBlkShaCached = true
	db.lastBlkSha = *sha
	db.lastBlkIdx = keepidx
	db.nextBlock = keepidx + 1

	return nil
}
Example 5
// loadBlocks reads files containing bitcoin block data (optionally
// bzip2-compressed, but otherwise in the format bitcoind writes) from disk
// and returns them as an array of btcutil.Block.  This is largely borrowed
// from the test code in btcdb.
func loadBlocks(filename string) (blocks []*btcutil.Block, err error) {
	filename = filepath.Join("testdata/", filename)

	var network = wire.MainNet
	var dr io.Reader
	var fi io.ReadCloser

	fi, err = os.Open(filename)
	if err != nil {
		return
	}

	if strings.HasSuffix(filename, ".bz2") {
		dr = bzip2.NewReader(fi)
	} else {
		dr = fi
	}
	defer fi.Close()

	var block *btcutil.Block

	err = nil
	for height := int64(1); err == nil; height++ {
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// hit end of file at expected offset: no warning
			height--
			err = nil
			break
		}
		if err != nil {
			break
		}
		if rintbuf != uint32(network) {
			break
		}
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err != nil {
			break
		}
		blocklen := rintbuf

		rbytes := make([]byte, blocklen)

		// Read the full block.
		_, err = io.ReadFull(dr, rbytes)
		if err != nil {
			break
		}

		block, err = btcutil.NewBlockFromBytes(rbytes)
		if err != nil {
			return
		}
		blocks = append(blocks, block)
	}

	return
}
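The loaders in Examples 5, 9, and 12 all consume the bitcoind-style dump layout: a little-endian network magic, a little-endian block length, then the raw serialized block. The sketch below shows a writer for that layout; the function name is an assumption, it relies on the same imports as the loader above (encoding/binary, io, wire, btcutil), and any compression (for example producing a .bz2 file) is left to a separate step.

// writeBlockFile writes blocks in the <network magic><block length><raw block>
// layout that the loaders above expect.
func writeBlockFile(w io.Writer, network wire.BitcoinNet, blocks []*btcutil.Block) error {
	for _, blk := range blocks {
		raw, err := blk.Bytes()
		if err != nil {
			return err
		}
		if err := binary.Write(w, binary.LittleEndian, uint32(network)); err != nil {
			return err
		}
		if err := binary.Write(w, binary.LittleEndian, uint32(len(raw))); err != nil {
			return err
		}
		if _, err := w.Write(raw); err != nil {
			return err
		}
	}
	return nil
}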
Example 6
// processBlock potentially imports the block into the database.  It first
// deserializes the raw block while checking for errors.  Already known blocks
// are skipped and orphan blocks are considered errors.  Finally, it runs the
// block through the chain rules to ensure it follows all rules and matches
// up to the known checkpoint.  Returns whether the block was imported along
// with any potential errors.
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
	// Deserialize the block which includes checks for malformed blocks.
	block, err := btcutil.NewBlockFromBytes(serializedBlock)
	if err != nil {
		return false, err
	}

	// update progress statistics
	bi.lastBlockTime = block.MsgBlock().Header.Timestamp
	bi.receivedLogTx += int64(len(block.MsgBlock().Transactions))

	// Skip blocks that already exist.
	blockHash := block.Hash()
	exists, err := bi.chain.HaveBlock(blockHash)
	if err != nil {
		return false, err
	}
	if exists {
		return false, nil
	}

	// Don't bother trying to process orphans.
	prevHash := &block.MsgBlock().Header.PrevBlock
	if !prevHash.IsEqual(&zeroHash) {
		exists, err := bi.chain.HaveBlock(prevHash)
		if err != nil {
			return false, err
		}
		if !exists {
			return false, fmt.Errorf("import file contains block "+
				"%v which does not link to the available "+
				"block chain", prevHash)
		}
	}

	// Ensure the block follows all of the chain rules and matches up to the
	// known checkpoints.
	isMainChain, isOrphan, err := bi.chain.ProcessBlock(block,
		blockchain.BFFastAdd)
	if err != nil {
		return false, err
	}
	if !isMainChain {
		return false, fmt.Errorf("import file contains an block that "+
			"does not extend the main chain: %v", blockHash)
	}
	if isOrphan {
		return false, fmt.Errorf("import file contains an orphan "+
			"block: %v", blockHash)
	}

	return true, nil
}
Example 7
// fetchBlockBySha returns a btcutil.Block for the given block hash.
// Must be called with db lock held.
func (db *LevelDb) fetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err error) {
	buf, height, err := db.fetchSha(sha)
	if err != nil {
		return
	}

	blk, err = btcutil.NewBlockFromBytes(buf)
	if err != nil {
		return
	}
	blk.SetHeight(height)

	return
}
Example 8
// dbFetchBlockByHeight uses an existing database transaction to retrieve the
// raw block for the provided height, deserialize it, and return a btcutil.Block
// with the height set.
func dbFetchBlockByHeight(dbTx database.Tx, height int32) (*btcutil.Block, error) {
	// First find the hash associated with the provided height in the index.
	hash, err := dbFetchHashByHeight(dbTx, height)
	if err != nil {
		return nil, err
	}

	// Load the raw block bytes from the database.
	blockBytes, err := dbTx.FetchBlock(hash)
	if err != nil {
		return nil, err
	}

	// Create the encapsulated block and set the height appropriately.
	block, err := btcutil.NewBlockFromBytes(blockBytes)
	if err != nil {
		return nil, err
	}
	block.SetHeight(height)

	return block, nil
}
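Helpers like this operate on an open database transaction, so callers normally wrap them in a managed read-only View, much like Example 2 does for HasBlock. A minimal sketch of such a caller follows; the function name is hypothetical.

// fetchBlockAtHeight runs dbFetchBlockByHeight inside a read-only database
// transaction and returns the resulting block.
func fetchBlockAtHeight(db database.DB, height int32) (*btcutil.Block, error) {
	var block *btcutil.Block
	err := db.View(func(dbTx database.Tx) error {
		var err error
		block, err = dbFetchBlockByHeight(dbTx, height)
		return err
	})
	return block, err
}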
Example 9
func loadBlocks(t *testing.T, file string) (blocks []*btcutil.Block, err error) {
	if len(savedblocks) != 0 {
		blocks = savedblocks
		return
	}
	testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2")
	var dr io.Reader
	var fi io.ReadCloser
	fi, err = os.Open(testdatafile)
	if err != nil {
		t.Errorf("failed to open file %v, err %v", testdatafile, err)
		return
	}
	if strings.HasSuffix(testdatafile, ".bz2") {
		z := bzip2.NewReader(fi)
		dr = z
	} else {
		dr = fi
	}

	defer func() {
		if err := fi.Close(); err != nil {
			t.Errorf("failed to close file %v %v", testdatafile, err)
		}
	}()

	// Set the first block as the genesis block.
	genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	blocks = append(blocks, genesis)

	var block *btcutil.Block
	err = nil
	for height := int32(1); err == nil; height++ {
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// hit end of file at expected offset: no warning
			height--
			err = nil
			break
		}
		if err != nil {
			t.Errorf("failed to load network type, err %v", err)
			break
		}
		if rintbuf != uint32(network) {
			t.Errorf("Block doesn't match network: %v expects %v",
				rintbuf, network)
			break
		}
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err != nil {
			break
		}
		blocklen := rintbuf

		rbytes := make([]byte, blocklen)

		// Read the full block.
		_, err = io.ReadFull(dr, rbytes)
		if err != nil {
			break
		}

		block, err = btcutil.NewBlockFromBytes(rbytes)
		if err != nil {
			t.Errorf("failed to parse block %v", height)
			return
		}
		blocks = append(blocks, block)
	}
	savedblocks = blocks
	return
}
Example 10
func TestFilterInsertP2PubKeyOnly(t *testing.T) {
	blockStr := "0100000082bb869cf3a793432a66e826e05a6fc37469f8efb7421dc" +
		"880670100000000007f16c5962e8bd963659c793ce370d95f093bc7e367" +
		"117b3c30c1f8fdd0d9728776381b4d4c86041b554b85290701000000010" +
		"00000000000000000000000000000000000000000000000000000000000" +
		"0000ffffffff07044c86041b0136ffffffff0100f2052a0100000043410" +
		"4eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c876f2" +
		"c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a22522" +
		"47d97a46a91ac000000000100000001bcad20a6a29827d1424f08989255" +
		"120bf7f3e9e3cdaaa6bb31b0737fe048724300000000494830450220356" +
		"e834b046cadc0f8ebb5a8a017b02de59c86305403dad52cd77b55af062e" +
		"a10221009253cd6c119d4729b77c978e1e2aa19f5ea6e0e52b3f16e32fa" +
		"608cd5bab753901ffffffff02008d380c010000001976a9142b4b8072ec" +
		"bba129b6453c63e129e643207249ca88ac0065cd1d000000001976a9141" +
		"b8dd13b994bcfc787b32aeadf58ccb3615cbd5488ac0000000001000000" +
		"03fdacf9b3eb077412e7a968d2e4f11b9a9dee312d666187ed77ee7d26a" +
		"f16cb0b000000008c493046022100ea1608e70911ca0de5af51ba57ad23" +
		"b9a51db8d28f82c53563c56a05c20f5a87022100a8bdc8b4a8acc8634c6" +
		"b420410150775eb7f2474f5615f7fccd65af30f310fbf01410465fdf49e" +
		"29b06b9a1582287b6279014f834edc317695d125ef623c1cc3aaece245b" +
		"d69fcad7508666e9c74a49dc9056d5fc14338ef38118dc4afae5fe2c585" +
		"caffffffff309e1913634ecb50f3c4f83e96e70b2df071b497b8973a3e7" +
		"5429df397b5af83000000004948304502202bdb79c596a9ffc24e96f438" +
		"6199aba386e9bc7b6071516e2b51dda942b3a1ed022100c53a857e76b72" +
		"4fc14d45311eac5019650d415c3abb5428f3aae16d8e69bec2301ffffff" +
		"ff2089e33491695080c9edc18a428f7d834db5b6d372df13ce2b1b0e0cb" +
		"cb1e6c10000000049483045022100d4ce67c5896ee251c810ac1ff9cecc" +
		"d328b497c8f553ab6e08431e7d40bad6b5022033119c0c2b7d792d31f11" +
		"87779c7bd95aefd93d90a715586d73801d9b47471c601ffffffff010071" +
		"4460030000001976a914c7b55141d097ea5df7a0ed330cf794376e53ec8" +
		"d88ac0000000001000000045bf0e214aa4069a3e792ecee1e1bf0c1d397" +
		"cde8dd08138f4b72a00681743447000000008b48304502200c45de8c4f3" +
		"e2c1821f2fc878cba97b1e6f8807d94930713aa1c86a67b9bf1e4022100" +
		"8581abfef2e30f957815fc89978423746b2086375ca8ecf359c85c2a5b7" +
		"c88ad01410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf64852" +
		"61c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d" +
		"3ae37079b794a92d7ec95ffffffffd669f7d7958d40fc59d2253d88e0f2" +
		"48e29b599c80bbcec344a83dda5f9aa72c000000008a473044022078124" +
		"c8beeaa825f9e0b30bff96e564dd859432f2d0cb3b72d3d5d93d38d7e93" +
		"0220691d233b6c0f995be5acb03d70a7f7a65b6bc9bdd426260f38a1346" +
		"669507a3601410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6" +
		"485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270e" +
		"fb1d3ae37079b794a92d7ec95fffffffff878af0d93f5229a68166cf051" +
		"fd372bb7a537232946e0a46f53636b4dafdaa4000000008c49304602210" +
		"0c717d1714551663f69c3c5759bdbb3a0fcd3fab023abc0e522fe6440de" +
		"35d8290221008d9cbe25bffc44af2b18e81c58eb37293fd7fe1c2e7b46f" +
		"c37ee8c96c50ab1e201410462bb73f76ca0994fcb8b4271e6fb7561f5c0" +
		"f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f" +
		"4d87270efb1d3ae37079b794a92d7ec95ffffffff27f2b668859cd7f2f8" +
		"94aa0fd2d9e60963bcd07c88973f425f999b8cbfd7a1e2000000008c493" +
		"046022100e00847147cbf517bcc2f502f3ddc6d284358d102ed20d47a8a" +
		"a788a62f0db780022100d17b2d6fa84dcaf1c95d88d7e7c30385aecf415" +
		"588d749afd3ec81f6022cecd701410462bb73f76ca0994fcb8b4271e6fb" +
		"7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018" +
		"ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffff0100c817a8" +
		"040000001976a914b6efd80d99179f4f4ff6f4dd0a007d018c385d2188a" +
		"c000000000100000001834537b2f1ce8ef9373a258e10545ce5a50b758d" +
		"f616cd4356e0032554ebd3c4000000008b483045022100e68f422dd7c34" +
		"fdce11eeb4509ddae38201773dd62f284e8aa9d96f85099d0b002202243" +
		"bd399ff96b649a0fad05fa759d6a882f0af8c90cf7632c2840c29070aec" +
		"20141045e58067e815c2f464c6a2a15f987758374203895710c2d452442" +
		"e28496ff38ba8f5fd901dc20e29e88477167fe4fc299bf818fd0d9e1632" +
		"d467b2a3d9503b1aaffffffff0280d7e636030000001976a914f34c3e10" +
		"eb387efe872acb614c89e78bfca7815d88ac404b4c00000000001976a91" +
		"4a84e272933aaf87e1715d7786c51dfaeb5b65a6f88ac00000000010000" +
		"000143ac81c8e6f6ef307dfe17f3d906d999e23e0189fda838c5510d850" +
		"927e03ae7000000008c4930460221009c87c344760a64cb8ae6685a3eec" +
		"2c1ac1bed5b88c87de51acd0e124f266c16602210082d07c037359c3a25" +
		"7b5c63ebd90f5a5edf97b2ac1c434b08ca998839f346dd40141040ba7e5" +
		"21fa7946d12edbb1d1e95a15c34bd4398195e86433c92b431cd315f455f" +
		"e30032ede69cad9d1e1ed6c3c4ec0dbfced53438c625462afb792dcb098" +
		"544bffffffff0240420f00000000001976a9144676d1b820d63ec272f19" +
		"00d59d43bc6463d96f888ac40420f00000000001976a914648d04341d00" +
		"d7968b3405c034adc38d4d8fb9bd88ac00000000010000000248cc91750" +
		"1ea5c55f4a8d2009c0567c40cfe037c2e71af017d0a452ff705e3f10000" +
		"00008b483045022100bf5fdc86dc5f08a5d5c8e43a8c9d5b1ed8c65562e" +
		"280007b52b133021acd9acc02205e325d613e555f772802bf413d36ba80" +
		"7892ed1a690a77811d3033b3de226e0a01410429fa713b124484cb2bd7b" +
		"5557b2c0b9df7b2b1fee61825eadc5ae6c37a9920d38bfccdc7dc3cb0c4" +
		"7d7b173dbc9db8d37db0a33ae487982c59c6f8606e9d1791ffffffff41e" +
		"d70551dd7e841883ab8f0b16bf04176b7d1480e4f0af9f3d4c3595768d0" +
		"68000000008b4830450221008513ad65187b903aed1102d1d0c47688127" +
		"658c51106753fed0151ce9c16b80902201432b9ebcb87bd04ceb2de6603" +
		"5fbbaf4bf8b00d1cfe41f1a1f7338f9ad79d210141049d4cf80125bf50b" +
		"e1709f718c07ad15d0fc612b7da1f5570dddc35f2a352f0f27c978b0682" +
		"0edca9ef982c35fda2d255afba340068c5035552368bc7200c1488fffff" +
		"fff0100093d00000000001976a9148edb68822f1ad580b043c7b3df2e40" +
		"0f8699eb4888ac00000000"
	blockBytes, err := hex.DecodeString(blockStr)
	if err != nil {
		t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err)
		return
	}
	block, err := btcutil.NewBlockFromBytes(blockBytes)
	if err != nil {
		t.Errorf("TestFilterInsertP2PubKeyOnly NewBlockFromBytes failed: %v", err)
		return
	}

	f := bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateP2PubkeyOnly)

	// Generation pubkey
	inputStr := "04eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c" +
		"876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a" +
		"2252247d97a46a91"
	inputBytes, err := hex.DecodeString(inputStr)
	if err != nil {
		t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err)
		return
	}
	f.Add(inputBytes)

	// Output address of 4th transaction
	inputStr = "b6efd80d99179f4f4ff6f4dd0a007d018c385d21"
	inputBytes, err = hex.DecodeString(inputStr)
	if err != nil {
		t.Errorf("TestFilterInsertP2PubKeyOnly DecodeString failed: %v", err)
		return
	}
	f.Add(inputBytes)

	// Ignore return value -- this is just used to update the filter.
	_, _ = bloom.NewMerkleBlock(block, f)

	// We should match the generation pubkey
	inputStr = "147caa76786596590baa4e98f5d9f48b86c7765e489f7a6ff3360fe5c674360b"
	sha, err := wire.NewShaHashFromStr(inputStr)
	if err != nil {
		t.Errorf("TestMerkleBlockP2PubKeyOnly NewShaHashFromStr failed: %v", err)
		return
	}
	outpoint := wire.NewOutPoint(sha, 0)
	if !f.MatchesOutPoint(outpoint) {
		t.Errorf("TestMerkleBlockP2PubKeyOnly didn't match the generation "+
			"outpoint %s", inputStr)
		return
	}

	// We should not match the 4th transaction, which is not p2pk
	inputStr = "02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"
	sha, err = wire.NewShaHashFromStr(inputStr)
	if err != nil {
		t.Errorf("TestMerkleBlockP2PubKeyOnly NewShaHashFromStr failed: %v", err)
		return
	}
	outpoint = wire.NewOutPoint(sha, 0)
	if f.MatchesOutPoint(outpoint) {
		t.Errorf("TestMerkleBlockP2PubKeyOnly matched outpoint %s", inputStr)
		return
	}
}
Example 11
// Init initializes the enabled indexes.  This is called during chain
// initialization and primarily consists of catching up all indexes to the
// current best chain tip.  This is necessary since each index can be disabled
// and re-enabled at any time, and attempting to catch up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) Init(chain *blockchain.BlockChain) error {
	// Nothing to do when no indexes are enabled.
	if len(m.enabledIndexes) == 0 {
		return nil
	}

	// Finish any drops that were previously interrupted.
	if err := m.maybeFinishDrops(); err != nil {
		return err
	}

	// Create the initial state for the indexes as needed.
	err := m.db.Update(func(dbTx database.Tx) error {
		// Create the bucket for the current tips as needed.
		meta := dbTx.Metadata()
		_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
		if err != nil {
			return err
		}

		return m.maybeCreateIndexes(dbTx)
	})
	if err != nil {
		return err
	}

	// Initialize each of the enabled indexes.
	for _, indexer := range m.enabledIndexes {
		if err := indexer.Init(); err != nil {
			return err
		}
	}

	// Rollback indexes to the main chain if their tip is an orphaned fork.
	// This is fairly unlikely, but it can happen if the chain is
	// reorganized while the index is disabled.  This has to be done in
	// reverse order because later indexes can depend on earlier ones.
	for i := len(m.enabledIndexes); i > 0; i-- {
		indexer := m.enabledIndexes[i-1]

		// Fetch the current tip for the index.
		var height int32
		var hash *wire.ShaHash
		err := m.db.View(func(dbTx database.Tx) error {
			idxKey := indexer.Key()
			hash, height, err = dbFetchIndexerTip(dbTx, idxKey)
			if err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return err
		}

		// Nothing to do if the index does not have any entries yet.
		if height == -1 {
			continue
		}

		// Loop until the tip is a block that exists in the main chain.
		initialHeight := height
		for {
			exists, err := chain.MainChainHasBlock(hash)
			if err != nil {
				return err
			}
			if exists {
				break
			}

			// At this point the index tip is orphaned, so load the
			// orphaned block from the database directly and
			// disconnect it from the index.  The block has to be
			// loaded directly since it is no longer in the main
			// chain and thus the chain.BlockByHash function would
			// error.
			err = m.db.Update(func(dbTx database.Tx) error {
				blockBytes, err := dbTx.FetchBlock(hash)
				if err != nil {
					return err
				}
				block, err := btcutil.NewBlockFromBytes(blockBytes)
				if err != nil {
					return err
				}
				block.SetHeight(height)

				// When the index requires all of the referenced
				// txouts they need to be retrieved from the
				// transaction index.
				var view *blockchain.UtxoViewpoint
				if indexNeedsInputs(indexer) {
					var err error
					view, err = makeUtxoView(dbTx, block)
					if err != nil {
						return err
					}
				}

				// Remove all of the index entries associated
				// with the block and update the indexer tip.
				err = dbIndexDisconnectBlock(dbTx, indexer,
					block, view)
				if err != nil {
					return err
				}

				// Update the tip to the previous block.
				hash = &block.MsgBlock().Header.PrevBlock
				height--

				return nil
			})
			if err != nil {
				return err
			}
		}

		if initialHeight != height {
			log.Infof("Removed %d orphaned blocks from %s "+
				"(heights %d to %d)", initialHeight-height,
				indexer.Name(), height+1, initialHeight)
		}
	}

	// Fetch the current tip heights for each index along with tracking the
	// lowest one so the catchup code only needs to start at the earliest
	// block and is able to skip connecting the block for the indexes that
	// don't need it.
	bestHeight := chain.BestSnapshot().Height
	lowestHeight := bestHeight
	indexerHeights := make([]int32, len(m.enabledIndexes))
	err = m.db.View(func(dbTx database.Tx) error {
		for i, indexer := range m.enabledIndexes {
			idxKey := indexer.Key()
			hash, height, err := dbFetchIndexerTip(dbTx, idxKey)
			if err != nil {
				return err
			}

			log.Debugf("Current %s tip (height %d, hash %v)",
				indexer.Name(), height, hash)
			indexerHeights[i] = height
			if height < lowestHeight {
				lowestHeight = height
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Nothing to index if all of the indexes are caught up.
	if lowestHeight == bestHeight {
		return nil
	}

	// Create a progress logger for the indexing process below.
	progressLogger := newBlockProgressLogger("Indexed", log)

	// At this point, one or more indexes are behind the current best chain
	// tip and need to be caught up, so log the details and loop through
	// each block that needs to be indexed.
	log.Infof("Catching up indexes from height %d to %d", lowestHeight,
		bestHeight)
	for height := lowestHeight + 1; height <= bestHeight; height++ {
		// Load the block for the height since it is required to index
		// it.
		block, err := chain.BlockByHeight(height)
		if err != nil {
			return err
		}

		// Connect the block for all indexes that need it.
		var view *blockchain.UtxoViewpoint
		for i, indexer := range m.enabledIndexes {
			// Skip indexes that don't need to be updated with this
			// block.
			if indexerHeights[i] >= height {
				continue
			}

			err := m.db.Update(func(dbTx database.Tx) error {
				// When the index requires all of the referenced
				// txouts and they haven't been loaded yet, they
				// need to be retrieved from the transaction
				// index.
				if view == nil && indexNeedsInputs(indexer) {
					var err error
					view, err = makeUtxoView(dbTx, block)
					if err != nil {
						return err
					}
				}
				return dbIndexConnectBlock(dbTx, indexer, block,
					view)
			})
			if err != nil {
				return err
			}
			indexerHeights[i] = height
		}

		// Log indexing progress.
		progressLogger.LogBlockHeight(block)
	}

	log.Infof("Indexes caught up to height %d", bestHeight)
	return nil
}
Example 12
// loadBlocks loads the blocks contained in the testdata directory and returns
// a slice of them.
func loadBlocks(t *testing.T, dataFile string, network wire.BitcoinNet) ([]*btcutil.Block, error) {
	// Open the file that contains the blocks for reading.
	fi, err := os.Open(dataFile)
	if err != nil {
		t.Errorf("failed to open file %v, err %v", dataFile, err)
		return nil, err
	}
	defer func() {
		if err := fi.Close(); err != nil {
			t.Errorf("failed to close file %v %v", dataFile,
				err)
		}
	}()
	dr := bzip2.NewReader(fi)

	// Set the first block as the genesis block.
	blocks := make([]*btcutil.Block, 0, 256)
	genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	blocks = append(blocks, genesis)

	// Load the remaining blocks.
	for height := 1; ; height++ {
		var net uint32
		err := binary.Read(dr, binary.LittleEndian, &net)
		if err == io.EOF {
			// Hit end of file at the expected offset.  No error.
			break
		}
		if err != nil {
			t.Errorf("Failed to load network type for block %d: %v",
				height, err)
			return nil, err
		}
		if net != uint32(network) {
			t.Errorf("Block doesn't match network: %v expects %v",
				net, network)
			return nil, err
		}

		var blockLen uint32
		err = binary.Read(dr, binary.LittleEndian, &blockLen)
		if err != nil {
			t.Errorf("Failed to load block size for block %d: %v",
				height, err)
			return nil, err
		}

		// Read the block.
		blockBytes := make([]byte, blockLen)
		_, err = io.ReadFull(dr, blockBytes)
		if err != nil {
			t.Errorf("Failed to load block %d: %v", height, err)
			return nil, err
		}

		// Deserialize and store the block.
		block, err := btcutil.NewBlockFromBytes(blockBytes)
		if err != nil {
			t.Errorf("Failed to parse block %v: %v", height, err)
			return nil, err
		}
		blocks = append(blocks, block)
	}

	return blocks, nil
}
Example 13
// TestBlockErrors tests the error paths for the Block API.
func TestBlockErrors(t *testing.T) {
	// Ensure out of range errors are as expected.
	wantErr := "transaction index -1 is out of range - max 3"
	testErr := btcutil.OutOfRangeError(wantErr)
	if testErr.Error() != wantErr {
		t.Errorf("OutOfRangeError: wrong error - got %v, want %v",
			testErr.Error(), wantErr)
	}

	// Serialize the test block.
	var block100000Buf bytes.Buffer
	err := Block100000.Serialize(&block100000Buf)
	if err != nil {
		t.Errorf("Serialize: %v", err)
	}
	block100000Bytes := block100000Buf.Bytes()

	// Create a new block from the serialized bytes.
	b, err := btcutil.NewBlockFromBytes(block100000Bytes)
	if err != nil {
		t.Errorf("NewBlockFromBytes: %v", err)
		return
	}

	// Truncate the block byte buffer to force errors.
	shortBytes := block100000Bytes[:80]
	_, err = btcutil.NewBlockFromBytes(shortBytes)
	if err != io.EOF {
		t.Errorf("NewBlockFromBytes: did not get expected error - "+
			"got %v, want %v", err, io.EOF)
	}

	// Ensure TxSha returns expected error on invalid indices.
	_, err = b.TxSha(-1)
	if _, ok := err.(btcutil.OutOfRangeError); !ok {
		t.Errorf("TxSha: wrong error - got: %v <%T>, "+
			"want: <%T>", err, err, btcutil.OutOfRangeError(""))
	}
	_, err = b.TxSha(len(Block100000.Transactions) + 1)
	if _, ok := err.(btcutil.OutOfRangeError); !ok {
		t.Errorf("TxSha: wrong error - got: %v <%T>, "+
			"want: <%T>", err, err, btcutil.OutOfRangeError(""))
	}

	// Ensure Tx returns expected error on invalid indices.
	_, err = b.Tx(-1)
	if _, ok := err.(btcutil.OutOfRangeError); !ok {
		t.Errorf("Tx: wrong error - got: %v <%T>, "+
			"want: <%T>", err, err, btcutil.OutOfRangeError(""))
	}
	_, err = b.Tx(len(Block100000.Transactions) + 1)
	if _, ok := err.(btcutil.OutOfRangeError); !ok {
		t.Errorf("Tx: wrong error - got: %v <%T>, "+
			"want: <%T>", err, err, btcutil.OutOfRangeError(""))
	}

	// Ensure TxLoc returns expected error with short byte buffer.
	// This makes use of the test package only function, SetBlockBytes, to
	// inject a short byte buffer.
	b.SetBlockBytes(shortBytes)
	_, err = b.TxLoc()
	if err != io.EOF {
		t.Errorf("TxLoc: did not get expected error - "+
			"got %v, want %v", err, io.EOF)
	}
}