func TestBlockfileMgrFileRolling(t *testing.T) {
	env := newTestEnv(t)
	blocks := testutil.ConstructTestBlocks(t, 100)
	size := 0
	for _, block := range blocks {
		by, _, err := serializeBlock(block)
		testutil.AssertNoError(t, err, "Error while serializing block")
		blockBytesSize := len(by)
		encodedLen := proto.EncodeVarint(uint64(blockBytesSize))
		size += blockBytesSize + len(encodedLen)
	}

	env.conf.maxBlockfileSize = int(0.75 * float64(size))
	blkfileMgrWrapper := newTestBlockfileWrapper(t, env)
	blkfileMgrWrapper.addBlocks(blocks)
	testutil.AssertEquals(t, blkfileMgrWrapper.blockfileMgr.cpInfo.latestFileChunkSuffixNum, 1)
	blkfileMgrWrapper.testGetBlockByHash(blocks)
	blkfileMgrWrapper.close()
	env.Cleanup()

	env = newTestEnv(t)
	defer env.Cleanup()
	env.conf.maxBlockfileSize = int(0.40 * float64(size))
	blkfileMgrWrapper = newTestBlockfileWrapper(t, env)
	defer blkfileMgrWrapper.close()
	blkfileMgrWrapper.addBlocks(blocks)
	testutil.AssertEquals(t, blkfileMgrWrapper.blockfileMgr.cpInfo.latestFileChunkSuffixNum, 2)
	blkfileMgrWrapper.testGetBlockByHash(blocks)
}
func TestRangeScanIteratorEmptyArray(t *testing.T) {
	testDBWrapper.CreateFreshDB(t)
	stateImplTestWrapper := newStateImplTestWrapper(t)
	stateDelta := statemgmt.NewStateDelta()

	// insert keys
	stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
	stateDelta.Set("chaincodeID1", "key2", []byte{}, nil)
	stateDelta.Set("chaincodeID1", "key3", []byte{}, nil)
	stateImplTestWrapper.prepareWorkingSet(stateDelta)
	stateImplTestWrapper.persistChangesAndResetInMemoryChanges()

	// test range scan for chaincodeID1
	rangeScanItr := stateImplTestWrapper.getRangeScanIterator("chaincodeID1", "key1", "key3")

	var results = make(map[string][]byte)
	for rangeScanItr.Next() {
		key, value := rangeScanItr.GetKeyValue()
		results[key] = value
	}
	t.Logf("Results = %s", results)
	testutil.AssertEquals(t, len(results), 3)
	testutil.AssertEquals(t, results["key3"], []byte{})
	rangeScanItr.Close()
}
func testIteratorWithDeletes(t *testing.T, env testEnv) {
	cID := "cID"
	txMgr := env.getTxMgr()
	txMgrHelper := newTxMgrTestHelper(t, txMgr)
	s, _ := txMgr.NewTxSimulator()
	for i := 1; i <= 10; i++ {
		k := createTestKey(i)
		v := createTestValue(i)
		t.Logf("Adding k=[%s], v=[%s]", k, v)
		s.SetState(cID, k, v)
	}
	s.Done()
	// validate and commit RWset
	txRWSet1, _ := s.GetTxSimulationResults()
	txMgrHelper.validateAndCommitRWSet(txRWSet1)

	s, _ = txMgr.NewTxSimulator()
	s.DeleteState(cID, createTestKey(4))
	s.Done()
	// validate and commit RWset
	txRWSet2, _ := s.GetTxSimulationResults()
	txMgrHelper.validateAndCommitRWSet(txRWSet2)

	queryExecuter, _ := txMgr.NewQueryExecutor()
	itr, _ := queryExecuter.GetStateRangeScanIterator(cID, createTestKey(3), createTestKey(6))
	defer itr.Close()
	kv, _ := itr.Next()
	testutil.AssertEquals(t, kv.(*ledger.KV).Key, createTestKey(3))
	kv, _ = itr.Next()
	testutil.AssertEquals(t, kv.(*ledger.KV).Key, createTestKey(5))
}
func TestIndexesAsync_IndexPendingBlocks(t *testing.T) {
	defaultSetting := indexBlockDataSynchronously
	indexBlockDataSynchronously = false
	defer func() { indexBlockDataSynchronously = defaultSetting }()

	testDBWrapper.CleanDB(t)
	testBlockchainWrapper := newTestBlockchainWrapper(t)
	// stop the original indexer and change the indexer to Noop - so, no block is indexed
	chain := testBlockchainWrapper.blockchain
	chain.indexer.stop()
	chain.indexer = &NoopIndexer{}
	blocks, _, err := testBlockchainWrapper.populateBlockChainWithSampleData()
	if err != nil {
		t.Fatalf("Error populating block chain with sample data: %s", err)
	}
	// close the db
	testDBWrapper.CloseDB(t)

	// open the db again and create new instance of blockchain (and the associated async indexer)
	// the indexer should index the pending blocks
	testDBWrapper.OpenDB(t)
	testBlockchainWrapper = newTestBlockchainWrapper(t)
	defer chain.indexer.stop()

	blockHash, _ := blocks[0].GetHash()
	block := testBlockchainWrapper.getBlockByHash(blockHash)
	testutil.AssertEquals(t, block, blocks[0])

	blockHash, _ = blocks[len(blocks)-1].GetHash()
	block = testBlockchainWrapper.getBlockByHash(blockHash)
	testutil.AssertEquals(t, block, blocks[len(blocks)-1])
}
func TestRawLedger(t *testing.T) {
	cleanup(t)
	rawLedger := NewFSBasedRawLedger(testFolder)
	defer rawLedger.Close()
	defer cleanup(t)

	// Construct test blocks and add to raw ledger
	blocks := testutil.ConstructTestBlocks(t, 10)
	for _, block := range blocks {
		rawLedger.CommitBlock(block)
	}

	// test GetBlockchainInfo()
	bcInfo, err := rawLedger.GetBlockchainInfo()
	testutil.AssertNoError(t, err, "Error in getting BlockchainInfo")
	testutil.AssertEquals(t, bcInfo.Height, uint64(10))

	// test GetBlockByNumber()
	block, err := rawLedger.GetBlockByNumber(2)
	testutil.AssertNoError(t, err, "Error in getting block by number")
	testutil.AssertEquals(t, block, blocks[1])

	// get blocks iterator for block number starting from 3
	itr, err := rawLedger.GetBlocksIterator(3)
	testutil.AssertNoError(t, err, "Error in getting iterator")
	blockHolder, err := itr.Next()
	testutil.AssertNoError(t, err, "")
	testutil.AssertEquals(t, blockHolder.(ledger.BlockHolder).GetBlock(), blocks[2])

	// get next block from iterator. The block should be the 4th block
	blockHolder, err = itr.Next()
	testutil.AssertNoError(t, err, "")
	testutil.AssertEquals(t, blockHolder.(ledger.BlockHolder).GetBlock(), blocks[3])
}
func TestBlockChain_SimpleChain(t *testing.T) {
	testDBWrapper.CleanDB(t)
	blockchainTestWrapper := newTestBlockchainWrapper(t)
	blockchain := blockchainTestWrapper.blockchain
	allBlocks, allStateHashes, err := blockchainTestWrapper.populateBlockChainWithSampleData()
	if err != nil {
		t.Logf("Error populating block chain with sample data: %s", err)
		t.Fail()
	}
	testutil.AssertEquals(t, blockchain.getSize(), uint64(len(allBlocks)))
	testutil.AssertEquals(t, blockchainTestWrapper.fetchBlockchainSizeFromDB(), uint64(len(allBlocks)))

	for i := range allStateHashes {
		t.Logf("Checking state hash for block number = [%d]", i)
		testutil.AssertEquals(t, blockchainTestWrapper.getBlock(uint64(i)).GetStateHash(), allStateHashes[i])
	}

	for i := range allBlocks {
		t.Logf("Checking block hash for block number = [%d]", i)
		blockhash, _ := blockchainTestWrapper.getBlock(uint64(i)).GetHash()
		expectedBlockHash, _ := allBlocks[i].GetHash()
		testutil.AssertEquals(t, blockhash, expectedBlockHash)
	}

	testutil.AssertNil(t, blockchainTestWrapper.getBlock(uint64(0)).PreviousBlockHash)

	i := 1
	for i < len(allBlocks) {
		t.Logf("Checking previous block hash for block number = [%d]", i)
		expectedPreviousBlockHash, _ := allBlocks[i-1].GetHash()
		testutil.AssertEquals(t, blockchainTestWrapper.getBlock(uint64(i)).PreviousBlockHash, expectedPreviousBlockHash)
		i++
	}
}
// TestBasicRW tests basic read-write
func TestBasicRW(t *testing.T, db statedb.VersionedDB) {
	db.Open()
	defer db.Close()
	val, err := db.GetState("ns", "key1")
	testutil.AssertNoError(t, err, "")
	testutil.AssertNil(t, val)

	batch := statedb.NewUpdateBatch()
	vv1 := statedb.VersionedValue{Value: []byte("value1"), Version: version.NewHeight(1, 1)}
	vv2 := statedb.VersionedValue{Value: []byte("value2"), Version: version.NewHeight(1, 2)}
	vv3 := statedb.VersionedValue{Value: []byte("value3"), Version: version.NewHeight(1, 3)}
	vv4 := statedb.VersionedValue{Value: []byte{}, Version: version.NewHeight(1, 4)}
	batch.Put("ns1", "key1", vv1.Value, vv1.Version)
	batch.Put("ns1", "key2", vv2.Value, vv2.Version)
	batch.Put("ns2", "key3", vv3.Value, vv3.Version)
	batch.Put("ns2", "key4", vv4.Value, vv4.Version)
	savePoint := version.NewHeight(2, 5)
	db.ApplyUpdates(batch, savePoint)

	vv, _ := db.GetState("ns1", "key1")
	testutil.AssertEquals(t, vv, &vv1)
	vv, _ = db.GetState("ns2", "key4")
	testutil.AssertEquals(t, vv, &vv4)

	sp, err := db.GetLatestSavePoint()
	testutil.AssertNoError(t, err, "")
	testutil.AssertEquals(t, sp, savePoint)
}
func TestTransactionResult(t *testing.T) {
	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
	ledger := ledgerTestWrapper.ledger

	// Block 0
	ledger.BeginTxBatch(0)
	ledger.TxBegin("txUuid1")
	ledger.SetState("chaincode1", "key1", []byte("value1A"))
	ledger.SetState("chaincode2", "key2", []byte("value2A"))
	ledger.SetState("chaincode3", "key3", []byte("value3A"))
	ledger.TxFinished("txUuid1", true)
	transaction, uuid := buildTestTx(t)

	transactionResult := &protos.TransactionResult{Uuid: uuid, ErrorCode: 500, Error: "bad"}
	ledger.CommitTxBatch(0, []*protos.Transaction{transaction}, []*protos.TransactionResult{transactionResult}, []byte("proof"))

	block := ledgerTestWrapper.GetBlockByNumber(0)
	nonHashData := block.GetNonHashData()
	if nonHashData == nil {
		t.Fatal("Expected block to have non hash data, but non hash data was nil.")
	}
	if nonHashData.TransactionResults == nil || len(nonHashData.TransactionResults) == 0 {
		t.Fatal("Expected block to have non hash data transaction results.")
	}
	testutil.AssertEquals(t, nonHashData.TransactionResults[0].Uuid, uuid)
	testutil.AssertEquals(t, nonHashData.TransactionResults[0].Error, "bad")
	testutil.AssertEquals(t, nonHashData.TransactionResults[0].ErrorCode, uint32(500))
}
func testCompositeKey(t *testing.T, ns string, key string) {
	compositeKey := constructCompositeKey(ns, key)
	t.Logf("compositeKey=%#v", compositeKey)
	ns1, key1 := splitCompositeKey(compositeKey)
	testutil.AssertEquals(t, ns1, ns)
	testutil.AssertEquals(t, key1, key)
}
func TestGetCouchDBDefinition(t *testing.T) {
	setUpCoreYAMLConfig()
	defer testutil.ResetConfigToDefaultValues()
	viper.Set("ledger.state.stateDatabase", "CouchDB")
	couchDBDef := GetCouchDBDefinition()
	testutil.AssertEquals(t, couchDBDef.URL, "127.0.0.1:5984")
	testutil.AssertEquals(t, couchDBDef.Username, "")
	testutil.AssertEquals(t, couchDBDef.Password, "")
}
func TestConfigInit(t *testing.T) {
	configs := viper.GetStringMap("ledger.state.dataStructure.configs")
	t.Logf("Configs loaded from yaml = %#v", configs)
	testDBWrapper.CleanDB(t)
	stateImpl := NewStateImpl()
	stateImpl.Initialize(configs)
	testutil.AssertEquals(t, conf.getNumBucketsAtLowestLevel(), configs[ConfigNumBuckets])
	testutil.AssertEquals(t, conf.getMaxGroupingAtEachLevel(), configs[ConfigMaxGroupingAtEachLevel])
}
func TestBucketCache(t *testing.T) {
	testutil.SetLogLevel(logging.INFO, "buckettree")
	rootHash1, rootHash2, rootHash3, rootHash4 := testGetRootHashes(t, false)
	rootHash5, rootHash6, rootHash7, rootHash8 := testGetRootHashes(t, true)
	testutil.AssertEquals(t, rootHash1, rootHash5)
	testutil.AssertEquals(t, rootHash2, rootHash6)
	testutil.AssertEquals(t, rootHash3, rootHash7)
	testutil.AssertEquals(t, rootHash4, rootHash8)
}
func testIterator(t *testing.T, env testEnv, numKeys int, startKeyNum int, endKeyNum int) {
	cID := "cID"
	txMgr := env.getTxMgr()
	txMgrHelper := newTxMgrTestHelper(t, txMgr)
	s, _ := txMgr.NewTxSimulator()
	for i := 1; i <= numKeys; i++ {
		k := createTestKey(i)
		v := createTestValue(i)
		t.Logf("Adding k=[%s], v=[%s]", k, v)
		s.SetState(cID, k, v)
	}
	s.Done()
	// validate and commit RWset
	txRWSet, _ := s.GetTxSimulationResults()
	txMgrHelper.validateAndCommitRWSet(txRWSet)

	var startKey string
	var endKey string
	var begin int
	var end int

	if startKeyNum != 0 {
		begin = startKeyNum
		startKey = createTestKey(startKeyNum)
	} else {
		begin = 1 // first key in the db
		startKey = ""
	}

	if endKeyNum != 0 {
		endKey = createTestKey(endKeyNum)
		end = endKeyNum
	} else {
		endKey = ""
		end = numKeys + 1 // last key in the db
	}

	expectedCount := end - begin

	queryExecuter, _ := txMgr.NewQueryExecutor()
	itr, _ := queryExecuter.GetStateRangeScanIterator(cID, startKey, endKey)
	count := 0
	for {
		kv, _ := itr.Next()
		if kv == nil {
			break
		}
		keyNum := begin + count
		k := kv.(*ledger.KV).Key
		v := kv.(*ledger.KV).Value
		t.Logf("Retrieved k=%s, v=%s at count=%d start=%s end=%s", k, v, count, startKey, endKey)
		testutil.AssertEquals(t, k, createTestKey(keyNum))
		testutil.AssertEquals(t, v, createTestValue(keyNum))
		count++
	}
	testutil.AssertEquals(t, count, expectedCount)
}
func testTrieNodeMarshalUnmarshal(trieNode *trieNode, t *testing.T) {
	trieNodeTestWrapper := &trieNodeTestWrapper{trieNode, t}
	serializedContent := trieNodeTestWrapper.marshal()
	trieNodeFromUnmarshal := trieNodeTestWrapper.unmarshal(trieNode.trieKey, serializedContent)
	testutil.AssertEquals(t, trieNodeFromUnmarshal.trieKey, trieNode.trieKey)
	testutil.AssertEquals(t, trieNodeFromUnmarshal.value, trieNode.value)
	testutil.AssertEquals(t, trieNodeFromUnmarshal.childrenCryptoHashes, trieNode.childrenCryptoHashes)
	testutil.AssertEquals(t, trieNodeFromUnmarshal.getNumChildren(), trieNode.getNumChildren())
}
func TestBlockchain_InfoNoBlock(t *testing.T) {
	testDBWrapper.CleanDB(t)
	blockchainTestWrapper := newTestBlockchainWrapper(t)
	blockchain := blockchainTestWrapper.blockchain
	blockchainInfo, err := blockchain.getBlockchainInfo()
	testutil.AssertNoError(t, err, "Error while invoking getBlockchainInfo() on an empty blockchain")
	testutil.AssertEquals(t, blockchainInfo.Height, uint64(0))
	testutil.AssertEquals(t, blockchainInfo.CurrentBlockHash, nil)
	testutil.AssertEquals(t, blockchainInfo.PreviousBlockHash, nil)
}
func testBlockIndexSelectiveIndexing(t *testing.T, indexItems []blkstorage.IndexableAttr) {
	env := newTestEnv(t)
	env.indexConfig.AttrsToIndex = indexItems
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(t, env)
	defer blkfileMgrWrapper.close()

	blocks := testutil.ConstructTestBlocks(t, 3)
	// add test blocks
	blkfileMgrWrapper.addBlocks(blocks)
	blockfileMgr := blkfileMgrWrapper.blockfileMgr

	// if the index has been configured for an indexItem then the item should be indexed, else not
	// test 'retrieveBlockByHash'
	block, err := blockfileMgr.retrieveBlockByHash(blocks[0].Header.Hash())
	if testutil.Contains(indexItems, blkstorage.IndexableAttrBlockHash) {
		testutil.AssertNoError(t, err, "Error while retrieving block by hash")
		testutil.AssertEquals(t, block, blocks[0])
	} else {
		testutil.AssertSame(t, err, blkstorage.ErrAttrNotIndexed)
	}

	// test 'retrieveBlockByNumber'
	block, err = blockfileMgr.retrieveBlockByNumber(1)
	if testutil.Contains(indexItems, blkstorage.IndexableAttrBlockNum) {
		testutil.AssertNoError(t, err, "Error while retrieving block by number")
		testutil.AssertEquals(t, block, blocks[0])
	} else {
		testutil.AssertSame(t, err, blkstorage.ErrAttrNotIndexed)
	}

	// test 'retrieveTransactionByID'
	txid, err := extractTxID(blocks[0].Data.Data[0])
	testutil.AssertNoError(t, err, "")
	tx, err := blockfileMgr.retrieveTransactionByID(txid)
	if testutil.Contains(indexItems, blkstorage.IndexableAttrTxID) {
		testutil.AssertNoError(t, err, "Error while retrieving tx by id")
		txOrig, err := extractTransaction(blocks[0].Data.Data[0])
		testutil.AssertNoError(t, err, "")
		testutil.AssertEquals(t, tx, txOrig)
	} else {
		testutil.AssertSame(t, err, blkstorage.ErrAttrNotIndexed)
	}

	// test 'retrieveTransactionForBlockNumTranNum'
	tx2, err := blockfileMgr.retrieveTransactionForBlockNumTranNum(1, 1)
	if testutil.Contains(indexItems, blkstorage.IndexableAttrBlockNumTranNum) {
		testutil.AssertNoError(t, err, "Error while retrieving tx by blockNum and tranNum")
		txOrig2, err2 := extractTransaction(blocks[0].Data.Data[0])
		testutil.AssertNoError(t, err2, "")
		testutil.AssertEquals(t, tx2, txOrig2)
	} else {
		testutil.AssertSame(t, err, blkstorage.ErrAttrNotIndexed)
	}
}
func TestStateSnapshotIterator(t *testing.T) {
	testDBWrapper.CreateFreshDB(t)
	stateTrieTestWrapper := newStateTrieTestWrapper(t)
	stateTrie := stateTrieTestWrapper.stateTrie
	stateDelta := statemgmt.NewStateDelta()

	// insert keys
	stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
	stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
	stateDelta.Set("chaincodeID3", "key3", []byte("value3"), nil)
	stateDelta.Set("chaincodeID4", "key4", []byte("value4"), nil)
	stateDelta.Set("chaincodeID5", "key5", []byte("value5"), nil)
	stateDelta.Set("chaincodeID6", "key6", []byte("value6"), nil)
	stateTrie.PrepareWorkingSet(stateDelta)
	stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()

	// check that the keys are persisted
	testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID1", "key1"), []byte("value1"))
	testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID2", "key2"), []byte("value2"))
	testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID3", "key3"), []byte("value3"))
	testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID4", "key4"), []byte("value4"))
	testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID5", "key5"), []byte("value5"))
	testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID6", "key6"), []byte("value6"))

	// take a db snapshot
	dbSnapshot := db.GetDBHandle().GetSnapshot()

	stateDelta1 := statemgmt.NewStateDelta()
	// delete a few keys
	stateDelta1.Delete("chaincodeID1", "key1", nil)
	stateDelta1.Delete("chaincodeID3", "key3", nil)
	stateDelta1.Delete("chaincodeID4", "key4", nil)
	stateDelta1.Delete("chaincodeID6", "key6", nil)

	// update the remaining keys
	stateDelta1.Set("chaincodeID2", "key2", []byte("value2_new"), nil)
	stateDelta1.Set("chaincodeID5", "key5", []byte("value5_new"), nil)

	stateTrie.PrepareWorkingSet(stateDelta1)
	stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()

	// check that the keys are updated
	testutil.AssertNil(t, stateTrieTestWrapper.Get("chaincodeID1", "key1"))
	testutil.AssertNil(t, stateTrieTestWrapper.Get("chaincodeID3", "key3"))
	testutil.AssertNil(t, stateTrieTestWrapper.Get("chaincodeID4", "key4"))
	testutil.AssertNil(t, stateTrieTestWrapper.Get("chaincodeID6", "key6"))
	testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID2", "key2"), []byte("value2_new"))
	testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID5", "key5"), []byte("value5_new"))

	// the iterator over the earlier snapshot should still see the original state
	itr, err := newStateSnapshotIterator(dbSnapshot)
	testutil.AssertNoError(t, err, "Error while getting state snapshot iterator")

	stateDeltaFromSnapshot := statemgmt.NewStateDelta()
	for itr.Next() {
		keyBytes, valueBytes := itr.GetRawKeyValue()
		t.Logf("key=[%s], value=[%s]", string(keyBytes), string(valueBytes))
		chaincodeID, key := statemgmt.DecodeCompositeKey(keyBytes)
		stateDeltaFromSnapshot.Set(chaincodeID, key, valueBytes, nil)
	}
	testutil.AssertEquals(t, stateDelta, stateDeltaFromSnapshot)
}
func TestBucketKeyGetChildIndex(t *testing.T) {
	conf = newConfig(26, 3, fnvHash)
	bucketKey := newBucketKey(3, 22)
	testutil.AssertEquals(t, bucketKey.getParentKey().getChildIndex(bucketKey), 0)

	bucketKey = newBucketKey(3, 23)
	testutil.AssertEquals(t, bucketKey.getParentKey().getChildIndex(bucketKey), 1)

	bucketKey = newBucketKey(3, 24)
	testutil.AssertEquals(t, bucketKey.getParentKey().getChildIndex(bucketKey), 2)
}
func (w *testBlockfileMgrWrapper) testGetBlockByNumber(blocks []*common.Block, startingNum uint64) {
	for i := 0; i < len(blocks); i++ {
		b, err := w.blockfileMgr.retrieveBlockByNumber(startingNum + uint64(i))
		testutil.AssertNoError(w.t, err, fmt.Sprintf("Error while retrieving [%d]th block from blockfileMgr", i))
		testutil.AssertEquals(w.t, b, blocks[i])
	}
	// test getting the last block
	b, err := w.blockfileMgr.retrieveBlockByNumber(math.MaxUint64)
	iLastBlock := len(blocks) - 1
	testutil.AssertNoError(w.t, err, "Error while retrieving last block from blockfileMgr")
	testutil.AssertEquals(w.t, b, blocks[iLastBlock])
}
func TestBucketKeyEqual(t *testing.T) {
	conf = newConfig(26, 3, fnvHash)
	bucketKey1 := newBucketKey(1, 2)
	bucketKey2 := newBucketKey(1, 2)
	testutil.AssertEquals(t, bucketKey1.equals(bucketKey2), true)

	bucketKey2 = newBucketKey(2, 2)
	testutil.AssertEquals(t, bucketKey1.equals(bucketKey2), false)

	bucketKey2 = newBucketKey(1, 3)
	testutil.AssertEquals(t, bucketKey1.equals(bucketKey2), false)

	bucketKey2 = newBucketKey(2, 3)
	testutil.AssertEquals(t, bucketKey1.equals(bucketKey2), false)
}
func TestHostConfig(t *testing.T) {
	config.SetupTestConfig("./../../../peer")
	var hostConfig = new(docker.HostConfig)
	err := viper.UnmarshalKey("vm.docker.hostConfig", hostConfig)
	if err != nil {
		t.Fatalf("Failed to load docker HostConfig: %s", err.Error())
	}
	testutil.AssertNotEquals(t, hostConfig.LogConfig, nil)
	testutil.AssertEquals(t, hostConfig.LogConfig.Type, "json-file")
	testutil.AssertEquals(t, hostConfig.LogConfig.Config["max-size"], "50m")
	testutil.AssertEquals(t, hostConfig.LogConfig.Config["max-file"], "5")
}
func TestBlockchain_Info(t *testing.T) {
	testDBWrapper.CleanDB(t)
	blockchainTestWrapper := newTestBlockchainWrapper(t)
	blocks, _, _ := blockchainTestWrapper.populateBlockChainWithSampleData()

	blockchain := blockchainTestWrapper.blockchain
	blockchainInfo, _ := blockchain.getBlockchainInfo()
	testutil.AssertEquals(t, blockchainInfo.Height, uint64(3))
	currentBlockHash, _ := blocks[len(blocks)-1].GetHash()
	previousBlockHash, _ := blocks[len(blocks)-2].GetHash()
	testutil.AssertEquals(t, blockchainInfo.CurrentBlockHash, currentBlockHash)
	testutil.AssertEquals(t, blockchainInfo.PreviousBlockHash, previousBlockHash)
}
func TestBlockfileMgrBlockchainInfo(t *testing.T) {
	env := newTestEnv(t)
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(t, env)
	defer blkfileMgrWrapper.close()

	bcInfo := blkfileMgrWrapper.blockfileMgr.getBlockchainInfo()
	testutil.AssertEquals(t, bcInfo, &pb.BlockchainInfo{Height: 0, CurrentBlockHash: nil, PreviousBlockHash: nil})

	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	bcInfo = blkfileMgrWrapper.blockfileMgr.getBlockchainInfo()
	testutil.AssertEquals(t, bcInfo.Height, uint64(10))
}
func TestLedgerCommit(t *testing.T) {
	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
	ledger := ledgerTestWrapper.ledger
	ledger.BeginTxBatch(1)
	ledger.TxBegin("txUuid")
	ledger.SetState("chaincode1", "key1", []byte("value1"))
	ledger.SetState("chaincode2", "key2", []byte("value2"))
	ledger.SetState("chaincode3", "key3", []byte("value3"))
	ledger.TxFinished("txUuid", true)
	transaction, _ := buildTestTx(t)
	ledger.CommitTxBatch(1, []*protos.Transaction{transaction}, nil, []byte("proof"))
	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", false), []byte("value1"))
	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1"))
}
func TestBucketNodeMarshalUnmarshal(t *testing.T) {
	conf = newConfig(26, 3, fnvHash)
	bucketNode := newBucketNode(newBucketKey(2, 7))
	childKey1 := newBucketKey(3, 19)
	bucketNode.setChildCryptoHash(childKey1, []byte("cryptoHashChild1"))

	childKey3 := newBucketKey(3, 21)
	bucketNode.setChildCryptoHash(childKey3, []byte("cryptoHashChild3"))

	serializedBytes := bucketNode.marshal()
	deserializedBucketNode := unmarshalBucketNode(newBucketKey(2, 7), serializedBytes)
	testutil.AssertEquals(t, bucketNode.bucketKey, deserializedBucketNode.bucketKey)
	testutil.AssertEquals(t, bucketNode.childrenCryptoHash, deserializedBucketNode.childrenCryptoHash)
}
func TestStateDeltaCryptoHash(t *testing.T) {
	stateDelta := NewStateDelta()
	testutil.AssertNil(t, stateDelta.ComputeCryptoHash())

	stateDelta.Set("chaincodeID1", "key2", []byte("value2"), nil)
	stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
	stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
	stateDelta.Set("chaincodeID2", "key1", []byte("value1"), nil)

	testutil.AssertEquals(t, stateDelta.ComputeCryptoHash(), testutil.ComputeCryptoHash([]byte("chaincodeID1key1value1key2value2chaincodeID2key1value1key2value2")))

	stateDelta.Delete("chaincodeID2", "key1", nil)
	testutil.AssertEquals(t, stateDelta.ComputeCryptoHash(), testutil.ComputeCryptoHash([]byte("chaincodeID1key1value1key2value2chaincodeID2key1key2value2")))
}
func TestBucketNodeMerge(t *testing.T) {
	conf = newConfig(26, 3, fnvHash)
	bucketNode := newBucketNode(newBucketKey(2, 7))
	bucketNode.childrenCryptoHash[0] = []byte("cryptoHashChild1")
	bucketNode.childrenCryptoHash[2] = []byte("cryptoHashChild3")

	dbBucketNode := newBucketNode(newBucketKey(2, 7))
	dbBucketNode.childrenCryptoHash[0] = []byte("DBcryptoHashChild1")
	dbBucketNode.childrenCryptoHash[1] = []byte("DBcryptoHashChild2")

	bucketNode.mergeBucketNode(dbBucketNode)
	testutil.AssertEquals(t, bucketNode.childrenCryptoHash[0], []byte("cryptoHashChild1"))
	testutil.AssertEquals(t, bucketNode.childrenCryptoHash[1], []byte("DBcryptoHashChild2"))
	testutil.AssertEquals(t, bucketNode.childrenCryptoHash[2], []byte("cryptoHashChild3"))
}
// AssertIteratorContains - tests whether the iterator (itr) contains the expected results (provided in the map)
func AssertIteratorContains(t *testing.T, itr RangeScanIterator, expected map[string][]byte) {
	count := 0
	actual := make(map[string][]byte)
	for itr.Next() {
		count++
		k, v := itr.GetKeyValue()
		actual[k] = v
	}
	t.Logf("Results from iterator: %s", actual)
	testutil.AssertEquals(t, count, len(expected))
	for k, v := range expected {
		testutil.AssertEquals(t, actual[k], v)
	}
}
func TestTrieNode_MergeAttributes(t *testing.T) {
	trieNode := newTrieNode(newTrieKey("chaincodeID", "key"), []byte("newValue!"), true)
	trieNode.setChildCryptoHash(0, []byte("crypto-hash-for-test-0"))
	trieNode.setChildCryptoHash(5, []byte("crypto-hash-for-test-5"))

	existingTrieNode := newTrieNode(newTrieKey("chaincodeID", "key"), []byte("existingValue"), false)
	existingTrieNode.setChildCryptoHash(5, []byte("crypto-hash-for-test-5-existing"))
	existingTrieNode.setChildCryptoHash(10, []byte("crypto-hash-for-test-10-existing"))

	trieNode.mergeMissingAttributesFrom(existingTrieNode)
	testutil.AssertEquals(t, trieNode.value, []byte("newValue!"))
	testutil.AssertEquals(t, trieNode.childrenCryptoHashes[0], []byte("crypto-hash-for-test-0"))
	testutil.AssertEquals(t, trieNode.childrenCryptoHashes[5], []byte("crypto-hash-for-test-5"))
	testutil.AssertEquals(t, trieNode.childrenCryptoHashes[10], []byte("crypto-hash-for-test-10-existing"))
}
func testBlockIndexSync(t *testing.T, numBlocks int, numBlocksToIndex int, syncByRestart bool) {
	env := newTestEnv(t)
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(t, env)
	defer blkfileMgrWrapper.close()
	blkfileMgr := blkfileMgrWrapper.blockfileMgr
	origIndex := blkfileMgr.index

	// construct blocks for testing
	blocks := testutil.ConstructTestBlocks(t, numBlocks)
	// add a few blocks
	blkfileMgrWrapper.addBlocks(blocks[:numBlocksToIndex])

	// plug in a noop index and add the remaining blocks
	blkfileMgr.index = &noopIndex{}
	blkfileMgrWrapper.addBlocks(blocks[numBlocksToIndex:])

	// plug the original index back in
	blkfileMgr.index = origIndex

	// the first set of blocks should be present in the original index
	for i := 1; i <= numBlocksToIndex; i++ {
		block, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
		testutil.AssertNoError(t, err, fmt.Sprintf("block [%d] should have been present in the index", i))
		testutil.AssertEquals(t, block, blocks[i-1])
	}

	// the last set of blocks should not be present in the original index
	for i := numBlocksToIndex + 1; i <= numBlocks; i++ {
		_, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
		testutil.AssertSame(t, err, blkstorage.ErrNotFoundInIndex)
	}

	// perform index sync
	if syncByRestart {
		blkfileMgrWrapper.close()
		blkfileMgrWrapper = newTestBlockfileWrapper(t, env)
		defer blkfileMgrWrapper.close()
		blkfileMgr = blkfileMgrWrapper.blockfileMgr
	} else {
		blkfileMgr.syncIndex()
	}

	// now, the last set of blocks should also be present in the original index
	for i := numBlocksToIndex + 1; i <= numBlocks; i++ {
		block, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
		testutil.AssertNoError(t, err, fmt.Sprintf("block [%d] should have been present in the index", i))
		testutil.AssertEquals(t, block, blocks[i-1])
	}
}