Example #1
func ConstructRandomStateDelta(
	t testing.TB,
	chaincodeIDPrefix string,
	numChaincodes int,
	maxKeySuffix int,
	numKeysToInsert int,
	kvSize int) *StateDelta {
	delta := NewStateDelta()
	s2 := rand.NewSource(time.Now().UnixNano())
	r2 := rand.New(s2)

	for i := 0; i < numKeysToInsert; i++ {
		chaincodeID := chaincodeIDPrefix + "_" + strconv.Itoa(r2.Intn(numChaincodes))
		key := "key_" + strconv.Itoa(r2.Intn(maxKeySuffix))
		valueSize := kvSize - len(key)
		if valueSize < 1 {
			panic(fmt.Errorf("valueSize cannot be less than one. ValueSize=%d", valueSize))
		}
		value := testutil.ConstructRandomBytes(t, valueSize)
		delta.Set(chaincodeID, key, value, nil)
	}

	for _, chaincodeDelta := range delta.ChaincodeStateDeltas {
		sortedKeys := chaincodeDelta.getSortedKeys()
		smallestKey := sortedKeys[0]
		largestKey := sortedKeys[len(sortedKeys)-1]
		t.Logf("chaincode=%s, numKeys=%d, smallestKey=%s, largestKey=%s", chaincodeDelta.ChaincodeID, len(sortedKeys), smallestKey, largestKey)
	}
	return delta
}
Example #2
func TestExtractTxid(t *testing.T) {
	txEnv, txid, _ := testutil.ConstructTransaction(t, testutil.ConstructRandomBytes(t, 50), false)
	txEnvBytes, _ := putils.GetBytesEnvelope(txEnv)
	extractedTxid, err := extractTxID(txEnvBytes)
	testutil.AssertNoError(t, err, "")
	testutil.AssertEquals(t, extractedTxid, txid)
}
Example #3
func TestBlockFileStreamUnexpectedEOF(t *testing.T) {
	partialBlockBytes := []byte{}
	dummyBlockBytes := testutil.ConstructRandomBytes(t, 100)
	lenBytes := proto.EncodeVarint(uint64(len(dummyBlockBytes)))
	partialBlockBytes = append(partialBlockBytes, lenBytes...)
	partialBlockBytes = append(partialBlockBytes, dummyBlockBytes...)
	testBlockFileStreamUnexpectedEOF(t, 10, partialBlockBytes[:1])
	testBlockFileStreamUnexpectedEOF(t, 10, partialBlockBytes[:2])
	testBlockFileStreamUnexpectedEOF(t, 10, partialBlockBytes[:5])
	testBlockFileStreamUnexpectedEOF(t, 10, partialBlockBytes[:20])
}
Example #4
func BenchmarkLedgerRandomTransactions(b *testing.B) {
	disableLogging()
	b.Logf("testParams:%q", testParams)
	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
	keyPrefix := flags.String("KeyPrefix", "Key_", "The generated workload will have keys such as KeyPrefix_1, KeyPrefix_2, and so on")
	kvSize := flags.Int("KVSize", 1000, "size of the key-value")
	maxKeySuffix := flags.Int("MaxKeySuffix", 1, "the keys are appended with _1, _2, ... up to MaxKeySuffix")
	batchSize := flags.Int("BatchSize", 100, "number of transactions in a batch")
	numBatches := flags.Int("NumBatches", 100, "number of batches")
	numReadsFromLedger := flags.Int("NumReadsFromLedger", 4, "Number of Key-Values to read")
	numWritesToLedger := flags.Int("NumWritesToLedger", 4, "Number of Key-Values to write")
	flags.Parse(testParams)

	b.Logf(`Running test with params: keyPrefix=%s, kvSize=%d, batchSize=%d, maxKeySuffix=%d, numBatches=%d, numReadsFromLedger=%d, numWritesToLedger=%d`,
		*keyPrefix, *kvSize, *batchSize, *maxKeySuffix, *numBatches, *numReadsFromLedger, *numWritesToLedger)

	ledger, err := GetNewLedger()
	testutil.AssertNoError(b, err, "Error while constructing ledger")

	chaincode := "chaincodeId"
	tx := constructDummyTx(b)
	value := testutil.ConstructRandomBytes(b, *kvSize-(len(chaincode)+len(*keyPrefix)))

	b.ResetTimer()
	startTime := time.Now()
	for i := 0; i < b.N; i++ {
		for batchID := 0; batchID < *numBatches; batchID++ {
			ledger.BeginTxBatch(1)
			// execute one batch
			var transactions []*protos.Transaction
			for j := 0; j < *batchSize; j++ {
				randomKeySuffixGen := testutil.NewTestRandomNumberGenerator(*maxKeySuffix)
				ledger.TxBegin("txUuid")
				for k := 0; k < *numReadsFromLedger; k++ {
					randomKey := *keyPrefix + strconv.Itoa(randomKeySuffixGen.Next())
					ledger.GetState(chaincode, randomKey, true)
				}
				for k := 0; k < *numWritesToLedger; k++ {
					randomKey := *keyPrefix + strconv.Itoa(randomKeySuffixGen.Next())
					ledger.SetState(chaincode, randomKey, value)
				}
				ledger.TxFinished("txUuid", true)
				transactions = append(transactions, tx)
			}
			ledger.CommitTxBatch(1, transactions, nil, []byte("proof"))
		}
	}
	b.StopTimer()
	perfstat.UpdateTimeStat("timeSpent", startTime)
	perfstat.PrintStats()
	b.Logf("DB stats afters populating: %s", testDBWrapper.GetEstimatedNumKeys(b))
}
Example #5
func BenchmarkLedgerSingleKeyTransaction(b *testing.B) {
	b.Logf("testParams:%q", testParams)
	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
	key := flags.String("Key", "key", "key name")
	kvSize := flags.Int("KVSize", 1000, "size of the key-value")
	batchSize := flags.Int("BatchSize", 100, "number of transactions in a batch")
	numBatches := flags.Int("NumBatches", 100, "number of batches")
	numWritesToLedger := flags.Int("NumWritesToLedger", 4, "Number of Key-Values to write")
	flags.Parse(testParams)

	b.Logf(`Running test with params: key=%s, kvSize=%d, batchSize=%d, numBatches=%d, NumWritesToLedger=%d`,
		*key, *kvSize, *batchSize, *numBatches, *numWritesToLedger)

	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(b)
	ledger := ledgerTestWrapper.ledger

	chaincode := "chaincodeId"
	value := testutil.ConstructRandomBytes(b, *kvSize-(len(chaincode)+len(*key)))
	tx := constructDummyTx(b)
	serializedBytes, _ := tx.Bytes()
	b.Logf("Size of serialized bytes for tx = %d", len(serializedBytes))
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		for i := 0; i < *numBatches; i++ {
			ledger.BeginTxBatch(1)
			// execute one batch
			var transactions []*protos.Transaction
			for j := 0; j < *batchSize; j++ {
				ledger.TxBegin("txUuid")
				_, err := ledger.GetState(chaincode, *key, true)
				if err != nil {
					b.Fatalf("Error in getting state: %s", err)
				}
				for l := 0; l < *numWritesToLedger; l++ {
					ledger.SetState(chaincode, *key, value)
				}
				ledger.TxFinished("txUuid", true)
				transactions = append(transactions, tx)
			}
			ledger.CommitTxBatch(1, transactions, nil, []byte("proof"))
		}
	}
	b.StopTimer()

	// verify the value persisted
	value, _ = ledger.GetState(chaincode, *key, true)
	size := ledger.GetBlockchainSize()
	b.Logf("Value size=%d, Blockchain height=%d", len(value), size)
}
Example #6
func populateDB(tb testing.TB, kvSize int, totalKeys int, keyPrefix string) {
	dbWrapper := db.NewTestDBWrapper()
	dbWrapper.CreateFreshDB(tb)
	batch := gorocksdb.NewWriteBatch()
	for i := 0; i < totalKeys; i++ {
		key := []byte(keyPrefix + strconv.Itoa(i))
		value := testutil.ConstructRandomBytes(tb, kvSize-len(key))
		batch.Put(key, value)
		if i%1000 == 0 {
			dbWrapper.WriteToDB(tb, batch)
			batch = gorocksdb.NewWriteBatch()
		}
	}
	// write any keys remaining in the final, partially filled batch
	dbWrapper.WriteToDB(tb, batch)
	dbWrapper.CloseDB(tb)
}
Example #7
File: perf_test.go Project: C0rWin/fabric
func BenchmarkCryptoHash(b *testing.B) {
	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
	numBytesPointer := flags.Int("NumBytes", -1, "Number of Bytes")
	flags.Parse(testParams)
	numBytes := *numBytesPointer
	if numBytes == -1 {
		b.Fatal("Missing value for parameter NumBytes")
	}

	randomBytes := testutil.ConstructRandomBytes(b, numBytes)
	//b.Logf("byte size=%d, b.N=%d", len(randomBytes), b.N)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		util.ComputeCryptoHash(randomBytes)
	}
}
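As a side note, a variant of the benchmark above could also report hashing throughput by declaring how many bytes each iteration processes via the standard testing API (b.SetBytes). The sketch below is a hypothetical addition, not part of the original test; the payload size is illustrative only.
func BenchmarkCryptoHashThroughput(b *testing.B) {
	// 1 KiB payload chosen purely for illustration
	randomBytes := testutil.ConstructRandomBytes(b, 1024)
	// tell the framework how many bytes one iteration hashes, so it prints MB/s
	b.SetBytes(int64(len(randomBytes)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		util.ComputeCryptoHash(randomBytes)
	}
}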
Example #8
func testBlockfileMgrCrashDuringWriting(t *testing.T, numBlocksBeforeCheckpoint int,
	numBlocksAfterCheckpoint int, numLastBlockBytes int, numPartialBytesToWrite int) {
	env := newTestEnv(t)
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(t, env)
	bg := testutil.NewBlockGenerator(t)
	blocksBeforeCP := bg.NextTestBlocks(numBlocksBeforeCheckpoint)
	blkfileMgrWrapper.addBlocks(blocksBeforeCP)
	currentCPInfo := blkfileMgrWrapper.blockfileMgr.cpInfo
	cpInfo1 := &checkpointInfo{
		currentCPInfo.latestFileChunkSuffixNum,
		currentCPInfo.latestFileChunksize,
		currentCPInfo.lastBlockNumber}

	blocksAfterCP := bg.NextTestBlocks(numBlocksAfterCheckpoint)
	blkfileMgrWrapper.addBlocks(blocksAfterCP)
	cpInfo2 := blkfileMgrWrapper.blockfileMgr.cpInfo

	// simulate a crash scenario
	lastBlockBytes := []byte{}
	encodedLen := proto.EncodeVarint(uint64(numLastBlockBytes))
	randomBytes := testutil.ConstructRandomBytes(t, numLastBlockBytes)
	lastBlockBytes = append(lastBlockBytes, encodedLen...)
	lastBlockBytes = append(lastBlockBytes, randomBytes...)
	partialBytes := lastBlockBytes[:numPartialBytesToWrite]
	blkfileMgrWrapper.blockfileMgr.currentFileWriter.append(partialBytes, true)
	blkfileMgrWrapper.blockfileMgr.saveCurrentInfo(cpInfo1, true)
	blkfileMgrWrapper.close()

	// simulate a start after a crash
	blkfileMgrWrapper = newTestBlockfileWrapper(t, env)
	defer blkfileMgrWrapper.close()
	cpInfo3 := blkfileMgrWrapper.blockfileMgr.cpInfo
	testutil.AssertEquals(t, cpInfo3, cpInfo2)

	// add fresh blocks after restart
	blocksAfterRestart := bg.NextTestBlocks(2)
	blkfileMgrWrapper.addBlocks(blocksAfterRestart)
	allBlocks := []*common.Block{}
	allBlocks = append(allBlocks, blocksBeforeCP...)
	allBlocks = append(allBlocks, blocksAfterCP...)
	allBlocks = append(allBlocks, blocksAfterRestart...)
	testBlockfileMgrBlockIterator(t, blkfileMgrWrapper.blockfileMgr, 1, len(allBlocks), allBlocks)
}
Example #9
func BenchmarkLedgerPopulate(b *testing.B) {
	b.Logf("testParams:%q", testParams)
	disableLogging()
	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
	kvSize := flags.Int("KVSize", 1000, "size of the key-value")
	maxKeySuffix := flags.Int("MaxKeySuffix", 1, "the keys are appended with _1, _2, ... up to MaxKeySuffix")
	keyPrefix := flags.String("KeyPrefix", "Key_", "The generated workload will have keys such as KeyPrefix_1, KeyPrefix_2, and so on")
	batchSize := flags.Int("BatchSize", 100, "number of transactions in a batch")
	flags.Parse(testParams)

	b.Logf(`Running test with params: keyPrefix=%s, kvSize=%d, batchSize=%d, maxKeySuffix=%d`,
		*keyPrefix, *kvSize, *batchSize, *maxKeySuffix)

	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(b)
	ledger := ledgerTestWrapper.ledger

	chaincode := "chaincodeId"
	numBatches := *maxKeySuffix / *batchSize
	tx := constructDummyTx(b)
	value := testutil.ConstructRandomBytes(b, *kvSize-(len(chaincode)+len(*keyPrefix)))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for batchID := 0; batchID < numBatches; batchID++ {
			ledger.BeginTxBatch(1)
			// execute one batch
			var transactions []*protos.Transaction
			for j := 0; j < *batchSize; j++ {
				ledger.TxBegin("txUuid")
				keyNumber := batchID*(*batchSize) + j
				key := *keyPrefix + strconv.Itoa(keyNumber)
				ledger.SetState(chaincode, key, value)
				ledger.TxFinished("txUuid", true)
				transactions = append(transactions, tx)
			}
			ledger.CommitTxBatch(1, transactions, nil, []byte("proof"))
		}
	}
	b.StopTimer()
	b.Logf("DB stats afters populating: %s", testDBWrapper.GetEstimatedNumKeys(b))
}
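All of the examples above pass a testing.TB and a byte count to testutil.ConstructRandomBytes and use the returned slice as a randomly generated value payload, typically sized as kvSize minus the key length. For reference, a minimal sketch of what such a helper could look like is shown below; it is an assumption inferred from the call sites above, not the actual Fabric testutil implementation, which may differ.
package testutil

import (
	"crypto/rand"
	"testing"
)

// ConstructRandomBytes returns a slice of `size` random bytes, failing the
// test or benchmark immediately if the random source cannot be read.
// Hypothetical sketch; the real helper in the Fabric testutil package may differ.
func ConstructRandomBytes(t testing.TB, size int) []byte {
	value := make([]byte, size)
	if _, err := rand.Read(value); err != nil {
		t.Fatalf("Error while generating random bytes: %s", err)
	}
	return value
}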