// BenchmarkLedgerRandomTransactions measures the throughput of committing batches of
// transactions that perform random reads and writes against the ledger. Workload
// parameters are supplied via the package-level testParams slice and parsed with a
// dedicated FlagSet.
func BenchmarkLedgerRandomTransactions(b *testing.B) {
	disableLogging()
	b.Logf("testParams:%q", testParams)
	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
	keyPrefix := flags.String("KeyPrefix", "Key_", "The generated workload will have keys such as KeyPrefix_1, KeyPrefix_2, and so on")
	kvSize := flags.Int("KVSize", 1000, "size of a key-value pair in bytes")
	maxKeySuffix := flags.Int("MaxKeySuffix", 1, "the keys are appended with _1, _2, ... up to MaxKeySuffix")
	batchSize := flags.Int("BatchSize", 100, "number of transactions per batch")
	numBatches := flags.Int("NumBatches", 100, "number of batches")
	numReadsFromLedger := flags.Int("NumReadsFromLedger", 4, "number of key-values read from the ledger per transaction")
	numWritesToLedger := flags.Int("NumWritesToLedger", 4, "number of key-values written to the ledger per transaction")
	flags.Parse(testParams)
	b.Logf(`Running test with params: keyPrefix=%s, kvSize=%d, batchSize=%d, maxKeySuffix=%d, numBatches=%d, numReadsFromLedger=%d, numWritesToLedger=%d`,
		*keyPrefix, *kvSize, *batchSize, *maxKeySuffix, *numBatches, *numReadsFromLedger, *numWritesToLedger)

	ledger, err := GetNewLedger()
	testutil.AssertNoError(b, err, "Error while constructing ledger")
	chaincode := "chaincodeId"
	tx := constructDummyTx(b)
	// Size the random value so that chaincodeID + key prefix + value add up to roughly KVSize bytes.
	value := testutil.ConstructRandomBytes(b, *kvSize-(len(chaincode)+len(*keyPrefix)))

	b.ResetTimer()
	startTime := time.Now()
	for i := 0; i < b.N; i++ {
		for batchID := 0; batchID < *numBatches; batchID++ {
			ledger.BeginTxBatch(1)
			// Execute one batch of simulated transactions.
			var transactions []*protos.Transaction
			for j := 0; j < *batchSize; j++ {
				randomKeySuffixGen := testutil.NewTestRandomNumberGenerator(*maxKeySuffix)
				ledger.TxBegin("txUuid")
				for k := 0; k < *numReadsFromLedger; k++ {
					randomKey := *keyPrefix + strconv.Itoa(randomKeySuffixGen.Next())
					ledger.GetState(chaincode, randomKey, true)
				}
				for k := 0; k < *numWritesToLedger; k++ {
					randomKey := *keyPrefix + strconv.Itoa(randomKeySuffixGen.Next())
					ledger.SetState(chaincode, randomKey, value)
				}
				ledger.TxFinished("txUuid", true)
				transactions = append(transactions, tx)
			}
			ledger.CommitTxBatch(1, transactions, nil, []byte("proof"))
		}
	}
	b.StopTimer()
	perfstat.UpdateTimeStat("timeSpent", startTime)
	perfstat.PrintStats()
	b.Logf("DB stats after populating: %s", testDBWrapper.GetEstimatedNumKeys(b))
}
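The benchmark re-parses its workload knobs from the package-level testParams slice rather than from os.Args, so the test binary needs something to populate that slice before the benchmarks run. Below is a minimal sketch of such wiring; the package name, the -testParams flag, and the inline splitting are assumptions for illustration, not the original harness code.

// Package name assumed for illustration only.
package ledger

import (
	"flag"
	"os"
	"regexp"
	"testing"
)

// testParams is assumed to be the package-level slice that
// BenchmarkLedgerRandomTransactions re-parses with its own FlagSet.
var testParams []string

// TestMain is a hedged sketch of one way to populate testParams: split a single
// -testParams flag value, e.g.
//   go test -bench BenchmarkLedgerRandomTransactions -testParams="-KVSize=1000, -NumBatches=10"
// into individual arguments before running the benchmarks.
func TestMain(m *testing.M) {
	raw := flag.String("testParams", "", "comma-separated benchmark parameters")
	flag.Parse()
	if *raw != "" {
		testParams = regexp.MustCompile(`,\s*`).Split(*raw, -1)
	}
	os.Exit(m.Run())
}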
// get returns the bucketNode for the given key, reading through to the DB when the
// cache is disabled or the key is not cached. The deferred call records the time
// spent in this method.
func (cache *bucketCache) get(key bucketKey) (*bucketNode, error) {
	defer perfstat.UpdateTimeStat("timeSpent", time.Now())
	if !cache.isEnabled {
		return fetchBucketNodeFromDB(&key)
	}
	cache.lock.RLock()
	defer cache.lock.RUnlock()
	bucketNode := cache.c[key]
	if bucketNode == nil {
		// Cache miss: fall back to the DB. The result is not inserted into the
		// cache here; the read lock only protects the lookup.
		return fetchBucketNodeFromDB(&key)
	}
	return bucketNode, nil
}
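A side note on the deferred perfstat call above: Go evaluates a deferred call's arguments at the point of the defer statement, so time.Now() captures the entry time and the elapsed duration is recorded when get returns. The small standalone program below illustrates just that language behavior; it is not Fabric code, and the names are made up for the example.

package main

import (
	"fmt"
	"time"
)

// report prints how long the surrounding function took. The start argument is
// captured when the defer statement executes, i.e. at function entry.
func report(name string, start time.Time) {
	fmt.Printf("%s took %v\n", name, time.Since(start))
}

func slowOp() {
	defer report("slowOp", time.Now()) // time.Now() is evaluated here, not at return
	time.Sleep(50 * time.Millisecond)
}

func main() {
	slowOp()
}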