// CommitTxBatch - gets invoked when the current transaction-batch needs to be committed
// This function returns successfully iff the transaction details and state changes (that
// may have happened during execution of this transaction-batch) have been committed to permanent storage
func (ledger *Ledger) CommitTxBatch(id interface{}, transactions []*protos.Transaction, proof []byte) error {
	err := ledger.checkValidIDCommitORRollback(id)
	if err != nil {
		return err
	}
	success := true
	// Wrap the deferred calls in closures so they observe the final value of
	// success; a plain `defer f(success)` evaluates its argument immediately,
	// so the later `success = false` assignments would never be seen.
	defer func() { ledger.resetForNextTxGroup(success) }()
	defer func() { ledger.blockchain.blockPersistenceStatus(success) }()
	stateHash, err := ledger.state.GetHash()
	if err != nil {
		success = false
		return err
	}
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	block := protos.NewBlock(transactions)
	newBlockNumber, err := ledger.blockchain.addPersistenceChangesForNewBlock(context.TODO(), block, stateHash, writeBatch)
	if err != nil {
		success = false
		return err
	}
	ledger.state.AddChangesForPersistence(newBlockNumber, writeBatch)
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	dbErr := db.GetDBHandle().DB.Write(opt, writeBatch)
	if dbErr != nil {
		success = false
		return dbErr
	}
	producer.Send(producer.CreateBlockEvent(block))
	return nil
}
func (rh *RocksDBHandler) RedisDel(key []byte, keys ...[]byte) (int, error) {
	if rh.db == nil {
		return 0, ErrRocksIsDead
	}
	if len(key) == 0 {
		return 0, ErrWrongArgumentsCount
	}
	keyData := append([][]byte{key}, keys...)
	count := 0
	readOptions := rocks.NewDefaultReadOptions()
	writeOptions := rocks.NewDefaultWriteOptions()
	defer readOptions.Destroy()
	defer writeOptions.Destroy()
	for _, dKey := range keyData {
		_, err := rh.loadRedisObject(readOptions, dKey)
		if err == nil {
			batch := rocks.NewWriteBatch()
			batch.Delete(rh.getTypeKey(dKey))
			batch.Delete(dKey)
			if err := rh.db.Write(writeOptions, batch); err == nil {
				count++
			}
			batch.Destroy()
		}
	}
	return count, nil
}
// WriteToDB tests can use this method for persisting a given batch to db
func (testDB *TestDBWrapper) WriteToDB(t *testing.T, writeBatch *gorocksdb.WriteBatch) {
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	err := GetDBHandle().DB.Write(opt, writeBatch)
	if err != nil {
		t.Fatalf("Error while writing to db. Error: %s", err)
	}
}
func (rh *RocksDBHandler) _list_doMerge(key []byte, value interface{}, opCode string, start, end int) error {
	var values [][]byte
	if d1Slice, ok := value.([]byte); ok {
		values = [][]byte{d1Slice}
	}
	if d2Slice, ok := value.([][]byte); ok {
		values = d2Slice
	}
	if len(values) == 0 {
		return ErrWrongArgumentsCount
	}
	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	batch := rocks.NewWriteBatch()
	defer batch.Destroy()
	batch.Put(rh.getTypeKey(key), []byte(kRedisList))
	for _, dValue := range values {
		operand := ListOperand{opCode, start, end, dValue}
		if data, err := encode(operand); err == nil {
			batch.Merge(key, data)
		} else {
			return err
		}
	}
	return rh.db.Write(options, batch)
}
func doPut(db *gorocksdb.DB, from int, to int) <-chan putStat {
	var totalTime time.Duration
	var maxTime time.Duration
	putCount := 0
	loopCount := 100000
	putStatCh := make(chan putStat)
	wo := gorocksdb.NewDefaultWriteOptions()
	go func() {
		for {
			key := []byte(uuid.NewV4().String())
			value := randBytes(from, to)
			start := time.Now()
			err := db.Put(wo, key, value)
			if err != nil {
				fmt.Println("dbPut error : ", err)
				os.Exit(1)
			}
			delta := time.Since(start)
			if delta > maxTime {
				maxTime = delta
			}
			totalTime += delta
			putCount++
			if (putCount % loopCount) == 0 {
				avgTime := totalTime / time.Duration(loopCount)
				putStatCh <- putStat{maxTime, avgTime}
				maxTime = 0
				totalTime = 0
			}
		}
	}()
	return putStatCh
}
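// A minimal consumer sketch for doPut above, not part of the source. The
// maxTime/avgTime field names of putStat are assumed from the positional
// struct literal, and db is any already-opened *gorocksdb.DB.
func reportPutStats(db *gorocksdb.DB) {
	statCh := doPut(db, 64, 256) // random values of 64-256 bytes
	// one sample arrives per 100000 puts; this variant of doPut never
	// closes the channel, so the loop runs until the process exits
	for stat := range statCh {
		fmt.Printf("put latency: max=%v avg=%v\n", stat.maxTime, stat.avgTime)
	}
}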
func write_multi_cfs() error {
	dbOpts := gorocksdb.NewDefaultOptions()
	dbOpts.SetCreateIfMissing(true)
	if err := os.RemoveAll("/tmp/multicf_db"); err != nil {
		return err
	}
	db, err := gorocksdb.OpenDb(dbOpts, "/tmp/multicf_db")
	if err != nil {
		return err
	}
	var handles []*gorocksdb.ColumnFamilyHandle
	for i := 0; i < 4; i++ {
		handle, err := db.CreateColumnFamily(dbOpts, fmt.Sprint(i))
		if err != nil {
			return err
		}
		handles = append(handles, handle)
	}
	writeOpts := gorocksdb.NewDefaultWriteOptions()
	if err := db.Put(writeOpts, []byte("default"), []byte("default")); err != nil {
		return err
	}
	for i := 0; i < 16; i++ {
		key := []byte(fmt.Sprint(i))
		if err := db.PutCF(writeOpts, handles[i%4], key, key); err != nil {
			return err
		}
	}
	db.Close()
	return nil
}
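// A read-back companion sketch for write_multi_cfs, not in the source: it
// reopens /tmp/multicf_db with all five column families and prints each entry.
// The function name and loop are assumptions; the gorocksdb calls are standard.
func read_multi_cfs() error {
	opts := gorocksdb.NewDefaultOptions()
	cfNames := []string{"default", "0", "1", "2", "3"}
	cfOpts := []*gorocksdb.Options{opts, opts, opts, opts, opts}
	db, handles, err := gorocksdb.OpenDbColumnFamilies(opts, "/tmp/multicf_db", cfNames, cfOpts)
	if err != nil {
		return err
	}
	defer db.Close()
	ro := gorocksdb.NewDefaultReadOptions()
	defer ro.Destroy()
	for i := 0; i < 16; i++ {
		key := []byte(fmt.Sprint(i))
		// keys were written round-robin across the four extra column families
		slice, err := db.GetCF(ro, handles[1+i%4], key)
		if err != nil {
			return err
		}
		fmt.Printf("cf %d: %s=%s\n", i%4, key, slice.Data())
		slice.Free()
	}
	return nil
}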
func (blockchain *blockchain) persistRawBlock(block *protos.Block, blockNumber uint64) error {
	blockBytes, blockBytesErr := block.Bytes()
	if blockBytesErr != nil {
		return blockBytesErr
	}
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	writeBatch.PutCF(db.GetDBHandle().BlockchainCF, encodeBlockNumberDBKey(blockNumber), blockBytes)
	blockHash, err := block.GetHash()
	if err != nil {
		return err
	}
	// Need to check as we support out-of-order blocks in cases such as block/state
	// synchronization. This is really blockchain height, not size.
	if blockchain.getSize() < blockNumber+1 {
		sizeBytes := encodeUint64(blockNumber + 1)
		writeBatch.PutCF(db.GetDBHandle().BlockchainCF, blockCountKey, sizeBytes)
		blockchain.size = blockNumber + 1
		blockchain.previousBlockHash = blockHash
	}
	if blockchain.indexer.isSynchronous() {
		blockchain.indexer.createIndexesSync(block, blockNumber, blockHash, writeBatch)
	}
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	return db.GetDBHandle().DB.Write(opt, writeBatch)
}
func NewRocksdbStorage(dbfname string, dbtype string, mergeOp gorocksdb.MergeOperator) (*RocksdbStorage, error) {
	var sto *RocksdbStorage
	if dbtype != "kv" && dbtype != "json" {
		return sto, fmt.Errorf("unknown db type")
	}
	opts := gorocksdb.NewDefaultOptions()
	if mergeOp != nil {
		opts.SetMergeOperator(mergeOp)
	}
	// opts.IncreaseParallelism(runtime.NumCPU())
	// opts.OptimizeLevelStyleCompaction(0)
	opts.SetCreateIfMissing(true)
	db, err := gorocksdb.OpenDb(opts, dbfname)
	if err != nil {
		return sto, err
	}
	ro := gorocksdb.NewDefaultReadOptions()
	wo := gorocksdb.NewDefaultWriteOptions()
	sto = &RocksdbStorage{
		dbfname: dbfname,
		db:      db,
		ro:      ro,
		wo:      wo,
	}
	return sto, nil
}
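// A minimal usage sketch for NewRocksdbStorage, not in the source; the path is
// hypothetical and no merge operator is installed.
func openKVStore() (*RocksdbStorage, error) {
	// "kv" and "json" are the only types the constructor accepts
	return NewRocksdbStorage("/tmp/example.db", "kv", nil)
}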
func New(rdb *gorocksdb.DB) *DB {
	db := &DB{rdb: rdb}
	db.wo = gorocksdb.NewDefaultWriteOptions()
	db.ro = gorocksdb.NewDefaultReadOptions()
	db.caches = lru.New(1000)
	db.RawSet([]byte{MAXBYTE}, nil) // for Enumerator seek to last
	return db
}
func (rh *RocksDBHandler) RedisSet(key, value []byte) error {
	if err := rh.checkRedisCall(key, value); err != nil {
		return err
	}
	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	return rh.saveRedisObject(options, key, value, kRedisString)
}
func (openchainDB *OpenchainDB) Delete(cfHandler *gorocksdb.ColumnFamilyHandle, key []byte) error {
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	err := openchainDB.DB.DeleteCF(opt, cfHandler, key)
	if err != nil {
		fmt.Println("Error while trying to delete key:", key)
		return err
	}
	return nil
}
// Put saves the key/value in the given column family
func (openchainDB *OpenchainDB) Put(cfHandler *gorocksdb.ColumnFamilyHandle, key []byte, value []byte) error {
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	err := openchainDB.DB.PutCF(opt, cfHandler, key, value)
	if err != nil {
		dbLogger.Errorf("Error while trying to write key: %s", key)
		return err
	}
	return nil
}
// CommitStateDelta commits the changes from state.ApplyStateDelta to the DB.
func (state *State) CommitStateDelta() error {
	if state.updateStateImpl {
		state.stateImpl.PrepareWorkingSet(state.stateDelta)
		state.updateStateImpl = false
	}
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	state.stateImpl.AddChangesForPersistence(writeBatch)
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	return db.GetDBHandle().DB.Write(opt, writeBatch)
}
// createIndexesInternal adds entries into db for creating indexes on various attributes
func (indexer *blockchainIndexerAsync) createIndexesInternal(block *protos.Block, blockNumber uint64, blockHash []byte) error {
	openchainDB := db.GetDBHandle()
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	addIndexDataForPersistence(block, blockNumber, blockHash, writeBatch)
	writeBatch.PutCF(openchainDB.IndexesCF, lastIndexedBlockKey, encodeBlockNumber(blockNumber))
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	err := openchainDB.DB.Write(opt, writeBatch)
	if err != nil {
		return err
	}
	indexer.indexerState.blockIndexed(blockNumber)
	return nil
}
func (rh *RocksDBHandler) _string_doMerge(key, value []byte, opCode string) error {
	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	batch := rocks.NewWriteBatch()
	defer batch.Destroy()
	batch.Put(rh.getTypeKey(key), []byte(kRedisString))
	operand := StringOperand{opCode, value}
	data, err := encode(operand)
	if err != nil {
		return err
	}
	batch.Merge(key, data)
	return rh.db.Write(options, batch)
}
func (s *Store) newWriteOptions() *gorocksdb.WriteOptions {
	wo := gorocksdb.NewDefaultWriteOptions()
	if s.woptSyncUse {
		wo.SetSync(s.woptSync)
	} else {
		// request fsync on write for safety by default
		wo.SetSync(true)
	}
	if s.woptDisableWALUse {
		wo.DisableWAL(s.woptDisableWAL)
	}
	return wo
}
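// A usage sketch for newWriteOptions, not in the source. It assumes the Store
// also holds its handle in a db field of type *gorocksdb.DB (that field is not
// shown above); putDurable and its key/value are hypothetical.
func (s *Store) putDurable(key, value []byte) error {
	wo := s.newWriteOptions()
	defer wo.Destroy()
	// with woptSyncUse left false, the write requests fsync, trading
	// throughput for durability across process crashes
	return s.db.Put(wo, key, value)
}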
func create_purge_backups() error {
	dbOpts := gorocksdb.NewDefaultOptions()
	dbOpts.SetCreateIfMissing(true)
	if err := os.RemoveAll("/tmp/backups_db"); err != nil {
		return err
	}
	db, err := gorocksdb.OpenDb(dbOpts, "/tmp/backups_db")
	if err != nil {
		return err
	}
	if err := os.RemoveAll("/tmp/db_backups"); err != nil {
		return err
	}
	be, err := gorocksdb.OpenBackupEngine(dbOpts, "/tmp/db_backups")
	if err != nil {
		return err
	}
	writeOpts := gorocksdb.NewDefaultWriteOptions()
	for i := 0; i < 5; i++ {
		key := []byte(fmt.Sprint(i))
		if err := db.Put(writeOpts, key, key); err != nil {
			return err
		}
		if err := be.CreateNewBackup(db); err != nil {
			return err
		}
	}
	fmt.Println("Num available initially: ", be.GetInfo().GetCount())
	if err := gorocksext.PurgeOldBackups(be, 4); err != nil {
		return err
	}
	fmt.Println(be.GetInfo().GetCount())
	if err := gorocksext.PurgeOldBackups(be, 2); err != nil {
		return err
	}
	fmt.Println(be.GetInfo().GetCount())
	if err := gorocksext.PurgeOldBackups(be, 0); err != nil {
		return err
	}
	fmt.Println(be.GetInfo().GetCount())
	db.Close()
	return nil
}
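// A restore sketch to complement create_purge_backups, not in the source. It
// assumes the stock gorocksdb backup API (NewRestoreOptions and
// RestoreDBFromLatestBackup); the restore target directory is hypothetical.
func restore_latest_backup() error {
	dbOpts := gorocksdb.NewDefaultOptions()
	be, err := gorocksdb.OpenBackupEngine(dbOpts, "/tmp/db_backups")
	if err != nil {
		return err
	}
	ro := gorocksdb.NewRestoreOptions()
	defer ro.Destroy()
	// restore the most recent remaining backup into a fresh directory,
	// using the same directory for data files and the WAL
	return be.RestoreDBFromLatestBackup("/tmp/restored_db", "/tmp/restored_db", ro)
}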
func performBasicReadWrite(t *testing.T) {
	openchainDB := GetDBHandle()
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	writeBatch.PutCF(openchainDB.BlockchainCF, []byte("dummyKey"), []byte("dummyValue"))
	writeBatch.PutCF(openchainDB.StateCF, []byte("dummyKey1"), []byte("dummyValue1"))
	writeBatch.PutCF(openchainDB.StateDeltaCF, []byte("dummyKey2"), []byte("dummyValue2"))
	writeBatch.PutCF(openchainDB.IndexesCF, []byte("dummyKey3"), []byte("dummyValue3"))
	err := openchainDB.DB.Write(opt, writeBatch)
	if err != nil {
		t.Fatalf("Error while writing to db: %s", err)
	}
	value, err := openchainDB.GetFromBlockchainCF([]byte("dummyKey"))
	if err != nil {
		t.Fatalf("read error = [%s]", err)
	}
	if !bytes.Equal(value, []byte("dummyValue")) {
		t.Fatalf("read error. Bytes not equal. Expected [%s], found [%s]", "dummyValue", value)
	}
	value, err = openchainDB.GetFromStateCF([]byte("dummyKey1"))
	if err != nil {
		t.Fatalf("read error = [%s]", err)
	}
	if !bytes.Equal(value, []byte("dummyValue1")) {
		t.Fatalf("read error. Bytes not equal. Expected [%s], found [%s]", "dummyValue1", value)
	}
	value, err = openchainDB.GetFromStateDeltaCF([]byte("dummyKey2"))
	if err != nil {
		t.Fatalf("read error = [%s]", err)
	}
	if !bytes.Equal(value, []byte("dummyValue2")) {
		t.Fatalf("read error. Bytes not equal. Expected [%s], found [%s]", "dummyValue2", value)
	}
	value, err = openchainDB.GetFromIndexesCF([]byte("dummyKey3"))
	if err != nil {
		t.Fatalf("read error = [%s]", err)
	}
	if !bytes.Equal(value, []byte("dummyValue3")) {
		t.Fatalf("read error. Bytes not equal. Expected [%s], found [%s]", "dummyValue3", value)
	}
}
func (rh *RocksDBHandler) RedisMset(keyValues [][]byte) error {
	if rh.db == nil {
		return ErrRocksIsDead
	}
	if len(keyValues) == 0 || len(keyValues)%2 != 0 {
		return ErrWrongArgumentsCount
	}
	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	for i := 0; i < len(keyValues); i += 2 {
		err := rh.saveRedisObject(options, keyValues[i], keyValues[i+1], kRedisString)
		if err != nil {
			return err
		}
	}
	return nil
}
func (rh *RocksDBHandler) _hash_doMerge(key []byte, values [][]byte, opCode string) error {
	if len(values) == 0 || len(values)%2 != 0 {
		return ErrWrongArgumentsCount
	}
	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	batch := rocks.NewWriteBatch()
	defer batch.Destroy()
	batch.Put(rh.getTypeKey(key), []byte(kRedisHash))
	for i := 0; i < len(values); i += 2 {
		operand := HashOperand{opCode, string(values[i]), values[i+1]}
		if data, err := encode(operand); err == nil {
			batch.Merge(key, data)
		} else {
			return err
		}
	}
	return rh.db.Write(options, batch)
}
func performBasicReadWrite(t *testing.T) {
	openchainDB := GetDBHandle()
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	writeBatch.PutCF(openchainDB.BlockchainCF, []byte("dummyKey"), []byte("dummyValue"))
	err := openchainDB.DB.Write(opt, writeBatch)
	if err != nil {
		t.Fatalf("Error while writing to db: %s", err)
	}
	value, err := openchainDB.GetFromBlockchainCF([]byte("dummyKey"))
	if err != nil {
		t.Fatalf("read error = [%s]", err)
	}
	if !bytes.Equal(value, []byte("dummyValue")) {
		t.Fatalf("read error. Bytes not equal. Expected [%s], found [%s]", "dummyValue", value)
	}
}
func (rh *RocksDBHandler) _set_doMerge(key []byte, values [][]byte, opCode string) error {
	if len(values) == 0 {
		return ErrWrongArgumentsCount
	}
	options := rocks.NewDefaultWriteOptions()
	defer options.Destroy()
	batch := rocks.NewWriteBatch()
	defer batch.Destroy()
	batch.Put(rh.getTypeKey(key), []byte(kRedisSet))
	for _, value := range values {
		operand := SetOperand{opCode, value}
		if data, err := encode(operand); err == nil {
			batch.Merge(key, data)
		} else {
			return err
		}
	}
	return rh.db.Write(options, batch)
}
// CommitTxBatch - gets invoked when the current transaction-batch needs to be committed
// This function returns successfully iff the transaction details and state changes (that
// may have happened during execution of this transaction-batch) have been committed to permanent storage
func (ledger *Ledger) CommitTxBatch(id interface{}, transactions []*protos.Transaction, transactionResults []*protos.TransactionResult, metadata []byte) error {
	err := ledger.checkValidIDCommitORRollback(id)
	if err != nil {
		return err
	}
	stateHash, err := ledger.state.GetHash()
	if err != nil {
		ledger.resetForNextTxGroup(false)
		ledger.blockchain.blockPersistenceStatus(false)
		return err
	}
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	block := protos.NewBlock(transactions, metadata)
	block.NonHashData = &protos.NonHashData{}
	newBlockNumber, err := ledger.blockchain.addPersistenceChangesForNewBlock(context.TODO(), block, stateHash, writeBatch)
	if err != nil {
		ledger.resetForNextTxGroup(false)
		ledger.blockchain.blockPersistenceStatus(false)
		return err
	}
	ledger.state.AddChangesForPersistence(newBlockNumber, writeBatch)
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	dbErr := db.GetDBHandle().DB.Write(opt, writeBatch)
	if dbErr != nil {
		ledger.resetForNextTxGroup(false)
		ledger.blockchain.blockPersistenceStatus(false)
		return dbErr
	}
	ledger.resetForNextTxGroup(true)
	ledger.blockchain.blockPersistenceStatus(true)
	sendProducerBlockEvent(block)
	if len(transactionResults) != 0 {
		ledgerLogger.Debug("There were some erroneous transactions. We need to send a 'TX rejected' message here.")
	}
	return nil
}
func doPut(db *gorocksdb.DB, from int, to int) <-chan putStat {
	var totalTime time.Duration
	var maxTime time.Duration
	putCount := 0
	loopCount := 100000
	putStatCh := make(chan putStat)
	wo := gorocksdb.NewDefaultWriteOptions()
	createDBEntry := func(key string) {
		value := randBytes(from, to)
		start := time.Now()
		// the key is a string here, so it must be converted for db.Put
		err := db.Put(wo, []byte(key), value)
		if err != nil {
			fmt.Println("dbPut error : ", err)
			os.Exit(1)
		}
		delta := time.Since(start)
		if delta > maxTime {
			maxTime = delta
		}
		totalTime += delta
		putCount++
		if (putCount % loopCount) == 0 {
			avgTime := totalTime / time.Duration(loopCount)
			putStatCh <- putStat{maxTime, avgTime}
			maxTime = 0
			totalTime = 0
		}
	}
	go func() {
		// the index is unused, so it must be discarded with the blank identifier
		for _, key := range filenames {
			createDBEntry(key)
		}
		close(putStatCh)
	}()
	return putStatCh
}
func check_checkpoints() error {
	dbOpts := gorocksdb.NewDefaultOptions()
	dbOpts.SetCreateIfMissing(true)
	if err := os.RemoveAll("/tmp/checkpoint_db"); err != nil {
		return err
	}
	db, err := gorocksdb.OpenDb(dbOpts, "/tmp/checkpoint_db")
	if err != nil {
		return err
	}
	writeOpts := gorocksdb.NewDefaultWriteOptions()
	for i := 0; i < 16; i++ {
		key := []byte(fmt.Sprint(i))
		if err := db.Put(writeOpts, key, key); err != nil {
			return err
		}
		if i == 8 {
			if err := os.RemoveAll("/tmp/checkpoint_db1"); err != nil {
				return err
			}
			// check the result instead of dropping it, assuming CreateCheckpoint
			// returns an error like the other gorocksext helpers used below
			if err := gorocksext.CreateCheckpoint(db, "/tmp/checkpoint_db1"); err != nil {
				return err
			}
		}
	}
	db.Close()
	fmt.Println("Full Db")
	if err := read_all("/tmp/checkpoint_db"); err != nil {
		return err
	}
	fmt.Println("Checkpointed snapshot")
	if err := read_all("/tmp/checkpoint_db1"); err != nil {
		return err
	}
	return nil
}
func (db *rocksDB) initialize(path string, conf *config) error {
	if conf == nil {
		conf = newDefaultConfig()
	}
	// Create path if not exists first
	if err := os.MkdirAll(path, 0700); err != nil {
		return errors.Trace(err)
	}
	opts := gorocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	opts.SetErrorIfExists(false)
	opts.SetCompression(gorocksdb.CompressionType(conf.CompressionType))
	opts.SetWriteBufferSize(conf.WriteBufferSize)
	opts.SetMaxOpenFiles(conf.MaxOpenFiles)
	opts.SetNumLevels(conf.NumLevels)
	opts.SetMaxWriteBufferNumber(conf.MaxWriteBufferNumber)
	opts.SetMinWriteBufferNumberToMerge(conf.MinWriteBufferNumberToMerge)
	opts.SetLevel0FileNumCompactionTrigger(conf.Level0FileNumCompactionTrigger)
	opts.SetLevel0SlowdownWritesTrigger(conf.Level0SlowdownWritesTrigger)
	opts.SetLevel0StopWritesTrigger(conf.Level0StopWritesTrigger)
	opts.SetTargetFileSizeBase(uint64(conf.TargetFileSizeBase))
	opts.SetTargetFileSizeMultiplier(conf.TargetFileSizeMultiplier)
	opts.SetMaxBytesForLevelBase(uint64(conf.MaxBytesForLevelBase))
	opts.SetMaxBytesForLevelMultiplier(conf.MaxBytesForLevelMultiplier)
	opts.SetDisableAutoCompactions(conf.DisableAutoCompactions)
	opts.SetDisableDataSync(conf.DisableDataSync)
	opts.SetUseFsync(conf.UseFsync)
	opts.SetMaxBackgroundCompactions(conf.MaxBackgroundCompactions)
	opts.SetMaxBackgroundFlushes(conf.MaxBackgroundFlushes)
	opts.SetAllowOsBuffer(conf.AllowOSBuffer)
	topts := gorocksdb.NewDefaultBlockBasedTableOptions()
	topts.SetBlockSize(conf.BlockSize)
	cache := gorocksdb.NewLRUCache(conf.CacheSize)
	topts.SetBlockCache(cache)
	topts.SetFilterPolicy(gorocksdb.NewBloomFilter(conf.BloomFilterSize))
	opts.SetBlockBasedTableFactory(topts)
	env := gorocksdb.NewDefaultEnv()
	env.SetBackgroundThreads(conf.BackgroundThreads)
	env.SetHighPriorityBackgroundThreads(conf.HighPriorityBackgroundThreads)
	opts.SetEnv(env)
	db.path = path
	db.opts = opts
	db.ropt = gorocksdb.NewDefaultReadOptions()
	db.wopt = gorocksdb.NewDefaultWriteOptions()
	db.env = env
	db.topts = topts
	db.cache = cache
	db.snapshotFillCache = conf.SnapshotFillCache
	var err error
	if db.rkdb, err = gorocksdb.OpenDb(db.opts, db.path); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// NewStore returns a Store, a rocksdb wrapper
func NewStore(options StoreOptions) (*Store, error) {
	options.SetDefaults()
	if options.Directory == "" {
		return nil, fmt.Errorf("Empty directory of store options")
	}
	if options.IsDebug {
		log.EnableDebug()
	}
	s := &Store{
		directory:  options.Directory,
		useTailing: !options.DisableTailing,
		cfHandles:  make(map[string]*rocks.ColumnFamilyHandle),
		queues:     make(map[string]*Queue),
	}
	opts := rocks.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	opts.IncreaseParallelism(options.Parallel)
	opts.SetMergeOperator(&_CountMerger{})
	opts.SetMaxSuccessiveMerges(64)
	opts.SetWriteBufferSize(options.WriteBufferSize)
	opts.SetMaxWriteBufferNumber(options.WriteBufferNumber)
	opts.SetTargetFileSizeBase(options.FileSizeBase)
	opts.SetLevel0FileNumCompactionTrigger(8)
	opts.SetLevel0SlowdownWritesTrigger(16)
	opts.SetLevel0StopWritesTrigger(24)
	opts.SetNumLevels(4)
	opts.SetMaxBytesForLevelBase(512 * 1024 * 1024)
	opts.SetMaxBytesForLevelMultiplier(8)
	opts.SetCompression(options.Compression)
	opts.SetDisableAutoCompactions(options.DisableAutoCompaction)
	bbto := rocks.NewDefaultBlockBasedTableOptions()
	bbto.SetBlockCache(rocks.NewLRUCache(options.MemorySize))
	bbto.SetFilterPolicy(rocks.NewBloomFilter(10))
	opts.SetBlockBasedTableFactory(bbto)
	opts.SetMaxOpenFiles(-1)
	opts.SetMemtablePrefixBloomBits(8 * 1024 * 1024)
	if err := os.MkdirAll(options.Directory, 0755); err != nil {
		log.Errorf("Failed to mkdir %q, %s", options.Directory, err)
		return nil, err
	}
	cfNames, err := rocks.ListColumnFamilies(opts, options.Directory)
	if err != nil {
		// FIXME: we need to be sure if this means the db does not exist for now
		// so that we cannot list the column families
		log.Errorf("Failed to collect the column family names, %s", err)
	} else {
		log.Debugf("Got column family names for the existing db, %+v", cfNames)
	}
	if len(cfNames) == 0 {
		// We create the default column family to get the column family handle
		cfNames = []string{"default"}
	}
	cfOpts := make([]*rocks.Options, len(cfNames))
	for i := range cfNames {
		cfOpts[i] = opts
	}
	db, cfHandles, err := rocks.OpenDbColumnFamilies(opts, options.Directory, cfNames, cfOpts)
	if err != nil {
		log.Errorf("Failed to open rocks database, %s", err)
		return nil, err
	}
	s.DB = db
	s.dbOpts = opts
	s.ro = rocks.NewDefaultReadOptions()
	s.ro.SetFillCache(false)
	s.ro.SetTailing(!options.DisableTailing)
	s.wo = rocks.NewDefaultWriteOptions()
	s.wo.DisableWAL(options.DisableWAL)
	s.wo.SetSync(options.Sync)
	for i := range cfNames {
		s.cfHandles[cfNames[i]] = cfHandles[i]
	}
	return s, nil
}
func defaultWriteOptions() *gorocksdb.WriteOptions {
	wo := gorocksdb.NewDefaultWriteOptions()
	// request fsync on write for safety
	wo.SetSync(true)
	return wo
}
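// A short usage sketch for defaultWriteOptions, not in the source; the db
// parameter and key/value are hypothetical stand-ins for an opened database.
func putWithDefaults(db *gorocksdb.DB, key, value []byte) error {
	wo := defaultWriteOptions()
	defer wo.Destroy()
	// SetSync(true) above means this Put does not return until the WAL has
	// been fsynced, so an acknowledged write survives a crash
	return db.Put(wo, key, value)
}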
// CommitTxBatch - gets invoked when the current transaction-batch needs to be committed
// This function returns successfully iff the transaction details and state changes (that
// may have happened during execution of this transaction-batch) have been committed to permanent storage
func (ledger *Ledger) CommitTxBatch(id interface{}, transactions []*protos.Transaction, transactionResults []*protos.TransactionResult, metadata []byte) error {
	err := ledger.checkValidIDCommitORRollback(id)
	if err != nil {
		return err
	}
	stateHash, err := ledger.state.GetHash()
	if err != nil {
		ledger.resetForNextTxGroup(false)
		ledger.blockchain.blockPersistenceStatus(false)
		return err
	}
	writeBatch := gorocksdb.NewWriteBatch()
	defer writeBatch.Destroy()
	block := protos.NewBlock(transactions, metadata)
	ccEvents := []*protos.ChaincodeEvent{}
	if transactionResults != nil {
		ccEvents = make([]*protos.ChaincodeEvent, len(transactionResults))
		for i := 0; i < len(transactionResults); i++ {
			if transactionResults[i].ChaincodeEvent != nil {
				ccEvents[i] = transactionResults[i].ChaincodeEvent
			} else {
				// We need the index so we can map the chaincode event to the
				// transaction that generated it. Hence we need an entry for the
				// cc event even if one wasn't generated for the transaction.
				// We cannot use a nil cc event as protobuf does not like
				// elements of a repeated array to be nil.
				//
				// We should discard empty events without chaincode ID when
				// sending out events.
				ccEvents[i] = &protos.ChaincodeEvent{}
			}
		}
	}
	// Store chaincode events directly in NonHashData. This will likely change
	// in New Consensus where we can move them to Transaction.
	block.NonHashData = &protos.NonHashData{ChaincodeEvents: ccEvents}
	newBlockNumber, err := ledger.blockchain.addPersistenceChangesForNewBlock(context.TODO(), block, stateHash, writeBatch)
	if err != nil {
		ledger.resetForNextTxGroup(false)
		ledger.blockchain.blockPersistenceStatus(false)
		return err
	}
	ledger.state.AddChangesForPersistence(newBlockNumber, writeBatch)
	opt := gorocksdb.NewDefaultWriteOptions()
	defer opt.Destroy()
	dbErr := db.GetDBHandle().DB.Write(opt, writeBatch)
	if dbErr != nil {
		ledger.resetForNextTxGroup(false)
		ledger.blockchain.blockPersistenceStatus(false)
		return dbErr
	}
	ledger.resetForNextTxGroup(true)
	ledger.blockchain.blockPersistenceStatus(true)
	sendProducerBlockEvent(block)
	// Send chaincode events from transaction results.
	sendChaincodeEvents(transactionResults)
	if len(transactionResults) != 0 {
		ledgerLogger.Debug("There were some erroneous transactions. We need to send a 'TX rejected' message here.")
	}
	return nil
}
import "github.com/tecbot/gorocksdb" type RocksDB struct { db *gorocksdb.DB } func NewRocksDB(path string) (*RocksDB, error) { opts := gorocksdb.NewDefaultOptions() //opts.SetBlockCache(gorocksdb.NewLRUCache(3 << 30)) opts.SetCreateIfMissing(true) db, err := gorocksdb.OpenDb(opts, path) return &RocksDB{db: db}, err } var wo = gorocksdb.NewDefaultWriteOptions() func (r *RocksDB) Write(k, v []byte) error { return r.db.Put(wo, k, v) } func (r *RocksDB) Close() { r.db.Close() } type rocksIter struct { iter *gorocksdb.Iterator } func (r *rocksIter) Next() ([]byte, bool) { if !r.iter.Valid() {