Code Example #1
func main() {
	dbName := "/data/mydb"
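	// Block-based table options: a 512 MB LRU block cache, 256 KB data blocks,
	// and a Bloom filter at 15 bits per key to skip disk reads for absent keys.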
	cache := gorocksdb.NewLRUCache(512 * 1024 * 1024)
	filter := gorocksdb.NewBloomFilter(15)
	to := gorocksdb.NewDefaultBlockBasedTableOptions()
	to.SetBlockSize(256 * 1024)
	to.SetBlockCache(cache)
	to.SetFilterPolicy(filter)
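	// DB-wide options: universal compaction, 512 MB write buffers, up to 5
	// memtables in memory, merging at least 2 of them before a flush.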
	options := gorocksdb.NewDefaultOptions()
	options.SetBlockBasedTableFactory(to)
	options.SetCreateIfMissing(true)
	options.SetStatsDumpPeriodSec(60 * 1) // dump stats every 60 seconds
	options.SetCompactionStyle(gorocksdb.UniversalCompactionStyle)
	options.SetWriteBufferSize(512 * 1024 * 1024)
	options.SetMaxWriteBufferNumber(5)
	options.SetMinWriteBufferNumberToMerge(2)
	db, err := gorocksdb.OpenDb(options, dbName)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	putStatch := doPut(db, 16*1024, 32*1024)
	for {
		p := <-putStatch
		fmt.Println("avgTime ", p.avgTime, "maxTime", p.maxTime)
	}
}
Code Example #2
File: config.go Project: ferhatelmas/blevex
func applyConfig(o *gorocksdb.Options, config map[string]interface{}) (
	*gorocksdb.Options, error) {

	cim, ok := config["create_if_missing"].(bool)
	if ok {
		o.SetCreateIfMissing(cim)
	}

	eie, ok := config["error_if_exists"].(bool)
	if ok {
		o.SetErrorIfExists(eie)
	}

	wbs, ok := config["write_buffer_size"].(float64)
	if ok {
		o.SetWriteBufferSize(int(wbs))
	}

	mof, ok := config["max_open_files"].(float64)
	if ok {
		o.SetMaxOpenFiles(int(mof))
	}

	tt, ok := config["total_threads"].(float64)
	if ok {
		o.IncreaseParallelism(int(tt))
	}

	// options in the block based table options object
	bbto := gorocksdb.NewDefaultBlockBasedTableOptions()

	lcc, ok := config["lru_cache_capacity"].(float64)
	if ok {
		c := gorocksdb.NewLRUCache(int(lcc))
		bbto.SetBlockCache(c)
	}

	bfbpk, ok := config["bloom_filter_bits_per_key"].(float64)
	if ok {
		bf := gorocksdb.NewBloomFilter(int(bfbpk))
		bbto.SetFilterPolicy(bf)
	}

	// set the block based table options
	o.SetBlockBasedTableFactory(bbto)

	return o, nil
}
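
A minimal usage sketch, not part of the blevex source (the keys and values are illustrative, and it assumes the same package as applyConfig): numeric settings are passed as float64 because the config map is typically the result of decoding JSON, which is why the function asserts float64 before converting.

func exampleApplyConfig() (*gorocksdb.Options, error) {
	config := map[string]interface{}{
		"create_if_missing":         true,
		"write_buffer_size":         float64(64 * 1024 * 1024),  // 64 MB memtable
		"max_open_files":            float64(1000),
		"total_threads":             float64(4),
		"lru_cache_capacity":        float64(256 * 1024 * 1024), // 256 MB block cache
		"bloom_filter_bits_per_key": float64(10),
	}
	return applyConfig(gorocksdb.NewDefaultOptions(), config)
}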
Code Example #3
File: rocksdb.go Project: robcat/ripple
func NewRocksDB(path string) (*RocksDB, error) {
	opts := gorocksdb.NewDefaultOptions()
	filter := gorocksdb.NewBloomFilter(14)
	opts.SetFilterPolicy(filter)
	opts.SetMaxOpenFiles(10000)
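	// Open the database read-only; the final argument (false) tells RocksDB
	// not to fail just because a WAL/log file is still present.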
	db, err := gorocksdb.OpenDbForReadOnly(opts, path, false)
	if err != nil {
		return nil, err
	}
	return &RocksDB{
		db:     db,
		ro:     gorocksdb.NewDefaultReadOptions(),
		hits:   metrics.NewMeter(),
		misses: metrics.NewMeter(),
		cache:  lru.New(1000000),
	}, nil
}
Code Example #4
File: store.go Project: mijia/rocksq
// NewStore returns a Store, a RocksDB wrapper built from the given StoreOptions
func NewStore(options StoreOptions) (*Store, error) {
	options.SetDefaults()
	if options.Directory == "" {
		return nil, fmt.Errorf("empty directory in store options")
	}
	if options.IsDebug {
		log.EnableDebug()
	}

	s := &Store{
		directory:  options.Directory,
		useTailing: !options.DisableTailing,
		cfHandles:  make(map[string]*rocks.ColumnFamilyHandle),
		queues:     make(map[string]*Queue),
	}

	opts := rocks.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	opts.IncreaseParallelism(options.Parallel)
	opts.SetMergeOperator(&_CountMerger{})
	opts.SetMaxSuccessiveMerges(64)

	opts.SetWriteBufferSize(options.WriteBufferSize)
	opts.SetMaxWriteBufferNumber(options.WriteBufferNumber)
	opts.SetTargetFileSizeBase(options.FileSizeBase)
	opts.SetLevel0FileNumCompactionTrigger(8)
	opts.SetLevel0SlowdownWritesTrigger(16)
	opts.SetLevel0StopWritesTrigger(24)
	opts.SetNumLevels(4)
	opts.SetMaxBytesForLevelBase(512 * 1024 * 1024)
	opts.SetMaxBytesForLevelMultiplier(8)
	opts.SetCompression(options.Compression)
	opts.SetDisableAutoCompactions(options.DisableAutoCompaction)

	bbto := rocks.NewDefaultBlockBasedTableOptions()
	bbto.SetBlockCache(rocks.NewLRUCache(options.MemorySize))
	bbto.SetFilterPolicy(rocks.NewBloomFilter(10))
	opts.SetBlockBasedTableFactory(bbto)

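	// Keep every table file handle open (-1) and enable an 8M-bit bloom filter
	// over memtable key prefixes to cheapen prefix lookups.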
	opts.SetMaxOpenFiles(-1)
	opts.SetMemtablePrefixBloomBits(8 * 1024 * 1024)

	var err error
	if err = os.MkdirAll(options.Directory, 0755); err != nil {
		log.Errorf("Failed to mkdir %q, %s", options.Directory, err)
		return nil, err
	}

	cfNames, err := rocks.ListColumnFamilies(opts, options.Directory)
	if err != nil {
		// FIXME: confirm that this error only means the db does not exist yet,
		// which is why the column families cannot be listed
		log.Errorf("Failed to collect the column family names, %s", err)
	} else {
		log.Debugf("Got column family names for the existing db, %+v", cfNames)
	}

	if len(cfNames) == 0 {
		// We create the default column family to get the column family handle
		cfNames = []string{"default"}
	}
	cfOpts := make([]*rocks.Options, len(cfNames))
	for i := range cfNames {
		cfOpts[i] = opts
	}
	db, cfHandles, err := rocks.OpenDbColumnFamilies(opts, options.Directory, cfNames, cfOpts)
	if err != nil {
		log.Errorf("Failed to open rocks database, %s", err)
		return nil, err
	}

	s.DB = db
	s.dbOpts = opts
	s.ro = rocks.NewDefaultReadOptions()
	s.ro.SetFillCache(false)
	s.ro.SetTailing(!options.DisableTailing)
	s.wo = rocks.NewDefaultWriteOptions()
	s.wo.DisableWAL(options.DisableWAL)
	s.wo.SetSync(options.Sync)

	for i := range cfNames {
		s.cfHandles[cfNames[i]] = cfHandles[i]
	}
	return s, nil
}
Code Example #5
File: config.go Project: blevesearch/blevex
func applyConfig(o *gorocksdb.Options, config map[string]interface{}) (
	*gorocksdb.Options, error) {

	cim, ok := config["create_if_missing"].(bool)
	if ok {
		o.SetCreateIfMissing(cim)
	}

	eie, ok := config["error_if_exists"].(bool)
	if ok {
		o.SetErrorIfExists(eie)
	}

	pc, ok := config["paranoid_checks"].(bool)
	if ok {
		o.SetParanoidChecks(pc)
	}

	ill, ok := config["info_log_level"].(float64)
	if ok {
		o.SetInfoLogLevel(gorocksdb.InfoLogLevel(int(ill)))
	}

	tt, ok := config["total_threads"].(float64)
	if ok {
		o.IncreaseParallelism(int(tt))
	}

	ofpl, ok := config["optimize_for_point_lookup"].(float64)
	if ok {
		o.OptimizeForPointLookup(uint64(ofpl))
	}

	olsc, ok := config["optimize_level_style_compaction"].(float64)
	if ok {
		o.OptimizeLevelStyleCompaction(uint64(olsc))
	}

	ousc, ok := config["optimize_universal_style_compaction"].(float64)
	if ok {
		o.OptimizeUniversalStyleCompaction(uint64(ousc))
	}

	wbs, ok := config["write_buffer_size"].(float64)
	if ok {
		o.SetWriteBufferSize(int(wbs))
	}

	mwbn, ok := config["max_write_buffer_number"].(float64)
	if ok {
		o.SetMaxWriteBufferNumber(int(mwbn))
	}

	mwbntm, ok := config["min_write_buffer_number_to_merge"].(float64)
	if ok {
		o.SetMinWriteBufferNumberToMerge(int(mwbntm))
	}

	mof, ok := config["max_open_files"].(float64)
	if ok {
		o.SetMaxOpenFiles(int(mof))
	}

	c, ok := config["compression"].(float64)
	if ok {
		o.SetCompression(gorocksdb.CompressionType(int(c)))
	}

	mltc, ok := config["min_level_to_compress"].(float64)
	if ok {
		o.SetMinLevelToCompress(int(mltc))
	}

	nl, ok := config["num_levels"].(float64)
	if ok {
		o.SetNumLevels(int(nl))
	}

	lfnct, ok := config["level0_file_num_compaction_trigger"].(float64)
	if ok {
		o.SetLevel0FileNumCompactionTrigger(int(lfnct))
	}

	lswt, ok := config["level0_slowdown_writes_trigger"].(float64)
	if ok {
		o.SetLevel0SlowdownWritesTrigger(int(lswt))
	}

	lstopwt, ok := config["level0_stop_writes_trigger"].(float64)
	if ok {
		o.SetLevel0StopWritesTrigger(int(lstopwt))
	}

	mmcl, ok := config["max_mem_compaction_level"].(float64)
	if ok {
		o.SetMaxMemCompactionLevel(int(mmcl))
	}

	tfsb, ok := config["target_file_size_base"].(float64)
	if ok {
		o.SetTargetFileSizeBase(uint64(tfsb))
	}

	tfsm, ok := config["target_file_size_multiplier"].(float64)
	if ok {
		o.SetTargetFileSizeMultiplier(int(tfsm))
	}

	mbflb, ok := config["max_bytes_for_level_base"].(float64)
	if ok {
		o.SetMaxBytesForLevelBase(uint64(mbflb))
	}

	mbflm, ok := config["max_bytes_for_level_multiplier"].(float64)
	if ok {
		o.SetMaxBytesForLevelMultiplier(int(mbflm))
	}

	ecf, ok := config["expanded_compaction_factor"].(float64)
	if ok {
		o.SetExpandedCompactionFactor(int(ecf))
	}

	scf, ok := config["source_compaction_factor"].(float64)
	if ok {
		o.SetSourceCompactionFactor(int(scf))
	}

	mgof, ok := config["max_grandparent_overlap_factor"].(float64)
	if ok {
		o.SetMaxGrandparentOverlapFactor(int(mgof))
	}

	dds, ok := config["disable_data_sync"].(bool)
	if ok {
		o.SetDisableDataSync(dds)
	}

	uf, ok := config["use_fsync"].(bool)
	if ok {
		o.SetUseFsync(uf)
	}

	dofpm, ok := config["delete_obsolete_files_period_micros"].(float64)
	if ok {
		o.SetDeleteObsoleteFilesPeriodMicros(uint64(dofpm))
	}

	mbc, ok := config["max_background_compactions"].(float64)
	if ok {
		o.SetMaxBackgroundCompactions(int(mbc))
	}

	mbf, ok := config["max_background_flushes"].(float64)
	if ok {
		o.SetMaxBackgroundFlushes(int(mbf))
	}

	mlfs, ok := config["max_log_file_size"].(float64)
	if ok {
		o.SetMaxLogFileSize(int(mlfs))
	}

	lfttr, ok := config["log_file_time_to_roll"].(float64)
	if ok {
		o.SetLogFileTimeToRoll(int(lfttr))
	}

	klfn, ok := config["keep_log_file_num"].(float64)
	if ok {
		o.SetKeepLogFileNum(int(klfn))
	}

	hrl, ok := config["hard_rate_limit"].(float64)
	if ok {
		o.SetHardRateLimit(hrl)
	}

	rldmm, ok := config["rate_limit_delay_max_millisecond"].(float64)
	if ok {
		o.SetRateLimitDelayMaxMilliseconds(uint(rldmm))
	}

	mmfs, ok := config["max_manifest_file_size"].(float64)
	if ok {
		o.SetMaxManifestFileSize(uint64(mmfs))
	}

	tcnsb, ok := config["table_cache_numshardbits"].(float64)
	if ok {
		o.SetTableCacheNumshardbits(int(tcnsb))
	}

	tcrscl, ok := config["table_cache_remove_scan_count_limit"].(float64)
	if ok {
		o.SetTableCacheRemoveScanCountLimit(int(tcrscl))
	}

	abs, ok := config["arena_block_size"].(float64)
	if ok {
		o.SetArenaBlockSize(int(abs))
	}

	dac, ok := config["disable_auto_compactions"].(bool)
	if ok {
		o.SetDisableAutoCompactions(dac)
	}

	wts, ok := config["WAL_ttl_seconds"].(float64)
	if ok {
		o.SetWALTtlSeconds(uint64(wts))
	}

	wslm, ok := config["WAL_size_limit_MB"].(float64)
	if ok {
		o.SetWalSizeLimitMb(uint64(wslm))
	}

	mps, ok := config["manifest_preallocation_size"].(float64)
	if ok {
		o.SetManifestPreallocationSize(int(mps))
	}

	prkwf, ok := config["purge_redundant_kvs_while_flush"].(bool)
	if ok {
		o.SetPurgeRedundantKvsWhileFlush(prkwf)
	}

	aob, ok := config["allow_os_buffer"].(bool)
	if ok {
		o.SetAllowOsBuffer(aob)
	}

	amr, ok := config["allow_mmap_reads"].(bool)
	if ok {
		o.SetAllowMmapReads(amr)
	}

	amw, ok := config["allow_mmap_writes"].(bool)
	if ok {
		o.SetAllowMmapWrites(amw)
	}

	sleor, ok := config["skip_log_error_on_recovery"].(bool)
	if ok {
		o.SetSkipLogErrorOnRecovery(sleor)
	}

	sdps, ok := config["stats_dump_period_sec"].(float64)
	if ok {
		o.SetStatsDumpPeriodSec(uint(sdps))
	}

	aroo, ok := config["advise_random_on_open"].(bool)
	if ok {
		o.SetAdviseRandomOnOpen(aroo)
	}

	ahocs, ok := config["access_hint_on_compaction_start"].(float64)
	if ok {
		o.SetAccessHintOnCompactionStart(gorocksdb.CompactionAccessPattern(uint(ahocs)))
	}

	uam, ok := config["use_adaptive_mutex"].(bool)
	if ok {
		o.SetUseAdaptiveMutex(uam)
	}

	bps, ok := config["bytes_per_sync"].(float64)
	if ok {
		o.SetBytesPerSync(uint64(bps))
	}

	cs, ok := config["compaction_style"].(float64)
	if ok {
		o.SetCompactionStyle(gorocksdb.CompactionStyle(uint(cs)))
	}

	vcic, ok := config["verify_checksums_in_compaction"].(bool)
	if ok {
		o.SetVerifyChecksumsInCompaction(vcic)
	}

	mssii, ok := config["max_sequential_skip_in_iterations"].(float64)
	if ok {
		o.SetMaxSequentialSkipInIterations(uint64(mssii))
	}

	ius, ok := config["inplace_update_support"].(bool)
	if ok {
		o.SetInplaceUpdateSupport(ius)
	}

	iunl, ok := config["inplace_update_num_locks"].(float64)
	if ok {
		o.SetInplaceUpdateNumLocks(int(iunl))
	}

	es, ok := config["enable_statistics"].(bool)
	if ok && es {
		o.EnableStatistics()
	}

	pfbl, ok := config["prepare_for_bulk_load"].(bool)
	if ok && pfbl {
		o.PrepareForBulkLoad()
	}

	// options in the block based table options object
	bbto := gorocksdb.NewDefaultBlockBasedTableOptions()

	lcc, ok := config["lru_cache_capacity"].(float64)
	if ok {
		c := gorocksdb.NewLRUCache(int(lcc))
		bbto.SetBlockCache(c)
	}

	bfbpk, ok := config["bloom_filter_bits_per_key"].(float64)
	if ok {
		bf := gorocksdb.NewBloomFilter(int(bfbpk))
		bbto.SetFilterPolicy(bf)
	}

	// set the block based table options
	o.SetBlockBasedTableFactory(bbto)

	return o, nil
}
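
A hedged sketch of driving this function from a JSON document (not taken from the blevesearch/blevex source; the keys shown are illustrative and it additionally needs encoding/json imported): json.Unmarshal decodes every JSON number into a float64, which is exactly the type this function asserts before converting to whatever each setter expects.

func exampleConfigFromJSON(raw []byte) (*gorocksdb.Options, error) {
	// e.g. raw = []byte(`{"create_if_missing": true, "enable_statistics": true,
	//                     "max_background_compactions": 4, "lru_cache_capacity": 134217728}`)
	var config map[string]interface{}
	if err := json.Unmarshal(raw, &config); err != nil {
		return nil, err
	}
	return applyConfig(gorocksdb.NewDefaultOptions(), config)
}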
Code Example #6
File: db.go Project: pingcap/tidbrocks
func (db *rocksDB) initialize(path string, conf *config) error {
	if conf == nil {
		conf = newDefaultConfig()
	}

	// Create the path first if it does not exist
	if err := os.MkdirAll(path, 0700); err != nil {
		return errors.Trace(err)
	}

	opts := gorocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	opts.SetErrorIfExists(false)

	opts.SetCompression(gorocksdb.CompressionType(conf.CompressionType))
	opts.SetWriteBufferSize(conf.WriteBufferSize)
	opts.SetMaxOpenFiles(conf.MaxOpenFiles)
	opts.SetNumLevels(conf.NumLevels)

	opts.SetMaxWriteBufferNumber(conf.MaxWriteBufferNumber)
	opts.SetMinWriteBufferNumberToMerge(conf.MinWriteBufferNumberToMerge)
	opts.SetLevel0FileNumCompactionTrigger(conf.Level0FileNumCompactionTrigger)
	opts.SetLevel0SlowdownWritesTrigger(conf.Level0SlowdownWritesTrigger)
	opts.SetLevel0StopWritesTrigger(conf.Level0StopWritesTrigger)
	opts.SetTargetFileSizeBase(uint64(conf.TargetFileSizeBase))
	opts.SetTargetFileSizeMultiplier(conf.TargetFileSizeMultiplier)
	opts.SetMaxBytesForLevelBase(uint64(conf.MaxBytesForLevelBase))
	opts.SetMaxBytesForLevelMultiplier(conf.MaxBytesForLevelMultiplier)

	opts.SetDisableAutoCompactions(conf.DisableAutoCompactions)
	opts.SetDisableDataSync(conf.DisableDataSync)
	opts.SetUseFsync(conf.UseFsync)
	opts.SetMaxBackgroundCompactions(conf.MaxBackgroundCompactions)
	opts.SetMaxBackgroundFlushes(conf.MaxBackgroundFlushes)
	opts.SetAllowOsBuffer(conf.AllowOSBuffer)

	topts := gorocksdb.NewDefaultBlockBasedTableOptions()
	topts.SetBlockSize(conf.BlockSize)

	cache := gorocksdb.NewLRUCache(conf.CacheSize)
	topts.SetBlockCache(cache)

	topts.SetFilterPolicy(gorocksdb.NewBloomFilter(conf.BloomFilterSize))
	opts.SetBlockBasedTableFactory(topts)

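	// A dedicated Env sizes the background thread pools: the default (low
	// priority) pool mostly runs compactions, the high-priority pool runs flushes.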
	env := gorocksdb.NewDefaultEnv()
	env.SetBackgroundThreads(conf.BackgroundThreads)
	env.SetHighPriorityBackgroundThreads(conf.HighPriorityBackgroundThreads)
	opts.SetEnv(env)

	db.path = path
	db.opts = opts
	db.ropt = gorocksdb.NewDefaultReadOptions()
	db.wopt = gorocksdb.NewDefaultWriteOptions()
	db.env = env
	db.topts = topts
	db.cache = cache
	db.snapshotFillCache = conf.SnapshotFillCache

	var err error
	if db.rkdb, err = gorocksdb.OpenDb(db.opts, db.path); err != nil {
		return errors.Trace(err)
	}
	return nil
}
Code Example #7
File: rocks.go Project: ngaut/gorockdis
func (rh *RocksDBHandler) Init() error {
	rh.options = rocks.NewDefaultOptions()
	rh.options.SetBlockCache(rocks.NewLRUCache(rh.cacheSize))
	rh.options.SetBlockSize(rh.blockSize)
	rh.options.SetCreateIfMissing(rh.createIfMissing)
	if rh.bloomFilter > 0 {
		rh.options.SetFilterPolicy(rocks.NewBloomFilter(rh.bloomFilter))
	}
	if rh.maxOpenFiles > 0 {
		rh.options.SetMaxOpenFiles(rh.maxOpenFiles)
	}

	switch rh.compression {
	case "no":
		rh.options.SetCompression(rocks.NoCompression)
	case "snappy":
		rh.options.SetCompression(rocks.SnappyCompression)
	case "zlib":
		rh.options.SetCompression(rocks.ZlibCompression)
	case "bzip2":
		rh.options.SetCompression(rocks.BZip2Compression)
	}

	switch rh.compactionStyle {
	case "level":
		rh.options.SetCompactionStyle(rocks.LevelCompactionStyle)
	case "universal":
		rh.options.SetCompactionStyle(rocks.UniversalCompactionStyle)
	}

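	// Register one merger per supported Redis data type for the merge operator
	// set up below.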
	rh.dsMergers = make(map[string]DataStructureMerger)
	rh.dsMergers[kRedisString] = &StringMerger{}
	rh.dsMergers[kRedisList] = &ListMerger{}
	rh.dsMergers[kRedisHash] = &HashMerger{}
	rh.dsMergers[kRedisSet] = &SetMerger{}

	if rh.maxMerge > 0 {
		rh.options.SetMaxSuccessiveMerges(rh.maxMerge)
	}
	rh.options.SetMergeOperator(rocks.NewMergeOperator(rh))

	db, err := rocks.OpenDb(rh.options, rh.dbDir)
	if err != nil {
		rh.Close()
		return err
	}
	rh.db = db

	infos := []string{
		fmt.Sprintf("dbDir=%s", rh.dbDir),
		fmt.Sprintf("cacheSize=%d", rh.cacheSize),
		fmt.Sprintf("blockSize=%d", rh.blockSize),
		fmt.Sprintf("createIfMissing=%v", rh.createIfMissing),
		fmt.Sprintf("bloomFilter=%d", rh.bloomFilter),
		fmt.Sprintf("compression=%s", rh.compression),
		fmt.Sprintf("compactionStyle=%s", rh.compactionStyle),
		fmt.Sprintf("maxOpenFiles=%d", rh.maxOpenFiles),
		fmt.Sprintf("maxMerge=%d", rh.maxMerge),
	}
	log.Printf("[RocksDBHandler] Initialized, %s", strings.Join(infos, ", "))
	return nil
}