Example #1
func read_multi_cfs() error {
	dbOpts := gorocksdb.NewDefaultOptions()
	defaultRO := gorocksdb.NewDefaultReadOptions()
	db, handles, err := gorocksdb.OpenDbColumnFamilies(
		dbOpts,
		"/tmp/multicf_db",
		[]string{"default", "0", "1", "2", "3"},
		[]*gorocksdb.Options{dbOpts, dbOpts, dbOpts, dbOpts, dbOpts},
	)
	if err != nil {
		return err
	}
	defer db.Close()

	iters, err := gorocksext.NewIterators(defaultRO, db, handles)
	if err != nil {
		return err
	}
	for i, iter := range iters {
		fmt.Printf("COUNTING FOR ITER: %d\n", i)
		for iter.SeekToFirst(); iter.Valid(); iter.Next() {
			key := iter.Key()
			fmt.Println(string(key.Data()))
			key.Free() // free each key slice as we go instead of deferring inside the loop
		}
		iter.Close()
	}
	return nil
}
Example #2
func NewRocksdbStorage(dbfname string, dbtype string, mergeOp gorocksdb.MergeOperator) (*RocksdbStorage, error) {
	var sto *RocksdbStorage

	if dbtype != "kv" && dbtype != "json" {
		return nil, fmt.Errorf("unknown db type %q", dbtype)
	}

	opts := gorocksdb.NewDefaultOptions()

	if mergeOp != nil {
		opts.SetMergeOperator(mergeOp)
	}
	// opts.IncreaseParallelism(runtime.NumCPU())
	// opts.OptimizeLevelStyleCompaction(0)
	opts.SetCreateIfMissing(true)

	db, err := gorocksdb.OpenDb(opts, dbfname)
	if err != nil {
		return sto, err
	}
	ro := gorocksdb.NewDefaultReadOptions()
	wo := gorocksdb.NewDefaultWriteOptions()

	sto = &RocksdbStorage{
		dbfname: dbfname,
		db:      db,
		ro:      ro,
		wo:      wo,
	}
	return sto, nil
}
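For context, a minimal caller of the constructor above might look like the following sketch; the database path, the "kv" type tag, and the nil merge operator are illustrative placeholders, not part of the original project.
func openExampleStorage() (*RocksdbStorage, error) {
	// Open a plain key-value store with no merge operator.
	return NewRocksdbStorage("/tmp/example_kv_db", "kv", nil)
}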
Example #3
// Only support key prefix or all keys, e.g. "KEYS *" or "KEYS test*"
func (rh *RocksDBHandler) RedisKeys(pattern []byte) ([][]byte, error) {
	if rh.db == nil {
		return nil, ErrRocksIsDead
	}
	if pattern == nil || len(pattern) == 0 {
		return nil, ErrWrongArgumentsCount
	}
	if pattern[len(pattern)-1] == '*' {
		pattern = pattern[:len(pattern)-1]
	}

	options := rocks.NewDefaultReadOptions()
	defer options.Destroy()
	options.SetFillCache(false)

	data := make([][]byte, 0)
	it := rh.db.NewIterator(options)
	defer it.Close()
	it.Seek(pattern)
	for ; it.Valid(); it.Next() {
		key := it.Key()
		dKey := rh.copySlice(key, false)
		if bytes.HasPrefix(dKey, kTypeKeyPrefix) {
			continue
		}
		if !bytes.HasPrefix(dKey, pattern) {
			break
		}
		data = append(data, dKey)
	}
	if err := it.Err(); err != nil {
		return nil, err
	}
	return data, nil
}
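As a rough illustration of the two supported patterns noted in the comment above ("KEYS *" and a prefix such as "KEYS test*"), a hypothetical caller could look like the sketch below; the handler value and the output handling are assumptions.
func printKeys(rh *RocksDBHandler) error {
	for _, pattern := range [][]byte{[]byte("*"), []byte("test*")} {
		keys, err := rh.RedisKeys(pattern)
		if err != nil {
			return err
		}
		// Print what each pattern matched.
		fmt.Printf("pattern %q matched %d keys\n", pattern, len(keys))
		for _, key := range keys {
			fmt.Println(string(key))
		}
	}
	return nil
}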
Example #4
func (rh *RocksDBHandler) RedisDel(key []byte, keys ...[]byte) (int, error) {
	if rh.db == nil {
		return 0, ErrRocksIsDead
	}
	if key == nil || len(key) == 0 {
		return 0, ErrWrongArgumentsCount
	}

	keyData := append([][]byte{key}, keys...)
	count := 0
	readOptions := rocks.NewDefaultReadOptions()
	writeOptions := rocks.NewDefaultWriteOptions()
	defer readOptions.Destroy()
	defer writeOptions.Destroy()

	for _, dKey := range keyData {
		_, err := rh.loadRedisObject(readOptions, dKey)
		if err == nil {
			batch := rocks.NewWriteBatch()
			batch.Delete(rh.getTypeKey(dKey))
			batch.Delete(dKey)
			if err := rh.db.Write(writeOptions, batch); err == nil {
				count++
			}
			batch.Destroy()
		}
	}
	return count, nil
}
Example #5
func (r *RocksDB) Iterate(key []byte) Iter {
	ro := gorocksdb.NewDefaultReadOptions()
	ro.SetFillCache(false)
	it := r.db.NewIterator(ro)
	// Release the read options once the iterator is created; the iterator holds
	// its own copy (the same pattern Examples #6 and #26 use with defer Destroy).
	ro.Destroy()
	it.Seek(key)
	return &rocksIter{iter: it}
}
Example #6
func (openchainDB *OpenchainDB) getSnapshotIterator(snapshot *gorocksdb.Snapshot, cfHandler *gorocksdb.ColumnFamilyHandle) *gorocksdb.Iterator {
	opt := gorocksdb.NewDefaultReadOptions()
	defer opt.Destroy()
	opt.SetSnapshot(snapshot)
	iter := openchainDB.DB.NewIteratorCF(opt, cfHandler)
	return iter
}
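For completeness, here is a sketch of how a snapshot-backed iterator like the one above is usually driven end to end: take the snapshot, read under it, then release everything. The helper name dumpColumnFamily is hypothetical, and the exact release call (DB.ReleaseSnapshot here) varies between gorocksdb versions.
func dumpColumnFamily(openchainDB *OpenchainDB, cf *gorocksdb.ColumnFamilyHandle) {
	snapshot := openchainDB.DB.NewSnapshot()
	// Release the snapshot when done; some gorocksdb versions expose this as
	// snapshot.Release() instead.
	defer openchainDB.DB.ReleaseSnapshot(snapshot)

	it := openchainDB.getSnapshotIterator(snapshot, cf)
	defer it.Close()
	for it.SeekToFirst(); it.Valid(); it.Next() {
		key := it.Key()
		fmt.Println(string(key.Data()))
		key.Free()
	}
}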
Example #7
File: db.go Project: CowLeo/GoRedis
func New(rdb *gorocksdb.DB) *DB {
	db := &DB{rdb: rdb}
	db.wo = gorocksdb.NewDefaultWriteOptions()
	db.ro = gorocksdb.NewDefaultReadOptions()
	db.caches = lru.New(1000)
	db.RawSet([]byte{MAXBYTE}, nil) // for Enumerator seek to last
	return db
}
Example #8
File: db.go Project: CowLeo/GoRedis
func (d *DB) RangeEnumerate(min, max []byte, direction IterDirection, fn func(i int, key, value []byte, quit *bool)) {
	opts := gorocksdb.NewDefaultReadOptions()
	opts.SetFillCache(false)
	defer opts.Destroy()
	iter := d.rdb.NewIterator(opts)
	defer iter.Close()
	d.Enumerate(iter, min, max, direction, fn)
}
Example #9
func (openchainDB *OpenchainDB) get(cfHandler *gorocksdb.ColumnFamilyHandle, key []byte) ([]byte, error) {
	opt := gorocksdb.NewDefaultReadOptions()
	defer opt.Destroy()
	slice, err := openchainDB.DB.GetCF(opt, cfHandler, key)
	if err != nil {
		fmt.Println("Error while trying to retrieve key:", key)
		return nil, err
	}
	// Copy out of the C-allocated slice and free it, so neither the read
	// options nor the slice leak (matches the corrected version in Example #11).
	defer slice.Free()
	return append([]byte(nil), slice.Data()...), nil
}
Example #10
// GetFromDB gets the value for the given key from default column-family
func (testDB *TestDBWrapper) GetFromDB(t testing.TB, key []byte) []byte {
	db := GetDBHandle().DB
	opt := gorocksdb.NewDefaultReadOptions()
	defer opt.Destroy()
	slice, err := db.Get(opt, key)
	if err != nil {
		t.Fatalf("Error while getting key-value from DB: %s", err)
	}
	defer slice.Free()
	value := append([]byte(nil), slice.Data()...)
	return value
}
Example #11
func (openchainDB *OpenchainDB) get(cfHandler *gorocksdb.ColumnFamilyHandle, key []byte) ([]byte, error) {
	opt := gorocksdb.NewDefaultReadOptions()
	defer opt.Destroy()
	slice, err := openchainDB.DB.GetCF(opt, cfHandler, key)
	if err != nil {
		fmt.Println("Error while trying to retrieve key:", key)
		return nil, err
	}
	defer slice.Free()
	data := append([]byte(nil), slice.Data()...)
	return data, nil
}
Example #12
func (openchainDB *OpenchainDB) getFromSnapshot(snapshot *gorocksdb.Snapshot, cfHandler *gorocksdb.ColumnFamilyHandle, key []byte) ([]byte, error) {
	opt := gorocksdb.NewDefaultReadOptions()
	defer opt.Destroy()
	opt.SetSnapshot(snapshot)
	slice, err := openchainDB.DB.GetCF(opt, cfHandler, key)
	if err != nil {
		dbLogger.Errorf("Error while trying to retrieve key: %s", key)
		return nil, err
	}
	defer slice.Free()
	data := append([]byte(nil), slice.Data()...)
	return data, nil
}
Example #13
func (db *rocksDB) GetSnapshot() (engine.Snapshot, error) {
	snap := db.rkdb.NewSnapshot()

	ropt := gorocksdb.NewDefaultReadOptions()
	ropt.SetFillCache(db.snapshotFillCache)
	ropt.SetSnapshot(snap)

	return &snapshot{
		db:   db,
		snap: snap,
		ropt: ropt,
	}, nil
}
Example #14
func (rh *RocksDBHandler) _list_getData(key []byte) ([][]byte, error) {
	options := rocks.NewDefaultReadOptions()
	defer options.Destroy()
	if obj, err := rh.loadRedisObject(options, key); err != nil {
		if err == ErrDoesNotExist {
			return [][]byte{}, nil
		}
		return nil, err
	} else {
		if obj.Type != kRedisList {
			return nil, ErrWrongTypeRedisObject
		}
		return obj.Data.([][]byte), nil
	}
}
Example #15
func (s *Store) newReadOptions() *gorocksdb.ReadOptions {
	ro := gorocksdb.NewDefaultReadOptions()

	if s.roptVerifyChecksumsUse {
		ro.SetVerifyChecksums(s.roptVerifyChecksums)
	}
	if s.roptFillCacheUse {
		ro.SetFillCache(s.roptFillCache)
	}
	if s.roptReadTierUse {
		ro.SetReadTier(gorocksdb.ReadTier(s.roptReadTier))
	}

	return ro
}
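Since newReadOptions hands ownership of the ReadOptions to its caller, the caller must destroy them after the read. A minimal, hypothetical caller follows; the readKey helper and its db parameter are not part of the original Store.
func (s *Store) readKey(db *gorocksdb.DB, key []byte) ([]byte, error) {
	ro := s.newReadOptions()
	defer ro.Destroy()

	slice, err := db.Get(ro, key)
	if err != nil {
		return nil, err
	}
	defer slice.Free()
	// Copy before the slice is freed so the returned bytes stay valid.
	return append([]byte(nil), slice.Data()...), nil
}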
Example #16
// Get returns the value for the given column family and key
func (openchainDB *OpenchainDB) Get(cfHandler *gorocksdb.ColumnFamilyHandle, key []byte) ([]byte, error) {
	opt := gorocksdb.NewDefaultReadOptions()
	defer opt.Destroy()
	slice, err := openchainDB.DB.GetCF(opt, cfHandler, key)
	if err != nil {
		dbLogger.Errorf("Error while trying to retrieve key: %s", key)
		return nil, err
	}
	defer slice.Free()
	if slice.Data() == nil {
		return nil, nil
	}
	data := makeCopy(slice.Data())
	return data, nil
}
Example #17
func (rh *RocksDBHandler) getKeyType(key []byte) (string, error) {
	if rh.db == nil {
		return "", ErrRocksIsDead
	}
	if key == nil || len(key) == 0 {
		return "", ErrWrongArgumentsCount
	}

	options := rocks.NewDefaultReadOptions()
	defer options.Destroy()
	if slice, err := rh.db.Get(options, rh.getTypeKey(key)); err == nil {
		defer slice.Free()
		return string(slice.Data()), nil
	} else {
		return "", err
	}
}
Example #18
func NewRocksDB(path string) (*RocksDB, error) {
	opts := gorocksdb.NewDefaultOptions()
	filter := gorocksdb.NewBloomFilter(14)
	opts.SetFilterPolicy(filter)
	opts.SetMaxOpenFiles(10000)
	db, err := gorocksdb.OpenDbForReadOnly(opts, path, false)
	if err != nil {
		return nil, err
	}
	return &RocksDB{
		db:     db,
		ro:     gorocksdb.NewDefaultReadOptions(),
		hits:   metrics.NewMeter(),
		misses: metrics.NewMeter(),
		cache:  lru.New(1000000),
	}, nil
}
Example #19
func read_all(db_dir string) error {
	dbOpts := gorocksdb.NewDefaultOptions()
	db, err := gorocksdb.OpenDb(dbOpts, db_dir)
	if err != nil {
		return err
	}
	defer db.Close()

	defaultRO := gorocksdb.NewDefaultReadOptions()
	defer defaultRO.Destroy()

	iter := db.NewIterator(defaultRO)
	defer iter.Close()
	for iter.SeekToFirst(); iter.Valid(); iter.Next() {
		key := iter.Key()
		fmt.Println(string(key.Data()))
		key.Free() // free each key slice immediately instead of deferring in the loop
	}
	return iter.Err()
}
Example #20
func (rh *RocksDBHandler) RedisGet(key []byte) ([]byte, error) {
	if err := rh.checkRedisCall(key); err != nil {
		return nil, err
	}
	if err := rh.checkKeyType(key, kRedisString); err != nil {
		return nil, err
	}

	options := rocks.NewDefaultReadOptions()
	defer options.Destroy()
	if obj, err := rh.loadRedisObject(options, key); err != nil {
		if err == ErrDoesNotExist {
			return []byte{}, nil
		}
		return nil, err
	} else {
		return obj.Data.([]byte), err
	}
}
Example #21
func (rh *RocksDBHandler) RedisExists(key []byte) (int, error) {
	if rh.db == nil {
		return 0, ErrRocksIsDead
	}
	if key == nil || len(key) == 0 {
		return 0, ErrWrongArgumentsCount
	}
	options := rocks.NewDefaultReadOptions()
	defer options.Destroy()

	if _, err := rh.loadRedisObject(options, key); err == nil {
		return 1, nil
	} else {
		if err == ErrDoesNotExist {
			return 0, nil
		}
		return 0, err
	}
}
Example #22
func (rh *RocksDBHandler) _hash_getData(key []byte) (map[string][]byte, error) {
	hashData := make(map[string][]byte)
	options := rocks.NewDefaultReadOptions()
	defer options.Destroy()
	if obj, err := rh.loadRedisObject(options, key); err != nil {
		if err == ErrDoesNotExist {
			return hashData, nil
		}
		return nil, err
	} else {
		if obj.Type != kRedisHash {
			return nil, ErrWrongTypeRedisObject
		}
		data := obj.Data.([][]byte)
		for i := 0; i < len(data); i += 2 {
			hashData[string(data[i])] = data[i+1]
		}
		return hashData, nil
	}
}
Example #23
func (rh *RocksDBHandler) _set_getData(key []byte) (map[string]bool, error) {
	setData := make(map[string]bool)
	options := rocks.NewDefaultReadOptions()
	defer options.Destroy()
	if obj, err := rh.loadRedisObject(options, key); err != nil {
		if err == ErrDoesNotExist {
			return setData, nil
		}
		return nil, err
	} else {
		if obj.Type != kRedisSet {
			return nil, ErrWrongTypeRedisObject
		}
		data := obj.Data.([][]byte)
		for _, itemData := range data {
			setData[string(itemData)] = true
		}
		return setData, nil
	}
}
Example #24
func (rh *RocksDBHandler) RedisType(key []byte) ([]byte, error) {
	if rh.db == nil {
		return nil, ErrRocksIsDead
	}
	if key == nil || len(key) == 0 {
		return nil, ErrWrongArgumentsCount
	}

	options := rocks.NewDefaultReadOptions()
	defer options.Destroy()

	obj, err := rh.loadRedisObject(options, key)
	if err == nil {
		return []byte(obj.Type), nil
	}
	if err == ErrDoesNotExist {
		return []byte("none"), nil
	}
	return nil, err
}
Example #25
func (rh *RocksDBHandler) RedisMget(keys [][]byte) ([][]byte, error) {
	if rh.db == nil {
		return nil, ErrRocksIsDead
	}
	if keys == nil || len(keys) == 0 {
		return nil, ErrWrongArgumentsCount
	}

	options := rocks.NewDefaultReadOptions()
	defer options.Destroy()
	results := make([][]byte, len(keys))
	for i := range results {
		results[i] = []byte{}
	}
	for i := range results {
		if obj, err := rh.loadRedisObject(options, keys[i]); err == nil {
			if obj.Type == kRedisString {
				results[i] = obj.Data.([]byte)
			}
		}
	}
	return results, nil
}
Example #26
func (openchainDB *OpenchainDB) getIterator(cfHandler *gorocksdb.ColumnFamilyHandle) *gorocksdb.Iterator {
	opt := gorocksdb.NewDefaultReadOptions()
	defer opt.Destroy()
	return openchainDB.DB.NewIteratorCF(opt, cfHandler)
}
Example #27
func (hp *HostPort) AsString() string {
	return hp.Host + ":" + strconv.FormatInt(int64(hp.Port), 10)
}

func (hp *HostPort) PortString() string {
	return strconv.FormatInt(int64(hp.Port), 10)
}

// ------------ LevelDB Storage ------------------

// From reading levigo's and levelDB's docs, it looks like the DB is
// safe for concurrent use by multiple goroutines without extra
// synchronization.
var opts = gorocksdb.NewDefaultOptions()
var storeDB *gorocksdb.DB
var readOpt = gorocksdb.NewDefaultReadOptions()
var writeOpt = gorocksdb.NewDefaultWriteOptions()

func storageInit(path string) {
	//opts.IncreaseParallelism(2)
	opts.OptimizeForPointLookup(128) // 128mb cache size
	//opts.SetWriteBufferSize(4*1024*1024) // default 4mb
	opts.SetCreateIfMissing(true)
	opts.SetCompression(gorocksdb.NoCompression)
	opts.SetDisableDataSync(true)
	opts.SetUseFsync(false) // true:fsync, false:fdatasync
	db, err := gorocksdb.OpenDb(opts, path)
	if err != nil {
		panic("error opening DB at " + path + ": " + err.Error())
	}
	storeDB = db
}
Example #28
func (db *rocksDB) initialize(path string, conf *config) error {
	if conf == nil {
		conf = newDefaultConfig()
	}

	// Create path if not exists first
	if err := os.MkdirAll(path, 0700); err != nil {
		return errors.Trace(err)
	}

	opts := gorocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	opts.SetErrorIfExists(false)

	opts.SetCompression(gorocksdb.CompressionType(conf.CompressionType))
	opts.SetWriteBufferSize(conf.WriteBufferSize)
	opts.SetMaxOpenFiles(conf.MaxOpenFiles)
	opts.SetNumLevels(conf.NumLevels)

	opts.SetMaxWriteBufferNumber(conf.MaxWriteBufferNumber)
	opts.SetMinWriteBufferNumberToMerge(conf.MinWriteBufferNumberToMerge)
	opts.SetLevel0FileNumCompactionTrigger(conf.Level0FileNumCompactionTrigger)
	opts.SetLevel0SlowdownWritesTrigger(conf.Level0SlowdownWritesTrigger)
	opts.SetLevel0StopWritesTrigger(conf.Level0StopWritesTrigger)
	opts.SetTargetFileSizeBase(uint64(conf.TargetFileSizeBase))
	opts.SetTargetFileSizeMultiplier(conf.TargetFileSizeMultiplier)
	opts.SetMaxBytesForLevelBase(uint64(conf.MaxBytesForLevelBase))
	opts.SetMaxBytesForLevelMultiplier(conf.MaxBytesForLevelMultiplier)

	opts.SetDisableAutoCompactions(conf.DisableAutoCompactions)
	opts.SetDisableDataSync(conf.DisableDataSync)
	opts.SetUseFsync(conf.UseFsync)
	opts.SetMaxBackgroundCompactions(conf.MaxBackgroundCompactions)
	opts.SetMaxBackgroundFlushes(conf.MaxBackgroundFlushes)
	opts.SetAllowOsBuffer(conf.AllowOSBuffer)

	topts := gorocksdb.NewDefaultBlockBasedTableOptions()
	topts.SetBlockSize(conf.BlockSize)

	cache := gorocksdb.NewLRUCache(conf.CacheSize)
	topts.SetBlockCache(cache)

	topts.SetFilterPolicy(gorocksdb.NewBloomFilter(conf.BloomFilterSize))
	opts.SetBlockBasedTableFactory(topts)

	env := gorocksdb.NewDefaultEnv()
	env.SetBackgroundThreads(conf.BackgroundThreads)
	env.SetHighPriorityBackgroundThreads(conf.HighPriorityBackgroundThreads)
	opts.SetEnv(env)

	db.path = path
	db.opts = opts
	db.ropt = gorocksdb.NewDefaultReadOptions()
	db.wopt = gorocksdb.NewDefaultWriteOptions()
	db.env = env
	db.topts = topts
	db.cache = cache
	db.snapshotFillCache = conf.SnapshotFillCache

	var err error
	if db.rkdb, err = gorocksdb.OpenDb(db.opts, db.path); err != nil {
		return errors.Trace(err)
	}
	return nil
}
Example #29
func defaultReadOptions() *gorocksdb.ReadOptions {
	ro := gorocksdb.NewDefaultReadOptions()
	return ro
}
Example #30
// NewStore returns a Store, a rocksdb wrapper
func NewStore(options StoreOptions) (*Store, error) {
	options.SetDefaults()
	if options.Directory == "" {
		return nil, fmt.Errorf("Empty directory of store options")
	}
	if options.IsDebug {
		log.EnableDebug()
	}

	s := &Store{
		directory:  options.Directory,
		useTailing: !options.DisableTailing,
		cfHandles:  make(map[string]*rocks.ColumnFamilyHandle),
		queues:     make(map[string]*Queue),
	}

	opts := rocks.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	opts.IncreaseParallelism(options.Parallel)
	opts.SetMergeOperator(&_CountMerger{})
	opts.SetMaxSuccessiveMerges(64)

	opts.SetWriteBufferSize(options.WriteBufferSize)
	opts.SetMaxWriteBufferNumber(options.WriteBufferNumber)
	opts.SetTargetFileSizeBase(options.FileSizeBase)
	opts.SetLevel0FileNumCompactionTrigger(8)
	opts.SetLevel0SlowdownWritesTrigger(16)
	opts.SetLevel0StopWritesTrigger(24)
	opts.SetNumLevels(4)
	opts.SetMaxBytesForLevelBase(512 * 1024 * 1024)
	opts.SetMaxBytesForLevelMultiplier(8)
	opts.SetCompression(options.Compression)
	opts.SetDisableAutoCompactions(options.DisableAutoCompaction)

	bbto := rocks.NewDefaultBlockBasedTableOptions()
	bbto.SetBlockCache(rocks.NewLRUCache(options.MemorySize))
	bbto.SetFilterPolicy(rocks.NewBloomFilter(10))
	opts.SetBlockBasedTableFactory(bbto)

	opts.SetMaxOpenFiles(-1)
	opts.SetMemtablePrefixBloomBits(8 * 1024 * 1024)

	var err error
	if err = os.MkdirAll(options.Directory, 0755); err != nil {
		log.Errorf("Failed to mkdir %q, %s", options.Directory, err)
		return nil, err
	}

	cfNames, err := rocks.ListColumnFamilies(opts, options.Directory)
	if err != nil {
		// FIXME: confirm that this error only means the db does not exist yet,
		// which is why the column families cannot be listed
		log.Errorf("Failed to collect the column family names, %s", err)
	} else {
		log.Debugf("Got column family names for the existing db, %+v", cfNames)
	}

	if len(cfNames) == 0 {
		// We create the default column family to get the column family handle
		cfNames = []string{"default"}
	}
	cfOpts := make([]*rocks.Options, len(cfNames))
	for i := range cfNames {
		cfOpts[i] = opts
	}
	db, cfHandles, err := rocks.OpenDbColumnFamilies(opts, options.Directory, cfNames, cfOpts)
	if err != nil {
		log.Errorf("Failed to open rocks database, %s", err)
		return nil, err
	}

	s.DB = db
	s.dbOpts = opts
	s.ro = rocks.NewDefaultReadOptions()
	s.ro.SetFillCache(false)
	s.ro.SetTailing(!options.DisableTailing)
	s.wo = rocks.NewDefaultWriteOptions()
	s.wo.DisableWAL(options.DisableWAL)
	s.wo.SetSync(options.Sync)

	if len(cfNames) > 0 {
		for i := range cfNames {
			s.cfHandles[cfNames[i]] = cfHandles[i]
		}
	}
	return s, nil
}
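Building on the fields initialized above (s.DB, s.ro, s.cfHandles), a minimal read helper for the default column family might look like the sketch below; getDefault is a hypothetical name and not part of the original Store.
func (s *Store) getDefault(key []byte) ([]byte, error) {
	slice, err := s.DB.GetCF(s.ro, s.cfHandles["default"], key)
	if err != nil {
		return nil, err
	}
	defer slice.Free()
	// Copy before the slice is freed so the returned bytes stay valid.
	return append([]byte(nil), slice.Data()...), nil
}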