Example #1
// Open opens the database at the given path, creating it first if
// createIfMissing is set. It configures the write-buffer size, open-file
// limit, compression, an optional LRU block cache, and a 10-bits-per-key
// bloom filter before calling rocksdb_open.
func (db *DB) Open(name string, createIfMissing bool, maxOpenFiles int,
	writeBufSize int, cacheSize int64, compression int) error {
	db.opt = C.rocksdb_options_create()
	C.rocksdb_options_set_create_if_missing(db.opt, boolToUchar(createIfMissing))
	C.rocksdb_options_set_write_buffer_size(db.opt, C.size_t(writeBufSize))
	C.rocksdb_options_set_max_open_files(db.opt, C.int(maxOpenFiles))
	C.rocksdb_options_set_compression(db.opt, C.int(compression))

	var block_options = C.rocksdb_block_based_options_create()
	if cacheSize > 0 {
		db.cache = C.rocksdb_cache_create_lru(C.size_t(cacheSize))
		C.rocksdb_block_based_options_set_block_cache(block_options, db.cache)
	} else {
		C.rocksdb_block_based_options_set_no_block_cache(block_options, 1)
	}
	db.fp = C.rocksdb_filterpolicy_create_bloom(10)
	C.rocksdb_block_based_options_set_filter_policy(block_options, db.fp)

	C.rocksdb_options_set_block_based_table_factory(db.opt, block_options)

	cname := C.CString(name)
	defer C.free(unsafe.Pointer(cname))

	var errStr *C.char
	db.db = C.rocksdb_open(db.opt, cname, &errStr)
	if errStr != nil {
		defer C.free(unsafe.Pointer(errStr))
		return errors.New(C.GoString(errStr))
	}

	db.rOpt = C.rocksdb_readoptions_create()
	db.wOpt = C.rocksdb_writeoptions_create()

	return nil
}
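Open allocates several C-side objects (the options struct, the optional LRU cache, the bloom filter policy, the read/write options and the database handle itself) that Go's garbage collector knows nothing about. The example does not show the matching cleanup; a minimal sketch of one, assuming the same DB fields as above, might look like this:

// Close is a hypothetical counterpart to Open above: it closes the database
// and frees the C objects that Open allocated. Field names mirror the example;
// the real project may organize its cleanup differently.
func (db *DB) Close() {
	if db.db != nil {
		C.rocksdb_close(db.db)
		db.db = nil
	}
	C.rocksdb_readoptions_destroy(db.rOpt)
	C.rocksdb_writeoptions_destroy(db.wOpt)
	if db.cache != nil {
		C.rocksdb_cache_destroy(db.cache)
		db.cache = nil
	}
	C.rocksdb_options_destroy(db.opt)
}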
Example #2
// createOptions sets the default options for creating, reading from, and
// writing to the db. destroyOptions should be called when the options are no
// longer needed.
func (r *RocksDB) createOptions() {
	// TODO(andybons): Set the cache size.
	r.opts = C.rocksdb_options_create()
	C.rocksdb_options_set_create_if_missing(r.opts, 1)

	r.wOpts = C.rocksdb_writeoptions_create()
	r.rOpts = C.rocksdb_readoptions_create()
}
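The comment above mentions a destroyOptions counterpart that is not included in this example. A minimal sketch of it, assuming the same RocksDB fields, could be:

// destroyOptions is an illustrative sketch of the cleanup routine referenced
// in createOptions' comment: it frees the C option objects created above.
func (r *RocksDB) destroyOptions() {
	C.rocksdb_options_destroy(r.opts)
	r.opts = nil
	C.rocksdb_readoptions_destroy(r.rOpts)
	r.rOpts = nil
	C.rocksdb_writeoptions_destroy(r.wOpts)
	r.wOpts = nil
}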
Example #3
// NewReadOptions returns a new ReadOptions object. If createSnapshot is true,
// the options are pinned to a snapshot of the database's current state.
func (db *DB) NewReadOptions(createSnapshot bool) *ReadOptions {
	opt := new(ReadOptions)
	opt.rOpt = C.rocksdb_readoptions_create()
	if createSnapshot {
		opt.snap = C.rocksdb_create_snapshot(db.db)
		C.rocksdb_readoptions_set_snapshot(opt.rOpt, opt.snap)
		opt.db = db.db
	}
	return opt
}
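When createSnapshot is true, the returned ReadOptions pins a snapshot that must eventually be released back to the database, and the read-options object itself must be destroyed. The release path is not part of this example; a minimal sketch, assuming the same ReadOptions fields, might be:

// Close is a hypothetical cleanup method for the ReadOptions built above: it
// releases the pinned snapshot, if any, and frees the C read-options object.
func (opt *ReadOptions) Close() {
	if opt.snap != nil {
		C.rocksdb_release_snapshot(opt.db, opt.snap)
		opt.snap = nil
	}
	C.rocksdb_readoptions_destroy(opt.rOpt)
}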
Example #4
// GetSnapshot returns the value for the given key as seen by the snapshot
// registered under snapshotID, or nil if the key is not present. An error is
// returned if snapshotID does not exist.
func (r *RocksDB) GetSnapshot(key Key, snapshotID string) ([]byte, error) {
	snapshotHandle, ok := r.snapshots[snapshotID]
	if !ok {
		return nil, util.Errorf("snapshotID %s does not exist", snapshotID)
	}

	opts := C.rocksdb_readoptions_create()
	C.rocksdb_readoptions_set_snapshot(opts, snapshotHandle)
	defer C.rocksdb_readoptions_destroy(opts)
	return r.getInternal(key, opts)
}
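GetSnapshot relies on r.snapshots already mapping IDs to C snapshot handles; how those handles are created and released is not shown here. A hedged sketch of that bookkeeping, built on the rocksdb_create_snapshot and rocksdb_release_snapshot C calls and reusing the rdb handle seen in the other examples (an assumption), might be:

// CreateSnapshot and ReleaseSnapshot are illustrative only; the real project's
// registration logic (locking, error wording, field names) may differ.
func (r *RocksDB) CreateSnapshot(snapshotID string) error {
	if _, ok := r.snapshots[snapshotID]; ok {
		return util.Errorf("snapshotID %s already exists", snapshotID)
	}
	r.snapshots[snapshotID] = C.rocksdb_create_snapshot(r.rdb)
	return nil
}

// ReleaseSnapshot frees the snapshot handle and forgets the ID.
func (r *RocksDB) ReleaseSnapshot(snapshotID string) error {
	handle, ok := r.snapshots[snapshotID]
	if !ok {
		return util.Errorf("snapshotID %s does not exist", snapshotID)
	}
	C.rocksdb_release_snapshot(r.rdb, handle)
	delete(r.snapshots, snapshotID)
	return nil
}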
Example #5
// createOptions sets the default options for creating, reading from, and
// writing to the db. destroyOptions should be called when the options are no
// longer needed.
func (r *RocksDB) createOptions() {
	// TODO(andybons): Set the cache size.
	r.opts = C.rocksdb_options_create()
	C.rocksdb_options_set_create_if_missing(r.opts, 1)
	// This enables us to use rocksdb_merge with counter semantics.
	// See rocksdb.{c,h} for the C implementation.
	r.mergeOperator = C.MakeMergeOperator()
	C.rocksdb_options_set_merge_operator(r.opts, r.mergeOperator)

	r.wOpts = C.rocksdb_writeoptions_create()
	r.rOpts = C.rocksdb_readoptions_create()
}
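The merge operator installed above is what gives rocksdb_merge its counter semantics, but the merge call itself is not part of this example. A minimal sketch of it, reusing the rdb handle and charToErr helper that appear in the other examples (both are assumptions here), might be:

// merge submits a merge operand for key; with the counter merge operator
// registered above, value would typically be an encoded increment. This is an
// illustrative sketch, not the project's actual method; key and value are
// assumed to be non-empty.
func (r *RocksDB) merge(key Key, value []byte) error {
	var cErr *C.char
	C.rocksdb_merge(r.rdb, r.wOpts,
		(*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)),
		(*C.char)(unsafe.Pointer(&value[0])), C.size_t(len(value)),
		&cErr)
	if cErr != nil {
		return charToErr(cErr)
	}
	return nil
}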
Example #6
// ScanSnapshot returns up to max key/value objects starting from
// start (inclusive) and ending at end (non-inclusive) from the
// given snapshotID.
// Specify max=0 for unbounded scans.
func (r *RocksDB) ScanSnapshot(start, end Key, max int64, snapshotID string) ([]proto.RawKeyValue, error) {
	snapshotHandle, ok := r.snapshots[snapshotID]
	if !ok {
		return nil, util.Errorf("snapshotID %s does not exist", snapshotID)
	}

	opts := C.rocksdb_readoptions_create()
	C.rocksdb_readoptions_set_fill_cache(opts, 0)
	C.rocksdb_readoptions_set_snapshot(opts, snapshotHandle)
	defer C.rocksdb_readoptions_destroy(opts)
	return r.scanInternal(start, end, max, opts)
}
Example #7
// Scan returns up to max key/value objects starting from
// start (inclusive) and ending at end (non-inclusive).
// If max is zero then the number of key/values returned is unbounded.
func (r *RocksDB) Scan(start, end Key, max int64) ([]RawKeyValue, error) {
	var keyVals []RawKeyValue
	if bytes.Compare(start, end) >= 0 {
		return keyVals, nil
	}
	// In order to prevent content displacement, caching is disabled
	// when performing scans. Any options set within the shared read
	// options field that should be carried over need to be set here
	// as well.
	opts := C.rocksdb_readoptions_create()
	C.rocksdb_readoptions_set_fill_cache(opts, 0)
	defer C.rocksdb_readoptions_destroy(opts)
	it := C.rocksdb_create_iterator(r.rdb, opts)
	defer C.rocksdb_iter_destroy(it)

	byteCount := len(start)
	if byteCount == 0 {
		// start=Key("") needs special treatment since we need
		// to access start[0] in an explicit seek.
		C.rocksdb_iter_seek_to_first(it)
	} else {
		C.rocksdb_iter_seek(it, bytesPointer(start), C.size_t(byteCount))
	}
	for i := int64(1); C.rocksdb_iter_valid(it) == 1; C.rocksdb_iter_next(it) {
		if max > 0 && i > max {
			break
		}
		var l C.size_t
		// The data returned by rocksdb_iter_{key,value} is not meant to be
		// freed by the client. It is a direct reference to the data managed
		// by the iterator, so it is copied instead of freed.
		data := C.rocksdb_iter_key(it, &l)
		k := C.GoBytes(unsafe.Pointer(data), C.int(l))
		if bytes.Compare(k, end) >= 0 {
			break
		}
		data = C.rocksdb_iter_value(it, &l)
		v := C.GoBytes(unsafe.Pointer(data), C.int(l))
		keyVals = append(keyVals, RawKeyValue{
			Key:   k,
			Value: v,
		})
		i++
	}
	// Check for any errors during iteration.
	var cErr *C.char
	C.rocksdb_iter_get_error(it, &cErr)
	if cErr != nil {
		return nil, charToErr(cErr)
	}
	return keyVals, nil
}
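The bytesPointer helper used in the seek above is not shown in this example. One plausible definition (its name is taken from the call site; the exact shape is an assumption) is:

// bytesPointer returns a *C.char aimed at the first byte of b. The caller must
// guarantee b is non-empty, which Scan does by special-casing the empty start
// key before seeking.
func bytesPointer(b []byte) *C.char {
	return (*C.char)(unsafe.Pointer(&b[0]))
}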
Example #8
// createOptions sets the default options for creating, reading from, and
// writing to the db. destroyOptions should be called when the options are no
// longer needed.
func (r *RocksDB) createOptions() {
	// TODO(andybons): Set the cache size.
	r.opts = C.rocksdb_options_create()
	C.rocksdb_options_set_create_if_missing(r.opts, 1)
	// This enables us to use rocksdb_merge with counter semantics.
	// See rocksdb.{c,h} for the C implementation.
	r.mergeOperator = C.make_merge_operator()
	C.rocksdb_options_set_merge_operator(r.opts, r.mergeOperator)
	// This enables garbage collection of transaction and response cache rows.
	r.compactionFilterFactory = C.make_gc_compaction_filter_factory(unsafe.Pointer(r))
	C.rocksdb_options_set_compaction_filter_factory(r.opts, r.compactionFilterFactory)

	r.wOpts = C.rocksdb_writeoptions_create()
	r.rOpts = C.rocksdb_readoptions_create()
}
Example #9
// scan returns up to max key/value objects starting from
// start (inclusive) and ending at end (non-inclusive).
// If max is zero then the number of key/values returned is unbounded.
func (r *RocksDB) scan(start, end Key, max int64) ([]KeyValue, error) {
	// In order to prevent content displacement, caching is disabled
	// when performing scans. Any options set within the shared read
	// options field that should be carried over need to be set here
	// as well.
	opts := C.rocksdb_readoptions_create()
	C.rocksdb_readoptions_set_fill_cache(opts, 0)
	defer C.rocksdb_readoptions_destroy(opts)
	it := C.rocksdb_create_iterator(r.rdb, opts)
	defer C.rocksdb_iter_destroy(it)

	keyVals := []KeyValue{}
	// Seek to the start key. Note that &start[0] assumes start is non-empty;
	// an empty start key would need rocksdb_iter_seek_to_first, as in the
	// Scan example above.
	C.rocksdb_iter_seek(it, (*C.char)(unsafe.Pointer(&start[0])), C.size_t(len(start)))
	for i := int64(1); C.rocksdb_iter_valid(it) == 1; C.rocksdb_iter_next(it) {
		if max > 0 && i > max {
			break
		}
		var l C.size_t
		// The data returned by rocksdb_iter_{key,value} is not meant to be
		// freed by the client. It is a direct reference to the data managed
		// by the iterator, so it is copied instead of freed.
		data := C.rocksdb_iter_key(it, &l)
		k := C.GoBytes(unsafe.Pointer(data), C.int(l))
		if bytes.Compare(k, end) >= 0 {
			break
		}
		data = C.rocksdb_iter_value(it, &l)
		v := C.GoBytes(unsafe.Pointer(data), C.int(l))
		keyVals = append(keyVals, KeyValue{
			Key:   k,
			Value: Value{Bytes: v},
		})
		i++
	}
	// Check for any errors during iteration.
	var cErr *C.char
	C.rocksdb_iter_get_error(it, &cErr)
	if cErr != nil {
		return nil, charToErr(cErr)
	}
	return keyVals, nil
}
Example #10
// NewReadOptions allocates a new ReadOptions object.
func NewReadOptions() *ReadOptions {
	opt := C.rocksdb_readoptions_create()
	return &ReadOptions{opt}
}
Example #11
// NewDefaultReadOptions creates a default ReadOptions object.
func NewDefaultReadOptions() *ReadOptions {
	return NewNativeReadOptions(C.rocksdb_readoptions_create())
}
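In wrapper packages of this style the options object is typically destroyed once the read is done, since the underlying C allocation is not garbage-collected. An illustrative usage sketch (Destroy, db.Get and the Slice methods follow common gorocksdb-style naming and are assumptions about this package, not confirmed by the example):

// readValue creates default read options for a single Get and frees them
// afterwards. Illustrative only; the surrounding API names are assumed.
func readValue(db *DB, key []byte) ([]byte, error) {
	ro := NewDefaultReadOptions()
	defer ro.Destroy()

	slice, err := db.Get(ro, key)
	if err != nil {
		return nil, err
	}
	defer slice.Free()
	// Copy the data out because the slice memory is owned by C.
	return append([]byte(nil), slice.Data()...), nil
}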
Example #12
// Scan returns up to max key/value objects starting from
// start (inclusive) and ending at end (non-inclusive).
// If max is zero then the number of key/values returned is unbounded.
func (r *RocksDB) Scan(start, end Key, max int64) ([]proto.RawKeyValue, error) {
	opts := C.rocksdb_readoptions_create()
	C.rocksdb_readoptions_set_fill_cache(opts, 0)
	defer C.rocksdb_readoptions_destroy(opts)
	return r.scanInternal(start, end, max, opts)
}
Example #13
// NewReadOptions creates a default ReadOptions object.
func NewReadOptions() *ReadOptions {
	return newNativeReadOptions(C.rocksdb_readoptions_create())
}