Example #1
// ScanSnapshot returns up to max key/value objects starting from
// start (inclusive) and ending at end (non-inclusive) from the
// given snapshotID.
// Specify max=0 for unbounded scans.
func (r *RocksDB) ScanSnapshot(start, end Key, max int64, snapshotID string) ([]proto.RawKeyValue, error) {
	snapshotHandle, ok := r.snapshots[snapshotID]
	if !ok {
		return nil, util.Errorf("snapshotID %s does not exist", snapshotID)
	}

	opts := C.rocksdb_readoptions_create()
	C.rocksdb_readoptions_set_fill_cache(opts, 0)
	C.rocksdb_readoptions_set_snapshot(opts, snapshotHandle)
	defer C.rocksdb_readoptions_destroy(opts)
	return r.scanInternal(start, end, max, opts)
}
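For context, a snapshot handle has to be registered under a snapshotID before ScanSnapshot can look it up in r.snapshots. A minimal sketch of that registration, assuming r.snapshots is a map keyed by snapshot ID and using only rocksdb_create_snapshot and rocksdb_release_snapshot from the RocksDB C API (the CreateSnapshot/ReleaseSnapshot method names are illustrative, not necessarily the project's own):

// CreateSnapshot captures the current DB state under snapshotID so that
// later ScanSnapshot calls read a consistent view (sketch, assumed API).
func (r *RocksDB) CreateSnapshot(snapshotID string) error {
	if _, ok := r.snapshots[snapshotID]; ok {
		return util.Errorf("snapshotID %s already exists", snapshotID)
	}
	r.snapshots[snapshotID] = C.rocksdb_create_snapshot(r.rdb)
	return nil
}

// ReleaseSnapshot frees the handle registered under snapshotID.
func (r *RocksDB) ReleaseSnapshot(snapshotID string) error {
	handle, ok := r.snapshots[snapshotID]
	if !ok {
		return util.Errorf("snapshotID %s does not exist", snapshotID)
	}
	C.rocksdb_release_snapshot(r.rdb, handle)
	delete(r.snapshots, snapshotID)
	return nil
}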
Example #2
// Scan returns up to max key/value objects starting from
// start (inclusive) and ending at end (non-inclusive).
// If max is zero then the number of key/values returned is unbounded.
func (r *RocksDB) Scan(start, end Key, max int64) ([]RawKeyValue, error) {
	var keyVals []RawKeyValue
	if bytes.Compare(start, end) >= 0 {
		return keyVals, nil
	}
	// In order to prevent content displacement, caching is disabled
	// when performing scans. Any options set within the shared read
	// options field that should be carried over need to be set here
	// as well.
	opts := C.rocksdb_readoptions_create()
	C.rocksdb_readoptions_set_fill_cache(opts, 0)
	defer C.rocksdb_readoptions_destroy(opts)
	it := C.rocksdb_create_iterator(r.rdb, opts)
	defer C.rocksdb_iter_destroy(it)

	byteCount := len(start)
	if byteCount == 0 {
		// start=Key("") needs special treatment since we need
		// to access start[0] in an explicit seek.
		C.rocksdb_iter_seek_to_first(it)
	} else {
		C.rocksdb_iter_seek(it, bytesPointer(start), C.size_t(byteCount))
	}
	for i := int64(1); C.rocksdb_iter_valid(it) == 1; C.rocksdb_iter_next(it) {
		if max > 0 && i > max {
			break
		}
		var l C.size_t
		// The data returned by rocksdb_iter_{key,value} is not meant to be
		// freed by the client. It is a direct reference to the data managed
		// by the iterator, so it is copied instead of freed.
		data := C.rocksdb_iter_key(it, &l)
		k := C.GoBytes(unsafe.Pointer(data), C.int(l))
		if bytes.Compare(k, end) >= 0 {
			break
		}
		data = C.rocksdb_iter_value(it, &l)
		v := C.GoBytes(unsafe.Pointer(data), C.int(l))
		keyVals = append(keyVals, RawKeyValue{
			Key:   k,
			Value: v,
		})
		i++
	}
	// Check for any errors during iteration.
	var cErr *C.char
	C.rocksdb_iter_get_error(it, &cErr)
	if cErr != nil {
		return nil, charToErr(cErr)
	}
	return keyVals, nil
}
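A brief usage sketch of the function above, assuming db is an open *RocksDB; the key range, the limit of 100, and the error handling are illustrative only:

	// Fetch at most 100 entries in the half-open range ["a", "b").
	kvs, err := db.Scan(Key("a"), Key("b"), 100)
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range kvs {
		fmt.Printf("%q -> %d bytes\n", kv.Key, len(kv.Value))
	}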
Example #3
// scan returns up to max key/value objects starting from
// start (inclusive) and ending at end (non-inclusive).
// If max is zero then the number of key/values returned is unbounded.
func (r *RocksDB) scan(start, end Key, max int64) ([]KeyValue, error) {
	// In order to prevent content displacement, caching is disabled
	// when performing scans. Any options set within the shared read
	// options field that should be carried over need to be set here
	// as well.
	opts := C.rocksdb_readoptions_create()
	C.rocksdb_readoptions_set_fill_cache(opts, 0)
	defer C.rocksdb_readoptions_destroy(opts)
	it := C.rocksdb_create_iterator(r.rdb, opts)
	defer C.rocksdb_iter_destroy(it)

	keyVals := []KeyValue{}
	if len(start) == 0 {
		// Seek to the first key explicitly: an empty start key would
		// make the &start[0] dereference below panic.
		C.rocksdb_iter_seek_to_first(it)
	} else {
		C.rocksdb_iter_seek(it, (*C.char)(unsafe.Pointer(&start[0])), C.size_t(len(start)))
	}
	for i := int64(1); C.rocksdb_iter_valid(it) == 1; C.rocksdb_iter_next(it) {
		if max > 0 && i > max {
			break
		}
		var l C.size_t
		// The data returned by rocksdb_iter_{key,value} is not meant to be
		// freed by the client. It is a direct reference to the data managed
		// by the iterator, so it is copied instead of freed.
		data := C.rocksdb_iter_key(it, &l)
		k := C.GoBytes(unsafe.Pointer(data), C.int(l))
		if bytes.Compare(k, end) >= 0 {
			break
		}
		data = C.rocksdb_iter_value(it, &l)
		v := C.GoBytes(unsafe.Pointer(data), C.int(l))
		keyVals = append(keyVals, KeyValue{
			Key:   k,
			Value: Value{Bytes: v},
		})
		i++
	}
	// Check for any errors during iteration.
	var cErr *C.char
	C.rocksdb_iter_get_error(it, &cErr)
	if cErr != nil {
		return nil, charToErr(cErr)
	}
	return keyVals, nil
}
Example #4
// SetFillCache controls whether reads performed with this ReadOptions will
// fill the server's cache. It defaults to true.
//
// It is useful to turn this off on ReadOptions for DB.Iterator (and DB.Get)
// calls used in offline threads to prevent bulk scans from flushing out live
// user data in the cache.
//
// See also Options.SetCache
func (ro *ReadOptions) SetFillCache(b bool) {
	C.rocksdb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
}
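The pattern the doc comment describes might look like the sketch below, assuming a levigo-style wrapper in which NewReadOptions, Close, NewIterator, and the iterator methods exist alongside this SetFillCache (those names, and the process helper, are assumptions, not confirmed by the snippet above):

	// Disable cache fill so a one-off bulk scan does not evict hot data.
	ro := NewReadOptions()
	defer ro.Close()
	ro.SetFillCache(false)

	it := db.NewIterator(ro)
	defer it.Close()
	for it.SeekToFirst(); it.Valid(); it.Next() {
		process(it.Key(), it.Value())
	}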
Example #5
// Should the "data block"/"index block"/"filter block" read for this
// iteration be cached in memory?
// Callers may wish to set this field to false for bulk scans.
// Default: true
func (self *ReadOptions) SetFillCache(value bool) {
	C.rocksdb_readoptions_set_fill_cache(self.c, boolToChar(value))
}
Example #6
// Scan returns up to max key/value objects starting from
// start (inclusive) and ending at end (non-inclusive).
// If max is zero then the number of key/values returned is unbounded.
func (r *RocksDB) Scan(start, end Key, max int64) ([]proto.RawKeyValue, error) {
	opts := C.rocksdb_readoptions_create()
	C.rocksdb_readoptions_set_fill_cache(opts, 0)
	defer C.rocksdb_readoptions_destroy(opts)
	return r.scanInternal(start, end, max, opts)
}
Example #7
File: db.go Project: tradia/gotable
func (opt *ReadOptions) SetFillCache(fillCache bool) {
	C.rocksdb_readoptions_set_fill_cache(opt.rOpt, boolToUchar(fillCache))
}
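The boolToUchar helper used in Examples #4 and #7 (and the analogous boolToChar in Example #5) is not shown in any of the snippets. A plausible implementation, assuming it simply maps a Go bool to the 0/1 unsigned char the RocksDB C API expects:

// boolToUchar converts a Go bool to the C unsigned char (0 or 1) expected
// by rocksdb_readoptions_set_fill_cache (sketch, assumed implementation).
func boolToUchar(b bool) C.uchar {
	if b {
		return C.uchar(1)
	}
	return C.uchar(0)
}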