func Start() {
	dbfile := settings.DBFILE
	var err error

	// try to open an existing database
	db, err = sublevel.Open(dbfile, &opt.Options{
		Filter:         filter.NewBloomFilter(10),
		ErrorIfMissing: true,
	})
	if err != nil {
		// database is missing, create it and do initial setup
		db, err = sublevel.Open(dbfile, &opt.Options{
			Filter:       filter.NewBloomFilter(10),
			ErrorIfExist: true,
		})

		// admin party
		SetRulesAt("", map[string]interface{}{
			"_read":  "*",
			"_write": "*",
			"_admin": "*",
		})
	}
	if err != nil {
		log.WithFields(log.Fields{
			"error":  err,
			"DBFILE": settings.DBFILE,
		}).Fatal("couldn't open database file.")
	}
}
// NewLDBDatabase returns a LevelDB wrapped object.
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
	// Calculate the cache and file descriptor allowance for this particular database
	cache = int(float64(cache) * cacheRatio[filepath.Base(file)])
	if cache < 16 {
		cache = 16
	}
	handles = int(float64(handles) * handleRatio[filepath.Base(file)])
	if handles < 16 {
		handles = 16
	}
	glog.V(logger.Info).Infof("Allotted %dMB cache and %d file handles to %s", cache, handles, file)

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handles,
		BlockCacheCapacity:     cache / 2 * opt.MiB,
		WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
		Filter:                 filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	return &LDBDatabase{
		fn: file,
		db: db,
	}, nil
}
func Open(cfg skv.Config) (*DB, error) {
	var (
		db  = &DB{}
		err error
	)

	cfg.ReFix()

	os.MkdirAll(cfg.DataDir, 0750)

	db.ldb, err = leveldb.OpenFile(cfg.DataDir, &opt.Options{
		WriteBuffer:            cfg.WriteBuffer * opt.MiB,
		BlockCacheCapacity:     cfg.BlockCacheCapacity * opt.MiB,
		OpenFilesCacheCapacity: cfg.OpenFilesCacheCapacity,
		CompactionTableSize:    cfg.CompactionTableSize * opt.MiB,
		Compression:            opt.SnappyCompression,
		Filter:                 filter.NewBloomFilter(10),
	})
	if err == nil {
		db.ttl_worker()
	}

	return db, err
}
func newOptions(cfg *config.LevelDBConfig) *opt.Options {
	opts := &opt.Options{}

	opts.ErrorIfMissing = false
	opts.BlockCacheCapacity = cfg.CacheSize

	// we must use a bloom filter
	opts.Filter = filter.NewBloomFilter(defaultFilterBits)

	if !cfg.Compression {
		opts.Compression = opt.NoCompression
	} else {
		opts.Compression = opt.SnappyCompression
	}

	opts.BlockSize = cfg.BlockSize
	opts.WriteBuffer = cfg.WriteBufferSize
	opts.OpenFilesCacheCapacity = cfg.MaxOpenFiles

	// default values for now; config support can be added later
	opts.CompactionTableSize = 32 * 1024 * 1024
	opts.WriteL0SlowdownTrigger = 16
	opts.WriteL0PauseTrigger = 64

	return opts
}
func NewLevelStore(path string, sync bool) *LevelStore {
	opts := &opt.Options{
		Filter:         filter.NewBloomFilter(10),
		ErrorIfMissing: false,
	}

	db, err := leveldb.OpenFile(path, opts)
	if err != nil {
		panic(fmt.Sprintf("queued.LevelStore: Unable to open db: %v", err))
	}

	// Recover the last assigned id from the highest key in the db.
	id := 0
	iter := db.NewIterator(nil, nil)
	iter.Last()
	if iter.Valid() {
		id, err = strconv.Atoi(string(iter.Key()))
		if err != nil {
			panic(fmt.Sprintf("queued.LevelStore: Error loading db: %v", err))
		}
	}
	// Release the iterator to free its underlying resources.
	iter.Release()

	return &LevelStore{
		id:   id,
		path: path,
		sync: sync,
		db:   db,
	}
}
// newKeyValueFromJSONConfig returns a KeyValue implementation on top of a
// github.com/syndtr/goleveldb/leveldb file.
func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) {
	file := cfg.RequiredString("file")
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	strictness := opt.DefaultStrict
	if env.IsDev() {
		// Be more strict in dev mode.
		strictness = opt.StrictAll
	}
	opts := &opt.Options{
		// The default is 10,
		// 8 means 2.126% or 1/47th disk check rate,
		// 10 means 0.812% error rate (1/2^(bits/1.44)) or 1/123th disk check rate,
		// 12 means 0.31% or 1/322th disk check rate.
		// TODO(tgulacsi): decide which number is the best here. Till then, go with the default.
		Filter: filter.NewBloomFilter(10),
		Strict: strictness,
	}
	db, err := leveldb.OpenFile(file, opts)
	if err != nil {
		return nil, err
	}
	is := &kvis{
		db:       db,
		path:     file,
		opts:     opts,
		readOpts: &opt.ReadOptions{Strict: strictness},
		// On machine crash we want to reindex anyway, and
		// fsyncs may impose a great performance penalty.
		writeOpts: &opt.WriteOptions{Sync: false},
	}
	return is, nil
}
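As a worked check of the false-positive figures quoted in the comment above, the standard bloom-filter approximation FP ≈ 1/2^(bits/1.44) reproduces all three numbers. This is an illustrative sketch, not part of any snippet here; the function name is invented.

func bloomFalsePositiveRates() {
	// FP ≈ 1/2^(bits/1.44) for a bloom filter sized in bits per key.
	for _, bits := range []float64{8, 10, 12} {
		fp := math.Pow(2, -bits/1.44)
		fmt.Printf("%2.0f bits/key -> %.3f%% false positives (~1/%d disk checks)\n",
			bits, fp*100, int(1/fp))
	}
	// Prints approximately:
	//  8 bits/key -> 2.126% (~1/47)
	// 10 bits/key -> 0.812% (~1/123)
	// 12 bits/key -> 0.310% (~1/322)
}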
func BenchmarkLevelGetPut(b *testing.B) {
	o := &opt.Options{
		Filter: filter.NewBloomFilter(10),
	}
	db2, err := leveldb.OpenFile("./leveldb_test.db2", o)
	if err != nil {
		panic("Unable to open database")
	}
	kv := Wrap(db2, NewConfig())
	defer kv.Close()

	numRoutines := runtime.NumCPU()
	finished := make(chan bool, numRoutines)
	for j := 0; j < numRoutines; j++ {
		go func(n int) {
			key := []byte("keyaaaa")
			key[2] = 'a' + byte(n)
			for i := 0; i < b.N/numRoutines; i++ {
				kv.Put(key, []byte("value"), nil)
				kv.Get(key, nil)
				key[0]++
				if key[0] == '0' {
					key[1]++
				}
			}
			finished <- true
		}(j)
	}
	for j := 0; j < numRoutines; j++ {
		<-finished
	}
}
func leveldbOpener(url *config.URL) (driver.Driver, error) {
	value := url.Value
	if !filepath.IsAbs(value) {
		value = pathutil.Relative(value)
	}
	opts := &opt.Options{}
	if url.Fragment["nocompress"] != "" {
		opts.Compression = opt.NoCompression
	}
	if url.Fragment["nocreate"] != "" {
		opts.ErrorIfMissing = true
	}
	filesDir := filepath.Join(value, "files")
	files, err := leveldb.OpenFile(filesDir, opts)
	if err != nil {
		return nil, err
	}
	copts := *opts
	copts.Filter = filter.NewBloomFilter(8 * sha1.Size)
	chunksDir := filepath.Join(value, "chunks")
	chunks, err := leveldb.OpenFile(chunksDir, &copts)
	if err != nil {
		return nil, err
	}
	return &leveldbDriver{
		files:  files,
		chunks: chunks,
		dir:    value,
	}, nil
}
func internalOpen(path string) (*leveldb.DB, error) {
	o := &opt.Options{
		Filter: filter.NewBloomFilter(10),
	}
	return leveldb.OpenFile(path, o)
}
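A minimal usage sketch for internalOpen above. The Put/Get/Close calls and leveldb.ErrNotFound are goleveldb's real API; the surrounding function and keys are illustrative.

func exampleInternalOpen(path string) error {
	db, err := internalOpen(path)
	if err != nil {
		return err
	}
	defer db.Close()

	if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
		return err
	}
	// With the bloom filter set, lookups of absent keys usually skip the
	// sstable read entirely and still report leveldb.ErrNotFound.
	if _, err := db.Get([]byte("missing"), nil); err != nil && err != leveldb.ErrNotFound {
		return err
	}
	return nil
}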
func TestCorruptDB_RecoverTable(t *testing.T) {
	h := newDbCorruptHarnessWopt(t, &opt.Options{
		WriteBuffer:         112 * opt.KiB,
		CompactionTableSize: 90 * opt.KiB,
		Filter:              filter.NewBloomFilter(10),
	})

	h.build(1000)
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.compactRangeAt(1, "", "")
	seq := h.db.seq
	h.closeDB()
	h.corrupt(storage.TypeTable, 0, 1000, 1)
	h.corrupt(storage.TypeTable, 3, 10000, 1)
	// Corrupted filter shouldn't affect recovery.
	h.corrupt(storage.TypeTable, 3, 113888, 10)
	h.corrupt(storage.TypeTable, -1, 20000, 1)

	h.recover()
	if h.db.seq != seq {
		t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
	}
	h.check(985, 985)

	h.close()
}
func TestDb_BloomFilter(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		BlockCache: opt.NoCache,
		Filter:     filter.NewBloomFilter(10),
	})
	defer h.close()

	key := func(i int) string {
		return fmt.Sprintf("key%06d", i)
	}

	const (
		n              = 10000
		indexOverheat  = 19898
		filterOverheat = 19799
	)

	// Populate multiple layers
	for i := 0; i < n; i++ {
		h.put(key(i), key(i))
	}
	h.compactMem()
	h.compactRange("a", "z")
	for i := 0; i < n; i += 100 {
		h.put(key(i), key(i))
	}
	h.compactMem()

	// Prevent auto compactions triggered by seeks
	h.stor.DelaySync(storage.TypeTable)

	// Lookup present keys. Should rarely read from small sstable.
	h.stor.SetReadCounter(storage.TypeTable)
	for i := 0; i < n; i++ {
		h.getVal(key(i), key(i))
	}
	cnt := int(h.stor.ReadCounter())
	t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
	if min, max := n+indexOverheat+filterOverheat, n+indexOverheat+filterOverheat+2*n/100; cnt < min || cnt > max {
		t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
	}

	// Lookup missing keys. Should rarely read from either sstable.
	h.stor.ResetReadCounter()
	for i := 0; i < n; i++ {
		h.get(key(i)+".missing", false)
	}
	cnt = int(h.stor.ReadCounter())
	t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
	if max := 3*n/100 + indexOverheat + filterOverheat; cnt > max {
		t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
	}

	h.stor.ReleaseSync(storage.TypeTable)
}
// Open opens a DB by fileName.
func Open(fileName string) (*DB, error) {
	opts := &leveldbOpt.Options{
		Filter: leveldbFilter.NewBloomFilter(LevelDBBloomFilterBitsPerKey),
	}
	db, err := leveldb.OpenFile(fileName, opts)
	if err != nil {
		return nil, err
	}
	return &DB{db}, nil
}
// SetBloomFilterBitsPerKey sets the bits per key for a bloom filter. This
// filter will reduce the number of unnecessary disk reads needed for Get()
// calls by a large factor.
func (opts *goKeyValueOptions) SetBloomFilterBitsPerKey(bitsPerKey int) {
	if bitsPerKey != opts.bloomBitsPerKey {
		if opts.filter != nil {
			// NOTE -- goleveldb has no destructor for a bloom filter; the
			// old filter value is simply left to the garbage collector.
		}
		opts.filter = filter.NewBloomFilter(bitsPerKey)
		opts.Options.Filter = opts.filter
		opts.bloomBitsPerKey = bitsPerKey
	}
}
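A self-contained sketch of the caching pattern SetBloomFilterBitsPerKey uses, i.e. rebuilding the filter only when the value actually changes. The cachedOptions type and setBloomBits name are invented for illustration.

type cachedOptions struct {
	opt.Options
	bloomBitsPerKey int
}

func (o *cachedOptions) setBloomBits(bits int) {
	if bits == o.bloomBitsPerKey {
		return // unchanged: keep the existing filter
	}
	// goleveldb filters are plain Go values, so the old filter needs no
	// explicit teardown; it is simply dropped for the garbage collector.
	o.Options.Filter = filter.NewBloomFilter(bits)
	o.bloomBitsPerKey = bits
}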
// OpenDB opens (or creates) a LevelDB database.
func OpenDB(path string) (Storage, error) {
	o := &opt.Options{
		Filter: filter.NewBloomFilter(10),
	}
	db, err := leveldb.OpenFile(path, o)
	if err != nil {
		return nil, err
	}
	return &levelDB{db: db}, nil
}
func TestDb_BloomFilter(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		Flag:       opt.OFCreateIfMissing,
		BlockCache: cache.EmptyCache{},
		Filter:     filter.NewBloomFilter(10),
	})

	key := func(i int) string {
		return fmt.Sprintf("key%06d", i)
	}

	n := 10000

	// Populate multiple layers
	for i := 0; i < n; i++ {
		h.put(key(i), key(i))
	}
	h.compactMem()
	h.compactRange("a", "z")
	for i := 0; i < n; i += 100 {
		h.put(key(i), key(i))
	}
	h.compactMem()

	// Prevent auto compactions triggered by seeks
	h.desc.DelaySync(desc.TypeTable)

	// Lookup present keys. Should rarely read from small sstable.
	h.desc.SetReadAtCounter(desc.TypeTable)
	for i := 0; i < n; i++ {
		h.getVal(key(i), key(i))
	}
	cnt := int(h.desc.ReadAtCounter())
	t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
	if min, max := n, n+2*n/100; cnt < min || cnt > max {
		t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
	}

	// Lookup missing keys. Should rarely read from either sstable.
	h.desc.ResetReadAtCounter()
	for i := 0; i < n; i++ {
		h.get(key(i)+".missing", false)
	}
	cnt = int(h.desc.ReadAtCounter())
	t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
	if max := 3 * n / 100; cnt > max {
		t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
	}

	h.desc.ReleaseSync(desc.TypeTable)
	h.close()
}
func BenchmarkLevelGetSingleKey(b *testing.B) {
	o := &opt.Options{
		Filter: filter.NewBloomFilter(10),
	}
	db2, err := leveldb.OpenFile("./leveldb_test.db2", o)
	if err != nil {
		panic("Unable to open database")
	}
	kv := Wrap(db2, NewConfig())
	defer kv.Close()

	key := []byte("keyaaaa")
	for i := 0; i < b.N; i++ {
		kv.Get(key, nil)
	}
}
func Open(filename string) (*leveldb.DB, error) {
	fmt.Printf("Opening %s\n", filename)

	// Options taken from ledisdb
	opts := &opt.Options{}
	opts.BlockSize = 32768              // 32 KiB
	opts.WriteBuffer = 67108864         // 64 MiB
	opts.BlockCacheCapacity = 524288000 // 500 MiB
	opts.OpenFilesCacheCapacity = 1024
	opts.CompactionTableSize = 32 * 1024 * 1024 // 32 MiB
	opts.WriteL0SlowdownTrigger = 16
	opts.WriteL0PauseTrigger = 64
	opts.Filter = filter.NewBloomFilter(10)

	db, err := leveldb.OpenFile(filename, opts)
	return db, err
}
func applyConfig(o *opt.Options, config map[string]interface{}) (*opt.Options, error) {
	ro, ok := config["read_only"].(bool)
	if ok {
		o.ReadOnly = ro
	}

	cim, ok := config["create_if_missing"].(bool)
	if ok {
		o.ErrorIfMissing = !cim
	}

	eie, ok := config["error_if_exists"].(bool)
	if ok {
		o.ErrorIfExist = eie
	}

	wbs, ok := config["write_buffer_size"].(float64)
	if ok {
		o.WriteBuffer = int(wbs)
	}

	bs, ok := config["block_size"].(float64)
	if ok {
		o.BlockSize = int(bs)
	}

	bri, ok := config["block_restart_interval"].(float64)
	if ok {
		o.BlockRestartInterval = int(bri)
	}

	lcc, ok := config["lru_cache_capacity"].(float64)
	if ok {
		o.BlockCacheCapacity = int(lcc)
	}

	bfbpk, ok := config["bloom_filter_bits_per_key"].(float64)
	if ok {
		bf := filter.NewBloomFilter(int(bfbpk))
		o.Filter = bf
	}

	return o, nil
}
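A hedged usage sketch for applyConfig above: the map mirrors what decoded JSON produces (numbers arrive as float64, which is why the numeric fields assert float64). The function name and values are illustrative.

func exampleApplyConfig() (*opt.Options, error) {
	cfg := map[string]interface{}{
		"create_if_missing":         true,
		"write_buffer_size":         float64(8 * 1024 * 1024), // 8 MiB
		"bloom_filter_bits_per_key": float64(10),
	}
	return applyConfig(&opt.Options{}, cfg)
}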
// NewLevelDB returns a newly allocated LevelDB-backed KeyValueStore ready to
// use.
func NewLevelDB(o LevelDBOptions) (KeyValueStore, error) {
	options := &leveldb_opt.Options{
		BlockCacheCapacity: o.CacheSizeBytes,
		Filter:             leveldb_filter.NewBloomFilter(10),
	}

	storage, err := leveldb.OpenFile(o.Path, options)
	if err != nil {
		return nil, err
	}

	return &LevelDB{
		storage:   storage,
		readOpts:  &leveldb_opt.ReadOptions{},
		writeOpts: &leveldb_opt.WriteOptions{},
	}, nil
}
func (db *GoLevelDB) init(path string, conf *Config, repair bool) error {
	if conf == nil {
		conf = NewDefaultConfig()
	}

	// Create the path first if it does not exist
	if err := os.MkdirAll(path, 0700); err != nil {
		return errors.Trace(err)
	}

	opts := &opt.Options{}
	opts.ErrorIfMissing = false
	opts.ErrorIfExist = false
	opts.Filter = filter.NewBloomFilter(conf.BloomFilterSize)
	opts.Compression = opt.SnappyCompression
	opts.BlockSize = conf.BlockSize
	opts.WriteBuffer = conf.WriteBufferSize
	opts.OpenFilesCacheCapacity = conf.MaxOpenFiles
	opts.CompactionTableSize = 32 * 1024 * 1024
	opts.WriteL0SlowdownTrigger = 16
	opts.WriteL0PauseTrigger = 64

	db.path = path
	db.opts = opts
	db.ropt = nil
	db.wopt = nil

	if repair {
		rdb, err := leveldb.RecoverFile(db.path, db.opts)
		if err != nil {
			return errors.Trace(err)
		}
		db.lvdb = rdb
		return nil
	}

	var err error
	if db.lvdb, err = leveldb.OpenFile(path, db.opts); err != nil {
		return errors.Trace(err)
	}
	return nil
}
func newBackingStore(dir string, maxFileHandles int, dumpStats bool) *internalLevelDBStore {
	d.Exp.NotEmpty(dir)
	d.Exp.NoError(os.MkdirAll(dir, 0700))
	db, err := leveldb.OpenFile(dir, &opt.Options{
		Compression:            opt.NoCompression,
		Filter:                 filter.NewBloomFilter(10), // 10 bits/key
		OpenFilesCacheCapacity: maxFileHandles,
		WriteBuffer:            1 << 24, // 16 MiB
	})
	d.Chk.NoError(err, "opening internalLevelDBStore in %s", dir)
	return &internalLevelDBStore{
		db:                   db,
		mu:                   &sync.Mutex{},
		concurrentWriteLimit: make(chan struct{}, maxFileHandles),
		dumpStats:            dumpStats,
	}
}
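The concurrentWriteLimit channel above is a counting semaphore. A self-contained sketch of that idiom, with all names invented for illustration:

func exampleSemaphore() {
	const limit = 4
	sem := make(chan struct{}, limit) // at most `limit` concurrent writers

	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot
			defer func() { <-sem }() // release it
			// ... a db.Put or similar write would go here ...
			_ = n
		}(i)
	}
	wg.Wait()
}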
func newOrderedChunkCache() *orderedChunkCache {
	dir, err := ioutil.TempDir("", "")
	d.PanicIfError(err)
	db, err := leveldb.OpenFile(dir, &opt.Options{
		Compression:            opt.NoCompression,
		Filter:                 filter.NewBloomFilter(10), // 10 bits/key
		OpenFilesCacheCapacity: 24,
		// We don't need this data to be durable: LDB is acting as temporary
		// sorting storage that can be larger than main memory.
		NoSync:      true,
		WriteBuffer: 1 << 27, // 128 MiB
	})
	d.Chk.NoError(err, "opening put cache in %s", dir)
	return &orderedChunkCache{
		orderedChunks: db,
		chunkIndex:    map[hash.Hash][]byte{},
		dbDir:         dir,
		mu:            &sync.RWMutex{},
	}
}
/*
NewKVDBBackend receives a filename with path and creates a new Backend instance
*/
func NewKVDBBackend(filename string) (*KVDBBackend, error) {
	var err error
	b := KVDBBackend{db: nil, ro: nil, wo: nil}
	b.filename = filename

	opts := opt.Options{
		// 32 bits/key is an aggressive setting: roughly 4 bytes of filter
		// per key for a near-zero false-positive rate.
		Filter: filter.NewBloomFilter(32),
	}
	b.db, err = leveldb.OpenFile(filename, &opts)
	b.ro = new(opt.ReadOptions)
	b.wo = new(opt.WriteOptions)
	if err != nil {
		return nil, err
	}
	return &b, nil
}
func OpenDB(options Options) (that *DB, err error) {
	mainDBPath := options.Path
	cacheSize := options.CacheSize
	writeBufferSize := 4
	blockSize := 4
	compression := options.Compression

	if options.ExpireDelay <= time.Second {
		options.ExpireDelay = time.Second
	}
	if cacheSize <= 0 {
		cacheSize = 8
	}

	// log::path,cacheSize,blockSize,write_buffer,compression
	d := newDB()
	d.options.ErrorIfMissing = false
	d.options.Filter = filter.NewBloomFilter(10)
	//d.Options.BlockCacher = leveldb::NewLRUCache(cacheSize * 1048576)
	d.options.BlockCacheCapacity = cacheSize * 1024 * 1024
	d.options.BlockSize = blockSize * 1024
	d.options.WriteBuffer = writeBufferSize * 1024 * 1024
	d.expireDelay = options.ExpireDelay
	if compression {
		d.options.Compression = opt.SnappyCompression
	} else {
		d.options.Compression = opt.NoCompression
	}

	if tdb, err := leveldb.OpenFile(mainDBPath, &d.options); err == nil {
		//runtime.SetFinalizer(d,
		//	func(d *DB) {
		//		d.db.Close()
		//	})
		d.db = tdb
		d.writer = NewWriter(d.db)
		go d.expireDaemon()
		return d, nil
	} else {
		return nil, err
	}
}
func NewLevelDBStore(filename string) (IpStore, error) {
	// Options taken from ledisdb
	opts := &opt.Options{}
	opts.BlockSize = 32768
	opts.WriteBuffer = 67108864
	opts.BlockCacheCapacity = 524288000
	opts.OpenFilesCacheCapacity = 1024
	opts.CompactionTableSize = 32 * 1024 * 1024
	opts.WriteL0SlowdownTrigger = 16
	opts.WriteL0PauseTrigger = 64
	opts.Filter = filter.NewBloomFilter(10)

	db, err := leveldb.OpenFile(filename, opts)
	if err != nil {
		return nil, err
	}

	newStore := &LevelDBStore{
		db:           db,
		batch:        nil,
		filename:     filename,
		codecFactory: func() Codec { return NewBitsetCodec() },
	}
	newStore.fixDocId()
	return newStore, nil
}
func openLevelDB(filepath string) (*leveldb.DB, error) {
	o := &opt.Options{
		Filter: filter.NewBloomFilter(10),
		Strict: opt.StrictAll,
	}
	db, err := leveldb.OpenFile(filepath, o)
	if err == nil {
		return db, nil
	}
	if _, ok := err.(*errors.ErrCorrupted); ok {
		log.Printf("recovering leveldb: %v", err)
		db, err = leveldb.RecoverFile(filepath, o)
		if err != nil {
			log.Printf("failed to recover leveldb: %v", err)
			return nil, err
		}
		return db, nil
	}
	log.Printf("failed to open leveldb: %v", err)
	return nil, err
}
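A hedged caller's view of openLevelDB above: recovery from *errors.ErrCorrupted is handled transparently inside it, so callers simply open and defer Close. The function name and path here are illustrative.

func exampleOpenLevelDB() {
	db, err := openLevelDB("/tmp/example.ldb")
	if err != nil {
		log.Fatalf("open: %v", err) // both open and recovery failed
	}
	defer db.Close()
	// use db...
}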
func newOptions(cfg *config.LevelDBConfig) *opt.Options {
	opts := &opt.Options{}

	opts.ErrorIfMissing = false

	cfg.Adjust()

	opts.BlockCache = cache.NewLRUCache(cfg.CacheSize)

	// we must use a bloom filter
	opts.Filter = filter.NewBloomFilter(defaultFilterBits)

	if !cfg.Compression {
		opts.Compression = opt.NoCompression
	} else {
		opts.Compression = opt.SnappyCompression
	}

	opts.BlockSize = cfg.BlockSize
	opts.WriteBuffer = cfg.WriteBufferSize

	return opts
}
func (p *stConstructor_Table) finish() (size int, err error) {
	p.t.Logf("table: contains %d entries and %d blocks", p.tw.Len(), p.tw.CountBlock())
	err = p.tw.Finish()
	if err != nil {
		return
	}
	p.w.Close()

	tsize := uint64(p.tw.Size())
	fsize, _ := p.file.Size()
	if fsize != tsize {
		p.t.Errorf("table: calculated size doesn't equal actual size, calculated=%d actual=%d", tsize, fsize)
	}

	p.r, _ = p.file.Open()
	o := &opt.Options{
		BlockRestartInterval: 3,
		Filter:               filter.NewBloomFilter(10),
	}
	p.tr, err = table.NewReader(p.r, fsize, o, nil)
	// Propagate any NewReader error instead of discarding it.
	return int(fsize), err
}
type keyValue interface {
	Key() []byte
	Value() []byte
}

func testKeyVal(t *testing.T, kv keyValue, want string) {
	res := string(kv.Key()) + "->" + string(kv.Value())
	if res != want {
		t.Errorf("invalid key/value, want=%q, got=%q", want, res)
	}
}

func numKey(num int) string {
	return fmt.Sprintf("key%06d", num)
}

var _bloom_filter = filter.NewBloomFilter(10)

func runAllOpts(t *testing.T, f func(h *dbHarness)) {
	for i := 0; i < 4; i++ {
		h := newDbHarness(t)
		switch i {
		case 0:
		case 1:
			h.oo.SetFilter(_bloom_filter)
		case 2:
			h.oo.SetCompressionType(opt.NoCompression)
		case 3:
			h.reopenDB()
		}
		f(h)
		h.close()
	}
}
// SetBloomFilter replaces the store's options with a fresh opt.Options
// containing only the bloom filter, discarding any previously set options.
func (l *Leveldb) SetBloomFilter(bits int) {
	l.opt = &opt.Options{
		Filter: filter.NewBloomFilter(bits),
	}
}
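Hedged usage sketch for SetBloomFilter above. Since it overwrites l.opt wholesale, it should run before any other option is configured; the zero-value Leveldb construction and function name are assumptions for illustration.

func exampleSetBloomFilter(path string) (*leveldb.DB, error) {
	l := &Leveldb{}
	l.SetBloomFilter(10) // call first: this replaces l.opt entirely
	return leveldb.OpenFile(path, l.opt)
}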