func main() {
	dbname := "leveldb"
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(3 << 20))
	opts.SetCreateIfMissing(true)
	_ = levigo.DestroyDatabase(dbname, opts)
	db, err := levigo.Open(dbname, opts)
	if err != nil {
		log.Fatalf("opening %s failed: %v", dbname, err)
	}
	wo := levigo.NewWriteOptions()
	ro := levigo.NewReadOptions()
	start := time.Now()
	for i := 0; i < 100000; i++ {
		db.Put(wo, []byte(fmt.Sprintf("a%v", i)), []byte(strconv.Itoa(i)))
	}
	for i := 0; i < 100000; i++ {
		db.Get(ro, []byte(fmt.Sprintf("a%v", i)))
	}
	for i := 0; i < 100000; i++ {
		db.Delete(wo, []byte(fmt.Sprintf("a%v", i)))
	}
	duration := time.Since(start)
	log.Printf("Elapsed: %v.", duration)
}
func NewLeveldbCache(dbname string, cacheM int) (*LeveldbCache, error) {
	opts := levigo.NewOptions()
	filter := levigo.NewBloomFilter(10)
	cache := levigo.NewLRUCache(1024 * 1024 * cacheM)
	opts.SetFilterPolicy(filter)
	opts.SetCache(cache)
	opts.SetCreateIfMissing(true)
	opts.SetWriteBufferSize(8 * 1024 * 1024) // 8M
	opts.SetCompression(levigo.SnappyCompression)

	ldb, err := levigo.Open(dbname, opts)
	if err != nil {
		return nil, err
	}
	so := levigo.NewReadOptions()
	so.SetFillCache(false)
	return &LeveldbCache{
		db:    ldb,
		fp:    filter,
		cache: cache,
		Ro:    levigo.NewReadOptions(),
		Wo:    levigo.NewWriteOptions(),
		So:    so,
	}, nil
}
func NewLevelDBPersistence(storageRoot string, cacheCapacity, bitsPerBloomFilterEncoded int) (p *LevelDBPersistence, err error) {
	options := levigo.NewOptions()
	options.SetCreateIfMissing(true)
	options.SetParanoidChecks(true)

	cache := levigo.NewLRUCache(cacheCapacity)
	options.SetCache(cache)

	filterPolicy := levigo.NewBloomFilter(bitsPerBloomFilterEncoded)
	options.SetFilterPolicy(filterPolicy)

	storage, err := levigo.Open(storageRoot, options)
	if err != nil {
		return
	}

	readOptions := levigo.NewReadOptions()
	writeOptions := levigo.NewWriteOptions()
	writeOptions.SetSync(true)

	p = &LevelDBPersistence{
		cache:        cache,
		filterPolicy: filterPolicy,
		options:      options,
		readOptions:  readOptions,
		storage:      storage,
		writeOptions: writeOptions,
	}

	return
}
func (s *Store) Put(record *Record) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	id := s.id + 1

	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	err := enc.Encode(record)
	if err != nil {
		panic(fmt.Sprintf("queued.Store: Error encoding record: %v", err))
	}

	wopts := levigo.NewWriteOptions()
	defer wopts.Close()
	wopts.SetSync(s.sync)

	err = s.db.Put(wopts, key(id), buf.Bytes())
	if err != nil {
		return err
	}

	record.id = id
	s.id = id
	return nil
}
func (c *cache) open(path string) error {
	opts := levigo.NewOptions()
	opts.SetCreateIfMissing(true)
	if c.options.CacheSizeM > 0 {
		c.cache = levigo.NewLRUCache(c.options.CacheSizeM * 1024 * 1024)
		opts.SetCache(c.cache)
	}
	if c.options.MaxOpenFiles > 0 {
		opts.SetMaxOpenFiles(c.options.MaxOpenFiles)
	}
	if c.options.BlockRestartInterval > 0 {
		opts.SetBlockRestartInterval(c.options.BlockRestartInterval)
	}
	if c.options.WriteBufferSizeM > 0 {
		opts.SetWriteBufferSize(c.options.WriteBufferSizeM * 1024 * 1024)
	}
	if c.options.BlockSizeK > 0 {
		opts.SetBlockSize(c.options.BlockSizeK * 1024)
	}
	db, err := levigo.Open(path, opts)
	if err != nil {
		return err
	}
	c.db = db
	c.wo = levigo.NewWriteOptions()
	c.ro = levigo.NewReadOptions()
	return nil
}
//func ApplyToStateMachine(kvs *KVserver) {
func (kvs *KVserver) ApplyToStateMachine() {
	kvs.P("Apply to State Machine Routine", 0, 0)
	for {
		logdata := <-kvs.ReplServer.Inbox()
		switch logdata.Data.(type) {
		case SMCommand:
			com, _ := logdata.Data.(SMCommand)
			kvs.P("Received msg from State machine ", com.UserId, com.RequestId)
			if com.Command == "PUT" || com.Command == "put" {
				wo := levigo.NewWriteOptions()
				err := kvs.DataDb.Put(wo, []byte(com.Key), []byte(com.Value))
				wo.Close()
				if kvs.ReplServer.WhoIsLeader() == kvs.MyPid {
					res := new(SMResp)
					res.RequestId = com.RequestId
					kvs.P("I am leader so will allow to send to client, req id", com.UserId, com.RequestId)
					if err != nil {
						res.Value = []byte("ERROR_IN_DB")
						res.Error = 1
					} else {
						res.Value = []byte("SUCCESS")
						res.Error = 0
					}
					kvs.P("Finally as a Leader Will send to client, req id", com.UserId, res.RequestId)
					kvs.OuterComm.Outbox() <- &Envelope{Pid: com.UserId, MsgId: 0, Msg: res}
					kvs.RepComplete <- res // Now can accept the next command
				}
			}
		} // end of switch
	}
}
func TestLevigo(t *testing.T) {
	path := "/tmp/levigo_test_10101"
	os.RemoveAll(path)
	opts := levigo.NewOptions()
	filter := levigo.NewBloomFilter(10)
	opts.SetFilterPolicy(filter)
	opts.SetCache(levigo.NewLRUCache(1024 << 20)) // 1G
	opts.SetCreateIfMissing(true)
	if ldb, err := levigo.Open(path, opts); err == nil {
		key := []byte("test-test hwl0dsfds")
		val := []byte("value")
		if err = ldb.Put(levigo.NewWriteOptions(), key, val); err != nil {
			t.Fail()
		} else {
			ro := levigo.NewReadOptions()
			if data, err := ldb.Get(ro, key); err == nil && reflect.DeepEqual(data, val) {
				ro.SetFillCache(false)
				it := ldb.NewIterator(ro)
				it.Seek([]byte{0})
				for ; it.Valid(); it.Next() {
					log.Printf("%s => %s", it.Key(), it.Value())
				}
			} else {
				t.Fail()
			}
		}
	} else {
		t.Fail()
	}
}
func NewLevelDB(path string, config interface{}) (Engine, error) {
	c, ok := config.(*LevelDbConfiguration)
	if !ok {
		return nil, fmt.Errorf("Config is of type %T instead of %T", config, LevelDbConfiguration{})
	}

	// if it wasn't set, set it to 100
	if c.MaxOpenFiles == 0 {
		c.MaxOpenFiles = 100
	}

	// if it wasn't set, set it to 200 MB
	if c.LruCacheSize == 0 {
		c.LruCacheSize = 200 * 1024 * 1024
	}

	// initialize the global cache; take the lock before checking to avoid a data race
	cacheLock.Lock()
	if cache == nil {
		cache = levigo.NewLRUCache(int(c.LruCacheSize))
	}
	cacheLock.Unlock()

	opts := levigo.NewOptions()
	opts.SetCache(cache)
	opts.SetCreateIfMissing(true)
	opts.SetMaxOpenFiles(c.MaxOpenFiles)
	db, err := levigo.Open(path, opts)

	wopts := levigo.NewWriteOptions()
	ropts := levigo.NewReadOptions()

	return LevelDB{db, opts, wopts, ropts, path}, err
}
func NewLevelDbDatastore(dbDir string) (Datastore, error) {
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE))
	opts.SetCreateIfMissing(true)
	opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES)
	filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY)
	opts.SetFilterPolicy(filter)
	db, err := levigo.Open(dbDir, opts)
	if err != nil {
		return nil, err
	}

	ro := levigo.NewReadOptions()
	lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY)
	if err2 != nil {
		return nil, err2
	}

	lastId := uint64(0)
	if lastIdBytes != nil {
		lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes))
		if err2 != nil {
			return nil, err2
		}
	}

	wo := levigo.NewWriteOptions()

	return &LevelDbDatastore{db: db, lastIdUsed: lastId, readOptions: ro, writeOptions: wo}, nil
}
// Delete the record by key
// Concurrent-safe
func (l *Level) Delete(key []byte) {
	l.mu.Lock()
	defer l.mu.Unlock()
	wo := levigo.NewWriteOptions()
	l.db.Delete(wo, key)
	wo.Close()
}
func (db *TorrentDB) Add(t *core.Torrent) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	data, err := json.Marshal(t)
	if err != nil {
		return err
	}

	wo := levigo.NewWriteOptions()
	defer wo.Close()
	err = db.db.Put(wo, []byte("t"+t.Hash), data)
	if err != nil {
		return err
	}

	bad := make([]int, 0)
	for i, w := range db.writers {
		select {
		case w <- t:
		default:
			close(w)
			bad = append(bad, i)
		}
	}
	for c, i := range bad {
		i = i - c
		db.writers = append(db.writers[:i], db.writers[i+1:]...)
	}
	return nil
}
func (store *LevelDbStore) DeleteAllRecords() error {
	store.dbOpenLock.Lock()
	defer store.dbOpenLock.Unlock()
	if store.readOptions == nil && store.writeOptions == nil {
		panic("You may only call DeleteAllRecords after starting reading or writing")
	}

	writeOptions := store.writeOptions
	if writeOptions == nil {
		writeOptions = levigo.NewWriteOptions()
		defer writeOptions.Close()
	}
	readOptions := store.readOptions
	if readOptions == nil {
		readOptions = levigo.NewReadOptions()
		defer readOptions.Close()
	}

	it := store.db.NewIterator(readOptions)
	defer it.Close()
	it.SeekToFirst()
	for ; it.Valid(); it.Next() {
		if err := store.db.Delete(writeOptions, it.Key()); err != nil {
			return fmt.Errorf("Error clearing keys from database: %v", err)
		}
	}
	if err := it.GetError(); err != nil {
		return fmt.Errorf("Error iterating through database: %v", err)
	}
	return nil
}
func AddProc(procID int, db *levigo.DB) error {
	ro := levigo.NewReadOptions()
	defer ro.Close()
	wo := levigo.NewWriteOptions()
	defer wo.Close()

	data, err := db.Get(ro, []byte("procs"))
	if err != nil {
		return err
	}

	spdata := bytes.Split(data, []byte(":"))
	for i, e := range spdata {
		if string(e) != "" {
			fmt.Println("ProcID: #", i, string(e))
			pid, err := strconv.Atoi(string(e))
			if err != nil {
				return err
			}
			if pid == procID {
				return errors.New("Process already exists")
			}
			if !isProcAlive(pid) {
				removeProc(pid, db)
			}
		}
	}

	strdata := string(data)
	strdata = strdata + ":" + strconv.Itoa(procID)
	err = db.Put(wo, []byte("procs"), []byte(strdata))
	return err
}
func UpdateDB(k string, v interface{}, dbName string) error {
	dbfile, err := getDbName(dbName)
	if err != nil {
		return err
	}
	dbfile = DBRoot + "/" + dbfile
	db, err := openDB(dbfile)
	if err != nil {
		return err
	}
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	wo := levigo.NewWriteOptions()
	defer wo.Close()
	err = db.Delete(wo, []byte(k))
	if err != nil {
		return err
	}
	err = db.Put(wo, []byte(k), data)
	if err != nil {
		return err
	}
	//db.Close()
	return nil
}
// Open returns a keyvalue DB backed by a LevelDB database at the given
// filepath. If opts==nil, the DefaultOptions are used.
func Open(path string, opts *Options) (keyvalue.DB, error) {
	if opts == nil {
		opts = DefaultOptions
	}
	options := levigo.NewOptions()
	defer options.Close()
	cache := levigo.NewLRUCache(opts.CacheCapacity)
	options.SetCache(cache)
	options.SetCreateIfMissing(!opts.MustExist)
	if opts.WriteBufferSize > 0 {
		options.SetWriteBufferSize(opts.WriteBufferSize)
	}
	db, err := levigo.Open(path, options)
	if err != nil {
		return nil, fmt.Errorf("could not open LevelDB at %q: %v", path, err)
	}

	largeReadOpts := levigo.NewReadOptions()
	largeReadOpts.SetFillCache(opts.CacheLargeReads)

	return &levelDB{
		db:            db,
		cache:         cache,
		readOpts:      levigo.NewReadOptions(),
		largeReadOpts: largeReadOpts,
		writeOpts:     levigo.NewWriteOptions(),
	}, nil
}
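// Usage sketch (illustrative, not part of the original sources): calling the
// Open wrapper above. CacheCapacity is one of the Options fields Open actually
// reads; passing nil opts would fall back to DefaultOptions instead. The path
// here is just an example.
func exampleOpenUsage() (keyvalue.DB, error) {
	return Open("/tmp/example-keyvalue-db", &Options{CacheCapacity: 64 << 20})
}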
func Test_DelKey(t *testing.T) {
	_key, _val, expectedVal := "name", "levigoNS", ""
	dbpath := "/tmp/delete-this-leveldb"
	db := CreateDB(dbpath)

	writer := levigo.NewWriteOptions()
	defer writer.Close()
	keyname := []byte(_key)
	value := []byte(_val)
	err := db.Put(writer, keyname, value)
	if err != nil {
		t.Error("Fail: (DelKey) Pushing key " + _key + " for value " + _val + " failed")
	}

	statusDelete := DelKey(_key, db)

	reader := levigo.NewReadOptions()
	defer reader.Close()
	resultVal, err := db.Get(reader, []byte(_key))
	if err != nil {
		t.Error("Fail: (DelKey) Reading key " + _key + " failed")
	}
	if string(resultVal) != expectedVal {
		t.Error("Fail: DelKey sets " + string(resultVal))
	}
	if !statusDelete {
		t.Error("Fail: DelKey returns False status")
	}

	CloseAndDeleteDB(dbpath, db)
}
func (dw *DbWriter) ProcessInput() {
	wo := levigo.NewWriteOptions()
	ro := levigo.NewReadOptions()
	defer wo.Close()
	defer ro.Close()

	// Prime the encoder and decoder with a zero Minute so the gob type
	// descriptor is exchanged once up front and later payloads stay small.
	var buffer bytes.Buffer
	enc := gob.NewEncoder(&buffer)
	enc.Encode(Minute{})
	dec := gob.NewDecoder(&buffer)
	var m Minute
	dec.Decode(&m)
	buffer.Reset()

	for input := range dw.Input {
		// first write minutes
		key := []byte(input.Key)
		err := enc.Encode(input.Minute)
		if err != nil {
			// something is really wrong; some kind of help cry would be good
			panic(err)
		}
		payload := buffer.Bytes()
		//log.Println(len(payload))
		// save
		dw.Db.Put(wo, key, payload)
		var m Minute
		dec.Decode(&m)
		log.Println(m)
		buffer.Reset()
	}
}
func addBadToken(app string, token string) {
	wo := levigo.NewWriteOptions()
	defer wo.Close()
	err := getDB().Put(wo, []byte("BT:"+app+"_"+token), []byte("1"))
	if err != nil {
		log.Println("error when adding bad token to db:", err)
	}
}
func recoverToken(app string, token string) {
	wo := levigo.NewWriteOptions()
	defer wo.Close()
	err := getDB().Delete(wo, []byte("BT:"+app+"_"+token))
	if err != nil {
		log.Println("error when recovering token:", err)
	}
}
// Put saves a record to LevelDB.
func (l *Level) Put(key, value []byte) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	wo := levigo.NewWriteOptions()
	err := l.db.Put(wo, key, value)
	wo.Close()
	return err
}
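// Usage sketch (illustrative, not part of the original sources): the Level
// wrapper's Put and Delete shown together, assuming a *Level value has already
// been constructed around an open levigo DB. Note that Delete, unlike Put,
// does not report an error.
func exampleLevelUsage(l *Level) error {
	if err := l.Put([]byte("user:1"), []byte("alice")); err != nil {
		return err
	}
	l.Delete([]byte("user:1"))
	return nil
}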
func (connection *LevelDbConnection) Delete(options *proto.DbWriteOptions, key []byte) error {
	wo := levigo.NewWriteOptions()
	defer wo.Close()
	if options != nil {
		wo.SetSync(options.Sync)
	}
	return connection.db.Delete(wo, key)
}
func (s *Store) Remove(id int) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	wopts := levigo.NewWriteOptions()
	defer wopts.Close()
	wopts.SetSync(s.sync)

	return s.db.Delete(wopts, key(id))
}
func TestTrigger(t *testing.T) {
	opts := levigo.NewOptions()
	levigo.DestroyDatabase("test.ldb", opts)
	// opts.SetCache(levigo.NewLRUCache(3<<30))
	opts.SetCreateIfMissing(true)
	db, err := levigo.Open("test.ldb", opts)
	if err != nil {
		t.Fatal(err)
	}
	ro := levigo.NewReadOptions()
	wo := levigo.NewWriteOptions()

	sub1 := sublevel.Sublevel(db, "input")
	index := sublevel.Sublevel(db, "index")
	job := sublevel.Sublevel(db, "job")

	task := Trigger(sub1, job, func(key, value []byte) []byte {
		if strings.HasPrefix(string(key), "Doc_") {
			return key
		}
		return nil
	}, func(key, value []byte) bool {
		doc := make(map[string]string)
		err := json.Unmarshal(value, &doc)
		if err != nil {
			t.Fatal(err)
		}
		index.Put(wo, []byte(doc["id"]), []byte(doc["number"]))
		// Make sure that the next task invocation comes in concurrently to this one
		time.Sleep(300 * time.Millisecond)
		return true
	})

	sub1.Put(wo, []byte("foobar"), []byte("do-not-process"))
	// Two put operations which will both trigger the task for the same taskKey.
	sub1.Put(wo, []byte("Doc_1"), []byte("{\"id\":\"01234\", \"number\": \"42\"}"))
	sub1.Put(wo, []byte("Doc_1"), []byte("{\"id\":\"01234\", \"number\": \"43\"}"))

	val, err := sub1.Get(ro, []byte("Doc_1"))
	if err != nil || string(val) != "{\"id\":\"01234\", \"number\": \"43\"}" {
		t.Fatal(err, string(val))
	}

	time.Sleep(800 * time.Millisecond)

	val, err = index.Get(ro, []byte("01234"))
	if err != nil || string(val) != "43" {
		t.Fatal(err, string(val))
	}

	task.Close()
	ro.Close()
	wo.Close()
	db.Close()
}
func NewLevelDbDatastore(dbDir string) (Datastore, error) {
	mainDbDir := filepath.Join(dbDir, DATABASE_DIR)
	requestLogDir := filepath.Join(dbDir, REQUEST_LOG_BASE_DIR)

	err := os.MkdirAll(mainDbDir, 0744)
	if err != nil {
		return nil, err
	}
	previousLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now().Add(-time.Hour*24)))
	if err != nil {
		return nil, err
	}
	currentLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now()))
	if err != nil {
		return nil, err
	}

	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE))
	opts.SetCreateIfMissing(true)
	opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES)
	filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY)
	opts.SetFilterPolicy(filter)
	db, err := levigo.Open(dbDir, opts)
	if err != nil {
		return nil, err
	}

	ro := levigo.NewReadOptions()
	lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY)
	if err2 != nil {
		return nil, err2
	}

	lastId := uint64(0)
	if lastIdBytes != nil {
		lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes))
		if err2 != nil {
			return nil, err2
		}
	}

	wo := levigo.NewWriteOptions()

	leveldbStore := &LevelDbDatastore{
		db:                 db,
		lastIdUsed:         lastId,
		readOptions:        ro,
		writeOptions:       wo,
		requestLogDir:      requestLogDir,
		currentRequestLog:  currentLog,
		previousRequestLog: previousLog,
	}

	go leveldbStore.periodicallyRotateRequestLog()

	return leveldbStore, nil
}
func Reduce(source *sublevel.DB, target *sublevel.DB, name string, reduceFunc ReduceFunc, rereduceFunc RereduceFunc, valueFactory ValueFactory, level int) *ReduceTask {
	task := &ReduceTask{
		source:       source,
		target:       target,
		taskDb:       sublevel.Sublevel(target.LevelDB(), name+string([]byte{0, 65})),
		reduceFunc:   reduceFunc,
		rereduceFunc: rereduceFunc,
		valueFactory: valueFactory,
		level:        level,
	}
	task.ro = levigo.NewReadOptions()
	task.wo = levigo.NewWriteOptions()

	filter := func(key, value []byte) []byte {
		return []byte{32}
		/*
			if task.level == 0 {
				return []byte{0}
			}
			s := bytes.Split(key[:len(key)-17], []byte{32})
			if len(s) < task.level {
				return nil
			}
			return bytes.Join(s[:task.level], []byte{32})
		*/
	}

	f := func(key, value []byte) bool {
		// println("Working on", string(key), string(value))
		s := bytes.Split(key[4:len(key)-17], []byte{32})
		off := 16
		for i := len(s); i >= task.level; i-- {
			val := task.valueFactory()
			if i > 0 {
				k := append(joinReduceKey(s[:i], false), 32)
				// Iterate over all similar rows in the source DB
				it := task.source.NewIterator(task.ro)
				for it.Seek(k); it.Valid(); it.Next() {
					if !bytes.HasPrefix(it.Key(), k) {
						break
					}
					val = task.reduceFunc(val, it.Value())
				}
				it.Close()
			}
			// Iterate over all rows in the target DB which are more specific
			it := task.target.NewIterator(task.ro)
			k := joinReduceKey(s[:i], true)
			for it.Seek(k); it.Valid(); it.Next() {
				if !bytes.HasPrefix(it.Key(), k) {
					break
				}
				val = task.rereduceFunc(val, it.Value())
			}
			it.Close()
			task.target.Put(task.wo, joinReduceKey(s[:i], false), serializeValue(val))
			if i > 0 {
				off += len(s[i-1]) + 1
			}
		}
		return true
	}

	task.task = runlevel.Trigger(source, sublevel.Sublevel(target.LevelDB(), name+string([]byte{0, 66})), filter, f)
	return task
}
func removeProc(pid int, db *levigo.DB) error {
	wo := levigo.NewWriteOptions()
	defer wo.Close()
	err := db.Delete(wo, []byte(strconv.Itoa(pid)))
	if err != nil {
		return err
	}
	return killProc(pid)
}
func (r *Replicator) DelLog(lognr int) int {
	wo := levigo.NewWriteOptions()
	defer wo.Close()
	err := r.LogDb.Delete(wo, []byte(strconv.Itoa(lognr)))
	if err != nil {
		return 1
	}
	return 0
}
func (conn *DbConnection) open(path string) error {
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(3 << 10))
	opts.SetCreateIfMissing(true)
	var err error
	conn.db, err = levigo.Open(path, opts)
	conn.ro = levigo.NewReadOptions()
	conn.wo = levigo.NewWriteOptions()
	return err
}
func storeLatestIdentity(msgID int32) {
	wo := levigo.NewWriteOptions()
	defer wo.Close()
	buf := bytes.NewBuffer([]byte{})
	binary.Write(buf, binary.BigEndian, msgID)
	err := getDB().Put(wo, []byte("latest_indentity"), buf.Bytes())
	if err != nil {
		log.Println("error when storing latestIdentity", err)
	}
}
/*
DelKey deletes key from provided DB handle.
*/
func DelKey(key string, db *levigo.DB) bool {
	writer := levigo.NewWriteOptions()
	defer writer.Close()

	err := db.Delete(writer, []byte(key))
	if err != nil {
		golerror.Boohoo("Key "+key+" query failed.", false)
		return false
	}
	return true
}
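// Usage sketch (illustrative, not part of the original sources): calling
// DelKey against an already opened *levigo.DB, e.g. one produced by the
// CreateDB helper used in Test_DelKey above. The key name is only an example.
func exampleDelKeyUsage(db *levigo.DB) {
	if !DelKey("name", db) {
		log.Println("DelKey failed for key \"name\"")
	}
}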