// Open returns a keyvalue DB backed by a LevelDB database at the given // filepath. If opts==nil, the DefaultOptions are used. func Open(path string, opts *Options) (keyvalue.DB, error) { if opts == nil { opts = DefaultOptions } options := levigo.NewOptions() defer options.Close() cache := levigo.NewLRUCache(opts.CacheCapacity) options.SetCache(cache) options.SetCreateIfMissing(!opts.MustExist) if opts.WriteBufferSize > 0 { options.SetWriteBufferSize(opts.WriteBufferSize) } db, err := levigo.Open(path, options) if err != nil { return nil, fmt.Errorf("could not open LevelDB at %q: %v", path, err) } largeReadOpts := levigo.NewReadOptions() largeReadOpts.SetFillCache(opts.CacheLargeReads) return &levelDB{ db: db, cache: cache, readOpts: levigo.NewReadOptions(), largeReadOpts: largeReadOpts, writeOpts: levigo.NewWriteOptions(), }, nil }
func NewLeveldbCache(dbname string, cacheM int) (*LeveldbCache, error) { opts := levigo.NewOptions() filter := levigo.NewBloomFilter(10) cache := levigo.NewLRUCache(1024 * 1024 * cacheM) opts.SetFilterPolicy(filter) opts.SetCache(cache) opts.SetCreateIfMissing(true) opts.SetWriteBufferSize(8 * 1024 * 104) // 8M opts.SetCompression(levigo.SnappyCompression) if ldb, err := levigo.Open(dbname, opts); err == nil { so := levigo.NewReadOptions() so.SetFillCache(false) return &LeveldbCache{ db: ldb, fp: filter, cache: cache, Ro: levigo.NewReadOptions(), Wo: levigo.NewWriteOptions(), So: so, }, nil } else { return nil, err } }
func newSnapshot(db *DB) *Snapshot { s := new(Snapshot) s.db = db s.s = db.db.NewSnapshot() s.readOpts = levigo.NewReadOptions() s.readOpts.SetSnapshot(s.s) s.iteratorOpts = levigo.NewReadOptions() s.iteratorOpts.SetSnapshot(s.s) s.iteratorOpts.SetFillCache(false) return s }
func OpenWithConfig(cfg *Config) (*DB, error) { db := new(DB) db.cfg = cfg db.opts = db.initOptions(cfg) db.readOpts = levigo.NewReadOptions() db.writeOpts = levigo.NewWriteOptions() db.iteratorOpts = levigo.NewReadOptions() db.iteratorOpts.SetFillCache(false) var err error db.db, err = levigo.Open(cfg.Path, db.opts) return db, err }
func AddProc(procID int, db *levigo.DB) error { ro := levigo.NewReadOptions() wo := levigo.NewWriteOptions() data, err := db.Get(ro, []byte("procs")) spdata := bytes.Split(data, []byte(":")) for i, e := range spdata { if string(e) != "" { fmt.Println("ProcID: #", i, string(e)) pid, err := strconv.Atoi(string(e)) if err != nil { return err } if pid == procID { return errors.New("Process already exists") } if isProcAlive(pid) == false { removeProc(pid, db) } } if err != nil { return err } } strdata := string(data) strdata = strdata + ":" + strconv.Itoa(procID) err = db.Put(wo, []byte("procs"), []byte(strdata)) return err }
// GetAllKeysByAnchor scans keys sharing the given anchor prefix and
// returns their values joined as a JSON-style array ("[v1,v2,...]").
// It returns nil when no key at or after the anchor exists.
//
// NOTE(review): after Seek(anchor) the code calls it.Next() before the
// loop, so the first positioned entry (the anchor key itself, or the
// first key >= anchor) is always skipped — confirm this is intentional.
func (l *Level) GetAllKeysByAnchor(anchor []byte) []byte {
	l.mu.Lock()
	defer l.mu.Unlock()
	var buffer bytes.Buffer
	ro := levigo.NewReadOptions()
	defer ro.Close()
	// Bulk scan: avoid displacing hot entries in the block cache.
	ro.SetFillCache(false)
	it := l.db.NewIterator(ro)
	defer it.Close()
	buffer.WriteByte('[')
	it.Seek(anchor)
	if !it.Valid() {
		// Nothing at or past the anchor: report "not found" as nil
		// rather than an empty JSON array.
		return nil
	}
	it.Next() // skip the first positioned entry (see NOTE above)
	for it.Valid() && bytes.HasPrefix(it.Key(), anchor) {
		buffer.Write(it.Value())
		buffer.WriteByte(',')
		it.Next()
	}
	bs := buffer.Bytes()
	// Drop the trailing comma (if any) and close the array.
	bs = bytes.TrimRight(bs, ",")
	bs = append(bs, ']')
	// NOTE(review): iterator errors are only printed, not returned, so
	// callers cannot distinguish a failed scan from an empty result.
	if err := it.GetError(); err != nil {
		fmt.Println(err)
	}
	return bs
}
func main() { dbname := "leveldb" opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(3 << 20)) opts.SetCreateIfMissing(true) _ = levigo.DestroyDatabase(dbname, opts) db, _ := levigo.Open(dbname, opts) wo := levigo.NewWriteOptions() ro := levigo.NewReadOptions() start := time.Now() for i := 0; i < 10e4; i++ { db.Put(wo, []byte(fmt.Sprintf("a%v", i)), []byte(strconv.Itoa(i))) } for i := 0; i < 10e4; i++ { db.Get(ro, []byte(fmt.Sprintf("a%v", i))) } for i := 0; i < 10e4; i++ { db.Delete(wo, []byte(fmt.Sprintf("a%v", i))) } duration := time.Since(start) log.Printf("Elapsed: %v.", duration) }
func TestLevigo(t *testing.T) { path := "/tmp/levigo_test_10101" os.RemoveAll(path) opts := levigo.NewOptions() filter := levigo.NewBloomFilter(10) opts.SetFilterPolicy(filter) opts.SetCache(levigo.NewLRUCache(1024 << 20)) // 1G opts.SetCreateIfMissing(true) if ldb, err := levigo.Open(path, opts); err == nil { key := []byte("test-test hwl0dsfds") val := []byte("value") if err = ldb.Put(levigo.NewWriteOptions(), key, val); err != nil { t.Fail() } else { ro := levigo.NewReadOptions() if data, err := ldb.Get(ro, key); err == nil && reflect.DeepEqual(data, val) { ro.SetFillCache(false) it := ldb.NewIterator(ro) it.Seek([]byte{0}) for ; it.Valid(); it.Next() { log.Printf("%s => %s", it.Key(), it.Value()) } } else { t.Fail() } } } else { t.Fail() } }
func (store *LevelDbStore) DeleteAllRecords() error { store.dbOpenLock.Lock() defer store.dbOpenLock.Unlock() if store.readOptions == nil && store.writeOptions == nil { panic("You may only call DeleteAllRecords after starting reading or writing") } writeOptions := store.writeOptions if writeOptions == nil { writeOptions = levigo.NewWriteOptions() defer writeOptions.Close() } readOptions := store.readOptions if readOptions == nil { readOptions = levigo.NewReadOptions() defer readOptions.Close() } it := store.db.NewIterator(readOptions) defer it.Close() it.SeekToFirst() for ; it.Valid(); it.Next() { if err := store.db.Delete(writeOptions, it.Key()); err != nil { return fmt.Errorf("Error clearing keys from database: %v", err) } } if err := it.GetError(); err != nil { return fmt.Errorf("Error iterating through database: %v", err) } return nil }
func NewLevelDB(path string, config interface{}) (Engine, error) { c, ok := config.(*LevelDbConfiguration) if !ok { return nil, fmt.Errorf("Config is of type %T instead of %T", config, LevelDbConfiguration{}) } // if it wasn't set, set it to 100 if c.MaxOpenFiles == 0 { c.MaxOpenFiles = 100 } // if it wasn't set, set it to 200 MB if c.LruCacheSize == 0 { c.LruCacheSize = 200 * 1024 * 1024 } // initialize the global cache if cache == nil { cacheLock.Lock() if cache == nil { cache = levigo.NewLRUCache(int(c.LruCacheSize)) } cacheLock.Unlock() } opts := levigo.NewOptions() opts.SetCache(cache) opts.SetCreateIfMissing(true) opts.SetMaxOpenFiles(c.MaxOpenFiles) db, err := levigo.Open(path, opts) wopts := levigo.NewWriteOptions() ropts := levigo.NewReadOptions() return LevelDB{db, opts, wopts, ropts, path}, err }
func (self *LevelDbDatastore) replayFromLog(seekKey []byte, requestLog *requestLogDb, yield func(*[]byte) error) error { ro := levigo.NewReadOptions() defer ro.Close() ro.SetFillCache(false) it := requestLog.db.NewIterator(ro) defer it.Close() startingKey := seekKey[:len(seekKey)-8] sliceTo := len(startingKey) it.Seek(seekKey) if it.Valid() { if bytes.Equal(it.Key(), seekKey) { it.Next() } } for it = it; it.Valid(); it.Next() { k := it.Key() if !bytes.Equal(k[:sliceTo], startingKey) { return nil } b := it.Value() err := yield(&b) if err != nil { return err } } return nil }
// Get the data count from leveldb // // @param string dbName The levelDb database name. // @param func filter The condition filter function. // func ReadDBCount(dbName, condition string) (int, error) { dbfile, err := getDbName(dbName) if err != nil { return 0, err } dbfile = DBRoot + "/" + dbfile db, err := openDB(dbfile) if err != nil { return 0, err } ro := levigo.NewReadOptions() ro.SetFillCache(false) it := db.NewIterator(ro) defer it.Close() foundCnt := 0 for it.Seek([]byte(condition)); it.Valid(); it.Next() { foundCnt++ } if err := it.GetError(); err != nil { return 0, err } return foundCnt, nil }
func NewLevelDBPersistence(storageRoot string, cacheCapacity, bitsPerBloomFilterEncoded int) (p *LevelDBPersistence, err error) { options := levigo.NewOptions() options.SetCreateIfMissing(true) options.SetParanoidChecks(true) cache := levigo.NewLRUCache(cacheCapacity) options.SetCache(cache) filterPolicy := levigo.NewBloomFilter(bitsPerBloomFilterEncoded) options.SetFilterPolicy(filterPolicy) storage, err := levigo.Open(storageRoot, options) if err != nil { return } readOptions := levigo.NewReadOptions() writeOptions := levigo.NewWriteOptions() writeOptions.SetSync(true) p = &LevelDBPersistence{ cache: cache, filterPolicy: filterPolicy, options: options, readOptions: readOptions, storage: storage, writeOptions: writeOptions, } return }
func getPlaylistFromDB(db *levigo.DB) []playListItem { log.Println("Loading Playlist from file...") ro := levigo.NewReadOptions() data, err := db.Get(ro, []byte("playlist")) if err != nil { log.Fatal(err) } p := bytes.NewBuffer(data) dec := gob.NewDecoder(p) var playlist []playListItem //we must decode into a pointer, so we'll take the address of e err = dec.Decode(&playlist) if err != nil { log.Print(err) playlist = []playListItem{} } log.Println("Loaded ", len(playlist), " items into playlist") log.Println(playlist) return playlist }
func Test_DelKey(t *testing.T) { _key, _val, expectedVal := "name", "levigoNS", "" dbpath := "/tmp/delete-this-leveldb" db := CreateDB(dbpath) writer := levigo.NewWriteOptions() defer writer.Close() keyname := []byte(_key) value := []byte(_val) err := db.Put(writer, keyname, value) if err != nil { t.Error("Fail: (DelKey) Pushing key " + _key + " for value " + _val + " failed") } statusDelete := DelKey(_key, db) reader := levigo.NewReadOptions() defer reader.Close() resultVal, err := db.Get(reader, []byte(_key)) if err != nil { t.Error("Fail: (DelKey) Reading key " + _key + " failed") } if string(resultVal) != expectedVal { t.Error("Fail: DelKey sets " + string(resultVal)) } if !statusDelete { t.Error("Fail: DelKey returns False status") } CloseAndDeleteDB(dbpath, db) }
func (c *cache) open(path string) error { opts := levigo.NewOptions() opts.SetCreateIfMissing(true) if c.options.CacheSizeM > 0 { c.cache = levigo.NewLRUCache(c.options.CacheSizeM * 1024 * 1024) opts.SetCache(c.cache) } if c.options.MaxOpenFiles > 0 { opts.SetMaxOpenFiles(c.options.MaxOpenFiles) } if c.options.BlockRestartInterval > 0 { opts.SetBlockRestartInterval(c.options.BlockRestartInterval) } if c.options.WriteBufferSizeM > 0 { opts.SetWriteBufferSize(c.options.WriteBufferSizeM * 1024 * 1024) } if c.options.BlockSizeK > 0 { opts.SetBlockSize(c.options.BlockSizeK * 1024) } db, err := levigo.Open(path, opts) if err != nil { return err } c.db = db c.wo = levigo.NewWriteOptions() c.ro = levigo.NewReadOptions() return nil }
// Fetch a Tx by hash func GetTx(db *levigo.DB, txHash string) (tx *Tx, err error) { ro := levigo.NewReadOptions() defer ro.Close() txKey, blockErr := db.Get(ro, []byte(fmt.Sprintf("tx!%s", txHash))) if blockErr != nil { err = errors.New("Tx not found") return } txData, blockErr := db.Get(ro, txKey) if blockErr != nil { err = errors.New("Tx not found") return } tx = new(Tx) err = json.Unmarshal(txData, tx) if err != nil { return } for txo_index, txo := range tx.TxOuts { txo.Spent, _ = GetTxoSpent(db, txo.Addr, tx.Hash, txo_index) } return }
func loadUsers(db *levigo.DB) map[string]User { log.Println("Loading Users from file...") ro := levigo.NewReadOptions() data, err := db.Get(ro, []byte("Users")) if err != nil { log.Fatal(err) } p := bytes.NewBuffer(data) dec := gob.NewDecoder(p) var users map[string]User //we must decode into a pointer, so we'll take the address of e err = dec.Decode(&users) if err != nil { log.Print(err) users = map[string]User{} } log.Println("Loaded ", len(users), " users") log.Printf("%+v", users) return users }
// ProcessInput drains dw.Input, gob-encoding each item's Minute and
// writing it to the DB under the item's key. Runs until dw.Input closes.
func (dw *DbWriter) ProcessInput() {
	wo := levigo.NewWriteOptions()
	ro := levigo.NewReadOptions()
	defer wo.Close()
	defer ro.Close()
	var buffer bytes.Buffer
	enc := gob.NewEncoder(&buffer)
	// Prime the encoder with a zero Minute so the gob type descriptor is
	// emitted once up front; the paired decode consumes that priming
	// message before the buffer is reset.
	enc.Encode(Minute{})
	dec := gob.NewDecoder(&buffer)
	var m Minute
	dec.Decode(&m)
	buffer.Reset()
	for input := range dw.Input {
		//first write minutes
		key := []byte(input.Key)
		err := enc.Encode(input.Minute)
		if err != nil {
			//there is smth really wrong...some kind of help cry would good
			panic(err)
		}
		// NOTE(review): payload is a view into the shared buffer, and the
		// same encoder/decoder stream is reused across iterations with
		// Reset() in between — confirm Put copies the bytes before they
		// are reset, and that the decode below (apparently a debug
		// round-trip check) is wanted on every message.
		payload := buffer.Bytes()
		//log.Println(len(payload))
		//save
		dw.Db.Put(wo, key, payload)
		var m Minute
		dec.Decode(&m)
		log.Println(m)
		buffer.Reset()
	}
}
func (self *LevelDbDatastore) replayFromLog(seekKey WALKey, requestLog *requestLogDb, yield func(*[]byte) error) error { ro := levigo.NewReadOptions() defer ro.Close() ro.SetFillCache(false) it := requestLog.db.NewIterator(ro) defer it.Close() it.Seek(seekKey) if it.Valid() { if bytes.Equal(it.Key(), seekKey) { it.Next() } } for it = it; it.Valid(); it.Next() { key := NewWALKeyFromBytes(it.Key()) if !key.EqualsIgnoreSequenceNumber(seekKey) { return nil } b := it.Value() err := yield(&b) if err != nil { return err } } return nil }
func NewStore(path string, sync bool) *Store { opts := levigo.NewOptions() opts.SetCreateIfMissing(true) db, err := levigo.Open(path, opts) if err != nil { panic(fmt.Sprintf("queued.Store: Unable to open db: %v", err)) } id := 0 it := db.NewIterator(levigo.NewReadOptions()) defer it.Close() it.SeekToLast() if it.Valid() { id, err = strconv.Atoi(string(it.Key())) if err != nil { panic(fmt.Sprintf("queued: Error loading db: %v", err)) } } store := &Store{ id: id, path: path, sync: sync, db: db, } return store }
func NewLevelDbDatastore(dbDir string) (Datastore, error) { opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE)) opts.SetCreateIfMissing(true) opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES) filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY) opts.SetFilterPolicy(filter) db, err := levigo.Open(dbDir, opts) if err != nil { return nil, err } ro := levigo.NewReadOptions() lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY) if err2 != nil { return nil, err2 } lastId := uint64(0) if lastIdBytes != nil { lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes)) if err2 != nil { return nil, err2 } } wo := levigo.NewWriteOptions() return &LevelDbDatastore{db: db, lastIdUsed: lastId, readOptions: ro, writeOptions: wo}, nil }
// Return the last height processed func GetLastHeight(db *levigo.DB) (lastHeight uint, err error) { ro := levigo.NewReadOptions() defer ro.Close() lastHeightRaw, _ := db.Get(ro, []byte("last-height")) lastHeightInt, _ := strconv.Atoi(string(lastHeightRaw[:])) lastHeight = uint(lastHeightInt) return }
func (l *Level) Get(key []byte) ([]byte, error) { l.mu.Lock() defer l.mu.Unlock() ro := levigo.NewReadOptions() bs, err := l.db.Get(ro, key) ro.Close() return bs, err }
// NewGetTxRPC builds a *Tx for the given transaction id by querying the
// bitcoind JSON-RPC API (getrawtransaction, verbose) and translating the
// JSON response into the local Tx/TxIn/TxOut structures. The genesis
// transaction is special-cased because bitcoind's RPC does not serve it.
func NewGetTxRPC(tx_id string) (tx *Tx, err error) {
	// Hard coded genesis tx since it's not included in bitcoind RPC API
	if tx_id == GenesisTx {
		// NOTE(review): returns (nil, nil) for the genesis tx — callers
		// must handle a nil tx without an error.
		return
		//return TxData{GenesisTx, []TxIn{}, []TxOut{{"1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa", 5000000000}}}, nil
	}
	// Get the TX from bitcoind RPC API
	res_tx, err := CallBitcoinRPC("http://*****:*****@localhost:7334", "getrawtransaction", 1, []interface{}{tx_id, 1})
	if err != nil {
		// NOTE(review): a transport/RPC failure aborts the whole process.
		log.Fatalf("Err: %v", err)
	}
	// The type assertions below panic on an unexpected response shape.
	txjson := res_tx["result"].(map[string]interface{})

	tx = new(Tx)
	tx.Hash = tx_id
	tx.Version = uint32(txjson["version"].(float64))
	tx.LockTime = uint32(txjson["locktime"].(float64))
	// Size in bytes: "hex" is the raw transaction, two hex chars per byte.
	tx.Size = uint32(len(txjson["hex"].(string)) / 2)
	//tx.
	total_tx_out := uint(0)
	total_tx_in := uint(0)

	// NOTE(review): these read options are created and closed but never
	// used by anything below — confirm they can be removed.
	ro := levigo.NewReadOptions()
	defer ro.Close()

	// Inputs: coinbase inputs are skipped; others record their prevout.
	for _, txijson := range txjson["vin"].([]interface{}) {
		_, coinbase := txijson.(map[string]interface{})["coinbase"]
		if !coinbase {
			txi := new(TxIn)
			txinjsonprevout := new(PrevOut)
			txinjsonprevout.Hash = txijson.(map[string]interface{})["txid"].(string)
			txinjsonprevout.Vout = uint32(txijson.(map[string]interface{})["vout"].(float64))
			txi.PrevOut = txinjsonprevout
			tx.TxIns = append(tx.TxIns, txi)
		}
	}
	// Outputs: values arrive in BTC as float64 and are scaled to satoshis.
	for _, txojson := range txjson["vout"].([]interface{}) {
		txo := new(TxOut)
		txo.Value = uint64(txojson.(map[string]interface{})["value"].(float64) * 1e8)
		if txojson.(map[string]interface{})["scriptPubKey"].(map[string]interface{})["type"].(string) != "nonstandard" {
			txo.Addr = txojson.(map[string]interface{})["scriptPubKey"].(map[string]interface{})["addresses"].([]interface{})[0].(string)
		} else {
			// Nonstandard scripts have no address.
			txo.Addr = ""
		}
		tx.TxOuts = append(tx.TxOuts, txo)
		total_tx_out += uint(txo.Value)
	}
	tx.TxOutCnt = uint32(len(tx.TxOuts))
	tx.TxInCnt = uint32(len(tx.TxIns))
	tx.TotalOut = uint64(total_tx_out)
	// NOTE(review): total_tx_in is never incremented, so TotalIn is
	// always 0 — confirm whether input totals are computed elsewhere.
	tx.TotalIn = uint64(total_tx_in)
	return
}
// Reduce wires up an incremental reduce pipeline: whenever a row in
// source changes, runlevel.Trigger invokes f, which recomputes the
// reduced values for the changed key's group prefixes (from most to
// least specific, down to task.level) and writes them into target.
// The task bookkeeping lives in two sublevels of target derived from
// name (suffixes {0,65} and {0,66}).
func Reduce(source *sublevel.DB, target *sublevel.DB, name string, reduceFunc ReduceFunc, rereduceFunc RereduceFunc, valueFactory ValueFactory, level int) *ReduceTask {
	task := &ReduceTask{source: source, target: target, taskDb: sublevel.Sublevel(target.LevelDB(), name+string([]byte{0, 65})), reduceFunc: reduceFunc, rereduceFunc: rereduceFunc, valueFactory: valueFactory, level: level}
	task.ro = levigo.NewReadOptions()
	task.wo = levigo.NewWriteOptions()
	// Filter: maps every changed key to the same task key {32}, i.e. all
	// changes funnel into one reduce task. The commented-out variant
	// below would instead group by the first task.level space-separated
	// key components.
	filter := func(key, value []byte) []byte {
		return []byte{32}
		/* if task.level == 0 {
			return []byte{0}
		}
		s := bytes.Split(key[:len(key)-17], []byte{32})
		if len(s) < task.level {
			return nil
		}
		return bytes.Join(s[:task.level], []byte{32}) */
	}
	// Worker: recompute reductions for each prefix of the changed key.
	// NOTE(review): the key is sliced as key[4:len(key)-17] — presumably
	// stripping a 4-byte prefix and a 17-byte suffix added by the
	// sublevel/trigger layers; confirm against those packages. The `off`
	// accumulator is maintained but never read.
	f := func(key, value []byte) bool {
		// println("Working on", string(key), string(value))
		s := bytes.Split(key[4:len(key)-17], []byte{32})
		off := 16
		for i := len(s); i >= task.level; i-- {
			val := task.valueFactory()
			if i > 0 {
				k := append(joinReduceKey(s[:i], false), 32)
				// Iterate over all similar rows in the source DB
				it := task.source.NewIterator(task.ro)
				for it.Seek(k); it.Valid(); it.Next() {
					if !bytes.HasPrefix(it.Key(), k) {
						break
					}
					val = task.reduceFunc(val, it.Value())
				}
				it.Close()
			}
			// Iterate over all rows in the target DB which are more specific
			it := task.target.NewIterator(task.ro)
			k := joinReduceKey(s[:i], true)
			for it.Seek(k); it.Valid(); it.Next() {
				if !bytes.HasPrefix(it.Key(), k) {
					break
				}
				val = task.rereduceFunc(val, it.Value())
			}
			it.Close()
			task.target.Put(task.wo, joinReduceKey(s[:i], false), serializeValue(val))
			if i > 0 {
				off += len(s[i-1]) + 1
			}
		}
		return true
	}
	task.task = runlevel.Trigger(source, sublevel.Sublevel(target.LevelDB(), name+string([]byte{0, 66})), filter, f)
	return task
}
func NewLevelDbDatastore(dbDir string) (Datastore, error) { mainDbDir := filepath.Join(dbDir, DATABASE_DIR) requestLogDir := filepath.Join(dbDir, REQUEST_LOG_BASE_DIR) err := os.MkdirAll(mainDbDir, 0744) if err != nil { return nil, err } previousLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now().Add(-time.Hour*24))) if err != nil { return nil, err } currentLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now())) if err != nil { return nil, err } opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE)) opts.SetCreateIfMissing(true) opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES) filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY) opts.SetFilterPolicy(filter) db, err := levigo.Open(dbDir, opts) if err != nil { return nil, err } ro := levigo.NewReadOptions() lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY) if err2 != nil { return nil, err2 } lastId := uint64(0) if lastIdBytes != nil { lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes)) if err2 != nil { return nil, err2 } } wo := levigo.NewWriteOptions() leveldbStore := &LevelDbDatastore{ db: db, lastIdUsed: lastId, readOptions: ro, writeOptions: wo, requestLogDir: requestLogDir, currentRequestLog: currentLog, previousRequestLog: previousLog} go leveldbStore.periodicallyRotateRequestLog() return leveldbStore, nil }
// TestTrigger exercises runlevel.Trigger end to end: puts into the
// "input" sublevel fire the task function, which indexes each document
// into the "index" sublevel; a prefix filter keeps non-"Doc_" keys out,
// and two rapid writes to the same doc verify that invocations for the
// same task key coalesce to the latest value.
func TestTrigger(t *testing.T) {
	opts := levigo.NewOptions()
	// Start from a clean database so earlier runs don't leak state in.
	levigo.DestroyDatabase("test.ldb", opts)
	// opts.SetCache(levigo.NewLRUCache(3<<30))
	opts.SetCreateIfMissing(true)
	db, err := levigo.Open("test.ldb", opts)
	if err != nil {
		t.Fatal(err)
	}
	ro := levigo.NewReadOptions()
	wo := levigo.NewWriteOptions()
	sub1 := sublevel.Sublevel(db, "input")
	index := sublevel.Sublevel(db, "index")
	job := sublevel.Sublevel(db, "job")
	task := Trigger(sub1, job, func(key, value []byte) []byte {
		// Only keys starting with "Doc_" trigger the task; the key
		// doubles as the task key, so writes to one doc coalesce.
		if strings.HasPrefix(string(key), "Doc_") {
			return key
		}
		return nil
	}, func(key, value []byte) bool {
		doc := make(map[string]string)
		err := json.Unmarshal(value, &doc)
		if err != nil {
			t.Fatal(err)
		}
		index.Put(wo, []byte(doc["id"]), []byte(doc["number"]))
		// Make sure that the next task invocation comes in concurrently to this one
		time.Sleep(300 * time.Millisecond)
		return true
	})
	sub1.Put(wo, []byte("foobar"), []byte("do-not-process"))
	// Two put operations which will both trigger the task for the same taskKey.
	sub1.Put(wo, []byte("Doc_1"), []byte("{\"id\":\"01234\", \"number\": \"42\"}"))
	sub1.Put(wo, []byte("Doc_1"), []byte("{\"id\":\"01234\", \"number\": \"43\"}"))
	val, err := sub1.Get(ro, []byte("Doc_1"))
	if err != nil || string(val) != "{\"id\":\"01234\", \"number\": \"43\"}" {
		t.Fatal(err, string(val))
	}
	// Give the (deliberately slow) task time to process both triggers;
	// the index must end up at the latest value.
	time.Sleep(800 * time.Millisecond)
	val, err = index.Get(ro, []byte("01234"))
	if err != nil || string(val) != "43" {
		t.Fatal(err, string(val))
	}
	task.Close()
	ro.Close()
	wo.Close()
	db.Close()
}
func (db LevelDB) Iterator() Iterator { ropts := levigo.NewReadOptions() ropts.SetFillCache(false) defer ropts.Close() itr := db.db.NewIterator(ropts) return &LevelDbIterator{itr, nil} }
// encodeKey emits the RDB representation of a single key: it looks up
// the key's metadata record, writes the matching RDB type tag, then
// delegates to the per-type value encoder. When dump is true the key
// name is omitted and a dump footer is appended (DUMP-style payload);
// otherwise the key name precedes the value. All reads go through a
// point-in-time snapshot so the metadata and the value parts are seen
// consistently even under concurrent writes.
//
// Returns nil for a nonexistent key, InvalidDataError for a truncated
// metadata record, and panics on an unknown type tag (corrupt data /
// broken invariant).
func (e *rdbEncoder) encodeKey(key []byte, dump bool) error {
	snapshot := DB.NewSnapshot()
	opts := levigo.NewReadOptions()
	opts.SetSnapshot(snapshot)
	defer DB.ReleaseSnapshot(snapshot)
	defer opts.Close()
	res, err := DB.Get(opts, metaKey(key))
	if err != nil {
		return err
	}
	if res == nil {
		// Key does not exist: nothing to encode.
		return nil
	}
	if len(res) < 5 {
		// Metadata must hold at least a 1-byte type tag + 4-byte length.
		return InvalidDataError
	}
	// Bytes 1..4 hold the element count / length, big-endian.
	length := binary.BigEndian.Uint32(res[1:])
	switch res[0] {
	case StringLengthValue:
		e.r.EncodeType(rdb.TypeString)
	case HashLengthValue:
		e.r.EncodeType(rdb.TypeHash)
	case SetCardValue:
		e.r.EncodeType(rdb.TypeSet)
	case ZCardValue:
		e.r.EncodeType(rdb.TypeZSet)
	case ListLengthValue:
		e.r.EncodeType(rdb.TypeList)
	default:
		panic("unknown key type")
	}
	if !dump {
		e.r.EncodeString(key)
	}
	// Second dispatch on the same tag: encode the value body.
	switch res[0] {
	case StringLengthValue:
		e.encodeString(key, opts)
	case HashLengthValue:
		e.encodeHash(key, length, opts)
	case SetCardValue:
		e.encodeSet(key, length, opts)
	case ZCardValue:
		e.encodeZSet(key, length, opts)
	case ListLengthValue:
		e.encodeList(key, length, opts)
	}
	if dump {
		e.r.EncodeDumpFooter()
	}
	return nil
}