// Create new memdb and froze the old one; need external synchronization.
func (d *DB) newMem() (m *memdb.DB, err error) {
	s := d.s

	// Reserve a file number for the new journal; give the number back if
	// the journal writer cannot be created.
	num := s.allocFileNum()
	w, err := newJournalWriter(s.getJournalFile(num))
	if err != nil {
		s.reuseFileNum(num)
		return
	}

	// Install the new journal writer. The previous journal (if any) is
	// closed and kept as the frozen journal (d.fjournal) so it can be
	// removed after the frozen memdb is flushed to a table.
	old := d.journal
	d.journal = w
	if old != nil {
		old.close()
		d.fjournal = old
	}

	// Record the sequence number at freeze time.
	d.fseq = d.seq

	// Create the fresh memdb and build a memSet that pairs it with the
	// previously-current memdb (now frozen).
	m = memdb.New(s.cmp)
	mem := &memSet{cur: m}
	if old := d.getMem_NB(); old != nil {
		mem.froze = old.cur
	}
	// Publish the new memSet atomically so lock-free readers always see
	// a consistent cur/froze pair.
	atomic.StorePointer(&d.mem, unsafe.Pointer(mem))
	return
}
// Create new memdb and froze the old one; need external synchronization.
// newMem only called synchronously by the writer.
func (d *DB) newMem() (mem *memdb.DB, err error) {
	s := d.s

	// Reserve a file number and create the new journal file; release the
	// number again if creation fails.
	num := s.allocFileNum()
	file := s.getJournalFile(num)
	w, err := file.Create()
	if err != nil {
		s.reuseFileNum(num)
		return
	}

	d.memMu.Lock()
	if d.journal == nil {
		// First journal for this DB.
		d.journal = journal.NewWriter(w)
	} else {
		// Point the existing journal writer at the new file; close the
		// old underlying writer and remember its file so it can be
		// removed once the frozen memdb has been flushed.
		d.journal.Reset(w)
		d.journalWriter.Close()
		d.frozenJournalFile = d.journalFile
	}
	d.journalWriter = w
	d.journalFile = file

	// Freeze the current memdb and install a fresh one, sized from the
	// write-buffer option.
	d.frozenMem = d.mem
	d.mem = memdb.New(s.cmp, toPercent(d.s.o.GetWriteBuffer(), kWriteBufferPercent))
	mem = d.mem
	// The seq only incremented by the writer.
	d.frozenSeq = d.seq
	d.memMu.Unlock()
	return
}
// Create new memdb and froze the old one; need external synchronization.
// newMem only called synchronously by the writer.
func (db *DB) newMem(n int) (mem *memdb.DB, err error) {
	// Reserve a file number and create the new journal file; release the
	// number again if creation fails.
	num := db.s.allocFileNum()
	file := db.s.getJournalFile(num)
	w, err := file.Create()
	if err != nil {
		db.s.reuseFileNum(num)
		return
	}

	db.memMu.Lock()
	defer db.memMu.Unlock()

	if db.journal == nil {
		// First journal for this DB.
		db.journal = journal.NewWriter(w)
	} else {
		// Reuse the journal writer on the new file; close the old file
		// and remember it so it can be removed once the frozen memdb is
		// flushed.
		db.journal.Reset(w)
		db.journalWriter.Close()
		db.frozenJournalFile = db.journalFile
	}
	db.journalWriter = w
	db.journalFile = file

	// Freeze the current memdb and install a fresh one sized to hold at
	// least n bytes (or the configured write buffer, whichever is larger).
	db.frozenMem = db.mem
	db.mem = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
	mem = db.mem
	// The seq only incremented by the writer. And whoever called newMem
	// should hold write lock, so no need additional synchronization here.
	db.frozenSeq = db.seq
	return
}
func (mc *stConstructor_MergedMemDB) init(t *testing.T, ho *stHarnessOpt) error { ho.Randomize = true mc.t = t for i := range mc.db { mc.db[i] = memdb.New(comparer.DefaultComparer, 0) } return nil }
func (p *stConstructor_MergedMemDB) init(t *testing.T, ho *stHarnessOpt) error { ho.Randomize = true p.t = t for i := range p.mem { p.mem[i] = memdb.New(comparer.BytesComparer{}) } return nil }
func (db *DB) mpoolGet(n int) *memDB { var mdb *memdb.DB select { case mdb = <-db.memPool: default: } if mdb == nil || mdb.Capacity() < n { mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) } return &memDB{ db: db, DB: mdb, } }
func TestBatch_Size(t *testing.T) { b := new(Batch) for i := 0; i < 2; i++ { b.Put([]byte("key1"), []byte("value1")) b.Put([]byte("key2"), []byte("value2")) b.Delete([]byte("key1")) b.Put([]byte("foo"), []byte("foovalue")) b.Put([]byte("bar"), []byte("barvalue")) mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) b.memReplay(mem) if b.size() != mem.Size() { t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) } b.Reset() } }
// Create new memdb and froze the old one; need external synchronization.
// newMem only called synchronously by the writer.
func (db *DB) newMem(n int) (mem *memDB, err error) {
	// Reserve a file number and create the new journal file; release the
	// number again if creation fails.
	num := db.s.allocFileNum()
	file := db.s.getJournalFile(num)
	w, err := file.Create()
	if err != nil {
		db.s.reuseFileNum(num)
		return
	}

	db.memMu.Lock()
	defer db.memMu.Unlock()

	// The previous frozen memdb must have been flushed and released before
	// a new freeze happens; anything else is a programming error.
	if db.frozenMem != nil {
		panic("still has frozen mem")
	}

	if db.journal == nil {
		// First journal for this DB.
		db.journal = journal.NewWriter(w)
	} else {
		// Reuse the journal writer on the new file; close the old file
		// and remember it so it can be removed once flushed.
		db.journal.Reset(w)
		db.journalWriter.Close()
		db.frozenJournalFile = db.journalFile
	}
	db.journalWriter = w
	db.journalFile = file
	db.frozenMem = db.mem

	// Reuse a pooled memDB when it is large enough, otherwise allocate a
	// fresh one. Both branches leave the ref count at 2 after the final
	// incref below: one reference held by db.mem, one for the caller.
	mem, ok := db.memPool.Get().(*memDB)
	if ok && mem.db.Capacity() >= n {
		mem.db.Reset()
		mem.incref()
	} else {
		mem = &memDB{
			pool: db.memPool,
			db:   memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)),
			ref:  1,
		}
	}
	mem.incref()
	db.mem = mem
	// The seq only incremented by the writer. And whoever called newMem
	// should hold write lock, so no need additional synchronization here.
	db.frozenSeq = db.seq
	return
}
// Create new memdb and froze the old one; need external synchronization.
// newMem only called synchronously by the writer.
func (db *DB) newMem(n int) (mem *memDB, err error) {
	// Reserve a file number and create the new journal file; release the
	// number again if creation fails.
	fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
	w, err := db.s.stor.Create(fd)
	if err != nil {
		db.s.reuseFileNum(fd.Num)
		return
	}

	db.memMu.Lock()
	defer db.memMu.Unlock()

	// The previous frozen memdb must have been flushed and released before
	// a new freeze happens; anything else is a programming error.
	if db.frozenMem != nil {
		panic("still has frozen mem")
	}

	if db.journal == nil {
		// First journal for this DB.
		db.journal = journal.NewWriter(w)
	} else {
		// Reuse the journal writer on the new file; close the old file
		// and remember its descriptor so it can be removed once flushed.
		db.journal.Reset(w)
		db.journalWriter.Close()
		db.frozenJournalFd = db.journalFd
	}
	db.journalWriter = w
	db.journalFd = fd
	db.frozenMem = db.mem

	// Try a pooled memdb first; allocate a fresh one if the pool is empty
	// or the pooled buffer is too small for n bytes.
	mdb := db.mpoolGet()
	if mdb == nil || mdb.Capacity() < n {
		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
	}
	// ref == 2: one reference for db.mem, one returned to the caller.
	mem = &memDB{
		db:  db,
		DB:  mdb,
		ref: 2,
	}
	db.mem = mem
	// The seq only incremented by the writer. And whoever called newMem
	// should hold write lock, so no need additional synchronization here.
	db.frozenSeq = db.seq
	return
}
// recoverLog replays every log file at or after s.stLogNum into memdbs,
// flushing each replayed memdb to a table and committing the session state
// before moving to the next log. On success the obsolete log files have
// been removed and a fresh memdb/log has been installed via newMem.
func (d *DB) recoverLog() (err error) {
	s := d.s
	icmp := s.cmp

	s.printf("LogRecovery: started, min=%d", s.stLogNum)

	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(s)

	// Partition the sorted logs: count how many are older than the
	// recovery floor (skipped), and mark the rest as in-use file numbers.
	logs, skip := files(s.getFiles(desc.TypeLog)), 0
	logs.sort()
	for _, log := range logs {
		if log.Num() < s.stLogNum {
			skip++
			continue
		}
		s.markFileNum(log.Num())
	}

	// r is the reader for the current log; fr holds the previous log's
	// reader until its contents have been flushed and committed.
	var r, fr *logReader
	for _, log := range logs[skip:] {
		s.printf("LogRecovery: recovering, num=%d", log.Num())

		r, err = newLogReader(log, true, s.logDropFunc("log", log.Num()))
		if err != nil {
			return
		}

		// Before replaying this log, persist whatever the previous log
		// left in the memdb, commit, and remove the previous log file.
		if mem != nil {
			if mem.Len() > 0 {
				err = cm.flush(mem, 0)
				if err != nil {
					return
				}
			}
			err = cm.commit(r.file.Num(), d.seq)
			if err != nil {
				return
			}
			cm.reset()
			fr.remove()
			fr = nil
		}

		mem = memdb.New(icmp)

		// Replay each record as a batch into the memdb, tracking the
		// sequence number and spilling to a table when the memdb exceeds
		// the write-buffer budget.
		for r.log.Next() {
			err = batch.decode(r.log.Record())
			if err != nil {
				return
			}
			err = batch.memReplay(mem)
			if err != nil {
				return
			}
			d.seq = batch.seq + uint64(batch.len())
			if mem.Size() > s.o.GetWriteBuffer() {
				// flush to table
				err = cm.flush(mem, 0)
				if err != nil {
					return
				}
				// create new memdb
				mem = memdb.New(icmp)
			}
		}
		err = r.log.Error()
		if err != nil {
			return
		}
		r.close()
		fr = r
	}

	// create new log
	_, err = d.newMem()
	if err != nil {
		return
	}

	// Flush any leftover entries from the last replayed log.
	if mem != nil && mem.Len() > 0 {
		err = cm.flush(mem, 0)
		if err != nil {
			return
		}
	}
	// Commit against the freshly created log's file number.
	err = cm.commit(d.log.file.Num(), d.seq)
	if err != nil {
		return
	}
	// Remove the last replayed log file, now obsolete.
	if fr != nil {
		fr.remove()
	}
	return
}
// NewMemDbBuffer creates a new memDbBuffer. func NewMemDbBuffer() MemBuffer { return &memDbBuffer{db: memdb.New(comparer.DefaultComparer, 4*1024)} }
func (mc *stConstructor_MemDB) init(t *testing.T, ho *stHarnessOpt) error { ho.Randomize = true mc.t = t mc.db = memdb.New(comparer.DefaultComparer, 0) return nil }
func (p *stConstructor_MemDB) init(t *testing.T, ho *stHarnessOpt) error { ho.Randomize = true p.t = t p.mem = memdb.New(comparer.BytesComparer{}) return nil }
// conditionType is the type for condition consts.
type conditionType int

const (
	// conditionIfNotExist means the condition doesn't exist.
	conditionIfNotExist conditionType = iota + 1
	// conditionIfEqual means the condition is equals.
	conditionIfEqual
	// conditionForceSet means the condition is force set.
	conditionForceSet
)

var (
	// p is a shared cache of pre-allocated 1 MiB memdbs, holding up to
	// 100 entries, so callers can reuse buffers instead of allocating
	// a fresh memdb each time.
	p = pool.NewCache("memdb pool", 100, func() interface{} {
		return memdb.New(comparer.DefaultComparer, 1*1024*1024)
	})
)

// conditionValue is a data structure used to store current stored data and data verification condition.
type conditionValue struct {
	originValue []byte      // the value currently stored
	condition   conditionType // how the stored value must be verified
}

// IsErrNotFound checks if err is a kind of NotFound error.
// NOTE(review): this function is truncated in the visible source — its
// closing braces (and any trailing return) lie outside this chunk.
func IsErrNotFound(err error) bool {
	if errors2.ErrorEqual(err, leveldb.ErrNotFound) || errors2.ErrorEqual(err, ErrNotExist) {
		return true
	}
// recoverJournalRO replays all live journal files into a single in-memory
// memdb without writing tables or committing session state (read-only
// recovery). The resulting memdb becomes db.mem; nothing is removed from
// storage.
func (db *DB) recoverJournalRO() error {
	// Get all journals and sort it by file number.
	rawFds, err := db.s.stor.List(storage.TypeJournal)
	if err != nil {
		return err
	}
	sortFds(rawFds)

	// Journals that will be recovered.
	var fds []storage.FileDesc
	for _, fd := range rawFds {
		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
			fds = append(fds, fd)
		}
	}

	var (
		// Options.
		strict      = db.s.o.GetStrict(opt.StrictJournal)
		checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
		writeBuffer = db.s.o.GetWriteBuffer()

		// Single memdb accumulating every replayed journal; it is never
		// flushed to a table in RO mode.
		mdb = memdb.New(db.s.icmp, writeBuffer)
	)

	// Recover journals.
	if len(fds) > 0 {
		db.logf("journal@recovery RO·Mode F·%d", len(fds))

		var (
			jr    *journal.Reader
			buf   = &util.Buffer{}
			batch = &Batch{}
		)

		for _, fd := range fds {
			db.logf("journal@recovery recovering @%d", fd.Num)

			fr, err := db.s.stor.Open(fd)
			if err != nil {
				return err
			}

			// Create or reset journal reader instance.
			if jr == nil {
				jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
			} else {
				jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
			}

			// Replay journal to memdb.
			for {
				r, err := jr.Next()
				if err != nil {
					if err == io.EOF {
						break
					}
					fr.Close()
					return errors.SetFd(err, fd)
				}

				buf.Reset()
				if _, err := buf.ReadFrom(r); err != nil {
					if err == io.ErrUnexpectedEOF {
						// This is error returned due to corruption, with strict == false.
						continue
					}
					fr.Close()
					return errors.SetFd(err, fd)
				}
				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
					if !strict && errors.IsCorrupted(err) {
						db.s.logf("journal error: %v (skipped)", err)
						// We won't apply sequence number as it might be corrupted.
						continue
					}
					fr.Close()
					return errors.SetFd(err, fd)
				}

				// Save sequence number.
				db.seq = batch.seq + uint64(batch.Len())
			}

			fr.Close()
		}
	}

	// Set memDB.
	db.mem = &memDB{db: db, DB: mdb, ref: 1}

	return nil
}
// recoverJournal replays every journal at or after s.stJournalNum (plus the
// recorded previous journal) into memdbs, flushing to tables and committing
// session state between journals. Obsolete journal files are removed as
// recovery progresses, and a fresh journal/memdb is installed via newMem.
func (d *DB) recoverJournal() (err error) {
	s := d.s
	icmp := s.cmp

	s.printf("JournalRecovery: started, min=%d", s.stJournalNum)

	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(s)

	// Select the journals to recover and mark their numbers as in use.
	journals := files(s.getFiles(storage.TypeJournal))
	journals.sort()
	rJournals := make([]storage.File, 0, len(journals))
	for _, journal := range journals {
		if journal.Num() >= s.stJournalNum || journal.Num() == s.stPrevJournalNum {
			s.markFileNum(journal.Num())
			rJournals = append(rJournals, journal)
		}
	}

	// r is the reader for the current journal; fr holds the previous
	// journal's reader until its contents have been committed.
	var r, fr *journalReader
	for _, journal := range rJournals {
		s.printf("JournalRecovery: recovering, num=%d", journal.Num())

		r, err = newJournalReader(journal, true, s.journalDropFunc("journal", journal.Num()))
		if err != nil {
			return
		}

		// Persist the previous journal's memdb, commit, and remove the
		// previous journal file before replaying the next one.
		if mem != nil {
			if mem.Len() > 0 {
				err = cm.flush(mem, 0)
				if err != nil {
					return
				}
			}
			err = cm.commit(r.file.Num(), d.seq)
			if err != nil {
				return
			}
			cm.reset()
			fr.remove()
			fr = nil
		}

		mem = memdb.New(icmp)

		// Replay each record as a batch, tracking the sequence number and
		// spilling to a table once the memdb exceeds the write buffer.
		for r.journal.Next() {
			err = batch.decode(r.journal.Record())
			if err != nil {
				return
			}
			err = batch.memReplay(mem)
			if err != nil {
				return
			}
			d.seq = batch.seq + uint64(batch.len())
			if mem.Size() > s.o.GetWriteBuffer() {
				// flush to table
				err = cm.flush(mem, 0)
				if err != nil {
					return
				}
				// create new memdb
				mem = memdb.New(icmp)
			}
		}
		err = r.journal.Error()
		if err != nil {
			return
		}
		r.close()
		fr = r
	}

	// create new journal
	_, err = d.newMem()
	if err != nil {
		return
	}

	// Flush any leftover entries from the last replayed journal.
	if mem != nil && mem.Len() > 0 {
		err = cm.flush(mem, 0)
		if err != nil {
			return
		}
	}
	// Commit against the freshly created journal's file number.
	err = cm.commit(d.journal.file.Num(), d.seq)
	if err != nil {
		return
	}
	// Remove the last replayed journal file, now obsolete.
	if fr != nil {
		fr.remove()
	}
	return
}
// NewMemoryKeyValue returns a KeyValue implementation that's backed only // by memory. It's mostly useful for tests and development. func NewMemoryKeyValue() KeyValue { db := memdb.New(comparer.DefaultComparer, 128) return &memKeys{db: db} }
// recoverJournal replays every journal at or after s.stJournalNum (plus the
// recorded previous journal) into a reused memdb, flushing to tables and
// committing session state between journals. Obsolete journal files are
// removed, and a fresh journal/memdb is installed via newMem.
func (d *DB) recoverJournal() error {
	s := d.s

	// Collect, sort, and filter the journals to recover, marking their
	// numbers as in use.
	ff0, err := s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	ff1 := files(ff0)
	ff1.sort()
	ff2 := make([]storage.File, 0, len(ff1))
	for _, file := range ff1 {
		if file.Num() >= s.stJournalNum || file.Num() == s.stPrevJournalNum {
			s.markFileNum(file.Num())
			ff2 = append(ff2, file)
		}
	}

	var jr *journal.Reader
	// of is the previously replayed journal file, kept until its contents
	// have been committed so it can then be removed.
	var of storage.File
	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(s)
	buf := new(util.Buffer)
	// Options.
	strict := s.o.GetStrict(opt.StrictJournal)
	checksum := s.o.GetStrict(opt.StrictJournalChecksum)
	writeBuffer := s.o.GetWriteBuffer()
	// recoverJournal replays a single journal file; it closes over jr, of,
	// mem, batch, cm, and buf so state carries across files.
	recoverJournal := func(file storage.File) error {
		s.logf("journal@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		defer reader.Close()
		// Create or reset the shared journal reader.
		if jr == nil {
			jr = journal.NewReader(reader, dropper{s, file}, strict, checksum)
		} else {
			jr.Reset(reader, dropper{s, file}, strict, checksum)
		}
		// Persist the previous journal's memdb, commit, and remove the
		// now-obsolete previous file.
		if of != nil {
			if mem.Len() > 0 {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
			}
			if err := cm.commit(file.Num(), d.seq); err != nil {
				return err
			}
			cm.reset()
			of.Remove()
			of = nil
		}
		// Reset memdb.
		mem.Reset()
		for {
			r, err := jr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}
			buf.Reset()
			if _, err := buf.ReadFrom(r); err != nil {
				// In non-strict mode a short/corrupt record is skipped.
				if strict {
					return err
				}
				continue
			}
			if err := batch.decode(buf.Bytes()); err != nil {
				return err
			}
			if err := batch.memReplay(mem); err != nil {
				return err
			}
			// Track the highest replayed sequence number.
			d.seq = batch.seq + uint64(batch.len())
			if mem.Size() >= writeBuffer {
				// Large enough, flush it.
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
				// Reset memdb.
				mem.Reset()
			}
		}
		of = file
		return nil
	}

	// Recover all journals.
	if len(ff2) > 0 {
		s.logf("journal@recovery F·%d", len(ff2))
		mem = memdb.New(s.icmp, writeBuffer)
		for _, file := range ff2 {
			if err := recoverJournal(file); err != nil {
				return err
			}
		}
		// Flush the last journal.
		if mem.Len() > 0 {
			if err := cm.flush(mem, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := d.newMem(0); err != nil {
		return err
	}

	// Commit.
	if err := cm.commit(d.journalFile.Num(), d.seq); err != nil {
		return err
	}

	// Remove the last journal.
	if of != nil {
		of.Remove()
	}

	return nil
}
// recoverJournal replays the live journals into a reused memdb, flushing to
// tables and committing session state between journals. Journals older than
// the recovery floor are discarded (except the recorded previous journal,
// which is kept in order), obsolete files are removed, and a fresh
// journal/memdb is installed via newMem.
func (db *DB) recoverJournal() error {
	// Get all tables and sort it by file number.
	journalFiles_, err := db.s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	journalFiles := files(journalFiles_)
	journalFiles.sort()

	// Discard older journal.
	// If the previous journal (stPrevJournalNum) appears before the floor,
	// it is moved to sit immediately before the first live journal so it
	// is still replayed first.
	prev := -1
	for i, file := range journalFiles {
		if file.Num() >= db.s.stJournalNum {
			if prev >= 0 {
				i--
				journalFiles[i] = journalFiles[prev]
			}
			journalFiles = journalFiles[i:]
			break
		} else if file.Num() == db.s.stPrevJournalNum {
			prev = i
		}
	}

	var jr *journal.Reader
	// of is the previously replayed journal file, kept until its contents
	// have been committed so it can then be removed.
	var of storage.File
	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(db.s)
	buf := new(util.Buffer)
	// Options.
	strict := db.s.o.GetStrict(opt.StrictJournal)
	checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
	writeBuffer := db.s.o.GetWriteBuffer()
	// recoverJournal replays a single journal file; it closes over jr, of,
	// mem, batch, cm, and buf so state carries across files.
	recoverJournal := func(file storage.File) error {
		db.logf("journal@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		defer reader.Close()

		// Create/reset journal reader instance.
		if jr == nil {
			jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
		} else {
			jr.Reset(reader, dropper{db.s, file}, strict, checksum)
		}

		// Flush memdb and remove obsolete journal file.
		if of != nil {
			if mem.Len() > 0 {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
			}
			if err := cm.commit(file.Num(), db.seq); err != nil {
				return err
			}
			cm.reset()
			of.Remove()
			of = nil
		}

		// Replay journal to memdb.
		mem.Reset()
		for {
			r, err := jr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}

			buf.Reset()
			if _, err := buf.ReadFrom(r); err != nil {
				// Truncated record: skip it (corruption tolerated here).
				if err == io.ErrUnexpectedEOF {
					continue
				} else {
					return err
				}
			}
			if err := batch.decode(buf.Bytes()); err != nil {
				return err
			}
			if err := batch.memReplay(mem); err != nil {
				return err
			}

			// Save sequence number.
			db.seq = batch.seq + uint64(batch.len())

			// Flush it if large enough.
			if mem.Size() >= writeBuffer {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
				mem.Reset()
			}
		}

		of = file
		return nil
	}

	// Recover all journals.
	if len(journalFiles) > 0 {
		db.logf("journal@recovery F·%d", len(journalFiles))

		// Mark file number as used.
		db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())

		mem = memdb.New(db.s.icmp, writeBuffer)
		for _, file := range journalFiles {
			if err := recoverJournal(file); err != nil {
				return err
			}
		}

		// Flush the last journal.
		if mem.Len() > 0 {
			if err := cm.flush(mem, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := db.newMem(0); err != nil {
		return err
	}

	// Commit.
	if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
		// Close journal.
		if db.journal != nil {
			db.journal.Close()
			db.journalWriter.Close()
		}
		return err
	}

	// Remove the last obsolete journal file.
	if of != nil {
		of.Remove()
	}

	return nil
}
// recoverJournal replays every journal at or after s.stJournalNum (plus the
// recorded previous journal) into a reused memdb, flushing to tables via the
// session record and committing between journals. Obsolete journal files are
// removed, and a fresh journal/memdb is installed via newMem.
func (db *DB) recoverJournal() error {
	// Get all journals and sort it by file number.
	rawFds, err := db.s.stor.List(storage.TypeJournal)
	if err != nil {
		return err
	}
	sortFds(rawFds)

	// Journals that will be recovered.
	var fds []storage.FileDesc
	for _, fd := range rawFds {
		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
			fds = append(fds, fd)
		}
	}

	var (
		ofd storage.FileDesc // Obsolete file.
		rec = &sessionRecord{}
	)

	// Recover journals.
	if len(fds) > 0 {
		db.logf("journal@recovery F·%d", len(fds))

		// Mark file number as used.
		db.s.markFileNum(fds[len(fds)-1].Num)

		var (
			// Options.
			strict      = db.s.o.GetStrict(opt.StrictJournal)
			checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
			writeBuffer = db.s.o.GetWriteBuffer()

			jr    *journal.Reader
			mdb   = memdb.New(db.s.icmp, writeBuffer)
			buf   = &util.Buffer{}
			batch = &Batch{}
		)

		for _, fd := range fds {
			db.logf("journal@recovery recovering @%d", fd.Num)

			fr, err := db.s.stor.Open(fd)
			if err != nil {
				return err
			}

			// Create or reset journal reader instance.
			if jr == nil {
				jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
			} else {
				jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
			}

			// Flush memdb and remove obsolete journal file.
			if !ofd.Nil() {
				if mdb.Len() > 0 {
					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
						fr.Close()
						return err
					}
				}

				rec.setJournalNum(fd.Num)
				rec.setSeqNum(db.seq)
				if err := db.s.commit(rec); err != nil {
					fr.Close()
					return err
				}
				rec.resetAddedTables()

				db.s.stor.Remove(ofd)
				ofd = storage.FileDesc{}
			}

			// Replay journal to memdb.
			mdb.Reset()
			for {
				r, err := jr.Next()
				if err != nil {
					if err == io.EOF {
						break
					}
					fr.Close()
					return errors.SetFd(err, fd)
				}

				buf.Reset()
				if _, err := buf.ReadFrom(r); err != nil {
					if err == io.ErrUnexpectedEOF {
						// This is error returned due to corruption, with strict == false.
						continue
					}
					fr.Close()
					return errors.SetFd(err, fd)
				}
				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
					if !strict && errors.IsCorrupted(err) {
						db.s.logf("journal error: %v (skipped)", err)
						// We won't apply sequence number as it might be corrupted.
						continue
					}
					fr.Close()
					return errors.SetFd(err, fd)
				}

				// Save sequence number.
				db.seq = batch.seq + uint64(batch.Len())

				// Flush it if large enough.
				if mdb.Size() >= writeBuffer {
					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
						fr.Close()
						return err
					}
					mdb.Reset()
				}
			}

			fr.Close()
			ofd = fd
		}

		// Flush the last memdb.
		if mdb.Len() > 0 {
			if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := db.newMem(0); err != nil {
		return err
	}

	// Commit.
	rec.setJournalNum(db.journalFd.Num)
	rec.setSeqNum(db.seq)
	if err := db.s.commit(rec); err != nil {
		// Close journal on error.
		if db.journal != nil {
			db.journal.Close()
			db.journalWriter.Close()
		}
		return err
	}

	// Remove the last obsolete journal file.
	if !ofd.Nil() {
		db.s.stor.Remove(ofd)
	}

	return nil
}