func TestBatch_Size(t *testing.T) { b := new(Batch) for i := 0; i < 2; i++ { b.Put([]byte("key1"), []byte("value1")) b.Put([]byte("key2"), []byte("value2")) b.Delete([]byte("key1")) b.Put([]byte("foo"), []byte("foovalue")) b.Put([]byte("bar"), []byte("barvalue")) mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) b.memReplay(mem) if b.size() != mem.Size() { t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) } b.Reset() } }
// newMem creates a new live memdb (and its journal file) and freezes the
// current one; needs external synchronization. newMem is only called
// synchronously by the writer.
//
// On success the returned memDB is the new live memdb with ref == 2
// (one reference held by db.mem, one for the caller).
func (db *DB) newMem(n int) (mem *memDB, err error) {
	// Allocate and create the new journal file before taking memMu, so the
	// lock is not held across file creation. The file number is returned to
	// the pool if creation fails.
	num := db.s.allocFileNum()
	file := db.s.getJournalFile(num)
	w, err := file.Create()
	if err != nil {
		db.s.reuseFileNum(num)
		return
	}

	db.memMu.Lock()
	defer db.memMu.Unlock()

	// The previous frozen memdb must have been flushed and released before
	// a new one can be frozen; anything else is a programmer error.
	if db.frozenMem != nil {
		panic("still has frozen mem")
	}

	if db.journal == nil {
		db.journal = journal.NewWriter(w)
	} else {
		// Point the existing journal writer at the new file, then retire
		// the old file: close its writer and remember it as the frozen
		// journal so it can be removed once the frozen memdb is flushed.
		db.journal.Reset(w)
		db.journalWriter.Close()
		db.frozenJournalFile = db.journalFile
	}
	db.journalWriter = w
	db.journalFile = file

	// Freeze the current live memdb and install a new one, reusing a memdb
	// from the pool when it is large enough for the requested capacity n.
	db.frozenMem = db.mem
	mdb := db.mpoolGet()
	if mdb == nil || mdb.Capacity() < n {
		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
	}
	mem = &memDB{
		db:  db,
		DB:  mdb,
		ref: 2,
	}
	db.mem = mem
	// The seq is only incremented by the writer, and whoever called newMem
	// should hold the write lock, so no additional synchronization is needed
	// here.
	db.frozenSeq = db.seq
	return
}
// recoverJournalRO replays all pending journal files into a single in-memory
// memdb, for read-only operation: unlike recoverJournal it flushes nothing to
// tables, commits no session record, and creates no new journal file. The
// resulting memdb is installed as db.mem.
func (db *DB) recoverJournalRO() error {
	// Get all journals and sort them by file number.
	allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	files(allJournalFiles).sort()

	// Journals that will be recovered: those at or after the current journal
	// number, plus the recorded previous journal, per the session record.
	var recJournalFiles []storage.File
	for _, jf := range allJournalFiles {
		if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
			recJournalFiles = append(recJournalFiles, jf)
		}
	}

	var (
		// Options.
		strict      = db.s.o.GetStrict(opt.StrictJournal)
		checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
		writeBuffer = db.s.o.GetWriteBuffer()

		// All journals are replayed into this one memdb; it is never flushed.
		mdb = memdb.New(db.s.icmp, writeBuffer)
	)

	// Recover journals.
	if len(recJournalFiles) > 0 {
		db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))

		var (
			jr    *journal.Reader
			buf   = &util.Buffer{}
			batch = &Batch{}
		)

		for _, jf := range recJournalFiles {
			db.logf("journal@recovery recovering @%d", jf.Num())

			fr, err := jf.Open()
			if err != nil {
				return err
			}

			// Create or reset the journal reader instance; the reader is
			// reused across files.
			if jr == nil {
				jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
			} else {
				jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
			}

			// Replay journal records to the memdb, one batch per record.
			for {
				r, err := jr.Next()
				if err != nil {
					if err == io.EOF {
						// End of this journal file; move on to the next.
						break
					}

					fr.Close()
					return errors.SetFile(err, jf)
				}

				buf.Reset()
				if _, err := buf.ReadFrom(r); err != nil {
					if err == io.ErrUnexpectedEOF {
						// This is the error returned due to corruption, with
						// strict == false: skip the truncated record.
						continue
					}

					fr.Close()
					return errors.SetFile(err, jf)
				}
				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
					if !strict && errors.IsCorrupted(err) {
						db.s.logf("journal error: %v (skipped)", err)
						// We won't apply the sequence number as it might be
						// corrupted.
						continue
					}

					fr.Close()
					return errors.SetFile(err, jf)
				}

				// Save the sequence number: the batch's starting seq plus the
				// number of entries it contained.
				db.seq = batch.seq + uint64(batch.Len())
			}

			fr.Close()
		}
	}

	// Install the recovered memdb as the live memdb (single reference,
	// read-only mode never freezes it).
	db.mem = &memDB{db: db, DB: mdb, ref: 1}

	return nil
}
// recoverJournal replays all pending journal files into memdbs, flushing them
// to tables as they fill, commits the resulting session record, and finally
// creates a fresh journal/memdb pair via newMem. Fully-replayed-and-flushed
// journal files are removed only after the commit that supersedes them, so a
// crash mid-recovery never loses data.
func (db *DB) recoverJournal() error {
	// Get all journals and sort them by file number.
	allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	files(allJournalFiles).sort()

	// Journals that will be recovered: those at or after the current journal
	// number, plus the recorded previous journal, per the session record.
	var recJournalFiles []storage.File
	for _, jf := range allJournalFiles {
		if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
			recJournalFiles = append(recJournalFiles, jf)
		}
	}

	var (
		of  storage.File // Obsolete file: fully replayed, pending removal.
		rec = &sessionRecord{}
	)

	// Recover journals.
	if len(recJournalFiles) > 0 {
		db.logf("journal@recovery F·%d", len(recJournalFiles))

		// Mark the highest journal file number as used so new allocations
		// don't collide with it.
		db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())

		var (
			// Options.
			strict      = db.s.o.GetStrict(opt.StrictJournal)
			checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
			writeBuffer = db.s.o.GetWriteBuffer()

			jr    *journal.Reader
			mdb   = memdb.New(db.s.icmp, writeBuffer)
			buf   = &util.Buffer{}
			batch = &Batch{}
		)

		for _, jf := range recJournalFiles {
			db.logf("journal@recovery recovering @%d", jf.Num())

			fr, err := jf.Open()
			if err != nil {
				return err
			}

			// Create or reset the journal reader instance; the reader is
			// reused across files.
			if jr == nil {
				jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
			} else {
				jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
			}

			// The previous journal was fully replayed: flush any remaining
			// memdb contents, commit the session record pointing past it,
			// and only then remove the obsolete journal file.
			if of != nil {
				if mdb.Len() > 0 {
					if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
						fr.Close()
						return err
					}
				}

				rec.setJournalNum(jf.Num())
				rec.setSeqNum(db.seq)
				if err := db.s.commit(rec); err != nil {
					fr.Close()
					return err
				}
				rec.resetAddedTables()

				of.Remove()
				of = nil
			}

			// Replay journal records to the memdb, one batch per record.
			mdb.Reset()
			for {
				r, err := jr.Next()
				if err != nil {
					if err == io.EOF {
						// End of this journal file; move on to the next.
						break
					}

					fr.Close()
					return errors.SetFile(err, jf)
				}

				buf.Reset()
				if _, err := buf.ReadFrom(r); err != nil {
					if err == io.ErrUnexpectedEOF {
						// This is the error returned due to corruption, with
						// strict == false: skip the truncated record.
						continue
					}

					fr.Close()
					return errors.SetFile(err, jf)
				}
				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
					if !strict && errors.IsCorrupted(err) {
						db.s.logf("journal error: %v (skipped)", err)
						// We won't apply the sequence number as it might be
						// corrupted.
						continue
					}

					fr.Close()
					return errors.SetFile(err, jf)
				}

				// Save the sequence number: the batch's starting seq plus the
				// number of entries it contained.
				db.seq = batch.seq + uint64(batch.Len())

				// Flush the memdb to tables if it has grown large enough, so
				// recovery memory use stays bounded by the write buffer.
				if mdb.Size() >= writeBuffer {
					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
						fr.Close()
						return err
					}

					mdb.Reset()
				}
			}

			fr.Close()
			of = jf
		}

		// Flush whatever remains in the memdb after the last journal.
		if mdb.Len() > 0 {
			if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal (and live memdb) for subsequent writes.
	if _, err := db.newMem(0); err != nil {
		return err
	}

	// Commit the session record pointing at the new journal.
	rec.setJournalNum(db.journalFile.Num())
	rec.setSeqNum(db.seq)
	if err := db.s.commit(rec); err != nil {
		// Close the just-created journal on commit error.
		if db.journal != nil {
			db.journal.Close()
			db.journalWriter.Close()
		}
		return err
	}

	// Remove the last obsolete journal file; it is superseded by the commit
	// above.
	if of != nil {
		of.Remove()
	}

	return nil
}