func newJournalWriter(file storage.File) (p *journalWriter, err error) {
	w := new(journalWriter)
	w.file = file
	w.writer, err = file.Create()
	if err != nil {
		return
	}
	w.journal = journal.NewWriter(w.writer)
	return w, nil
}
func newJournalReader(file storage.File, checksum bool, dropf journal.DropFunc) (p *journalReader, err error) {
	r := new(journalReader)
	r.file = file
	r.reader, err = file.Open()
	if err != nil {
		return
	}
	r.journal = journal.NewReader(r.reader, checksum, dropf)
	return r, nil
}
func newJournalReader(file storage.File, checksum bool, dropf journal.DropFunc) (p *journalReader, err error) {
	r, err := file.Open()
	if err != nil {
		return nil, err
	}
	jr, err := journal.NewReader(r, 0, checksum, dropf)
	if err != nil {
		return nil, err
	}
	return &journalReader{
		file:    file,
		reader:  r,
		journal: jr,
	}, nil
}
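// A minimal, self-contained sketch of the journal round trip that the constructors
// above wrap: records are appended through journal.NewWriter and read back through
// journal.NewReader. It assumes the public github.com/syndtr/goleveldb/leveldb/journal
// API (NewWriter, Writer.Next, Writer.Close, NewReader, Reader.Next) and the standard
// library imports "bytes", "fmt", "io", and "log"; the names journalRoundTripSketch
// and logDropper are hypothetical and not part of this package.

// logDropper is a trivial journal.Dropper that just logs dropped chunks.
type logDropper struct{}

func (logDropper) Drop(err error) { log.Println("journal: dropped:", err) }

func journalRoundTripSketch() error {
	buf := new(bytes.Buffer)

	// Write two records, each through its own io.Writer obtained from Next.
	jw := journal.NewWriter(buf)
	for _, rec := range [][]byte{[]byte("first record"), []byte("second record")} {
		w, err := jw.Next()
		if err != nil {
			return err
		}
		if _, err := w.Write(rec); err != nil {
			return err
		}
	}
	if err := jw.Close(); err != nil {
		return err
	}

	// Read the records back; nothing should be dropped from an uncorrupted buffer.
	jr := journal.NewReader(buf, logDropper{}, true, true)
	for {
		r, err := jr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		rec, err := io.ReadAll(r)
		if err != nil {
			return err
		}
		fmt.Printf("record: %s\n", rec)
	}
	return nil
}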
func (db *DB) recoverJournal() error {
	// Get all journals and sort them by file number.
	journalFiles_, err := db.s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	journalFiles := files(journalFiles_)
	journalFiles.sort()

	// Discard older journals.
	prev := -1
	for i, file := range journalFiles {
		if file.Num() >= db.s.stJournalNum {
			if prev >= 0 {
				i--
				journalFiles[i] = journalFiles[prev]
			}
			journalFiles = journalFiles[i:]
			break
		} else if file.Num() == db.s.stPrevJournalNum {
			prev = i
		}
	}

	var jr *journal.Reader
	var of storage.File
	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(db.s)
	buf := new(util.Buffer)
	// Options.
	strict := db.s.o.GetStrict(opt.StrictJournal)
	checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
	writeBuffer := db.s.o.GetWriteBuffer()
	recoverJournal := func(file storage.File) error {
		db.logf("journal@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		defer reader.Close()

		// Create/reset journal reader instance.
		if jr == nil {
			jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
		} else {
			jr.Reset(reader, dropper{db.s, file}, strict, checksum)
		}

		// Flush memdb and remove obsolete journal file.
		if of != nil {
			if mem.Len() > 0 {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
			}
			if err := cm.commit(file.Num(), db.seq); err != nil {
				return err
			}
			cm.reset()
			of.Remove()
			of = nil
		}

		// Replay journal to memdb.
		mem.Reset()
		for {
			r, err := jr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}
			buf.Reset()
			if _, err := buf.ReadFrom(r); err != nil {
				if err == io.ErrUnexpectedEOF {
					continue
				} else {
					return err
				}
			}
			if err := batch.decode(buf.Bytes()); err != nil {
				return err
			}
			if err := batch.memReplay(mem); err != nil {
				return err
			}

			// Save sequence number.
			db.seq = batch.seq + uint64(batch.len())

			// Flush it if large enough.
			if mem.Size() >= writeBuffer {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
				mem.Reset()
			}
		}

		of = file
		return nil
	}

	// Recover all journals.
	if len(journalFiles) > 0 {
		db.logf("journal@recovery F·%d", len(journalFiles))

		// Mark file number as used.
		db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())

		mem = memdb.New(db.s.icmp, writeBuffer)
		for _, file := range journalFiles {
			if err := recoverJournal(file); err != nil {
				return err
			}
		}

		// Flush the last journal.
		if mem.Len() > 0 {
			if err := cm.flush(mem, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := db.newMem(0); err != nil {
		return err
	}

	// Commit.
	if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
		// Close journal.
		if db.journal != nil {
			db.journal.Close()
			db.journalWriter.Close()
		}
		return err
	}

	// Remove the last obsolete journal file.
	if of != nil {
		of.Remove()
	}

	return nil
}
func (h *dbCorruptHarness) corrupt(ft storage.FileType, offset, n int) {
	p := &h.dbHarness
	t := p.t

	var file storage.File
	ff, _ := p.stor.GetFiles(ft)
	for _, f := range ff {
		if file == nil || f.Num() > file.Num() {
			file = f
		}
	}
	if file == nil {
		t.Fatalf("no such file with type %q", ft)
	}

	r, err := file.Open()
	if err != nil {
		t.Fatal("cannot open file: ", err)
	}
	x, err := r.Seek(0, 2)
	if err != nil {
		t.Fatal("cannot query file size: ", err)
	}
	m := int(x)
	if _, err := r.Seek(0, 0); err != nil {
		t.Fatal(err)
	}

	if offset < 0 {
		if -offset > m {
			offset = 0
		} else {
			offset = m + offset
		}
	}
	if offset > m {
		offset = m
	}
	if offset+n > m {
		n = m - offset
	}

	buf := make([]byte, m)
	_, err = io.ReadFull(r, buf)
	if err != nil {
		t.Fatal("cannot read file: ", err)
	}
	r.Close()

	for i := 0; i < n; i++ {
		buf[offset+i] ^= 0x80
	}

	err = file.Remove()
	if err != nil {
		t.Fatal("cannot remove old file: ", err)
	}
	w, err := file.Create()
	if err != nil {
		t.Fatal("cannot create new file: ", err)
	}
	_, err = w.Write(buf)
	if err != nil {
		t.Fatal("cannot write new file: ", err)
	}
	w.Close()
}
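// A hedged, standalone variant of the corruption step above, for experiments
// outside the test harness: it flips the high bit of n bytes starting at offset
// in an ordinary file on disk, with the same offset/length clamping as the
// harness. The name corruptFileSketch is hypothetical and not part of goleveldb;
// it only assumes the standard library "os" package.
func corruptFileSketch(path string, offset, n int) error {
	buf, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	// Clamp offset and n to the file size, allowing negative offsets from the end.
	if offset < 0 {
		if -offset > len(buf) {
			offset = 0
		} else {
			offset = len(buf) + offset
		}
	}
	if offset > len(buf) {
		offset = len(buf)
	}
	if offset+n > len(buf) {
		n = len(buf) - offset
	}
	// Flip the high bit of each byte in the chosen range.
	for i := 0; i < n; i++ {
		buf[offset+i] ^= 0x80
	}
	return os.WriteFile(path, buf, 0644)
}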
func (db *DB) recoverJournal() error {
	// Get all journals and sort them by file number.
	allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	files(allJournalFiles).sort()

	// Journals that will be recovered.
	var recJournalFiles []storage.File
	for _, jf := range allJournalFiles {
		if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
			recJournalFiles = append(recJournalFiles, jf)
		}
	}

	var (
		of  storage.File // Obsolete file.
		rec = &sessionRecord{}
	)

	// Recover journals.
	if len(recJournalFiles) > 0 {
		db.logf("journal@recovery F·%d", len(recJournalFiles))

		// Mark file number as used.
		db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())

		var (
			// Options.
			strict      = db.s.o.GetStrict(opt.StrictJournal)
			checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
			writeBuffer = db.s.o.GetWriteBuffer()

			jr    *journal.Reader
			mdb   = memdb.New(db.s.icmp, writeBuffer)
			buf   = &util.Buffer{}
			batch = &Batch{}
		)

		for _, jf := range recJournalFiles {
			db.logf("journal@recovery recovering @%d", jf.Num())

			fr, err := jf.Open()
			if err != nil {
				return err
			}

			// Create or reset journal reader instance.
			if jr == nil {
				jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
			} else {
				jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
			}

			// Flush memdb and remove obsolete journal file.
			if of != nil {
				if mdb.Len() > 0 {
					if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
						fr.Close()
						return err
					}
				}

				rec.setJournalNum(jf.Num())
				rec.setSeqNum(db.seq)
				if err := db.s.commit(rec); err != nil {
					fr.Close()
					return err
				}
				rec.resetAddedTables()

				of.Remove()
				of = nil
			}

			// Replay journal to memdb.
			mdb.Reset()
			for {
				r, err := jr.Next()
				if err != nil {
					if err == io.EOF {
						break
					}

					fr.Close()
					return errors.SetFile(err, jf)
				}

				buf.Reset()
				if _, err := buf.ReadFrom(r); err != nil {
					if err == io.ErrUnexpectedEOF {
						// This is the error returned due to corruption, with strict == false.
						continue
					}

					fr.Close()
					return errors.SetFile(err, jf)
				}
				if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
					if !strict && errors.IsCorrupted(err) {
						db.s.logf("journal error: %v (skipped)", err)
						// We won't apply the sequence number as it might be corrupted.
						continue
					}

					fr.Close()
					return errors.SetFile(err, jf)
				}

				// Save sequence number.
				db.seq = batch.seq + uint64(batch.Len())

				// Flush it if large enough.
				if mdb.Size() >= writeBuffer {
					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
						fr.Close()
						return err
					}

					mdb.Reset()
				}
			}

			fr.Close()
			of = jf
		}

		// Flush the last memdb.
		if mdb.Len() > 0 {
			if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := db.newMem(0); err != nil {
		return err
	}

	// Commit.
	rec.setJournalNum(db.journalFile.Num())
	rec.setSeqNum(db.seq)
	if err := db.s.commit(rec); err != nil {
		// Close journal on error.
		if db.journal != nil {
			db.journal.Close()
			db.journalWriter.Close()
		}
		return err
	}

	// Remove the last obsolete journal file.
	if of != nil {
		of.Remove()
	}

	return nil
}
func scanTable(f storage.File, checksum bool) (corrupted bool) {
	fi := storage.NewFileInfo(f)
	r, err := f.Open()
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	size, err := r.Seek(0, os.SEEK_END)
	if err != nil {
		log.Fatal(err)
	}

	o := &opt.Options{Strict: opt.NoStrict}
	if checksum {
		o.Strict = opt.StrictBlockChecksum | opt.StrictReader
	}
	tr, err := table.NewReader(r, size, fi, nil, bpool, o)
	if err != nil {
		log.Fatal(err)
	}
	defer tr.Release()

	checkData := func(i int, t string, data []byte) bool {
		if len(data) == 0 {
			panic(fmt.Sprintf("[%v] nil data: i=%d t=%s", fi, i, t))
		}

		checksum0, checksum1 := dataChecksum(data)
		if checksum0 != checksum1 {
			atomic.StoreUint32(&fail, 1)
			atomic.StoreUint32(&done, 1)
			corrupted = true

			data0, data1 := dataSplit(data)
			data0c0, data0c1 := dataChecksum(data0)
			data1c0, data1c1 := dataChecksum(data1)
			log.Printf("FATAL: [%v] Corrupted data i=%d t=%s (%#x != %#x): %x(%v) vs %x(%v)",
				fi, i, t, checksum0, checksum1, data0, data0c0 == data0c1, data1, data1c0 == data1c1)
			return true
		}
		return false
	}

	iter := tr.NewIterator(nil, nil)
	defer iter.Release()
	for i := 0; iter.Next(); i++ {
		ukey, _, kt, kerr := parseIkey(iter.Key())
		if kerr != nil {
			atomic.StoreUint32(&fail, 1)
			atomic.StoreUint32(&done, 1)
			corrupted = true

			log.Printf("FATAL: [%v] Corrupted ikey i=%d: %v", fi, i, kerr)
			return
		}
		if checkData(i, "key", ukey) {
			return
		}
		if kt == ktVal && checkData(i, "value", iter.Value()) {
			return
		}
	}
	if err := iter.Error(); err != nil {
		if errors.IsCorrupted(err) {
			atomic.StoreUint32(&fail, 1)
			atomic.StoreUint32(&done, 1)
			corrupted = true

			log.Printf("FATAL: [%v] Corruption detected: %v", fi, err)
		} else {
			log.Fatal(err)
		}
	}
	return
}
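// A rough external counterpart to scanTable, using only the public goleveldb API:
// open the database with all strict checks enabled and walk every key so that
// block checksum failures surface through the iterator as corruption errors.
// The name scanDBSketch is illustrative; leveldb.OpenFile, opt.StrictAll,
// iterator Next/Error/Release, and errors.IsCorrupted are the public API of
// github.com/syndtr/goleveldb/leveldb, .../leveldb/opt, and .../leveldb/errors.
func scanDBSketch(path string) (corrupted bool, err error) {
	db, err := leveldb.OpenFile(path, &opt.Options{
		Strict:         opt.StrictAll,
		ErrorIfMissing: true,
	})
	if err != nil {
		return false, err
	}
	defer db.Close()

	iter := db.NewIterator(nil, nil)
	defer iter.Release()
	for iter.Next() {
		// Touch key and value so lazily-read blocks are actually verified.
		_, _ = iter.Key(), iter.Value()
	}
	if err := iter.Error(); err != nil {
		if errors.IsCorrupted(err) {
			return true, nil
		}
		return false, err
	}
	return false, nil
}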
func (d *DB) recoverJournal() error {
	s := d.s

	ff0, err := s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	ff1 := files(ff0)
	ff1.sort()
	ff2 := make([]storage.File, 0, len(ff1))
	for _, file := range ff1 {
		if file.Num() >= s.stJournalNum || file.Num() == s.stPrevJournalNum {
			s.markFileNum(file.Num())
			ff2 = append(ff2, file)
		}
	}

	var jr *journal.Reader
	var of storage.File
	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(s)
	buf := new(util.Buffer)
	// Options.
	strict := s.o.GetStrict(opt.StrictJournal)
	checksum := s.o.GetStrict(opt.StrictJournalChecksum)
	writeBuffer := s.o.GetWriteBuffer()
	recoverJournal := func(file storage.File) error {
		s.logf("journal@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		defer reader.Close()
		if jr == nil {
			jr = journal.NewReader(reader, dropper{s, file}, strict, checksum)
		} else {
			jr.Reset(reader, dropper{s, file}, strict, checksum)
		}
		if of != nil {
			if mem.Len() > 0 {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
			}
			if err := cm.commit(file.Num(), d.seq); err != nil {
				return err
			}
			cm.reset()
			of.Remove()
			of = nil
		}
		// Reset memdb.
		mem.Reset()
		for {
			r, err := jr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}
			buf.Reset()
			if _, err := buf.ReadFrom(r); err != nil {
				if strict {
					return err
				}
				continue
			}
			if err := batch.decode(buf.Bytes()); err != nil {
				return err
			}
			if err := batch.memReplay(mem); err != nil {
				return err
			}
			d.seq = batch.seq + uint64(batch.len())
			if mem.Size() >= writeBuffer {
				// Large enough, flush it.
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
				// Reset memdb.
				mem.Reset()
			}
		}
		of = file
		return nil
	}

	// Recover all journals.
	if len(ff2) > 0 {
		s.logf("journal@recovery F·%d", len(ff2))
		mem = memdb.New(s.icmp, writeBuffer)
		for _, file := range ff2 {
			if err := recoverJournal(file); err != nil {
				return err
			}
		}

		// Flush the last journal.
		if mem.Len() > 0 {
			if err := cm.flush(mem, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := d.newMem(0); err != nil {
		return err
	}

	// Commit.
	if err := cm.commit(d.journalFile.Num(), d.seq); err != nil {
		return err
	}

	// Remove the last journal.
	if of != nil {
		of.Remove()
	}

	return nil
}
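// The recovery paths above consult opt.StrictJournal and opt.StrictJournalChecksum
// through GetStrict, and flush the replay memdb whenever it reaches the configured
// write buffer size. From the caller's side those knobs are set on opt.Options when
// the database is opened; a minimal sketch, assuming only the public API of
// github.com/syndtr/goleveldb/leveldb and .../leveldb/opt (the function name
// openWithStrictJournalSketch is hypothetical):
func openWithStrictJournalSketch(path string) (*leveldb.DB, error) {
	o := &opt.Options{
		// Fail recovery on journal corruption instead of dropping damaged records.
		Strict: opt.StrictJournal | opt.StrictJournalChecksum,
		// WriteBuffer also bounds how much journal is replayed into one memdb
		// before it is flushed to a table during recovery (4 MiB is the default).
		WriteBuffer: 4 * 1024 * 1024,
	}
	return leveldb.OpenFile(path, o)
}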