func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
	// Compress the buffer if necessary.
	var b []byte
	if compression == opt.SnappyCompression {
		// Allocate scratch enough for compression and block trailer.
		if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
			w.compressionScratch = make([]byte, n)
		}
		compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
		n := len(compressed)
		b = compressed[:n+blockTrailerLen]
		b[n] = blockTypeSnappyCompression
	} else {
		tmp := buf.Alloc(blockTrailerLen)
		tmp[0] = blockTypeNoCompression
		b = buf.Bytes()
	}

	// Calculate the checksum.
	n := len(b) - 4
	checksum := util.NewCRC(b[:n]).Value()
	binary.LittleEndian.PutUint32(b[n:], checksum)

	// Write the buffer to the file.
	_, err = w.writer.Write(b)
	if err != nil {
		return
	}
	bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
	w.offset += uint64(len(b))
	return
}
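// Illustrative sketch (not part of the package): writeBlock appends a trailer of
// one compression-type byte plus a 4-byte little-endian checksum covering the
// payload and the type byte. The standalone helper below reproduces that layout
// using a LevelDB-style masked CRC32-C; it assumes util.NewCRC follows the same
// masking convention, and the demo* names and appendBlockTrailer are hypothetical.
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

const (
	demoBlockTrailerLen   = 5 // 1 type byte + 4 checksum bytes (assumed layout)
	demoTypeNoCompression = 0
	crcMaskDelta          = 0xa282ead8 // LevelDB-style checksum mask constant
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC computes a CRC32-C and masks it the way LevelDB stores checksums.
func maskedCRC(b []byte) uint32 {
	c := crc32.Checksum(b, castagnoli)
	return ((c >> 15) | (c << 17)) + crcMaskDelta
}

// appendBlockTrailer returns data with the compression-type byte and checksum
// appended, mirroring the on-disk block layout produced by writeBlock.
func appendBlockTrailer(data []byte, blockType byte) []byte {
	b := append(data, make([]byte, demoBlockTrailerLen)...)
	n := len(b) - demoBlockTrailerLen
	b[n] = blockType
	checksum := maskedCRC(b[:n+1]) // checksum covers payload + type byte
	binary.LittleEndian.PutUint32(b[n+1:], checksum)
	return b
}

func main() {
	block := appendBlockTrailer([]byte("example block payload"), demoTypeNoCompression)
	fmt.Printf("block length: %d (payload %d + trailer %d)\n",
		len(block), len(block)-demoBlockTrailerLen, demoBlockTrailerLen)
}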
func (db *DB) recoverJournal() error {
	// Get all journal files and sort them by file number.
	journalFiles_, err := db.s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	journalFiles := files(journalFiles_)
	journalFiles.sort()

	// Discard older journals.
	prev := -1
	for i, file := range journalFiles {
		if file.Num() >= db.s.stJournalNum {
			if prev >= 0 {
				i--
				journalFiles[i] = journalFiles[prev]
			}
			journalFiles = journalFiles[i:]
			break
		} else if file.Num() == db.s.stPrevJournalNum {
			prev = i
		}
	}

	var jr *journal.Reader
	var of storage.File
	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(db.s)
	buf := new(util.Buffer)
	// Options.
	strict := db.s.o.GetStrict(opt.StrictJournal)
	checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
	writeBuffer := db.s.o.GetWriteBuffer()
	recoverJournal := func(file storage.File) error {
		db.logf("journal@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		defer reader.Close()

		// Create/reset journal reader instance.
		if jr == nil {
			jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
		} else {
			jr.Reset(reader, dropper{db.s, file}, strict, checksum)
		}

		// Flush memdb and remove obsolete journal file.
		if of != nil {
			if mem.Len() > 0 {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
			}
			if err := cm.commit(file.Num(), db.seq); err != nil {
				return err
			}
			cm.reset()
			of.Remove()
			of = nil
		}

		// Replay journal to memdb.
		mem.Reset()
		for {
			r, err := jr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}

			buf.Reset()
			if _, err := buf.ReadFrom(r); err != nil {
				if err == io.ErrUnexpectedEOF {
					continue
				} else {
					return err
				}
			}
			if err := batch.decode(buf.Bytes()); err != nil {
				return err
			}
			if err := batch.memReplay(mem); err != nil {
				return err
			}

			// Save sequence number.
			db.seq = batch.seq + uint64(batch.len())

			// Flush it if large enough.
			if mem.Size() >= writeBuffer {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
				mem.Reset()
			}
		}

		of = file
		return nil
	}

	// Recover all journals.
	if len(journalFiles) > 0 {
		db.logf("journal@recovery F·%d", len(journalFiles))

		// Mark file number as used.
		db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())

		mem = memdb.New(db.s.icmp, writeBuffer)
		for _, file := range journalFiles {
			if err := recoverJournal(file); err != nil {
				return err
			}
		}

		// Flush the last memdb.
		if mem.Len() > 0 {
			if err := cm.flush(mem, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := db.newMem(0); err != nil {
		return err
	}

	// Commit.
	if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
		// Close journal.
		if db.journal != nil {
			db.journal.Close()
			db.journalWriter.Close()
		}
		return err
	}

	// Remove the last obsolete journal file.
	if of != nil {
		of.Remove()
	}

	return nil
}
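// Illustrative sketch (not part of the package): a minimal replay loop over a
// single journal stream, showing the journal.NewReader/Next usage that the
// closure in recoverJournal builds on. Each call to Next returns an io.Reader
// for one record (one encoded batch); io.EOF marks the end of the journal.
// logDropper and countJournalRecords are hypothetical helpers, and error
// handling is simplified compared to the recovery code above.
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

// logDropper satisfies journal.Dropper by logging dropped (corrupted) chunks.
type logDropper struct{}

func (logDropper) Drop(err error) { log.Printf("journal: dropped chunk: %v", err) }

// countJournalRecords reads every record from r and reports how many records
// and payload bytes were recovered.
func countJournalRecords(r io.Reader) (int, int, error) {
	jr := journal.NewReader(r, logDropper{}, false /* strict */, true /* checksum */)
	buf := new(bytes.Buffer)
	records, bytesRead := 0, 0
	for {
		rec, err := jr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return records, bytesRead, err
		}
		buf.Reset()
		n, err := buf.ReadFrom(rec)
		if err != nil {
			// Skip records that could not be read in full, mirroring the
			// non-strict path in recoverJournal above.
			continue
		}
		records++
		bytesRead += int(n)
	}
	return records, bytesRead, nil
}

func main() {
	// Write a couple of records with journal.Writer so the example is self-contained.
	var backing bytes.Buffer
	jw := journal.NewWriter(&backing)
	for _, payload := range []string{"first batch", "second batch"} {
		w, err := jw.Next()
		if err != nil {
			log.Fatal(err)
		}
		if _, err := w.Write([]byte(payload)); err != nil {
			log.Fatal(err)
		}
	}
	if err := jw.Close(); err != nil {
		log.Fatal(err)
	}

	records, n, err := countJournalRecords(bytes.NewReader(backing.Bytes()))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("recovered %d records, %d bytes\n", records, n)
}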
func (d *DB) recoverJournal() error {
	s := d.s

	ff0, err := s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	ff1 := files(ff0)
	ff1.sort()

	// Keep journals not older than the current journal, plus the previous
	// journal if still referenced, and mark their file numbers as used.
	ff2 := make([]storage.File, 0, len(ff1))
	for _, file := range ff1 {
		if file.Num() >= s.stJournalNum || file.Num() == s.stPrevJournalNum {
			s.markFileNum(file.Num())
			ff2 = append(ff2, file)
		}
	}

	var jr *journal.Reader
	var of storage.File
	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(s)
	buf := new(util.Buffer)
	// Options.
	strict := s.o.GetStrict(opt.StrictJournal)
	checksum := s.o.GetStrict(opt.StrictJournalChecksum)
	writeBuffer := s.o.GetWriteBuffer()
	recoverJournal := func(file storage.File) error {
		s.logf("journal@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		defer reader.Close()

		// Create/reset journal reader instance.
		if jr == nil {
			jr = journal.NewReader(reader, dropper{s, file}, strict, checksum)
		} else {
			jr.Reset(reader, dropper{s, file}, strict, checksum)
		}

		// Flush memdb and remove obsolete journal file.
		if of != nil {
			if mem.Len() > 0 {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
			}
			if err := cm.commit(file.Num(), d.seq); err != nil {
				return err
			}
			cm.reset()
			of.Remove()
			of = nil
		}

		// Reset memdb.
		mem.Reset()
		for {
			r, err := jr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}

			buf.Reset()
			if _, err := buf.ReadFrom(r); err != nil {
				if strict {
					return err
				}
				continue
			}
			if err := batch.decode(buf.Bytes()); err != nil {
				return err
			}
			if err := batch.memReplay(mem); err != nil {
				return err
			}

			// Save sequence number.
			d.seq = batch.seq + uint64(batch.len())

			if mem.Size() >= writeBuffer {
				// Large enough, flush it.
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
				// Reset memdb.
				mem.Reset()
			}
		}

		of = file
		return nil
	}

	// Recover all journals.
	if len(ff2) > 0 {
		s.logf("journal@recovery F·%d", len(ff2))
		mem = memdb.New(s.icmp, writeBuffer)
		for _, file := range ff2 {
			if err := recoverJournal(file); err != nil {
				return err
			}
		}

		// Flush the last memdb.
		if mem.Len() > 0 {
			if err := cm.flush(mem, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := d.newMem(0); err != nil {
		return err
	}

	// Commit.
	if err := cm.commit(d.journalFile.Num(), d.seq); err != nil {
		return err
	}

	// Remove the last obsolete journal.
	if of != nil {
		of.Remove()
	}

	return nil
}
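// Illustrative sketch (not part of the package): both recovery variants above
// flush the memdb to a table whenever its in-memory size reaches the write
// buffer and then reset it. The sketch below reproduces that threshold pattern;
// flushToTable is a hypothetical stand-in for cMem.flush, and the writeBuffer
// constant stands in for the value returned by the options above.
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

// flushToTable stands in for persisting the memdb contents as a table file;
// here it only reports what would be written.
func flushToTable(mem *memdb.DB) error {
	fmt.Printf("flush: %d entries, %d bytes\n", mem.Len(), mem.Size())
	return nil
}

func main() {
	const writeBuffer = 4 << 10 // 4 KiB threshold for this demo

	mem := memdb.New(comparer.DefaultComparer, writeBuffer)
	for i := 0; i < 1000; i++ {
		key := []byte(fmt.Sprintf("key-%04d", i))
		value := make([]byte, 64)
		if err := mem.Put(key, value); err != nil {
			panic(err)
		}
		// Flush and reset once the accumulated data reaches the write buffer,
		// just as recoverJournal does while replaying batches.
		if mem.Size() >= writeBuffer {
			if err := flushToTable(mem); err != nil {
				panic(err)
			}
			mem.Reset()
		}
	}
	// Flush whatever remains, like the final "flush the last memdb" step.
	if mem.Len() > 0 {
		if err := flushToTable(mem); err != nil {
			panic(err)
		}
	}
}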