func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
	// Compress the buffer if necessary.
	var b []byte
	if compression == opt.SnappyCompression {
		// Allocate scratch enough for compression and block trailer.
		if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
			w.compressionScratch = make([]byte, n)
		}
		var compressed []byte
		compressed, err = snappy.Encode(w.compressionScratch, buf.Bytes())
		if err != nil {
			return
		}
		n := len(compressed)
		b = compressed[:n+blockTrailerLen]
		b[n] = blockTypeSnappyCompression
	} else {
		tmp := buf.Alloc(blockTrailerLen)
		tmp[0] = blockTypeNoCompression
		b = buf.Bytes()
	}

	// Calculate the checksum.
	n := len(b) - 4
	checksum := util.NewCRC(b[:n]).Value()
	binary.LittleEndian.PutUint32(b[n:], checksum)

	// Write the buffer to the file.
	_, err = w.writer.Write(b)
	if err != nil {
		return
	}
	bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
	w.offset += uint64(len(b))
	return
}
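
// Illustrative sketch only, not part of the writer: assuming the trailer
// layout produced by writeBlock above (block data, then a 1-byte compression
// type, then a 4-byte little-endian CRC covering everything before it), this
// shows how a reader could validate and strip that trailer. The name
// checkBlockTrailer is hypothetical.
func checkBlockTrailer(b []byte) (data []byte, compType byte, ok bool) {
	if len(b) < blockTrailerLen {
		return nil, 0, false
	}
	n := len(b) - 4
	// The checksum stored by writeBlock covers b[:n], i.e. the block data
	// plus the compression-type byte.
	if util.NewCRC(b[:n]).Value() != binary.LittleEndian.Uint32(b[n:]) {
		return nil, 0, false
	}
	return b[:n-1], b[n-1], true
}
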
func (d *DB) recoverJournal() error {
	s := d.s
	icmp := s.cmp

	ff0, err := s.getFiles(storage.TypeJournal)
	if err != nil {
		return err
	}
	ff1 := files(ff0)
	ff1.sort()
	ff2 := make([]storage.File, 0, len(ff1))
	for _, file := range ff1 {
		if file.Num() >= s.stJournalNum || file.Num() == s.stPrevJournalNum {
			s.markFileNum(file.Num())
			ff2 = append(ff2, file)
		}
	}

	var jr *journal.Reader
	var of storage.File
	var mem *memdb.DB
	batch := new(Batch)
	cm := newCMem(s)
	buf := new(util.Buffer)
	// Options.
	strict := s.o.GetStrict(opt.StrictJournal)
	checksum := s.o.GetStrict(opt.StrictJournalChecksum)
	writeBuffer := s.o.GetWriteBuffer()
	recoverJournal := func(file storage.File) error {
		s.logf("journal@recovery recovering @%d", file.Num())
		reader, err := file.Open()
		if err != nil {
			return err
		}
		defer reader.Close()
		if jr == nil {
			jr = journal.NewReader(reader, dropper{s, file}, strict, checksum)
		} else {
			jr.Reset(reader, dropper{s, file}, strict, checksum)
		}
		if of != nil {
			if mem.Len() > 0 {
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
			}
			if err := cm.commit(file.Num(), d.seq); err != nil {
				return err
			}
			cm.reset()
			of.Remove()
			of = nil
		}
		// Reset memdb.
		mem.Reset()
		for {
			r, err := jr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}
			buf.Reset()
			if _, err := buf.ReadFrom(r); err != nil {
				if strict {
					return err
				}
				continue
			}
			if err := batch.decode(buf.Bytes()); err != nil {
				return err
			}
			if err := batch.memReplay(mem); err != nil {
				return err
			}
			d.seq = batch.seq + uint64(batch.len())
			if mem.Size() >= writeBuffer {
				// Large enough, flush it.
				if err := cm.flush(mem, 0); err != nil {
					return err
				}
				// Reset memdb.
				mem.Reset()
			}
		}
		of = file
		return nil
	}

	// Recover all journals.
	if len(ff2) > 0 {
		s.logf("journal@recovery F·%d", len(ff2))
		mem = memdb.New(icmp, toPercent(writeBuffer, kWriteBufferPercent))
		for _, file := range ff2 {
			if err := recoverJournal(file); err != nil {
				return err
			}
		}
		// Flush the last journal.
		if mem.Len() > 0 {
			if err := cm.flush(mem, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := d.newMem(); err != nil {
		return err
	}

	// Commit.
	if err := cm.commit(d.journalFile.Num(), d.seq); err != nil {
		return err
	}

	// Remove the last journal.
	if of != nil {
		of.Remove()
	}

	return nil
}
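
// Illustrative sketch only, not part of the DB: it restates the journal
// selection rule used at the top of recoverJournal above. Only journals
// numbered at or after the manifest's current journal number, plus the
// explicitly recorded previous journal, can still hold writes that never
// reached a table file, so only those are replayed. The helper name
// selectRecoverableJournals is hypothetical, and it omits the markFileNum
// bookkeeping done in the method itself.
func selectRecoverableJournals(journals []storage.File, stJournalNum, stPrevJournalNum uint64) []storage.File {
	keep := make([]storage.File, 0, len(journals))
	for _, f := range journals {
		if f.Num() >= stJournalNum || f.Num() == stPrevJournalNum {
			keep = append(keep, f)
		}
	}
	return keep
}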