// fillHeader fills in the header for the pending chunk.
func (w *Writer) fillHeader(last bool) {
	if w.i+headerSize > w.j || w.j > blockSize {
		panic("leveldb/journal: bad writer state")
	}
	if last {
		if w.first {
			w.buf[w.i+6] = fullChunkType
		} else {
			w.buf[w.i+6] = lastChunkType
		}
	} else {
		if w.first {
			w.buf[w.i+6] = firstChunkType
		} else {
			w.buf[w.i+6] = middleChunkType
		}
	}
	binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value())
	binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize))
}
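// Illustrative sketch, not part of the original package: decodeChunkHeader is a
// hypothetical helper that shows the 7-byte chunk header layout fillHeader
// produces: a 4-byte little-endian checksum over the chunk type byte plus the
// payload, a 2-byte little-endian payload length, and a 1-byte chunk type. The
// function name and signature are assumptions made for illustration only.
func decodeChunkHeader(buf []byte) (checksum uint32, length uint16, chunkType byte) {
	checksum = binary.LittleEndian.Uint32(buf[0:4])
	length = binary.LittleEndian.Uint16(buf[4:6])
	chunkType = buf[6]
	return
}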
// writeBlock writes the content of buf to the table file as a single block,
// optionally snappy-compressing it, appends the block trailer (a one-byte
// compression type followed by a 4-byte checksum over the data and the type
// byte), and returns the handle of the written block.
func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
	// Compress the buffer if necessary.
	var b []byte
	if compression == opt.SnappyCompression {
		// Allocate scratch enough for compression and block trailer.
		if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
			w.compressionScratch = make([]byte, n)
		}
		var compressed []byte
		compressed, err = snappy.Encode(w.compressionScratch, buf.Bytes())
		if err != nil {
			return
		}
		n := len(compressed)
		b = compressed[:n+blockTrailerLen]
		b[n] = blockTypeSnappyCompression
	} else {
		tmp := buf.Alloc(blockTrailerLen)
		tmp[0] = blockTypeNoCompression
		b = buf.Bytes()
	}

	// Calculate the checksum.
	n := len(b) - 4
	checksum := util.NewCRC(b[:n]).Value()
	binary.LittleEndian.PutUint32(b[n:], checksum)

	// Write the buffer to the file.
	_, err = w.writer.Write(b)
	if err != nil {
		return
	}
	bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
	w.offset += uint64(len(b))
	return
}
// verifyChecksum reports whether the trailing 4-byte checksum stored in data
// matches the checksum computed over the preceding bytes.
func verifyChecksum(data []byte) bool {
	n := len(data) - 4
	checksum0 := binary.LittleEndian.Uint32(data[n:])
	checksum1 := util.NewCRC(data[:n]).Value()
	return checksum0 == checksum1
}
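// Illustrative sketch, not the package's actual read path: readRawBlock shows
// how a block written by writeBlock above could be read back and validated. It
// assumes blockHandle has offset and length fields (as suggested by the
// composite literal in writeBlock), that the errors package is imported, and
// that the same blockTrailerLen and blockType* constants are in scope; the
// function name and error messages are invented for illustration.
func readRawBlock(f io.ReaderAt, bh blockHandle) ([]byte, error) {
	// A stored block is the block data followed by the blockTrailerLen-byte
	// trailer: a 1-byte compression type and a 4-byte checksum.
	data := make([]byte, bh.length+blockTrailerLen)
	if _, err := f.ReadAt(data, int64(bh.offset)); err != nil {
		return nil, err
	}
	if !verifyChecksum(data) {
		return nil, errors.New("leveldb/table: checksum mismatch")
	}
	switch data[bh.length] {
	case blockTypeNoCompression:
		return data[:bh.length], nil
	case blockTypeSnappyCompression:
		return snappy.Decode(nil, data[:bh.length])
	default:
		return nil, errors.New("leveldb/table: unknown compression type")
	}
}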
// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
// next block into the buffer if necessary.
func (r *Reader) nextChunk(wantFirst, skip bool) error {
	for {
		if r.j+headerSize <= r.n {
			checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
			length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
			chunkType := r.buf[r.j+6]
			var err error
			if checksum == 0 && length == 0 && chunkType == 0 {
				// Drop entire block.
				err = DroppedError{r.n - r.j, "zero header"}
				r.i = r.n
				r.j = r.n
			} else {
				m := r.n - r.j
				r.i = r.j + headerSize
				r.j = r.j + headerSize + int(length)
				if r.j > r.n {
					// Drop entire block.
					err = DroppedError{m, "chunk length overflows block"}
					r.i = r.n
					r.j = r.n
				} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
					// Drop entire block.
					err = DroppedError{m, "checksum mismatch"}
					r.i = r.n
					r.j = r.n
				}
			}
			if wantFirst && err == nil && chunkType != fullChunkType && chunkType != firstChunkType {
				if skip {
					// The chunk is intentionally skipped.
					if chunkType == lastChunkType {
						skip = false
					}
					continue
				} else {
					// Drop the chunk.
					err = DroppedError{r.j - r.i + headerSize, "orphan chunk"}
				}
			}
			if err == nil {
				r.last = chunkType == fullChunkType || chunkType == lastChunkType
			} else {
				if r.dropper != nil {
					r.dropper.Drop(err)
				}
				if r.strict {
					r.err = err
				}
			}
			return err
		}
		if r.n < blockSize && r.n > 0 {
			// This is the last block.
			if r.j != r.n {
				r.err = io.ErrUnexpectedEOF
			} else {
				r.err = io.EOF
			}
			return r.err
		}
		n, err := io.ReadFull(r.r, r.buf[:])
		if err != nil && err != io.ErrUnexpectedEOF {
			r.err = err
			return r.err
		}
		if n == 0 {
			r.err = io.EOF
			return r.err
		}
		r.i, r.j, r.n = 0, 0, n
	}
}
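// Illustrative usage sketch, assuming the Reader exposes a Next method that
// returns an io.Reader for each journal record (as goleveldb's journal package
// does) and that io/ioutil is imported; readJournal is an invented name.
// nextChunk above is the low-level loop such a Next/Read pair builds on: each
// call advances r.buf[r.i:r.j] to the next chunk's payload, refilling the
// block-sized buffer from the underlying reader when the current block is
// exhausted.
func readJournal(r *Reader) ([][]byte, error) {
	var records [][]byte
	for {
		rec, err := r.Next()
		if err == io.EOF {
			return records, nil
		}
		if err != nil {
			return nil, err
		}
		b, err := ioutil.ReadAll(rec)
		if err != nil {
			return nil, err
		}
		records = append(records, b)
	}
}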