func Fuzz(data []byte) int { n, err := snappy.DecodedLen(data) if err != nil || n > 1e6 { return 0 } if n < 0 { panic("negative decoded len") } dec, err := snappy.Decode(nil, data) if err != nil { if dec != nil { panic("dec is not nil") } return 0 } if len(dec) != n { println(len(dec), n) panic("bad decoded len") } n = snappy.MaxEncodedLen(len(dec)) enc := snappy.Encode(nil, dec) if len(enc) > n { panic("bad encoded len") } dec1, err := snappy.Decode(nil, enc) if err != nil { panic(err) } if bytes.Compare(dec, dec1) != 0 { panic("not equal") } return 1 }
func (snappyMessageCompressor) decompressData(dst, src []byte) (n int, err error) { n, err = snappy.DecodedLen(src) if err != nil { return } if n < 0 || n > len(dst) { err = io.ErrShortBuffer return } _, err = snappy.Decode(dst, src) return }
func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { data := r.bpool.Get(int(bh.length + blockTrailerLen)) if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { return nil, err } if verifyChecksum { n := bh.length + 1 checksum0 := binary.LittleEndian.Uint32(data[n:]) checksum1 := util.NewCRC(data[:n]).Value() if checksum0 != checksum1 { r.bpool.Put(data) return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) } } switch data[bh.length] { case blockTypeNoCompression: data = data[:bh.length] case blockTypeSnappyCompression: decLen, err := snappy.DecodedLen(data[:bh.length]) if err != nil { return nil, r.newErrCorruptedBH(bh, err.Error()) } decData := r.bpool.Get(decLen) decData, err = snappy.Decode(decData, data[:bh.length]) r.bpool.Put(data) if err != nil { r.bpool.Put(decData) return nil, r.newErrCorruptedBH(bh, err.Error()) } data = decData default: r.bpool.Put(data) return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) } return data, nil }
// Next indicates if there is a value to read.
//
// It reads one WAL entry (1-byte type, 4-byte big-endian length, then a
// snappy-compressed payload), decompresses it, and unmarshals it into
// r.entry. On any error it records the error in r.err and still returns
// true so the caller invokes the read path and observes the error; it
// returns false only on a clean EOF at an entry boundary.
func (r *WALSegmentReader) Next() bool {
	b := getBuf(defaultBufLen)
	// NOTE: the defer captures this original pooled buffer; if b is
	// reassigned to a larger slice below, that larger slice is simply
	// not returned to the pool.
	defer putBuf(b)
	var nReadOK int

	// read the type and the length of the entry
	n, err := io.ReadFull(r.r, b[:5])
	if err == io.EOF {
		return false
	}

	if err != nil {
		r.err = err
		// We return true here because we want the client code to call read which
		// will return this error to be handled.
		return true
	}
	nReadOK += n

	entryType := b[0]
	length := binary.BigEndian.Uint32(b[1:5])

	// read the compressed block and decompress it
	if int(length) > len(b) {
		b = make([]byte, length)
	}

	n, err = io.ReadFull(r.r, b[:length])
	if err != nil {
		r.err = err
		return true
	}
	nReadOK += n

	decLen, err := snappy.DecodedLen(b[:length])
	if err != nil {
		r.err = err
		return true
	}
	decBuf := getBuf(decLen)
	defer putBuf(decBuf)

	// NOTE(review): data may alias decBuf, which goes back to the pool
	// when this function returns; assumes UnmarshalBinary copies out
	// anything it retains — confirm against the entry implementations.
	data, err := snappy.Decode(decBuf, b[:length])
	if err != nil {
		r.err = err
		return true
	}

	// and marshal it and send it to the cache
	switch WalEntryType(entryType) {
	case WriteWALEntryType:
		r.entry = &WriteWALEntry{
			Values: map[string][]Value{},
		}
	case DeleteWALEntryType:
		r.entry = &DeleteWALEntry{}
	case DeleteRangeWALEntryType:
		r.entry = &DeleteRangeWALEntry{}
	default:
		r.err = fmt.Errorf("unknown wal entry type: %v", entryType)
		return true
	}

	r.err = r.entry.UnmarshalBinary(data)
	if r.err == nil {
		// Read and decode of this entry was successful.
		r.n += int64(nReadOK)
	}

	return true
}
// writeIndex writes a set of points for a single key.
//
// Points in a arrive as 8-byte big-endian timestamp + payload and are
// deduped, converted to length-prefixed entries, and appended as new
// snappy-compressed blocks when they sort entirely after the last
// on-disk block (fast path). Otherwise all overlapping blocks are
// unpacked, merged with the new entries (new entries win on timestamp
// collision), and rewritten (slow path).
func (e *Engine) writeIndex(tx *bolt.Tx, key string, a [][]byte) error {
	// Ignore if there are no points.
	if len(a) == 0 {
		return nil
	}
	e.statMap.Add(statPointsWrite, int64(len(a)))

	// Create or retrieve series bucket.
	bkt, err := tx.Bucket([]byte("points")).CreateBucketIfNotExists([]byte(key))
	if err != nil {
		return fmt.Errorf("create series bucket: %s", err)
	}
	c := bkt.Cursor()

	// Ensure the slice is sorted before retrieving the time range.
	a = tsdb.DedupeEntries(a)
	e.statMap.Add(statPointsWriteDedupe, int64(len(a)))

	// Convert the raw time and byte slices to entries with lengths.
	for i, p := range a {
		timestamp := int64(btou64(p[0:8]))
		a[i] = MarshalEntry(timestamp, p[8:])
	}

	// Determine time range of new data (entries are sorted, so first and
	// last carry the min and max timestamps).
	tmin, tmax := int64(btou64(a[0][0:8])), int64(btou64(a[len(a)-1][0:8]))

	// If tmin is after the last block then append new blocks.
	//
	// This is the optimized fast path. Otherwise we need to merge the points
	// with existing blocks on disk and rewrite all the blocks for that range.
	if k, v := c.Last(); k == nil {
		// Bucket is empty: everything is an append.
		bkt.FillPercent = 1.0
		if err := e.writeBlocks(bkt, a); err != nil {
			return fmt.Errorf("new blocks: %s", err)
		}
		return nil
	} else {
		// Determine uncompressed block size. Block value layout is
		// 8-byte max timestamp followed by the snappy-compressed entries.
		sz, err := snappy.DecodedLen(v[8:])
		if err != nil {
			return fmt.Errorf("snappy decoded len: %s", err)
		}

		// Append new blocks if our time range is past the last on-disk time
		// and if our previous block was at least the minimum block size.
		if int64(btou64(v[0:8])) < tmin && sz >= e.BlockSize {
			bkt.FillPercent = 1.0
			if err := e.writeBlocks(bkt, a); err != nil {
				return fmt.Errorf("append blocks: %s", err)
			}
			return nil
		}

		// Otherwise fallthrough to slower insert mode.
		e.statMap.Add(statSlowInsert, 1)
	}

	// Generate map of inserted keys so existing entries with the same
	// timestamp are dropped (new data overwrites old).
	m := make(map[int64]struct{})
	for _, b := range a {
		m[int64(btou64(b[0:8]))] = struct{}{}
	}

	// If time range overlaps existing blocks then unpack full range and reinsert.
	var existing [][]byte
	for k, v := c.First(); k != nil; k, v = c.Next() {
		// Determine block range: bucket key is the block's min timestamp,
		// the value's first 8 bytes its max timestamp.
		bmin, bmax := int64(btou64(k)), int64(btou64(v[0:8]))

		// Skip over all blocks before the time range.
		// Exit once we reach a block that is beyond our time range.
		if bmax < tmin {
			continue
		} else if bmin > tmax {
			break
		}

		// Decode block.
		buf, err := snappy.Decode(nil, v[8:])
		if err != nil {
			return fmt.Errorf("decode block: %s", err)
		}

		// Copy out any entries that aren't being overwritten.
		for _, entry := range SplitEntries(buf) {
			if _, ok := m[int64(btou64(entry[0:8]))]; !ok {
				existing = append(existing, entry)
			}
		}

		// Delete block in database.
		c.Delete()
	}

	// Merge entries before rewriting.
	a = append(existing, a...)
	sort.Sort(tsdb.ByteSlices(a))

	// Rewrite points to new blocks.
	if err := e.writeBlocks(bkt, a); err != nil {
		return fmt.Errorf("rewrite blocks: %s", err)
	}

	return nil
}