func Fuzz(data []byte) int { n, err := snappy.DecodedLen(data) if err != nil || n > 1e6 { return 0 } if n < 0 { panic("negative decoded len") } dec, err := snappy.Decode(nil, data) if err != nil { if dec != nil { panic("dec is not nil") } return 0 } if len(dec) != n { println(len(dec), n) panic("bad decoded len") } n = snappy.MaxEncodedLen(len(dec)) enc := snappy.Encode(nil, dec) if len(enc) > n { panic("bad encoded len") } dec1, err := snappy.Decode(nil, enc) if err != nil { panic(err) } if bytes.Compare(dec, dec1) != 0 { panic("not equal") } return 1 }
func (*Segment) snappyDecode(src []byte) ([]byte, error) { if bytes.Equal(src[:8], snappyMagic) { var ( pos = uint32(16) max = uint32(len(src)) dst = make([]byte, 0, len(src)) chunk []byte err error ) for pos < max { size := binary.BigEndian.Uint32(src[pos : pos+4]) pos += 4 chunk, err = snappy.Decode(chunk, src[pos:pos+size]) if err != nil { return nil, err } pos += size dst = append(dst, chunk...) } return dst, nil } return snappy.Decode(nil, src) }
func snappyDecode(b []byte) ([]byte, error) { if !bytes.HasPrefix(b, snappyJavaMagic) { return snappy.Decode(nil, b) } // See https://github.com/xerial/snappy-java/blob/develop/src/main/java/org/xerial/snappy/SnappyInputStream.java version := binary.BigEndian.Uint32(b[8:12]) if version != 1 { return nil, fmt.Errorf("cannot handle snappy-java codec version other than 1 (got %d)", version) } // b[12:16] is the "compatible version"; ignore for now var ( decoded = make([]byte, 0, len(b)) chunk []byte err error ) for i := 16; i < len(b); { n := int(binary.BigEndian.Uint32(b[i : i+4])) i += 4 chunk, err = snappy.Decode(chunk, b[i:i+n]) if err != nil { return nil, err } i += n decoded = append(decoded, chunk...) } return decoded, nil }
// Decode decodes snappy data whether it is traditional unframed // or includes the xerial framing format. func Decode(src []byte) ([]byte, error) { if !bytes.Equal(src[:8], xerialHeader) { return master.Decode(nil, src) } var ( pos = uint32(16) max = uint32(len(src)) dst = make([]byte, 0, len(src)) chunk []byte err error ) for pos < max { size := binary.BigEndian.Uint32(src[pos : pos+4]) pos += 4 chunk, err = master.Decode(chunk, src[pos:pos+size]) if err != nil { return nil, err } pos += size dst = append(dst, chunk...) } return dst, nil }
func (e *Engine) unsnappy() error { data, err := snappy.Decode(nil, e.stack.Pop()) if err == nil { e.stack.Push(data) } return err }
func loadDigest(w http.ResponseWriter, r *http.Request, lg loghttp.FuncBufUniv, fs fsi.FileSystem, fnDigest string, treeX *DirTree) { fnDigestSnappied := strings.Replace(fnDigest, ".json", ".json.snappy", -1) bts, err := fs.ReadFile(fnDigestSnappied) if err == nil { btsDec := []byte{} lg("encoded digest loaded, size %vkB", len(bts)/1024) btsDec, err := snappy.Decode(nil, bts) if err != nil { lg(err) return } lg("digest decoded from %vkB to %vkB", len(bts)/1024, len(btsDec)/1024) bts = btsDec } else { bts, err = fs.ReadFile(fnDigest) lg(err) } if err == nil { err = json.Unmarshal(bts, &treeX) lg(err) } lg("DirTree %5.2vkB loaded for %v", len(bts)/1024, fnDigest) }
func TestSnappyCompressor(t *testing.T) { c := SnappyCompressor{} if c.Name() != "snappy" { t.Fatalf("expected name to be 'snappy', got %v", c.Name()) } str := "My Test String" //Test Encoding expected := snappy.Encode(nil, []byte(str)) if res, err := c.Encode([]byte(str)); err != nil { t.Fatalf("failed to encode '%v' with error %v", str, err) } else if bytes.Compare(expected, res) != 0 { t.Fatal("failed to match the expected encoded value with the result encoded value.") } val, err := c.Encode([]byte(str)) if err != nil { t.Fatalf("failed to encode '%v' with error '%v'", str, err) } //Test Decoding if expected, err := snappy.Decode(nil, val); err != nil { t.Fatalf("failed to decode '%v' with error %v", val, err) } else if res, err := c.Decode(val); err != nil { t.Fatalf("failed to decode '%v' with error %v", val, err) } else if bytes.Compare(expected, res) != 0 { t.Fatal("failed to match the expected decoded value with the result decoded value.") } }
// Load gets data from a file. func (d *Data) Load() error { if fileExists(d.File) { // open file fh, err := os.OpenFile(d.File, os.O_RDONLY, 0655) if err != nil { return err } defer fh.Close() // read content rh, err := ioutil.ReadAll(fh) if err != nil { return err } // decompress content dh, err := snappy.Decode(nil, rh) if err != nil { return err } // decode content gh := bytes.NewReader(dh) dedata := gob.NewDecoder(gh) err = dedata.Decode(&d.Store) if err != nil { return err } } return nil }
func (b *EncryptBackend) Get(hash string) (data []byte, err error) { ref, _ := b.index[hash] enc, err := b.dest.Get(ref) if err != nil { return data, err } box := enc[headerSize:] var nonce [24]byte encData := make([]byte, len(box)-24) copy(nonce[:], box[:24]) copy(encData[:], box[24:]) out := make([]byte, len(box)-24) out, success := secretbox.Open(nil, encData, &nonce, b.key) if !success { return data, fmt.Errorf("failed to decrypt blob %v/%v", hash, ref) } // Decode snappy data data, err = snappy.Decode(nil, out) if err != nil { return data, fmt.Errorf("failed to decode blob %v/%v", hash, ref) } blobsDownloaded.Add(b.dest.String(), 1) bytesDownloaded.Add(b.dest.String(), int64(len(enc))) return }
// Next indicates if there is a value to read
//
// It reads the next entry's 5-byte header (1 type byte + 4-byte length),
// then the compressed payload, snappy-decodes it, and unmarshals it into
// r.entry. Failures are stashed in r.err and Next still returns true so
// the caller surfaces the error on the subsequent read call; only a clean
// EOF on the header returns false. r.n advances only on full success.
func (r *WALSegmentReader) Next() bool {
	b := getBuf(defaultBufLen)
	defer putBuf(b)

	var nReadOK int

	// read the type and the length of the entry
	n, err := io.ReadFull(r.r, b[:5])
	if err == io.EOF {
		return false
	}

	if err != nil {
		r.err = err
		// We return true here because we want the client code to call read which
		// will return the this error to be handled.
		return true
	}
	nReadOK += n

	entryType := b[0]
	length := btou32(b[1:5])

	// read the compressed block and decompress it
	if int(length) > len(b) {
		// payload larger than the pooled buffer: fall back to a one-off
		// allocation (the defer above still returns the original pooled
		// buffer, since defer arguments are evaluated immediately)
		b = make([]byte, length)
	}

	n, err = io.ReadFull(r.r, b[:length])
	if err != nil {
		r.err = err
		return true
	}
	nReadOK += n

	data, err := snappy.Decode(nil, b[:length])
	if err != nil {
		r.err = err
		return true
	}

	// and marshal it and send it to the cache
	switch WalEntryType(entryType) {
	case WriteWALEntryType:
		r.entry = &WriteWALEntry{
			Values: map[string][]Value{},
		}
	case DeleteWALEntryType:
		r.entry = &DeleteWALEntry{}
	default:
		r.err = fmt.Errorf("unknown wal entry type: %v", entryType)
		return true
	}

	r.err = r.entry.UnmarshalBinary(data)
	if r.err == nil {
		// Read and decode of this entry was successful; account for the
		// bytes consumed from the underlying reader.
		r.n += int64(nReadOK)
	}

	return true
}
func (c *CompressionSnappyDecoder) readHeader() (int, error) { header := make([]byte, 4, 4) _, err := c.source.Read(header[:3]) if err != nil { return 0, err } headerVal := binary.LittleEndian.Uint32(header) c.isOriginal = headerVal%2 == 1 c.chunkLength = int(headerVal / 2) if !c.isOriginal { // ORC does not use snappy's framing as implemented in the // github.com/golang/snappy Reader implementation. As a result // we have to read and decompress the entire chunk. // TODO: find reader implementation with optional framing. r := io.LimitReader(c.source, int64(c.chunkLength)) src, err := ioutil.ReadAll(r) if err != nil { return 0, err } decodedBytes, err := snappy.Decode(nil, src) if err != nil { return 0, err } c.decoded = bytes.NewReader(decodedBytes) } else { c.decoded = io.LimitReader(c.source, int64(c.chunkLength)) } return 0, nil }
func (db *Database) getDoc(c redis.Conn, path string) (*doc.Package, time.Time, error) { r, err := redis.Values(getDocScript.Do(c, path)) if err == redis.ErrNil { return nil, time.Time{}, nil } else if err != nil { return nil, time.Time{}, err } var p []byte var t int64 if _, err := redis.Scan(r, &p, &t); err != nil { return nil, time.Time{}, err } p, err = snappy.Decode(nil, p) if err != nil { return nil, time.Time{}, err } var pdoc doc.Package if err := gob.NewDecoder(bytes.NewReader(p)).Decode(&pdoc); err != nil { return nil, time.Time{}, err } nextCrawl := pdoc.Updated if t != 0 { nextCrawl = time.Unix(t, 0).UTC() } return &pdoc, nextCrawl, err }
func SnappyMustUnCompress(inb []byte) (outb []byte) { outb, err := snappy.Decode(nil, inb) if err != nil { panic(err) } return outb }
// Read next record. The returned slice may be a sub-slice of dst if dst was // large enough to hold the entire record. Otherwise, a newly allocated slice // will be returned. It's valid to pass nil dst func (rr *Reader) ReadRecord(dst []byte) (output []byte, err error) { if rr.Err != nil { return nil, rr.Err } headerBytes := [recordHeaderStorageSize]byte{} if _, err = io.ReadFull(rr.bytesReader, headerBytes[:]); err != nil { return nil, rr.err(err) } header := recordHeader{} if err = header.decode(headerBytes[:]); err != nil { return nil, rr.err(err) } if header.flags&NoCompression != 0 { return rr.readBody(header, dst) } rawBuf, err := rr.readBody(header, rr.uncompressBuf) if err != nil { return nil, err } buf, err := snappy.Decode(dst, rawBuf) if err != nil { return nil, rr.err(ErrReadBytes) } return buf, nil }
func readRequestBody(r io.Reader, header *wire.RequestHeader, request proto.Message) error { // recv body (end) compressedPbRequest, err := recvFrame(r) if err != nil { return err } // checksum if crc32.ChecksumIEEE(compressedPbRequest) != header.GetChecksum() { return fmt.Errorf("protorpc.readRequestBody: unexpected checksum.") } // decode the compressed data pbRequest, err := snappy.Decode(nil, compressedPbRequest) if err != nil { return err } // check wire header: rawMsgLen if uint32(len(pbRequest)) != header.GetRawRequestLen() { return fmt.Errorf("protorpc.readRequestBody: Unexcpeted header.RawRequestLen.") } // Unmarshal to proto message if request != nil { err = proto.Unmarshal(pbRequest, request) if err != nil { return err } } return nil }
func readSeries(path string) (map[string]*tsdb.Series, error) { series := make(map[string]*tsdb.Series) f, err := os.OpenFile(filepath.Join(path, tsm1.SeriesFileExtension), os.O_RDONLY, 0666) if os.IsNotExist(err) { return series, nil } else if err != nil { return nil, err } defer f.Close() b, err := ioutil.ReadAll(f) if err != nil { return nil, err } data, err := snappy.Decode(nil, b) if err != nil { return nil, err } if err := json.Unmarshal(data, &series); err != nil { return nil, err } return series, nil }
func readIds(path string) (map[string]uint64, error) { f, err := os.OpenFile(filepath.Join(path, tsm1.IDsFileExtension), os.O_RDONLY, 0666) if os.IsNotExist(err) { return nil, nil } else if err != nil { return nil, err } b, err := ioutil.ReadAll(f) if err != nil { return nil, err } b, err = snappy.Decode(nil, b) if err != nil { return nil, err } ids := make(map[string]uint64) if b != nil { if err := json.Unmarshal(b, &ids); err != nil { return nil, err } } return ids, err }
func (e *Engine) readFields() (map[string]*tsdb.MeasurementFields, error) { fields := make(map[string]*tsdb.MeasurementFields) f, err := os.OpenFile(filepath.Join(e.path, FieldsFileExtension), os.O_RDONLY, 0666) if os.IsNotExist(err) { return fields, nil } else if err != nil { return nil, err } b, err := ioutil.ReadAll(f) if err != nil { return nil, err } data, err := snappy.Decode(nil, b) if err != nil { return nil, err } if err := json.Unmarshal(data, &fields); err != nil { return nil, err } return fields, nil }
// readMetadataFile will read the entire contents of the meta file and return a slice of the
// seriesAndFields objects that were written in. It ignores file errors since those can't be
// recovered.
//
// File layout: repeated records of [8-byte length][snappy-compressed JSON
// seriesAndFields blob]. A zero length or EOF ends the scan; corruption
// mid-file is logged and the entries read so far are returned.
func (l *Log) readMetadataFile(fileName string) ([]*seriesAndFields, error) {
	f, err := os.OpenFile(fileName, os.O_RDWR, 0666)
	if err != nil {
		return nil, err
	}

	var a []*seriesAndFields

	length := make([]byte, 8)
	for {
		// get the length of the compressed seriesAndFields blob
		_, err := io.ReadFull(f, length)
		if err == io.EOF {
			break
		} else if err != nil {
			f.Close()
			return nil, err
		}

		dataLength := btou64(length)
		if dataLength == 0 {
			// a zero-length record marks the logical end of the file
			break
		}

		// read in the compressed block and decode it
		b := make([]byte, dataLength)
		_, err = io.ReadFull(f, b)
		if err == io.EOF {
			break
		} else if err != nil {
			// print the error and move on since we can't recover the file
			l.logger.Println("error reading length of metadata:", err.Error())
			break
		}

		buf, err := snappy.Decode(nil, b)
		if err != nil {
			// print the error and move on since we can't recover the file
			l.logger.Println("error reading compressed metadata info:", err.Error())
			break
		}

		sf := &seriesAndFields{}
		if err := json.Unmarshal(buf, sf); err != nil {
			// print the error and move on since we can't recover the file
			l.logger.Println("error unmarshaling json for new series and fields:", err.Error())
			break
		}

		a = append(a, sf)
	}

	if err := f.Close(); err != nil {
		return nil, err
	}

	return a, nil
}
// Next indicates if there is a value to read
//
// It reads the 5-byte entry header (1 type byte + 4-byte length), then the
// snappy-compressed payload, decodes it, and unmarshals it into r.entry.
// Errors are stored in r.err and Next still returns true so the caller
// surfaces them on the subsequent read; only a clean EOF returns false.
func (r *WALSegmentReader) Next() bool {
	b := getBuf(defaultBufLen)
	defer putBuf(b)

	// read the type and the length of the entry
	_, err := io.ReadFull(r.r, b[:5])
	if err == io.EOF {
		return false
	}

	if err != nil {
		r.err = err
		// We return true here because we want the client code to call read which
		// will return the this error to be handled.
		return true
	}

	entryType := b[0]
	length := btou32(b[1:5])

	// read the compressed block and decompress it
	if int(length) > len(b) {
		// payload larger than the pooled buffer: use a one-off allocation
		// (the defer above still returns the original pooled buffer, since
		// defer arguments are evaluated immediately)
		b = make([]byte, length)
	}

	_, err = io.ReadFull(r.r, b[:length])
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		// a truncated entry is reported through r.err like any other error
		r.err = err
		return true
	}

	if err != nil {
		r.err = err
		return true
	}

	buf := getBuf(defaultBufLen)
	defer putBuf(buf)

	// NOTE(review): data may alias the pooled buf, which is recycled when
	// Next returns — UnmarshalBinary is presumably expected to copy
	// anything it retains; verify.
	data, err := snappy.Decode(buf, b[:length])
	if err != nil {
		r.err = err
		return true
	}

	// and marshal it and send it to the cache
	switch walEntryType(entryType) {
	case WriteWALEntryType:
		r.entry = &WriteWALEntry{}
	case DeleteWALEntryType:
		r.entry = &DeleteWALEntry{}
	default:
		r.err = fmt.Errorf("unknown wal entry type: %v", entryType)
		return true
	}

	r.err = r.entry.UnmarshalBinary(data)

	return true
}
func decodeSnappy(src []byte) ([]byte, error) { if bytes.Equal(src[:8], []byte{130, 83, 78, 65, 80, 80, 89, 0}) { result := make([]byte, 0, len(src)) current := 16 for current < len(src) { size := int(binary.BigEndian.Uint32(src[current : current+4])) current += 4 chunk, err := snappy.Decode(nil, src[current:current+size]) if err != nil { return nil, err } current += size result = append(result, chunk...) } return result, nil } return snappy.Decode(nil, src) }
func NewStringDecoder(b []byte) (StringDecoder, error) { // First byte stores the encoding type, only have snappy format // currently so ignore for now. data, err := snappy.Decode(nil, b[1:]) if err != nil { return StringDecoder{}, fmt.Errorf("failed to decode string block: %v", err.Error()) } return StringDecoder{b: data}, nil }
func (l *internalLevelDBStore) getByKey(key []byte, ref hash.Hash) Chunk { compressed, err := l.db.Get(key, nil) l.getCount++ if err == errors.ErrNotFound { return EmptyChunk } d.Chk.NoError(err) data, err := snappy.Decode(nil, compressed) d.Chk.NoError(err) return NewChunkWithHash(ref, data) }
func (snappyMessageCompressor) decompressData(dst, src []byte) (n int, err error) { n, err = snappy.DecodedLen(src) if err != nil { return } if n < 0 || n > len(dst) { err = io.ErrShortBuffer return } _, err = snappy.Decode(dst, src) return }
func (re *SnappyDecoder) Decode(pack *PipelinePack) (packs []*PipelinePack, err error) { output, decodeErr := snappy.Decode(nil, pack.MsgBytes) packs = []*PipelinePack{pack} if decodeErr == nil { // Replace bytes with decoded data pack.MsgBytes = output } // If there is an error decoding snappy, maybe it wasn't compressed. We'll // return the original data and try to proceed. return }
// Internal callback for CSVCMsg_CreateStringTable.
// XXX TODO: This is currently using an artificial, internally crafted message.
// This should be replaced with the real message once we have updated protos.
//
// Creates a string table from the message, decompressing the payload if
// flagged, parses its items, and registers the table in parser state.
func (p *Parser) onCSVCMsg_CreateStringTable(m *dota.CSVCMsg_CreateStringTable) error {
	// Create a new string table at the next index position
	t := &stringTable{
		index:             p.stringTables.nextIndex,
		name:              m.GetName(),
		Items:             make(map[int32]*stringTableItem),
		userDataFixedSize: m.GetUserDataFixedSize(),
		userDataSize:      m.GetUserDataSize(),
	}

	// Increment the index
	p.stringTables.nextIndex += 1

	// Decompress the data if necessary
	buf := m.GetStringData()
	if m.GetDataCompressed() {
		// old replays = lzss
		// new replays = snappy
		r := newReader(buf)
		var err error

		// Sniff the first four bytes: an "LZSS" tag selects the legacy
		// decompressor, anything else is treated as snappy.
		if s := r.readStringN(4); s != "LZSS" {
			if buf, err = snappy.Decode(nil, buf); err != nil {
				return err
			}
		} else {
			if buf, err = unlzss(buf); err != nil {
				return err
			}
		}
	}

	// Parse the items out of the string table data
	items := parseStringTable(buf, m.GetNumEntries(), t.userDataFixedSize, t.userDataSize)

	// Insert the items into the table
	for _, item := range items {
		t.Items[item.Index] = item
	}

	// Add the table to the parser state
	p.stringTables.Tables[t.index] = t
	p.stringTables.NameIndex[t.name] = t.index

	// Apply the updates to baseline state
	if t.name == "instancebaseline" {
		p.updateInstanceBaseline()
	}

	return nil
}
func snappyDecode(src []byte) ([]byte, error) { if bytes.Equal(src[:8], snappyMagicBytes) { cap := uint32(len(src)) current := uint32(16) result := make([]byte, 0, len(src)) for current < cap { size := binary.BigEndian.Uint32(src[current : current+4]) current += 4 chunk, err := snappy.Decode(nil, src[current:current+size]) if err != nil { return nil, err } current += size result = append(result, chunk...) } return result, nil } return snappy.Decode(nil, src) }
// Read the next outer message from the buffer.
//
// Wire layout: varint command (message type plus compression flag),
// varint tick, varint size, then size bytes of payload which may be
// snappy-compressed.
func (p *Parser) readOuterMessage() (*outerMessage, error) {
	// Read a command header, which includes both the message type as
	// well as a flag to determine whether or not the message is
	// compressed with snappy.
	command, err := p.stream.readCommand()
	if err != nil {
		return nil, err
	}

	// Extract the type and compressed flag out of the command
	msgType := int32(command & ^dota.EDemoCommands_DEM_IsCompressed)
	msgCompressed := (command & dota.EDemoCommands_DEM_IsCompressed) == dota.EDemoCommands_DEM_IsCompressed

	// Read the tick that the message corresponds with.
	tick, err := p.stream.readVarUint32()
	if err != nil {
		return nil, err
	}

	// This appears to actually be an int32, where a -1 means pre-game.
	if tick == 4294967295 {
		tick = 0
	}

	// Read the size and following buffer.
	size, err := p.stream.readVarUint32()
	if err != nil {
		return nil, err
	}
	buf, err := p.stream.readBytes(size)
	if err != nil {
		return nil, err
	}

	// If the buffer is compressed, decompress it with snappy.
	if msgCompressed {
		var err error
		if buf, err = snappy.Decode(nil, buf); err != nil {
			return nil, err
		}
	}

	// Return the message
	msg := &outerMessage{
		tick:   tick,
		typeId: msgType,
		data:   buf,
	}
	return msg, nil
}
func (s *hopCipher) decrypt(iv []byte, ctext []byte) []byte { defer func() { if err := recover(); err != nil { log.Printf("%v", err) } }() decrypter := _cipher.NewCBCDecrypter(s.block, iv) buf := make([]byte, len(ctext)) decrypter.CryptBlocks(buf, ctext) cmsg := PKCS5UnPadding(buf) msg, _ := snappy.Decode(nil, cmsg) return msg }
// BlockGetExt returns the cache record for the given block hash, reading
// and (if needed) decompressing the block from blockchain.dat on a cache
// miss. trusted mirrors the index record's trusted flag.
func (db *BlockDB) BlockGetExt(hash *btc.Uint256) (cacherec *BlckCachRec, trusted bool, e error) {
	db.mutex.Lock()
	rec, ok := db.blockIndex[hash.BIdx()]
	if !ok {
		db.mutex.Unlock()
		e = errors.New("btc.Block not in the index")
		return
	}
	trusted = rec.trusted

	// Fast path: serve from the in-memory cache and refresh its LRU stamp.
	if db.cache != nil {
		if crec, hit := db.cache[hash.BIdx()]; hit {
			cacherec = crec
			crec.LastUsed = time.Now()
			db.mutex.Unlock()
			return
		}
	}
	db.mutex.Unlock()

	bl := make([]byte, rec.blen)

	// we will re-open the data file, to not spoil the writting pointer
	f, e := os.Open(db.dirname + "blockchain.dat")
	if e != nil {
		return
	}
	_, e = f.Seek(int64(rec.fpos), os.SEEK_SET)
	if e == nil {
		_, e = f.Read(bl[:])
	}
	f.Close()

	// NOTE(review): decompression errors below are silently ignored; a
	// corrupted record would only surface later as bad block data.
	// Confirm this is intentional.
	if rec.compressed {
		if rec.snappied {
			bl, _ = snappy.Decode(nil, bl)
		} else {
			gz, _ := gzip.NewReader(bytes.NewReader(bl))
			bl, _ = ioutil.ReadAll(gz)
			gz.Close()
		}
	}

	db.mutex.Lock()
	cacherec = db.addToCache(hash, bl, nil)
	db.mutex.Unlock()
	return
}