// lz4.Reader fuzz function func Fuzz(data []byte) int { // uncompress some data d, err := ioutil.ReadAll(lz4.NewReader(bytes.NewReader(data))) if err != nil { return 0 } // got valid compressed data // compress the uncompressed data // and compare with the original input buf := bytes.NewBuffer(nil) zw := lz4.NewWriter(buf) n, err := zw.Write(d) if err != nil { panic(err) } if n != len(d) { panic("short write") } err = zw.Close() if err != nil { panic(err) } // uncompress the newly compressed data ud, err := ioutil.ReadAll(lz4.NewReader(buf)) if err != nil { panic(err) } if bytes.Compare(d, ud) != 0 { panic("not equal") } return 1 }
func DownloadToFile(fileUrl, savePath string) (e error) { response, err := client.Get(fileUrl) if err != nil { return err } defer response.Body.Close() if response.StatusCode != http.StatusOK { return fmt.Errorf("%s: %s", fileUrl, response.Status) } var r io.Reader content_encoding := strings.ToLower(response.Header.Get("Content-Encoding")) size := response.ContentLength if n, e := strconv.ParseInt(response.Header.Get("X-Content-Length"), 10, 64); e == nil { size = n } switch content_encoding { case "lz4": r = lz4.NewReader(response.Body) default: r = response.Body } var f *os.File if f, e = os.OpenFile(savePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); e != nil { return } if size >= 0 { _, e = io.CopyN(f, r, size) } else { _, e = io.Copy(f, r) } f.Close() return }
func (c *Compressor) BenchmarkLZ4Reader(b *testing.B) { cr := lz4.NewReader(c.w) b.ResetTimer() _, err := io.Copy(ioutil.Discard, cr) if err != nil { b.Fatal(err) } }
// DecompressLZ4 decompresses data compressed with lz4 compression. Bytes read // is returned along with any non io.EOF error that may have occurred. func DecompressLZ4(r io.Reader, w io.Writer) (int64, error) { // create the lz4 reader d := lz4.NewReader(r) n, err := io.Copy(w, d) if err != nil { return n, err } return n, nil }
// TestFrame compresses and decompresses LZ4 streams with various input data and options. func TestFrame(t *testing.T) { for _, tdata := range testDataItems { data := tdata.data // test various options for _, headerItem := range testHeaderItems { tag := tdata.label + ": " + headerItem.label rw := bytes.NewBuffer(nil) // Set all options to non default values and compress w := lz4.NewWriter(rw) w.Header = headerItem.header n, err := w.Write(data) if err != nil { t.Errorf("%s: Write(): unexpected error: %v", tag, err) t.FailNow() } if n != len(data) { t.Errorf("%s: Write(): expected %d bytes written, got %d", tag, len(data), n) t.FailNow() } if err = w.Close(); err != nil { t.Errorf("%s: Close(): unexpected error: %v", tag, err) t.FailNow() } // Decompress r := lz4.NewReader(rw) n, err = r.Read(nil) if err != nil { t.Errorf("%s: Read(): unexpected error: %v", tag, err) t.FailNow() } if n != 0 { t.Errorf("%s: Read(): expected 0 bytes read, got %d", tag, n) } buf := make([]byte, len(data)) n, err = r.Read(buf) if err != nil && err != io.EOF { t.Errorf("%s: Read(): unexpected error: %v", tag, err) t.FailNow() } if n != len(data) { t.Errorf("%s: Read(): expected %d bytes read, got %d", tag, len(data), n) } buf = buf[:n] if !bytes.Equal(buf, data) { t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, len(buf), len(data)) t.FailNow() } compareHeaders(w.Header, r.Header, t) } } }
func TestSkippable(t *testing.T) { w := lz4.NewWriter(nil) r := lz4.NewReader(nil) skippable := make([]byte, 1<<20) binary.LittleEndian.PutUint32(skippable, lz4.FrameSkipMagic) binary.LittleEndian.PutUint32(skippable[4:], uint32(len(skippable)-8)) buf := make([]byte, len(lorem)) tag := "skippable first" zbuf := bytes.NewBuffer(skippable) w.Reset(zbuf) w.Write(lorem) w.Close() r.Reset(zbuf) if _, err := r.Read(buf); err != nil { t.Errorf("%s: unexpected error: %s", tag, err) t.FailNow() } tag = "skippable last" zbuf = bytes.NewBuffer(nil) w.Reset(zbuf) w.Write(lorem) w.Close() zbuf.Write(skippable) r.Reset(zbuf) if _, err := r.Read(buf); err != nil { t.Errorf("%s: unexpected error: %s", tag, err) t.FailNow() } tag = "skippable middle" zbuf = bytes.NewBuffer(nil) w.Reset(zbuf) w.Write(lorem) zbuf.Write(skippable) w.Write(lorem) w.Close() r.Reset(zbuf) if _, err := r.Read(buf); err != nil { t.Errorf("%s: unexpected error: %s", tag, err) t.FailNow() } }
// TestNoWrite compresses without any call to Write() (empty frame). // It does so checking all possible headers. func TestNoWrite(t *testing.T) { // that is 2*2*2*2*2*2^4 = 512 headers! seed := map[string][]interface{}{ "BlockDependency": {true}, "BlockChecksum": {true}, "NoChecksum": {true}, "Size": {999}, // "Dict": {true}, // Enabling this substantially increase the testing time. // As this test is not really required it is disabled. // "HighCompression": {true}, } for _, bms := range lz4.BlockMaxSizeItems { seed["BlockMaxSize"] = append(seed["BlockMaxSize"], bms) } testHeaderItems := buildHeaders(seed) for _, h := range testHeaderItems { rw := bytes.NewBuffer(nil) w := lz4.NewWriter(rw) w.Header = h.header if err := w.Close(); err != nil { t.Errorf("Close(): unexpected error: %v", err) t.FailNow() } r := lz4.NewReader(rw) n, err := r.Read(nil) if err != nil { t.Errorf("Read(): unexpected error: %v", err) t.FailNow() } if n != 0 { t.Errorf("expected 0 bytes read, got %d", n) t.FailNow() } buf := make([]byte, 16) n, err = r.Read(buf) if err != nil && err != io.EOF { t.Errorf("Read(): unexpected error: %v", err) t.FailNow() } if n != 0 { t.Errorf("expected 0 bytes read, got %d", n) t.FailNow() } } }
// TestReset tests that the Reset() method resets the header on the Reader and Writer. func TestReset(t *testing.T) { h := lz4.Header{ BlockDependency: true, BlockChecksum: true, NoChecksum: true, BlockMaxSize: 123, Size: 999, // Dict: true, // DictID: 555, } dh := lz4.Header{} w := lz4.NewWriter(nil) w.Header = h w.Reset(nil) compareHeaders(w.Header, dh, t) r := lz4.NewReader(nil) r.Header = h r.Reset(nil) compareHeaders(r.Header, dh, t) }
// TestCopy will use io.Copy and avoid using Reader.WriteTo() and Writer.ReadFrom(). func TestCopy(t *testing.T) { w := lz4.NewWriter(nil) r := lz4.NewReader(nil) for _, tdata := range testDataItems { data := tdata.data // test various options for _, headerItem := range testHeaderItems { tag := "io.Copy: " + tdata.label + ": " + headerItem.label dbuf := &testBuffer{bytes.NewBuffer(data)} zbuf := bytes.NewBuffer(nil) w.Reset(zbuf) w.Header = headerItem.header if _, err := io.Copy(w, dbuf); err != nil { t.Errorf("%s: unexpected error: %s", tag, err) t.FailNow() } if err := w.Close(); err != nil { t.Errorf("%s: unexpected error: %s", tag, err) t.FailNow() } buf := &testBuffer{bytes.NewBuffer(nil)} r.Reset(zbuf) if _, err := io.Copy(buf, r); err != nil { t.Errorf("%s: unexpected error: %s", tag, err) t.FailNow() } if !bytes.Equal(buf.Bytes(), data) { t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data)) t.FailNow() } } } }
func getElevationLz4(lat float64, lon float64) int16 { boundingRectangle, err := readBoundingRectangle(aXmlFile) check(err) upperLeft, lowerRight := calculateUpperLeftAndLowerRightLikeGdalDataSet(boundingRectangle) offset := calculateOffset(upperLeft, lowerRight, lat, lon) compressedData, err := ioutil.ReadFile(aLz4File) check(err) bufSize := 2 * NO_OF_PIXELS_PER_LINE * NO_OF_PIXELS_PER_LINE var dst []byte dst = make([]byte, bufSize) reader := lz4.NewReader(bytes.NewReader(compressedData)) read, err := reader.Read(dst) check(err) if read != len(dst) { // return error, buffer doesn't fit expected file size } return int16(int(dst[offset])<<8 + int(dst[offset+1])) }
func main() { // Process command line arguments var ( blockMaxSizeDefault = 4 << 20 flagStdout = flag.Bool("c", false, "output to stdout") flagDecompress = flag.Bool("d", false, "decompress flag") flagBlockMaxSize = flag.Int("B", blockMaxSizeDefault, "block max size [64Kb,256Kb,1Mb,4Mb]") flagBlockDependency = flag.Bool("BD", false, "enable block dependency") flagBlockChecksum = flag.Bool("BX", false, "enable block checksum") flagStreamChecksum = flag.Bool("Sx", false, "disable stream checksum") flagHighCompression = flag.Bool("9", false, "enabled high compression") ) flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage:\n\t%s [arg] [input]...\n\tNo input means [de]compress stdin to stdout\n\n", os.Args[0]) flag.PrintDefaults() } flag.Parse() // Use all CPUs runtime.GOMAXPROCS(runtime.NumCPU()) zr := lz4.NewReader(nil) zw := lz4.NewWriter(nil) zh := lz4.Header{ BlockDependency: *flagBlockDependency, BlockChecksum: *flagBlockChecksum, BlockMaxSize: *flagBlockMaxSize, NoChecksum: *flagStreamChecksum, HighCompression: *flagHighCompression, } worker := func(in io.Reader, out io.Writer) { if *flagDecompress { zr.Reset(in) if _, err := io.Copy(out, zr); err != nil { log.Fatalf("Error while decompressing input: %v", err) } } else { zw.Reset(out) zw.Header = zh if _, err := io.Copy(zw, in); err != nil { log.Fatalf("Error while compressing input: %v", err) } } } // No input means [de]compress stdin to stdout if len(flag.Args()) == 0 { worker(os.Stdin, os.Stdout) os.Exit(0) } // Compress or decompress all input files for _, inputFileName := range flag.Args() { outputFileName := path.Clean(inputFileName) if !*flagStdout { if *flagDecompress { outputFileName = strings.TrimSuffix(outputFileName, lz4.Extension) if outputFileName == inputFileName { log.Fatalf("Invalid output file name: same as input: %s", inputFileName) } } else { outputFileName += lz4.Extension } } inputFile, err := os.Open(inputFileName) if err != nil { log.Fatalf("Error while opening input: %v", err) } 
outputFile := os.Stdout if !*flagStdout { outputFile, err = os.Create(outputFileName) if err != nil { log.Fatalf("Error while opening output: %v", err) } } worker(inputFile, outputFile) inputFile.Close() if !*flagStdout { outputFile.Close() } } }
// cpdeflate streams the lz4-decompressed contents of src into dst.
//
// NOTE(review): the io.Copy error is silently discarded, and neither dst
// nor src is closed here despite both being io.ReadWriteClosers —
// presumably the caller owns their lifetime and error handling; confirm.
func cpdeflate(dst, src io.ReadWriteCloser) {
	r := lz4.NewReader(src)
	io.Copy(dst, r)
}
// decode deserializes a single message from pd: CRC, version, attributes,
// optional timestamp (v1+), key and value. If the value is compressed, it
// is decompressed in place according to the codec and the nested message
// set is decoded.
func (m *Message) decode(pd packetDecoder) (err error) {
	// Push a CRC32 field; pd.pop() at the end verifies the checksum over
	// the decoded region.
	err = pd.push(&crc32Field{})
	if err != nil {
		return err
	}

	m.Version, err = pd.getInt8()
	if err != nil {
		return err
	}

	// The low bits of the attribute byte select the compression codec.
	attribute, err := pd.getInt8()
	if err != nil {
		return err
	}
	m.Codec = CompressionCodec(attribute & compressionCodecMask)

	if m.Version >= 1 {
		// v1 messages carry a millisecond-precision Unix timestamp.
		millis, err := pd.getInt64()
		if err != nil {
			return err
		}
		m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
	}

	m.Key, err = pd.getBytes()
	if err != nil {
		return err
	}

	m.Value, err = pd.getBytes()
	if err != nil {
		return err
	}

	// Required for deep equal assertion during tests but might be useful
	// for future metrics about the compression ratio in fetch requests
	m.compressedSize = len(m.Value)

	// Decompress the value in place. A nil value is left untouched for
	// every codec.
	switch m.Codec {
	case CompressionNone:
		// nothing to do
	case CompressionGZIP:
		if m.Value == nil {
			break
		}
		reader, err := gzip.NewReader(bytes.NewReader(m.Value))
		if err != nil {
			return err
		}
		if m.Value, err = ioutil.ReadAll(reader); err != nil {
			return err
		}
		if err := m.decodeSet(); err != nil {
			return err
		}
	case CompressionSnappy:
		if m.Value == nil {
			break
		}
		if m.Value, err = snappy.Decode(m.Value); err != nil {
			return err
		}
		if err := m.decodeSet(); err != nil {
			return err
		}
	case CompressionLZ4:
		if m.Value == nil {
			break
		}
		reader := lz4.NewReader(bytes.NewReader(m.Value))
		if m.Value, err = ioutil.ReadAll(reader); err != nil {
			return err
		}
		if err := m.decodeSet(); err != nil {
			return err
		}
	default:
		return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
	}

	// Pop the CRC32 field, validating the checksum.
	return pd.pop()
}