// makePgzipWriter wraps output in a parallel gzip writer at the given
// compression level, using one compression goroutine per available CPU.
func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel)
	if err != nil {
		return nil, ErrInvalidCompressionLevel
	}
	gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1))
	return gzipWriter, nil
}
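// A minimal usage sketch for makePgzipWriter above. The function name,
// path argument, and choice of pgzip.DefaultCompression are illustrative
// assumptions, not part of the original code (assumes imports: io, os, and
// github.com/klauspost/pgzip).
func compressToFile(path string, src io.Reader) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	zw, err := makePgzipWriter(f, pgzip.DefaultCompression)
	if err != nil {
		return err
	}
	if _, err := io.Copy(zw, src); err != nil {
		zw.Close()
		return err
	}
	// Close flushes the remaining parallel blocks and writes the gzip footer.
	return zw.Close()
}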
// gzHandler wraps an http.Handler so that its response body is compressed
// with pgzip at the best compression level.
func gzHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Advertise the compressed encoding so the client decompresses the body.
		w.Header().Set("Content-Encoding", "gzip")
		gz, err := pgzip.NewWriterLevel(w, pgzip.BestCompression)
		checkErr(err)
		defer gz.Close()
		h.ServeHTTP(gzResponseWriter{ResponseWriter: w, Writer: gz}, r)
	})
}
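// gzHandler above relies on two helpers that are not shown. A minimal
// sketch of what they might look like, assuming gzResponseWriter simply
// redirects body writes into the gzip stream and checkErr aborts on failure:
type gzResponseWriter struct {
	http.ResponseWriter
	io.Writer
}

// Write sends the response body through the compressing writer rather than
// the underlying connection.
func (grw gzResponseWriter) Write(p []byte) (int, error) {
	return grw.Writer.Write(p)
}

func checkErr(err error) {
	if err != nil {
		panic(err)
	}
}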
// BenchmarkPGZIPWriter measures parallel gzip compression throughput by
// copying the benchmark input through a pgzip writer at the fastest level.
func (c *Compressor) BenchmarkPGZIPWriter(b *testing.B) {
	cw, err := pgzip.NewWriterLevel(c.w, flate.BestSpeed)
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	if _, err := io.Copy(cw, c.r); err != nil {
		b.Fatal(err)
	}
	cw.Close()
	c.w.Sync()
}
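// The benchmark above assumes a Compressor fixture that is not shown here.
// A hypothetical sketch consistent with the calls io.Copy(cw, c.r) and
// c.w.Sync():
type Compressor struct {
	r io.Reader // uncompressed input fed into the gzip writer
	w *os.File  // destination file; Sync flushes it to disk after the run
}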
// Create a new compressing writer that will generate a multi-gzip, segmenting
// the compressed stream in a way that is efficient when transferred over rsync
// with slight differences in the uncompressed stream.
//
// This function is similar to NewWriterLevel in that it creates a multi-gzip
// file, but segmenting happens at data-dependent offsets that make the
// compressed stream resynchronize after localized changes in the uncompressed
// stream. In other words, it uses the same algorithm as "gzip --rsyncable",
// but for a multigz file.
func NewWriterLevelRsyncable(w io.Writer, level int) (Writer, error) {
	underw := &countWriter{Writer: w}
	bg, err := gzip.NewWriterLevel(underw, level)
	if err != nil {
		return nil, err
	}
	return &GzipWriterRsyncable{
		Writer: bg,
		underw: underw,
		window: make([]byte, cWINDOW_SIZE),
	}, nil
}
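// A hedged usage sketch for NewWriterLevelRsyncable, assuming the package's
// Writer interface exposes Close (as normalWriter below suggests); the
// function and argument names are illustrative. Compressing the same input
// twice with small local edits should yield mostly identical compressed
// streams, which is what makes rsync transfers cheap.
func writeRsyncableGzip(dst io.Writer, src io.Reader) error {
	zw, err := NewWriterLevelRsyncable(dst, gzip.BestCompression)
	if err != nil {
		return err
	}
	if _, err := io.Copy(zw, src); err != nil {
		return err
	}
	return zw.Close()
}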
// gzipWrite compresses data with pgzip and writes it to w, favoring speed
// over ratio when speed is true. It returns the number of uncompressed
// bytes consumed and any error encountered.
func gzipWrite(w io.Writer, data []byte, speed bool) (int, error) {
	level := pgzip.BestCompression
	if speed {
		level = pgzip.BestSpeed
	}
	gw, err := pgzip.NewWriterLevel(w, level)
	if err != nil {
		// Check the constructor error before deferring Close, so a nil
		// writer is never dereferenced.
		return 0, err
	}
	defer gw.Close()
	bytesWritten, err := gw.Write(data)
	if err != nil {
		return 0, err
	}
	return bytesWritten, nil
}
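// Usage sketch for gzipWrite: compress into an in-memory buffer, trading
// ratio for speed. The buffer and payload are illustrative (assumes import:
// bytes).
func exampleGzipWrite() error {
	var buf bytes.Buffer
	if _, err := gzipWrite(&buf, []byte("some payload to compress"), true); err != nil {
		return err
	}
	// buf now holds a complete gzip stream; the deferred Close inside
	// gzipWrite has already written the footer by the time it returns.
	return nil
}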
// Create a new compressing writer that will generate a multi-gzip, segmenting
// the compressed stream at fixed offsets. This is similar to gzip.NewWriterLevel,
// but takes an additional argument that specifies the size of each gzip block.
// You can use multigz.DefaultBlockSize as a reasonable default (64 KB) that
// balances decompression speed and compression overhead.
func NewWriterLevel(w io.Writer, level int, blocksize int) (Writer, error) {
	underw := &countWriter{Writer: w}
	gz, err := gzip.NewWriterLevel(underw, level)
	if err != nil {
		return nil, err
	}
	blockw := &blockWriter{
		gz:     gz,
		underw: underw,
	}
	buf := bufio.NewWriterSize(blockw, blocksize)
	return normalWriter{
		Writer: buf,
		Closer: gz,
		blkw:   blockw,
	}, nil
}
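// A sketch of calling this constructor from outside the package, using the
// multigz.DefaultBlockSize mentioned in the comment above; the destination
// file and payload are assumptions, and the Close call assumes the Writer
// interface embeds io.Closer as normalWriter indicates.
func writeMultigz(f *os.File, data []byte) error {
	zw, err := multigz.NewWriterLevel(f, gzip.BestCompression, multigz.DefaultBlockSize)
	if err != nil {
		return err
	}
	if _, err := zw.Write(data); err != nil {
		return err
	}
	return zw.Close()
}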