func NewGzipResponseWriterLevelFile(w http.ResponseWriter, r *http.Request, level int, file *os.File) *GzipResponseWriter { if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { if level < gzip.DefaultCompression || level > gzip.BestCompression { level = gzip.DefaultCompression } var gz *gzip.Writer if file != nil { gz, _ = gzip.NewWriterLevel(io.MultiWriter(w, file), level) } else { gz, _ = gzip.NewWriterLevel(w, level) } resp := &GzipResponseWriter{ ResponseWriter: w, gzip: gz, } header := w.Header() header.Set("Content-Encoding", "gzip") if vary, exists := header["Vary"]; !exists || !validate.IsIn("Accept-Encoding", vary...) { header.Add("Vary", "Accept-Encoding") } return resp } return &GzipResponseWriter{w, nil} }
//ServeJSONEncode is eager JSON writer // with gzip encoding where possible func ServeJSONEncode(w http.ResponseWriter, r *http.Request, v interface{}) { w.Header().Set("Content-Type", applicationJson) var pipe AcceptEncoding // Accept-Encoding has gzip if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { pipe = Gzip } else { pipe = PassThrough } switch pipe { case PassThrough: err := json.NewEncoder(w).Encode(v) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } case Gzip: w.Header().Set("Content-Encoding", "gzip") gz, err := gzip.NewWriterLevel(w, flate.DefaultCompression) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } defer gz.Close() json.NewEncoder(gz).Encode(v) } return }
func (a *Archive) writer() (*tharWriter, error) { writer := io.Writer(a.Stream) flushers := []flushableWriter{} closers := []closeableWriter{} if a.Options.GZip { if a.Options.GZipLevel > 0 { gw, err := gzip.NewWriterLevel(writer, a.Options.GZipLevel) if err != nil { return nil, err } flushers = append([]flushableWriter{gw}, flushers...) closers = append([]closeableWriter{gw}, closers...) writer = gw } else { writer = gzip.NewWriter(writer) } } tw := tar.NewWriter(writer) flushers = append([]flushableWriter{tw}, flushers...) return &tharWriter{ Writer: tw, Flushers: flushers, Closers: closers, }, nil }
// shareFiles writes the metadata of each file specified by nicknames to w. // This output can be shared with other daemons, giving them access to those // files. func (r *Renter) shareFiles(nicknames []string, w io.Writer) error { if len(nicknames) == 0 { return ErrNoNicknames } var files []file for _, nickname := range nicknames { file, exists := r.files[nickname] if !exists { return ErrUnknownNickname } active := 0 for _, piece := range file.Pieces { if piece.Active { active++ } } if active < 3 { return errors.New("Cannot share an inactive file") } files = append(files, *file) } // pipe data through json -> gzip -> w zip, _ := gzip.NewWriterLevel(w, gzip.BestCompression) err := persist.Save(shareMetadata, files, zip) if err != nil { return err } zip.Close() return nil }
func main() { flag.Parse() if flag.NArg() != 1 || *helpFlag { flag.PrintDefaults() return } out, e := os.OpenFile(*outputFlag, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if e != nil { log.Fatalf("Error while opening output file: %s", e.Error()) } defer out.Close() comp_out, e := gzip.NewWriterLevel(out, gzip.BestCompression) if e != nil { log.Fatalf("Error while setting up the compressor: %s", e.Error()) } defer comp_out.Close() cpio_out := cpio.NewWriter(comp_out) defer cpio_out.Close() in, e := os.Open(flag.Arg(0)) if e != nil { log.Fatalf("Error while opening input file: %s", e.Error()) } defer in.Close() c := make(chan *Entry) go parseInput(in, c) createCpioArchive(cpio_out, c) }
func (codec *GzipPayloadCodec) Encode(data []byte) []byte { buf := bytes.NewBuffer([]byte{}) zipper, _ := gzip.NewWriterLevel(buf, gzip.BestSpeed) zipper.Write(data) zipper.Close() return buf.Bytes() }
// Constructor for a new Proposal given a Winlink Message. // // Reads the Winlink Message given and constructs a new proposal // based on what's read and prepares for outbound delivery, returning // a Proposal with the given data. // func NewProposal(MID, title string, code PropCode, data []byte) *Proposal { prop := &Proposal{ mid: MID, code: code, msgType: "EM", title: title, size: len(data), } if prop.title == `` { prop.title = `No title` } if prop.code == GzipProposal { // Gzip compressed var buf bytes.Buffer z, err := gzip.NewWriterLevel(&buf, gzip.BestCompression) if err != nil { panic(err) } z.Write(data) z.Close() prop.compressedData = buf.Bytes() } else { // LZHUF compressed prop.compressedData = lzhuf.Encode(data) } prop.compressedSize = len(prop.compressedData) return prop }
func packFile(path string) (*File, error) { buf := new(bytes.Buffer) zbuf, err := gzip.NewWriterLevel(buf, gzip.BestCompression) if err != nil { return nil, err } fr, err := os.Open(path) if err != nil { return nil, err } defer fr.Close() fi, err := fr.Stat() if err != nil { return nil, err } _, err = io.Copy(zbuf, fr) if err != nil { return nil, err } zbuf.Close() file := &File{ Path: filepath.Clean(path), Mode: int64(fi.Mode()), Data: buf.Bytes(), } return file, nil }
func writeToFile(fpath string, ch chan []byte) error { f, err := os.Create(fpath) if err != nil { return err } defer f.Close() x.Check(err) w := bufio.NewWriterSize(f, 1000000) gw, err := gzip.NewWriterLevel(w, gzip.BestCompression) if err != nil { return err } for buf := range ch { if _, err := gw.Write(buf); err != nil { return err } } if err := gw.Flush(); err != nil { return err } if err := gw.Close(); err != nil { return err } return w.Flush() }
func benchmarkOldGzipN(b *testing.B, level int) { dat, _ := ioutil.ReadFile("testdata/test.json") dat = append(dat, dat...) dat = append(dat, dat...) dat = append(dat, dat...) dat = append(dat, dat...) dat = append(dat, dat...) b.SetBytes(int64(len(dat))) w, _ := oldgz.NewWriterLevel(ioutil.Discard, level) b.ResetTimer() for n := 0; n < b.N; n++ { w.Reset(ioutil.Discard) n, err := w.Write(dat) if n != len(dat) { panic("short write") } if err != nil { panic(err) } err = w.Close() if err != nil { panic(err) } } }
func writeZone(z *Zone, w io.Writer) { // this function panics on errors as all encoding errors are bugs and // need to be manually resolved in the code. z.lock() defer z.unlock() var data savedZone data.X, data.Y, data.Z = z.X, z.Y, z.Z data.Version = 0 data.TileData = make([]savedTile, 256*256) i := 0 for x := 0; x < 256; x++ { x8 := uint8(x) for y := 0; y < 256; y++ { y8 := uint8(y) t := z.tile(x8, y8) data.TileData[i].Version, data.TileData[i].Data = t.save() i++ } } g, err := gzip.NewWriterLevel(w, gzip.BestCompression) if err != nil { panic(err) } defer g.Close() err = gob.NewEncoder(g).Encode(&data) if err != nil { panic(err) } }
func main() { flag.Parse() c, _ := gzip.NewWriterLevel(os.Stdout, *n) io.Copy(c, os.Stdin) c.Close() }
// merge and gzip app javascript files into a single file, run once at startup func gzipCodeJS() { codeJSModTime = time.Now() fileOut, _ := os.Create(codeJSFileName) compressor, err := gzip.NewWriterLevel(fileOut, gzip.BestCompression) if err != nil { log.Fatal("gzipCodeJS", err) } files := []string{ "main.js", "hub.js", "data.js", "lib.js", "style.js", "view_screen1.js", "view_booktabs.js", "view_noteedit.js", "view_noteview.js", "view_tabmgr.js", "view_position.js", } var fileIn *os.File for _, fileName := range files { fileIn, err = os.Open("static/code/" + fileName) if err != nil { log.Fatalln("gzipCodeJS error: ", fileName, err) } io.Copy(compressor, fileIn) fileIn.Close() } compressor.Close() fileOut.Close() }
func PrepareArchive(rootPath string) (string, int64, error) { file, err := ioutil.TempFile("", "") fail.Handle(err) defer file.Close() fileWriter := bufio.NewWriter(file) defer fileWriter.Flush() gzipWriter, err := gzip.NewWriterLevel(fileWriter, gzip.BestCompression) fail.Handle(err) defer gzipWriter.Close() tarWriter := tar.NewWriter(gzipWriter) defer tarWriter.Close() fullRootPath, err := filepath.Abs(rootPath) fail.Handle(err) err = addAllToArchive(fullRootPath, tarWriter) if err != nil { return "", 0, err } tarWriter.Close() gzipWriter.Close() fileWriter.Flush() file.Close() fileInfo, err := os.Stat(file.Name()) fail.Handle(err) return file.Name(), fileInfo.Size(), nil }
// The header must be written before any content may be written. func (w *Writer) Header(t Type, compression Comp, blockSize int) error { if w.hasHeader { return ErrHeaderOnce } w.t = t w.hasHeader = true var err error err = writeHeader(w, blockSize, t, compression) if err != nil { return err } switch compression { case CompNone: w.body = w.Writer case CompGZip: w.body, err = gzip.NewWriterLevel(w.Writer, gzip.BestCompression) if err != nil { return err } default: return ErrUnknownCompression } return nil }
func Gzip(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // @TODO(mark): Swap this for some handler flags url, err := url.Parse(r.URL.String()) queryParams := url.Query() _, found := queryParams[queryStringKey] if found { handler.ServeHTTP(w, r) return } if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { handler.ServeHTTP(w, r) return } w.Header().Set("Content-Encoding", "gzip") gz, err := gzip.NewWriterLevel(w, gzip.BestCompression) if err != nil { http.Error(w, "Error with gzip compression", http.StatusInternalServerError) return } defer gz.Close() gzw := gzipResponseWriter{ Writer: gz, ResponseWriter: w, } handler.ServeHTTP(gzw, r) }) }
// Writes the given file into tarfn func writeItem(tarfn string, fn string) (pos uint64, err error) { pos = 0 sfh, err := os.Open(fn) defer sfh.Close() if sfi, err := sfh.Stat(); err == nil { hdr := new(tar.Header) hdr.Name = sfi.Name() hdr.Size = sfi.Size() hdr.Mode = int64(sfi.Mode().Perm()) hdr.ModTime = sfi.ModTime() var ( tw *tar.Writer f ReadWriteSeekCloser ) if tw, f, pos, err = OpenForAppend(tarfn); err == nil { defer f.Close() defer tw.Close() //LIFO if err := tw.WriteHeader(hdr); err == nil { if zw, err := gzip.NewWriterLevel(tw, flate.BestCompression); err == nil { defer zw.Close() _, err = io.Copy(zw, sfh) } } } } return }
// Gzip returns a gin middleware that gzip-compresses responses at the given
// level, reusing gzip writers through a sync.Pool.
func Gzip(level int) gin.HandlerFunc {
	var gzPool sync.Pool
	gzPool.New = func() interface{} {
		// An invalid level is a programmer error in the caller; fail fast.
		gz, err := gzip.NewWriterLevel(ioutil.Discard, level)
		if err != nil {
			panic(err)
		}
		return gz
	}
	return func(c *gin.Context) {
		if !shouldCompress(c.Request) {
			return
		}
		gz := gzPool.Get().(*gzip.Writer)
		// Defers run LIFO: the func registered below (which calls gz.Close)
		// runs before the writer is returned to the pool here.
		defer gzPool.Put(gz)
		// Re-target the pooled writer at this response.
		gz.Reset(c.Writer)
		c.Header("Content-Encoding", "gzip")
		c.Header("Vary", "Accept-Encoding")
		c.Writer = &gzipWriter{c.Writer, gz}
		defer func() {
			// The compressed length is not known up front; zero the header
			// before flushing the gzip footer with Close.
			c.Header("Content-Length", "0")
			gz.Close()
		}()
		c.Next()
	}
}
// save saves a file to w in shareable form. Files are stored in binary format // and gzipped to reduce size. func (f *file) save(w io.Writer) error { // TODO: error checking zip, _ := gzip.NewWriterLevel(w, gzip.BestCompression) defer zip.Close() enc := encoding.NewEncoder(zip) // encode easy fields enc.Encode(f.name) enc.Encode(f.size) enc.Encode(f.masterKey) enc.Encode(f.pieceSize) enc.Encode(f.mode) enc.Encode(f.bytesUploaded) enc.Encode(f.chunksUploaded) // encode erasureCode switch code := f.erasureCode.(type) { case *rsCode: enc.Encode("Reed-Solomon") enc.Encode(uint64(code.dataPieces)) enc.Encode(uint64(code.numPieces - code.dataPieces)) default: if build.DEBUG { panic("unknown erasure code") } return errors.New("unknown erasure code") } // encode contracts enc.Encode(uint64(len(f.contracts))) for _, c := range f.contracts { enc.Encode(c) } return nil }
// getZip is equivalent to `ipfs getdag $hash | gzip` func getZip(ctx context.Context, node *core.IpfsNode, p path.Path, compression int) (io.Reader, error) { dagnode, err := core.Resolve(ctx, node, p) if err != nil { return nil, err } reader, err := uio.NewDagReader(ctx, dagnode, node.DAG) if err != nil { return nil, err } pr, pw := io.Pipe() gw, err := gzip.NewWriterLevel(pw, compression) if err != nil { return nil, err } bufin := bufio.NewReader(reader) go func() { _, err := bufin.WriteTo(gw) if err != nil { log.Error("Fail to compress the stream") } gw.Close() pw.Close() }() return pr, nil }
func (c *Controller) writeToWriter(rb []byte) { output_writer := c.Ctx.ResponseWriter.(io.Writer) if EnableGzip == true && c.Ctx.Request.Header.Get("Accept-Encoding") != "" { splitted := strings.SplitN(c.Ctx.Request.Header.Get("Accept-Encoding"), ",", -1) encodings := make([]string, len(splitted)) for i, val := range splitted { encodings[i] = strings.TrimSpace(val) } for _, val := range encodings { if val == "gzip" { c.Ctx.ResponseWriter.Header().Set("Content-Encoding", "gzip") output_writer, _ = gzip.NewWriterLevel(c.Ctx.ResponseWriter, gzip.BestSpeed) break } else if val == "deflate" { c.Ctx.ResponseWriter.Header().Set("Content-Encoding", "deflate") output_writer, _ = flate.NewWriter(c.Ctx.ResponseWriter, flate.BestSpeed) break } } } else { c.Ctx.SetHeader("Content-Length", strconv.Itoa(len(rb)), true) } output_writer.Write(rb) switch output_writer.(type) { case *gzip.Writer: output_writer.(*gzip.Writer).Close() case *flate.Writer: output_writer.(*flate.Writer).Close() case io.WriteCloser: output_writer.(io.WriteCloser).Close() } }
func grep(tr *trie.Trie) { f, scanner := getScanner(*rdf) defer f.Close() out, err := os.Create(*output) if err != nil { glog.WithError(err).Fatal("Unable to open output file.") } wr, err := gzip.NewWriterLevel(out, gzip.BestCompression) if err != nil { glog.WithError(err).Fatal("Unable to create gzip writer.") } sw := new(SyncWriter) sw.wr = wr ch := make(chan string, 100<<20) // 100 Million wg := new(sync.WaitGroup) for i := 0; i < *numroutines; i++ { wg.Add(1) go findAndWrite(i, *tr, ch, sw, wg) } for scanner.Scan() { ch <- scanner.Text() } close(ch) wg.Wait() if err := wr.Close(); err != nil { glog.WithError(err).Fatal("Unable to close output writer.") } if err := out.Close(); err != nil { glog.WithError(err).Fatal("Unable to close output file.") } }
// shareFiles writes the specified files to w. First a header is written, // followed by the gzipped concatenation of each file. func shareFiles(files []*file, w io.Writer) error { // Write header. err := encoding.NewEncoder(w).EncodeAll( shareHeader, shareVersion, uint64(len(files)), ) if err != nil { return err } // Create compressor. zip, _ := gzip.NewWriterLevel(w, gzip.BestCompression) enc := encoding.NewEncoder(zip) // Encode each file. for _, f := range files { err = enc.Encode(f) if err != nil { return err } } return zip.Close() }
func gzipStrLevel(s string, lvl int) []byte { var b bytes.Buffer w, _ := gzip.NewWriterLevel(&b, lvl) io.WriteString(w, s) w.Close() return b.Bytes() }
// tryPostMessages sends the given batch of messages to the Splunk HTTP Event
// Collector in a single POST, optionally gzip-compressing the request body.
// A nil return means Splunk acknowledged the batch with 200 OK.
func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error {
	if len(messages) == 0 {
		return nil
	}
	var buffer bytes.Buffer
	var writer io.Writer
	var gzipWriter *gzip.Writer
	var err error
	// If gzip compression is enabled - create gzip writer with specified compression
	// level. If gzip compression is disabled, use standard buffer as a writer
	if l.gzipCompression {
		gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel)
		if err != nil {
			return err
		}
		writer = gzipWriter
	} else {
		writer = &buffer
	}
	// Concatenate the JSON-marshaled events into the (possibly compressed) body.
	for _, message := range messages {
		jsonEvent, err := json.Marshal(message)
		if err != nil {
			return err
		}
		if _, err := writer.Write(jsonEvent); err != nil {
			return err
		}
	}
	// If gzip compression is enabled, tell it, that we are done
	// (Close flushes the gzip footer into the buffer).
	if l.gzipCompression {
		err = gzipWriter.Close()
		if err != nil {
			return err
		}
	}
	req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes()))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", l.auth)
	// Tell if we are sending gzip compressed body
	if l.gzipCompression {
		req.Header.Set("Content-Encoding", "gzip")
	}
	res, err := l.client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		// Include the response body in the error for diagnostics.
		var body []byte
		body, err = ioutil.ReadAll(res.Body)
		if err != nil {
			return err
		}
		return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body)
	}
	// Drain the body so the HTTP transport can reuse the connection.
	io.Copy(ioutil.Discard, res.Body)
	return nil
}
// Gziper returns a Handler that adds gzip compression to all requests.
// Make sure to include the Gzip middleware above other middleware
// that alter the response body (like the render middleware).
func Gziper(options ...GzipOptions) Handler {
	opt := prepareGzipOptions(options)

	return func(ctx *Context) {
		// Skip clients that do not advertise gzip support.
		if !strings.Contains(ctx.Req.Header.Get(HeaderAcceptEncoding), "gzip") {
			return
		}

		headers := ctx.Resp.Header()
		headers.Set(HeaderContentEncoding, "gzip")
		headers.Set(HeaderVary, HeaderAcceptEncoding)

		// We've made sure compression level is valid in prepareGzipOptions,
		// no need to check same error again.
		gz, err := gzip.NewWriterLevel(ctx.Resp, opt.CompressionLevel)
		if err != nil {
			panic(err.Error())
		}
		defer gz.Close()

		// Swap in the gzip-wrapping response writer everywhere the framework
		// might hand it out: the context, the injector, and the renderer.
		gzw := gzipResponseWriter{gz, ctx.Resp}
		ctx.Resp = gzw
		ctx.MapTo(gzw, (*http.ResponseWriter)(nil))

		if ctx.Render != nil {
			ctx.Render.SetResponseWriter(gzw)
		}

		ctx.Next()

		// delete content length after we know we have been written to
		gzw.Header().Del("Content-Length")
	}
}
func NewQueueItem(registry *registry.Registry, command string, source *document.DocumentID, target *document.DocumentID, sourceRange string, targetRange string, payload io.Reader) (*QueueItem, error) { buf := new(bytes.Buffer) w, _ := gzip.NewWriterLevel(buf, gzip.BestSpeed) if _, err := io.Copy(w, payload); err != nil { return nil, newQueueError("Queue Item gzip copy:", err) } if err := w.Close(); err != nil { return nil, newQueueError("Queue Item gzip close:", err) } item := &QueueItem{ Id: bson.NewObjectId(), Command: command, Status: "Queued", Source: source, Target: target, SourceRange: sourceRange, TargetRange: targetRange, Payload: buf.Bytes(), } if err := item.Save(registry); err != nil { return nil, newQueueError("Queue Item save:", err) } return item, nil }
func (server *Server) handleFreezeRequest(freq *freezeRequest, fs *frozenServer) { pr, pw := io.Pipe() freq.readCloser = pr freq.done <- true zw, err := gzip.NewWriterLevel(pw, gzip.BestCompression) if err != nil { if err = pw.CloseWithError(err); err != nil { log.Panicf("Unable to close PipeWriter: %v", err.String()) } return } enc := gob.NewEncoder(zw) err = enc.Encode(fs) if err != nil { if err = pw.CloseWithError(err); err != nil { log.Panicf("Unable to close PipeWriter: %v", err.String()) } } if err = pw.CloseWithError(zw.Close()); err != nil { log.Panicf("Unable to close PipeWriter: %v", err.String()) } }
func (output *BeegoOutput) Body(content []byte) { output_writer := output.res.(io.Writer) if output.EnableGzip == true && output.Context.Input.Header("Accept-Encoding") != "" { splitted := strings.SplitN(output.Context.Input.Header("Accept-Encoding"), ",", -1) encodings := make([]string, len(splitted)) for i, val := range splitted { encodings[i] = strings.TrimSpace(val) } for _, val := range encodings { if val == "gzip" { output.Header("Content-Encoding", "gzip") output_writer, _ = gzip.NewWriterLevel(output.res, gzip.BestSpeed) break } else if val == "deflate" { output.Header("Content-Encoding", "deflate") output_writer, _ = flate.NewWriter(output.res, flate.BestSpeed) break } } } else { output.Header("Content-Length", strconv.Itoa(len(content))) } output_writer.Write(content) switch output_writer.(type) { case *gzip.Writer: output_writer.(*gzip.Writer).Close() case *flate.Writer: output_writer.(*flate.Writer).Close() } }
func BenchmarkMarshalProtoGZIP(b *testing.B) { cs := &Changeset{ ID: 38162206, UserID: 2744209, User: "******", Change: loadChange(b, "testdata/changeset_38162206.osc"), } b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { data, err := cs.Marshal() if err != nil { b.Fatalf("unable to marshal: %v", err) } w, _ := gzip.NewWriterLevel(&bytes.Buffer{}, gzip.BestCompression) _, err = w.Write(data) if err != nil { b.Fatalf("unable to write: %v", err) } err = w.Close() if err != nil { b.Fatalf("unable to close: %v", err) } } }