// setBodyGzip gzip's the body. It accepts both strings and structs as body. // The latter will be encoded via json.Marshal. func (r *Request) setBodyGzip(body interface{}) error { switch b := body.(type) { case string: buf := new(bytes.Buffer) w := gzip.NewWriter(buf) if _, err := w.Write([]byte(b)); err != nil { return err } if err := w.Close(); err != nil { return err } r.Header.Add("Content-Encoding", "gzip") r.Header.Add("Vary", "Accept-Encoding") return r.setBodyReader(bytes.NewReader(buf.Bytes())) default: data, err := json.Marshal(b) if err != nil { return err } buf := new(bytes.Buffer) w := gzip.NewWriter(buf) if _, err := w.Write(data); err != nil { return err } if err := w.Close(); err != nil { return err } r.Header.Add("Content-Encoding", "gzip") r.Header.Add("Vary", "Accept-Encoding") r.Header.Set("Content-Type", "application/json") return r.setBodyReader(bytes.NewReader(buf.Bytes())) } }
func saveSplitPages(input <-chan *PageContainer) { featuredFile, err := os.Create(featuredFilePath) if err != nil { panic(err) } featuredCompressed := gzip.NewWriter(featuredFile) defer featuredFile.Close() normalFile, err := os.Create(normalFilePath) if err != nil { panic(err) } normalCompressed := gzip.NewWriter(normalFile) defer normalFile.Close() featuredChannel, featuredWriter := ArticleWriter(featuredCompressed) normalChannel, normalWriter := ArticleWriter(normalCompressed) //featuredChannel, featuredWriter := ArticleWriter(featuredFile) //normalChannel, normalWriter := ArticleWriter(normalFile) DistrbuteArticles(input, featuredChannel, normalChannel) // Wait for all writers to finish <-featuredWriter <-normalWriter // Close all the gzip streams // I tried defering the close // call but kept getting some EOF errors featuredCompressed.Close() normalCompressed.Close() }
func init() { RootDirectory.Set("/vfiles", func() *VDir { var dir = NewVDir("/vfiles", "../vfiles", "/home/alex/local/cmd/src/github.com/influx6/assets/vfiles", false) // register the sub-directories // register the files dir.AddFile(NewVFile("/home/alex/local/cmd/src/github.com/influx6/assets/tests", "/vfiles/vfiles.go", "../vfiles/vfiles.go", 18244, true, false, func(v *VFile) ([]byte, error) { fo, err := os.Open(v.RealPath()) if err != nil { return nil, fmt.Errorf("---> assets.readFile: Error reading file: %s at %s: %s\n", v.Name(), v.RealPath(), err) } defer fo.Close() var buf bytes.Buffer gz := gzip.NewWriter(&buf) _, err = io.Copy(gz, fo) gz.Close() if err != nil { return nil, fmt.Errorf("---> assets.readFile.gzip: Error gzipping file: %s at %s: %s\n", v.Name(), v.RealPath(), err) } return buf.Bytes(), nil })) dir.AddFile(NewVFile("/home/alex/local/cmd/src/github.com/influx6/assets/tests", "/vfiles/vfiles_test.go", "../vfiles/vfiles_test.go", 4132, true, false, func(v *VFile) ([]byte, error) { fo, err := os.Open(v.RealPath()) if err != nil { return nil, fmt.Errorf("---> assets.readFile: Error reading file: %s at %s: %s\n", v.Name(), v.RealPath(), err) } defer fo.Close() var buf bytes.Buffer gz := gzip.NewWriter(&buf) _, err = io.Copy(gz, fo) gz.Close() if err != nil { return nil, fmt.Errorf("---> assets.readFile.gzip: Error gzipping file: %s at %s: %s\n", v.Name(), v.RealPath(), err) } return buf.Bytes(), nil })) return dir }()) }
func TestAssetsDir(t *testing.T) { // For any given request to $FILE, we should return the first found of // - assetsdir/$THEME/$FILE // - compiled in asset $THEME/$FILE // - assetsdir/default/$FILE // - compiled in asset default/$FILE // The asset map contains compressed assets, so create a couple of gzip compressed assets here. buf := new(bytes.Buffer) gw := gzip.NewWriter(buf) gw.Write([]byte("default")) gw.Close() def := buf.Bytes() buf = new(bytes.Buffer) gw = gzip.NewWriter(buf) gw.Write([]byte("foo")) gw.Close() foo := buf.Bytes() e := &staticsServer{ theme: "foo", mut: sync.NewRWMutex(), assetDir: "testdata", assets: map[string][]byte{ "foo/a": foo, // overridden in foo/a "foo/b": foo, "default/a": def, // overridden in default/a (but foo/a takes precedence) "default/b": def, // overridden in default/b (but foo/b takes precedence) "default/c": def, }, } s := httptest.NewServer(e) defer s.Close() // assetsdir/foo/a exists, overrides compiled in expectURLToContain(t, s.URL+"/a", "overridden-foo") // foo/b is compiled in, default/b is overridden, return compiled in expectURLToContain(t, s.URL+"/b", "foo") // only exists as compiled in default/c so use that expectURLToContain(t, s.URL+"/c", "default") // only exists as overridden default/d so use that expectURLToContain(t, s.URL+"/d", "overridden-default") }
// Open is part of the intents.file interface. realBSONFiles need to have Open called before
// Read can be called
func (f *realBSONFile) Open() (err error) {
	if f.path == "" {
		// This should not occur normally. All realBSONFile's should have a path
		return fmt.Errorf("error creating BSON file without a path, namespace: %v", f.intent.Namespace())
	}
	// Ensure the parent directory exists before creating the file.
	err = os.MkdirAll(filepath.Dir(f.path), os.ModeDir|os.ModePerm)
	if err != nil {
		return fmt.Errorf("error creating directory for BSON file %v: %v", filepath.Dir(f.path), err)
	}
	fileName := f.path
	file, err := os.Create(fileName)
	if err != nil {
		return fmt.Errorf("error creating BSON file %v: %v", fileName, err)
	}
	var writeCloser io.WriteCloser
	if f.gzip {
		// NOTE(review): the gzip path is not additionally buffered,
		// unlike the plain-file path below — presumably gzip's own
		// internal buffering is considered sufficient; confirm.
		writeCloser = gzip.NewWriter(file)
	} else {
		// wrap writer in buffer to reduce load on disk
		writeCloser = writeFlushCloser{
			atomicFlusher{
				bufio.NewWriterSize(file, 32*1024),
			},
		}
	}
	// Keep a handle on the raw file so the wrapper can close it after
	// the outer (gzip or buffered) writer has been closed.
	f.WriteCloser = &wrappedWriteCloser{
		WriteCloser: writeCloser,
		inner:       file,
	}
	return nil
}
// file name filelist is like this: './source/file' func TarFilelist(filelist []string, case_dir string, object_name string) (tar_url string) { tar_url = path.Join(case_dir, object_name) + ".tar.gz" fw, err := os.Create(tar_url) if err != nil { fmt.Println("Failed in create tar file ", err) return tar_url } defer fw.Close() gw := gzip.NewWriter(fw) defer gw.Close() tw := tar.NewWriter(gw) defer tw.Close() for index := 0; index < len(filelist); index++ { source_file := filelist[index] fi, err := os.Stat(path.Join(case_dir, source_file)) if err != nil { fmt.Println(err) continue } fr, err := os.Open(path.Join(case_dir, source_file)) if err != nil { fmt.Println(err) continue } h := new(tar.Header) h.Name = source_file h.Size = fi.Size() h.Mode = int64(fi.Mode()) h.ModTime = fi.ModTime() err = tw.WriteHeader(h) _, err = io.Copy(tw, fr) } return tar_url }
func RunTestGzip(data []byte) { log.Printf("encoding/RunTestGzip: Testing comprssion Gzip\n") var compressed bytes.Buffer w := gzip.NewWriter(&compressed) defer w.Close() now := time.Now() w.Write(data) cl := compressed.Len() log.Printf("encoding/RunTestGzip: Compressed from %d bytes to %d bytes in %d ns\n", len(data), cl, time.Since(now).Nanoseconds()) recovered := make([]byte, len(data)) r, _ := gzip.NewReader(&compressed) defer r.Close() total := 0 n := 100 var err error = nil for err != io.EOF && n != 0 { n, err = r.Read(recovered[total:]) total += n } log.Printf("encoding/RunTestGzip: Uncompressed from %d bytes to %d bytes in %d ns\n", cl, len(recovered), time.Since(now).Nanoseconds()) }
func NewCompressedResponseWriter(responseWriter http.ResponseWriter) *CompressedResponseWriter { crw := &CompressedResponseWriter{ responseWriter: responseWriter, gz: gzip.NewWriter(responseWriter), } return crw }
// checksumForGraphIDNoTarsplit streams the layer identified by id
// (relative to parent), computing its DiffID digest while also writing a
// fresh tar-split metadata file (gzip-compressed JSON) to newTarDataPath.
// size receives the packed metadata size via packSizeCounter.
//
// The bare `return` statements rely on the named results: on error,
// diffID and size are left as zero values.
func (c *checksums) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID layer.DiffID, size int64, err error) {
	rawarchive, err := c.driver.TarStream(id, parent)
	if err != nil {
		return
	}
	defer rawarchive.Close()

	f, err := os.Create(newTarDataPath)
	if err != nil {
		return
	}
	defer f.Close()
	// Deferred closes run mfz before f (LIFO), so the gzip trailer is
	// flushed before the file handle is released.
	mfz := gzip.NewWriter(f)
	defer mfz.Close()
	metaPacker := storage.NewJSONPacker(mfz)

	// Counts packed metadata bytes into the named `size` result.
	packerCounter := &packSizeCounter{metaPacker, &size}

	// Re-assemble the tar stream, teeing entries into the packer.
	archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
	if err != nil {
		return
	}
	dgst, err := digest.FromReader(archive)
	if err != nil {
		return
	}
	diffID = layer.DiffID(dgst)
	return
}
func gzipStr(s string) []byte { var b bytes.Buffer w := gzip.NewWriter(&b) io.WriteString(w, s) w.Close() return b.Bytes() }
// openLogFile opens (rotating existing files out of the way) a JSON log
// file named fileName+fileType and wires up the buffered / gzip / JSON
// encoder chain on it. rotateMode/maxSize configure size-based rotation.
//
// NOTE(review): the rotation naming looks inconsistent — when fullName
// exists it is renamed to ".01" but fullName is then set to ".02"
// without checking whether ".02" already exists; confirm the intended
// rotation semantics before relying on this.
func openLogFile(fileName, fileType string, compress bool, rotateMode int, maxSize int64) (*jsonFile, error) {
	fullName := fileName + fileType
	if _, err := os.Stat(fullName); err == nil {
		// Base name taken: shift it to ".01" and target ".02".
		os.Rename(fullName, fileName+".01"+fileType)
		fullName = fileName + ".02" + fileType
	} else if _, err := os.Stat(fileName + ".01" + fileType); err == nil {
		// Rotation has happened before: pick the first free ".NN" slot.
		for fileId := 1; true; fileId++ {
			fullName = fileName + fmt.Sprintf(".%02d", fileId) + fileType
			if _, err := os.Stat(fullName); err != nil {
				break
			}
		}
	}
	file, err := os.OpenFile(fullName, os.O_WRONLY|os.O_CREATE, 0755)
	if err != nil {
		return nil, err
	}
	jsonfile := &jsonFile{file: file, curFile: fullName, rotateMode: rotateMode}
	if compress {
		// Writer chain: json encoder -> gzip -> bufio -> file.
		jsonfile.bufio = bufio.NewWriter(jsonfile.file)
		jsonfile.gzip = gzip.NewWriter(jsonfile.bufio)
		jsonfile.json = json.NewEncoder(jsonfile.gzip)
	} else {
		// Writer chain: json encoder -> bufio -> file.
		jsonfile.bufio = bufio.NewWriter(jsonfile.file)
		jsonfile.json = json.NewEncoder(jsonfile.bufio)
	}
	if jsonfile.rotateMode == ROTATE_BY_SIZE && maxSize == 0 {
		// Default size cap when none was given: 25 GiB.
		jsonfile.maxSize = 1024 * 1024 * 1024 * 25
	} else {
		jsonfile.maxSize = maxSize
	}
	return jsonfile, nil
}
// TarGz implementation of Archiver. func TarGz(archiveFilename string, itemsToArchive []ArchiveItem) error { // file write fw, err := os.Create(archiveFilename) if err != nil { return err } defer fw.Close() // gzip write gw := gzip.NewWriter(fw) defer gw.Close() // tar write tw := tar.NewWriter(gw) defer tw.Close() for _, item := range itemsToArchive { err = addItemToTarGz(item, tw) if err != nil { return err } } err = tw.Close() return err }
func walkerFor(basePath string) filepath.WalkFunc { return func(name string, info os.FileInfo, err error) error { if err != nil { return err } if strings.HasPrefix(filepath.Base(name), ".") { // Skip dotfiles return nil } if info.Mode().IsRegular() { fd, err := os.Open(name) if err != nil { return err } var buf bytes.Buffer gw := gzip.NewWriter(&buf) io.Copy(gw, fd) fd.Close() gw.Flush() gw.Close() name, _ = filepath.Rel(basePath, name) assets = append(assets, asset{ Name: filepath.ToSlash(name), Data: base64.StdEncoding.EncodeToString(buf.Bytes()), }) } return nil } }
func (c *dynamoDBCollector) Add(ctx context.Context, rep report.Report) error { userid, err := c.userIDer(ctx) if err != nil { return err } var buf bytes.Buffer writer := gzip.NewWriter(&buf) if err := codec.NewEncoder(writer, &codec.MsgpackHandle{}).Encode(&rep); err != nil { return err } writer.Close() now := time.Now() rowKey := fmt.Sprintf("%s-%s", userid, strconv.FormatInt(now.UnixNano()/time.Hour.Nanoseconds(), 10)) _, err = c.db.PutItem(&dynamodb.PutItemInput{ TableName: aws.String(tableName), Item: map[string]*dynamodb.AttributeValue{ hourField: { S: aws.String(rowKey), }, tsField: { N: aws.String(strconv.FormatInt(now.UnixNano(), 10)), }, reportField: { B: buf.Bytes(), }, }, }) if err != nil { return err } return nil }
// HandleGetHasRefs answers a POST form containing one or more "ref"
// values with one plain-text line per ref, "<ref> <true|false>",
// reporting whether the datastore's chunk store has that ref. The
// response is gzip-compressed when the client accepts it. Any failure
// inside d.Try is reported as HTTP 400.
func HandleGetHasRefs(w http.ResponseWriter, req *http.Request, ps URLParams, ds DataStore) {
	err := d.Try(func() {
		d.Exp.Equal("POST", req.Method)
		req.ParseForm()
		refStrs := req.PostForm["ref"]
		d.Exp.True(len(refStrs) > 0)

		refs := make([]ref.Ref, len(refStrs))
		for idx, refStr := range refStrs {
			refs[idx] = ref.Parse(refStr)
		}

		w.Header().Add("Content-Type", "text/plain")
		writer := w.(io.Writer)
		if strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
			w.Header().Add("Content-Encoding", "gzip")
			gw := gzip.NewWriter(w)
			defer gw.Close()
			writer = gw
		}

		// NOTE(review): sz is created and closed but the result lines
		// are written straight to writer, not through the serializer —
		// confirm whether NewSerializer/Close have required side
		// effects, otherwise sz looks removable.
		sz := chunks.NewSerializer(writer)
		for _, r := range refs {
			has := ds.transitionalChunkStore().Has(r)
			fmt.Fprintf(writer, "%s %t\n", r, has)
		}
		sz.Close()
	})
	if err != nil {
		http.Error(w, fmt.Sprintf("Error: %v", err), http.StatusBadRequest)
		return
	}
}
// writeACI packages the prepared ACI directory into a gzip-compressed
// tar image file and returns the file's name.
func (cmd *Builder) writeACI() (string, error) {
	mode := os.O_CREATE | os.O_WRONLY | os.O_TRUNC
	filename, err := cmd.custom.GetImageFileName()
	if err != nil {
		return "", err
	}
	of, err := os.OpenFile(filename, mode, 0644)
	if err != nil {
		return "", fmt.Errorf("Error opening output file: %v", err)
	}
	defer of.Close()
	gw := gzip.NewWriter(of)
	defer gw.Close()
	// tr is a tar *writer* despite the reader-ish name.
	tr := tar.NewWriter(gw)
	defer tr.Close()
	// FIXME: the files in the tar archive are added with the
	// wrong uid/gid. The uid/gid of the aci builder leaks in the
	// tar archive. See: https://github.com/appc/goaci/issues/16
	iw := aci.NewImageWriter(*cmd.manifest, tr)

	paths := cmd.custom.GetCommonPaths()
	if err := filepath.Walk(paths.AciDir, aci.BuildWalker(paths.AciDir, iw, nil)); err != nil {
		return "", err
	}
	if err := iw.Close(); err != nil {
		return "", err
	}
	// NOTE(review): the deferred tr/gw/of closes drop their errors, so
	// a failed final flush could go unreported — consider named-return
	// close checking.
	return of.Name(), nil
}
func writeResponse(w http.ResponseWriter, r *http.Request, status int, resp interface{}) { // Headers must be written before the response. header := w.Header() header.Set("Content-Type", "application/json;charset=utf-8") header.Set("Server", "clair") // Gzip the response if the client supports it. var writer io.Writer = w if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { gzipWriter := gzip.NewWriter(w) defer gzipWriter.Close() writer = gzipWriter header.Set("Content-Encoding", "gzip") } // Write the response. w.WriteHeader(status) err := json.NewEncoder(writer).Encode(resp) if err != nil { switch err.(type) { case *json.MarshalerError, *json.UnsupportedTypeError, *json.UnsupportedValueError: panic("v1: failed to marshal response: " + err.Error()) default: log.Warningf("failed to write response: %s", err.Error()) } } }
func CreateTar() (buf bytes.Buffer, err error) { // Create a new tar archive. gw := gzip.NewWriter(&buf) defer gw.Close() tw := tar.NewWriter(gw) defer tw.Close() // Add some files to the archive. var files = []struct { Name, Body string }{ {"readme.txt", "This archive contains some text files."}, {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, {"todo.txt", "Get animal handling licence."}, } for _, file := range files { hdr := &tar.Header{ Name: file.Name, Mode: 0600, Size: int64(len(file.Body)), } if err = tw.WriteHeader(hdr); err != nil { return } if _, err = tw.Write([]byte(file.Body)); err != nil { return } } return }
func GzipItem(filename string) error { gzipItem, err := os.Open(filename) if err != nil { return err } defer gzipItem.Close() //todo use tgz for tars? gzipFilename := filename + ".gz" gzf, err := os.Create(gzipFilename) if err != nil { return err } defer gzf.Close() gzw := gzip.NewWriter(gzf) defer gzw.Close() gzw.Header.Comment = "file compressed by someutils-gzip" gzw.Header.Name = filepath.Base(filename) _, err = io.Copy(gzw, gzipItem) if err != nil { return err } //get error where possible err = gzw.Close() if err != nil { return err } return nil }
// ServeHTTP serves a gzipped response if the client supports it.
func (g Gzip) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
	if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
		return g.Next.ServeHTTP(w, r)
	}

	// Delete this header so gzipping isn't repeated later in the chain
	r.Header.Del("Accept-Encoding")

	w.Header().Set("Content-Encoding", "gzip")
	gzipWriter := gzip.NewWriter(w)
	defer gzipWriter.Close()
	gz := gzipResponseWriter{Writer: gzipWriter, ResponseWriter: w}

	// Any response in forward middleware will now be compressed
	status, err := g.Next.ServeHTTP(gz, r)

	// If there was an error that remained unhandled, we need
	// to send something back before gzipWriter gets closed at
	// the return of this method!
	if status >= 400 {
		gz.Header().Set("Content-Type", "text/plain") // very necessary
		gz.WriteHeader(status)
		fmt.Fprintf(gz, "%d %s", status, http.StatusText(status))
		// Returning 0 signals to the chain that the error response was
		// already written here.
		return 0, err
	}
	return status, err
}
func (a *Archive) writer() (*tharWriter, error) { writer := io.Writer(a.Stream) flushers := []flushableWriter{} closers := []closeableWriter{} if a.Options.GZip { if a.Options.GZipLevel > 0 { gw, err := gzip.NewWriterLevel(writer, a.Options.GZipLevel) if err != nil { return nil, err } flushers = append([]flushableWriter{gw}, flushers...) closers = append([]closeableWriter{gw}, closers...) writer = gw } else { writer = gzip.NewWriter(writer) } } tw := tar.NewWriter(writer) flushers = append([]flushableWriter{tw}, flushers...) return &tharWriter{ Writer: tw, Flushers: flushers, Closers: closers, }, nil }
// JSON adapts a handler returning (value, error) into an http.Handler
// that encodes the value as JSON — or JSONP when a "callback" form value
// is present — gzip-compressing the body when the client accepts it.
// A nil value with nil error means the handler wrote its own response.
func JSON(h func(miniprofiler.Timer, http.ResponseWriter, *http.Request) (interface{}, error)) http.Handler {
	return miniprofiler.NewHandler(func(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
		d, err := h(t, w, r)
		if err != nil {
			serveError(w, err)
			return
		}
		if d == nil {
			return
		}
		// Encode into a buffer first so an encoding failure can still be
		// turned into an error response.
		buf := new(bytes.Buffer)
		if err := json.NewEncoder(buf).Encode(d); err != nil {
			log.Println(err)
			serveError(w, err)
			return
		}
		var tw io.Writer = w
		if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			w.Header().Set("Content-Encoding", "gzip")
			gz := gzip.NewWriter(w)
			defer gz.Close()
			tw = gz
		}
		// JSONP: wrap the payload in the requested callback function.
		if cb := r.FormValue("callback"); cb != "" {
			w.Header().Add("Content-Type", "application/javascript")
			tw.Write([]byte(cb + "("))
			buf.WriteTo(tw)
			tw.Write([]byte(")"))
			return
		}
		w.Header().Add("Content-Type", "application/json")
		buf.WriteTo(tw)
	})
}
func newTestACI(usedotslash bool) (*os.File, error) { tf, err := ioutil.TempFile("", "") if err != nil { return nil, err } manifestBody := `{"acKind":"ImageManifest","acVersion":"0.8.1","name":"example.com/app"}` gw := gzip.NewWriter(tf) tw := tar.NewWriter(gw) manifestPath := "manifest" if usedotslash { manifestPath = "./" + manifestPath } hdr := &tar.Header{ Name: manifestPath, Size: int64(len(manifestBody)), } if err := tw.WriteHeader(hdr); err != nil { return nil, err } if _, err := tw.Write([]byte(manifestBody)); err != nil { return nil, err } if err := tw.Close(); err != nil { return nil, err } if err := gw.Close(); err != nil { return nil, err } return tf, nil }
// writeIndex calls x.Write with the index's backing file. func writeIndex(fs rwvfs.FileSystem, name string, x persistedIndex) (err error) { vlog.Printf("%s: writing index...", name) f, err := fs.Create(fmt.Sprintf(indexFilename, name)) if err != nil { return err } defer func() { err2 := f.Close() if err == nil { err = err2 } }() w := gzip.NewWriter(f) if err := x.Write(w); err != nil { return err } if err := w.Flush(); err != nil { return err } if err := w.Close(); err != nil { return err } vlog.Printf("%s: done writing index.", name) return nil }
// Archive writes the executable files found in the given directory in // gzipped tar format to w. func Archive(w io.Writer, dir string) error { entries, err := ioutil.ReadDir(dir) if err != nil { return err } gzw := gzip.NewWriter(w) defer closeErrorCheck(&err, gzw) tarw := tar.NewWriter(gzw) defer closeErrorCheck(&err, tarw) for _, ent := range entries { h := tarHeader(ent) logger.Debugf("adding entry: %#v", h) // ignore local umask if isExecutable(ent) { h.Mode = 0755 } else { h.Mode = 0644 } err := tarw.WriteHeader(h) if err != nil { return err } fileName := filepath.Join(dir, ent.Name()) if err := copyFile(tarw, fileName); err != nil { return err } } return nil }
// NewGzipResponseWriter creates a new Writer, which gzips data passed to h. func NewGzipResponseWriter(w http.ResponseWriter) *GzipResponseWriter { w.Header().Set("Content-Encoding", "gzip") gz := gzip.NewWriter(w) return &GzipResponseWriter{ Writer: gz, ResponseWriter: w, gz: gz, } }
func (s *uploadSuite) createArchive(c *gc.C) { archive, err := os.Create(s.filename) c.Assert(err, jc.ErrorIsNil) defer archive.Close() compressed := gzip.NewWriter(archive) defer compressed.Close() tarball := tar.NewWriter(compressed) defer tarball.Close() var files = []struct{ Name, Body string }{ {"root.tar", "<state config files>"}, {"dump/oplog.bson", "<something here>"}, } for _, file := range files { hdr := &tar.Header{ Name: file.Name, Size: int64(len(file.Body)), } err := tarball.WriteHeader(hdr) c.Assert(err, jc.ErrorIsNil) _, err = tarball.Write([]byte(file.Body)) c.Assert(err, jc.ErrorIsNil) } }
// getArchiveOut resolves the archive output target for the dump: "-"
// means stdout (wrapped so Close is a no-op); a directory target gets a
// default "archive[.gz]" file inside it; otherwise the named file is
// created. When gzip is requested, the returned writer compresses and
// its Close also closes the underlying file.
func (dump *MongoDump) getArchiveOut() (out io.WriteCloser, err error) {
	if dump.OutputOptions.Archive == "-" {
		out = &nopCloseWriter{dump.stdout}
	} else {
		// NOTE(review): this := shadows the named err; harmless here
		// because every failure path below returns explicitly.
		targetStat, err := os.Stat(dump.OutputOptions.Archive)
		if err == nil && targetStat.IsDir() {
			// Target is a directory: write "<dir>/archive[.gz]".
			defaultArchiveFilePath := filepath.Join(dump.OutputOptions.Archive, "archive")
			if dump.OutputOptions.Gzip {
				defaultArchiveFilePath = defaultArchiveFilePath + ".gz"
			}
			out, err = os.Create(defaultArchiveFilePath)
			if err != nil {
				return nil, err
			}
		} else {
			out, err = os.Create(dump.OutputOptions.Archive)
			if err != nil {
				return nil, err
			}
		}
	}
	if dump.OutputOptions.Gzip {
		return &wrappedWriteCloser{
			WriteCloser: gzip.NewWriter(out),
			inner:       out,
		}, nil
	}
	return out, nil
}
func NewDscout(waiter *sync.WaitGroup, filename string) *Dscout { d := new(Dscout) d.Operation.Waiter = waiter d.Operation.Waiter.Add(1) file, err := os.Create(filename) if err != nil { return nil } d.closer = func() { file.Close() } var writer io.WriteCloser = file var compressor *gzip.Writer if strings.HasSuffix(filename, ".gz") { compressor = gzip.NewWriter(file) d.closer = func() { compressor.Close(); file.Close() } writer = compressor } uncompressed_name := strings.TrimRight(filename, ".gz") switch { case strings.HasSuffix(uncompressed_name, ".gob"): d.marshaler = new(formats.GobMarshaler) case strings.HasSuffix(uncompressed_name, ".xml"): d.marshaler = new(formats.XmlMarshaler) } if d.marshaler != nil { d.marshaler.InitFile(writer) } return d }
// custom wraps f with common request middleware: client address
// extraction from X-Forwarded-For, gzip/deflate response compression
// negotiated from Accept-Encoding, response-status capture, and
// optional access logging.
func custom(log, cors, validate bool, f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		addr := r.RemoteAddr
		if ip, found := header(r, "X-Forwarded-For"); found {
			addr = ip
		}

		// compress settings
		// NOTE(review): values are compared verbatim, so entries
		// carrying q-values (e.g. "gzip;q=0") will not match — confirm
		// whether ParseCsvLine strips parameters.
		ioWriter := w.(io.Writer)
		for _, val := range misc.ParseCsvLine(r.Header.Get("Accept-Encoding")) {
			if val == "gzip" {
				w.Header().Set("Content-Encoding", "gzip")
				g := gzip.NewWriter(w)
				defer g.Close()
				ioWriter = g
				break
			}
			if val == "deflate" {
				// HTTP "deflate" means zlib-wrapped deflate data.
				w.Header().Set("Content-Encoding", "deflate")
				z := zlib.NewWriter(w)
				defer z.Close()
				ioWriter = z
				break
			}
		}
		writer := &customResponseWriter{Writer: ioWriter, ResponseWriter: w, status: http.StatusOK}

		// route to the controllers
		f(writer, r)

		// access log
		if log && cfg.AccessLog {
			logs.Info.Printf("%s %s %s %s", addr, strconv.Itoa(writer.status), r.Method, r.URL)
		}
	}
}