func encodeQuery(query []byte) string {
	var compressedQuery bytes.Buffer
	w := zlib.NewWriter(&compressedQuery)
	w.Write(query)
	w.Close()
	return base64.URLEncoding.EncodeToString(compressedQuery.Bytes())
}
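// Not part of the original: a minimal sketch of the inverse of encodeQuery,
// assuming the same base64-URL + zlib framing. The decodeQuery name is
// hypothetical.
func decodeQuery(encoded string) ([]byte, error) {
	compressed, err := base64.URLEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}
	r, err := zlib.NewReader(bytes.NewReader(compressed))
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}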
func compress(data []byte) []byte {
	var compressedData bytes.Buffer
	writer := zlib.NewWriter(&compressedData)
	writer.Write(data)
	writer.Close()
	return compressedData.Bytes()
}
func (rs *RedisStorage) SetActionPlan(key string, ats *ActionPlan, overwrite bool) (err error) {
	if len(ats.ActionTimings) == 0 {
		// delete the key
		err = rs.db.Cmd("DEL", utils.ACTION_PLAN_PREFIX+key).Err
		cache2go.RemKey(utils.ACTION_PLAN_PREFIX + key)
		return err
	}
	if !overwrite {
		// get existing action plan to merge the account ids
		if existingAts, _ := rs.GetActionPlan(key, true); existingAts != nil {
			if ats.AccountIDs == nil && len(existingAts.AccountIDs) > 0 {
				ats.AccountIDs = make(utils.StringMap)
			}
			for accID := range existingAts.AccountIDs {
				ats.AccountIDs[accID] = true
			}
		}
	}
	result, err := rs.ms.Marshal(ats)
	if err != nil {
		return err
	}
	var b bytes.Buffer
	w := zlib.NewWriter(&b)
	w.Write(result)
	w.Close()
	return rs.db.Cmd("SET", utils.ACTION_PLAN_PREFIX+key, b.Bytes()).Err
}
func main() {
	app := cli.NewApp()
	app.Name = "zlib"
	app.Usage = "A command-line tool for using the zlib compression algorithm."
	app.Action = func(c *cli.Context) {
		var reader io.Reader = os.Stdin
		if c.Bool("decompress") {
			compressorReadCloser, err := zlib.NewReader(reader)
			if err != nil {
				exit(err.Error(), 1)
			}
			if _, err := io.Copy(os.Stdout, compressorReadCloser); err != nil {
				exit(err.Error(), 1)
			}
			compressorReadCloser.Close()
		} else {
			var writer io.Writer = os.Stdout
			compressorWriteCloser := zlib.NewWriter(writer)
			if _, err := io.Copy(compressorWriteCloser, reader); err != nil {
				exit(err.Error(), 1)
			}
			compressorWriteCloser.Close()
		}
	}
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "d, decompress",
			Usage: "Decompresses the input instead of compressing the output.",
		},
	}
	app.Run(os.Args)
}
func custom(log, cors, validate bool, f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		addr := r.RemoteAddr
		if ip, found := header(r, "X-Forwarded-For"); found {
			addr = ip
		}
		// compress settings
		ioWriter := w.(io.Writer)
		for _, val := range misc.ParseCsvLine(r.Header.Get("Accept-Encoding")) {
			if val == "gzip" {
				w.Header().Set("Content-Encoding", "gzip")
				g := gzip.NewWriter(w)
				defer g.Close()
				ioWriter = g
				break
			}
			if val == "deflate" {
				w.Header().Set("Content-Encoding", "deflate")
				z := zlib.NewWriter(w)
				defer z.Close()
				ioWriter = z
				break
			}
		}
		writer := &customResponseWriter{Writer: ioWriter, ResponseWriter: w, status: http.StatusOK}
		// route to the controllers
		f(writer, r)
		// access log
		if log && cfg.AccessLog {
			logs.Info.Printf("%s %s %s %s", addr, strconv.Itoa(writer.status), r.Method, r.URL)
		}
	}
}
// serializeChunkData produces the compressed chunk NBT data.
func serializeChunkData(w *nbtChunkWriter) (chunkData []byte, err error) {
	// Reserve room for the chunk data header at the start.
	buffer := bytes.NewBuffer(make([]byte, chunkDataHeaderSize, chunkDataGuessSize))
	zlibWriter := zlib.NewWriter(buffer)
	if err = nbt.Write(zlibWriter, w.RootTag()); err != nil {
		zlibWriter.Close()
		return nil, err
	}
	if err = zlibWriter.Close(); err != nil {
		return nil, err
	}
	chunkData = buffer.Bytes()

	// Write the chunk data header over the reserved bytes at the front;
	// bytes.NewBuffer(chunkData[:0]) reuses the same backing array.
	header := chunkDataHeader{
		DataSize: uint32(len(chunkData)) - chunkDataHeaderSize,
		Version:  chunkCompressionZlib,
	}
	buffer = bytes.NewBuffer(chunkData[:0])
	if err = binary.Write(buffer, binary.BigEndian, header); err != nil {
		return nil, err
	}

	return chunkData, nil
}
func custom(log, cors, validate bool, f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		// compress settings
		ioWriter := w.(io.Writer)
		for _, val := range misc.ParseCsvLine(r.Header.Get("Accept-Encoding")) {
			if val == "gzip" {
				w.Header().Set("Content-Encoding", "gzip")
				g := gzip.NewWriter(w)
				defer g.Close()
				ioWriter = g
				break
			}
			if val == "deflate" {
				w.Header().Set("Content-Encoding", "deflate")
				z := zlib.NewWriter(w)
				defer z.Close()
				ioWriter = z
				break
			}
		}
		writer := &customResponseWriter{Writer: ioWriter, ResponseWriter: w, status: 200}
		// route to the controllers
		f(writer, r)
	}
}
func Marshal(compression Compression, out io.Writer, v interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if s, ok := r.(string); ok {
				err = fmt.Errorf(s)
			} else {
				err = r.(error)
			}
		}
	}()

	if out == nil {
		panic(fmt.Errorf("nbt: Output stream is nil"))
	}

	switch compression {
	case Uncompressed:
		break
	case GZip:
		w := gzip.NewWriter(out)
		defer w.Close()
		out = w
	case ZLib:
		w := zlib.NewWriter(out)
		defer w.Close()
		out = w
	default:
		panic(fmt.Errorf("nbt: Unknown compression type: %d", compression))
	}

	writeRootTag(out, reflect.ValueOf(v))
	return
}
func (f *ZlibFilter) committer(fr FilterRunner, h PluginHelper, wg *sync.WaitGroup) {
	initBatch := make([]byte, 0, 10000)
	f.backChan <- initBatch
	var (
		tag string
		//ok bool
		outBatch []byte
	)
	tag = f.ZlibTag
	for outBatch = range f.batchChan {
		pack, e := h.PipelinePack(f.msgLoopCount)
		if e != nil {
			fr.LogError(e)
			break
		}
		var b bytes.Buffer
		w := zlib.NewWriter(&b)
		w.Write(outBatch)
		w.Close()
		tagField, _ := message.NewField("ZlibTag", tag, "")
		pack.Message.AddField(tagField)
		pack.Message.SetUuid(uuid.NewRandom())
		pack.Message.SetPayload(b.String())
		fr.Inject(pack)
		outBatch = outBatch[:0]
		f.backChan <- outBatch
	}
	wg.Done()
}
func (ms *MongoStorage) SetActionPlan(key string, ats *ActionPlan, overwrite bool) error {
	// clean dots from account ids map
	if len(ats.ActionTimings) == 0 {
		cache2go.RemKey(utils.ACTION_PLAN_PREFIX + key)
		err := ms.db.C(colApl).Remove(bson.M{"key": key})
		if err != mgo.ErrNotFound {
			return err
		}
		return nil
	}
	if !overwrite {
		// get existing action plan to merge the account ids
		if existingAts, _ := ms.GetActionPlan(key, true); existingAts != nil {
			if ats.AccountIDs == nil && len(existingAts.AccountIDs) > 0 {
				ats.AccountIDs = make(utils.StringMap)
			}
			for accID := range existingAts.AccountIDs {
				ats.AccountIDs[accID] = true
			}
		}
	}
	result, err := ms.ms.Marshal(ats)
	if err != nil {
		return err
	}
	var b bytes.Buffer
	w := zlib.NewWriter(&b)
	w.Write(result)
	w.Close()
	_, err = ms.db.C(colApl).Upsert(bson.M{"key": key}, &struct {
		Key   string
		Value []byte
	}{Key: key, Value: b.Bytes()})
	return err
}
func Compress(data []byte) bytes.Buffer {
	var b bytes.Buffer
	w := zlib.NewWriter(&b)
	w.Write(data)
	w.Close()
	return b
}
func (f *file) Close() error {
	if !f.closed {
		f.f.Lock()
		defer f.f.Unlock()
		if !f.closed {
			if f.f.Mode&ModeCompress != 0 {
				var buf bytes.Buffer
				zw := zlib.NewWriter(&buf)
				if _, err := zw.Write(f.data); err != nil {
					return err
				}
				if err := zw.Close(); err != nil {
					return err
				}
				if buf.Len() < len(f.data) {
					f.f.Data = buf.Bytes()
				} else {
					f.f.Mode &= ^ModeCompress
					f.f.Data = f.data
				}
			} else {
				f.f.Data = f.data
			}
			f.closed = true
		}
	}
	return nil
}
func compress(data []byte) []byte {
	var b bytes.Buffer
	w := zlib.NewWriter(&b)
	w.Write(data)
	w.Close()
	return b.Bytes()
}
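// Not in the original: the matching decompress sketch for compress above,
// assuming data holds a complete zlib stream.
func decompress(data []byte) ([]byte, error) {
	r, err := zlib.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}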
func (rs *RedisStorage) SetActionPlan(key string, ats *ActionPlan, overwrite bool, transactionID string) (err error) {
	cCommit := cacheCommit(transactionID)
	if len(ats.ActionTimings) == 0 {
		// delete the key
		err = rs.Cmd("DEL", utils.ACTION_PLAN_PREFIX+key).Err
		cache.RemKey(utils.ACTION_PLAN_PREFIX+key, cCommit, transactionID)
		return err
	}
	if !overwrite {
		// get existing action plan to merge the account ids
		if existingAts, _ := rs.GetActionPlan(key, true, transactionID); existingAts != nil {
			if ats.AccountIDs == nil && len(existingAts.AccountIDs) > 0 {
				ats.AccountIDs = make(utils.StringMap)
			}
			for accID := range existingAts.AccountIDs {
				ats.AccountIDs[accID] = true
			}
		}
		// do not keep this in cache (will be obsolete)
		cache.RemKey(utils.ACTION_PLAN_PREFIX+key, cCommit, transactionID)
	}
	result, err := rs.ms.Marshal(ats)
	if err != nil {
		return err
	}
	var b bytes.Buffer
	w := zlib.NewWriter(&b)
	w.Write(result)
	w.Close()
	err = rs.Cmd("SET", utils.ACTION_PLAN_PREFIX+key, b.Bytes()).Err
	cache.RemKey(utils.ACTION_PLAN_PREFIX+key, cCommit, transactionID)
	return
}
func save(r redis.AsyncClient, key string, obj interface{}, w http.ResponseWriter) {
	var b bytes.Buffer
	z := zlib.NewWriter(&b)
	je := json.NewEncoder(z)
	err := je.Encode(obj)
	if err != nil {
		log.Fatal("Failed to json Encode with error: ", err)
	}
	// Close (not just Flush) before reading the buffer, so the zlib
	// stream's checksum trailer is written and the data is decodable.
	z.Close()
	f, rerr := r.Set(key, b.Bytes())
	if rerr != nil {
		panic(rerr)
	}
	_, rerr, timeout := f.TryGet(50000000000)
	if rerr != nil {
		panic(rerr)
	}
	if timeout {
		savetimeout++
		log.Println("save timeout! count: ", savetimeout)
		fmt.Fprintf(w, "Save failed for %s", key)
	}
}
func encode(txt *mdnsTxt) ([]string, error) {
	b, err := json.Marshal(txt)
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer
	defer buf.Reset()

	w := zlib.NewWriter(&buf)
	if _, err := w.Write(b); err != nil {
		return nil, err
	}
	w.Close()

	encoded := hex.EncodeToString(buf.Bytes())

	// individual txt limit
	if len(encoded) <= 255 {
		return []string{encoded}, nil
	}

	// split encoded string
	var record []string
	for len(encoded) > 255 {
		record = append(record, encoded[:255])
		encoded = encoded[255:]
	}
	record = append(record, encoded)

	return record, nil
}
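// Not in the original: a hedged sketch of the inverse of encode above.
// It rejoins the TXT chunks, hex-decodes, inflates, and unmarshals.
// The decode name is hypothetical.
func decode(record []string) (*mdnsTxt, error) {
	encoded := strings.Join(record, "")
	hb, err := hex.DecodeString(encoded)
	if err != nil {
		return nil, err
	}
	zr, err := zlib.NewReader(bytes.NewReader(hb))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	rb, err := ioutil.ReadAll(zr)
	if err != nil {
		return nil, err
	}
	var txt *mdnsTxt
	if err := json.Unmarshal(rb, &txt); err != nil {
		return nil, err
	}
	return txt, nil
}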
/**
Attempts to encode the response according to the client's Accept-Encoding
header. If there is an error, or if the requested encodings aren't
supported, the original content is returned.

Encoding types:
  - deflate (zlib stream)
  - gzip

This should be the last module loaded.
*/
func EncodeResponse(ctx *Context, content interface{}) (interface{}, error) {
	var compressed bytes.Buffer
	var output io.WriteCloser
	if len(ctx.Request.Header["Accept-Encoding"]) > 0 {
		for _, opt := range ctx.Request.Header["Accept-Encoding"] {
			if strings.Index(opt, "gzip") >= 0 {
				output = gzip.NewWriter(&compressed)
				ctx.SetHeader("Content-Encoding", "gzip", true)
			} else if strings.Index(opt, "deflate") >= 0 {
				output = zlib.NewWriter(&compressed)
				ctx.SetHeader("Content-Encoding", "deflate", true)
			}
		}
	}
	if output != nil {
		_, err := output.Write(content.([]byte))
		if err != nil {
			ctx.Server.Logger.Printf("EncodeResponse write failed: %s", err)
			return content, &WebError{500, err.Error()}
		}
		// check the Close error too, since the compressor flushes on Close
		if err = output.Close(); err != nil {
			return content, &WebError{500, err.Error()}
		}
		return compressed.Bytes(), nil
	}
	return content, nil
}
func (i *Index) save(idxFile string) error {
	if idxFile == "" {
		err := fmt.Errorf("Yikes! Cannot save index to disk because no file was specified.")
		return err
	}
	fp, err := ioutil.TempFile(path.Dir(idxFile), "idx-build")
	if err != nil {
		return err
	}
	zfp := zlib.NewWriter(fp)
	i.m.RLock()
	defer i.m.RUnlock()
	enc := gob.NewEncoder(zfp)
	err = enc.Encode(i)
	zfp.Close()
	if err != nil {
		fp.Close()
		return err
	}
	err = fp.Close()
	if err != nil {
		// report the close failure rather than swallowing it as nil
		return err
	}
	return os.Rename(fp.Name(), idxFile)
}
// Package packs a raw byte string into the message.
func (msg *GxMessage) Package(buf []byte) error {
	l := len(buf)
	if l == 0 {
		return nil
	}
	var b bytes.Buffer
	c := false
	// below a minimum length, don't bother checking whether compression helps
	if l > 10 {
		w := zlib.NewWriter(&b)
		w.Write(buf)
		w.Close()
		c = true
	}
	// if the compressed form is smaller than the original, store the compressed data
	if c && b.Len() < l {
		msg.SetUnlen(uint16(l))
		msg.SetLen(uint16(b.Len()))
		msg.Data = make([]byte, b.Len())
		copy(msg.Data[:], b.Bytes())
	} else {
		msg.SetUnlen(uint16(l))
		msg.SetLen(uint16(l))
		msg.Data = make([]byte, l)
		copy(msg.Data[:], buf)
	}
	return nil
}
func gitFlattenObject(sha1 string) (io.Reader, error) {
	kind, err := gitCatKind(sha1)
	if err != nil {
		return nil, errgo.Notef(err, "flatten: kind(%s) failed", sha1)
	}
	size, err := gitCatSize(sha1)
	if err != nil {
		return nil, errgo.Notef(err, "flatten: size(%s) failed", sha1)
	}
	r, err := gitCatData(sha1, kind)
	if err != nil {
		return nil, errgo.Notef(err, "flatten: data(%s) failed", sha1)
	}
	// move to exp/git
	pr, pw := io.Pipe()
	go func() {
		zw := zlib.NewWriter(pw)
		if _, err := fmt.Fprintf(zw, "%s %d\x00", kind, size); err != nil {
			pw.CloseWithError(errgo.Notef(err, "writing git format header failed"))
			return
		}
		if _, err := io.Copy(zw, r); err != nil {
			pw.CloseWithError(errgo.Notef(err, "copying git data failed"))
			return
		}
		if err := zw.Close(); err != nil {
			pw.CloseWithError(errgo.Notef(err, "zlib close failed"))
			return
		}
		pw.Close()
	}()
	return pr, nil
}
func (d *Data) SetTileGrid(grid DataTileGrid) (err error) {
	var (
		buf        bytes.Buffer
		b64Encoder io.WriteCloser
		zlibWriter *zlib.Writer
		gids       []uint32
		gridTile   DataTileGridTile
	)
	d.Encoding = "base64"
	d.Compression = "zlib"
	d.RawTiles = []DataTile{}
	gids = make([]uint32, grid.Width*grid.Height)
	for y := 0; y < grid.Height; y++ {
		for x := 0; x < grid.Width; x++ {
			gridTile = grid.Tiles[x][y]
			gids[grid.Width*y+x] = encodeGid(
				gridTile.Id,
				gridTile.FlipX,
				gridTile.FlipY,
				gridTile.FlipD)
		}
	}
	b64Encoder = base64.NewEncoder(base64.StdEncoding, &buf)
	zlibWriter = zlib.NewWriter(b64Encoder)
	if err = binary.Write(zlibWriter, binary.LittleEndian, gids); err != nil {
		return
	}
	zlibWriter.Close()
	b64Encoder.Close()
	d.RawContents = buf.String()
	return
}
func (c *u_compress) ZlibCompress(src []byte) []byte {
	var buf bytes.Buffer
	w := zlib.NewWriter(&buf)
	w.Write(src)
	w.Close()
	return buf.Bytes()
}
// Encrypt encrypts a message and returns the encrypted msg (nonce + ciphertext).
// If you have enabled compression, it will compress the msg before encrypting it.
func (c SaltSecret) Encrypt(msg []byte) (out []byte, e error) {
	nonce := new([nonceSize]byte)
	_, err := io.ReadFull(rand.Reader, nonce[:])
	if err != nil {
		return nil, err
	}
	// We use the last bit of the nonce as a compression indicator.
	// This should still keep you safe (extremely rare collisions).
	nonce[23] &= ^compressBit
	if c.compress {
		nonce[23] |= compressBit
	}
	key, err := scrypt.Key(c.key, nonce[:], 2<<c.NPow, 8, 1, keySize)
	if err != nil {
		return nil, err
	}
	if c.compress {
		var b bytes.Buffer
		w := zlib.NewWriter(&b)
		w.Write(msg)
		w.Close()
		msg = b.Bytes()
	}
	out = make([]byte, nonceSize)
	copy(out, nonce[:])
	naclKey := new([keySize]byte)
	copy(naclKey[:], key)
	out = secretbox.Seal(out, msg, nonce, naclKey)
	return out, nil
}
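// Not in the original: a sketch of the matching Decrypt under the assumption
// that SaltSecret carries the same key, NPow, and constants used by Encrypt
// above. It reads the nonce, re-derives the key, opens the box, and inflates
// the plaintext if the compression bit is set.
func (c SaltSecret) Decrypt(msg []byte) ([]byte, error) {
	if len(msg) < nonceSize+secretbox.Overhead {
		return nil, fmt.Errorf("message too short")
	}
	nonce := new([nonceSize]byte)
	copy(nonce[:], msg[:nonceSize])
	key, err := scrypt.Key(c.key, nonce[:], 2<<c.NPow, 8, 1, keySize)
	if err != nil {
		return nil, err
	}
	naclKey := new([keySize]byte)
	copy(naclKey[:], key)
	out, ok := secretbox.Open(nil, msg[nonceSize:], nonce, naclKey)
	if !ok {
		return nil, fmt.Errorf("decryption failed")
	}
	if nonce[23]&compressBit != 0 {
		zr, err := zlib.NewReader(bytes.NewReader(out))
		if err != nil {
			return nil, err
		}
		defer zr.Close()
		return ioutil.ReadAll(zr)
	}
	return out, nil
}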
func (p *PdfDictionaryObj) Build() error {
	b, err := p.makeFont()
	if err != nil {
		//log.Panicf("%s", err.Error())
		return err
	}
	// compress the font program with zlib so it can be embedded
	// as a FlateDecode stream
	var zbuff bytes.Buffer
	zwriter := zlib.NewWriter(&zbuff)
	_, err = zwriter.Write(b)
	if err != nil {
		return err
	}
	zwriter.Close()
	p.buffer.WriteString("<</Length " + strconv.Itoa(zbuff.Len()) + "\n")
	p.buffer.WriteString("/Filter /FlateDecode\n")
	p.buffer.WriteString("/Length1 " + strconv.Itoa(len(b)) + "\n")
	p.buffer.WriteString(">>\n")
	p.buffer.WriteString("stream\n")
	p.buffer.Write(zbuff.Bytes())
	p.buffer.WriteString("\nendstream\n")
	return nil
}
func (i *FileIndex) Save() error {
	idxFile := i.file
	if idxFile == "" {
		err := fmt.Errorf("Yikes! Cannot save index to disk because no file was specified.")
		return err
	}
	if !i.updated {
		return nil
	}
	logger.Infof("Index has changed, saving to disk")
	fp, err := ioutil.TempFile(path.Dir(idxFile), "idx-build")
	if err != nil {
		return err
	}
	zfp := zlib.NewWriter(fp)
	i.m.RLock()
	defer i.m.RUnlock()
	i.updated = false
	enc := gob.NewEncoder(zfp)
	err = enc.Encode(i)
	zfp.Close()
	if err != nil {
		fp.Close()
		return err
	}
	err = fp.Close()
	if err != nil {
		return err
	}
	return os.Rename(fp.Name(), idxFile)
}
// Z returns the zlib compression estimate of complexity of a segment of s defined by
// start and end.
func Z(s seq.Sequence, start, end int) (cz float64, err error) {
	if start < s.Start() || end > s.End() {
		err = fmt.Errorf("complex: index out of range")
		return
	}
	if start == end {
		return 0, nil
	}
	bc := new(byteCounter)
	z := zlib.NewWriter(bc)
	it := s.Alphabet().LetterIndex()
	var N float64
	for i := start; i < end; i++ {
		if b := byte(s.At(i).L); it[b] >= 0 {
			N++
			z.Write([]byte{b})
		}
	}
	// Close once, explicitly, so all data is flushed through the counter
	// before the estimate is computed (the original also deferred a
	// redundant second Close).
	z.Close()
	cz = (float64(*bc - overhead)) / N
	return
}
func (c *Chunk) MarshallCompressed() []byte {
	var compressed bytes.Buffer
	w := zlib.NewWriter(&compressed)
	c.WriteTo(w)
	w.Close()
	return compressed.Bytes()
}
func primeKey(key string, r redis.AsyncClient) {
	path := "document.json"
	file, err := os.Open(path)
	if err != nil {
		panic(err)
	}
	// close the file handle once the document has been read
	defer file.Close()
	reader := bufio.NewReader(file)
	document, _ := ioutil.ReadAll(reader)
	var b bytes.Buffer
	z := zlib.NewWriter(&b)
	z.Write(document)
	z.Close()
	f, rerr := r.Set(key, b.Bytes())
	if rerr != nil {
		panic(rerr)
	}
	_, rerr, timeout := f.TryGet(50000000000)
	if rerr != nil {
		panic(rerr)
	}
	if timeout {
		savetimeout++
		log.Println("save timeout! count: ", savetimeout)
	}
}
func deflate(query string) []byte {
	var compressedQuery bytes.Buffer
	w := zlib.NewWriter(&compressedQuery)
	w.Write([]byte(query))
	w.Close()
	return compressedQuery.Bytes()
}
func writeEntry(w io.Writer, o Object) (err error) {
	var t byte
	t |= 0x80
	switch o.Type() {
	case "commit":
		t |= OBJ_COMMIT << 4
	case "tree":
		t |= OBJ_TREE << 4
	case "blob":
		t |= OBJ_BLOB << 4
	case "tag":
		t |= OBJ_TAG << 4
	}
	// the low four bits of the object size share the type byte
	t |= byte(uint64(len(o.Bytes())) &^ 0xfffffffffffffff0)
	sz := len(o.Bytes()) >> 4
	szb := make([]byte, 16)
	n := binary.PutUvarint(szb, uint64(sz))
	szb = szb[0:n]
	w.Write(append([]byte{t}, szb...))
	zw := zlib.NewWriter(w)
	defer zw.Close()
	if _, err = zw.Write(o.Bytes()); err != nil {
		return err
	}
	zw.Flush()
	return
}