func testCompress(t *testing.T) {
    assert := assert.New(t)
    before := `
2e 00 00 00 03 73 65 6c 65 63 74 20 22 30 31 32    .....select "012
33 34 35 36 37 38 39 30 31 32 33 34 35 36 37 38    3456789012345678
39 30 31 32 33 34 35 36 37 38 39 30 31 32 33 34    9012345678901234
35 22                                               5"
`
    after := `
22 00 00 00 32 00 00 78 9c d3 63 60 60 60 2e 4e    "...2..x..c....N
cd 49 4d 2e 51 50 32 30 34 32 36 31 35 33 b7 b0    .IM.QP20426153..
c4 cd 52 02 00 0c d1 0a 6c                          ..R.....l
`
    _, _ = before, after
    {
        data := bytes.NewBuffer(DecodeDump(after))
        data.Read(make([]byte, 7))
        r, err := zlib.NewReader(data)
        assert.NoError(err)
        b, err := ioutil.ReadAll(r)
        assert.NoError(err)
        assert.EqualValues(DecodeDump(before), b)
    }
    {
        var data bytes.Buffer
        w, err := zlib.NewWriterLevel(&data, zlib.BestCompression)
        assert.NoError(err)
        h, err := hex.DecodeString(strings.Replace("22 00 00 00 32 00 00", " ", "", -1))
        assert.NoError(err)
        data.Write(h)
        w.Write(DecodeDump(before))
        w.Close() // Important
        fmt.Println(hex.Dump(data.Bytes()))
        fmt.Println(hex.Dump(DecodeDump(after)))
        // fmt.Println(hex.Dump(DecodeDump(before)))
        // Not byte-for-byte equal, because a 00 00 FF FF deflate block boundary gets flushed.
        // assert.EqualValues(DecodeDump(after), data.Bytes())
        data.Read(make([]byte, 7))
        fmt.Println(hex.Dump(data.Bytes()))
        r, err := zlib.NewReader(&data)
        assert.NoError(err)
        b, err := ioutil.ReadAll(r)
        assert.NoError(err)
        r.Close()
        assert.EqualValues(DecodeDump(before), b)
    }
}
func load(r redis.AsyncClient, k string, w http.ResponseWriter) (obj interface{}) {
    f, rerr := r.Get(k)
    if rerr != nil {
        panic(rerr)
    }
    val, rerr, timeout := f.TryGet(50000000000)
    if rerr != nil {
        panic(rerr)
    }
    if timeout {
        loadtimeout++
        log.Println("load timeout! count: ", loadtimeout)
        fmt.Fprintf(w, "Load failed for %s", k)
        return
    }
    zr, err := zlib.NewReader(bytes.NewReader(val))
    if err != nil {
        log.Fatal("Failed to create zlib reader with error: ", err)
    }
    defer zr.Close()
    jd := json.NewDecoder(zr)
    err = jd.Decode(&obj)
    if err != nil {
        log.Fatal("Failed to decode json with error: ", err)
    }
    return
}
func (this *PacketGeneric) Decompress() (err error) {
    if !this.compressed {
        return
    }
    buffer := bytes.NewReader(this.Bytes)
    _, err = packet.ReadVarInt(buffer) // compression length
    if err != nil {
        return
    }
    zlibReader, err := zlib.NewReader(buffer)
    if err != nil {
        return
    }
    _, err = packet.ReadVarInt(zlibReader) // id
    if err != nil {
        return
    }
    decompressed, err := ioutil.ReadAll(zlibReader)
    if err != nil {
        return
    }
    this.Bytes = decompressed
    this.compressed = false
    return
}
func (rs *RedisStorage) GetActionPlan(key string, skipCache bool) (ats *ActionPlan, err error) {
    key = utils.ACTION_PLAN_PREFIX + key
    if !skipCache {
        if x, err := cache2go.Get(key); err == nil {
            return x.(*ActionPlan), nil
        } else {
            return nil, err
        }
    }
    var values []byte
    if values, err = rs.db.Cmd("GET", key).Bytes(); err == nil {
        b := bytes.NewBuffer(values)
        r, err := zlib.NewReader(b)
        if err != nil {
            return nil, err
        }
        out, err := ioutil.ReadAll(r)
        if err != nil {
            return nil, err
        }
        r.Close()
        err = rs.ms.Unmarshal(out, &ats)
        cache2go.Cache(key, ats)
    }
    return
}
func (rs *RedisStorage) GetDestination(key string) (dest *Destination, err error) {
    key = utils.DESTINATION_PREFIX + key
    var values []byte
    if values, err = rs.db.Get(key); len(values) > 0 && err == nil {
        b := bytes.NewBuffer(values)
        r, err := zlib.NewReader(b)
        if err != nil {
            return nil, err
        }
        out, err := ioutil.ReadAll(r)
        if err != nil {
            return nil, err
        }
        r.Close()
        dest = new(Destination)
        err = rs.ms.Unmarshal(out, dest)
        // create optimized structure
        for _, p := range dest.Prefixes {
            cache2go.CachePush(utils.DESTINATION_PREFIX+p, dest.Id)
        }
    } else {
        return nil, errors.New("not found")
    }
    return
}
func newLooseObjectEntry(root string, id SHA1) (*looseObjectEntry, error) {
    s := id.String()
    path := filepath.Join(root, "objects", s[:2], s[2:])
    e := new(looseObjectEntry)
    file, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    e.f = file
    zr, err := zlib.NewReader(file)
    if err != nil {
        file.Close()
        return nil, err
    }
    e.zr = zr
    var bs []byte
    e.br = bufio.NewReader(zr)
    if bs, err = e.br.ReadBytes(' '); err != nil {
        e.Close()
        return nil, err
    }
    e.typ = string(bs[:len(bs)-1])
    if _, err = e.br.ReadBytes(0); err != nil {
        e.Close()
        return nil, err
    }
    return e, nil
}
func readCompressed(r *io.SectionReader, offset int64, s []byte) (int, error) {
    zr, err := zlib.NewReader(io.NewSectionReader(r, offset, r.Size()-offset))
    if err != nil {
        return 0, err
    }
    return io.ReadFull(zr, s)
}
// recv gets a message from the connection.
func (conn *Conn) recv() (s []byte, err error) {
    // A message is:
    // - a uint32 length
    // - a byte boolean for compression
    // - length-5 bytes of data (plain or zlib compressed)
    var buf [5]byte
    _, err = io.ReadFull(conn.r, buf[:])
    if err != nil {
        return nil, err
    }
    length := binary.BigEndian.Uint32(buf[:4])
    isCompressed := buf[4] == 1
    if length >= 32<<20 {
        return nil, errMsgTooLarge
    }
    s = make([]byte, length-5)
    _, err = io.ReadFull(conn.r, s)
    if err != nil {
        return
    }
    if isCompressed {
        zr, err := zlib.NewReader(bytes.NewBuffer(s))
        if err != nil {
            return s, err
        }
        return ioutil.ReadAll(zr)
    }
    return s, nil
}
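// A minimal sketch of the sending side that the framing in recv implies: a
// big-endian uint32 total length (header included), one compression-flag
// byte, then the payload, zlib-compressed when the flag is 1. The function
// name and the size threshold below are assumptions for illustration, not
// part of the original connection type.
func sendFramed(w io.Writer, payload []byte) error {
    compressed := byte(0)
    body := payload
    if len(payload) > 1024 { // assumed threshold: compress larger messages
        var buf bytes.Buffer
        zw := zlib.NewWriter(&buf)
        if _, err := zw.Write(payload); err != nil {
            return err
        }
        if err := zw.Close(); err != nil { // flush the zlib trailer
            return err
        }
        body = buf.Bytes()
        compressed = 1
    }
    var header [5]byte
    binary.BigEndian.PutUint32(header[:4], uint32(len(body)+5))
    header[4] = compressed
    if _, err := w.Write(header[:]); err != nil {
        return err
    }
    _, err := w.Write(body)
    return err
}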
func main() {
    app := cli.NewApp()
    app.Name = "zlib"
    app.Usage = "A command-line tool for using the zlib compression algorithm."
    app.Action = func(c *cli.Context) {
        var reader io.Reader = os.Stdin
        if c.Bool("decompress") {
            compressorReadCloser, err := zlib.NewReader(reader)
            if err != nil {
                exit(err.Error(), 1)
            }
            if _, err := io.Copy(os.Stdout, compressorReadCloser); err != nil {
                exit(err.Error(), 1)
            }
            compressorReadCloser.Close()
        } else {
            var writer io.Writer = os.Stdout
            compressorWriteCloser := zlib.NewWriter(writer)
            if _, err := io.Copy(compressorWriteCloser, reader); err != nil {
                exit(err.Error(), 1)
            }
            compressorWriteCloser.Close()
        }
    }
    app.Flags = []cli.Flag{
        cli.BoolFlag{
            Name:  "d, decompress",
            Usage: "Decompresses the input instead of compressing the output.",
        },
    }
    app.Run(os.Args)
}
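// Example usage of the CLI above, assuming the binary is built as "zlib":
//
//   $ echo "hello" | zlib > hello.z
//   $ zlib -d < hello.z
//   hello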
func GetObject(repo *libgit.Repository, sha1 [20]byte) (GitObject, error) {
    objectname := fmt.Sprintf("%s/objects/%x/%x", repo.Path, sha1[0:1], sha1[1:])
    fmt.Printf("File: %s\n", objectname)
    f, err := os.Open(objectname)
    if err != nil {
        panic("Couldn't open object file.")
    }
    defer f.Close()
    uncompressed, err := zlib.NewReader(f)
    if err != nil {
        return nil, err
    }
    b, err := ioutil.ReadAll(uncompressed)
    if err != nil {
        return nil, err
    }
    if string(b[0:5]) == "blob " {
        var size int
        var content []byte
        for idx, val := range b {
            if val == 0 {
                content = b[idx+1:]
                if size, err = strconv.Atoi(string(b[5:idx])); err != nil {
                    fmt.Printf("Error converting % x to int at idx: %d", b[5:idx], idx)
                }
                break
            }
        }
        return GitBlobObject{size, content}, nil
    } else {
        fmt.Printf("Content: %s\n", string(b))
    }
    return nil, InvalidObject
}
// Unpackage unpacks the raw payload, decompressing it with zlib when the
// stored length is smaller than the uncompressed length.
func (msg *GxMessage) Unpackage() ([]byte, error) {
    if msg.GetLen() == 0 {
        return []byte(""), nil
    }
    if msg.GetLen() == msg.GetUnlen() {
        data := make([]byte, msg.GetLen())
        copy(data[:], msg.Data)
        return data, nil
    } else if msg.GetLen() < msg.GetUnlen() {
        var b bytes.Buffer
        b.Write(msg.Data)
        r, err := zlib.NewReader(&b)
        if err != nil {
            return []byte(""), err
        }
        defer r.Close()
        data := make([]byte, msg.GetUnlen())
        // io.ReadFull keeps reading until the buffer is filled; a bare
        // r.Read may legitimately return fewer bytes than requested.
        l, _ := io.ReadFull(r, data)
        if l != int(msg.GetUnlen()) {
            return []byte(""), errors.New("uncompress error")
        }
        return data, nil
    } else {
        return []byte(""), errors.New("message unpackage error")
    }
}
func (ms *MapStorage) GetDestination(key string, skipCache bool, transactionID string) (dest *Destination, err error) {
    ms.mu.RLock()
    defer ms.mu.RUnlock()
    cCommit := cacheCommit(transactionID)
    key = utils.DESTINATION_PREFIX + key
    if !skipCache {
        if x, ok := cache.Get(key); ok {
            if x != nil {
                return x.(*Destination), nil
            }
            return nil, utils.ErrNotFound
        }
    }
    if values, ok := ms.dict[key]; ok {
        b := bytes.NewBuffer(values)
        r, err := zlib.NewReader(b)
        if err != nil {
            return nil, err
        }
        out, err := ioutil.ReadAll(r)
        if err != nil {
            return nil, err
        }
        r.Close()
        dest = new(Destination)
        // cache only a successfully unmarshalled destination
        if err = ms.ms.Unmarshal(out, dest); err != nil {
            return nil, err
        }
        cache.Set(key, dest, cCommit, transactionID)
    } else {
        cache.Set(key, nil, cCommit, transactionID)
        return nil, utils.ErrNotFound
    }
    return
}
func (ms *MapStorage) GetRatingPlan(key string, skipCache bool, transactionID string) (rp *RatingPlan, err error) {
    ms.mu.RLock()
    defer ms.mu.RUnlock()
    key = utils.RATING_PLAN_PREFIX + key
    if !skipCache {
        if x, ok := cache.Get(key); ok {
            if x != nil {
                return x.(*RatingPlan), nil
            }
            return nil, utils.ErrNotFound
        }
    }
    cCommit := cacheCommit(transactionID)
    if values, ok := ms.dict[key]; ok {
        b := bytes.NewBuffer(values)
        r, err := zlib.NewReader(b)
        if err != nil {
            return nil, err
        }
        out, err := ioutil.ReadAll(r)
        if err != nil {
            return nil, err
        }
        r.Close()
        rp = new(RatingPlan)
        err = ms.ms.Unmarshal(out, rp)
    } else {
        cache.Set(key, nil, cCommit, transactionID)
        return nil, utils.ErrNotFound
    }
    cache.Set(key, rp, cCommit, transactionID)
    return
}
// ReadEntity checks the Accept header and reads the content into the entityPointer.
func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
    contentType := r.Request.Header.Get(HEADER_ContentType)
    contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)

    // check if the request body needs decompression
    if ENCODING_GZIP == contentEncoding {
        gzipReader := currentCompressorProvider.AcquireGzipReader()
        defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
        gzipReader.Reset(r.Request.Body)
        r.Request.Body = gzipReader
    } else if ENCODING_DEFLATE == contentEncoding {
        zlibReader, err := zlib.NewReader(r.Request.Body)
        if err != nil {
            return err
        }
        r.Request.Body = zlibReader
    }

    // lookup the EntityReader, use defaultRequestContentType if needed and provided
    entityReader, ok := entityAccessRegistry.accessorAt(contentType)
    if !ok {
        if len(defaultRequestContentType) != 0 {
            entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
        }
        if !ok {
            return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
        }
    }
    return entityReader.Read(r, entityPointer)
}
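// For reference, a minimal client-side sketch that produces a body the
// deflate branch of ReadEntity can decode: the payload is zlib-compressed
// and Content-Encoding is set to deflate. The function name, URL, and
// content type are placeholders for illustration.
func postDeflated(url string, jsonBody []byte) (*http.Response, error) {
    var buf bytes.Buffer
    zw := zlib.NewWriter(&buf)
    if _, err := zw.Write(jsonBody); err != nil {
        return nil, err
    }
    if err := zw.Close(); err != nil {
        return nil, err
    }
    req, err := http.NewRequest(http.MethodPost, url, &buf)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("Content-Encoding", "deflate")
    return http.DefaultClient.Do(req)
}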
func (ms *MapStorage) GetDestination(key string) (dest *Destination, err error) {
    ms.mu.RLock()
    defer ms.mu.RUnlock()
    key = utils.DESTINATION_PREFIX + key
    if values, ok := ms.dict[key]; ok {
        b := bytes.NewBuffer(values)
        r, err := zlib.NewReader(b)
        if err != nil {
            return nil, err
        }
        out, err := ioutil.ReadAll(r)
        if err != nil {
            return nil, err
        }
        r.Close()
        dest = new(Destination)
        err = ms.ms.Unmarshal(out, dest)
        // create optimized structure
        for _, p := range dest.Prefixes {
            CachePush(utils.DESTINATION_PREFIX+p, dest.Id)
        }
    } else {
        return nil, utils.ErrNotFound
    }
    return
}
// Return the bytes for a file.
func (b *Bundle) Bytes(path string) ([]byte, error) {
    file := b.files[path]
    if file == nil {
        return nil, os.ErrNotExist
    }
    if b.compressed {
        if file.uncompressed == nil {
            r, err := zlib.NewReader(bytes.NewReader(file.data))
            if err != nil {
                return nil, err
            }
            wb := &bytes.Buffer{}
            _, err = io.Copy(wb, r)
            if err != nil {
                return nil, err
            }
            if b.retainUncompressed {
                file.uncompressed = wb.Bytes()
            }
            return wb.Bytes(), nil
        } else {
            return file.uncompressed, nil
        }
    }
    return file.data, nil
}
func (ms *MapStorage) GetRatingPlan(key string, skipCache bool) (rp *RatingPlan, err error) {
    key = RATING_PLAN_PREFIX + key
    if !skipCache {
        if x, err := cache2go.GetCached(key); err == nil {
            return x.(*RatingPlan), nil
        } else {
            return nil, err
        }
    }
    if values, ok := ms.dict[key]; ok {
        b := bytes.NewBuffer(values)
        r, err := zlib.NewReader(b)
        if err != nil {
            return nil, err
        }
        out, err := ioutil.ReadAll(r)
        if err != nil {
            return nil, err
        }
        r.Close()
        rp = new(RatingPlan)
        err = ms.ms.Unmarshal(out, rp)
        cache2go.Cache(key, rp)
    } else {
        return nil, errors.New(utils.ERR_NOT_FOUND)
    }
    return
}
// Decrypt decrypts an encrypted message and returns it (plaintext).
// If you have enabled compression, it will detect it and decompress
// the msg after decrypting it.
func (c SaltSecret) Decrypt(msg []byte) ([]byte, error) {
    if len(msg) < nonceSize+secretbox.Overhead {
        return nil, errors.New("encrypted message length too short")
    }
    nonce := new([nonceSize]byte)
    copy(nonce[:], msg[:nonceSize])
    key, err := scrypt.Key(c.key, nonce[:], 2<<c.NPow, 8, 1, keySize)
    if err != nil {
        return nil, err
    }
    naclKey := new([keySize]byte)
    copy(naclKey[:], key)
    out, ok := secretbox.Open(nil, msg[nonceSize:], nonce, naclKey)
    if !ok {
        return nil, errors.New("could not decrypt message")
    }
    if nonce[23]&compressBit == compressBit {
        r, err := zlib.NewReader(bytes.NewReader(out))
        if err != nil {
            return nil, err
        }
        // read the decompressed payload before closing the reader
        out, err = ioutil.ReadAll(r)
        r.Close()
        if err != nil {
            return nil, err
        }
    }
    return out, nil
}
func LoadTable(url string) BatchTable {
    var pdf []byte
    if strings.HasPrefix(url, "http") {
        pdf = loadFromUrl(url)
    } else {
        pdf = loadFile(url)
    }
    if pdf == nil {
        log.Printf("PDF file wasn't loaded")
        return nil
    }
    table := make(BatchTable)
    for {
        begin := bytes.Index(pdf, []byte(StreamStartMarker))
        if begin == -1 {
            break
        }
        pdf = pdf[begin+len(StreamStartMarker):]
        end := bytes.Index(pdf, []byte(StreamEndMarker))
        if end == -1 {
            break
        }
        section := pdf[0:end]
        pdf = pdf[end+len(StreamEndMarker):]
        buf := bytes.NewBuffer(section)
        unzipReader, err := zlib.NewReader(buf)
        if err != nil {
            log.Printf("Unzip initialization failed, %v", err)
            continue
        }
        unzipped, err := ioutil.ReadAll(unzipReader)
        if err != nil {
            log.Printf("Unzip failed, %v", err)
            continue
        }
        records := make([]string, 0)
        for _, group := range BTETRE.FindAllSubmatch(unzipped, -1) {
            lines := make([][]byte, 0)
            for _, group := range TextRE.FindAllSubmatch(group[1], -1) {
                lines = append(lines, group[1])
            }
            records = append(records, string(bytes.Join(lines, []byte{})))
        }
        for i := 0; i < len(records)-2; i++ {
            v, err := strconv.ParseInt(records[i], 10, 64)
            if err == nil && v >= 20000000000 && v < 29000000000 {
                id := records[i]
                if _, exists := table[id]; !exists {
                    table[id] = make([]BatchUpdate, 0)
                }
                table[id] = append(table[id], BatchUpdate{records[i+1], records[i+2]})
                i += 2
            }
        }
    }
    return table
}
func readBlobData(pos block) ([]byte, error) {
    file, err := os.Open(pos.filename)
    if err != nil {
        return nil, newParserError("file open", err)
    }
    defer file.Close()
    var blob = &osmpbf.Blob{}
    blobData := make([]byte, pos.size)
    file.Seek(pos.offset, 0)
    io.ReadFull(file, blobData)
    err = proto.Unmarshal(blobData, blob)
    if err != nil {
        return nil, newParserError("unmarshaling blob", err)
    }
    // pbf contains (uncompressed) raw or zlibdata
    raw := blob.GetRaw()
    if raw == nil {
        buf := bytes.NewBuffer(blob.GetZlibData())
        r, err := zlib.NewReader(buf)
        if err != nil {
            return nil, newParserError("zlib error", err)
        }
        raw = make([]byte, blob.GetRawSize())
        _, err = io.ReadFull(r, raw)
        if err != nil {
            return nil, newParserError("zlib read error", err)
        }
    }
    return raw, nil
}
func (c *Chunk) UnmarshallCompressed(buf []byte) error {
    r, err := zlib.NewReader(bytes.NewReader(buf))
    if err != nil {
        return err
    }
    return c.ReadFrom(r)
}
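// A complementary compress step is sketched below under the assumption that
// Chunk can also serialize itself to an io.Writer via a WriteTo(io.Writer)
// (int64, error) method; that method and the function name are hypothetical
// and only illustrate the inverse of UnmarshallCompressed.
func (c *Chunk) MarshalCompressed() ([]byte, error) {
    var buf bytes.Buffer
    zw := zlib.NewWriter(&buf)
    if _, err := c.WriteTo(zw); err != nil {
        return nil, err
    }
    if err := zw.Close(); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}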
// inflate decodes and decompresses the data generated by codegen
func inflate(data string) (string, error) {
    t := trackTime("inflate")
    defer t.finish()

    // fix some url-safeness that codegen does...
    var fixed string
    fixed = strings.Replace(data, "-", "+", -1)
    fixed = strings.Replace(fixed, "_", "/", -1)

    decoded, err := base64.StdEncoding.DecodeString(fixed)
    if err != nil {
        glog.Error(err)
        return "", err
    }
    r, err := zlib.NewReader(bytes.NewReader(decoded))
    if err != nil {
        glog.Error(err)
        return "", err
    }
    defer r.Close()
    var buf bytes.Buffer
    buf.ReadFrom(r)
    inflated := buf.String()
    return inflated, nil
}
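// The two Replace calls in inflate map URL-safe base64 back to the standard
// alphabet; encoding/base64 can decode that alphabet directly. A sketch of
// the equivalent decode step, assuming the input keeps its '=' padding (use
// base64.RawURLEncoding if it does not):
func decodeURLSafe(data string) ([]byte, error) {
    decoded, err := base64.URLEncoding.DecodeString(data)
    if err != nil {
        return nil, err
    }
    r, err := zlib.NewReader(bytes.NewReader(decoded))
    if err != nil {
        return nil, err
    }
    defer r.Close()
    return ioutil.ReadAll(r)
}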
func (rs *RedisStorage) GetRatingPlan(key string, skipCache bool) (rp *RatingPlan, err error) {
    key = utils.RATING_PLAN_PREFIX + key
    if !skipCache {
        if x, err := cache2go.Get(key); err == nil {
            return x.(*RatingPlan), nil
        } else {
            return nil, err
        }
    }
    var values []byte
    if values, err = rs.db.Get(key); err == nil {
        b := bytes.NewBuffer(values)
        r, err := zlib.NewReader(b)
        if err != nil {
            return nil, err
        }
        out, err := ioutil.ReadAll(r)
        if err != nil {
            return nil, err
        }
        r.Close()
        rp = new(RatingPlan)
        err = rs.ms.Unmarshal(out, rp)
        cache2go.Cache(key, rp)
    }
    return
}
func WithTestDSN(t *testing.T, tf func(string, <-chan *resultPacket)) {
    pch := make(chan *resultPacket, 1)
    s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
        defer req.Body.Close()
        contentType := req.Header.Get("Content-Type")
        var bodyReader io.Reader = req.Body
        // underlying client will compress and encode payload above certain size
        if contentType == "application/octet-stream" {
            bodyReader = base64.NewDecoder(base64.StdEncoding, bodyReader)
            bodyReader, _ = zlib.NewReader(bodyReader)
        }
        d := json.NewDecoder(bodyReader)
        p := &resultPacket{}
        err := d.Decode(p)
        if err != nil {
            t.Fatal(err.Error())
        }
        pch <- p
    }))
    defer s.Close()

    fragments := strings.SplitN(s.URL, "://", 2)
    dsn := fmt.Sprintf(
        "%s://public:secret@%s/sentry/project-id",
        fragments[0],
        fragments[1],
    )
    tf(dsn, pch)
}
// newGitObjFromZcontent constructs a new gitObj using zcontent.
// zcontent has the same format as the file of an unpacked git object.
func newGitObjFromZcontent(zcontent []byte) (*gitObj, error) {
    // Uncompress
    r, err := zlib.NewReader(bytes.NewReader(zcontent))
    if err != nil {
        return nil, err
    }
    defer r.Close()
    var out bytes.Buffer
    io.Copy(&out, r)
    b := out.Bytes()

    // Find header delimiter
    i := bytes.IndexByte(b, '\x00')
    if i <= 0 || i >= len(b) {
        return nil, errInvalidZcontent("no header delimiter")
    }

    // Calculate SHA1 and parse header to get oid and type, size
    o := gitObj{Oid: Oid(fmt.Sprintf("%040x", sha1.Sum(b))), Body: b[i+1:]}
    var size int
    if n, err := fmt.Sscanf(string(b[0:i]), "%s %d", &o.Type, &size); err != nil || n < 2 {
        return nil, errInvalidZcontent("illegal header: " + string(b[0:i]))
    }
    if size != len(o.Body) {
        return nil, errInvalidZcontent(fmt.Sprintf("body size mismatch: claimed %d, actual %d", size, len(o.Body)))
    }
    return &o, nil
}
func (i *FileIndex) Load() error {
    idxFile := i.file
    if idxFile == "" {
        err := fmt.Errorf("Yikes! Cannot load index from disk because no file was specified.")
        return err
    }
    fp, err := os.Open(idxFile)
    if err != nil {
        if os.IsNotExist(err) {
            return nil
        }
        return err
    }
    zfp, zerr := zlib.NewReader(fp)
    if zerr != nil {
        fp.Close()
        return zerr
    }
    dec := gob.NewDecoder(zfp)
    err = dec.Decode(&i)
    zfp.Close()
    if err != nil {
        fp.Close()
        return err
    }
    return fp.Close()
}
func (d *Data) decodeBase64() (data []byte, err error) {
    rawData := bytes.TrimSpace(d.RawData)
    r := bytes.NewReader(rawData)
    encr := base64.NewDecoder(base64.StdEncoding, r)
    var comr io.Reader
    switch d.Compression {
    case "gzip":
        comr, err = gzip.NewReader(encr)
        if err != nil {
            return
        }
    case "zlib":
        comr, err = zlib.NewReader(encr)
        if err != nil {
            return
        }
    case "":
        comr = encr
    default:
        err = UnknownCompression
        return
    }
    return ioutil.ReadAll(comr)
}
func (ms *MapStorage) GetRatingPlan(key string, skipCache bool) (rp *RatingPlan, err error) {
    ms.mu.RLock()
    defer ms.mu.RUnlock()
    key = utils.RATING_PLAN_PREFIX + key
    if !skipCache {
        if x, err := CacheGet(key); err == nil {
            return x.(*RatingPlan), nil
        } else {
            return nil, err
        }
    }
    if values, ok := ms.dict[key]; ok {
        b := bytes.NewBuffer(values)
        r, err := zlib.NewReader(b)
        if err != nil {
            return nil, err
        }
        out, err := ioutil.ReadAll(r)
        if err != nil {
            return nil, err
        }
        r.Close()
        rp = new(RatingPlan)
        err = ms.ms.Unmarshal(out, rp)
        CacheSet(key, rp)
    } else {
        return nil, utils.ErrNotFound
    }
    return
}
// unpackRemoteServerListFile reads a file that contains a
// zlib compressed authenticated data package, validates
// the package, and returns the payload.
func unpackRemoteServerListFile(
    config *Config, filename string) (string, error) {

    fileReader, err := os.Open(filename)
    if err != nil {
        return "", common.ContextError(err)
    }
    defer fileReader.Close()

    zlibReader, err := zlib.NewReader(fileReader)
    if err != nil {
        return "", common.ContextError(err)
    }

    dataPackage, err := ioutil.ReadAll(zlibReader)
    zlibReader.Close()
    if err != nil {
        return "", common.ContextError(err)
    }

    payload, err := common.ReadAuthenticatedDataPackage(
        dataPackage, config.RemoteServerListSignaturePublicKey)
    if err != nil {
        return "", common.ContextError(err)
    }

    return payload, nil
}
func (zb ZlibCompressed) Open() (io.Reader, error) {
    rz, err := zlib.NewReader(strings.NewReader(string(zb)))
    if err != nil {
        return nil, fmt.Errorf("Could not open ZlibCompressed: %v", err)
    }
    return rz, nil
}
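// A round-trip sketch for ZlibCompressed: compress a payload with
// zlib.NewWriter, convert it into the type, and read it back through Open.
// The constructor name is an assumption; ZlibCompressed is treated as the
// string-convertible type its Open method implies.
func newZlibCompressed(payload []byte) (ZlibCompressed, error) {
    var buf bytes.Buffer
    zw := zlib.NewWriter(&buf)
    if _, err := zw.Write(payload); err != nil {
        return ZlibCompressed(""), err
    }
    if err := zw.Close(); err != nil {
        return ZlibCompressed(""), err
    }
    return ZlibCompressed(buf.String()), nil
}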