func (g *generator) defineSchemaVar() error { msg, seg, _ := capnp.NewMessage(capnp.SingleSegment(nil)) req, _ := schema.NewRootCodeGeneratorRequest(seg) fnodes := g.nodes[g.fileID].nodes ids := make([]uint64, len(fnodes)) for i, n := range fnodes { ids[i] = n.Id() } sort.Sort(uint64Slice(ids)) // TODO(light): find largest object size and use that to allocate list nodes, _ := req.NewNodes(int32(len(g.nodes))) i := 0 for _, id := range ids { n := g.nodes[id] if err := nodes.Set(i, n.Node); err != nil { return err } i++ } var buf bytes.Buffer z, _ := zlib.NewWriterLevel(&buf, zlib.BestCompression) if err := capnp.NewPackedEncoder(z).Encode(msg); err != nil { return err } if err := z.Close(); err != nil { return err } return renderSchemaVar(g.r, schemaVarParams{ G: g, FileID: g.fileID, NodeIDs: ids, schema: buf.Bytes(), }) }
func (rw *RedisWrapper) SaveRequest(req *ParsedRequest, creatives []*Inventory, timeout int) error { conn := rw.redisPool.Get() defer conn.Close() creativesForRedis := make([]*InventoryForRedis, len(creatives)) for index, value := range creatives { creativesForRedis[index] = &InventoryForRedis{ AdId: value.AdId, Frequency: value.Frequency, } } req.Creatives = creativesForRedis body, err := json.Marshal(*req) if err != nil { return err } var buf bytes.Buffer zlibWriter, err := zlib.NewWriterLevel(&buf, zlib.BestSpeed) if err != nil { return err } if _, err := zlibWriter.Write(body); err != nil { return err } zlibWriter.Close() if _, err := conn.Do("setex", rw.configure.RedisCachePrefix+req.Id, timeout, buf.Bytes()); err != nil { rw.logger.Warning("redis error: %v", err.Error()) return err } return nil }
// SerializeCompressed serializes a compressed data packet to w and // returns a WriteCloser to which the literal data packets themselves // can be written and which MUST be closed on completion. If cc is // nil, sensible defaults will be used to configure the compression // algorithm. func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { compressed, err := serializeStreamHeader(w, packetTypeCompressed) if err != nil { return } _, err = compressed.Write([]byte{uint8(algo)}) if err != nil { return } level := DefaultCompression if cc != nil { level = cc.Level } var compressor io.WriteCloser switch algo { case CompressionZIP: compressor, err = flate.NewWriter(compressed, level) case CompressionZLIB: compressor, err = zlib.NewWriterLevel(compressed, level) default: s := strconv.Itoa(int(algo)) err = errors.UnsupportedError("Unsupported compression algorithm: " + s) } if err != nil { return } literaldata = compressedWriteCloser{compressed, compressor} return }
// Start spins up the PRT generator: it launches chunk-processor
// goroutines, opens the output file for writing, and emits the PRT
// header followed by a zlib stream header (NoCompression) for the
// particle data that follows.
//
// NOTE(review): this is pre-Go1 code (os.Error, flag-based os.Open);
// errors are printed to stderr instead of returned — see the TODOs.
func (o *PrtGenerator) Start(outFilename string, total int, maxProcs int, boundary *BoundaryLocator) {
	o.enclosedsChan = make(chan *EnclosedChunkJob, maxProcs*2)
	o.completeChan = make(chan bool)
	o.total = total
	o.boundary = boundary
	// maxProcs is forced to 1 here, so only a single processor goroutine
	// runs even though the channel above was sized for the requested count.
	maxProcs = 1
	for i := 0; i < maxProcs; i++ {
		go o.chunkProcessor()
	}
	var openErr os.Error
	o.outFile, openErr = os.Open(outFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if openErr != nil {
		fmt.Fprintln(os.Stderr, openErr)
		// TODO: return openErr
		return
	}
	o.w = bufio.NewWriter(o.outFile)
	// -1 is a placeholder particle count — presumably patched later once
	// the real total is known; TODO confirm.
	WriteHeader(o.w, -1, []ChannelDefinition{{"Position", 4, 3, 0}, {"BlockID", 1, 1, 12}})
	var zErr os.Error
	o.zw, zErr = zlib.NewWriterLevel(o.w, zlib.NoCompression)
	if zErr != nil {
		fmt.Fprintln(os.Stderr, zErr)
		// TODO: return zErr
		return
	}
}
// Returns a zlib-compressed copy of the specified byte array func sliceCompress(data []byte) []byte { var buf bytes.Buffer cmp, _ := zlib.NewWriterLevel(&buf, zlib.BestSpeed) cmp.Write(data) cmp.Close() return buf.Bytes() }
func (f *Ftail) Flush() error { if f.buf.Len() <= 0 { return nil } var b bytes.Buffer w, err := zlib.NewWriterLevel(&b, zlib.BestCompression) if err != nil { return err } row := core.Row{Time: f.lastTime, Pos: f.Pos} row.Text = f.buf.String() _, err = io.Copy(w, &f.buf) if cerr := w.Close(); cerr != nil { log.Printf("zlib close err:%s", cerr) } if err != nil { return err } if b.Len() < f.buf.Len() { row.Bin = b.Bytes() row.Text = "" } //log.Printf("text:'%s',bin:'%x', buf.String:%s", row.Text, row.Bin, f.buf.String()) defer f.buf.Reset() if err = f.rec.Put(row); err != nil { log.Printf("Flush %s err:%s", f.Pos.Name, err) } return err }
//func CompressData(in []byte) (data *bytes.Buffer, err error) { func CompressData(in []byte) (out []byte, err error) { if len(in) == 0 { return nil, &AcError{Value: -1, Msg: "CompressData() invalid input: ", Err: err} } // first let's compress data := new(bytes.Buffer) zbuf, err := zlib.NewWriterLevel(data, zlib.BestCompression) if err != nil { return nil, &AcError{Value: -2, Msg: "CompressData().zlib.NewWriterLevel(): ", Err: err} } n, err := zbuf.Write(in) if err != nil || n != len(in) { return nil, &AcError{Value: -3, Msg: "CompressData().zlib.Write(): ", Err: err} } //XXX funny Flush don't actually flush stuff from zlib into the writer all the time..... //zbuf.Flush() // XXX let's try... zbuf.Close() //fmt.Fprintf(os.Stderr, "CompressData(%d B): %d B\n", len(in), data.Len()) out = data.Bytes() return out, nil }
func compress(s string) string { var b bytes.Buffer w, _ := zlib.NewWriterLevel(&b, zlib.BestSpeed) // flate.BestCompression w.Write([]byte(s)) w.Close() return b.String() }
func NewPacketCodecZlibLevel(threshold int, level int) (this *PacketCodecZlib) { this = new(PacketCodecZlib) this.threshold = threshold this.level = level this.zlibWriter, _ = zlib.NewWriterLevel(nil, level) return }
func (pp *pendingPayload) Generate(hostname string) (err error) { var buffer bytes.Buffer // Begin with the nonce if _, err = buffer.Write([]byte(pp.nonce)); err != nil { return } var compressor *zlib.Writer if compressor, err = zlib.NewWriterLevel(&buffer, 3); err != nil { return } // Append all the events for _, event := range pp.events[pp.ack_events:] { // Add host field event.Event["host"] = hostname if err = pp.bufferJdatDataEvent(compressor, event); err != nil { return } } compressor.Close() pp.payload = buffer.Bytes() pp.payload_start = pp.ack_events return }
func (o *PrtGenerator) Start(outFilename string, total int, maxProcs int, boundary *BoundaryLocator) error { o.enclosedsChan = make(chan *EnclosedChunkJob, maxProcs*2) o.completeChan = make(chan bool) o.total = total o.boundary = boundary maxProcs = 1 for i := 0; i < maxProcs; i++ { go o.chunkProcessor() } var openErr error o.outFile, openErr = os.Create(outFilename) if openErr != nil { return openErr } o.w = bufio.NewWriter(o.outFile) WriteHeader(o.w, -1, []ChannelDefinition{{"Position", 4, 3, 0}, {"BlockID", 1, 1, 12}}) var zErr error o.zw, zErr = zlib.NewWriterLevel(o.w, zlib.NoCompression) if zErr != nil { return zErr } return nil }
func (h Handler) printResponse(status int, header map[string]string, content []byte) { headerEncoded := encodeData(header) h.response.WriteHeader(200) h.response.Header().Set("Content-Type", "image/gif") compressed := false if contentType, ok := header["content-type"]; ok { if strings.HasPrefix(contentType, "text/") || strings.HasPrefix(contentType, "application/json") || strings.HasPrefix(contentType, "application/javascript") { compressed = true } } if compressed { h.response.Write([]byte("1")) w, err := zlib.NewWriterLevel(h.response, zlib.BestCompression) if err != nil { h.context.Criticalf("zlib.NewWriterDict(h.response, zlib.BestCompression, nil) Error: %v", err) return } defer w.Close() binary.Write(w, binary.BigEndian, uint32(status)) binary.Write(w, binary.BigEndian, uint32(len(headerEncoded))) binary.Write(w, binary.BigEndian, uint32(len(content))) w.Write(headerEncoded) w.Write(content) } else { h.response.Write([]byte("0")) binary.Write(h.response, binary.BigEndian, uint32(status)) binary.Write(h.response, binary.BigEndian, uint32(len(headerEncoded))) binary.Write(h.response, binary.BigEndian, uint32(len(content))) h.response.Write(headerEncoded) h.response.Write(content) } }
func newLumberjackClient( conn TransportClient, compressLevel int, maxWindowSize int, timeout time.Duration, beat string, ) (*lumberjackClient, error) { // validate by creating and discarding zlib writer with configured level if compressLevel > 0 { tmp := bytes.NewBuffer(nil) w, err := zlib.NewWriterLevel(tmp, compressLevel) if err != nil { return nil, err } w.Close() } encodedBeat, err := json.Marshal(beat) if err != nil { return nil, err } return &lumberjackClient{ TransportClient: conn, windowSize: defaultStartMaxWindowSize, timeout: timeout, maxWindowSize: maxWindowSize, compressLevel: compressLevel, beat: encodedBeat, }, nil }
func (this *PacketCodecZlib) Encode(writer io.Writer, util []byte, packet Packet) (err error) { buffer := new(bytes.Buffer) err = this.codec.Encode(buffer, util, packet) if err != nil { return } if raw, ok := packet.(PacketRaw); ok && raw.Raw() { _, err = buffer.WriteTo(writer) } else if buffer.Len() < this.threshold { err = WriteVarInt(writer, util, 0) if err != nil { return } _, err = buffer.WriteTo(writer) } else { err = WriteVarInt(writer, util, buffer.Len()) if err != nil { return } var zlibWriter io.WriteCloser zlibWriter, err = zlib.NewWriterLevel(writer, this.level) if err != nil { return } _, err = buffer.WriteTo(zlibWriter) if err != nil { return } err = zlibWriter.Close() } return }
func (c *conn) packRequest(r *http.Request) (*http.Request, error) { buf := &bytes.Buffer{} zbuf, err := zlib.NewWriterLevel(buf, zlib.BestCompression) if err != nil { return nil, fmt.Errorf("conn.packRequest(zlib.NewWriterLevel)>%s", err) } url := c.url + r.URL.String() urlhex := make([]byte, hex.EncodedLen(len(url))) hex.Encode(urlhex, []byte(url)) fmt.Fprintf(zbuf, "url=%s", urlhex) fmt.Fprintf(zbuf, "&method=%s", hex.EncodeToString([]byte(r.Method))) if c.ps.password != "" { fmt.Fprintf(zbuf, "&password=%s", c.ps.password) } fmt.Fprint(zbuf, "&headers=") for k, v := range r.Header { fmt.Fprint(zbuf, hex.EncodeToString([]byte(fmt.Sprintf("%s:%s\r\n", k, v[0])))) } body, err := ioutil.ReadAll(r.Body) if err != nil { return nil, fmt.Errorf("conn.packRequest(ioutil.ReadAll(r.Body))>%s", err) } payload := hex.EncodeToString(body) fmt.Fprintf(zbuf, "&payload=%s", payload) zbuf.Close() req, err := http.NewRequest("POST", c.ps.path, buf) if err != nil { return nil, fmt.Errorf("conn.packRequest(http.NewRequest)>%s", err) } req.Host = c.ps.appid[rand.Intn(len(c.ps.appid))] + ".appspot.com" req.URL.Scheme = "http" return req, nil }
func BenchmarkWriteWithBestZlibCompression(b *testing.B) { file := benchFile() w, _ := zlib.NewWriterLevel(file, zlib.BestCompression) benchmarkWrite(b, w) fi, _ := file.Stat() b.SetBytes(int64(int(fi.Size()) / 1024 / 1024)) }
// newZlibWriter returns a *zlib.Writer (level BestSpeed) targeting a
// fresh throwaway buffer. NewWriterLevel only fails for an invalid
// level, so a failure here is a programmer error and panics.
//
// NOTE(review): the level constant comes from the gzip package, not
// zlib. Both re-export flate's constants (BestSpeed == 1), so behavior
// is correct, but zlib.BestSpeed would be clearer — before swapping,
// confirm gzip is used elsewhere in this file or its import would
// become unused.
func newZlibWriter() *zlib.Writer {
	writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
	if err != nil {
		panic(err.Error())
	}
	return writer
}
func newClientProcol( conn net.Conn, timeout time.Duration, compressLevel int, beat string, ) (*protocol, error) { // validate by creating and discarding zlib writer with configured level if compressLevel > 0 { tmp := bytes.NewBuffer(nil) w, err := zlib.NewWriterLevel(tmp, compressLevel) if err != nil { return nil, err } w.Close() } encodedBeat, err := json.Marshal(beat) if err != nil { return nil, err } return &protocol{ conn: conn, timeout: timeout, compressLevel: compressLevel, eventsBuffer: bytes.NewBuffer(nil), beat: encodedBeat, }, nil }
func loadFont(f *fontdata) (font *FontMetrics) { var err error if font, err = ParseFontMetricsFile(f.Metrics, f.Label); err != nil { log.Fatalf("loading font metrics: %v", err) } if f.StemV > 0 && font.StemV <= 0 { font.StemV = f.StemV } if len(f.FontFile) > 0 { font.File = []byte(f.FontFile) var buf bytes.Buffer var writer *zlib.Writer if writer, err = zlib.NewWriterLevel(&buf, zlib.BestCompression); err != nil { log.Fatal("Setting up zlib compressor: ", err) } if _, err = writer.Write(font.File); err != nil { log.Fatal("Writing to zlib compressor: ", err) } if err = writer.Close(); err != nil { log.Fatal("Closing zlib compressor: ", err) } font.CompressedFile = buf.Bytes() } return }
// compress uses zlib to compress stuff, for transferring big stuff like // stdout, stderr and environment variables over the network, and for storing // of same on disk func compress(data []byte) []byte { var compressed bytes.Buffer w, _ := zlib.NewWriterLevel(&compressed, zlib.BestCompression) w.Write(data) w.Close() return compressed.Bytes() }
func writeEntry(f *os.File, base, path string) { fp, err := os.Open(path) if err != nil { panic(err) } buffer := &BufferCloser{} var write io.WriteCloser = buffer if *compressFlag { write, err = zlib.NewWriterLevel(buffer, 9) if err != nil { panic(err) } } _, err = io.Copy(write, fp) if err != nil { panic(err) } fp.Close() write.Close() ctx := &FileContext{ Name: strings.TrimLeft(path, "/"), Content: formatContent(buffer.Bytes()), } file.Execute(f, ctx) }
// Send encodes (and compresses, if required) a packet, and sends it. func (p *Parser) Send(packet Packet) error { p.conn.SetWriteDeadline(time.Now().Add(p.timeout)) buf := &bytes.Buffer{} // Write the packet ID, then encode it into the buffer. if err := WriteVarint(buf, VarInt(packet.Id())); err != nil { return err } if err := packet.Encode(buf); err != nil { return err } uncompSize := 0 extra := 0 // Just in case we have to compress the packet. // We only need to compress the packet if it's above the threshold. if p.compressionThreshold >= 0 && buf.Len() > p.compressionThreshold { // We're compressing the packet into this buffer. cBuf := &bytes.Buffer{} if p.zlibWriter == nil { // If we don't already have a zlib writer, set one up. p.zlibWriter, _ = zlib.NewWriterLevel(cBuf, zlib.BestSpeed) } else { p.zlibWriter.Reset(cBuf) } // Store the size it should end up being on the receiving end. uncompSize = buf.Len() extra = binary.Size(uncompSize) if _, err := buf.WriteTo(p.zlibWriter); err != nil { return err } // We have to close the zlib writer, otherwise it won't flush and // actually compress the data. if err := p.zlibWriter.Close(); err != nil { return err } buf = cBuf } if err := WriteVarint(p.w, VarInt(buf.Len()+extra)); err != nil { return err } if p.compressionThreshold >= 0 { // If compression is on, tell them how big it should end up being. if err := WriteVarint(p.w, VarInt(uncompSize)); err != nil { return err } } // Finally, the rest of the packet buffer. _, err := buf.WriteTo(p.w) return err }
// WritePacket serializes the packet to the underlying
// connection, optionally encrypting and/or compressing
func (c *Conn) WritePacket(packet Packet) error {
	// 15 second timeout
	c.net.SetWriteDeadline(time.Now().Add(15 * time.Second))

	buf := &bytes.Buffer{}

	// Contents of the packet (ID + Data)
	if err := WriteVarInt(buf, VarInt(packet.id())); err != nil {
		return err
	}
	if err := packet.write(buf); err != nil {
		return err
	}

	uncompessedSize := 0
	extra := 0

	// Only compress if compression is enabled and the packet is large enough
	if c.compressionThreshold >= 0 && buf.Len() > c.compressionThreshold {
		var err error
		nBuf := &bytes.Buffer{}
		// The zlib writer is cached on the Conn and Reset between
		// packets to avoid re-allocating its internal state each write.
		if c.zlibWriter == nil {
			c.zlibWriter, _ = zlib.NewWriterLevel(nBuf, zlib.BestSpeed)
		} else {
			c.zlibWriter.Reset(nBuf)
		}
		uncompessedSize = buf.Len()
		if _, err = buf.WriteTo(c.zlibWriter); err != nil {
			return err
		}
		// Close is required: it flushes the final compressed block.
		if err = c.zlibWriter.Close(); err != nil {
			return err
		}
		buf = nBuf
	}
	// Account for the compression header if enabled
	// (even an uncompressed packet carries a one-byte 0 varint).
	if c.compressionThreshold >= 0 {
		extra = varIntSize(VarInt(uncompessedSize))
	}
	// Write the length prefix followed by the buffer
	if err := WriteVarInt(c.w, VarInt(buf.Len()+extra)); err != nil {
		return err
	}
	// Write the uncompressed packet size
	if c.compressionThreshold >= 0 {
		if err := WriteVarInt(c.w, VarInt(uncompessedSize)); err != nil {
			return err
		}
	}

	_, err := buf.WriteTo(c.w)
	if c.Logger != nil {
		c.Logger(false, packet)
	}
	return err
}
// Never exits func worker(c *appContext) { for { data := <-c.ToWork reader, err := gzip.NewReader(bytes.NewReader(data)) if err != nil { continue } decoder := json.NewDecoder(reader) iL := itemLog{} if err := decoder.Decode(&iL); err != nil { continue } for _, item := range iL.ItemLog { var contentErr, serverErr error serverErr = checkServer(item.Contents) switch item.Type { case "NPCStoreItem": contentErr = checkNPCStoreItem(item.Contents) case "GuildStoreItem": contentErr = checkGuildStoreItem(item.Contents) case "GuildSaleHistory": contentErr = checkGuildSaleHistory(item.Contents) case "CharacterInfo": contentErr = checkCharacterInfo(item.Contents) case "ItemData": contentErr = checkItemData(item.Contents) default: // If it isn't a registered item, we want to ignore it. continue } if contentErr != nil { log.Printf("Received invalid data %v", item) } else if serverErr != nil { log.Printf("Server data invalid %v", item) } else { // json encode the data str, _ := json.Marshal(item) // compress var b bytes.Buffer w, _ := zlib.NewWriterLevel(&b, zlib.BestCompression) w.Write(str) w.Close() data, err := ioutil.ReadAll(bytes.NewReader(b.Bytes())) if err != nil { continue } // and broadcast c.ToPublish <- data } } } }
// Publish consumes batches of FileEvents from input, serializes each
// batch with msgpack, zlib-compresses it (level 3), and ships it over
// a zeromq REQ socket, retrying forever until a reply is received.
func Publish(input chan []*FileEvent, server_list []string, server_timeout time.Duration) {
	var buffer bytes.Buffer
	//key := "abcdefghijklmnop"
	//cipher, err := aes.NewCipher([]byte(key))

	socket := FFS{
		Endpoints:   server_list,
		SocketType:  zmq.REQ,
		RecvTimeout: server_timeout,
		SendTimeout: server_timeout,
	}
	//defer socket.Close()

	for events := range input {
		// got a bunch of events, ship them out.
		log.Printf("Spooler gave me %d events\n", len(events))

		// Serialize with msgpack
		data, err := msgpack.Marshal(events)
		// TODO(sissel): chefk error
		_ = err
		//log.Printf("msgpack serialized %d bytes\n", len(data))

		// Compress it
		// A new compressor is used for every payload of events so
		// that any individual payload can be decompressed alone.
		// TODO(sissel): Make compression level tunable
		compressor, _ := zlib.NewWriterLevel(&buffer, 3)
		// NOTE(review): Truncate runs after the compressor is created;
		// this only works because zlib emits its header lazily on the
		// first Write — truncating before NewWriterLevel would be safer.
		buffer.Truncate(0)
		_, err = compressor.Write(data)
		err = compressor.Flush()
		compressor.Close()
		//log.Printf("compressed %d bytes\n", buffer.Len())
		// TODO(sissel): check err
		// TODO(sissel): implement security/encryption/etc

		// Send full payload over zeromq REQ/REP
		// TODO(sissel): check error
		//buffer.Write(data)

		// Loop forever trying to send.
		// This will cause reconnects/etc on failures automatically
		for {
			err = socket.Send(buffer.Bytes(), 0)
			data, err = socket.Recv(0)
			if err == nil {
				// success!
				break
			}
		}
		// TODO(sissel): Check data value of reply?
		// TODO(sissel): retry on failure or timeout
		// TODO(sissel): notify registrar of success
	} /* for each event payload */
} // Publish
// main regenerates table.go: it parses table.txt (codepoint:quoted
// transliteration lines), packs the entries into a little-endian
// binary blob, zlib-compresses it, and writes it out as a Go string
// constant in package unidecode.
func main() {
	raw, err := ioutil.ReadFile("table.txt")
	if err != nil {
		panic(err)
	}
	var packed bytes.Buffer
	for _, line := range strings.Split(string(raw), "\n") {
		// Skip blank lines and comment lines.
		if line == "" || strings.HasPrefix(line, "/*") {
			continue
		}
		sep := strings.IndexByte(line, ':')
		if sep == -1 {
			panic(line)
		}
		val, err := strconv.ParseInt(line[:sep], 0, 32)
		if err != nil {
			panic(err)
		}
		s, err := strconv.Unquote(line[sep+2:])
		if err != nil {
			panic(err)
		}
		if s == "" {
			continue
		}
		// Entry layout: uint16 codepoint, uint8 length, then the bytes.
		if err := binary.Write(&packed, binary.LittleEndian, uint16(val)); err != nil {
			panic(err)
		}
		if err := binary.Write(&packed, binary.LittleEndian, uint8(len(s))); err != nil {
			panic(err)
		}
		packed.WriteString(s)
	}
	var compressed bytes.Buffer
	zw, err := zlib.NewWriterLevel(&compressed, zlib.BestCompression)
	if err != nil {
		panic(err)
	}
	if _, err := zw.Write(packed.Bytes()); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	var out bytes.Buffer
	out.WriteString("package unidecode\n")
	out.WriteString("// AUTOGENERATED - DO NOT EDIT!\n\n")
	fmt.Fprintf(&out, "var tableData = %q;\n", compressed.String())
	dst, err := format.Source(out.Bytes())
	if err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile("table.go", dst, 0644); err != nil {
		panic(err)
	}
}
func (c *Consumer) Consume() { c.InfLogger.Println("Registering consumer... ") msgs, err := c.Channel.Consume(c.Queue, "", false, false, false, false, nil) if err != nil { c.ErrLogger.Fatalf("Failed to register a consumer: %s", err) } c.InfLogger.Println("Succeeded registering consumer.") c.InfLogger.Println("Consumer concurrency is ", c.Concurrency) defer c.Connection.Close() defer c.Channel.Close() forever := make(chan bool) sem := make(chan bool, c.Concurrency) go func() { for d := range msgs { d.Ack(true) /* if true { d.Ack(true) c.InfLogger.Println("Ack (true)") } else { d.Nack(true, true) c.InfLogger.Println("Nack (true, true)") } */ sem <- true go func() { defer func() { <-sem }() input := d.Body c.InfLogger.Println("---------------------------------") c.InfLogger.Println("Receive message:") c.InfLogger.Println(string(input)) if c.Compression { var b bytes.Buffer w, err := zlib.NewWriterLevel(&b, zlib.BestCompression) if err != nil { c.ErrLogger.Println("Could not create zlib handler") d.Nack(true, true) } c.InfLogger.Println("Compressed message") w.Write(input) w.Close() input = b.Bytes() } c.InfLogger.Println("Process message start") cmd := c.Factory.Create(base64.StdEncoding.EncodeToString(input)) c.Executer.Execute(cmd) }() } }() c.InfLogger.Println("Waiting for messages...") <-forever }
func TestOpenStaticFileDeflate_1(t *testing.T) { file, _ := os.Open(licenseFile) var zipBuf bytes.Buffer fileWriter, _ := zlib.NewWriterLevel(&zipBuf, zlib.BestCompression) io.Copy(fileWriter, file) fileWriter.Close() content, _ := ioutil.ReadAll(&zipBuf) testOpenFile("deflate", content, t) }
// WriteMessage sends the specified message to the GELF server
// specified in the call to New(). It assumes all the fields are
// filled out appropriately. In general, clients will want to use
// Write, rather than WriteMessage.
func (w *Writer) WriteMessage(m *Message) (err error) {
	// Marshal the message into a pooled scratch buffer.
	mBuf := newBuffer()
	defer bufPool.Put(mBuf)
	if err = m.MarshalJSONBuf(mBuf); err != nil {
		return err
	}
	mBytes := mBuf.Bytes()

	var (
		zBuf   *bytes.Buffer
		zBytes []byte
	)

	// Select the compressor per the writer's configuration; CompressNone
	// sends the JSON bytes unmodified.
	var zw io.WriteCloser
	switch w.CompressionType {
	case CompressGzip:
		zBuf = newBuffer()
		defer bufPool.Put(zBuf)
		zw, err = gzip.NewWriterLevel(zBuf, w.CompressionLevel)
	case CompressZlib:
		zBuf = newBuffer()
		defer bufPool.Put(zBuf)
		zw, err = zlib.NewWriterLevel(zBuf, w.CompressionLevel)
	case CompressNone:
		zBytes = mBytes
	default:
		panic(fmt.Sprintf("unknown compression type %d", w.CompressionType))
	}

	// NOTE: on a constructor error zw can still be non-nil (an interface
	// holding a typed nil pointer), which is why err is re-checked
	// inside this branch before any write happens.
	if zw != nil {
		if err != nil {
			return
		}
		if _, err = zw.Write(mBytes); err != nil {
			zw.Close()
			return
		}
		zw.Close()
		zBytes = zBuf.Bytes()
	}

	// Payloads larger than a single datagram are split by writeChunked.
	if numChunks(zBytes) > 1 {
		return w.writeChunked(zBytes)
	}
	n, err := w.conn.Write(zBytes)
	if err != nil {
		return
	}
	if n != len(zBytes) {
		return fmt.Errorf("bad write (%d/%d)", n, len(zBytes))
	}
	return nil
}
// concatenate & compress all strings passed in func Compress(stringArguments ...string) string { var b bytes.Buffer var r *strings.Reader w, _ := zlib.NewWriterLevel(&b, zlib.BestSpeed) for i := 0; i < len(stringArguments); i++ { r = strings.NewReader(stringArguments[i]) io.Copy(w, r) } w.Close() return b.String() }