func RunTestLZW(data []byte) {
	log.Printf("encoding/RunTestLZW: Testing compression LZW\n")
	var compressed bytes.Buffer
	w := lzw.NewWriter(&compressed, lzw.MSB, 8)
	now := time.Now()
	w.Write(data)
	// Close the writer before reading back, so all pending codes are flushed.
	w.Close()
	cl := compressed.Len()
	log.Printf("encoding/RunTestLZW: Compressed from %d bytes to %d bytes in %d ns\n", len(data), cl, time.Since(now).Nanoseconds())
	recovered := make([]byte, len(data))
	r := lzw.NewReader(&compressed, lzw.MSB, 8)
	defer r.Close()
	total := 0
	n := 100
	var err error
	// Restart the clock so the decompression timing does not include compression.
	now = time.Now()
	for err != io.EOF && n != 0 {
		n, err = r.Read(recovered[total:])
		total += n
	}
	log.Printf("encoding/RunTestLZW: Uncompressed from %d bytes to %d bytes in %d ns\n", cl, total, time.Since(now).Nanoseconds())
}
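// A minimal round-trip sketch (not from any of the projects above) using only
// the standard library, for comparison with RunTestLZW: the writer is closed
// before the buffer is read back so every pending code is flushed, and each
// phase is timed separately. The function name lzwRoundTrip is hypothetical.
func lzwRoundTrip(data []byte) ([]byte, error) {
	var compressed bytes.Buffer

	start := time.Now()
	w := lzw.NewWriter(&compressed, lzw.MSB, 8)
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	log.Printf("compressed %d -> %d bytes in %v", len(data), compressed.Len(), time.Since(start))

	start = time.Now()
	r := lzw.NewReader(&compressed, lzw.MSB, 8)
	defer r.Close()
	out, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	log.Printf("decompressed back to %d bytes in %v", len(out), time.Since(start))
	return out, nil
}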
func (b *BackupSet) Restore(backend Backend) error {
	for _, record := range b.records {
		secretsId := hex.EncodeToString(b.secrets.Id())
		err := record.Restore(func(id string) (data []byte, err error) {
			chunk, err := backend.ReadChunk(secretsId, id)
			if err != nil {
				return nil, err
			}
			encReader, err := newEncReader(bytes.NewReader(chunk), b.secrets, len(chunk))
			if err != nil {
				return nil, err
			}
			compressor := lzw.NewReader(encReader, lzw.LSB, 8)
			data, err = ioutil.ReadAll(compressor)
			if err != nil {
				return nil, err
			}
			//fmt.Printf("retrieved %d bytes", len(data))
			err = compressor.Close()
			if err != nil {
				return nil, err
			}
			err = encReader.Close()
			if err != nil {
				return nil, err
			}
			return data, nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}
// parseData is used to update brain from a KV data pair
func (d *DedupManager) parseData(path string, raw []byte) {
	// Setup the decompression and decoders
	r := bytes.NewReader(raw)
	decompress := lzw.NewReader(r, lzw.LSB, 8)
	defer decompress.Close()
	dec := gob.NewDecoder(decompress)

	// Decode the data
	var td templateData
	if err := dec.Decode(&td); err != nil {
		log.Printf("[ERR] (dedup) failed to decode '%s': %v", path, err)
		return
	}
	log.Printf("[INFO] (dedup) loading %d dependencies from '%s'", len(td.Data), path)

	// Update the data in the brain
	for hashCode, value := range td.Data {
		d.brain.ForceSet(hashCode, value)
	}

	// Trigger the updateCh
	select {
	case d.updateCh <- struct{}{}:
	default:
	}
}
func make_lzw(t *Transport, config map[string]interface{}) (uint64, tagfn, tagfn) {
	var wbuf bytes.Buffer
	enc := func(in, out []byte) int {
		if len(in) == 0 {
			return 0
		}
		wbuf.Reset()
		writer := lzw.NewWriter(&wbuf, lzw.LSB, 8 /*litWidth*/)
		if _, err := writer.Write(in); err != nil {
			panic(err)
		}
		writer.Close()
		return copy(out, wbuf.Bytes())
	}
	dec := func(in, out []byte) int {
		if len(in) == 0 {
			return 0
		}
		reader := lzw.NewReader(bytes.NewReader(in), lzw.LSB, 8 /*litWidth*/)
		n, err := readAll(reader, out)
		if err != nil {
			panic(err)
		}
		reader.Close()
		return n
	}
	return tagLzw, enc, dec
}
// Decode reads a TIFF image from r and returns it as an image.Image.
// The type of Image returned depends on the contents of the TIFF.
func Decode(r io.Reader) (img image.Image, err os.Error) {
	d, err := newDecoder(r)
	if err != nil {
		return
	}

	// Check if we have the right number of strips, offsets and counts.
	rps := int(d.firstVal(tRowsPerStrip))
	numStrips := (d.config.Height + rps - 1) / rps
	if rps == 0 || len(d.features[tStripOffsets]) < numStrips || len(d.features[tStripByteCounts]) < numStrips {
		return nil, FormatError("inconsistent header")
	}

	switch d.mode {
	case mGray, mGrayInvert:
		img = image.NewGray(d.config.Width, d.config.Height)
	case mPaletted:
		img = image.NewPaletted(d.config.Width, d.config.Height, d.palette)
	case mNRGBA:
		img = image.NewNRGBA(d.config.Width, d.config.Height)
	case mRGB, mRGBA:
		img = image.NewRGBA(d.config.Width, d.config.Height)
	}

	var p []byte
	for i := 0; i < numStrips; i++ {
		ymin := i * rps
		// The last strip may be shorter.
		if i == numStrips-1 && d.config.Height%rps != 0 {
			rps = d.config.Height % rps
		}
		offset := int64(d.features[tStripOffsets][i])
		n := int64(d.features[tStripByteCounts][i])
		switch d.firstVal(tCompression) {
		case cNone:
			// TODO(bsiegert): Avoid copy if r is a tiff.buffer.
			p = make([]byte, n)
			_, err = d.r.ReadAt(p, offset)
		case cLZW:
			r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
			p, err = ioutil.ReadAll(r)
			r.Close()
		case cDeflate, cDeflateOld:
			var r io.ReadCloser
			r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n))
			if err != nil {
				return nil, err
			}
			p, err = ioutil.ReadAll(r)
			r.Close()
		default:
			err = UnsupportedError("compression")
		}
		if err != nil {
			return
		}
		err = d.decode(img, p, ymin, ymin+rps)
	}
	return
}
func Fuzz(data []byte) int {
	r := lzw.NewReader(bytes.NewReader(data), lzw.MSB, 8)
	uncomp := make([]byte, 64<<10)
	n, err := r.Read(uncomp)
	if err != nil && err != io.EOF {
		return 0
	}
	if n == len(uncomp) {
		return 0 // too large
	}
	uncomp = uncomp[:n]
	for width := 2; width <= 8; width++ {
		uncomp0 := append([]byte{}, uncomp...)
		for i, v := range uncomp0 {
			uncomp0[i] = v & (1<<uint(width) - 1)
		}
		for _, order := range []lzw.Order{lzw.MSB, lzw.LSB} {
			buf := new(bytes.Buffer)
			w := lzw.NewWriter(buf, order, width)
			n, err := w.Write(uncomp0)
			if err != nil {
				fmt.Printf("order=%v width=%v\n", order, width)
				panic(err)
			}
			if n != len(uncomp0) {
				fmt.Printf("order=%v width=%v\n", order, width)
				panic("short write")
			}
			if err := w.Close(); err != nil {
				fmt.Printf("order=%v width=%v\n", order, width)
				panic(err)
			}
			r1 := lzw.NewReader(buf, order, width)
			uncomp1, err := ioutil.ReadAll(r1)
			if err != nil {
				fmt.Printf("order=%v width=%v\n", order, width)
				panic(err)
			}
			if !bytes.Equal(uncomp0, uncomp1) {
				fmt.Printf("order=%v width=%v\n", order, width)
				panic("data differs")
			}
		}
	}
	return 1
}
func (self *LzwCompressor) DecompressFromReader(src io.Reader) ([]byte, error) {
	ddest := bytes.NewBuffer(nil)
	decompressor := lzw.NewReader(src, self.order, self.litWidth)
	_, err := io.Copy(ddest, decompressor)
	if err != nil {
		fmt.Printf("DecompressFromReader err: %s\n", err.Error())
	}
	return ddest.Bytes(), err
}
func TestLZWStream(t *testing.T) {
	st := newStream(streamLZWDecode)
	st.WriteString(streamTestString)
	st.Close()
	output, _ := ioutil.ReadAll(lzw.NewReader(st, lzw.MSB, 8))
	if string(output) != streamTestString {
		t.Errorf("Stream is %q, wanted %q", output, streamTestString)
	}
}
func LzwMustUnCompress(inb []byte) (outb []byte) {
	buf := bytes.NewBuffer(inb)
	reader := lzw.NewReader(buf, lzw.LSB, 8)
	outb, err := ioutil.ReadAll(reader)
	if err != nil {
		reader.Close()
		panic(err)
	}
	err = reader.Close()
	if err != nil {
		panic(err)
	}
	return outb
}
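// A minimal sketch of a matching compression helper for LzwMustUnCompress,
// using the same LSB order and literal width of 8; the name LzwMustCompress
// is hypothetical and not part of the original package.
func LzwMustCompress(inb []byte) []byte {
	var buf bytes.Buffer
	w := lzw.NewWriter(&buf, lzw.LSB, 8)
	if _, err := w.Write(inb); err != nil {
		w.Close()
		panic(err)
	}
	// Close flushes the final codes; the compressed stream is incomplete without it.
	if err := w.Close(); err != nil {
		panic(err)
	}
	return buf.Bytes()
}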
// Decompress checks if the first byte in the input matches the canary byte.
// If the first byte is a canary byte, then the input past the canary byte
// will be decompressed using the method specified in the given configuration.
// If the first byte isn't a canary byte, then the utility returns a boolean
// value indicating that the input was not compressed.
func Decompress(data []byte) ([]byte, bool, error) {
	var err error
	var reader io.ReadCloser
	if len(data) == 0 {
		return nil, false, fmt.Errorf("'data' being decompressed is empty")
	}

	switch {
	case data[0] == CompressionCanaryGzip:
		// If the first byte matches the canary byte, remove the canary
		// byte and try to decompress the data that is after the canary.
		if len(data) < 2 {
			return nil, false, fmt.Errorf("invalid 'data' after the canary")
		}
		data = data[1:]
		reader, err = gzip.NewReader(bytes.NewReader(data))
	case data[0] == CompressionCanaryLzw:
		// If the first byte matches the canary byte, remove the canary
		// byte and try to decompress the data that is after the canary.
		if len(data) < 2 {
			return nil, false, fmt.Errorf("invalid 'data' after the canary")
		}
		data = data[1:]
		reader = lzw.NewReader(bytes.NewReader(data), lzw.LSB, 8)
	default:
		// If the first byte doesn't match the canary byte, it means
		// that the content was not compressed at all. Indicate to the
		// caller that the input was not compressed.
		return nil, true, nil
	}
	if err != nil {
		return nil, false, fmt.Errorf("failed to create a compression reader; err: %v", err)
	}
	if reader == nil {
		return nil, false, fmt.Errorf("failed to create a compression reader")
	}

	// Close the io.ReadCloser
	defer reader.Close()

	// Read all the compressed data into a buffer
	var buf bytes.Buffer
	if _, err = io.Copy(&buf, reader); err != nil {
		return nil, false, err
	}

	return buf.Bytes(), false, nil
}
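// A minimal sketch of producing input that the Decompress function above will
// accept for the LZW case: prepend the canary byte, then the LZW-compressed
// payload. CompressionCanaryLzw is the constant referenced by Decompress and
// is assumed to be a byte value; the helper name compressWithLzwCanary is
// hypothetical.
func compressWithLzwCanary(data []byte) ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte(CompressionCanaryLzw)
	w := lzw.NewWriter(&buf, lzw.LSB, 8)
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	// Close flushes any buffered codes; skipping it would truncate the stream.
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}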
func (e *Engine) unlzw_lsb() error {
	var litWidth int
	var data []byte
	var err error
	litWidth, err = e.stack.PopInt()
	if err == nil {
		buf := bytes.NewBuffer(e.stack.Pop())
		r := lzw.NewReader(buf, lzw.LSB, litWidth)
		data, err = ioutil.ReadAll(r)
		r.Close()
	}
	if err == nil {
		e.stack.Push(data)
	}
	return err
}
func main() {
	ugo.MaxProcs()
	ufs.NewDirWalker(false, nil, func(fullPath string) bool {
		blobs = append(blobs, ufs.ReadBinaryFile(fullPath, true))
		return true
	}).Walk(dirPath)
	testComp("flate1", func(w io.Writer) (wc io.WriteCloser) {
		var err error
		if wc, err = flate.NewWriter(w, 1); err != nil {
			panic(err)
		}
		return
	}, flate.NewReader)
	testComp("flate9", func(w io.Writer) (wc io.WriteCloser) {
		var err error
		if wc, err = flate.NewWriter(w, 9); err != nil {
			panic(err)
		}
		return
	}, flate.NewReader)
	testComp("lzw\t", func(w io.Writer) io.WriteCloser {
		return lzw.NewWriter(w, lzw.MSB, 8)
	}, func(r io.Reader) io.ReadCloser {
		return lzw.NewReader(r, lzw.MSB, 8)
	})
	testComp("zlib", func(w io.Writer) io.WriteCloser {
		return zlib.NewWriter(w)
	}, func(r io.Reader) (rc io.ReadCloser) {
		var err error
		if rc, err = zlib.NewReader(r); err != nil {
			panic(err)
		}
		return
	})
	testComp("gzip", func(w io.Writer) io.WriteCloser {
		return gzip.NewWriter(w)
	}, func(r io.Reader) (rc io.ReadCloser) {
		var err error
		if rc, err = gzip.NewReader(r); err != nil {
			panic(err)
		}
		return
	})
	printStats("PACK:", packStats)
	printStats("UNPACK:", unpackStats)
}
func main() {
	flag.Parse()
	fname := "event.gob"
	switch *compr {
	case "gzip":
		fname = fmt.Sprintf("%s.gz", fname)
	case "zlib":
		fname = fmt.Sprintf("%s.z", fname)
	case "lzw":
		fname = fmt.Sprintf("%s.lzw", fname)
	case "none", "":
		// keep the default file name
	default:
		// keep the default file name
	}
	f, err := os.Open(fname)
	if err != nil {
		panic(err)
	}
	var ff io.ReadCloser
	switch *compr {
	case "gzip":
		ff, err = gzip.NewReader(f)
	case "zlib":
		ff, err = zlib.NewReader(f)
	case "lzw":
		ff = lzw.NewReader(f, lzw.MSB, 8)
		err = nil
	default:
		ff = f
	}
	if err != nil {
		panic(err)
	}
	tree0(ff)
	ff.Close()
	f.Close()
}
// decompressBuffer is used to decompress the buffer of
// a single compress message, handling multiple algorithms
func decompressBuffer(c *compress) ([]byte, error) {
	// Verify the algorithm
	if c.Algo != lzwAlgo {
		return nil, fmt.Errorf("Cannot decompress unknown algorithm %d", c.Algo)
	}

	// Create an uncompressor
	uncomp := lzw.NewReader(bytes.NewReader(c.Buf), lzw.LSB, lzwLitWidth)
	defer uncomp.Close()

	// Read all the data
	var b bytes.Buffer
	_, err := io.Copy(&b, uncomp)
	if err != nil {
		return nil, err
	}

	// Return the uncompressed bytes
	return b.Bytes(), nil
}
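// A minimal sketch of the matching compression side for decompressBuffer,
// assuming the same compress struct and the lzwAlgo / lzwLitWidth constants
// referenced above; the function name compressBufferSketch is hypothetical.
func compressBufferSketch(payload []byte) (*compress, error) {
	var buf bytes.Buffer
	w := lzw.NewWriter(&buf, lzw.LSB, lzwLitWidth)
	if _, err := w.Write(payload); err != nil {
		return nil, err
	}
	// Close flushes the final partial code; without it the reader side may
	// see a truncated LZW stream.
	if err := w.Close(); err != nil {
		return nil, err
	}
	return &compress{Algo: lzwAlgo, Buf: buf.Bytes()}, nil
}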
func getDecompressor(in io.Reader) io.Reader {
	switch *algorithm {
	case "bzip2":
		return bzip2.NewReader(in)
	case "flate":
		return flate.NewReader(in)
	case "gzip":
		decompressor, err := gzip.NewReader(in)
		if err != nil {
			log.Fatalf("failed making gzip decompressor: %v", err)
		}
		return decompressor
	case "lzw":
		return lzw.NewReader(in, lzw.MSB, 8)
	case "zlib":
		decompressor, err := zlib.NewReader(in)
		if err != nil {
			log.Fatalf("failed making zlib decompressor: %v", err)
		}
		return decompressor
	}
	panic("not reached")
}
// NewRetoFromFile builds a tesseract network by loading a container file
// (the product of serialization).
func NewRetoFromFile(filename string) *Reto {
	reto := &Reto{Icc: nil}
	file, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	stream := lzw.NewReader(file, lzw.LSB, 8)
	defer stream.Close()
	decoder := json.NewDecoder(stream)
	err = decoder.Decode(&reto)
	if err != nil {
		log.Fatal(err)
	}
	for _, neuron := range reto.Neurons {
		tmp := neuron.OutSynapses
		neuron.OutSynapses = nil
		for _, syn := range tmp {
			nsyn := NewSynapseFromTo(neuron, reto.Neurons[syn.IdOut], syn.Weight)
			nsyn.Act = syn.Act
		}
	}
	return reto
}
func (p *compressEncoding) Decoder(req Request, cxt Context, reader io.Reader) io.Reader {
	return lzw.NewReader(reader, lzw.LSB, 8)
}
// decode reads a GIF image from r and stores the result in d.
func (d *decoder) decode(r io.Reader, configOnly bool) error {
	// Add buffering if r does not provide ReadByte.
	if rr, ok := r.(reader); ok {
		d.r = rr
	} else {
		d.r = bufio.NewReader(r)
	}

	err := d.readHeaderAndScreenDescriptor()
	if err != nil {
		return err
	}
	if configOnly {
		return nil
	}

	if d.headerFields&fColorMapFollows != 0 {
		if d.globalColorMap, err = d.readColorMap(); err != nil {
			return err
		}
	}

	for {
		c, err := d.r.ReadByte()
		if err != nil {
			return err
		}
		switch c {
		case sExtension:
			if err = d.readExtension(); err != nil {
				return err
			}

		case sImageDescriptor:
			m, err := d.newImageFromDescriptor()
			if err != nil {
				return err
			}
			useLocalColorMap := d.imageFields&fColorMapFollows != 0
			if useLocalColorMap {
				m.Palette, err = d.readColorMap()
				if err != nil {
					return err
				}
			} else {
				m.Palette = d.globalColorMap
			}
			if d.hasTransparentIndex && int(d.transparentIndex) < len(m.Palette) {
				if !useLocalColorMap {
					// Clone the global color map.
					m.Palette = append(color.Palette(nil), d.globalColorMap...)
				}
				m.Palette[d.transparentIndex] = color.RGBA{}
			}
			litWidth, err := d.r.ReadByte()
			if err != nil {
				return err
			}
			if litWidth < 2 || litWidth > 8 {
				return fmt.Errorf("gif: pixel size in decode out of range: %d", litWidth)
			}
			// A wonderfully Go-like piece of magic.
			br := &blockReader{r: d.r}
			lzwr := lzw.NewReader(br, lzw.LSB, int(litWidth))
			defer lzwr.Close()
			if _, err = io.ReadFull(lzwr, m.Pix); err != nil {
				if err != io.ErrUnexpectedEOF {
					return err
				}
				return errNotEnough
			}
			// Both lzwr and br should be exhausted. Reading from them
			// should yield (0, io.EOF).
			if n, err := lzwr.Read(d.tmp[:1]); n != 0 || err != io.EOF {
				if err != nil {
					return err
				}
				return errTooMuch
			}
			if n, err := br.Read(d.tmp[:1]); n != 0 || err != io.EOF {
				if err != nil {
					return err
				}
				return errTooMuch
			}

			// Check that the color indexes are inside the palette.
			if len(m.Palette) < 256 {
				for _, pixel := range m.Pix {
					if int(pixel) >= len(m.Palette) {
						return errBadPixel
					}
				}
			}

			// Undo the interlacing if necessary.
			if d.imageFields&ifInterlace != 0 {
				uninterlace(m)
			}

			d.image = append(d.image, m)
			d.delay = append(d.delay, d.delayTime)
			d.disposal = append(d.disposal, d.disposalMethod)
			// The GIF89a spec, Section 23 (Graphic Control Extension) says:
			// "The scope of this extension is the first graphic rendering block
			// to follow." We therefore reset the GCE fields to zero.
			d.delayTime = 0
			d.hasTransparentIndex = false

		case sTrailer:
			if len(d.image) == 0 {
				return io.ErrUnexpectedEOF
			}
			return nil

		default:
			return fmt.Errorf("gif: unknown block type: 0x%.2x", c)
		}
	}
}
// decode reads a GIF image from r and stores the result in d.
func (d *decoder) decode(r io.Reader, configOnly bool) error {
	// Add buffering if r does not provide ReadByte.
	if rr, ok := r.(reader); ok {
		d.r = rr
	} else {
		d.r = bufio.NewReader(r)
	}

	err := d.readHeaderAndScreenDescriptor()
	if err != nil {
		return err
	}
	if configOnly {
		return nil
	}

	if d.headerFields&fColorMapFollows != 0 {
		if d.globalColorMap, err = d.readColorMap(); err != nil {
			return err
		}
	}

	frame := 0
	for {
		c, err := d.r.ReadByte()
		if err != nil {
			return err
		}
		switch c {
		case sExtension:
			if err = d.readExtension(); err != nil {
				return err
			}

		case sImageDescriptor:
			m, err := d.newImageFromDescriptor()
			if err != nil {
				return err
			}
			if d.imageFields&fColorMapFollows != 0 {
				m.Palette, err = d.readColorMap()
				if err != nil {
					return err
				}
			} else {
				m.Palette = d.globalColorMap
			}
			litWidth, err := d.r.ReadByte()
			if err != nil {
				return err
			}
			if litWidth < 2 || litWidth > 8 {
				return fmt.Errorf("gif: pixel size in decode out of range: %d", litWidth)
			}
			// A wonderfully Go-like piece of magic.
			br := &blockReader{r: d.r}
			lzwr := lzw.NewReader(br, lzw.LSB, int(litWidth))
			defer lzwr.Close()
			if _, err = io.ReadFull(lzwr, m.Pix); err != nil {
				if err != io.ErrUnexpectedEOF {
					return err
				}
				return errNotEnough
			}
			// Both lzwr and br should be exhausted. Reading from them
			// should yield (0, io.EOF).
			if n, err := lzwr.Read(d.tmp[:1]); n != 0 || err != io.EOF {
				if err != nil {
					return err
				}
				return errTooMuch
			}
			if n, err := br.Read(d.tmp[:1]); n != 0 || err != io.EOF {
				if err != nil {
					return err
				}
				return errTooMuch
			}

			// Check that the color indexes are inside the palette.
			//if len(m.Palette) < 256 {
			//	for _, pixel := range m.Pix {
			//		if int(pixel) >= len(m.Palette) {
			//			return errBadPixel
			//		}
			//	}
			//}

			// Undo the interlacing if necessary.
			if d.imageFields&ifInterlace != 0 {
				uninterlace(m)
			}

			// Create a new full-frame image:
			fr := image.NewPaletted(image.Rect(0, 0, d.width, d.height), m.Palette)

			isTransparent := d.flags&gcTransparentColorSet != 0
			if frame == 0 || d.lastDisposalMethod == fdmBGClear {
				clearColor := d.backgroundIndex
				if d.lastFlags&gcTransparentColorSet != 0 {
					clearColor = d.transparentIndex
				}
				for y := d.lastFrameBounds.Min.Y; y < d.lastFrameBounds.Max.Y; y++ {
					for x := d.lastFrameBounds.Min.X; x < d.lastFrameBounds.Max.X; x++ {
						fr.SetColorIndex(x, y, clearColor)
					}
				}
			} else if frame > 0 && (d.lastDisposalMethod == fdmCombine || d.lastDisposalMethod == fdmNone) {
				// Copy in the previous frame:
				src := d.image[frame-1]
				for y := 0; y < d.height; y++ {
					for x := 0; x < d.width; x++ {
						c := src.ColorIndexAt(x, y)
						fr.SetColorIndex(x, y, c)
					}
				}
			} else {
				// TODO(jsd): Handle other clear methods.
			}

			// Copy the temporary frame onto the current frame, skipping transparent:
			if isTransparent {
				// Transparent overlay:
				for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ {
					for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ {
						c := m.ColorIndexAt(x, y)
						if c == d.transparentIndex {
							continue
						}
						fr.SetColorIndex(x, y, c)
					}
				}
			} else {
				// Opaque copy:
				for y := m.Rect.Min.Y; y < m.Rect.Max.Y; y++ {
					for x := m.Rect.Min.X; x < m.Rect.Max.X; x++ {
						c := m.ColorIndexAt(x, y)
						fr.SetColorIndex(x, y, c)
					}
				}
			}

			d.image = append(d.image, fr)
			d.delay = append(d.delay, d.delayTime)
			if isTransparent {
				d.transparentIndices = append(d.transparentIndices, int(d.transparentIndex))
			} else {
				d.transparentIndices = append(d.transparentIndices, int(-2))
			}
			frame++

			d.delayTime = 0 // TODO: is this correct, or should we hold on to the value?

		case sTrailer:
			if len(d.image) == 0 {
				return io.ErrUnexpectedEOF
			}
			return nil

		default:
			return fmt.Errorf("gif: unknown block type: 0x%.2x", c)
		}
	}
}
// decode reads a GIF image from r and stores the result in d.
func (d *decoder) decode(r io.Reader, configOnly bool) error {
	// Add buffering if r does not provide ReadByte.
	if rr, ok := r.(reader); ok {
		d.r = rr
	} else {
		d.r = bufio.NewReader(r)
	}

	err := d.readHeaderAndScreenDescriptor()
	if err != nil {
		return err
	}
	if configOnly {
		return nil
	}

	if d.headerFields&fColorMapFollows != 0 {
		if d.globalColorMap, err = d.readColorMap(); err != nil {
			return err
		}
	}

Loop:
	for err == nil {
		var c byte
		c, err = d.r.ReadByte()
		if err == io.EOF {
			break
		}
		switch c {
		case sExtension:
			err = d.readExtension()

		case sImageDescriptor:
			var m *image.Paletted
			m, err = d.newImageFromDescriptor()
			if err != nil {
				break
			}
			if d.imageFields&fColorMapFollows != 0 {
				m.Palette, err = d.readColorMap()
				if err != nil {
					break
				}
				// TODO: do we set transparency in this map too? That would be
				// d.setTransparency(m.Palette)
			} else {
				m.Palette = d.globalColorMap
			}
			var litWidth uint8
			litWidth, err = d.r.ReadByte()
			if err != nil {
				return err
			}
			if litWidth < 2 || litWidth > 8 {
				return fmt.Errorf("gif: pixel size in decode out of range: %d", litWidth)
			}
			// A wonderfully Go-like piece of magic.
			lzwr := lzw.NewReader(&blockReader{r: d.r}, lzw.LSB, int(litWidth))
			if _, err = io.ReadFull(lzwr, m.Pix); err != nil {
				break
			}

			// There should be a "0" block remaining; drain that.
			c, err = d.r.ReadByte()
			if err != nil {
				return err
			}
			if c != 0 {
				return errors.New("gif: extra data after image")
			}

			// Undo the interlacing if necessary.
			if d.imageFields&ifInterlace != 0 {
				uninterlace(m)
			}

			d.image = append(d.image, m)
			d.delay = append(d.delay, d.delayTime)
			d.delayTime = 0 // TODO: is this correct, or should we hold on to the value?

		case sTrailer:
			break Loop

		default:
			err = fmt.Errorf("gif: unknown block type: 0x%.2x", c)
		}
	}
	if err != nil {
		return err
	}
	if len(d.image) == 0 {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func decompressStream(file io.Reader) io.Reader {
	if file == nil {
		return nil
	}
	return lzw.NewReader(file, lzw.LSB, 8)
}
func (c compressor) GetReader(r io.Reader) (io.ReadCloser, error) {
	return lzw.NewReader(r, lzw.LSB, 8), nil
}
// decode reads a GIF image from r and stores the result in d.
func (d *decoder) decode(r io.Reader, configOnly bool) error {
	// Add buffering if r does not provide ReadByte.
	if rr, ok := r.(reader); ok {
		d.r = rr
	} else {
		d.r = bufio.NewReader(r)
	}

	err := d.readHeaderAndScreenDescriptor()
	if err != nil {
		return err
	}
	if configOnly {
		return nil
	}

	for {
		c, err := d.r.ReadByte()
		if err != nil {
			return err
		}
		switch c {
		case sExtension:
			if err = d.readExtension(); err != nil {
				return err
			}

		case sImageDescriptor:
			m, err := d.newImageFromDescriptor()
			if err != nil {
				return err
			}
			useLocalColorTable := d.imageFields&fColorTable != 0
			if useLocalColorTable {
				m.Palette, err = d.readColorTable(d.imageFields)
				if err != nil {
					return err
				}
			} else {
				if d.globalColorTable == nil {
					return errors.New("gif: no color table")
				}
				m.Palette = d.globalColorTable
			}
			if d.hasTransparentIndex {
				if !useLocalColorTable {
					// Clone the global color table.
					m.Palette = append(color.Palette(nil), d.globalColorTable...)
				}
				if ti := int(d.transparentIndex); ti < len(m.Palette) {
					m.Palette[ti] = color.RGBA{}
				} else {
					// The transparentIndex is out of range, which is an error
					// according to the spec, but Firefox and Google Chrome
					// seem OK with this, so we enlarge the palette with
					// transparent colors. See golang.org/issue/15059.
					p := make(color.Palette, ti+1)
					copy(p, m.Palette)
					for i := len(m.Palette); i < len(p); i++ {
						p[i] = color.RGBA{}
					}
					m.Palette = p
				}
			}
			litWidth, err := d.r.ReadByte()
			if err != nil {
				return err
			}
			if litWidth < 2 || litWidth > 8 {
				return fmt.Errorf("gif: pixel size in decode out of range: %d", litWidth)
			}
			// A wonderfully Go-like piece of magic.
			br := &blockReader{r: d.r}
			lzwr := lzw.NewReader(br, lzw.LSB, int(litWidth))
			defer lzwr.Close()
			if _, err = io.ReadFull(lzwr, m.Pix); err != nil {
				if err != io.ErrUnexpectedEOF {
					return err
				}
				return errNotEnough
			}
			// Both lzwr and br should be exhausted. Reading from them should
			// yield (0, io.EOF).
			//
			// The spec (Appendix F - Compression), says that "An End of
			// Information code... must be the last code output by the encoder
			// for an image". In practice, though, giflib (a widely used C
			// library) does not enforce this, so we also accept lzwr returning
			// io.ErrUnexpectedEOF (meaning that the encoded stream hit io.EOF
			// before the LZW decoder saw an explicit end code), provided that
			// the io.ReadFull call above successfully read len(m.Pix) bytes.
			// See https://golang.org/issue/9856 for an example GIF.
			if n, err := lzwr.Read(d.tmp[:1]); n != 0 || (err != io.EOF && err != io.ErrUnexpectedEOF) {
				if err != nil {
					return err
				}
				return errTooMuch
			}
			if n, err := br.Read(d.tmp[:1]); n != 0 || err != io.EOF {
				if err != nil {
					return err
				}
				return errTooMuch
			}

			// Check that the color indexes are inside the palette.
			if len(m.Palette) < 256 {
				for _, pixel := range m.Pix {
					if int(pixel) >= len(m.Palette) {
						return errBadPixel
					}
				}
			}

			// Undo the interlacing if necessary.
			if d.imageFields&fInterlace != 0 {
				uninterlace(m)
			}

			d.image = append(d.image, m)
			d.delay = append(d.delay, d.delayTime)
			d.disposal = append(d.disposal, d.disposalMethod)
			// The GIF89a spec, Section 23 (Graphic Control Extension) says:
			// "The scope of this extension is the first graphic rendering block
			// to follow." We therefore reset the GCE fields to zero.
			d.delayTime = 0
			d.hasTransparentIndex = false

		case sTrailer:
			if len(d.image) == 0 {
				return io.ErrUnexpectedEOF
			}
			return nil

		default:
			return fmt.Errorf("gif: unknown block type: 0x%.2x", c)
		}
	}
}
func newGzipReader(r io.Reader) io.ReadCloser {
	gr, err := gzip.NewReader(r)
	if err != nil {
		panic(err)
	}
	return gr
}

var (
	ErrUsed      = errors.New("the slice has already been used")
	ErrUnaligned = errors.New("the size must be a multiple of the data size")

	readerGen = map[Compressor]ReaderGen{
		Gzip: newGzipReader,
		Lzw: func(r io.Reader) io.ReadCloser {
			return lzw.NewReader(r, lzw.LSB, 8)
		},
	}
	writerGen = map[Compressor]WriterGen{
		Gzip: func(w io.Writer) io.WriteCloser {
			return gzip.NewWriter(w)
		},
		Lzw: func(w io.Writer) io.WriteCloser {
			return lzw.NewWriter(w, lzw.LSB, 8)
		},
	}
)

type zblock []byte

// A UintSlice stores large slices of uint64 that are compressed in large blocks.
type UintSlice struct {
	blocks []zblock

	cur struct {
		slice []byte
		pos   int
// Decode reads a TIFF image from r and returns it as an image.Image.
// The type of Image returned depends on the contents of the TIFF.
func Decode(r io.Reader) (img image.Image, err error) {
	d, err := newDecoder(r)
	if err != nil {
		return
	}

	// Check if we have the right number of strips, offsets and counts.
	rps := int(d.firstVal(tRowsPerStrip))
	if rps == 0 {
		// Assume only one strip.
		rps = d.config.Height
	}
	numStrips := (d.config.Height + rps - 1) / rps
	if rps == 0 || len(d.features[tStripOffsets]) < numStrips || len(d.features[tStripByteCounts]) < numStrips {
		return nil, FormatError("inconsistent header")
	}

	switch d.mode {
	case mGray, mGrayInvert:
		img = image.NewGray(image.Rect(0, 0, d.config.Width, d.config.Height))
	case mPaletted:
		img = image.NewPaletted(image.Rect(0, 0, d.config.Width, d.config.Height), d.palette)
	case mNRGBA:
		img = image.NewNRGBA(image.Rect(0, 0, d.config.Width, d.config.Height))
	case mRGB, mRGBA:
		img = image.NewRGBA(image.Rect(0, 0, d.config.Width, d.config.Height))
	}

	for i := 0; i < numStrips; i++ {
		ymin := i * rps
		// The last strip may be shorter.
		if i == numStrips-1 && d.config.Height%rps != 0 {
			rps = d.config.Height % rps
		}
		offset := int64(d.features[tStripOffsets][i])
		n := int64(d.features[tStripByteCounts][i])
		switch d.firstVal(tCompression) {
		case cNone:
			if b, ok := d.r.(*buffer); ok {
				d.buf, err = b.Slice(int(offset), int(n))
			} else {
				d.buf = make([]byte, n)
				_, err = d.r.ReadAt(d.buf, offset)
			}
		case cLZW:
			r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
			d.buf, err = ioutil.ReadAll(r)
			r.Close()
		case cDeflate, cDeflateOld:
			var r io.ReadCloser
			r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n))
			if err != nil {
				return nil, err
			}
			d.buf, err = ioutil.ReadAll(r)
			r.Close()
		case cPackBits:
			d.buf, err = unpackBits(io.NewSectionReader(d.r, offset, n))
		default:
			err = UnsupportedError("compression")
		}
		if err != nil {
			return
		}
		err = d.decode(img, ymin, ymin+rps)
	}
	return
}
// decode reads a GIF image from r and stores the result in d.
func (d *decoder) decode(r io.Reader, configOnly bool) error {
	// Add buffering if r does not provide ReadByte.
	if rr, ok := r.(reader); ok {
		d.r = rr
	} else {
		d.r = bufio.NewReader(r)
	}

	err := d.readHeaderAndScreenDescriptor()
	if err != nil {
		return err
	}
	if configOnly {
		return nil
	}

	if d.headerFields&fColorMapFollows != 0 {
		if d.globalColorMap, err = d.readColorMap(); err != nil {
			return err
		}
	}

	for {
		c, err := d.r.ReadByte()
		if err != nil {
			return err
		}
		switch c {
		case sExtension:
			if err = d.readExtension(); err != nil {
				return err
			}

		case sImageDescriptor:
			m, err := d.newImageFromDescriptor()
			if err != nil {
				return err
			}
			if d.imageFields&fColorMapFollows != 0 {
				m.Palette, err = d.readColorMap()
				if err != nil {
					return err
				}
				// TODO: do we set transparency in this map too? That would be
				// d.setTransparency(m.Palette)
			} else {
				m.Palette = d.globalColorMap
			}
			litWidth, err := d.r.ReadByte()
			if err != nil {
				return err
			}
			if litWidth < 2 || litWidth > 8 {
				return fmt.Errorf("gif: pixel size in decode out of range: %d", litWidth)
			}
			// A wonderfully Go-like piece of magic.
			br := &blockReader{r: d.r}
			lzwr := lzw.NewReader(br, lzw.LSB, int(litWidth))
			if _, err = io.ReadFull(lzwr, m.Pix); err != nil {
				if err != io.ErrUnexpectedEOF {
					return err
				}
				return errNotEnough
			}
			// Both lzwr and br should be exhausted. Reading from them
			// should yield (0, io.EOF).
			if n, err := lzwr.Read(d.tmp[:1]); n != 0 || err != io.EOF {
				if err != nil {
					return err
				}
				return errTooMuch
			}
			if n, err := br.Read(d.tmp[:1]); n != 0 || err != io.EOF {
				if err != nil {
					return err
				}
				return errTooMuch
			}

			// Check that the color indexes are inside the palette.
			if len(m.Palette) < 256 {
				for _, pixel := range m.Pix {
					if int(pixel) >= len(m.Palette) {
						return errBadPixel
					}
				}
			}

			// Undo the interlacing if necessary.
			if d.imageFields&ifInterlace != 0 {
				uninterlace(m)
			}

			d.image = append(d.image, m)
			d.delay = append(d.delay, d.delayTime)
			d.delayTime = 0 // TODO: is this correct, or should we hold on to the value?

		case sTrailer:
			if len(d.image) == 0 {
				return io.ErrUnexpectedEOF
			}
			return nil

		default:
			return fmt.Errorf("gif: unknown block type: 0x%.2x", c)
		}
	}
}