func (d *decoder) parsePLTE(r io.Reader, crc hash.Hash32, length uint32) os.Error {
	np := int(length / 3) // The number of palette entries.
	if length%3 != 0 || np <= 0 || np > 256 {
		return FormatError("bad PLTE length")
	}
	n, err := io.ReadFull(r, d.tmp[0:3*np])
	if err != nil {
		return err
	}
	crc.Write(d.tmp[0:n])
	switch d.colorType {
	case ctPaletted:
		palette := make([]image.Color, np)
		for i := 0; i < np; i++ {
			palette[i] = image.RGBAColor{d.tmp[3*i+0], d.tmp[3*i+1], d.tmp[3*i+2], 0xff}
		}
		d.image.(*image.Paletted).Palette = image.PalettedColorModel(palette)
	case ctTrueColor, ctTrueColorAlpha:
		// As per the PNG spec, a PLTE chunk is optional (and for practical purposes,
		// ignorable) for the ctTrueColor and ctTrueColorAlpha color types (section 4.1.2).
		return nil
	default:
		return FormatError("PLTE, color type mismatch")
	}
	return nil
}
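For context, here is a minimal sketch (not part of the decoder) of the other side of this chunk: building a paletted image with the same pre-Go 1 API used above (image.PalettedColorModel, image.RGBAColor, image.NewPaletted, image.Paletted.SetColorIndex) and encoding it with png.Encode, which emits the PLTE chunk that parsePLTE later reads back. The two-color checkerboard, the file name, and the io/ioutil import are illustrative choices, not taken from the original code.

// writeTinyPalettedPNG writes a 16x16 two-color paletted PNG to disk.
// Its PLTE chunk will contain exactly the two palette entries below.
func writeTinyPalettedPNG() os.Error {
	palette := image.PalettedColorModel{
		image.RGBAColor{0x00, 0x00, 0x00, 0xff}, // index 0: black
		image.RGBAColor{0xff, 0xff, 0xff, 0xff}, // index 1: white
	}
	img := image.NewPaletted(16, 16, palette)
	for y := 0; y < 16; y++ {
		for x := 0; x < 16; x++ {
			img.SetColorIndex(x, y, uint8((x+y)%2)) // checkerboard pattern
		}
	}
	buf := new(bytes.Buffer)
	if err := png.Encode(buf, img); err != nil {
		return err
	}
	return ioutil.WriteFile("checker.png", buf.Bytes(), 0644)
}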
func render(x, y, z int) []byte {
	// tileX and tileY are the absolute position of this tile at the current zoom level.
	tileX, tileY := x*tileSize, y*tileSize
	scale := 1 / float64(int(1<<uint(z))*tileSize)
	img := image.NewPaletted(tileSize, tileSize, image.PalettedColorModel(color[:]))
	for i := 0; i < tileSize; i++ {
		for j := 0; j < tileSize; j++ {
			c := complex(float64(tileX+i)*scale, float64(tileY+j)*scale)
			img.SetColorIndex(i, j, mandelbrotValue(c))
		}
	}
	buf := new(bytes.Buffer)
	png.Encode(buf, img)
	return buf.Bytes()
}
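Since render returns ready-to-serve PNG bytes, one natural way to exercise it is from an HTTP handler. The sketch below is hypothetical and not part of the original program: the "/tile" route, the x/y/z query-parameter names, and the net/http and strconv imports are assumptions made for illustration; error handling is omitted for brevity.

// tileHandler serves the Mandelbrot tile identified by the x, y, z query
// parameters as a PNG image.
func tileHandler(w http.ResponseWriter, r *http.Request) {
	// A real handler should check these Atoi errors and reject bad input.
	x, _ := strconv.Atoi(r.FormValue("x"))
	y, _ := strconv.Atoi(r.FormValue("y"))
	z, _ := strconv.Atoi(r.FormValue("z"))
	w.Header().Set("Content-Type", "image/png")
	w.Write(render(x, y, z))
}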
func newDecoder(r io.Reader) (*decoder, os.Error) {
	d := &decoder{
		r:        newReaderAt(r),
		features: make(map[int][]uint),
	}
	p := make([]byte, 8)
	if _, err := d.r.ReadAt(p, 0); err != nil {
		return nil, err
	}
	switch string(p[0:4]) {
	case leHeader:
		d.byteOrder = binary.LittleEndian
	case beHeader:
		d.byteOrder = binary.BigEndian
	default:
		return nil, FormatError("malformed header")
	}

	ifdOffset := int64(d.byteOrder.Uint32(p[4:8]))

	// The first two bytes of the IFD contain the number of entries; each entry is 12 bytes.
	if _, err := d.r.ReadAt(p[0:2], ifdOffset); err != nil {
		return nil, err
	}
	numItems := int(d.byteOrder.Uint16(p[0:2]))

	// All IFD entries are read in one chunk.
	p = make([]byte, ifdLen*numItems)
	if _, err := d.r.ReadAt(p, ifdOffset+2); err != nil {
		return nil, err
	}

	for i := 0; i < len(p); i += ifdLen {
		if err := d.parseIFD(p[i : i+ifdLen]); err != nil {
			return nil, err
		}
	}

	d.config.Width = int(d.firstVal(tImageWidth))
	d.config.Height = int(d.firstVal(tImageLength))

	// Determine the image mode.
	switch d.firstVal(tPhotometricInterpretation) {
	case pRGB:
		d.config.ColorModel = image.RGBAColorModel
		// RGB images normally have 3 samples per pixel.
		// If there are more, ExtraSamples (p. 31-32 of the spec)
		// gives their meaning (usually an alpha channel).
		switch len(d.features[tBitsPerSample]) {
		case 3:
			d.mode = mRGB
		case 4:
			switch d.firstVal(tExtraSamples) {
			case 1:
				d.mode = mRGBA
			case 2:
				d.mode = mNRGBA
				d.config.ColorModel = image.NRGBAColorModel
			default:
				// The extra sample is discarded.
				d.mode = mRGB
			}
		default:
			return nil, FormatError("wrong number of samples for RGB")
		}
	case pPaletted:
		d.mode = mPaletted
		d.config.ColorModel = image.PalettedColorModel(d.palette)
	case pWhiteIsZero:
		d.mode = mGrayInvert
		d.config.ColorModel = image.GrayColorModel
	case pBlackIsZero:
		d.mode = mGray
		d.config.ColorModel = image.GrayColorModel
	default:
		return nil, UnsupportedError("color model")
	}

	if _, ok := d.features[tBitsPerSample]; !ok {
		return nil, FormatError("BitsPerSample tag missing")
	}
	for _, b := range d.features[tBitsPerSample] {
		if b != 8 {
			return nil, UnsupportedError("not an 8-bit image")
		}
	}

	return d, nil
}
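The decoder populates d.config with the image width, height, and color model, which is the information a DecodeConfig-style wrapper would expose. The sketch below is a hypothetical helper inside the same package showing how newDecoder might be exercised; it uses only what appears above plus an assumed fmt import.

// printSize decodes just enough of a TIFF stream to report its dimensions.
func printSize(r io.Reader) os.Error {
	d, err := newDecoder(r)
	if err != nil {
		return err
	}
	fmt.Println(d.config.Width, d.config.Height)
	return nil
}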