func ti_read_string(rd *bytes.Reader, str_off, table int16) (string, error) {
	var off int16
	_, err := rd.Seek(int64(str_off), 0)
	if err != nil {
		return "", err
	}
	err = binary.Read(rd, binary.LittleEndian, &off)
	if err != nil {
		return "", err
	}
	_, err = rd.Seek(int64(table+off), 0)
	if err != nil {
		return "", err
	}
	var bs []byte
	for {
		b, err := rd.ReadByte()
		if err != nil {
			return "", err
		}
		if b == 0x00 {
			break
		}
		bs = append(bs, b)
	}
	return string(bs), nil
}
// Skip advances the reader by length bytes from the current offset.
func (t *TTFParser) Skip(fd *bytes.Reader, length int) error {
	_, err := fd.Seek(int64(length), 1)
	if err != nil {
		return err
	}
	return nil
}
// degob converts a gob into a new copy of a subtree.
func degob(buf *bytes.Reader) Node {
	var tree Node
	buf.Seek(0, 0)
	dec := gob.NewDecoder(buf)
	CkErr(dec.Decode(&tree))
	return tree
}
// Removes the oldest entries to limit the log's length to `maxLength`.
// This is the same as ChangeLog.Truncate except it works directly on the encoded form, which is
// much faster than decoding+truncating+encoding.
func TruncateEncodedChangeLog(r *bytes.Reader, maxLength, minLength int, w io.Writer) (removed int, newLength int) {
	since := readSequence(r)

	// Find the starting position and sequence of each entry:
	entryPos := make([]int64, 0, 1000)
	entrySeq := make([]uint64, 0, 1000)
	for {
		pos, err := r.Seek(0, 1)
		if err != nil {
			panic("Seek??")
		}
		flags, err := r.ReadByte()
		if err != nil {
			if err == io.EOF {
				break // eof
			}
			panic("ReadByte failed")
		}
		seq := readSequence(r)
		skipString(r)
		skipString(r)
		skipString(r)
		if flags > kMaxFlag {
			panic(fmt.Sprintf("TruncateEncodedChangeLog: bad flags 0x%x, entry %d, offset %d",
				flags, len(entryPos), pos))
		}
		entryPos = append(entryPos, pos)
		entrySeq = append(entrySeq, seq)
	}

	// How many entries to remove?
	// * Leave no more than maxLength entries
	// * Every sequence value removed should be less than every sequence remaining.
	// * The new 'since' value should be the maximum sequence removed.
	oldLength := len(entryPos)
	removed = oldLength - maxLength
	if removed <= 0 {
		removed = 0
	} else {
		pivot, newSince := findPivot(entrySeq, removed-1)
		removed = pivot + 1
		if oldLength-removed >= minLength {
			since = newSince
		} else {
			removed = 0
			base.Warn("TruncateEncodedChangeLog: Couldn't find a safe place to truncate")
			//TODO: Possibly find a pivot earlier than desired?
		}
	}

	// Write the updated Since and the remaining entries:
	writeSequence(since, w)
	if _, err := r.Seek(entryPos[removed], 0); err != nil {
		panic("Seek back???")
	}
	if _, err := io.Copy(w, r); err != nil {
		panic("Copy???")
	}
	return removed, oldLength - removed
}
func UnmarshalStream(url string, reader io.Reader) (feed *Feed, err error) {
	// Read the stream into memory (we'll need to parse it twice)
	var contentReader *bytes.Reader
	var content []byte
	if content, err = ioutil.ReadAll(reader); err == nil {
		contentReader = bytes.NewReader(content)
		genericFeed := GenericFeed{}
		decoder := xml.NewDecoder(contentReader)
		decoder.CharsetReader = charset.NewReader

		// First pass - parse the feed as-is
		if err = decoder.Decode(&genericFeed); err != nil {
			// Error - check for invalid entities and correct as appropriate
			if fixed, fixedContent := fixEntities(content); fixed {
				// At least one replacement was made. Retry
				contentReader = bytes.NewReader(fixedContent)
				decoder = xml.NewDecoder(contentReader)
				decoder.CharsetReader = charset.NewReader

				// Try decoding again
				err = decoder.Decode(&genericFeed)
			}
		}
		if err != nil {
			return
		}

		var xmlFeed FeedMarshaler
		if genericFeed.XMLName.Space == "http://www.w3.org/1999/02/22-rdf-syntax-ns#" && genericFeed.XMLName.Local == "RDF" {
			xmlFeed = &rss1Feed{}
		} else if genericFeed.XMLName.Local == "rss" {
			xmlFeed = &rss2Feed{}
		} else if genericFeed.XMLName.Space == "http://www.w3.org/2005/Atom" && genericFeed.XMLName.Local == "feed" {
			xmlFeed = &atomFeed{}
		} else {
			err = errors.New("Unsupported type of feed (" + genericFeed.XMLName.Space + ":" + genericFeed.XMLName.Local + ")")
			return
		}

		// Second pass - parse into the format-specific structure
		contentReader.Seek(0, 0)
		decoder = xml.NewDecoder(contentReader)
		decoder.CharsetReader = charset.NewReader

		if err = decoder.Decode(xmlFeed); err == nil {
			if feed, err = xmlFeed.Marshal(); err == nil {
				feed.URL = url
			}
		}
	}
	return
}
func copyResp(code int, headers http.Header, body *bytes.Reader, newRw http.ResponseWriter) {
	h := newRw.Header()
	for k, v := range headers {
		h[k] = append(h[k], v...)
	}
	newRw.WriteHeader(code)
	body.WriteTo(newRw)
	body.Seek(0, 0) // rewind so the body can be replayed by later callers
}
// Seek positions the reader at the start of the table with the given tag.
func (t *TTFParser) Seek(fd *bytes.Reader, tag string) error {
	table, ok := t.tables[tag]
	if !ok {
		return ErrTableNotFound
	}
	val := table.Offset
	_, err := fd.Seek(int64(val), 0)
	if err != nil {
		return err
	}
	return nil
}
func UnmarshalStream(reader io.Reader) (*Feed, string, error) {
	format := ""

	// Read the stream into memory (we'll need to parse it twice)
	var contentReader *bytes.Reader
	if buffer, err := ioutil.ReadAll(reader); err == nil {
		contentReader = bytes.NewReader(buffer)
	} else {
		return nil, format, err
	}

	genericFeed := GenericFeed{}
	decoder := xml.NewDecoder(contentReader)
	decoder.CharsetReader = charsetReader
	if err := decoder.Decode(&genericFeed); err != nil {
		return nil, format, err
	}

	var xmlFeed FeedMarshaler
	if genericFeed.XMLName.Space == "http://www.w3.org/1999/02/22-rdf-syntax-ns#" && genericFeed.XMLName.Local == "RDF" {
		xmlFeed = &rss1Feed{}
		format = "RSS1"
	} else if genericFeed.XMLName.Local == "rss" {
		xmlFeed = &rss2Feed{}
		format = "RSS2"
	} else if genericFeed.XMLName.Space == "http://www.w3.org/2005/Atom" && genericFeed.XMLName.Local == "feed" {
		xmlFeed = &atomFeed{}
		format = "Atom"
	} else {
		return nil, format, errors.New("Unsupported type of feed (" + genericFeed.XMLName.Space + ":" + genericFeed.XMLName.Local + ")")
	}

	contentReader.Seek(0, 0)
	decoder = xml.NewDecoder(contentReader)
	decoder.CharsetReader = charsetReader

	if err := decoder.Decode(xmlFeed); err != nil {
		return nil, format, err
	}

	feed, err := xmlFeed.Marshal()
	if err != nil {
		return nil, format, err
	}
	return &feed, format, nil
}
func processQueries(
	db *cablastp.DB, transQueries *bytes.Reader, searchBuf *bytes.Buffer) error {

	// transQueries holds the translated/reduced queries (the replacement for
	// the raw FASTA input); searchBuf collects the coarse BLAST results.
	cablastp.Vprintln("\nBlasting query on coarse database...")
	err := blastCoarse(db, transQueries, searchBuf)
	handleFatalError("Error blasting coarse database", err)

	cablastp.Vprintln("Decompressing blast hits...")
	expandedSequences, err := expandBlastHits(db, searchBuf)
	handleFatalError("Error decompressing blast hits", err)

	if len(expandedSequences) == 0 {
		cablastp.Vprintln("No results from coarse search")
	} else {
		// Write the contents of the expanded sequences to a fasta file.
		// It is then indexed using makeblastdb.
		searchBuf.Reset()
		err = writeFasta(expandedSequences, searchBuf)
		handleFatalError("Could not create FASTA input from coarse hits", err)

		// Create the fine blast db in a temporary directory
		cablastp.Vprintln("Building fine BLAST database...")
		tmpDir, err := makeFineBlastDB(db, searchBuf)
		handleFatalError("Could not create fine database to search on", err)

		// Finally, run the query against the fine fasta database and pass on
		// the stdout and stderr...
		cablastp.Vprintln("Blasting query on fine database...")
		_, err = transQueries.Seek(0, 0) // offset 0, whence 0 (absolute)
		handleFatalError("Could not seek to start of query fasta input", err)

		err = blastFine(db, tmpDir, transQueries)
		handleFatalError("Error blasting fine database", err)

		// Delete the temporary fine database.
		if !flagNoCleanup {
			err := os.RemoveAll(tmpDir)
			handleFatalError("Could not delete fine BLAST database", err)
		}
	}
	return nil
}
func ReadEntryValueAsLong(reader *bytes.Reader, StartOfs int64, prologue PerfDataPrologue, entry *PerfDataEntry) error {
	reader.Seek(StartOfs+int64(entry.DataOffset), os.SEEK_SET)

	var order binary.ByteOrder
	if prologue.ByteOrder == 0 {
		order = binary.BigEndian
	} else {
		order = binary.LittleEndian
	}
	// Propagate the read error instead of silently dropping it.
	return binary.Read(reader, order, &entry.LongValue)
}
func (this *HSPerfData) readEntryValueAsString(reader *bytes.Reader, StartOfs int64, entry *PerfDataEntry) error {
	reader.Seek(StartOfs+int64(entry.DataOffset), os.SEEK_SET)
	DataLen := entry.EntryLength - entry.DataOffset
	n, err := reader.Read(this.globalbuf[:DataLen])
	if err != nil {
		return err
	} else if n != int(DataLen) {
		return errors.New("Could not read entry value.")
	}

	n = bytes.Index(this.globalbuf[:DataLen], []byte{0})
	entry.StringValue = string(this.globalbuf[:n])
	return nil
}
func ReadEntryName(reader *bytes.Reader, StartOfs int64, entry *PerfDataEntry) error {
	reader.Seek(StartOfs+int64(entry.NameOffset), os.SEEK_SET)
	NameLen := entry.DataOffset - entry.NameOffset
	var buf []byte = make([]byte, NameLen)
	n, err := reader.Read(buf)
	if err != nil {
		return err
	} else if n != int(NameLen) {
		return errors.New("Could not read entry name.")
	}

	n = bytes.Index(buf, []byte{0})
	entry.EntryName = string(buf[:n])
	return nil
}
func ReadEntryValueAsString(reader *bytes.Reader, StartOfs int64, entry *PerfDataEntry) error {
	reader.Seek(StartOfs+int64(entry.DataOffset), os.SEEK_SET)
	DataLen := entry.EntryLength - entry.DataOffset
	var buf []byte = make([]byte, DataLen)
	n, err := reader.Read(buf)
	if err != nil {
		return err
	} else if n != int(DataLen) {
		return errors.New("Could not read entry value.")
	}

	n = bytes.Index(buf, []byte{0})
	entry.StringValue = string(buf[:n])
	return nil
}
func (x *Exif) loadSubDir(r *bytes.Reader, tagId uint16) error {
	tag, ok := x.main[tagId]
	if !ok {
		return nil
	}
	offset := tag.Int(0)

	_, err := r.Seek(offset, 0)
	if err != nil {
		return errors.New("exif: seek to sub-IFD failed: " + err.Error())
	}
	subDir, _, err := tiff.DecodeDir(r, x.tif.Order)
	if err != nil {
		return errors.New("exif: sub-IFD decode failed: " + err.Error())
	}
	for _, tag := range subDir.Tags {
		x.main[tag.Id] = tag
	}
	return nil
}
func (this *HSPerfData) readEntryName(reader *bytes.Reader, StartOfs int64, entry *PerfDataEntry) error {
	reader.Seek(StartOfs+int64(entry.NameOffset), os.SEEK_SET)
	NameLen := entry.DataOffset - entry.NameOffset
	n, err := reader.Read(this.globalbuf[:NameLen])
	if err != nil {
		return err
	} else if n != int(NameLen) {
		return errors.New("Could not read entry name.")
	}

	n = bytes.Index(this.globalbuf[:NameLen], []byte{0})
	for i := 0; i < n; i++ { // Convert '.' to '/'
		if this.globalbuf[i] == '.' {
			this.globalbuf[i] = '/'
		}
	}
	entry.EntryName = string(this.globalbuf[:n])
	return nil
}
func (x *Exif) loadSubDir(r *bytes.Reader, ptrName FieldName, fieldMap map[uint16]FieldName) error {
	tag, ok := x.main[ptrName]
	if !ok {
		return nil
	}
	offset := tag.Int(0)

	_, err := r.Seek(offset, 0)
	if err != nil {
		return errors.New("exif: seek to sub-IFD failed: " + err.Error())
	}
	subDir, _, err := tiff.DecodeDir(r, x.tif.Order)
	if err != nil {
		return errors.New("exif: sub-IFD decode failed: " + err.Error())
	}
	for _, tag := range subDir.Tags {
		name := fieldMap[tag.Id]
		x.main[name] = tag
	}
	return nil
}
// Removes the oldest entries to limit the log's length to `maxLength`.
// This is the same as ChangeLog.Truncate except it works directly on the encoded form, which is
// much faster than decoding+truncating+encoding.
func TruncateEncodedChangeLog(r *bytes.Reader, maxLength int, w io.Writer) int {
	since := readSequence(r)

	// Find the starting position of each entry:
	entryPos := make([]int64, 0, 1000)
	for {
		pos, _ := r.Seek(0, 1)
		flags, err := r.ReadByte()
		if err != nil {
			break // eof
		}
		entryPos = append(entryPos, pos)
		readSequence(r)
		skipString(r)
		skipString(r)
		skipString(r)
		if flags > 7 {
			panic(fmt.Sprintf("bad flags %x, entry %d, offset %d", flags, len(entryPos)-1, pos))
		}
	}

	// How many entries to remove?
	remove := len(entryPos) - maxLength
	if remove <= 0 {
		return 0
	}

	// Update the log's Since to the sequence of the last entry being removed:
	r.Seek(entryPos[remove-1]+1, 0)
	since = readSequence(r)

	// Write the updated Since and the remaining entries:
	writeSequence(since, w)
	r.Seek(entryPos[remove], 0)
	io.Copy(w, r)
	return remove
}
// Reads a Pattern.
func readPattern(rd *bytes.Reader) (*Pattern, error) {
	var err error
	var magic []byte = make([]byte, 6)
	var patLen byte
	var version []byte = make([]byte, 32)
	var versionString string
	var tempo float32
	var tracks []*Track

	// Read and verify magic number.
	err = binary.Read(rd, binary.LittleEndian, &magic)
	if err != nil {
		return nil, err
	}
	if string(magic) != "SPLICE" {
		return nil, errors.New("Not a valid SPLICE file.")
	}

	// Skip 7 null bytes.
	rd.Seek(7, 1)

	// Read pattern length. Note that length is from current offset (14).
	err = binary.Read(rd, binary.LittleEndian, &patLen)
	if err != nil {
		return nil, err
	}

	// Read the version and convert to string.
	err = binary.Read(rd, binary.LittleEndian, &version)
	if err != nil {
		return nil, err
	}
	versionString = string(bytes.Trim(version, "\x00"))

	// Read the tempo.
	err = binary.Read(rd, binary.LittleEndian, &tempo)
	if err != nil {
		return nil, err
	}

	// Read the tracks of this pattern.
	for pos, _ := rd.Seek(0, 1); pos < int64(patLen)+14; pos, _ = rd.Seek(0, 1) {
		t, err := readTrack(rd)
		if err != nil {
			return nil, err
		}
		tracks = append(tracks, t)
	}

	p := &Pattern{
		Version: versionString,
		Tempo:   tempo,
		Tracks:  tracks,
	}
	return p, nil
}
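A minimal usage sketch for readPattern above: load a .splice file into memory and hand readPattern a *bytes.Reader (it needs seeking, so the whole file is buffered). The DecodeFile name and the ioutil.ReadFile helper are illustrative assumptions, not part of the original.

// DecodeFile is a hypothetical convenience wrapper around readPattern.
func DecodeFile(path string) (*Pattern, error) {
	data, err := ioutil.ReadFile(path) // buffer the whole file; readPattern seeks within it
	if err != nil {
		return nil, err
	}
	return readPattern(bytes.NewReader(data))
}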
func (c *Client) merchantUploadImageFromBytesReader(filename string, reader *bytes.Reader) (imageURL string, err error) {
	originalOffset, err := reader.Seek(0, 1)
	if err != nil {
		return
	}

	FormDataFileName := escapeQuotes(filename)
	ContentLength := int64(multipart_constPartLen + len(FormDataFileName) + reader.Len())

	hasRetry := false
RETRY:
	token, err := c.Token()
	if err != nil {
		return
	}
	url_ := merchantUploadImageURL(token, filename)

	if hasRetry {
		if _, err = reader.Seek(originalOffset, 0); err != nil {
			return
		}
	}
	mr := io.MultiReader(
		strings.NewReader(multipart_formDataFront),
		strings.NewReader(FormDataFileName),
		strings.NewReader(multipart_formDataMiddle),
		reader,
		strings.NewReader(multipart_formDataEnd),
	)

	httpReq, err := http.NewRequest("POST", url_, mr)
	if err != nil {
		return
	}
	httpReq.Header.Set("Content-Type", multipart_ContentType)
	httpReq.ContentLength = ContentLength

	httpResp, err := c.httpClient.Do(httpReq)
	if err != nil {
		return
	}
	defer httpResp.Body.Close()

	if httpResp.StatusCode != http.StatusOK {
		err = fmt.Errorf("http.Status: %s", httpResp.Status)
		return
	}

	var result struct {
		Error
		ImageURL string `json:"image_url"`
	}
	if err = json.NewDecoder(httpResp.Body).Decode(&result); err != nil {
		return
	}

	switch result.ErrCode {
	case errCodeOK:
		imageURL = result.ImageURL
		return
	case errCodeTimeout:
		if !hasRetry {
			hasRetry = true
			timeoutRetryWait()
			goto RETRY
		}
		fallthrough
	default:
		err = &result.Error
		return
	}
}
func main() {
	isUTF8 := flag.Bool("u", false, "Enable UTF8 output")
	width := flag.Uint("x", 0, `Scale to n*2 columns wide in ANSI mode, n columns wide in UTF8 mode.
When -x=0 (the default), aspect ratio is maintained. For example if -y is
provided without -x, width is scaled to maintain aspect ratio`)
	height := flag.Uint("y", 0, `Scale to n rows high in ANSI mode, n/2 rows high in UTF8 mode.
When -y=0 (the default), aspect ratio is maintained. For example if -x is
provided without -y, height is scaled to maintain aspect ratio`)
	loopTimes := flag.Uint("l", 0, `Loop animation n times
When -l=0 (the default), animation is looped indefinitely. Supersedes -s
Only applies to multi-frame gifs`)
	loopSeconds := flag.Uint("s", 0, `Loop animation n seconds
When -s=0 (the default), this option is ignored.
Only applies to multi-frame gifs`)

	flag.Usage = func() {
		fmt.Fprint(os.Stderr, `Usage: gotermimg [-u] [-x=n] [-y=n] [-l=n|-s=n] [IMAGEFILE]

IMAGEFILE - png, gif or jpg. gif will auto-play.
Image data can be piped to stdin instead of providing IMAGEFILE.

If neither -x or -y are provided, and the image is larger than your current
terminal, it will be automatically scaled to fit.

`)
		flag.PrintDefaults()
	}
	flag.Parse()

	var buf *bytes.Reader
	switch {
	case !termutil.Isatty(os.Stdin.Fd()):
		bufData, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			log.Fatal(err)
		}
		buf = bytes.NewReader(bufData)
	case len(flag.Args()) < 1:
		flag.Usage()
		os.Exit(1)
	default:
		file, err := os.Open(flag.Arg(0))
		if err != nil {
			log.Fatal(err)
		}
		bufData, err := ioutil.ReadAll(file)
		if err != nil {
			log.Fatal(err)
		}
		file.Close()
		buf = bytes.NewReader(bufData)
	}

	conf, imgformat, err := image.DecodeConfig(buf)
	if err != nil {
		log.Fatal(err)
	}
	buf.Seek(0, 0)

	var conv timg.Converter
	if *isUTF8 {
		conv = timg.UTF8
	} else {
		conv = timg.ANSI
	}

	var trans timg.Transformer
	if *width != 0 || *height != 0 {
		trans = timg.Resize(*width, *height)
	} else if termutil.Isatty(os.Stdout.Fd()) {
		x, y, err := terminal.Size(os.Stdout.Fd())
		if err != nil {
			log.Fatal(err)
		}
		y = y - 1

		// Convert the actual terminal dimensions into effective dimensions
		switch {
		case *isUTF8:
			y = y * 2
		case x%2 == 0:
			x = x / 2
		default:
			x = (x - 1) / 2
		}

		if uint(conf.Width) > x || uint(conf.Height) > y {
			aspectTerm := float32(x) / float32(y)
			aspectImg := float32(conf.Width) / float32(conf.Height)
			if aspectImg > aspectTerm {
				trans = timg.Resize(x, 0)
			} else {
				trans = timg.Resize(0, y)
			}
		}
	}

	if imgformat == "gif" {
		gifimg, err := gif.DecodeAll(buf)
		if err != nil {
			log.Fatal(err)
		}

		if len(gifimg.Image) > 1 {
			var loop timg.KeepLooping
			switch {
			// Don't bother looping if we're not outputting to a tty
			case !termutil.Isatty(os.Stdout.Fd()):
				loop = timg.LoopTimes(1)
			case *loopTimes > 0:
				loop = timg.LoopTimes(*loopTimes)
			case *loopSeconds > 0:
				loop = timg.LoopSeconds(*loopSeconds)
			}
			timg.PrintAnimation(timg.Gif(gifimg, conv, trans), loop)
		} else {
			timg.PrintImage(gifimg.Image[0], conv, trans)
		}
	} else {
		img, _, err := image.Decode(buf)
		if err != nil {
			log.Fatal(err)
		}
		timg.PrintImage(img, conv, trans)
	}
}
func (this *HSPerfData) readEntryValueAsLong(reader *bytes.Reader, StartOfs int64, entry *PerfDataEntry) error {
	reader.Seek(StartOfs+int64(entry.DataOffset), os.SEEK_SET)
	// Check the read error instead of ignoring it.
	if _, err := reader.Read(this.globalbuf[:8]); err != nil {
		return err
	}
	entry.LongValue = int64(this.byteOrder.Uint64(this.globalbuf[:8]))
	return nil
}
//ParseCmap parse cmap table format 4 https://www.microsoft.com/typography/otspec/cmap.htm
func (t *TTFParser) ParseCmap(fd *bytes.Reader) error {
	err := t.Seek(fd, "cmap")
	if err != nil {
		return err
	}
	err = t.Skip(fd, 2) // version
	if err != nil {
		return err
	}
	numTables, err := t.ReadUShort(fd)
	if err != nil {
		return err
	}
	offset31 := uint(0)
	for i := 0; i < int(numTables); i++ {
		platformID, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		encodingID, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		offset, err := t.ReadULong(fd)
		if err != nil {
			return err
		}
		t.symbol = false //init
		if platformID == 3 && encodingID == 1 {
			if encodingID == 0 {
				t.symbol = true
			}
			offset31 = offset
		}
	} //end for

	if offset31 == 0 {
		//No Unicode encoding found
		return ERROR_NO_UNICODE_ENCODING_FOUND
	}

	var startCount, endCount, idDelta, idRangeOffset, glyphIDArray []uint
	_, err = fd.Seek(int64(t.tables["cmap"].Offset+offset31), 0)
	if err != nil {
		return err
	}
	format, err := t.ReadUShort(fd)
	if err != nil {
		return err
	}
	if format != 4 {
		//Unexpected subtable format
		return ERROR_UNEXPECTED_SUBTABLE_FORMAT
	}
	length, err := t.ReadUShort(fd)
	if err != nil {
		return err
	}
	err = t.Skip(fd, 2) // language
	if err != nil {
		return err
	}
	segCount, err := t.ReadUShort(fd)
	if err != nil {
		return err
	}
	segCount = segCount / 2
	t.SegCount = segCount
	err = t.Skip(fd, 3*2) // searchRange, entrySelector, rangeShift
	if err != nil {
		return err
	}
	glyphCount := (length - (16 + 8*segCount)) / 2

	for i := 0; i < int(segCount); i++ {
		tmp, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		endCount = append(endCount, tmp)
	}
	t.EndCount = endCount

	err = t.Skip(fd, 2) // reservedPad
	if err != nil {
		return err
	}

	for i := 0; i < int(segCount); i++ {
		tmp, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		startCount = append(startCount, tmp)
	}
	t.StartCount = startCount

	for i := 0; i < int(segCount); i++ {
		tmp, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		idDelta = append(idDelta, tmp)
	}
	t.IdDelta = idDelta

	offset, err := t.FTell(fd)
	if err != nil {
		return err
	}
	for i := 0; i < int(segCount); i++ {
		tmp, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		idRangeOffset = append(idRangeOffset, tmp)
	}
	t.IdRangeOffset = idRangeOffset

	for i := 0; i < int(glyphCount); i++ {
		tmp, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		glyphIDArray = append(glyphIDArray, tmp)
	}
	t.GlyphIdArray = glyphIDArray

	t.chars = make(map[int]uint)
	for i := 0; i < int(segCount); i++ {
		c1 := startCount[i]
		c2 := endCount[i]
		d := idDelta[i]
		ro := idRangeOffset[i]
		if ro > 0 {
			_, err = fd.Seek(int64(offset+uint(2*i)+ro), 0)
			if err != nil {
				return err
			}
		}
		for c := c1; c <= c2; c++ {
			var gid uint
			if c == 0xFFFF {
				break
			}
			if ro > 0 {
				gid, err = t.ReadUShort(fd)
				if err != nil {
					return err
				}
				if gid > 0 {
					gid += d
				}
			} else {
				gid = c + d
			}
			if gid >= 65536 {
				gid -= 65536
			}
			if gid > 0 {
				t.chars[int(c)] = gid
			}
		}
	}

	_, err = t.ParseCmapFormat12(fd)
	if err != nil {
		return err
	}
	return nil
}
// FTell returns the reader's current offset.
func (t *TTFParser) FTell(fd *bytes.Reader) (uint, error) {
	offset, err := fd.Seek(0, os.SEEK_CUR)
	return uint(offset), err
}
// pad advances the reader to the next offset that is a multiple of align.
func pad(data *bytes.Reader, align int64) {
	for pos, _ := data.Seek(0, 1); pos%align != 0; pos++ {
		data.ReadByte()
	}
}
func decodeChangeLog(r *bytes.Reader, afterSeq uint64) *ChangeLog {
	type docAndRev struct {
		docID, revID string
	}

	ch := ChangeLog{
		Since:   readSequence(r),
		Entries: make([]*LogEntry, 0, 500),
	}
	parents := map[docAndRev]*LogEntry{}
	cleanup := false
	skipping := (afterSeq > 0)
	var flagBuf [1]byte
	for {
		n, err := r.Read(flagBuf[0:1])
		if n == 0 {
			if err == io.EOF {
				break
			}
			panic("Error reading flags")
		}
		if flagBuf[0] > kMaxFlag {
			pos, _ := r.Seek(0, 1)
			base.Warn("DecodeChangeLog: bad flags 0x%x, entry %d, offset %d",
				flagBuf[0], len(ch.Entries), pos-1)
			return nil
		}
		seq := readSequence(r)
		if skipping {
			if seq >= afterSeq {
				skipping = false
			}
			if seq <= afterSeq {
				skipString(r)
				skipString(r)
				skipString(r)
				continue // ignore this sequence
			}
		}

		entry := &LogEntry{
			Flags:    flagBuf[0],
			Sequence: seq,
			DocID:    readString(r),
			RevID:    readString(r),
		}
		if !entry.checkValid() {
			return nil
		}

		if parentID := readString(r); parentID != "" {
			if parent := parents[docAndRev{entry.DocID, parentID}]; parent != nil {
				// Clear out the parent rev that was overwritten by this one
				parent.DocID = ""
				parent.RevID = ""
				cleanup = true
			}
		}
		parents[docAndRev{entry.DocID, entry.RevID}] = entry
		ch.Entries = append(ch.Entries, entry)
	}

	// Now remove any overwritten entries:
	if cleanup {
		iDst := 0
		for iSrc, entry := range ch.Entries {
			if entry.DocID != "" { // only copy non-cleared entries
				if iDst < iSrc {
					ch.Entries[iDst] = entry
				}
				iDst++
			}
		}
		ch.Entries = ch.Entries[0:iDst]
	}

	if afterSeq > ch.Since {
		ch.Since = afterSeq
	}
	return &ch
}
// RoundTrip implements http.RoundTripper for the Transport type.
// It calls its underlying http.RoundTripper to execute the request, and
// adds retry logic as per its configuration.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
	var attempt int
	preventRetry := req.Body != nil && t.PreventRetryWithBody

	// get the done cancellation channel for the context, will be nil
	// for < go1.7.
	done := contextForRequest(req)

	// buffer the body if needed
	var br *bytes.Reader
	if req.Body != nil && !preventRetry {
		var buf bytes.Buffer
		if _, err := io.Copy(&buf, req.Body); err != nil {
			// cannot even try the first attempt, body has been consumed
			req.Body.Close()
			return nil, err
		}
		req.Body.Close()

		br = bytes.NewReader(buf.Bytes())
		req.Body = ioutil.NopCloser(br)
	}

	for {
		res, err := t.RoundTripper.RoundTrip(req)
		if preventRetry {
			return res, err
		}

		retry, delay := t.retry(Attempt{
			Request:  req,
			Response: res,
			Index:    attempt,
			Error:    err,
		})
		if !retry {
			return res, err
		}

		if br != nil {
			// Per Go's doc: "RoundTrip should not modify the request,
			// except for consuming and closing the Body", so the only thing
			// to reset on the request is the body, if any.
			if _, serr := br.Seek(0, 0); serr != nil {
				// failed to retry, return the results
				return res, err
			}
			req.Body = ioutil.NopCloser(br)
		}
		// close the disposed response's body, if any
		if res != nil {
			io.Copy(ioutil.Discard, res.Body)
			res.Body.Close()
		}

		select {
		case <-time.After(delay):
			attempt++
		case <-done:
			// request canceled by caller (post-1.7), don't retry
			return nil, errors.New("net/http: request canceled")
		case <-req.Cancel:
			// request canceled by caller (pre-1.7), don't retry
			return nil, errors.New("net/http: request canceled")
		}
	}
}
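A minimal sketch of putting the retry Transport above behind an http.Client. How the Transport's retry and delay policy gets configured is package-specific, so t here stands for an already-configured *Transport; the newRetryingClient helper and the 30-second timeout are assumptions for illustration.

// newRetryingClient wraps an already-configured retry Transport (hypothetical helper).
func newRetryingClient(t *Transport) *http.Client {
	return &http.Client{
		Transport: t,                // RoundTrip above buffers the body and retries transparently
		Timeout:   30 * time.Second, // overall deadline across all attempts
	}
}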
func storeImage(rw http.ResponseWriter, req *http.Request) {
	// Appengine
	var c appengine.Context
	// Google Cloud Storage authentication
	var cc gcscontext.Context
	// Google Cloud Storage bucket name
	var bucketName string = ""
	// Google Cloud Storage client
	var client *storage.Client
	// Google Cloud Storage bucket
	var bucketHandle *storage.BucketHandle
	// User uploaded image file name
	var fileName string = uuid.New()
	// Transform user uploaded image to a thumbnail file name
	var fileNameThumbnail string = uuid.New()
	// User uploaded image file type
	var contentType string = ""
	// User uploaded image file raw data
	var b []byte
	// Google Cloud Storage file writer
	var wc *storage.Writer = nil
	// Error
	var err error = nil
	// HTTP status code to respond with; StatusCreated signals success
	var r int = http.StatusCreated

	// Set response in the end
	defer func() {
		// Return status. WriteHeader() must be called before call to Write
		if r == http.StatusCreated {
			// Changing the header after a call to WriteHeader (or Write) has no effect.
			// rw.Header().Set("Location", req.URL.String()+"/"+cKey.Encode())
			rw.Header().Set("Location", "http://"+bucketName+".storage.googleapis.com/"+fileName)
			rw.Header().Set("X-Thumbnail", "http://"+bucketName+".storage.googleapis.com/"+fileNameThumbnail)
			rw.WriteHeader(r)
		} else {
			http.Error(rw, http.StatusText(r), r)
		}
	}()

	// To log information in Google APP Engine console
	c = appengine.NewContext(req)

	// Get data from body
	b, err = ioutil.ReadAll(req.Body)
	if err != nil {
		c.Errorf("%s in reading body", err)
		r = http.StatusInternalServerError
		return
	}
	c.Infof("Body length %d bytes, read %d bytes", req.ContentLength, len(b))

	// Determine filename extension from content type
	contentType = req.Header["Content-Type"][0]
	switch contentType {
	case "image/jpeg":
		fileName += ".jpg"
		fileNameThumbnail += ".jpg"
	default:
		c.Errorf("Unknown or unsupported content type '%s'. Valid: image/jpeg", contentType)
		r = http.StatusBadRequest
		return
	}
	c.Infof("Content type %s is received, %s is detected.", contentType, http.DetectContentType(b))

	// Prepare Google Cloud Storage authentication
	cc = gcsappengine.NewContext(req)
	if client, err = storage.NewClient(cc); err != nil {
		c.Errorf("%s in initializing a GCS client", err)
		r = http.StatusInternalServerError
		return
	}
	defer client.Close()

	// Get default bucket
	if bucketName, err = gcsfile.DefaultBucketName(cc); err != nil {
		c.Errorf("%s in getting default GCS bucket name", err)
		r = http.StatusInternalServerError
		return
	}
	bucketHandle = client.Bucket(bucketName)
	c.Infof("APP Engine Version: %s", gcsappengine.VersionID(cc))
	c.Infof("Using bucket name: %s", bucketName)

	// Change default object ACLs
	if err = bucketHandle.DefaultObjectACL().Set(cc, storage.AllUsers, storage.RoleReader); err != nil {
		c.Errorf("%v in saving default object ACL rule for bucket %q", err, bucketName)
		r = http.StatusInternalServerError
		return
	}

	// Store rotated image in Google Cloud Storage
	var in *bytes.Reader = bytes.NewReader(b)
	var x *exif.Exif = nil
	var orientation *tiff.Tag = nil
	var beforeImage image.Image
	var afterImage *image.NRGBA = nil

	// Read EXIF
	if _, err = in.Seek(0, 0); err != nil {
		c.Errorf("%s in moving the reader offset to the beginning in order to read EXIF", err)
		r = http.StatusInternalServerError
		return
	}
	if x, err = exif.Decode(in); err != nil {
		c.Errorf("%s in decoding JPEG image", err)
		r = http.StatusInternalServerError
		return
	}

	// Get Orientation
	if orientation, err = x.Get(exif.Orientation); err != nil {
		c.Warningf("%s in getting orientation from EXIF", err)
		r = http.StatusInternalServerError
		return
	}
	c.Debugf("Orientation %s", orientation.String())

	// Open image
	if _, err = in.Seek(0, 0); err != nil {
		c.Errorf("%s in moving the reader offset to the beginning in order to decode the image", err)
		r = http.StatusInternalServerError
		return
	}
	if beforeImage, err = imaging.Decode(in); err != nil {
		c.Errorf("%s in opening image", err)
		r = http.StatusInternalServerError
		return
	}

	// Rotate/flip according to the EXIF orientation value
	switch orientation.String() {
	case "1":
		afterImage = beforeImage.(*image.NRGBA)
	case "2":
		afterImage = imaging.FlipH(beforeImage)
	case "3":
		afterImage = imaging.Rotate180(beforeImage)
	case "4":
		afterImage = imaging.FlipV(beforeImage)
	case "5":
		afterImage = imaging.Transverse(beforeImage)
	case "6":
		afterImage = imaging.Rotate270(beforeImage)
	case "7":
		afterImage = imaging.Transpose(beforeImage)
	case "8":
		afterImage = imaging.Rotate90(beforeImage)
	}

	// Save rotated image
	wc = bucketHandle.Object(fileName).NewWriter(cc)
	wc.ContentType = contentType
	if err = imaging.Encode(wc, afterImage, imaging.JPEG); err != nil {
		c.Errorf("%s in saving rotated image", err)
		r = http.StatusInternalServerError
		return
	}
	if err = wc.Close(); err != nil {
		c.Errorf("CreateFile: unable to close bucket %q, file %q: %v", bucketName, fileName, err)
		r = http.StatusInternalServerError
		return
	}
	wc = nil

	// Make thumbnail
	if afterImage.Rect.Dx() > afterImage.Rect.Dy() {
		afterImage = imaging.Resize(afterImage, 1920, 0, imaging.Lanczos)
	} else {
		afterImage = imaging.Resize(afterImage, 0, 1920, imaging.Lanczos)
	}

	// Save thumbnail
	wc = bucketHandle.Object(fileNameThumbnail).NewWriter(cc)
	wc.ContentType = contentType
	if err = imaging.Encode(wc, afterImage, imaging.JPEG); err != nil {
		c.Errorf("%s in saving image thumbnail", err)
		r = http.StatusInternalServerError
		return
	}
	if err = wc.Close(); err != nil {
		c.Errorf("CreateFileThumbnail: unable to close bucket %q, file %q: %v", bucketName, fileNameThumbnail, err)
		r = http.StatusInternalServerError
		return
	}

	c.Infof("/%v/%v, /%v/%v created", bucketName, fileName, bucketName, fileNameThumbnail)
}
//ParseName parse name table https://www.microsoft.com/typography/otspec/name.htm
func (t *TTFParser) ParseName(fd *bytes.Reader) error {
	err := t.Seek(fd, "name")
	if err != nil {
		return err
	}

	tableOffset, err := t.FTell(fd)
	if err != nil {
		return err
	}

	t.postScriptName = ""
	err = t.Skip(fd, 2) // format
	if err != nil {
		return err
	}

	count, err := t.ReadUShort(fd)
	if err != nil {
		return err
	}

	stringOffset, err := t.ReadUShort(fd)
	if err != nil {
		return err
	}

	for i := 0; i < int(count); i++ {
		err = t.Skip(fd, 3*2) // platformID, encodingID, languageID
		if err != nil {
			return err
		}
		nameID, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		length, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		offset, err := t.ReadUShort(fd)
		if err != nil {
			return err
		}
		if nameID == 6 {
			// PostScript name
			_, err = fd.Seek(int64(tableOffset+stringOffset+offset), 0)
			if err != nil {
				return err
			}
			stmp, err := t.Read(fd, int(length))
			if err != nil {
				return err
			}

			// Strip NUL bytes, then characters a PostScript name may not contain.
			var tmpStmp []byte
			for _, v := range stmp {
				if v != 0 {
					tmpStmp = append(tmpStmp, v)
				}
			}
			s := string(tmpStmp)
			s = strings.Replace(s, strconv.Itoa(0), "", -1)
			s, err = t.PregReplace("|[ \\[\\](){}<>/%]|", "", s)
			if err != nil {
				return err
			}
			t.postScriptName = s
			break
		}
	}

	if t.postScriptName == "" {
		return ERROR_POSTSCRIPT_NAME_NOT_FOUND
	}
	return nil
}
// Decode parses EXIF-encoded data from r and returns a queryable Exif
// object. After the exif data section is extracted and the tiff structure
// decoded, each registered parser is called (in order of registration). If
// one parser returns an error, decoding terminates and the remaining
// parsers are not called.
func Decode(r io.Reader) (*Exif, error) {
	// EXIF data in JPEG is stored in the APP1 marker. EXIF data uses the TIFF
	// format to store data.
	// If we're parsing a TIFF image, we don't need to strip away any data.
	// If we're parsing a JPEG image, we need to strip away the JPEG APP1
	// marker and also the EXIF header.

	header := make([]byte, 4)
	n, err := r.Read(header)
	if err != nil {
		return nil, err
	}
	if n < len(header) {
		return nil, errors.New("exif: short read on header")
	}

	var isTiff bool
	switch string(header) {
	case "II*\x00":
		// TIFF - Little endian (Intel)
		isTiff = true
	case "MM\x00*":
		// TIFF - Big endian (Motorola)
		isTiff = true
	default:
		// Not TIFF, assume JPEG
	}

	// Put the header bytes back into the reader.
	r = io.MultiReader(bytes.NewReader(header), r)
	var (
		er  *bytes.Reader
		tif *tiff.Tiff
	)

	if isTiff {
		// Functions below need the IFDs from the TIFF data to be stored in a
		// *bytes.Reader. We use TeeReader to get a copy of the bytes as a
		// side-effect of tiff.Decode() doing its work.
		b := &bytes.Buffer{}
		tr := io.TeeReader(r, b)
		tif, err = tiff.Decode(tr)
		er = bytes.NewReader(b.Bytes())
	} else {
		// Locate the JPEG APP1 header.
		var sec *appSec
		sec, err = newAppSec(jpeg_APP1, r)
		if err != nil {
			return nil, err
		}
		// Strip away EXIF header.
		er, err = sec.exifReader()
		if err != nil {
			return nil, err
		}
		tif, err = tiff.Decode(er)
	}

	if err != nil {
		return nil, fmt.Errorf("exif: decode failed (%v) ", err)
	}

	er.Seek(0, 0)
	raw, err := ioutil.ReadAll(er)
	if err != nil {
		return nil, fmt.Errorf("exif: decode failed (%v) ", err)
	}

	// build an exif structure from the tiff
	x := &Exif{
		main: map[FieldName]*tiff.Tag{},
		Tiff: tif,
		Raw:  raw,
	}

	for i, p := range parsers {
		if err := p.Parse(x); err != nil {
			return x, fmt.Errorf("exif: parser %v failed (%v)", i, err)
		}
	}

	return x, nil
}
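A hedged caller-side sketch for Decode above: decode EXIF from an opened file, then query one field. The Get method and the exif.Orientation FieldName follow their use in the storeImage handler earlier in this section; the printOrientation name and log.Fatal error handling are illustrative assumptions.

func printOrientation(path string) {
	f, err := os.Open(path)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	x, err := exif.Decode(f) // Decode as defined above
	if err != nil {
		log.Fatal(err)
	}
	if tag, err := x.Get(exif.Orientation); err == nil {
		fmt.Println("Orientation:", tag.String())
	}
}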
func decodeChangeLog(r *bytes.Reader, afterSeq uint64, oldLog *ChangeLog) *ChangeLog {
	type docAndRev struct {
		docID, revID string
	}

	ch := ChangeLog{
		Since:   readSequence(r),
		Entries: make([]*LogEntry, 0, 500),
	}

	parents := map[docAndRev]int{}
	if oldLog != nil {
		// If a pre-existing log is given, copy its entries so we'll append to them:
		ch.Entries = append(ch.Entries, oldLog.Entries...)
		ch.Since = oldLog.Since
		for i, entry := range ch.Entries {
			parents[docAndRev{entry.DocID, entry.RevID}] = i
		}
		afterSeq = oldLog.LastSequence()
	}

	skipping := afterSeq > ch.Since
	var flagBuf [1]byte
	for {
		n, err := r.Read(flagBuf[0:1])
		if n == 0 {
			if err == io.EOF {
				break
			}
			panic("Error reading flags")
		}
		if flagBuf[0] > kMaxFlag {
			pos, _ := r.Seek(0, 1)
			base.Warn("DecodeChangeLog: bad flags 0x%x, entry %d, offset %d",
				flagBuf[0], len(ch.Entries), pos-1)
			return nil
		}
		seq := readSequence(r)
		if skipping {
			if seq == afterSeq {
				skipping = false
			}
			skipString(r)
			skipString(r)
			skipString(r)
			continue // ignore this sequence
		}

		entry := &LogEntry{
			Flags:    flagBuf[0],
			Sequence: seq,
			DocID:    readString(r),
			RevID:    readString(r),
		}
		if !entry.checkValid() {
			return nil
		}

		if parentID := readString(r); parentID != "" {
			if parentIndex, found := parents[docAndRev{entry.DocID, parentID}]; found {
				// Clear out the parent rev that was overwritten by this one
				ch.Entries[parentIndex] = &LogEntry{Sequence: ch.Entries[parentIndex].Sequence}
			}
		}
		parents[docAndRev{entry.DocID, entry.RevID}] = len(ch.Entries)
		ch.Entries = append(ch.Entries, entry)
	}

	if oldLog == nil && afterSeq > ch.Since {
		ch.Since = afterSeq
	}
	return &ch
}