func (c *Client) merchantUploadImageFromBytesReader(filename string, reader *bytes.Reader) (imageURL string, err error) { originalOffset, err := reader.Seek(0, 1) if err != nil { return } FormDataFileName := escapeQuotes(filename) ContentLength := int64(multipart_constPartLen + len(FormDataFileName) + reader.Len()) hasRetry := false RETRY: token, err := c.Token() if err != nil { return } url_ := merchantUploadImageURL(token, filename) if hasRetry { if _, err = reader.Seek(originalOffset, 0); err != nil { return } } mr := io.MultiReader( strings.NewReader(multipart_formDataFront), strings.NewReader(FormDataFileName), strings.NewReader(multipart_formDataMiddle), reader, strings.NewReader(multipart_formDataEnd), ) httpReq, err := http.NewRequest("POST", url_, mr) if err != nil { return } httpReq.Header.Set("Content-Type", multipart_ContentType) httpReq.ContentLength = ContentLength httpResp, err := c.httpClient.Do(httpReq) if err != nil { return } defer httpResp.Body.Close() if httpResp.StatusCode != http.StatusOK { err = fmt.Errorf("http.Status: %s", httpResp.Status) return } var result struct { Error ImageURL string `json:"image_url"` } if err = json.NewDecoder(httpResp.Body).Decode(&result); err != nil { return } switch result.ErrCode { case errCodeOK: imageURL = result.ImageURL return case errCodeTimeout: if !hasRetry { hasRetry = true timeoutRetryWait() goto RETRY } fallthrough default: err = &result.Error return } }
// HandleLog is the default http log handler func HandleLog(h HTTP, entries <-chan *log.Entry) { var e *log.Entry var b []byte var reader *bytes.Reader formatter := h.FormatFunc()(h) remoteHost := h.RemoteHost() httpClient := stdhttp.Client{} req, _ := stdhttp.NewRequest(h.Method(), remoteHost, nil) req.Header = h.Headers() for e = range entries { b = formatter(e) reader = bytes.NewReader(b) req.Body = ioutil.NopCloser(reader) req.ContentLength = int64(reader.Len()) resp, err := httpClient.Do(req) if err != nil { log.Error("Could not post data to %s: %v\n", remoteHost, err) goto END } if resp.StatusCode < 200 || resp.StatusCode >= 299 { bt, _ := ioutil.ReadAll(resp.Body) log.Error("Received HTTP %d during POST request to %s body: %s\n", resp.StatusCode, remoteHost, string(bt)) } END: e.Consumed() } }
func installZip(source *bytes.Reader, dest string) error { zr, err := zip.NewReader(source, int64(source.Len())) if err != nil { return err } for _, f := range zr.File { fileCopy, err := os.OpenFile(dest, installFlag, f.Mode()) if err != nil { return err } defer fileCopy.Close() rc, err := f.Open() if err != nil { return err } defer rc.Close() _, err = io.Copy(fileCopy, rc) if err != nil { return err } } return nil }
func readTracks(reader *bytes.Reader, encodedDataSize int) ([]Track, error) { var tracks []Track position := encodedDataSize - reader.Len() for position < encodedDataSize { var id int32 binary.Read(reader, binary.LittleEndian, &id) channelNameSize, _ := reader.ReadByte() channelBytes := make([]byte, channelNameSize) _, err := reader.Read(channelBytes) if err != nil { return []Track{}, errors.New("Could not read Track name with id " + string(id)) } pattern := make([]uint32, 4) patternReadErr := binary.Read(reader, binary.LittleEndian, &pattern) if patternReadErr != nil { return []Track{}, errors.New("Could not read Track step with id " + string(id)) } tracks = append(tracks, Track{ id, string(channelBytes), pattern}) position += int(21) + int(channelNameSize) } return tracks, nil }
func (k *KeenIoMetrics) AuthedRequest(method, path string, body *bytes.Reader) (resp *http.Response, err error) { path = fmt.Sprintf("https://api.keen.io/3.0/projects/%s%s", k.ProjectToken, path) req, err := http.NewRequest(method, path, body) if err != nil { return } req.Header.Add("Authorization", k.ApiKey) if body != nil { req.Header.Add("Content-Type", "application/json") req.ContentLength = int64(body.Len()) } resp, err = k.HttpClient.Do(req) if err != nil { k.Error("Failed to send metric event to keen.io %v", err) } else { defer resp.Body.Close() if resp.StatusCode != 201 { bytes, _ := ioutil.ReadAll(resp.Body) k.Error("Got %v response from keen.io: %s", resp.StatusCode, bytes) } } return }
func (sitemap *SitemapPage) parseSitemapPageNoGzip(mem_seek *bytes.Reader) (byte_reader *bytes.Reader, err error) { // validate uncompressed size if mem_seek.Len() > max_sitemap_page_size { return mem_seek, max_sitemap_page_size_error } err = sitemap.determineIteratorFormat(mem_seek) return mem_seek, err }
func (self *Entity) Store( storageObj *data.Storage, objectObj *data.Object, input *bytes.Reader, ) error { ctx := self.Ctx() closer := ctx.LogMark("[Entity.Store]") defer closer() uri := strings.Join([]string{storageObj.Uri, objectObj.InternalName}, "/") cl := input.Len() ctx.Debugf("Going to store %d bytes in %s", cl, uri) req, err := http.NewRequest("PUT", uri, input) if err != nil { ctx.Debugf("Failed to create request: %s", err) return err } // XXX Need to check if this vanilla http client is ok client := &http.Client{} resp, err := client.Do(req) if err != nil { ctx.Debugf("Failed to send PUT request to %s (storage = %d): %s", uri, storageObj.Id, err) return err } if resp.StatusCode != 201 { err = errors.New( fmt.Sprintf( "Expected response 201 for PUT request, but did not get it: %s", resp.Status, ), ) ctx.Debugf("Failed to store PUT request to %s (storage = %d): %s", uri, storageObj.Id, err) return err } ctx.Debugf("Successfully stored object in %s", uri) err = self.Create( objectObj.Id, storageObj.Id, ) if err != nil { return err } return nil }
func (p *Pattern) parseTracks(r *bytes.Reader) error { for r.Len() > 0 { t := &Track{} err := t.parse(r) if err != nil { fmt.Println("parse Track failed:", err) return err } p.Tracks = append(p.Tracks, *t) } return nil }
func (h *HTTPSender) buildBaseRequest(contextPath string, method string, headers map[string][]string, bodyReader *bytes.Reader) *http.Request { var req http.Request req.Method = h.method req.ProtoMajor = 1 req.ProtoMinor = 1 req.Close = false req.Header = h.headers req.URL = h.parsedContextPath req.URL.Host = h.hosts[h.currentHost] req.Body = ioutil.NopCloser(bodyReader) req.ContentLength = int64(bodyReader.Len()) return &req }
// putBucketRequest wrapper creates a new putBucket request func (a apiCore) putBucketRequest(bucket, acl, location string) (*request, error) { var r *request var err error op := &operation{ HTTPServer: a.config.Endpoint, HTTPMethod: "PUT", HTTPPath: separator + bucket, } var createBucketConfigBuffer *bytes.Reader // If location is set use it and create proper bucket configuration switch { case location != "": createBucketConfig := new(createBucketConfiguration) createBucketConfig.Location = location var createBucketConfigBytes []byte switch { case a.config.AcceptType == "application/xml": createBucketConfigBytes, err = xml.Marshal(createBucketConfig) case a.config.AcceptType == "application/json": createBucketConfigBytes, err = json.Marshal(createBucketConfig) default: createBucketConfigBytes, err = xml.Marshal(createBucketConfig) } if err != nil { return nil, err } createBucketConfigBuffer = bytes.NewReader(createBucketConfigBytes) } switch { case createBucketConfigBuffer == nil: r, err = newRequest(op, a.config, nil) if err != nil { return nil, err } default: r, err = newRequest(op, a.config, createBucketConfigBuffer) if err != nil { return nil, err } r.req.ContentLength = int64(createBucketConfigBuffer.Len()) } // by default bucket is private switch { case acl != "": r.Set("x-amz-acl", acl) default: r.Set("x-amz-acl", "private") } return r, nil }
func (table_entry *EWF_Table_Section_Entry) Parse(buf *bytes.Reader) { var b *bytes.Reader val := make([]byte, int64(buf.Len())) buf.Read(val) //parse struct attributes table_entry.IsCompressed = val[3] << 1 & 1 val[3] &= 0x7F //exlude MSB b = bytes.NewReader(val) parseutil.Parse(b, &table_entry.ChunkDataOffset) }
func readUnsubscribePayload(r *bytes.Reader) (s UnsubscribePacket, err error) { if r.Len() == 0 { err = errors.New("cannot unsubscribe from zero topics") return } var unsub Unsubscription for r.Len() > 0 { unsub, err = readUnsubscription(r) if err != nil { return } s.Unsubscriptions = append(s.Unsubscriptions, unsub) } return }
func unpackLabels(in *bytes.Reader, n uint16, p []byte) ([]string, error) { if n == 0 { return nil, errors.New("zero labels len") } was := in.Len() d, e := domain.UnpackLabels(in, p) now := in.Len() if was-now != int(n) { return nil, fmt.Errorf("domain length expect %d, actual %d", n, was-now) } return d, e }
func readSubscribePayload(r *bytes.Reader) (s SubscribePacket, err error) { s.MessageID, err = readMessageID(r) if err != nil { return } if r.Len() == 0 { err = errors.New("zero subscriptions") return } var sub Subscription for r.Len() > 0 { sub, err = readSubscription(r) if err != nil { return } s.Subscriptions = append(s.Subscriptions, sub) } return }
func readSubAckPayload(r *bytes.Reader) (p SubAckPacket, err error) { p.MessageID, err = readMessageID(r) if err != nil { return } qos_size := r.Len() if qos_size == 0 { err = errors.New("Missing grantedqos") return } p.GrantedQoS = make([]QoSLevel, qos_size) for i := range p.GrantedQoS { p.GrantedQoS[i], err = readQoS(r) if err != nil { return } } return }
func unpack(t, c uint16, in *bytes.Reader, p []byte) (Rdata, error) { n := uint16(in.Len()) if c == IN { switch t { case A: return UnpackIPv4(in, n) case NS, CNAME: return UnpackDomain(in, n, p) case AAAA: return UnpackIPv6(in, n) case TXT: return UnpackString(in, n) case MX: return UnpackMailEx(in, n, p) case SOA: return UnpackSrcOfAuth(in, n, p) } } return UnpackBytes(in, n) }
// Parse decodes an EWF table section from buf: a 24-byte table header, a
// packed run of 4-byte chunk entries, and a 4-byte table footer at the end.
func (ewf_table_section *EWF_Table_Section) Parse(buf *bytes.Reader) {
	defer parseutil.TimeTrack(time.Now(), "Parsing")
	// Slurp the entire remaining section into memory.
	// NOTE(review): the Read return values are ignored; a short read would
	// corrupt the slicing below — confirm buf always holds a full section.
	val := make([]byte, int64(buf.Len()))
	buf.Read(val)
	// Leading 24 bytes: the table header (including the entry count).
	ewf_table_section.table_header.Parse(bytes.NewReader(val[0:24]))
	// Trailing 4 bytes: the table footer.
	ewf_table_section.table_footer.Parse(bytes.NewReader(val[len(val)-4 : len(val)]))
	// Strip header and footer; what remains is the packed entry array.
	val = val[24 : len(val)-4]
	k := 0
	ewf_table_section.Table_entries = make([]EWF_Table_Section_Entry, ewf_table_section.table_header.nofEntries)
	// Each entry occupies exactly 4 bytes; k walks the array in 4-byte steps.
	for i := uint32(0); i < ewf_table_section.table_header.nofEntries; i += 1 {
		ewf_table_section.Table_entries[i].Parse(bytes.NewReader(val[0+k : 4+k]))
		// fmt.Println("EFW in by", i,
		//   ewf_table_section.table_entries[i].IsCompressed, ewf_table_section.table_entries[i].ChunkDataOffset)
		k += 4
	}
}
func UnpackSrcOfAuth(in *bytes.Reader, n uint16, p []byte) (*SrcOfAuth, error) { if n <= 22 { return nil, fmt.Errorf("soa with %d bytes", n) } ret := new(SrcOfAuth) was := in.Len() labels, e := domain.UnpackLabels(in, p) if e != nil { return nil, e } ret.Mname = labels labels, e = domain.UnpackLabels(in, p) if e != nil { return nil, e } ret.Rname = labels now := in.Len() if was-now+20 != int(n) { return nil, errors.New("invalid soa field length") } buf := make([]byte, 20) _, e = in.Read(buf) if e != nil { return nil, e } ret.Serial = enc.Uint32(buf[0:4]) ret.Refresh = enc.Uint32(buf[4:8]) ret.Retry = enc.Uint32(buf[8:12]) ret.Expire = enc.Uint32(buf[12:16]) ret.Minimum = enc.Uint32(buf[16:20]) return ret, nil }
// Read variable integer in big endian func ReadVarint(reader *bytes.Reader) (ret uint64) { if reader.Len() == 8 { var num uint64 binary.Read(reader, binary.BigEndian, &num) ret = uint64(num) } else if reader.Len() == 4 { var num uint32 binary.Read(reader, binary.BigEndian, &num) ret = uint64(num) } else if reader.Len() == 2 { var num uint16 binary.Read(reader, binary.BigEndian, &num) ret = uint64(num) } else { var num uint8 binary.Read(reader, binary.BigEndian, &num) ret = uint64(num) } return ret }
// parse reads one RIFF chunk from r and recurses until the reader is empty.
// formatWasRead tracks whether a format chunk has already been seen, so data
// chunks appearing before the format description can be rejected.
func (wav *Wave) parse(r *bytes.Reader, formatWasRead bool) error {
	if r.Len() == 0 {
		return nil
	}
	// Every chunk starts with an ID + size header.
	var header chunkHeader
	if err := binary.Read(r, endiannes, &header); err != nil {
		return loadErr("unable to read chunk header", err)
	}
	if header.ChunkID == formatChunkID {
		if formatWasRead {
			return errors.New("load WAV: two format chunks detected")
		}
		// The format chunk comes in three sizes; 16/18/40 select how much of
		// the extended structure is actually present on disk.
		var chunk formatChunkExtended
		if header.ChunkSize == 16 {
			if err := binary.Read(r, endiannes, &(chunk.formatChunkBase)); err != nil {
				return loadErr("reading format chunk", err)
			}
		} else if header.ChunkSize == 18 {
			err := binary.Read(r, endiannes, &(chunk.formatChunkWithExtension))
			if err != nil {
				return loadErr("reading format chunk", err)
			}
		} else if header.ChunkSize == 40 {
			if err := binary.Read(r, endiannes, &chunk); err != nil {
				return loadErr("reading format chunk", err)
			}
		} else {
			return fmt.Errorf("load WAV: illegal format chunk header size: %v", header.ChunkSize)
		}
		// Only uncompressed PCM data is supported.
		if chunk.FormatTag != pcmFormat {
			return fmt.Errorf(
				"load WAV: unsupported format: %v (only PCM is supported)",
				chunk.FormatTag)
		}
		wav.ChannelCount = int(chunk.Channels)
		wav.SamplesPerSecond = int(chunk.SamplesPerSec)
		wav.BitsPerSample = int(chunk.BitsPerSample)
		formatWasRead = true
	} else if header.ChunkID == dataChunkID {
		data := make([]byte, header.ChunkSize)
		if _, err := io.ReadFull(r, data); err != nil {
			return err
		}
		if len(wav.Data) > 0 {
			return errors.New("load WAV: multiple data chunks found")
		}
		if !formatWasRead {
			return errors.New("load WAV: found data chunk before format chunk")
		}
		wav.Data = data
		if header.ChunkSize%2 == 1 {
			// there is one byte padding if the chunk size is odd
			if _, err := r.ReadByte(); err != nil {
				return loadErr("reading data chunk padding", err)
			}
		}
	} else {
		// skip unknown chunks
		io.CopyN(ioutil.Discard, r, int64(header.ChunkSize))
	}
	if r.Len() == 0 {
		// End of input: a valid file must have supplied format information.
		if !formatWasRead {
			return errors.New("load WAV: file does not contain format information")
		}
		return nil
	}
	// Recurse for the next chunk.
	return wav.parse(r, formatWasRead)
}
// readPacket reads one length-prefixed packet from the connection, inflates it
// if compression is active, and decodes it into the Packet type registered for
// the current protocol state and (flipped) direction.
func (c *Conn) readPacket() (Packet, error) {
	// Length prefix
	size, err := ReadVarInt(c.r)
	if err != nil {
		return nil, err
	}
	if size < 0 {
		return nil, errNegativeLength
	}
	// Pull the whole framed packet into memory before parsing it.
	buf := make([]byte, size)
	if _, err := io.ReadFull(c.r, buf); err != nil {
		return nil, err
	}
	var r *bytes.Reader
	r = bytes.NewReader(buf)

	// If compression is enabled then we may need to decompress the packet
	if c.compressionThreshold >= 0 {
		// With compression enabled an extra length prefix is added
		// which is the length of the packet when uncompressed.
		uncompSize, err := ReadVarInt(r)
		if err != nil {
			return nil, err
		}
		// An uncompressed size of 0 means the packet wasn't compressed
		// and we can continue normally.
		if uncompSize != 0 {
			// Reuse the old zlib reader to save on allocations.
			if c.zlibReader == nil {
				c.zlibReader, err = zlib.NewReader(r)
				if err != nil {
					return nil, err
				}
			} else {
				err = c.zlibReader.(zlib.Resetter).Reset(r, nil)
				if err != nil {
					return nil, err
				}
			}
			// Read the whole packet at once instead of in tiny steps
			data := make([]byte, uncompSize)
			_, err := io.ReadFull(c.zlibReader, data)
			if err != nil {
				return nil, err
			}
			// Parse from the inflated bytes from here on.
			r = bytes.NewReader(data)
		}
	}

	// Packet ID selects the constructor for this state/direction.
	id, err := ReadVarInt(r)
	if err != nil {
		return nil, err
	}
	// Direction is swapped as this is coming from the other way
	packets := packetCreator[c.State][(c.direction+1)&1]
	if id < 0 || int(id) >= len(packets) || packets[id] == nil {
		return nil, fmt.Errorf("Unknown packet %s:%02X", c.State, id)
	}
	packet := packets[id]()
	if err := packet.read(r); err != nil {
		return packet, fmt.Errorf("packet(%s:%02X): %s", c.State, id, err)
	}
	// If we haven't fully read the whole buffer then something went wrong.
	// Most likely our packet definitions are out of date or incorrect.
	if r.Len() > 0 {
		return packet, fmt.Errorf("Didn't finish reading packet %s:%02X, have %d bytes left", c.State, id, r.Len())
	}
	if c.Logger != nil {
		c.Logger(true, packet)
	}
	return packet, nil
}
func readRemainingBytes(r *bytes.Reader) (buf []byte, err error) { buf, err = readBytes(r.Len(), r) return }
// isValidReader aborts the process (via logger.Fatal) when reader is nil or
// holds no bytes to deserialize.
func isValidReader(reader *bytes.Reader) {
	empty := reader == nil || reader.Len() == 0
	if empty {
		logger.Fatal("Deserialize Error! Buffer Is Empty")
	}
}
// Recv returns the next packet parsed out of the stream.
// It reads one length-prefixed frame (under a read deadline), inflates it if
// compression is active, and decodes it via the constructor registered for the
// current state and flipped direction.
func (p *Parser) Recv() (Packet, error) {
	// Bound how long we wait for the next frame.
	p.conn.SetReadDeadline(time.Now().Add(p.timeout))
	length, err := ReadVarint(p.r)
	if err != nil {
		return nil, err
	}
	if length < 0 {
		return nil, ErrNegativeLength
	}
	// Pull the whole frame into memory before parsing.
	b := make([]byte, length)
	if _, err := io.ReadFull(p.r, b); err != nil {
		return nil, err
	}
	var r *bytes.Reader
	r = bytes.NewReader(b)

	// If compression is enabled, we need to know how much data to expect first.
	if p.compressionThreshold >= 0 {
		// newSize is the decompressed size of the packet.
		newSize, err := ReadVarint(r)
		if err != nil {
			return nil, err
		}
		/*
			// Technically, this is meant to disconnect you, but we can just
			// ignore this case. Most parsers seem to.
			if newSize < p.compressionThreshold {
				return nil, ErrNotCompressed
			}
		*/
		// If newSize == 0, it's not compressed so we can just carry on anyway.
		if newSize != 0 {
			if p.zlibReader == nil {
				// If we don't already have a zlib reader, set one up.
				p.zlibReader, err = zlib.NewReader(r)
				if err != nil {
					return nil, err
				}
			} else {
				// If we do, reset with the current packet.
				err = p.zlibReader.(zlib.Resetter).Reset(r, nil)
				if err != nil {
					return nil, err
				}
			}
			// Inflate the rest of the frame and parse from the plain bytes.
			data := make([]byte, newSize)
			_, err := io.ReadFull(p.zlibReader, data)
			if err != nil {
				return nil, err
			}
			r = bytes.NewReader(data)
		}
	}

	// The packet ID selects which registered constructor to use.
	id, err := ReadVarint(r)
	if err != nil {
		return nil, err
	}
	// f() returns an empty Packet, that we'll decode into.
	f, ok := Packets[p.State][p.Direction.Flip()][byte(id)]
	if !ok {
		//fmt.Errorf("kyubu: unknown packet %s:%#.2x", p.State, id)
		return nil, ErrUnknownPacket
	}
	packet := f()
	if err := packet.Decode(r); err != nil {
		return nil, err
	}
	// If there are bytes left over, the decoding step missed something.
	if r.Len() > 0 {
		//fmt.Errorf("kyubu: Lost sync on packet %s:%#.2x, %d bytes left", p.State, id, r.Len())
		return nil, ErrLostSync
	}
	return packet, nil
}
// Parse decompresses the header2 section held in buf and decodes its
// tab/newline-delimited attribute table into the section's fields.
// Line 0 carries the BOM and category count, line 1 the category name, and
// line 3 the positional attribute values (selected by the
// EWF_HEADER_VALUES_INDEX_* constants).
func (ewf_h2_section *EWF_Header2_Section) Parse(buf *bytes.Reader) {
	// 0x09 (tab) separates attributes, 0x0a (newline) separates lines.
	// Function parses header2 section attributes.
	// TODO: take endianness into account.
	// NOTE(review): buf.Read return values are ignored — confirm buf always
	// delivers the whole section.
	val := make([]byte, buf.Len())
	buf.Read(val)
	// The section body is stored compressed.
	val = parseutil.Decompress(val)
	defer parseutil.TimeTrack(time.Now(), "Parsing")
	line_del, _ := hex.DecodeString("0a")
	tab_del, err := hex.DecodeString("09")
	if err != nil {
		log.Fatal(err)
	}
	var b *bytes.Reader
	for line_number, line := range bytes.Split(val, line_del) {
		for id_num, attr := range bytes.Split(line, tab_del) {
			b = bytes.NewReader(attr)
			if line_number == 0 {
				// Line 0: byte-order mark and number of categories.
				parseutil.Parse(b, &ewf_h2_section.BOM)
				parseutil.Parse(b, &ewf_h2_section.NofCategories)
			} else if line_number == 1 {
				// Line 1: category name.
				parseutil.Parse(b, &ewf_h2_section.CategoryName)
			} else if line_number == 2 {
				// Line 2: intentionally skipped.
			} else if line_number == 3 {
				// Line 3: attribute values, mapped by positional index.
				if id_num == EWF_HEADER_VALUES_INDEX_DESCRIPTION {
					ewf_h2_section.a = string(attr)
					fmt.Println("TIME", ewf_h2_section.a)
				} else if id_num == EWF_HEADER_VALUES_INDEX_CASE_NUMBER {
					ewf_h2_section.c = string(attr)
				} else if id_num == EWF_HEADER_VALUES_INDEX_EXAMINER_NAME {
					ewf_h2_section.n = string(attr)
				} else if id_num == EWF_HEADER_VALUES_INDEX_EVIDENCE_NUMBER {
					ewf_h2_section.e = string(attr)
				} else if id_num == EWF_HEADER_VALUES_INDEX_NOTES {
					ewf_h2_section.t = string(attr)
				} else if id_num == EWF_HEADER_VALUES_INDEX_ACQUIRY_SOFTWARE_VERSION {
					ewf_h2_section.av = string(attr)
				} else if id_num == EWF_HEADER_VALUES_INDEX_ACQUIRY_OPERATING_SYSTEM {
					ewf_h2_section.ov = string(attr)
				} else if id_num == EWF_HEADER_VALUES_INDEX_ACQUIRY_DATE {
					// Dates are decoded into time values rather than raw strings.
					ewf_h2_section.m = parseutil.SetTime(attr)
				} else if id_num == EWF_HEADER_VALUES_INDEX_SYSTEM_DATE {
					ewf_h2_section.u = parseutil.SetTime(attr)
				} else if id_num == EWF_HEADER_VALUES_INDEX_PASSWORD {
					ewf_h2_section.p = string(attr)
				} else if id_num == EWF_HEADER_VALUES_INDEX_PROCESS_IDENTIFIER {
					ewf_h2_section.pid = string(attr)
				}
			}
		}
	}
}