func LargeFileStream(r *multipart.Reader) { p, err := r.NextPart() if err == io.EOF { return } file, err := os.OpenFile(p.FileName(), os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644) if err != nil { return } defer file.Close() for { err := WriteFileChunk(p, file) if err == io.EOF { return } } for { p, err := r.NextPart() if err == io.EOF { return } for { err := WriteFileChunk(p, file) if err == io.EOF { break } } } return }
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) { /// HTML Form values formValues := make(map[string]string) filePart := new(bytes.Buffer) var err error for err == nil { var part *multipart.Part part, err = reader.NextPart() if part != nil { if part.FileName() == "" { buffer, err := ioutil.ReadAll(part) if err != nil { return nil, nil, probe.NewError(err) } formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer) } else { _, err := io.Copy(filePart, part) if err != nil { return nil, nil, probe.NewError(err) } } } } return filePart, formValues, nil }
// Extract form fields and file data from a HTTP POST Policy func extractPostPolicyFormValues(reader *multipart.Reader) (filePart io.Reader, fileName string, formValues map[string]string, err error) { /// HTML Form values formValues = make(map[string]string) fileName = "" for err == nil { var part *multipart.Part part, err = reader.NextPart() if part != nil { canonicalFormName := http.CanonicalHeaderKey(part.FormName()) if canonicalFormName != "File" { var buffer []byte limitReader := io.LimitReader(part, maxFormFieldSize+1) buffer, err = ioutil.ReadAll(limitReader) if err != nil { return nil, "", nil, err } if int64(len(buffer)) > maxFormFieldSize { return nil, "", nil, errSizeUnexpected } formValues[canonicalFormName] = string(buffer) } else { filePart = part fileName = part.FileName() // As described in S3 spec, we expect file to be the last form field break } } } return filePart, fileName, formValues, nil }
// parseMultipartContent splits a raw multipart MIME body (delimited by the
// given boundary) into a Content map keyed by content type. Nested multipart
// parts are parsed recursively and flattened into the same map. Unparseable
// parts are logged and skipped rather than failing the whole message, so the
// returned error is always nil.
func parseMultipartContent(content, boundary string) (Content, error) {
	var (
		reader     *multipart.Reader = multipart.NewReader(strings.NewReader(content), boundary)
		part       *multipart.Part
		mailParts  Content           = make(Content, 1)
		partHeader PMIMEHeader
		err        error
	)
	for {
		if part, err = reader.NextPart(); err == io.EOF {
			// 1) EOF error means, we're finished reading the multiparts
			break
		} else if err != nil {
			// 2) other errors are real => print a warning for that part and continue with the next
			fmt.Printf("[watney] WARNING: Couldn't parse multipart 'Part' Header & Content: %s\n", err.Error())
			continue
		}
		// 3) Try to read the content of this multipart body ...
		if readBytes, err := ioutil.ReadAll(part); err != nil {
			fmt.Printf("[watney] WARNING: Couldn't read multipart body content: %s\n", err.Error())
			continue
		} else {
			// 4) The body of this part has been successfully parsed => extract content
			if partHeader = parseMIMEHeader(part.Header); len(partHeader.ContentType) > 0 {
				// 5) We got a Content-Type, check if that one is multipart again
				if strings.Contains(partHeader.ContentType, "multipart") {
					// 5a) It is multipart => recursively add its parts
					if innerParts, err := parseMultipartContent(string(readBytes), partHeader.MultipartBoundary); err != nil {
						fmt.Printf("[watney] WARNING: Couldn't parse inner multipart body: %s\n", err.Error())
						continue
					} else {
						// Flatten the inner parts into this level's map; keys
						// from inner parts overwrite same-typed outer parts.
						for key, value := range innerParts {
							mailParts[key] = value
						}
					}
				} else {
					// 5b) We have a content type other than multipart, just add it
					// NOTE(review): charset is hard-coded to UTF-8 even if the
					// part header declared another charset — TODO confirm.
					mailParts[partHeader.ContentType] = ContentPart{
						Encoding: partHeader.Encoding,
						Charset:  "UTF-8",
						Body:     string(readBytes),
					}
				}
			} else {
				// 4b) This part has no MIME information -> assume text/plain
				// ATTENTION: We're overwriting previously parsed text/plain parts
				mailParts["text/plain"] = ContentPart{
					Encoding: "quoted-printable",
					Charset:  "UTF-8",
					Body:     string(readBytes),
				}
			}
		}
	}
	return mailParts, nil
}
func (r *Resumable) MakeChunk(reader *multipart.Reader) (*Chunk, error) { var total int64 = 0 chunk := &Chunk{Body: make([]byte, 0), Extra: make(map[string]string)} for { part, err := reader.NextPart() if err == io.EOF { break } if err != nil { return nil, err } name := part.FormName() switch name { case r.OffsetParamName: v, err := ConsumePart(part, 8, ConsumeInt) if err != nil { return nil, errors.New(err.Error() + " (" + name + ")") } chunk.Offset = v.(int64) break case r.TotalParamName: v, err := ConsumePart(part, 8, ConsumeInt) if err != nil { return nil, errors.New(err.Error() + " (" + name + ")") } total = v.(int64) break case r.UploadIdParamName: v, err := ConsumePart(part, 1024, ConsumeString) if err != nil { return nil, errors.New(err.Error() + " (" + name + ")") } chunk.UploadId = v.(string) break case r.FileParamName: chunk.Filename = part.FileName() err = r.ReadBody(part, chunk) if err != nil { return nil, errors.New(err.Error() + " (" + name + ")") } break default: v, err := ConsumePart(part, 1024, ConsumeString) if err != nil { return nil, errors.New(err.Error() + " (" + name + ")") } chunk.Extra[name] = v.(string) break } } if len(chunk.UploadId) == 0 { return nil, errors.New("empty" + " (" + r.UploadIdParamName + ")") } if len(chunk.Filename) == 0 { return nil, errors.New("empty" + " (filename)") } chunk.Final = chunk.Offset+int64(len(chunk.Body)) >= total return chunk, nil }
func formToMap(c *check.C, mr *multipart.Reader) map[string]string { formData := map[string]string{} for { p, err := mr.NextPart() if err == io.EOF { break } c.Assert(err, check.IsNil) slurp, err := ioutil.ReadAll(p) c.Assert(err, check.IsNil) formData[p.FormName()] = string(slurp) } return formData }
// OpenStream sends request to target and handles
// response. It opens MJPEG-stream and copies received
// frame to m.curImg. It closes stream if m.CloseStream()
// is called or if difference between current time and
// time of last request to ServeHTTP is bigger than timeout.
func (m *Mjpegproxy) openstream(mjpegStream, user, pass string, timeout time.Duration) {
	m.setRunning(true)
	m.conChan = make(chan time.Time)
	m.mjpegStream = mjpegStream
	var lastconn time.Time
	var img *multipart.Part
	// Build the upstream request once; it is reused for every reconnect.
	request, err := http.NewRequest("GET", mjpegStream, nil)
	if err != nil {
		log.Fatal(m.mjpegStream, err)
	}
	if user != "" && pass != "" {
		request.SetBasicAuth(user, pass)
	}
	var response *http.Response
	var boundary string
	var mpread *multipart.Reader
	var starttime time.Time
	buf := new(bytes.Buffer)
	log.Println("Starting streaming from", mjpegStream)
	for m.GetRunning() {
		// Block until a client connects (signaled through conChan), then
		// record that connection time under the lock.
		lastconn = <-m.conChan
		m.lastConnLock.Lock()
		m.lastConn = lastconn
		m.lastConnLock.Unlock()
		if !m.GetRunning() {
			continue
		}
		response, err = m.getresponse(request)
		if err != nil {
			log.Println(m.mjpegStream, err)
			time.Sleep(m.waittime)
			continue
		}
		starttime = time.Now()
		boundary, err = m.getboundary(response)
		if err != nil {
			log.Println(m.mjpegStream, err)
			response.Body.Close()
			time.Sleep(m.waittime)
			continue
		}
		mpread = multipart.NewReader(response.Body, boundary)
		// Frame loop: keep pulling parts while running, a client connected
		// recently enough, and no read error has occurred.
		for m.GetRunning() && (time.Since(lastconn) < timeout) && err == nil {
			// Periodically drop and re-open the upstream response.
			if time.Since(starttime) > m.responseduration {
				break
			}
			// Once half the timeout has elapsed, refresh lastconn from the
			// shared field so recent client activity keeps the stream alive.
			if time.Since(lastconn) > timeout/2 {
				m.lastConnLock.RLock()
				lastconn = m.lastConn
				m.lastConnLock.RUnlock()
			}
			img, err = mpread.NextPart()
			if err != nil {
				log.Println(m.mjpegStream, err)
				break
			}
			// buf is an additional buffer that allows
			// serving curImg while loading next part.
			buf.Reset()
			// Cap the bytes read per frame at m.partbufsize.
			_, err = buf.ReadFrom(io.LimitReader(img, m.partbufsize))
			if err != nil {
				img.Close()
				log.Println(m.mjpegStream, err)
				break
			}
			if m.caching {
				m.lastModLock.Lock()
				m.lastModified = time.Now().UTC()
				m.lastModLock.Unlock()
			}
			// Publish the completed frame under the image lock.
			m.curImgLock.Lock()
			m.curImg.Reset()
			_, err = m.curImg.ReadFrom(buf)
			m.curImgLock.Unlock()
			img.Close()
			if err != nil {
				log.Println(m.mjpegStream, err)
				break
			}
		}
		response.Body.Close()
		time.Sleep(m.waittime)
	}
	log.Println("Stopped streaming from", mjpegStream)
}
// ReadMultipartDocument parses a multipart document: the first MIME part is
// the main JSON body, and each subsequent part supplies the data of an
// attachment whose metadata contains "follows": true.
// NOTE(review): digestIndex maps digest -> attachment name, so two
// attachments with identical contents collide on one key and only a single
// name survives — confirm callers cannot produce duplicate attachments.
func ReadMultipartDocument(reader *multipart.Reader) (Body, error) {
	// First read the main JSON document body:
	mainPart, err := reader.NextPart()
	if err != nil {
		return nil, err
	}
	var body Body
	err = ReadJSONFromMIME(http.Header(mainPart.Header), mainPart, &body)
	mainPart.Close()
	if err != nil {
		return nil, err
	}
	digestIndex := map[string]string{} // maps digests -> names
	// Now look for "following" attachments:
	attachments := BodyAttachments(body)
	for name, value := range attachments {
		meta := value.(map[string]interface{})
		if meta["follows"] == true {
			// Every "following" attachment must declare its digest so the
			// matching MIME part can be identified later.
			digest, ok := meta["digest"].(string)
			if !ok {
				return nil, &base.HTTPError{http.StatusBadRequest, "Missing digest in attachment"}
			}
			digestIndex[digest] = name
		}
	}
	// Read the parts one by one:
	for i := 0; i < len(digestIndex); i++ {
		part, err := reader.NextPart()
		if err != nil {
			if err == io.EOF {
				err = &base.HTTPError{http.StatusBadRequest, "Too few MIME parts"}
			}
			return nil, err
		}
		data, err := ioutil.ReadAll(part)
		part.Close()
		if err != nil {
			return nil, err
		}
		// Look up the attachment by its digest:
		digest := sha1DigestKey(data)
		name, ok := digestIndex[digest]
		if !ok {
			// Fall back to an MD5 digest if no SHA-1 entry matched.
			name, ok = digestIndex[md5DigestKey(data)]
		}
		if !ok {
			// i+2 converts the zero-based attachment index to a 1-based
			// MIME part number (part #1 is the JSON body).
			return nil, &base.HTTPError{http.StatusBadRequest, fmt.Sprintf("MIME part #%d doesn't match any attachment", i+2)}
		}
		// Validate the received size against the declared length,
		// preferring encoded_length when present.
		meta := attachments[name].(map[string]interface{})
		length, ok := meta["encoded_length"].(float64)
		if !ok {
			length, ok = meta["length"].(float64)
		}
		if ok {
			if int(length) != len(data) {
				return nil, &base.HTTPError{http.StatusBadRequest, fmt.Sprintf("Attachment length mismatch for %q: read %d bytes, should be %g", name, len(data), length)}
			}
		}
		// Stuff the data into the attachment metadata and drop "follows":
		delete(meta, "follows")
		meta["data"] = data
		meta["digest"] = digest
	}
	// Make sure there are no unused MIME parts:
	_, err = reader.NextPart()
	if err != io.EOF {
		return nil, &base.HTTPError{http.StatusBadRequest, "Too many MIME parts"}
	}
	return body, nil
}
func ReadMultipartDocument(reader *multipart.Reader) (Body, error) { // First read the main JSON document body: mainPart, err := reader.NextPart() if err != nil { return nil, err } var body Body err = ReadJSONFromMIME(http.Header(mainPart.Header), mainPart, &body) mainPart.Close() if err != nil { return nil, err } // Collect the attachments with a "follows" property, which will appear as MIME parts: followingAttachments := map[string]map[string]interface{}{} for name, value := range BodyAttachments(body) { if meta := value.(map[string]interface{}); meta["follows"] == true { followingAttachments[name] = meta } } // Subroutine to look up a following attachment given its digest. (I used to precompute a // map from digest->name, which was faster, but that broke down if there were multiple // attachments with the same contents! See #96) findFollowingAttachment := func(withDigest string) (string, map[string]interface{}) { for name, meta := range followingAttachments { if meta["follows"] == true { if digest, ok := meta["digest"].(string); ok && digest == withDigest { return name, meta } } } return "", nil } // Read the parts one by one: for i := 0; i < len(followingAttachments); i++ { part, err := reader.NextPart() if err != nil { if err == io.EOF { err = base.HTTPErrorf(http.StatusBadRequest, "Too few MIME parts: expected %d attachments, got %d", len(followingAttachments), i) } return nil, err } data, err := ioutil.ReadAll(part) part.Close() if err != nil { return nil, err } // Look up the attachment by its digest: digest := sha1DigestKey(data) name, meta := findFollowingAttachment(digest) if meta == nil { name, meta = findFollowingAttachment(md5DigestKey(data)) if meta == nil { return nil, base.HTTPErrorf(http.StatusBadRequest, "MIME part #%d doesn't match any attachment", i+2) } } length, ok := base.ToInt64(meta["encoded_length"]) if !ok { length, ok = base.ToInt64(meta["length"]) } if ok { if length != int64(len(data)) { return nil, 
base.HTTPErrorf(http.StatusBadRequest, "Attachment length mismatch for %q: read %d bytes, should be %g", name, len(data), length) } } // Stuff the data into the attachment metadata and remove the "follows" property: delete(meta, "follows") meta["data"] = data meta["digest"] = digest } // Make sure there are no unused MIME parts: if _, err = reader.NextPart(); err != io.EOF { if err == nil { err = base.HTTPErrorf(http.StatusBadRequest, "Too many MIME parts (expected %d)", len(followingAttachments)+1) } return nil, err } return body, nil }
func (receiver *endpointReceiver) handleMultipartMessage(mreader *multipart.Reader) (*todoList, error) { todo := &todoList{} // iterate through parts of multipart/form-data content for { part, err := mreader.NextPart() if err == io.EOF { receiver.log.Printf("End of stream reached") break } else if err != nil { return nil, fmt.Errorf("Error reading part: %v", err) } formName := part.FormName() receiver.log.Printf("Handling part: %v", formName) switch formName { case "buyersecret": if receiver.buyerSecret != ZEROHASH { return nil, fmt.Errorf("Buyer's secret already received") } b := make([]byte, 64) n, err := part.Read(b) if err != nil || n != len(b) { return nil, fmt.Errorf("Error reading buyersecret: %v (%v bytes read)", err, n) } n, err = hex.Decode(receiver.buyerSecret[:], b) if err != nil || n != len(receiver.buyerSecret) { return nil, fmt.Errorf("Error decoding buyersecret: %v (%v bytes written)", err, n) } case "work": if receiver.builder != nil || receiver.workFile != nil { return nil, fmt.Errorf("Work already received") } temp := receiver.storage.Create(receiver.info) defer temp.Dispose() const MAXBYTES = 2 << 24 // 16MB // Copy up to MAXBYTES and expect EOF if n, err := io.CopyN(temp, part, MAXBYTES); err != io.EOF { return nil, fmt.Errorf("Work too long or error: %v (%v bytes read)", err, n) } if err := temp.Close(); err != nil { return nil, fmt.Errorf("Error creating file from temporary data: %v", err) } receiver.workFile = temp.File() todo.mustHandleWork = true case "a32chunks": if receiver.builder != nil || receiver.workFile != nil { return nil, fmt.Errorf("Work already received on 'a32chunks'") } if b, err := cafs.NewBuilder(receiver.storage, part, receiver.info); err != nil { return nil, fmt.Errorf("Error receiving chunk hashes: %v", err) } else { receiver.builder = b todo.mustHandleChunkHashes = true } case "chunkdata": if receiver.builder == nil { return nil, fmt.Errorf("Didn't receive chunk hashes") } if receiver.workFile != nil { return nil, 
fmt.Errorf("Work already received") } if f, err := receiver.builder.ReconstructFileFromRequestedChunks(part); err != nil { return nil, fmt.Errorf("Error reconstructing work from sent chunks: %v", err) } else { receiver.workFile = f } todo.mustHandleWork = true default: return nil, fmt.Errorf("Don't know what to do with part %#v", formName) } } return todo, nil }
// main polls an Axis-style camera's MJPEG stream and saves each frame as a
// JPEG file, grouped into per-second timestamped folders.
// NOTE(review): this is pre-Go1 code (os.Error, err.String(), os.Time(),
// strconv.Itoa64, time.SecondsToLocalTime, 3-argument os.Open) and will not
// compile with a modern toolchain — keep or migrate deliberately.
func main() {
	var rr *http.Response
	var myreader multipart.Reader
	// Allow created files/folders to carry exactly the modes requested below.
	syscall.Umask(0000)
	if !parseME() {
		os.Exit(1)
	}
	var requesturl string
	requesturl = ("http://" + (*camera_ip) + ":" + strconv.Itoa(*camera_port) + "/mjpg/1/video.mjpg")
	fmt.Println("request sent to " + requesturl)
	// NOTE(review): errors from http.Get and MultipartReader are discarded;
	// a failed connection is only detected later — TODO confirm acceptable.
	rr, _, _ = http.Get(requesturl)
	myreader, _ = MultipartReader(rr)
	var p *multipart.Part
	var curr_length int = 0
	var templen int
	var buff []byte
	var s string
	var m int
	var info *os.FileInfo
	var err os.Error
	// The save folder must already exist and be a directory.
	info, err = os.Lstat(*save_folder)
	if err != nil {
		fmt.Println("Folder " + (*save_folder) + " Is problematic")
		fmt.Println(err.String())
		os.Exit(1)
	}
	if !info.IsDirectory() {
		fmt.Println("Folder " + (*save_folder) + " Is not a directory")
		os.Exit(1)
	}
	var foldertime *time.Time = nil
	var foldersecs int64
	var folderstamp string
	var tstamp_secs int64
	var tstamp_nsecs int64
	var msecs int64
	var update bool
	var foldername string
	var imagename string
	var mywriter *os.File
	// i is never incremented, so this loop runs forever (one frame per pass).
	for i := 0; i < 1; {
		p, _ = myreader.NextPart()
		update = false
		tstamp_secs, tstamp_nsecs, _ = os.Time()
		// Start a new folder whenever the wall-clock second advances.
		if foldertime == nil {
			foldertime = time.SecondsToLocalTime(tstamp_secs)
			foldersecs = tstamp_secs
			update = true
		} else {
			if tstamp_secs > foldersecs {
				foldertime = time.SecondsToLocalTime(tstamp_secs)
				foldersecs = tstamp_secs
				update = true
			}
		}
		if update {
			// Folder name: <save_folder>/<camera>_YYYY_M_D_H_M_S
			folderstamp = strconv.Itoa64(foldertime.Year) + "_" + strconv.Itoa(foldertime.Month) + "_" + strconv.Itoa(foldertime.Day) + "_" + strconv.Itoa(foldertime.Hour) + "_" + strconv.Itoa(foldertime.Minute) + "_" + strconv.Itoa(foldertime.Second)
			foldername = (*save_folder) + "/" + (*camera_name) + "_" + folderstamp
			err = os.Mkdir(foldername, 0700)
			if err != nil {
				fmt.Fprintf(os.Stderr, "error creating %s because : %s\n", foldername, err.String())
				os.Exit(1)
			}
		}
		// Grow the frame buffer to the largest Content-Length seen so far.
		templen, _ = strconv.Atoi(p.Header["Content-Length"])
		if templen > curr_length {
			curr_length = templen
			buff = make([]byte, curr_length)
		}
		// Read the full frame; Read may return short counts, so loop.
		for counter := 0; counter < templen; {
			m, _ = p.Read(buff[counter:templen])
			counter += m
		}
		p.Close()
		// Image name carries the folder timestamp plus milliseconds.
		msecs = tstamp_nsecs / 1e6
		imagename = "image_" + folderstamp + "_" + strconv.Itoa64(msecs) + ".jpg"
		s = foldername + "/" + imagename
		mywriter, err = os.Open(s, os.O_CREAT|os.O_WRONLY, 0600)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error writing %d bytes because : %s\n", templen, err.String())
			os.Exit(1)
		}
		// Write the full frame; Write may also return short counts.
		// NOTE(review): mywriter is never closed, leaking one file
		// descriptor per frame — verify this is acceptable.
		for counter := 0; counter < templen; {
			m, _ = mywriter.Write(buff[counter:templen])
			counter += m
		}
	}
}