/** Check a value against a bounded(!) buffer */ func valCheck(buffer []byte, refVal []byte, checkedVal *multipart.Part) bool { totalBytesRead := 0 bufferLength := len(buffer) for { if totalBytesRead >= bufferLength { break } bytesRead, err := checkedVal.Read(buffer[totalBytesRead:]) if bytesRead < 0 || err == io.EOF { break } totalBytesRead += bytesRead } i := 0 refValLength := len(refVal) if totalBytesRead != refValLength { return false } for i < refValLength { if refVal[i] != buffer[i] { return false } i++ } return true }
// Extract form fields and file data from a HTTP POST Policy func extractPostPolicyFormValues(reader *multipart.Reader) (filePart io.Reader, fileName string, formValues map[string]string, err error) { /// HTML Form values formValues = make(map[string]string) fileName = "" for err == nil { var part *multipart.Part part, err = reader.NextPart() if part != nil { canonicalFormName := http.CanonicalHeaderKey(part.FormName()) if canonicalFormName != "File" { var buffer []byte limitReader := io.LimitReader(part, maxFormFieldSize+1) buffer, err = ioutil.ReadAll(limitReader) if err != nil { return nil, "", nil, err } if int64(len(buffer)) > maxFormFieldSize { return nil, "", nil, errSizeUnexpected } formValues[canonicalFormName] = string(buffer) } else { filePart = part fileName = part.FileName() // As described in S3 spec, we expect file to be the last form field break } } } return filePart, fileName, formValues, nil }
// sendFileBlobs splits the uploaded part into content-addressed blobs and
// uploads them — plus the marshaled metadata blob and the object blob — to
// the blob server. The returned map always carries "name" and "size"; if a
// util.Check call panicked, the recovered error is reported under "error".
func sendFileBlobs(c *blobserv.Client, part *multipart.Part) (respMeta map[string]interface{}) {
	meta := blob.NewMeta()
	// Build the response even on panic: util.Check panics on error, and
	// recover() converts that into respMeta["error"].
	defer func() {
		respMeta = map[string]interface{}{}
		respMeta["name"] = meta.Name
		respMeta["size"] = meta.Size
		if r := recover(); r != nil {
			// NOTE(review): assumes every panic value is an error — TODO confirm.
			respMeta["error"] = r.(error).Error()
		}
	}()

	obj := blob.NewObject()
	meta.RcasObjectRef = obj.Ref()
	meta.Name = part.FileName()
	// Reads the whole part into memory; large uploads are not streamed.
	data, err := ioutil.ReadAll(part)
	util.Check(err)
	meta.Size = int64(len(data))

	blobs := blob.SplitRaw(data, blob.DefaultChunkSize)
	meta.ContentRefs = blob.RefsFor(blobs)
	m, err := blob.Marshal(meta)
	util.Check(err)
	// Ship content chunks, then metadata, then the object blob.
	blobs = append(blobs, m, obj)
	for _, b := range blobs {
		err = c.PutBlob(b)
		util.Check(err)
	}
	return respMeta
}
// handleUpload streams one uploaded multipart part into the App Engine
// blobstore and returns its metadata. check() panics on error; the deferred
// recover converts such panics into fi.Error so the caller always receives
// a FileInfo.
func handleUpload(r *http.Request, p *multipart.Part) (fi *FileInfo) {
	fi = &FileInfo{
		Name: p.FileName(),
		Type: p.Header.Get("Content-Type"),
	}
	// Reject disallowed content types before touching the blobstore.
	if !fi.ValidateType() {
		return
	}
	defer func() {
		if rec := recover(); rec != nil {
			log.Println(rec)
			// NOTE(review): assumes every recovered value is an error — TODO confirm.
			fi.Error = rec.(error).Error()
		}
	}()
	// One extra byte beyond MAX_FILE_SIZE so an oversized upload is
	// detectable: lr.N drops below 1 only if more than MAX_FILE_SIZE bytes
	// were consumed.
	lr := &io.LimitedReader{R: p, N: MAX_FILE_SIZE + 1}
	context := appengine.NewContext(r)
	w, err := blobstore.Create(context, fi.Type)
	// Finalize the blob after the copy below; this runs even when io.Copy fails.
	// NOTE(review): if blobstore.Create failed, w may be nil here and
	// w.Close() would panic inside the defer — TODO confirm this is intended
	// (the panic would be caught by the recover above).
	defer func() {
		w.Close()
		// Bytes actually consumed from the limited reader.
		fi.Size = MAX_FILE_SIZE + 1 - lr.N
		fi.Key, err = w.Key()
		check(err)
		if !fi.ValidateSize() {
			// Oversized: delete the stored blob; check() reports via panic/recover.
			err := blobstore.Delete(context, fi.Key)
			check(err)
			return
		}
		// Schedule expiry and build the response URLs.
		delayedDelete(context, fi)
		fi.CreateUrls(r, context)
	}()
	check(err)
	_, err = io.Copy(w, lr)
	return
}
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) { /// HTML Form values formValues := make(map[string]string) filePart := new(bytes.Buffer) var err error for err == nil { var part *multipart.Part part, err = reader.NextPart() if part != nil { if part.FileName() == "" { buffer, err := ioutil.ReadAll(part) if err != nil { return nil, nil, probe.NewError(err) } formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer) } else { _, err := io.Copy(filePart, part) if err != nil { return nil, nil, probe.NewError(err) } } } } return filePart, formValues, nil }
func testMultipartInlineWithStdLib(t *testing.T, originalPart *Message, stdlibAltPart *multipart.Part) { // confirm stdlib headers match our headers if !reflect.DeepEqual(map[string][]string(originalPart.Header), map[string][]string(stdlibAltPart.Header)) { t.Fatal("Message does not match its parsed counterpart") } // multipart/alternative with inlines should have text/plain and multipart/related parts alternativeReader := multipart.NewReader(stdlibAltPart, boundary(map[string][]string(stdlibAltPart.Header))) plainPart, err := alternativeReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testBodyPartWithStdLib(t, originalPart.Parts[0], plainPart) relatedPart, err := alternativeReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testMultipartRelatedWithStdLib(t, originalPart.Parts[1], relatedPart) // confirm EOF and Close if _, err = alternativeReader.NextPart(); err != io.EOF || stdlibAltPart.Close() != nil { t.Fatal("Should be EOF", err) } }
func testMultipartRelatedWithStdLib(t *testing.T, originalPart *Message, stdlibRelatedPart *multipart.Part) { // confirm stdlib headers match our headers if !reflect.DeepEqual(map[string][]string(originalPart.Header), map[string][]string(stdlibRelatedPart.Header)) { t.Fatal("Message does not match its parsed counterpart") } // multipart/related should have text/html, image/gif, and image/png parts relatedReader := multipart.NewReader(stdlibRelatedPart, boundary(map[string][]string(stdlibRelatedPart.Header))) htmlPart, err := relatedReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testBodyPartWithStdLib(t, originalPart.Parts[0], htmlPart) gifPart, err := relatedReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testBodyPartWithStdLib(t, originalPart.Parts[1], gifPart) pngPart, err := relatedReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testBodyPartWithStdLib(t, originalPart.Parts[2], pngPart) // confirm EOF and Close if _, err = relatedReader.NextPart(); err != io.EOF || stdlibRelatedPart.Close() != nil { t.Fatal("Should be EOF", err) } }
func upload(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { uploadTemplate.Execute(w, nil) } else { part_reader, err := r.MultipartReader() if err != nil { log.Println("get file:", err) w.WriteHeader(http.StatusInternalServerError) } log.Println("start copy") var file_part *multipart.Part for { if file_part, err = part_reader.NextPart(); err != nil { if err == io.EOF { err = nil } break } if file_part.FormName() == "file" { if err = write_file(file_part); err != nil { break } } file_part.Close() } if err != nil { log.Println("write file:", err) w.WriteHeader(http.StatusInternalServerError) return } http.Redirect(w, r, "/upload", 302) } }
// testBodyPartWithStdLib checks a leaf body part parsed by the standard
// library against our own Message: headers must be deeply equal and the
// (possibly base64-decoded) content must equal originalPart.Body.
func testBodyPartWithStdLib(t *testing.T, originalPart *Message, stdlibPart *multipart.Part) {
	// decode base64 if exists
	var stdlibPartBodyReader io.Reader
	if stdlibPart.Header.Get("Content-Transfer-Encoding") == "base64" {
		// The header is deleted BEFORE the DeepEqual below so the
		// comparison is done on the normalized form — presumably the
		// original message's header also lacks it after decoding; the
		// ordering here is load-bearing.
		stdlibPart.Header.Del("Content-Transfer-Encoding")
		stdlibPartBodyReader = base64.NewDecoder(base64.StdEncoding, stdlibPart)
	} else {
		stdlibPartBodyReader = stdlibPart
	}
	// confirm stdlib headers match our headers
	if !reflect.DeepEqual(map[string][]string(originalPart.Header), map[string][]string(stdlibPart.Header)) {
		t.Fatal("Message header does not match its parsed counterpart")
	}
	// read content (through the base64 decoder when one was installed)
	content, err := ioutil.ReadAll(stdlibPartBodyReader)
	if err != nil || stdlibPart.Close() != nil {
		t.Fatal("Couldn't read or close part body", err)
	}
	// confirm content is deeply equal
	if !reflect.DeepEqual(originalPart.Body, content) {
		t.Fatal("Message body does not match its parsed counterpart")
	}
}
func multipartUploadHandler(r *http.Request) (albumName, contentType, fileName, contentLength string, fileReader io.Reader, err error) { mbound, err := checkMultipart(r) if err != nil { return } // Count reader, counts bytes read as they are read. creader := iomod.NewReadCounter(r.Body) mreader := multipart.NewReader(creader, mbound) sconlen := r.Header.Get("Content-Length") conlen, err := strconv.Atoi(sconlen) // Picasa REQUIRES Content-Length! if err != nil { log.Println("No Content-Length header or invalid value!", sconlen) return } for { var mpart *multipart.Part mpart, err = mreader.NextPart() if mpart != nil { log.Println("Multipart handler:", mpart, mpart.FormName(), err) } else { return } conlen -= 1 name := mpart.FormName() switch name { case "album": var albumNameBytes []byte albumNameBytes, err = ioutil.ReadAll(mpart) if err != nil { log.Println("Error reading album name!", albumName, err) return } log.Println("Read", creader.Count, "bytes so far ( content-length is", r.Header["Content-Length"], ")") albumName = string(albumNameBytes) case "Filedata": contentType = mpart.Header.Get("Content-Type") var mtypes map[string]string _, mtypes, err = mime.ParseMediaType(mpart.Header.Get("Content-Disposition")) if err != nil { return } fileName = mtypes["filename"] log.Println("Read", creader.Count, "bytes so far ( content-length is", r.Header.Get("Content-Length"), ")") // We have to do this, because it seems like the only reliable way to determine the size of the file... Hopefully the files they send are not too large... // WARNING: Security vunerability with large files, could overrun the server. buf := new(bytes.Buffer) io.Copy(buf, mpart) fileReader = buf contentLength = strconv.Itoa(buf.Len()) } } return }
func WriteFileChunk(chunk *multipart.Part, file *os.File) error { buffer := make([]byte, 4096) bufbytes, err := chunk.Read(buffer) if err == io.EOF { return err } file.Write(buffer[:bufbytes]) return err }
func readAttachmentFromMimePart(part *multipart.Part) NNTPAttachment { hdr := part.Header content_type := hdr.Get("Content-Type") media_type, _, err := mime.ParseMediaType(content_type) buff := new(bytes.Buffer) fname := part.FileName() idx := strings.LastIndex(fname, ".") ext := ".txt" if idx > 0 { ext = fname[idx:] } transfer_encoding := hdr.Get("Content-Transfer-Encoding") if transfer_encoding == "base64" { // read the attachment entirely io.Copy(buff, part) // clear reference part = nil // allocate a buffer for the decoded part att_bytes := make([]byte, base64.StdEncoding.DecodedLen(buff.Len())) decoded_bytes := make([]byte, len(att_bytes)) // decode _, err = base64.StdEncoding.Decode(decoded_bytes, buff.Bytes()) // reset original attachment buffer buff.Reset() // copy and wrap copy(att_bytes, decoded_bytes) buff = bytes.NewBuffer(att_bytes) att_bytes = nil // clear reference decoded_bytes = nil } else { _, err = io.Copy(buff, part) // clear reference part = nil } if err != nil { log.Println("failed to read attachment from mimepart", err) return nil } sha := sha512.Sum512(buff.Bytes()) hashstr := base32.StdEncoding.EncodeToString(sha[:]) fpath := hashstr + ext return nntpAttachment{ body: *buff, header: hdr, mime: media_type, filename: fname, filepath: fpath, ext: ext, hash: sha[:], } }
// save a single image func handleSaveSingleImage(part *multipart.Part) (info models.ImageInfo, err error) { newID := bson.NewObjectId() date := time.Now().Format("20060102") err = helper.CreateDirIfNotExists(filepath.Join(helper.Config.SaveDir, date)) if err != nil { return } path := filepath.Join(date, newID.Hex()) savePath := filepath.Join(helper.Config.SaveDir, path) dst, err := os.Create(savePath) if err != nil { return } defer dst.Close() var bytes int64 if bytes, err = io.Copy(dst, part); err != nil { return } width, height := helper.GetImageDimensions(savePath) var hash models.HashInfo hash, err = helper.CalculateBasicHashes(savePath) if err != nil { return } URL := helper.Config.BaseURL + "/img/" + newID.Hex() info = models.ImageInfo{ ID: newID, Name: part.FileName(), Extension: filepath.Ext(part.FileName()), Path: path, Width: width, Height: height, URL: URL, Resizes: map[string]string{}, Hash: hash, Size: bytes, CreatedAt: time.Now(), } err = db.StoreImage(&info) if err != nil { return } return info, nil }
// uploadPhoto stores the uploaded part under assets/imgs using a
// content-hash-derived filename and records it against the session cookie
// via addPhoto, returning the (possibly refreshed) cookie.
//
// NOTE(review): every error below is discarded — os.Getwd, os.Create and
// io.Copy can all fail silently, and io.Copy would then write to a nil
// file; consider surfacing these errors.
func uploadPhoto(part *multipart.Part, c *http.Cookie) *http.Cookie {
	fmt.Printf("%v", part.FileName())
	// Filename is the content hash plus the original file's extension.
	fName := getSha(part) + filepath.Ext(part.FileName())
	wd, _ := os.Getwd()
	path := filepath.Join(wd, "assets", "imgs", fName)
	dst, _ := os.Create(path)
	defer dst.Close()
	//src.Seek(0,0)
	// NOTE(review): getSha has presumably already consumed the part; unless
	// it rewinds or buffers, this copy may write nothing — TODO confirm.
	io.Copy(dst, part)
	return addPhoto(fName, c)
}
func unpackPart(part *multipart.Part, emlbase string, errors chan error) { defer part.Close() partFileName := part.FileName() if partFileName == "" { return } attachmentFileName := emlbase + " " + partFileName attachmentFile, err := os.Create(attachmentFileName) if err != nil { errors <- MessageError( fmt.Sprintf( "Problem opening the %q file: %s", attachmentFileName, err.Error())) return } defer attachmentFile.Close() enc := part.Header.Get("Content-Transfer-Encoding") var partReader io.Reader switch enc { case "", "7bit", "8bit": partReader = part case "base64", "BASE64", "Base64": partReader = base64.NewDecoder(base64.StdEncoding, part) default: errors <- MessageError( fmt.Sprintf( "Attachment %q: unknown encoging %q", attachmentFileName, enc)) return } _, err = io.Copy(attachmentFile, partReader) if err != nil { errors <- MessageError( fmt.Sprintf( "Problem copying the %q part of the %q message: %s", attachmentFile, emlbase, err.Error())) return } }
func checkPartFileExists(t *testing.T, part *multipart.Part) (ok, skipped bool) { if part.FormName() != "file" { return false, true } if part.FileName() != "file1.ext" { t.Errorf("Filename not set") return } return true, false }
func ConsumePart(p *multipart.Part, sz int, f func([]byte, int) (interface{}, error)) (interface{}, error) { value := make([]byte, sz, sz) n, err := p.Read(value) if err != nil { return nil, err } i, err := f(value, n) if err != nil { return nil, err } return i, err }
func mimeTypeForPart(part *multipart.Part) string { if contentType := part.Header.Get("Content-Type"); contentType != "" { if _, _, err := mime.ParseMediaType(contentType); err == nil { return contentType } } ext := path.Ext(part.FileName()) mimeType := mime.TypeByExtension(ext) if mimeType == "" { mimeType = "image/jpeg" } return mimeType }
func handleUpload(p *multipart.Part, dir string) (fi *fileInfo) { fi = &fileInfo{ Name: p.FileName(), Type: p.Header.Get("Content-Type"), } path := filepath.Clean(dir + "/" + fi.Name) f, _ := os.Create(path) io.Copy(f, p) f.Close() return }
func checkPartSignatureIsValid(t *testing.T, part *multipart.Part) (ok, skipped bool) { if part.FormName() != "signature" { return false, true } var pbody bytes.Buffer if n, err := pbody.ReadFrom(part); err != nil { t.Errorf("Unable to read part: %d %s, %+v", n, err, part) return } if pbody.String() != "ljNZVWWNydBahCG5wWD64fTFEOU=" { t.Errorf("Signature: Expected ljNZVWWNydBahCG5wWD64fTFEOU= got: %s", pbody.String()) } ok = true return }
func getParam(part *multipart.Part) (string, float32, error) { name := part.FormName() fmt.Printf("Read name %s", name) if name == "submit" { return name, float32(0), nil } buffer := make([]byte, 1024) n, err := part.Read(buffer) fmt.Printf("Read %s", string(buffer[:n])) if err != nil { return "", 0.0, err } value, err := strconv.ParseFloat(string(buffer[:n]), 32) if err != nil { return "", 0.0, err } return name, float32(value), nil }
func (r *Resumable) ReadBody(p *multipart.Part, c *Chunk) error { data := make([]byte, r.MaxChunkSize) // read := 0 // TODO: find a way to identify oversized chunks (read > r.MaxChunkSize?) for { n, err := p.Read(data) // read += n if err != nil { if err == io.EOF { break } else { return err } } c.Body = append(c.Body, data[:n]...) } return nil }
// Disabled App Engine expiry scheduling, retained for reference:
/* func delayedDelete(c appengine.Context, fi *FileInfo) { if key := string(fi.Key); key != "" { task := &taskqueue.Task{ Path: "/" + escape(key) + "/-", Method: "DELETE", Delay: time.Duration(EXPIRATION_TIME) * time.Second, } taskqueue.Add(c, task, "") } } */

// handleUpload writes one uploaded part to disk under UPLOAD_DIR/target_dir,
// capped at MAX_FILE_SIZE (+1 byte so an overrun is detectable via lr.N).
// check() panics on error; the deferred recover converts such panics into
// fi.Error. The blobstore handling from the original App Engine version is
// retained in comments.
func handleUpload(r *http.Request, p *multipart.Part) (fi *FileInfo) {
	fi = &FileInfo{
		Name: p.FileName(),
		Type: p.Header.Get("Content-Type"),
	}
	// Reject disallowed content types before writing anything.
	if !fi.ValidateType() {
		return
	}
	defer func() {
		if rec := recover(); rec != nil {
			fmt.Println(rec)
			// NOTE(review): assumes every recovered value is an error — TODO confirm.
			fi.Error = rec.(error).Error()
		}
	}()
	// One extra byte beyond MAX_FILE_SIZE so oversized uploads are detectable.
	lr := &io.LimitedReader{R: p, N: MAX_FILE_SIZE + 1}
	err := os.MkdirAll(path.Join(UPLOAD_DIR, target_dir), 0777)
	if err != nil {
		fmt.Println(err)
	}
	// NOTE(review): p.FileName() is used verbatim — a crafted filename could
	// escape the upload directory; consider filepath.Base. A Create failure
	// here is only surfaced via check(err) below.
	f, err := os.Create(path.Join(UPLOAD_DIR, target_dir, p.FileName()))
	/* context := appengine.NewContext(r) w, err := blobstore.Create(context, fi.Type) */
	// Finalize after the copy: close the file and record the byte count.
	defer func() {
		f.Close()
		fi.Size = MAX_FILE_SIZE + 1 - lr.N
		//fi.Key, err = w.Key()
		//check(err)
		/* if !fi.ValidateSize() { err := blobstore.Delete(context, fi.Key) check(err) return } delayedDelete(context, fi) fi.CreateUrls(r, context) */
	}()
	check(err)
	_, err = io.Copy(f, lr)
	return
}
// handleUpload streams one uploaded part into the App Engine blobstore,
// teeing image payloads into an in-memory buffer so a thumbnail can be
// generated afterwards. check() panics on error; the deferred recover
// converts such panics into fi.Error.
//
// NOTE(review): this uses pre-Go1 APIs (os.Error, String(), untagged
// composite literal for io.LimitedReader) and will not compile with
// modern Go toolchains.
func handleUpload(r *http.Request, p *multipart.Part) (fi *FileInfo) {
	fi = &FileInfo{
		Name: p.FileName(),
		Type: p.Header.Get("Content-Type"),
	}
	// Reject disallowed content types before touching the blobstore.
	if !fi.ValidateType() {
		return
	}
	defer func() {
		if rec := recover(); rec != nil {
			log.Println(rec)
			fi.Error = rec.(os.Error).String()
		}
	}()
	// b receives a copy of image uploads for thumbnail generation.
	var b bytes.Buffer
	// One extra byte beyond MAX_FILE_SIZE so oversized uploads are detectable.
	lr := &io.LimitedReader{p, MAX_FILE_SIZE + 1}
	context := appengine.NewContext(r)
	w, err := blobstore.Create(context, fi.Type)
	// Finalize the blob after the copy below; runs even if io.Copy fails.
	// NOTE(review): if blobstore.Create failed, w may be nil here and
	// w.Close() would panic inside the defer — TODO confirm intended.
	defer func() {
		w.Close()
		fi.Size = MAX_FILE_SIZE + 1 - lr.N
		fi.Key, err = w.Key()
		check(err)
		if !fi.ValidateSize() {
			// Oversized: remove the stored blob.
			err := blobstore.Delete(context, fi.Key)
			check(err)
			return
		}
		delayedDelete(context, fi)
		if b.Len() > 0 {
			fi.CreateThumbnail(&b, context)
		}
		fi.CreateUrls(r, context)
	}()
	check(err)
	// Tee image bytes into b as they stream to the blobstore.
	var wr io.Writer = w
	if imageTypes.MatchString(fi.Type) {
		wr = io.MultiWriter(&b, w)
	}
	_, err = io.Copy(wr, lr)
	return
}
/** Uploader has a function to drain an http request off to a filename Note that writing to a file is not the only possible course of action. The part name (or file name, content type, etc) may insinuate that the file is small, and should be held in memory. */ func (h uploader) serveHTTPUploadPOSTDrain(fileName string, w http.ResponseWriter, part *multipart.Part) (bytesWritten int64, partsWritten int64) { log.Printf("read part %s", fileName) //Dangerous... Should whitelist char names to prevent writes //outside the homeBucket! drainTo, drainErr := os.Create(fileName) defer drainTo.Close() if drainErr != nil { log.Printf("cannot write out file %s, %v", fileName, drainErr) http.Error(w, "cannot write out file", 500) return bytesWritten, partsWritten } drain := bufio.NewWriter(drainTo) var lastBytesRead int buffer := make([]byte, h.BufferSize) for lastBytesRead >= 0 { bytesRead, berr := part.Read(buffer) lastBytesRead = bytesRead if berr == io.EOF { break } if berr != nil { log.Printf("error reading data! %v", berr) http.Error(w, "error reading data", 500) return bytesWritten, partsWritten } if lastBytesRead > 0 { bytesWritten += int64(lastBytesRead) drain.Write(buffer[:bytesRead]) partsWritten++ } } drain.Flush() log.Printf("wrote file %s of length %d", fileName, bytesWritten) //Watchout for hardcoding. This is here to make it convenient to retrieve what you downloaded log.Printf("https://127.0.0.1:%d/download/%s", h.Port, fileName[1+len(h.HomeBucket):]) return bytesWritten, partsWritten }
func handleUpload(r *http.Request, p *multipart.Part, root string) { defer func() { if rec := recover(); rec != nil { logger.Println(rec) } }() lr := &io.LimitedReader{R: p, N: MaxFileSize + 1} filename := filepath.Join(root, conf.Server.StaticDir, p.FileName()) fo, err := os.Create(filename) if err != nil { logger.Printf("err writing %q!, err = %s\n", filename, err.Error()) } defer fo.Close() w := bufio.NewWriter(fo) _, err = io.Copy(w, lr) if err != nil { logger.Printf("err writing %q!, err = %s\n", filename, err.Error()) } if err = w.Flush(); err != nil { logger.Printf("err flushing writer for %q!, err = %s\n", filename, err.Error()) } return }
// parsePart parses a multipart mime part for an input or carrier form // entry. File uploads will be buffered entirely into memory. func parsePart(part *multipart.Part) (u *url.URL, rc io.ReadCloser, err error) { filename := part.FileName() if filename != "" { // We assume entire file. // Parts are being read from multipart mime, so we // buffer the entire thing into memory. :[ buf := new(bytes.Buffer) _, err := buf.ReadFrom(part) if err != nil { return nil, nil, err } return nil, bytesBufferCloser{buf}, nil } // We assume URL. inputBytes, err := ioutil.ReadAll(part) if err != nil { return nil, nil, err } rawurl := string(inputBytes) if rawurl == "" { // Form may have just been // submited with no URL, but // with file input in another // field of the same name. // We'll check for missing // values after we go through // all the parts. return nil, nil, nil } u, err = parseURL(rawurl) if err != nil { return nil, nil, err } return u, nil, nil }
// handleUpload buffers the uploaded part in memory (capped at
// MAX_FILE_SIZE), keys it by its CRC32, stores the bytes in memcache and
// fills in thumbnail and URL metadata. check() panics on error; the
// deferred recover converts such panics into fi.Error.
func handleUpload(r *http.Request, p *multipart.Part) (fi *FileInfo) {
	fi = &FileInfo{
		Name: p.FileName(),
		Type: p.Header.Get("Content-Type"),
	}
	// Reject disallowed content types early.
	if !fi.ValidateType() {
		return
	}
	defer func() {
		if rec := recover(); rec != nil {
			log.Println(rec)
			// NOTE(review): assumes every recovered value is an error — TODO confirm.
			fi.Error = rec.(error).Error()
		}
	}()
	// Tee the upload into a buffer (for memcache) and a CRC32 hash (for the key).
	var buffer bytes.Buffer
	hash := crc32.NewIEEE()
	mw := io.MultiWriter(&buffer, hash)
	// One extra byte beyond MAX_FILE_SIZE so an oversized upload is
	// detectable from the remaining count lr.N.
	lr := &io.LimitedReader{R: p, N: MAX_FILE_SIZE + 1}
	_, err := io.Copy(mw, lr)
	check(err)
	fi.Size = MAX_FILE_SIZE + 1 - lr.N
	if !fi.ValidateSize() {
		return
	}
	fi.SetKey(hash.Sum32())
	item := &memcache.Item{
		Key:   fi.Key,
		Value: buffer.Bytes(),
	}
	context := appengine.NewContext(r)
	err = memcache.Set(context, item)
	check(err)
	fi.createThumb(&buffer, context)
	fi.CreateUrls(r, context)
	return
}
func write_file(part *multipart.Part) error { dir_name := *dist_dir + "/" + time.Now().Format("2006-01-02") file_name := dir_name + "/" + part.FileName() if err := os.Mkdir(dir_name, 0755); err != nil { if os.IsNotExist(err) { return err } } if fd, err := os.Open(file_name); err == nil { fd.Close() return nil } var err error var newfile *os.File if newfile, err = os.Create(file_name); err != nil { return err } log.Println("create", file_name) defer newfile.Close() buf := make([]byte, 1024*1024) for { n, err := part.Read(buf) newfile.Write(buf[:n]) if err == io.EOF { err = nil break } if err != nil { os.Remove(file_name) log.Print("remove", file_name) break } } return err }
// save a single image func handleSaveSingleFile(part *multipart.Part) (info models.FileInfo, err error) { newID := bson.NewObjectId() date := time.Now().Format("20060102") err = helper.CreateDirIfNotExists(filepath.Join(helper.Config.SaveDir, date)) if err != nil { return } path := filepath.Join(date, newID.Hex()) savePath := filepath.Join(helper.Config.SaveDir, path) dst, err := os.Create(savePath) if err != nil { return } defer dst.Close() var bytes int64 if bytes, err = io.Copy(dst, part); err != nil { return } var hash models.HashInfo hash, err = helper.CalculateBasicHashes(savePath) if err != nil { return } URL := helper.Config.BaseURL + "/file/" + newID.Hex() info = models.FileInfo{ ID: newID, Name: part.FileName(), Extension: filepath.Ext(part.FileName()), Path: path, URL: URL, Hash: hash, Size: bytes, CreatedAt: time.Now(), ContentType: getSimpleContentTypeByFileName(part.FileName()), } err = db.StoreResource(&info) if err != nil { return } return info, nil }