func testBodyPartWithStdLib(t *testing.T, originalPart *Message, stdlibPart *multipart.Part) { // decode base64 if exists var stdlibPartBodyReader io.Reader if stdlibPart.Header.Get("Content-Transfer-Encoding") == "base64" { stdlibPart.Header.Del("Content-Transfer-Encoding") stdlibPartBodyReader = base64.NewDecoder(base64.StdEncoding, stdlibPart) } else { stdlibPartBodyReader = stdlibPart } // confirm stdlib headers match our headers if !reflect.DeepEqual(map[string][]string(originalPart.Header), map[string][]string(stdlibPart.Header)) { t.Fatal("Message header does not match its parsed counterpart") } // read content content, err := ioutil.ReadAll(stdlibPartBodyReader) if err != nil || stdlibPart.Close() != nil { t.Fatal("Couldn't read or close part body", err) } // confirm content is deeply equal if !reflect.DeepEqual(originalPart.Body, content) { t.Fatal("Message body does not match its parsed counterpart") } }
func testMultipartInlineWithStdLib(t *testing.T, originalPart *Message, stdlibAltPart *multipart.Part) { // confirm stdlib headers match our headers if !reflect.DeepEqual(map[string][]string(originalPart.Header), map[string][]string(stdlibAltPart.Header)) { t.Fatal("Message does not match its parsed counterpart") } // multipart/alternative with inlines should have text/plain and multipart/related parts alternativeReader := multipart.NewReader(stdlibAltPart, boundary(map[string][]string(stdlibAltPart.Header))) plainPart, err := alternativeReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testBodyPartWithStdLib(t, originalPart.Parts[0], plainPart) relatedPart, err := alternativeReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testMultipartRelatedWithStdLib(t, originalPart.Parts[1], relatedPart) // confirm EOF and Close if _, err = alternativeReader.NextPart(); err != io.EOF || stdlibAltPart.Close() != nil { t.Fatal("Should be EOF", err) } }
func testMultipartRelatedWithStdLib(t *testing.T, originalPart *Message, stdlibRelatedPart *multipart.Part) { // confirm stdlib headers match our headers if !reflect.DeepEqual(map[string][]string(originalPart.Header), map[string][]string(stdlibRelatedPart.Header)) { t.Fatal("Message does not match its parsed counterpart") } // multipart/related should have text/html, image/gif, and image/png parts relatedReader := multipart.NewReader(stdlibRelatedPart, boundary(map[string][]string(stdlibRelatedPart.Header))) htmlPart, err := relatedReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testBodyPartWithStdLib(t, originalPart.Parts[0], htmlPart) gifPart, err := relatedReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testBodyPartWithStdLib(t, originalPart.Parts[1], gifPart) pngPart, err := relatedReader.NextPart() if err != nil { t.Fatal("Couldn't get next part", err) } testBodyPartWithStdLib(t, originalPart.Parts[2], pngPart) // confirm EOF and Close if _, err = relatedReader.NextPart(); err != io.EOF || stdlibRelatedPart.Close() != nil { t.Fatal("Should be EOF", err) } }
func upload(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { uploadTemplate.Execute(w, nil) } else { part_reader, err := r.MultipartReader() if err != nil { log.Println("get file:", err) w.WriteHeader(http.StatusInternalServerError) } log.Println("start copy") var file_part *multipart.Part for { if file_part, err = part_reader.NextPart(); err != nil { if err == io.EOF { err = nil } break } if file_part.FormName() == "file" { if err = write_file(file_part); err != nil { break } } file_part.Close() } if err != nil { log.Println("write file:", err) w.WriteHeader(http.StatusInternalServerError) return } http.Redirect(w, r, "/upload", 302) } }
func unpackPart(part *multipart.Part, emlbase string, errors chan error) { defer part.Close() partFileName := part.FileName() if partFileName == "" { return } attachmentFileName := emlbase + " " + partFileName attachmentFile, err := os.Create(attachmentFileName) if err != nil { errors <- MessageError( fmt.Sprintf( "Problem opening the %q file: %s", attachmentFileName, err.Error())) return } defer attachmentFile.Close() enc := part.Header.Get("Content-Transfer-Encoding") var partReader io.Reader switch enc { case "", "7bit", "8bit": partReader = part case "base64", "BASE64", "Base64": partReader = base64.NewDecoder(base64.StdEncoding, part) default: errors <- MessageError( fmt.Sprintf( "Attachment %q: unknown encoging %q", attachmentFileName, enc)) return } _, err = io.Copy(attachmentFile, partReader) if err != nil { errors <- MessageError( fmt.Sprintf( "Problem copying the %q part of the %q message: %s", attachmentFile, emlbase, err.Error())) return } }
// OpenStream sends request to target and handles
// response. It opens MJPEG-stream and copies received
// frame to m.curImg. It closes stream if m.CloseStream()
// is called or if difference between current time and
// time of last request to ServeHTTP is bigger than timeout.
func (m *Mjpegproxy) openstream(mjpegStream, user, pass string, timeout time.Duration) {
	m.setRunning(true)
	m.conChan = make(chan time.Time)
	m.mjpegStream = mjpegStream
	var lastconn time.Time
	var img *multipart.Part
	request, err := http.NewRequest("GET", mjpegStream, nil)
	if err != nil {
		// NOTE(review): log.Fatal exits the whole process on a malformed
		// URL — confirm that is intended rather than returning an error.
		log.Fatal(m.mjpegStream, err)
	}
	if user != "" && pass != "" {
		request.SetBasicAuth(user, pass)
	}
	var response *http.Response
	var boundary string
	var mpread *multipart.Reader
	var starttime time.Time
	buf := new(bytes.Buffer)
	log.Println("Starting streaming from", mjpegStream)
	for m.GetRunning() {
		// Block until a client touches ServeHTTP (signalled via conChan),
		// then remember when that happened.
		lastconn = <-m.conChan
		m.lastConnLock.Lock()
		m.lastConn = lastconn
		m.lastConnLock.Unlock()
		if !m.GetRunning() {
			continue
		}
		// Open (or re-open) the upstream MJPEG connection.
		response, err = m.getresponse(request)
		if err != nil {
			log.Println(m.mjpegStream, err)
			time.Sleep(m.waittime)
			continue
		}
		starttime = time.Now()
		boundary, err = m.getboundary(response)
		if err != nil {
			log.Println(m.mjpegStream, err)
			response.Body.Close()
			time.Sleep(m.waittime)
			continue
		}
		mpread = multipart.NewReader(response.Body, boundary)
		// Inner loop: pull frames while someone watched recently and no
		// error has occurred.
		for m.GetRunning() && (time.Since(lastconn) < timeout) && err == nil {
			// Cap how long a single upstream response is consumed.
			if time.Since(starttime) > m.responseduration {
				break
			}
			// Refresh lastconn from shared state once half the timeout has
			// elapsed, so recent viewers keep the stream alive.
			if time.Since(lastconn) > timeout/2 {
				m.lastConnLock.RLock()
				lastconn = m.lastConn
				m.lastConnLock.RUnlock()
			}
			img, err = mpread.NextPart()
			if err != nil {
				log.Println(m.mjpegStream, err)
				break
			}
			// buf is an additional buffer that allows
			// serving curImg while loading next part.
			buf.Reset()
			// Bound the frame size to partbufsize bytes.
			_, err = buf.ReadFrom(io.LimitReader(img, m.partbufsize))
			if err != nil {
				img.Close()
				log.Println(m.mjpegStream, err)
				break
			}
			if m.caching {
				m.lastModLock.Lock()
				m.lastModified = time.Now().UTC()
				m.lastModLock.Unlock()
			}
			// Publish the frame under the image lock.
			m.curImgLock.Lock()
			m.curImg.Reset()
			_, err = m.curImg.ReadFrom(buf)
			m.curImgLock.Unlock()
			img.Close()
			if err != nil {
				log.Println(m.mjpegStream, err)
				break
			}
		}
		response.Body.Close()
		time.Sleep(m.waittime)
	}
	log.Println("Stopped streaming from", mjpegStream)
}
// main grabs JPEG frames from an Axis camera's MJPEG stream and writes each
// frame as a timestamped .jpg file into per-second folders under
// *save_folder.
//
// NOTE(review): this is pre-Go 1 code — os.Error, err.String(), the
// three-value http.Get, os.Time(), time.SecondsToLocalTime, strconv.Itoa64
// and the flag-taking os.Open were all removed in Go 1. It will not build
// with a modern toolchain and is kept as-is.
func main() {
	var rr *http.Response
	var myreader multipart.Reader
	// Clear the umask so created files/folders get exactly the modes below.
	syscall.Umask(0000)
	if !parseME() {
		os.Exit(1)
	}
	var requesturl string
	requesturl = ("http://" + (*camera_ip) + ":" + strconv.Itoa(*camera_port) + "/mjpg/1/video.mjpg")
	fmt.Println("request sent to " + requesturl)
	// NOTE(review): both errors below are silently discarded; a failed GET
	// leaves rr nil and the program will crash later.
	rr, _, _ = http.Get(requesturl)
	myreader, _ = MultipartReader(rr)
	var p *multipart.Part
	var curr_length int = 0
	var templen int
	var buff []byte
	var s string
	var m int
	var info *os.FileInfo
	var err os.Error
	// The save folder must already exist and be a directory.
	info, err = os.Lstat(*save_folder)
	if err != nil {
		fmt.Println("Folder " + (*save_folder) + " Is problematic")
		fmt.Println(err.String())
		os.Exit(1)
	}
	if !info.IsDirectory() {
		fmt.Println("Folder " + (*save_folder) + " Is not a directory")
		os.Exit(1)
	}
	var foldertime *time.Time = nil
	var foldersecs int64
	var folderstamp string
	var tstamp_secs int64
	var tstamp_nsecs int64
	var msecs int64
	var update bool
	var foldername string
	var imagename string
	var mywriter *os.File
	// NOTE(review): i is never incremented, so this loop runs forever —
	// presumably deliberate ("grab frames until killed"), but confirm.
	for i := 0; i < 1; {
		p, _ = myreader.NextPart()
		update = false
		tstamp_secs, tstamp_nsecs, _ = os.Time()
		// Start a new folder whenever the wall-clock second advances.
		if foldertime == nil {
			foldertime = time.SecondsToLocalTime(tstamp_secs)
			foldersecs = tstamp_secs
			update = true
		} else {
			if tstamp_secs > foldersecs {
				foldertime = time.SecondsToLocalTime(tstamp_secs)
				foldersecs = tstamp_secs
				update = true
			}
		}
		if update {
			// Folder name: <save_folder>/<camera_name>_Y_M_D_H_M_S
			folderstamp = strconv.Itoa64(foldertime.Year) + "_" + strconv.Itoa(foldertime.Month) + "_" + strconv.Itoa(foldertime.Day) + "_" + strconv.Itoa(foldertime.Hour) + "_" + strconv.Itoa(foldertime.Minute) + "_" + strconv.Itoa(foldertime.Second)
			foldername = (*save_folder) + "/" + (*camera_name) + "_" + folderstamp
			err = os.Mkdir(foldername, 0700)
			if err != nil {
				fmt.Fprintf(os.Stderr, "error creating %s because : %s\n", foldername, err.String())
				os.Exit(1)
			}
		}
		// Grow the reusable frame buffer to the largest frame seen so far.
		templen, _ = strconv.Atoi(p.Header["Content-Length"])
		if templen > curr_length {
			curr_length = templen
			buff = make([]byte, curr_length)
		}
		// Read exactly templen bytes of the frame (Read may return short).
		for counter := 0; counter < templen; {
			m, _ = p.Read(buff[counter:templen])
			counter += m
		}
		p.Close()
		msecs = tstamp_nsecs / 1e6
		imagename = "image_" + folderstamp + "_" + strconv.Itoa64(msecs) + ".jpg"
		s = foldername + "/" + imagename
		mywriter, err = os.Open(s, os.O_CREAT|os.O_WRONLY, 0600)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error writing %d bytes because : %s\n", templen, err.String())
			os.Exit(1)
		}
		// Write the frame out, handling short writes.
		// NOTE(review): mywriter is never closed — a file descriptor is
		// leaked on every iteration of the infinite loop.
		for counter := 0; counter < templen; {
			m, _ = mywriter.Write(buff[counter:templen])
			counter += m
		}
	}
}
// readArticle reads one article from the connection's dot-reader and fans it
// out through pipes to three background goroutines:
//   - an "acceptor" that reads the header, decides a PolicyStatus, and tees
//     the body to storage and to the body parser;
//   - a "store" goroutine that persists the raw article;
//   - a "parser" goroutine that walks MIME parts and stores attachments.
// The calling goroutine drains the connection into article_w and collects
// the final status/err from the acceptor and store via channels.
func (c *v1Conn) readArticle(newpost bool, hooks EventHooks) (ps PolicyStatus, err error) {
	// Pipes: connection -> article_r (acceptor); acceptor -> store_r (store)
	// and article_body_r (parser).
	store_r, store_w := io.Pipe()
	article_r, article_w := io.Pipe()
	article_body_r, article_body_w := io.Pipe()
	accept_chnl := make(chan PolicyStatus)
	store_info_chnl := make(chan ArticleEntry)
	store_result_chnl := make(chan error)
	hdr_chnl := make(chan message.Header)
	log.WithFields(log.Fields{
		"pkg": "nntp-conn",
	}).Debug("start reading")
	done_chnl := make(chan PolicyStatus)
	// Reader goroutine: drain the dot-terminated article off the wire into
	// the article pipe, then wait for the acceptor's status and the store's
	// result.
	go func() {
		var err error
		dr := c.C.DotReader()
		var buff [1024]byte
		var n int64
		n, err = io.CopyBuffer(article_w, dr, buff[:])
		log.WithFields(log.Fields{
			"n": n,
		}).Debug("read from connection")
		if err != nil && err != io.EOF {
			article_w.CloseWithError(err)
		} else {
			article_w.Close()
		}
		st := <-accept_chnl
		close(accept_chnl)
		// get result from storage
		err2, ok := <-store_result_chnl
		if ok && err2 != io.EOF {
			err = err2
		}
		close(store_result_chnl)
		done_chnl <- st
	}()
	// parse message and store attachments in bg
	go func(msgbody io.ReadCloser) {
		defer msgbody.Close()
		// Wait for the acceptor to hand over the parsed header; a closed
		// channel without a value means the header was never read.
		hdr, ok := <-hdr_chnl
		if !ok {
			return
		}
		// all text in this post
		// txt := new(bytes.Buffer)
		// the article itself
		// a := new(model.Article)
		var err error
		if hdr.IsMultipart() {
			var params map[string]string
			_, params, err = hdr.GetMediaType()
			if err == nil {
				boundary, ok := params["boundary"]
				if ok {
					// Walk every MIME part of the body.
					part_r := multipart.NewReader(msgbody, boundary)
					for err == nil {
						var part *multipart.Part
						part, err = part_r.NextPart()
						if err == io.EOF {
							// we done
							break
						} else if err == nil {
							// we gots a part
							// get header
							part_hdr := part.Header
							// check for base64 encoding
							var part_body io.Reader
							if part_hdr.Get("Content-Transfer-Encoding") == "base64" {
								part_body = base64.NewDecoder(base64.StdEncoding, part)
							} else {
								part_body = part
							}
							// get content type
							content_type := part_hdr.Get("Content-Type")
							if len(content_type) == 0 {
								// assume text/plain
								content_type = "text/plain; charset=UTF8"
							}
							var part_type string
							// extract mime type
							part_type, _, err = mime.ParseMediaType(content_type)
							if err == nil {
								if part_type == "text/plain" {
									// if we are plaintext save it to the text buffer
									// NOTE(review): the body is currently just
									// discarded; the text-buffer code above is
									// commented out.
									_, err = io.Copy(util.Discard, part_body)
								} else {
									// Non-text part: persist as attachment.
									var fpath string
									fname := part.FileName()
									fpath, err = c.storage.StoreAttachment(part_body, fname)
									if err == nil {
										// stored attachment good
										log.WithFields(log.Fields{
											"pkg":      "nntp-conn",
											"state":    &c.state,
											"version":  "1",
											"filename": fname,
											"filepath": fpath,
										}).Debug("attachment stored")
									} else {
										// failed to save attachment
										log.WithFields(log.Fields{
											"pkg":     "nntp-conn",
											"state":   &c.state,
											"version": "1",
										}).Error("failed to save attachment ", err)
									}
								}
							} else {
								// cannot read part header
								log.WithFields(log.Fields{
									"pkg":     "nntp-conn",
									"state":   &c.state,
									"version": "1",
								}).Error("bad attachment in multipart message ", err)
							}
							// Per-part errors are logged and cleared so the
							// remaining parts are still processed.
							err = nil
							part.Close()
						} else if err != io.EOF {
							// error reading part
							log.WithFields(log.Fields{
								"pkg":     "nntp-conn",
								"state":   &c.state,
								"version": "1",
							}).Error("error reading part ", err)
						}
					}
				}
			}
		} else if hdr.IsSigned() {
			// signed message
			// discard for now
			_, err = io.Copy(util.Discard, msgbody)
		} else {
			// plaintext message
			var n int64
			n, err = io.Copy(util.Discard, msgbody)
			log.WithFields(log.Fields{
				"bytes": n,
				"pkg":   "nntp-conn",
			}).Debug("text body copied")
		}
		if err != nil && err != io.EOF {
			log.WithFields(log.Fields{
				"pkg":   "nntp-conn",
				"state": &c.state,
			}).Error("error handing message body", err)
		}
	}(article_body_r)
	// store function
	go func(r io.ReadCloser) {
		// Wait for the acceptor to announce what (if anything) to store.
		e, ok := <-store_info_chnl
		if !ok {
			// failed to get info
			// don't read anything
			r.Close()
			store_result_chnl <- io.EOF
			return
		}
		msgid := e.MessageID()
		if msgid.Valid() {
			// valid message-id
			log.WithFields(log.Fields{
				"pkg":     "nntp-conn",
				"msgid":   msgid,
				"version": "1",
				"state":   &c.state,
			}).Debug("storing article")
			fpath, err := c.storage.StoreArticle(r, msgid.String(), e.Newsgroup().String())
			r.Close()
			if err == nil {
				log.WithFields(log.Fields{
					"pkg":     "nntp-conn",
					"msgid":   msgid,
					"version": "1",
					"state":   &c.state,
				}).Debug("stored article okay to ", fpath)
				// we got the article
				if hooks != nil {
					hooks.GotArticle(msgid, e.Newsgroup())
				}
				// io.EOF on this channel signals "stored successfully".
				store_result_chnl <- io.EOF
				log.Debugf("store informed")
			} else {
				// error storing article
				log.WithFields(log.Fields{
					"pkg":     "nntp-conn",
					"msgid":   msgid,
					"state":   &c.state,
					"version": "1",
				}).Error("failed to store article ", err)
				io.Copy(util.Discard, r)
				store_result_chnl <- err
			}
		} else {
			// invalid message-id
			// discard
			log.WithFields(log.Fields{
				"pkg":     "nntp-conn",
				"msgid":   msgid,
				"state":   &c.state,
				"version": "1",
			}).Warn("store will discard message with invalid message-id")
			io.Copy(util.Discard, r)
			store_result_chnl <- nil
			r.Close()
		}
	}(store_r)
	// acceptor function
	go func(r io.ReadCloser, out_w, body_w io.WriteCloser) {
		var w io.WriteCloser
		defer r.Close()
		status := PolicyAccept
		hdr, err := c.hdrio.ReadHeader(r)
		if err == nil {
			// append path
			hdr.AppendPath(c.serverName)
			// get message-id
			var msgid MessageID
			if newpost {
				// new post
				// generate it
				msgid = GenMessageID(c.serverName)
				hdr.Set("Message-ID", msgid.String())
			} else {
				// not a new post, get from header
				msgid = MessageID(hdr.MessageID())
				if msgid.Valid() {
					// check store fo existing article
					err = c.storage.HasArticle(msgid.String())
					if err == store.ErrNoSuchArticle {
						// we don't have the article
						status = PolicyAccept
						log.Infof("accept article %s", msgid)
					} else if err == nil {
						// we do have the article, reject it we don't need it again
						status = PolicyReject
					} else {
						// some other error happened
						log.WithFields(log.Fields{
							"pkg":   "nntp-conn",
							"state": c.state,
						}).Error("failed to check store for article ", err)
					}
					err = nil
				} else {
					// bad article
					status = PolicyBan
				}
			}
			// check the header if we have an acceptor and the previous checks are good
			if status.Accept() && c.acceptor != nil {
				status = c.acceptor.CheckHeader(hdr)
			}
			if status.Accept() {
				// we have accepted the article
				// store to disk
				w = out_w
			} else {
				// we have not accepted the article
				// discard
				w = util.Discard
				out_w.Close()
			}
			store_info_chnl <- ArticleEntry{msgid.String(), hdr.Newsgroup()}
			hdr_chnl <- hdr
			// close the channel for headers
			close(hdr_chnl)
			// write header out to storage
			err = c.hdrio.WriteHeader(hdr, w)
			if err == nil {
				// Tee the body to both the parser pipe and storage.
				mw := io.MultiWriter(body_w, w)
				// we wrote header
				var n int64
				if c.acceptor == nil {
					// write the rest of the body
					// we don't care about article size
					log.WithFields(log.Fields{}).Debug("copying body")
					var buff [128]byte
					n, err = io.CopyBuffer(mw, r, buff[:])
				} else {
					// we care about the article size
					max := c.acceptor.MaxArticleSize()
					// NOTE(review): this inner n shadows the outer one, so
					// the "body wrote" log below always reports 0 bytes in
					// this branch.
					var n int64
					// copy it out
					n, err = io.CopyN(mw, r, max)
					if err == nil {
						if n < max {
							// under size limit
							// we gud
							log.WithFields(log.Fields{
								"pkg":   "nntp-conn",
								"bytes": n,
								"state": &c.state,
							}).Debug("body fits")
						} else {
							// too big, discard the rest
							_, err = io.Copy(util.Discard, r)
							// ... and ban it
							status = PolicyBan
						}
					}
				}
				log.WithFields(log.Fields{
					"pkg":   "nntp-conn",
					"bytes": n,
					"state": &c.state,
				}).Debug("body wrote")
				// TODO: inform store to delete article and attachments
			} else {
				// error writing header
				log.WithFields(log.Fields{
					"msgid": msgid,
				}).Error("error writing header ", err)
			}
		} else {
			// error reading header
			// possibly a read error?
			status = PolicyDefer
		}
		// close info channel for store
		close(store_info_chnl)
		// NOTE(review): if ReadHeader failed above, w was never assigned and
		// this w.Close() panics on a nil interface — confirm and guard.
		w.Close()
		// close body pipe
		body_w.Close()
		// inform result
		log.Debugf("status %s", status)
		accept_chnl <- status
		log.Debugf("informed")
	}(article_r, store_w, article_body_w)
	// Wait for the reader goroutine to aggregate the final status.
	ps = <-done_chnl
	close(done_chnl)
	log.Debug("read article done")
	return
}