func TestECBEncrypter(t *testing.T) { var plain, crypt [256]byte for i := 0; i < len(plain); i++ { plain[i] = byte(i) } b := new(bytes.Buffer) for block := 1; block <= 64; block *= 2 { // compute encrypted version delta := byte(0) for i := 0; i < len(crypt); i++ { if i%block == 0 { delta++ } crypt[i] = plain[i] + delta } for frag := 0; frag < 2; frag++ { c := &IncCipher{block, 0, true} b.Reset() r := bytes.NewBuffer(&plain) w := NewECBEncrypter(c, b) // copy plain into w in increasingly large chunks: 1, 1, 2, 4, 8, ... // if frag != 0, move the 1 to the end to cause fragmentation. if frag == 0 { _, err := io.Copyn(w, r, 1) if err != nil { t.Errorf("block=%d frag=0: first Copyn: %s", block, err) continue } } for n := 1; n <= len(plain)/2; n *= 2 { _, err := io.Copyn(w, r, int64(n)) if err != nil { t.Errorf("block=%d frag=%d: Copyn %d: %s", block, frag, n, err) } } if frag != 0 { _, err := io.Copyn(w, r, 1) if err != nil { t.Errorf("block=%d frag=1: last Copyn: %s", block, err) continue } } // check output data := b.Bytes() if len(data) != len(crypt) { t.Errorf("block=%d frag=%d: want %d bytes, got %d", block, frag, len(crypt), len(data)) continue } if string(data) != string(&crypt) { t.Errorf("block=%d frag=%d: want %x got %x", block, frag, data, crypt) } } } }
// testXorWriter encrypts a known plaintext with a counter ("inc") keystream
// at several block sizes and verifies that an xorWriter produces the
// expected ciphertext.  maxio bounds the block sizes exercised.
func testXorWriter(t *testing.T, maxio int) {
	var plain, crypt [256]byte
	for i := 0; i < len(plain); i++ {
		plain[i] = byte(i)
	}
	b := new(bytes.Buffer)
	for block := 1; block <= 64 && block <= maxio; block *= 2 {
		// compute encrypted version
		n := byte(0)
		for i := 0; i < len(crypt); i++ {
			if i%block == 0 {
				n++
			}
			crypt[i] = plain[i] ^ n
			n++
		}

		for frag := 0; frag < 2; frag++ {
			test := fmt.Sprintf("block=%d frag=%d maxio=%d", block, frag, maxio)
			b.Reset()
			r := bytes.NewBuffer(plain[0:])
			s := newIncStream(block)
			w := newXorWriter(s, b)

			// copy plain into w in increasingly large chunks: 1, 1, 2, 4, 8, ...
			// if frag != 0, move the 1 to the end to cause fragmentation.
			if frag == 0 {
				_, err := io.Copyn(w, r, 1)
				if err != nil {
					t.Errorf("%s: first Copyn: %s", test, err)
					continue
				}
			}
			for n := 1; n <= len(plain)/2; n *= 2 {
				_, err := io.Copyn(w, r, int64(n))
				if err != nil {
					t.Errorf("%s: Copyn %d: %s", test, n, err)
				}
			}

			// check output
			// With frag != 0 the final plaintext byte was never written, so
			// compare against the correspondingly truncated ciphertext.
			crypt := crypt[0 : len(crypt)-frag]
			data := b.Bytes()
			if len(data) != len(crypt) {
				t.Errorf("%s: want %d bytes, got %d", test, len(crypt), len(data))
				continue
			}
			if string(data) != string(crypt) {
				t.Errorf("%s: want %x got %x", test, data, crypt)
			}
		}
	}
}
// Create registers a new regular file in the in-memory index and returns a
// fuse.File whose contents, once complete, are appended to the underlying
// tar stream as a single entry.  Creating a name that already exists in
// the index (file or directory) fails with EINVAL.
func (self *TarwFS) Create(name string, flags, mode uint32) (file fuse.File, eno fuse.Status) {
	//log.Printf("Create:%s", name)
	self.lock.Lock()
	_, exists := self.isDir[name]
	if !exists {
		self.files[name] = &os.FileInfo{Mode: mode | syscall.S_IFREG}
		self.isDir[name] = false
		self.lock.Unlock()
		start := time.Seconds()
		// The callback runs only when the file's content is complete: a tar
		// entry's header needs the final size, so header and body are
		// written together at that point.
		file = newFile(func(r io.Reader, rlen int64) (err os.Error) {
			// Re-acquire the lock: header + body must reach the tar writer
			// without interleaving from other files.
			self.lock.Lock()
			err = self.w.WriteHeader(&tar.Header{
				Typeflag: tar.TypeReg,
				Name:     name,
				Mode:     int64(mode),
				Size:     rlen,
				Ctime:    start,
				Mtime:    time.Seconds(),
				Atime:    time.Seconds(),
			})
			if err == nil {
				_, err = io.Copyn(self.w, r, rlen)
			}
			self.lock.Unlock()
			return
		})
		eno = fuse.OK
	} else {
		self.lock.Unlock()
		eno = fuse.EINVAL
	}
	return
}
func (cl *CommitLog) Execute(mutation Mutation) { typ := reflect.TypeOf(mutation) mutInfo, found := cl.mutationsType[typ] if !found { log.Fatal("CommitLog: Tried to execute an unknown mutation: %s of type %s", mutation, typ) } // write the modification into a buffer buf := buffer.New() buf.WriteUint8(mutInfo.id) // mutation id mutation.Serialize(buf) // mutation record buf.WriteInt64(buf.Size + 8) // record length // write record to disk buf.Seek(0, 0) cl.fdMutex.Lock() cl.fd.Seek(cl.writePtr, 0) io.Copyn(cl.fd, buf, buf.Size) // update commit log write pointer cl.fd.Seek(HEADER_WRITE_POS, 0) cl.writePtr += buf.Size cl.typedFd.WriteInt64(cl.writePtr) cl.fdMutex.Unlock() // execute the mutation mutation.Execute() }
func (p *TBinaryProtocol) WriteBinaryFromReader(reader io.Reader, size int) TProtocolException { e := p.WriteI32(int32(size)) if e != nil { return e } _, err := io.Copyn(p.trans, reader, int64(size)) return NewTProtocolExceptionFromOsError(err) }
func serveFile(c *ServiceContext, file string) bool { var err os.Error var f *os.File var t *time.Time var modified int64 file = path.Clean(file) if f, err = os.Open(file, os.O_RDONLY, 0); err != nil { c.Status(404) return false } defer f.Close() stat, _ := f.Stat() modified = stat.Mtime_ns / 1e9 if v, ok := c.Req.Header["If_Modified_Since"]; ok { v = v[0:len(v)-3] + "UTC" if t, err = time.Parse(v, time.RFC1123); err != nil { fmt.Fprintf(os.Stderr, "Unrecognized time format in If_Modified_Since header: %s", v) return false } if modified > t.Seconds() { c.Status(200) } else { c.Status(304) } return true } if ctype := mime.TypeByExtension(path.Ext(file)); ctype != "" { c.SetHeaders(200, 2592000, ctype, modified) } else { var data []byte var num int64 buf := bytes.NewBuffer(data) if num, err = io.Copyn(buf, f, 1024); err != nil { c.Status(500) return false } data = buf.Bytes() if isText(data[0:num]) { c.SetHeaders(200, 2592000, "text/plain; charset=utf-8", modified) } else { c.SetHeaders(200, 2592000, "application/octet-stream", modified) } } _, err = c.SendReadWriter(f) return err == nil }
// Skip any unread bytes in the existing file entry, as well as any alignment padding. func (tr *Reader) skipUnread() { nr := tr.nb + tr.pad // number of bytes to skip if sr, ok := tr.r.(io.Seeker); ok { _, tr.err = sr.Seek(nr, 1) } else { _, tr.err = io.Copyn(ignoreWriter{}, tr.r, nr) } tr.nb, tr.pad = 0, 0 }
// Skip any unread bytes in the existing file entry, as well as any alignment padding.
func (tr *Reader) skipUnread() {
	nr := tr.nb + tr.pad // number of bytes to skip
	tr.nb, tr.pad = 0, 0
	// Prefer seeking past the unread bytes, but fall back to reading and
	// discarding them when the underlying reader cannot actually seek
	// (pipes, sockets, ...).
	if sr, ok := tr.r.(io.Seeker); ok {
		if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
			return
		}
	}
	_, tr.err = io.Copyn(ignoreWriter{}, tr.r, nr)
}
func (ltc *LocalTempCopy) Exec(srcStore fs.BlockStore) (err os.Error) { _, err = ltc.Temp.localFh.Seek(ltc.LocalOffset, 0) if err != nil { return err } _, err = ltc.Temp.tempFh.Seek(ltc.TempOffset, 0) if err != nil { return err } _, err = io.Copyn(ltc.Temp.tempFh, ltc.Temp.localFh, ltc.Length) return err }
func (me *ZipFile) Data() []byte { zf := (*me) rc, err := zf.Open() if err != nil { panic(err) } dest := bytes.NewBuffer(make([]byte, 0, me.UncompressedSize)) _, err = io.Copyn(dest, rc, int64(me.UncompressedSize)) if err != nil { panic(err) } return dest.Bytes() }
func echoClient(c net.Conn) { // Close the socket when the function returns defer c.Close() // Copy from Stdin to the socket (Untill EOF is received) nr, err := io.Copy(c, os.Stdin) if err != nil { panic("Copy os.Stdio -> c: ", err.String()) } // Copy from the socket to Stdout nw, err := io.Copyn(os.Stdout, c, nr) if err != nil { panic("Copy c -> os.Stdout: ", err.String()) } else if nr != nw { panic("nr != nw") } }
func main() { goopt.Parse(func() []string { return []string{} }) if len(goopt.Args) < 1 { fmt.Println("You need to provide a go file argument.") os.Exit(1) } if *outname == "FILENAME" { *outname = goopt.Args[0][0 : len(goopt.Args[0])-3] } fs := make([]string, len(goopt.Args)+1) for i, f := range goopt.Args { fs[i] = f } fs[len(goopt.Args)] = "testing.go" makeSource("testing.go") e := compile.Compile(*outname, fs) if e != nil { fmt.Println(e) os.Exit(1) } if e != nil { return } fi, err := os.Stat(*outname) if err != nil { return } enc0, err := os.Open(*outname+".encrypted", os.O_WRONLY+os.O_TRUNC+os.O_CREAT, 0644) if err != nil { return } enc, err := crypt.Encrypt(key, privatekey, enc0, fi.Size, sequence) if err != nil { return } plain, err := os.Open(*outname, os.O_RDONLY, 0644) if err != nil { return } _, err = io.Copyn(enc, plain, fi.Size) if err != nil { return } }
// Read reads the body of a part, after its headers and before the
// next part (if any) begins.
func (bp *Part) Read(p []byte) (n int, err os.Error) {
	if bp.buffer.Len() >= len(p) {
		// Internal buffer of unconsumed data is large enough for
		// the read request.  No need to parse more at the moment.
		return bp.buffer.Read(p)
	}
	peek, err := bp.mr.bufReader.Peek(4096) // TODO(bradfitz): add buffer size accessor
	// EOF from Peek only means the stream ended inside this part's body.
	unexpectedEof := err == os.EOF
	if err != nil && !unexpectedEof {
		return 0, fmt.Errorf("multipart: Part Read: %v", err)
	}
	if peek == nil {
		panic("nil peek buf")
	}
	// Search the peek buffer for "\r\n--boundary". If found,
	// consume everything up to the boundary. If not, consume only
	// as much of the peek buffer as cannot hold the boundary
	// string.
	nCopy := 0
	foundBoundary := false
	if idx := bytes.Index(peek, bp.mr.nlDashBoundary); idx != -1 {
		nCopy = idx
		foundBoundary = true
	} else if safeCount := len(peek) - len(bp.mr.nlDashBoundary); safeCount > 0 {
		nCopy = safeCount
	} else if unexpectedEof {
		// If we've run out of peek buffer and the boundary
		// wasn't found (and can't possibly fit), we must have
		// hit the end of the file unexpectedly.
		return 0, io.ErrUnexpectedEOF
	}
	if nCopy > 0 {
		// Move the known-safe bytes into the part's internal buffer,
		// then serve the caller from it below.
		if _, err := io.Copyn(bp.buffer, bp.mr.bufReader, int64(nCopy)); err != nil {
			return 0, err
		}
	}
	n, err = bp.buffer.Read(p)
	if err == os.EOF && !foundBoundary {
		// If the boundary hasn't been reached there's more to
		// read, so don't pass through an EOF from the buffer
		err = nil
	}
	return
}
func (store *localBase) readInto(path string, from int64, length int64, writer io.Writer) (int64, os.Error) { fh, err := os.Open(path) if fh == nil { return 0, err } _, err = fh.Seek(from, 0) if err != nil { return 0, err } n, err := io.Copyn(writer, fh, length) if err != nil { return n, err } return n, nil }
// isSchemaPicker returns a storageFunc that routes an incoming blob to
// thenSto when its first megabyte parses as a schema blob (valid JSON with
// a non-empty Type), and to elseSto otherwise.  The bytes consumed while
// sniffing are handed back via overRead so the caller can replay them.
func isSchemaPicker(thenSto, elseSto blobserver.Storage) storageFunc {
	return func(src io.Reader) (dest blobserver.Storage, overRead []byte, err os.Error) {
		// TODO: make decision earlier, by parsing JSON as it comes in,
		// not after we have up to 1 MB.
		var buf bytes.Buffer
		_, err = io.Copyn(&buf, src, 1<<20)
		// os.EOF just means the blob is smaller than 1 MB.
		if err != nil && err != os.EOF {
			return
		}
		ss := new(schema.Superset)
		if err = json.NewDecoder(bytes.NewBuffer(buf.Bytes())).Decode(ss); err != nil {
			log.Printf("cond: json parse failure => not schema => else")
			return elseSto, buf.Bytes(), nil
		}
		if ss.Type == "" {
			log.Printf("cond: json => but not schema => else")
			return elseSto, buf.Bytes(), nil
		}
		log.Printf("cond: json => schema => then")
		return thenSto, buf.Bytes(), nil
	}
}
/*
 * Read
 */

// Read streams size bytes of path at the given offset/version into writer.
// The request is sent to the node that owns the path (resolved via the
// ring), or handled locally when the context forces it; the call blocks
// until OnResponse/OnError signals completion.
func (fss *FsService) Read(path *Path, offset int64, size int64, version int64, writer io.Writer, context *Context) (returnReadN int64, returnError os.Error) {
	if context == nil {
		context = fss.NewContext()
	}

	message := fss.comm.NewMsgMessage(fss.serviceId)
	message.Function = "RemoteRead"
	context.ApplyContext(message)

	// write payload
	message.Message.WriteString(path.String())    // path
	message.Message.WriteInt64(offset)            // offset
	message.Message.WriteInt64(size)              // size
	message.Message.WriteInt64(version)           // version
	message.Message.WriteBool(context.ForceLocal) // force local

	// On success, stream the response data straight into the caller's
	// writer before releasing the waiter.
	message.OnResponse = func(response *comm.Message) {
		returnReadN, returnError = io.Copyn(writer, response.Data, response.DataSize)
		message.Wait <- true
	}
	message.OnError = func(response *comm.Message, error os.Error) {
		returnError = error
		message.Wait <- false
	}

	if context.ForceLocal {
		// handle locally
		fss.comm.SendNode(fss.cluster.MyNode, message)
	} else {
		resolveResult := fss.ring.Resolve(path.String())
		fss.comm.SendOne(resolveResult, message)
	}

	// Block until one of the callbacks above has run.
	<-message.Wait

	return
}
func (pf *PacmanFetcher) httpDownload(url *http.URL) (string, os.Error) { req, err := http.NewRequest("GET", url.String(), nil) if err != nil { return "", err } req.UserAgent = MAW_USERAGENT resp, err := http.DefaultClient.Do(req) if err != nil { return "", err } defer resp.Body.Close() if resp.StatusCode != 200 { return "", os.NewError("Download of " + url.String() + " failed: HTTP " + resp.Status) } _, filename := path.Split(url.Path) destpath := path.Join(pf.pkgdest, filename) destfile, err := os.Create(destpath) if err != nil { return "", err } defer destfile.Close() if resp.ContentLength < 0 { _, err = io.Copy(destfile, resp.Body) } else { _, err = io.Copyn(destfile, resp.Body, resp.ContentLength) } if err != nil { return "", err } return destpath, nil }
// copyn streams exactly n bytes from r into the record writer, panicking
// on any error (including a short read).
func (rw *recWriter) copyn(r io.Reader, n uint32) {
	_, err := io.Copyn(rw, r, int64(n))
	if err != nil {
		panic(err)
	}
}
// testECBDecrypter decrypts a known ciphertext through several reader
// wrappers (raw, one-byte, half) and chunking patterns, checking that the
// ECB decrypter reproduces the plaintext regardless of read fragmentation.
// maxio bounds both the block sizes and the amount of data exercised.
func testECBDecrypter(t *testing.T, maxio int) {
	var readers = []func(io.Reader) io.Reader{
		// raw reader plus wrappers that force short reads
		func(r io.Reader) io.Reader { return r },
		iotest.OneByteReader,
		iotest.HalfReader,
	}
	var plain, crypt [256]byte
	for i := 0; i < len(plain); i++ {
		plain[i] = byte(255 - i)
	}
	b := new(bytes.Buffer)
	for block := 1; block <= 64 && block <= maxio; block *= 2 {
		// compute encrypted version
		delta := byte(0)
		for i := 0; i < len(crypt); i++ {
			if i%block == 0 {
				delta++
			}
			crypt[i] = plain[i] + delta
		}

		for mode := 0; mode < len(readers); mode++ {
			for frag := 0; frag < 2; frag++ {
				test := fmt.Sprintf("block=%d mode=%d frag=%d maxio=%d", block, mode, frag, maxio)
				c := &IncCipher{block, 0, false}
				b.Reset()
				r := NewECBDecrypter(c, readers[mode](bytes.NewBuffer(crypt[0:maxio])))

				// read from crypt in increasingly large chunks: 1, 1, 2, 4, 8, ...
				// if frag == 1, move the 1 to the end to cause fragmentation.
				if frag == 0 {
					_, err := io.Copyn(b, r, 1)
					if err != nil {
						t.Errorf("%s: first Copyn: %s", test, err)
						continue
					}
				}
				for n := 1; n <= maxio/2; n *= 2 {
					_, err := io.Copyn(b, r, int64(n))
					if err != nil {
						t.Errorf("%s: Copyn %d: %s", test, n, err)
					}
				}
				if frag != 0 {
					_, err := io.Copyn(b, r, 1)
					if err != nil {
						t.Errorf("%s: last Copyn: %s", test, err)
						continue
					}
				}

				// check output
				data := b.Bytes()
				if len(data) != maxio {
					t.Errorf("%s: want %d bytes, got %d", test, maxio, len(data))
					continue
				}
				if string(data) != string(plain[0:maxio]) {
					t.Errorf("%s: input=%x want %x got %x", test, crypt[0:maxio], plain[0:maxio], data)
				}
			}
		}
	}
}
func (p *PassThroughMediaTypeHandler) OutputTo(req Request, cxt Context, writer io.Writer, resp ResponseWriter) { if !p.writtenStatusHeader { resp.WriteHeader(200) p.writtenStatusHeader = true } if req.Header().Get("Accept-Ranges") == "bytes" { rangeHeader := req.Header().Get("Range") if len(rangeHeader) > 6 && rangeHeader[0:6] == "bytes=" { ranges := p.splitRangeHeaderString(rangeHeader) outRangeString := "bytes=" for i, arange := range ranges { if i > 0 { outRangeString += "," } outRangeString += strconv.Itoa64(arange[0]) + "-" + strconv.Itoa64(arange[1]-1) } outRangeString += "/" + strconv.Itoa64(p.numberOfBytes) resp.Header().Set("Content-Range", "bytes="+outRangeString) currentOffset := int64(0) for _, arange := range ranges { start := arange[0] end := arange[1] var err os.Error if currentOffset < start { if seeker, ok := p.reader.(io.Seeker); ok { currentOffset, err = seeker.Seek(start-currentOffset, 1) if err != nil { return } } else { if start-currentOffset >= 32768 { buf := make([]byte, 32768) for ; currentOffset+32768 < start; currentOffset += 32768 { if _, err = io.ReadFull(p.reader, buf); err != nil { return } } } if currentOffset < start { buf := make([]byte, start-currentOffset) if _, err = io.ReadFull(p.reader, buf); err != nil { return } } } } if req.Method() == HEAD { return } for currentOffset < end { written, err := io.Copyn(writer, p.reader, end-currentOffset) currentOffset += written if err != nil { return } } } return } } if req.Method() == HEAD { return } currentOffset := int64(0) //log.Print("[PTMTH]: Writer ", writer, "\n[PTMTH]: Reader ", p.reader, "\n[PTMTH]: numBytes ", p.numberOfBytes, "\n[PTMTH]: currentOffset ", currentOffset, "\n") for currentOffset < int64(p.numberOfBytes) { bytesToSend := p.numberOfBytes - currentOffset data := make([]byte, bytesToSend+10000) numBytesRead, err := p.reader.Read(data[0:bytesToSend]) currentOffset += int64(numBytesRead) if err != nil { return } //log.Print("[PTMTH]: About to write ", 
len(data[0:bytesToSend]), " bytes to the writer\n") _, err = writer.Write(data[0:bytesToSend]) if err != nil { return } //written, err := io.Copyn(writer, p.reader, p.numberOfBytes - currentOffset) //if err != nil { // return //} //currentOffset += int64(written) } }
// TestTransportGzip checks transparent gzip decoding by the transport, in
// both chunked and Content-Length modes, for a response that is only
// partially read and for one that is read to completion.
func TestTransportGzip(t *testing.T) {
	const testString = "The test string aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	const nRandBytes = 1024 * 1024
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
		if g, e := req.Header.Get("Accept-Encoding"), "gzip"; g != e {
			t.Errorf("Accept-Encoding = %q, want %q", g, e)
		}
		rw.Header().Set("Content-Encoding", "gzip")
		var w io.Writer = rw
		var buf bytes.Buffer
		if req.FormValue("chunked") == "0" {
			// Buffer the body so its length can be set before it is
			// copied out (the deferred calls run in LIFO order).
			w = &buf
			defer io.Copy(rw, &buf)
			defer func() {
				rw.Header().Set("Content-Length", strconv.Itoa(buf.Len()))
			}()
		}
		gz, _ := gzip.NewWriter(w)
		gz.Write([]byte(testString))
		if req.FormValue("body") == "large" {
			io.Copyn(gz, rand.Reader, nRandBytes)
		}
		gz.Close()
	}))
	defer ts.Close()

	for _, chunked := range []string{"1", "0"} {
		c := &Client{Transport: &Transport{}}

		// First fetch something large, but only read some of it.
		res, _, err := c.Get(ts.URL + "?body=large&chunked=" + chunked)
		if err != nil {
			t.Fatalf("large get: %v", err)
		}
		buf := make([]byte, len(testString))
		n, err := io.ReadFull(res.Body, buf)
		if err != nil {
			t.Fatalf("partial read of large response: size=%d, %v", n, err)
		}
		if e, g := testString, string(buf); e != g {
			t.Errorf("partial read got %q, expected %q", g, e)
		}
		res.Body.Close()
		// Read on the body, even though it's closed
		n, err = res.Body.Read(buf)
		if n != 0 || err == nil {
			t.Errorf("expected error post-closed large Read; got = %d, %v", n, err)
		}

		// Then something small.
		res, _, err = c.Get(ts.URL + "?chunked=" + chunked)
		if err != nil {
			t.Fatal(err)
		}
		body, err := ioutil.ReadAll(res.Body)
		if err != nil {
			t.Fatal(err)
		}
		if g, e := string(body), testString; g != e {
			t.Fatalf("body = %q; want %q", g, e)
		}
		if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
			t.Fatalf("Content-Encoding = %q; want %q", g, e)
		}

		// Read on the body after it's been fully read:
		n, err = res.Body.Read(buf)
		if n != 0 || err == nil {
			t.Errorf("expected Read error after exhausted reads; got %d, %v", n, err)
		}
		res.Body.Close()
		n, err = res.Body.Read(buf)
		if n != 0 || err == nil {
			t.Errorf("expected Read error after Close; got %d, %v", n, err)
		}
	}
}
// Write writes an HTTP/1.1 request -- header and body -- in wire format.
// This method consults the following fields of req:
//	URL
//	Method (defaults to "GET")
//	UserAgent (defaults to defaultUserAgent)
//	Referer
//	Header
//	Body
//
// If Body is present, and is a Seeker, then "Content-length" is forced as a
// header, else if Body is present "Transfer-Encoding: chunked" is forced as a header.
func (req *Request) Write(w io.Writer) os.Error {
	uri := urlEscape(req.URL.Path, false)
	if req.URL.RawQuery != "" {
		uri += "?" + req.URL.RawQuery
	}

	fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), uri)
	fmt.Fprintf(w, "Host: %s\r\n", req.URL.Host)
	fmt.Fprintf(w, "User-Agent: %s\r\n", valueOrDefault(req.UserAgent, defaultUserAgent))
	if req.Referer != "" {
		fmt.Fprintf(w, "Referer: %s\r\n", req.Referer)
	}

	useContentLength := false
	contentLength := int64(0)
	if req.Body != nil {
		var seeker io.Seeker
		seeker, useContentLength = req.Body.(io.Seeker)
		if useContentLength {
			// Seek to the end of the stream and back to
			// discover the content length.
			currentPos, err := seeker.Seek(0, 1)
			if err != nil {
				return err
			}
			endPos, err := seeker.Seek(0, 2)
			if err != nil {
				return err
			}
			_, err = seeker.Seek(currentPos, 0)
			if err != nil {
				return err
			}
			contentLength = endPos - currentPos
			// Force Content-length
			req.Header["Content-length"] = strconv.Itoa64(contentLength)
		} else {
			// Force chunked encoding
			req.Header["Transfer-Encoding"] = "chunked"
		}
	}

	// TODO: split long values?  (If so, should share code with Conn.Write)
	// TODO: if Header includes values for Host, User-Agent, or Referer, this
	// may conflict with the User-Agent or Referer headers we add manually.
	// One solution would be to remove the Host, UserAgent, and Referer fields
	// from Request, and introduce Request methods along the lines of
	// Response.{GetHeader,AddHeader} and string constants for "Host",
	// "User-Agent" and "Referer".
	for k, v := range req.Header {
		// Host, User-Agent, and Referer were sent from structure fields
		// above; ignore them if they also appear in req.Header.
		if k == "Host" || k == "User-Agent" || k == "Referer" {
			continue
		}
		io.WriteString(w, k+": "+v+"\r\n")
	}

	io.WriteString(w, "\r\n")

	if req.Body != nil {
		if useContentLength {
			_, ec := io.Copyn(w, req.Body, contentLength)
			if ec != nil {
				return ec
			}
		} else {
			// Chunked transfer: emit each read as a "size CRLF data CRLF"
			// chunk until the body reports EOF, then the terminating
			// zero-length chunk.
			buf := make([]byte, chunkSize)
		Loop:
			for {
				var nr, nw int
				var er, ew os.Error
				if nr, er = req.Body.Read(buf); nr > 0 {
					if er == nil || er == os.EOF {
						fmt.Fprintf(w, "%x\r\n", nr)
						nw, ew = w.Write(buf[0:nr])
						fmt.Fprint(w, "\r\n")
					}
				}
				switch {
				case er != nil:
					if er == os.EOF {
						break Loop
					}
					return er
				case ew != nil:
					return ew
				case nw < nr:
					return io.ErrShortWrite
				}
			}
			// last-chunk CRLF
			fmt.Fprint(w, "0\r\n\r\n")
		}
	}

	return nil
}
func (r *Message) writeMessage(writer io.Writer) (err os.Error) { twriter := typedio.NewWriter(writer) err = twriter.WriteUint16(r.Id) // message id if err != nil { return } // prepare flags var flags byte if r.InitId != 0 { flags = flags | prm_has_init_msg_id } if r.Type == T_DATA { flags = flags | prm_is_data } if r.srcNodeAdhoc { flags = flags | prm_src_node_adhoc } if r.middleNodePresent { flags = flags | prm_has_middle_node } if r.middleNodeAdhoc { flags = flags | prm_middle_node_adhoc } err = twriter.WriteUint8(flags) // flags if err != nil { return } if r.InitId != 0 { err = twriter.WriteUint16(r.InitId) // initial message id if err != nil { return } } err = twriter.WriteUint8(r.ServiceId) // service id if err != nil { return } msgSize := uint16(r.Message.Size) err = twriter.WriteUint16(msgSize) // message size if err != nil { return } if r.Type == T_DATA { twriter.WriteInt64(r.DataSize) // data size if err != nil { return } } if r.srcNodeAdhoc { // source node information err = twriter.WriteString(r.srcNodeAdr.String()) // addr if err != nil { return } err = twriter.WriteUint16(r.srcNodeTcpPort) // tcp port if err != nil { return } err = twriter.WriteUint16(r.srcNodeUdpPort) // udp port if err != nil { return } } else { err = twriter.WriteUint16(r.srcNodeId) // node id if err != nil { return } } if r.middleNodePresent { // middle node information if r.middleNodeAdhoc { err = twriter.WriteString(r.middleNodeAdr.String()) // addr if err != nil { return } err = twriter.WriteUint16(r.middleNodeTcpPort) // tcp port if err != nil { return } err = twriter.WriteUint16(r.middleNodeUdpPort) // udp port if err != nil { return } } else { err = twriter.WriteUint16(r.middleNodeId) // node id if err != nil { return } } } err = twriter.WriteUint8(r.FunctionId) // function id if err != nil { return } // Write message r.Message.Seek(0, 0) w, err := io.Copyn(writer, r.Message, r.Message.Size) // message if err != nil { log.Error("Couldn't write message message to writer: 
%s\n", err) return err } if w != int64(msgSize) { log.Error("Couldn't write the whole message message to write: written %d out of %d\n", w, msgSize) return os.NewError("Message write truncated") } // Write data if r.Type == T_DATA { io.Copyn(writer, r.Data, r.DataSize) // data } return nil }
func (r *Message) readMessage(reader io.Reader) (err os.Error) { treader := typedio.NewReader(reader) r.Id, err = treader.ReadUint16() // message id if err != nil { return } flags, err := treader.ReadUint8() // flags if err != nil { return } hasInitId := false if flags&prm_has_init_msg_id == prm_has_init_msg_id { hasInitId = true } if flags&prm_is_data == prm_is_data { r.Type = T_DATA } else { r.Type = T_MSG } if flags&prm_src_node_adhoc == prm_src_node_adhoc { r.srcNodeAdhoc = true } if flags&prm_has_middle_node == prm_has_middle_node { r.middleNodePresent = true } if flags&prm_middle_node_adhoc == prm_middle_node_adhoc { r.middleNodeAdhoc = true } if hasInitId { r.InitId, err = treader.ReadUint16() // initial message id if err != nil { return } } r.ServiceId, err = treader.ReadUint8() // service id if err != nil { return } msgSize, err := treader.ReadUint16() // message size if err != nil { return } if r.Type == T_DATA { r.DataSize, err = treader.ReadInt64() // data size if err != nil { return } } if r.srcNodeAdhoc { // source node information adr, err := treader.ReadString() // addr if err != nil { return } r.srcNodeAdr = net.ParseIP(adr) r.srcNodeTcpPort, err = treader.ReadUint16() // tcp port if err != nil { return } r.srcNodeUdpPort, err = treader.ReadUint16() // udp port if err != nil { return } } else { r.srcNodeId, err = treader.ReadUint16() // node id if err != nil { return } } // TODO: Adhoc if r.middleNodePresent { if r.middleNodeAdhoc { adr, err := treader.ReadString() // addr if err != nil { return } r.middleNodeAdr = net.ParseIP(adr) r.middleNodeTcpPort, err = treader.ReadUint16() // tcp port if err != nil { return } r.middleNodeUdpPort, err = treader.ReadUint16() // udp port if err != nil { return } } else { r.middleNodeId, err = treader.ReadUint16() // node id if err != nil { return } } } r.FunctionId, err = treader.ReadUint8() // function id if err != nil { return } // Load message r.Message = buffer.NewWithSize(int64(msgSize), false) // message 
n, err := io.Copyn(r.Message, reader, int64(msgSize)) r.Message.Seek(0, 0) if err != nil { log.Error("COMM: Got an error reading message from message: %s", err) return err } if n != int64(msgSize) { log.Error("COMM: Couldn't read the whole message. Read %d out of %d", n, msgSize) return os.NewError("Message truncated") } // release the connection if its a message if r.Type == T_MSG { r.Release() } else { r.Data = reader } return nil }
// MimeTypeFromReader takes a reader, sniffs the beginning of it, // and returns the mime (if sniffed, else "") and a new reader // that's the concatenation of the bytes sniffed and the remaining // reader. func MimeTypeFromReader(r io.Reader) (mime string, reader io.Reader) { var buf bytes.Buffer io.Copyn(&buf, r, 1024) mime = MimeType(buf.Bytes()) return mime, io.MultiReader(&buf, r) }
func (p *TSimpleJSONProtocol) OutputStringData(s string) TProtocolException { _, e := io.Copyn(p.writer, strings.NewReader(s), int64(len(s))) return NewTProtocolExceptionFromOsError(e) }
// serveFile replies to the request with the contents of the named file or
// directory, handling index pages, canonical-path redirects,
// If-Modified-Since, content-type sniffing, and single-range Range requests.
func serveFile(w ResponseWriter, r *Request, name string, redirect bool) {
	const indexPage = "/index.html"

	// redirect .../index.html to .../
	if strings.HasSuffix(r.URL.Path, indexPage) {
		Redirect(w, r, r.URL.Path[0:len(r.URL.Path)-len(indexPage)+1], StatusMovedPermanently)
		return
	}

	f, err := os.Open(name)
	if err != nil {
		// TODO expose actual error?
		NotFound(w, r)
		return
	}
	defer f.Close()

	d, err1 := f.Stat()
	if err1 != nil {
		// TODO expose actual error?
		NotFound(w, r)
		return
	}

	if redirect {
		// redirect to canonical path: / at end of directory url
		// r.URL.Path always begins with /
		url := r.URL.Path
		if d.IsDirectory() {
			if url[len(url)-1] != '/' {
				Redirect(w, r, url+"/", StatusMovedPermanently)
				return
			}
		} else {
			if url[len(url)-1] == '/' {
				Redirect(w, r, url[0:len(url)-1], StatusMovedPermanently)
				return
			}
		}
	}

	// A parse failure leaves t == nil, which disables the 304 path.
	if t, _ := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); t != nil && d.Mtime_ns/1e9 <= t.Seconds() {
		w.WriteHeader(StatusNotModified)
		return
	}
	w.Header().Set("Last-Modified", time.SecondsToUTC(d.Mtime_ns/1e9).Format(TimeFormat))

	// use contents of index.html for directory, if present
	if d.IsDirectory() {
		index := name + filepath.FromSlash(indexPage)
		ff, err := os.Open(index)
		if err == nil {
			defer ff.Close()
			dd, err := ff.Stat()
			if err == nil {
				name = index
				d = dd
				f = ff
			}
		}
	}

	if d.IsDirectory() {
		dirList(w, f)
		return
	}

	// serve file
	size := d.Size
	code := StatusOK

	// If Content-Type isn't set, use the file's extension to find it.
	if w.Header().Get("Content-Type") == "" {
		ctype := mime.TypeByExtension(filepath.Ext(name))
		if ctype == "" {
			// read a chunk to decide between utf-8 text and binary
			var buf [1024]byte
			n, _ := io.ReadFull(f, buf[:])
			b := buf[:n]
			if isText(b) {
				ctype = "text/plain; charset=utf-8"
			} else {
				// generic binary
				ctype = "application/octet-stream"
			}
			f.Seek(0, os.SEEK_SET) // rewind to output whole file
		}
		w.Header().Set("Content-Type", ctype)
	}

	// handle Content-Range header.
	// TODO(adg): handle multiple ranges
	ranges, err := parseRange(r.Header.Get("Range"), size)
	if err == nil && len(ranges) > 1 {
		err = os.ErrorString("multiple ranges not supported")
	}
	if err != nil {
		Error(w, err.String(), StatusRequestedRangeNotSatisfiable)
		return
	}
	if len(ranges) == 1 {
		ra := ranges[0]
		if _, err := f.Seek(ra.start, os.SEEK_SET); err != nil {
			Error(w, err.String(), StatusRequestedRangeNotSatisfiable)
			return
		}
		size = ra.length
		code = StatusPartialContent
		w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra.start, ra.start+ra.length-1, d.Size))
	}

	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set("Content-Length", strconv.Itoa64(size))

	w.WriteHeader(code)

	if r.Method != "HEAD" {
		// Copy errors are ignored: the status line is already out, so
		// there is nothing useful left to report to the client.
		io.Copyn(w, f, size)
	}
}
func getFormValue(p *multipart.Part) string { var b bytes.Buffer io.Copyn(&b, p, int64(1<<20)) // Copy max: 1 MiB return b.String() }
func (p *PassThroughMediaTypeInputHandler) OutputTo(req Request, cxt Context, writer io.Writer) (int, http.Header, os.Error) { fileInfo, err := os.Stat(p.filename) var file *os.File m := make(map[string]string) w := json.NewEncoder(writer) dirname, _ := path.Split(p.filename) file = nil defer func() { if file != nil { file.Close() } }() if fileInfo == nil { if err = os.MkdirAll(dirname, 0644); err != nil { log.Print("[PTMTIH]: Unable to create directory to store file due to error: ", err) headers := make(http.Header) //headers.Set("Content-Type", MIME_TYPE_JSON) m["status"] = "error" m["message"] = err.String() m["result"] = p.urlPath w.Encode(m) return 500, headers, err } if file, err = os.OpenFile(p.filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil { log.Print("[PTMTIH]: Unable to create file named: \"", p.filename, "\" due to error: ", err) headers := make(http.Header) //headers.Set("Content-Type", MIME_TYPE_JSON) m["status"] = "error" m["message"] = err.String() m["result"] = p.urlPath w.Encode(m) return 500, headers, err } } else { if p.append { file, err = os.OpenFile(p.filename, os.O_WRONLY|os.O_APPEND, 0644) } else { file, err = os.OpenFile(p.filename, os.O_WRONLY|os.O_TRUNC, 0644) } if err != nil { log.Print("[PTMTIH]: Unable to open file \"", p.filename, "\"for writing due to error: ", err) headers := make(http.Header) //headers.Set("Content-Type", MIME_TYPE_JSON) m["status"] = "error" m["message"] = err.String() m["result"] = p.urlPath w.Encode(m) return 500, headers, err } } var n int64 if p.numberOfBytes >= 0 { n, err = io.Copyn(file, p.reader, p.numberOfBytes) } else { n, err = io.Copy(file, p.reader) } log.Print("[PTMTIH]: Wrote ", n, " bytes to file with error: ", err) if err != nil && err != os.EOF { headers := make(http.Header) //headers.Set("Content-Type", MIME_TYPE_JSON) m["status"] = "error" m["message"] = err.String() m["result"] = p.urlPath w.Encode(m) return 500, headers, err } headers := make(http.Header) 
//headers.Set("Content-Type", MIME_TYPE_JSON) m["status"] = "success" m["message"] = "" m["result"] = p.urlPath w.Encode(m) return 200, headers, nil }
// copyn streams exactly n bytes from the record reader into w, panicking
// on any error (including a short read).
func (rr *recReader) copyn(w io.Writer, n uint32) {
	_, err := io.Copyn(w, rr, int64(n))
	if err != nil {
		panic(err)
	}
}