// sendBodySplitIntoChunk reads from r until EOF, forwarding the data to w
// using chunked transfer encoding.
func sendBodySplitIntoChunk(w io.Writer, r *bufio.Reader) (err error) {
	// debug.Printf("sendBodySplitIntoChunk called\n")
	var b []byte
	for {
		b, err = r.ReadNext()
		// debug.Println("split into chunk n =", n, "err =", err)
		if err != nil {
			if err == io.EOF {
				// EOF is expected here as the server is closing the connection.
				// debug.Println("end chunked encoding")
				_, err = w.Write([]byte(chunkEnd))
				if err != nil {
					debug.Println("write chunk end 0", err)
				}
				return
			}
			debug.Println("read error in sendBodySplitIntoChunk", err)
			return
		}

		chunkSize := []byte(fmt.Sprintf("%x\r\n", len(b)))
		if _, err = w.Write(chunkSize); err != nil {
			debug.Printf("write chunk size %v\n", err)
			return
		}
		if _, err = w.Write(b); err != nil {
			debug.Println("write chunk data:", err)
			return
		}
		if _, err = w.Write([]byte(CRLF)); err != nil {
			debug.Println("write chunk ending CRLF:", err)
			return
		}
	}
}
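// For reference, the chunked output produced above looks like this on the
// wire (sketch, sizes in hex):
//
//	b\r\n
//	hello world\r\n
//	...more chunks...
//	0\r\n
//	\r\n
//
// Each loop iteration writes one size line, the chunk data, and a CRLF;
// chunkEnd is presumably the terminating "0\r\n\r\n" sequence.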
// copyN copies n bytes from src to dst, reading at most rdSize bytes on each
// read. rdSize should be <= the buffer size of the buffered reader.
// Returns any encountered error.
func copyN(dst io.Writer, src *bufio.Reader, n, rdSize int) (err error) {
	// Most of this is adapted from io.Copy.
	for n > 0 {
		var b []byte
		var er error
		if n > rdSize {
			b, er = src.ReadN(rdSize)
		} else {
			b, er = src.ReadN(n)
		}
		nr := len(b)
		n -= nr
		if nr > 0 {
			nw, ew := dst.Write(b)
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er == io.EOF {
			break
		}
		if er != nil {
			err = er
			break
		}
	}
	return err
}
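// Illustrative sketch (not part of the original code): forwarding a
// fixed-length body, e.g. one governed by Content-Length, with copyN. The
// function name and the 4096 read size are hypothetical; per the comment
// above, the read size should not exceed the reader's buffer size.
func copyFixedLengthBodyExample(w io.Writer, r *bufio.Reader, contLen int) error {
	// Copy exactly contLen bytes, reading at most 4096 bytes at a time.
	return copyN(w, r, contLen, 4096)
}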
func skipCRLF(r *bufio.Reader) error {
	// There may be servers that use a single '\n' for line ending, so read
	// until '\n' instead of expecting exactly "\r\n".
	if _, err := r.ReadSlice('\n'); err != nil {
		errl.Println("Error reading CRLF:", err)
		return err
	}
	return nil
}
// Send response body if the header specifies chunked encoding. rdSize
// specifies the size of each read on the Reader; it should be set to the
// buffer size of the Reader. This parameter is added for testing.
func sendBodyChunked(w io.Writer, r *bufio.Reader, rdSize int) (err error) {
	// debug.Println("Sending chunked body")
	for {
		var s []byte
		// Read chunk size line, ignore chunk extension if any.
		if s, err = r.PeekSlice('\n'); err != nil {
			errl.Println("peek chunk size:", err)
			return
		}
		smid := bytes.IndexByte(s, ';')
		if smid == -1 {
			smid = len(s)
		} else {
			// Use error log to find usage of chunk extension.
			errl.Printf("got chunk extension: %s\n", s)
		}
		var size int64
		if size, err = ParseIntFromBytes(TrimSpace(s[:smid]), 16); err != nil {
			errl.Println("chunk size invalid:", err)
			return
		}
		/*
			if debug {
				// To debug getting malformed response status line with "0\r\n".
				if c, ok := w.(*clientConn); ok {
					debug.Printf("cli(%s) chunk size %d %#v\n", c.RemoteAddr(), size, string(s))
				}
			}
		*/
		if size == 0 {
			r.Skip(len(s))
			if err = skipCRLF(r); err != nil {
				return
			}
			if _, err = w.Write([]byte(chunkEnd)); err != nil {
				debug.Println("send chunk ending:", err)
			}
			return
		}
		// RFC 2616 19.3 only suggests tolerating a single LF for headers,
		// not for chunked encoding. So assume the server will send CRLF.
		// If not, the following parse int may find errors.
		total := len(s) + int(size) + 2 // total data size for this chunk, including ending CRLF
		// PeekSlice does not advance the reader, so we can just copy total sized data.
		if err = copyN(w, r, total, rdSize); err != nil {
			debug.Println("copy chunked data:", err)
			return
		}
	}
}
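// Worked example for the total computation above (sketch): for the chunk
//
//	"1a;name=val\r\n" + 26 bytes of data + "\r\n"
//
// the size line s is 13 bytes long and its size field parses to 0x1a = 26,
// so total = 13 + 26 + 2 = 41 bytes are forwarded verbatim by copyN.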
// Only add headers that are of interest to a proxy into the request/response's header map.
func (h *Header) parseHeader(reader *bufio.Reader, raw *bytes.Buffer, url *URL) (err error) {
	h.ContLen = -1
	dummyLastLine := []byte{}
	// Read header lines one by one.
	var s, name, val, lastLine []byte
	for {
		if s, err = reader.ReadSlice('\n'); err != nil {
			return
		}
		// There are servers that use \n for line ending, so trim first before checking the ending.
		// For example, the 404 page for http://plan9.bell-labs.com/magic/man2html/1/2l
		trimmed := TrimSpace(s)
		if len(trimmed) == 0 { // end of headers
			return
		}
		if (s[0] == ' ' || s[0] == '\t') && lastLine != nil { // multi-line header
			// I've never seen a multi-line header used in headers that are of interest.
			// Disable multi-line support to avoid the copy for now.
			errl.Printf("Multi-line support disabled: %v %s", url, s)
			return errNotSupported
			// combine previous line with current line
			// trimmed = bytes.Join([][]byte{lastLine, []byte{' '}, trimmed}, nil)
		}
		if name, val, err = splitHeader(trimmed); err != nil {
			return
		}
		// Wait for Go to provide the string<->[]byte conversion optimization.
		kn := string(name)
		if parseFunc, ok := headerParser[kn]; ok {
			// lastLine = append([]byte(nil), trimmed...) // copy to avoid next read invalidating the trimmed line
			lastLine = dummyLastLine
			val = TrimSpace(val)
			if len(val) == 0 {
				continue
			}
			parseFunc(h, ASCIIToLower(val), raw)
		} else {
			// Mark this header as not of interest to the proxy.
			lastLine = nil
		}
		if hopByHopHeader[kn] {
			continue
		}
		raw.Write(s)
		// debug.Printf("len %d %s", len(s), s)
	}
	return
}
func skipSpace(r *bufio.Reader) int {
	n := 0
	for {
		c, err := r.ReadByte()
		if err != nil {
			// Bufio will keep err until next read.
			break
		}
		if c != ' ' && c != '\t' {
			r.UnreadByte()
			break
		}
		n++
	}
	return n
}
// ReadLineSlice reads until '\n' is found or an error is encountered. The
// returned line does not include the ending '\r\n' or '\n'. Returns err != nil
// if and only if len(line) == 0. Note the returned slice should not be used
// for append and may be overwritten by the next I/O operation. Copied from
// readLineSlice in $GOROOT/src/pkg/net/textproto/reader.go
func ReadLineSlice(r *bufio.Reader) (line []byte, err error) {
	for {
		l, more, err := r.ReadLine()
		if err != nil {
			return nil, err
		}
		// Avoid the copy if the first call produced a full line.
		if line == nil && !more {
			return l, nil
		}
		line = append(line, l...)
		if !more {
			break
		}
	}
	return line, nil
}
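// Illustrative sketch (not part of the original code): reading a single
// header line with ReadLineSlice. The caller name is hypothetical; the point
// is that the returned slice must be copied (or fully consumed) before the
// next read on r, as noted above.
func readOneHeaderLineExample(r *bufio.Reader) (string, error) {
	line, err := ReadLineSlice(r)
	if err != nil {
		return "", err
	}
	// Copy into a string because the slice may be overwritten by the next read.
	return string(line), nil
}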
// Use this function until we find Trailer headers actually in use.
func skipTrailer(r *bufio.Reader) error {
	// It's possible to get trailer headers, but the body will always end with
	// a line with just CRLF.
	for {
		s, err := r.ReadSlice('\n')
		if err != nil {
			errl.Println("skip trailer:", err)
			return err
		}
		if len(s) == 2 && s[0] == '\r' && s[1] == '\n' {
			return nil
		}
		errl.Printf("skip trailer: %#v", string(s))
		if len(s) == 1 || len(s) == 2 {
			return fmt.Errorf("malformed chunk body end: %#v", string(s))
		}
	}
}
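// For reference, a chunked body may end with optional trailer headers
// (sketch):
//
//	0\r\n
//	X-Checksum: abc\r\n
//	\r\n
//
// skipTrailer is presumably called after the "0" size line has been
// consumed, and reads up to and including the final CRLF-only line.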
// Learned from net/textproto. One difference is that this one keeps the
// ending '\n' in the returned line. But if there's only CRLF in the line,
// return nil for the line.
func readContinuedLineSlice(r *bufio.Reader) ([]byte, error) {
	// feedly.com request headers contain things like:
	// "$Authorization.feedly: $FeedlyAuth\r\n", so we must test for only
	// continuation spaces.
	isspace := func(b byte) bool {
		return b == ' ' || b == '\t'
	}

	// Read the first line.
	line, err := r.ReadSlice('\n')
	if err != nil {
		return nil, err
	}

	// There are servers that use \n for line ending, so trim first before checking the ending.
	// For example, the 404 page for http://plan9.bell-labs.com/magic/man2html/1/2l
	trimmed := TrimSpace(line)
	if len(trimmed) == 0 {
		if len(line) > 2 {
			return nil, fmt.Errorf("malformed end of headers, len: %d, %#v", len(line), string(line))
		}
		return nil, nil
	}

	if isspace(line[0]) {
		return nil, fmt.Errorf("malformed header, start with space: %#v", string(line))
	}

	// Optimistically assume that we have started to buffer the next line
	// and it starts with an ASCII letter (the next header key), so we can
	// avoid copying that buffered data around in memory and skipping over
	// non-existent whitespace.
	if r.Buffered() > 0 {
		peek, err := r.Peek(1)
		if err == nil && !isspace(peek[0]) {
			return line, nil
		}
	}

	var buf []byte
	buf = append(buf, trimmed...)

	// Read continuation lines.
	for skipSpace(r) > 0 {
		line, err := r.ReadSlice('\n')
		if err != nil {
			break
		}
		buf = append(buf, ' ')
		buf = append(buf, TrimTrailingSpace(line)...)
	}
	buf = append(buf, '\r', '\n')
	return buf, nil
}
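// For reference, a continued (obs-fold) header looks like this on the wire
// (sketch):
//
//	X-Long-Header: first part\r\n
//	 second part\r\n
//
// which readContinuedLineSlice would return as
// "X-Long-Header: first part second part\r\n".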