// ReadResponse unmarshals an HTTP response.
func (c *client) ReadResponse() (*Response, error) {
	version, code, msg, err := c.ReadStatusLine()
	var headers []Header
	if err != nil {
		return nil, fmt.Errorf("ReadStatusLine: %v", err)
	}
	for {
		var key, value string
		var done bool
		key, value, done, err = c.ReadHeader()
		if err != nil || done {
			break
		}
		if key == "" {
			// empty header values are valid, rfc 2616 s4.2.
			err = errors.New("invalid header")
			break
		}
		headers = append(headers, Header{key, value})
	}
	var resp = Response{
		Version: version,
		Status:  Status{code, msg},
		Headers: headers,
		Body:    c.ReadBody(),
	}
	if l := resp.ContentLength(); l >= 0 {
		resp.Body = io.LimitReader(resp.Body, l)
	} else if resp.TransferEncoding() == "chunked" {
		resp.Body = httputil.NewChunkedReader(resp.Body)
	}
	return &resp, err
}
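// The chunked branch above leans on net/http/httputil's chunked reader, which
// decodes the wire format back into the original payload and returns io.EOF at
// the terminating zero-length chunk. A minimal, self-contained sketch of that
// behaviour (standard library only; not part of the client above):
package main

import (
	"fmt"
	"io/ioutil"
	"net/http/httputil"
	"strings"
)

func main() {
	// "7\r\nMozilla\r\n" is one 7-byte chunk; "0\r\n\r\n" terminates the body.
	raw := "7\r\nMozilla\r\n9\r\nDeveloper\r\n0\r\n\r\n"
	body, err := ioutil.ReadAll(httputil.NewChunkedReader(strings.NewReader(raw)))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", body) // MozillaDeveloper
}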
// Request returns an HTTP Response with Header and Body
// from the fcgi responder
func (this *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) {
	r, err := this.Do(p, req)
	if err != nil {
		return
	}
	rb := bufio.NewReader(r)
	tp := textproto.NewReader(rb)
	resp = new(http.Response)

	// Parse the response headers.
	mimeHeader, err := tp.ReadMIMEHeader()
	if err != nil {
		return
	}
	resp.Header = http.Header(mimeHeader)

	// TODO: fixTransferEncoding ?
	resp.TransferEncoding = resp.Header["Transfer-Encoding"]
	resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)

	if chunked(resp.TransferEncoding) {
		resp.Body = ioutil.NopCloser(httputil.NewChunkedReader(rb))
	} else {
		resp.Body = ioutil.NopCloser(rb)
	}
	return
}
// BodyReader returns an io.ReadCloser that reads the HTTP request or response
// body. If mv.skipBody was set the reader will immediately return io.EOF.
//
// If the Decode option is passed the body will be unchunked if
// Transfer-Encoding is set to "chunked", and will decode the following
// Content-Encodings: gzip, deflate.
func (mv *MessageView) BodyReader(opts ...Option) (io.ReadCloser, error) {
	var r io.Reader

	conf := &config{}
	for _, o := range opts {
		o(conf)
	}

	br := bytes.NewReader(mv.message)
	r = io.NewSectionReader(br, mv.bodyoffset, mv.traileroffset-mv.bodyoffset)

	if !conf.decode {
		return ioutil.NopCloser(r), nil
	}

	if mv.chunked {
		r = httputil.NewChunkedReader(r)
	}

	switch mv.compress {
	case "gzip":
		gr, err := gzip.NewReader(r)
		if err != nil {
			return nil, err
		}
		return gr, nil
	case "deflate":
		return flate.NewReader(r), nil
	default:
		return ioutil.NopCloser(r), nil
	}
}
func doChunkedResponse(w http.ResponseWriter, resp *http.Response, client *httputil.ClientConn) {
	// Because we can't go back to request/response after we
	// hijack the connection, we need to close it and make the
	// client open another.
	w.Header().Add("Connection", "close")
	w.WriteHeader(resp.StatusCode)

	down, _, up, rem, err := hijack(w, client)
	if err != nil {
		http.Error(w, "Unable to hijack response stream for chunked response", http.StatusInternalServerError)
		return
	}
	defer up.Close()
	defer down.Close()

	// Copy the chunked response body to downstream,
	// stopping at the end of the chunked section.
	rawResponseBody := io.MultiReader(rem, up)
	if _, err := io.Copy(ioutil.Discard, httputil.NewChunkedReader(io.TeeReader(rawResponseBody, down))); err != nil {
		http.Error(w, "Error copying chunked response body", http.StatusInternalServerError)
		return
	}

	resp.Trailer.Write(down)
	// a chunked response ends with a CRLF
	down.Write([]byte("\r\n"))
}
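// The io.TeeReader combination above is what lets the proxy forward the raw
// chunked bytes downstream verbatim while decoding them in-process to learn
// when the chunked section is complete. A standalone sketch of just that
// mechanism (standard library only; the buffer stands in for the downstream
// connection):
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http/httputil"
	"strings"
)

func main() {
	raw := "5\r\nhello\r\n0\r\n\r\n"
	var down bytes.Buffer

	// Every raw byte the chunked reader consumes is copied to down by the
	// TeeReader; the decoded payload comes out of ReadAll.
	payload, err := ioutil.ReadAll(httputil.NewChunkedReader(io.TeeReader(strings.NewReader(raw), &down)))
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded=%q forwarded=%q\n", payload, down.String()) // decoded="hello" forwarded=raw
}

var _ = io.Copy // as in doChunkedResponse, the decoded payload could instead be discarded with io.Copy(ioutil.Discard, ...)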
// ParseRequest parses a raw HTTP request from a []byte and returns an
// *http.Request or an error.
func ParseRequest(data []byte) (request *http.Request, err error) {
	var body []byte

	// Test whether the request has a Transfer-Encoding: chunked header.
	isChunked := bytes.Contains(data, []byte(": chunked\r\n"))

	buf := bytes.NewBuffer(data)
	reader := bufio.NewReader(buf)

	// ReadRequest does not read POST bodies, we have to do it ourselves.
	request, err = http.ReadRequest(reader)
	if err != nil {
		return
	}

	if request.Method == "POST" {
		// This works because ReadRequest consumes the request line and
		// headers from the buffered reader, leaving only the body.
		if isChunked {
			body, _ = ioutil.ReadAll(httputil.NewChunkedReader(reader))
		} else {
			body, _ = ioutil.ReadAll(reader)
		}
		bodyBuf := bytes.NewBuffer(body)
		request.Body = ioutil.NopCloser(bodyBuf)
		request.ContentLength = int64(bodyBuf.Len())
	}
	return
}
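// A usage sketch for ParseRequest above (hypothetical input; assumes
// ParseRequest is in scope and "fmt"/"io/ioutil" are imported). The chunked
// body is reassembled and ContentLength reflects the decoded length:
func exampleParseChunkedPOST() {
	raw := []byte("POST /upload HTTP/1.1\r\n" +
		"Host: example.com\r\n" +
		"Transfer-Encoding: chunked\r\n" +
		"\r\n" +
		"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n")

	req, err := ParseRequest(raw)
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(req.Body)
	fmt.Printf("%s (%d bytes)\n", body, req.ContentLength) // Wikipedia (9 bytes)
}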
func dump(b []byte, te []string) (status, headers, body []byte, err error) {
	p := bytes.SplitN(b, []byte("\r\n\r\n"), 2)
	headers, body = p[0], p[1]
	p = bytes.SplitN(headers, []byte("\r\n"), 2)
	status, headers = p[0], p[1]

	if len(body) > 0 && isChunked(te) {
		r := httputil.NewChunkedReader(bytes.NewReader(body))
		body, err = ioutil.ReadAll(r)
		if err != nil {
			return
		}
	}
	return
}
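// A usage sketch for dump above (hypothetical input; assumes dump and its
// isChunked helper are in scope). The raw response splits into status line,
// header block and de-chunked body:
func exampleDump() {
	raw := []byte("HTTP/1.1 200 OK\r\n" +
		"Transfer-Encoding: chunked\r\n" +
		"\r\n" +
		"5\r\nhello\r\n0\r\n\r\n")

	status, headers, body, err := dump(raw, []string{"chunked"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s | %s | %s\n", status, headers, body) // HTTP/1.1 200 OK | Transfer-Encoding: chunked | hello
}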
// Request returns an HTTP Response with Header and Body
// from the fcgi responder
func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) {
	r, err := c.Do(p, req)
	if err != nil {
		return
	}
	rb := bufio.NewReader(r)
	tp := textproto.NewReader(rb)
	resp = new(http.Response)

	// Parse the response headers.
	mimeHeader, err := tp.ReadMIMEHeader()
	if err != nil && err != io.EOF {
		return
	}
	resp.Header = http.Header(mimeHeader)

	if resp.Header.Get("Status") != "" {
		statusParts := strings.SplitN(resp.Header.Get("Status"), " ", 2)
		resp.StatusCode, err = strconv.Atoi(statusParts[0])
		if err != nil {
			return
		}
		if len(statusParts) > 1 {
			resp.Status = statusParts[1]
		}
	} else {
		resp.StatusCode = http.StatusOK
	}

	// TODO: fixTransferEncoding ?
	resp.TransferEncoding = resp.Header["Transfer-Encoding"]
	resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)

	if chunked(resp.TransferEncoding) {
		resp.Body = ioutil.NopCloser(httputil.NewChunkedReader(rb))
	} else {
		resp.Body = ioutil.NopCloser(rb)
	}
	return
}
// Normalize requests with a `Transfer-Encoding: chunked` header, because their
// bodies use a special format.
func fixChunkedEncoding(data []byte) []byte {
	if bytes.Equal(data[0:4], bPOST) {
		body_idx := bytes.Index(data, b2xCRLF)
		chunked_header_idx := bytes.Index(data[:body_idx], bTransferEncodingChunked)

		if chunked_header_idx != -1 {
			buf := bytes.NewBuffer(data[body_idx+4:]) // Adding 4 bytes to skip the 2xCRLF
			bodyReader := bufio.NewReader(buf)
			body, _ := ioutil.ReadAll(httputil.NewChunkedReader(bodyReader))

			// Exclude the Transfer-Encoding header and append the decoded body.
			return append(append(append(data[:chunked_header_idx], data[chunked_header_idx+len(bTransferEncodingChunked):body_idx]...), b2xCRLF...), body...)
		}
	}
	return data
}
func newDecoder(rec Record, encodings []string) Record {
	if len(encodings) == 0 {
		return rec
	}
	pd := &payloadDecoder{Record: rec, rdr: rec}
	for i, v := range encodings {
		switch v {
		case "chunked":
			if i == 0 {
				if peek, err := rec.peek(10); err != nil || !ischunk(peek) {
					return rec
				}
			}
			pd.rdr = httputil.NewChunkedReader(pd.rdr)
		case "deflate":
			if i == 0 {
				if peek, err := rec.peek(2); err != nil || !iszlib(peek) {
					return rec
				}
			}
			rdr, err := zlib.NewReader(pd.rdr)
			if err == nil {
				pd.rdr = rdr
			}
		case "gzip":
			if i == 0 {
				if peek, err := rec.peek(3); err != nil || !isgzip(peek) {
					return rec
				}
			}
			rdr, err := gzip.NewReader(pd.rdr)
			if err == nil {
				pd.rdr = rdr
			}
		}
	}
	return pd
}
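// newDecoder stacks decoders in the order the encodings were applied on the
// wire: undo the chunked transfer coding first, then the content coding. A
// standalone sketch of that composition using only the standard library
// (httputil.NewChunkedWriter/NewChunkedReader and compress/gzip):
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"net/http/httputil"
)

func main() {
	// Encode: gzip the payload, then wrap it in the chunked transfer coding.
	var wire bytes.Buffer
	cw := httputil.NewChunkedWriter(&wire)
	gz := gzip.NewWriter(cw)
	gz.Write([]byte("hello, chunked gzip"))
	gz.Close()
	cw.Close()
	wire.WriteString("\r\n") // a chunked message ends with a final CRLF

	// Decode in the same order: un-chunk first, then gunzip.
	gr, err := gzip.NewReader(httputil.NewChunkedReader(&wire))
	if err != nil {
		panic(err)
	}
	payload, _ := ioutil.ReadAll(gr)
	fmt.Printf("%s\n", payload) // hello, chunked gzip
}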
// sseClient is a specialized SSE client that connects to a server and
// issues a request for the events handler, then waits for events to be
// returned from the server and puts them in the returned channel. It
// only handles the initial connect event and one subsequent event.
// This client supports HTTP/1.1 on non-TLS sockets.
func sseClient(serverURL string) (chan *serverSentEvent, error) {
	u, err := url.Parse(serverURL)
	if err != nil {
		return nil, err
	}
	if u.Scheme != "http" {
		return nil, errors.New("Unsupported URL scheme")
	}
	ev := make(chan *serverSentEvent, 2)
	tp, err := textproto.Dial("tcp", u.Host)
	if err != nil {
		return nil, err
	}
	tp.Cmd("GET %s HTTP/1.1\r\nHost: %s\r\n", u.Path, u.Host)
	line, err := tp.ReadLine()
	if err != nil {
		tp.Close()
		return nil, err
	}
	if line != "HTTP/1.1 200 OK" {
		tp.Close()
		return nil, errors.New("Unexpected response:" + line)
	}
	m, err := tp.ReadMIMEHeader()
	if err != nil {
		tp.Close()
		return nil, err
	}
	if v := m.Get("Content-Type"); v != "text/event-stream" {
		tp.Close()
		return nil, errors.New("Unexpected Content-Type: " + v)
	}
	if m.Get("Transfer-Encoding") == "chunked" {
		tp.R = bufio.NewReader(httputil.NewChunkedReader(tp.R))
	}
	go func() {
		defer close(ev)
		defer tp.Close()
		m, err = tp.ReadMIMEHeader()
		if err != nil {
			ev <- &serverSentEvent{Error: err}
			return
		}
		ev <- &serverSentEvent{
			Event: m.Get("Event"),
			Data:  m.Get("Data"),
		}
		if m.Get("Event") != "connect" {
			return
		}
		// If the first event is connect, we proceed and ship
		// the next one in line.
		m, err = tp.ReadMIMEHeader()
		if err != nil {
			ev <- &serverSentEvent{Error: err}
			return
		}
		ev <- &serverSentEvent{
			Event: m.Get("Event"),
			Data:  m.Get("Data"),
		}
	}()
	return ev, nil
}
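// A usage sketch for sseClient above (hypothetical URL; assumes sseClient and
// serverSentEvent are in scope and "log" is imported). The first event on the
// channel is expected to be "connect", followed by at most one more before the
// channel is closed:
func exampleSSE() {
	events, err := sseClient("http://127.0.0.1:8080/events")
	if err != nil {
		log.Fatal(err)
	}
	for ev := range events {
		if ev.Error != nil {
			log.Fatal(ev.Error)
		}
		log.Printf("event=%q data=%q", ev.Event, ev.Data)
	}
}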
// msg is *Request or *Response.
func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
	t := &transferReader{}

	// Unify input
	isResponse := false
	switch rr := msg.(type) {
	case *http.Response:
		t.Header = rr.Header
		t.StatusCode = rr.StatusCode
		t.RequestMethod = rr.Request.Method
		t.ProtoMajor = rr.ProtoMajor
		t.ProtoMinor = rr.ProtoMinor
		t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header)
		isResponse = true
	case *http.Request:
		t.Header = rr.Header
		t.ProtoMajor = rr.ProtoMajor
		t.ProtoMinor = rr.ProtoMinor
		// Transfer semantics for Requests are exactly like those for
		// Responses with status code 200, responding to a GET method
		t.StatusCode = 200
		t.RequestMethod = "GET"
	default:
		panic("unexpected type")
	}

	// Default to HTTP/1.1
	if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
		t.ProtoMajor, t.ProtoMinor = 1, 1
	}

	// Transfer encoding, content length
	t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header)
	if err != nil {
		return err
	}

	t.ContentLength, err = fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding)
	if err != nil {
		return err
	}

	// Trailer
	t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding)
	if err != nil {
		return err
	}

	// If there is no Content-Length or chunked Transfer-Encoding on a *Response
	// and the status is not 1xx, 204 or 304, then the body is unbounded.
	// See RFC 2616, section 4.4.
	switch msg.(type) {
	case *http.Response:
		if t.ContentLength == -1 && !chunked(t.TransferEncoding) && bodyAllowedForStatus(t.StatusCode) {
			// Unbounded body.
			t.Close = true
		}
	}

	// Prepare body reader. ContentLength < 0 means chunked encoding
	// or close connection when finished, since multipart is not supported yet
	switch {
	case chunked(t.TransferEncoding):
		t.Body = &body{Reader: httputil.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
	case t.ContentLength >= 0:
		// TODO: limit the Content-Length. This is an easy DoS vector.
		t.Body = &body{Reader: io.LimitReader(r, t.ContentLength), closing: t.Close}
	default:
		// t.ContentLength < 0, i.e. "Content-Length" not mentioned in header
		if t.Close {
			// Close semantics (i.e. HTTP/1.0)
			t.Body = &body{Reader: r, closing: t.Close}
		} else {
			// Persistent connection (i.e. HTTP/1.1)
			t.Body = &body{Reader: io.LimitReader(r, 0), closing: t.Close}
		}
	}

	// Unify output
	switch rr := msg.(type) {
	case *http.Request:
		rr.Body = t.Body
		rr.ContentLength = t.ContentLength
		rr.TransferEncoding = t.TransferEncoding
		rr.Close = t.Close
		rr.Trailer = t.Trailer
	case *http.Response:
		rr.Body = t.Body
		rr.ContentLength = t.ContentLength
		rr.TransferEncoding = t.TransferEncoding
		rr.Close = t.Close
		rr.Trailer = t.Trailer
	}
	return nil
}
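// In the chunked case above the body keeps the original *bufio.Reader (the r
// field) because httputil.NewChunkedReader stops at the zero-length chunk and
// leaves any trailer headers unread on the underlying reader. A standalone
// sketch of that split, using only the standard library:
package main

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"net/http/httputil"
	"net/textproto"
	"strings"
)

func main() {
	raw := "5\r\nhello\r\n0\r\nX-Checksum: abc123\r\n\r\n"
	br := bufio.NewReader(strings.NewReader(raw))

	// Passing a *bufio.Reader means the chunked reader shares it rather than
	// wrapping it, so the trailer stays available on br afterwards.
	body, err := ioutil.ReadAll(httputil.NewChunkedReader(br))
	if err != nil {
		panic(err)
	}
	trailer, err := textproto.NewReader(br).ReadMIMEHeader()
	if err != nil {
		panic(err)
	}
	fmt.Printf("body=%q trailer=%v\n", body, trailer) // body="hello" trailer=map[X-Checksum:[abc123]]
}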