func ReadBody(req *http.Request) *string {
	save := req.Body
	var err error
	if req.Body == nil {
		req.Body = nil
	} else {
		save, req.Body, err = drainBody(req.Body)
		if err != nil {
			return nil
		}
	}

	b := bytes.NewBuffer([]byte(""))
	chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
	if req.Body == nil {
		return nil
	}

	var dest io.Writer = b
	if chunked {
		dest = httputil.NewChunkedWriter(dest)
	}
	_, err = io.Copy(dest, req.Body)
	if chunked {
		dest.(io.Closer).Close()
	}

	req.Body = save
	body := b.String()
	return &body
}
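// A minimal, self-contained sketch of the drain-and-restore pattern ReadBody
// depends on (drainBody in net/http/httputil works the same way): read the
// body into memory, then put an equivalent copy back so later readers still
// see it. The request below is purely illustrative.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	req, _ := http.NewRequest("POST", "http://example.invalid/upload", strings.NewReader("hello"))

	// Drain the body into memory...
	data, _ := ioutil.ReadAll(req.Body)
	req.Body.Close()

	// ...and restore it so downstream consumers can read it again.
	req.Body = ioutil.NopCloser(bytes.NewReader(data))

	fmt.Printf("captured %q; body is still readable\n", data)
}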
// SnapshotResponse reads the response into the MessageView. If mv.skipBody
// is false it will also read the body into memory and replace the existing
// body with the in-memory copy. This method is semantically a no-op.
func (mv *MessageView) SnapshotResponse(res *http.Response) error {
	buf := new(bytes.Buffer)

	fmt.Fprintf(buf, "HTTP/%d.%d %s\r\n", res.ProtoMajor, res.ProtoMinor, res.Status)

	if tec := len(res.TransferEncoding); tec > 0 {
		mv.chunked = res.TransferEncoding[tec-1] == "chunked"
		fmt.Fprintf(buf, "Transfer-Encoding: %s\r\n", strings.Join(res.TransferEncoding, ", "))
	}
	if !mv.chunked && res.ContentLength >= 0 {
		fmt.Fprintf(buf, "Content-Length: %d\r\n", res.ContentLength)
	}

	mv.compress = res.Header.Get("Content-Encoding")

	res.Header.WriteSubset(buf, map[string]bool{
		"Content-Length":    true,
		"Transfer-Encoding": true,
	})

	fmt.Fprint(buf, "\r\n")

	mv.bodyoffset = int64(buf.Len())
	mv.traileroffset = int64(buf.Len())

	ct := res.Header.Get("Content-Type")
	if mv.skipBody && !mv.matchContentType(ct) || res.Body == nil {
		mv.message = buf.Bytes()
		return nil
	}

	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return err
	}
	res.Body.Close()

	if mv.chunked {
		cw := httputil.NewChunkedWriter(buf)
		cw.Write(data)
		cw.Close()
	} else {
		buf.Write(data)
	}

	mv.traileroffset = int64(buf.Len())

	res.Body = ioutil.NopCloser(bytes.NewReader(data))

	if res.Trailer != nil {
		res.Trailer.Write(buf)
	} else if mv.chunked {
		fmt.Fprint(buf, "\r\n")
	}

	mv.message = buf.Bytes()

	return nil
}
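// For comparison, the standard library's httputil.DumpResponse performs a
// similar non-destructive capture of status line, headers, and body, leaving
// res.Body readable afterwards. A rough stdlib-only sketch; the test server is
// purely illustrative.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "hello")
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}

	dump, err := httputil.DumpResponse(res, true) // body included; res.Body stays usable
	if err != nil {
		panic(err)
	}

	body, _ := ioutil.ReadAll(res.Body)
	res.Body.Close()

	fmt.Printf("dump:\n%s\nbody still readable: %q\n", dump, body)
}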
func dumpRequest(req *http.Request, body bool) (dump []byte, err error) {
	save := req.Body
	if !body || req.Body == nil {
		req.Body = nil
	} else {
		save, req.Body, err = drainBody(req.Body)
		if err != nil {
			return
		}
	}

	var b bytes.Buffer
	fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"),
		req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor)

	host := req.Host
	if host == "" && req.URL != nil {
		host = req.URL.Host
	}
	if host != "" {
		fmt.Fprintf(&b, "Host: %s\r\n", host)
	}

	chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
	if len(req.TransferEncoding) > 0 {
		fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
	}
	if req.Close {
		fmt.Fprintf(&b, "Connection: close\r\n")
	}

	err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump)
	if err != nil {
		return
	}

	io.WriteString(&b, "\r\n")

	if req.Body != nil {
		var dest io.Writer = &b
		if chunked {
			dest = httputil.NewChunkedWriter(dest)
		}
		_, err = io.Copy(dest, req.Body)
		if chunked {
			dest.(io.Closer).Close()
			io.WriteString(&b, "\r\n")
		}
	}

	req.Body = save
	if err != nil {
		return
	}
	dump = b.Bytes()
	return
}
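// dumpRequest mirrors the standard httputil.DumpRequest; when the custom header
// filtering is not needed, the stdlib call can be used directly. A small
// illustrative sketch (URL and payload are made up):
package main

import (
	"fmt"
	"net/http"
	"net/http/httputil"
	"strings"
)

func main() {
	req, _ := http.NewRequest("POST", "http://example.invalid/items", strings.NewReader(`{"id":1}`))
	req.Header.Set("Content-Type", "application/json")

	dump, err := httputil.DumpRequest(req, true) // true: include the body
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", dump)
}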
// WriteChunked writes the contents of r in chunked format to the wire.
func (w *writer) WriteChunked(r io.Reader) error {
	if w.phase != body {
		return &phaseError{body, w.phase}
	}
	cw := httputil.NewChunkedWriter(w)
	if _, err := io.Copy(cw, r); err != nil {
		return err
	}
	w.phase = requestline
	return cw.Close()
}
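// What WriteChunked puts on the wire, shown against a plain buffer: the chunked
// writer frames each Write as "<hex length>\r\n<data>\r\n", and Close only emits
// the terminating zero-length chunk; the final CRLF (or any trailers) is the
// caller's responsibility.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http/httputil"
	"strings"
)

func main() {
	var buf bytes.Buffer

	cw := httputil.NewChunkedWriter(&buf)
	io.Copy(cw, strings.NewReader("hello"))
	cw.Close()              // writes "0\r\n" but not the trailing CRLF
	buf.WriteString("\r\n") // terminate the chunked body

	fmt.Printf("%q\n", buf.String()) // "5\r\nhello\r\n0\r\n\r\n"
}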
func (s *Server) sendMessage(i int, w *bufio.ReadWriter, b []byte) {
	c := httputil.NewChunkedWriter(w)
	_, err := c.Write(b)
	if err == nil {
		err = w.Flush()
	}
	if err != nil {
		// Writing failed, so drop streamer i by swapping in the last element.
		s.mu.Lock()
		if len(s.streamers) == 1 {
			s.streamers = s.streamers[:0]
		} else {
			s.streamers[i] = s.streamers[len(s.streamers)-1]
			s.streamers = s.streamers[:len(s.streamers)-1]
		}
		s.mu.Unlock()
	}
}
// restWrapper is a simple wrapper that turns a restFunc into a streaming,
// chunked http.HandlerFunc.
func restWrapper(handlerFunc restFunc) http.HandlerFunc {
	// Create a closure and return an anonymous function
	return func(w1 http.ResponseWriter, r *http.Request) {
		w := httputil.NewChunkedWriter(w1)

		flusher, ok := w1.(http.Flusher)
		if !ok {
			log.Errorf("Could not get flusher")
			http.NotFound(w1, r)
			return
		}

		w1.Header().Set("Transfer-Encoding", "chunked")
		w1.WriteHeader(http.StatusOK)
		flusher.Flush()

		// Call the handler
		count := 0
		for {
			resp, done, err := handlerFunc(r, count)
			if err != nil {
				// Send HTTP error response
				http.Error(w1, err.Error(), http.StatusInternalServerError)
			} else {
				// Send HTTP response as JSON
				content, err := json.Marshal(resp)
				if err != nil {
					log.Errorf("Marshal failed: %v", err)
					http.Error(w1, err.Error(), http.StatusInternalServerError)
					return
				}
				_, err = w.Write(content)
				if err != nil {
					log.Errorf("Write failed: %v", err)
				}
				flusher.Flush()
			}

			if done {
				break
			}
			count++
		}
	}
}
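// A caveat, sketched as an assumption about the standard net/http server: when a
// handler writes a response without a Content-Length, the server applies chunked
// Transfer-Encoding on its own, so manually wrapping the ResponseWriter in
// httputil.NewChunkedWriter (as above) tends to chunk the payload twice. A
// Flusher-only streaming variant; the /stream path and payload are illustrative.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	http.HandleFunc("/stream", func(w http.ResponseWriter, r *http.Request) {
		flusher, ok := w.(http.Flusher)
		if !ok {
			http.Error(w, "streaming unsupported", http.StatusInternalServerError)
			return
		}
		for i := 0; i < 3; i++ {
			fmt.Fprintf(w, "tick %d\n", i)
			flusher.Flush() // each flush is sent as its own chunk
			time.Sleep(100 * time.Millisecond)
		}
	})

	http.ListenAndServe(":8080", nil)
}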
func (t *transferWriter) WriteBody(w io.Writer) (err error) {
	var ncopy int64

	// Write body
	if t.Body != nil {
		if chunked(t.TransferEncoding) {
			cw := httputil.NewChunkedWriter(w)
			_, err = io.Copy(cw, t.Body)
			if err == nil {
				err = cw.Close()
			}
		} else if t.ContentLength == -1 {
			ncopy, err = io.Copy(w, t.Body)
		} else {
			ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength))
			if err != nil {
				return err
			}
			var nextra int64
			nextra, err = io.Copy(ioutil.Discard, t.Body)
			if err != nil {
				return err
			}
			ncopy += nextra
		}
		if err != nil {
			return err
		}
		if err = t.BodyCloser.Close(); err != nil {
			return err
		}
	}

	if t.ContentLength != -1 && t.ContentLength != ncopy {
		return fmt.Errorf("http: Request.ContentLength=%d with Body length %d",
			t.ContentLength, ncopy)
	}

	// TODO(petar): Place trailer writer code here.
	if chunked(t.TransferEncoding) {
		// Last chunk, empty trailer
		_, err = io.WriteString(w, "\r\n")
	}

	return
}
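// WriteBody's final length check mirrors the one in the standard library's
// transport, where a mismatch between the declared ContentLength and the actual
// body surfaces as a client error. A small sketch; the test server is illustrative.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	defer srv.Close()

	req, _ := http.NewRequest("POST", srv.URL, strings.NewReader("abc"))
	req.ContentLength = 10 // deliberately larger than the 3-byte body

	_, err := http.DefaultClient.Do(req)
	fmt.Println(err) // reports the ContentLength / body length mismatch
}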
// WriteHeader writes the status line and headers.
func (rw *responseWriter) WriteHeader(status int) {
	if rw.wroteHeader {
		return
	}
	rw.wroteHeader = true

	fmt.Fprintf(rw.ow, "HTTP/1.1 %d %s\r\n", status, http.StatusText(status))

	if rw.closing {
		rw.hdr.Set("Connection", "close")
	}
	if rw.hdr.Get("Content-Length") == "" {
		rw.hdr.Set("Transfer-Encoding", "chunked")
		rw.chunked = true
		rw.cw = httputil.NewChunkedWriter(rw.ow)
	}

	rw.hdr.Write(rw.ow)
	rw.ow.Write([]byte("\r\n"))
}
// dumpReqBody dumps the request body; strongly inspired by httputil.DumpRequest.
func dumpReqBody(req *http.Request) ([]byte, error) {
	if req.Body == nil {
		return nil, nil
	}

	var save io.ReadCloser
	var err error
	save, req.Body, err = drainBody(req.Body)
	if err != nil {
		return nil, err
	}

	var b bytes.Buffer
	var dest io.Writer = &b
	chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
	if chunked {
		dest = httputil.NewChunkedWriter(dest)
	}
	_, err = io.Copy(dest, req.Body)
	if chunked {
		dest.(io.Closer).Close()
		io.WriteString(&b, "\r\n")
	}
	req.Body = save
	return b.Bytes(), err
}
// SnapshotRequest reads the request into the MessageView. If mv.skipBody is false
// it will also read the body into memory and replace the existing body with
// the in-memory copy. This method is semantically a no-op.
func (mv *MessageView) SnapshotRequest(req *http.Request) error {
	buf := new(bytes.Buffer)

	fmt.Fprintf(buf, "%s %s HTTP/%d.%d\r\n", req.Method, req.URL, req.ProtoMajor, req.ProtoMinor)

	if req.Host != "" {
		fmt.Fprintf(buf, "Host: %s\r\n", req.Host)
	}

	if tec := len(req.TransferEncoding); tec > 0 {
		mv.chunked = req.TransferEncoding[tec-1] == "chunked"
		fmt.Fprintf(buf, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ", "))
	}
	if !mv.chunked {
		fmt.Fprintf(buf, "Content-Length: %d\r\n", req.ContentLength)
	}

	mv.compress = req.Header.Get("Content-Encoding")

	req.Header.WriteSubset(buf, map[string]bool{
		"Host":              true,
		"Content-Length":    true,
		"Transfer-Encoding": true,
	})

	fmt.Fprint(buf, "\r\n")

	mv.bodyoffset = int64(buf.Len())
	mv.traileroffset = int64(buf.Len())

	if mv.skipBody {
		mv.message = buf.Bytes()
		return nil
	}

	data, err := ioutil.ReadAll(req.Body)
	if err != nil {
		return err
	}
	req.Body.Close()

	if mv.chunked {
		cw := httputil.NewChunkedWriter(buf)
		cw.Write(data)
		cw.Close()
	} else {
		buf.Write(data)
	}

	mv.traileroffset = int64(buf.Len())

	req.Body = ioutil.NopCloser(bytes.NewReader(data))

	if req.Trailer != nil {
		req.Trailer.Write(buf)
	} else if mv.chunked {
		fmt.Fprint(buf, "\r\n")
	}

	mv.message = buf.Bytes()

	return nil
}
func TestProxyRequestContentLengthAndTransferEncoding(t *testing.T) {
	chunk := func(data []byte) []byte {
		out := &bytes.Buffer{}
		chunker := httputil.NewChunkedWriter(out)
		for _, b := range data {
			if _, err := chunker.Write([]byte{b}); err != nil {
				panic(err)
			}
		}
		chunker.Close()
		out.Write([]byte("\r\n"))
		return out.Bytes()
	}

	zip := func(data []byte) []byte {
		out := &bytes.Buffer{}
		zipper := gzip.NewWriter(out)
		if _, err := zipper.Write(data); err != nil {
			panic(err)
		}
		zipper.Close()
		return out.Bytes()
	}

	sampleData := []byte("abcde")

	table := map[string]struct {
		reqHeaders http.Header
		reqBody    []byte

		expectedHeaders http.Header
		expectedBody    []byte
	}{
		"content-length": {
			reqHeaders: http.Header{
				"Content-Length": []string{"5"},
			},
			reqBody: sampleData,

			expectedHeaders: http.Header{
				"Content-Length":    []string{"5"},
				"Content-Encoding":  nil, // none set
				"Transfer-Encoding": nil, // none set
			},
			expectedBody: sampleData,
		},

		"content-length + identity transfer-encoding": {
			reqHeaders: http.Header{
				"Content-Length":    []string{"5"},
				"Transfer-Encoding": []string{"identity"},
			},
			reqBody: sampleData,

			expectedHeaders: http.Header{
				"Content-Length":    []string{"5"},
				"Content-Encoding":  nil, // none set
				"Transfer-Encoding": nil, // gets removed
			},
			expectedBody: sampleData,
		},

		"content-length + gzip content-encoding": {
			reqHeaders: http.Header{
				"Content-Length":   []string{strconv.Itoa(len(zip(sampleData)))},
				"Content-Encoding": []string{"gzip"},
			},
			reqBody: zip(sampleData),

			expectedHeaders: http.Header{
				"Content-Length":    []string{strconv.Itoa(len(zip(sampleData)))},
				"Content-Encoding":  []string{"gzip"},
				"Transfer-Encoding": nil, // none set
			},
			expectedBody: zip(sampleData),
		},

		"chunked transfer-encoding": {
			reqHeaders: http.Header{
				"Transfer-Encoding": []string{"chunked"},
			},
			reqBody: chunk(sampleData),

			expectedHeaders: http.Header{
				"Content-Length":    nil, // none set
				"Content-Encoding":  nil, // none set
				"Transfer-Encoding": nil, // Transfer-Encoding gets removed
			},
			expectedBody: sampleData, // sample data is unchunked
		},

		"chunked transfer-encoding + gzip content-encoding": {
			reqHeaders: http.Header{
				"Content-Encoding":  []string{"gzip"},
				"Transfer-Encoding": []string{"chunked"},
			},
			reqBody: chunk(zip(sampleData)),

			expectedHeaders: http.Header{
				"Content-Length":    nil, // none set
				"Content-Encoding":  []string{"gzip"},
				"Transfer-Encoding": nil, // gets removed
			},
			expectedBody: zip(sampleData), // sample data is unchunked, but content-encoding is preserved
		},

		// "Transfer-Encoding: gzip" is not supported by go
		// See http/transfer.go#fixTransferEncoding (https://golang.org/src/net/http/transfer.go#L427)
		// Once it is supported, this test case should succeed
		//
		// "gzip+chunked transfer-encoding": {
		// 	reqHeaders: http.Header{
		// 		"Transfer-Encoding": []string{"chunked,gzip"},
		// 	},
		// 	reqBody: chunk(zip(sampleData)),
		//
		// 	expectedHeaders: http.Header{
		// 		"Content-Length":    nil, // no content-length headers
		// 		"Transfer-Encoding": nil, // Transfer-Encoding gets removed
		// 	},
		// 	expectedBody: sampleData,
		// },
	}

	successfulResponse := "backend passed tests"
	for k, item := range table {
		// Start the downstream server
		downstreamServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			// Verify headers
			for header, v := range item.expectedHeaders {
				if !reflect.DeepEqual(v, req.Header[header]) {
					t.Errorf("%s: Expected headers for %s to be %v, got %v", k, header, v, req.Header[header])
				}
			}

			// Read body
			body, err := ioutil.ReadAll(req.Body)
			if err != nil {
				t.Errorf("%s: unexpected error %v", k, err)
			}
			req.Body.Close()

			// Verify length
			if req.ContentLength > 0 && req.ContentLength != int64(len(body)) {
				t.Errorf("%s: ContentLength was %d, len(data) was %d", k, req.ContentLength, len(body))
			}

			// Verify content
			if !bytes.Equal(item.expectedBody, body) {
				t.Errorf("%s: Expected %q, got %q", k, string(item.expectedBody), string(body))
			}

			// Write successful response
			w.Write([]byte(successfulResponse))
		}))
		defer downstreamServer.Close()

		// Start the proxy server
		serverURL, _ := url.Parse(downstreamServer.URL)
		simpleStorage := &SimpleRESTStorage{
			errors:                    map[string]error{},
			resourceLocation:          serverURL,
			expectedResourceNamespace: "default",
		}
		namespaceHandler := handleNamespaced(map[string]rest.Storage{"foo": simpleStorage})
		server := httptest.NewServer(namespaceHandler)
		defer server.Close()

		// Dial the proxy server
		conn, err := net.Dial(server.Listener.Addr().Network(), server.Listener.Addr().String())
		if err != nil {
			t.Errorf("%s: unexpected error %v", k, err)
			continue
		}
		defer conn.Close()

		// Add standard http 1.1 headers
		if item.reqHeaders == nil {
			item.reqHeaders = http.Header{}
		}
		item.reqHeaders.Add("Connection", "close")
		item.reqHeaders.Add("Host", server.Listener.Addr().String())

		// We directly write to the connection to bypass the Go library's manipulation of the Request.Header.
		// Write the request headers
		post := fmt.Sprintf("POST /%s/%s/%s/proxy/namespaces/default/foo/id/some/dir HTTP/1.1\r\n", prefix, newGroupVersion.Group, newGroupVersion.Version)
		if _, err := fmt.Fprint(conn, post); err != nil {
			t.Fatalf("%s: unexpected error %v", k, err)
		}
		for header, values := range item.reqHeaders {
			for _, value := range values {
				if _, err := fmt.Fprintf(conn, "%s: %s\r\n", header, value); err != nil {
					t.Fatalf("%s: unexpected error %v", k, err)
				}
			}
		}
		// Header separator
		if _, err := fmt.Fprint(conn, "\r\n"); err != nil {
			t.Fatalf("%s: unexpected error %v", k, err)
		}
		// Body
		if _, err := conn.Write(item.reqBody); err != nil {
			t.Fatalf("%s: unexpected error %v", k, err)
		}

		// Read response
		response, err := ioutil.ReadAll(conn)
		if err != nil {
			t.Errorf("%s: unexpected error %v", k, err)
			continue
		}
		if !strings.HasSuffix(string(response), successfulResponse) {
			t.Errorf("%s: Did not get successful response: %s", k, string(response))
			continue
		}
	}
}
func activeConnectionState(c *conn) connectionStateFn {
	select {
	case <-time.After(c.DisconnectDelay): // timeout connection
		// log.Println("timeout in active:", c)
		return nil
	case httpTx := <-c.httpTransactions:
		writer := httpTx.rw

		// continue with protocol handling on the hijacked connection
		conn, err := hijack(writer)
		if err != nil {
			// TODO
			log.Fatal(err)
		}
		httpTx.done <- true // let baseHandler finish

		chunked := httputil.NewChunkedWriter(conn)
		defer func() {
			chunked.Close()
			conn.Write([]byte("\r\n")) // close chunked data
			conn.Close()
		}()

		// start protocol handling
		conn_closed := make(chan bool)
		defer func() { conn_closed <- true }()
		go c.activeConnectionGuard(conn_closed)

		conn_interrupted := make(chan bool)
		go connectionClosedGuard(conn, conn_interrupted)

		bytes_sent := 0
		for loop := true; loop; {
			select {
			case frame, ok := <-c.output_channel:
				if !ok {
					httpTx.writeClose(chunked, 3000, "Go away!")
					return closedConnectionState
				}
				frames := [][]byte{frame}
				for drain := true; drain; {
					select {
					case frame, ok = <-c.output_channel:
						frames = append(frames, frame)
					default:
						drain = false
					}
				}
				n, _ := httpTx.writeData(chunked, frames...)
				bytes_sent = bytes_sent + n
			case <-time.After(c.HeartbeatDelay):
				httpTx.writeHeartbeat(chunked)
			case <-conn_interrupted:
				c.Close()
				return nil
			}
			if httpTx.isStreaming() {
				if bytes_sent > c.ResponseLimit {
					loop = false
				}
			} else {
				loop = false
			}
		}
		return activeConnectionState
	}
	panic("unreachable")
}
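// Hypothetical sketch of the hijack helper used above (its real definition is not
// shown here): take over the raw connection via http.Hijacker and write the
// response head by hand, so chunked frames can then be streamed directly. The
// header lines are illustrative; imports assumed: errors, net, net/http.
func hijack(w http.ResponseWriter) (net.Conn, error) {
	hj, ok := w.(http.Hijacker)
	if !ok {
		return nil, errors.New("response writer does not support hijacking")
	}
	conn, bufrw, err := hj.Hijack()
	if err != nil {
		return nil, err
	}
	// After hijacking, the net/http server no longer manages this connection,
	// so the status line and headers must be written manually.
	if _, err := bufrw.WriteString("HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n"); err != nil {
		conn.Close()
		return nil, err
	}
	if err := bufrw.Flush(); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}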
func (c *Client) logRequest(req *http.Request) error {
	if c.logHTTP {
		var err error
		body := true
		save := req.Body
		if !body || req.Body == nil {
			req.Body = nil
		} else {
			save, req.Body, err = drainBody(req.Body)
			if err != nil {
				return err
			}
		}

		fmt.Fprintln(os.Stderr, "----------- request start -----------")

		fmt.Fprintf(
			os.Stderr,
			"%s %s HTTP/%d.%d\r\n",
			valueOrDefault(req.Method, "GET"),
			req.URL.RequestURI(),
			req.ProtoMajor,
			req.ProtoMinor,
		)

		host := req.Host
		if host == "" && req.URL != nil {
			host = req.URL.Host
		}
		if host != "" {
			fmt.Fprintf(os.Stderr, "Host: %s\r\n", host)
		}

		chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
		if len(req.TransferEncoding) > 0 {
			fmt.Fprintf(os.Stderr, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
		}
		if req.Close {
			fmt.Fprintf(os.Stderr, "Connection: close\r\n")
		}

		err = req.Header.WriteSubset(os.Stderr, reqWriteExcludeHeaderDump)
		if err != nil {
			return err
		}

		io.WriteString(os.Stderr, "\r\n")

		fmt.Fprintln(os.Stderr, "----------- body start -----------")

		if req.Body != nil {
			var dest io.Writer = os.Stderr
			if chunked {
				dest = httputil.NewChunkedWriter(dest)
			}
			_, err = io.Copy(dest, req.Body)
			if chunked {
				dest.(io.Closer).Close()
				io.WriteString(os.Stderr, "\r\n")
			}
		}

		fmt.Fprintln(os.Stderr, "----------- body end -----------")

		req.Body = save
		if err != nil {
			return err
		}

		fmt.Fprintln(os.Stderr, "----------- request end -----------")
	}
	return nil
}
func (w *respWriter) WriteHeader(code int, httpMessage interface{}, hasBody bool) {
	if w.wroteHeader {
		log.Println("Called WriteHeader twice on the same connection")
		return
	}

	// Make the HTTP header and the Encapsulated: header.
	var header []byte
	var encap string
	var err error
	switch msg := httpMessage.(type) {
	case *http.Request:
		header, err = httpRequestHeader(msg)
		if err != nil {
			break
		}
		if hasBody {
			encap = fmt.Sprintf("req-hdr=0, req-body=%d", len(header))
		} else {
			encap = fmt.Sprintf("req-hdr=0, null-body=%d", len(header))
		}
	case *http.Response:
		header, err = httpResponseHeader(msg)
		if err != nil {
			break
		}
		if hasBody {
			encap = fmt.Sprintf("res-hdr=0, res-body=%d", len(header))
		} else {
			encap = fmt.Sprintf("res-hdr=0, null-body=%d", len(header))
		}
	}
	if encap == "" {
		if hasBody {
			method := w.req.Method
			if len(method) > 3 {
				method = method[0:3]
			}
			method = strings.ToLower(method)
			encap = fmt.Sprintf("%s-body=0", method)
		} else {
			encap = "null-body=0"
		}
	}
	w.header.Set("Encapsulated", encap)

	if _, ok := w.header["Date"]; !ok {
		w.Header().Set("Date", time.Now().UTC().Format(http.TimeFormat))
	}

	w.header.Set("Connection", "close")

	bw := w.conn.buf.Writer
	status := StatusText(code)
	if status == "" {
		status = fmt.Sprintf("status code %d", code)
	}
	fmt.Fprintf(bw, "ICAP/1.0 %d %s\r\n", code, status)
	w.header.Write(bw)
	io.WriteString(bw, "\r\n")
	if header != nil {
		bw.Write(header)
	}

	w.wroteHeader = true

	if hasBody {
		w.cw = httputil.NewChunkedWriter(w.conn.buf.Writer)
	}
}
func newStreamWriter(bufrw *bufio.ReadWriter) io.WriteCloser {
	sw := new(streamWriter)
	sw.bufrw = bufrw
	sw.wc = httputil.NewChunkedWriter(bufrw)
	return sw
}
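// Hypothetical sketch of the streamWriter type the constructor above assumes (its
// real definition is not shown here): writes go through the chunked writer, and
// Close finishes the chunked body before flushing the buffered connection.
// Imports assumed: bufio, io.
type streamWriter struct {
	bufrw *bufio.ReadWriter
	wc    io.WriteCloser
}

func (sw *streamWriter) Write(p []byte) (int, error) {
	n, err := sw.wc.Write(p)
	if err == nil {
		err = sw.bufrw.Flush()
	}
	return n, err
}

func (sw *streamWriter) Close() error {
	if err := sw.wc.Close(); err != nil { // terminating zero-length chunk
		return err
	}
	if _, err := sw.bufrw.WriteString("\r\n"); err != nil { // final CRLF after the chunks
		return err
	}
	return sw.bufrw.Flush()
}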
// ResponseView returns a new MessageView for res. If conf.HeadersOnly is false
// it will also read the body into memory and replace the existing body with
// the in-memory copy. This method is semantically a no-op.
func ResponseView(res *http.Response, conf *ViewConfig) (*MessageView, error) {
	if conf == nil {
		conf = &ViewConfig{}
	}

	mv := &MessageView{
		conf: conf,
	}

	buf := new(bytes.Buffer)

	fmt.Fprintf(buf, "HTTP/%d.%d %s\r\n", res.ProtoMajor, res.ProtoMinor, res.Status)

	if tec := len(res.TransferEncoding); tec > 0 {
		mv.chunked = res.TransferEncoding[tec-1] == "chunked"
		fmt.Fprintf(buf, "Transfer-Encoding: %s\r\n", strings.Join(res.TransferEncoding, ", "))
	}
	if !mv.chunked {
		fmt.Fprintf(buf, "Content-Length: %d\r\n", res.ContentLength)
	}

	mv.compress = res.Header.Get("Content-Encoding")

	res.Header.WriteSubset(buf, map[string]bool{
		"Content-Length":    true,
		"Transfer-Encoding": true,
	})

	fmt.Fprint(buf, "\r\n")

	mv.bodyoffset = int64(buf.Len())
	mv.traileroffset = int64(buf.Len())

	if mv.conf.HeadersOnly {
		mv.message = buf.Bytes()
		return mv, nil
	}

	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	res.Body.Close()

	if mv.chunked {
		cw := httputil.NewChunkedWriter(buf)
		cw.Write(data)
		cw.Close()
	} else {
		buf.Write(data)
	}

	mv.traileroffset = int64(buf.Len())

	res.Body = ioutil.NopCloser(bytes.NewReader(data))

	if res.Trailer != nil {
		res.Trailer.Write(buf)
	} else if mv.chunked {
		fmt.Fprint(buf, "\r\n")
	}

	mv.message = buf.Bytes()

	return mv, nil
}
func (p *chunkedEncoding) Encoder(req Request, cxt Context, writer io.Writer) io.Writer {
	return httputil.NewChunkedWriter(writer)
}