func (h *Headers) wrapResponseWriter(w http.ResponseWriter) http.ResponseWriter { var newW = httputils.NewFlexibleResponseWriter(func(frw *httputils.FlexibleResponseWriter) { httputils.CopyHeaders(frw.Header(), w.Header()) h.response.rewrite(w.Header()) frw.BodyWriter = utils.NopCloser(w) w.WriteHeader(frw.Code) }) httputils.CopyHeaders(w.Header(), newW.Header()) return newW }
func (m *mp4Handler) RequestHandle(ctx context.Context, w http.ResponseWriter, r *http.Request) { // Handle only GET requests with ContentLength of 0 without a Range header if r.Method != "GET" || len(r.Header.Get("Range")) > 0 || r.ContentLength > 0 { m.next.RequestHandle(ctx, w, r) return } // parse the request var start, err = strconv.Atoi(r.URL.Query().Get(startKey)) if err != nil || 0 >= start { // that start is not ok m.next.RequestHandle(ctx, w, r) return } var startTime = time.Duration(start) * time.Second var newreq = copyRequest(r) removeQueryArgument(newreq.URL, startKey) var header = make(http.Header) var rr = &rangeReader{ ctx: ctx, req: copyRequest(newreq), location: m.loc, next: m.next, callback: func(frw *httputils.FlexibleResponseWriter) bool { if len(header) == 0 { httputils.CopyHeadersWithout(frw.Header(), header, skipHeaders...) } else { return frw.Header().Get("Last-Modified") == header.Get("Last-Modified") } return true }, } var video *mp4.MP4 video, err = mp4.Decode(rr) if err != nil { m.loc.Logger.Errorf("error from the mp4.Decode - %s", err) m.next.RequestHandle(ctx, w, r) return } if video == nil || video.Moov == nil { // missing something? m.next.RequestHandle(ctx, w, r) return } cl, err := clip.New(video, startTime, rr) if err != nil { m.loc.Logger.Errorf("error while clipping a video(%+v) - %s", video, err) m.next.RequestHandle(ctx, w, r) return } httputils.CopyHeaders(header, w.Header()) w.Header().Set("Content-Length", strconv.FormatUint(cl.Size(), 10)) size, err := cl.WriteTo(w) m.loc.Logger.Debugf("wrote %d", size) if err != nil { m.loc.Logger.Logf("error on writing the clip response - %s", err) } if uint64(size) != cl.Size() { m.loc.Logger.Debugf("handler.mp4[%p]: expected to write %d but wrote %d", m, cl.Size(), size) } }
func (h *reqHandler) knownRanged() { ranges, err := httputils.ParseRequestRange(h.req.Header.Get("Range"), h.obj.Size) if err != nil { err := http.StatusRequestedRangeNotSatisfiable http.Error(h.resp, http.StatusText(err), err) return } if len(ranges) != 1 { // We do not support multiple ranges but maybe the upstream does //!TODO: implement support for multiple ranges h.carbonCopyProxy() return } reqRange := ranges[0] httputils.CopyHeaders(h.obj.Headers, h.resp.Header()) h.resp.Header().Set("Content-Range", reqRange.ContentRange(h.obj.Size)) h.resp.Header().Set("Content-Length", strconv.FormatUint(reqRange.Length, 10)) h.rewriteTimeBasedHeaders() h.resp.WriteHeader(http.StatusPartialContent) if h.req.Method == "HEAD" { return } h.lazilyRespond(ranges[0].Start, ranges[0].Start+ranges[0].Length-1) }
func copyRequest(r *http.Request) *http.Request { req := *r req.Header = http.Header{} url := *r.URL req.URL = &url httputils.CopyHeaders(r.Header, req.Header) return &req }
func (h *reqHandler) knownFull() { httputils.CopyHeaders(h.obj.Headers, h.resp.Header()) h.resp.Header().Set("Content-Length", strconv.FormatUint(h.obj.Size, 10)) h.rewriteTimeBasedHeaders() h.resp.WriteHeader(h.obj.Code) if h.req.Method == "HEAD" { return } h.lazilyRespond(0, h.obj.Size-1) }
// ServeHTTP proxies req to the default upstream and streams the response
// to rw. If the upstream's status code appears in p.CodesToRetry, the
// request is retried once against the upstream configured for that code.
// Hop-by-hop headers are stripped, and response trailers are announced
// before the body and copied after it — trailer values only exist once the
// body has been fully read and closed.
func (p *ReverseProxy) ServeHTTP(ctx context.Context, rw http.ResponseWriter, req *http.Request) {
	var upstream = p.defaultUpstream
	res, err := p.doRequestFor(rw, req, upstream)
	if err != nil {
		p.Logger.Logf("[%p] Proxy error: %v", req, err)
		httputils.Error(rw, http.StatusInternalServerError)
		return
	}
	// Retry once with an alternative upstream when configured for this code.
	if newUpstream, ok := p.CodesToRetry[res.StatusCode]; ok {
		upstream = getUpstreamFromContext(ctx, newUpstream)
		if upstream != nil {
			// Release the first response before issuing the retry.
			if err = res.Body.Close(); err != nil {
				p.Logger.Logf("[%p] Proxy error on closing response which will be retried: %v", req, err)
			}
			res, err = p.doRequestFor(rw, req, upstream)
			if err != nil {
				p.Logger.Logf("[%p] Proxy error: %v", req, err)
				httputils.Error(rw, http.StatusInternalServerError)
				return
			}
		} else {
			p.Logger.Errorf("[%p] Proxy was configured to retry on code %d with upstream %s but no such upstream exist", req, res.StatusCode, newUpstream)
		}
	}
	// Hop-by-hop headers are connection-specific and must not be forwarded.
	for _, h := range hopHeaders {
		res.Header.Del(h)
	}
	httputils.CopyHeaders(res.Header, rw.Header())
	// The "Trailer" header isn't included in the Transport's response,
	// at least for *http.Transport. Build it up from Trailer.
	if len(res.Trailer) > 0 {
		var trailerKeys []string
		for k := range res.Trailer {
			trailerKeys = append(trailerKeys, k)
		}
		rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
	}
	rw.WriteHeader(res.StatusCode)
	if len(res.Trailer) > 0 {
		// Force chunking if we saw a response trailer.
		// This prevents net/http from calculating the length for short
		// bodies and adding a Content-Length.
		if fl, ok := rw.(http.Flusher); ok {
			fl.Flush()
		}
	}
	if _, err := io.Copy(rw, res.Body); err != nil {
		p.Logger.Logf("[%p] Proxy error during copying: %v", req, err)
	}
	// Close now, instead of defer, to populate res.Trailer
	if err := res.Body.Close(); err != nil {
		p.Logger.Errorf("[%p] Proxy error during response close: %v", req, err)
	}
	// res.Trailer is only populated after the body is drained and closed.
	httputils.CopyHeaders(res.Trailer, rw.Header())
}