func (h *reqHandler) getUpstreamReader(start, end uint64) io.ReadCloser { subh := *h subh.req = subh.getNormalizedRequest() subh.req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) h.Logger.Debugf("[%p] Making upstream request for %s, bytes [%d-%d]...", subh.req, subh.req.URL, start, end) //!TODO: optimize requests for the same pieces? if possible, make only 1 request to the upstream for the same part r, w := io.Pipe() subh.resp = httputils.NewFlexibleResponseWriter(func(rw *httputils.FlexibleResponseWriter) { respRng, err := httputils.GetResponseRange(rw.Code, rw.Headers) if err != nil { h.Logger.Errorf("[%p] Could not parse the content-range for the partial upstream request: %s", subh.req, err) _ = w.CloseWithError(err) } h.Logger.Debugf("[%p] Received response with status %d and range %v", subh.req, rw.Code, respRng) if rw.Code == http.StatusPartialContent { //!TODO: check whether the returned range corresponds to the requested range rw.BodyWriter = w } else if rw.Code == http.StatusOK { //!TODO: handle this, use skipWriter or something like that _ = w.CloseWithError(fmt.Errorf("NOT IMPLEMENTED")) } else { _ = w.CloseWithError(fmt.Errorf("Upstream responded with status %d", rw.Code)) } }) go subh.carbonCopyProxy() return newWholeChunkReadCloser(r, h.Cache.PartSize.Bytes()) }
// getResponseHook returns the callback invoked when the upstream response
// headers for the client request arrive. It forwards headers/status to the
// client and decides whether the response should also be cached: if so, it
// saves the object metadata, tees the body into the cache via PartWriter,
// and schedules the object's expiration. On every non-cacheable exit path
// the body is streamed to the client only.
func (h *reqHandler) getResponseHook() func(*httputils.FlexibleResponseWriter) {
	return func(rw *httputils.FlexibleResponseWriter) {
		h.Logger.Debugf("[%p] Received headers for %s, sending them to client...", h.req, h.req.URL)
		// Forward the upstream headers minus hop-by-hop ones, then commit the status.
		// WriteHeader must happen before any body bytes are written to h.resp.
		httputils.CopyHeadersWithout(rw.Headers, h.resp.Header(), hopHeaders...)
		h.resp.WriteHeader(rw.Code)

		// Guard 1: the response itself (status + headers) must be cacheable.
		isCacheable := cacheutils.IsResponseCacheable(rw.Code, rw.Headers)
		if !isCacheable {
			h.Logger.Debugf("[%p] Response is non-cacheable", h.req)
			rw.BodyWriter = utils.AddCloser(h.resp)
			return
		}

		// Guard 2: it must have a positive freshness lifetime.
		expiresIn := cacheutils.ResponseExpiresIn(rw.Headers, h.CacheDefaultDuration)
		if expiresIn <= 0 {
			h.Logger.Debugf("[%p] Response expires in the past: %s", h.req, expiresIn)
			rw.BodyWriter = utils.AddCloser(h.resp)
			return
		}

		// Guard 3: we must be able to determine which byte range (and total
		// object size) this response covers, so parts can be stored correctly.
		responseRange, err := httputils.GetResponseRange(rw.Code, rw.Headers)
		if err != nil {
			h.Logger.Debugf("[%p] Was not able to get response range (%s)", h.req, err)
			rw.BodyWriter = utils.AddCloser(h.resp)
			return
		}

		h.Logger.Debugf("[%p] Response is cacheable! Caching metadata and parts...", h.req)

		code := rw.Code
		if code == http.StatusPartialContent {
			// 206 is returned only if the server would have returned 200 with a normal request
			code = http.StatusOK
		}

		//!TODO: maybe call cached time.Now. See the comment in utils.IsMetadataFresh
		now := time.Now()

		// Build the metadata record that describes the cached object.
		obj := &types.ObjectMetadata{
			ID:                h.objID,
			ResponseTimestamp: now.Unix(),
			Code:              code,
			Size:              responseRange.ObjSize,
			Headers:           make(http.Header),
			ExpiresAt:         now.Add(expiresIn).Unix(),
		}
		httputils.CopyHeadersWithout(rw.Headers, obj.Headers, metadataHeadersToFilter...)

		if obj.Headers.Get("Date") == "" { // maybe the server does not return date, we should set it then
			obj.Headers.Set("Date", now.Format(http.TimeFormat))
		}

		//!TODO: consult the cache algorithm whether to save the metadata
		//!TODO: optimize this, save the metadata only when it's newer
		//!TODO: also, error if we already have fresh metadata but the received metadata is different
		if err := h.Cache.Storage.SaveMetadata(obj); err != nil {
			// Metadata could not be persisted; fall back to plain proxying.
			h.Logger.Errorf("[%p] Could not save metadata for %s: %s", h.req, obj.ID, err)
			rw.BodyWriter = utils.AddCloser(h.resp)
			return
		}

		// HEAD responses carry no body, so there are no parts to cache.
		if h.req.Method == "HEAD" {
			rw.BodyWriter = utils.AddCloser(h.resp)
			return
		}

		// Tee the body: one copy to the client, one into the cache parts.
		rw.BodyWriter = utils.MultiWriteCloser(
			utils.AddCloser(h.resp),
			PartWriter(h.Cache, h.objID, *responseRange),
		)

		// Schedule eviction when the object's freshness lifetime elapses.
		h.Logger.Debugf("[%p] Setting the cached data to expire in %s", h.req, expiresIn)
		h.Cache.Scheduler.AddEvent(
			h.objID.Hash(),
			storage.GetExpirationHandler(h.Cache, h.Logger, h.objID),
			expiresIn,
		)
	}
}