func (h *httpFSHandler) open(w http.ResponseWriter, r *http.Request) error {
	fi, err := h.fs.Stat(r.URL.Path)
	if err != nil {
		return err
	}
	// Headers must be set before any call to WriteHeader; the original
	// wrote them after the 304 status, where they were silently dropped.
	writeFileInfoHeaders(w, fi, true)
	if ifModSince, err := http.ParseTime(r.Header.Get("if-modified-since")); err == nil {
		if !fi.ModTime().IsZero() && fi.ModTime().Before(ifModSince) {
			w.WriteHeader(http.StatusNotModified)
			return nil
		}
	}
	f, err := h.fs.Open(r.URL.Path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(w, f)
	return err
}
func (r *Resource) Expires() (time.Time, error) {
	if expires := r.header.Get("Expires"); expires != "" {
		return http.ParseTime(expires)
	}
	return time.Time{}, nil
}
func (r *Resource) MaxAge(shared bool) (time.Duration, error) {
	cc, err := r.cacheControl()
	if err != nil {
		return time.Duration(0), err
	}
	if cc.Has("s-maxage") && shared {
		if maxAge, err := cc.Duration("s-maxage"); err != nil {
			return time.Duration(0), err
		} else if maxAge > 0 {
			return maxAge, nil
		}
	}
	if cc.Has("max-age") {
		if maxAge, err := cc.Duration("max-age"); err != nil {
			return time.Duration(0), err
		} else if maxAge > 0 {
			return maxAge, nil
		}
	}
	if expiresVal := r.header.Get("Expires"); expiresVal != "" {
		expires, err := http.ParseTime(expiresVal)
		if err != nil {
			return time.Duration(0), err
		}
		return expires.Sub(Clock()), nil
	}
	return time.Duration(0), nil
}
func (h *Host) storeRateLimit(call string, headers http.Header) {
	// Parse the rate-limit headers; a parse failure leaves the zero value.
	l, err := strconv.ParseInt(headers.Get("x-ratelimit-limit"), 10, 32)
	if err != nil {
		debugLogger.Println("Error parsing rate limit:", err)
	}
	u, err := strconv.ParseInt(headers.Get("x-ratelimit-used"), 10, 32)
	if err != nil {
		debugLogger.Println("Error parsing rate limit used:", err)
	}
	r, err := strconv.ParseInt(headers.Get("x-ratelimit-remaining"), 10, 32)
	if err != nil {
		debugLogger.Println("Error parsing rate limit remaining:", err)
	}
	debugLogger.Println(headers.Get("Date"))
	lc, err := http.ParseTime(headers.Get("Date"))
	if err != nil {
		debugLogger.Println("Error parsing date header:", err)
	}
	info := RateLimitInfo{
		Bucket:    headers.Get("x-ratelimit-bucket"),
		Used:      int(u),
		Limit:     int(l),
		Remaining: int(r),
		LastCall:  lc,
		Drift:     time.Since(lc),
	}
	debugLogger.Println("storing rate limit info for", call, "info", info)
	h.rateLimitLock.Lock()
	defer h.rateLimitLock.Unlock()
	// Only store if this response is at least as recent as the stored one.
	if !info.LastCall.Before(h.rateLimits[info.Bucket].LastCall) {
		h.callToBucket[call] = info.Bucket
		h.rateLimits[info.Bucket] = info
	}
}
// Check the server time is the same as ours, once for each server
func checkServerTime(req *http.Request, resp *http.Response) {
	host := req.URL.Host
	if req.Host != "" {
		host = req.Host
	}
	checkedHostMu.RLock()
	_, ok := checkedHost[host]
	checkedHostMu.RUnlock()
	if ok {
		return
	}
	dateString := resp.Header.Get("Date")
	if dateString == "" {
		return
	}
	date, err := http.ParseTime(dateString)
	if err != nil {
		Debug(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
		return
	}
	dt := time.Since(date)
	const window = 5 * 60 * time.Second
	if dt > window || dt < -window {
		Log(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
	}
	checkedHostMu.Lock()
	checkedHost[host] = struct{}{}
	checkedHostMu.Unlock()
}
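// (Editor's addition, not from the source above.) A minimal sketch of wiring
// checkServerTime into a client via a custom RoundTripper, so every response's
// Date header gets checked once per host. Assumes checkServerTime and its
// checkedHost map exist as in the snippet above.
type timeCheckTransport struct {
	next http.RoundTripper // e.g. http.DefaultTransport
}

func (t timeCheckTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := t.next.RoundTrip(req)
	if err == nil {
		checkServerTime(req, resp)
	}
	return resp, err
}

// Usage: client := &http.Client{Transport: timeCheckTransport{http.DefaultTransport}}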
func parseSearchResponse(httpResponse io.Reader, responseAddr *net.UDPAddr) (*SearchResponse, error) {
	reader := bufio.NewReader(httpResponse)
	request := &http.Request{} // Needed for ReadResponse but doesn't have to be real
	response, err := http.ReadResponse(reader, request)
	if err != nil {
		return nil, err
	}
	headers := response.Header
	res := &SearchResponse{}
	res.Control = headers.Get("cache-control")
	res.Server = headers.Get("server")
	res.ST = headers.Get("st")
	res.Ext = headers.Get("ext")
	res.USN = headers.Get("usn")
	res.ResponseAddr = responseAddr
	if headers.Get("location") != "" {
		res.Location, err = response.Location()
		if err != nil {
			return nil, err
		}
	}
	if date := headers.Get("date"); date != "" {
		res.Date, err = http.ParseTime(date)
		if err != nil {
			return nil, err
		}
	}
	return res, nil
}
func parse_time2(txt string) time.Time {
	t, err := http.ParseTime(txt)
	if err != nil { // fall back to the current time when parsing fails
		t = time.Now()
	}
	return t
}
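// (Editor's addition.) For reference, http.ParseTime tries the three formats
// allowed for HTTP dates (RFC 1123, RFC 850, and ANSI C asctime), which is why
// it works for Date, Expires, Last-Modified, and If-Modified-Since values alike.
func parseTimeFormatsDemo() {
	for _, s := range []string{
		"Tue, 15 Nov 1994 08:12:31 GMT",   // RFC 1123, the preferred format
		"Tuesday, 15-Nov-94 08:12:31 GMT", // RFC 850, obsolete but still parsed
		"Tue Nov 15 08:12:31 1994",        // ANSI C asctime()
	} {
		t, err := http.ParseTime(s)
		fmt.Println(t, err)
	}
}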
func timeHeader(key string, h http.Header) (time.Time, error) {
	if header := h.Get(key); header != "" {
		return http.ParseTime(header)
	}
	return time.Time{}, errNoHeader
}
func retryAfter(v string) time.Duration {
	if i, err := strconv.Atoi(v); err == nil {
		return time.Duration(i) * time.Second
	}
	if t, err := http.ParseTime(v); err == nil {
		return time.Until(t)
	}
	return time.Second
}
func (self *JenkinsClientDownloader) downloadJar(config *util.Config) error {
	util.GOut("DOWNLOAD", "Getting latest Jenkins client %v", config.CIHostURI+"/"+ClientJarURL)

	// Create the HTTP request.
	request, err := config.CIRequest("GET", ClientJarURL, nil)
	if err != nil {
		return err
	}
	if fi, err := os.Stat(ClientJarName); err == nil {
		request.Header.Add("If-Modified-Since", fi.ModTime().Format(http.TimeFormat))
	}

	// Perform the HTTP request.
	var source io.ReadCloser
	sourceTime := time.Now()
	if response, err := config.CIClient().Do(request); err == nil {
		defer response.Body.Close()
		source = response.Body
		if response.StatusCode == 304 {
			util.GOut("DOWNLOAD", "Jenkins client is up-to-date, no need to download.")
			return nil
		} else if response.StatusCode != 200 {
			return fmt.Errorf("Failed downloading jenkins client. Cause: HTTP-%v %v", response.StatusCode, response.Status)
		}
		if value := response.Header.Get("Last-Modified"); value != "" {
			// Note: the original named this variable "time", shadowing the
			// time package within this block; renamed for clarity.
			if parsed, err := http.ParseTime(value); err == nil {
				sourceTime = parsed
			}
		}
	} else {
		return fmt.Errorf("Failed downloading jenkins client. Connect failed. Cause: %v", err)
	}

	target, err := os.Create(ClientJarDownloadName)
	if err != nil {
		return fmt.Errorf("Failed downloading jenkins client. Cannot create local file. Cause: %v", err)
	}
	defer target.Close()

	if _, err = io.Copy(target, source); err != nil {
		return fmt.Errorf("Failed downloading jenkins client. Transfer failed. Cause: %v", err)
	}
	target.Close()
	if err = os.Remove(ClientJarName); err == nil || os.IsNotExist(err) {
		if err = os.Rename(ClientJarDownloadName, ClientJarName); err == nil {
			os.Chtimes(ClientJarName, sourceTime, sourceTime)
		}
	}
	return err
}
func (r *Resource) DateAfter(d time.Time) bool {
	if dateHeader := r.header.Get("Date"); dateHeader != "" {
		if t, err := http.ParseTime(dateHeader); err == nil {
			return t.After(d)
		}
	}
	return false
}
func retryAfter(v string) (time.Duration, error) {
	if i, err := strconv.Atoi(v); err == nil {
		return time.Duration(i) * time.Second, nil
	}
	t, err := http.ParseTime(v)
	if err != nil {
		return 0, err
	}
	return t.Sub(timeNow()), nil
}
func (r *Resource) LastModified() time.Time {
	var modTime time.Time
	if lastModHeader := r.header.Get("Last-Modified"); lastModHeader != "" {
		if t, err := http.ParseTime(lastModHeader); err == nil {
			modTime = t
		}
	}
	return modTime
}
// GetLaterTimeStr returns the later of two HTTP-formatted time strings.
func GetLaterTimeStr(a, b string) (result string, err error) {
	timeA, err := http.ParseTime(a)
	if err != nil {
		log.Printf("[ERROR] failed to parse string %s as http time", a)
		return
	}
	timeB, err := http.ParseTime(b)
	if err != nil {
		log.Printf("[ERROR] failed to parse string %s as http time", b)
		return
	}
	if timeA.After(timeB) {
		result = a
	} else {
		result = b
	}
	return
}
// checkETag implements If-None-Match and If-Range checks.
//
// The ETag or modtime must have been previously set in the
// ResponseWriter's headers. The modtime is only compared at second
// granularity and may be the zero value to mean unknown.
//
// The return value is the effective request "Range" header to use and
// whether this request is now considered done.
func checkETag(w http.ResponseWriter, r *http.Request, modtime time.Time) (rangeReq string, done bool) {
	etag := w.Header().Get("Etag")
	rangeReq = r.Header.Get("Range")

	// Invalidate the range request if the entity doesn't match the one
	// the client was expecting.
	// "If-Range: version" means "ignore the Range: header unless version matches the
	// current file."
	// We only support ETag versions.
	// The caller must have set the ETag on the response already.
	if ir := r.Header.Get("If-Range"); ir != "" && ir != etag {
		// The If-Range value is typically the ETag value, but it may also be
		// the modtime date. See golang.org/issue/8367.
		timeMatches := false
		if !modtime.IsZero() {
			if t, err := http.ParseTime(ir); err == nil && t.Unix() == modtime.Unix() {
				timeMatches = true
			}
		}
		if !timeMatches {
			rangeReq = ""
		}
	}

	if inm := r.Header.Get("If-None-Match"); inm != "" {
		// Must know ETag.
		if etag == "" {
			return rangeReq, false
		}

		// TODO(bradfitz): non-GET/HEAD requests require more work:
		// sending a different status code on matches, and
		// also can't use weak cache validators (those with a "W/"
		// prefix). But most users of ServeContent will be using
		// it on GET or HEAD, so only support those for now.
		if r.Method != "GET" && r.Method != "HEAD" {
			return rangeReq, false
		}

		// TODO(bradfitz): deal with comma-separated or multiple-valued
		// list of If-None-Match values. For now just handle the common
		// case of a single item.
		if inm == etag || inm == "*" {
			h := w.Header()
			delete(h, "Content-Type")
			delete(h, "Content-Length")
			w.WriteHeader(http.StatusNotModified)
			return "", true
		}
	}
	return rangeReq, false
}
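// (Editor's addition, not part of the original.) A minimal sketch of how a
// handler might drive checkETag above; serveBlob and its arguments are
// hypothetical. The ETag must be set on the response before the check runs.
func serveBlob(w http.ResponseWriter, r *http.Request, blob []byte, modtime time.Time) {
	w.Header().Set("Etag", `"v1"`) // assumed version tag for the blob
	w.Header().Set("Last-Modified", modtime.UTC().Format(http.TimeFormat))
	if _, done := checkETag(w, r, modtime); done {
		return // a 304 Not Modified has already been written
	}
	w.Write(blob)
}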
func (r *responderimpl) Modified(t time.Time, ot OperationType) bool {
	t = t.UTC()
	r.hw.Header().Set("Last-Modified", t.Format(http.TimeFormat))
	nt, te := http.ParseTime(r.r.Header.Get("If-Modified-Since"))
	if te != nil {
		return false
	}
	// Compare at second granularity. The original compared only the
	// seconds-of-minute component (Second()), which would also match
	// times that are whole minutes apart.
	b := nt.Unix() == t.Unix()
	if (b && ot&SIMULATE == 0) || ot&FORCE != 0 {
		r.hw.WriteHeader(http.StatusNotModified)
	}
	return b
}
func main() {
	req, _ := http.NewRequest("GET", "http://www.example.com/", nil)
	res, _ := http.DefaultClient.Do(req)
	_, _ = ioutil.ReadAll(res.Body)

	reqDir, _ := cacheobject.ParseRequestCacheControl(req.Header.Get("Cache-Control"))
	resDir, _ := cacheobject.ParseResponseCacheControl(res.Header.Get("Cache-Control"))

	expiresHeader, _ := http.ParseTime(res.Header.Get("Expires"))
	dateHeader, _ := http.ParseTime(res.Header.Get("Date"))
	lastModifiedHeader, _ := http.ParseTime(res.Header.Get("Last-Modified"))

	obj := cacheobject.Object{
		RespDirectives:         resDir,
		RespHeaders:            res.Header,
		RespStatusCode:         res.StatusCode,
		RespExpiresHeader:      expiresHeader,
		RespDateHeader:         dateHeader,
		RespLastModifiedHeader: lastModifiedHeader,
		ReqDirectives:          reqDir,
		ReqHeaders:             req.Header,
		ReqMethod:              req.Method,
		NowUTC:                 time.Now().UTC(),
	}
	rv := cacheobject.ObjectResults{}

	cacheobject.CachableObject(&obj, &rv)
	cacheobject.ExpirationObject(&obj, &rv)

	fmt.Println("Errors: ", rv.OutErr)
	fmt.Println("Reasons to not cache: ", rv.OutReasons)
	fmt.Println("Warning headers to add: ", rv.OutWarnings)
	fmt.Println("Expiration: ", rv.OutExpirationTime.String())
}
// fetchRemoteFile retrieves a codelab resource from url.
// It is a special case of the fetchRemote function.
func fetchRemoteFile(url string) (*resource, error) {
	res, err := retryGet(nil, url, 3)
	if err != nil {
		return nil, err
	}
	t, err := http.ParseTime(res.Header.Get("last-modified"))
	if err != nil {
		t = time.Now()
	}
	return &resource{
		body: res.Body,
		mod:  t,
		typ:  srcMarkdown,
	}, nil
}
func checkETag(reply *Reply, r *http.Request, modtime time.Time) (rangeReq string, done bool) {
	etag := reply.GetHeader("Etag")
	rangeReq = r.Header.Get("Range")
	if ir := r.Header.Get("If-Range"); ir != "" && ir != etag {
		// The If-Range value is typically the ETag value, but it may also be
		// the modtime date. See golang.org/issue/8367.
		timeMatches := false
		if !modtime.IsZero() {
			if t, err := http.ParseTime(ir); err == nil && t.Unix() == modtime.Unix() {
				timeMatches = true
			}
		}
		if !timeMatches {
			rangeReq = ""
		}
	}
	if inm := r.Header.Get("If-None-Match"); inm != "" {
		// Must know ETag.
		if etag == "" {
			return rangeReq, false
		}
		// TODO(bradfitz): non-GET/HEAD requests require more work:
		// sending a different status code on matches, and
		// also can't use weak cache validators (those with a "W/"
		// prefix). But most users of ServeContent will be using
		// it on GET or HEAD, so only support those for now.
		if r.Method != "GET" && r.Method != "HEAD" {
			return rangeReq, false
		}
		// TODO(bradfitz): deal with comma-separated or multiple-valued
		// list of If-None-Match values. For now just handle the common
		// case of a single item.
		if inm == etag || inm == "*" {
			reply.DelHeader("Content-Type")
			reply.DelHeader("Content-Length")
			reply.SetCode(http.StatusNotModified)
			return "", true
		}
	}
	return rangeReq, false
}
// ParseRetryAfter parses a Retry-After header value. Per RFC 7231
// section 7.1.3, the value may be either an absolute time or the
// number of seconds to wait.
func ParseRetryAfter(header string) (d time.Duration, ok bool) {
	if len(header) == 0 {
		return 0, false
	}
	sec, err := strconv.ParseInt(header, 10, 64)
	if err != nil {
		t, err := http.ParseTime(header)
		if err != nil {
			return 0, false
		}
		d = t.Sub(timeNow())
	} else {
		d = time.Duration(sec) * time.Second
	}
	if d > 0 {
		return d, true
	}
	return 0, false
}
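// (Editor's addition.) A quick check of the two Retry-After forms
// ParseRetryAfter accepts; timeNow is assumed to alias time.Now, as the
// snippet above implies.
func retryAfterDemo() {
	d, ok := ParseRetryAfter("120") // delta-seconds form
	fmt.Println(d, ok)              // 2m0s true

	// HTTP-date form: the resulting duration depends on the current clock.
	future := timeNow().Add(time.Minute).UTC().Format(http.TimeFormat)
	d, ok = ParseRetryAfter(future)
	fmt.Println(d.Round(time.Second), ok) // ~1m0s true
}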
// saveResponse saves the response body to the cache, stamped with the
// Last-Modified time (falling back to the Date header).
func saveResponse(response *http.Response, cacheEntry CacheEntry) error {
	lastModifiedHeader := response.Header.Get("Last-Modified")
	if lastModifiedHeader == "" {
		lastModifiedHeader = response.Header.Get("Date")
		if lastModifiedHeader == "" {
			return errors.New("missing Last-Modified and Date headers")
		}
	}
	lastModified, err := http.ParseTime(lastModifiedHeader)
	if err != nil {
		return err
	}
	return cacheEntry.Write(response.Body, lastModified)
}
// DateTime parses a date/time from the string and returns a new DateTime object.
//
// If layout is given, DateTime() uses time.Parse() with the given layout.
// Otherwise, it uses http.ParseTime(). If a parsing error occurred,
// DateTime reports failure and returns an empty (but non-nil) object.
//
// Example:
//  str := NewString(t, "Tue, 15 Nov 1994 08:12:31 GMT")
//  str.DateTime().Lt(time.Now())
//
//  str := NewString(t, "15 Nov 94 08:12 GMT")
//  str.DateTime(time.RFC822).Lt(time.Now())
func (s *String) DateTime(layout ...string) *DateTime {
	if s.chain.failed() {
		return &DateTime{s.chain, time.Unix(0, 0)}
	}
	var (
		t   time.Time
		err error
	)
	if len(layout) != 0 {
		t, err = time.Parse(layout[0], s.value)
	} else {
		t, err = http.ParseTime(s.value)
	}
	if err != nil {
		s.chain.fail(err.Error())
		return &DateTime{s.chain, time.Unix(0, 0)}
	}
	return &DateTime{s.chain, t}
}
func (h *httpFSHandler) open(w http.ResponseWriter, r *http.Request) error {
	fi, err := h.fs.Stat(r.URL.Path)
	if err != nil {
		return err
	}
	// Headers must be set before any call to WriteHeader; the original
	// wrote them after the 304 status, where they were silently dropped.
	writeFileInfoHeaders(w, fi)
	if ifModSince, err := http.ParseTime(r.Header.Get("if-modified-since")); err == nil {
		if !fi.ModTime().IsZero() && fi.ModTime().Before(ifModSince) {
			w.WriteHeader(http.StatusNotModified)
			return nil
		}
	}
	f, err := h.fs.Open(r.URL.Path)
	if err != nil {
		return err
	}
	defer f.Close()
	rdr := io.Reader(f)
	// Support Range header.
	if rangeHeader := r.Header.Get("range"); rangeHeader != "" {
		bstart, bend, err := parseHTTPRange(rangeHeader)
		if err != nil {
			return err
		}
		if _, err := f.Seek(bstart, 0); err != nil {
			return err
		}
		rdr = io.LimitReader(f, bend-bstart)
	}
	_, err = io.Copy(w, rdr)
	return err
}
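// (Editor's addition.) parseHTTPRange is referenced above but not shown; this
// is a guessed, minimal stand-in handling only a single "bytes=start-end"
// range with both bounds present, not the original implementation. It returns
// an exclusive end so that io.LimitReader(f, bend-bstart) above copies the
// full inclusive HTTP range.
func parseHTTPRange(h string) (start, end int64, err error) {
	const prefix = "bytes="
	if !strings.HasPrefix(h, prefix) {
		return 0, 0, fmt.Errorf("unsupported range: %q", h)
	}
	parts := strings.SplitN(strings.TrimPrefix(h, prefix), "-", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return 0, 0, fmt.Errorf("malformed range: %q", h)
	}
	if start, err = strconv.ParseInt(parts[0], 10, 64); err != nil {
		return 0, 0, err
	}
	if end, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
		return 0, 0, err
	}
	return start, end + 1, nil
}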
func setHTTPResponseFileInfo(resp *http.Response, fi *fileInfo) error {
	if lastMod := resp.Header.Get("last-modified"); lastMod != "" {
		var err error
		fi.modTime, err = http.ParseTime(lastMod)
		if err != nil {
			return err
		}
	}
	if resp.StatusCode >= 300 && resp.StatusCode <= 399 {
		fi.symlink = true
	}
	switch resp.Header.Get("content-type") {
	case httpFileContentType:
		// default, nothing to do
	case httpDirContentType:
		fi.dir = true
	case httpSymlinkContentType:
		fi.symlink = true
	}
	fi.size = resp.ContentLength
	return nil
}
// Release unlocks the bucket and reads the headers to update the bucket's
// ratelimit info, and locks up the whole thing in case there's a global ratelimit.
func (b *Bucket) Release(headers http.Header) error {
	defer b.Unlock()
	if headers == nil {
		return nil
	}
	remaining := headers.Get("X-RateLimit-Remaining")
	reset := headers.Get("X-RateLimit-Reset")
	global := headers.Get("X-RateLimit-Global")
	retryAfter := headers.Get("Retry-After")

	// If it's global just keep the main ratelimit mutex locked
	if global != "" {
		parsedAfter, err := strconv.Atoi(retryAfter)
		if err != nil {
			return err
		}
		// Lock it in a new goroutine so that this isn't a blocking call
		go func() {
			// Make sure if several requests were waiting we don't sleep for n * retry-after
			// where n is the amount of requests that were going on
			sleepTo := time.Now().Add(time.Duration(parsedAfter) * time.Millisecond)
			b.global.Lock()
			sleepDuration := time.Until(sleepTo)
			if sleepDuration > 0 {
				time.Sleep(sleepDuration)
			}
			b.global.Unlock()
		}()
		return nil
	}

	// Update the reset time if either the Retry-After or Reset headers are present.
	// Prefer Retry-After because it's more accurate with time sync and whatnot.
	if retryAfter != "" {
		parsedAfter, err := strconv.ParseInt(retryAfter, 10, 64)
		if err != nil {
			return err
		}
		b.reset = time.Now().Add(time.Duration(parsedAfter) * time.Millisecond)
	} else if reset != "" {
		// Calculate the reset time by using the date header returned from discord
		discordTime, err := http.ParseTime(headers.Get("Date"))
		if err != nil {
			return err
		}
		unix, err := strconv.ParseInt(reset, 10, 64)
		if err != nil {
			return err
		}
		// Calculate the time until reset and add it to the current local time.
		// Some extra time is added because without it 429s were still encountered;
		// the added amount is the lowest that produced no 429s in 1k requests.
		delta := time.Unix(unix, 0).Sub(discordTime) + time.Millisecond*250
		b.reset = time.Now().Add(delta)
	}

	// Update remaining if the header is present
	if remaining != "" {
		parsedRemaining, err := strconv.ParseInt(remaining, 10, 32)
		if err != nil {
			return err
		}
		b.remaining = int(parsedRemaining)
	}
	return nil
}
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, "/")
		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += "/"
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := utils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present.
		// Otherwise just remove atime and mtime.
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero',
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := path.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}
			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
		}
		return nil
	}

	// Must be a dir or a file
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(path.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match the first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := path.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := path.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter)
	progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
	// Download and dump result to tmp file
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)

	tmpFileSt, err := tmpFile.Stat()
	if err != nil {
		return
	}
	tmpFile.Close()

	// Set the mtime to the Last-Modified header value if present.
	// Otherwise just remove atime and mtime.
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero',
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	// Calc the checksum, even if we're using the cache
	r, err := archive.Tar(tmpFileName, archive.Uncompressed)
	if err != nil {
		return
	}
	tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
	if err != nil {
		return
	}
	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
		return
	}
	hash := tarSum.Sum(nil)
	r.Close()
	return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
}
// UsingRequestResponse evaluates cachability based on an HTTP request and
// parts of the response.
func UsingRequestResponse(req *http.Request, statusCode int, respHeaders http.Header, privateCache bool) ([]Reason, time.Time, error) {
	var reqHeaders http.Header
	var reqMethod string
	var reqDir *RequestCacheDirectives

	respDir, err := ParseResponseCacheControl(respHeaders.Get("Cache-Control"))
	if err != nil {
		return nil, time.Time{}, err
	}

	if req != nil {
		reqDir, err = ParseRequestCacheControl(req.Header.Get("Cache-Control"))
		if err != nil {
			return nil, time.Time{}, err
		}
		reqHeaders = req.Header
		reqMethod = req.Method
	}

	var expiresHeader time.Time
	var dateHeader time.Time
	var lastModifiedHeader time.Time

	if respHeaders.Get("Expires") != "" {
		expiresHeader, err = http.ParseTime(respHeaders.Get("Expires"))
		if err != nil {
			return nil, time.Time{}, err
		}
		expiresHeader = expiresHeader.UTC()
	}

	if respHeaders.Get("Date") != "" {
		dateHeader, err = http.ParseTime(respHeaders.Get("Date"))
		if err != nil {
			return nil, time.Time{}, err
		}
		dateHeader = dateHeader.UTC()
	}

	if respHeaders.Get("Last-Modified") != "" {
		lastModifiedHeader, err = http.ParseTime(respHeaders.Get("Last-Modified"))
		if err != nil {
			return nil, time.Time{}, err
		}
		lastModifiedHeader = lastModifiedHeader.UTC()
	}

	obj := Object{
		CacheIsPrivate: privateCache,

		RespDirectives:         respDir,
		RespHeaders:            respHeaders,
		RespStatusCode:         statusCode,
		RespExpiresHeader:      expiresHeader,
		RespDateHeader:         dateHeader,
		RespLastModifiedHeader: lastModifiedHeader,

		ReqDirectives: reqDir,
		ReqHeaders:    reqHeaders,
		ReqMethod:     reqMethod,

		NowUTC: time.Now().UTC(),
	}
	rv := ObjectResults{}

	CachableObject(&obj, &rv)
	if rv.OutErr != nil {
		return nil, time.Time{}, rv.OutErr
	}

	ExpirationObject(&obj, &rv)
	if rv.OutErr != nil {
		return nil, time.Time{}, rv.OutErr
	}
	return rv.OutReasons, rv.OutExpirationTime, nil
}
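// (Editor's addition.) A minimal sketch of calling UsingRequestResponse above
// from a client; passing privateCache=false evaluates shared-cache rules.
func cacheDecisionDemo(req *http.Request, resp *http.Response) {
	reasons, expires, err := UsingRequestResponse(req, resp.StatusCode, resp.Header, false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reasons not to cache:", reasons)
	fmt.Println("expires at:", expires)
}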
func ExecQuerySQL(dbPath string, expectSize int, sqlStr string, args ...interface{}) (caches []*HtmlCache, err error) {
	_, err = os.Stat(dbPath)
	if nil != err {
		// db file not exists
		log.Printf("[ERROR] db %s not exists", dbPath)
		return
	}

	db, err := sql.Open(DB_DRIVER, dbPath)
	if nil != err {
		log.Printf("[ERROR] failed to open db %s: %s", dbPath, err)
		return
	}
	defer db.Close()

	statmt, err := db.Prepare(sqlStr)
	if nil != err {
		log.Printf("[ERROR] failed to prepare statment %s for db %s: %s", sqlStr, dbPath, err)
		return
	}
	defer statmt.Close()

	rows, err := statmt.Query(args...)
	if nil != err {
		log.Printf("[ERROR] failed to query with statment %s, %s", sqlStr, err)
		return
	}
	defer rows.Close()

	if expectSize > 0 {
		caches = make([]*HtmlCache, expectSize)
	}
	rowInd := 0
	for rows.Next() {
		c := new(HtmlCache)
		var urlStr, lastmod, expires, dateStr string
		err = rows.Scan(&urlStr, &dateStr, &c.CacheControl, &lastmod, &c.Etag, &expires, &c.Html)
		if nil != err {
			log.Printf("[ERROR] failed to scan data from result row: %s", err)
			return
		}
		// decompress html data
		if 0 != *gGzipCompressLevel {
			buff := bytes.NewBuffer(c.Html)
			gzipR, err := gzip.NewReader(buff)
			if nil != err {
				if *gDebug {
					log.Printf("[WARN] failed to decompress html data for %s: %s", urlStr, err)
				}
			} else {
				c.Html, err = ioutil.ReadAll(gzipR)
			}
		}
		if c.URL, err = url.Parse(urlStr); nil != err {
			log.Printf("[ERROR] failed to parse url from rawurl string %s: %s", urlStr, err)
		}
		if "" != lastmod {
			c.LastModified = new(time.Time)
			if *c.LastModified, err = http.ParseTime(lastmod); nil != err {
				log.Printf("[ERROR] failed to parse lastmod time string %s: %s", lastmod, err)
			}
		}
		if "" != expires {
			c.Expires = new(time.Time)
			if *c.Expires, err = http.ParseTime(expires); nil != err {
				log.Printf("[ERROR] failed to parse expires time string %s: %s", expires, err)
			}
		}
		if "" != dateStr {
			c.Date = new(time.Time)
			if *c.Date, err = http.ParseTime(dateStr); nil != err {
				log.Printf("[ERROR] failed to parse cache date %s: %s", dateStr, err)
			}
		}
		caches = append(caches[:rowInd], c)
		rowInd += 1
	}

	// no result is also an error
	if 0 == rowInd {
		err = DBNoRecordError{}
		return
	}
	return
}
// LookUpInodeMaybeDir looks the name up both as an object and as a directory
// prefix; the returned inode has a nil Id.
func (fs *Ossvfs) LookUpInodeMaybeDir(name string, fullName string) (inode *Inode, err error) {
	errObjectChan := make(chan error, 1)
	objectChan := make(chan *http.Response, 1)
	errDirChan := make(chan error, 1)
	dirChan := make(chan *oss.ListResp, 1)

	go fs.LookUpInodeNotDir(fullName, objectChan, errObjectChan)
	go fs.LookUpInodeDir(fullName, dirChan, errDirChan)

	notFound := false

	for {
		select {
		// TODO: if both object and object/ exists, return dir
		case resp := <-objectChan:
			inode = NewInode(&name, &fullName, fs.flags)
			lastModifiedTime, tmperr := http.ParseTime(resp.Header.Get("Last-Modified"))
			if tmperr != nil {
				panic("invalid Last-Modified time: " + resp.Header.Get("Last-Modified"))
			}
			inode.Attributes = &fuseops.InodeAttributes{
				Size:   uint64(resp.ContentLength),
				Nlink:  1,
				Mode:   fs.flags.FileMode,
				Atime:  lastModifiedTime,
				Mtime:  lastModifiedTime,
				Ctime:  lastModifiedTime,
				Crtime: lastModifiedTime,
				Uid:    fs.flags.Uid,
				Gid:    fs.flags.Gid,
			}
			return
		case err = <-errObjectChan:
			if err == fuse.ENOENT {
				if notFound {
					return nil, err
				}
				notFound = true
				err = nil
			} else {
				// TODO: retry
			}
		case resp := <-dirChan:
			if len(resp.CommonPrefixes) != 0 || len(resp.Contents) != 0 {
				inode = NewInode(&name, &fullName, fs.flags)
				inode.Attributes = &fs.rootAttrs
				return
			}
			// 404
			if notFound {
				return nil, fuse.ENOENT
			}
			notFound = true
		case err = <-errDirChan:
			// TODO: retry
		}
	}
}