// create tar.Header from os.FileInfo func Finfo2Theader(fi os.FileInfo) (hdr *tar.Header, err error) { m := fi.Mode() var ( ln string tm byte ) tm = tar.TypeReg switch { case m&os.ModeSymlink != 0: tm = tar.TypeSymlink /*if lfi, err := os.Lstat(fi.Name()); err == nil { ln = lfi.Name() }*/ case m&os.ModeDevice != 0 && m&os.ModeCharDevice != 0: tm = tar.TypeChar case m&os.ModeDevice != 0: tm = tar.TypeBlock case m&os.ModeNamedPipe != 0 || m&os.ModeSocket != 0: tm = tar.TypeFifo } tim := fi.ModTime() hdr = &tar.Header{Name: fi.Name(), Mode: int64(m.Perm()), Size: fi.Size(), ModTime: tim, Typeflag: tm, Linkname: ln} FillHeader(hdr) return }
// shouldUpdate reports whether the local file must be uploaded again,
// i.e. whether the remote copy described by resp is stale.
//
// With newermetamtime set it compares the file's mtime against the
// "x-amz-meta-last-modified" response header; with newer set, against the
// standard "Last-Modified" header. An unparsable header is treated as
// stale so the file is uploaded again.
//
// Fixes vs. original: the second branch's error message wrongly mentioned
// "metamtime" (copy-paste), and a zero-value parsed time was compared even
// after a parse error (harmless but misleading).
func shouldUpdate(resp *http.Response, fi os.FileInfo, newer, newermetamtime bool) bool {
	stale := false
	filemtime := fi.ModTime()
	if newermetamtime {
		parsed, err := time.Parse(time.RFC1123, resp.Header.Get("x-amz-meta-last-modified"))
		if err != nil {
			// Can't see metamtime; upload anyhow.
			stale = true
			fmt.Fprint(os.Stderr, "Can't read metamtime, setting stale to upload again\n")
		} else if parsed.Before(filemtime) {
			stale = true
		}
	}
	if newer {
		parsed, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
		if err != nil {
			// Can't see Last-Modified; upload anyhow.
			stale = true
			fmt.Fprint(os.Stderr, "Can't read Last-Modified, setting stale to upload again\n")
		} else if parsed.Before(filemtime) {
			stale = true
		}
	}
	return stale
}
func archiveFile(fname string, cfg *conf.Config, rootStat os.FileInfo, fs os.FileInfo) (newname string, err error) { year := strconv.Itoa(fs.ModTime().Year()) month := fs.ModTime().Month().String() archivePath := filepath.Join(cfg.ArchivePath(rootStat.Name()), year, month) err = os.MkdirAll(archivePath, rootStat.Mode()) if err != nil && !os.IsExist(err) { return } zipPath := archivePath + ".zip" if util.FileExists(zipPath) { //unzip so we can archive the new file ... it will be rezipped later if err = util.Unzip(zipPath, archivePath); err != nil { return } } newname = filepath.Join(archivePath, fs.Name()) if _, err = os.Stat(newname); err == nil { err = fmt.Errorf("A file of the same name already exists in the archive") return } err = os.Rename(fname, newname) return }
func (self *F) visit(path string, f os.FileInfo, err error) error { if f == nil { return err } //如果是txt文本 if strings.HasSuffix(f.Name(), "txt") { var tp int if f.IsDir() { tp = IsDirectory } else if (f.Mode() & os.ModeSymlink) > 0 { tp = IsSymlink } else { tp = IsRegular } inoFile := &sysFile{ fName: path, fType: tp, fPerm: f.Mode(), fMtime: f.ModTime(), fSize: f.Size(), fShortName: f.Name(), } self.files = append(self.files, inoFile) } return nil }
func (p *Page) checkHtmlDoWrite(tplFi, htmlFi os.FileInfo, htmlErr error) bool { var doWrite bool if p.Config.AutoGenerateHtmlCycleTime <= 0 { doWrite = true } else { if htmlErr != nil { doWrite = true } else { switch { case tplFi.ModTime().Unix() >= htmlFi.ModTime().Unix(): doWrite = true case tplFi.ModTime().Unix() >= htmlFi.ModTime().Unix(): doWrite = true case time.Now().Unix()-htmlFi.ModTime().Unix() >= p.Config.AutoGenerateHtmlCycleTime: doWrite = true default: if globalTplCache := p.site.GetTemplateCache("globalTpl"); globalTplCache.ModTime > 0 && globalTplCache.ModTime >= htmlFi.ModTime().Unix() { doWrite = true } } } } return doWrite }
func procesImage(path string, f os.FileInfo, err error) error { if f.IsDir() { return nil } log.Debugf("Processing %s", path) extension := filepath.Ext(f.Name()) if !isSupportPhotoType(strings.ToLower(extension)) { log.Warnf("%s's file type %s is unsupported", path, extension) return nil } reader := exif.New() err = reader.Open(path) if err != nil { log.Fatal(err) } str := fmt.Sprintf("%s", reader.Tags["Date and Time"]) t := f.ModTime() if len(str) == 0 { log.Warnf("Date and Time EXIF tag missing for %s", path) } else { layout := "2006:01:02 15:04:05" t, err = time.Parse(layout, str) if err != nil { log.Fatal(err) } } newDir := fmt.Sprintf("%s/%4d/%02d/%02d", destPath, t.Year(), t.Month(), t.Day()) err = os.MkdirAll(newDir, 0777) if err != nil { log.Fatal(err) } newFile := fmt.Sprintf("%s/%s", newDir, f.Name()) if mode == "move" { log.Debugf("Moving %s %s", path, newFile) err = os.Rename(path, newFile) } else { if _, err := os.Stat(newFile); err == nil { log.Warnf("Photo %s already exists", newFile) } else { log.Debugf("Copying %s %s", path, newFile) err = copyFile(path, newFile) } } if err != nil { log.Fatal(err) } return nil }
// Write a single file to TarGz func TarGzWrite(item ArchiveItem, tw *tar.Writer, fi os.FileInfo) (err error) { if item.FileSystemPath != "" { fr, err := os.Open(item.FileSystemPath) if err == nil { defer fr.Close() h := new(tar.Header) h.Name = item.ArchivePath h.Size = fi.Size() h.Mode = int64(fi.Mode()) h.ModTime = fi.ModTime() err = tw.WriteHeader(h) if err == nil { _, err = io.Copy(tw, fr) } } } else { h := new(tar.Header) //backslash-only paths h.Name = strings.Replace(item.ArchivePath, "\\", "/", -1) h.Size = int64(len(item.Data)) h.Mode = int64(0644) //? is this ok? h.ModTime = time.Now() err = tw.WriteHeader(h) if err == nil { _, err = tw.Write(item.Data) } } return err }
// watch watchers the file for changes func (d *FileQuery) watch(lastStat os.FileInfo) <-chan *watchResult { ch := make(chan *watchResult, 1) go func(lastStat os.FileInfo) { for { stat, err := os.Stat(d.path) if err != nil { select { case <-d.stopCh: return case ch <- &watchResult{err: err}: return } } changed := lastStat == nil || lastStat.Size() != stat.Size() || lastStat.ModTime() != stat.ModTime() if changed { select { case <-d.stopCh: return case ch <- &watchResult{stat: stat}: return } } time.Sleep(FileQuerySleepTime) } }(lastStat) return ch }
func (repo *LocalfsImagerep) fillLocalfsImage(image *models.Image, fileinfo os.FileInfo) bool { // ubuntu-14.04_x86_64_raw.img -> name: ubuntu-14.04, arch: x86_64, type: raw.img imginfo := strings.SplitN(fileinfo.Name(), "_", 3) if len(imginfo) != 3 { log.WithField("image", fileinfo.Name()).Info("skipping image with invalid name") return false } image.Name = imginfo[0] image.Size = fileinfo.Size() image.Date = fileinfo.ModTime() image.Filename = fileinfo.Name() image.FullPath = filepath.Join(repo.Root, fileinfo.Name()) switch imginfo[1] { default: log.WithField("filename", fileinfo.Name()).WithField("parts", imginfo).Info("skipping unknown image architecture") return false case "amd64": image.Arch = models.IMAGE_ARCH_X86_64 case "i386": image.Arch = models.IMAGE_ARCH_X86 } switch imginfo[2] { default: log.WithField("filename", fileinfo.Name()).WithField("parts", imginfo).Info("skipping unknown image type") return false case "raw.img": image.Type = models.IMAGE_FMT_RAW case "qcow2.img": image.Type = models.IMAGE_FMT_QCOW2 } return true }
// List all the volumes from diskPath. func listVols(dirPath string) ([]VolInfo, error) { if err := checkPathLength(dirPath); err != nil { return nil, err } entries, err := readDir(dirPath) if err != nil { return nil, errDiskNotFound } var volsInfo []VolInfo for _, entry := range entries { if !strings.HasSuffix(entry, slashSeparator) || !isValidVolname(slashpath.Clean(entry)) { // Skip if entry is neither a directory not a valid volume name. continue } var fi os.FileInfo fi, err = os.Stat(preparePath(pathJoin(dirPath, entry))) if err != nil { // If the file does not exist, skip the entry. if os.IsNotExist(err) { continue } return nil, err } volsInfo = append(volsInfo, VolInfo{ Name: fi.Name(), // As os.Stat() doesn't carry other than ModTime(), use // ModTime() as CreatedTime. Created: fi.ModTime(), }) } return volsInfo, nil }
// GetLatestReport searches the storageLocation to find the latest report file. // It searches based on filename, not on modified or created time, because // both can be updated after/before the date in the filename. func (fs *FilesystemBackend) GetLatestReport() (File, error) { var reporterFile File files, err := ioutil.ReadDir(fs.storageLocation) if err != nil { return reporterFile, err } var latestDate time.Time var latestFile os.FileInfo for _, file := range files { if strings.Contains(file.Name(), "-reporter-export.json") { filenameDate, err := dateForFilename(file.Name()) if err != nil { return reporterFile, err } if filenameDate.After(latestDate) { latestDate = filenameDate latestFile = file } } } filePath := filepath.Join(fs.storageLocation, latestFile.Name()) fileContents, err := ioutil.ReadFile(filePath) if err != nil { return reporterFile, err } return File{ Name: latestFile.Name(), Path: filePath, Source: "filesystem", ModifiedTime: latestFile.ModTime(), TimeFromFilename: latestDate, Contents: string(fileContents), }, nil }
func (node *Node) isNewer(path string, fi os.FileInfo) bool { if node.Type != "file" { debug.Log("node.isNewer", "node %v is newer: not file", path) return true } tpe := nodeTypeFromFileInfo(fi) if node.Name != fi.Name() || node.Type != tpe { debug.Log("node.isNewer", "node %v is newer: name or type changed", path) return true } extendedStat := fi.Sys().(*syscall.Stat_t) inode := extendedStat.Ino size := uint64(extendedStat.Size) if node.ModTime != fi.ModTime() || node.ChangeTime != changeTime(extendedStat) || node.Inode != uint64(inode) || node.Size != size { debug.Log("node.isNewer", "node %v is newer: timestamp, size or inode changed", path) return true } debug.Log("node.isNewer", "node %v is not newer", path) return false }
// isModifiedSince checks to see if the file has changed since the client last requested // // Checks for 'If-Modified-Since' header and compares timestamp against current // timestamp of file. Returns true if the files timestamp is different to the one the // client sent along func isModifiedSince(req *http.Request, url string, fi os.FileInfo) bool { modifiedSince, msPresent := req.Header[HeaderIfModifiedSince] if msPresent && len(modifiedSince) > 0 { ms := modifiedSince[0] var parsedTime time.Time var err error // http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html (3.3 Date/Time Formats) switch ms[3] { // RFC 822, updated by RFC 1123 - Sun, 06 Nov 1994 08:49:37 GMT case ',': parsedTime, err = time.Parse(time.RFC1123, ms) // ANSI C's asctime() format - Sunday, 06-Nov-94 08:49:37 GMT case ' ': parsedTime, err = time.Parse(time.ANSIC, ms) // RFC 850, obsoleted by RFC 1036 - Sun Nov 6 08:49:37 1994 default: parsedTime, err = time.Parse(time.RFC850, ms) } // Can only continue with this if we have a valid date if err == nil { if fi.ModTime().Truncate(time.Second).Equal(parsedTime) { return false } } } return true }
// walker implements filepath.WalkFunc. func walker(path string, info os.FileInfo, err error) error { cutoff, err := time.ParseDuration(fmt.Sprintf("%dh", days*24)) if err != nil { log.Fatal(err) } if time.Now().Sub(info.ModTime()) < cutoff { return nil } // Don't delete a directory unless it's empty. if info.IsDir() { files, err := ioutil.ReadDir(path) if err != nil { log.Fatal(err) } if len(files) > 0 { log.Printf("directory %s contains files; skipping\n", path) return nil } log.Printf("directory %s is empty\n", path) } log.Printf("deleting %s\n", path) if !test { err = os.Remove(path) } return nil }
// getModificationTime returns the modification time of the file. func getModificationTime(file os.FileInfo) time.Time { if *printUTC { return file.ModTime().UTC() } else { return file.ModTime() } }
// GetModifyDate returns last modify date of given mirror repository. func (mirror *Mirror) GetModifyDate() (time.Time, error) { dirs := []string{ "refs/heads", "refs/tags", } var ( modDate time.Time err error fileinfo os.FileInfo ) for _, dir := range dirs { fileinfo, err = os.Stat(mirror.Dir + "/" + dir) if err != nil { if os.IsNotExist(err) { continue } break } newModDate := fileinfo.ModTime() if newModDate.Unix() > modDate.Unix() { modDate = newModDate } } return modDate, err }
// walkDir emits a protocol.FileInfo record for directory relPath on dchan,
// unless the directory is unchanged from the version the CurrentFiler
// already knows about. It returns an error only when the walk is cancelled.
func (w *walker) walkDir(relPath string, info os.FileInfo, dchan chan protocol.FileInfo) error {
	// A directory is "unchanged", if it
	// - exists
	// - has the same permissions as previously, unless we are ignoring permissions
	// - was not marked deleted (since it apparently exists now)
	// - was a directory previously (not a file or something else)
	// - was not a symlink (since it's a directory now)
	// - was not invalid (since it looks valid now)
	cf, ok := w.CurrentFiler.CurrentFile(relPath)
	permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Permissions, uint32(info.Mode()))
	if ok && permUnchanged && !cf.IsDeleted() && cf.IsDirectory() && !cf.IsSymlink() && !cf.IsInvalid() {
		return nil
	}

	// Build the new record; the version vector is bumped with our short ID.
	f := protocol.FileInfo{
		Name:          relPath,
		Type:          protocol.FileInfoTypeDirectory,
		Version:       cf.Version.Update(w.ShortID),
		Permissions:   uint32(info.Mode() & maskModePerm),
		NoPermissions: w.IgnorePerms,
		ModifiedS:     info.ModTime().Unix(),
		ModifiedNs:    int32(info.ModTime().Nanosecond()),
		ModifiedBy:    w.ShortID,
	}
	l.Debugln("dir:", relPath, f)

	// Hand the record to the processing channel, bailing out if the walk
	// has been cancelled in the meantime.
	select {
	case dchan <- f:
	case <-w.Cancel:
		return errors.New("cancelled")
	}
	return nil
}
// isYounger reports whether the output file ofn is strictly newer than
// every file under ifn. It returns false when ofn is empty, missing, or
// not a regular file, or when any entry under ifn is at least as new as
// ofn (or the walk itself fails).
func isYounger(ofn string, ifn string) bool {
	if ofn == "" {
		return false
	}
	oinf, err := os.Stat(ofn)
	if err != nil || !oinf.Mode().IsRegular() {
		return false
	}
	// Abort the walk with an error as soon as one input entry is found
	// that is not older than the output file.
	check := func(p string, iinf os.FileInfo, e error) error {
		if e != nil {
			return e
		}
		if !oinf.ModTime().After(iinf.ModTime()) {
			return errors.New("this is younger")
		}
		return nil
	}
	return filepath.Walk(ifn, check) == nil
}
// fingerprint generates a short identifying hash for a file.
//
// NOTE(review): the previous comment claimed the hash covered the
// Last-Modified date and the *size* of the file; the code actually hashes
// the file *name* and the modification time (UnixNano) — size is not
// included. This technique isn't as reliable as hashing the entire body of
// the file, but it's good enough.
func fingerprint(info os.FileInfo) string {
	hasher := md5.New()
	// Hash writes never fail; errors are deliberately ignored.
	_, _ = io.WriteString(hasher, info.Name())
	_ = binary.Write(hasher, binary.LittleEndian, info.ModTime().UnixNano())
	h := hasher.Sum(nil)
	// Only the first 4 digest bytes (8 hex characters) are kept.
	return hex.EncodeToString(h[:4])
}
// writeTarGz appends one file to tarGzWriter. The tar entry is named by
// the file's path relative to baseDir and carries the file's size, mode
// bits and modification time; the file contents are then streamed in.
func writeTarGz(filePath, baseDir string, tarGzWriter *tar.Writer, fileInfo os.FileInfo) error {
	file, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer file.Close()

	relativePath, err := filepath.Rel(baseDir, filePath)
	if err != nil {
		return err
	}

	hdr := &tar.Header{
		Name:    relativePath,
		Size:    fileInfo.Size(),
		Mode:    int64(fileInfo.Mode()),
		ModTime: fileInfo.ModTime(),
	}
	if err := tarGzWriter.WriteHeader(hdr); err != nil {
		return err
	}
	_, err = io.Copy(tarGzWriter, file)
	return err
}
// Return the hash for path stored in the xattrs. If the hash is out of date, // the hash is computed anew, unless `compute` is false in which case nil is // returned. func GetHash(path string, info os.FileInfo, compute bool) (mh.Multihash, error) { if info.Mode()&os.ModeSymlink != 0 { return symlinkHash(path) } hashTimeStr, err := attrs.Get(path, XattrHashTime) if err != nil { if compute { return HashFile(path, info) } else if IsNoData(err) { // ignore error return nil, nil } else { return nil, err } } hashTime, err := time.Parse(time.RFC3339Nano, string(hashTimeStr)) if err != nil { return nil, err } if hashTime != info.ModTime() { if compute { return HashFile(path, info) } else { return nil, nil } } return attrs.Get(path, XattrHash) }
func (fs *memFileSystem) refreshCache(path string, info os.FileInfo) (err error) { // Delete the file if fi is nil. if info == nil { fs.lock.Lock() delete(fs.cache, path) fs.lock.Unlock() return } // Create memory fileinfo and read contents. fi := &memFileInfo{ name: info.Name(), size: info.Size(), mode: info.Mode(), modTime: info.ModTime(), isDir: info.IsDir(), path: path, fs: fs, } // Fill content of the file from disk. if !fi.isDir { fi.content, err = ioutil.ReadFile(path) if err != nil { return } } // Update cache and return. fs.lock.Lock() fs.cache[path] = fi fs.lock.Unlock() return }
func parseProvidesRequires(fi os.FileInfo, path string, f io.Reader) (provides, requires []string, err error) { mt := fi.ModTime() depCacheMu.Lock() defer depCacheMu.Unlock() if ci := depCache[path]; ci.modTime.Equal(mt) { return ci.provides, ci.requires, nil } scanner := bufio.NewScanner(f) for scanner.Scan() { l := scanner.Text() if !strings.HasPrefix(l, "goog.") { continue } m := provReqRx.FindStringSubmatch(l) if m != nil { if m[1] == "provide" { provides = append(provides, m[2]) } else { requires = append(requires, m[2]) } } } if err := scanner.Err(); err != nil { return nil, nil, err } depCache[path] = depCacheItem{provides: provides, requires: requires, modTime: mt} return provides, requires, nil }
func fetchFileDetailsJSON(m *FileDetailsJSON, fi os.FileInfo) { if !*excludeSize { m.Size = fi.Size() } if !*excludeMtime { tmp := fi.ModTime() m.Mtime = tmp.Format(*timeFormat) } stat := fi.Sys().(*syscall.Stat_t) if !*excludeUid { m.Uid = stat.Uid } if !*excludeGid { m.Gid = stat.Gid } if !*excludeInode { m.Inode = stat.Ino } if !*excludeAtime { tmp := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) m.Atime = tmp.Format(*timeFormat) } if !*excludeCtime { tmp := time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)) m.Ctime = tmp.Format(*timeFormat) } /* if 1==0 { log.Print("Number of links: ",stat.Nlink) } */ }
// dir2Dir converts an os.FileInfo (plus its full path) into a 9P p.Dir
// record, including numeric-to-name owner resolution. With dotu set, the
// 9P2000.u variant is filled in via dir.dotu instead.
func dir2Dir(path string, d os.FileInfo, dotu bool, upool p.Users) *p.Dir {
	// The platform stat carries uid/gid/atime, which os.FileInfo hides.
	sysMode := d.Sys().(*syscall.Stat_t)

	dir := new(Dir)
	dir.Qid = *dir2Qid(d)
	dir.Mode = dir2Npmode(d, dotu)
	dir.Atime = uint32(atime(sysMode).Unix())
	dir.Mtime = uint32(d.ModTime().Unix())
	dir.Length = uint64(d.Size())
	// Base name only: everything after the final slash.
	dir.Name = path[strings.LastIndex(path, "/")+1:]

	if dotu {
		dir.dotu(path, d, upool, sysMode)
		return &dir.Dir
	}

	unixUid := int(sysMode.Uid)
	unixGid := int(sysMode.Gid)
	dir.Uid = strconv.Itoa(unixUid)
	dir.Gid = strconv.Itoa(unixGid)
	dir.Muid = "none"

	// Resolve numeric ids to names where possible; on lookup failure the
	// numeric string is kept.
	// BUG(akumar): LookupId will never find names for
	// groups, as it only operates on user ids.
	u, err := user.LookupId(dir.Uid)
	if err == nil {
		dir.Uid = u.Username
	}
	g, err := user.LookupId(dir.Gid)
	if err == nil {
		dir.Gid = g.Username
	}
	return &dir.Dir
}
func list(path string, info os.FileInfo, node *fsNode, n *int) error { if (!info.IsDir() && !info.Mode().IsRegular()) || strings.HasPrefix(info.Name(), ".") { return errors.New("Non-regular file") } (*n)++ if (*n) > fileNumberLimit { return errors.New("Over file limit") //limit number of files walked } node.Name = info.Name() node.Size = info.Size() node.Modified = info.ModTime() if !info.IsDir() { return nil } children, err := ioutil.ReadDir(path) if err != nil { return fmt.Errorf("Failed to list files") } node.Size = 0 for _, i := range children { c := &fsNode{} p := filepath.Join(path, i.Name()) if err := list(p, i, c, n); err != nil { continue } node.Size += c.Size node.Children = append(node.Children, c) } return nil }
func (show *Slideshow) walkSlideshow(localpath string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { return nil } ext := filepath.Ext(localpath) ext = strings.ToLower(ext) if ext == ".jpg" { photo := new(SlideshowPhoto) photo.path = localpath photo.Photo = path.Join("/slideshow", info.Name()) photo.Timestamp = info.ModTime() //info.ModTime().Format(time.RFC3339) show.Photos = append(show.Photos, photo) } return nil }
//uses local absolute path, generates metadata for file func createFileMeta(path string, f os.FileInfo) (bt bt_file, err error) { d, err := ioutil.ReadFile(path) if err != nil { return bt, err } fmt.Println(f.Size()) //TODO compute this smarter, not just min(256k, len(file)) plength := int(math.Min(float64(PIECE_LENGTH), float64(f.Size()))) if plength == 0 { return bt, err } iters := len(d) / plength if len(d)%plength > 0 { iters += 1 } //compute sha1 of each piece var wg sync.WaitGroup pieces := make([]byte, 0, iters*20) wg.Add(iters) for i := 0; i < iters; i++ { go func(i int) { s := sha1.Sum(d[plength*i : int(math.Min(float64(plength*(i+1)), float64(len(d))))]) pieces = append(pieces[:i*20], append(s[:], pieces[i*20:]...)...) wg.Done() }(i) } wg.Wait() return bt_file{f.ModTime().Unix(), f.Size(), plength, string(pieces)}, nil }
func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool) (*fsFile, error) { n := fileInfo.Size() contentLength := int(n) if n != int64(contentLength) { f.Close() return nil, fmt.Errorf("too big file: %d bytes", n) } // detect content-type ext := fileExtension(fileInfo.Name(), compressed) contentType := mime.TypeByExtension(ext) if len(contentType) == 0 { data, err := readFileHeader(f, compressed) if err != nil { return nil, fmt.Errorf("cannot read header of the file %q: %s", f.Name(), err) } contentType = http.DetectContentType(data) } lastModified := fileInfo.ModTime() ff := &fsFile{ h: h, f: f, contentType: contentType, contentLength: contentLength, compressed: compressed, lastModified: lastModified, lastModifiedStr: AppendHTTPDate(nil, lastModified), t: time.Now(), } return ff, nil }
func retryCopyDir(sourceInfo os.FileInfo, source, destination string) error { if stat, err := os.Stat(destination); err == nil { if !stat.IsDir() { return errors.New("target exists but is not a directory: " + destination) } fmt.Println("Using existing destination:", destination) } else { if err := os.Mkdir(destination, sourceInfo.Mode()&os.ModePerm); err != nil { return err } } listing := retryListDir(source) for _, info := range listing { newSource := filepath.Join(source, info.Name()) newDest := filepath.Join(destination, info.Name()) if info.IsDir() { if err := retryCopyDir(info, newSource, newDest); err != nil { return err } } else { if err := retryCopyFile(info, newSource, newDest); err != nil { return err } } } // Change the times after copying the contents to avoid updating the modification time. if err := os.Chtimes(destination, sourceInfo.ModTime(), sourceInfo.ModTime()); err != nil { return err } return nil }