// getReaderSize gets the size of the underlying reader, if possible.
func getReaderSize(reader io.Reader) (size int64, err error) {
	size = -1
	if reader != nil {
		switch v := reader.(type) {
		case *bytes.Buffer:
			size = int64(v.Len())
		case *bytes.Reader:
			size = int64(v.Len())
		case *strings.Reader:
			size = int64(v.Len())
		case *os.File:
			var st os.FileInfo
			st, err = v.Stat()
			if err != nil {
				return 0, err
			}
			size = st.Size()
		case *Object:
			var st ObjectInfo
			st, err = v.Stat()
			if err != nil {
				return 0, err
			}
			size = st.Size
		}
	}
	return size, nil
}
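// Hypothetical usage sketch for getReaderSize above: it reports -1 for readers
// whose length cannot be determined (e.g. a network stream), so callers should
// handle that case. The file name "example.txt" is an assumption for illustration.
func exampleGetReaderSize() {
	buf := bytes.NewBufferString("hello")
	if n, _ := getReaderSize(buf); n >= 0 {
		fmt.Println("buffer size:", n) // 5
	}

	if f, err := os.Open("example.txt"); err == nil {
		defer f.Close()
		if n, err := getReaderSize(f); err == nil {
			fmt.Println("file size:", n)
		}
	}
}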
// Stat produces a styxproto.Stat from an open file. If the value
// provides a Stat method matching that of os.File, that is used.
// Otherwise, the styxfile package determines the file's attributes
// based on other characteristics.
func Stat(buf []byte, file Interface, name string, qid styxproto.Qid) (styxproto.Stat, error) {
	var (
		fi  os.FileInfo
		err error
	)
	type hasStat interface {
		Stat() (os.FileInfo, error)
	}
	if v, ok := file.(hasStat); ok {
		fi, err = v.Stat()
		if err != nil {
			return nil, err
		}
	} else {
		fi = statGuess{file, name, qid.Type()}
	}
	uid, gid, muid := sys.FileOwner(fi)
	stat, _, err := styxproto.NewStat(buf, fi.Name(), uid, gid, muid)
	if err != nil {
		return nil, err
	}
	stat.SetLength(fi.Size())
	stat.SetMode(Mode9P(fi.Mode()))
	stat.SetAtime(uint32(fi.ModTime().Unix()))
	stat.SetMtime(uint32(fi.ModTime().Unix()))
	stat.SetQid(qid)
	return stat, nil
}
func dir2Dir(path string, d os.FileInfo, dotu bool, upool p.Users) *p.Dir {
	sysMode := d.Sys().(*syscall.Stat_t)

	dir := new(Dir)
	dir.Qid = *dir2Qid(d)
	dir.Mode = dir2Npmode(d, dotu)
	dir.Atime = uint32(atime(sysMode).Unix())
	dir.Mtime = uint32(d.ModTime().Unix())
	dir.Length = uint64(d.Size())
	dir.Name = path[strings.LastIndex(path, "/")+1:]

	if dotu {
		dir.dotu(path, d, upool, sysMode)
		return &dir.Dir
	}

	unixUid := int(sysMode.Uid)
	unixGid := int(sysMode.Gid)
	dir.Uid = strconv.Itoa(unixUid)
	dir.Gid = strconv.Itoa(unixGid)
	dir.Muid = "none"

	// BUG(akumar): LookupId will never find names for
	// groups, as it only operates on user ids.
	u, err := user.LookupId(dir.Uid)
	if err == nil {
		dir.Uid = u.Username
	}
	g, err := user.LookupId(dir.Gid)
	if err == nil {
		dir.Gid = g.Username
	}

	return &dir.Dir
}
func LoadConfig(fname string) (cfg Config, err error) {
	var fd *os.File
	var fs os.FileInfo
	var data []byte

	// Check if the config file exists, and create a buffer to hold its content
	if fs, err = os.Stat(fname); err != nil {
		return
	}
	data = make([]byte, fs.Size())

	// Open and read the file into the buffer; io.ReadFull guarantees the whole
	// buffer is filled (a bare Read may return fewer bytes than requested)
	if fd, err = os.Open(fname); err != nil {
		return
	}
	defer fd.Close()
	if _, err = io.ReadFull(fd, data); err != nil {
		return
	}

	// Parse the yaml into a struct
	cfg = Config{}
	err = yaml.Unmarshal(data, &cfg)
	if err != nil {
		return
	}

	// Set the hostname
	cfg.Hostname, _ = os.Hostname()
	return
}
func StitchWalk(path string, info os.FileInfo, err error) error {
	fmt.Println(path, info, err)
	if info.IsDir() {
		fmt.Println("Is a dir")
	} else {
		/*
			f0, err0 := os.Open(path)
			if err0 != nil {
				return err0
			}
		*/
		fmt.Printf("writing %s. %v bytes", path, info.Size())
		b0, err0 := ioutil.ReadFile(path)
		if err0 != nil {
			return err0
		}
		s0 := string(b0)
		// s1 := fmt.Sprintf("%s,\n", s0)
		s1 := info.Name()
		s1 = s1[:len(s1)-5]
		s2 := fmt.Sprintf("%s:%s,\n", s1, s0)
		W.WriteString(s2)
	}
	return err
}
func (t *GcsfuseTest) OnlyDir_TrailingSlash() {
	var err error
	var fi os.FileInfo

	// Mount only a single directory from the bucket, including a trailing slash.
	args := []string{
		"--only-dir",
		path.Dir(canned.ExplicitDirFile) + "/",
		canned.FakeBucketName,
		t.dir,
	}

	err = t.runGcsfuse(args)
	AssertEq(nil, err)
	defer unmount(t.dir)

	// It should be as if t.dir points into the bucket's first-level directory.
	entries, err := fusetesting.ReadDirPicky(t.dir)
	AssertEq(nil, err)
	AssertEq(1, len(entries))

	fi = entries[0]
	ExpectEq(path.Base(canned.ExplicitDirFile), fi.Name())
	ExpectEq(len(canned.ExplicitDirFile_Contents), fi.Size())
}
func (store *fileStore) acquireNewBlock() (Id, []byte, error) {
	var (
		err      error
		fileInfo os.FileInfo
		id       Id
		memSlice []byte
	)

	if store.free.Len() > 0 {
		store.freeMtx.Lock()
		el := store.free.Front()
		store.free.Remove(el)
		store.freeMtx.Unlock()
		id = el.Value.(Id)
	} else {
		store.fdMtx.Lock()
		if fileInfo, err = store.fd.Stat(); err != nil {
			// release the mutex before bailing out so the store is not left locked
			store.fdMtx.Unlock()
			return Id(0), nil, err
		}
		id = Id(fileInfo.Size() / blockSize)
		store.fd.Truncate(fileInfo.Size() + blockSize)
		store.fdMtx.Unlock()
	}

	memSlice, err = store.acquireBlock(id)
	return id, memSlice, err
}
func (m *MetaInfo) addFiles(fs MetaInfoFileSystem, file string) (err error) {
	var fileInfo os.FileInfo
	fileInfo, err = fs.Stat(file)
	if err != nil {
		return
	}
	if fileInfo.IsDir() {
		var f MetaInfoFile
		f, err = fs.Open(file)
		if err != nil {
			return
		}
		var fi []string
		fi, err = f.Readdirnames(0)
		if err != nil {
			return
		}
		for _, name := range fi {
			err = m.addFiles(fs, path.Join(file, name))
			if err != nil {
				return
			}
		}
	} else {
		fileDict := FileDict{Length: fileInfo.Size()}
		cleanFile := path.Clean(file)
		parts := strings.Split(cleanFile, string(os.PathSeparator))
		fileDict.Path = parts
		m.Info.Files = append(m.Info.Files, fileDict)
	}
	return
}
func (t *CachingWithImplicitDirsTest) SymlinksWork() {
	var fi os.FileInfo
	var err error

	// Create a file.
	fileName := path.Join(t.Dir, "foo")
	const contents = "taco"

	err = ioutil.WriteFile(fileName, []byte(contents), 0400)
	AssertEq(nil, err)

	// Create a symlink to it.
	symlinkName := path.Join(t.Dir, "bar")
	err = os.Symlink("foo", symlinkName)
	AssertEq(nil, err)

	// Stat the link.
	fi, err = os.Lstat(symlinkName)
	AssertEq(nil, err)

	ExpectEq("bar", fi.Name())
	ExpectEq(0, fi.Size())
	ExpectEq(filePerms|os.ModeSymlink, fi.Mode())

	// Stat the target via the link.
	fi, err = os.Stat(symlinkName)
	AssertEq(nil, err)

	ExpectEq("bar", fi.Name())
	ExpectEq(len(contents), fi.Size())
	ExpectEq(filePerms, fi.Mode())
}
// watch watches the file for changes
func (d *FileQuery) watch(lastStat os.FileInfo) <-chan *watchResult {
	ch := make(chan *watchResult, 1)

	go func(lastStat os.FileInfo) {
		for {
			stat, err := os.Stat(d.path)
			if err != nil {
				select {
				case <-d.stopCh:
					return
				case ch <- &watchResult{err: err}:
					return
				}
			}

			changed := lastStat == nil ||
				lastStat.Size() != stat.Size() ||
				lastStat.ModTime() != stat.ModTime()

			if changed {
				select {
				case <-d.stopCh:
					return
				case ch <- &watchResult{stat: stat}:
					return
				}
			}

			time.Sleep(FileQuerySleepTime)
		}
	}(lastStat)

	return ch
}
func (repo *LocalfsImagerep) fillLocalfsImage(image *models.Image, fileinfo os.FileInfo) bool {
	// ubuntu-14.04_x86_64_raw.img -> name: ubuntu-14.04, arch: x86_64, type: raw.img
	imginfo := strings.SplitN(fileinfo.Name(), "_", 3)
	if len(imginfo) != 3 {
		log.WithField("image", fileinfo.Name()).Info("skipping image with invalid name")
		return false
	}
	image.Name = imginfo[0]
	image.Size = fileinfo.Size()
	image.Date = fileinfo.ModTime()
	image.Filename = fileinfo.Name()
	image.FullPath = filepath.Join(repo.Root, fileinfo.Name())

	switch imginfo[1] {
	default:
		log.WithField("filename", fileinfo.Name()).WithField("parts", imginfo).Info("skipping unknown image architecture")
		return false
	case "amd64":
		image.Arch = models.IMAGE_ARCH_X86_64
	case "i386":
		image.Arch = models.IMAGE_ARCH_X86
	}

	switch imginfo[2] {
	default:
		log.WithField("filename", fileinfo.Name()).WithField("parts", imginfo).Info("skipping unknown image type")
		return false
	case "raw.img":
		image.Type = models.IMAGE_FMT_RAW
	case "qcow2.img":
		image.Type = models.IMAGE_FMT_QCOW2
	}

	return true
}
func (gw *gameWalker) visit(path string, f os.FileInfo, err error) error {
	if f == nil || f.Name() == ".DS_Store" {
		return nil
	}
	if f.IsDir() {
		return nil
	}

	hh, err := HashesForFile(path)
	if err != nil {
		return err
	}

	romName, err := filepath.Rel(gw.gamepath, path)
	if err != nil {
		return err
	}

	rom := new(types.Rom)
	rom.Name = romName
	rom.Size = f.Size()
	rom.Crc = hh.Crc
	rom.Md5 = hh.Md5
	rom.Sha1 = hh.Sha1

	gw.game.Roms = append(gw.game.Roms, rom)
	return nil
}
// Finfo2Theader creates a tar.Header from an os.FileInfo.
func Finfo2Theader(fi os.FileInfo) (hdr *tar.Header, err error) {
	m := fi.Mode()
	var (
		ln string
		tm byte
	)
	tm = tar.TypeReg
	switch {
	case m&os.ModeSymlink != 0:
		tm = tar.TypeSymlink
		/*
			if lfi, err := os.Lstat(fi.Name()); err == nil {
				ln = lfi.Name()
			}
		*/
	case m&os.ModeDevice != 0 && m&os.ModeCharDevice != 0:
		tm = tar.TypeChar
	case m&os.ModeDevice != 0:
		tm = tar.TypeBlock
	case m&os.ModeNamedPipe != 0 || m&os.ModeSocket != 0:
		tm = tar.TypeFifo
	}
	tim := fi.ModTime()
	hdr = &tar.Header{
		Name:     fi.Name(),
		Mode:     int64(m.Perm()),
		Size:     fi.Size(),
		ModTime:  tim,
		Typeflag: tm,
		Linkname: ln,
	}
	FillHeader(hdr)
	return
}
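// Hypothetical usage sketch for Finfo2Theader above, assuming it lives in the same
// package (FillHeader is the package helper it already calls). The path "notes.txt"
// is an assumption for illustration.
func exampleFinfo2Theader(tw *tar.Writer) error {
	fi, err := os.Lstat("notes.txt")
	if err != nil {
		return err
	}
	hdr, err := Finfo2Theader(fi)
	if err != nil {
		return err
	}
	// Write only the header here; regular-file contents would follow with io.Copy.
	return tw.WriteHeader(hdr)
}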
func fileEndsWithString(path string, info os.FileInfo, str string, caseSensitive bool) bool {
	f, err := os.Open(path)
	if err != nil {
		return false
	}
	defer f.Close()

	bsize := len(str)
	buff := make([]byte, bsize)
	cbuff := make([]byte, 1)

	j := bsize - 1
	for i := info.Size() - 1; i >= 0 && j >= 0; i-- {
		_, readErr := f.ReadAt(cbuff, i)
		if readErr != nil {
			return false
		}
		if cbuff[0] >= 32 || j < bsize-1 {
			buff[j] = cbuff[0]
			j--
		}
	}

	endValue := string(buff)
	if !caseSensitive {
		endValue = strings.ToLower(endValue)
	}
	return str == endValue
}
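// Hypothetical usage sketch for fileEndsWithString above: check whether a log file
// ends with a marker, ignoring case. The path "app.log" and marker "done" are
// assumptions for illustration; the marker must be lowercase when caseSensitive is
// false, since only the file's tail is lowered.
func exampleFileEndsWithString() bool {
	const p = "app.log"
	info, err := os.Stat(p)
	if err != nil {
		return false
	}
	return fileEndsWithString(p, info, "done", false)
}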
// NewIndexer creates a new Indexer that asynchronously merges index data to disk.
func NewIndexer(file string, options Options) (i *Indexer, err error) {
	var stat os.FileInfo

	i = &Indexer{}
	i.File = file
	i.closed = false
	i.syncOffset = 0
	i.Options = options
	i.ring = NewRing(options.RingBuffer)
	i.bn = 0
	i.buf = make([]byte, options.BufferSize)
	if i.f, err = os.OpenFile(file, os.O_RDWR|os.O_CREATE|myos.O_NOATIME, 0664); err != nil {
		log.Errorf("os.OpenFile(\"%s\") error(%v)", file, err)
		return nil, err
	}
	if stat, err = i.f.Stat(); err != nil {
		log.Errorf("index: %s Stat() error(%v)", i.File, err)
		return nil, err
	}
	if stat.Size() == 0 {
		if err = myos.Fallocate(i.f.Fd(), myos.FALLOC_FL_KEEP_SIZE, 0, fallocSize); err != nil {
			log.Errorf("index: %s fallocate() error(%v)", i.File, err)
			i.Close()
			return nil, err
		}
	}
	i.wg.Add(1)
	i.signal = make(chan int, 1)
	go i.merge()
	return
}
func (t *ImplicitDirsTest) FileObjectPresent() {
	var fi os.FileInfo
	var entries []os.FileInfo
	var err error

	// Set up contents.
	AssertEq(
		nil,
		t.createObjects(
			map[string]string{
				// File
				"foo": "taco",
			}))

	// Statting the name should return an entry for the file.
	fi, err = os.Stat(path.Join(t.mfs.Dir(), "foo"))
	AssertEq(nil, err)
	ExpectEq("foo", fi.Name())
	ExpectEq(4, fi.Size())
	ExpectFalse(fi.IsDir())

	// ReadDir should show the file.
	entries, err = fusetesting.ReadDirPicky(t.mfs.Dir())
	AssertEq(nil, err)
	AssertEq(1, len(entries))

	fi = entries[0]
	ExpectEq("foo", fi.Name())
	ExpectEq(4, fi.Size())
	ExpectFalse(fi.IsDir())
}
func list(path string, info os.FileInfo, node *fsNode, n *int) error {
	if (!info.IsDir() && !info.Mode().IsRegular()) || strings.HasPrefix(info.Name(), ".") {
		return errors.New("Non-regular file")
	}
	(*n)++
	if (*n) > fileNumberLimit {
		return errors.New("Over file limit") //limit number of files walked
	}
	node.Name = info.Name()
	node.Size = info.Size()
	node.Modified = info.ModTime()
	if !info.IsDir() {
		return nil
	}
	children, err := ioutil.ReadDir(path)
	if err != nil {
		return fmt.Errorf("Failed to list files")
	}
	node.Size = 0
	for _, i := range children {
		c := &fsNode{}
		p := filepath.Join(path, i.Name())
		if err := list(p, i, c, n); err != nil {
			continue
		}
		node.Size += c.Size
		node.Children = append(node.Children, c)
	}
	return nil
}
// init initializes the block file and adds or parses its meta info.
func (b *SuperBlock) init() (err error) {
	var stat os.FileInfo
	if stat, err = b.r.Stat(); err != nil {
		log.Errorf("block: %s Stat() error(%v)", b.File, err)
		return
	}
	if b.Size = stat.Size(); b.Size == 0 {
		// falloc(FALLOC_FL_KEEP_SIZE)
		if err = myos.Fallocate(b.w.Fd(), myos.FALLOC_FL_KEEP_SIZE, 0, _maxSize); err != nil {
			log.Errorf("block: %s Fallocate() error(%s)", b.File, err)
			return
		}
		if err = b.writeMeta(); err != nil {
			log.Errorf("block: %s writeMeta() error(%v)", b.File, err)
			return
		}
		b.Size = _headerSize
	} else {
		if err = b.parseMeta(); err != nil {
			log.Errorf("block: %s parseMeta() error(%v)", b.File, err)
			return
		}
		if _, err = b.w.Seek(_headerOffset, os.SEEK_SET); err != nil {
			log.Errorf("block: %s Seek() error(%v)", b.File, err)
			return
		}
	}
	b.Offset = needle.NeedleOffset(_headerOffset)
	return
}
func (t *GcsfuseTest) OnlyDir_WithImplicitDir() {
	var err error
	var fi os.FileInfo

	// Mount only a single implicit directory from the bucket.
	args := []string{
		"--only-dir",
		path.Dir(canned.ImplicitDirFile),
		canned.FakeBucketName,
		t.dir,
	}

	err = t.runGcsfuse(args)
	AssertEq(nil, err)
	defer unmount(t.dir)

	// It should be as if t.dir points into the implicit directory.
	entries, err := fusetesting.ReadDirPicky(t.dir)
	AssertEq(nil, err)
	AssertEq(1, len(entries))

	fi = entries[0]
	ExpectEq(path.Base(canned.ImplicitDirFile), fi.Name())
	ExpectEq(len(canned.ImplicitDirFile_Contents), fi.Size())
}
func addFile(path string, fi os.FileInfo, hash uint64, size int64) {
	p := fullName(path, fi)
	//fmt.Printf("addFile: path=%q, fi.Name()=%q, hash=%#x, p=%q\n", path, fi.Name(), hash, p)
	//fmt.Printf("addFile: hash=%016x, p=%q\n", hash, p)
	k1 := kfe{p, fi.Size(), 0}
	skey := fi.Size()
	// 0 length files are currently silently ignored
	// they are not identical
	//hkey := uint64(0)
	/*
		if skey > fthreshold {
			if *fr {
				hkey = readFullHash(path, fi)
			} else {
				hkey = readPartialHash(path, fi)
			}
		}
	*/
	add(hash, skey, &k1)
	// smap not used
	_, ok2 := smap[skey]
	if !ok2 {
		smap[skey] = []kfe{k1}
	} else {
		smap[skey] = append(smap[skey], k1)
	}
}
func GetFileSize(file *os.File) (size int64, err error) {
	var fi os.FileInfo
	if fi, err = file.Stat(); err == nil {
		size = fi.Size()
	}
	return
}
func size(stat os.FileInfo, filename string) (int64, error) {
	if !stat.IsDir() {
		return stat.Size(), nil
	}

	file, err := os.Open(filename)
	if err != nil {
		return 0, err
	}
	files, err := file.Readdir(0)
	// close before recursing (and before the error check, so the descriptor
	// is not leaked when Readdir fails)
	file.Close()
	if err != nil {
		return 0, err
	}

	var output int64
	for _, child := range files {
		s, err := size(child, fp.Join(filename, child.Name()))
		if err != nil {
			return 0, err
		}
		output += s
	}
	return output, nil
}
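// Hypothetical usage sketch for size above: compute the total size of a directory
// tree. The root path "testdata" is an assumption for illustration.
func exampleSize() (int64, error) {
	const root = "testdata"
	info, err := os.Stat(root)
	if err != nil {
		return 0, err
	}
	return size(info, root)
}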
// Write a single file to TarGz
func TarGzWrite(item ArchiveItem, tw *tar.Writer, fi os.FileInfo) (err error) {
	if item.FileSystemPath != "" {
		var fr *os.File
		// assign to the named return (":=" here would shadow err and swallow failures)
		fr, err = os.Open(item.FileSystemPath)
		if err == nil {
			defer fr.Close()

			h := new(tar.Header)
			h.Name = item.ArchivePath
			h.Size = fi.Size()
			h.Mode = int64(fi.Mode())
			h.ModTime = fi.ModTime()

			err = tw.WriteHeader(h)
			if err == nil {
				_, err = io.Copy(tw, fr)
			}
		}
	} else {
		h := new(tar.Header)
		//backslash-only paths
		h.Name = strings.Replace(item.ArchivePath, "\\", "/", -1)
		h.Size = int64(len(item.Data))
		h.Mode = int64(0644) //? is this ok?
		h.ModTime = time.Now()

		err = tw.WriteHeader(h)
		if err == nil {
			_, err = tw.Write(item.Data)
		}
	}
	return err
}
func writeTarGz(filePath, baseDir string, tarGzWriter *tar.Writer, fileInfo os.FileInfo) error {
	file, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer file.Close()

	relativePath, err := filepath.Rel(baseDir, filePath)
	if err != nil {
		return err
	}

	header := new(tar.Header)
	header.Name = relativePath
	header.Size = fileInfo.Size()
	header.Mode = int64(fileInfo.Mode())
	header.ModTime = fileInfo.ModTime()

	err = tarGzWriter.WriteHeader(header)
	if err != nil {
		return err
	}

	_, err = io.Copy(tarGzWriter, file)
	if err != nil {
		return err
	}
	return nil
}
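// Hypothetical usage sketch for writeTarGz above: walk srcDir and archive every
// regular file into dest as a gzip-compressed tar. This wrapper is an assumption
// for illustration, not part of the original code.
func exampleArchiveDir(srcDir, dest string) error {
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()

	gzw := gzip.NewWriter(out)
	defer gzw.Close()
	tw := tar.NewWriter(gzw)
	defer tw.Close()

	return filepath.Walk(srcDir, func(p string, fi os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if !fi.Mode().IsRegular() {
			return nil // skip directories, symlinks, devices
		}
		return writeTarGz(p, srcDir, tw, fi)
	})
}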
func (self *F) visit(path string, f os.FileInfo, err error) error {
	if f == nil {
		return err
	}
	// if it is a txt file
	if strings.HasSuffix(f.Name(), "txt") {
		var tp int
		if f.IsDir() {
			tp = IsDirectory
		} else if (f.Mode() & os.ModeSymlink) > 0 {
			tp = IsSymlink
		} else {
			tp = IsRegular
		}
		inoFile := &sysFile{
			fName:      path,
			fType:      tp,
			fPerm:      f.Mode(),
			fMtime:     f.ModTime(),
			fSize:      f.Size(),
			fShortName: f.Name(),
		}
		self.files = append(self.files, inoFile)
	}
	return nil
}
func (fs *memFileSystem) refreshCache(path string, info os.FileInfo) (err error) {
	// Delete the file from the cache if info is nil.
	if info == nil {
		fs.lock.Lock()
		delete(fs.cache, path)
		fs.lock.Unlock()
		return
	}

	// Create memory fileinfo and read contents.
	fi := &memFileInfo{
		name:    info.Name(),
		size:    info.Size(),
		mode:    info.Mode(),
		modTime: info.ModTime(),
		isDir:   info.IsDir(),
		path:    path,
		fs:      fs,
	}

	// Fill content of the file from disk.
	if !fi.isDir {
		fi.content, err = ioutil.ReadFile(path)
		if err != nil {
			return
		}
	}

	// Update cache and return.
	fs.lock.Lock()
	fs.cache[path] = fi
	fs.lock.Unlock()
	return
}
func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool) (*fsFile, error) {
	n := fileInfo.Size()
	contentLength := int(n)
	if n != int64(contentLength) {
		f.Close()
		return nil, fmt.Errorf("too big file: %d bytes", n)
	}

	// detect content-type
	ext := fileExtension(fileInfo.Name(), compressed)
	contentType := mime.TypeByExtension(ext)
	if len(contentType) == 0 {
		data, err := readFileHeader(f, compressed)
		if err != nil {
			return nil, fmt.Errorf("cannot read header of the file %q: %s", f.Name(), err)
		}
		contentType = http.DetectContentType(data)
	}

	lastModified := fileInfo.ModTime()
	ff := &fsFile{
		h:               h,
		f:               f,
		contentType:     contentType,
		contentLength:   contentLength,
		compressed:      compressed,
		lastModified:    lastModified,
		lastModifiedStr: AppendHTTPDate(nil, lastModified),

		t: time.Now(),
	}
	return ff, nil
}
func fetchFileDetailsJSON(m *FileDetailsJSON, fi os.FileInfo) {
	if !*excludeSize {
		m.Size = fi.Size()
	}
	if !*excludeMtime {
		tmp := fi.ModTime()
		m.Mtime = tmp.Format(*timeFormat)
	}

	stat := fi.Sys().(*syscall.Stat_t)
	if !*excludeUid {
		m.Uid = stat.Uid
	}
	if !*excludeGid {
		m.Gid = stat.Gid
	}
	if !*excludeInode {
		m.Inode = stat.Ino
	}
	if !*excludeAtime {
		tmp := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
		m.Atime = tmp.Format(*timeFormat)
	}
	if !*excludeCtime {
		tmp := time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec))
		m.Ctime = tmp.Format(*timeFormat)
	}
	/*
		if 1 == 0 {
			log.Print("Number of links: ", stat.Nlink)
		}
	*/
}
func (t *tarmonster) walk(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if !info.Mode().IsRegular() || info.Size() == 0 {
		return nil
	}

	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()

	// Get tar.Header
	fih, err := tar.FileInfoHeader(info, "")
	if err != nil {
		return err
	}
	fih.Name = strings.TrimPrefix(path, t.src+string(filepath.Separator))

	// Begin a new file
	if err := t.writer.WriteHeader(fih); err != nil {
		return err
	}

	// Write the file
	if _, err := io.CopyBuffer(t.writer, file, t.buffer); err != nil {
		return err
	}

	return err
}
// NewIndexer creates a new Indexer that asynchronously merges index data to disk.
func NewIndexer(file string, ring int) (i *Indexer, err error) {
	var (
		stat os.FileInfo
	)
	i = &Indexer{}
	i.signal = make(chan int, signalNum)
	i.ring = NewRing(ring)
	i.sigNum = ring / 2
	i.File = file
	if i.f, err = os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0664); err != nil {
		log.Errorf("os.OpenFile(\"%s\") error(%v)", file, err)
		return
	}
	if stat, err = i.f.Stat(); err != nil {
		log.Errorf("index: %s Stat() error(%v)", i.File, err)
		return
	}
	if stat.Size() == 0 {
		// falloc(FALLOC_FL_KEEP_SIZE)
		if err = Fallocate(i.f.Fd(), 1, 0, indexMaxSize); err != nil {
			log.Errorf("Fallocate(i.f.Fd(), 1, 0, 100MB) error(%v)", err)
			return
		}
	}
	i.bw = bufio.NewWriterSize(i.f, NeedleMaxSize)
	go i.write()
	return
}