// Add an entry for a child.
//
// REQUIRES: in.isDir()
// REQUIRES: dt != fuseutil.DT_Unknown
func (in *inode) AddChild(
	id fuseops.InodeID,
	name string,
	dt fuseutil.DirentType) {
	var index int

	// Update the modification time.
	in.attrs.Mtime = time.Now()

	// No matter where we place the entry, make sure it has the correct Offset
	// field.
	defer func() {
		in.entries[index].Offset = fuseops.DirOffset(index + 1)
	}()

	// Set up the entry.
	e := fuseutil.Dirent{
		Inode: id,
		Name:  name,
		Type:  dt,
	}

	// Look for a gap in which we can insert it.
	for index = range in.entries {
		if in.entries[index].Type == fuseutil.DT_Unknown {
			in.entries[index] = e
			return
		}
	}

	// Append it to the end.
	index = len(in.entries)
	in.entries = append(in.entries, e)
}
func (fs *helloFS) ReadDir(
	op *fuseops.ReadDirOp) (err error) {
	// Find the info for this inode.
	info, ok := gInodeInfo[op.Inode]
	if !ok {
		err = fuse.ENOENT
		return
	}

	if !info.dir {
		err = fuse.EIO
		return
	}

	entries := info.children

	// Grab the range of interest.
	if op.Offset > fuseops.DirOffset(len(entries)) {
		err = fuse.EIO
		return
	}

	// Resume at the specified offset into the array.
	entries = entries[op.Offset:]

	for _, e := range entries {
		op.Data = fuseutil.AppendDirent(op.Data, e)
		if len(op.Data) > op.Size {
			op.Data = op.Data[:op.Size]
			break
		}
	}

	return
}
func (s *OssvfsTest) TestReadFiles(t *C) {
	parent := s.getRoot(t)

	dh := parent.OpenDir()
	defer dh.CloseDir()

	for i := fuseops.DirOffset(0); ; i++ {
		en, err := dh.ReadDir(s.fs, i)
		t.Assert(err, IsNil)

		if en == nil {
			break
		}

		if en.Type == fuseutil.DT_File {
			in, err := parent.LookUp(s.fs, en.Name)
			t.Assert(err, IsNil)

			fh := in.OpenFile(s.fs)
			buf := make([]byte, 4096)

			nread, err := fh.ReadFile(s.fs, 0, buf)
			if en.Name == "zero" {
				t.Assert(nread, Equals, 0)
			} else {
				t.Assert(nread, Equals, len(en.Name))
				buf = buf[0:nread]
				t.Assert(string(buf), Equals, en.Name)
			}
		}
	}
}
// ReadEntries returns entries starting from the specified offset position. If
// the local cache is empty, it fetches from the remote.
func (d *Dir) ReadEntries(offset fuseops.DirOffset) ([]*fuseutil.Dirent, error) {
	d.Lock()
	defer d.Unlock()

	var entries = d.Entries
	if len(entries) == 0 {
		if err := d.updateEntriesFromRemote(); err != nil {
			return nil, err
		}

		entries = d.Entries
	}

	// Return an error if the offset is greater than the number of entries.
	if offset > fuseops.DirOffset(len(entries)) {
		return nil, fuse.EIO
	}

	// Filter out entries whose type is set to fuseutil.DT_Unknown.
	var liveEntries []*fuseutil.Dirent
	for _, e := range entries[offset:] {
		if e.Type != fuseutil.DT_Unknown {
			liveEntries = append(liveEntries, e)
		}
	}

	return liveEntries, nil
}
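// A minimal sketch of how a ReadDir handler might drive Dir.ReadEntries above,
// assuming the newer fuseops API (op.Dst / op.BytesRead, as in the helloFS
// example further down) and fuseutil.WriteDirent. The handler name and the way
// the *Dir is obtained are illustrative, not part of the original code.
func readDirFromCache(d *Dir, op *fuseops.ReadDirOp) error {
	entries, err := d.ReadEntries(op.Offset)
	if err != nil {
		return err
	}

	for _, e := range entries {
		// Stop once the destination buffer is full; the kernel will issue
		// another ReadDir with a later offset.
		n := fuseutil.WriteDirent(op.Dst[op.BytesRead:], *e)
		if n == 0 {
			break
		}
		op.BytesRead += n
	}

	return nil
}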
func (fs *flushFS) ReadDir(
	op *fuseops.ReadDirOp) (err error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	// Create the appropriate listing.
	var dirents []fuseutil.Dirent

	switch op.Inode {
	case fuseops.RootInodeID:
		dirents = []fuseutil.Dirent{
			fuseutil.Dirent{
				Offset: 1,
				Inode:  fooID,
				Name:   "foo",
				Type:   fuseutil.DT_File,
			},

			fuseutil.Dirent{
				Offset: 2,
				Inode:  barID,
				Name:   "bar",
				Type:   fuseutil.DT_Directory,
			},
		}

	case barID:

	default:
		err = fmt.Errorf("Unexpected inode: %v", op.Inode)
		return
	}

	// If the offset is for the end of the listing, we're done. Otherwise we
	// expect it to be for the start.
	switch op.Offset {
	case fuseops.DirOffset(len(dirents)):
		return

	case 0:

	default:
		err = fmt.Errorf("Unexpected offset: %v", op.Offset)
		return
	}

	// Fill in the listing.
	for _, de := range dirents {
		op.Data = fuseutil.AppendDirent(op.Data, de)
	}

	// We don't support doing this in anything more than one shot.
	if len(op.Data) > op.Size {
		err = fmt.Errorf("Couldn't fit listing in %v bytes", op.Size)
		return
	}

	return
}
// Read all entries for the directory, fix up conflicting names, and fill in
// offset fields.
//
// LOCKS_REQUIRED(in)
func readAllEntries(
	ctx context.Context,
	in inode.DirInode) (entries []fuseutil.Dirent, err error) {
	// Read one batch at a time.
	var tok string
	for {
		// Read a batch.
		var batch []fuseutil.Dirent

		batch, tok, err = in.ReadEntries(ctx, tok)
		if err != nil {
			err = fmt.Errorf("ReadEntries: %v", err)
			return
		}

		// Accumulate.
		entries = append(entries, batch...)

		// Are we done?
		if tok == "" {
			break
		}
	}

	// Ensure that the entries are sorted, for use in fixConflictingNames
	// below.
	sort.Sort(sortedDirents(entries))

	// Fix name conflicts.
	err = fixConflictingNames(entries)
	if err != nil {
		err = fmt.Errorf("fixConflictingNames: %v", err)
		return
	}

	// Fix up offset fields.
	for i := 0; i < len(entries); i++ {
		entries[i].Offset = fuseops.DirOffset(i) + 1
	}

	// Return a bogus inode ID for each entry, but not the root inode ID.
	//
	// NOTE(jacobsa): As far as I can tell this is harmless. Minting and
	// returning a real inode ID is difficult because fuse does not count
	// readdir as an operation that increases the inode ID's lookup count and
	// we therefore don't get a forget for it later, but we would like to not
	// have to remember every inode ID that we've ever minted for readdir.
	//
	// If it turns out this is not harmless, we'll need to switch to something
	// like inode IDs based on (object name, generation) hashes. But then what
	// about the birthday problem? And more importantly, what about our
	// semantic of not minting a new inode ID when the generation changes due
	// to a local action?
	for i := range entries {
		entries[i].Inode = fuseops.RootInodeID + 1
	}

	return
}
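// readAllEntries above and the DirHandle.ReadDir implementations below sort
// with a sortedDirents helper that is not included in these snippets. A
// minimal sketch of what such a sort.Interface might look like, assuming it
// orders entries purely by name:
type sortedDirents []fuseutil.Dirent

func (p sortedDirents) Len() int           { return len(p) }
func (p sortedDirents) Less(i, j int) bool { return p[i].Name < p[j].Name }
func (p sortedDirents) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }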
func (in *inode) CheckInvariants() {
	// INVARIANT: attrs.Mode &^ (os.ModePerm|os.ModeDir|os.ModeSymlink) == 0
	if !(in.attrs.Mode&^(os.ModePerm|os.ModeDir|os.ModeSymlink) == 0) {
		panic(fmt.Sprintf("Unexpected mode: %v", in.attrs.Mode))
	}

	// INVARIANT: !(isDir() && isSymlink())
	if in.isDir() && in.isSymlink() {
		panic(fmt.Sprintf("Unexpected mode: %v", in.attrs.Mode))
	}

	// INVARIANT: attrs.Size == len(contents)
	if in.attrs.Size != uint64(len(in.contents)) {
		panic(fmt.Sprintf(
			"Size mismatch: %d vs. %d",
			in.attrs.Size,
			len(in.contents)))
	}

	// INVARIANT: If !isDir(), len(entries) == 0
	if !in.isDir() && len(in.entries) != 0 {
		panic(fmt.Sprintf("Unexpected entries length: %d", len(in.entries)))
	}

	// INVARIANT: For each i, entries[i].Offset == i+1
	for i, e := range in.entries {
		if !(e.Offset == fuseops.DirOffset(i+1)) {
			panic(fmt.Sprintf("Unexpected offset for index %d: %d", i, e.Offset))
		}
	}

	// INVARIANT: Contains no duplicate names in used entries.
	childNames := make(map[string]struct{})
	for _, e := range in.entries {
		if e.Type != fuseutil.DT_Unknown {
			if _, ok := childNames[e.Name]; ok {
				panic(fmt.Sprintf("Duplicate name: %s", e.Name))
			}

			childNames[e.Name] = struct{}{}
		}
	}

	// INVARIANT: If !isFile(), len(contents) == 0
	if !in.isFile() && len(in.contents) != 0 {
		panic(fmt.Sprintf("Unexpected length: %d", len(in.contents)))
	}

	// INVARIANT: If !isSymlink(), len(target) == 0
	if !in.isSymlink() && len(in.target) != 0 {
		panic(fmt.Sprintf("Unexpected target length: %d", len(in.target)))
	}

	return
}
func (s *OssvfsTest) readDirFully(t *C, dh *DirHandle) (entries []fuseutil.Dirent) {
	en, err := dh.ReadDir(s.fs, fuseops.DirOffset(0))
	t.Assert(err, IsNil)
	t.Assert(en.Name, Equals, ".")

	en, err = dh.ReadDir(s.fs, fuseops.DirOffset(1))
	t.Assert(err, IsNil)
	t.Assert(en.Name, Equals, "..")

	for i := fuseops.DirOffset(2); ; i++ {
		en, err = dh.ReadDir(s.fs, i)
		t.Assert(err, IsNil)

		if en == nil {
			return
		}

		entries = append(entries, *en)
	}
}
func (s *GoofysTest) readDirFully(t *C, dh *DirHandle) (entries []fuseutil.Dirent) {
	for i := fuseops.DirOffset(0); ; i++ {
		en, err := dh.ReadDir(s.fs, i)
		t.Assert(err, IsNil)

		if en == nil {
			return
		}

		entries = append(entries, *en)
	}
}
func (d *Dir) initializeChild(e *tempEntry) (Node, error) {
	node, err := d.findEntry(e.Name)
	if err == nil {
		return node, nil
	}

	var t = e.Time
	if t.IsZero() {
		t = time.Now()
	}

	attrs := &fuseops.InodeAttributes{
		Size:   e.Size,
		Uid:    d.Attrs.Uid,
		Gid:    d.Attrs.Gid,
		Mode:   e.Mode,
		Atime:  t,
		Mtime:  t,
		Ctime:  t,
		Crtime: t,
	}

	n := NewEntry(d, e.Name)
	n.Attrs = attrs

	dirEntry := &fuseutil.Dirent{
		Offset: fuseops.DirOffset(len(d.Entries)) + 1, // offset is 1 indexed
		Inode:  n.ID,
		Name:   e.Name,
		Type:   e.Type,
	}

	d.Entries = append(d.Entries, dirEntry)

	var dt Node
	switch e.Type {
	case fuseutil.DT_Directory:
		dt = NewDir(n, d.IDGen)
	case fuseutil.DT_File:
		dt = NewFile(n)
	default:
		return nil, fmt.Errorf("Unknown file type: %v", e.Type)
	}

	d.EntriesList[e.Name] = dt

	return dt, nil
}
// Remove an entry for a child.
//
// REQUIRES: in.isDir()
// REQUIRES: An entry for the given name exists.
func (in *inode) RemoveChild(name string) {
	// Update the modification time.
	in.attrs.Mtime = time.Now()

	// Find the entry.
	i, ok := in.findChild(name)
	if !ok {
		panic(fmt.Sprintf("Unknown child: %s", name))
	}

	// Mark it as unused.
	in.entries[i] = fuseutil.Dirent{
		Type:   fuseutil.DT_Unknown,
		Offset: fuseops.DirOffset(i + 1),
	}
}
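// RemoveChild relies on a findChild helper that is not shown here. A minimal
// sketch, assuming it simply scans the entries slice for a matching name
// (the placeholder entries written by RemoveChild have an empty name, so they
// are never matched):
func (in *inode) findChild(name string) (i int, ok bool) {
	if !in.isDir() {
		panic("findChild called on non-directory.")
	}

	for i = range in.entries {
		if in.entries[i].Name == name {
			return i, true
		}
	}

	return 0, false
}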
func (fs *helloFS) ReadDir(
	ctx context.Context,
	op *fuseops.ReadDirOp) (err error) {
	// Find the info for this inode.
	info, ok := gInodeInfo[op.Inode]
	if !ok {
		err = fuse.ENOENT
		return
	}

	if !info.dir {
		err = fuse.EIO
		return
	}

	entries := info.children

	// Grab the range of interest.
	if op.Offset > fuseops.DirOffset(len(entries)) {
		err = fuse.EIO
		return
	}

	// Resume at the specified offset into the array.
	entries = entries[op.Offset:]

	for _, e := range entries {
		n := fuseutil.WriteDirent(op.Dst[op.BytesRead:], e)
		if n == 0 {
			break
		}

		op.BytesRead += n
	}

	return
}
func (dh *DirHandle) ReadDir(fs *Goofys, offset fuseops.DirOffset) (*fuseutil.Dirent, error) {
	// If the request is for offset zero, we assume that either this is the first
	// call or rewinddir has been called. Reset state.
	if offset == 0 {
		dh.Entries = nil
	}

	if offset == 0 {
		e := makeDirEntry(".", fuseutil.DT_Directory)
		e.Offset = 1
		dh.NameToEntry["."] = fs.rootAttrs
		return &e, nil
	} else if offset == 1 {
		e := makeDirEntry("..", fuseutil.DT_Directory)
		e.Offset = 2
		dh.NameToEntry[".."] = fs.rootAttrs
		return &e, nil
	}

	i := int(offset) - dh.BaseOffset - 2
	if i < 0 {
		panic(fmt.Sprintf("invalid offset %v, base=%v", offset, dh.BaseOffset))
	}

	if i >= len(dh.Entries) {
		if dh.Marker != nil {
			// we need to fetch the next page
			dh.Entries = nil
			dh.BaseOffset += i
			i = 0
		}
	}

	if i > 5000 {
		// XXX prevent infinite loop, raise the limit later
		panic("too many results")
	}

	if dh.Entries == nil {
		prefix := *dh.inode.FullName
		if len(prefix) != 0 {
			prefix += "/"
		}

		params := &s3.ListObjectsInput{
			Bucket:    &fs.bucket,
			Delimiter: aws.String("/"),
			Marker:    dh.Marker,
			Prefix:    &prefix,
			//MaxKeys:   aws.Int64(3),
		}

		resp, err := fs.s3.ListObjects(params)
		if err != nil {
			return nil, mapAwsError(err)
		}

		s3Log.Debug(resp)

		dh.Entries = make([]fuseutil.Dirent, 0, len(resp.CommonPrefixes)+len(resp.Contents))

		for _, dir := range resp.CommonPrefixes {
			// strip trailing /
			dirName := (*dir.Prefix)[0 : len(*dir.Prefix)-1]
			// strip previous prefix
			dirName = dirName[len(*params.Prefix):]
			dh.Entries = append(dh.Entries, makeDirEntry(dirName, fuseutil.DT_Directory))
			dh.NameToEntry[dirName] = fs.rootAttrs
		}

		for _, obj := range resp.Contents {
			baseName := (*obj.Key)[len(prefix):]
			if len(baseName) == 0 {
				// this is a directory blob
				continue
			}
			dh.Entries = append(dh.Entries, makeDirEntry(baseName, fuseutil.DT_File))

			dh.NameToEntry[baseName] = fuseops.InodeAttributes{
				Size:   uint64(*obj.Size),
				Nlink:  1,
				Mode:   fs.flags.FileMode,
				Atime:  *obj.LastModified,
				Mtime:  *obj.LastModified,
				Ctime:  *obj.LastModified,
				Crtime: *obj.LastModified,
				Uid:    fs.flags.Uid,
				Gid:    fs.flags.Gid,
			}
		}

		sort.Sort(sortedDirents(dh.Entries))

		// Fix up offset fields.
		for i := 0; i < len(dh.Entries); i++ {
			en := &dh.Entries[i]
			// offset is 1 based, also need to account for "." and ".."
			en.Offset = fuseops.DirOffset(i+dh.BaseOffset) + 1 + 2
		}

		if *resp.IsTruncated {
			dh.Marker = resp.NextMarker
		} else {
			dh.Marker = nil
		}
	}

	if i == len(dh.Entries) {
		// we've reached the end
		return nil, nil
	} else if i > len(dh.Entries) {
		return nil, fuse.EINVAL
	}

	return &dh.Entries[i], nil
}
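// The two DirHandle.ReadDir implementations (above and at the end of this
// section) construct entries via a makeDirEntry helper that is not part of
// these snippets. A minimal sketch, assuming it only fills in the name and
// type and leaves Offset (and a meaningful Inode) to be fixed up by the
// caller:
func makeDirEntry(name string, t fuseutil.DirentType) fuseutil.Dirent {
	return fuseutil.Dirent{
		Name:   name,
		Type:   t,
		Inode:  fuseops.RootInodeID + 1, // bogus inode ID, as in readAllEntries above
		Offset: 0,                       // fixed up by the caller
	}
}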
// Convert a kernel message to an appropriate op. If the op is unknown, a
// special unexported type will be used.
//
// The caller is responsible for arranging for the message to be destroyed.
func convertInMessage(
	inMsg *buffer.InMessage,
	outMsg *buffer.OutMessage,
	protocol fusekernel.Protocol) (o interface{}, err error) {
	switch inMsg.Header().Opcode {
	case fusekernel.OpLookup:
		buf := inMsg.ConsumeBytes(inMsg.Len())
		n := len(buf)
		if n == 0 || buf[n-1] != '\x00' {
			err = errors.New("Corrupt OpLookup")
			return
		}

		o = &fuseops.LookUpInodeOp{
			Parent: fuseops.InodeID(inMsg.Header().Nodeid),
			Name:   string(buf[:n-1]),
		}

	case fusekernel.OpGetattr:
		o = &fuseops.GetInodeAttributesOp{
			Inode: fuseops.InodeID(inMsg.Header().Nodeid),
		}

	case fusekernel.OpSetattr:
		type input fusekernel.SetattrIn
		in := (*input)(inMsg.Consume(unsafe.Sizeof(input{})))
		if in == nil {
			err = errors.New("Corrupt OpSetattr")
			return
		}

		to := &fuseops.SetInodeAttributesOp{
			Inode: fuseops.InodeID(inMsg.Header().Nodeid),
		}
		o = to

		valid := fusekernel.SetattrValid(in.Valid)
		if valid&fusekernel.SetattrSize != 0 {
			to.Size = &in.Size
		}

		if valid&fusekernel.SetattrMode != 0 {
			mode := convertFileMode(in.Mode)
			to.Mode = &mode
		}

		if valid&fusekernel.SetattrAtime != 0 {
			t := time.Unix(int64(in.Atime), int64(in.AtimeNsec))
			to.Atime = &t
		}

		if valid&fusekernel.SetattrMtime != 0 {
			t := time.Unix(int64(in.Mtime), int64(in.MtimeNsec))
			to.Mtime = &t
		}

	case fusekernel.OpForget:
		type input fusekernel.ForgetIn
		in := (*input)(inMsg.Consume(unsafe.Sizeof(input{})))
		if in == nil {
			err = errors.New("Corrupt OpForget")
			return
		}

		o = &fuseops.ForgetInodeOp{
			Inode: fuseops.InodeID(inMsg.Header().Nodeid),
			N:     in.Nlookup,
		}

	case fusekernel.OpMkdir:
		in := (*fusekernel.MkdirIn)(inMsg.Consume(fusekernel.MkdirInSize(protocol)))
		if in == nil {
			err = errors.New("Corrupt OpMkdir")
			return
		}

		name := inMsg.ConsumeBytes(inMsg.Len())
		i := bytes.IndexByte(name, '\x00')
		if i < 0 {
			err = errors.New("Corrupt OpMkdir")
			return
		}
		name = name[:i]

		o = &fuseops.MkDirOp{
			Parent: fuseops.InodeID(inMsg.Header().Nodeid),
			Name:   string(name),

			// On Linux, vfs_mkdir calls through to the inode with at most
			// permissions and sticky bits set (cf. https://goo.gl/WxgQXk), and fuse
			// passes that on directly (cf. https://goo.gl/f31aMo). In other words,
			// the fact that this is a directory is implicit in the fact that the
			// opcode is mkdir. But we want the correct mode to go through, so ensure
			// that os.ModeDir is set.
			Mode: convertFileMode(in.Mode) | os.ModeDir,
		}

	case fusekernel.OpCreate:
		in := (*fusekernel.CreateIn)(inMsg.Consume(fusekernel.CreateInSize(protocol)))
		if in == nil {
			err = errors.New("Corrupt OpCreate")
			return
		}

		name := inMsg.ConsumeBytes(inMsg.Len())
		i := bytes.IndexByte(name, '\x00')
		if i < 0 {
			err = errors.New("Corrupt OpCreate")
			return
		}
		name = name[:i]

		o = &fuseops.CreateFileOp{
			Parent: fuseops.InodeID(inMsg.Header().Nodeid),
			Name:   string(name),
			Mode:   convertFileMode(in.Mode),
		}

	case fusekernel.OpSymlink:
		// The message is "newName\0target\0".
		names := inMsg.ConsumeBytes(inMsg.Len())
		if len(names) == 0 || names[len(names)-1] != 0 {
			err = errors.New("Corrupt OpSymlink")
			return
		}
		i := bytes.IndexByte(names, '\x00')
		if i < 0 {
			err = errors.New("Corrupt OpSymlink")
			return
		}
		newName, target := names[0:i], names[i+1:len(names)-1]

		o = &fuseops.CreateSymlinkOp{
			Parent: fuseops.InodeID(inMsg.Header().Nodeid),
			Name:   string(newName),
			Target: string(target),
		}

	case fusekernel.OpRename:
		type input fusekernel.RenameIn
		in := (*input)(inMsg.Consume(unsafe.Sizeof(input{})))
		if in == nil {
			err = errors.New("Corrupt OpRename")
			return
		}

		names := inMsg.ConsumeBytes(inMsg.Len())
		// names should be "old\x00new\x00"
		if len(names) < 4 {
			err = errors.New("Corrupt OpRename")
			return
		}
		if names[len(names)-1] != '\x00' {
			err = errors.New("Corrupt OpRename")
			return
		}
		i := bytes.IndexByte(names, '\x00')
		if i < 0 {
			err = errors.New("Corrupt OpRename")
			return
		}
		oldName, newName := names[:i], names[i+1:len(names)-1]

		o = &fuseops.RenameOp{
			OldParent: fuseops.InodeID(inMsg.Header().Nodeid),
			OldName:   string(oldName),
			NewParent: fuseops.InodeID(in.Newdir),
			NewName:   string(newName),
		}

	case fusekernel.OpUnlink:
		buf := inMsg.ConsumeBytes(inMsg.Len())
		n := len(buf)
		if n == 0 || buf[n-1] != '\x00' {
			err = errors.New("Corrupt OpUnlink")
			return
		}

		o = &fuseops.UnlinkOp{
			Parent: fuseops.InodeID(inMsg.Header().Nodeid),
			Name:   string(buf[:n-1]),
		}

	case fusekernel.OpRmdir:
		buf := inMsg.ConsumeBytes(inMsg.Len())
		n := len(buf)
		if n == 0 || buf[n-1] != '\x00' {
			err = errors.New("Corrupt OpRmdir")
			return
		}

		o = &fuseops.RmDirOp{
			Parent: fuseops.InodeID(inMsg.Header().Nodeid),
			Name:   string(buf[:n-1]),
		}

	case fusekernel.OpOpen:
		o = &fuseops.OpenFileOp{
			Inode: fuseops.InodeID(inMsg.Header().Nodeid),
		}

	case fusekernel.OpOpendir:
		o = &fuseops.OpenDirOp{
			Inode: fuseops.InodeID(inMsg.Header().Nodeid),
		}

	case fusekernel.OpRead:
		in := (*fusekernel.ReadIn)(inMsg.Consume(fusekernel.ReadInSize(protocol)))
		if in == nil {
			err = errors.New("Corrupt OpRead")
			return
		}

		to := &fuseops.ReadFileOp{
			Inode:  fuseops.InodeID(inMsg.Header().Nodeid),
			Handle: fuseops.HandleID(in.Fh),
			Offset: int64(in.Offset),
		}
		o = to

		readSize := int(in.Size)
		p := outMsg.GrowNoZero(uintptr(readSize))
		if p == nil {
			err = fmt.Errorf("Can't grow for %d-byte read", readSize)
			return
		}

		sh := (*reflect.SliceHeader)(unsafe.Pointer(&to.Dst))
		sh.Data = uintptr(p)
		sh.Len = readSize
		sh.Cap = readSize

	case fusekernel.OpReaddir:
		in := (*fusekernel.ReadIn)(inMsg.Consume(fusekernel.ReadInSize(protocol)))
		if in == nil {
			err = errors.New("Corrupt OpReaddir")
			return
		}

		to := &fuseops.ReadDirOp{
			Inode:  fuseops.InodeID(inMsg.Header().Nodeid),
			Handle: fuseops.HandleID(in.Fh),
			Offset: fuseops.DirOffset(in.Offset),
		}
		o = to

		readSize := int(in.Size)
		p := outMsg.GrowNoZero(uintptr(readSize))
		if p == nil {
			err = fmt.Errorf("Can't grow for %d-byte read", readSize)
			return
		}

		sh := (*reflect.SliceHeader)(unsafe.Pointer(&to.Dst))
		sh.Data = uintptr(p)
		sh.Len = readSize
		sh.Cap = readSize

	case fusekernel.OpRelease:
		type input fusekernel.ReleaseIn
		in := (*input)(inMsg.Consume(unsafe.Sizeof(input{})))
		if in == nil {
			err = errors.New("Corrupt OpRelease")
			return
		}

		o = &fuseops.ReleaseFileHandleOp{
			Handle: fuseops.HandleID(in.Fh),
		}

	case fusekernel.OpReleasedir:
		type input fusekernel.ReleaseIn
		in := (*input)(inMsg.Consume(unsafe.Sizeof(input{})))
		if in == nil {
			err = errors.New("Corrupt OpReleasedir")
			return
		}

		o = &fuseops.ReleaseDirHandleOp{
			Handle: fuseops.HandleID(in.Fh),
		}

	case fusekernel.OpWrite:
		in := (*fusekernel.WriteIn)(inMsg.Consume(fusekernel.WriteInSize(protocol)))
		if in == nil {
			err = errors.New("Corrupt OpWrite")
			return
		}

		buf := inMsg.ConsumeBytes(inMsg.Len())
		if len(buf) < int(in.Size) {
			err = errors.New("Corrupt OpWrite")
			return
		}

		o = &fuseops.WriteFileOp{
			Inode:  fuseops.InodeID(inMsg.Header().Nodeid),
			Handle: fuseops.HandleID(in.Fh),
			Data:   buf,
			Offset: int64(in.Offset),
		}

	case fusekernel.OpFsync:
		type input fusekernel.FsyncIn
		in := (*input)(inMsg.Consume(unsafe.Sizeof(input{})))
		if in == nil {
			err = errors.New("Corrupt OpFsync")
			return
		}

		o = &fuseops.SyncFileOp{
			Inode:  fuseops.InodeID(inMsg.Header().Nodeid),
			Handle: fuseops.HandleID(in.Fh),
		}

	case fusekernel.OpFlush:
		type input fusekernel.FlushIn
		in := (*input)(inMsg.Consume(unsafe.Sizeof(input{})))
		if in == nil {
			err = errors.New("Corrupt OpFlush")
			return
		}

		o = &fuseops.FlushFileOp{
			Inode:  fuseops.InodeID(inMsg.Header().Nodeid),
			Handle: fuseops.HandleID(in.Fh),
		}

	case fusekernel.OpReadlink:
		o = &fuseops.ReadSymlinkOp{
			Inode: fuseops.InodeID(inMsg.Header().Nodeid),
		}

	case fusekernel.OpStatfs:
		o = &statFSOp{}

	case fusekernel.OpInterrupt:
		type input fusekernel.InterruptIn
		in := (*input)(inMsg.Consume(unsafe.Sizeof(input{})))
		if in == nil {
			err = errors.New("Corrupt OpInterrupt")
			return
		}

		o = &interruptOp{
			FuseID: in.Unique,
		}

	case fusekernel.OpInit:
		type input fusekernel.InitIn
		in := (*input)(inMsg.Consume(unsafe.Sizeof(input{})))
		if in == nil {
			err = errors.New("Corrupt OpInit")
			return
		}

		o = &initOp{
			Kernel:       fusekernel.Protocol{in.Major, in.Minor},
			MaxReadahead: in.MaxReadahead,
			Flags:        fusekernel.InitFlags(in.Flags),
		}

	default:
		o = &unknownOp{
			OpCode: inMsg.Header().Opcode,
			Inode:  fuseops.InodeID(inMsg.Header().Nodeid),
		}
	}

	return
}
func (dh *DirHandle) ReadDir(fs *Ossvfs, offset fuseops.DirOffset) (*fuseutil.Dirent, error) {
	// If the request is for offset zero, we assume that either this is the first
	// call or rewinddir has been called. Reset state.
	if offset == 0 {
		dh.Entries = nil
	}

	if offset == 0 {
		e := makeDirEntry(".", fuseutil.DT_Directory)
		e.Offset = 1
		dh.NameToEntry["."] = fs.rootAttrs
		return &e, nil
	} else if offset == 1 {
		e := makeDirEntry("..", fuseutil.DT_Directory)
		e.Offset = 2
		dh.NameToEntry[".."] = fs.rootAttrs
		return &e, nil
	}

	i := int(offset) - dh.BaseOffset - 2
	if i < 0 {
		panic(fmt.Sprintf("invalid offset %v, base=%v", offset, dh.BaseOffset))
	}

	if i >= len(dh.Entries) {
		if dh.Marker != nil {
			// we need to fetch the next page
			dh.Entries = nil
			dh.BaseOffset += i
			i = 0
		}
	}

	if i > 5000 {
		// XXX prevent infinite loop, raise the limit later
		panic("too many results")
	}

	if dh.Entries == nil {
		prefix := *dh.inode.FullName
		if len(prefix) != 0 {
			prefix += "/"
		}

		resp, err := fs.bucket.List(prefix, "/", *dh.Marker, 0)
		if err != nil {
			return nil, mapOssError(err)
		}

		ossLog.Debug(resp)

		dh.Entries = make([]fuseutil.Dirent, 0, len(resp.CommonPrefixes)+len(resp.Contents))

		for _, dir := range resp.CommonPrefixes {
			// strip trailing /
			dirName := dir[0 : len(dir)-1]
			// strip previous prefix
			dirName = dirName[len(prefix):]
			dh.Entries = append(dh.Entries, makeDirEntry(dirName, fuseutil.DT_Directory))
			dh.NameToEntry[dirName] = fs.rootAttrs
		}

		for _, obj := range resp.Contents {
			baseName := obj.Key[len(prefix):]
			if len(baseName) == 0 {
				// this is a directory blob
				continue
			}
			dh.Entries = append(dh.Entries, makeDirEntry(baseName, fuseutil.DT_File))

			lastModifiedTime, err := time.Parse("2006-01-02T15:04:05.000Z", obj.LastModified)
			if err != nil {
				panic("invalid LastModified time: " + obj.LastModified)
			}

			dh.NameToEntry[baseName] = fuseops.InodeAttributes{
				Size:   uint64(obj.Size),
				Nlink:  1,
				Mode:   fs.flags.FileMode,
				Atime:  lastModifiedTime,
				Mtime:  lastModifiedTime,
				Ctime:  lastModifiedTime,
				Crtime: lastModifiedTime,
				Uid:    fs.flags.Uid,
				Gid:    fs.flags.Gid,
			}
		}

		sort.Sort(sortedDirents(dh.Entries))

		// Fix up offset fields.
		for i := 0; i < len(dh.Entries); i++ {
			en := &dh.Entries[i]
			// offset is 1 based, also need to account for "." and ".."
			en.Offset = fuseops.DirOffset(i+dh.BaseOffset) + 1 + 2
		}

		if resp.IsTruncated {
			dh.Marker = &resp.NextMarker
		} else {
			dh.Marker = nil
		}
	}

	if i == len(dh.Entries) {
		// we've reached the end
		return nil, nil
	} else if i > len(dh.Entries) {
		return nil, fuse.EINVAL
	}

	return &dh.Entries[i], nil
}