// listFiles walks the path returning files and directories to out func (f *Fs) listFiles(out fs.ListOpts, dir string) { defer out.Finished() // List the objects err := f.list(dir, out.Level(), "", 0, false, func(remote string, object *api.File, isDirectory bool) error { if isDirectory { dir := &fs.Dir{ Name: remote, Bytes: -1, Count: -1, } if out.AddDir(dir) { return fs.ErrorListAborted } } else { if o := f.newFsObjectWithInfo(remote, object); o != nil { if out.Add(o) { return fs.ErrorListAborted } } } return nil }) if err != nil { out.SetError(err) } }
// listBuckets lists the buckets to out func (f *Fs) listBuckets(out fs.ListOpts, dir string) { defer out.Finished() if dir != "" { out.SetError(fs.ErrorListOnlyRoot) return } if f.projectNumber == "" { out.SetError(errors.New("can't list buckets without project number")) return } listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks) for { buckets, err := listBuckets.Do() if err != nil { out.SetError(err) return } for _, bucket := range buckets.Items { dir := &fs.Dir{ Name: bucket.Name, Bytes: 0, Count: 0, } if out.AddDir(dir) { return } } if buckets.NextPageToken == "" { break } listBuckets.PageToken(buckets.NextPageToken) } }
// List the path into out // // Ignores everything which isn't Storable, eg links etc func (f *Fs) List(out fs.ListOpts, dir string) { defer out.Finished() dir = filterFragment(f.cleanUtf8(dir)) root := filepath.Join(f.root, dir) _, err := os.Stat(root) if err != nil { out.SetError(fs.ErrorDirNotFound) return } in := make(chan listArgs, out.Buffer()) var wg sync.WaitGroup // sync closing of go routines var traversing sync.WaitGroup // running directory traversals // Start the process traversing.Add(1) in <- listArgs{remote: dir, dirpath: root, level: out.Level() - 1} for i := 0; i < fs.Config.Checkers; i++ { wg.Add(1) go func() { defer wg.Done() for job := range in { if out.IsFinished() { continue } newJobs := f.list(out, job.remote, job.dirpath, job.level) // Now we have traversed this directory, send // these ones off for traversal if len(newJobs) != 0 { traversing.Add(len(newJobs)) go func() { for _, newJob := range newJobs { in <- newJob } }() } traversing.Done() } }() } // Wait for traversal to finish traversing.Wait() close(in) wg.Wait() }
// ListDir reads the directory specified by the job into out, returning any more jobs
//
// Entries are dispatched on their type: folders become fs.Dirs (and new
// jobs while job.Depth remains), items with an MD5 checksum are regular
// drive files, and items with export links are google docs which are
// listed as remote+"."+extension with unknown size.
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
	fs.Debug(f, "Reading %q", job.Path)
	_, err = f.listAll(job.DirID, "", false, false, func(item *drive.File) bool {
		remote := job.Path + item.Title
		switch {
		case *driveAuthOwnerOnly && !isAuthOwned(item):
			// ignore object or directory
		case item.MimeType == driveFolderType:
			if out.IncludeDirectory(remote) {
				dir := &fs.Dir{
					Name:  remote,
					Bytes: -1,
					Count: -1,
				}
				// error deliberately ignored: an unparseable date
				// leaves the zero time in dir.When
				dir.When, _ = time.Parse(timeFormatIn, item.ModifiedDate)
				// returning true presumably aborts the listing — TODO confirm
				if out.AddDir(dir) {
					return true
				}
				if job.Depth > 0 {
					jobs = append(jobs, dircache.ListDirJob{DirID: item.Id, Path: remote + "/", Depth: job.Depth - 1})
				}
			}
		case item.Md5Checksum != "":
			// If item has MD5 sum it is a file stored on drive
			if o := f.newFsObjectWithInfo(remote, item); o != nil {
				if out.Add(o) {
					return true
				}
			}
		case len(item.ExportLinks) != 0:
			// If item has export links then it is a google doc
			extension, link := f.findExportFormat(remote, item)
			if extension == "" {
				fs.Debug(remote, "No export formats found")
			} else {
				if o := f.newFsObjectWithInfo(remote+"."+extension, item); o != nil {
					obj := o.(*Object)
					obj.isDocument = true
					obj.url = link
					// size is unknown until the doc is exported
					obj.bytes = -1
					if out.Add(o) {
						return true
					}
				}
			}
		default:
			fs.Debug(remote, "Ignoring unknown object")
		}
		return false
	})
	fs.Debug(f, "Finished reading %q", job.Path)
	return jobs, err
}
// listOneLevel walks the path one level deep func (f *Fs) listOneLevel(out fs.ListOpts, dir string) { root := f.root if dir != "" { root += "/" + dir } dirEntry, err := f.db.Metadata(root, true, false, "", "", metadataLimit) if err != nil { out.SetError(errors.Wrap(err, "couldn't list single level")) return } for i := range dirEntry.Contents { entry := &dirEntry.Contents[i] remote, err := strip(entry.Path, root) if err != nil { out.SetError(err) return } if entry.IsDir { dir := &fs.Dir{ Name: remote, When: time.Time(entry.ClientMtime), Bytes: entry.Bytes, Count: -1, } if out.AddDir(dir) { return } } else { o, err := f.newObjectWithInfo(remote, entry) if err != nil { out.SetError(err) return } if out.Add(o) { return } } } }
// ListDir reads the directory specified by the job into out, returning any more jobs
//
// The whole listing is retried up to fs.Config.LowLevelRetries times on
// retryable errors.
//
// NOTE(review): jobs appended during a failed attempt are kept when the
// listing is retried, so a retry may append duplicate jobs — confirm
// whether listAll restarts from scratch.
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
	fs.Debug(f, "Reading %q", job.Path)
	maxTries := fs.Config.LowLevelRetries
	for tries := 1; tries <= maxTries; tries++ {
		_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
			remote := job.Path + *node.Name
			switch *node.Kind {
			case folderKind:
				if out.IncludeDirectory(remote) {
					dir := &fs.Dir{
						Name:  remote,
						Bytes: -1,
						Count: -1,
					}
					// parse error deliberately ignored, leaving the zero time
					dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
					if out.AddDir(dir) {
						return true
					}
					if job.Depth > 0 {
						jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
					}
				}
			case fileKind:
				o, err := f.newObjectWithInfo(remote, node)
				if err != nil {
					out.SetError(err)
					return true
				}
				if out.Add(o) {
					return true
				}
			default:
				// ignore ASSET etc
			}
			return false
		})
		// Retry the whole listing on retryable errors; after the last
		// attempt the loop exits and err is returned below.
		if fs.IsRetryError(err) {
			fs.Debug(f, "Directory listing error for %q: %v - low level retry %d/%d", job.Path, err, tries, maxTries)
			continue
		}
		if err != nil {
			return nil, err
		}
		break
	}
	fs.Debug(f, "Finished reading %q", job.Path)
	return jobs, err
}
// listFiles walks the path returning files and directories to out
//
// When *b2Versions is set the listing includes old versions of files.
// The same remote then appears multiple times; all occurrences after
// the first are renamed with a version suffix derived from the upload
// timestamp (presumably the listing is ordered so the current version
// comes first — TODO confirm against f.list).
func (f *Fs) listFiles(out fs.ListOpts, dir string) {
	defer out.Finished()
	// List the objects
	last := "" // the previous remote seen, used to detect older versions
	err := f.list(dir, out.Level(), "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
		if isDirectory {
			dir := &fs.Dir{
				Name:  remote,
				Bytes: -1,
				Count: -1,
			}
			if out.AddDir(dir) {
				return fs.ErrorListAborted
			}
		} else {
			// A repeated remote is an older version: give it a
			// version-suffixed name.  Note this must happen before the
			// hide check so that `last` tracking stays consistent.
			if remote == last {
				remote = object.UploadTimestamp.AddVersion(remote)
			} else {
				last = remote
			}
			// hide objects represent deleted files which we don't list
			if object.Action == "hide" {
				return nil
			}
			o, err := f.newObjectWithInfo(remote, object)
			if err != nil {
				return err
			}
			if out.Add(o) {
				return fs.ErrorListAborted
			}
		}
		return nil
	})
	if err != nil {
		out.SetError(err)
	}
}
// List walks the path returning a channel of Objects func (f *Fs) List(out fs.ListOpts, dir string) { defer out.Finished() level := out.Level() switch level { case 1: f.listOneLevel(out, dir) case fs.MaxLevel: f.list(out, dir) default: out.SetError(fs.ErrorLevelNotSupported) } }
// listBuckets returns all the buckets to out func (f *Fs) listBuckets(out fs.ListOpts, dir string) { defer out.Finished() if dir != "" { out.SetError(fs.ErrorListOnlyRoot) return } err := f.listBucketsToFn(func(bucket *api.Bucket) error { dir := &fs.Dir{ Name: bucket.Name, Bytes: -1, Count: -1, } if out.AddDir(dir) { return fs.ErrorListAborted } return nil }) if err != nil { out.SetError(err) } }
// listContainers lists the containers func (f *Fs) listContainers(out fs.ListOpts, dir string) { defer out.Finished() if dir != "" { out.SetError(fs.ErrorListOnlyRoot) return } containers, err := f.c.ContainersAll(nil) if err != nil { out.SetError(err) return } for _, container := range containers { dir := &fs.Dir{ Name: container.Name, Bytes: container.Bytes, Count: container.Count, } if out.AddDir(dir) { break } } }
// ListDir reads the directory specified by the job into out, returning any more jobs func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) { fs.Debug(f, "Reading %q", job.Path) _, err = f.listAll(job.DirID, false, false, func(info *api.Item) bool { remote := job.Path + info.Name if info.Folder != nil { if out.IncludeDirectory(remote) { dir := &fs.Dir{ Name: remote, Bytes: -1, Count: -1, When: time.Time(info.LastModifiedDateTime), } if info.Folder != nil { dir.Count = info.Folder.ChildCount } if out.AddDir(dir) { return true } if job.Depth > 0 { jobs = append(jobs, dircache.ListDirJob{DirID: info.ID, Path: remote + "/", Depth: job.Depth - 1}) } } } else { o, err := f.newObjectWithInfo(remote, info) if err != nil { out.SetError(err) return true } if out.Add(o) { return true } } return false }) fs.Debug(f, "Finished reading %q", job.Path) return jobs, err }
// listBuckets lists the buckets to out func (f *Fs) listBuckets(out fs.ListOpts, dir string) { defer out.Finished() if dir != "" { out.SetError(fs.ErrorListOnlyRoot) return } req := s3.ListBucketsInput{} resp, err := f.c.ListBuckets(&req) if err != nil { out.SetError(err) return } for _, bucket := range resp.Buckets { dir := &fs.Dir{ Name: aws.StringValue(bucket.Name), When: aws.TimeValue(bucket.CreationDate), Bytes: -1, Count: -1, } if out.AddDir(dir) { break } } }
// List walks the path returning files and directories into out
//
// It resolves the root and the id of dir via the directory cache and
// then hands off to listDir for the actual traversal.
func (dc *DirCache) List(f ListDirer, out fs.ListOpts, dir string) {
	defer out.Finished()
	err := dc.FindRoot(false)
	if err != nil {
		out.SetError(err)
		return
	}
	id, err := dc.FindDir(dir, false)
	if err != nil {
		out.SetError(err)
		return
	}
	// Make dir a prefix for the paths produced by listDir
	if dir != "" {
		dir += "/"
	}
	listDir(f, out, id, dir)
}
// listDir lists the directory using a recursive list from the root
//
// It does this in parallel, calling f.ListDir to do the actual reading
//
// A pool of out.Buffer() workers reads jobs from in; new jobs produced
// by a listing are sent back into in from a fresh goroutine so that a
// full channel cannot block the worker itself.  traversing counts
// outstanding jobs; when it reaches zero the channel is closed and the
// workers exit.
func listDir(f ListDirer, out fs.ListOpts, dirID string, path string) {
	// Start some directory listing go routines
	var wg sync.WaitGroup         // sync closing of go routines
	var traversing sync.WaitGroup // running directory traversals
	buffer := out.Buffer()
	in := make(chan ListDirJob, buffer)
	for i := 0; i < buffer; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range in {
				jobs, err := f.ListDir(out, job)
				if err != nil {
					out.SetError(err)
					fs.Debug(f, "Error reading %s: %s", path, err)
				} else {
					// Add before Done so the count never hits zero early
					traversing.Add(len(jobs))
					go func() {
						// Now we have traversed this directory, send these
						// jobs off for traversal in the background
						for _, job := range jobs {
							in <- job
						}
					}()
				}
				traversing.Done()
			}
		}()
	}

	// Start the process
	traversing.Add(1)
	in <- ListDirJob{DirID: dirID, Path: path, Depth: out.Level() - 1}
	traversing.Wait()
	close(in)
	wg.Wait()
}
// Walk the root returning a channel of Objects
//
// Uses the dropbox delta API to enumerate everything under root.  Files
// whose parent directory's correct case is not yet known are parked in
// nameTree and emitted at the end via WalkFiles once the case has been
// resolved.
func (f *Fs) list(out fs.ListOpts, dir string) {
	// Track path component case, it could be different for entries coming from DropBox API
	// See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us
	// and https://github.com/ncw/rclone/issues/53
	nameTree := newNameTree()
	cursor := ""
	root := f.slashRoot
	if dir != "" {
		root += "/" + dir
		// We assume that dir is entered in the correct case
		// here which is likely since it probably came from a
		// directory listing
		nameTree.PutCaseCorrectPath(strings.Trim(root, "/"))
	}
	for {
		deltaPage, err := f.db.Delta(cursor, root)
		if err != nil {
			out.SetError(errors.Wrap(err, "couldn't list"))
			return
		}
		// A reset mid-listing would invalidate everything seen so far
		if deltaPage.Reset && cursor != "" {
			err = errors.New("unexpected reset during listing")
			out.SetError(err)
			break
		}
		fs.Debug(f, "%d delta entries received", len(deltaPage.Entries))
		for i := range deltaPage.Entries {
			deltaEntry := &deltaPage.Entries[i]
			entry := deltaEntry.Entry
			if entry == nil {
				// This notifies of a deleted object
			} else {
				if len(entry.Path) <= 1 || entry.Path[0] != '/' {
					fs.Log(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
					continue
				}

				// Split the path into parent directory and last component
				lastSlashIndex := strings.LastIndex(entry.Path, "/")

				var parentPath string
				if lastSlashIndex == 0 {
					parentPath = ""
				} else {
					parentPath = entry.Path[1:lastSlashIndex]
				}
				lastComponent := entry.Path[lastSlashIndex+1:]

				if entry.IsDir {
					// Record the case-correct directory name for later lookups
					nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent)
					name, err := f.stripRoot(entry.Path + "/")
					if err != nil {
						out.SetError(err)
						return
					}
					name = strings.Trim(name, "/")
					// Don't emit the root of the listing itself
					if name != "" && name != dir {
						dir := &fs.Dir{
							Name:  name,
							When:  time.Time(entry.ClientMtime),
							Bytes: entry.Bytes,
							Count: -1,
						}
						if out.AddDir(dir) {
							return
						}
					}
				} else {
					parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath)
					if parentPathCorrectCase != nil {
						// Parent case already known - emit the file now
						path, err := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent)
						if err != nil {
							out.SetError(err)
							return
						}
						o, err := f.newObjectWithInfo(path, entry)
						if err != nil {
							out.SetError(err)
							return
						}
						if out.Add(o) {
							return
						}
					} else {
						// Parent case unknown yet - park the file in the
						// tree and emit it in the final WalkFiles pass
						nameTree.PutFile(parentPath, lastComponent, entry)
					}
				}
			}
		}
		if !deltaPage.HasMore {
			break
		}
		cursor = deltaPage.Cursor.Cursor
	}
	// Emit the files that were parked waiting for their parent's case
	walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) error {
		path, err := f.stripRoot("/" + caseCorrectFilePath)
		if err != nil {
			return err
		}
		o, err := f.newObjectWithInfo(path, entry)
		if err != nil {
			return err
		}
		if out.Add(o) {
			return fs.ErrorListAborted
		}
		return nil
	}
	err := nameTree.WalkFiles(f.root, walkFunc)
	if err != nil {
		out.SetError(err)
	}
}
// listFiles lists files and directories to out func (f *Fs) listFiles(out fs.ListOpts, dir string) { defer out.Finished() if f.bucket == "" { // Return no objects at top level list out.SetError(errors.New("can't list objects at root - choose a bucket using lsd")) return } // List the objects and directories err := f.list(dir, out.Level(), func(remote string, object *s3.Object, isDirectory bool) error { if isDirectory { size := int64(0) if object.Size != nil { size = *object.Size } dir := &fs.Dir{ Name: remote, Bytes: size, Count: 0, } if out.AddDir(dir) { return fs.ErrorListAborted } } else { o, err := f.newObjectWithInfo(remote, object) if err != nil { return err } if out.Add(o) { return fs.ErrorListAborted } } return nil }) if err != nil { if awsErr, ok := err.(awserr.RequestFailure); ok { if awsErr.StatusCode() == http.StatusNotFound { err = fs.ErrorDirNotFound } } out.SetError(err) } }
// list traverses the directory passed in, listing to out.
// It returns the subdirectories found as listArgs for the caller to
// traverse (the old comment claiming a boolean return was stale).
//
// remote is the path relative to the remote root, dirpath the matching
// OS path, and level the remaining recursion depth.
func (f *Fs) list(out fs.ListOpts, remote string, dirpath string, level int) (subdirs []listArgs) {
	fd, err := os.Open(dirpath)
	if err != nil {
		out.SetError(errors.Wrapf(err, "failed to open directory %q", dirpath))
		return nil
	}
	defer func() {
		err := fd.Close()
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to close directory %q:", dirpath))
		}
	}()
	for {
		// Read the directory in chunks of 1024 entries
		fis, err := fd.Readdir(1024)
		if err == io.EOF && len(fis) == 0 {
			break
		}
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to read directory %q", dirpath))
			return nil
		}
		for _, fi := range fis {
			name := fi.Name()
			newRemote := path.Join(remote, name)
			newPath := filepath.Join(dirpath, name)
			if fi.IsDir() {
				if out.IncludeDirectory(newRemote) {
					dir := &fs.Dir{
						Name:  normString(f.cleanUtf8(newRemote)),
						When:  fi.ModTime(),
						Bytes: 0,
						Count: 0,
					}
					if out.AddDir(dir) {
						return nil
					}
					// Queue the subdirectory while depth remains
					if level > 0 {
						subdirs = append(subdirs, listArgs{remote: newRemote, dirpath: newPath, level: level - 1})
					}
				}
			} else {
				fso, err := f.newObjectWithInfo(newRemote, fi)
				if err != nil {
					out.SetError(err)
					return nil
				}
				// Skip non-storable entries (eg links)
				if fso.Storable() && out.Add(fso) {
					return nil
				}
			}
		}
	}
	return subdirs
}
// listFiles walks the path returning a channel of Objects func (f *Fs) listFiles(out fs.ListOpts, dir string) { defer out.Finished() if f.container == "" { out.SetError(errors.New("can't list objects at root - choose a container using lsd")) return } // List the objects err := f.list(dir, out.Level(), func(remote string, object *swift.Object, isDirectory bool) error { if isDirectory { dir := &fs.Dir{ Name: remote, Bytes: object.Bytes, Count: 0, } if out.AddDir(dir) { return fs.ErrorListAborted } } else { o, err := f.newObjectWithInfo(remote, object) if err != nil { return err } // Storable does a full metadata read on 0 size objects which might be dynamic large objects if o.Storable() { if out.Add(o) { return fs.ErrorListAborted } } } return nil }) if err != nil { if err == swift.ContainerNotFound { err = fs.ErrorDirNotFound } out.SetError(err) } }
// listFiles lists files and directories to out func (f *Fs) listFiles(out fs.ListOpts, dir string) { defer out.Finished() if f.bucket == "" { out.SetError(errors.New("can't list objects at root - choose a bucket using lsd")) return } // List the objects err := f.list(dir, out.Level(), func(remote string, object *storage.Object, isDirectory bool) error { if isDirectory { dir := &fs.Dir{ Name: remote, Bytes: int64(object.Size), Count: 0, } if out.AddDir(dir) { return fs.ErrorListAborted } } else { o, err := f.newObjectWithInfo(remote, object) if err != nil { return err } if out.Add(o) { return fs.ErrorListAborted } } return nil }) if err != nil { if gErr, ok := err.(*googleapi.Error); ok { if gErr.Code == http.StatusNotFound { err = fs.ErrorDirNotFound } } out.SetError(err) } }
// List walks the path returning a channel of Objects
//
// Supports a one-level listing (level 1) and a full-depth listing
// (fs.MaxLevel); any other level is reported as unsupported.
func (f *Fs) List(out fs.ListOpts, dir string) {
	defer out.Finished()
	// listItem forwards a single file or directory to out; returning
	// fs.ErrorListAborted stops the listing early.
	listItem := func(remote string, object *yandex.ResourceInfoResponse, isDirectory bool) error {
		if isDirectory {
			t, err := time.Parse(time.RFC3339Nano, object.Modified)
			if err != nil {
				return err
			}
			dir := &fs.Dir{
				Name:  remote,
				When:  t,
				Bytes: int64(object.Size),
				Count: -1,
			}
			if out.AddDir(dir) {
				return fs.ErrorListAborted
			}
		} else {
			o, err := f.newObjectWithInfo(remote, object)
			if err != nil {
				return err
			}
			if out.Add(o) {
				return fs.ErrorListAborted
			}
		}
		return nil
	}
	var err error
	switch out.Level() {
	case 1:
		// At the root a dedicated single-level lister is used
		if dir == "" {
			err = f.listDir(listItem)
		} else {
			err = f.list(dir, listItem)
		}
	case fs.MaxLevel:
		err = f.list(dir, listItem)
	default:
		out.SetError(fs.ErrorLevelNotSupported)
	}
	if err != nil {
		// FIXME
		// if err == swift.ContainerNotFound {
		// 	err = fs.ErrorDirNotFound
		// }
		out.SetError(err)
	}
}
// list traverses the directory passed in, listing to out.
// It returns the subdirectories found as listArgs for the caller to
// traverse (the old comment claiming a boolean return was stale).
//
// remote is the path relative to the remote root, dirpath the matching
// OS path, and level the remaining recursion depth.
func (f *Fs) list(out fs.ListOpts, remote string, dirpath string, level int) (subdirs []listArgs) {
	fd, err := os.Open(dirpath)
	if err != nil {
		out.SetError(errors.Wrapf(err, "failed to open directory %q", dirpath))
		return nil
	}
	defer func() {
		err := fd.Close()
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to close directory %q:", dirpath))
		}
	}()
	for {
		// Read the directory in chunks of 1024 entries
		fis, err := fd.Readdir(1024)
		if err == io.EOF && len(fis) == 0 {
			break
		}
		if err != nil {
			out.SetError(errors.Wrapf(err, "failed to read directory %q", dirpath))
			return nil
		}
		for _, fi := range fis {
			name := fi.Name()
			newRemote := path.Join(remote, name)
			newPath := filepath.Join(dirpath, name)
			if fi.IsDir() {
				// Ignore directories which are symlinks.  These are junction points under windows which
				// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
				if (fi.Mode()&os.ModeSymlink) == 0 && out.IncludeDirectory(newRemote) {
					dir := &fs.Dir{
						Name:  f.cleanRemote(newRemote),
						When:  fi.ModTime(),
						Bytes: 0,
						Count: 0,
					}
					if out.AddDir(dir) {
						return nil
					}
					// Queue the subdirectory while depth remains
					if level > 0 {
						subdirs = append(subdirs, listArgs{remote: newRemote, dirpath: newPath, level: level - 1})
					}
				}
			} else {
				fso, err := f.newObjectWithInfo(newRemote, fi)
				if err != nil {
					out.SetError(err)
					return nil
				}
				// Skip non-storable entries (eg links)
				if fso.Storable() && out.Add(fso) {
					return nil
				}
			}
		}
	}
	return subdirs
}