// TestFsListDirEmpty tests listing the directories from an empty directory
func TestFsListDirEmpty(t *testing.T) {
	skipIfNotOk(t)
	objs, dirs, err := fs.NewLister().SetLevel(1).Start(remote, "").GetAll()
	require.NoError(t, err)
	assert.Equal(t, []string{}, objsToNames(objs))
	assert.Equal(t, []string{}, dirsToNames(dirs))
}
// TestDeduplicateRename tests that fs.Deduplicate in DeduplicateRename
// mode renames the duplicated files out of the way
func TestDeduplicateRename(t *testing.T) {
	if *RemoteName != "TestDrive:" {
		t.Skip("Can only test deduplicate on google drive")
	}
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteUncheckedObject("one.txt", "This is one", t1)
	file2 := r.WriteUncheckedObject("one.txt", "This is one too", t2)
	file3 := r.WriteUncheckedObject("one.txt", "This is another one", t3)
	r.checkWithDuplicates(t, file1, file2, file3)

	err := fs.Deduplicate(r.fremote, fs.DeduplicateRename)
	require.NoError(t, err)

	list := fs.NewLister().Start(r.fremote, "")
	for {
		o, err := list.GetObject()
		require.NoError(t, err)
		// Check if we are finished
		if o == nil {
			break
		}
		remote := o.Remote()
		if remote != "one-1.txt" && remote != "one-2.txt" && remote != "one-3.txt" {
			t.Errorf("Bad file name after rename %q", remote)
		}
		size := o.Size()
		if size != file1.Size && size != file2.Size && size != file3.Size {
			t.Errorf("Size not one of the object sizes %d", size)
		}
	}
}
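// The snippets here consume a Lister in two ways: draining everything at
// once with GetAll, or streaming objects with GetObject until it returns
// nil. A minimal sketch of the streaming pattern, assuming the Lister API
// as used in these snippets (listAllObjects is a hypothetical helper for
// illustration, not part of the fs package):
func listAllObjects(f fs.Fs, dir string) ([]fs.Object, error) {
	list := fs.NewLister().Start(f, dir)
	var objs []fs.Object
	for {
		o, err := list.GetObject()
		if err != nil {
			return nil, err
		}
		// A nil object signals the end of the listing
		if o == nil {
			break
		}
		objs = append(objs, o)
	}
	return objs, nil
}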
// NewRun initialises the remote and local for testing and returns a
// run object. Call this from the tests.
//
// r.flocal is an empty local Fs
// r.fremote is an empty remote Fs
//
// Finalise() will tidy them away when done.
func NewRun(t *testing.T) *Run {
	var r *Run
	if *Individual {
		r = newRun()
	} else {
		// If not individual, use the global one with the clean method overridden
		r = new(Run)
		*r = *oneRun
		r.cleanRemote = func() {
			list := fs.NewLister().Start(r.fremote, "")
			for {
				o, err := list.GetObject()
				if err != nil {
					t.Fatalf("Error listing: %v", err)
				}
				// Check if we are finished
				if o == nil {
					break
				}
				err = o.Remove()
				if err != nil {
					t.Errorf("Error removing file: %v", err)
				}
			}
			// Check remote is empty
			fstest.CheckItems(t, r.fremote)
		}
	}
	r.Logf = t.Logf
	r.Fatalf = t.Fatalf
	r.Logf("Remote %q, Local %q, Modify Window %q", r.fremote, r.flocal, fs.Config.ModifyWindow)
	return r
}
// TestFsListDirRoot tests that DirList works in the root
func TestFsListDirRoot(t *testing.T) {
	skipIfNotOk(t)
	rootRemote, err := fs.NewFs(RemoteName)
	require.NoError(t, err)
	dirs, err := fs.NewLister().SetLevel(1).Start(rootRemote, "").GetDirs()
	require.NoError(t, err)
	assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found")
}
// TestFsListLevel2 tests List works for 2 levels
func TestFsListLevel2(t *testing.T) {
	skipIfNotOk(t)
	objs, dirs, err := fs.NewLister().SetLevel(2).Start(remote, "").GetAll()
	if err == fs.ErrorLevelNotSupported {
		return
	}
	require.NoError(t, err)
	assert.Equal(t, []string{file1.Path}, objsToNames(objs))
	assert.Equal(t, []string{`hello_ sausage`, `hello_ sausage/êé`}, dirsToNames(dirs))
}
// TestFsListDirRoot tests that DirList works in the root
func TestFsListDirRoot(t *testing.T) {
	skipIfNotOk(t)
	rootRemote, err := fs.NewFs(RemoteName)
	if err != nil {
		t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
	}
	dirs, err := fs.NewLister().SetLevel(1).Start(rootRemote, "").GetDirs()
	require.NoError(t, err)
	assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found")
}
// readDir reads the directory
func (d *Dir) readDir() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	when := time.Now()
	if d.read.IsZero() {
		fs.Debug(d.path, "Reading directory")
	} else {
		age := when.Sub(d.read)
		if age < dirCacheTime {
			return nil
		}
		fs.Debug(d.path, "Re-reading directory (%v old)", age)
	}
	objs, dirs, err := fs.NewLister().SetLevel(1).Start(d.f, d.path).GetAll()
	if err == fs.ErrorDirNotFound {
		// We treat directory not found as empty because we
		// create directories on the fly
	} else if err != nil {
		return err
	}
	// NB when we re-read a directory after its cache has expired
	// we drop the old files which should lead to correct
	// behaviour but may not be very efficient.

	// Keep a note of the previous contents of the directory
	oldItems := d.items

	// Cache the items by name
	d.items = make(map[string]*DirEntry, len(objs)+len(dirs))
	for _, obj := range objs {
		name := path.Base(obj.Remote())
		d.items[name] = &DirEntry{
			o:    obj,
			node: nil,
		}
	}
	for _, dir := range dirs {
		name := path.Base(dir.Remote())
		// Use old dir value if it exists
		if oldItem, ok := oldItems[name]; ok {
			if _, ok := oldItem.o.(*fs.Dir); ok {
				d.items[name] = oldItem
				continue
			}
		}
		d.items[name] = &DirEntry{
			o:    dir,
			node: nil,
		}
	}
	d.read = when
	return nil
}
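// For context, a rough sketch of the cache types readDir relies on. These
// declarations are assumptions inferred from the usage above (the o field
// must hold both fs.Object and *fs.Dir values, and entries are considered
// fresh for dirCacheTime), not the real definitions:
const dirCacheTime = 60 * time.Second // assumed cache lifetime

type DirEntry struct {
	o    fs.BasicInfo // object or directory backing this entry (assumed common interface)
	node interface{}  // lazily created filesystem node, nil until first lookup (type assumed)
}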
// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non-nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs []string, precision time.Duration) {
	is := NewItems(items)
	oldErrors := fs.Stats.GetErrors()
	var objs []fs.Object
	var dirs []*fs.Dir
	var err error
	var retries = *listRetries
	sleep := time.Second / 2
	for i := 1; i <= retries; i++ {
		objs, dirs, err = fs.NewLister().Start(f, "").GetAll()
		if err != nil && err != fs.ErrorDirNotFound {
			t.Fatalf("Error listing: %v", err)
		}
		if len(objs) == len(items) && (expectedDirs == nil || len(dirs) == 0 || len(dirs) == len(expectedDirs)) {
			// Put an extra sleep in if we did any retries just to make sure it really
			// is consistent (here is looking at you Amazon Drive!)
			if i != 1 {
				extraSleep := 5*time.Second + sleep
				t.Logf("Sleeping for %v just to make sure", extraSleep)
				time.Sleep(extraSleep)
			}
			break
		}
		sleep *= 2
		t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
		time.Sleep(sleep)
		if doDirCacheFlush := f.Features().DirCacheFlush; doDirCacheFlush != nil {
			t.Logf("Flushing the directory cache")
			doDirCacheFlush()
		}
	}
	for _, obj := range objs {
		require.NotNil(t, obj)
		is.Find(t, obj, precision)
	}
	is.Done(t)
	// Don't notice an error when listing an empty directory
	if len(items) == 0 && oldErrors == 0 && fs.Stats.GetErrors() == 1 {
		fs.Stats.ResetErrors()
	}
	// Check the directories - ignore if no directories returned
	// for remotes which can't do directories
	if expectedDirs != nil && len(dirs) != 0 {
		actualDirs := []string{}
		for _, dir := range dirs {
			actualDirs = append(actualDirs, dir.Name)
		}
		sort.Strings(actualDirs)
		sort.Strings(expectedDirs)
		assert.Equal(t, expectedDirs, actualDirs, "directories")
	}
}
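// The retry loop above implements exponential backoff for eventually
// consistent remotes: start at half a second and double after each failed
// attempt. The same schedule in isolation (retryWithBackoff is a
// hypothetical helper for illustration, not part of fstest):
func retryWithBackoff(retries int, check func() bool) bool {
	sleep := time.Second / 2
	for i := 1; i <= retries; i++ {
		if check() {
			return true
		}
		sleep *= 2
		time.Sleep(sleep)
	}
	return false
}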
// TestFsListSubdir tests List works for a subdirectory
func TestFsListSubdir(t *testing.T) {
	skipIfNotOk(t)
	fileName := file2.Path
	if runtime.GOOS == "windows" {
		fileName = file2.WinPath
	}
	dir, _ := path.Split(fileName)
	dir = dir[:len(dir)-1]
	objs, dirs, err := fs.NewLister().Start(remote, dir).GetAll()
	require.NoError(t, err)
	require.Len(t, objs, 1)
	assert.Equal(t, fileName, objs[0].Remote())
	require.Len(t, dirs, 0)
}
// readRemote reads the remote tree into dir
func (r *Run) readRemote(t *testing.T, dir dirMap, filepath string) {
	objs, dirs, err := fs.NewLister().SetLevel(1).Start(r.fremote, filepath).GetAll()
	if err == fs.ErrorDirNotFound {
		return
	}
	require.NoError(t, err)
	for _, obj := range objs {
		dir[fmt.Sprintf("%s %d", obj.Remote(), obj.Size())] = struct{}{}
	}
	for _, d := range dirs {
		name := d.Remote()
		dir[name+"/"] = struct{}{}
		r.readRemote(t, dir, name)
	}
}
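// dirMap above is used purely as a set of strings: "remote size" entries
// for objects and "remote/" entries for directories. A plausible
// declaration, assumed from that usage rather than copied from the source:
type dirMap map[string]struct{}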
// TestFsListDirFile2 tests the files are correctly uploaded
func TestFsListDirFile2(t *testing.T) {
	skipIfNotOk(t)
	var objNames, dirNames []string
	for i := 1; i <= eventualConsistencyRetries; i++ {
		objs, dirs, err := fs.NewLister().SetLevel(1).Start(remote, "").GetAll()
		require.NoError(t, err)
		objNames = objsToNames(objs)
		dirNames = dirsToNames(dirs)
		if len(objNames) >= 1 && len(dirNames) >= 1 {
			break
		}
		t.Logf("Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d", i, eventualConsistencyRetries)
		time.Sleep(1 * time.Second)
	}
	assert.Equal(t, []string{`hello_ sausage`}, dirNames)
	assert.Equal(t, []string{file1.Path}, objNames)
}
// TestFsListSubdir tests List works for a subdirectory
func TestFsListSubdir(t *testing.T) {
	skipIfNotOk(t)
	fileName := file2.Path
	var err error
	var objs []fs.Object
	var dirs []*fs.Dir
	for i := 0; i < 2; i++ {
		dir, _ := path.Split(fileName)
		dir = dir[:len(dir)-1]
		objs, dirs, err = fs.NewLister().Start(remote, dir).GetAll()
		if err != fs.ErrorDirNotFound {
			break
		}
		fileName = file2.WinPath
	}
	require.NoError(t, err)
	require.Len(t, objs, 1)
	assert.Equal(t, fileName, objs[0].Remote())
	require.Len(t, dirs, 0)
}
// cleanFs runs a single clean fs for leftover directories
func (t *test) cleanFs() error {
	f, err := fs.NewFs(t.remote)
	if err != nil {
		return err
	}
	dirs, err := fs.NewLister().SetLevel(1).Start(f, "").GetDirs()
	if err != nil {
		return err
	}
	for _, dir := range dirs {
		if fstest.MatchTestRemote.MatchString(dir.Name) {
			log.Printf("Purging %s%s", t.remote, dir.Name)
			dir, err := fs.NewFs(t.remote + dir.Name)
			if err != nil {
				return err
			}
			err = fs.Purge(dir)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision time.Duration) {
	is := NewItems(items)
	oldErrors := fs.Stats.GetErrors()
	var objs []fs.Object
	var err error
	const retries = 6
	sleep := time.Second / 2
	for i := 1; i <= retries; i++ {
		objs, err = fs.NewLister().Start(f, "").GetObjects()
		if err != nil && err != fs.ErrorDirNotFound {
			t.Fatalf("Error listing: %v", err)
		}
		if len(objs) == len(items) {
			// Put an extra sleep in if we did any retries just to make sure it really
			// is consistent (here is looking at you Amazon Cloud Drive!)
			if i != 1 {
				extraSleep := 5*time.Second + sleep
				t.Logf("Sleeping for %v just to make sure", extraSleep)
				time.Sleep(extraSleep)
			}
			break
		}
		sleep *= 2
		t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
		time.Sleep(sleep)
	}
	for _, obj := range objs {
		require.NotNil(t, obj)
		is.Find(t, obj, precision)
	}
	is.Done(t)
	// Don't notice an error when listing an empty directory
	if len(items) == 0 && oldErrors == 0 && fs.Stats.GetErrors() == 1 {
		fs.Stats.ResetErrors()
	}
}