// SetModTime sets the modification time of the remote swift object
func (o *Object) SetModTime(modTime time.Time) {
	err := o.readMetaData()
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to read metadata: %s", err)
		return
	}
	meta := o.headers.ObjectMetadata()
	meta.SetModTime(modTime)
	newHeaders := meta.ObjectHeaders()
	for k, v := range newHeaders {
		(*o.headers)[k] = v
	}
	// Include any other metadata from request
	for k, v := range *o.headers {
		if strings.HasPrefix(k, "X-Object-") {
			newHeaders[k] = v
		}
	}
	err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
	}
}
// ListDir walks the path returning a channel of directories
func (f *FsDrive) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		err := f.dirCache.FindRoot(false)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			_, err := f.listAll(f.dirCache.RootID(), "", true, false, func(item *drive.File) bool {
				dir := &fs.Dir{
					Name:  item.Title,
					Bytes: -1,
					Count: -1,
				}
				dir.When, _ = time.Parse(timeFormatIn, item.ModifiedDate)
				out <- dir
				return false
			})
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "ListDir failed: %s", err)
			}
		}
	}()
	return out
}
// SetModTime sets the modification time of the remote object
func (o *Object) SetModTime(modTime time.Time) {
	err := o.readMetaData()
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to read metadata: %s", err)
		return
	}
	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))

	// Guess the content type
	contentType := fs.MimeType(o)

	// Copy the object to itself to update the metadata
	key := o.fs.root + o.remote
	sourceKey := o.fs.bucket + "/" + key
	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
	req := s3.CopyObjectInput{
		Bucket:            &o.fs.bucket,
		ACL:               &o.fs.perm,
		Key:               &key,
		ContentType:       &contentType,
		CopySource:        aws.String(url.QueryEscape(sourceKey)),
		Metadata:          o.meta,
		MetadataDirective: &directive,
	}
	_, err = o.fs.c.CopyObject(&req)
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
	}
}
// List the path returning a channel of FsObjects
//
// Ignores everything which isn't Storable, eg links etc
func (f *FsLocal) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	go func() {
		err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
			} else {
				remote, err := filepath.Rel(f.root, path)
				if err != nil {
					fs.Stats.Error()
					fs.ErrorLog(f, "Failed to get relative path %s: %s", path, err)
					return nil
				}
				if remote == "." {
					return nil // remote = ""
				}
				if fs := f.newFsObjectWithInfo(remote, fi); fs != nil {
					if fs.Storable() {
						out <- fs
					}
				}
			}
			return nil
		})
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Failed to open directory: %s: %s", f.root, err)
		}
		close(out)
	}()
	return out
}
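// The listing above relies on the filepath.Walk / filepath.Rel pattern to turn
// absolute paths into root-relative "remote" names. Below is a minimal,
// self-contained sketch of that pattern using only the standard library; the
// names (walkRelative, root) are hypothetical and not part of rclone.
//
// Assumed imports: "fmt", "os", "path/filepath".
func walkRelative(root string) ([]string, error) {
	var remotes []string
	err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			// Report the error but keep walking, as List does above
			fmt.Fprintf(os.Stderr, "failed to open %s: %v\n", path, err)
			return nil
		}
		remote, err := filepath.Rel(root, path)
		if err != nil {
			return nil
		}
		if remote == "." {
			// "." is the root itself - skip it like List does
			return nil
		}
		remotes = append(remotes, remote)
		return nil
	})
	return remotes, err
}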
// SetModTime sets the modification time of the drive fs object
func (o *FsObjectDrive) SetModTime(modTime time.Time) {
	err := o.readMetaData()
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to read metadata: %s", err)
		return
	}
	// New metadata
	updateInfo := &drive.File{
		ModifiedDate: modTime.Format(timeFormatOut),
	}
	// Set modified date
	var info *drive.File
	err = o.drive.pacer.Call(func() (bool, error) {
		info, err = o.drive.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do()
		return shouldRetry(err)
	})
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
		return
	}
	// Update info from read data
	o.setMetaData(info)
}
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	if f.bucket == "" {
		// Return no objects at top level list
		close(out)
		fs.Stats.Error()
		fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
	} else {
		// List the objects
		go func() {
			defer close(out)
			err := f.list("", 0, false, func(remote string, object *api.File) error {
				if o := f.newFsObjectWithInfo(remote, object); o != nil {
					out <- o
				}
				return nil
			})
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "Couldn't list bucket %q: %s", f.bucket, err)
			}
		}()
	}
	return out
}
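// The List implementations above all follow the same shape: make a channel
// buffered to fs.Config.Checkers, start a producer goroutine that closes the
// channel with defer when it is done, and return the channel immediately so
// the caller can range over it. Below is a minimal sketch of that pattern in
// plain Go; the listNumbers name and int payload are hypothetical stand-ins
// for the real FsObjects.
func listNumbers(n int) <-chan int {
	out := make(chan int, 8) // buffered, like fs.Config.Checkers
	go func() {
		defer close(out) // guarantees the consumer's range loop terminates
		for i := 0; i < n; i++ {
			out <- i
		}
	}()
	return out
}

// A caller would consume it the way rclone consumes an ObjectsChan:
//
//	for v := range listNumbers(10) {
//		fmt.Println(v)
//	}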
// Md5sum calculates the Md5sum of a file returning a lowercase hex string
func (o *FsObjectLocal) Md5sum() (string, error) {
	if o.md5sum != "" {
		return o.md5sum, nil
	}
	in, err := os.Open(o.path)
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to open: %s", err)
		return "", err
	}
	hash := md5.New()
	_, err = io.Copy(hash, in)
	closeErr := in.Close()
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to read: %s", err)
		return "", err
	}
	if closeErr != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to close: %s", closeErr)
		return "", closeErr
	}
	o.md5sum = hex.EncodeToString(hash.Sum(nil))
	return o.md5sum, nil
}
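// Md5sum above hashes the file with io.Copy and deliberately checks the Close
// error separately from the read error, so a failed close is not silently
// ignored. Below is a minimal, self-contained sketch of the same technique
// using only the standard library; fileMD5 is a hypothetical helper, not an
// rclone function.
//
// Assumed imports: "crypto/md5", "encoding/hex", "io", "os".
func fileMD5(path string) (string, error) {
	in, err := os.Open(path)
	if err != nil {
		return "", err
	}
	hash := md5.New()
	_, err = io.Copy(hash, in)
	closeErr := in.Close()
	if err != nil {
		return "", err // read failed
	}
	if closeErr != nil {
		return "", closeErr // close failed
	}
	return hex.EncodeToString(hash.Sum(nil)), nil
}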
// ListDir lists the directories
func (f *Fs) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		err := f.dirCache.FindRoot(false)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			_, err := f.listAll(f.dirCache.RootID(), true, false, func(item *api.Item) bool {
				dir := &fs.Dir{
					Name:  item.Name,
					Bytes: -1,
					Count: -1,
					When:  time.Time(item.LastModifiedDateTime),
				}
				if item.Folder != nil {
					dir.Count = item.Folder.ChildCount
				}
				out <- dir
				return false
			})
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "ListDir failed: %s", err)
			}
		}
	}()
	return out
}
// SetModTime sets the modification time of the remote object
func (o *FsObjectS3) SetModTime(modTime time.Time) {
	err := o.readMetaData()
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to read metadata: %s", err)
		return
	}
	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
	// Copy the object to itself to update the metadata
	key := o.s3.root + o.remote
	sourceKey := o.s3.bucket + "/" + key
	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
	req := s3.CopyObjectInput{
		Bucket:            &o.s3.bucket,
		ACL:               &o.s3.perm,
		Key:               &key,
		CopySource:        &sourceKey,
		Metadata:          o.meta,
		MetadataDirective: &directive,
	}
	_, err = o.s3.c.CopyObject(&req)
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
	}
}
// PutCaseCorrectDirectoryName records the case-correct name of a directory
// found under parentPath
func (tree *nameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) {
	if len(caseCorrectDirectoryName) == 0 {
		fs.Stats.Error()
		fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
		return
	}
	node := tree.getTreeNode(parentPath)
	if node == nil {
		return
	}
	lowerCaseDirectoryName := strings.ToLower(caseCorrectDirectoryName)
	directory := node.Directories[lowerCaseDirectoryName]
	if directory == nil {
		directory = newNameTreeNode(caseCorrectDirectoryName)
		node.Directories[lowerCaseDirectoryName] = directory
	} else {
		if len(directory.CaseCorrectName) > 0 {
			fs.Stats.Error()
			fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: directory %q already exists under parent path %q", caseCorrectDirectoryName, parentPath)
			return
		}
		directory.CaseCorrectName = caseCorrectDirectoryName
	}
}
// Run the function with stats and retries if required
func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
	var err error
	var stopStats chan struct{}
	if !showStats && ShowStats() {
		showStats = true
	}
	if showStats {
		stopStats = StartStats()
	}
	for try := 1; try <= *retries; try++ {
		err = f()
		if !Retry || (err == nil && !fs.Stats.Errored()) {
			if try > 1 {
				fs.ErrorLog(nil, "Attempt %d/%d succeeded", try, *retries)
			}
			break
		}
		if fs.IsFatalError(err) {
			fs.ErrorLog(nil, "Fatal error received - not attempting retries")
			break
		}
		if fs.IsNoRetryError(err) {
			fs.ErrorLog(nil, "Can't retry this error - not attempting retries")
			break
		}
		if err != nil {
			fs.ErrorLog(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
		} else {
			fs.ErrorLog(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
		}
		if try < *retries {
			fs.Stats.ResetErrors()
		}
	}
	if showStats {
		close(stopStats)
	}
	if err != nil {
		log.Fatalf("Failed to %s: %v", cmd.Name(), err)
	}
	if showStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
		fs.Log(nil, "%s", fs.Stats)
	}
	if fs.Config.Verbose {
		fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
	}
	if fs.Stats.Errored() {
		os.Exit(1)
	}
}
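// Run above wraps the whole command in a bounded retry loop that stops early
// on success, on fatal errors, and on errors marked as non-retryable. Below is
// a minimal sketch of the same control flow using only the standard library;
// retry, maxTries and isFatal are hypothetical names, not the rclone API.
//
// Assumed imports: "log".
func retry(maxTries int, isFatal func(error) bool, f func() error) error {
	var err error
	for try := 1; try <= maxTries; try++ {
		err = f()
		if err == nil {
			return nil
		}
		if isFatal(err) {
			log.Printf("fatal error - not retrying: %v", err)
			return err
		}
		log.Printf("attempt %d/%d failed: %v", try, maxTries, err)
	}
	return err
}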
// SetModTime sets the modification time of the remote object
func (o *FsObjectSwift) SetModTime(modTime time.Time) {
	err := o.readMetaData()
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to read metadata: %s", err)
		return
	}
	o.meta.SetModTime(modTime)
	err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, o.meta.ObjectHeaders())
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
	}
}
// ListDir lists the buckets if the path is empty, otherwise the
// directories in the bucket
func (f *FsStorage) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	if f.bucket == "" {
		// List the buckets
		go func() {
			defer close(out)
			if f.projectNumber == "" {
				fs.Stats.Error()
				fs.ErrorLog(f, "Can't list buckets without project number")
				return
			}
			listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
			for {
				buckets, err := listBuckets.Do()
				if err != nil {
					fs.Stats.Error()
					fs.ErrorLog(f, "Couldn't list buckets: %v", err)
					break
				} else {
					for _, bucket := range buckets.Items {
						out <- &fs.Dir{
							Name:  bucket.Name,
							Bytes: 0,
							Count: 0,
						}
					}
				}
				if buckets.NextPageToken == "" {
					break
				}
				listBuckets.PageToken(buckets.NextPageToken)
			}
		}()
	} else {
		// List the directories in the path in the bucket
		go func() {
			defer close(out)
			f.list(true, func(remote string, object *storage.Object) {
				out <- &fs.Dir{
					Name:  remote,
					Bytes: int64(object.Size),
					Count: 0,
				}
			})
		}()
	}
	return out
}
// ListDir lists the buckets if the path is empty, otherwise the
// directories in the bucket
func (f *Fs) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	if f.bucket == "" {
		// List the buckets
		go func() {
			defer close(out)
			err := f.listBuckets(func(bucket *api.Bucket) {
				out <- &fs.Dir{
					Name:  bucket.Name,
					Bytes: -1,
					Count: -1,
				}
			})
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "Error listing buckets: %v", err)
			}
		}()
	} else {
		// List the directories in the path in the bucket
		go func() {
			defer close(out)
			lastDir := ""
			err := f.list("", 0, false, func(remote string, object *api.File) error {
				slash := strings.IndexRune(remote, '/')
				if slash < 0 {
					return nil
				}
				dir := remote[:slash]
				if dir == lastDir {
					return nil
				}
				out <- &fs.Dir{
					Name:  dir,
					Bytes: -1,
					Count: -1,
				}
				lastDir = dir
				return nil
			})
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "Couldn't list bucket %q: %s", f.bucket, err)
			}
		}()
	}
	return out
}
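// The bucket branch above synthesises directory entries from a flat, sorted
// key listing: it takes the part of each key before the first "/" and only
// emits it when it differs from the previous one (lastDir). Below is a
// self-contained sketch of that de-duplication using only the standard
// library; topLevelDirs is a hypothetical helper, not part of the B2 backend.
//
// Assumed imports: "strings".
func topLevelDirs(keys []string) []string {
	var dirs []string
	lastDir := ""
	for _, key := range keys {
		slash := strings.IndexRune(key, '/')
		if slash < 0 {
			continue // a file at the top level, not a directory
		}
		dir := key[:slash]
		if dir == lastDir {
			continue // already emitted - keys are assumed to arrive sorted
		}
		dirs = append(dirs, dir)
		lastDir = dir
	}
	return dirs
}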
// Flush is called each time the file or directory is closed.
// Because there can be multiple file descriptors referring to a
// single opened file, Flush can be called multiple times.
func (fh *ReadFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
	fh.mu.Lock()
	defer fh.mu.Unlock()
	fs.Debug(fh.o, "ReadFileHandle.Flush")
	// Ignore the Flush as there is nothing we can sensibly do and
	// it seems quite common for Flush to be called from
	// different threads each of which has read some data.
	if false {
		// If Read hasn't been called then ignore the Flush - Release
		// will pick it up
		if !fh.readCalled {
			fs.Debug(fh.o, "ReadFileHandle.Flush ignoring flush on unread handle")
			return nil
		}
		err := fh.close()
		if err != nil {
			fs.ErrorLog(fh.o, "ReadFileHandle.Flush error: %v", err)
			return err
		}
	}
	fs.Debug(fh.o, "ReadFileHandle.Flush OK")
	return nil
}
// listFiles walks the path returning a channel of FsObjects
//
// if ignoreStorable is set then it outputs the file even if Storable() is false
func (f *Fs) listFiles(ignoreStorable bool) fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	if f.container == "" {
		// Return no objects at top level list
		close(out)
		fs.Stats.Error()
		fs.ErrorLog(f, "Can't list objects at root - choose a container using lsd")
	} else {
		// List the objects
		go func() {
			defer close(out)
			f.list(false, func(remote string, object *swift.Object) error {
				if o := f.newFsObjectWithInfo(remote, object); o != nil {
					// Storable does a full metadata read on 0 size objects which might be manifest files
					storable := o.Storable()
					if storable || ignoreStorable {
						out <- o
					}
				}
				return nil
			})
		}()
	}
	return out
}
// ReadDirAll reads the contents of the directory
func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
	fs.Debug(d.path, "Dir.ReadDirAll")
	err = d.readDir()
	if err != nil {
		fs.Debug(d.path, "Dir.ReadDirAll error: %v", err)
		return nil, err
	}
	d.mu.RLock()
	defer d.mu.RUnlock()
	for _, item := range d.items {
		var dirent fuse.Dirent
		switch x := item.o.(type) {
		case fs.Object:
			dirent = fuse.Dirent{
				// Inode FIXME ???
				Type: fuse.DT_File,
				Name: path.Base(x.Remote()),
			}
		case *fs.Dir:
			dirent = fuse.Dirent{
				// Inode FIXME ???
				Type: fuse.DT_Dir,
				Name: path.Base(x.Remote()),
			}
		default:
			err = errors.Errorf("unknown type %T", item)
			fs.ErrorLog(d.path, "Dir.ReadDirAll error: %v", err)
			return nil, err
		}
		dirents = append(dirents, dirent)
	}
	fs.Debug(d.path, "Dir.ReadDirAll OK with %d entries", len(dirents))
	return dirents, nil
}
// ListDir walks the path returning a channel of directories
func (f *Fs) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't list directories in root: %s", err)
		} else {
			for i := range entry.Contents {
				entry := &entry.Contents[i]
				if entry.IsDir {
					name := f.stripRoot(entry.Path)
					if name == nil {
						// an error occurred and was logged by stripRoot
						continue
					}
					out <- &fs.Dir{
						Name:  *name,
						When:  time.Time(entry.ClientMtime),
						Bytes: entry.Bytes,
						Count: -1,
					}
				}
			}
		}
	}()
	return out
}
// List walks the path returning a channel of FsObjects
func (f *FsSwift) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	if f.container == "" {
		// Return no objects at top level list
		close(out)
		fs.Stats.Error()
		fs.ErrorLog(f, "Can't list objects at root - choose a container using lsd")
	} else {
		// List the objects
		go func() {
			defer close(out)
			f.list(false, func(remote string, object *swift.Object) error {
				if o := f.newFsObjectWithInfo(remote, object); o != nil {
					// Do full metadata read on 0 size objects which might be manifest files
					if o.Size() == 0 {
						err := o.(*FsObjectSwift).readMetaData()
						if err != nil {
							fs.Debug(o, "Failed to read metadata: %v", err)
						}
					}
					out <- o
				}
				return nil
			})
		}()
	}
	return out
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *Fs) list(directories bool, fn listFn) {
	err := f.listContainerRoot(f.container, f.root, directories, fn)
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(f, "Couldn't read container %q: %s", f.container, err)
	}
}
// ListDir lists the containers if the path is empty, otherwise the
// directories in the container
func (f *FsSwift) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	if f.container == "" {
		// List the containers
		go func() {
			defer close(out)
			containers, err := f.c.ContainersAll(nil)
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "Couldn't list containers: %v", err)
			} else {
				for _, container := range containers {
					out <- &fs.Dir{
						Name:  container.Name,
						Bytes: container.Bytes,
						Count: container.Count,
					}
				}
			}
		}()
	} else {
		// List the directories in the path in the container
		go func() {
			defer close(out)
			f.list(true, func(remote string, object *swift.Object) {
				out <- &fs.Dir{
					Name:  remote,
					Bytes: object.Bytes,
					Count: 0,
				}
			})
		}()
	}
	return out
}
// GetPathWithCorrectCase returns the case-correct form of path (with a
// leading slash), or nil if any component is unknown
func (tree *nameTreeNode) GetPathWithCorrectCase(path string) *string {
	if path == "" {
		empty := ""
		return &empty
	}
	var result bytes.Buffer
	current := tree
	for _, component := range strings.Split(path, "/") {
		if component == "" {
			fs.Stats.Error()
			fs.ErrorLog(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
			return nil
		}
		lowercase := strings.ToLower(component)
		current = current.Directories[lowercase]
		if current == nil || current.CaseCorrectName == "" {
			return nil
		}
		_, _ = result.WriteString("/")
		_, _ = result.WriteString(current.CaseCorrectName)
	}
	resultString := result.String()
	return &resultString
}
// getTreeNode returns the node for path, creating empty intermediate nodes
// as needed
func (tree *nameTreeNode) getTreeNode(path string) *nameTreeNode {
	if len(path) == 0 {
		// no lookup required, just return root
		return tree
	}
	current := tree
	for _, component := range strings.Split(path, "/") {
		if len(component) == 0 {
			fs.Stats.Error()
			fs.ErrorLog(tree, "getTreeNode: path component is empty (full path %q)", path)
			return nil
		}
		lowercase := strings.ToLower(component)
		lookup := current.Directories[lowercase]
		if lookup == nil {
			lookup = newNameTreeNode("")
			current.Directories[lowercase] = lookup
		}
		current = lookup
	}
	return current
}
// walkFilesRec calls walkFunc for every file below currentPath, recursing
// into subdirectories whose case-correct names are known
func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFileWalkFunc) error {
	var prefix string
	if currentPath == "" {
		prefix = ""
	} else {
		prefix = currentPath + "/"
	}
	for name, entry := range tree.Files {
		err := walkFunc(prefix+name, entry)
		if err != nil {
			return err
		}
	}
	for lowerCaseName, directory := range tree.Directories {
		caseCorrectName := directory.CaseCorrectName
		if caseCorrectName == "" {
			fs.Stats.Error()
			fs.ErrorLog(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
			continue
		}
		err := directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
		if err != nil {
			return err
		}
	}
	return nil
}
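// walkFilesRec above builds each file's full path by accumulating a "/"
// separated prefix as it recurses into the directory tree. Below is a
// minimal, self-contained sketch of that prefix-accumulating recursion;
// dirNode and walkRec are hypothetical names, not the dropbox nameTreeNode
// API.
type dirNode struct {
	Files []string
	Dirs  map[string]*dirNode // keyed by directory name
}

func walkRec(node *dirNode, currentPath string, visit func(path string)) {
	prefix := ""
	if currentPath != "" {
		prefix = currentPath + "/"
	}
	for _, name := range node.Files {
		visit(prefix + name)
	}
	for name, child := range node.Dirs {
		walkRec(child, prefix+name, visit)
	}
}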
// initConfig is run by cobra after initialising the flags
func initConfig() {
	// Log file output
	if *logFile != "" {
		f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
		if err != nil {
			log.Fatalf("Failed to open log file: %v", err)
		}
		_, err = f.Seek(0, os.SEEK_END)
		if err != nil {
			fs.ErrorLog(nil, "Failed to seek log file to end: %v", err)
		}
		log.SetOutput(f)
		fs.DebugLogger.SetOutput(f)
		redirectStderr(f)
	}

	// Load the rest of the config now we have started the logger
	fs.LoadConfig()

	// Write the args for debug purposes
	fs.Debug("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

	// Setup CPU profiling if desired
	if *cpuProfile != "" {
		fs.Log(nil, "Creating CPU profile %q\n", *cpuProfile)
		f, err := os.Create(*cpuProfile)
		if err != nil {
			fs.Stats.Error()
			log.Fatal(err)
		}
		err = pprof.StartCPUProfile(f)
		if err != nil {
			fs.Stats.Error()
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile()
	}

	// Setup memory profiling if desired
	if *memProfile != "" {
		defer func() {
			fs.Log(nil, "Saving Memory profile %q\n", *memProfile)
			f, err := os.Create(*memProfile)
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
			err = pprof.WriteHeapProfile(f)
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
			err = f.Close()
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
		}()
	}
}
// List the directory using a recursive list from the root
//
// Path should be the directory path, either "" or "path/"
//
// This fetches the minimum amount of stuff but does more API calls
// which makes it slow
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
	var subError error
	// Make the API request
	var wg sync.WaitGroup
	_, err := f.listAll(dirID, false, false, func(info *api.Item) bool {
		// Recurse on directories
		if info.Folder != nil {
			wg.Add(1)
			folder := path + info.Name + "/"
			fs.Debug(f, "Reading %s", folder)
			go func() {
				defer wg.Done()
				err := f.listDirRecursive(info.ID, folder, out)
				if err != nil {
					subError = err
					fs.ErrorLog(f, "Error reading %s: %s", folder, err)
				}
			}()
		} else {
			if fs := f.newObjectWithInfo(path+info.Name, info); fs != nil {
				out <- fs
			}
		}
		return false
	})
	wg.Wait()
	fs.Debug(f, "Finished reading %s", path)
	if err != nil {
		return err
	}
	if subError != nil {
		return subError
	}
	return nil
}
// SetModTime sets the modification time of the remote object
func (o *Object) SetModTime(modTime time.Time) {
	info, err := o.setModTime(modTime)
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to update remote mtime: %v", err)
		return
	}
	// Update the metadata from the info returned by the server
	o.setMetaData(info)
}
// List walks the path returning a channel of Objects
func (f *Fs) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		err := f.dirCache.FindRoot(false)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			err = f.listDirRecursive(f.dirCache.RootID(), "", out)
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "List failed: %s", err)
			}
		}
	}()
	return out
}
// SetModTime sets the modification time of the remote object
func (o *FsObjectSwift) SetModTime(modTime time.Time) {
	err := o.readMetaData()
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to read metadata: %s", err)
		return
	}
	meta := o.headers.ObjectMetadata()
	meta.SetModTime(modTime)
	newHeaders := meta.ObjectHeaders()
	for k, v := range newHeaders {
		(*o.headers)[k] = v
	}
	err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, newHeaders)
	if err != nil {
		fs.Stats.Error()
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
	}
}
// Write data to the file handle
func (fh *WriteFileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	fs.Debug(fh.remote, "WriteFileHandle.Write len=%d", len(req.Data))
	fh.mu.Lock()
	defer fh.mu.Unlock()
	if fh.closed {
		fs.ErrorLog(fh.remote, "WriteFileHandle.Write error: %v", errClosedFileHandle)
		return errClosedFileHandle
	}
	fh.writeCalled = true
	// FIXME should probably check the file isn't being seeked?
	n, err := fh.pipeWriter.Write(req.Data)
	resp.Size = n
	fh.file.written(int64(n))
	if err != nil {
		fs.ErrorLog(fh.remote, "WriteFileHandle.Write error: %v", err)
		return err
	}
	fs.Debug(fh.remote, "WriteFileHandle.Write OK (%d bytes written)", n)
	return nil
}