// setMetaData sets the fs data from a storage.Object func (o *FsObjectStorage) setMetaData(info *storage.Object) { o.url = info.MediaLink o.bytes = int64(info.Size) // Read md5sum md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash) if err != nil { fs.Log(o, "Bad MD5 decode: %v", err) } else { o.md5sum = hex.EncodeToString(md5sumData) } // read mtime out of metadata if available mtimeString, ok := info.Metadata[metaMtime] if ok { modTime, err := time.Parse(timeFormatIn, mtimeString) if err == nil { o.modTime = modTime return } else { fs.Debug(o, "Failed to read mtime from metadata: %s", err) } } // Fallback to the Updated time modTime, err := time.Parse(timeFormatIn, info.Updated) if err != nil { fs.Log(o, "Bad time decode: %v", err) } else { o.modTime = modTime } }
// initConfig is run by cobra after initialising the flags
//
// It sets up log-file output, loads the config, logs the startup
// arguments, and optionally starts CPU/memory profiling.
func initConfig() {
	// Log file output - open append-only and redirect all loggers + stderr
	if *logFile != "" {
		f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
		if err != nil {
			log.Fatalf("Failed to open log file: %v", err)
		}
		// Seek to the end so subsequent writes append (best-effort)
		_, err = f.Seek(0, os.SEEK_END)
		if err != nil {
			fs.ErrorLog(nil, "Failed to seek log file to end: %v", err)
		}
		log.SetOutput(f)
		fs.DebugLogger.SetOutput(f)
		redirectStderr(f)
	}

	// Load the rest of the config now we have started the logger
	fs.LoadConfig()

	// Write the args for debug purposes
	fs.Debug("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

	// Setup CPU profiling if desired
	if *cpuProfile != "" {
		fs.Log(nil, "Creating CPU profile %q\n", *cpuProfile)
		f, err := os.Create(*cpuProfile)
		if err != nil {
			fs.Stats.Error()
			log.Fatal(err)
		}
		err = pprof.StartCPUProfile(f)
		if err != nil {
			fs.Stats.Error()
			log.Fatal(err)
		}
		// NOTE(review): this defer fires when initConfig returns, which
		// stops the CPU profile almost immediately rather than at program
		// exit - confirm whether an atexit-style hook was intended.
		defer pprof.StopCPUProfile()
	}

	// Setup memory profiling if desired
	if *memProfile != "" {
		// NOTE(review): deferred from initConfig, so the heap profile is
		// written as soon as this function returns, not at program exit -
		// TODO confirm the intended lifetime.
		defer func() {
			fs.Log(nil, "Saving Memory profile %q\n", *memProfile)
			f, err := os.Create(*memProfile)
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
			err = pprof.WriteHeapProfile(f)
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
			err = f.Close()
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
		}()
	}
}
// List the path returning a channel of FsObjects // // Ignores everything which isn't Storable, eg links etc func (f *FsLocal) List() fs.ObjectsChan { out := make(fs.ObjectsChan, fs.Config.Checkers) go func() { err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error { if err != nil { fs.Stats.Error() fs.Log(f, "Failed to open directory: %s: %s", path, err) } else { remote, err := filepath.Rel(f.root, path) if err != nil { fs.Stats.Error() fs.Log(f, "Failed to get relative path %s: %s", path, err) return nil } if remote == "." { return nil // remote = "" } if fs := f.newFsObjectWithInfo(remote, fi); fs != nil { if fs.Storable() { out <- fs } } } return nil }) if err != nil { fs.Stats.Error() fs.Log(f, "Failed to open directory: %s: %s", f.root, err) } close(out) }() return out }
// Walk the path returning a channel of FsObjects func (f *FsDrive) ListDir() fs.DirChan { out := make(fs.DirChan, fs.Config.Checkers) go func() { defer close(out) err := f.findRoot(false) if err != nil { fs.Stats.Error() fs.Log(f, "Couldn't find root: %s", err) } else { _, err := f.listAll(f.rootId, "", true, false, func(item *drive.File) bool { dir := &fs.Dir{ Name: item.Title, Bytes: -1, Count: -1, } dir.When, _ = time.Parse(timeFormatIn, item.ModifiedDate) out <- dir return false }) if err != nil { fs.Stats.Error() fs.Log(f, "ListDir failed: %s", err) } } }() return out }
// Md5sum calculates the Md5sum of a file returning a lowercase hex string func (o *FsObjectLocal) Md5sum() (string, error) { if o.md5sum != "" { return o.md5sum, nil } in, err := os.Open(o.path) if err != nil { fs.Stats.Error() fs.Log(o, "Failed to open: %s", err) return "", err } hash := md5.New() _, err = io.Copy(hash, in) closeErr := in.Close() if err != nil { fs.Stats.Error() fs.Log(o, "Failed to read: %s", err) return "", err } if closeErr != nil { fs.Stats.Error() fs.Log(o, "Failed to close: %s", closeErr) return "", closeErr } o.md5sum = hex.EncodeToString(hash.Sum(nil)) return o.md5sum, nil }
func (tree *NameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) { if len(caseCorrectDirectoryName) == 0 { fs.Stats.Error() fs.Log(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath) return } node := tree.getTreeNode(parentPath) if node == nil { return } lowerCaseDirectoryName := strings.ToLower(caseCorrectDirectoryName) directory := node.Directories[lowerCaseDirectoryName] if directory == nil { directory = newNameTreeNode(caseCorrectDirectoryName) node.Directories[lowerCaseDirectoryName] = directory } else { if len(directory.CaseCorrectName) > 0 { fs.Stats.Error() fs.Log(tree, "PutCaseCorrectDirectoryName: directory %q is already exists under parent path %q", caseCorrectDirectoryName, parentPath) return } directory.CaseCorrectName = caseCorrectDirectoryName } }
// ModTime returns the modification time of the object // // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *FsObjectDrive) ModTime() time.Time { err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %s", err) return time.Now() } modTime, err := time.Parse(timeFormatIn, o.modifiedDate) if err != nil { fs.Log(o, "Failed to read mtime from object: %s", err) return time.Now() } return modTime }
// ModTime returns the modification time of the object // // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime() time.Time { err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %v", err) return time.Now() } modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate) if err != nil { fs.Log(o, "Failed to read mtime from object: %v", err) return time.Now() } return modTime }
// Sets the modification time of the local fs object func (o *FsObjectS3) SetModTime(modTime time.Time) { err := o.readMetaData() if err != nil { fs.Stats.Error() fs.Log(o, "Failed to read metadata: %s", err) return } o.meta[metaMtime] = swift.TimeToFloatString(modTime) _, err = o.s3.b.Update(o.s3.root+o.remote, o.s3.perm, o.meta) if err != nil { fs.Stats.Error() fs.Log(o, "Failed to update remote mtime: %s", err) } }
// Sets the modification time of the local fs object func (o *FsObjectSwift) SetModTime(modTime time.Time) { err := o.readMetaData() if err != nil { fs.Stats.Error() fs.Log(o, "Failed to read metadata: %s", err) return } o.meta.SetModTime(modTime) err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, o.meta.ObjectHeaders()) if err != nil { fs.Stats.Error() fs.Log(o, "Failed to update remote mtime: %s", err) } }
// Lists the buckets func (f *FsStorage) ListDir() fs.DirChan { out := make(fs.DirChan, fs.Config.Checkers) if f.bucket == "" { // List the buckets go func() { defer close(out) if f.projectNumber == "" { fs.Stats.Error() fs.Log(f, "Can't list buckets without project number") return } listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks) for { buckets, err := listBuckets.Do() if err != nil { fs.Stats.Error() fs.Log(f, "Couldn't list buckets: %v", err) break } else { for _, bucket := range buckets.Items { out <- &fs.Dir{ Name: bucket.Name, Bytes: 0, Count: 0, } } } if buckets.NextPageToken == "" { break } listBuckets.PageToken(buckets.NextPageToken) } }() } else { // List the directories in the path in the bucket go func() { defer close(out) f.list(true, func(remote string, object *storage.Object) { out <- &fs.Dir{ Name: remote, Bytes: int64(object.Size), Count: 0, } }) }() } return out }
// Return an FsObject from a path // // May return nil if an error occurred func (f *Fs) newFsObjectWithInfo(remote string, info *drive.File) fs.Object { o, err := f.newFsObjectWithInfoErr(remote, info) if err != nil { fs.Log(o, "Failed to read metadata: %v", err) } return o }
func (tree *NameTreeNode) GetPathWithCorrectCase(path string) *string { if path == "" { empty := "" return &empty } var result bytes.Buffer current := tree for _, component := range strings.Split(path, "/") { if component == "" { fs.Stats.Error() fs.Log(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path) return nil } lowercase := strings.ToLower(component) current = current.Directories[lowercase] if current == nil || current.CaseCorrectName == "" { return nil } result.WriteString("/") result.WriteString(current.CaseCorrectName) } resultString := result.String() return &resultString }
func (tree *NameTreeNode) getTreeNode(path string) *NameTreeNode { if len(path) == 0 { // no lookup required, just return root return tree } current := tree for _, component := range strings.Split(path, "/") { if len(component) == 0 { fs.Stats.Error() fs.Log(tree, "getTreeNode: path component is empty (full path %q)", path) return nil } lowercase := strings.ToLower(component) lookup := current.Directories[lowercase] if lookup == nil { lookup = newNameTreeNode("") current.Directories[lowercase] = lookup } current = lookup } return current }
// Walk the path returning a channel of FsObjects func (f *FsDropbox) ListDir() fs.DirChan { out := make(fs.DirChan, fs.Config.Checkers) go func() { defer close(out) entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit) if err != nil { fs.Stats.Error() fs.Log(f, "Couldn't list directories in root: %s", err) } else { for i := range entry.Contents { entry := &entry.Contents[i] if entry.IsDir { name := f.stripRoot(entry.Path) if name == nil { // an error occurred and logged by stripRoot continue } out <- &fs.Dir{ Name: *name, When: time.Time(entry.ClientMtime), Bytes: int64(entry.Bytes), Count: -1, } } } } }() return out }
// Md5sum returns the Md5sum of an object returning a lowercase hex string // // FIXME has to download the file! func (o *FsObjectDropbox) Md5sum() (string, error) { if o.md5sum != "" { return o.md5sum, nil } err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %s", err) return "", fmt.Errorf("Failed to read metadata: %s", err) } // For pre-existing files which have no md5sum can read it and set it? // in, err := o.Open() // if err != nil { // return "", err // } // defer in.Close() // hash := md5.New() // _, err = io.Copy(hash, in) // if err != nil { // return "", err // } // o.md5sum = fmt.Sprintf("%x", hash.Sum(nil)) return o.md5sum, nil }
// Sets the modification time of the local fs object // // Commits the datastore func (o *FsObjectDropbox) SetModTime(modTime time.Time) { err := o.setModTimeAndMd5sum(modTime, "") if err != nil { fs.Stats.Error() fs.Log(o, err.Error()) } }
// Open opens the file for read. Call Close() on the returned io.ReadCloser func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) { var offset int64 for _, option := range options { switch x := option.(type) { case *fs.SeekOption: offset = x.Offset default: if option.Mandatory() { fs.Log(o, "Unsupported mandatory option: %v", option) } } } rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset int64) (io.ReadCloser, error) { if underlyingOffset == 0 { // Open with no seek return o.Object.Open() } // Open stream with a seek of underlyingOffset return o.Object.Open(&fs.SeekOption{Offset: underlyingOffset}) }, offset) if err != nil { return nil, err } return rc, err }
// readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info func (o *Object) readMetaData() (err error) { if o.meta != nil { return nil } key := o.fs.root + o.remote req := s3.HeadObjectInput{ Bucket: &o.fs.bucket, Key: &key, } resp, err := o.fs.c.HeadObject(&req) if err != nil { if awsErr, ok := err.(awserr.RequestFailure); ok { if awsErr.StatusCode() == http.StatusNotFound { return fs.ErrorObjectNotFound } } return err } var size int64 // Ignore missing Content-Length assuming it is 0 // Some versions of ceph do this due their apache proxies if resp.ContentLength != nil { size = *resp.ContentLength } o.etag = aws.StringValue(resp.ETag) o.bytes = size o.meta = resp.Metadata if resp.LastModified == nil { fs.Log(o, "Failed to read last modified from HEAD: %v", err) o.lastModified = time.Now() } else { o.lastModified = *resp.LastModified } return nil }
// Open an object for read func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { var offset int64 for _, option := range options { switch x := option.(type) { case *fs.SeekOption: offset = x.Offset default: if option.Mandatory() { fs.Log(o, "Unsupported mandatory option: %v", option) } } } fd, err := os.Open(o.path) if err != nil { return } if offset != 0 { // seek the object _, err = fd.Seek(offset, 0) // don't attempt to make checksums return fd, err } // Update the md5sum as we go along in = &localOpenFile{ o: o, in: fd, hash: fs.NewMultiHasher(), } return in, nil }
// Lists the containers func (f *FsSwift) ListDir() fs.DirChan { out := make(fs.DirChan, fs.Config.Checkers) if f.container == "" { // List the containers go func() { defer close(out) containers, err := f.c.ContainersAll(nil) if err != nil { fs.Stats.Error() fs.Log(f, "Couldn't list containers: %v", err) } else { for _, container := range containers { out <- &fs.Dir{ Name: container.Name, Bytes: container.Bytes, Count: container.Count, } } } }() } else { // List the directories in the path in the container go func() { defer close(out) f.list(true, func(remote string, object *swift.Object) { out <- &fs.Dir{ Name: remote, Bytes: object.Bytes, Count: 0, } }) }() } return out }
// readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info func (o *FsObjectS3) readMetaData() (err error) { if o.meta != nil { return nil } key := o.s3.root + o.remote req := s3.HeadObjectInput{ Bucket: &o.s3.bucket, Key: &key, } resp, err := o.s3.c.HeadObject(&req) if err != nil { fs.Debug(o, "Failed to read info: %s", err) return err } var size int64 // Ignore missing Content-Length assuming it is 0 // Some versions of ceph do this due their apache proxies if resp.ContentLength != nil { size = *resp.ContentLength } o.etag = aws.StringValue(resp.ETag) o.bytes = size o.meta = resp.Metadata if resp.LastModified == nil { fs.Log(o, "Failed to read last modified from HEAD: %s", err) o.lastModified = time.Now() } else { o.lastModified = *resp.LastModified } return nil }
// list the objects into the function supplied // // If directories is set it only sends directories func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) { delimiter := "" if directories { delimiter = "/" } marker := "" for { objects, err := f.b.List(f.root, delimiter, marker, listChunkSize) if err != nil { fs.Stats.Error() fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err) } else { rootLength := len(f.root) if directories { for _, remote := range objects.CommonPrefixes { if !strings.HasPrefix(remote, f.root) { fs.Log(f, "Odd name received %q", remote) continue } remote := remote[rootLength:] if strings.HasSuffix(remote, "/") { remote = remote[:len(remote)-1] } fn(remote, &s3.Key{Key: remote}) } } else { for i := range objects.Contents { object := &objects.Contents[i] if !strings.HasPrefix(object.Key, f.root) { fs.Log(f, "Odd name received %q", object.Key) continue } remote := object.Key[rootLength:] fn(remote, object) } } } if !objects.IsTruncated { break } // Use NextMarker if set, otherwise use last Key marker = objects.NextMarker if marker == "" { marker = objects.Contents[len(objects.Contents)-1].Key } } }
// Size returns the size of an object in bytes func (o *Object) Size() int64 { err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %s", err) return 0 } return o.size }
// ModTime returns the modification time of the object // // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime() time.Time { err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %s", err) return time.Now() } return o.modTime }
// Deletes the medadata associated with this file // // It logs any errors func (o *FsObjectDropbox) deleteMetadata() { fs.Debug(o, "Deleting metadata from datastore") err := o.dropbox.deleteMetadata(o.metadataKey()) if err != nil { fs.Log(o, "Error deleting metadata: %v", err) fs.Stats.Error() } }
// shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { if resp != nil { if resp.StatusCode == 401 { f.ts.Invalidate() fs.Log(f, "401 error received - invalidating token") return true, err } // Work around receiving this error sporadically on authentication // // HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"} if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") { fs.Log(f, "403 \"Authorization header requires...\" error received - retry") return true, err } } return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err }
// MimeType of an Object if known, "" otherwise func (o *Object) MimeType() string { err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %v", err) return "" } return o.mimeType }
// ModTime returns the modification time of the object // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime() time.Time { err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %v", err) return time.Now() } // read mtime out of metadata if available d, ok := o.meta[metaMtime] if !ok || d == nil { // fs.Debug(o, "No metadata") return o.lastModified } modTime, err := swift.FloatStringToTime(*d) if err != nil { fs.Log(o, "Failed to read mtime from object: %v", err) return o.lastModified } return modTime }
// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// A SeekOption gives the plaintext offset to start reading from; the
// ciphertext is reopened at the corresponding underlying offset and
// any leftover bytes within a block are discarded.
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	var offset int64
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		default:
			if option.Mandatory() {
				fs.Log(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	in, err := o.Object.Open()
	if err != nil {
		return nil, err
	}
	// This reads the header and checks it is OK
	rc, err := o.f.cipher.DecryptData(in)
	if err != nil {
		return nil, err
	}
	// If seeking required, then...
	if offset != 0 {
		// FIXME could cache the unseeked decrypter as we re-read the header on every seek
		decrypter := rc.(*decrypter)
		// Seek the decrypter and work out where to seek the
		// underlying file and how many bytes to discard
		underlyingOffset, discard := decrypter.seek(offset)
		// Re-open stream with a seek of underlyingOffset
		err = in.Close()
		if err != nil {
			return nil, err
		}
		// NOTE(review): ":=" here deliberately shadows the outer in/err
		// inside this block; decrypter.rc is updated to the new stream
		// so the shadowing is harmless, but fragile if refactored.
		in, err := o.Object.Open(&fs.SeekOption{Offset: underlyingOffset})
		if err != nil {
			return nil, err
		}
		// Update the stream
		decrypter.rc = in
		// Discard the bytes up to the requested plaintext offset
		_, err = io.CopyN(ioutil.Discard, decrypter, discard)
		if err != nil {
			return nil, err
		}
	}
	return rc, err
}