// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime()

	o, createInfo, err := f.createFileInfo(remote, modTime, size)
	if err != nil {
		return nil, err
	}

	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Insert(createInfo).Media(in, googleapi.ContentType("")).Do()
			return shouldRetry(err)
		})
		if err != nil {
			return o, err
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(in, size, createInfo.MimeType, createInfo, remote)
		if err != nil {
			return o, err
		}
	}
	o.setMetaData(info)
	return o, nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
	size := src.Size()
	modTime := src.ModTime()

	var info *api.Item
	if size <= int64(uploadCutoff) {
		// This is for less than 100 MB of content
		var resp *http.Response
		opts := rest.Opts{
			Method: "PUT",
			Path:   "/drive/root:/" + o.srvPath() + ":/content",
			Body:   in,
		}
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return err
		}
		o.setMetaData(info)
	} else {
		err = o.uploadMultipart(in, size)
		if err != nil {
			return err
		}
	}

	// Set the mod time now and read metadata
	info, err = o.setModTime(modTime)
	if err != nil {
		return err
	}
	o.setMetaData(info)
	return nil
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
	size := src.Size()
	modTime := src.ModTime()
	if o.isDocument {
		return errors.New("can't update a google document")
	}
	updateInfo := &drive.File{
		Id:           o.id,
		MimeType:     fs.MimeType(o),
		ModifiedDate: modTime.Format(timeFormatOut),
	}

	// Make the API request to upload metadata and file data.
	var err error
	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
		// Don't retry, return a retry error instead
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			info, err = o.fs.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in, googleapi.ContentType("")).Do()
			return shouldRetry(err)
		})
		if err != nil {
			return err
		}
	} else {
		// Upload the file in chunks
		info, err = o.fs.Upload(in, size, fs.MimeType(o), updateInfo, o.remote)
		if err != nil {
			return err
		}
	}
	o.setMetaData(info)
	return nil
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime()

	o, _, _, err := f.createObject(remote, modTime, size)
	if err != nil {
		return nil, err
	}
	return o, o.Update(in, src)
}
// Update the Object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
	modTime := src.ModTime()

	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
		u.Concurrency = 2
		u.LeavePartsOnError = false
		u.S3 = o.fs.c
		u.PartSize = s3manager.MinUploadPartSize
		size := src.Size()

		// Adjust PartSize until the number of parts is small enough.
		if size/u.PartSize >= s3manager.MaxUploadParts {
			// Calculate partition size rounded up to the nearest MB
			u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
		}
	})

	// Set the mtime in the meta data
	metadata := map[string]*string{
		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
	}

	// Guess the content type
	mimeType := fs.MimeType(src)

	key := o.fs.root + o.remote
	req := s3manager.UploadInput{
		Bucket:      &o.fs.bucket,
		ACL:         &o.fs.acl,
		Key:         &key,
		Body:        in,
		ContentType: &mimeType,
		Metadata:    metadata,
		//ContentLength: &size,
	}
	if o.fs.sse != "" {
		req.ServerSideEncryption = &o.fs.sse
	}
	if o.fs.storageClass != "" {
		req.StorageClass = &o.fs.storageClass
	}
	_, err := uploader.Upload(&req)
	if err != nil {
		return err
	}

	// Read the metadata from the newly created object
	o.meta = nil // wipe old metadata
	err = o.readMetaData()
	return err
}
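// A minimal, hypothetical sketch (not part of the backend above) illustrating
// the PartSize arithmetic in the uploader options: when the default minimum
// part size would require more than the maximum allowed number of parts, the
// part size is rounded up to the next whole MiB. The constants below mirror
// s3manager's usual defaults (5 MiB minimum part size, 10000 part limit) but
// are assumptions here, as is the 100 GiB example size.
package main

import "fmt"

func main() {
	const (
		minUploadPartSize = int64(5 << 20) // assumed 5 MiB default
		maxUploadParts    = int64(10000)   // assumed S3 part limit
	)
	size := int64(100) << 30 // assume a 100 GiB object
	partSize := minUploadPartSize
	if size/partSize >= maxUploadParts {
		// Same rounding as in Update above: divide by the part limit,
		// truncate to whole MiB, then add one MiB.
		partSize = (((size / maxUploadParts) >> 20) + 1) << 20
	}
	fmt.Printf("part size %d MiB -> %d parts\n", partSize>>20, (size+partSize-1)/partSize)
	// For 100 GiB this prints: part size 11 MiB -> 9310 parts (< 10000).
}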
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
	remote := o.remote
	size := src.Size()
	parts := size / int64(chunkSize)
	if size%int64(chunkSize) != 0 {
		parts++
	}
	if parts > maxParts {
		return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
	}
	modTime := src.ModTime()

	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_start_large_file",
	}
	bucketID, err := f.getBucketID()
	if err != nil {
		return nil, err
	}
	var request = api.StartLargeFileRequest{
		BucketID:    bucketID,
		Name:        o.fs.root + remote,
		ContentType: fs.MimeType(src),
		Info: map[string]string{
			timeKey: timeString(modTime),
		},
	}
	// Set the SHA1 if known
	if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
		request.Info[sha1Key] = calculatedSha1
	}
	var response api.StartLargeFileResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	up = &largeUpload{
		f:     f,
		o:     o,
		in:    in,
		id:    response.ID,
		size:  size,
		parts: parts,
		sha1s: make([]string, parts),
	}
	return up, nil
}
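// A small, hypothetical illustration (not part of the backend above) of the
// part-count calculation in newLargeUpload: integer division rounded up, so a
// file that is not an exact multiple of the chunk size needs one extra part
// for the remainder. The 96 MiB chunk size is only an assumed example value.
package main

import "fmt"

func main() {
	const chunkSize = int64(96 << 20) // assumed 96 MiB chunk size
	for _, size := range []int64{96 << 20, (96 << 20) + 1, 1 << 30} {
		parts := size / chunkSize
		if size%chunkSize != 0 {
			parts++ // the partial final chunk still needs its own part
		}
		fmt.Printf("size %d bytes -> %d parts\n", size, parts)
	}
	// Prints 1, 2 and 11 parts respectively.
}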
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime()

	o := &Object{
		fs:      f,
		remote:  remote,
		bytes:   uint64(size),
		modTime: modTime,
	}
	//TODO maybe read metadata after upload to check if file uploaded successfully
	return o, o.Update(in, src)
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
	size := src.Size()
	modTime := src.ModTime()

	object := storage.Object{
		Bucket:      o.fs.bucket,
		Name:        o.fs.root + o.remote,
		ContentType: fs.MimeType(o),
		Size:        uint64(size),
		Updated:     modTime.Format(timeFormatOut), // Doesn't get set
		Metadata:    metadataFromModTime(modTime),
	}
	newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectAcl).Do()
	if err != nil {
		return err
	}
	// Set the metadata for the new object while we have it
	o.setMetaData(newObject)
	return nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
	size := src.Size()
	modTime := src.ModTime()

	// Note whether this is a dynamic large object before starting
	isDynamicLargeObject, err := o.isDynamicLargeObject()
	if err != nil {
		return err
	}

	// Set the mtime
	m := swift.Metadata{}
	m.SetModTime(modTime)
	contentType := fs.MimeType(src)
	headers := m.ObjectHeaders()
	uniquePrefix := ""
	if size > int64(chunkSize) {
		uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
		if err != nil {
			return err
		}
	} else {
		headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
		_, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
		if err != nil {
			return err
		}
	}

	// If file was a dynamic large object then remove old/all segments
	if isDynamicLargeObject {
		err = o.removeSegments(uniquePrefix)
		if err != nil {
			fs.Log(o, "Failed to remove old segments - carrying on with upload: %v", err)
		}
	}

	// Read the metadata from the newly created object
	o.headers = nil // wipe old metadata
	return o.readMetaData()
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
	err := o.mkdirAll()
	if err != nil {
		return err
	}

	out, err := os.Create(o.path)
	if err != nil {
		return err
	}

	// Calculate the hash of the object we are reading as we go along
	hash := fs.NewMultiHasher()
	in = io.TeeReader(in, hash)

	_, err = io.Copy(out, in)
	closeErr := out.Close()
	if err == nil {
		err = closeErr
	}
	if err != nil {
		fs.Debug(o, "Removing partially written file on error: %v", err)
		if removeErr := os.Remove(o.path); removeErr != nil {
			fs.ErrorLog(o, "Failed to remove partially written file: %v", removeErr)
		}
		return err
	}

	// All successful so update the hashes
	o.hashes = hash.Sums()

	// Set the mtime
	err = o.SetModTime(src.ModTime())
	if err != nil {
		return err
	}

	// ReRead info now that we have finished
	return o.lstat()
}
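// A minimal, generic sketch (not rclone code) of the pattern used in Update
// above: wrap the source reader in io.TeeReader so every byte copied to the
// destination also feeds a hash, letting the checksum be computed in a single
// pass over the data. sha256 stands in here for fs.NewMultiHasher, which is an
// rclone helper; the input string is just an assumed example.
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("example file contents")
	h := sha256.New()
	tee := io.TeeReader(src, h) // reads from src are also written into h

	var dst strings.Builder
	if _, err := io.Copy(&dst, tee); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Printf("wrote %d bytes, sha256 %x\n", dst.Len(), h.Sum(nil))
}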
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
	size := src.Size()
	modTime := src.ModTime()

	remote := o.remotePath()
	//create full path to file before upload.
	err1 := mkDirFullPath(o.fs.yd, remote)
	if err1 != nil {
		return err1
	}
	//upload file
	overwrite := true //overwrite existing file
	err := o.fs.yd.Upload(in, remote, overwrite)
	if err == nil {
		//if file uploaded successfully then return metadata
		o.bytes = uint64(size)
		o.modTime = modTime
		o.md5sum = "" // according to unit tests after put the md5 is empty.
		//and set modTime of uploaded file
		err = o.SetModTime(modTime)
	}
	return err
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
	err := o.mkdirAll()
	if err != nil {
		return err
	}

	out, err := os.Create(o.path)
	if err != nil {
		return err
	}

	// Calculate the hash of the object we are reading as we go along
	hash := fs.NewMultiHasher()
	in = io.TeeReader(in, hash)

	_, err = io.Copy(out, in)
	outErr := out.Close()
	if err != nil {
		return err
	}
	if outErr != nil {
		return outErr
	}

	// All successful so update the hashes
	o.hashes = hash.Sums()

	// Set the mtime
	err = o.SetModTime(src.ModTime())
	if err != nil {
		return err
	}

	// ReRead info now that we have finished
	return o.lstat()
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
	if *b2Versions {
		return errNotWithVersions
	}
	size := src.Size()

	// If a large file upload in chunks - see upload.go
	if size >= int64(uploadCutoff) {
		up, err := o.fs.newLargeUpload(o, in, src)
		if err != nil {
			return err
		}
		return up.Upload()
	}

	modTime := src.ModTime()
	calculatedSha1, _ := src.Hash(fs.HashSHA1)

	// If source cannot provide the hash, copy to a temporary file
	// and calculate the hash while doing so.
	// Then we serve the temporary file.
	if calculatedSha1 == "" {
		// Open a temp file to copy the input
		fd, err := ioutil.TempFile("", "rclone-b2-")
		if err != nil {
			return err
		}
		_ = os.Remove(fd.Name()) // Delete the file - may not work on Windows
		defer func() {
			_ = fd.Close()           // Ignore error may have been closed already
			_ = os.Remove(fd.Name()) // Delete the file - may have been deleted already
		}()

		// Copy the input while calculating the sha1
		hash := sha1.New()
		teed := io.TeeReader(in, hash)
		n, err := io.Copy(fd, teed)
		if err != nil {
			return err
		}
		if n != size {
			return errors.Errorf("read %d bytes expecting %d", n, size)
		}
		calculatedSha1 = fmt.Sprintf("%x", hash.Sum(nil))

		// Rewind the temporary file
		_, err = fd.Seek(0, 0)
		if err != nil {
			return err
		}
		// Set input to temporary file
		in = fd
	}

	// Get upload Token
	o.fs.getUploadToken()
	defer o.fs.returnUploadToken()

	// Get upload URL
	upload, err := o.fs.getUploadURL()
	if err != nil {
		return err
	}
	defer func() {
		// return it like this because we might nil it out
		o.fs.returnUploadURL(upload)
	}()

	// Headers for upload file
	//
	// Authorization
	// required
	// An upload authorization token, from b2_get_upload_url.
	//
	// X-Bz-File-Name
	// required
	//
	// The name of the file, in percent-encoded UTF-8. See Files for
	// requirements on file names. See String Encoding.
	//
	// Content-Type
	// required
	//
	// The MIME type of the content of the file, which will be returned in
	// the Content-Type header when downloading the file. Use the
	// Content-Type b2/x-auto to automatically set the stored Content-Type
	// post upload. In the case where a file extension is absent or the
	// lookup fails, the Content-Type is set to application/octet-stream. The
	// Content-Type mappings can be perused here.
	//
	// X-Bz-Content-Sha1
	// required
	//
	// The SHA1 checksum of the content of the file. B2 will check this when
	// the file is uploaded, to make sure that the file arrived correctly. It
	// will be returned in the X-Bz-Content-Sha1 header when the file is
	// downloaded.
	//
	// X-Bz-Info-src_last_modified_millis
	// optional
	//
	// If the original source of the file being uploaded has a last modified
	// time concept, Backblaze recommends using this spelling of one of your
	// ten X-Bz-Info-* headers (see below). Using a standard spelling allows
	// different B2 clients and the B2 web user interface to interoperate
	// correctly. The value should be a base 10 number which represents a UTC
	// time when the original source file was last modified. It is a base 10
	// number of milliseconds since midnight, January 1, 1970 UTC. This fits
	// in a 64 bit integer such as the type "long" in the programming
	// language Java. It is intended to be compatible with Java's time
	// long. For example, it can be passed directly into the Java call
	// Date.setTime(long time).
	//
	// X-Bz-Info-*
	// optional
	//
	// Up to 10 of these headers may be present. The * part of the header
	// name is replaced with the name of a custom field in the file
	// information stored with the file, and the value is an arbitrary UTF-8
	// string, percent-encoded. The same info headers sent with the upload
	// will be returned with the download.
	opts := rest.Opts{
		Method:   "POST",
		Absolute: true,
		Path:     upload.UploadURL,
		Body:     in,
		ExtraHeaders: map[string]string{
			"Authorization":  upload.AuthorizationToken,
			"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
			"Content-Type":   fs.MimeType(src),
			sha1Header:       calculatedSha1,
			timeHeader:       timeString(modTime),
		},
		ContentLength: &size,
	}
	var response api.FileInfo
	// Don't retry, return a retry error instead
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(&opts, nil, &response)
		retry, err := o.fs.shouldRetry(resp, err)
		// On retryable error clear UploadURL
		if retry {
			fs.Debug(o, "Clearing upload URL because of error: %v", err)
			upload = nil
		}
		return retry, err
	})
	if err != nil {
		return err
	}
	return o.decodeMetaDataFileInfo(&response)
}