// Upload uploads the chunks from the input // It retries each chunk maxTries times (with a pause of uploadPause between attempts). func (rx *resumableUpload) Upload() (*drive.File, error) { start := int64(0) buf := make([]byte, chunkSize) var StatusCode int for start < rx.ContentLength { reqSize := rx.ContentLength - start if reqSize >= int64(chunkSize) { reqSize = int64(chunkSize) } else { buf = buf[:reqSize] } // Read the chunk _, err := io.ReadFull(rx.Media, buf) if err != nil { return nil, err } // Transfer the chunk for try := 1; try <= maxTries; try++ { fs.Debug(rx.remote, "Sending chunk %d length %d, %d/%d", start, reqSize, try, maxTries) rx.f.beginCall() StatusCode, err = rx.transferChunk(start, buf) rx.f.endCall(err) if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK { goto success } fs.Debug(rx.remote, "Retrying chunk %d/%d, code=%d, err=%v", try, maxTries, StatusCode, err) } fs.Debug(rx.remote, "Failed to send chunk") return nil, fs.RetryErrorf("Chunk upload failed - retry: code=%d, err=%v", StatusCode, err) success: start += reqSize } // Resume or retry uploads that fail due to connection interruptions or // any 5xx errors, including: // // 500 Internal Server Error // 502 Bad Gateway // 503 Service Unavailable // 504 Gateway Timeout // // Use an exponential backoff strategy if any 5xx server error is // returned when resuming or retrying upload requests. These errors can // occur if a server is getting overloaded. Exponential backoff can help // alleviate these kinds of problems during periods of high volume of // requests or heavy network traffic. Other kinds of requests should not // be handled by exponential backoff but you can still retry a number of // them. When retrying these requests, limit the number of times you // retry them. For example your code could limit to ten retries or less // before reporting an error. 
// // Handle 404 Not Found errors when doing resumable uploads by starting // the entire upload over from the beginning. if rx.ret == nil { return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode) } return rx.ret, nil }
// Update the already existing object // // Copy the reader into the object updating modTime and size // // The new object may have been created if an error is returned func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error { updateInfo := &drive.File{ Id: o.id, ModifiedDate: modTime.Format(timeFormatOut), } // Make the API request to upload metadata and file data. var err error var info *drive.File if size == 0 || size < int64(driveUploadCutoff) { // Don't retry, return a retry error instead o.drive.beginCall() info, err = o.drive.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do() if o.drive.endCall(err) { return fs.RetryErrorf("Update failed - retry: %s", err) } if err != nil { return fmt.Errorf("Update failed: %s", err) } } else { // Upload the file in chunks info, err = o.drive.Upload(in, size, fs.MimeType(o), updateInfo, o.remote) if err != nil { return err } } o.setMetaData(info) return nil }
// Put the object // // This assumes that the object doesn't not already exists - if you // call it when it does exist then it will create a duplicate. Call // object.Update() in this case. // // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { o, createInfo, err := f.createFileInfo(remote, modTime, size) if err != nil { return nil, err } var info *drive.File if size == 0 || size < int64(driveUploadCutoff) { // Make the API request to upload metadata and file data. // Don't retry, return a retry error instead f.beginCall() info, err = f.svc.Files.Insert(createInfo).Media(in).Do() if f.endCall(err) { return o, fs.RetryErrorf("Upload failed - retry: %s", err) } if err != nil { return o, fmt.Errorf("Upload failed: %s", err) } } else { // Upload the file in chunks info, err = f.Upload(in, size, createInfo.MimeType, createInfo, remote) if err != nil { return o, err } } o.setMetaData(info) return o, nil }
// Put the object
//
// This assumes that the object does not already exist - if you call
// it when it does exist then it will create a duplicate.  Call
// object.Update() in this case.
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
//
// NOTE(review): this is a second definition of (*FsDrive).Put in this
// source - a file cannot compile with both.  It looks like an older
// revision that inlines the directory lookup and metadata construction
// which the other Put delegates to createFileInfo - confirm which one
// should be kept.
func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary FsObject under construction
	o := &FsObjectDrive{drive: f, remote: remote}

	// Find (or create) the parent directory for the new file.
	directory, leaf := splitPath(o.remote)
	directoryId, err := f.findDir(directory, true)
	if err != nil {
		return o, fmt.Errorf("Couldn't find or make directory: %s", err)
	}

	// Define the metadata for the file we are going to create.
	createInfo := &drive.File{
		Title:        leaf,
		Description:  leaf,
		Parents:      []*drive.ParentReference{{Id: directoryId}},
		MimeType:     fs.MimeType(o),
		ModifiedDate: modTime.Format(timeFormatOut),
	}

	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		f.beginCall()
		info, err = f.svc.Files.Insert(createInfo).Media(in).Do()
		if f.endCall(err) {
			return o, fs.RetryErrorf("Upload failed - retry: %s", err)
		}
		if err != nil {
			return o, fmt.Errorf("Upload failed: %s", err)
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(in, size, createInfo.MimeType, createInfo, remote)
		if err != nil {
			return o, err
		}
	}
	o.setMetaData(info)
	return o, nil
}