Example #1
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime()

	o, createInfo, err := f.createFileInfo(remote, modTime, size)
	if err != nil {
		return nil, err
	}

	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Insert(createInfo).Media(in, googleapi.ContentType("")).Do()
			return shouldRetry(err)
		})
		if err != nil {
			return o, err
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(in, size, createInfo.MimeType, createInfo, remote)
		if err != nil {
			return o, err
		}
	}
	o.setMetaData(info)
	return o, nil
}
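PutUnchecked above dispatches on a size cutoff: zero-length and small files go up in a single metadata-plus-media request, larger files take the chunked upload path. Below is a minimal, self-contained sketch of that dispatch; the 8 MiB cutoff is an assumed stand-in for driveUploadCutoff, not its real default.

package main

import "fmt"

// uploadCutoff is an assumed stand-in for driveUploadCutoff; the real
// value is configurable and not shown in the example above.
const uploadCutoff = 8 << 20 // 8 MiB

// uploadMethod mirrors the dispatch in PutUnchecked: zero-length and
// small files go up in one request, everything else is chunked.
func uploadMethod(size int64) string {
	if size == 0 || size < uploadCutoff {
		return "single request"
	}
	return "chunked"
}

func main() {
	fmt.Println(uploadMethod(1 << 20))  // single request
	fmt.Println(uploadMethod(64 << 20)) // chunked
}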
Example #2
// checkUpload checks to see if an error occurred after the file was
// completely uploaded.
//
// If it was then it waits for a while to see if the file really
// exists and is the right size and returns an updated info.
//
// If the file wasn't found or was the wrong size then it returns the
// original error.
//
// This is a workaround for Amazon sometimes returning
//
//  * 408 REQUEST_TIMEOUT
//  * 504 GATEWAY_TIMEOUT
//  * 500 Internal server error
//
// at the end of large uploads.  The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
	// Return if no error - all is well
	if inErr == nil {
		return false, inInfo, inErr
	}
	// The HTTP status check below is disabled in this version - any error
	// that arrives after the body was fully read is treated as potentially
	// fixable, not just 408/500/504.
	// if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
	// 	return false, inInfo, inErr
	// }

	// The HTTP status
	httpStatus := "HTTP status UNKNOWN"
	if resp != nil {
		httpStatus = resp.Status
	}

	// check to see if we read to the end
	buf := make([]byte, 1)
	n, err := in.Read(buf)
	if !(n == 0 && err == io.EOF) {
		fs.Debug(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Don't wait for uploads - assume they will appear later
	if *uploadWaitPerGB <= 0 {
		fs.Debug(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Time we should wait for the upload
	uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))

	const sleepTime = 5 * time.Second                        // sleep between tries
	retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up

	fs.Debug(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
	remote := src.Remote()
	for i := 1; i <= retries; i++ {
		o, err := f.NewObject(remote)
		if err == fs.ErrorObjectNotFound {
			fs.Debug(src, "Object not found - waiting (%d/%d)", i, retries)
		} else if err != nil {
			fs.Debug(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
		} else {
			if src.Size() == o.Size() {
				fs.Debug(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
				info = &acd.File{
					Node: o.(*Object).info,
				}
				return true, info, nil
			}
			fs.Debug(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
		}
		time.Sleep(sleepTime)
	}
	fs.Debug(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
	return false, inInfo, inErr
}
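The retry budget above comes from flag arithmetic: a per-GB wait is scaled down to a per-byte duration, multiplied by the file size, and then divided into 5-second polls, rounding up. Here is a standalone sketch of that calculation; the flag value and file size are made-up examples, not rclone defaults.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical values standing in for *uploadWaitPerGB and src.Size().
	uploadWaitPerGB := 3 * time.Minute // wait 3 minutes per GB uploaded
	size := int64(5 << 30)             // a 5 GiB file

	// Scale the per-GB wait down to a per-byte wait, then up to the file size.
	uploadWaitPerByte := float64(uploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(size))

	// Divide the wait into 5-second polls, rounding up so that even a
	// short wait still gets at least one retry.
	const sleepTime = 5 * time.Second
	retries := int((timeToWait + sleepTime - 1) / sleepTime)

	fmt.Printf("wait %v in %d retries\n", timeToWait, retries) // wait 15m0s in 180 retries
}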
Example #3
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(in, src)
}
Example #4
// Put the Object to the local filesystem
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	remote := src.Remote()
	// Temporary Object under construction - info filled in by Update()
	o := f.newObject(remote)
	err := o.Update(in, src)
	if err != nil {
		return nil, err
	}
	return o, nil
}
Example #5
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime()

	o, _, _, err := f.createObject(remote, modTime, size)
	if err != nil {
		return nil, err
	}
	return o, o.Update(in, src)
}
Example #6
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
	switch err {
	case nil:
		return existingObj, existingObj.Update(in, src)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(in, src)
	default:
		return nil, err
	}
}
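This Put dispatches on the lookup error: nil means the object exists and is overwritten in place, the not-found sentinel means it is created fresh, and anything else aborts. A minimal sketch of the same switch follows, with a stubbed lookup and a sentinel error standing in for f.newObjectWithInfo and fs.ErrorObjectNotFound.

package main

import (
	"errors"
	"fmt"
)

// errObjectNotFound is a stand-in for fs.ErrorObjectNotFound.
var errObjectNotFound = errors.New("object not found")

// lookup is a stubbed stand-in for f.newObjectWithInfo.
func lookup(remote string) error {
	if remote == "existing.txt" {
		return nil
	}
	return errObjectNotFound
}

// put mirrors the dispatch above: nil means overwrite in place,
// the not-found sentinel means create, anything else aborts.
func put(remote string) (string, error) {
	switch err := lookup(remote); err {
	case nil:
		return "updated " + remote, nil
	case errObjectNotFound:
		return "created " + remote, nil
	default:
		return "", err
	}
}

func main() {
	fmt.Println(put("existing.txt")) // updated existing.txt <nil>
	fmt.Println(put("new.txt"))      // created new.txt <nil>
}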
Example #7
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime()

	o := &Object{
		fs:      f,
		remote:  remote,
		bytes:   uint64(size),
		modTime: modTime,
	}
	// TODO: maybe read the metadata after upload to check whether the file uploaded successfully
	return o, o.Update(in, src)
}
Example #8
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
	remote := src.Remote()
	size := src.Size()
	parts := size / int64(chunkSize)
	if size%int64(chunkSize) != 0 {
		parts++
	}
	if parts > maxParts {
		return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
	}
	modTime := src.ModTime()
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_start_large_file",
	}
	bucketID, err := f.getBucketID()
	if err != nil {
		return nil, err
	}
	var request = api.StartLargeFileRequest{
		BucketID:    bucketID,
		Name:        remote,
		ContentType: fs.MimeType(src),
		Info: map[string]string{
			timeKey: timeString(modTime),
		},
	}
	// Set the SHA1 if known
	if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
		request.Info[sha1Key] = calculatedSha1
	}
	var response api.StartLargeFileResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	up = &largeUpload{
		f:     f,
		o:     o,
		in:    in,
		id:    response.ID,
		size:  size,
		parts: parts,
		sha1s: make([]string, parts),
	}
	return up, nil
}
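The part count above is a ceiling division of the file size by the chunk size, guarded by a maximum part count. Here is a standalone sketch of the same arithmetic; the chunk size and part limit in main are illustrative values, not B2's actual limits.

package main

import (
	"errors"
	"fmt"
)

// partsFor returns how many fixed-size chunks are needed to cover size
// bytes - a ceiling division - and rejects uploads needing too many parts.
func partsFor(size, chunkSize, maxParts int64) (int64, error) {
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++ // a final short chunk still needs its own part
	}
	if parts > maxParts {
		return 0, errors.New("too many parts - increase the chunk size")
	}
	return parts, nil
}

func main() {
	parts, err := partsFor(250<<20, 96<<20, 10000) // 250 MiB in 96 MiB chunks
	fmt.Println(parts, err)                        // 3 <nil>
}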
Example #9
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}
	// Check if object already exists
	err := o.readMetaData()
	switch err {
	case nil:
		return o, o.Update(in, src)
	case fs.ErrorObjectNotFound:
		// Not found so create it
	default:
		return nil, err
	}
	// If not create it
	leaf, directoryID, err := f.dirCache.FindPath(remote, true)
	if err != nil {
		return nil, err
	}
	if size > warnFileSize {
		fs.Debug(f, "Warning: file %q may fail because it is too big. Use --max-size=%dGB to skip large files.", remote, warnFileSize>>30)
	}
	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
	var info *acd.File
	var resp *http.Response
	err = f.pacer.CallNoRetry(func() (bool, error) {
		f.startUpload()
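		// Zero-length files use PutSized so the (zero) size is passed explicitly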
		if src.Size() != 0 {
			info, resp, err = folder.Put(in, leaf)
		} else {
			info, resp, err = folder.PutSized(in, size, leaf)
		}
		f.stopUpload()
		var ok bool
		ok, info, err = f.checkUpload(resp, in, src, info, err)
		if ok {
			return false, nil
		}
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	o.info = info.Node
	return o, nil
}
Example #10
func newWriteFileHandle(d *Dir, f *File, src fs.ObjectInfo) (*WriteFileHandle, error) {
	fh := &WriteFileHandle{
		remote: src.Remote(),
		result: make(chan error, 1),
		file:   f,
	}
	fh.pipeReader, fh.pipeWriter = io.Pipe()
	go func() {
		o, err := d.f.Put(fh.pipeReader, src)
		fh.o = o
		fh.result <- err
	}()
	fh.file.addWriters(1)
	return fh, nil
}
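newWriteFileHandle wires the write path to Put through an io.Pipe: writes land on the pipe's writer end while a goroutine runs the upload from the reader end and reports the outcome on a buffered channel. A self-contained sketch of the pattern, with consume standing in for d.f.Put:

package main

import (
	"fmt"
	"io"
	"strings"
)

// consume stands in for d.f.Put: it drains the reader as an upload would.
func consume(in io.Reader) (int64, error) {
	return io.Copy(io.Discard, in)
}

func main() {
	pr, pw := io.Pipe()
	result := make(chan error, 1)

	// The "upload" runs concurrently, reading whatever is written to pw.
	go func() {
		n, err := consume(pr)
		fmt.Println("uploaded", n, "bytes")
		result <- err
	}()

	// Writes block until the reader side consumes them, so the data
	// streams straight through without being buffered in full.
	_, _ = io.Copy(pw, strings.NewReader("hello world"))
	pw.Close() // signals EOF to the upload goroutine

	fmt.Println("upload error:", <-result)
}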
Example #11
File: write.go Project: ncw/rclone
func newWriteFileHandle(d *Dir, f *File, src fs.ObjectInfo) (*WriteFileHandle, error) {
	fh := &WriteFileHandle{
		remote: src.Remote(),
		result: make(chan error, 1),
		file:   f,
	}
	fh.pipeReader, fh.pipeWriter = io.Pipe()
	r := fs.NewAccountSizeName(fh.pipeReader, 0, src.Remote()) // account the transfer
	go func() {
		o, err := d.f.Put(r, src)
		fh.o = o
		fh.result <- err
	}()
	fh.file.addWriters(1)
	fs.Stats.Transferring(fh.remote)
	return fh, nil
}
Example #12
// checkUpload checks to see if an error occurred after the file was
// completely uploaded.
//
// If it was then it waits for a while to see if the file really
// exists and is the right size and returns an updated info.
//
// If the file wasn't found or was the wrong size then it returns the
// original error.
//
// This is a workaround for Amazon sometimes returning
//
//  * 408 REQUEST_TIMEOUT
//  * 504 GATEWAY_TIMEOUT
//  * 500 Internal server error
//
// at the end of large uploads.  The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error) (fixedError bool, info *acd.File, err error) {
	// Return if no error - all is well
	if inErr == nil {
		return false, inInfo, inErr
	}
	// If not one of the errors we can fix return
	if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
		return false, inInfo, inErr
	}
	const sleepTime = 5 * time.Second           // sleep between tries
	retries := int(*uploadWaitTime / sleepTime) // number of retries
	if retries <= 0 {
		retries = 1
	}
	buf := make([]byte, 1)
	n, err := in.Read(buf)
	if !(n == 0 && err == io.EOF) {
		fs.Debug(src, "Upload error detected but didn't finish upload (n=%d, err=%v): %v", n, err, inErr)
		return false, inInfo, inErr
	}
	fs.Debug(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v", inErr)
	remote := src.Remote()
	for i := 1; i <= retries; i++ {
		o, err := f.NewObject(remote)
		if err == fs.ErrorObjectNotFound {
			fs.Debug(src, "Object not found - waiting (%d/%d)", i, retries)
		} else if err != nil {
			fs.Debug(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
		} else {
			if src.Size() == o.Size() {
				fs.Debug(src, "Object found with correct size - returning with no error")
				info = &acd.File{
					Node: o.(*Object).info,
				}
				return true, info, nil
			}
			fs.Debug(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
		}
		time.Sleep(sleepTime)
	}
	fs.Debug(src, "Finished waiting for object - returning original error: %v", inErr)
	return false, inInfo, inErr
}