Example 1
// Upload uploads the chunks from the input
// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
func (rx *resumableUpload) Upload() (*drive.File, error) {
	start := int64(0)
	buf := make([]byte, chunkSize)
	var statusCode int
	for start < rx.ContentLength {
		reqSize := rx.ContentLength - start
		if reqSize >= int64(chunkSize) {
			reqSize = int64(chunkSize)
		} else {
			buf = buf[:reqSize]
		}

		// Read the chunk
		_, err := io.ReadFull(rx.Media, buf)
		if err != nil {
			return nil, err
		}

		// Transfer the chunk
		for try := 1; try <= maxTries; try++ {
			fs.Debug(rx.remote, "Sending chunk %d length %d, %d/%d", start, reqSize, try, maxTries)
			rx.f.beginCall()
			statusCode, err = rx.transferChunk(start, buf)
			rx.f.endCall(err)
			if statusCode == statusResumeIncomplete || statusCode == http.StatusCreated || statusCode == http.StatusOK {
				goto success
			}
			fs.Debug(rx.remote, "Retrying chunk %d/%d, code=%d, err=%v", try, maxTries, statusCode, err)
		}
		fs.Debug(rx.remote, "Failed to send chunk")
		return nil, fs.RetryErrorf("Chunk upload failed - retry: code=%d, err=%v", statusCode, err)
	success:

		start += reqSize
	}
	// Resume or retry uploads that fail due to connection interruptions or
	// any 5xx errors, including:
	//
	// 500 Internal Server Error
	// 502 Bad Gateway
	// 503 Service Unavailable
	// 504 Gateway Timeout
	//
	// Use an exponential backoff strategy if any 5xx server error is
	// returned when resuming or retrying upload requests. These errors can
	// occur if a server is getting overloaded. Exponential backoff can help
	// alleviate these kinds of problems during periods of high volume of
	// requests or heavy network traffic.  Other kinds of requests should not
	// be handled by exponential backoff but you can still retry a number of
	// them. When retrying these requests, limit the number of times you
	// retry them. For example your code could limit to ten retries or less
	// before reporting an error.
	//
	// Handle 404 Not Found errors when doing resumable uploads by starting
	// the entire upload over from the beginning.
	if rx.ret == nil {
		return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
	}
	return rx.ret, nil
}
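The retry strategy quoted in the comment above pairs naturally with exponential backoff. Below is a minimal, self-contained sketch of capped exponential backoff in Go; it illustrates the quoted guidance and is not rclone's actual retry code - doWithBackoff, minSleep and maxSleep are assumed names.

package main

import (
	"errors"
	"fmt"
	"time"
)

// doWithBackoff retries fn with exponentially growing pauses,
// capped at maxSleep, giving up after maxTries attempts.
func doWithBackoff(fn func() error, maxTries int, minSleep, maxSleep time.Duration) error {
	sleep := minSleep
	var err error
	for try := 1; try <= maxTries; try++ {
		err = fn()
		if err == nil {
			return nil
		}
		if try == maxTries {
			break
		}
		time.Sleep(sleep)
		sleep *= 2 // double the pause after each failure
		if sleep > maxSleep {
			sleep = maxSleep // cap the pause
		}
	}
	return fmt.Errorf("giving up after %d tries: %v", maxTries, err)
}

func main() {
	calls := 0
	err := doWithBackoff(func() error {
		calls++
		if calls < 3 {
			return errors.New("503 Service Unavailable") // simulated 5xx
		}
		return nil
	}, 10, 100*time.Millisecond, 5*time.Second)
	fmt.Println(err) // <nil> - succeeds on the third attempt
}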
Example 2
// Flush is called each time the file or directory is closed.
// Because there can be multiple file descriptors referring to a
// single opened file, Flush can be called multiple times.
func (fh *ReadFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
	fh.mu.Lock()
	defer fh.mu.Unlock()
	fs.Debug(fh.o, "ReadFileHandle.Flush")

	// Ignore the Flush as there is nothing we can sensibly do and
	// it seems quite common for Flush to be called from
	// different threads each of which have read some data.
	if false {
		// If Read hasn't been called then ignore the Flush - Release
		// will pick it up
		if !fh.readCalled {
			fs.Debug(fh.o, "ReadFileHandle.Flush ignoring flush on unread handle")
			return nil

		}
		err := fh.close()
		if err != nil {
			fs.ErrorLog(fh.o, "ReadFileHandle.Flush error: %v", err)
			return err
		}
	}
	fs.Debug(fh.o, "ReadFileHandle.Flush OK")
	return nil
}
Example 3
File: dir.go Project: ncw/rclone
// ReadDirAll reads the contents of the directory
func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
	fs.Debug(d.path, "Dir.ReadDirAll")
	err = d.readDir()
	if err != nil {
		fs.Debug(d.path, "Dir.ReadDirAll error: %v", err)
		return nil, err
	}
	d.mu.RLock()
	defer d.mu.RUnlock()
	for _, item := range d.items {
		var dirent fuse.Dirent
		switch x := item.o.(type) {
		case fs.Object:
			dirent = fuse.Dirent{
				// Inode FIXME ???
				Type: fuse.DT_File,
				Name: path.Base(x.Remote()),
			}
		case *fs.Dir:
			dirent = fuse.Dirent{
				// Inode FIXME ???
				Type: fuse.DT_Dir,
				Name: path.Base(x.Remote()),
			}
		default:
			err = errors.Errorf("unknown type %T", item.o)
			fs.ErrorLog(d.path, "Dir.ReadDirAll error: %v", err)
			return nil, err
		}
		dirents = append(dirents, dirent)
	}
	fs.Debug(d.path, "Dir.ReadDirAll OK with %d entries", len(dirents))
	return dirents, nil
}
Example 4
// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDrive) readMetaData() (err error) {
	if o.id != "" {
		return nil
	}

	directory, leaf := splitPath(o.remote)
	directoryId, err := o.drive.findDir(directory, false)
	if err != nil {
		fs.Debug(o, "Couldn't find directory: %s", err)
		return fmt.Errorf("Couldn't find directory: %s", err)
	}

	found, err := o.drive.listAll(directoryId, leaf, false, true, func(item *drive.File) bool {
		if item.Title == leaf {
			o.setMetaData(item)
			return true
		}
		return false
	})
	if err != nil {
		return err
	}
	if !found {
		fs.Debug(o, "Couldn't find object")
		return fmt.Errorf("Couldn't find object")
	}
	return nil
}
Example 5
// Path should be directory path either "" or "path/"
//
// List the directory using a recursive list from the root
//
// This fetches the minimum amount of data but makes more API calls,
// which makes it slow
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
	var subErrMu sync.Mutex // guards subError, which is set from multiple goroutines
	var subError error
	// Make the API request
	var wg sync.WaitGroup
	_, err := f.listAll(dirID, false, false, func(info *api.Item) bool {
		// Recurse on directories
		if info.Folder != nil {
			wg.Add(1)
			folder := path + info.Name + "/"
			fs.Debug(f, "Reading %s", folder)
			go func() {
				defer wg.Done()
				err := f.listDirRecursive(info.ID, folder, out)
				if err != nil {
					subErrMu.Lock()
					subError = err
					subErrMu.Unlock()
					fs.ErrorLog(f, "Error reading %s:%s", folder, err)
				}
			}()
		} else {
			if o := f.newObjectWithInfo(path+info.Name, info); o != nil {
				out <- o
			}
		}
		return false
	})
	wg.Wait()
	fs.Debug(f, "Finished reading %s", path)
	if err != nil {
		return err
	}
	if subError != nil {
		return subError
	}
	return nil
}
Example 6
File: read.go Project: ncw/rclone
// seek to a new offset
//
// if reopen is true, then we won't attempt to use an io.Seeker interface
//
// Must be called with fh.mu held
func (fh *ReadFileHandle) seek(offset int64, reopen bool) error {
	// Can we seek it directly?
	oldReader := fh.r.GetReader()
	if do, ok := oldReader.(io.Seeker); !reopen && ok {
		fs.Debug(fh.o, "ReadFileHandle.seek from %d to %d (io.Seeker)", fh.offset, offset)
		_, err := do.Seek(offset, io.SeekStart)
		if err != nil {
			fs.Debug(fh.o, "ReadFileHandle.Read io.Seeker failed: %v", err)
			return err
		}
	} else {
		fs.Debug(fh.o, "ReadFileHandle.seek from %d to %d", fh.offset, offset)
		// if not re-open with a seek
		r, err := fh.o.Open(&fs.SeekOption{Offset: offset})
		if err != nil {
			fs.Debug(fh.o, "ReadFileHandle.Read seek failed: %v", err)
			return err
		}
		err = oldReader.Close()
		if err != nil {
			fs.Debug(fh.o, "ReadFileHandle.Read seek close old failed: %v", err)
		}
		// fh.r = fs.NewAccount(r, fh.o) // account the transfer
		fh.r.UpdateReader(r)
	}
	fh.offset = offset
	return nil
}
Example 7
// ListDir reads the directory specified by the job into out, returning any more jobs
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
	fs.Debug(f, "Reading %q", job.Path)
	_, err = f.listAll(job.DirID, "", false, false, func(item *drive.File) bool {
		remote := job.Path + item.Title
		switch {
		case *driveAuthOwnerOnly && !isAuthOwned(item):
			// ignore object or directory
		case item.MimeType == driveFolderType:
			if out.IncludeDirectory(remote) {
				dir := &fs.Dir{
					Name:  remote,
					Bytes: -1,
					Count: -1,
				}
				dir.When, _ = time.Parse(timeFormatIn, item.ModifiedDate)
				if out.AddDir(dir) {
					return true
				}
				if job.Depth > 0 {
					jobs = append(jobs, dircache.ListDirJob{DirID: item.Id, Path: remote + "/", Depth: job.Depth - 1})
				}
			}
		case item.Md5Checksum != "":
			// If item has MD5 sum it is a file stored on drive
			o, err := f.newObjectWithInfo(remote, item)
			if err != nil {
				out.SetError(err)
				return true
			}
			if out.Add(o) {
				return true
			}
		case len(item.ExportLinks) != 0:
			// If item has export links then it is a google doc
			extension, link := f.findExportFormat(remote, item)
			if extension == "" {
				fs.Debug(remote, "No export formats found")
			} else {
				o, err := f.newObjectWithInfo(remote+"."+extension, item)
				if err != nil {
					out.SetError(err)
					return true
				}
				obj := o.(*Object)
				obj.isDocument = true
				obj.url = link
				obj.bytes = -1
				if out.Add(o) {
					return true
				}
			}
		default:
			fs.Debug(remote, "Ignoring unknown object")
		}
		return false
	})
	fs.Debug(f, "Finished reading %q", job.Path)
	return jobs, err
}
Example 8
File: b2.go Project: ncw/rclone
// purge deletes all the files and directories
//
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(oldOnly bool) error {
	var errReturn error
	var checkErrMutex sync.Mutex
	var checkErr = func(err error) {
		if err == nil {
			return
		}
		checkErrMutex.Lock()
		defer checkErrMutex.Unlock()
		if errReturn == nil {
			errReturn = err
		}
	}

	// Delete Config.Transfers in parallel
	toBeDeleted := make(chan *api.File, fs.Config.Transfers)
	var wg sync.WaitGroup
	wg.Add(fs.Config.Transfers)
	for i := 0; i < fs.Config.Transfers; i++ {
		go func() {
			defer wg.Done()
			for object := range toBeDeleted {
				fs.Stats.Checking(object.Name)
				checkErr(f.deleteByID(object.ID, object.Name))
				fs.Stats.DoneChecking(object.Name)
			}
		}()
	}
	last := ""
	checkErr(f.list("", fs.MaxLevel, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
		if !isDirectory {
			fs.Stats.Checking(remote)
			if oldOnly && last != remote {
				if object.Action == "hide" {
					fs.Debug(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
					toBeDeleted <- object
				} else {
					fs.Debug(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
				}
			} else {
				fs.Debug(remote, "Deleting (id %q)", object.ID)
				toBeDeleted <- object
			}
			last = remote
			fs.Stats.DoneChecking(remote)
		}
		return nil
	}))
	close(toBeDeleted)
	wg.Wait()

	if !oldOnly {
		checkErr(f.Rmdir(""))
	}
	return errReturn
}
Example 9
// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, *session.Session, error) {
	// Make the auth
	accessKeyID := fs.ConfigFile.MustValue(name, "access_key_id")
	secretAccessKey := fs.ConfigFile.MustValue(name, "secret_access_key")
	var auth *credentials.Credentials
	switch {
	case accessKeyID == "" && secretAccessKey == "":
		fs.Debug(name, "Using anonymous access for S3")
		auth = credentials.AnonymousCredentials
	case accessKeyID == "":
		return nil, nil, errors.New("access_key_id not found")
	case secretAccessKey == "":
		return nil, nil, errors.New("secret_access_key not found")
	default:
		auth = credentials.NewStaticCredentials(accessKeyID, secretAccessKey, "")
	}

	endpoint := fs.ConfigFile.MustValue(name, "endpoint")
	region := fs.ConfigFile.MustValue(name, "region")
	if region == "" && endpoint == "" {
		endpoint = "https://s3.amazonaws.com/"
	}
	if region == "" {
		region = "us-east-1"
	}
	awsConfig := aws.NewConfig().
		WithRegion(region).
		WithMaxRetries(maxRetries).
		WithCredentials(auth).
		WithEndpoint(endpoint).
		WithHTTPClient(fs.Config.Client()).
		WithS3ForcePathStyle(true)
	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
	ses := session.New()
	c := s3.New(ses, awsConfig)
	if region == "other-v2-signature" {
		fs.Debug(name, "Using v2 auth")
		signer := func(req *request.Request) {
			// Ignore AnonymousCredentials object
			if req.Config.Credentials == credentials.AnonymousCredentials {
				return
			}
			sign(accessKeyID, secretAccessKey, req.HTTPRequest)
		}
		c.Handlers.Sign.Clear()
		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
		c.Handlers.Sign.PushBack(signer)
	}
	// Add user agent
	c.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
	})
	return c, ses, nil
}
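A hypothetical call site for the function above, assuming a remote named "mys3" is configured and that the surrounding file imports log and fmt alongside the AWS packages already shown; ListBuckets serves as a simple smoke test.

c, ses, err := s3Connection("mys3")
if err != nil {
	log.Fatalf("s3Connection failed: %v", err)
}
_ = ses // the session could be reused to build other service clients
out, err := c.ListBuckets(&s3.ListBucketsInput{})
if err != nil {
	log.Fatalf("ListBuckets failed: %v", err)
}
for _, b := range out.Buckets {
	fmt.Println(aws.StringValue(b.Name))
}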
Example 10
// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDropbox) readMetaData() (err error) {
	if o.md5sum != "" {
		return nil
	}

	// fs.Debug(o, "Reading metadata from datastore")
	record, err := o.dropbox.readRecord(o.metadataKey())
	if err != nil {
		fs.Debug(o, "Couldn't read metadata: %s", err)
		record = nil
	}

	if record != nil {
		// Read md5sum
		md5sumInterface, ok, err := record.Get(md5sumField)
		if err != nil {
			return err
		}
		if !ok {
			fs.Debug(o, "Couldn't find md5sum in record")
		} else {
			md5sum, ok := md5sumInterface.(string)
			if !ok {
				fs.Debug(o, "md5sum not a string")
			} else {
				o.md5sum = md5sum
			}
		}

		// read mtime
		mtimeInterface, ok, err := record.Get(mtimeField)
		if err != nil {
			return err
		}
		if !ok {
			fs.Debug(o, "Couldn't find mtime in record")
		} else {
			mtime, ok := mtimeInterface.(string)
			if !ok {
				fs.Debug(o, "mtime not a string")
			} else {
				modTime, err := time.Parse(timeFormatIn, mtime)
				if err != nil {
					return err
				}
				o.modTime = modTime
			}
		}
	}

	// Last resort
	return o.readEntryAndSetMetadata()
}
Example 11
File: dir.go Project: ncw/rclone
// read the directory
func (d *Dir) readDir() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	when := time.Now()
	if d.read.IsZero() {
		fs.Debug(d.path, "Reading directory")
	} else {
		age := when.Sub(d.read)
		if age < dirCacheTime {
			return nil
		}
		fs.Debug(d.path, "Re-reading directory (%v old)", age)
	}
	objs, dirs, err := fs.NewLister().SetLevel(1).Start(d.f, d.path).GetAll()
	if err == fs.ErrorDirNotFound {
		// We treat directory not found as empty because we
		// create directories on the fly
	} else if err != nil {
		return err
	}
	// NB when we re-read a directory after its cache has expired
	// we drop the old files which should lead to correct
	// behaviour but may not be very efficient.

	// Keep a note of the previous contents of the directory
	oldItems := d.items

	// Cache the items by name
	d.items = make(map[string]*DirEntry, len(objs)+len(dirs))
	for _, obj := range objs {
		name := path.Base(obj.Remote())
		d.items[name] = &DirEntry{
			o:    obj,
			node: nil,
		}
	}
	for _, dir := range dirs {
		name := path.Base(dir.Remote())
		// Use old dir value if it exists
		if oldItem, ok := oldItems[name]; ok {
			if _, ok := oldItem.o.(*fs.Dir); ok {
				d.items[name] = oldItem
				continue
			}
		}
		d.items[name] = &DirEntry{
			o:    dir,
			node: nil,
		}
	}
	d.read = when
	return nil
}
Example 12
// SetModTime sets the modification time of the local fs object
func (o *FsObjectLocal) SetModTime(modTime time.Time) {
	err := os.Chtimes(o.path, modTime, modTime)
	if err != nil {
		fs.Debug(o, "Failed to set mtime on file: %s", err)
		return
	}
	// Re-read metadata
	err = o.lstat()
	if err != nil {
		fs.Debug(o, "Failed to stat: %s", err)
		return
	}
}
Example 13
File: dir.go Project: ncw/rclone
// Lookup looks up a specific entry in the receiver.
//
// Lookup should return a Node corresponding to the entry.  If the
// name does not exist in the directory, Lookup should return ENOENT.
//
// Lookup need not handle the names "." and "..".
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
	path := path.Join(d.path, req.Name)
	fs.Debug(path, "Dir.Lookup")
	item, err := d.lookupNode(req.Name)
	if err != nil {
		if err != fuse.ENOENT {
			fs.ErrorLog(path, "Dir.Lookup error: %v", err)
		}
		return nil, err
	}
	fs.Debug(path, "Dir.Lookup OK")
	return item.node, nil
}
Example 14
// checkUpload checks to see if an error occurred after the file was
// completely uploaded.
//
// If it was then it waits for a while to see if the file really
// exists and is the right size and returns an updated info.
//
// If the file wasn't found or was the wrong size then it returns the
// original error.
//
// This is a workaround for Amazon sometimes returning
//
//  * 408 REQUEST_TIMEOUT
//  * 504 GATEWAY_TIMEOUT
//  * 500 Internal server error
//
// at the end of large uploads.  The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
	// Return if no error - all is well
	if inErr == nil {
		return false, inInfo, inErr
	}
	// If not one of the errors we can fix return
	// if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
	// 	return false, inInfo, inErr
	// }

	// The HTTP status
	httpStatus := "HTTP status UNKNOWN"
	if resp != nil {
		httpStatus = resp.Status
	}

	// check to see if we read to the end
	buf := make([]byte, 1)
	n, err := in.Read(buf)
	if !(n == 0 && err == io.EOF) {
		fs.Debug(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Don't wait for uploads - assume they will appear later
	if *uploadWaitPerGB <= 0 {
		fs.Debug(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Time we should wait for the upload
	uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))

	const sleepTime = 5 * time.Second                        // sleep between tries
	retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up

	fs.Debug(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
	remote := src.Remote()
	for i := 1; i <= retries; i++ {
		o, err := f.NewObject(remote)
		if err == fs.ErrorObjectNotFound {
			fs.Debug(src, "Object not found - waiting (%d/%d)", i, retries)
		} else if err != nil {
			fs.Debug(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
		} else {
			if src.Size() == o.Size() {
				fs.Debug(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
				info = &acd.File{
					Node: o.(*Object).info,
				}
				return true, info, nil
			}
			fs.Debug(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
		}
		time.Sleep(sleepTime)
	}
	fs.Debug(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
	return false, inInfo, inErr
}
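The retries calculation above is the standard integer ceiling-division idiom. A worked example with assumed values: for an uploadWaitPerGB of 3 minutes and a 512 MiB upload, timeToWait comes out at 1m30s and retries at ceil(90/5) = 18.

uploadWaitPerGB := 3 * time.Minute // assumed flag value
size := int64(512 << 20)           // 512 MiB upload
uploadWaitPerByte := float64(uploadWaitPerGB) / 1024 / 1024 / 1024
timeToWait := time.Duration(uploadWaitPerByte * float64(size)) // 1m30s
const sleepTime = 5 * time.Second
retries := int((timeToWait + sleepTime - 1) / sleepTime) // rounds up: 18
fmt.Println(timeToWait, retries)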
Example 15
File: dir.go Project: ncw/rclone
// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fusefs.Node, fusefs.Handle, error) {
	path := path.Join(d.path, req.Name)
	fs.Debug(path, "Dir.Create")
	src := newCreateInfo(d.f, path)
	// This gets added to the directory when the file is written
	file := newFile(d, nil)
	fh, err := newWriteFileHandle(d, file, src)
	if err != nil {
		fs.ErrorLog(path, "Dir.Create error: %v", err)
		return nil, nil, err
	}
	fs.Debug(path, "Dir.Create OK")
	return file, fh, nil
}
Example 16
// Mkdir creates a new directory
func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fusefs.Node, error) {
	// We just pretend to have created the directory - rclone will
	// actually create the directory if we write files into it
	path := path.Join(d.path, req.Name)
	fs.Debug(path, "Dir.Mkdir")
	fsDir := &fs.Dir{
		Name: path,
		When: time.Now(),
	}
	dir := newDir(d.f, path)
	d.addObject(fsDir, dir)
	fs.Debug(path, "Dir.Mkdir OK")
	return dir, nil
}
Example 17
// End a call to the drive API
//
// Refresh the pace given an error that was returned.  It returns a
// boolean as to whether the operation should be retried.
//
// See https://developers.google.com/drive/web/handle-errors
// http://stackoverflow.com/questions/18529524/403-rate-limit-after-only-1-insert-per-second
func (f *FsDrive) endCall(err error) bool {
	again := false
	oldSleepTime := f.sleepTime
	if err == nil {
		f.sleepTime = (f.sleepTime<<decayConstant - f.sleepTime) >> decayConstant
		if f.sleepTime < minSleep {
			f.sleepTime = minSleep
		}
		if f.sleepTime != oldSleepTime {
			fs.Debug(f, "Reducing sleep to %v", f.sleepTime)
		}
	} else {
		fs.Debug(f, "Error recived: %T %#v", err, err)
		// Check for net error Timeout()
		if x, ok := err.(interface {
			Timeout() bool
		}); ok && x.Timeout() {
			again = true
		}
		// Check for net error Temporary()
		if x, ok := err.(interface {
			Temporary() bool
		}); ok && x.Temporary() {
			again = true
		}
		switch gerr := err.(type) {
		case *googleapi.Error:
			if gerr.Code >= 500 && gerr.Code < 600 {
				// All 5xx errors should be retried
				again = true
			} else if len(gerr.Errors) > 0 {
				reason := gerr.Errors[0].Reason
				if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
					again = true
				}
			}
		}
	}
	if again {
		f.sleepTime *= 2
		if f.sleepTime > maxSleep {
			f.sleepTime = maxSleep
		}
		if f.sleepTime != oldSleepTime {
			fs.Debug(f, "Rate limited, increasing sleep to %v", f.sleepTime)
		}
	}
	return again
}
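The shift expression in the no-error branch is an integer approximation of sleepTime * (2^decayConstant - 1) / 2^decayConstant, so each successful call decays the sleep geometrically toward minSleep. A standalone sketch with assumed constants shows the effect:

const (
	decayConstant = 2                     // assumed: keep 3/4 of the sleep per good call
	minSleep      = 10 * time.Millisecond // assumed floor
)
sleepTime := 2 * time.Second
for i := 0; i < 4; i++ {
	sleepTime = (sleepTime<<decayConstant - sleepTime) >> decayConstant
	if sleepTime < minSleep {
		sleepTime = minSleep
	}
	fmt.Println(sleepTime) // 1.5s, 1.125s, 843.75ms, 632.8125ms
}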
Example 18
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
	if chunkSize%(320*1024) != 0 {
		return fmt.Errorf("Chunk size %d is not a multiple of 320k", chunkSize)
	}

	// Create upload session
	fs.Debug(o, "Starting multipart upload")
	session, err := o.createUploadSession()
	if err != nil {
		return err
	}
	uploadURL := session.UploadURL

	// Cancel the session if something went wrong
	defer func() {
		if err != nil {
			fs.Debug(o, "Cancelling multipart upload")
			cancelErr := o.cancelUploadSession(uploadURL)
			if cancelErr != nil {
				fs.Log(o, "Failed to cancel multipart upload: %v", err)
			}
		}
	}()

	// Upload the chunks
	remaining := size
	position := int64(0)
	buf := make([]byte, int64(chunkSize))
	for remaining > 0 {
		n := int64(chunkSize)
		if remaining < n {
			n = remaining
			buf = buf[:n]
		}
		_, err = io.ReadFull(in, buf)
		if err != nil {
			return err
		}
		fs.Debug(o, "Uploading segment %d/%d size %d", position, size, n)
		err = o.uploadFragment(uploadURL, position, size, buf)
		if err != nil {
			return err
		}
		remaining -= n
		position += n
	}

	return nil
}
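The 320 KiB multiple is a OneDrive upload-session requirement, which is why the function rejects other sizes up front. If the chunk size were configurable, rounding it down to a valid value could look like this sketch (alignChunkSize is an assumed helper, not part of the backend):

const fragment = 320 * 1024 // OneDrive fragment granularity

// alignChunkSize rounds size down to a multiple of 320 KiB,
// using at least one fragment.
func alignChunkSize(size int64) int64 {
	if size < fragment {
		return fragment
	}
	return size - size%fragment
}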
Example 19
// seek to a new offset
func (fh *ReadFileHandle) seek(offset int64) error {
	fs.Debug(fh.o, "ReadFileHandle.seek from %d to %d", fh.offset, offset)
	r, err := fh.o.Open(&fs.SeekOption{Offset: offset})
	if err != nil {
		fs.Debug(fh.o, "ReadFileHandle.Read seek failed: %v", err)
		return err
	}
	err = fh.r.Close()
	if err != nil {
		fs.Debug(fh.o, "ReadFileHandle.Read seek close old failed: %v", err)
	}
	fh.r = r
	fh.offset = offset
	return nil
}
Example 20
// ListDir reads the directory specified by the job into out, returning any more jobs
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
	fs.Debug(f, "Reading %q", job.Path)
	maxTries := fs.Config.LowLevelRetries
	for tries := 1; tries <= maxTries; tries++ {
		_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
			remote := job.Path + *node.Name
			switch *node.Kind {
			case folderKind:
				if out.IncludeDirectory(remote) {
					dir := &fs.Dir{
						Name:  remote,
						Bytes: -1,
						Count: -1,
					}
					dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
					if out.AddDir(dir) {
						return true
					}
					if job.Depth > 0 {
						jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
					}
				}
			case fileKind:
				o, err := f.newObjectWithInfo(remote, node)
				if err != nil {
					out.SetError(err)
					return true
				}
				if out.Add(o) {
					return true
				}
			default:
				// ignore ASSET etc
			}
			return false
		})
		if fs.IsRetryError(err) {
			fs.Debug(f, "Directory listing error for %q: %v - low level retry %d/%d", job.Path, err, tries, maxTries)
			continue
		}
		if err != nil {
			return nil, err
		}
		break
	}
	fs.Debug(f, "Finished reading %q", job.Path)
	return jobs, err
}
Example 21
// setMetaData sets the fs data from a storage.Object
func (o *FsObjectStorage) setMetaData(info *storage.Object) {
	o.url = info.MediaLink
	o.bytes = int64(info.Size)

	// Read md5sum
	md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
	if err != nil {
		fs.Log(o, "Bad MD5 decode: %v", err)
	} else {
		o.md5sum = hex.EncodeToString(md5sumData)
	}

	// read mtime out of metadata if available
	mtimeString, ok := info.Metadata[metaMtime]
	if ok {
		modTime, err := time.Parse(timeFormatIn, mtimeString)
		if err == nil {
			o.modTime = modTime
			return
		}
		fs.Debug(o, "Failed to read mtime from metadata: %s", err)
	}

	// Fallback to the Updated time
	modTime, err := time.Parse(timeFormatIn, info.Updated)
	if err != nil {
		fs.Log(o, "Bad time decode: %v", err)
	} else {
		o.modTime = modTime
	}
}
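Google Cloud Storage reports the MD5 as base64 of the raw 16-byte digest, while rclone works with lowercase hex, hence the decode and re-encode above. For example, the digest of "foo":

md5sumData, err := base64.StdEncoding.DecodeString("rL0Y20zC+Fzt72VPzMSk2A==")
if err != nil {
	log.Fatal(err)
}
fmt.Println(hex.EncodeToString(md5sumData)) // acbd18db4cc2f85cedef654fccc4a4d8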
Example 22
// initConfig is run by cobra after initialising the flags
func initConfig() {
	// Log file output
	if *logFile != "" {
		f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
		if err != nil {
			log.Fatalf("Failed to open log file: %v", err)
		}
		_, err = f.Seek(0, os.SEEK_END)
		if err != nil {
			fs.ErrorLog(nil, "Failed to seek log file to end: %v", err)
		}
		log.SetOutput(f)
		fs.DebugLogger.SetOutput(f)
		redirectStderr(f)
	}

	// Load the rest of the config now we have started the logger
	fs.LoadConfig()

	// Write the args for debug purposes
	fs.Debug("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

	// Setup CPU profiling if desired
	if *cpuProfile != "" {
		fs.Log(nil, "Creating CPU profile %q\n", *cpuProfile)
		f, err := os.Create(*cpuProfile)
		if err != nil {
			fs.Stats.Error()
			log.Fatal(err)
		}
		err = pprof.StartCPUProfile(f)
		if err != nil {
			fs.Stats.Error()
			log.Fatal(err)
		}
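		// NB this defer runs when initConfig returns, not at process
		// exit, so it stops the CPU profile almost immediately - to
		// profile a whole run the profile must be stopped at exit.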
		defer pprof.StopCPUProfile()
	}

	// Setup memory profiling if desired
	if *memProfile != "" {
		defer func() {
			fs.Log(nil, "Saving Memory profile %q\n", *memProfile)
			f, err := os.Create(*memProfile)
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
			err = pprof.WriteHeapProfile(f)
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
			err = f.Close()
			if err != nil {
				fs.Stats.Error()
				log.Fatal(err)
			}
		}()
	}
}
Example 23
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
	//  go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debug(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// create the destination directory if necessary
	err := f.dirCache.FindRoot(true)
	if err != nil {
		return nil, err
	}
	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
	if err != nil {
		return nil, err
	}
	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
	if err != nil {
		return nil, err
	}
	dstInfo, err := f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
	if err != nil {
		return nil, err
	}
	dstObj := &Object{
		fs:     f,
		remote: remote,
		info:   dstInfo,
	}
	return dstObj, nil
}
Example 24
// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDrive) readMetaData() (err error) {
	if o.id != "" {
		return nil
	}

	leaf, directoryID, err := o.drive.dirCache.FindPath(o.remote, false)
	if err != nil {
		return err
	}

	found, err := o.drive.listAll(directoryID, leaf, false, true, func(item *drive.File) bool {
		if item.Title == leaf {
			o.setMetaData(item)
			return true
		}
		return false
	})
	if err != nil {
		return err
	}
	if !found {
		fs.Debug(o, "Couldn't find object")
		return fmt.Errorf("Couldn't find object")
	}
	return nil
}
Example 25
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
	//fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
	folder := acd.FolderFromId(pathID, f.c.Nodes)
	var resp *http.Response
	var subFolder *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		subFolder, resp, err = folder.GetFolder(leaf)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		if err == acd.ErrorNodeNotFound {
			//fs.Debug(f, "...Not found")
			return "", false, nil
		}
		//fs.Debug(f, "...Error %v", err)
		return "", false, err
	}
	if subFolder.Status != nil && *subFolder.Status != statusAvailable {
		fs.Debug(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
		time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
		return "", false, nil
	}
	//fs.Debug(f, "...Found(%q, %v)", *subFolder.Id, leaf)
	return *subFolder.Id, true, nil
}
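f.pacer.Call repeatedly invokes the closure, retrying while it reports (true, err) and pacing the retries. A minimal sketch of that idiom with assumed names (not rclone's actual pacer, which also adapts its sleep time, much like endCall in Example 17):

// paced is the shape of a retryable operation: it returns
// true when the caller should try the call again.
type paced func() (bool, error)

// call retries fn up to maxTries times with a fixed pause between tries.
func call(fn paced, maxTries int, pause time.Duration) (err error) {
	for i := 0; i < maxTries; i++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
		time.Sleep(pause)
	}
	return err
}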
Example 26
// updateChunks updates the existing object using chunks to a separate
// container.  It returns a string which prefixes current segments.
func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64) (string, error) {
	// Create the segmentsContainer if it doesn't exist
	err := o.fs.c.ContainerCreate(o.fs.segmentsContainer, nil)
	if err != nil {
		return "", err
	}
	// Upload the chunks
	left := size
	i := 0
	uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
	segmentsPath := fmt.Sprintf("%s%s/%s", o.fs.root, o.remote, uniquePrefix)
	for left > 0 {
		n := min(left, int64(chunkSize))
		headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
		segmentReader := io.LimitReader(in, n)
		segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
		fs.Debug(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
		_, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
		if err != nil {
			return "", err
		}
		left -= n
		i++
	}
	// Upload the manifest
	headers["X-Object-Manifest"] = fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath)
	headers["Content-Length"] = "0" // set Content-Length as we know it
	emptyReader := bytes.NewReader(nil)
	manifestName := o.fs.root + o.remote
	_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", "", headers)
	return uniquePrefix + "/", err
}
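The min call above predates Go's builtin min (added in Go 1.21), so the backend presumably supplies a small helper along these lines:

// min returns the smaller of x and y.
func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}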
Example 27
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials() (err error) {
	req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
	if err != nil {
		return err
	}
	req.Header.Add("User-Agent", fs.UserAgent)
	resp, err := f.client.Do(req)
	if err != nil {
		return err
	}
	defer fs.CheckClose(resp.Body, &err)
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return errors.Errorf("failed to get credentials: %s", resp.Status)
	}
	decoder := json.NewDecoder(resp.Body)
	var result credentials
	err = decoder.Decode(&result)
	if err != nil {
		return err
	}
	// fs.Debug(f, "Got credentials %+v", result)
	if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
		return errors.New("couldn't read token, result and expired from credentials")
	}
	f.credentials = result
	expires, err := time.Parse(time.RFC3339, result.Expires)
	if err != nil {
		return err
	}
	f.expires = expires
	fs.Debug(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
	return nil
}
Example 28
// Move src to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (dstFs *FsDrive) DirMove(src fs.Fs) error {
	srcFs, ok := src.(*FsDrive)
	if !ok {
		fs.Debug(src, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// Check if destination exists
	dstFs.dirCache.ResetRoot()
	err := dstFs.dirCache.FindRoot(false)
	if err == nil {
		return fs.ErrorDirExists
	}

	// Find ID of parent
	leaf, directoryId, err := dstFs.dirCache.FindPath(dstFs.root, true)
	if err != nil {
		return err
	}

	// Do the move
	patch := drive.File{
		Title:   leaf,
		Parents: []*drive.ParentReference{{Id: directoryId}},
	}
	_, err = dstFs.svc.Files.Patch(srcFs.dirCache.RootID(), &patch).Do()
	if err != nil {
		return err
	}
	srcFs.dirCache.ResetRoot()
	return nil
}
Example 29
// DirMove moves src directory to this remote using server side move
// operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debug(src, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	// Check if source exists
	sstat, err := os.Lstat(srcFs.root)
	if err != nil {
		return err
	}
	// And is a directory
	if !sstat.IsDir() {
		return fs.ErrorCantDirMove
	}

	// Check if destination exists
	_, err = os.Lstat(f.root)
	if !os.IsNotExist(err) {
		return fs.ErrorDirExists
	}

	// Do the move
	return os.Rename(srcFs.root, f.root)
}
Example 30
File: mount.go Project: ncw/rclone
// Mount mounts the remote at mountpoint.
func Mount(f fs.Fs, mountpoint string) error {
	if debugFUSE {
		fuse.Debug = func(msg interface{}) {
			fs.Debug("fuse", "%v", msg)
		}
	}

	// Set permissions
	dirPerms = 0777 &^ os.FileMode(umask)
	filePerms = 0666 &^ os.FileMode(umask)

	// Show stats if the user has specifically requested them
	if cmd.ShowStats() {
		stopStats := cmd.StartStats()
		defer close(stopStats)
	}

	// Mount it
	errChan, err := mount(f, mountpoint)
	if err != nil {
		return errors.Wrap(err, "failed to mount FUSE fs")
	}

	// Wait for umount
	err = <-errChan
	if err != nil {
		return errors.Wrap(err, "failed to umount FUSE fs")
	}

	return nil
}
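The permission setup uses &^ (AND NOT) to clear the umask bits from the fully-permissive modes. For example, with the common umask 022:

umask := 022
dirPerms := 0777 &^ os.FileMode(umask)  // 0755
filePerms := 0666 &^ os.FileMode(umask) // 0644
fmt.Printf("%o %o\n", dirPerms, filePerms)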