// NewFsWithConnection contstructs an Fs from the path, container:path // and authenticated connection func NewFsWithConnection(name, root string, c *swift.Connection) (fs.Fs, error) { container, directory, err := parsePath(root) if err != nil { return nil, err } f := &Fs{ name: name, c: *c, container: container, segmentsContainer: container + "_segments", root: directory, } if f.root != "" { f.root += "/" // Check to see if the object exists - ignoring directory markers info, _, err := f.c.Object(container, directory) if err == nil && info.ContentType != directoryMarkerContentType { remote := path.Base(directory) f.root = path.Dir(directory) if f.root == "." { f.root = "" } else { f.root += "/" } obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } } return f, nil }
// NewFs contstructs an FsSwift from the path, container:path func NewFs(name, root string) (fs.Fs, error) { container, directory, err := parsePath(root) if err != nil { return nil, err } c, err := swiftConnection(name) if err != nil { return nil, err } f := &FsSwift{ name: name, c: *c, container: container, root: directory, } if f.root != "" { f.root += "/" // Check to see if the object exists _, _, err = f.c.Object(container, directory) if err == nil { remote := path.Base(directory) f.root = path.Dir(directory) if f.root == "." { f.root = "" } else { f.root += "/" } obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } } return f, nil }
// NewFs contstructs an FsDropbox from the path, container:path func NewFs(name, root string) (fs.Fs, error) { db := newDropbox(name) f := &FsDropbox{ db: db, } f.setRoot(root) // Read the token from the config file token := fs.ConfigFile.MustValue(name, "token") // Authorize the client db.SetAccessToken(token) // Make a db to store rclone metadata in f.datastoreManager = db.NewDatastoreManager() // Open the datastore in the background go f.openDataStore() // See if the root is actually an object entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit) if err == nil && !entry.IsDir { remote := path.Base(f.root) newRoot := path.Dir(f.root) if newRoot == "." { newRoot = "" } f.setRoot(newRoot) obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } return f, nil }
// NewFsS3 contstructs an FsS3 from the path, bucket:path func NewFs(name, root string) (fs.Fs, error) { bucket, directory, err := s3ParsePath(root) if err != nil { return nil, err } c, err := s3Connection(name) if err != nil { return nil, err } f := &FsS3{ c: c, bucket: bucket, b: c.Bucket(bucket), perm: s3.Private, // FIXME need user to specify root: directory, } if f.root != "" { f.root += "/" // Check to see if the object exists _, err = f.b.Head(directory, nil) if err == nil { remote := path.Base(directory) f.root = path.Dir(directory) if f.root == "." { f.root = "" } else { f.root += "/" } obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } } return f, nil }
// NewFs contstructs an Fs from the path, bucket:path func NewFs(name, root string) (fs.Fs, error) { bucket, directory, err := parsePath(root) if err != nil { return nil, err } f := &Fs{ name: name, bucket: bucket, root: directory, } account := fs.ConfigFile.MustValue(name, "account") if account == "" { return nil, errors.New("account not found") } key := fs.ConfigFile.MustValue(name, "key") if key == "" { return nil, errors.New("key not found") } endpoint := fs.ConfigFile.MustValue(name, "endpoint", defaultEndpoint) f.srv = rest.NewClient(fs.Config.Client()).SetRoot(endpoint + "/b2api/v1").SetErrorHandler(errorHandler) opts := rest.Opts{ Method: "GET", Path: "/b2_authorize_account", UserName: account, Password: key, } _, err = f.srv.CallJSON(&opts, nil, &f.info) if err != nil { return nil, fmt.Errorf("Failed to authenticate: %v", err) } f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken) if f.root != "" { f.root += "/" // Check to see if the (bucket,directory) is actually an existing file oldRoot := f.root remote := path.Base(directory) f.root = path.Dir(directory) if f.root == "." { f.root = "" } else { f.root += "/" } obj := f.NewFsObject(remote) if obj != nil { return fs.NewLimited(f, obj), nil } f.root = oldRoot } return f, nil }
// NewFs contstructs an Fs from the path, bucket:path func NewFs(name, root string) (fs.Fs, error) { if uploadCutoff < chunkSize { return nil, errors.Errorf("b2: upload cutoff must be less than chunk size %v - was %v", chunkSize, uploadCutoff) } if chunkSize < minChunkSize { return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize) } bucket, directory, err := parsePath(root) if err != nil { return nil, err } account := fs.ConfigFile.MustValue(name, "account") if account == "" { return nil, errors.New("account not found") } key := fs.ConfigFile.MustValue(name, "key") if key == "" { return nil, errors.New("key not found") } endpoint := fs.ConfigFile.MustValue(name, "endpoint", defaultEndpoint) f := &Fs{ name: name, bucket: bucket, root: directory, account: account, key: key, endpoint: endpoint, srv: rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler), pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), } err = f.authorizeAccount() if err != nil { return nil, errors.Wrap(err, "failed to authorize account") } if f.root != "" { f.root += "/" // Check to see if the (bucket,directory) is actually an existing file oldRoot := f.root remote := path.Base(directory) f.root = path.Dir(directory) if f.root == "." { f.root = "" } else { f.root += "/" } obj := f.NewFsObject(remote) if obj != nil { return fs.NewLimited(f, obj), nil } f.root = oldRoot } return f, nil }
// NewFs contstructs an FsStorage from the path, bucket:path func NewFs(name, root string) (fs.Fs, error) { oAuthClient, err := oauthutil.NewClient(name, storageConfig) if err != nil { log.Fatalf("Failed to configure Google Cloud Storage: %v", err) } bucket, directory, err := parsePath(root) if err != nil { return nil, err } f := &FsStorage{ name: name, bucket: bucket, root: directory, projectNumber: fs.ConfigFile.MustValue(name, "project_number"), objectAcl: fs.ConfigFile.MustValue(name, "object_acl"), bucketAcl: fs.ConfigFile.MustValue(name, "bucket_acl"), } if f.objectAcl == "" { f.objectAcl = "private" } if f.bucketAcl == "" { f.bucketAcl = "private" } // Create a new authorized Drive client. f.client = oAuthClient f.svc, err = storage.New(f.client) if err != nil { return nil, fmt.Errorf("Couldn't create Google Cloud Storage client: %s", err) } if f.root != "" { f.root += "/" // Check to see if the object exists _, err = f.svc.Objects.Get(bucket, directory).Do() if err == nil { remote := path.Base(directory) f.root = path.Dir(directory) if f.root == "." { f.root = "" } else { f.root += "/" } obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } } return f, nil }
// NewFs constructs an Fs from the path, container:path
//
// If the path turns out to name a file rather than a directory, a copy of
// the Fs rebased on the parent directory is returned, limited to that file.
func NewFs(name, root string) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, err := oauthutil.NewClient(name, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure One Drive: %v", err)
	}
	f := &Fs{
		name:  name,
		root:  root,
		srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}
	f.srv.SetErrorHandler(errorHandler)
	// Get rootID by reading the metadata of the drive root
	rootInfo, _, err := f.readMetaDataForPath("")
	if err != nil || rootInfo.ID == "" {
		return nil, fmt.Errorf("Failed to get root: %v", err)
	}
	f.dirCache = dircache.New(root, rootInfo.ID, f)
	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file: retry with the parent directory as the root
		newRoot, remote := dircache.SplitPath(root)
		newF := *f // shallow copy, rebased below
		newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		obj := newF.newObjectWithInfo(remote, nil)
		if obj == nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return a Fs Limited to this object
		return fs.NewLimited(&newF, obj), nil
	}
	return f, nil
}
// NewFs contstructs an FsLocal from the path func NewFs(name, root string) (fs.Fs, error) { root = filepath.ToSlash(path.Clean(root)) f := &FsLocal{ root: root, warned: make(map[string]struct{}), } // Check to see if this points to a file fi, err := os.Lstat(f.root) if err == nil && fi.Mode().IsRegular() { // It is a file, so use the parent as the root remote := path.Base(root) f.root = path.Dir(root) obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } return f, nil }
// NewFs contstructs an Fs from the path, bucket:path func NewFs(name, root string) (fs.Fs, error) { bucket, directory, err := s3ParsePath(root) if err != nil { return nil, err } c, ses, err := s3Connection(name) if err != nil { return nil, err } f := &Fs{ name: name, c: c, bucket: bucket, ses: ses, // FIXME perm: s3.Private, // FIXME need user to specify root: directory, locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"), sse: fs.ConfigFile.MustValue(name, "server_side_encryption"), } if f.root != "" { f.root += "/" // Check to see if the object exists req := s3.HeadObjectInput{ Bucket: &f.bucket, Key: &directory, } _, err = f.c.HeadObject(&req) if err == nil { remote := path.Base(directory) f.root = path.Dir(directory) if f.root == "." { f.root = "" } else { f.root += "/" } obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } } // f.listMultipartUploads() return f, nil }
// NewFs constructs an FsLocal from the path func NewFs(name, root string) (fs.Fs, error) { var err error f := &FsLocal{ name: name, warned: make(map[string]struct{}), } f.root = filterPath(f.cleanUtf8(root)) // Check to see if this points to a file fi, err := os.Lstat(f.root) if err == nil && fi.Mode().IsRegular() { // It is a file, so use the parent as the root var remote string f.root, remote = getDirFile(f.root) obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } return f, nil }
// NewFs contstructs an Fs from the path, container:path func NewFs(name, root string) (fs.Fs, error) { if uploadChunkSize > maxUploadChunkSize { return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize) } db, err := newDropbox(name) if err != nil { return nil, err } f := &Fs{ name: name, db: db, } f.setRoot(root) // Read the token from the config file token := fs.ConfigFile.MustValue(name, "token") // Set our custom context which enables our custom transport for timeouts etc db.SetContext(oauthutil.Context()) // Authorize the client db.SetAccessToken(token) // See if the root is actually an object entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit) if err == nil && !entry.IsDir { remote := path.Base(f.root) newRoot := path.Dir(f.root) if newRoot == "." { newRoot = "" } f.setRoot(newRoot) obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } return f, nil }
// NewFsWithConnection contstructs an Fs from the path, container:path // and authenticated connection func NewFsWithConnection(name, root string, c *swift.Connection) (fs.Fs, error) { container, directory, err := parsePath(root) if err != nil { return nil, err } f := &Fs{ name: name, c: c, container: container, segmentsContainer: container + "_segments", root: directory, } // StorageURL overloading storageURL := fs.ConfigFile.MustValue(name, "storage_url") if storageURL != "" { f.c.StorageUrl = storageURL f.c.Auth = newAuth(f.c.Auth, storageURL) } if f.root != "" { f.root += "/" // Check to see if the object exists - ignoring directory markers info, _, err := f.c.Object(container, directory) if err == nil && info.ContentType != directoryMarkerContentType { remote := path.Base(directory) f.root = path.Dir(directory) if f.root == "." { f.root = "" } else { f.root += "/" } obj := f.NewFsObject(remote) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } } return f, nil }
// NewFs constructs an Fs from the path, container:path func NewFs(name, root string) (fs.Fs, error) { //read access token from config token, err := getAccessToken(name) if err != nil { return nil, err } //create new client yandexDisk := yandex.NewClient(token.AccessToken, fs.Config.Client()) f := &Fs{ yd: yandexDisk, } f.setRoot(root) //limited fs // Check to see if the object exists and is a file //request object meta info var opt2 yandex.ResourceInfoRequestOptions if ResourceInfoResponse, err := yandexDisk.NewResourceInfoRequest(root, opt2).Exec(); err != nil { //return err } else { if ResourceInfoResponse.ResourceType == "file" { //limited fs remote := path.Base(root) f.setRoot(path.Dir(root)) obj := f.newFsObjectWithInfo(remote, ResourceInfoResponse) // return a Fs Limited to this object return fs.NewLimited(f, obj), nil } } return f, nil }
// NewFs constructs an FsDrive from the path, container:path
//
// If the path turns out to name a file rather than a directory, a copy of
// the Fs rebased on the parent directory is returned, limited to that file.
func NewFs(name, path string) (fs.Fs, error) {
	// Validate the upload chunk size up front.
	if !isPowerOfTwo(int64(chunkSize)) {
		return nil, fmt.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
	}
	if chunkSize < 256*1024 {
		return nil, fmt.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
	}
	t, err := driveAuth.NewTransport(name)
	if err != nil {
		return nil, err
	}
	root, err := parseDrivePath(path)
	if err != nil {
		return nil, err
	}
	f := &FsDrive{
		root:      root,
		dirCache:  newDirCache(),
		pacer:     make(chan struct{}, 1), // one-token channel used to pace API calls
		sleepTime: minSleep,
	}
	// Put the first pacing token in
	f.pacer <- struct{}{}
	// Create a new authorized Drive client.
	f.client = t.Client()
	f.svc, err = drive.New(f.client)
	if err != nil {
		return nil, fmt.Errorf("Couldn't create Drive client: %s", err)
	}
	// Read About so we know the root path
	f.call(&err, func() {
		f.about, err = f.svc.About.Get().Do()
	})
	if err != nil {
		return nil, fmt.Errorf("Couldn't read info about Drive: %s", err)
	}
	// Find the Id of the true root and clear everything
	f.resetRoot()
	// Find the current root
	err = f.findRoot(false)
	if err != nil {
		// Assume it is a file: retry with the parent directory as the root
		newRoot, remote := splitPath(root)
		newF := *f // shallow copy, rebased below
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.findRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		obj, err := newF.newFsObjectWithInfoErr(remote, nil)
		if err != nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return a Fs Limited to this object
		return fs.NewLimited(&newF, obj), nil
	}
	// fmt.Printf("Root id %s", f.rootId)
	return f, nil
}
// NewFs constructs an FsDrive from the path, container:path
//
// If the path turns out to name a file rather than a directory, a copy of
// the Fs rebased on the parent directory is returned, limited to that file.
func NewFs(name, path string) (fs.Fs, error) {
	// Validate the upload chunk size up front.
	if !isPowerOfTwo(int64(chunkSize)) {
		return nil, fmt.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
	}
	if chunkSize < 256*1024 {
		return nil, fmt.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
	}
	oAuthClient, err := oauthutil.NewClient(name, driveConfig)
	if err != nil {
		log.Fatalf("Failed to configure drive: %v", err)
	}
	root, err := parseDrivePath(path)
	if err != nil {
		return nil, err
	}
	f := &FsDrive{
		name:  name,
		root:  root,
		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}
	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = drive.New(f.client)
	if err != nil {
		return nil, fmt.Errorf("Couldn't create Drive client: %s", err)
	}
	// Read About so we know the root path
	err = f.pacer.Call(func() (bool, error) {
		f.about, err = f.svc.About.Get().Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, fmt.Errorf("Couldn't read info about Drive: %s", err)
	}
	f.dirCache = dircache.New(root, f.about.RootFolderId, f)
	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file: retry with the parent directory as the root
		newRoot, remote := dircache.SplitPath(root)
		newF := *f // shallow copy, rebased below
		newF.dirCache = dircache.New(newRoot, f.about.RootFolderId, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		obj, err := newF.newFsObjectWithInfoErr(remote, nil)
		if err != nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return a Fs Limited to this object
		return fs.NewLimited(&newF, obj), nil
	}
	// fmt.Printf("Root id %s", f.dirCache.RootID())
	return f, nil
}
// NewFs constructs an Fs from the path, container:path
//
// If the path turns out to name a file rather than a directory, a copy of
// the Fs rebased on the parent directory is returned, limited to that file.
func NewFs(name, root string) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, err := oauthutil.NewClient(name, acdConfig)
	if err != nil {
		log.Fatalf("Failed to configure amazon cloud drive: %v", err)
	}
	c := acd.NewClient(oAuthClient)
	c.UserAgent = fs.UserAgent
	f := &Fs{
		name:         name,
		root:         root,
		c:            c,
		pacer:        pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
		noAuthClient: fs.Config.Client(),
	}
	// Update endpoints
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		_, resp, err = f.c.Account.GetEndpoints()
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("Failed to get endpoints: %v", err)
	}
	// Get rootID
	var rootInfo *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		rootInfo, resp, err = f.c.Nodes.GetRoot()
		return shouldRetry(resp, err)
	})
	if err != nil || rootInfo.Id == nil {
		return nil, fmt.Errorf("Failed to get root: %v", err)
	}
	f.dirCache = dircache.New(root, *rootInfo.Id, f)
	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file: retry with the parent directory as the root
		newRoot, remote := dircache.SplitPath(root)
		newF := *f // shallow copy, rebased below
		newF.dirCache = dircache.New(newRoot, *rootInfo.Id, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		obj := newF.newFsObjectWithInfo(remote, nil)
		if obj == nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return a Fs Limited to this object
		return fs.NewLimited(&newF, obj), nil
	}
	return f, nil
}