// Configuration helper - called after the user has put in the defaults
func configHelper(name string) {
	// See if already have a token
	token := fs.ConfigFileGet(name, "token")
	if token != "" {
		fmt.Printf("Already have a dropbox token - refresh?\n")
		if !fs.Confirm() {
			return
		}
	}

	// Get a dropbox
	db, err := newDropbox(name)
	if err != nil {
		log.Fatalf("Failed to create dropbox client: %v", err)
	}

	// This method will ask the user to visit a URL and paste the generated code.
	if err := db.Auth(); err != nil {
		log.Fatalf("Failed to authorize: %v", err)
	}

	// Get the token
	token = db.AccessToken()

	// Stuff it in the config file if it has changed
	old := fs.ConfigFileGet(name, "token")
	if token != old {
		fs.ConfigFileSet(name, "token", token)
		fs.SaveConfig()
	}
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string) (fs.Fs, error) {
	mode, err := NewNameEncryptionMode(fs.ConfigFileGet(name, "filename_encryption", "standard"))
	if err != nil {
		return nil, err
	}
	password := fs.ConfigFileGet(name, "password", "")
	if password == "" {
		return nil, errors.New("password not set in config file")
	}
	password, err = fs.Reveal(password)
	if err != nil {
		return nil, errors.Wrap(err, "failed to decrypt password")
	}
	salt := fs.ConfigFileGet(name, "password2", "")
	if salt != "" {
		salt, err = fs.Reveal(salt)
		if err != nil {
			return nil, errors.Wrap(err, "failed to decrypt password2")
		}
	}
	cipher, err := newCipher(mode, password, salt)
	if err != nil {
		return nil, errors.Wrap(err, "failed to make cipher")
	}
	remote := fs.ConfigFileGet(name, "remote")
	if strings.HasPrefix(remote, name+":") {
		return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
	}
	// Look for a file first
	remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
	wrappedFs, err := fs.NewFs(remotePath)
	// if that didn't produce a file, look for a directory
	if err != fs.ErrorIsFile {
		remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
		wrappedFs, err = fs.NewFs(remotePath)
	}
	if err != fs.ErrorIsFile && err != nil {
		return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
	}
	f := &Fs{
		Fs:     wrappedFs,
		name:   name,
		root:   rpath,
		cipher: cipher,
		mode:   mode,
	}
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	f.features = (&fs.Features{
		CaseInsensitive: mode == NameEncryptionOff,
		DuplicateFiles:  true,
		ReadMimeType:    false, // MimeTypes not supported with crypt
		WriteMimeType:   false,
	}).Fill(f).Mask(wrappedFs)
	return f, err
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	bucket, directory, err := s3ParsePath(root)
	if err != nil {
		return nil, err
	}
	c, ses, err := s3Connection(name)
	if err != nil {
		return nil, err
	}
	f := &Fs{
		name:               name,
		c:                  c,
		bucket:             bucket,
		ses:                ses,
		acl:                fs.ConfigFileGet(name, "acl"),
		root:               directory,
		locationConstraint: fs.ConfigFileGet(name, "location_constraint"),
		sse:                fs.ConfigFileGet(name, "server_side_encryption"),
		storageClass:       fs.ConfigFileGet(name, "storage_class"),
	}
	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
	if *s3ACL != "" {
		f.acl = *s3ACL
	}
	if *s3StorageClass != "" {
		f.storageClass = *s3StorageClass
	}
	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		req := s3.HeadObjectInput{
			Bucket: &f.bucket,
			Key:    &directory,
		}
		_, err = f.c.HeadObject(&req)
		if err == nil {
			f.root = path.Dir(directory)
			if f.root == "." {
				f.root = ""
			} else {
				f.root += "/"
			}
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	// f.listMultipartUploads()
	return f, nil
}

// Makes a new dropbox from the config
func newDropbox(name string) (*dropbox.Dropbox, error) {
	db := dropbox.NewDropbox()

	appKey := fs.ConfigFileGet(name, "app_key")
	if appKey == "" {
		appKey = rcloneAppKey
	}
	appSecret := fs.ConfigFileGet(name, "app_secret")
	if appSecret == "" {
		appSecret = fs.MustReveal(rcloneEncryptedAppSecret)
	}

	err := db.SetAppInfo(appKey, appSecret)
	return db, err
}

// overrideCredentials sets the ClientID and ClientSecret from the
// config file if they are not blank.
// If any value is overridden, true is returned.
// The origConfig is not modified; a copy is returned.
func overrideCredentials(name string, origConfig *oauth2.Config) (config *oauth2.Config, changed bool) {
	config = new(oauth2.Config)
	*config = *origConfig
	changed = false
	ClientID := fs.ConfigFileGet(name, fs.ConfigClientID)
	if ClientID != "" {
		config.ClientID = ClientID
		changed = true
	}
	ClientSecret := fs.ConfigFileGet(name, fs.ConfigClientSecret)
	if ClientSecret != "" {
		config.ClientSecret = ClientSecret
		changed = true
	}
	return config, changed
}

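// Illustrative sketch, not part of the original source: how a backend
// might feed its compiled-in defaults through overrideCredentials so
// that per-remote values from the config file win. The remote name
// "myremote" and the default credential strings are assumptions made
// for the example only.
func exampleOverrideCredentials() {
	defaults := &oauth2.Config{
		ClientID:     "compiled-in-client-id",
		ClientSecret: "compiled-in-client-secret",
	}
	config, changed := overrideCredentials("myremote", defaults)
	if changed {
		// config is a copy carrying the overrides; defaults is untouched
		fmt.Printf("using client ID %q from the config file\n", config.ClientID)
	}
}
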
// NewFs constructs an Fs from the path
func NewFs(name, root string) (fs.Fs, error) {
	var err error

	nounc := fs.ConfigFileGet(name, "nounc")
	f := &Fs{
		name:   name,
		warned: make(map[string]struct{}),
		nounc:  nounc == "true",
		dev:    devUnset,
	}
	f.root = f.cleanPath(root)
	f.features = (&fs.Features{CaseInsensitive: f.caseInsensitive()}).Fill(f)

	// Check to see if this points to a file
	fi, err := os.Lstat(f.root)
	if err == nil {
		f.dev = readDevice(fi)
	}
	if err == nil && fi.Mode().IsRegular() {
		// It is a file, so use the parent as the root
		f.root, _ = getDirFile(f.root)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// getToken returns the token saved in the config file under
// section name.
func getToken(name string) (*oauth2.Token, error) {
	tokenString := fs.ConfigFileGet(name, fs.ConfigToken)
	if tokenString == "" {
		return nil, errors.New("empty token found - please run rclone config again")
	}
	token := new(oauth2.Token)
	err := json.Unmarshal([]byte(tokenString), token)
	if err != nil {
		return nil, err
	}
	// if it has data then return it
	if token.AccessToken != "" && token.RefreshToken != "" {
		return token, nil
	}
	// otherwise try parsing as oldToken
	oldtoken := new(oldToken)
	err = json.Unmarshal([]byte(tokenString), oldtoken)
	if err != nil {
		return nil, err
	}
	// Fill in result into new token
	token.AccessToken = oldtoken.AccessToken
	token.RefreshToken = oldtoken.RefreshToken
	token.Expiry = oldtoken.Expiry
	// Save new format in config file
	err = putToken(name, token)
	if err != nil {
		return nil, err
	}
	return token, nil
}

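// Sketch only, inferred from the fields getToken reads above rather
// than copied from the original source: the legacy oldToken format is
// assumed to carry at least these fields, which is all the migration
// needs in order to rewrite the token in the new oauth2 format.
//
//	type oldToken struct {
//		AccessToken  string
//		RefreshToken string
//		Expiry       time.Time
//	}
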
// swiftConnection makes a connection to swift
func swiftConnection(name string) (*swift.Connection, error) {
	userName := fs.ConfigFileGet(name, "user")
	if userName == "" {
		return nil, errors.New("user not found")
	}
	apiKey := fs.ConfigFileGet(name, "key")
	if apiKey == "" {
		return nil, errors.New("key not found")
	}
	authURL := fs.ConfigFileGet(name, "auth")
	if authURL == "" {
		return nil, errors.New("auth not found")
	}
	c := &swift.Connection{
		UserName:       userName,
		ApiKey:         apiKey,
		AuthUrl:        authURL,
		AuthVersion:    fs.ConfigFileGetInt(name, "auth_version", 0),
		Tenant:         fs.ConfigFileGet(name, "tenant"),
		Region:         fs.ConfigFileGet(name, "region"),
		Domain:         fs.ConfigFileGet(name, "domain"),
		TenantDomain:   fs.ConfigFileGet(name, "tenant_domain"),
		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
		Transport:      fs.Config.Transport(),
	}
	err := c.Authenticate()
	if err != nil {
		return nil, err
	}
	return c, nil
}

// getAccessToken reads the access token from the config file string
func getAccessToken(name string) (*oauth2.Token, error) {
	// Read the token from the config file
	tokenConfig := fs.ConfigFileGet(name, "token")
	// Get access token from config string
	decoder := json.NewDecoder(strings.NewReader(tokenConfig))
	var result *oauth2.Token
	err := decoder.Decode(&result)
	if err != nil {
		return nil, err
	}
	return result, nil
}

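// Illustrative note, an assumption rather than something taken from the
// original source: the "token" value decoded above is the JSON encoding
// of an oauth2.Token, so the corresponding config file entry looks
// roughly like this (values are placeholders):
//
//	token = {"access_token":"XXXX","token_type":"bearer","refresh_token":"YYYY","expiry":"2017-03-04T05:06:07Z"}
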
// putToken stores the token in the config file
//
// This saves the config file if it changes
func putToken(name string, token *oauth2.Token) error {
	tokenBytes, err := json.Marshal(token)
	if err != nil {
		return err
	}
	tokenString := string(tokenBytes)
	old := fs.ConfigFileGet(name, fs.ConfigToken)
	if tokenString != old {
		err = fs.ConfigSetValueAndSave(name, fs.ConfigToken, tokenString)
		if err != nil {
			fs.ErrorLog(nil, "Failed to save new token in config file: %v", err)
		} else {
			fs.Debug(name, "Saved new token in config file")
		}
	}
	return nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	if uploadChunkSize > maxUploadChunkSize {
		return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
	}
	db, err := newDropbox(name)
	if err != nil {
		return nil, err
	}
	f := &Fs{
		name: name,
		db:   db,
	}
	f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f)
	f.setRoot(root)

	// Read the token from the config file
	token := fs.ConfigFileGet(name, "token")

	// Set our custom context which enables our custom transport for timeouts etc
	db.SetContext(oauthutil.Context())

	// Authorize the client
	db.SetAccessToken(token)

	// See if the root is actually an object
	entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
	if err == nil && !entry.IsDir {
		newRoot := path.Dir(f.root)
		if newRoot == "." {
			newRoot = ""
		}
		f.setRoot(newRoot)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection
func NewFsWithConnection(name, root string, c *swift.Connection) (fs.Fs, error) {
	container, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}
	f := &Fs{
		name:              name,
		c:                 c,
		container:         container,
		segmentsContainer: container + "_segments",
		root:              directory,
	}
	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
	// StorageURL overloading
	storageURL := fs.ConfigFileGet(name, "storage_url")
	if storageURL != "" {
		f.c.StorageUrl = storageURL
		f.c.Auth = newAuth(f.c.Auth, storageURL)
	}
	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists - ignoring directory markers
		info, _, err := f.c.Object(container, directory)
		if err == nil && info.ContentType != directoryMarkerContentType {
			f.root = path.Dir(directory)
			if f.root == "." {
				f.root = ""
			} else {
				f.root += "/"
			}
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	if uploadCutoff < chunkSize {
		return nil, errors.Errorf("b2: upload cutoff must be less than chunk size %v - was %v", chunkSize, uploadCutoff)
	}
	if chunkSize < minChunkSize {
		return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
	}
	bucket, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}
	account := fs.ConfigFileGet(name, "account")
	if account == "" {
		return nil, errors.New("account not found")
	}
	key := fs.ConfigFileGet(name, "key")
	if key == "" {
		return nil, errors.New("key not found")
	}
	endpoint := fs.ConfigFileGet(name, "endpoint", defaultEndpoint)
	f := &Fs{
		name:         name,
		bucket:       bucket,
		root:         directory,
		account:      account,
		key:          key,
		endpoint:     endpoint,
		srv:          rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler),
		pacer:        pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
		uploadTokens: make(chan struct{}, fs.Config.Transfers),
		extraTokens:  make(chan struct{}, fs.Config.Transfers),
	}
	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
	// Set the test flag if required
	if *b2TestMode != "" {
		testMode := strings.TrimSpace(*b2TestMode)
		f.srv.SetHeader(testModeHeader, testMode)
		fs.Debug(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
	}
	// Fill up the upload and extra tokens
	for i := 0; i < fs.Config.Transfers; i++ {
		f.returnUploadToken()
		f.extraTokens <- struct{}{}
	}
	err = f.authorizeAccount()
	if err != nil {
		return nil, errors.Wrap(err, "failed to authorize account")
	}
	if f.root != "" {
		f.root += "/"
		// Check to see if the (bucket,directory) is actually an existing file
		oldRoot := f.root
		remote := path.Base(directory)
		f.root = path.Dir(directory)
		if f.root == "." {
			f.root = ""
		} else {
			f.root += "/"
		}
		_, err := f.NewObject(remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				f.root = oldRoot
				return f, nil
			}
			return nil, err
		}
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "google cloud storage",
		Description: "Google Cloud Storage (this is not Google Drive)",
		NewFs:       NewFs,
		Config: func(name string) {
			if fs.ConfigFileGet(name, "service_account_file") != "" {
				return
			}
			err := oauthutil.Config("google cloud storage", name, storageConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: fs.ConfigClientID,
			Help: "Google Application Client Id - leave blank normally.",
		}, {
			Name: fs.ConfigClientSecret,
			Help: "Google Application Client Secret - leave blank normally.",
		}, {
			Name: "project_number",
			Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path - needed only if you want to use SA instead of interactive login.",
		}, {
			Name: "object_acl",
			Help: "Access Control List for new objects.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Object owner gets OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "bucketOwnerFullControl",
				Help:  "Object owner gets OWNER access, and project team owners get OWNER access.",
			}, {
				Value: "bucketOwnerRead",
				Help:  "Object owner gets OWNER access, and project team owners get READER access.",
			}, {
				Value: "private",
				Help:  "Object owner gets OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Object owner gets OWNER access, and project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Object owner gets OWNER access, and all Users get READER access.",
			}},
		}, {
			Name: "bucket_acl",
			Help: "Access Control List for new buckets.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Project team owners get OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "private",
				Help:  "Project team owners get OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Project team owners get OWNER access, and all Users get READER access.",
			}, {
				Value: "publicReadWrite",
				Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
			}},
		}},
	})
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	var oAuthClient *http.Client
	var err error

	serviceAccountPath := fs.ConfigFileGet(name, "service_account_file")
	if serviceAccountPath != "" {
		oAuthClient, err = getServiceAccountClient(serviceAccountPath)
		if err != nil {
			log.Fatalf("Failed configuring Google Cloud Storage Service Account: %v", err)
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
		if err != nil {
			log.Fatalf("Failed to configure Google Cloud Storage: %v", err)
		}
	}

	bucket, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:          name,
		bucket:        bucket,
		root:          directory,
		projectNumber: fs.ConfigFileGet(name, "project_number"),
		objectACL:     fs.ConfigFileGet(name, "object_acl"),
		bucketACL:     fs.ConfigFileGet(name, "bucket_acl"),
	}
	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
	if f.objectACL == "" {
		f.objectACL = "private"
	}
	if f.bucketACL == "" {
		f.bucketACL = "private"
	}

	// Create a new authorized Google Cloud Storage client.
	f.client = oAuthClient
	f.svc, err = storage.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
	}

	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		_, err = f.svc.Objects.Get(bucket, directory).Do()
		if err == nil {
			f.root = path.Dir(directory)
			if f.root == "." {
				f.root = ""
			} else {
				f.root += "/"
			}
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}

// Config does the initial creation of the token
//
// It may run an internal webserver to receive the results
func Config(id, name string, config *oauth2.Config) error {
	config, changed := overrideCredentials(name, config)
	automatic := fs.ConfigFileGet(name, fs.ConfigAutomatic) != ""

	// See if already have a token
	tokenString := fs.ConfigFileGet(name, "token")
	if tokenString != "" {
		fmt.Printf("Already have a token - refresh?\n")
		if !fs.Confirm() {
			return nil
		}
	}

	// Detect whether we should use internal web server
	useWebServer := false
	switch config.RedirectURL {
	case RedirectURL, RedirectPublicURL, RedirectLocalhostURL:
		useWebServer = true
		if automatic {
			break
		}
		fmt.Printf("Use auto config?\n")
		fmt.Printf(" * Say Y if not sure\n")
		fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
		auto := fs.Confirm()
		if !auto {
			fmt.Printf("For this to work, you will need rclone available on a machine that has a web browser available.\n")
			fmt.Printf("Execute the following on your machine:\n")
			if changed {
				fmt.Printf("\trclone authorize %q %q %q\n", id, config.ClientID, config.ClientSecret)
			} else {
				fmt.Printf("\trclone authorize %q\n", id)
			}
			fmt.Println("Then paste the result below:")
			code := ""
			for code == "" {
				fmt.Printf("result> ")
				code = strings.TrimSpace(fs.ReadLine())
			}
			token := &oauth2.Token{}
			err := json.Unmarshal([]byte(code), token)
			if err != nil {
				return err
			}
			return putToken(name, token)
		}
	case TitleBarRedirectURL:
		useWebServer = automatic
		if !automatic {
			fmt.Printf("Use auto config?\n")
			fmt.Printf(" * Say Y if not sure\n")
			fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
			useWebServer = fs.Confirm()
		}
		if useWebServer {
			// copy the config and set to use the internal webserver
			configCopy := *config
			config = &configCopy
			config.RedirectURL = RedirectURL
		}
	}

	// Make random state
	stateBytes := make([]byte, 16)
	_, err := rand.Read(stateBytes)
	if err != nil {
		return err
	}
	state := fmt.Sprintf("%x", stateBytes)
	authURL := config.AuthCodeURL(state)

	// Prepare webserver
	server := authServer{
		state:       state,
		bindAddress: bindAddress,
		authURL:     authURL,
	}
	if useWebServer {
		server.code = make(chan string, 1)
		go server.Start()
		defer server.Stop()
		authURL = "http://" + bindAddress + "/auth"
	}

	// Generate a URL for the user to visit for authorization.
	_ = open.Start(authURL)
	fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
	fmt.Printf("Log in and authorize rclone for access\n")

	var authCode string
	if useWebServer {
		// Read the code, and exchange it for a token.
		fmt.Printf("Waiting for code...\n")
		authCode = <-server.code
		if authCode != "" {
			fmt.Printf("Got code\n")
		} else {
			return errors.New("failed to get code")
		}
	} else {
		// Read the code, and exchange it for a token.
		fmt.Printf("Enter verification code> ")
		authCode = fs.ReadLine()
	}
	token, err := config.Exchange(oauth2.NoContext, authCode)
	if err != nil {
		return errors.Wrap(err, "failed to get token")
	}

	// Print code if we do automatic retrieval
	if automatic {
		result, err := json.Marshal(token)
		if err != nil {
			return errors.Wrap(err, "failed to marshal token")
		}
		fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
	}
	return putToken(name, token)
}

// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, *session.Session, error) {
	// Make the auth
	v := credentials.Value{
		AccessKeyID:     fs.ConfigFileGet(name, "access_key_id"),
		SecretAccessKey: fs.ConfigFileGet(name, "secret_access_key"),
	}

	// first provider to supply a credential set "wins"
	providers := []credentials.Provider{
		// use static credentials if they're present (checked by provider)
		&credentials.StaticProvider{Value: v},

		// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
		// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
		&credentials.EnvProvider{},

		// Pick up IAM role in case we're on EC2
		&ec2rolecreds.EC2RoleProvider{
			Client: ec2metadata.New(session.New(), &aws.Config{
				HTTPClient: &http.Client{Timeout: 1 * time.Second}, // low timeout to ec2 metadata service
			}),
			ExpiryWindow: 3,
		},
	}
	cred := credentials.NewChainCredentials(providers)

	switch {
	case fs.ConfigFileGetBool(name, "env_auth", false):
		// No need for empty checks if "env_auth" is true
	case v.AccessKeyID == "" && v.SecretAccessKey == "":
		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
		cred = credentials.AnonymousCredentials
	case v.AccessKeyID == "":
		return nil, nil, errors.New("access_key_id not found")
	case v.SecretAccessKey == "":
		return nil, nil, errors.New("secret_access_key not found")
	}

	endpoint := fs.ConfigFileGet(name, "endpoint")
	region := fs.ConfigFileGet(name, "region")
	if region == "" && endpoint == "" {
		endpoint = "https://s3.amazonaws.com/"
	}
	if region == "" {
		region = "us-east-1"
	}
	awsConfig := aws.NewConfig().
		WithRegion(region).
		WithMaxRetries(maxRetries).
		WithCredentials(cred).
		WithEndpoint(endpoint).
		WithHTTPClient(fs.Config.Client()).
		WithS3ForcePathStyle(true)
	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
	ses := session.New()
	c := s3.New(ses, awsConfig)
	if region == "other-v2-signature" {
		fs.Debug(name, "Using v2 auth")
		signer := func(req *request.Request) {
			// Ignore AnonymousCredentials object
			if req.Config.Credentials == credentials.AnonymousCredentials {
				return
			}
			sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
		}
		c.Handlers.Sign.Clear()
		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
		c.Handlers.Sign.PushBack(signer)
	}
	return c, ses, nil
}

}

var commandDefintion = &cobra.Command{
	Use:   "listremotes",
	Short: `List all the remotes in the config file.`,
	Long: `
rclone listremotes lists all the available remotes from the config file.

When used with the -l flag it lists the types too.
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 0, command, args)
		remotes := fs.ConfigFileSections()
		sort.Strings(remotes)
		maxlen := 1
		for _, remote := range remotes {
			if len(remote) > maxlen {
				maxlen = len(remote)
			}
		}
		for _, remote := range remotes {
			if listLong {
				remoteType := fs.ConfigFileGet(remote, "type", "UNKNOWN")
				fmt.Printf("%-*s %s\n", maxlen+1, remote+":", remoteType)
			} else {
				fmt.Printf("%s:\n", remote)
			}
		}
	},
}
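
// Illustrative example, not part of the original source: given two
// hypothetical remotes named "s3backup" and "gdocs", the Printf format
// above produces output like the following.
//
// Without -l:
//
//	s3backup:
//	gdocs:
//
// With -l (names padded to the longest, then the type from the config):
//
//	s3backup: s3
//	gdocs:    drive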