// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	bucket, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}
	f := &Fs{
		name:   name,
		bucket: bucket,
		root:   directory,
	}
	account := fs.ConfigFile.MustValue(name, "account")
	if account == "" {
		return nil, errors.New("account not found")
	}
	key := fs.ConfigFile.MustValue(name, "key")
	if key == "" {
		return nil, errors.New("key not found")
	}
	endpoint := fs.ConfigFile.MustValue(name, "endpoint", defaultEndpoint)
	f.srv = rest.NewClient(fs.Config.Client()).SetRoot(endpoint + "/b2api/v1").SetErrorHandler(errorHandler)

	// Authenticate against the B2 API and pick up the account-specific API URL and token
	opts := rest.Opts{
		Method:   "GET",
		Path:     "/b2_authorize_account",
		UserName: account,
		Password: key,
	}
	_, err = f.srv.CallJSON(&opts, nil, &f.info)
	if err != nil {
		return nil, fmt.Errorf("failed to authenticate: %v", err)
	}
	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)

	if f.root != "" {
		f.root += "/"
		// Check to see if the (bucket,directory) is actually an existing file
		oldRoot := f.root
		remote := path.Base(directory)
		f.root = path.Dir(directory)
		if f.root == "." {
			f.root = ""
		} else {
			f.root += "/"
		}
		obj := f.NewFsObject(remote)
		if obj != nil {
			return fs.NewLimited(f, obj), nil
		}
		f.root = oldRoot
	}
	return f, nil
}
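// Illustrative sketch only: the b2 NewFs above depends on a parsePath helper
// that splits a "bucket/path" root into its bucket and directory parts. The
// real rclone helper may differ; a minimal version matching the call site
// (assuming the standard strings package alongside the errors package already
// used above) could look like this.
func parseBucketPathSketch(root string) (bucket, directory string, err error) {
	root = strings.Trim(root, "/")
	if root == "" {
		return "", "", errors.New("can't parse empty root - need a bucket name")
	}
	parts := strings.SplitN(root, "/", 2)
	bucket = parts[0]
	if len(parts) > 1 {
		directory = parts[1]
	}
	return bucket, directory, nil
}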
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	if uploadCutoff < chunkSize {
		return nil, errors.Errorf("b2: upload cutoff must be at least chunk size %v - was %v", chunkSize, uploadCutoff)
	}
	if chunkSize < minChunkSize {
		return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
	}
	bucket, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}
	account := fs.ConfigFile.MustValue(name, "account")
	if account == "" {
		return nil, errors.New("account not found")
	}
	key := fs.ConfigFile.MustValue(name, "key")
	if key == "" {
		return nil, errors.New("key not found")
	}
	endpoint := fs.ConfigFile.MustValue(name, "endpoint", defaultEndpoint)
	f := &Fs{
		name:     name,
		bucket:   bucket,
		root:     directory,
		account:  account,
		key:      key,
		endpoint: endpoint,
		srv:      rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler),
		pacer:    pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}
	err = f.authorizeAccount()
	if err != nil {
		return nil, errors.Wrap(err, "failed to authorize account")
	}
	if f.root != "" {
		f.root += "/"
		// Check to see if the (bucket,directory) is actually an existing file
		oldRoot := f.root
		remote := path.Base(directory)
		f.root = path.Dir(directory)
		if f.root == "." {
			f.root = ""
		} else {
			f.root += "/"
		}
		obj := f.NewFsObject(remote)
		if obj != nil {
			return fs.NewLimited(f, obj), nil
		}
		f.root = oldRoot
	}
	return f, nil
}
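// Sketch only: authorizeAccount, called above, is not shown here. Judging from
// the inline authorization in the earlier b2 NewFs, it is roughly equivalent to
// the following (the real method may additionally go through the pacer and
// guard against concurrent re-authentication).
func (f *Fs) authorizeAccountSketch() error {
	opts := rest.Opts{
		Method:   "GET",
		Path:     "/b2_authorize_account",
		UserName: f.account,
		Password: f.key,
	}
	f.srv.SetRoot(f.endpoint + "/b2api/v1")
	if _, err := f.srv.CallJSON(&opts, nil, &f.info); err != nil {
		return errors.Wrap(err, "failed to authenticate")
	}
	// All subsequent calls go to the account-specific API URL with the auth token
	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
	return nil
}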
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, _, err := oauthutil.NewClient(name, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure One Drive: %v", err)
	}

	f := &Fs{
		name:  name,
		root:  root,
		srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}
	f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f)
	f.srv.SetErrorHandler(errorHandler)

	// Get rootID
	rootInfo, _, err := f.readMetaDataForPath("")
	if err != nil || rootInfo.ID == "" {
		return nil, errors.Wrap(err, "failed to get root")
	}

	f.dirCache = dircache.New(root, rootInfo.ID, f)

	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		newF := *f
		newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := newF.newObjectWithInfo(remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// return an error with an fs which points to the parent
		return &newF, fs.ErrorIsFile
	}
	return f, nil
}
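// Illustrative sketch only: for the One Drive variants parsePath returns a
// single cleaned path rather than a (bucket, directory) pair. A minimal
// implementation consistent with the call site above (assuming the standard
// strings package) would simply trim surrounding slashes.
func parseOneDrivePathSketch(p string) (root string) {
	return strings.Trim(p, "/")
}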
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, err := oauthutil.NewClient(name, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure One Drive: %v", err)
	}

	f := &Fs{
		name:  name,
		root:  root,
		srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}
	f.srv.SetErrorHandler(errorHandler)

	// Get rootID
	rootInfo, _, err := f.readMetaDataForPath("")
	if err != nil || rootInfo.ID == "" {
		return nil, fmt.Errorf("failed to get root: %v", err)
	}

	f.dirCache = dircache.New(root, rootInfo.ID, f)

	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		newF := *f
		newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		obj := newF.newObjectWithInfo(remote, nil)
		if obj == nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return an Fs Limited to this object
		return fs.NewLimited(&newF, obj), nil
	}
	return f, nil
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	if uploadCutoff < chunkSize {
		return nil, errors.Errorf("b2: upload cutoff must be at least chunk size %v - was %v", chunkSize, uploadCutoff)
	}
	if chunkSize < minChunkSize {
		return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
	}
	bucket, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}
	account := fs.ConfigFileGet(name, "account")
	if account == "" {
		return nil, errors.New("account not found")
	}
	key := fs.ConfigFileGet(name, "key")
	if key == "" {
		return nil, errors.New("key not found")
	}
	endpoint := fs.ConfigFileGet(name, "endpoint", defaultEndpoint)
	f := &Fs{
		name:         name,
		bucket:       bucket,
		root:         directory,
		account:      account,
		key:          key,
		endpoint:     endpoint,
		srv:          rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler),
		pacer:        pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
		uploadTokens: make(chan struct{}, fs.Config.Transfers),
		extraTokens:  make(chan struct{}, fs.Config.Transfers),
	}
	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)

	// Set the test flag if required
	if *b2TestMode != "" {
		testMode := strings.TrimSpace(*b2TestMode)
		f.srv.SetHeader(testModeHeader, testMode)
		fs.Debug(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
	}

	// Fill up the upload and extra tokens
	for i := 0; i < fs.Config.Transfers; i++ {
		f.returnUploadToken()
		f.extraTokens <- struct{}{}
	}

	err = f.authorizeAccount()
	if err != nil {
		return nil, errors.Wrap(err, "failed to authorize account")
	}

	if f.root != "" {
		f.root += "/"
		// Check to see if the (bucket,directory) is actually an existing file
		oldRoot := f.root
		remote := path.Base(directory)
		f.root = path.Dir(directory)
		if f.root == "." {
			f.root = ""
		} else {
			f.root += "/"
		}
		_, err := f.NewObject(remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				f.root = oldRoot
				return f, nil
			}
			return nil, err
		}
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
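// Usage sketch (the remote name and path are hypothetical, not from the rclone
// source): the newest b2 NewFs signals "the root points at a single file" by
// returning a valid Fs rooted at the parent directory together with
// fs.ErrorIsFile, so callers are expected to treat that error specially rather
// than failing outright.
func newFsUsageSketch() {
	f, err := NewFs("myb2", "mybucket/photos/cat.jpg")
	switch {
	case err == fs.ErrorIsFile:
		// f is usable and rooted at mybucket/photos; the file itself can be
		// looked up relative to that root, e.g. via f.NewObject("cat.jpg").
	case err != nil:
		log.Fatalf("NewFs failed: %v", err)
	}
	_ = f
}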