func (r *Remote) restoreMount(m *mount.Mount) (err error) {
	if r.mockedRestoreMount != nil {
		return r.mockedRestoreMount(m)
	}

	// The two New calls are used to tweak how the log is displayed.
	log := logging.NewLogger("remote").New("restoreMount").New(
		"mountName", m.MountName,
		"syncMount", m.MountFolder.OneWaySyncMount,
		"prefetchAll", m.MountFolder.PrefetchAll,
	)

	// Enable debug for the mount that was originally using debug.
	if m.MountFolder.Debug {
		log.SetLevel(logging.DEBUG)
	}

	// First get the plain machine. We don't care about it being dialed or valid,
	// as we may just be setting the status with it.
	remoteMachine, err := r.GetMachine(m.MountName)
	if err != nil {
		return err
	}

	// If the machine does not have an http tracker, create it so that we can
	// get accurate online/offline information.
	if !remoteMachine.HasHTTPTracker() {
		// No need to return here; this just means we won't get accurate information
		// about online/offline *before the machine is valid*. This is used mainly
		// in the defer, to mark the remounting machine as offline.
		//
		// Later, we'll get a valid and dialed machine, which is assured to have an
		// http tracker or fail trying.
		if err := remoteMachine.InitHTTPTracker(); err != nil {
			log.Error("Unable to init http tracker before remount. err:%s", err)
		}
	}

	// Update the status based on the return value. Note that it's possible to
	// return before this call, if we can't get the machine, but that's a non-issue
	// for updating the machine status, since we failed to get the machine and
	// can't possibly update the status.
	defer func() {
		if err != nil {
			// Inform the user that we failed and are retrying.
			switch {
			case !remoteMachine.IsOnline() && remoteMachine.HasHTTPTracker():
				// The machine is offline / unreachable, so don't set the status to
				// remounting specifically.
				//
				// TODO: Check if we have internet here?
				remoteMachine.SetStatus(machine.MachineOffline, remountingButOffline)
			default:
				// The machine status is not offline/disconnected, therefore it may be
				// online and/or connected - but we failed to mount for another, unknown
				// reason. Use a generic status.
				remoteMachine.SetStatus(machine.MachineRemounting, autoRemountingAgain)
			}
		} else {
			// If there are no errors, clear the status.
			remoteMachine.SetStatus(machine.MachineStatusUnknown, "")
		}
	}()

	// Now try to get a valid, dialed machine. We're doing this *after* the
	// machine's SetStatus defer, so that we can set autoRemountingAgain as needed.
	//
	// Note that we're not assigning the returned instance here, because if we
	// cannot get a dialed machine then remoteMachine would be set to nil, causing
	// a panic in the defer above. Regardless, it's the same instance, so we don't
	// need it.
	if _, err := r.GetDialedMachine(m.MountName); err != nil {
		return err
	}

	if remoteMachine.IsMountingLocked() {
		log.Warning("Restore mount was attempted but the machine is mount locked")
		return machine.ErrMachineActionIsLocked
	}

	// Lock and defer unlock the machine mount actions.
	remoteMachine.LockMounting()
	defer remoteMachine.UnlockMounting()

	fsMountName, _ := mountcli.NewMountcli().FindMountNameByPath(m.LocalPath)
	if fsMountName != "" {
		failOnUnmount := true

		// Mount path exists, but the name doesn't match our mount name.
		// This occurs if the folder has been mounted by something else (i.e.,
		// the user), so to be safe we should not mount this folder.
		if fsMountName != m.MountName {
			log.Warning(
				"The path %q has a fs mountName of %q, but %q was expected.",
				m.LocalPath, fsMountName, m.MountName,
			)
			failOnUnmount = false
		}

		log.Info("Automatically unmounting")

		m.Log = mount.MountLogger(m, log)

		// The mount path exists; unmount it, so that we can remount it below.
		if err := m.Unmount(); err != nil {
			if failOnUnmount {
				log.Error("Failed to automatically unmount. err:%s", err)
				return err
			}

			log.Error(
				"Failed to automatically unmount, but ignoring unmount error. Continuing. err:%s",
				err,
			)
		}
	}

	// Mount path has been unmounted, or didn't exist locally.
	// Remount it, to improve UX.
	log.Info("Automatically mounting")

	// Construct our mounter.
	mounter := &mount.Mounter{
		Log:           log,
		Options:       m.MountFolder,
		Machine:       remoteMachine,
		IP:            remoteMachine.IP,
		KiteTracker:   remoteMachine.KiteTracker,
		Transport:     remoteMachine,
		PathUnmounter: fuseklient.Unmount,
		EventSub:      r.eventSub,
	}

	if err := mounter.MountExisting(m); err != nil {
		return err
	}

	// remote.cache is normally responsible for creating the intervaler, but
	// because cache is not creating one here, we need to do it manually.
	if remoteMachine.Intervaler == nil {
		if !m.SyncIntervalOpts.IsZero() {
			rs := rsync.NewClient(log)

			// After the progress chan is done, start our SyncInterval.
			startIntervalerIfNeeded(log, remoteMachine, rs, m.SyncIntervalOpts)

			// Assign the rsync intervaler to the mount.
			m.Intervaler = remoteMachine.Intervaler
		} else {
			log.Warning(
				"Unable to restore Interval for remote, SyncOpts is zero value. This likely means that SyncOpts were not saved or didn't exist in the previous binary. machineName:%s",
				remoteMachine.Name,
			)
		}
	}

	return nil
}
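
// restoreMountsSketch is a hypothetical, illustrative helper (not part of the
// original implementation) showing how restoreMount might be driven for every
// stored mount at klient startup. It assumes Remote carries a mounts slice and
// a log field; the names are assumptions for the example only.
func (r *Remote) restoreMountsSketch() {
	for _, m := range r.mounts {
		// Restore each mount independently, so that a single failure does not
		// prevent the remaining mounts from being remounted.
		if err := r.restoreMount(m); err != nil {
			r.log.Error("Failed to restore mount %q. err:%s", m.MountName, err)
		}
	}
}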
// CacheFolderHandler implements a prefetching / caching mechanism, currently
// implemented via RSync.
func (r *Remote) CacheFolderHandler(kreq *kite.Request) (interface{}, error) {
	log := logging.NewLogger("remote").New("remote.cacheFolder")

	var params struct {
		req.Cache

		// klient uses a vendored version of dnode with a path rewrite that's not
		// compatible with other apps, hence we embed the common fields into
		// req.Cache and specify dnode.Function by itself.
		Progress dnode.Function `json:"progress"`
	}

	if kreq.Args == nil {
		return nil, errors.New("Required arguments were not passed.")
	}

	if err := kreq.Args.One().Unmarshal(&params); err != nil {
		err = fmt.Errorf(
			"remote.cacheFolder: Error '%s' while unmarshalling request '%s'\n",
			err, kreq.Args.One(),
		)
		r.log.Error(err.Error())
		return nil, err
	}

	if params.Debug {
		log.SetLevel(logging.DEBUG)
	}

	switch {
	case params.Name == "":
		return nil, errors.New("Missing required argument `name`.")
	case params.LocalPath == "":
		return nil, errors.New("Missing required argument `localPath`.")
	case params.Username == "":
		return nil, errors.New("Missing required argument `username`.")
	case params.SSHAuthSock == "":
		return nil, errors.New("Missing required argument `sshAuthSock`.")
	}

	log = log.New(
		"mountName", params.Name,
		"localPath", params.LocalPath,
	)

	remoteMachine, err := r.GetDialedMachine(params.Name)
	if err != nil {
		log.Error("Error getting dialed, valid machine. err:%s", err)
		return nil, err
	}

	if params.RemotePath == "" {
		home, err := remoteMachine.HomeWithDefault()
		if err != nil {
			return nil, err
		}
		params.RemotePath = home
	}

	if !filepath.IsAbs(params.RemotePath) {
		home, err := remoteMachine.HomeWithDefault()
		if err != nil {
			return nil, err
		}
		params.RemotePath = path.Join(home, params.RemotePath)
	}

	if !params.LocalToRemote {
		exists, err := remoteMachine.DoesRemotePathExist(params.RemotePath)
		if err != nil {
			return nil, err
		}

		if !exists {
			return nil, mount.ErrRemotePathDoesNotExist
		}
	}

	var remoteSize int64
	if params.LocalToRemote {
		remoteSize, err = getSizeOfLocalPath(params.LocalPath)
		if err != nil {
			return nil, err
		}
	} else {
		remoteSize, err = remoteMachine.GetFolderSize(params.RemotePath)
		if err != nil {
			return nil, err
		}

		log.Debug("Remote path %q is size: %d", params.RemotePath, remoteSize)
	}

	// If there is an actively running intervaler, run the requested cache
	// *between* intervals, locking to prevent any conflicts between the two.
	runBetweenIntervals := remoteMachine.Intervaler != nil && params.Interval == 0

	// If there is an interval already running, we may need to stop or pause it.
	replaceIntervaler := remoteMachine.Intervaler != nil && params.Interval != 0

	if replaceIntervaler {
		log.Info("Unsubscribing from existing Sync Intervaler to replace it.")
		remoteMachine.Intervaler.Stop()
	}

	rs := rsync.NewClient(log)
	syncOpts := rsync.SyncIntervalOpts{
		SyncOpts: rsync.SyncOpts{
			Host:              remoteMachine.IP,
			Username:          params.Username,
			RemoteDir:         params.RemotePath,
			LocalDir:          params.LocalPath,
			SSHAuthSock:       params.SSHAuthSock,
			SSHPrivateKeyPath: params.SSHPrivateKeyPath,
			DirSize:           remoteSize,
			LocalToRemote:     params.LocalToRemote,
			IgnoreFile:        params.IgnoreFile,
			IncludePath:       params.IncludePath,
		},
		Interval: params.Interval,
	}

	if params.OnlyInterval {
		startIntervalerIfNeeded(log, remoteMachine, rs, syncOpts)
		return nil, nil
	}

	log.Info("Caching remote via RSync, with options:%#v", syncOpts)

	progCh := rs.Sync(syncOpts.SyncOpts)

	// If a valid callback is not provided, this method blocks until the data is
	// done transferring.
	if !params.Progress.IsValid() {
		log.Debug(
			"Progress callback is not valid. Running remote.cache in synchronous mode.",
		)

		// If there is an existing Intervaler, lock it for the duration of this
		// synchronous method.
		if runBetweenIntervals {
			remoteMachine.Intervaler.Lock()
			defer remoteMachine.Intervaler.Unlock()
		}

		// For predictable behavior we log any errors, but do not immediately return
		// on them. If we returned early, RSync might still be running - by blocking
		// until the channel is closed, we ensure that this method, in its blocking
		// form, only returns after RSync is done.
		var err error
		for p := range progCh {
			if p.Error.Message != "" {
				log.Error(
					"Error encountered in blocking remote.cache. progress:%d, err:%s",
					p.Progress, p.Error.Message,
				)
				err = errors.New(p.Error.Message)
			}
		}

		// After the progress chan is done, start our SyncInterval.
		startIntervalerIfNeeded(r.log, remoteMachine, rs, syncOpts)

		return nil, err
	}

	go func() {
		log.Debug(
			"Progress callback is valid. Running remote.cache in asynchronous mode.",
		)

		// If there is an existing Intervaler, lock it for the duration of this
		// asynchronous goroutine.
		if runBetweenIntervals {
			remoteMachine.Intervaler.Lock()
			defer remoteMachine.Intervaler.Unlock()
		}

		for p := range progCh {
			if p.Error.Message != "" {
				log.Error(
					"Error encountered in nonblocking remote.cache. progress:%d, err:%s",
					p.Progress, p.Error.Message,
				)
			}

			params.Progress.Call(p)
		}

		// After the progress chan is done, start our SyncInterval.
		startIntervalerIfNeeded(log, remoteMachine, rs, syncOpts)
	}()

	return nil, nil
}
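
// registerCacheFolderSketch is a hypothetical illustration (not part of the
// original file) of how a handler with CacheFolderHandler's signature is
// typically exposed on a kite. The handler name "remote.cacheFolder" is taken
// from the log context above; the *kite.Kite argument is an assumption for the
// example.
func registerCacheFolderSketch(k *kite.Kite, r *Remote) {
	// kite.HandlerFunc shares the func(*kite.Request) (interface{}, error)
	// signature, so the method value can be registered directly.
	k.HandleFunc("remote.cacheFolder", r.CacheFolderHandler)
}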