Esempio n. 1
0
// runOnRemote executes cmdWithArgsStr on the remote machine that owns the
// mount containing localPath, translating the command path from its local
// mount location to the matching remote path first.
func (r *RunCommand) runOnRemote(localPath string, cmdWithArgsStr string) (*ExecRes, error) {
	// Resolve which machine the local path belongs to.
	machine, err := mountcli.NewMountcli().FindMountNameByPath(localPath)
	if err != nil {
		return nil, err
	}

	// Map the local command path onto the remote machine's filesystem.
	remoteCmdPath, pathErr := r.getCmdRemotePath(machine, localPath)
	if pathErr != nil {
		return nil, pathErr
	}

	// track metrics
	metrics.TrackRun(machine, config.VersionNum())

	return r.runOnMachine(machine, remoteCmdPath, cmdWithArgsStr)
}
Esempio n. 2
0
// StartMountStatusTicker periodically health-checks the given mount by
// alternating writes and content reads of a status file, tracking every
// failed check as a metric. It blocks in the ticker loop until more than
// m.LimitFailures checks have failed (or the initial path lookup fails),
// so callers typically run it in its own goroutine.
func (m *MetricClient) StartMountStatusTicker(mount string) (err error) {
	var (
		i        int // tick counter; its parity alternates write vs. read checks
		failures int // total number of failed checks so far
	)

	// stop previous tickers if any
	m.StopMountStatusTicker(mount)

	// start new ticker and save for future use
	ticker := time.NewTicker(m.Interval)
	m.tickers[mount] = ticker

	// Release the ticker on every return path. Previously the ticker kept
	// running when the path lookup below failed or the failure limit was
	// exceeded. Stop is idempotent, so a later StopMountStatusTicker call
	// on the saved ticker is still safe.
	//
	// NOTE(review): Ticker.Stop does not close ticker.C, so an external
	// StopMountStatusTicker cannot terminate the loop below — confirm this
	// is the intended cancellation model.
	defer ticker.Stop()

	path, err := mountcli.NewMountcli().FindMountedPathByName(mount)
	if err != nil {
		TrackMountCheckFailure(mount, err.Error())
		return err
	}

	for range ticker.C {
		ms := NewDefaultMountStatus(path)

		// alternate between reading & writing; this is required since the
		// kernel caches the file on write — if we read right away, it'll
		// return contents from the kernel cache and not from the mount like
		// we want it to
		if i%2 == 0 {
			err = ms.Write()
		} else {
			err = ms.CheckContents()
		}

		// we only care about failures and not success
		if err != nil {
			TrackMountCheckFailure(mount, err.Error())
			failures++
		}

		// if it errors more than limit, return from ticker
		if failures > m.LimitFailures {
			return nil
		}

		i++
	}

	return nil
}
Esempio n. 3
0
// getCmdRemotePath return the path on remote machine where the command should
// be run.
func (r *RunCommand) getCmdRemotePath(machine, localPath string) (string, error) {
	relativePath, err := mountcli.NewMountcli().FindRelativeMountPath(localPath)
	if err != nil {
		return "", err
	}

	mounts, err := r.getMounts()
	if err != nil {
		return "", err
	}

	for _, m := range mounts {
		if m.MountName == machine {
			// join path in remote machine
			return filepath.Join(m.RemotePath, relativePath), nil
		}
	}

	return "", mountcli.ErrNotInMount
}
Esempio n. 4
0
// initDefaultRepairers creates the repairers for this Command if the
// Command.Repairers field is *nil*. This allows a caller can specify their own
// repairers if desired.
// initDefaultRepairers creates the default repairers for this Command if the
// Command.Repairers field is nil. This allows a caller to specify their own
// repairers if desired.
func (c *Command) initDefaultRepairers() error {
	// Respect caller-supplied repairers; only build the defaults when unset.
	if c.Repairers != nil {
		return nil
	}

	// TODO: Re-enable. Currently disabled because we're manually running it
	// before the checkMachineExist() call inside of Run().
	//
	//// The kontrol repairer will check if we're connected to kontrol yet, and
	//// attempt to wait for it. Eventually restarting if needed.
	//kontrolRepair := &KontrolRepair{
	//	Log:    c.Log.New("KontrolRepair"),
	//	Stdout: c.Stdout,
	//	Klient: c.Klient,
	//	RetryOptions: RetryOptions{
	//		StatusRetries: 3,
	//		StatusDelay:   10 * time.Second,
	//	},
	//	Exec: &exec.CommandRun{
	//		Stdin:  c.Stdin,
	//		Stdout: c.Stdout,
	//	},
	//}

	// The kite unreachable repairer ensures that the remote machine is on, and
	// kite is reachable. No repair action is possible.
	kiteUnreachableRepair := &KiteUnreachableRepair{
		Log:           c.Log.New("KiteUnreachableRepair"),
		Stdout:        c.Stdout,
		Klient:        c.Klient,
		StatusRetries: 10,
		StatusDelay:   1 * time.Second,
		MachineName:   c.Options.MountName,
	}

	// The token expired repair checks for token expired. This should be placed *before*
	// TokenNotYetValidRepair, so that after we restart, we can check if the token
	// is valid.
	tokenExpired := &TokenExpiredRepair{
		Log:                c.Log.New("TokenExpiredRepair"),
		Stdout:             c.Stdout,
		Klient:             c.Klient,
		RepairWaitForToken: 5 * time.Second,
		MachineName:        c.Options.MountName,
	}

	// The token not yet valid repairer will check if we're failing from the token
	// not yet valid error, and wait for it to become valid.
	tokenNotValidYetRepair := &TokenNotYetValidRepair{
		Log:           c.Log.New("TokenNotYetValidRepair"),
		Stdout:        c.Stdout,
		Klient:        c.Klient,
		RepairRetries: 5,
		RepairDelay:   3 * time.Second,
		MachineName:   c.Options.MountName,
	}

	// Verifies the mount exists, checking against the local mount table via
	// the Mountcli dependency.
	mountExistsRepair := &MountExistsRepair{
		Log:       c.Log.New("MountExistsRepair"),
		Stdout:    util.NewFprint(c.Stdout),
		MountName: c.Options.MountName,
		Klient:    c.Klient,
		Mountcli:  mountcli.NewMountcli(),
	}

	// Handles permission-denied failures on the mount.
	permDeniedRepair := &PermDeniedRepair{
		Log:       c.Log.New("PermDeniedRepair"),
		Stdout:    util.NewFprint(c.Stdout),
		MountName: c.Options.MountName,
		Klient:    c.Klient,
	}

	// Handles the case where the mount directory is unexpectedly empty.
	mountEmptyRepair := &MountEmptyRepair{
		Log:       c.Log.New("MountEmptyRepair"),
		Stdout:    util.NewFprint(c.Stdout),
		MountName: c.Options.MountName,
		Klient:    c.Klient,
	}

	// Handles the "device not configured" mount error.
	deviceNotConfiguredRepair := &DeviceNotConfiguredRepair{
		Log:       c.Log.New("DeviceNotConfiguredRepair"),
		Stdout:    util.NewFprint(c.Stdout),
		MountName: c.Options.MountName,
		Klient:    c.Klient,
	}

	// Verifies the mount is actually usable by exercising a write/read cycle.
	writeReadRepair := &WriteReadRepair{
		Log:       c.Log.New("WriteReadRepair"),
		Stdout:    util.NewFprint(c.Stdout),
		MountName: c.Options.MountName,
		Klient:    c.Klient,
	}

	// A collection of Repairers responsible for actually repairing a given mount.
	// Executed in the order they are defined, the effectiveness of the Repairers
	// may depend on the order they are run in. An example being TokenNotValidYetRepair
	// likely should be run *after* a restart, as Tokens not being valid yet usually
	// happens after a restart.
	c.Repairers = []Repairer{
		//kontrolRepair,
		kiteUnreachableRepair,
		tokenExpired,
		tokenNotValidYetRepair,
		mountExistsRepair,
		permDeniedRepair,
		mountEmptyRepair,
		deviceNotConfiguredRepair,
		writeReadRepair,
	}

	return nil
}
Esempio n. 5
0
// restoreMount re-establishes the given mount: it fetches (and dials) the
// owning machine, unmounts any stale fs mount found at the local path, then
// remounts the folder and restores the rsync intervaler when sync options
// are available. The named return err is read by the status-setting defer
// below, so it must not be shadowed at the top level.
func (r *Remote) restoreMount(m *mount.Mount) (err error) {
	// Test seam: allow tests to stub out the whole restore.
	if r.mockedRestoreMount != nil {
		return r.mockedRestoreMount(m)
	}

	// The two New methods are to tweak how the log is displayed.
	log := logging.NewLogger("remote").New("restoreMount").New(
		"mountName", m.MountName,
		"syncMount", m.MountFolder.OneWaySyncMount,
		"prefetchAll", m.MountFolder.PrefetchAll,
	)

	// Enable debug for the mount that was originally using debug.
	if m.MountFolder.Debug {
		log.SetLevel(logging.DEBUG)
	}

	// First get the plain machine, we don't care about it being dialed or valid as
	// we will potentially just be setting the status with it.
	remoteMachine, err := r.GetMachine(m.MountName)
	if err != nil {
		return err
	}

	// If the machine does not have an http tracker, create it so that we can
	// get accurate online/offline information.
	if !remoteMachine.HasHTTPTracker() {
		// No need to return here, this just means we won't get accurate information
		// about online/offline *before the machine is valid*. This is used mainly
		// in the defer, to mark the remounting machine as offline.
		//
		// Later, we'll get a valid and dialed machine, which is assured to have an
		// http tracker or fail trying.
		if err := remoteMachine.InitHTTPTracker(); err != nil {
			log.Error("Unable to init http tracker before remount. err:%s", err)
		}
	}

	// Update the status based on the return value. Note that it's possible to
	// return before this call, if we can't get the machine, but that's a non-issue
	// for updating the machine status, since we failed to get the machine, and
	// can't possibly update the status.
	defer func() {
		if err != nil {
			// Update the user that we failed, and are retrying.
			switch {
			case !remoteMachine.IsOnline() && remoteMachine.HasHTTPTracker():
				// The machine is offline / unreachable, so don't set the status to
				// remounting specifically.
				//
				// TODO: Check if we have internet here?
				remoteMachine.SetStatus(machine.MachineOffline, remountingButOffline)
			default:
				// Machine status is not offline/disconnected, therefore it may be
				// online and/or connected - but we failed to mount for another unknown
				// reason. Use a generic status.
				remoteMachine.SetStatus(machine.MachineRemounting, autoRemountingAgain)
			}
		} else {
			// If there's no errors, clear the status.
			remoteMachine.SetStatus(machine.MachineStatusUnknown, "")
		}
	}()

	// Now try to get a valid, dialed machine. We're doing this *after* the
	// machine's setstatus defer, so that we can set autoRemountingAgain as needed.
	//
	// Note that we're not getting the instance here, because if we cannot get a
	// dialed machine then remoteMachine will be set to nil, causing a panic
	// in the defer above. Regardless, it's the same instance, we don't need it.
	// (The := here deliberately shadows err; the return still feeds the defer.)
	if _, err := r.GetDialedMachine(m.MountName); err != nil {
		return err
	}

	if remoteMachine.IsMountingLocked() {
		log.Warning("Restore mount was attempted but the machine is mount locked")
		return machine.ErrMachineActionIsLocked
	}

	// Lock and defer unlock the machine mount actions
	remoteMachine.LockMounting()
	defer remoteMachine.UnlockMounting()

	// Lookup error deliberately ignored: an empty fsMountName is treated below
	// as "nothing mounted at this path".
	fsMountName, _ := mountcli.NewMountcli().FindMountNameByPath(m.LocalPath)
	if fsMountName != "" {
		failOnUnmount := true

		// Mount path exists, but the name doesn't match our mount name.
		// This occurs if the folder has been mounted by something else (ie,
		// the user), so to be safe we should not mount this folder.
		if fsMountName != m.MountName {
			log.Warning(
				"The path %q has a fs mountName of %q, but %q was expected.",
				m.LocalPath, fsMountName, m.MountName,
			)

			failOnUnmount = false
		}

		log.Info("Automatically unmounting")

		m.Log = mount.MountLogger(m, log)

		// Mount path exists, and the names match. Unmount it, so that we
		// can remount it below.
		if err := m.Unmount(); err != nil {
			if failOnUnmount {
				log.Error("Failed to automatically unmount. err:%s", err)
				return err
			} else {
				log.Error(
					"Failed to automatically unmount, but ignoring unmount error. Continuing. err:%s",
					err,
				)
			}
		}
	}

	// Mount path has been unmounted, or didn't exist locally.
	// Remount it, to improve UX.
	log.Info("Automatically mounting")

	// Construct our mounter
	mounter := &mount.Mounter{
		Log:           log,
		Options:       m.MountFolder,
		Machine:       remoteMachine,
		IP:            remoteMachine.IP,
		KiteTracker:   remoteMachine.KiteTracker,
		Transport:     remoteMachine,
		PathUnmounter: fuseklient.Unmount,
		EventSub:      r.eventSub,
	}

	if err := mounter.MountExisting(m); err != nil {
		return err
	}

	// remote.cache is normally responsible for creating the intervaler, but
	// because cache is not creating one here, we need to do it manually.
	if remoteMachine.Intervaler == nil {
		if !m.SyncIntervalOpts.IsZero() {
			rs := rsync.NewClient(log)
			// After the progress chan is done, start our SyncInterval
			startIntervalerIfNeeded(log, remoteMachine, rs, m.SyncIntervalOpts)
			// Assign the rsync intervaler to the mount.
			m.Intervaler = remoteMachine.Intervaler
		} else {
			log.Warning(
				"Unable to restore Interval for remote, SyncOpts is zero value. This likely means that SyncOpts were not saved or didn't exist in the previous binary. machineName:%s",
				remoteMachine.Name,
			)
		}
	}

	return nil
}