Example #1
0
// pollRuntime watches for volume runtime-configuration updates and applies
// the new cgroup rate limits to the matching cached mount. It blocks forever
// and is intended to run in its own goroutine; any per-update error is
// logged and the update is skipped.
func (dc *DaemonConfig) pollRuntime() {
	volumeChan := make(chan *watch.Watch)
	dc.Client.WatchVolumeRuntimes(volumeChan)
	for {
		volWatch := <-volumeChan

		// Deletion events carry a nil Config; there is nothing to adjust.
		if volWatch.Config == nil {
			continue
		}

		vol, ok := volWatch.Config.(*config.Volume)
		if !ok {
			// Log the payload we actually received: on a failed assertion
			// `vol` is nil, so formatting it would tell us nothing.
			logrus.Error(errored.Errorf("Error processing runtime update for volume %v: assertion failed", volWatch.Config))
			continue
		}

		logrus.Infof("Adjusting runtime parameters for volume %q", vol)
		thisMC, err := dc.API.MountCollection.Get(vol.String())

		// Anything other than a NotExists error is unexpected here.
		if er, ok := err.(*errored.Error); ok && !er.Contains(errors.NotExists) {
			logrus.Errorf("Unknown error processing runtime configuration parameters for volume %q: %v", vol, er)
			continue
		}

		// if we can't look it up, it's possible it was mounted on a different host.
		if err != nil {
			logrus.Errorf("Error retrieving mount information for %q from cache: %v", vol, err)
			continue
		}

		if err := cgroup.ApplyCGroupRateLimit(vol.RuntimeOptions, thisMC); err != nil {
			logrus.Error(errored.Errorf("Error processing runtime update for volume %q", vol).Combine(err))
		}
	}
}
Example #2
0
// Mount is the request to mount a volume. It resolves the storage driver and
// volume configuration from the request, acquires a cluster-wide use lock when
// the volume is in locked mode, guards against duplicate mounts via a local
// reference counter, performs the driver mount, starts the lock's TTL refresh
// (locked mode only), applies cgroup rate limits, and finally writes the mount
// path back to the caller. All failures are reported through a.HTTPError.
func (a *API) Mount(w http.ResponseWriter, r *http.Request) {
	request, err := a.ReadMount(r)
	if err != nil {
		a.HTTPError(w, errors.ConfiguringVolume.Combine(err))
		return
	}

	logrus.Infof("Mounting volume %q", request)
	logrus.Debugf("%#v", a.MountCollection)

	driver, volConfig, driverOpts, err := a.GetStorageParameters(request)
	if err != nil {
		a.HTTPError(w, errors.ConfiguringVolume.Combine(err))
		return
	}

	volName := volConfig.String()
	// ut records who holds the mount and why; it is published as the use lock
	// below and handed to clearMount for cleanup on any later failure.
	ut := &config.UseMount{
		Volume:   volName,
		Reason:   lock.ReasonMount,
		Hostname: a.Hostname,
	}

	if !volConfig.Unlocked {
		// XXX the only times a use lock cannot be acquired when there are no
		// previous mounts, is when in locked mode and a mount is held on another
		// host. So we take an indefinite lock HERE while we calculate whether or not
		// we already have one.
		if err := a.Client.PublishUse(ut); err != nil {
			a.HTTPError(w, errors.LockFailed.Combine(err))
			return
		}
	}

	// XXX docker issues unmount request after every mount failure so, this evens out
	//     decreaseMount() in unmount
	if a.MountCounter.Add(volName) > 1 {
		// Duplicate mount. In unlocked mode this is tolerated: return the
		// existing mount path. In locked mode it is an error.
		if volConfig.Unlocked {
			logrus.Warnf("Duplicate mount of %q detected: returning existing mount path", volName)
			path, err := a.getMountPath(driver, driverOpts)
			if err != nil {
				a.HTTPError(w, errors.MarshalResponse.Combine(err))
				return
			}
			a.WriteMount(path, w)
			return
		}

		logrus.Warnf("Duplicate mount of %q detected: Lock failed", volName)
		a.HTTPError(w, errors.LockFailed.Combine(errored.Errorf("Duplicate mount")))
		return
	}

	// so. if EBUSY is returned here, the resulting unmount will unmount an
	// existing mount. However, this should never happen because of the above
	// counter check.
	// I'm leaving this in because it will break tons of tests if it double
	// mounts something, after the resulting unmount occurs. This seems like a
	// great way to fix tons of errors in our code before they ever accidentally
	// reach a user.
	mc, err := driver.Mount(driverOpts)
	if err != nil {
		a.clearMount(mountState{w, err, ut, driver, driverOpts, volConfig})
		return
	}

	a.MountCollection.Add(mc)

	// Only perform the TTL refresh if the driver is in unlocked mode.
	if !volConfig.Unlocked {
		if err := a.startTTLRefresh(volName); err != nil {
			a.RemoveStopChan(volName)
			a.clearMount(mountState{w, err, ut, driver, driverOpts, volConfig})
			return
		}
	}

	// Rate-limit failures are logged but deliberately non-fatal: the mount
	// itself succeeded and should be reported to the caller.
	if err := cgroup.ApplyCGroupRateLimit(volConfig.RuntimeOptions, mc); err != nil {
		logrus.Errorf("Could not apply cgroups to volume %q", volConfig)
	}

	path, err := driver.MountPath(driverOpts)
	if err != nil {
		a.RemoveStopChan(volName)
		a.clearMount(mountState{w, err, ut, driver, driverOpts, volConfig})
		return
	}

	a.WriteMount(path, w)
}