func (d *DaemonConfig) completeRemove(req *config.VolumeRequest, vc *config.Volume) error {
	if err := control.RemoveVolume(vc, d.Global.Timeout); err != nil && err != errors.NoActionTaken {
		logrus.Warn(errors.RemoveImage.Combine(errored.New(vc.String())).Combine(err))
	}

	return d.removeVolume(req, vc)
}

func (d *DaemonConfig) removeVolume(req *config.VolumeRequest, vc *config.Volume) error {
	if err := d.Config.RemoveVolume(req.Policy, req.Name); err != nil {
		return errors.ClearVolume.Combine(errored.New(vc.String())).Combine(err)
	}

	return nil
}
func (dc *DaemonConfig) pruneSnapshots(val *config.Volume) {
	logrus.Infof("starting snapshot prune for %q", val.VolumeName)

	if val.Backends.Snapshot == "" {
		logrus.Debugf("Snapshot driver for volume %v was empty, not snapshotting.", val)
		return
	}

	uc := &config.UseSnapshot{
		Volume: val.String(),
		Reason: lock.ReasonSnapshotPrune,
	}

	stopChan, err := lock.NewDriver(dc.Config).AcquireWithTTLRefresh(uc, dc.Global.TTL, dc.Global.Timeout)
	if err != nil {
		logrus.Error(errors.LockFailed.Combine(err))
		return
	}
	defer func() { stopChan <- struct{}{} }()

	driver, err := backend.NewSnapshotDriver(val.Backends.Snapshot)
	if err != nil {
		logrus.Errorf("failed to get driver: %v", err)
		return
	}

	driverOpts := storage.DriverOptions{
		Volume: storage.Volume{
			Name: val.String(),
			Params: storage.Params{
				"pool": val.DriverOptions["pool"],
			},
		},
		Timeout: dc.Global.Timeout,
	}

	list, err := driver.ListSnapshots(driverOpts)
	if err != nil {
		logrus.Errorf("Could not list snapshots for volume %q: %v", val.VolumeName, err)
		return
	}

	logrus.Debugf("Volume %q: keeping %d snapshots", val, val.RuntimeOptions.Snapshot.Keep)

	toDeleteCount := len(list) - int(val.RuntimeOptions.Snapshot.Keep)
	if toDeleteCount < 0 {
		return
	}

	for i := 0; i < toDeleteCount; i++ {
		logrus.Infof("Removing snapshot %q for volume %q", list[i], val.VolumeName)
		if err := driver.RemoveSnapshot(list[i], driverOpts); err != nil {
			logrus.Errorf("Removing snapshot %q for volume %q failed: %v", list[i], val.VolumeName, err)
		}
	}
}
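// pruneSnapshots deletes from the front of the snapshot list, which assumes
// ListSnapshots returns snapshots oldest-first. A minimal sketch of the
// keep-newest arithmetic in isolation; snapshotsToPrune is a hypothetical
// helper, not part of this package:
func snapshotsToPrune(list []string, keep int) []string {
	toDelete := len(list) - keep
	if toDelete <= 0 {
		return nil // already at or below the keep count; nothing to prune
	}
	return list[:toDelete] // the oldest entries, assuming oldest-first ordering
}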
func (d *DaemonConfig) handleForceRemoveLock(req *config.VolumeRequest, vc *config.Volume, locks []config.UseLocker) error {
	exists, err := control.ExistsVolume(vc, d.Global.Timeout)
	if err != nil && err != errors.NoActionTaken {
		return errors.RemoveVolume.Combine(errored.New(vc.String())).Combine(err)
	}

	// No CRUD backend: clear the configuration and use records directly.
	if err == errors.NoActionTaken {
		if err := d.completeRemove(req, vc); err != nil {
			return err
		}

		d.removeVolumeUse(locks[0], vc)
		return nil
	}

	if !exists {
		d.removeVolume(req, vc)
		return errors.RemoveVolume.Combine(errored.New(vc.String())).Combine(errors.NotExists)
	}

	if err := d.completeRemove(req, vc); err != nil {
		return errors.RemoveVolume.Combine(errored.New(vc.String())).Combine(err)
	}

	d.removeVolumeUse(locks[0], vc)
	return nil
}
func (d *DaemonConfig) createRemoveLocks(vc *config.Volume) ([]config.UseLocker, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return nil, errors.GetHostname.Combine(err)
	}

	uc := &config.UseMount{
		Volume:   vc.String(),
		Reason:   lock.ReasonRemove,
		Hostname: hostname,
	}

	snapUC := &config.UseSnapshot{
		Volume: vc.String(),
		Reason: lock.ReasonRemove,
	}

	return []config.UseLocker{uc, snapUC}, nil
}
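// A minimal sketch of how the pieces above are assumed to fit together when
// a forced removal is requested: build the UseMount and UseSnapshot locks,
// then hand them to handleForceRemoveLock, which checks existence, performs
// the removal, and clears the use records. forceRemoveSketch is hypothetical,
// not part of the daemon.
func (d *DaemonConfig) forceRemoveSketch(req *config.VolumeRequest, vc *config.Volume) error {
	locks, err := d.createRemoveLocks(vc)
	if err != nil {
		return err
	}

	return d.handleForceRemoveLock(req, vc, locks)
}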
func (dc *DaemonConfig) pollRuntime() {
	volumeChan := make(chan *watch.Watch)
	dc.Client.WatchVolumeRuntimes(volumeChan)

	for {
		volWatch := <-volumeChan

		if volWatch.Config == nil {
			continue
		}

		vol, ok := volWatch.Config.(*config.Volume)
		if !ok {
			logrus.Error(errored.Errorf("Error processing runtime update for object %q: assertion to volume failed", volWatch.Config))
			continue
		}

		logrus.Infof("Adjusting runtime parameters for volume %q", vol)

		thisMC, err := dc.API.MountCollection.Get(vol.String())
		if er, ok := err.(*errored.Error); ok && !er.Contains(errors.NotExists) {
			logrus.Errorf("Unknown error processing runtime configuration parameters for volume %q: %v", vol, er)
			continue
		}

		// If we can't look the mount up, it's possible it was mounted on a different host.
		if err != nil {
			logrus.Errorf("Error retrieving mount information for %q from cache: %v", vol, err)
			continue
		}

		if err := cgroup.ApplyCGroupRateLimit(vol.RuntimeOptions, thisMC); err != nil {
			logrus.Error(errored.Errorf("Error processing runtime update for volume %q", vol).Combine(err))
			continue
		}
	}
}
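// pollRuntime never returns: it blocks on the watch channel and applies each
// runtime update as it arrives. A minimal sketch of daemon startup under that
// assumption; startRuntimePollingSketch is hypothetical, not part of this
// package:
func (dc *DaemonConfig) startRuntimePollingSketch() {
	// Run the watch loop in the background so startup can continue.
	go dc.pollRuntime()
}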
func (dc *DaemonConfig) createSnapshot(val *config.Volume) {
	logrus.Infof("Snapshotting %q.", val)

	uc := &config.UseSnapshot{
		Volume: val.String(),
		Reason: lock.ReasonSnapshot,
	}

	stopChan, err := lock.NewDriver(dc.Config).AcquireWithTTLRefresh(uc, dc.Global.TTL, dc.Global.Timeout)
	if err != nil {
		logrus.Error(err)
		return
	}
	defer func() { stopChan <- struct{}{} }()

	driver, err := backend.NewSnapshotDriver(val.Backends.Snapshot)
	if err != nil {
		logrus.Errorf("Error establishing driver backend %q; cannot snapshot", val.Backends.Snapshot)
		return
	}

	driverOpts := storage.DriverOptions{
		Volume: storage.Volume{
			Name: val.String(),
			Params: storage.Params{
				"pool": val.DriverOptions["pool"],
			},
		},
		Timeout: dc.Global.Timeout,
	}

	if err := driver.CreateSnapshot(time.Now().String(), driverOpts); err != nil {
		logrus.Errorf("Error creating snapshot for volume %q: %v", val, err)
	}
}
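// createSnapshot and pruneSnapshots are one-shot operations; a daemon is
// assumed to drive them on a per-volume schedule. A minimal sketch of such a
// loop; scheduleSnapshotsSketch and the interval parameter are hypothetical,
// not part of this package:
func (dc *DaemonConfig) scheduleSnapshotsSketch(val *config.Volume, interval time.Duration) {
	for range time.Tick(interval) {
		// Take a new snapshot, then trim the list back to the keep count.
		dc.createSnapshot(val)
		dc.pruneSnapshots(val)
	}
}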
// CreateVolume performs the dirty work of actually constructing a volume.
func CreateVolume(policy *config.Policy, config *config.Volume, timeout time.Duration) (storage.DriverOptions, error) {
	var (
		fscmd string
		ok    bool
	)

	if config.Backends.CRUD == "" {
		logrus.Debugf("Not creating volume %q, backend is unspecified", config)
		return storage.DriverOptions{}, errors.NoActionTaken
	}

	if policy.FileSystems == nil {
		fscmd = defaultFsCmd
	} else {
		fscmd, ok = policy.FileSystems[config.CreateOptions.FileSystem]
		if !ok {
			return storage.DriverOptions{}, errored.Errorf("Invalid filesystem %q", config.CreateOptions.FileSystem)
		}
	}

	actualSize, err := config.CreateOptions.ActualSize()
	if err != nil {
		return storage.DriverOptions{}, err
	}

	driver, err := backend.NewCRUDDriver(config.Backends.CRUD)
	if err != nil {
		return storage.DriverOptions{}, err
	}

	driverOpts := storage.DriverOptions{
		Volume: storage.Volume{
			Name:   config.String(),
			Size:   actualSize,
			Params: config.DriverOptions,
		},
		FSOptions: storage.FSOptions{
			Type:          config.CreateOptions.FileSystem,
			CreateCommand: fscmd,
		},
		Timeout: timeout,
	}

	logrus.Infof("Creating volume %v with size %d", config, actualSize)
	return driverOpts, driver.Create(driverOpts)
}
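// CreateVolume returns its DriverOptions even on success so the caller can
// reuse them for follow-up operations. A minimal sketch of a caller, assuming
// a policy and volume config are already loaded; createSketch is
// hypothetical, not part of this package:
func createSketch(policy *config.Policy, vc *config.Volume, timeout time.Duration) error {
	driverOpts, err := CreateVolume(policy, vc, timeout)
	if err == errors.NoActionTaken {
		return nil // no CRUD backend configured; nothing was created
	}
	if err != nil {
		return err
	}

	logrus.Infof("Created volume %q with driver options %+v", vc, driverOpts)
	return nil
}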
// ExistsVolume tells if a volume exists. It is *not* suitable for any locking primitive.
func ExistsVolume(config *config.Volume, timeout time.Duration) (bool, error) {
	if config.Backends.CRUD == "" {
		logrus.Debugf("Not checking for volume %q, backend is unspecified", config)
		return true, errors.NoActionTaken
	}

	driver, err := backend.NewCRUDDriver(config.Backends.CRUD)
	if err != nil {
		return false, err
	}

	driverOpts := storage.DriverOptions{
		Volume: storage.Volume{
			Name:   config.String(),
			Params: config.DriverOptions,
		},
		Timeout: timeout,
	}

	return driver.Exists(driverOpts)
}
// RemoveVolume removes a volume.
func RemoveVolume(config *config.Volume, timeout time.Duration) error {
	if config.Backends.CRUD == "" {
		logrus.Debugf("Not removing volume %q, backend is unspecified", config)
		return errors.NoActionTaken
	}

	driver, err := backend.NewCRUDDriver(config.Backends.CRUD)
	if err != nil {
		return err
	}

	driverOpts := storage.DriverOptions{
		Volume: storage.Volume{
			Name:   config.String(),
			Params: config.DriverOptions,
		},
		Timeout: timeout,
	}

	logrus.Infof("Destroying volume %v", config)
	return driver.Destroy(driverOpts)
}
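// A minimal sketch of how the CRUD helpers above compose: check existence
// before destroying, and treat errors.NoActionTaken (no CRUD backend) as a
// no-op rather than a failure. removeIfExistsSketch is hypothetical, not part
// of this package:
func removeIfExistsSketch(vc *config.Volume, timeout time.Duration) error {
	exists, err := ExistsVolume(vc, timeout)
	if err == errors.NoActionTaken {
		return nil // no CRUD backend configured; nothing to remove
	}
	if err != nil {
		return err
	}
	if !exists {
		return nil
	}

	if err := RemoveVolume(vc, timeout); err != nil && err != errors.NoActionTaken {
		return err
	}
	return nil
}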
// removeVolumeUse cleans up use records when forcing a removal. The lock
// passed in is typically the UseMount lock (locks[0] from createRemoveLocks).
func (d *DaemonConfig) removeVolumeUse(lock config.UseLocker, vc *config.Volume) {
	if err := d.Config.RemoveUse(lock, true); err != nil {
		logrus.Warn(errors.RemoveImage.Combine(errored.New(vc.String())).Combine(err))
	}
}