// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (v *vsphereVolumeUnmounter) TearDownAt(dir string) error {
	glog.V(5).Infof("vSphere Volume TearDown of %s", dir)
	notmnt, err := v.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(4).Infof("Error checking if %s is a mountpoint: %v", dir, err)
		return err
	}
	if notmnt {
		glog.V(4).Infof("Not a mount point, deleting")
		return os.Remove(dir)
	}

	// Find the vSphere volume path to lock the right volume.
	refs, err := mount.GetMountRefs(v.mounter, dir)
	if err != nil {
		glog.V(4).Infof("Error getting mount refs for %s: %v", dir, err)
		return err
	}
	if len(refs) == 0 {
		glog.V(4).Infof("Directory %s is not mounted", dir)
		return fmt.Errorf("directory %s is not mounted", dir)
	}
	v.volPath = path.Base(refs[0])
	glog.V(4).Infof("Found volume %s mounted to %s", v.volPath, dir)

	// Reload the list of references; a SetUpAt might have finished in the meantime.
	refs, err = mount.GetMountRefs(v.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if err := v.mounter.Unmount(dir); err != nil {
		glog.V(4).Infof("Unmount failed: %v", err)
		return err
	}
	glog.V(3).Infof("Successfully unmounted: %s", dir)

	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		if err := v.manager.DetachDisk(v); err != nil {
			glog.V(4).Infof("DetachDisk failed: %v", err)
			return err
		}
		glog.V(3).Infof("Volume %s detached", v.volPath)
	}
	notmnt, mntErr := v.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
			return err
		}
	}
	return nil
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
	glog.V(5).Infof("Cinder TearDown of %s", dir)
	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
		return err
	}
	if notmnt {
		glog.V(4).Infof("Nothing is mounted to %s, ignoring", dir)
		return os.Remove(dir)
	}

	// Find the Cinder volumeID to lock the right volume.
	// TODO: refactor VolumePlugin.NewUnmounter to get the full volume.Spec just like
	// NewMounter. We could then find the volumeID there without probing mount refs.
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if len(refs) == 0 {
		glog.V(4).Infof("Directory %s is not mounted", dir)
		return fmt.Errorf("directory %s is not mounted", dir)
	}
	c.pdName = path.Base(refs[0])
	glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)

	// Lock the volume (and thus wait for any concurrent SetUpAt to finish).
	c.plugin.volumeLocks.LockKey(c.pdName)
	defer c.plugin.volumeLocks.UnlockKey(c.pdName)

	// Reload the list of references; a SetUpAt might have finished in the meantime.
	refs, err = mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(4).Infof("Unmount failed: %v", err)
		return err
	}
	glog.V(3).Infof("Successfully unmounted: %s", dir)

	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
			return err
		}
	}
	return nil
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeCleaner) TearDownAt(dir string) error {
	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return err
	}
	exist, _ := util.FileExists(path.Join(dir, OpenStackCloudProviderTagFile))
	c.withOpenStackCP = !exist
	if notmnt {
		if !c.withOpenStackCP && c.isNoMountSupported {
			volumeID, err := ioutil.ReadFile(path.Join(dir, OpenStackCloudProviderTagFile))
			if err != nil {
				return err
			}
			c.pdName = string(volumeID)
			if err := c.manager.DetachDisk(c); err != nil {
				return err
			}
		}
		return os.RemoveAll(dir)
	}
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		return err
	}
	glog.Infof("Successfully unmounted: %s", dir)

	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		c.pdName = path.Base(refs[0])
		if err := c.manager.DetachDisk(c); err != nil {
			return err
		}
	}
	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			return err
		}
	}
	return nil
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *awsElasticBlockStoreCleaner) TearDownAt(dir string) error {
	notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(2).Info("Error checking if mountpoint ", dir, ": ", err)
		return err
	}
	if notMnt {
		glog.V(2).Info("Not a mountpoint, deleting")
		return os.Remove(dir)
	}
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(2).Info("Error getting mount refs for ", dir, ": ", err)
		return err
	}
	if len(refs) == 0 {
		glog.Warning("Did not find pod-mount for ", dir, " during tear-down")
	}
	// Unmount the bind-mount inside this pod.
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(2).Info("Error unmounting dir ", dir, ": ", err)
		return err
	}
	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		// c.volumeID is not initially set for volume cleaners, so set it here.
		c.volumeID, err = getVolumeIDFromGlobalMount(c.plugin.host, refs[0])
		if err != nil {
			glog.V(2).Info("Could not determine volumeID from mountpoint ", refs[0], ": ", err)
			return err
		}
		if err := c.manager.DetachDisk(&awsElasticBlockStoreCleaner{c.awsElasticBlockStore}); err != nil {
			glog.V(2).Info("Error detaching disk ", c.volumeID, ": ", err)
			return err
		}
	} else {
		glog.V(2).Infof("Found multiple refs; won't detach EBS volume: %v", refs)
	}
	notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notMnt {
		if err := os.Remove(dir); err != nil {
			glog.V(2).Info("Error removing mountpoint ", dir, ": ", err)
			return err
		}
	}
	return nil
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *azureDiskUnmounter) TearDownAt(dir string) error {
	if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
		return fmt.Errorf("error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Unmount skipped because path does not exist: %v", dir)
		return nil
	}
	notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.Errorf("Error checking if %s is a mountpoint: %v", dir, err)
		return err
	}
	if notMnt {
		glog.V(2).Info("Not a mountpoint, deleting")
		return os.Remove(dir)
	}

	// Lock the volume (and thus wait for any concurrent SetUpAt to finish).
	c.plugin.volumeLocks.LockKey(c.diskName)
	defer c.plugin.volumeLocks.UnlockKey(c.diskName)

	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.Errorf("Error getting mount refs for %s: %v", dir, err)
		return err
	}
	if len(refs) == 0 {
		glog.Errorf("Did not find pod-mount for %s during tear down", dir)
		return fmt.Errorf("%s is not mounted", dir)
	}
	c.diskName = path.Base(refs[0])
	glog.V(4).Infof("Found volume %s mounted to %s", c.diskName, dir)

	// Unmount the bind-mount inside this pod.
	if err := c.mounter.Unmount(dir); err != nil {
		glog.Errorf("Error unmounting dir %s: %v", dir, err)
		return err
	}
	notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notMnt {
		if err := os.Remove(dir); err != nil {
			glog.Errorf("Error removing mountpoint %s: %v", dir, err)
			return err
		}
	}
	return nil
}
// diskTearDown is a utility to tear down a disk-based filesystem.
func diskTearDown(manager diskManager, c iscsiDiskCleaner, volPath string, mounter mount.Interface) error {
	notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
	if err != nil {
		glog.Errorf("cannot validate mountpoint %s", volPath)
		return err
	}
	if notMnt {
		return os.Remove(volPath)
	}
	refs, err := mount.GetMountRefs(mounter, volPath)
	if err != nil {
		glog.Errorf("failed to get reference count for %s", volPath)
		return err
	}
	if err := mounter.Unmount(volPath); err != nil {
		glog.Errorf("failed to unmount %s", volPath)
		return err
	}
	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		mntPath := refs[0]
		if err := manager.DetachDisk(c, mntPath); err != nil {
			glog.Errorf("failed to detach disk from %s", mntPath)
			return err
		}
	}
	notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notMnt {
		if err := os.Remove(volPath); err != nil {
			return err
		}
	}
	return nil
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *gcePersistentDiskCleaner) TearDownAt(dir string) error {
	notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return err
	}
	if notMnt {
		return os.Remove(dir)
	}
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		return err
	}
	// Unmount the bind-mount inside this pod.
	if err := c.mounter.Unmount(dir); err != nil {
		return err
	}
	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		// c.pdName is not initially set for volume cleaners, so set it here.
		c.pdName = path.Base(refs[0])
		if err := c.manager.DetachDisk(c); err != nil {
			return err
		}
	}
	notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notMnt {
		if err := os.Remove(dir); err != nil {
			return err
		}
	}
	return nil
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeCleaner) TearDownAt(dir string) error {
	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return err
	}
	if notmnt {
		return os.Remove(dir)
	}
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		return err
	}
	glog.Infof("Successfully unmounted: %s", dir)

	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		c.pdName = path.Base(refs[0])
		if err := c.manager.DetachDisk(c); err != nil {
			return err
		}
	}
	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			return err
		}
	}
	return nil
}
func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter()
	return mount.GetMountRefs(mounter, deviceMountPath)
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
	glog.V(5).Infof("Cinder TearDown of %s", dir)
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		// A nonexistent dir is meaningless for TearDown, and it is possible that
		// this dir has already been cleaned up, so ignore the error for now.
		glog.Warningf("Volume directory %v does not exist; it may have been cleaned up by a previous tear-down task", dir)
		return nil
	}
	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
		return err
	}
	exist, _ := util.FileExists(path.Join(dir, OpenStackCloudProviderTagFile))
	c.withOpenStackCP = !exist
	if notmnt {
		// Find the Cinder volumeID to lock the right volume.
		// TODO: refactor VolumePlugin.NewCleaner to get the full volume.Spec just like
		// NewBuilder. We could then find the volumeID there without probing mount refs.
		if !c.withOpenStackCP && c.isNoMountSupported {
			volumeID, err := ioutil.ReadFile(path.Join(dir, OpenStackCloudProviderTagFile))
			if err != nil {
				return err
			}
			c.pdName = string(volumeID)
			if err := c.manager.DetachDisk(c); err != nil {
				return err
			}
		}
		return os.RemoveAll(dir)
	}

	// Find the Cinder volumeID to lock the right volume.
	// TODO: refactor VolumePlugin.NewUnmounter to get the full volume.Spec just like
	// NewMounter. We could then find the volumeID there without probing mount refs.
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if len(refs) == 0 {
		glog.V(4).Infof("Directory %s is not mounted", dir)
		return fmt.Errorf("directory %s is not mounted", dir)
	}
	c.pdName = path.Base(refs[0])
	glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)

	// Lock the volume (and thus wait for any concurrent SetUpAt to finish).
	c.plugin.volumeLocks.LockKey(c.pdName)
	defer c.plugin.volumeLocks.UnlockKey(c.pdName)

	// Reload the list of references; a SetUpAt might have finished in the meantime.
	refs, err = mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(4).Infof("Unmount failed: %v", err)
		return err
	}
	glog.V(3).Infof("Successfully unmounted: %s", dir)

	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
			return err
		}
	}
	return nil
}
func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter()
	return mount.GetMountRefs(mounter, deviceMountPath)
}
func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter()
	return mount.GetMountRefs(mounter, deviceMountPath)
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (v *vsphereVolumeUnmounter) TearDownAt(dir string) error {
	glog.V(5).Infof("vSphere Volume TearDown of %s", dir)
	notmnt, err := v.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(4).Infof("Error checking if %s is a mountpoint: %v", dir, err)
		return err
	}
	if notmnt {
		glog.V(4).Infof("Not a mount point, deleting")
		return os.Remove(dir)
	}

	// Find the vSphere volume path to lock the right volume.
	refs, err := mount.GetMountRefs(v.mounter, dir)
	if err != nil {
		glog.V(4).Infof("Error getting mount refs for %s: %v", dir, err)
		return err
	}
	if len(refs) == 0 {
		glog.V(4).Infof("Directory %s is not mounted", dir)
		return fmt.Errorf("directory %s is not mounted", dir)
	}
	// The space between the datastore and the vmdk name in the volume path is
	// encoded as '\040' when returned by GetMountRefs(). The volume path passed
	// to attach/mount looks like "[local] xxx.vmdk", so replace '\040' with a
	// space to match the actual volume path.
	mountPath := strings.Replace(path.Base(refs[0]), "\\040", " ", -1)
	v.volPath = mountPath
	glog.V(4).Infof("Found volume %s mounted to %s", v.volPath, dir)

	// Reload the list of references; a SetUpAt might have finished in the meantime.
	refs, err = mount.GetMountRefs(v.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if err := v.mounter.Unmount(dir); err != nil {
		glog.V(4).Infof("Unmount failed: %v", err)
		return err
	}
	glog.V(3).Infof("Successfully unmounted: %s", dir)

	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		if err := v.manager.DetachDisk(v); err != nil {
			glog.V(4).Infof("DetachDisk failed: %v", err)
			return err
		}
		glog.V(3).Infof("Volume %s detached", v.volPath)
	}
	notmnt, mntErr := v.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
			return err
		}
	}
	return nil
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeCleaner) TearDownAt(dir string) error {
	glog.V(5).Infof("Cinder TearDown of %s", dir)
	notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err)
		return err
	}
	exist, _ := util.FileExists(path.Join(dir, OpenStackCloudProviderTagFile))
	c.withOpenStackCP = !exist
	if notmnt {
		// Find the Cinder volumeID to lock the right volume.
		// TODO: refactor VolumePlugin.NewCleaner to get the full volume.Spec just like
		// NewBuilder. We could then find the volumeID there without probing mount refs.
		if !c.withOpenStackCP && c.isNoMountSupported {
			volumeID, err := ioutil.ReadFile(path.Join(dir, OpenStackCloudProviderTagFile))
			if err != nil {
				return err
			}
			c.pdName = string(volumeID)
			if err := c.manager.DetachDisk(c); err != nil {
				return err
			}
		}
		return os.RemoveAll(dir)
	}
	refs, err := mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if len(refs) == 0 {
		glog.V(4).Infof("Directory %s is not mounted", dir)
		return fmt.Errorf("directory %s is not mounted", dir)
	}
	c.pdName = path.Base(refs[0])
	glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir)

	// Lock the volume (and thus wait for any concurrent SetUpAt to finish).
	c.plugin.volumeLocks.LockKey(c.pdName)
	defer c.plugin.volumeLocks.UnlockKey(c.pdName)

	// Reload the list of references; a SetUpAt might have finished in the meantime.
	refs, err = mount.GetMountRefs(c.mounter, dir)
	if err != nil {
		glog.V(4).Infof("GetMountRefs failed: %v", err)
		return err
	}
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(4).Infof("Unmount failed: %v", err)
		return err
	}
	glog.V(3).Infof("Successfully unmounted: %s", dir)

	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		if err := c.manager.DetachDisk(c); err != nil {
			glog.V(4).Infof("DetachDisk failed: %v", err)
			return err
		}
		glog.V(3).Infof("Volume %s detached", c.pdName)
	}
	notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		return mntErr
	}
	if notmnt {
		if err := os.Remove(dir); err != nil {
			glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
			return err
		}
	}
	return nil
}