func unshareAndBind(workingRootDir string) bool {
	if *unshare {
		// Re-exec myself using the unshare syscall while on a locked thread.
		// This hack is required because syscall.Unshare() operates on only one
		// thread in the process, and Go switches execution between threads
		// randomly. Thus, the namespace can be suddenly switched for running
		// code. This is an aspect of Go that was not well thought out.
		runtime.LockOSThread()
		err := syscall.Unshare(syscall.CLONE_NEWNS)
		if err != nil {
			fmt.Printf("Unable to unshare mount namespace\t%s\n", err)
			return false
		}
		args := append(os.Args, "-unshare=false")
		err = syscall.Exec(args[0], args, os.Environ())
		if err != nil {
			fmt.Printf("Unable to Exec:%s\t%s\n", args[0], err)
			return false
		}
	}
	err := syscall.Mount("none", "/", "", syscall.MS_REC|syscall.MS_PRIVATE, "")
	if err != nil {
		fmt.Printf("Unable to set mount sharing to private\t%s\n", err)
		return false
	}
	syscall.Unmount(workingRootDir, 0)
	err = syscall.Mount(*rootDir, workingRootDir, "", syscall.MS_BIND, "")
	if err != nil {
		fmt.Printf("Unable to bind mount %s to %s\t%s\n",
			*rootDir, workingRootDir, err)
		return false
	}
	return true
}
func (c *libvirtContainer) cleanup() error {
	g := grohl.NewContext(grohl.Data{"backend": "libvirt-lxc", "fn": "cleanup", "job.id": c.job.ID})
	g.Log(grohl.Data{"at": "start"})
	if err := syscall.Unmount(filepath.Join(c.RootPath, ".containerinit"), 0); err != nil {
		g.Log(grohl.Data{"at": "unmount", "file": ".containerinit", "status": "error", "err": err})
	}
	if err := syscall.Unmount(filepath.Join(c.RootPath, "etc/resolv.conf"), 0); err != nil {
		g.Log(grohl.Data{"at": "unmount", "file": "resolv.conf", "status": "error", "err": err})
	}
	if err := c.l.pinkerton.Cleanup(c.job.ID); err != nil {
		g.Log(grohl.Data{"at": "pinkerton", "status": "error", "err": err})
	}
	for _, m := range c.job.Config.Mounts {
		if err := syscall.Unmount(filepath.Join(c.RootPath, m.Location), 0); err != nil {
			g.Log(grohl.Data{"at": "unmount", "location": m.Location, "status": "error", "err": err})
		}
	}
	for _, v := range c.job.Config.Volumes {
		if err := syscall.Unmount(filepath.Join(c.RootPath, v.Target), 0); err != nil {
			g.Log(grohl.Data{"at": "unmount", "target": v.Target, "volumeID": v.VolumeID, "status": "error", "err": err})
		}
	}
	if !c.job.Config.HostNetwork && c.l.bridgeNet != nil {
		c.l.ipalloc.ReleaseIP(c.l.bridgeNet, c.IP)
	}
	g.Log(grohl.Data{"at": "finish"})
	return nil
}
// cleanTaskDir is an idempotent operation to clean the task directory and
// should be called when tearing down the task.
func (e *LinuxExecutor) cleanTaskDir() error {
	// Unmount dev.
	errs := new(multierror.Error)
	dev := filepath.Join(e.taskDir, "dev")
	if e.pathExists(dev) {
		if err := syscall.Unmount(dev, 0); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("Failed to unmount dev (%v): %v", dev, err))
		}
		if err := os.RemoveAll(dev); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("Failed to delete dev directory (%v): %v", dev, err))
		}
	}

	// Unmount proc.
	proc := filepath.Join(e.taskDir, "proc")
	if e.pathExists(proc) {
		if err := syscall.Unmount(proc, 0); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("Failed to unmount proc (%v): %v", proc, err))
		}
		if err := os.RemoveAll(proc); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("Failed to delete proc directory (%v): %v", proc, err))
		}
	}

	return errs.ErrorOrNil()
}
func (d *Driver) Put(id string) error {
	// Protect the d.active from concurrent access
	d.Lock()
	defer d.Unlock()

	mount := d.active[id]
	if mount == nil {
		logrus.Debugf("Put on a non-mounted device %s", id)
		// but it might be still here
		if d.Exists(id) {
			mergedDir := path.Join(d.dir(id), "merged")
			err := syscall.Unmount(mergedDir, 0)
			if err != nil {
				logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
			}
		}
		return nil
	}

	mount.count--
	if mount.count > 0 {
		return nil
	}

	defer delete(d.active, id)
	if mount.mounted {
		err := syscall.Unmount(mount.path, 0)
		if err != nil {
			logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
		}
		return err
	}
	return nil
}
func (container *Container) unmountIpcMounts() error {
	if container.hostConfig.IpcMode.IsContainer() || container.hostConfig.IpcMode.IsHost() {
		return nil
	}

	shmPath, err := container.shmPath()
	if err != nil {
		return fmt.Errorf("shm path does not exist %v", err)
	}
	if err := syscall.Unmount(shmPath, syscall.MNT_DETACH); err != nil {
		return fmt.Errorf("failed to umount %s filesystem %v", shmPath, err)
	}

	mqueuePath, err := container.mqueuePath()
	if err != nil {
		return fmt.Errorf("mqueue path does not exist %v", err)
	}
	if err := syscall.Unmount(mqueuePath, syscall.MNT_DETACH); err != nil {
		return fmt.Errorf("failed to umount %s filesystem %v", mqueuePath, err)
	}

	return nil
}
func (c *libvirtContainer) cleanup() error {
	g := grohl.NewContext(grohl.Data{"backend": "libvirt-lxc", "fn": "cleanup", "job.id": c.job.ID})
	g.Log(grohl.Data{"at": "start"})
	if err := syscall.Unmount(filepath.Join(c.RootPath, ".containerinit"), 0); err != nil {
		g.Log(grohl.Data{"at": "unmount", "file": ".containerinit", "status": "error", "err": err})
	}
	if err := syscall.Unmount(filepath.Join(c.RootPath, "etc/resolv.conf"), 0); err != nil {
		g.Log(grohl.Data{"at": "unmount", "file": "resolv.conf", "status": "error", "err": err})
	}
	if err := pinkerton.Cleanup(c.job.ID); err != nil {
		g.Log(grohl.Data{"at": "pinkerton", "status": "error", "err": err})
	}
	for _, m := range c.job.Config.Mounts {
		if err := syscall.Unmount(filepath.Join(c.RootPath, m.Location), 0); err != nil {
			g.Log(grohl.Data{"at": "unmount", "location": m.Location, "status": "error", "err": err})
		}
	}
	for _, p := range c.job.Config.Ports {
		if err := c.l.forwarder.Remove(&net.TCPAddr{IP: c.IP, Port: p.Port}, p.RangeEnd, p.Proto); err != nil {
			g.Log(grohl.Data{"at": "iptables", "status": "error", "err": err, "port": p.Port})
		}
		c.l.ports[p.Proto].Put(uint16(p.Port))
	}
	ipallocator.ReleaseIP(defaultNet, &c.IP)
	g.Log(grohl.Data{"at": "finish"})
	return nil
}
func (e *LinuxExecutor) cleanTaskDir() error {
	if e.alloc == nil {
		return errors.New("ConfigureTaskDir() must be called before Start()")
	}

	if !e.mounts {
		return nil
	}

	// Unmount dev.
	errs := new(multierror.Error)
	dev := filepath.Join(e.taskDir, "dev")
	if err := syscall.Unmount(dev, 0); err != nil {
		errs = multierror.Append(errs, fmt.Errorf("Failed to unmount dev (%v): %v", dev, err))
	}

	// Unmount proc.
	proc := filepath.Join(e.taskDir, "proc")
	if err := syscall.Unmount(proc, 0); err != nil {
		errs = multierror.Append(errs, fmt.Errorf("Failed to unmount proc (%v): %v", proc, err))
	}

	e.mounts = false
	return errs.ErrorOrNil()
}
func unmountPod(t *testing.T, ctx *testutils.RktRunCtx, uuid string, rmNetns bool) {
	podDir := filepath.Join(ctx.DataDir(), "pods", "run", uuid)
	stage1MntPath := filepath.Join(podDir, "stage1", "rootfs")
	stage2MntPath := filepath.Join(stage1MntPath, "opt", "stage2", "rkt-inspect", "rootfs")
	netnsPath := filepath.Join(podDir, "netns")

	podNetNSPathBytes, err := ioutil.ReadFile(netnsPath)
	if err != nil {
		t.Fatalf(`cannot read "netns" stage1: %v`, err)
	}
	podNetNSPath := string(podNetNSPathBytes)

	if err := syscall.Unmount(stage2MntPath, 0); err != nil {
		t.Fatalf("cannot umount stage2: %v", err)
	}
	if err := syscall.Unmount(stage1MntPath, 0); err != nil {
		t.Fatalf("cannot umount stage1: %v", err)
	}
	if err := syscall.Unmount(podNetNSPath, 0); err != nil {
		t.Fatalf("cannot umount pod netns: %v", err)
	}

	if rmNetns {
		_ = os.RemoveAll(podNetNSPath)
	}
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (d *AllocDir) unmountSpecialDirs(taskDir string) error {
	errs := new(multierror.Error)

	// Unmount dev.
	dev := filepath.Join(taskDir, "dev")
	if d.pathExists(dev) {
		if err := syscall.Unmount(dev, 0); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("Failed to unmount dev (%v): %v", dev, err))
		}
		if err := os.RemoveAll(dev); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("Failed to delete dev directory (%v): %v", dev, err))
		}
	}

	// Unmount proc.
	proc := filepath.Join(taskDir, "proc")
	if d.pathExists(proc) {
		if err := syscall.Unmount(proc, 0); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("Failed to unmount proc (%v): %v", proc, err))
		}
		if err := os.RemoveAll(proc); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("Failed to delete proc directory (%v): %v", proc, err))
		}
	}

	return errs.ErrorOrNil()
}
// deletePod cleans up files and resources associated with the pod.
// The pod must be under exclusive lock and be in either ExitedGarbage
// or Garbage state.
func deletePod(p *pod) {
	if !p.isExitedGarbage && !p.isGarbage {
		panic(fmt.Sprintf("logic error: deletePod called with non-garbage pod %q (status %q)", p.uuid, p.getState()))
	}

	if p.isExitedGarbage {
		s, err := store.NewStore(getDataDir())
		if err != nil {
			stderr("Cannot open store: %v", err)
			return
		}
		defer s.Close()

		stage1TreeStoreID, err := p.getStage1TreeStoreID()
		if err != nil {
			stderr("Error getting stage1 treeStoreID: %v", err)
			return
		}
		stage1RootFS := s.GetTreeStoreRootFS(stage1TreeStoreID)

		// execute stage1's GC
		if err := stage0.GC(p.path(), p.uuid, stage1RootFS, globalFlags.Debug); err != nil {
			stderr("Stage1 GC of pod %q failed: %v", p.uuid, err)
			return
		}

		if p.usesOverlay() {
			apps, err := p.getApps()
			if err != nil {
				stderr("Error retrieving app hashes from pod manifest: %v", err)
				return
			}
			for _, a := range apps {
				dest := filepath.Join(common.AppPath(p.path(), a.Name), "rootfs")
				if err := syscall.Unmount(dest, 0); err != nil {
					// The machine could have been rebooted and the mounts lost;
					// ignore "does not exist" and "not a mount point" errors.
					if err != syscall.ENOENT && err != syscall.EINVAL {
						stderr("Error unmounting app at %v: %v", dest, err)
					}
				}
			}

			s1 := filepath.Join(common.Stage1ImagePath(p.path()), "rootfs")
			if err := syscall.Unmount(s1, 0); err != nil {
				// The machine could have been rebooted and the mounts lost;
				// ignore "does not exist" and "not a mount point" errors.
				if err != syscall.ENOENT && err != syscall.EINVAL {
					stderr("Error unmounting stage1 at %v: %v", s1, err)
					return
				}
			}
		}
	}

	if err := os.RemoveAll(p.path()); err != nil {
		stderr("Unable to remove pod %q: %v", p.uuid, err)
	}
}
// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
	if count := d.ctr.Decrement(id); count > 0 {
		return nil
	}

	d.pathCacheLock.Lock()
	mountpoint, exists := d.pathCache[id]
	d.pathCacheLock.Unlock()

	if !exists {
		logrus.Debugf("Put on a non-mounted device %s", id)
		// but it might be still here
		if d.Exists(id) {
			mountpoint = path.Join(d.dir(id), "merged")
		}

		d.pathCacheLock.Lock()
		d.pathCache[id] = mountpoint
		d.pathCacheLock.Unlock()
	}

	if mounted, err := d.mounted(mountpoint); mounted || err != nil {
		if err = syscall.Unmount(mountpoint, 0); err != nil {
			logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
		}
		return err
	}
	return nil
}
// Teardown cleans up a produced Networking object.
func (n *Networking) Teardown(flavor string) {
	// Teardown everything in reverse order of setup.
	// This should be idempotent -- be tolerant of missing stuff.

	if flavor == "kvm" {
		n.kvmTeardown()
		return
	}

	if err := n.enterHostNS(); err != nil {
		log.Printf("Error switching to host netns: %v", err)
		return
	}

	if err := n.unforwardPorts(); err != nil {
		log.Printf("Error removing forwarded ports: %v", err)
	}

	n.teardownNets(n.nets)

	if err := syscall.Unmount(n.podNSPath(), 0); err != nil {
		// if already unmounted, umount(2) returns EINVAL
		if !os.IsNotExist(err) && err != syscall.EINVAL {
			log.Printf("Error unmounting %q: %v", n.podNSPath(), err)
		}
	}
}
func (mp *Mountpoint) Umount() error {
	if !mp.Mounted() {
		return errors.New("Mountpoint doesn't seem to be mounted")
	}
	if err := syscall.Unmount(mp.Root, 0); err != nil {
		return fmt.Errorf("Unmount syscall failed: %v", err)
	}
	if mp.Mounted() {
		return fmt.Errorf("Umount: Filesystem still mounted after calling umount(%v)", mp.Root)
	}
	// Even though we just unmounted the filesystem, AUFS will prevent deleting the mntpoint
	// for some time. We'll just keep retrying until it succeeds.
	for retries := 0; retries < 1000; retries++ {
		err := os.Remove(mp.Root)
		if err == nil {
			// rm mntpoint succeeded
			return nil
		}
		if os.IsNotExist(err) {
			// mntpoint doesn't exist anymore. Success.
			return nil
		}
		// fmt.Printf("(%v) Remove %v returned: %v\n", retries, mp.Root, err)
		time.Sleep(10 * time.Millisecond)
	}
	return fmt.Errorf("Umount: Failed to umount %v", mp.Root)
}
func pivotRoot(root string) error {
	// Bind mount root onto itself to satisfy the pivot_root(2) restriction:
	// "new_root and put_old must not be on the same filesystem as the current root".
	if err := syscall.Mount(root, root, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
		return fmt.Errorf("Mount rootfs to itself error: %v", err)
	}

	// create rootfs/.pivot_root as the path for old_root
	pivotDir := filepath.Join(root, ".pivot_root")
	if err := os.Mkdir(pivotDir, 0777); err != nil {
		return err
	}
	logrus.Debugf("Pivot root dir: %s", pivotDir)
	logrus.Debugf("Pivot root to %s", root)

	// pivot_root to rootfs; old_root is now mounted at rootfs/.pivot_root
	// and its mounts are still visible in `mount`
	if err := syscall.PivotRoot(root, pivotDir); err != nil {
		return fmt.Errorf("pivot_root %v", err)
	}

	// change the working directory to /, as the man page recommends
	if err := syscall.Chdir("/"); err != nil {
		return fmt.Errorf("chdir / %v", err)
	}

	// the path to the pivot dir has now changed, so update it
	pivotDir = filepath.Join("/", ".pivot_root")

	// umount rootfs/.pivot_root (which is now /.pivot_root) with all submounts,
	// leaving only the mounts we made ourselves visible in `mount`
	if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
		return fmt.Errorf("unmount pivot_root dir %v", err)
	}

	// remove the temporary directory
	return os.Remove(pivotDir)
}
// MountGC removes mounts from pods that couldn't be GCed cleanly.
func MountGC(path, uuid string) error {
	mnts, err := mountinfo.ParseMounts(0)
	if err != nil {
		return errwrap.Wrap(fmt.Errorf("error getting mounts for pod %s from mountinfo", uuid), err)
	}

	mnts = mnts.Filter(mountinfo.HasPrefix(path))

	for i := len(mnts) - 1; i >= 0; i-- {
		mnt := mnts[i]
		if mnt.NeedsRemountPrivate() {
			if err := syscall.Mount("", mnt.MountPoint, "", syscall.MS_PRIVATE, ""); err != nil {
				return errwrap.Wrap(fmt.Errorf("could not remount at %v", mnt.MountPoint), err)
			}
		}
	}

	for _, mnt := range mnts {
		if err := syscall.Unmount(mnt.MountPoint, 0); err != nil {
			if err != syscall.ENOENT && err != syscall.EINVAL {
				return errwrap.Wrap(fmt.Errorf("could not unmount %v", mnt.MountPoint), err)
			}
		}
	}
	return nil
}
func switchRoot(rootfs, subdir string, rmUsr bool) error {
	if err := syscall.Unmount(config.OEM, 0); err != nil {
		log.Debugf("Not umounting OEM: %v", err)
	}

	if subdir != "" {
		fullRootfs := path.Join(rootfs, subdir)
		if _, err := os.Stat(fullRootfs); os.IsNotExist(err) {
			if err := os.MkdirAll(fullRootfs, 0755); err != nil {
				log.Errorf("Failed to create directory %s: %v", fullRootfs, err)
				return err
			}
		}

		log.Debugf("Bind mounting %s to %s", fullRootfs, rootfs)
		if err := syscall.Mount(fullRootfs, rootfs, "", syscall.MS_BIND, ""); err != nil {
			log.Errorf("Failed to bind mount subdir for %s: %v", fullRootfs, err)
			return err
		}
	}

	for _, i := range []string{"/dev", "/sys", "/proc", "/run"} {
		log.Debugf("Moving mount %s to %s", i, path.Join(rootfs, i))
		if err := os.MkdirAll(path.Join(rootfs, i), 0755); err != nil {
			return err
		}
		if err := syscall.Mount(i, path.Join(rootfs, i), "", syscall.MS_MOVE, ""); err != nil {
			return err
		}
	}

	if err := copyMoveRoot(rootfs, rmUsr); err != nil {
		return err
	}

	log.Debugf("chdir %s", rootfs)
	if err := syscall.Chdir(rootfs); err != nil {
		return err
	}

	log.Debugf("mount MS_MOVE %s", rootfs)
	if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
		return err
	}

	log.Debug("chroot .")
	if err := syscall.Chroot("."); err != nil {
		return err
	}

	log.Debug("chdir /")
	if err := syscall.Chdir("/"); err != nil {
		return err
	}

	log.Debugf("Successfully moved to new root at %s", path.Join(rootfs, subdir))
	os.Unsetenv("DOCKER_RAMDISK")
	return nil
}
// MountGC removes mounts from pods that couldn't be GCed cleanly.
func MountGC(path, uuid string) error {
	mi, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return err
	}
	defer mi.Close()

	mnts, err := getMountsForPrefix(path, mi)
	if err != nil {
		return errwrap.Wrap(fmt.Errorf("error getting mounts for pod %s from mountinfo", uuid), err)
	}

	for i := len(mnts) - 1; i >= 0; i-- {
		mnt := mnts[i]
		if needsRemountPrivate(mnt) {
			if err := syscall.Mount("", mnt.mountPoint, "", syscall.MS_PRIVATE, ""); err != nil {
				return errwrap.Wrap(fmt.Errorf("could not remount at %v", mnt.mountPoint), err)
			}
		}
	}

	for _, mnt := range mnts {
		if err := syscall.Unmount(mnt.mountPoint, 0); err != nil {
			if err != syscall.ENOENT && err != syscall.EINVAL {
				return errwrap.Wrap(fmt.Errorf("could not unmount %v", mnt.mountPoint), err)
			}
		}
	}
	return nil
}
func (container *Container) unmountVolumes(forceSyscall bool) error {
	var volumeMounts []mountPoint

	for _, mntPoint := range container.MountPoints {
		dest, err := container.GetResourcePath(mntPoint.Destination)
		if err != nil {
			return err
		}
		volumeMounts = append(volumeMounts, mountPoint{Destination: dest, Volume: mntPoint.Volume})
	}

	for _, mnt := range container.networkMounts() {
		dest, err := container.GetResourcePath(mnt.Destination)
		if err != nil {
			return err
		}
		volumeMounts = append(volumeMounts, mountPoint{Destination: dest})
	}

	for _, volumeMount := range volumeMounts {
		if forceSyscall {
			syscall.Unmount(volumeMount.Destination, 0)
		}

		if volumeMount.Volume != nil {
			if err := volumeMount.Volume.Unmount(); err != nil {
				return err
			}
		}
	}

	return nil
}
func (s *storageZfs) zfsDestroy(path string) error {
	mountpoint, err := s.zfsGet(path, "mountpoint")
	if err != nil {
		return err
	}

	if mountpoint != "none" && shared.IsMountPoint(mountpoint) {
		err := syscall.Unmount(mountpoint, syscall.MNT_DETACH)
		if err != nil {
			s.log.Error("umount failed", log.Ctx{"err": err})
			return err
		}
	}

	// Due to open fds or kernel refs, this may fail for a bit, give it 10s
	output, err := tryExec(
		"zfs",
		"destroy",
		"-r",
		fmt.Sprintf("%s/%s", s.zfsPool, path))
	if err != nil {
		s.log.Error("zfs destroy failed", log.Ctx{"output": string(output)})
		return fmt.Errorf("Failed to destroy ZFS filesystem: %s", output)
	}

	return nil
}
// Unmount device at mountpoint or decrement refcnt. If device has no
// mountpoints left after this operation, it is removed from the matrix.
// ErrEnoent is returned if the device or mountpoint for the device is not found.
func (m *Mounter) Unmount(device, path string) error {
	m.Lock()
	defer m.Unlock()

	info, ok := m.mounts[device]
	if !ok {
		return ErrEnoent
	}
	for i, p := range info.Mountpoint {
		if p.Path == path {
			p.ref--
			// Unmount only if refcnt is 0
			if p.ref == 0 {
				err := syscall.Unmount(path, 0)
				if err != nil {
					return err
				}
				// Blow away this mountpoint.
				info.Mountpoint[i] = info.Mountpoint[len(info.Mountpoint)-1]
				info.Mountpoint = info.Mountpoint[0 : len(info.Mountpoint)-1]
				// If the device has no more mountpoints, remove it from the map
				if len(info.Mountpoint) == 0 {
					delete(m.mounts, device)
				}
			}
			return nil
		}
	}
	return ErrEnoent
}
func (n *network) cleanupStaleSandboxes() {
	filepath.Walk(filepath.Dir(osl.GenerateKey("walk")),
		func(path string, info os.FileInfo, err error) error {
			_, fname := filepath.Split(path)
			pList := strings.Split(fname, "-")
			if len(pList) <= 1 {
				return nil
			}
			pattern := pList[1]
			if strings.Contains(n.id, pattern) {
				// Delete all vnis
				deleteVxlanByVNI(path, 0)
				syscall.Unmount(path, syscall.MNT_DETACH)
				os.Remove(path)

				// Now that we have destroyed this
				// sandbox, remove all references to
				// it in vniTbl so that we don't
				// inadvertently destroy the sandbox
				// created in this life.
				networkMu.Lock()
				for vni, tblPath := range vniTbl {
					if tblPath == path {
						delete(vniTbl, vni)
					}
				}
				networkMu.Unlock()
			}
			return nil
		})
}
func (devices *RbdSet) UnmountDevice(hash string) error {
	info, err := devices.lookupDevice(hash)
	if err != nil {
		return err
	}

	info.lock.Lock()
	defer info.lock.Unlock()

	if info.mountCount == 0 {
		return fmt.Errorf("UnmountDevice: device not-mounted id %s", hash)
	}

	info.mountCount--
	if info.mountCount > 0 {
		return nil
	}

	log.Debugf("[rbdset] Unmount(%s)", info.mountPath)
	if err := syscall.Unmount(info.mountPath, 0); err != nil {
		return err
	}
	log.Debugf("[rbdset] Unmount done")

	info.mountPath = ""

	if err := devices.unmapImageFromRbdDevice(info); err != nil {
		return err
	}

	return nil
}
func (b *Provider) DestroyVolume(vol volume.Volume) error {
	zvol, err := b.owns(vol)
	if err != nil {
		return err
	}
	if vol.IsSnapshot() {
		if err := syscall.Unmount(vol.Location(), 0); err != nil {
			return err
		}
		os.Remove(vol.Location())
	}
	if err := zvol.dataset.Destroy(zfs.DestroyForceUmount); err != nil {
		// Sometimes ZFS will claim to be busy, as if files were still open,
		// even when all container processes are dead. This usually goes away,
		// so retry a few times.
		for i := 0; i < 5 && err != nil && IsDatasetBusyError(err); i++ {
			time.Sleep(1 * time.Second)
			err = zvol.dataset.Destroy(zfs.DestroyForceUmount)
		}
		if err != nil {
			return err
		}
	}
	os.Remove(zvol.basemount)
	delete(b.volumes, vol.Info().ID)
	return nil
}
// Unmount unmounts a Ceph volume, removes the mount directory and unmaps
// the RBD device.
func (cv *CephVolume) Unmount() error {
	cd := cv.driver

	// formatted image name
	// Directory to mount the volume
	dataStoreDir := filepath.Join(cd.mountBase, cd.PoolName)
	volumeDir := filepath.Join(dataStoreDir, cv.VolumeName)

	// Unmount the RBD.
	//
	// MNT_DETACH will make this mountpoint unavailable to new open file requests (at
	// least until it is remounted) but persist for existing open requests. This
	// seems to work well with containers.
	//
	// The checks for ENOENT and EBUSY below are safeguards to prevent error
	// modes where multiple containers will be affecting a single volume.
	if err := syscall.Unmount(volumeDir, syscall.MNT_DETACH); err != nil && err != syscall.ENOENT {
		return fmt.Errorf("Failed to unmount %q: %v", volumeDir, err)
	}

	// Remove the mounted directory
	if err := os.Remove(volumeDir); err != nil && !os.IsNotExist(err) {
		if err, ok := err.(*os.PathError); ok && err.Err == syscall.EBUSY {
			return nil
		}
		return fmt.Errorf("error removing %q directory: %v", volumeDir, err)
	}

	if err := cv.unmapImage(); err != os.ErrNotExist {
		return err
	}

	return nil
}
func pivotRoot(rootfs, pivotBaseDir string) error {
	if pivotBaseDir == "" {
		pivotBaseDir = "/"
	}
	tmpDir := filepath.Join(rootfs, pivotBaseDir)
	if err := os.MkdirAll(tmpDir, 0755); err != nil {
		return fmt.Errorf("can't create tmp dir %s, error %v", tmpDir, err)
	}
	pivotDir, err := ioutil.TempDir(tmpDir, ".pivot_root")
	if err != nil {
		return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err)
	}
	if err := syscall.PivotRoot(rootfs, pivotDir); err != nil {
		return fmt.Errorf("pivot_root %s", err)
	}
	if err := syscall.Chdir("/"); err != nil {
		return fmt.Errorf("chdir / %s", err)
	}

	// path to pivot dir now changed, update
	pivotDir = filepath.Join(pivotBaseDir, filepath.Base(pivotDir))

	// Make pivotDir rprivate to make sure any of the unmounts don't
	// propagate to parent.
	if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
		return err
	}

	if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
		return fmt.Errorf("unmount pivot_root dir %s", err)
	}
	return os.Remove(pivotDir)
}
func (d *Driver) Put(id string) error {
	// Protect the d.active from concurrent access
	d.Lock()
	defer d.Unlock()

	mount := d.active[id]
	if mount == nil {
		log.Debugf("Put on a non-mounted device %s", id)
		return nil
	}

	mount.count--
	if mount.count > 0 {
		return nil
	}

	defer delete(d.active, id)
	if mount.mounted {
		err := syscall.Unmount(mount.path, 0)
		if err != nil {
			log.Debugf("Failed to unmount %s overlay: %v", id, err)
		}
		return err
	}
	return nil
}
func pivotRoot(rootfs, pivotBaseDir string) error {
	if pivotBaseDir == "" {
		pivotBaseDir = "/"
	}
	tmpDir := filepath.Join(rootfs, pivotBaseDir)
	if err := os.MkdirAll(tmpDir, 0755); err != nil {
		return fmt.Errorf("can't create tmp dir %s, error %v", tmpDir, err)
	}
	pivotDir, err := ioutil.TempDir(tmpDir, ".pivot_root")
	if err != nil {
		return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err)
	}
	if err := syscall.PivotRoot(rootfs, pivotDir); err != nil {
		return fmt.Errorf("pivot_root %s", err)
	}
	if err := syscall.Chdir("/"); err != nil {
		return fmt.Errorf("chdir / %s", err)
	}

	// path to pivot dir now changed, update
	pivotDir = filepath.Join(pivotBaseDir, filepath.Base(pivotDir))

	if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
		return fmt.Errorf("unmount pivot_root dir %s", err)
	}
	return os.Remove(pivotDir)
}
func Unmount(target string) error {
	_, err := os.Stat(target)
	if err != nil {
		return err
	}

	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
		utils.Debugf("[warning]: couldn't run auplink before unmount: %s", err)
	}
	if err := syscall.Unmount(target, 0); err != nil {
		return err
	}

	// Even though we just unmounted the filesystem, AUFS will prevent deleting the mntpoint
	// for some time. We'll just keep retrying until it succeeds.
	for retries := 0; retries < 1000; retries++ {
		err := os.Remove(target)
		if err == nil {
			// rm mntpoint succeeded
			return nil
		}
		if os.IsNotExist(err) {
			// mntpoint doesn't exist anymore. Success.
			return nil
		}
		// fmt.Printf("(%v) Remove %v returned: %v\n", retries, target, err)
		time.Sleep(10 * time.Millisecond)
	}
	return fmt.Errorf("Umount: Failed to umount %v", target)
}
func (clnt *client) setExited(containerID string) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)

	var exitCode uint32
	if event, ok := clnt.remote.pastEvents[containerID]; ok {
		exitCode = event.Status
		delete(clnt.remote.pastEvents, containerID)
	}

	err := clnt.backend.StateChanged(containerID, StateInfo{
		State:    StateExit,
		ExitCode: exitCode,
	})

	// Unmount and delete the bundle folder
	if mts, err := mount.GetMounts(); err == nil {
		for _, mnt := range mts {
			if strings.HasSuffix(mnt.Mountpoint, containerID+"/rootfs") {
				if err := syscall.Unmount(mnt.Mountpoint, syscall.MNT_DETACH); err == nil {
					os.RemoveAll(strings.TrimSuffix(mnt.Mountpoint, "/rootfs"))
				}
				break
			}
		}
	}

	return err
}
// Unmount device at mountpoint or decrement refcnt. If device has no
// mountpoints left after this operation, it is removed from the matrix.
// ErrEnoent is returned if the device or mountpoint for the device is not found.
func (m *Mounter) Unmount(device, path string) error {
	m.Lock()
	info, ok := m.mounts[device]
	if !ok {
		m.Unlock()
		return ErrEnoent
	}
	m.Unlock()

	info.Lock()
	defer info.Unlock()

	for i, p := range info.Mountpoint {
		if p.Path == path {
			p.ref--
			// Unmount only if refcnt is 0
			if p.ref == 0 {
				err := syscall.Unmount(path, 0)
				if err != nil {
					return err
				}
				if _, pathExists := m.paths[path]; pathExists {
					delete(m.paths, path)
				} else {
					dlog.Warnf("Path %q for device %q does not exist in pathMap", path, device)
				}
				// Blow away this mountpoint.
				info.Mountpoint[i] = info.Mountpoint[len(info.Mountpoint)-1]
				info.Mountpoint = info.Mountpoint[0 : len(info.Mountpoint)-1]
				m.maybeRemoveDevice(device)
			}
			return nil
		}
	}
	return ErrEnoent
}