// Mountpoints should be private to the container
func remountPrivate(mountPoint string) error {
	mounted, err := mount.Mounted(mountPoint)
	if err != nil {
		return err
	}
	if !mounted {
		if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
			return err
		}
	}
	return mount.ForceMount("", mountPoint, "none", "private")
}
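// Illustration only: roughly what remountPrivate resolves to at the mount(2)
// level, written with raw syscalls instead of the mount package. This is a
// minimal sketch assuming Linux; the target path is a made-up example. The
// self bind mount turns the directory into a mount point of its own, and the
// MS_PRIVATE remount stops mount/unmount events beneath it from propagating.
package main

import (
	"log"
	"syscall"
)

func main() {
	target := "/var/lib/example" // hypothetical mount point

	// Roughly mount.Mount(target, target, "none", "bind,rw").
	if err := syscall.Mount(target, target, "", syscall.MS_BIND, ""); err != nil {
		log.Fatalf("bind %s onto itself: %v", target, err)
	}
	// Roughly mount.ForceMount("", target, "none", "private").
	if err := syscall.Mount("", target, "", syscall.MS_PRIVATE, ""); err != nil {
		log.Fatalf("mark %s private: %v", target, err)
	}
}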
func initMounts(home, tmpDir string) {
	mounts := []struct{ src, tgt string }{
		{"/opt/basejail", "/jail"},
		{"/dev", "/jail/dev"},
		{"/dev/pts", "/jail/dev/pts"},
		{"/proc", "/jail/proc"},
		{"/var/spool/cron/crontabs", "/jail/var/spool/cron/crontabs"},
		{"/var/lib/extrausers", "/jail/var/lib/extrausers"},
		{home, "/jail/home"},
		{tmpDir, "/jail/tmp"},
	}
	for _, m := range mounts {
		// Note the use of recursive bind mounts.
		// We could avoid some mounts by just arranging that /opt/basejail
		// already has most of the mounts.
		if err := mount.Mount(m.src, m.tgt, "", "rbind"); err != nil {
			log.Fatalf("Failed to mount %s -> %s: %q", m.src, m.tgt, err)
		}
	}
}
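// Illustration only: a hedged sketch of what the "rbind" option used above
// means on Linux, namely a recursive bind mount (MS_BIND|MS_REC) that carries
// existing submounts of the source, such as /dev/pts under /dev, into the
// target tree. The paths match the initMounts table.
package main

import (
	"log"
	"syscall"
)

func main() {
	if err := syscall.Mount("/dev", "/jail/dev", "", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
		log.Fatalf("rbind /dev -> /jail/dev: %v", err)
	}
}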
func (container *Container) Start() (err error) {
	container.Lock()
	defer container.Unlock()

	if container.State.IsRunning() {
		return fmt.Errorf("The container %s is already running.", container.ID)
	}

	defer func() {
		if err != nil {
			container.cleanup()
		}
	}()

	if err := container.Mount(); err != nil {
		return err
	}

	if container.runtime.networkManager.disabled {
		container.Config.NetworkDisabled = true
		container.buildHostnameAndHostsFiles("127.0.1.1")
	} else {
		if err := container.allocateNetwork(); err != nil {
			return err
		}
		container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
	}

	// Make sure the config is compatible with the current kernel
	if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit {
		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		container.Config.Memory = 0
	}
	if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit {
		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		container.Config.MemorySwap = -1
	}

	if container.runtime.sysInfo.IPv4ForwardingDisabled {
		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
	}

	if container.Volumes == nil || len(container.Volumes) == 0 {
		container.Volumes = make(map[string]string)
		container.VolumesRW = make(map[string]bool)
	}

	// Apply volumes from another container if requested
	if err := container.applyExternalVolumes(); err != nil {
		return err
	}

	if err := container.createVolumes(); err != nil {
		return err
	}

	// Setup environment
	env := []string{
		"HOME=/",
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"HOSTNAME=" + container.Config.Hostname,
	}

	if container.Config.Tty {
		env = append(env, "TERM=xterm")
	}

	// Init any links between the parent and children
	runtime := container.runtime

	children, err := runtime.Children(container.Name)
	if err != nil {
		return err
	}

	if len(children) > 0 {
		container.activeLinks = make(map[string]*Link, len(children))

		// If we encounter an error make sure that we rollback any network
		// config and ip table changes
		rollback := func() {
			for _, link := range container.activeLinks {
				link.Disable()
			}
			container.activeLinks = nil
		}

		for p, child := range children {
			link, err := NewLink(container, child, p, runtime.networkManager.bridgeIface)
			if err != nil {
				rollback()
				return err
			}

			container.activeLinks[link.Alias()] = link
			if err := link.Enable(); err != nil {
				rollback()
				return err
			}

			for _, envVar := range link.ToEnv() {
				env = append(env, envVar)
			}
		}
	}

	for _, elem := range container.Config.Env {
		env = append(env, elem)
	}

	if err := container.generateEnvConfig(env); err != nil {
		return err
	}

	root := container.RootfsPath()
	if container.Config.WorkingDir != "" {
		container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
		if err := os.MkdirAll(path.Join(root, container.Config.WorkingDir), 0755); err != nil {
			return err
		}
	}

	envPath, err := container.EnvConfigPath()
	if err != nil {
		return err
	}

	// Mount docker specific files into the containers root fs
	if err := mount.Mount(runtime.sysInitPath, path.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil {
		return err
	}
	if err := mount.Mount(envPath, path.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil {
		return err
	}
	if err := mount.Mount(container.ResolvConfPath, path.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil {
		return err
	}

	if container.HostnamePath != "" && container.HostsPath != "" {
		if err := mount.Mount(container.HostnamePath, path.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil {
			return err
		}
		if err := mount.Mount(container.HostsPath, path.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil {
			return err
		}
	}

	// Mount user specified volumes
	for r, v := range container.Volumes {
		mountAs := "ro"
		if container.VolumesRW[r] {
			mountAs = "rw"
		}

		r = path.Join(root, r)
		if p, err := utils.FollowSymlinkInScope(r, root); err != nil {
			return err
		} else {
			r = p
		}

		if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil {
			return err
		}
	}

	populateCommand(container)

	// Setup logging of stdout and stderr to disk
	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
		return err
	}
	if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
		return err
	}

	container.waitLock = make(chan struct{})

	// Setting up pipes and/or a pty
	var setup func() error
	if container.Config.Tty {
		setup = container.setupPty
	} else {
		setup = container.setupStd
	}
	if err := setup(); err != nil {
		return err
	}

	callbackLock := make(chan struct{})
	callback := func(command *execdriver.Command) {
		container.State.SetRunning(command.Pid())
		if command.Tty {
			// The callback is called after the process Start()
			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
			// which we close here.
			if c, ok := command.Stdout.(io.Closer); ok {
				c.Close()
			}
		}
		if err := container.ToDisk(); err != nil {
			utils.Debugf("%s", err)
		}
		close(callbackLock)
	}

	// We use a callback here instead of a goroutine and a chan for
	// synchronization purposes
	cErr := utils.Go(func() error { return container.monitor(callback) })

	// Start should not return until the process is actually running
	select {
	case <-callbackLock:
	case err := <-cErr:
		return err
	}

	return nil
}
func mountVolumesForContainer(container *Container, envPath string) error {
	// Setup the root fs as a bind mount of the base fs
	var (
		root    = container.RootfsPath()
		runtime = container.runtime
	)
	if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) {
		return err
	}

	// Create a bind mount of the base fs as a place where we can add mounts
	// without affecting the ability to access the base fs
	if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil {
		return err
	}

	// Make sure the root fs is private so the mounts here don't propagate to basefs
	if err := mount.ForceMount(root, root, "none", "private"); err != nil {
		return err
	}

	// Mount docker specific files into the containers root fs
	if err := mount.Mount(runtime.sysInitPath, filepath.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil {
		return err
	}
	if err := mount.Mount(envPath, filepath.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil {
		return err
	}
	if err := mount.Mount(container.ResolvConfPath, filepath.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil {
		return err
	}

	if container.HostnamePath != "" && container.HostsPath != "" {
		if err := mount.Mount(container.HostnamePath, filepath.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil {
			return err
		}
		if err := mount.Mount(container.HostsPath, filepath.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil {
			return err
		}
	}

	// Mount user specified volumes
	for r, v := range container.Volumes {
		mountAs := "ro"
		if container.VolumesRW[r] {
			mountAs = "rw"
		}

		r = filepath.Join(root, r)
		if p, err := utils.FollowSymlinkInScope(r, root); err != nil {
			return err
		} else {
			r = p
		}

		if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil {
			return err
		}
	}
	return nil
}
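// Illustration only: a rough sketch of the job utils.FollowSymlinkInScope does
// in the volume loop above, resolving symlinks in the mount target and
// rejecting any path that escapes the container root. filepath.EvalSymlinks is
// a simplification (it requires the path to exist, which the real helper does
// not), and resolveInScope plus the example paths are hypothetical.
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func resolveInScope(p, root string) (string, error) {
	resolved, err := filepath.EvalSymlinks(p)
	if err != nil {
		return "", err
	}
	// Refuse anything that resolved outside the container root.
	if resolved != root && !strings.HasPrefix(resolved, root+string(filepath.Separator)) {
		return "", fmt.Errorf("%s resolves outside of %s", p, root)
	}
	return resolved, nil
}

func main() {
	// Example call with made-up paths; a resolution that lands outside root
	// (or fails to resolve at all) comes back as an error.
	p, err := resolveInScope("/var/lib/example/rootfs/data", "/var/lib/example/rootfs")
	fmt.Println(p, err)
}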