// start a long-running container so we have time to inspect execin processes
func startLongRunningContainer(config *libcontainer.Config) (*exec.Cmd, string, chan error) {
	containerErr := make(chan error, 1)
	containerCmd := &exec.Cmd{}
	var statePath string

	createCmd := func(container *libcontainer.Config, console, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
		containerCmd = namespaces.DefaultCreateCommand(container, console, dataPath, init, pipe, args)
		statePath = dataPath
		return containerCmd
	}

	var containerStart sync.WaitGroup
	containerStart.Add(1)
	go func() {
		buffers := newStdBuffers()
		_, err := namespaces.Exec(config,
			buffers.Stdin, buffers.Stdout, buffers.Stderr,
			"", config.RootFs, []string{"sleep", "10"},
			createCmd, containerStart.Done)
		containerErr <- err
	}()
	containerStart.Wait()
	return containerCmd, statePath, containerErr
}
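// Sketch of how the helper above might be driven from a test. Only the three
// return values come from startLongRunningContainer itself; the inspection
// step in the middle is an assumption for illustration.
func exampleUseLongRunningContainer(t *testing.T, config *libcontainer.Config) {
	cmd, statePath, errc := startLongRunningContainer(config)
	if cmd == nil || statePath == "" {
		t.Fatal("container did not start")
	}

	// ... inspect processes joined into the container via statePath here ...

	// wait for the "sleep 10" process to finish and surface any Exec error
	if err := <-errc; err != nil {
		t.Fatal(err)
	}
}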
// startContainer starts the container. Returns the exit status or -1 and an
// error.
//
// Signals sent to the current process will be forwarded to the container.
func startContainer(container *libcontainer.Config, term namespaces.Terminal, dataPath string, args []string) (int, error) {
	var (
		cmd  *exec.Cmd
		sigc = make(chan os.Signal, 10)
	)

	signal.Notify(sigc)

	createCommand := func(container *libcontainer.Config, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
		cmd = namespaces.DefaultCreateCommand(container, console, rootfs, dataPath, init, pipe, args)
		if logPath != "" {
			cmd.Env = append(cmd.Env, fmt.Sprintf("log=%s", logPath))
		}
		return cmd
	}

	startCallback := func() {
		go func() {
			for sig := range sigc {
				cmd.Process.Signal(sig)
			}
		}()
	}

	return namespaces.Exec(container, term, "", dataPath, args, createCommand, startCallback)
}
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return -1, err
	}

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.Cmd,
	}
	d.Unlock()

	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.Entrypoint}, c.Arguments...)
	)

	if err := d.createContainerRoot(c.ID); err != nil {
		return -1, err
	}
	defer d.removeContainerRoot(c.ID)

	if err := d.writeContainerFile(container, c.ID); err != nil {
		return -1, err
	}

	term := getTerminal(c, pipes)

	return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
		// we need to join the rootfs because namespaces will setup the rootfs and chroot
		initPath := filepath.Join(c.Rootfs, c.InitPath)

		c.Path = d.initPath
		c.Args = append([]string{
			initPath,
			"-driver", DriverName,
			"-console", console,
			"-pipe", "3",
			"-root", filepath.Join(d.root, c.ID),
			"--",
		}, args...)

		// set this to nil so that when we set the clone flags anything else is reset
		c.SysProcAttr = nil
		system.SetCloneFlags(&c.Cmd, uintptr(namespaces.GetNamespaceFlags(container.Namespaces)))
		c.ExtraFiles = []*os.File{child}

		c.Env = container.Env
		c.Dir = c.Rootfs

		return &c.Cmd
	}, func() {
		if startCallback != nil {
			c.ContainerPid = c.Process.Pid
			startCallback(c)
		}
	})
}
// runContainer runs the container with the specific config and arguments
//
// buffers are returned containing the STDOUT and STDERR output for the run
// along with the exit code and any Go error
func runContainer(config *libcontainer.Config, console string, args ...string) (buffers *stdBuffers, exitCode int, err error) {
	if err := writeConfig(config); err != nil {
		return nil, -1, err
	}

	buffers = newStdBuffers()
	exitCode, err = namespaces.Exec(config,
		buffers.Stdin, buffers.Stdout, buffers.Stderr,
		console, config.RootFs, args,
		namespaces.DefaultCreateCommand, nil)
	return
}
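// Sketch of a test that uses runContainer above. newTemplateConfig is an
// assumed helper that returns a populated *libcontainer.Config, and the sketch
// assumes stdBuffers wraps bytes.Buffer values so Stdout has a String method.
func TestEchoHello(t *testing.T) {
	config := newTemplateConfig() // assumed helper, not shown in this snippet
	buffers, exitCode, err := runContainer(config, "", "echo", "hello")
	if err != nil {
		t.Fatal(err)
	}
	if exitCode != 0 {
		t.Fatalf("unexpected exit code %d", exitCode)
	}
	if !strings.Contains(buffers.Stdout.String(), "hello") {
		t.Fatalf("unexpected stdout %q", buffers.Stdout.String())
	}
}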
// startContainer starts the container. Returns the exit status or -1 and an
// error.
//
// Signals sent to the current process will be forwarded to the container.
func startContainer(container *libcontainer.Config, dataPath string, args []string) (int, error) {
	var (
		cmd  *exec.Cmd
		sigc = make(chan os.Signal, 10)
	)

	signal.Notify(sigc)

	createCommand := func(container *libcontainer.Config, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
		cmd = namespaces.DefaultCreateCommand(container, console, rootfs, dataPath, init, pipe, args)
		if logPath != "" {
			cmd.Env = append(cmd.Env, fmt.Sprintf("log=%s", logPath))
		}
		return cmd
	}

	var (
		master  *os.File
		console string
		err     error

		stdin  = os.Stdin
		stdout = os.Stdout
		stderr = os.Stderr
	)

	if container.Tty {
		stdin = nil
		stdout = nil
		stderr = nil

		master, console, err = consolepkg.CreateMasterAndConsole()
		if err != nil {
			return -1, err
		}

		go io.Copy(master, os.Stdin)
		go io.Copy(os.Stdout, master)

		state, err := term.SetRawTerminal(os.Stdin.Fd())
		if err != nil {
			return -1, err
		}
		defer term.RestoreTerminal(os.Stdin.Fd(), state)
	}

	startCallback := func() {
		go func() {
			resizeTty(master)

			for sig := range sigc {
				switch sig {
				case syscall.SIGWINCH:
					resizeTty(master)
				default:
					cmd.Process.Signal(sig)
				}
			}
		}()
	}

	return namespaces.Exec(container, stdin, stdout, stderr, console, "", dataPath, args, createCommand, startCallback)
}
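// resizeTty is referenced above but not shown here. A minimal sketch, assuming
// the term package exposes GetWinsize/SetWinsize helpers: copy the size of the
// calling terminal onto the container's pty master so the container TTY tracks
// the local window.
func resizeTty(master *os.File) {
	if master == nil {
		return
	}
	ws, err := term.GetWinsize(os.Stdin.Fd())
	if err != nil {
		return
	}
	term.SetWinsize(master.Fd(), ws)
}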
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return -1, err
	}

	var term execdriver.Terminal

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return -1, err
	}
	c.ProcessConfig.Terminal = term

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)

	if err := d.createContainerRoot(c.ID); err != nil {
		return -1, err
	}
	defer d.removeContainerRoot(c.ID)

	if err := d.writeContainerFile(container, c.ID); err != nil {
		return -1, err
	}

	return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
		c.ProcessConfig.Path = d.initPath
		c.ProcessConfig.Args = append([]string{
			DriverName,
			"-console", console,
			"-pipe", "3",
			"-root", filepath.Join(d.root, c.ID),
			"--",
		}, args...)

		// use a fresh SysProcAttr so that only the clone flags set here apply
		c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
			Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
		}
		c.ProcessConfig.ExtraFiles = []*os.File{child}

		c.ProcessConfig.Env = container.Env
		c.ProcessConfig.Dir = c.Rootfs

		return &c.ProcessConfig.Cmd
	}, func() {
		if startCallback != nil {
			c.ContainerPid = c.ProcessConfig.Process.Pid
			startCallback(&c.ProcessConfig, c.ContainerPid)
		}
	})
}
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var term execdriver.Terminal

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)

	if err := d.createContainerRoot(c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer d.cleanContainer(c.ID)

	if err := d.writeContainerFile(container, c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	execOutputChan := make(chan execOutput, 1)
	waitForStart := make(chan struct{})

	go func() {
		exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
			c.ProcessConfig.Path = d.initPath
			c.ProcessConfig.Args = append([]string{
				DriverName,
				"-console", console,
				"-pipe", "3",
				"-root", filepath.Join(d.root, c.ID),
				"--",
			}, args...)

			// use a fresh SysProcAttr so that only the clone flags set here apply
			c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
				Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
			}
			c.ProcessConfig.ExtraFiles = []*os.File{child}

			c.ProcessConfig.Env = container.Env
			c.ProcessConfig.Dir = container.RootFs

			return &c.ProcessConfig.Cmd
		}, func() {
			close(waitForStart)
			if startCallback != nil {
				c.ContainerPid = c.ProcessConfig.Process.Pid
				startCallback(&c.ProcessConfig, c.ContainerPid)
			}
		})
		execOutputChan <- execOutput{exitCode, err}
	}()

	select {
	case execOutput := <-execOutputChan:
		return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
	case <-waitForStart:
		break
	}

	oomKill := false
	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
	if err == nil {
		oomKillNotification, err := libcontainer.NotifyOnOOM(state)
		if err == nil {
			_, oomKill = <-oomKillNotification
		} else {
			log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
		}
	} else {
		log.Warnf("Failed to get container state, oom notify will not work: %s", err)
	}

	// wait for the container to exit.
	execOutput := <-execOutputChan

	return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
}
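// execOutput carries the result of namespaces.Exec out of the goroutine above.
// Its definition is not shown in this snippet, but from the usage it is
// presumably a small struct along these lines:
type execOutput struct {
	exitCode int
	err      error
}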