Example #1
File: exec.go Project: baoruxing/docker
// TODO(vishh): Add support for running in privileged mode and running as a different user.
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}
	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
	if err != nil {
		return -1, fmt.Errorf("State unavailable for container with ID %s. The container may have been cleaned up already. Error: %s", c.ID, err)
	}

	var term execdriver.Terminal

	if processConfig.Tty {
		term, err = NewTtyConsole(processConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(processConfig, pipes)
	}
	if err != nil {
		return -1, err
	}

	processConfig.Terminal = term

	args := append([]string{processConfig.Entrypoint}, processConfig.Arguments...)

	return namespaces.ExecIn(active.container, state, args, os.Args[0], "exec", processConfig.Stdin, processConfig.Stdout, processConfig.Stderr, processConfig.Console,
		func(cmd *exec.Cmd) {
			if startCallback != nil {
				startCallback(&c.ProcessConfig, cmd.Process.Pid)
			}
		})
}
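
The startCallback hook above hands the caller the new PID as soon as the process is running, while Exec itself blocks until the exit code is known. The same start-then-wait shape can be reproduced with plain os/exec; a self-contained sketch under that assumption (runWithCallback is an illustrative name, not part of the driver):

package main

import (
	"fmt"
	"os/exec"
)

// runWithCallback mirrors the driver's pattern: start the process, hand the
// PID to the callback, then block until the process exits.
func runWithCallback(name string, args []string, onStart func(pid int)) (int, error) {
	cmd := exec.Command(name, args...)
	if err := cmd.Start(); err != nil {
		return -1, err
	}
	if onStart != nil {
		onStart(cmd.Process.Pid)
	}
	if err := cmd.Wait(); err != nil {
		// A non-zero exit arrives as *exec.ExitError; only unexpected
		// failures are treated as errors, as in the drivers above.
		if _, ok := err.(*exec.ExitError); !ok {
			return -1, err
		}
	}
	return cmd.ProcessState.ExitCode(), nil // ExitCode needs Go 1.12+
}

func main() {
	code, err := runWithCallback("sh", []string{"-c", "exit 3"}, func(pid int) {
		fmt.Println("started, pid:", pid)
	})
	fmt.Println("exit code:", code, "err:", err)
}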
Example #2
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	var (
		term execdriver.Terminal
		err  error
	)

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return -1, err
	}
	c.ProcessConfig.Terminal = term

	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source:      d.initPath,
		Destination: c.InitPath,
		Writable:    false,
		Private:     true,
	})

	if err := d.generateEnvConfig(c); err != nil {
		return -1, err
	}
	configPath, err := d.generateLXCConfig(c)
	if err != nil {
		return -1, err
	}
	params := []string{
		"lxc-start",
		"-n", c.ID,
		"-f", configPath,
		"--",
		c.InitPath,
	}

	if c.Network.Interface != nil {
		params = append(params,
			"-g", c.Network.Interface.Gateway,
			"-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
		)
	}
	params = append(params,
		"-mtu", strconv.Itoa(c.Network.Mtu),
	)

	if c.ProcessConfig.User != "" {
		params = append(params, "-u", c.ProcessConfig.User)
	}

	if c.ProcessConfig.Privileged {
		if d.apparmor {
			params[0] = path.Join(d.root, "lxc-start-unconfined")
		}
		params = append(params, "-privileged")
	}

	if c.WorkingDir != "" {
		params = append(params, "-w", c.WorkingDir)
	}

	if len(c.CapAdd) > 0 {
		params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":")))
	}

	if len(c.CapDrop) > 0 {
		params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":")))
	}

	params = append(params, "--", c.ProcessConfig.Entrypoint)
	params = append(params, c.ProcessConfig.Arguments...)

	if d.sharedRoot {
		// lxc-start really needs / to be non-shared, or all kinds of stuff break
		// when lxc-start unmounts things and those unmounts propagate to the main
		// mount namespace.
		// What we really want is to clone into a new namespace and then
		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
		// without exec in go we have to do this horrible shell hack...
		shellString :=
			"mount --make-rslave /; exec " +
				utils.ShellQuoteArguments(params)

		params = []string{
			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
		}
	}

	var (
		name = params[0]
		arg  = params[1:]
	)
	aname, err := exec.LookPath(name)
	if err != nil {
		aname = name
	}
	c.ProcessConfig.Path = aname
	c.ProcessConfig.Args = append([]string{name}, arg...)

	if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
		return -1, err
	}

	if err := c.ProcessConfig.Start(); err != nil {
		return -1, err
	}

	var (
		waitErr  error
		waitLock = make(chan struct{})
	)

	go func() {
		if err := c.ProcessConfig.Wait(); err != nil {
			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
				waitErr = err
			}
		}
		close(waitLock)
	}()

	// Poll lxc for RUNNING status
	pid, err := d.waitForStart(c, waitLock)
	if err != nil {
		if c.ProcessConfig.Process != nil {
			c.ProcessConfig.Process.Kill()
			c.ProcessConfig.Wait()
		}
		return -1, err
	}

	c.ContainerPid = pid

	if startCallback != nil {
		startCallback(&c.ProcessConfig, pid)
	}

	<-waitLock

	return getExitCode(c), waitErr
}
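
The waitErr/waitLock pair above is a small Go idiom worth isolating: one goroutine calls Wait, records only unexpected failures (a non-zero exit status is not an error here), and closes a channel that anything else, such as the RUNNING-status poller, can block on. A self-contained sketch of just that pattern, with nothing Docker-specific:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("sh", "-c", "sleep 0.1; exit 7")
	if err := cmd.Start(); err != nil {
		panic(err)
	}

	var waitErr error
	waitLock := make(chan struct{})
	go func() {
		if err := cmd.Wait(); err != nil {
			// Do not propagate the error if it's simply a status code != 0.
			if _, ok := err.(*exec.ExitError); !ok {
				waitErr = err
			}
		}
		close(waitLock) // signals "the process has exited" to any watcher
	}()

	<-waitLock // a poller could also select on waitLock to detect early exit
	fmt.Println("exit code:", cmd.ProcessState.ExitCode(), "waitErr:", waitErr)
}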
Example #3
File: driver.go Project: nicholaskh/docker
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	var (
		term     execdriver.Terminal
		err      error
		dataPath = d.containerDir(c.ID)
	)

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source:      d.initPath,
		Destination: c.InitPath,
		Writable:    false,
		Private:     true,
	})

	if err := d.generateEnvConfig(c); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	configPath, err := d.generateLXCConfig(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	params := []string{
		"lxc-start",
		"-n", c.ID,
		"-f", configPath,
	}

	// From lxc>=1.1 the default behavior is to daemonize containers after start
	lxcVersion := version.Version(d.version())
	if lxcVersion.GreaterThanOrEqualTo(version.Version("1.1")) {
		params = append(params, "-F")
	}

	if c.Network.ContainerID != "" {
		params = append(params,
			"--share-net", c.Network.ContainerID,
		)
	}
	if c.Ipc != nil {
		if c.Ipc.ContainerID != "" {
			params = append(params,
				"--share-ipc", c.Ipc.ContainerID,
			)
		} else if c.Ipc.HostIpc {
			params = append(params,
				"--share-ipc", "1",
			)
		}
	}

	params = append(params,
		"--",
		c.InitPath,
	)
	if c.Network.Interface != nil {
		params = append(params,
			"-g", c.Network.Interface.Gateway,
			"-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
		)
	}
	params = append(params,
		"-mtu", strconv.Itoa(c.Network.Mtu),
	)

	if c.ProcessConfig.User != "" {
		params = append(params, "-u", c.ProcessConfig.User)
	}

	if c.ProcessConfig.Privileged {
		if d.apparmor {
			params[0] = path.Join(d.root, "lxc-start-unconfined")
		}
		params = append(params, "-privileged")
	}

	if c.WorkingDir != "" {
		params = append(params, "-w", c.WorkingDir)
	}

	params = append(params, "--", c.ProcessConfig.Entrypoint)
	params = append(params, c.ProcessConfig.Arguments...)

	if d.sharedRoot {
		// lxc-start really needs / to be non-shared, or all kinds of stuff break
		// when lxc-start unmounts things and those unmounts propagate to the main
		// mount namespace.
		// What we really want is to clone into a new namespace and then
		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
		// without exec in go we have to do this horrible shell hack...
		shellString :=
			"mount --make-rslave /; exec " +
				utils.ShellQuoteArguments(params)

		params = []string{
			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
		}
	}
	logrus.Debugf("lxc params %s", params)
	var (
		name = params[0]
		arg  = params[1:]
	)
	aname, err := exec.LookPath(name)
	if err != nil {
		aname = name
	}
	c.ProcessConfig.Path = aname
	c.ProcessConfig.Args = append([]string{name}, arg...)

	if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	if err := c.ProcessConfig.Start(); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var (
		waitErr  error
		waitLock = make(chan struct{})
	)

	go func() {
		if err := c.ProcessConfig.Wait(); err != nil {
			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
				waitErr = err
			}
		}
		close(waitLock)
	}()

	terminate := func(terr error) (execdriver.ExitStatus, error) {
		if c.ProcessConfig.Process != nil {
			c.ProcessConfig.Process.Kill()
			c.ProcessConfig.Wait()
		}
		return execdriver.ExitStatus{ExitCode: -1}, terr
	}
	// Poll lxc for RUNNING status
	pid, err := d.waitForStart(c, waitLock)
	if err != nil {
		return terminate(err)
	}

	cgroupPaths, err := cgroupPaths(c.ID)
	if err != nil {
		return terminate(err)
	}

	state := &libcontainer.State{
		InitProcessPid: pid,
		CgroupPaths:    cgroupPaths,
	}

	f, err := os.Create(filepath.Join(dataPath, "state.json"))
	if err != nil {
		return terminate(err)
	}
	defer f.Close()

	if err := json.NewEncoder(f).Encode(state); err != nil {
		return terminate(err)
	}

	c.ContainerPid = pid

	if startCallback != nil {
		logrus.Debugf("Invoking startCallback")
		startCallback(&c.ProcessConfig, pid)
	}

	oomKill := false
	oomKillNotification, err := notifyOnOOM(cgroupPaths)

	<-waitLock

	if err == nil {
		_, oomKill = <-oomKillNotification
		logrus.Debugf("oomKill error %s waitErr %s", oomKill, waitErr)
	} else {
		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
	}

	// check oom error
	exitCode := getExitCode(c)
	if oomKill {
		exitCode = 137
	}
	return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
}
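
notifyOnOOM is not shown in this example, but under cgroup v1 the conventional mechanism is to create an eventfd, register it together with the memory.oom_control file descriptor in cgroup.event_control, and block on the eventfd until the kernel reports an OOM kill. A sketch of what such a helper might look like, assuming golang.org/x/sys/unix and cgroup v1 paths; the function name and error handling are illustrative, not this project's actual implementation:

package oomnotify

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// NotifyOnOOM registers an eventfd against a cgroup v1 memory controller
// directory and returns a channel that receives once when the kernel
// signals an event, then is closed.
func NotifyOnOOM(memCgroupPath string) (<-chan struct{}, error) {
	oomControl, err := os.Open(filepath.Join(memCgroupPath, "memory.oom_control"))
	if err != nil {
		return nil, err
	}
	efd, err := unix.Eventfd(0, unix.EFD_CLOEXEC)
	if err != nil {
		oomControl.Close()
		return nil, err
	}
	eventfd := os.NewFile(uintptr(efd), "eventfd")

	// "<eventfd> <fd of memory.oom_control>" tells the kernel which event
	// this eventfd should be notified about.
	data := fmt.Sprintf("%d %d", efd, oomControl.Fd())
	eventControl := filepath.Join(memCgroupPath, "cgroup.event_control")
	if err := os.WriteFile(eventControl, []byte(data), 0700); err != nil {
		eventfd.Close()
		oomControl.Close()
		return nil, err
	}

	ch := make(chan struct{})
	go func() {
		defer close(ch)
		defer eventfd.Close()
		defer oomControl.Close()
		buf := make([]byte, 8)
		// Blocks until the kernel signals the eventfd: an OOM kill, or the
		// cgroup being torn down (a production version would distinguish
		// the two by re-checking memory.oom_control).
		if _, err := eventfd.Read(buf); err != nil {
			return
		}
		ch <- struct{}{}
	}()
	return ch, nil
}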
Example #4
File: driver.go Project: Gandi/docker
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return -1, err
	}

	var term execdriver.Terminal

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return -1, err
	}
	c.ProcessConfig.Terminal = term

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)

	if err := d.createContainerRoot(c.ID); err != nil {
		return -1, err
	}
	defer d.removeContainerRoot(c.ID)

	if err := d.writeContainerFile(container, c.ID); err != nil {
		return -1, err
	}

	return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
		c.ProcessConfig.Path = d.initPath
		c.ProcessConfig.Args = append([]string{
			DriverName,
			"-console", console,
			"-pipe", "3",
			"-root", filepath.Join(d.root, c.ID),
			"--",
		}, args...)

		// replace SysProcAttr wholesale so that setting the clone flags also clears any previously set attributes
		c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
			Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
		}
		c.ProcessConfig.ExtraFiles = []*os.File{child}

		c.ProcessConfig.Env = container.Env
		c.ProcessConfig.Dir = c.Rootfs

		return &c.ProcessConfig.Cmd
	}, func() {
		if startCallback != nil {
			c.ContainerPid = c.ProcessConfig.Process.Pid
			startCallback(&c.ProcessConfig, c.ContainerPid)
		}
	})
}
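
The Cloneflags assignment is what actually places the re-exec'ed init process into fresh namespaces; namespaces.GetNamespaceFlags just ORs together the corresponding CLONE_NEW* constants for the namespaces listed in the config. The mechanism itself is plain Go on Linux; a minimal standalone illustration (flags chosen for the example, requires root):

package main

import (
	"os"
	"os/exec"
	"syscall"
)

func main() {
	// Run a shell whose hostname, PID space, and mounts live in brand-new
	// namespaces. A real container runtime would additionally set up the
	// rootfs, cgroups, networking, and so on.
	cmd := exec.Command("sh", "-c", "hostname sandbox && hostname && echo pid=$$")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWUTS | syscall.CLONE_NEWPID | syscall.CLONE_NEWNS,
	}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err) // creating namespaces needs root (CAP_SYS_ADMIN)
	}
}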
Example #5
File: driver.go Project: hantuo/docker
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var term execdriver.Terminal

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)

	if err := d.createContainerRoot(c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer d.cleanContainer(c.ID)

	if err := d.writeContainerFile(container, c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	execOutputChan := make(chan execOutput, 1)
	waitForStart := make(chan struct{})

	go func() {
		exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
			c.ProcessConfig.Path = d.initPath
			c.ProcessConfig.Args = append([]string{
				DriverName,
				"-console", console,
				"-pipe", "3",
				"-root", filepath.Join(d.root, c.ID),
				"--",
			}, args...)

			// replace SysProcAttr wholesale so that setting the clone flags also clears any previously set attributes
			c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
				Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
			}
			c.ProcessConfig.ExtraFiles = []*os.File{child}

			c.ProcessConfig.Env = container.Env
			c.ProcessConfig.Dir = container.RootFs

			return &c.ProcessConfig.Cmd
		}, func() {
			close(waitForStart)
			if startCallback != nil {
				c.ContainerPid = c.ProcessConfig.Process.Pid
				startCallback(&c.ProcessConfig, c.ContainerPid)
			}
		})
		execOutputChan <- execOutput{exitCode, err}
	}()

	select {
	case execOutput := <-execOutputChan:
		return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
	case <-waitForStart:
	}

	oomKill := false
	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
	if err == nil {
		oomKillNotification, err := libcontainer.NotifyOnOOM(state)
		if err == nil {
			_, oomKill = <-oomKillNotification
		} else {
			log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
		}
	} else {
		log.Warnf("Failed to get container state, oom notify will not work: %s", err)
	}
	// wait for the container to exit.
	execOutput := <-execOutputChan

	return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
}
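
The select near the end of Example #5 races two outcomes: either the container exits before ever signalling a successful start (so Run returns the failure immediately), or the start callback closes waitForStart and Run proceeds to its post-start work. The shape is ordinary channel code; a self-contained sketch of the same race:

package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	started := make(chan struct{})
	result := make(chan error, 1) // buffered so the worker never blocks on send

	go func() {
		// Simulated lifecycle: signal a successful start, then run to exit.
		time.Sleep(10 * time.Millisecond)
		close(started)
		time.Sleep(50 * time.Millisecond)
		result <- errors.New("exited") // stand-in for the real exit status
	}()

	select {
	case err := <-result:
		// The process died before it ever reported starting.
		fmt.Println("failed to start:", err)
		return
	case <-started:
		// Running: do post-start setup (state file, OOM watcher, ...).
		fmt.Println("started; doing post-start setup")
	}

	fmt.Println("final:", <-result) // then block until the real exit arrives
}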