Example #1
File: exec.go Project: nixuw/docker
// Exec implements the exec driver Driver interface; it calls
// libcontainer APIs to execute a process inside a running container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: processConfig.User,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}
	// add the CAP_ prefix to every capability that lacks it, so the
	// names match the spec format expected by newer libcontainer.
	for i, s := range p.Capabilities {
		if !strings.HasPrefix(s, "CAP_") {
			p.Capabilities[i] = fmt.Sprintf("CAP_%s", s)
		}
	}

	config := active.Config()
	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
		return -1, err
	}

	if err := active.Start(p); err != nil {
		return -1, err
	}

	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}

		// A closed channel is passed as the OOM channel here: reads
		// from it never block and report the correct (no-OOM) result.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, pid, chOOM)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
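
The closed-channel trick in the hook above deserves a note: a receive from a closed channel never blocks, and the comma-ok form reports false when no value was ever sent, which the start hook reads as "no OOM". A minimal, self-contained sketch of the behavior (the names are illustrative, not part of the driver):

package main

import "fmt"

func main() {
	// Mirror of the chOOM pattern in Exec: a closed, empty channel.
	// Receiving from it never blocks, and ok == false means no value
	// was ever sent, i.e. no OOM occurred.
	noOOM := make(chan struct{})
	close(noOOM)
	_, ok := <-noOOM
	fmt.Println("oomKilled:", ok) // oomKilled: false

	// By contrast, a channel that carried a value before closing
	// reports ok == true on the first receive.
	oom := make(chan struct{}, 1)
	oom <- struct{}{}
	close(oom)
	_, ok = <-oom
	fmt.Println("oomKilled:", ok) // oomKilled: true
}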
Example #2
func restoreContainer(context *cli.Context, spec *Spec, config *configs.Config, imagePath string) (code int, err error) {
	rootuid := 0
	factory, err := loadFactory(context)
	if err != nil {
		return -1, err
	}
	container, err := factory.Load(context.GlobalString("id"))
	if err != nil {
		container, err = factory.Create(context.GlobalString("id"), config)
		if err != nil {
			return -1, err
		}
	}
	options := criuOptions(context)
	// ensure that the container is always removed if we were the process
	// that created it.
	defer func() {
		if err != nil {
			return
		}
		status, err := container.Status()
		if err != nil {
			logrus.Error(err)
		}
		if status != libcontainer.Checkpointed {
			if err := container.Destroy(); err != nil {
				logrus.Error(err)
			}
			if err := os.RemoveAll(options.ImagesDirectory); err != nil {
				logrus.Error(err)
			}
		}
	}()
	process := &libcontainer.Process{
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
	tty, err := newTty(spec.Process.Terminal, process, rootuid)
	if err != nil {
		return -1, err
	}
	defer tty.Close()
	go handleSignals(process, tty)
	if err := container.Restore(process, options); err != nil {
		return -1, err
	}
	status, err := process.Wait()
	if err != nil {
		return -1, err
	}
	return utils.ExitStatus(status.Sys().(syscall.WaitStatus)), nil
}
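
restoreContainer leans on a Go idiom that is easy to miss: the named return value err is visible inside the deferred closure, so cleanup can be made conditional on how the function is about to exit. A minimal sketch of the idiom, with illustrative names:

package main

import (
	"errors"
	"fmt"
)

type resource struct{ name string }

func (r *resource) destroy() { fmt.Println("destroyed", r.name) }

func run(fail bool) (err error) {
	r := &resource{name: "demo"}
	defer func() {
		// err is the named return value, so the closure observes
		// whatever run is about to return.
		if err != nil {
			return // on error, leave the resource for inspection
		}
		r.destroy()
	}()
	if fail {
		return errors.New("restore failed")
	}
	return nil
}

func main() {
	fmt.Println(run(false)) // destroys the resource, prints <nil>
	fmt.Println(run(true))  // skips cleanup, prints the error
}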
Example #3
File: exec.go Project: ch3lo/docker
// Exec implements the exec driver Driver interface; it calls
// libcontainer APIs to execute a process inside a running container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: processConfig.User,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}

	config := active.Config()
	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
		return -1, err
	}

	if err := active.Start(p); err != nil {
		return -1, err
	}

	if startCallback != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}
		startCallback(&c.ProcessConfig, pid)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
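
Every variant of this function ends the same way: recover the ProcessState even when Wait returns an error, then convert the raw wait status into a shell-style exit code. A runnable sketch of that tail, with a local exitStatus helper standing in for utils.ExitStatus (the 128+signal convention for signal deaths is an assumption about that helper):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// exitStatus mirrors what utils.ExitStatus is assumed to do: the plain
// exit code for a normal exit, or 128+signal if the process was killed
// by a signal.
func exitStatus(ws syscall.WaitStatus) int {
	if ws.Signaled() {
		return 128 + int(ws.Signal())
	}
	return ws.ExitStatus()
}

func main() {
	err := exec.Command("sh", "-c", "exit 3").Run()
	// A non-zero exit surfaces as *exec.ExitError; the ProcessState is
	// recovered from it, exactly as the drivers above do after Wait.
	exitErr, ok := err.(*exec.ExitError)
	if !ok {
		panic(err)
	}
	ps := exitErr.ProcessState
	fmt.Println(exitStatus(ps.Sys().(syscall.WaitStatus))) // 3
}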
Example #4
// reap runs wait4 in a loop until we have finished processing any existing exits
// then returns all exits to the main event loop for further processing.
func (h *signalHandler) reap() (exits []exit, err error) {
	var (
		ws  syscall.WaitStatus
		rus syscall.Rusage
	)
	for {
		pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus)
		if err != nil {
			if err == syscall.ECHILD {
				return exits, nil
			}
			return nil, err
		}
		if pid <= 0 {
			// wait4 with WNOHANG returns pid 0 while children are
			// still running; without this check the loop would spin
			// forever appending bogus pid-0 entries (compare Example 5).
			return exits, nil
		}
		exits = append(exits, exit{
			pid:    pid,
			status: utils.ExitStatus(ws),
		})
	}
}
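
A self-contained version of this loop, runnable on Linux: start a few children without ever calling their Wait, block until a SIGCHLD arrives, then reap in the same shape as above (the pid <= 0 guard stops the loop while children are still running):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"os/signal"
	"syscall"
)

type exit struct {
	pid    int
	status int
}

func reap() (exits []exit, err error) {
	var (
		ws  syscall.WaitStatus
		rus syscall.Rusage
	)
	for {
		pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus)
		if err != nil {
			if err == syscall.ECHILD {
				return exits, nil // no children remain at all
			}
			return nil, err
		}
		if pid <= 0 {
			return exits, nil // children remain, but none exited yet
		}
		exits = append(exits, exit{pid: pid, status: ws.ExitStatus()})
	}
}

func main() {
	sigc := make(chan os.Signal, 8)
	signal.Notify(sigc, syscall.SIGCHLD)
	for i := 0; i < 3; i++ {
		// Deliberately never call Wait: the children stay zombies
		// until our own wait4 collects them.
		if err := exec.Command("true").Start(); err != nil {
			panic(err)
		}
	}
	<-sigc // at least one child has exited
	exits, err := reap()
	fmt.Println(exits, err)
}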
Example #5
func reap() (exits []*supervisor.Event, err error) {
	var (
		ws  syscall.WaitStatus
		rus syscall.Rusage
	)
	for {
		pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus)
		if err != nil {
			if err == syscall.ECHILD {
				return exits, nil
			}
			return exits, err
		}
		if pid <= 0 {
			return exits, nil
		}
		e := supervisor.NewEvent(supervisor.ExitEventType)
		e.Pid = pid
		e.Status = utils.ExitStatus(ws)
		exits = append(exits, e)
	}
}
Example #6
// Run implements the exec driver Driver interface; it calls
// libcontainer APIs to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c, hooks)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	p := &libcontainer.Process{
		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: c.ProcessConfig.User,
	}

	if err := setupPipes(container, &c.ProcessConfig, p, pipes); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cont, err := d.factory.Create(c.ID, container)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = cont
	d.Unlock()
	defer func() {
		cont.Destroy()
		d.cleanContainer(c.ID)
	}()

	if err := cont.Start(p); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	oom := notifyOnOOM(cont)
	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		hooks.Start(&c.ProcessConfig, pid, oom)
	}

	waitF := p.Wait
	if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
		// we need this hack to track processes with inherited fds,
		// because cmd.Wait() waits for all streams to be copied
		waitF = waitInPIDHost(p, cont)
	}
	ps, err := waitF()
	if err != nil {
		execErr, ok := err.(*exec.ExitError)
		if !ok {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		ps = execErr.ProcessState
	}
	cont.Destroy()
	_, oomKill := <-oom
	return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
}
Example #7
// Run implements the exec driver Driver interface; it calls
// libcontainer APIs to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	destroyed := false
	var err error
	c.TmpDir, err = ioutil.TempDir("", c.ID)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer os.RemoveAll(c.TmpDir)

	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c, hooks)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	p := &libcontainer.Process{
		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: c.ProcessConfig.User,
	}

	wg := sync.WaitGroup{}
	writers, err := setupPipes(container, &c.ProcessConfig, p, pipes, &wg)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cont, err := d.factory.Create(c.ID, container)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = cont
	d.Unlock()
	defer func() {
		if !destroyed {
			cont.Destroy()
		}
		d.cleanContainer(c.ID)
	}()

	if err := cont.Start(p); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// close the write end of any opened pipes now that they are dup'ed into the container
	for _, writer := range writers {
		writer.Close()
	}
	// 'oom' is used to emit 'oom' events to the eventstream, 'oomKilled' is used
	// to set the 'OOMKilled' flag in state
	oom := notifyOnOOM(cont)
	oomKilled := notifyOnOOM(cont)
	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		hooks.Start(&c.ProcessConfig, pid, oom)
	}

	waitF := p.Wait
	if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
		// we need this hack to track processes with inherited fds,
		// because cmd.Wait() waits for all streams to be copied
		waitF = waitInPIDHost(p, cont)
	}
	ps, err := waitF()
	if err != nil {
		execErr, ok := err.(*exec.ExitError)
		if !ok {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		ps = execErr.ProcessState
	}
	// wait for all IO goroutine copiers to finish
	wg.Wait()

	cont.Destroy()
	destroyed = true
	// oomKilled will have an oom event if any process within the container was
	// OOM killed at any time, not only if the init process OOMed.
	//
	// Perhaps we only want the OOMKilled flag to be set if the OOM
	// resulted in a container death, but there isn't a good way to do this
	// because the kernel's cgroup oom notification does not provide information
	// such as the PID. This could be heuristically done by checking that the OOM
	// happened within some very small time slice of the container dying (and
	// optionally exit-code 137), but I don't think the cgroup oom notification
	// can be used to reliably determine this
	//
	// Even if there were multiple OOMs, it's sufficient to read one value
	// because libcontainer's oom notify will discard the channel after the
	// cgroup is destroyed
	_, oomKill := <-oomKilled
	return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
}
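
The pipe handling in this variant, reduced to its essentials: a goroutine copies each stream, a sync.WaitGroup tracks the copiers, and the parent closes its copy of the write end once the child holds its own, so the copier sees EOF when the child exits. A minimal sketch with a single stream:

package main

import (
	"io"
	"os"
	"os/exec"
	"sync"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	cmd := exec.Command("sh", "-c", "echo hello; echo world")
	cmd.Stdout = w // the child receives a dup of the write end

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		io.Copy(os.Stdout, r) // returns at EOF, once every write end is closed
	}()

	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// Close our write end now that it is dup'ed into the child;
	// otherwise the copier would never see EOF.
	w.Close()

	cmd.Wait()
	wg.Wait() // drain all copiers before reporting the exit, as Run does
	r.Close()
}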
Example #8
// Exec implements the exec driver Driver interface; it calls
// libcontainer APIs to execute a process inside a running container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	user := processConfig.User
	if c.RemappedRoot.UID != 0 && user == "" {
		// if user namespaces are enabled, set the user explicitly so
		// uid/gid resolve to 0; otherwise we end up with the overflow
		// id and no permissions (65534)
		user = "0:0"
	}

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: user,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}
	// add the CAP_ prefix to every capability that lacks it, so the
	// names match the spec format expected by newer libcontainer.
	for i, s := range p.Capabilities {
		if !strings.HasPrefix(s, "CAP_") {
			p.Capabilities[i] = fmt.Sprintf("CAP_%s", s)
		}
	}

	config := active.Config()
	wg := sync.WaitGroup{}
	writers, err := setupPipes(&config, processConfig, p, pipes, &wg)
	if err != nil {
		return -1, err
	}

	if err := active.Start(p); err != nil {
		return -1, err
	}
	// close the write end of any opened pipes now that they are dup'ed into the container
	for _, writer := range writers {
		writer.Close()
	}

	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}

		// A closed channel is passed as the OOM channel here: reads
		// from it never block and report the correct (no-OOM) result.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, pid, chOOM)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	// wait for all IO goroutine copiers to finish
	wg.Wait()
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
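
The capability normalization shared by Examples 1 and 8 is small enough to test in isolation: prepend CAP_ to any name that lacks it, so older-style names match the spec spelling:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mixed old-style and spec-style names, as the loop above allows.
	caps := []string{"SYS_ADMIN", "CAP_NET_RAW", "CHOWN"}
	for i, s := range caps {
		if !strings.HasPrefix(s, "CAP_") {
			caps[i] = "CAP_" + s
		}
	}
	fmt.Println(caps) // [CAP_SYS_ADMIN CAP_NET_RAW CAP_CHOWN]
}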