Example #1
func oomAction(context *cli.Context) {
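	// dataPath is assumed to be a package-level variable (not shown in this
	// snippet) holding the container's state directory read by GetState.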
	state, err := libcontainer.GetState(dataPath)
	if err != nil {
		log.Fatal(err)
	}
	n, err := libcontainer.NotifyOnOOM(state)
	if err != nil {
		log.Fatal(err)
	}
	for range n {
		log.Printf("OOM notification received")
	}
}
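Example #1 ranges over the channel returned by NotifyOnOOM, logging a line for each OOM event until the channel is closed. Examples #2 and #3 below instead use a single comma-ok receive to turn that channel into an oomKill flag. The following is a minimal, self-contained sketch of that receive semantics; it is plain Go and assumes nothing about libcontainer itself:

package main

import "fmt"

func main() {
	// n stands in for the channel returned by libcontainer.NotifyOnOOM.
	n := make(chan struct{}, 1)

	n <- struct{}{} // simulate one OOM event
	close(n)        // simulate the notifier shutting down

	_, oomKill := <-n // true: an event was pending before the close
	fmt.Println("oomKill =", oomKill)

	_, oomKill = <-n // false: the channel is closed and drained
	fmt.Println("oomKill =", oomKill)
}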
Example #2
File: driver.go  Project: hantuo/docker
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var term execdriver.Terminal

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)

	if err := d.createContainerRoot(c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer d.cleanContainer(c.ID)

	if err := d.writeContainerFile(container, c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	execOutputChan := make(chan execOutput, 1)
	waitForStart := make(chan struct{})

	go func() {
		exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
			c.ProcessConfig.Path = d.initPath
			c.ProcessConfig.Args = append([]string{
				DriverName,
				"-console", console,
				"-pipe", "3",
				"-root", filepath.Join(d.root, c.ID),
				"--",
			}, args...)

			// replace SysProcAttr wholesale so that only the clone flags set here apply
			c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
				Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
			}
			c.ProcessConfig.ExtraFiles = []*os.File{child}

			c.ProcessConfig.Env = container.Env
			c.ProcessConfig.Dir = container.RootFs

			return &c.ProcessConfig.Cmd
		}, func() {
			close(waitForStart)
			if startCallback != nil {
				c.ContainerPid = c.ProcessConfig.Process.Pid
				startCallback(&c.ProcessConfig, c.ContainerPid)
			}
		})
		execOutputChan <- execOutput{exitCode, err}
	}()

	select {
	case execOutput := <-execOutputChan:
		return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
	case <-waitForStart:
		break
	}

	oomKill := false
	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
	if err == nil {
		oomKillNotification, err := libcontainer.NotifyOnOOM(state)
		if err == nil {
			_, oomKill = <-oomKillNotification
		} else {
			log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
		}
	} else {
		log.Warnf("Failed to get container state, oom notify will not work: %s", err)
	}
	// wait for the container to exit.
	execOutput := <-execOutputChan

	return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
}
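Example #2 runs namespaces.Exec in a goroutine and then waits on whichever comes first: the exec result (the container failed or exited before it ever started) or the waitForStart channel closed by the start callback. Below is a stripped-down sketch of that pattern; run is a hypothetical stand-in for the real exec call:

package main

import (
	"fmt"
	"time"
)

type result struct {
	exitCode int
	err      error
}

// run stands in for the namespaces.Exec call in Example #2: it signals
// "started", keeps running for a while, then returns the exit status.
func run(started chan<- struct{}) result {
	close(started)                     // the start callback fires here
	time.Sleep(100 * time.Millisecond) // the container "runs"
	return result{exitCode: 0}
}

func main() {
	results := make(chan result, 1)
	started := make(chan struct{})

	go func() { results <- run(started) }()

	select {
	case r := <-results:
		// The child exited (or failed) before it ever signalled start.
		fmt.Println("early exit:", r.exitCode, r.err)
		return
	case <-started:
		// Startup succeeded; fall through and wait for the real result.
	}

	r := <-results
	fmt.Println("exit:", r.exitCode, r.err)
}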
Example #3
File: driver.go  Project: viirya/docker
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	var (
		term     execdriver.Terminal
		err      error
		dataPath = d.containerDir(c.ID)
	)

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source:      d.initPath,
		Destination: c.InitPath,
		Writable:    false,
		Private:     true,
	})

	if err := d.generateEnvConfig(c); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	configPath, err := d.generateLXCConfig(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	params := []string{
		"lxc-start",
		"-n", c.ID,
		"-f", configPath,
	}
	if c.Network.ContainerID != "" {
		params = append(params,
			"--share-net", c.Network.ContainerID,
		)
	}
	if c.Ipc != nil {
		if c.Ipc.ContainerID != "" {
			params = append(params,
				"--share-ipc", c.Ipc.ContainerID,
			)
		} else if c.Ipc.HostIpc {
			params = append(params,
				"--share-ipc", "1",
			)
		}
	}

	params = append(params,
		"--",
		c.InitPath,
	)
	if c.Network.Interface != nil {
		params = append(params,
			"-g", c.Network.Interface.Gateway,
			"-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
		)
	}
	params = append(params,
		"-mtu", strconv.Itoa(c.Network.Mtu),
	)

	if c.ProcessConfig.User != "" {
		params = append(params, "-u", c.ProcessConfig.User)
	}

	if c.ProcessConfig.Privileged {
		if d.apparmor {
			params[0] = path.Join(d.root, "lxc-start-unconfined")

		}
		params = append(params, "-privileged")
	}

	if c.WorkingDir != "" {
		params = append(params, "-w", c.WorkingDir)
	}

	params = append(params, "--", c.ProcessConfig.Entrypoint)
	params = append(params, c.ProcessConfig.Arguments...)

	if d.sharedRoot {
		// lxc-start really needs / to be non-shared, or all kinds of stuff break
		// when lxc-start unmounts things and those unmounts propagate to the main
		// mount namespace.
		// What we really want is to clone into a new namespace and then
		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
		// without exec in go we have to do this horrible shell hack...
		shellString :=
			"mount --make-rslave /; exec " +
				utils.ShellQuoteArguments(params)

		params = []string{
			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
		}
	}
	log.Debugf("lxc params %s", params)
	var (
		name = params[0]
		arg  = params[1:]
	)
	aname, err := exec.LookPath(name)
	if err != nil {
		aname = name
	}
	c.ProcessConfig.Path = aname
	c.ProcessConfig.Args = append([]string{name}, arg...)

	if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	if err := c.ProcessConfig.Start(); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var (
		waitErr  error
		waitLock = make(chan struct{})
	)

	go func() {
		if err := c.ProcessConfig.Wait(); err != nil {
			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
				waitErr = err
			}
		}
		close(waitLock)
	}()

	terminate := func(terr error) (execdriver.ExitStatus, error) {
		if c.ProcessConfig.Process != nil {
			c.ProcessConfig.Process.Kill()
			c.ProcessConfig.Wait()
		}
		return execdriver.ExitStatus{ExitCode: -1}, terr
	}
	// Poll lxc for RUNNING status
	pid, err := d.waitForStart(c, waitLock)
	if err != nil {
		return terminate(err)
	}

	cgroupPaths, err := cgroupPaths(c.ID)
	if err != nil {
		return terminate(err)
	}

	state := &libcontainer.State{
		InitPid:     pid,
		CgroupPaths: cgroupPaths,
	}

	if err := libcontainer.SaveState(dataPath, state); err != nil {
		return terminate(err)
	}

	c.ContainerPid = pid

	if startCallback != nil {
		log.Debugf("Invoking startCallback")
		startCallback(&c.ProcessConfig, pid)
	}
	oomKill := false
	oomKillNotification, err := libcontainer.NotifyOnOOM(state)
	if err == nil {
		_, oomKill = <-oomKillNotification
		log.Debugf("oomKill error %s waitErr %s", oomKill, waitErr)

	} else {
		log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
	}

	<-waitLock

	// check oom error
	exitCode := getExitCode(c)
	if oomKill {
		exitCode = 137
	}
	return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
}
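Example #3 hard-codes exit code 137 whenever the OOM notification fired. That value is the conventional shell encoding of death by SIGKILL (128 + signal number 9), which is how the kernel's OOM killer terminates a process. A one-line check:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// 128 + SIGKILL(9) = 137, the code Example #3 reports for OOM kills.
	fmt.Println(128 + int(syscall.SIGKILL))
}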