Code example #1
func TestCustomLxcConfigMisc(t *testing.T) {
	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(root)
	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
	driver, err := NewDriver(root, "", false)
	if err != nil {
		t.Fatal(err)
	}
	processConfig := execdriver.ProcessConfig{
		Privileged: false,
	}

	processConfig.Env = []string{"HOSTNAME=testhost"}
	command := &execdriver.Command{
		ID: "1",
		LxcConfig: []string{
			"lxc.cgroup.cpuset.cpus = 0,1",
		},
		Network: &execdriver.Network{
			Mtu: 1500,
			Interface: &execdriver.NetworkInterface{
				Gateway:     "10.10.10.1",
				IPAddress:   "10.10.10.10",
				IPPrefixLen: 24,
				Bridge:      "docker0",
			},
		},
		ProcessConfig: processConfig,
		CapAdd:        []string{"net_admin", "syslog"},
		CapDrop:       []string{"kill", "mknod"},
	}

	p, err := driver.generateLXCConfig(command)
	if err != nil {
		t.Fatal(err)
	}
	// network
	grepFile(t, p, "lxc.network.type = veth")
	grepFile(t, p, "lxc.network.link = docker0")
	grepFile(t, p, "lxc.network.name = eth0")
	grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24")
	grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1")
	grepFile(t, p, "lxc.network.flags = up")

	// hostname
	grepFile(t, p, "lxc.utsname = testhost")
	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
	container := nativeTemplate.New()
	for _, cap := range container.Capabilities {
		cap = strings.ToLower(cap)
		if cap != "mknod" && cap != "kill" {
			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", cap))
		}
	}
	grepFileWithReverse(t, p, "lxc.cap.keep = kill", true)
	grepFileWithReverse(t, p, "lxc.cap.keep = mknod", true)
}
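The tests in these examples call grepFile and grepFileWithReverse helpers that the excerpts do not include. A minimal sketch of what they plausibly look like, assuming grepFile asserts that a matching line is present in the generated config file and the WithReverse variant inverts the check (names and behavior are inferred from the call sites; imports of bufio, os, strings, and testing are assumed):

// Sketch only: scan the file line by line and fail the test if the expected
// text is missing, or, with inverseGrep set, if it is present.
func grepFileWithReverse(t *testing.T, path, pattern string, inverseGrep bool) {
	f, err := os.Open(path)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	found := false
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		if strings.Contains(scanner.Text(), pattern) {
			found = true
			break
		}
	}
	if found == inverseGrep {
		t.Fatalf("unexpected grep result for %q in %s (inverse=%v)", pattern, path, inverseGrep)
	}
}

// grepFile asserts that pattern occurs somewhere in the file.
func grepFile(t *testing.T, path, pattern string) {
	grepFileWithReverse(t, path, pattern, false)
}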
Code example #2
func populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Mtu:       c.daemon.config.Mtu,
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {

	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			network := c.NetworkSettings
			en.Interface = &execdriver.NetworkInterface{
				MacAddress: network.MacAddress,
			}
		}
	default:
		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
	}

	pid := &execdriver.Pid{}

	// TODO Windows. This can probably be factored out.
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	// TODO Windows. Resource controls to be implemented later.
	resources := &execdriver.Resources{}

	// TODO Windows. Further refactoring required (privileged/user)
	processConfig := execdriver.ProcessConfig{
		Privileged: c.hostConfig.Privileged,
		Entrypoint: c.Path,
		Arguments:  c.Args,
		Tty:        c.Config.Tty,
		User:       c.Config.User,
	}

	processConfig.Env = env

	// TODO Windows: Factor out remainder of unused fields.
	c.command = &execdriver.Command{
		ID:             c.ID,
		Rootfs:         c.RootfsPath(),
		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
		InitPath:       "/.dockerinit",
		WorkingDir:     c.Config.WorkingDir,
		Network:        en,
		Pid:            pid,
		Resources:      resources,
		CapAdd:         c.hostConfig.CapAdd.Slice(),
		CapDrop:        c.hostConfig.CapDrop.Slice(),
		ProcessConfig:  processConfig,
		ProcessLabel:   c.GetProcessLabel(),
		MountLabel:     c.GetMountLabel(),
	}

	return nil
}
Code example #3
File: exec_unix.go Project: DaveDaCoda/docker
// setPlatformSpecificExecProcessConfig sets platform-specific fields in the
// ProcessConfig structure.
func setPlatformSpecificExecProcessConfig(config *types.ExecConfig, container *container.Container, pc *execdriver.ProcessConfig) {
	user := config.User
	if len(user) == 0 {
		user = container.Config.User
	}

	pc.User = user
	pc.Privileged = config.Privileged
}
Code example #4
func TestCustomLxcConfigMisc(t *testing.T) {
	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(root)
	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
	driver, err := NewDriver(root, root, "", true)
	if err != nil {
		t.Fatal(err)
	}
	processConfig := execdriver.ProcessConfig{
		Privileged: false,
	}

	processConfig.Env = []string{"HOSTNAME=testhost"}
	command := &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID: "1",
			Network: &execdriver.Network{
				Mtu: 1500,
			},
			ProcessConfig: processConfig,
		},
		LxcConfig: []string{
			"lxc.cgroup.cpuset.cpus = 0,1",
		},
		CapAdd:          []string{"net_admin", "syslog"},
		CapDrop:         []string{"kill", "mknod"},
		AppArmorProfile: "lxc-container-default-with-nesting",
	}

	p, err := driver.generateLXCConfig(command)
	if err != nil {
		t.Fatal(err)
	}
	grepFile(t, p, "lxc.aa_profile = lxc-container-default-with-nesting")
	// hostname
	grepFile(t, p, "lxc.utsname = testhost")
	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
	container := nativeTemplate.New()
	for _, cap := range container.Capabilities {
		realCap := execdriver.GetCapability(cap)
		numCap := fmt.Sprintf("%d", realCap.Value)
		if cap != "MKNOD" && cap != "KILL" {
			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
		}
	}

	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true)
	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true)
}
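Examples #4 and #5 map capability names to numeric values through execdriver.GetCapability, whose definition is not part of these excerpts. A hedged sketch of the assumed shape, built on the syndtr/gocapability constants the tests already reference; the list is deliberately truncated to the two capabilities the tests name:

// Sketch only: a name-to-value mapping over the gocapability constants.
type CapabilityMapping struct {
	Key   string
	Value capability.Cap
}

var capabilityList = []*CapabilityMapping{
	{Key: "KILL", Value: capability.CAP_KILL},
	{Key: "MKNOD", Value: capability.CAP_MKNOD},
	// ... remaining capabilities elided in this sketch
}

// GetCapability looks a capability up by name, returning nil when unknown.
func GetCapability(key string) *CapabilityMapping {
	for _, c := range capabilityList {
		if c.Key == key {
			return c
		}
	}
	return nil
}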
Code example #5
func TestCustomLxcConfigMiscOverride(t *testing.T) {
	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(root)
	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
	driver, err := NewDriver(root, root, "", false)
	if err != nil {
		t.Fatal(err)
	}
	processConfig := execdriver.ProcessConfig{
		Privileged: false,
	}

	processConfig.Env = []string{"HOSTNAME=testhost"}
	command := &execdriver.Command{
		ID: "1",
		LxcConfig: []string{
			"lxc.cgroup.cpuset.cpus = 0,1",
			"lxc.network.ipv4 = 172.0.0.1",
		},
		Network: &execdriver.Network{
			Mtu:       1500,
			Interface: nil,
		},
		ProcessConfig: processConfig,
		CapAdd:        []string{"NET_ADMIN", "SYSLOG"},
		CapDrop:       []string{"KILL", "MKNOD"},
	}

	p, err := driver.generateLXCConfig(command)
	if err != nil {
		t.Fatal(err)
	}

	// hostname
	grepFile(t, p, "lxc.utsname = testhost")
	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
	container := nativeTemplate.New()
	for _, cap := range container.Capabilities {
		realCap := execdriver.GetCapability(cap)
		numCap := fmt.Sprintf("%d", realCap.Value)
		if cap != "MKNOD" && cap != "KILL" {
			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
		}
	}
	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true)
	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true)
}
Code example #6
File: exec.go Project: baoruxing/docker
// TODO(vishh): Add support for running in privileged mode and running as a different user.
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}
	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
	if err != nil {
		return -1, fmt.Errorf("State unavailable for container with ID %s. The container may have been cleaned up already. Error: %s", c.ID, err)
	}

	var term execdriver.Terminal

	if processConfig.Tty {
		term, err = NewTtyConsole(processConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(processConfig, pipes)
	}

	processConfig.Terminal = term

	args := append([]string{processConfig.Entrypoint}, processConfig.Arguments...)

	return namespaces.ExecIn(active.container, state, args, os.Args[0], "exec", processConfig.Stdin, processConfig.Stdout, processConfig.Stderr, processConfig.Console,
		func(cmd *exec.Cmd) {
			if startCallback != nil {
				startCallback(&c.ProcessConfig, cmd.Process.Pid)
			}
		})
}
Code example #7
File: driver.go Project: TencentSA/docker-1.3
func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) {
	// lxc is special in that we cannot create the master outside of the container without
	// opening the slave because we have nothing to provide to the cmd. We have to open both, then do
	// the crazy setup on command right now instead of passing the console path to lxc and telling it
	// to open up that console. We save a couple of open files in the native driver because we can do
	// this.
	ptyMaster, ptySlave, err := pty.Open()
	if err != nil {
		return nil, err
	}

	tty := &TtyConsole{
		MasterPty: ptyMaster,
		SlavePty:  ptySlave,
	}

	if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil {
		tty.Close()
		return nil, err
	}

	processConfig.Console = tty.SlavePty.Name()

	return tty, nil
}
Code example #8
File: driver.go Project: Gandi/docker
func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) {
	ptyMaster, console, err := consolepkg.CreateMasterAndConsole()
	if err != nil {
		return nil, err
	}

	tty := &TtyConsole{
		MasterPty: ptyMaster,
	}

	if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil {
		tty.Close()
		return nil, err
	}

	processConfig.Console = console

	return tty, nil
}
Code example #9
File: driver.go Project: colebrumley/docker
func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes) error {
	var term execdriver.Terminal

	if processConfig.Tty {
		rootuid, err := container.HostUID()
		if err != nil {
			return err
		}
		cons, err := p.NewConsole(rootuid)
		if err != nil {
			return err
		}
		// Check the console error in place: the err declared in this block
		// shadows any outer err, so a check after the if/else would never
		// see a failure from NewTtyConsole.
		term, err = NewTtyConsole(cons, pipes, rootuid)
		if err != nil {
			return err
		}
	} else {
		p.Stdout = pipes.Stdout
		p.Stderr = pipes.Stderr
		r, w, err := os.Pipe()
		if err != nil {
			return err
		}
		if pipes.Stdin != nil {
			go func() {
				io.Copy(w, pipes.Stdin)
				w.Close()
			}()
			p.Stdin = r
		}
		term = &execdriver.StdConsole{}
	}
	processConfig.Terminal = term
	return nil
}
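Example #9 above (and examples #14, #22, and #23 below) construct the terminal with a NewTtyConsole variant whose signature differs from the lxc versions in examples #7 and #8. A speculative sketch of the assumed native-driver shape, wrapping the console created by libcontainer and wiring the exec-driver pipes to it (the rootuid parameter is accepted for parity with the callers but unused here):

// Sketch only: wrap the libcontainer console and attach the pipes.
type TtyConsole struct {
	console libcontainer.Console
}

func NewTtyConsole(console libcontainer.Console, pipes *execdriver.Pipes, rootuid int) (*TtyConsole, error) {
	tty := &TtyConsole{console: console}
	if err := tty.AttachPipes(pipes); err != nil {
		tty.Close()
		return nil, err
	}
	return tty, nil
}

// AttachPipes copies console output to the caller's stdout and, when stdin
// is attached, copies the caller's stdin into the console.
func (t *TtyConsole) AttachPipes(pipes *execdriver.Pipes) error {
	go io.Copy(pipes.Stdout, t.console)
	if pipes.Stdin != nil {
		go io.Copy(t.console, pipes.Stdin)
	}
	return nil
}

func (t *TtyConsole) Close() error {
	return t.console.Close()
}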
Code example #10
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	var epList []string

	// Connect all the libnetwork allocated networks to the container
	if c.NetworkSettings != nil {
		for n := range c.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := c.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}
			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}
		}
	}

	if daemon.netController == nil {
		parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
		switch parts[0] {
		case "none":
		case "default", "": // empty string to support existing containers
			if !c.Config.NetworkDisabled {
				en.Interface = &execdriver.NetworkInterface{
					MacAddress:   c.Config.MacAddress,
					Bridge:       daemon.configStore.bridgeConfig.Iface,
					PortBindings: c.HostConfig.PortBindings,

					// TODO Windows. Include IPAddress. There already is a
					// property IPAddress on execDrive.CommonNetworkInterface,
					// but there is no CLI option in docker to pass through
					// an IPAddress on docker run.
				}
			}
		default:
			return fmt.Errorf("invalid network mode: %s", c.HostConfig.NetworkMode)
		}
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.HostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.HostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
	}

	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}

	m, err := c.RWLayer.Metadata()
	if err != nil {
		return fmt.Errorf("Failed to get layer metadata - %s", err)
	}
	layerFolder := m["dir"]

	var hvPartition bool
	// Work out the isolation (whether it is a hypervisor partition)
	if c.HostConfig.Isolation.IsDefault() {
		// Not specified by caller. Take daemon default
		hvPartition = windows.DefaultIsolation.IsHyperV()
	} else {
		// Take value specified by caller
		hvPartition = c.HostConfig.Isolation.IsHyperV()
	}

	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.BaseFS,
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.GetMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   string(c.HostConfig.Isolation),
		ArgsEscaped: c.Config.ArgsEscaped,
		HvPartition: hvPartition,
		EpList:      epList,
	}

	return nil
}
Code example #11
File: exec.go Project: supasate/docker
// Exec implements the exec driver Driver interface.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {

	var (
		term     execdriver.Terminal
		err      error
		exitCode int32
		errno    uint32
	)

	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID)
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   processConfig.Tty, // Note NOT c.ProcessConfig.Tty
		WorkingDirectory: c.WorkingDir,
	}

	// Configure the environment for the process // Note NOT c.ProcessConfig.Env
	createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env)

	// Create the commandline for the process // Note NOT c.ProcessConfig
	createProcessParms.CommandLine, err = createCommandLine(processConfig, false)

	if err != nil {
		return -1, err
	}

	// Start the command running in the container.
	pid, stdin, stdout, stderr, rc, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !processConfig.Tty, createProcessParms)
	if err != nil {
		// TODO Windows: TP4 Workaround. In Hyper-V containers, there is a limitation
		// of one exec per container. This should be fixed post TP4. CreateProcessInComputeSystem
		// will return a specific error which we handle here to give a good error message
		// back to the user instead of an inactionable "An invalid argument was supplied"
		if rc == hcsshim.Win32InvalidArgument {
			return -1, fmt.Errorf("The limit of docker execs per Hyper-V container has been exceeded")
		}
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return -1, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	// Note NOT c.ProcessConfig.Tty
	if processConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	processConfig.Terminal = term

	// Invoke the start callback
	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}

	if exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite); err != nil {
		if errno == hcsshim.Win32PipeHasBeenEnded {
			logrus.Debugf("Exiting Run() after WaitForProcessInComputeSystem failed with recognised error 0x%X", errno)
			return hcsshim.WaitErrExecFailed, nil
		}
		logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): 0x%X %s", errno, err)
		return -1, err
	}

	logrus.Debugln("Exiting Run()", c.ID)
	return int(exitCode), nil
}
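Example #11 delegates command-line construction to a createCommandLine helper that is not shown. Examples #13 and #17 below build the command line inline, which suggests a minimal sketch along these lines (the boolean parameter is an assumption, presumably selecting escaping behavior, and is ignored in this sketch):

// Sketch only: join the entrypoint and arguments with spaces, mirroring the
// inline logic in examples #13 and #17.
func createCommandLine(processConfig *execdriver.ProcessConfig, alreadyEscaped bool) (string, error) {
	if processConfig.Entrypoint == "" {
		return "", errors.New("No entrypoint specified")
	}
	commandLine := processConfig.Entrypoint
	for _, arg := range processConfig.Arguments {
		commandLine += " " + arg
	}
	return commandLine, nil
}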
Code example #12
File: container_windows.go Project: KedaChe/docker
func populateCommand(ctx context.Context, c *Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			en.Interface = &execdriver.NetworkInterface{
				MacAddress:   c.Config.MacAddress,
				Bridge:       c.daemon.configStore.Bridge.VirtualSwitchName,
				PortBindings: c.hostConfig.PortBindings,

				// TODO Windows. Include IPAddress. There already is a
				// property IPAddress on execDrive.CommonNetworkInterface,
				// but there is no CLI option in docker to pass through
				// an IPAddress on docker run.
			}
		}
	default:
		return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.hostConfig.NetworkMode)
	}

	pid := &execdriver.Pid{}

	// TODO Windows. This can probably be factored out.
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CPUShares: c.hostConfig.CPUShares,
	}

	// TODO Windows. Further refactoring required (privileged/user)
	processConfig := execdriver.ProcessConfig{
		Privileged:  c.hostConfig.Privileged,
		Entrypoint:  c.Path,
		Arguments:   c.Args,
		Tty:         c.Config.Tty,
		User:        c.Config.User,
		ConsoleSize: c.hostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := c.daemon.graph.Get(c.ImageID)
	if err != nil {
		return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err)
	}
	for i := img; i != nil && err == nil; i, err = c.daemon.graph.GetParent(i) {
		lp, err := c.daemon.driver.Get(i.ID, "")
		if err != nil {
			return derr.ErrorCodeGetLayer.WithArgs(c.daemon.driver.String(), i.ID, err)
		}
		layerPaths = append(layerPaths, lp)
		err = c.daemon.driver.Put(i.ID)
		if err != nil {
			return derr.ErrorCodePutLayer.WithArgs(c.daemon.driver.String(), i.ID, err)
		}
	}
	m, err := c.daemon.driver.GetMetadata(c.ID)
	if err != nil {
		return derr.ErrorCodeGetLayerMetadata.WithArgs(err)
	}
	layerFolder := m["dir"]

	// TODO Windows: Factor out remainder of unused fields.
	c.command = &execdriver.Command{
		ID:             c.ID,
		Rootfs:         c.rootfsPath(),
		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
		InitPath:       "/.dockerinit",
		WorkingDir:     c.Config.WorkingDir,
		Network:        en,
		Pid:            pid,
		Resources:      resources,
		CapAdd:         c.hostConfig.CapAdd.Slice(),
		CapDrop:        c.hostConfig.CapDrop.Slice(),
		ProcessConfig:  processConfig,
		ProcessLabel:   c.getProcessLabel(),
		MountLabel:     c.getMountLabel(),
		FirstStart:     !c.HasBeenStartedBefore,
		LayerFolder:    layerFolder,
		LayerPaths:     layerPaths,
	}

	return nil
}
Code example #13
File: exec.go Project: ch3lo/docker
// Exec implements the exec driver Driver interface.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {

	var (
		term     execdriver.Terminal
		err      error
		exitCode int32
	)

	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID)
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   processConfig.Tty, // Note NOT c.ProcessConfig.Tty
		WorkingDirectory: c.WorkingDir,
	}

	// Configure the environment for the process // Note NOT c.ProcessConfig.Env
	createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env)

	// While this should get caught earlier, just in case, validate that we
	// have something to run.
	if processConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return -1, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = processConfig.Entrypoint
	for _, arg := range processConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugln("commandLine: ", createProcessParms.CommandLine)

	// Start the command running in the container.
	pid, stdin, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !processConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return -1, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	// Note NOT c.ProcessConfig.Tty
	if processConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	processConfig.Terminal = term

	// Invoke the start callback
	if startCallback != nil {
		startCallback(&c.ProcessConfig, int(pid))
	}

	if exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid); err != nil {
		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
		return -1, err
	}

	// TODO Windows - Do something with this exit code
	logrus.Debugln("Exiting Run() with ExitCode 0", c.ID)
	return int(exitCode), nil
}
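The Windows exec examples (#11, #13, #17) pass processConfig.Env through setupEnvironmentVariables before handing it to hcsshim. Its body is not part of these excerpts; a plausible sketch, assuming the helper converts the KEY=VALUE slice into the map form the HCS API expects:

// Sketch only: split each KEY=VALUE entry and collect the pairs into a map.
func setupEnvironmentVariables(env []string) map[string]string {
	m := make(map[string]string)
	for _, e := range env {
		if parts := strings.SplitN(e, "=", 2); len(parts) == 2 {
			m[parts[0]] = parts[1]
		}
	}
	return m
}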
Code example #14
File: exec.go Project: yingmsky/docker
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	var term execdriver.Terminal

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: processConfig.User,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}

	if processConfig.Tty {
		config := active.Config()
		rootuid, err := config.HostUID()
		if err != nil {
			return -1, err
		}
		cons, err := p.NewConsole(rootuid)
		if err != nil {
			return -1, err
		}
		// Check the console error in place: err here is scoped to this
		// block, so a check outside the if/else would silently miss a
		// failure from NewTtyConsole.
		term, err = NewTtyConsole(cons, pipes, rootuid)
		if err != nil {
			return -1, err
		}
	} else {
		p.Stdout = pipes.Stdout
		p.Stderr = pipes.Stderr
		p.Stdin = pipes.Stdin
		term = &execdriver.StdConsole{}
	}

	processConfig.Terminal = term

	if err := active.Start(p); err != nil {
		return -1, err
	}

	if startCallback != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}
		startCallback(&c.ProcessConfig, pid)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
Code example #15
File: container.go Project: jorik041/docker
func populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Mtu:       c.daemon.config.Mtu,
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "host":
		en.HostNetworking = true
	case "bridge", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			network := c.NetworkSettings
			en.Interface = &execdriver.NetworkInterface{
				Gateway:              network.Gateway,
				Bridge:               network.Bridge,
				IPAddress:            network.IPAddress,
				IPPrefixLen:          network.IPPrefixLen,
				MacAddress:           network.MacAddress,
				LinkLocalIPv6Address: network.LinkLocalIPv6Address,
				GlobalIPv6Address:    network.GlobalIPv6Address,
				GlobalIPv6PrefixLen:  network.GlobalIPv6PrefixLen,
				IPv6Gateway:          network.IPv6Gateway,
			}
		}
	case "container":
		nc, err := c.getNetworkedContainer()
		if err != nil {
			return err
		}
		en.ContainerID = nc.ID
	default:
		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
	}

	ipc := &execdriver.Ipc{}

	if c.hostConfig.IpcMode.IsContainer() {
		ic, err := c.getIpcContainer()
		if err != nil {
			return err
		}
		ipc.ContainerID = ic.ID
	} else {
		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
	}

	pid := &execdriver.Pid{}
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	// Build lists of devices allowed and created within the container.
	userSpecifiedDevices := make([]*configs.Device, len(c.hostConfig.Devices))
	for i, deviceMapping := range c.hostConfig.Devices {
		device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
		if err != nil {
			return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
		}
		device.Path = deviceMapping.PathInContainer
		userSpecifiedDevices[i] = device
	}
	allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)

	autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)

	// TODO: this can be removed after lxc-conf is fully deprecated
	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
	if err != nil {
		return err
	}

	var rlimits []*ulimit.Rlimit
	ulimits := c.hostConfig.Ulimits

	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]*ulimit.Ulimit)
	for _, ul := range ulimits {
		ulIdx[ul.Name] = ul
	}
	for name, ul := range c.daemon.config.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}

	for _, limit := range ulimits {
		rl, err := limit.GetRlimit()
		if err != nil {
			return err
		}
		rlimits = append(rlimits, rl)
	}

	resources := &execdriver.Resources{
		Memory:     c.hostConfig.Memory,
		MemorySwap: c.hostConfig.MemorySwap,
		CpuShares:  c.hostConfig.CpuShares,
		CpusetCpus: c.hostConfig.CpusetCpus,
		Rlimits:    rlimits,
	}

	processConfig := execdriver.ProcessConfig{
		Privileged: c.hostConfig.Privileged,
		Entrypoint: c.Path,
		Arguments:  c.Args,
		Tty:        c.Config.Tty,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	c.command = &execdriver.Command{
		ID:                 c.ID,
		Rootfs:             c.RootfsPath(),
		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
		InitPath:           "/.dockerinit",
		WorkingDir:         c.Config.WorkingDir,
		Network:            en,
		Ipc:                ipc,
		Pid:                pid,
		Resources:          resources,
		AllowedDevices:     allowedDevices,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.hostConfig.CapAdd,
		CapDrop:            c.hostConfig.CapDrop,
		ProcessConfig:      processConfig,
		ProcessLabel:       c.GetProcessLabel(),
		MountLabel:         c.GetMountLabel(),
		LxcConfig:          lxcConfig,
		AppArmorProfile:    c.AppArmorProfile,
		CgroupParent:       c.hostConfig.CgroupParent,
	}

	return nil
}
Code example #16
File: container_windows.go Project: sergiobuj/docker
func (daemon *Daemon) populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			en.Interface = &execdriver.NetworkInterface{
				MacAddress:   c.Config.MacAddress,
				Bridge:       daemon.configStore.Bridge.VirtualSwitchName,
				PortBindings: c.hostConfig.PortBindings,

				// TODO Windows. Include IPAddress. There already is a
				// property IPAddress on execDrive.CommonNetworkInterface,
				// but there is no CLI option in docker to pass through
				// an IPAddress on docker run.
			}
		}
	default:
		return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.hostConfig.NetworkMode)
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.hostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.hostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.graph.Get(c.ImageID)
	if err != nil {
		return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err)
	}
	for i := img; i != nil && err == nil; i, err = daemon.graph.GetParent(i) {
		lp, err := daemon.driver.Get(i.ID, "")
		if err != nil {
			return derr.ErrorCodeGetLayer.WithArgs(daemon.driver.String(), i.ID, err)
		}
		layerPaths = append(layerPaths, lp)
		err = daemon.driver.Put(i.ID)
		if err != nil {
			return derr.ErrorCodePutLayer.WithArgs(daemon.driver.String(), i.ID, err)
		}
	}
	m, err := daemon.driver.GetMetadata(c.ID)
	if err != nil {
		return derr.ErrorCodeGetLayerMetadata.WithArgs(err)
	}
	layerFolder := m["dir"]

	c.command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.rootfsPath(),
			InitPath:      "/.dockerinit",
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.getMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.getProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   c.hostConfig.Isolation,
	}

	return nil
}
Code example #17
File: exec.go Project: newdeamon/docker
// Exec implements the exec driver Driver interface.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {

	var (
		term     execdriver.Terminal
		err      error
		exitCode int32
		errno    uint32
	)

	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID)
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   processConfig.Tty, // Note NOT c.ProcessConfig.Tty
		WorkingDirectory: c.WorkingDir,
	}

	// Configure the environment for the process // Note NOT c.ProcessConfig.Env
	createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env)

	// While this should get caught earlier, just in case, validate that we
	// have something to run.
	if processConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return -1, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = processConfig.Entrypoint
	for _, arg := range processConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugln("commandLine: ", createProcessParms.CommandLine)

	// Start the command running in the container.
	pid, stdin, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !processConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return -1, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	// Note NOT c.ProcessConfig.Tty
	if processConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	processConfig.Terminal = term

	// Invoke the start callback
	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}

	if exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite); err != nil {
		if errno == hcsshim.Win32PipeHasBeenEnded {
			logrus.Debugf("Exiting Run() after WaitForProcessInComputeSystem failed with recognised error 0x%X", errno)
			return hcsshim.WaitErrExecFailed, nil
		}
		logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): 0x%X %s", errno, err)
		return -1, err
	}

	logrus.Debugln("Exiting Run()", c.ID)
	return int(exitCode), nil
}
Code example #18
File: container.go Project: GetSerene/docker
func populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Mtu:       c.daemon.config.Mtu,
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "host":
		en.HostNetworking = true
	case "bridge", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			network := c.NetworkSettings
			en.Interface = &execdriver.NetworkInterface{
				Gateway:     network.Gateway,
				Bridge:      network.Bridge,
				IPAddress:   network.IPAddress,
				IPPrefixLen: network.IPPrefixLen,
			}
		}
	case "container":
		nc, err := c.getNetworkedContainer()
		if err != nil {
			return err
		}
		en.ContainerID = nc.ID
	default:
		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
	}

	// Build lists of devices allowed and created within the container.
	userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices))
	for i, deviceMapping := range c.hostConfig.Devices {
		device, err := devices.GetDevice(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
		if err != nil {
			return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
		}
		device.Path = deviceMapping.PathInContainer
		userSpecifiedDevices[i] = device
	}
	allowedDevices := append(devices.DefaultAllowedDevices, userSpecifiedDevices...)

	autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...)

	// TODO: this can be removed after lxc-conf is fully deprecated
	lxcConfig := mergeLxcConfIntoOptions(c.hostConfig)

	resources := &execdriver.Resources{
		Memory:     c.Config.Memory,
		MemorySwap: c.Config.MemorySwap,
		CpuShares:  c.Config.CpuShares,
		Cpuset:     c.Config.Cpuset,
	}

	processConfig := execdriver.ProcessConfig{
		Privileged: c.hostConfig.Privileged,
		Entrypoint: c.Path,
		Arguments:  c.Args,
		Tty:        c.Config.Tty,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	c.command = &execdriver.Command{
		ID:                 c.ID,
		Rootfs:             c.RootfsPath(),
		InitPath:           "/.dockerinit",
		WorkingDir:         c.Config.WorkingDir,
		Network:            en,
		Resources:          resources,
		AllowedDevices:     allowedDevices,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.hostConfig.CapAdd,
		CapDrop:            c.hostConfig.CapDrop,
		ProcessConfig:      processConfig,
		ProcessLabel:       c.GetProcessLabel(),
		MountLabel:         c.GetMountLabel(),
		LxcConfig:          lxcConfig,
		AppArmorProfile:    c.AppArmorProfile,
	}

	return nil
}
Code example #19
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			en.Interface = &execdriver.NetworkInterface{
				MacAddress:   c.Config.MacAddress,
				Bridge:       daemon.configStore.bridgeConfig.VirtualSwitchName,
				PortBindings: c.HostConfig.PortBindings,

				// TODO Windows. Include IPAddress. There already is a
				// property IPAddress on execDrive.CommonNetworkInterface,
				// but there is no CLI option in docker to pass through
				// an IPAddress on docker run.
			}
		}
	default:
		return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.HostConfig.NetworkMode)
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.HostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.HostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err)
	}

	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return derr.ErrorCodeGetLayer.WithArgs(err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}

	m, err := c.RWLayer.Metadata()
	if err != nil {
		return derr.ErrorCodeGetLayerMetadata.WithArgs(err)
	}
	layerFolder := m["dir"]

	var hvPartition bool
	// Work out the isolation (whether it is a hypervisor partition)
	if c.HostConfig.Isolation.IsDefault() {
		// Not specified by caller. Take daemon default
		hvPartition = windows.DefaultIsolation.IsHyperV()
	} else {
		// Take value specified by caller
		hvPartition = c.HostConfig.Isolation.IsHyperV()
	}

	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.BaseFS,
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.GetMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   string(c.HostConfig.Isolation),
		ArgsEscaped: c.Config.ArgsEscaped,
		HvPartition: hvPartition,
	}

	return nil
}
Code example #20
File: exec.go Project: rajasec/docker
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {

	var (
		inListen, outListen, errListen     *npipe.PipeListener
		term                               execdriver.Terminal
		err                                error
		randomID                           string = stringid.GenerateNonCryptoID()
		serverPipeFormat, clientPipeFormat string
		pid                                uint32
		exitCode                           int32
	)

	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID)
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   processConfig.Tty, // Note NOT c.ProcessConfig.Tty
		WorkingDirectory: c.WorkingDir,
	}

	// Configure the environment for the process // Note NOT c.ProcessConfig.Env
	createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env)

	// We use another unique ID here for each exec instance otherwise it
	// may conflict with the pipe name being used by RUN.

	// We use a different pipe name between real and dummy mode in the HCS
	if dummyMode {
		clientPipeFormat = `\\.\pipe\docker-exec-%[1]s-%[2]s-%[3]s`
		serverPipeFormat = clientPipeFormat
	} else {
		clientPipeFormat = `\\.\pipe\docker-exec-%[2]s-%[3]s`
		serverPipeFormat = `\\.\Containers\%[1]s\Device\NamedPipe\docker-exec-%[2]s-%[3]s`
	}

	// Connect stdin
	if pipes.Stdin != nil {
		stdInPipe := fmt.Sprintf(serverPipeFormat, c.ID, randomID, "stdin")
		createProcessParms.StdInPipe = fmt.Sprintf(clientPipeFormat, c.ID, randomID, "stdin")

		// Listen on the named pipe
		inListen, err = npipe.Listen(stdInPipe)
		if err != nil {
			logrus.Errorf("stdin failed to listen on %s %s ", stdInPipe, err)
			return -1, err
		}
		defer inListen.Close()

		// Launch a goroutine to do the accept. We do this so that we can
		// cause an otherwise blocking goroutine to gracefully close when
		// the caller (us) closes the listener
		go stdinAccept(inListen, stdInPipe, pipes.Stdin)
	}

	// Connect stdout
	stdOutPipe := fmt.Sprintf(serverPipeFormat, c.ID, randomID, "stdout")
	createProcessParms.StdOutPipe = fmt.Sprintf(clientPipeFormat, c.ID, randomID, "stdout")

	outListen, err = npipe.Listen(stdOutPipe)
	if err != nil {
		logrus.Errorf("stdout failed to listen on %s %s", stdOutPipe, err)
		return -1, err
	}
	defer outListen.Close()
	go stdouterrAccept(outListen, stdOutPipe, pipes.Stdout)

	// No stderr on TTY. Note NOT c.ProcessConfig.Tty
	if !processConfig.Tty {
		// Connect stderr
		stdErrPipe := fmt.Sprintf(serverPipeFormat, c.ID, randomID, "stderr")
		createProcessParms.StdErrPipe = fmt.Sprintf(clientPipeFormat, c.ID, randomID, "stderr")

		errListen, err = npipe.Listen(stdErrPipe)
		if err != nil {
			logrus.Errorf("Stderr failed to listen on %s %s", stdErrPipe, err)
			return -1, err
		}
		defer errListen.Close()
		go stdouterrAccept(errListen, stdErrPipe, pipes.Stderr)
	}

	// While this should get caught earlier, just in case, validate that we
	// have something to run.
	if processConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return -1, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = processConfig.Entrypoint
	for _, arg := range processConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugln("commandLine: ", createProcessParms.CommandLine)

	// Start the command running in the container.
	pid, err = hcsshim.CreateProcessInComputeSystem(c.ID, createProcessParms)

	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return -1, err
	}

	// Note NOT c.ProcessConfig.Tty
	if processConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	processConfig.Terminal = term

	// Invoke the start callback
	if startCallback != nil {
		startCallback(&c.ProcessConfig, int(pid))
	}

	if exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid); err != nil {
		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
		return -1, err
	}

	// TODO Windows - Do something with this exit code
	logrus.Debugln("Exiting Run() with ExitCode 0", c.ID)
	return int(exitCode), nil
}
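Example #20 services its named pipes with stdinAccept and stdouterrAccept goroutines that are not part of the excerpt. A hedged sketch of the assumed behavior: accept a single connection on the listener, copy in the relevant direction, and close when the copy finishes:

// Sketch only: accept one connection for stdin and copy the caller's stdin
// into it.
func stdinAccept(l *npipe.PipeListener, pipeName string, copyfrom io.ReadCloser) {
	conn, err := l.Accept()
	if err != nil {
		logrus.Errorf("stdinAccept: accept on %s failed: %s", pipeName, err)
		return
	}
	defer conn.Close()
	io.Copy(conn, copyfrom)
}

// Sketch only: accept one connection for stdout or stderr and copy it back
// to the caller.
func stdouterrAccept(l *npipe.PipeListener, pipeName string, copyto io.Writer) {
	conn, err := l.Accept()
	if err != nil {
		logrus.Errorf("stdouterrAccept: accept on %s failed: %s", pipeName, err)
		return
	}
	defer conn.Close()
	io.Copy(copyto, conn)
}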
Code example #21
func populateCommand(c *Container, env []string) error {
	var en *execdriver.Network
	if !c.Config.NetworkDisabled {
		en = &execdriver.Network{}
		if !c.daemon.execDriver.SupportsHooks() || c.hostConfig.NetworkMode.IsHost() {
			en.NamespacePath = c.NetworkSettings.SandboxKey
		}

		parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
		if parts[0] == "container" {
			nc, err := c.getNetworkedContainer()
			if err != nil {
				return err
			}
			en.ContainerID = nc.ID
		}
	}

	ipc := &execdriver.Ipc{}

	if c.hostConfig.IpcMode.IsContainer() {
		ic, err := c.getIpcContainer()
		if err != nil {
			return err
		}
		ipc.ContainerID = ic.ID
	} else {
		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
	}

	pid := &execdriver.Pid{}
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	uts := &execdriver.UTS{
		HostUTS: c.hostConfig.UTSMode.IsHost(),
	}

	// Build lists of devices allowed and created within the container.
	var userSpecifiedDevices []*configs.Device
	for _, deviceMapping := range c.hostConfig.Devices {
		devs, err := getDevicesFromPath(deviceMapping)
		if err != nil {
			return err
		}

		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
	}

	allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)

	autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)

	// TODO: this can be removed after lxc-conf is fully deprecated
	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
	if err != nil {
		return err
	}

	var rlimits []*ulimit.Rlimit
	ulimits := c.hostConfig.Ulimits

	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]*ulimit.Ulimit)
	for _, ul := range ulimits {
		ulIdx[ul.Name] = ul
	}
	for name, ul := range c.daemon.configStore.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}

	for _, limit := range ulimits {
		rl, err := limit.GetRlimit()
		if err != nil {
			return err
		}
		rlimits = append(rlimits, rl)
	}

	resources := &execdriver.Resources{
		Memory:           c.hostConfig.Memory,
		MemorySwap:       c.hostConfig.MemorySwap,
		KernelMemory:     c.hostConfig.KernelMemory,
		CPUShares:        c.hostConfig.CPUShares,
		CpusetCpus:       c.hostConfig.CpusetCpus,
		CpusetMems:       c.hostConfig.CpusetMems,
		CPUPeriod:        c.hostConfig.CPUPeriod,
		CPUQuota:         c.hostConfig.CPUQuota,
		BlkioWeight:      c.hostConfig.BlkioWeight,
		BlkioReadLimit:   constructBlkioArgs(c.hostConfig.Binds, c.hostConfig.BlkioReadLimit),
		Rlimits:          rlimits,
		OomKillDisable:   c.hostConfig.OomKillDisable,
		MemorySwappiness: -1,
	}

	if c.hostConfig.MemorySwappiness != nil {
		resources.MemorySwappiness = *c.hostConfig.MemorySwappiness
	}

	processConfig := execdriver.ProcessConfig{
		Privileged: c.hostConfig.Privileged,
		Entrypoint: c.Path,
		Arguments:  c.Args,
		Tty:        c.Config.Tty,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	c.command = &execdriver.Command{
		ID:                 c.ID,
		Rootfs:             c.rootfsPath(),
		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
		InitPath:           "/.dockerinit",
		WorkingDir:         c.Config.WorkingDir,
		Network:            en,
		Ipc:                ipc,
		Pid:                pid,
		UTS:                uts,
		Resources:          resources,
		AllowedDevices:     allowedDevices,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.hostConfig.CapAdd.Slice(),
		CapDrop:            c.hostConfig.CapDrop.Slice(),
		GroupAdd:           c.hostConfig.GroupAdd,
		ProcessConfig:      processConfig,
		ProcessLabel:       c.getProcessLabel(),
		MountLabel:         c.getMountLabel(),
		LxcConfig:          lxcConfig,
		AppArmorProfile:    c.AppArmorProfile,
		CgroupParent:       c.hostConfig.CgroupParent,
	}

	return nil
}
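Examples #21 and #25 combine the default device lists with user-specified devices via mergeDevices, which these excerpts do not define. A plausible sketch, assuming user-specified devices take precedence over defaults that target the same in-container path:

// Sketch only: drop any default whose in-container path is overridden by a
// user-specified device, then append the user-specified ones.
func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device {
	paths := make(map[string]struct{})
	for _, d := range userDevices {
		paths[d.Path] = struct{}{}
	}
	var merged []*configs.Device
	for _, d := range defaultDevices {
		if _, ok := paths[d.Path]; !ok {
			merged = append(merged, d)
		}
	}
	return append(merged, userDevices...)
}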
Code example #22
File: driver.go Project: lblackstone/docker
func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes, wg *sync.WaitGroup) ([]io.WriteCloser, error) {

	writers := []io.WriteCloser{}

	rootuid, err := container.HostUID()
	if err != nil {
		return writers, err
	}

	if processConfig.Tty {
		cons, err := p.NewConsole(rootuid)
		if err != nil {
			return writers, err
		}
		term, err := NewTtyConsole(cons, pipes)
		if err != nil {
			return writers, err
		}
		processConfig.Terminal = term
		return writers, nil
	}
	// not a tty--set up stdio pipes
	term := &execdriver.StdConsole{}
	processConfig.Terminal = term

	// if we are not in a user namespace, there is no reason to go through
	// the hassle of setting up os-level pipes with proper (remapped) ownership
	// so we will do the prior shortcut for non-userns containers
	if rootuid == 0 {
		p.Stdout = pipes.Stdout
		p.Stderr = pipes.Stderr

		r, w, err := os.Pipe()
		if err != nil {
			return writers, err
		}
		if pipes.Stdin != nil {
			go func() {
				io.Copy(w, pipes.Stdin)
				w.Close()
			}()
			p.Stdin = r
		}
		return writers, nil
	}

	// if we have user namespaces enabled (rootuid != 0), we will set
	// up os pipes for stderr, stdout, stdin so we can chown them to
	// the proper ownership to allow for proper access to the underlying
	// fds
	var fds []uintptr

	copyPipes := func(out io.Writer, in io.ReadCloser) {
		defer wg.Done()
		io.Copy(out, in)
		in.Close()
	}

	//setup stdout
	r, w, err := os.Pipe()
	if err != nil {
		w.Close()
		return writers, err
	}
	writers = append(writers, w)
	fds = append(fds, r.Fd(), w.Fd())
	if pipes.Stdout != nil {
		wg.Add(1)
		go copyPipes(pipes.Stdout, r)
	}
	term.Closers = append(term.Closers, r)
	p.Stdout = w

	//setup stderr
	r, w, err = os.Pipe()
	if err != nil {
		w.Close()
		return writers, err
	}
	writers = append(writers, w)
	fds = append(fds, r.Fd(), w.Fd())
	if pipes.Stderr != nil {
		wg.Add(1)
		go copyPipes(pipes.Stderr, r)
	}
	term.Closers = append(term.Closers, r)
	p.Stderr = w

	//setup stdin
	r, w, err = os.Pipe()
	if err != nil {
		r.Close()
		return writers, err
	}
	fds = append(fds, r.Fd(), w.Fd())
	if pipes.Stdin != nil {
		go func() {
			io.Copy(w, pipes.Stdin)
			w.Close()
		}()
		p.Stdin = r
	}
	for _, fd := range fds {
		if err := syscall.Fchown(int(fd), rootuid, rootuid); err != nil {
			return writers, fmt.Errorf("Failed to chown pipes fd: %v", err)
		}
	}
	return writers, nil
}
Code example #23
File: driver.go Project: previousnext/kube-ingress
func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes) error {

	rootuid, err := container.HostUID()
	if err != nil {
		return err
	}

	if processConfig.Tty {
		cons, err := p.NewConsole(rootuid)
		if err != nil {
			return err
		}
		term, err := NewTtyConsole(cons, pipes)
		if err != nil {
			return err
		}
		processConfig.Terminal = term
		return nil
	}
	// not a tty--set up stdio pipes
	term := &execdriver.StdConsole{}
	processConfig.Terminal = term

	// if we are not in a user namespace, there is no reason to go through
	// the hassle of setting up os-level pipes with proper (remapped) ownership
	// so we will do the prior shortcut for non-userns containers
	if rootuid == 0 {
		p.Stdout = pipes.Stdout
		p.Stderr = pipes.Stderr

		r, w, err := os.Pipe()
		if err != nil {
			return err
		}
		if pipes.Stdin != nil {
			go func() {
				io.Copy(w, pipes.Stdin)
				w.Close()
			}()
			p.Stdin = r
		}
		return nil
	}

	// if we have user namespaces enabled (rootuid != 0), we will set
	// up os pipes for stderr, stdout, stdin so we can chown them to
	// the proper ownership to allow for proper access to the underlying
	// fds
	var fds []int

	//setup stdout
	r, w, err := os.Pipe()
	if err != nil {
		return err
	}
	fds = append(fds, int(r.Fd()), int(w.Fd()))
	if pipes.Stdout != nil {
		go io.Copy(pipes.Stdout, r)
	}
	term.Closers = append(term.Closers, r)
	p.Stdout = w

	//setup stderr
	r, w, err = os.Pipe()
	if err != nil {
		return err
	}
	fds = append(fds, int(r.Fd()), int(w.Fd()))
	if pipes.Stderr != nil {
		go io.Copy(pipes.Stderr, r)
	}
	term.Closers = append(term.Closers, r)
	p.Stderr = w

	//setup stdin
	r, w, err = os.Pipe()
	if err != nil {
		return err
	}
	fds = append(fds, int(r.Fd()), int(w.Fd()))
	if pipes.Stdin != nil {
		go func() {
			io.Copy(w, pipes.Stdin)
			w.Close()
		}()
		p.Stdin = r
	}
	for _, fd := range fds {
		if err := syscall.Fchown(fd, rootuid, rootuid); err != nil {
			return fmt.Errorf("Failed to chown pipes fd: %v", err)
		}
	}
	return nil
}
Code example #24
File: container_windows.go Project: JasonKou/docker
func populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Mtu:       c.daemon.config.Mtu,
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {

	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			network := c.NetworkSettings
			en.Interface = &execdriver.NetworkInterface{
				MacAddress: network.MacAddress,
				Bridge:     c.daemon.config.Bridge.VirtualSwitchName,
			}
		}
	default:
		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
	}

	pid := &execdriver.Pid{}

	// TODO Windows. This can probably be factored out.
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	// TODO Windows. Resource controls to be implemented later.
	resources := &execdriver.Resources{}

	// TODO Windows. Further refactoring required (privileged/user)
	processConfig := execdriver.ProcessConfig{
		Privileged:  c.hostConfig.Privileged,
		Entrypoint:  c.Path,
		Arguments:   c.Args,
		Tty:         c.Config.Tty,
		User:        c.Config.User,
		ConsoleSize: c.hostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerFolder string
	var layerPaths []string

	// The following is specific to the Windows driver. We do this to
	// enable VFS to continue operating for development purposes.
	if wd, ok := c.daemon.driver.(*windows.WindowsGraphDriver); ok {
		var err error
		var img *image.Image
		var ids []string

		if img, err = c.daemon.graph.Get(c.ImageID); err != nil {
			return fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
		}
		if ids, err = c.daemon.graph.ParentLayerIds(img); err != nil {
			return fmt.Errorf("Failed to get parentlayer ids %s", img.ID)
		}
		layerPaths = wd.LayerIdsToPaths(ids)
		layerFolder = filepath.Join(wd.Info().HomeDir, filepath.Base(c.ID))
	}

	// TODO Windows: Factor out remainder of unused fields.
	c.command = &execdriver.Command{
		ID:             c.ID,
		Rootfs:         c.RootfsPath(),
		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
		InitPath:       "/.dockerinit",
		WorkingDir:     c.Config.WorkingDir,
		Network:        en,
		Pid:            pid,
		Resources:      resources,
		CapAdd:         c.hostConfig.CapAdd.Slice(),
		CapDrop:        c.hostConfig.CapDrop.Slice(),
		ProcessConfig:  processConfig,
		ProcessLabel:   c.GetProcessLabel(),
		MountLabel:     c.GetMountLabel(),
		FirstStart:     !c.HasBeenStartedBefore,
		LayerFolder:    layerFolder,
		LayerPaths:     layerPaths,
	}

	return nil
}
Code example #25
File: container_unix.go Project: RockaLabs/docker
func (daemon *Daemon) populateCommand(c *Container, env []string) error {
	var en *execdriver.Network
	if !c.Config.NetworkDisabled {
		en = &execdriver.Network{}
		if !daemon.execDriver.SupportsHooks() || c.hostConfig.NetworkMode.IsHost() {
			en.NamespacePath = c.NetworkSettings.SandboxKey
		}

		if c.hostConfig.NetworkMode.IsContainer() {
			nc, err := daemon.getNetworkedContainer(c.ID, c.hostConfig.NetworkMode.ConnectedContainer())
			if err != nil {
				return err
			}
			en.ContainerID = nc.ID
		}
	}

	ipc := &execdriver.Ipc{}
	var err error
	c.ShmPath, err = c.shmPath()
	if err != nil {
		return err
	}

	c.MqueuePath, err = c.mqueuePath()
	if err != nil {
		return err
	}

	if c.hostConfig.IpcMode.IsContainer() {
		ic, err := daemon.getIpcContainer(c)
		if err != nil {
			return err
		}
		ipc.ContainerID = ic.ID
		c.ShmPath = ic.ShmPath
		c.MqueuePath = ic.MqueuePath
	} else {
		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
		if ipc.HostIpc {
			if _, err := os.Stat("/dev/shm"); err != nil {
				return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
			}
			if _, err := os.Stat("/dev/mqueue"); err != nil {
				return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host")
			}
			c.ShmPath = "/dev/shm"
			c.MqueuePath = "/dev/mqueue"
		}
	}

	pid := &execdriver.Pid{}
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	uts := &execdriver.UTS{
		HostUTS: c.hostConfig.UTSMode.IsHost(),
	}

	// Build lists of devices allowed and created within the container.
	var userSpecifiedDevices []*configs.Device
	for _, deviceMapping := range c.hostConfig.Devices {
		devs, err := getDevicesFromPath(deviceMapping)
		if err != nil {
			return err
		}

		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
	}

	allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)

	autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)

	var rlimits []*ulimit.Rlimit
	ulimits := c.hostConfig.Ulimits

	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]*ulimit.Ulimit)
	for _, ul := range ulimits {
		ulIdx[ul.Name] = ul
	}
	for name, ul := range daemon.configStore.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}

	weightDevices, err := getBlkioWeightDevices(c.hostConfig)
	if err != nil {
		return err
	}

	for _, limit := range ulimits {
		rl, err := limit.GetRlimit()
		if err != nil {
			return err
		}
		rlimits = append(rlimits, rl)
	}

	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			Memory:            c.hostConfig.Memory,
			MemoryReservation: c.hostConfig.MemoryReservation,
			CPUShares:         c.hostConfig.CPUShares,
			BlkioWeight:       c.hostConfig.BlkioWeight,
		},
		MemorySwap:        c.hostConfig.MemorySwap,
		KernelMemory:      c.hostConfig.KernelMemory,
		CpusetCpus:        c.hostConfig.CpusetCpus,
		CpusetMems:        c.hostConfig.CpusetMems,
		CPUPeriod:         c.hostConfig.CPUPeriod,
		CPUQuota:          c.hostConfig.CPUQuota,
		Rlimits:           rlimits,
		BlkioWeightDevice: weightDevices,
		OomKillDisable:    c.hostConfig.OomKillDisable,
		MemorySwappiness:  -1,
	}

	if c.hostConfig.MemorySwappiness != nil {
		resources.MemorySwappiness = *c.hostConfig.MemorySwappiness
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		Privileged: c.hostConfig.Privileged,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

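	// With user-namespace remapping enabled, record the host UID/GID that
	// root inside the container maps to; zero means no remapping is active.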
	remappedRoot := &execdriver.User{}
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if rootUID != 0 {
		remappedRoot.UID = rootUID
		remappedRoot.GID = rootGID
	}
	uidMap, gidMap := daemon.GetUIDGIDMaps()

	c.command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			InitPath:      "/.dockerinit",
			MountLabel:    c.getMountLabel(),
			Network:       en,
			ProcessConfig: processConfig,
			ProcessLabel:  c.getProcessLabel(),
			Rootfs:        c.rootfsPath(),
			Resources:     resources,
			WorkingDir:    c.Config.WorkingDir,
		},
		AllowedDevices:     allowedDevices,
		AppArmorProfile:    c.AppArmorProfile,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.hostConfig.CapAdd.Slice(),
		CapDrop:            c.hostConfig.CapDrop.Slice(),
		CgroupParent:       c.hostConfig.CgroupParent,
		GIDMapping:         gidMap,
		GroupAdd:           c.hostConfig.GroupAdd,
		Ipc:                ipc,
		Pid:                pid,
		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
		RemappedRoot:       remappedRoot,
		UIDMapping:         uidMap,
		UTS:                uts,
	}

	return nil
}
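
The ulimit merge in the middle of these populateCommand variants is worth isolating: limits set on the container always win, and daemon-wide defaults only fill in names the container did not set. A self-contained sketch of that merge follows; the Ulimit type here is a simplified stand-in for the one in Docker's ulimit/units package.

package main

import "fmt"

// Ulimit is a simplified stand-in for the units.Ulimit type used above.
type Ulimit struct {
	Name string
	Soft int64
	Hard int64
}

// mergeUlimits keeps every container-specified limit and appends daemon
// defaults only for names the container did not override, matching the
// index-then-append loop in populateCommand.
func mergeUlimits(container []*Ulimit, defaults map[string]*Ulimit) []*Ulimit {
	seen := make(map[string]bool, len(container))
	for _, ul := range container {
		seen[ul.Name] = true
	}
	merged := container
	for name, ul := range defaults {
		if !seen[name] {
			merged = append(merged, ul)
		}
	}
	return merged
}

func main() {
	container := []*Ulimit{{Name: "nofile", Soft: 1024, Hard: 2048}}
	defaults := map[string]*Ulimit{
		"nofile": {Name: "nofile", Soft: 64000, Hard: 64000}, // overridden by the container
		"nproc":  {Name: "nproc", Soft: 4096, Hard: 8192},    // filled in from the daemon
	}
	for _, ul := range mergeUlimits(container, defaults) {
		fmt.Printf("%s: soft=%d hard=%d\n", ul.Name, ul.Soft, ul.Hard)
	}
}

As in the original, the order in which defaults are appended is unspecified because it follows Go's map iteration order; only presence-by-name matters.
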
Code example #26
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	var en *execdriver.Network
	if !c.Config.NetworkDisabled {
		en = &execdriver.Network{}
		if !daemon.execDriver.SupportsHooks() || c.HostConfig.NetworkMode.IsHost() {
			en.NamespacePath = c.NetworkSettings.SandboxKey
		}

		if c.HostConfig.NetworkMode.IsContainer() {
			nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer())
			if err != nil {
				return err
			}
			en.ContainerID = nc.ID
		}
	}

	ipc := &execdriver.Ipc{}
	var err error
	c.ShmPath, err = c.ShmResourcePath()
	if err != nil {
		return err
	}

	c.MqueuePath, err = c.MqueueResourcePath()
	if err != nil {
		return err
	}

	if c.HostConfig.IpcMode.IsContainer() {
		ic, err := daemon.getIpcContainer(c)
		if err != nil {
			return err
		}
		ipc.ContainerID = ic.ID
		c.ShmPath = ic.ShmPath
		c.MqueuePath = ic.MqueuePath
	} else {
		ipc.HostIpc = c.HostConfig.IpcMode.IsHost()
		if ipc.HostIpc {
			if _, err := os.Stat("/dev/shm"); err != nil {
				return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
			}
			if _, err := os.Stat("/dev/mqueue"); err != nil {
				return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host")
			}
			c.ShmPath = "/dev/shm"
			c.MqueuePath = "/dev/mqueue"
		}
	}

	pid := &execdriver.Pid{}
	pid.HostPid = c.HostConfig.PidMode.IsHost()

	uts := &execdriver.UTS{
		HostUTS: c.HostConfig.UTSMode.IsHost(),
	}

	// Build lists of devices allowed and created within the container.
	var userSpecifiedDevices []*configs.Device
	for _, deviceMapping := range c.HostConfig.Devices {
		devs, err := getDevicesFromPath(deviceMapping)
		if err != nil {
			return err
		}

		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
	}

	allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)

	autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)

	var rlimits []*units.Rlimit
	ulimits := c.HostConfig.Ulimits

	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]*units.Ulimit)
	for _, ul := range ulimits {
		ulIdx[ul.Name] = ul
	}
	for name, ul := range daemon.configStore.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}

	weightDevices, err := getBlkioWeightDevices(c.HostConfig)
	if err != nil {
		return err
	}

	readBpsDevice, err := getBlkioReadBpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	writeBpsDevice, err := getBlkioWriteBpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	readIOpsDevice, err := getBlkioReadIOpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	writeIOpsDevice, err := getBlkioWriteIOpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	for _, limit := range ulimits {
		rl, err := limit.GetRlimit()
		if err != nil {
			return err
		}
		rlimits = append(rlimits, rl)
	}

	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			Memory:            c.HostConfig.Memory,
			MemoryReservation: c.HostConfig.MemoryReservation,
			CPUShares:         c.HostConfig.CPUShares,
			BlkioWeight:       c.HostConfig.BlkioWeight,
		},
		MemorySwap:                   c.HostConfig.MemorySwap,
		KernelMemory:                 c.HostConfig.KernelMemory,
		CpusetCpus:                   c.HostConfig.CpusetCpus,
		CpusetMems:                   c.HostConfig.CpusetMems,
		CPUPeriod:                    c.HostConfig.CPUPeriod,
		CPUQuota:                     c.HostConfig.CPUQuota,
		Rlimits:                      rlimits,
		BlkioWeightDevice:            weightDevices,
		BlkioThrottleReadBpsDevice:   readBpsDevice,
		BlkioThrottleWriteBpsDevice:  writeBpsDevice,
		BlkioThrottleReadIOpsDevice:  readIOpsDevice,
		BlkioThrottleWriteIOpsDevice: writeIOpsDevice,
		OomKillDisable:               *c.HostConfig.OomKillDisable,
		MemorySwappiness:             -1,
	}

	if c.HostConfig.MemorySwappiness != nil {
		resources.MemorySwappiness = *c.HostConfig.MemorySwappiness
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		Privileged: c.HostConfig.Privileged,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	remappedRoot := &execdriver.User{}
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if rootUID != 0 {
		remappedRoot.UID = rootUID
		remappedRoot.GID = rootGID
	}
	uidMap, gidMap := daemon.GetUIDGIDMaps()

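	// Without kernel seccomp support, custom profiles are an error; the
	// default profile silently degrades to unconfined, with a warning.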
	if !daemon.seccompEnabled {
		if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" {
			return fmt.Errorf("seccomp is not enabled in your kernel, cannot run a custom seccomp profile")
		}
		logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.")
		c.SeccompProfile = "unconfined"
	}

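	// Resolve the default cgroup parent: the daemon-wide setting wins;
	// otherwise infer it from the configured cgroup driver.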
	defaultCgroupParent := "/docker"
	if daemon.configStore.CgroupParent != "" {
		defaultCgroupParent = daemon.configStore.CgroupParent
	} else {
		for _, option := range daemon.configStore.ExecOptions {
			key, val, err := parsers.ParseKeyValueOpt(option)
			if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
				continue
			}
			if val == "systemd" {
				defaultCgroupParent = "system.slice"
			}
		}
	}
	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			InitPath:      "/.dockerinit",
			MountLabel:    c.GetMountLabel(),
			Network:       en,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
			Rootfs:        c.BaseFS,
			Resources:     resources,
			WorkingDir:    c.Config.WorkingDir,
		},
		AllowedDevices:     allowedDevices,
		AppArmorProfile:    c.AppArmorProfile,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.HostConfig.CapAdd.Slice(),
		CapDrop:            c.HostConfig.CapDrop.Slice(),
		CgroupParent:       defaultCgroupParent,
		GIDMapping:         gidMap,
		GroupAdd:           c.HostConfig.GroupAdd,
		Ipc:                ipc,
		OomScoreAdj:        c.HostConfig.OomScoreAdj,
		Pid:                pid,
		ReadonlyRootfs:     c.HostConfig.ReadonlyRootfs,
		RemappedRoot:       remappedRoot,
		SeccompProfile:     c.SeccompProfile,
		UIDMapping:         uidMap,
		UTS:                uts,
	}
	if c.HostConfig.CgroupParent != "" {
		c.Command.CgroupParent = c.HostConfig.CgroupParent
	}

	return nil
}
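
The cgroup-parent logic at the end of this example resolves in three steps: a hard-coded "/docker" default, overridden by the daemon-wide setting (or by "system.slice" when the exec options select the systemd cgroup driver), overridden in turn by any per-container setting. Distilled into a standalone helper below; parseKeyValueOpt is a simplified local version of the parsers.ParseKeyValueOpt helper used above, not the real function.

package main

import (
	"fmt"
	"strings"
)

// parseKeyValueOpt splits a "key=value" exec option; a simplified local
// version of the parsers helper used in the example.
func parseKeyValueOpt(opt string) (key, val string) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return strings.TrimSpace(opt), ""
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
}

// resolveCgroupParent mirrors the precedence in populateCommand:
// per-container setting > daemon setting > driver-dependent default.
func resolveCgroupParent(containerParent, daemonParent string, execOptions []string) string {
	if containerParent != "" {
		return containerParent
	}
	if daemonParent != "" {
		return daemonParent
	}
	parent := "/docker"
	for _, option := range execOptions {
		key, val := parseKeyValueOpt(option)
		if strings.EqualFold(key, "native.cgroupdriver") && val == "systemd" {
			parent = "system.slice" // systemd manages cgroups as slices
		}
	}
	return parent
}

func main() {
	fmt.Println(resolveCgroupParent("", "", nil))                                     // /docker
	fmt.Println(resolveCgroupParent("", "", []string{"native.cgroupdriver=systemd"})) // system.slice
	fmt.Println(resolveCgroupParent("/mygroup", "mydaemon", nil))                     // /mygroup
}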